[Midnightbsd-cvs] src [10136] trunk/sys/contrib/octeon-sdk: add
laffer1 at midnightbsd.org
Mon May 28 14:54:18 EDT 2018
Revision: 10136
http://svnweb.midnightbsd.org/src/?rev=10136
Author: laffer1
Date: 2018-05-28 14:54:17 -0400 (Mon, 28 May 2018)
Log Message:
-----------
add
Added Paths:
-----------
trunk/sys/contrib/octeon-sdk/
trunk/sys/contrib/octeon-sdk/cvmip.h
trunk/sys/contrib/octeon-sdk/cvmx-abi.h
trunk/sys/contrib/octeon-sdk/cvmx-access-native.h
trunk/sys/contrib/octeon-sdk/cvmx-access.h
trunk/sys/contrib/octeon-sdk/cvmx-address.h
trunk/sys/contrib/octeon-sdk/cvmx-agl-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-app-hotplug.c
trunk/sys/contrib/octeon-sdk/cvmx-app-hotplug.h
trunk/sys/contrib/octeon-sdk/cvmx-app-init-linux.c
trunk/sys/contrib/octeon-sdk/cvmx-app-init.c
trunk/sys/contrib/octeon-sdk/cvmx-app-init.h
trunk/sys/contrib/octeon-sdk/cvmx-asm.h
trunk/sys/contrib/octeon-sdk/cvmx-asx0-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-asxx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-atomic.h
trunk/sys/contrib/octeon-sdk/cvmx-bootloader.h
trunk/sys/contrib/octeon-sdk/cvmx-bootmem.c
trunk/sys/contrib/octeon-sdk/cvmx-bootmem.h
trunk/sys/contrib/octeon-sdk/cvmx-ciu-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-ciu2-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-clock.c
trunk/sys/contrib/octeon-sdk/cvmx-clock.h
trunk/sys/contrib/octeon-sdk/cvmx-cmd-queue.c
trunk/sys/contrib/octeon-sdk/cvmx-cmd-queue.h
trunk/sys/contrib/octeon-sdk/cvmx-cn3010-evb-hs5.c
trunk/sys/contrib/octeon-sdk/cvmx-cn3010-evb-hs5.h
trunk/sys/contrib/octeon-sdk/cvmx-compactflash.c
trunk/sys/contrib/octeon-sdk/cvmx-compactflash.h
trunk/sys/contrib/octeon-sdk/cvmx-core.c
trunk/sys/contrib/octeon-sdk/cvmx-core.h
trunk/sys/contrib/octeon-sdk/cvmx-coremask.c
trunk/sys/contrib/octeon-sdk/cvmx-coremask.h
trunk/sys/contrib/octeon-sdk/cvmx-crypto.c
trunk/sys/contrib/octeon-sdk/cvmx-crypto.h
trunk/sys/contrib/octeon-sdk/cvmx-csr-enums.h
trunk/sys/contrib/octeon-sdk/cvmx-csr-typedefs.h
trunk/sys/contrib/octeon-sdk/cvmx-csr.h
trunk/sys/contrib/octeon-sdk/cvmx-dbg-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-debug-handler.S
trunk/sys/contrib/octeon-sdk/cvmx-debug-remote.c
trunk/sys/contrib/octeon-sdk/cvmx-debug-uart.c
trunk/sys/contrib/octeon-sdk/cvmx-debug.c
trunk/sys/contrib/octeon-sdk/cvmx-debug.h
trunk/sys/contrib/octeon-sdk/cvmx-dfa-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-dfa.c
trunk/sys/contrib/octeon-sdk/cvmx-dfa.h
trunk/sys/contrib/octeon-sdk/cvmx-dfm-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-dma-engine.c
trunk/sys/contrib/octeon-sdk/cvmx-dma-engine.h
trunk/sys/contrib/octeon-sdk/cvmx-dpi-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-ebt3000.c
trunk/sys/contrib/octeon-sdk/cvmx-ebt3000.h
trunk/sys/contrib/octeon-sdk/cvmx-endor-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-eoi-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-fau.h
trunk/sys/contrib/octeon-sdk/cvmx-flash.c
trunk/sys/contrib/octeon-sdk/cvmx-flash.h
trunk/sys/contrib/octeon-sdk/cvmx-fpa-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-fpa.c
trunk/sys/contrib/octeon-sdk/cvmx-fpa.h
trunk/sys/contrib/octeon-sdk/cvmx-gmx.h
trunk/sys/contrib/octeon-sdk/cvmx-gmxx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-gpio-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-gpio.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-board.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-board.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-cfg.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-cfg.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-check-defines.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-errata.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-errata.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-fpa.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-fpa.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-ilk.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-ilk.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-jtag.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-jtag.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-loop.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-loop.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-npi.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-npi.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-rgmii.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-rgmii.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-sgmii.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-sgmii.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-spi.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-spi.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-srio.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-srio.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-util.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-util.h
trunk/sys/contrib/octeon-sdk/cvmx-helper-xaui.c
trunk/sys/contrib/octeon-sdk/cvmx-helper-xaui.h
trunk/sys/contrib/octeon-sdk/cvmx-helper.c
trunk/sys/contrib/octeon-sdk/cvmx-helper.h
trunk/sys/contrib/octeon-sdk/cvmx-hfa.c
trunk/sys/contrib/octeon-sdk/cvmx-hfa.h
trunk/sys/contrib/octeon-sdk/cvmx-higig.h
trunk/sys/contrib/octeon-sdk/cvmx-ilk-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-ilk.c
trunk/sys/contrib/octeon-sdk/cvmx-ilk.h
trunk/sys/contrib/octeon-sdk/cvmx-interrupt-handler.S
trunk/sys/contrib/octeon-sdk/cvmx-interrupt.c
trunk/sys/contrib/octeon-sdk/cvmx-interrupt.h
trunk/sys/contrib/octeon-sdk/cvmx-iob-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-iob1-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-ipd-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-ipd.c
trunk/sys/contrib/octeon-sdk/cvmx-ipd.h
trunk/sys/contrib/octeon-sdk/cvmx-ixf18201.c
trunk/sys/contrib/octeon-sdk/cvmx-ixf18201.h
trunk/sys/contrib/octeon-sdk/cvmx-key-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-key.h
trunk/sys/contrib/octeon-sdk/cvmx-l2c-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-l2c.c
trunk/sys/contrib/octeon-sdk/cvmx-l2c.h
trunk/sys/contrib/octeon-sdk/cvmx-l2d-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-l2t-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-led-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-llm.c
trunk/sys/contrib/octeon-sdk/cvmx-llm.h
trunk/sys/contrib/octeon-sdk/cvmx-lmcx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-log-arc.S
trunk/sys/contrib/octeon-sdk/cvmx-log.c
trunk/sys/contrib/octeon-sdk/cvmx-log.h
trunk/sys/contrib/octeon-sdk/cvmx-malloc/
trunk/sys/contrib/octeon-sdk/cvmx-malloc/README-malloc
trunk/sys/contrib/octeon-sdk/cvmx-malloc/arena.c
trunk/sys/contrib/octeon-sdk/cvmx-malloc/malloc.c
trunk/sys/contrib/octeon-sdk/cvmx-malloc/malloc.h
trunk/sys/contrib/octeon-sdk/cvmx-malloc/thread-m.h
trunk/sys/contrib/octeon-sdk/cvmx-malloc.h
trunk/sys/contrib/octeon-sdk/cvmx-mdio.h
trunk/sys/contrib/octeon-sdk/cvmx-mgmt-port.c
trunk/sys/contrib/octeon-sdk/cvmx-mgmt-port.h
trunk/sys/contrib/octeon-sdk/cvmx-mio-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-mixx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-mpi-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-nand.c
trunk/sys/contrib/octeon-sdk/cvmx-nand.h
trunk/sys/contrib/octeon-sdk/cvmx-ndf-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-npei-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-npi-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-npi.h
trunk/sys/contrib/octeon-sdk/cvmx-packet.h
trunk/sys/contrib/octeon-sdk/cvmx-pci-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pci.h
trunk/sys/contrib/octeon-sdk/cvmx-pcie.c
trunk/sys/contrib/octeon-sdk/cvmx-pcie.h
trunk/sys/contrib/octeon-sdk/cvmx-pcieepx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pciercx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pcm-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pcmx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pcsx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pcsxx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pemx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pescx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pexp-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pip-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pip.h
trunk/sys/contrib/octeon-sdk/cvmx-pko-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pko.c
trunk/sys/contrib/octeon-sdk/cvmx-pko.h
trunk/sys/contrib/octeon-sdk/cvmx-platform.h
trunk/sys/contrib/octeon-sdk/cvmx-pow-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-pow.c
trunk/sys/contrib/octeon-sdk/cvmx-pow.h
trunk/sys/contrib/octeon-sdk/cvmx-power-throttle.c
trunk/sys/contrib/octeon-sdk/cvmx-power-throttle.h
trunk/sys/contrib/octeon-sdk/cvmx-profiler.c
trunk/sys/contrib/octeon-sdk/cvmx-profiler.h
trunk/sys/contrib/octeon-sdk/cvmx-qlm-tables.c
trunk/sys/contrib/octeon-sdk/cvmx-qlm.c
trunk/sys/contrib/octeon-sdk/cvmx-qlm.h
trunk/sys/contrib/octeon-sdk/cvmx-rad-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-raid.c
trunk/sys/contrib/octeon-sdk/cvmx-raid.h
trunk/sys/contrib/octeon-sdk/cvmx-resources.config
trunk/sys/contrib/octeon-sdk/cvmx-rng.h
trunk/sys/contrib/octeon-sdk/cvmx-rnm-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-rtc.h
trunk/sys/contrib/octeon-sdk/cvmx-rwlock.h
trunk/sys/contrib/octeon-sdk/cvmx-scratch.h
trunk/sys/contrib/octeon-sdk/cvmx-shared-linux-n32.ld
trunk/sys/contrib/octeon-sdk/cvmx-shared-linux-o32.ld
trunk/sys/contrib/octeon-sdk/cvmx-shared-linux.ld
trunk/sys/contrib/octeon-sdk/cvmx-shmem.c
trunk/sys/contrib/octeon-sdk/cvmx-shmem.h
trunk/sys/contrib/octeon-sdk/cvmx-sim-magic.h
trunk/sys/contrib/octeon-sdk/cvmx-sli-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-smi-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-smix-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-spi.c
trunk/sys/contrib/octeon-sdk/cvmx-spi.h
trunk/sys/contrib/octeon-sdk/cvmx-spi4000.c
trunk/sys/contrib/octeon-sdk/cvmx-spinlock.h
trunk/sys/contrib/octeon-sdk/cvmx-spx0-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-spxx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-srio.c
trunk/sys/contrib/octeon-sdk/cvmx-srio.h
trunk/sys/contrib/octeon-sdk/cvmx-sriomaintx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-sriox-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-srxx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-sso-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-stxx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-swap.h
trunk/sys/contrib/octeon-sdk/cvmx-sysinfo.c
trunk/sys/contrib/octeon-sdk/cvmx-sysinfo.h
trunk/sys/contrib/octeon-sdk/cvmx-thunder.c
trunk/sys/contrib/octeon-sdk/cvmx-thunder.h
trunk/sys/contrib/octeon-sdk/cvmx-tim-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-tim.c
trunk/sys/contrib/octeon-sdk/cvmx-tim.h
trunk/sys/contrib/octeon-sdk/cvmx-tlb.c
trunk/sys/contrib/octeon-sdk/cvmx-tlb.h
trunk/sys/contrib/octeon-sdk/cvmx-tra-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-tra.c
trunk/sys/contrib/octeon-sdk/cvmx-tra.h
trunk/sys/contrib/octeon-sdk/cvmx-trax-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-twsi.c
trunk/sys/contrib/octeon-sdk/cvmx-twsi.h
trunk/sys/contrib/octeon-sdk/cvmx-uahcx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-uart.c
trunk/sys/contrib/octeon-sdk/cvmx-uart.h
trunk/sys/contrib/octeon-sdk/cvmx-uctlx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-usb.c
trunk/sys/contrib/octeon-sdk/cvmx-usb.h
trunk/sys/contrib/octeon-sdk/cvmx-usbcx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-usbd.c
trunk/sys/contrib/octeon-sdk/cvmx-usbd.h
trunk/sys/contrib/octeon-sdk/cvmx-usbnx-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-utils.h
trunk/sys/contrib/octeon-sdk/cvmx-version.h
trunk/sys/contrib/octeon-sdk/cvmx-warn.c
trunk/sys/contrib/octeon-sdk/cvmx-warn.h
trunk/sys/contrib/octeon-sdk/cvmx-wqe.h
trunk/sys/contrib/octeon-sdk/cvmx-zip-defs.h
trunk/sys/contrib/octeon-sdk/cvmx-zip.c
trunk/sys/contrib/octeon-sdk/cvmx-zip.h
trunk/sys/contrib/octeon-sdk/cvmx-zone.c
trunk/sys/contrib/octeon-sdk/cvmx.h
trunk/sys/contrib/octeon-sdk/octeon-boot-info.h
trunk/sys/contrib/octeon-sdk/octeon-feature.c
trunk/sys/contrib/octeon-sdk/octeon-feature.h
trunk/sys/contrib/octeon-sdk/octeon-model.c
trunk/sys/contrib/octeon-sdk/octeon-model.h
trunk/sys/contrib/octeon-sdk/octeon-pci-console.c
trunk/sys/contrib/octeon-sdk/octeon-pci-console.h
Added: trunk/sys/contrib/octeon-sdk/cvmip.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmip.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmip.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,210 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Cavium Inc. Internet Protocol (IP)
+ *
+ * Definitions for the Internet Protocol (IP) support.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifndef __CVMIP_H__
+#define __CVMIP_H__
+
+
+/*
+ * IP protocol values (1 byte)
+ *
+ */
+#define CVMIP_PROTO_ICMP 1 /* Internet Control Message Protocol */
+#define CVMIP_PROTO_TCP 6 /* Transmission Control Protocol */
+#define CVMIP_PROTO_UDP 17 /* User Datagram Protocol */
+#define CVMIP_PROTO_ESP 50 /* Encapsulated Security Payload */
+#define CVMIP_PROTO_AH 51 /* Authentication Header */
+
+
+/**
+ * network packet header definitions
+ * (originally from octane_hw.h)
+ *
+ */
+
+/**
+ * UDP Packet header
+ */
+typedef struct {
+ union {
+ int32_t s32 ;
+ uint32_t u32 ;
+ struct {
+ uint16_t src_prt ;
+ uint16_t dst_prt ;
+ } s;
+ } prts;
+ uint16_t len ;
+ uint16_t chksum ;
+} cvmip_udp_hdr_t;
+
+/**
+ * TCP Packet header
+ */
+typedef struct {
+ uint16_t src_prt ;
+ uint16_t dst_prt ;
+ uint32_t seq ;
+ uint32_t ack_seq ;
+ uint32_t hlen :4;
+ uint32_t rsvd :6;
+ uint32_t urg :1;
+ uint32_t ack :1;
+ uint32_t psh :1;
+ uint32_t rst :1;
+ uint32_t syn :1;
+ uint32_t fin :1;
+ uint16_t win_sz ;
+ uint16_t chksum ;
+ uint16_t urg_ptr ;
+ uint32_t junk ;
+} cvmip_tcp_hdr_t;
+
+/**
+ * L4 Packet header
+ */
+typedef union {
+ cvmip_udp_hdr_t udphdr;
+ cvmip_tcp_hdr_t tcphdr;
+ struct {
+ union {
+ int32_t s32 ;
+ uint32_t u32 ;
+ struct {
+ uint16_t src_prt;
+ uint16_t dst_prt;
+ } s;
+ } prts;
+ uint16_t len ;
+ uint16_t chksum ;
+ char dat[48] ; // 48 for IPv6 with no extension hdrs, 64 for IPv4 without options
+ } udp;
+ struct {
+ uint16_t src_prt ;
+ uint16_t dst_prt ;
+ uint32_t seq ;
+ uint32_t ack_seq ;
+ uint32_t hlen :4;
+ uint32_t rsvd :6;
+ uint32_t urg :1;
+ uint32_t ack :1;
+ uint32_t psh :1;
+ uint32_t rst :1;
+ uint32_t syn :1;
+ uint32_t fin :1;
+ uint16_t win_sz ;
+ uint16_t chksum ;
+ uint16_t urg_ptr ;
+ char dat[36] ; // 36 for IPv6 with no extension hdrs, 52 for IPv4 without options
+ } tcp;
+} cvmip_l4_info_t;
+
+/**
+ * Special struct to add a pad to IPv4 header
+ */
+typedef struct {
+ uint32_t pad;
+
+ uint32_t version : 4;
+ uint32_t hl : 4;
+ uint8_t tos ;
+ uint16_t len ;
+
+ uint16_t id ;
+ uint32_t mbz : 1;
+ uint32_t df : 1;
+ uint32_t mf : 1;
+ uint32_t off :13;
+
+ uint8_t ttl ;
+ uint8_t protocol;
+ uint16_t chksum ;
+
+ union {
+ uint64_t u64;
+ struct {
+ uint32_t src;
+ uint32_t dst;
+ } s;
+ } src_dst;
+} cvmip_ipv4_hdr_t;
+
+/**
+ * IPv6 Packet header
+ */
+typedef struct {
+
+ uint32_t version : 4;
+ uint32_t v6class : 8;
+ uint32_t flow :20;
+
+ uint16_t len ; // includes extension headers plus payload (add 40 to be equiv to v4 len field)
+ uint8_t next_hdr; // equivalent to the v4 protocol field
+ uint8_t hop_lim ; // equivalent to the v4 TTL field
+
+ union {
+ uint64_t u64[4];
+ struct {
+ uint64_t src[2];
+ uint64_t dst[2];
+ } s;
+ } src_dst;
+
+} cvmip_ipv6_hdr_t;
+
+
+#endif /* __CVMIP_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmip.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
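
Before the next file, a quick orientation: cvmip.h pairs the CVMIP_PROTO_* protocol numbers with packed header typedefs (cvmip_udp_hdr_t, cvmip_tcp_hdr_t, cvmip_ipv4_hdr_t, cvmip_ipv6_hdr_t). A minimal sketch of how they are meant to combine, purely illustrative and not part of the commit (the helper name is hypothetical):

#include <stdint.h>
#include "cvmip.h"

/* Map an IPv4 header's protocol byte to a printable name using the
   CVMIP_PROTO_* values defined in cvmip.h. */
static const char *example_proto_name(const cvmip_ipv4_hdr_t *ip)
{
    switch (ip->protocol) {
    case CVMIP_PROTO_ICMP: return "ICMP";
    case CVMIP_PROTO_TCP:  return "TCP";
    case CVMIP_PROTO_UDP:  return "UDP";
    case CVMIP_PROTO_ESP:  return "ESP";
    case CVMIP_PROTO_AH:   return "AH";
    default:               return "other";
    }
}
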
Added: trunk/sys/contrib/octeon-sdk/cvmx-abi.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-abi.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-abi.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,113 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+/**
+ * @file
+ *
+ * This file defines macros for use in determining the current calling ABI.
+ *
+ * <hr>$Revision: 70030 $<hr>
+*/
+
+#ifndef __CVMX_ABI_H__
+#define __CVMX_ABI_H__
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+#include <machine/endian.h>
+#else
+#ifndef __U_BOOT__
+#include <endian.h>
+#endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Check for N32 ABI, defined for 32-bit Simple Exec applications
+ and Linux N32 ABI.*/
+#if (defined _ABIN32 && _MIPS_SIM == _ABIN32)
+#define CVMX_ABI_N32
+/* Check for N64 ABI, defined for 64-bit Linux toolchain. */
+#elif (defined _ABI64 && _MIPS_SIM == _ABI64)
+#define CVMX_ABI_N64
+/* Check for O32 ABI, defined for Linux O32 ABI, not supported yet. */
+#elif (defined _ABIO32 && _MIPS_SIM == _ABIO32)
+#define CVMX_ABI_O32
+/* Check for EABI ABI, defined for 64-bit Simple Exec applications. */
+#else
+#define CVMX_ABI_EABI
+#endif
+
+#ifndef __BYTE_ORDER
+ #if defined(__BIG_ENDIAN) && !defined(__LITTLE_ENDIAN)
+ #define __BYTE_ORDER __BIG_ENDIAN
+ #elif !defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
+ #define __BYTE_ORDER __LITTLE_ENDIAN
+ #define __BIG_ENDIAN 4321
+ #elif !defined(__BIG_ENDIAN) && !defined(__LITTLE_ENDIAN)
+ #define __BIG_ENDIAN 4321
+ #define __BYTE_ORDER __BIG_ENDIAN
+ #else
+ #error Unable to determine Endian mode
+ #endif
+#endif
+
+/* For compatibility with Linux definitions... */
+#if __BYTE_ORDER == __BIG_ENDIAN
+# ifndef __BIG_ENDIAN_BITFIELD
+# define __BIG_ENDIAN_BITFIELD
+# endif
+#else
+# ifndef __LITTLE_ENDIAN_BITFIELD
+# define __LITTLE_ENDIAN_BITFIELD
+# endif
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) && defined(__LITTLE_ENDIAN_BITFIELD)
+# error Cannot define both __BIG_ENDIAN_BITFIELD and __LITTLE_ENDIAN_BITFIELD
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ABI_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-abi.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
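
cvmx-abi.h boils the toolchain's _MIPS_SIM/_ABI* macros down to a single CVMX_ABI_* define plus normalized byte-order and bitfield-order macros. A hedged sketch of the intended consumption pattern, not part of the commit (the type and struct names are invented for illustration):

#include <stdint.h>
#include "cvmx-abi.h"

/* Pick a register-sized integer type based on the detected ABI. */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)
typedef uint64_t example_reg_t;   /* full 64-bit ABI */
#else
typedef uint32_t example_reg_t;   /* N32/O32: 32-bit pointers */
#endif

/* Bitfield layout follows the normalized endian macros. */
struct example_flags {
#ifdef __BIG_ENDIAN_BITFIELD
    uint32_t version : 4, rest : 28;   /* most-significant field first */
#else
    uint32_t rest : 28, version : 4;   /* least-significant field first */
#endif
};
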
Added: trunk/sys/contrib/octeon-sdk/cvmx-access-native.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-access-native.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-access-native.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,726 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ * Functions for accessing memory and CSRs on Octeon when we are compiling
+ * natively.
+ *
+ * <hr>$Revision: 38306 $<hr>
+*/
+#ifndef __CVMX_ACCESS_NATIVE_H__
+#define __CVMX_ACCESS_NATIVE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Returns the Octeon processor ID.
+ *
+ * @return Octeon processor ID from COP0
+ */
+static inline uint32_t cvmx_get_proc_id(void)
+{
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+ extern uint32_t cvmx_app_init_processor_id;
+ return cvmx_app_init_processor_id;
+#else
+ uint32_t id;
+ asm ("mfc0 %0, $15,0" : "=r" (id));
+ return id;
+#endif
+}
+
+/**
+ * Convert a memory pointer (void*) into a hardware compatible
+ * memory address (uint64_t). Octeon hardware widgets don't
+ * understand logical addresses.
+ *
+ * @param ptr C style memory pointer
+ * @return Hardware physical address
+ */
+static inline uint64_t cvmx_ptr_to_phys(void *ptr)
+{
+ if (CVMX_ENABLE_PARAMETER_CHECKING)
+ cvmx_warn_if(ptr==NULL, "cvmx_ptr_to_phys() passed a NULL pointer\n");
+
+#ifdef CVMX_BUILD_FOR_UBOOT
+ uint64_t uboot_tlb_ptr_to_phys(void *ptr);
+
+ if (((uint32_t)ptr) < 0x80000000)
+ {
+ /* Handle useg (unmapped due to ERL) here*/
+ return(CAST64(ptr) & 0x7FFFFFFF);
+ }
+ else if (((uint32_t)ptr) < 0xC0000000)
+ {
+ /* Here we handle KSEG0/KSEG1 _pointers_. We know we are dealing
+ ** with 32 bit only values, so we treat them that way. Note that
+ ** a cvmx_phys_to_ptr(cvmx_ptr_to_phys(X)) will not return X in this case,
+ ** but the physical address of the KSEG0/KSEG1 address. */
+ return(CAST64(ptr) & 0x1FFFFFFF);
+ }
+ else
+ return(uboot_tlb_ptr_to_phys(ptr)); /* Should not get here in the !TLB case */
+
+#endif
+
+#ifdef __linux__
+ if (sizeof(void*) == 8)
+ {
+ /* We're running in 64 bit mode. Normally this means that we can use
+ 40 bits of address space (the hardware limit). Unfortunately there
+ is one case where we need to limit this to 30 bits, sign extended
+ 32 bit. Although these are 64 bits wide, only 30 bits can be used */
+ if ((CAST64(ptr) >> 62) == 3)
+ return CAST64(ptr) & cvmx_build_mask(30);
+ else
+ return CAST64(ptr) & cvmx_build_mask(40);
+ }
+ else
+ {
+#ifdef __KERNEL__
+ return (long)(ptr) & 0x1fffffff;
+#else
+ extern uint64_t linux_mem32_offset;
+ if (cvmx_likely(ptr))
+ return CAST64(ptr) - linux_mem32_offset;
+ else
+ return 0;
+#endif
+ }
+#elif defined(_WRS_KERNEL)
+ return (long)(ptr) & 0x7fffffff;
+#elif defined(VXWORKS_USER_MAPPINGS)
+ /* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
+ 2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
+ uint64_t address = (long)ptr;
+ if (address & 0x80000000)
+ return address & 0x1fffffff; /* KSEG pointers directly map the lower 256MB and bootbus */
+ else if ((address >= 0x10000000) && (address < 0x20000000))
+ return address + 0x400000000ull; /* 256MB-512MB is a virtual mapping for the 2nd 256MB */
+ else
+ return address; /* Looks to be a 1:1 mapped userspace pointer */
+#elif defined(__FreeBSD__) && defined(_KERNEL)
+ return (pmap_kextract((vm_offset_t)ptr));
+#else
+#if CVMX_USE_1_TO_1_TLB_MAPPINGS
+ /* We are assuming we're running the Simple Executive standalone. In this
+ mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
+ addresses are never used. Since we know all this, save the masking
+ cycles and do nothing */
+ return CAST64(ptr);
+#else
+
+ if (sizeof(void*) == 8)
+ {
+ /* We're running in 64 bit mode. Normally this means that we can use
+ 40 bits of address space (the hardware limit). Unfortunately there
+ is one case where we need to limit this to 30 bits, sign extended
+ 32 bit. Although these are 64 bits wide, only 30 bits can be used */
+ if ((CAST64(ptr) >> 62) == 3)
+ return CAST64(ptr) & cvmx_build_mask(30);
+ else
+ return CAST64(ptr) & cvmx_build_mask(40);
+ }
+ else
+ return (long)(ptr) & 0x7fffffff;
+
+#endif
+#endif
+}
+
+
+/**
+ * Convert a hardware physical address (uint64_t) into a
+ * memory pointer (void *).
+ *
+ * @param physical_address
+ * Hardware physical address to memory
+ * @return Pointer to memory
+ */
+static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
+{
+ if (CVMX_ENABLE_PARAMETER_CHECKING)
+ cvmx_warn_if(physical_address==0, "cvmx_phys_to_ptr() passed a zero address\n");
+
+#ifdef CVMX_BUILD_FOR_UBOOT
+
+ /* U-boot is a special case, as it is running in 32 bit mode, using the TLB to map code/data
+ ** which can have a physical address above the 32 bit address space. 1-1 mappings are used
+ ** to allow the low 2 GBytes to be accessed as in error level.
+ **
+ ** NOTE: This conversion can cause problems in u-boot, as users may want to enter addresses
+ ** like 0xBFC00000 (kseg1 boot bus address), which is a valid 64 bit physical address,
+ ** but is likely intended to be a boot bus address. */
+
+ if (physical_address < 0x80000000)
+ {
+ /* Handle useg here. ERL is set, so useg is unmapped. This is the only physical
+ ** address range that is directly addressable by u-boot. */
+ return CASTPTR(void, physical_address);
+ }
+ else
+ {
+ DECLARE_GLOBAL_DATA_PTR;
+ extern char uboot_start;
+ /* Above 0x80000000 we can only support one case - a physical address
+ ** that is mapped for u-boot code/data. We check against the u-boot mem range,
+ ** and return NULL if it is out of this range.
+ */
+ if (physical_address >= gd->bd->bi_uboot_ram_addr
+ && physical_address < gd->bd->bi_uboot_ram_addr + gd->bd->bi_uboot_ram_used_size)
+ {
+ return ((char *)&uboot_start + (physical_address - gd->bd->bi_uboot_ram_addr));
+ }
+ else
+ return(NULL);
+ }
+
+ if (physical_address >= 0x80000000)
+ return NULL;
+ else
+#endif
+
+#ifdef __linux__
+ if (sizeof(void*) == 8)
+ {
+ /* Just set the top bit, avoiding any TLB ugliness */
+ return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
+ }
+ else
+ {
+#ifdef __KERNEL__
+ return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
+#else
+ extern uint64_t linux_mem32_offset;
+ if (cvmx_likely(physical_address))
+ return CASTPTR(void, physical_address + linux_mem32_offset);
+ else
+ return NULL;
+#endif
+ }
+#elif defined(_WRS_KERNEL)
+ return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
+#elif defined(VXWORKS_USER_MAPPINGS)
+ /* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
+ 2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
+ if ((physical_address >= 0x10000000) && (physical_address < 0x20000000))
+ return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
+ else if ((OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ && (physical_address >= 0x410000000ull)
+ && (physical_address < 0x420000000ull))
+ return CASTPTR(void, physical_address - 0x400000000ull);
+ else
+ return CASTPTR(void, physical_address);
+#elif defined(__FreeBSD__) && defined(_KERNEL)
+#if defined(__mips_n64)
+ return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
+#else
+ if (physical_address < 0x20000000)
+ return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
+ else
+ panic("%s: mapping high address (%#jx) not yet supported.\n", __func__, (uintmax_t)physical_address);
+#endif
+#else
+
+#if CVMX_USE_1_TO_1_TLB_MAPPINGS
+ /* We are assuming we're running the Simple Executive standalone. In this
+ mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
+ addresses are never used. Since we know all this, save bit insert
+ cycles and do nothing */
+ return CASTPTR(void, physical_address);
+#else
+ /* Set the XKPHYS/KSEG0 bit as appropriate based on ABI */
+ if (sizeof(void*) == 8)
+ return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
+ else
+ return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
+
+#endif
+
+#endif
+}
+
+
+/* The following #if controls the definition of the macro
+ CVMX_BUILD_WRITE64. This macro is used to build a store operation to
+ a full 64bit address. With a 64bit ABI, this can be done with a simple
+ pointer access. 32bit ABIs require more complicated assembly */
+#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)
+
+/* We have a full 64bit ABI. Writing to a 64bit address can be done with
+ a simple volatile pointer */
+#define CVMX_BUILD_WRITE64(TYPE, ST) \
+static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
+{ \
+ *CASTPTR(volatile TYPE##_t, addr) = val; \
+}
+
+#elif defined(CVMX_ABI_N32)
+
+/* The N32 ABI passes all 64bit quantities in a single register, so it is
+ possible to use the arguments directly. We have to use inline assembly
+ for the actual store since a pointer would truncate the address */
+#define CVMX_BUILD_WRITE64(TYPE, ST) \
+static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
+{ \
+ asm volatile (ST " %[v], 0(%[c])" ::[v] "r" (val), [c] "r" (addr)); \
+}
+
+#elif defined(CVMX_ABI_O32)
+
+#ifdef __KERNEL__
+#define CVMX_BUILD_WRITE64(TYPE, LT) extern void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val);
+#else
+
+/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
+ separate registers. Assembly must be used to put them back together
+ before they're used. What should be a simple store becomes a
+ convoluted mess of shifts and ors */
+#define CVMX_BUILD_WRITE64(TYPE, ST) \
+static inline void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val) \
+{ \
+ if (sizeof(TYPE##_t) == 8) \
+ { \
+ uint32_t csr_addrh = csr_addr>>32; \
+ uint32_t csr_addrl = csr_addr; \
+ uint32_t valh = (uint64_t)val>>32; \
+ uint32_t vall = val; \
+ uint32_t tmp1; \
+ uint32_t tmp2; \
+ uint32_t tmp3; \
+ \
+ asm volatile ( \
+ ".set push\n" \
+ ".set mips64\n" \
+ "dsll %[tmp1], %[valh], 32\n" \
+ "dsll %[tmp2], %[csrh], 32\n" \
+ "dsll %[tmp3], %[vall], 32\n" \
+ "dsrl %[tmp3], %[tmp3], 32\n" \
+ "or %[tmp1], %[tmp1], %[tmp3]\n" \
+ "dsll %[tmp3], %[csrl], 32\n" \
+ "dsrl %[tmp3], %[tmp3], 32\n" \
+ "or %[tmp2], %[tmp2], %[tmp3]\n" \
+ ST " %[tmp1], 0(%[tmp2])\n" \
+ ".set pop\n" \
+ : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3)\
+ : [valh] "r" (valh), [vall] "r" (vall), \
+ [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl) \
+ ); \
+ } \
+ else \
+ { \
+ uint32_t csr_addrh = csr_addr>>32; \
+ uint32_t csr_addrl = csr_addr; \
+ uint32_t tmp1; \
+ uint32_t tmp2; \
+ \
+ asm volatile ( \
+ ".set push\n" \
+ ".set mips64\n" \
+ "dsll %[tmp1], %[csrh], 32\n" \
+ "dsll %[tmp2], %[csrl], 32\n" \
+ "dsrl %[tmp2], %[tmp2], 32\n" \
+ "or %[tmp1], %[tmp1], %[tmp2]\n" \
+ ST " %[val], 0(%[tmp1])\n" \
+ ".set pop\n" \
+ : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2) \
+ : [val] "r" (val), [csrh] "r" (csr_addrh), \
+ [csrl] "r" (csr_addrl) \
+ ); \
+ } \
+}
+
+#endif
+
+#else
+
+/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
+#error: Unsupported ABI
+
+#endif
+
+/* The following #if controls the definition of the macro
+ CVMX_BUILD_READ64. This macro is used to build a load operation from
+ a full 64bit address. With a 64bit ABI, this can be done with a simple
+ pointer access. 32bit ABIs require more complicated assembly */
+#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)
+
+/* We have a full 64bit ABI. Reading from a 64bit address can be done with
+ a simple volatile pointer */
+#define CVMX_BUILD_READ64(TYPE, LT) \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
+{ \
+ return *CASTPTR(volatile TYPE##_t, addr); \
+}
+
+#elif defined(CVMX_ABI_N32)
+
+/* The N32 ABI passes all 64bit quantities in a single register, so it is
+ possible to use the arguments directly. We have to use inline assembly
+ for the actual load since a pointer would truncate the address */
+#define CVMX_BUILD_READ64(TYPE, LT) \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
+{ \
+ TYPE##_t val; \
+ asm volatile (LT " %[v], 0(%[c])": [v] "=r" (val) : [c] "r" (addr));\
+ return val; \
+}
+
+#elif defined(CVMX_ABI_O32)
+
+#ifdef __KERNEL__
+#define CVMX_BUILD_READ64(TYPE, LT) extern TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr);
+#else
+
+/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
+ separate registers. Assembly must be used to put them back together
+ before they're used. What should be a simple load becomes a
+ convoluted mess of shifts and ors */
+#define CVMX_BUILD_READ64(TYPE, LT) \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr) \
+{ \
+ if (sizeof(TYPE##_t) == 8) \
+ { \
+ uint32_t csr_addrh = csr_addr>>32; \
+ uint32_t csr_addrl = csr_addr; \
+ uint32_t valh; \
+ uint32_t vall; \
+ \
+ asm volatile ( \
+ ".set push\n" \
+ ".set mips64\n" \
+ "dsll %[valh], %[csrh], 32\n" \
+ "dsll %[vall], %[csrl], 32\n" \
+ "dsrl %[vall], %[vall], 32\n" \
+ "or %[valh], %[valh], %[vall]\n" \
+ LT " %[vall], 0(%[valh])\n" \
+ "dsrl %[valh], %[vall], 32\n" \
+ "sll %[vall], 0\n" \
+ "sll %[valh], 0\n" \
+ ".set pop\n" \
+ : [valh] "=&r" (valh), [vall] "=&r" (vall) \
+ : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl) \
+ ); \
+ return ((uint64_t)valh<<32) | vall; \
+ } \
+ else \
+ { \
+ uint32_t csr_addrh = csr_addr>>32; \
+ uint32_t csr_addrl = csr_addr; \
+ TYPE##_t val; \
+ uint32_t tmp; \
+ \
+ asm volatile ( \
+ ".set push\n" \
+ ".set mips64\n" \
+ "dsll %[val], %[csrh], 32\n" \
+ "dsll %[tmp], %[csrl], 32\n" \
+ "dsrl %[tmp], %[tmp], 32\n" \
+ "or %[val], %[val], %[tmp]\n" \
+ LT " %[val], 0(%[val])\n" \
+ ".set pop\n" \
+ : [val] "=&r" (val), [tmp] "=&r" (tmp) \
+ : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl) \
+ ); \
+ return val; \
+ } \
+}
+
+#endif /* __KERNEL__ */
+
+#else
+
+/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
+#error: Unsupported ABI
+
+#endif
+
+/* The following defines 8 functions for writing to a 64bit address. Each
+ takes two arguments, the address and the value to write.
+ cvmx_write64_int64 cvmx_write64_uint64
+ cvmx_write64_int32 cvmx_write64_uint32
+ cvmx_write64_int16 cvmx_write64_uint16
+ cvmx_write64_int8 cvmx_write64_uint8 */
+CVMX_BUILD_WRITE64(int64, "sd");
+CVMX_BUILD_WRITE64(int32, "sw");
+CVMX_BUILD_WRITE64(int16, "sh");
+CVMX_BUILD_WRITE64(int8, "sb");
+CVMX_BUILD_WRITE64(uint64, "sd");
+CVMX_BUILD_WRITE64(uint32, "sw");
+CVMX_BUILD_WRITE64(uint16, "sh");
+CVMX_BUILD_WRITE64(uint8, "sb");
+
+/* The following defines 8 functions for reading from a 64bit address. Each
+ takes the address as the only argument
+ cvmx_read64_int64 cvmx_read64_uint64
+ cvmx_read64_int32 cvmx_read64_uint32
+ cvmx_read64_int16 cvmx_read64_uint16
+ cvmx_read64_int8 cvmx_read64_uint8 */
+CVMX_BUILD_READ64(int64, "ld");
+CVMX_BUILD_READ64(int32, "lw");
+CVMX_BUILD_READ64(int16, "lh");
+CVMX_BUILD_READ64(int8, "lb");
+CVMX_BUILD_READ64(uint64, "ld");
+CVMX_BUILD_READ64(uint32, "lw");
+CVMX_BUILD_READ64(uint16, "lhu");
+CVMX_BUILD_READ64(uint8, "lbu");
+
+static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
+{
+ cvmx_write64_uint64(csr_addr, val);
+
+ /* Perform an immediate read after every write to an RSL register to force
+ the write to complete. It doesn't matter what RSL read we do, so we
+ choose CVMX_MIO_BOOT_BIST_STAT because it is fast and harmless */
+ if (((csr_addr >> 40) & 0x7ffff) == (0x118))
+ cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT);
+}
+
+static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
+{
+ cvmx_write64_uint64(io_addr, val);
+}
+
+static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
+{
+ return cvmx_read64_uint64(csr_addr);
+}
+
+static inline void cvmx_send_single(uint64_t data)
+{
+ const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
+ cvmx_write64_uint64(CVMX_IOBDMA_SENDSINGLE, data);
+}
+
+static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
+{
+ union
+ {
+ uint64_t u64;
+ struct {
+ uint64_t scraddr : 8;
+ uint64_t len : 8;
+ uint64_t addr :48;
+ } s;
+ } addr;
+ addr.u64 = csr_addr;
+ addr.s.scraddr = scraddr >> 3;
+ addr.s.len = 1;
+ cvmx_send_single(addr.u64);
+}
+
+
+/**
+ * Number of the Core on which the program is currently running.
+ *
+ * @return The current core number
+ */
+static inline unsigned int cvmx_get_core_num(void)
+{
+ unsigned int core_num;
+ CVMX_RDHWRNV(core_num, 0);
+ return core_num;
+}
+
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for POP instruction.
+ *
+ * @param val 32 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+static inline uint32_t cvmx_pop(uint32_t val)
+{
+ uint32_t pop;
+ CVMX_POP(pop, val);
+ return pop;
+}
+
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for DPOP instruction.
+ *
+ * @param val 64 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+static inline int cvmx_dpop(uint64_t val)
+{
+ int pop;
+ CVMX_DPOP(pop, val);
+ return pop;
+}
+
+
+/**
+ * @deprecated
+ * Provide current cycle counter as a return value. Deprecated, use
+ * cvmx_clock_get_count(CVMX_CLOCK_CORE) to get cycle counter.
+ *
+ * @return current cycle counter
+ */
+static inline uint64_t cvmx_get_cycle(void)
+{
+ return cvmx_clock_get_count(CVMX_CLOCK_CORE);
+}
+
+
+/**
+ * @deprecated
+ * Reads a chip global cycle counter. This counts SCLK cycles since
+ * chip reset. The counter is 64 bit. This function is deprecated as the rate
+ * of the global cycle counter is different between Octeon+ and Octeon2, use
+ * cvmx_clock_get_count(CVMX_CLOCK_SCLK) instead. For Octeon2, the clock rate
+ * of SCLK may be different from the core clock.
+ *
+ * @return Global chip cycle count since chip reset.
+ */
+static inline uint64_t cvmx_get_cycle_global(void)
+{
+ return cvmx_clock_get_count(CVMX_CLOCK_IPD);
+}
+
+
+/**
+ * Wait for the specified number of core clock cycles
+ *
+ * @param cycles
+ */
+static inline void cvmx_wait(uint64_t cycles)
+{
+ uint64_t done = cvmx_get_cycle() + cycles;
+
+ while (cvmx_get_cycle() < done)
+ {
+ /* Spin */
+ }
+}
+
+
+/**
+ * Wait for the specified number of micro seconds
+ *
+ * @param usec micro seconds to wait
+ */
+static inline void cvmx_wait_usec(uint64_t usec)
+{
+ uint64_t done = cvmx_get_cycle() + usec * cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000;
+ while (cvmx_get_cycle() < done)
+ {
+ /* Spin */
+ }
+}
+
+
+/**
+ * Wait for the specified number of io clock cycles
+ *
+ * @param cycles
+ */
+static inline void cvmx_wait_io(uint64_t cycles)
+{
+ uint64_t done = cvmx_clock_get_count(CVMX_CLOCK_SCLK) + cycles;
+
+ while (cvmx_clock_get_count(CVMX_CLOCK_SCLK) < done)
+ {
+ /* Spin */
+ }
+}
+
+
+/**
+ * Perform a soft reset of Octeon
+ *
+ * @return
+ */
+static inline void cvmx_reset_octeon(void)
+{
+ cvmx_ciu_soft_rst_t ciu_soft_rst;
+ ciu_soft_rst.u64 = 0;
+ ciu_soft_rst.s.soft_rst = 1;
+ cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
+}
+
+
+/**
+ * Read a byte of fuse data
+ * @param byte_addr address to read
+ *
+ * @return fuse value: 0 or 1
+ */
+static inline uint8_t cvmx_fuse_read_byte(int byte_addr)
+{
+ cvmx_mio_fus_rcmd_t read_cmd;
+
+ read_cmd.u64 = 0;
+ read_cmd.s.addr = byte_addr;
+ read_cmd.s.pend = 1;
+ cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
+ while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD)) && read_cmd.s.pend)
+ ;
+ return(read_cmd.s.dat);
+}
+
+
+/**
+ * Read a single fuse bit
+ *
+ * @param fuse Fuse number (0-1023)
+ *
+ * @return fuse value: 0 or 1
+ */
+static inline int cvmx_fuse_read(int fuse)
+{
+ return((cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ACCESS_NATIVE_H__ */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-access-native.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
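
The file above is the heart of native hardware access: pointer/physical conversion, the CVMX_BUILD_WRITE64/READ64 generators, and the CSR helpers built on them. A hedged usage sketch, not part of the commit (CVMX_SOME_CSR is a stand-in for a real CSR address macro from the cvmx-*-defs.h headers):

#include "cvmx.h"

void example_device_setup(void *dma_buffer)
{
    /* Hardware blocks take physical addresses, never C pointers. */
    uint64_t phys = cvmx_ptr_to_phys(dma_buffer);

    /* Read-modify-write a CSR; cvmx_write_csr() internally reads back an
       RSL register so the store is forced to complete. */
    uint64_t val = cvmx_read_csr(CVMX_SOME_CSR);
    cvmx_write_csr(CVMX_SOME_CSR, val | 1);

    cvmx_wait_usec(100);                  /* busy-wait 100 microseconds */

    /* Convert back when the CPU needs to touch the buffer again. */
    void *ptr = cvmx_phys_to_ptr(phys);
    (void)ptr;
}
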
Added: trunk/sys/contrib/octeon-sdk/cvmx-access.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-access.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-access.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,243 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ * Function prototypes for accessing memory and CSRs on Octeon.
+ *
+ * <hr>$Revision: 38306 $<hr>
+*/
+#ifndef __CVMX_ACCESS_H__
+#define __CVMX_ACCESS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* We're going to assume that if we are compiling for Mips then we must be
+ running natively on Octeon. It is possible that this code could be
+ compiled on a non-Octeon MIPS that is acting as a PCI/PCIe host. In this
+ case this assumption will be wrong and cause issues. We can't key off of
+ __octeon__ since some people use stock gcc toolchains */
+#if defined(__mips__) && !defined(CVMX_BUILD_FOR_LINUX_HOST)
+ #define CVMX_FUNCTION static inline
+#else
+ #define CVMX_FUNCTION extern
+#endif
+
+/**
+ * simprintf uses simulator tricks to speed up printouts. The format
+ * and args are passed to the simulator and processed natively on the host.
+ * Simprintf is limited to 7 arguments, and they all must use %ll (long long)
+ * format specifiers to be displayed correctly.
+ *
+ * @param format
+ *
+ * @return
+ */
+EXTERN_ASM void simprintf(const char *format, ...);
+
+/**
+ * This function performs some default initialization of the Octeon executive.
+ * It initializes the cvmx_bootmem memory allocator with the list of physical
+ * memory provided by the bootloader, and creates 1-1 TLB mappings for this
+ * memory. This function should be called on all cores that will use either the
+ * bootmem allocator or the 1-1 TLB mappings. Applications which require a
+ * different configuration can replace this function with a suitable application
+ * specific one.
+ *
+ * @return 0 on success
+ * -1 on failure
+ */
+extern int cvmx_user_app_init(void);
+
+/**
+ * Returns the Octeon processor ID.
+ *
+ * @return Octeon processor ID from COP0
+ */
+CVMX_FUNCTION uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
+
+/**
+ * Convert a memory pointer (void*) into a hardware compatible
+ * memory address (uint64_t). Octeon hardware widgets don't
+ * understand logical addresses.
+ *
+ * @param ptr C style memory pointer
+ * @return Hardware physical address
+ */
+CVMX_FUNCTION uint64_t cvmx_ptr_to_phys(void *ptr);
+
+/**
+ * Convert a hardware physical address (uint64_t) into a
+ * memory pointer (void *).
+ *
+ * @param physical_address
+ * Hardware physical address to memory
+ * @return Pointer to memory
+ */
+CVMX_FUNCTION void *cvmx_phys_to_ptr(uint64_t physical_address);
+
+CVMX_FUNCTION void cvmx_write64_int64(uint64_t address, int64_t value);
+CVMX_FUNCTION void cvmx_write64_uint64(uint64_t address, uint64_t value);
+CVMX_FUNCTION void cvmx_write64_int32(uint64_t address, int32_t value);
+CVMX_FUNCTION void cvmx_write64_uint32(uint64_t address, uint32_t value);
+CVMX_FUNCTION void cvmx_write64_int16(uint64_t address, int16_t value);
+CVMX_FUNCTION void cvmx_write64_uint16(uint64_t address, uint16_t value);
+CVMX_FUNCTION void cvmx_write64_int8(uint64_t address, int8_t value);
+CVMX_FUNCTION void cvmx_write64_uint8(uint64_t address, uint8_t value);
+CVMX_FUNCTION void cvmx_write_csr(uint64_t csr_addr, uint64_t val);
+CVMX_FUNCTION void cvmx_write_io(uint64_t io_addr, uint64_t val);
+
+CVMX_FUNCTION int64_t cvmx_read64_int64(uint64_t address);
+CVMX_FUNCTION uint64_t cvmx_read64_uint64(uint64_t address);
+CVMX_FUNCTION int32_t cvmx_read64_int32(uint64_t address);
+CVMX_FUNCTION uint32_t cvmx_read64_uint32(uint64_t address);
+CVMX_FUNCTION int16_t cvmx_read64_int16(uint64_t address);
+CVMX_FUNCTION uint16_t cvmx_read64_uint16(uint64_t address);
+CVMX_FUNCTION int8_t cvmx_read64_int8(uint64_t address);
+CVMX_FUNCTION uint8_t cvmx_read64_uint8(uint64_t address);
+CVMX_FUNCTION uint64_t cvmx_read_csr(uint64_t csr_addr);
+
+CVMX_FUNCTION void cvmx_send_single(uint64_t data);
+CVMX_FUNCTION void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr);
+
+/**
+ * Number of the Core on which the program is currently running.
+ *
+ * @return The current core number
+ */
+CVMX_FUNCTION unsigned int cvmx_get_core_num(void);
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for POP instruction.
+ *
+ * @param val 32 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+CVMX_FUNCTION uint32_t cvmx_pop(uint32_t val);
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for DPOP instruction.
+ *
+ * @param val 64 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+CVMX_FUNCTION int cvmx_dpop(uint64_t val);
+
+/**
+ * @deprecated
+ * Provide current cycle counter as a return value. Deprecated, use
+ * cvmx_clock_get_count(CVMX_CLOCK_CORE) to get cycle counter.
+ *
+ * @return current cycle counter
+ */
+CVMX_FUNCTION uint64_t cvmx_get_cycle(void);
+
+/**
+ * @deprecated
+ * Reads a chip global cycle counter. This counts SCLK cycles since
+ * chip reset. The counter is 64 bit. This function is deprecated as the rate
+ * of the global cycle counter is different between Octeon+ and Octeon2, use
+ * cvmx_clock_get_count(CVMX_CLOCK_SCLK) instead. For Octeon2, the clock rate
+ * of SCLK may be different from the core clock.
+ *
+ * @return Global chip cycle count since chip reset.
+ */
+CVMX_FUNCTION uint64_t cvmx_get_cycle_global(void) __attribute__((deprecated));
+
+/**
+ * Wait for the specified number of core clock cycles
+ *
+ * @param cycles
+ */
+CVMX_FUNCTION void cvmx_wait(uint64_t cycles);
+
+/**
+ * Wait for the specified number of micro seconds
+ *
+ * @param usec micro seconds to wait
+ */
+CVMX_FUNCTION void cvmx_wait_usec(uint64_t usec);
+
+/**
+ * Wait for the specified number of io clock cycles
+ *
+ * @param cycles
+ */
+CVMX_FUNCTION void cvmx_wait_io(uint64_t cycles);
+
+/**
+ * Perform a soft reset of Octeon
+ *
+ * @return
+ */
+CVMX_FUNCTION void cvmx_reset_octeon(void);
+
+/**
+ * Read a byte of fuse data
+ * @param byte_addr address to read
+ *
+ * @return fuse value: 0 or 1
+ */
+CVMX_FUNCTION uint8_t cvmx_fuse_read_byte(int byte_addr);
+
+/**
+ * Read a single fuse bit
+ *
+ * @param fuse Fuse number (0-1023)
+ *
+ * @return fuse value: 0 or 1
+ */
+CVMX_FUNCTION int cvmx_fuse_read(int fuse);
+
+#undef CVMX_FUNCTION
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ACCESS_H__ */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-access.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
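
cvmx-access.h mirrors the native header as prototypes: through the CVMX_FUNCTION macro the same declarations become static inlines when compiling natively for MIPS/Octeon and extern functions when built for a PCI/PCIe host, so callers are written identically either way. A small hedged example, illustrative only and not part of the commit:

#include "cvmx.h"

/* Report how many cores in a coremask are active, or -1 if this core
   is not a member of the mask. */
int example_core_census(uint64_t coremask)
{
    unsigned int me = cvmx_get_core_num();  /* hardware core number */
    int active = cvmx_dpop(coremask);       /* 64-bit population count */
    return ((coremask >> me) & 1) ? active : -1;
}
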
Added: trunk/sys/contrib/octeon-sdk/cvmx-address.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-address.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-address.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,267 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ * Typedefs and defines for working with Octeon physical addresses.
+ *
+ * <hr>$Revision: 38306 $<hr>
+*/
+#ifndef __CVMX_ADDRESS_H__
+#define __CVMX_ADDRESS_H__
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+#include "cvmx-abi.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ CVMX_MIPS_SPACE_XKSEG = 3LL,
+ CVMX_MIPS_SPACE_XKPHYS = 2LL,
+ CVMX_MIPS_SPACE_XSSEG = 1LL,
+ CVMX_MIPS_SPACE_XUSEG = 0LL
+} cvmx_mips_space_t;
+
+typedef enum {
+ CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,
+ CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,
+ CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,
+ CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
+} cvmx_mips_xkseg_space_t;
+
+ /* decodes <14:13> of a kseg3 window address */
+typedef enum {
+ CVMX_ADD_WIN_SCR = 0L,
+ CVMX_ADD_WIN_DMA = 1L, /* see cvmx_add_win_dma_dec_t for further decode */
+ CVMX_ADD_WIN_UNUSED = 2L,
+ CVMX_ADD_WIN_UNUSED2 = 3L
+} cvmx_add_win_dec_t;
+
+ /* decode within DMA space */
+typedef enum {
+ CVMX_ADD_WIN_DMA_ADD = 0L, /* add store data to the write buffer entry, allocating it if necessary */
+ CVMX_ADD_WIN_DMA_SENDMEM = 1L, /* send out the write buffer entry to DRAM */
+ /* store data must be normal DRAM memory space address in this case */
+ CVMX_ADD_WIN_DMA_SENDDMA = 2L, /* send out the write buffer entry as an IOBDMA command */
+ /* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */
+ CVMX_ADD_WIN_DMA_SENDIO = 3L, /* send out the write buffer entry as an IO write */
+ /* store data must be normal IO space address in this case */
+ CVMX_ADD_WIN_DMA_SENDSINGLE = 4L, /* send out a single-tick command on the NCB bus */
+ /* no write buffer data needed/used */
+} cvmx_add_win_dma_dec_t;
+
+/**
+ * Physical Address Decode
+ *
+ * Octeon-I HW never interprets this X (<39:36> reserved
+ * for future expansion), software should set to 0.
+ *
+ * - 0x0 XXX0 0000 0000 to DRAM Cached
+ * - 0x0 XXX0 0FFF FFFF
+ *
+ * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
+ * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
+ *
+ * - 0x0 XXX0 2000 0000 to DRAM Cached
+ * - 0x0 XXXF FFFF FFFF
+ *
+ * - 0x1 00X0 0000 0000 to Boot Bus Uncached
+ * - 0x1 00XF FFFF FFFF
+ *
+ * - 0x1 01X0 0000 0000 to Other NCB Uncached
+ * - 0x1 FFXF FFFF FFFF devices
+ *
+ * Decode of all Octeon addresses
+ */
+typedef union {
+
+ uint64_t u64;
+
+ struct {
+ cvmx_mips_space_t R : 2;
+ uint64_t offset :62;
+ } sva; /* mapped or unmapped virtual address */
+
+ struct {
+ uint64_t zeroes :33;
+ uint64_t offset :31;
+ } suseg; /* mapped USEG virtual addresses (typically) */
+
+ struct {
+ uint64_t ones :33;
+ cvmx_mips_xkseg_space_t sp : 2;
+ uint64_t offset :29;
+ } sxkseg; /* mapped or unmapped virtual address */
+
+ struct {
+ cvmx_mips_space_t R : 2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
+ uint64_t cca : 3; /* ignored by octeon */
+ uint64_t mbz :10;
+ uint64_t pa :49; /* physical address */
+ } sxkphys; /* physical address accessed through xkphys unmapped virtual address */
+
+ struct {
+ uint64_t mbz :15;
+ uint64_t is_io : 1; /* if set, the address is uncached and resides on MCB bus */
+ uint64_t did : 8; /* the hardware ignores this field when is_io==0, else device ID */
+ uint64_t unaddr: 4; /* the hardware ignores <39:36> in Octeon I */
+ uint64_t offset :36;
+ } sphys; /* physical address */
+
+ struct {
+ uint64_t zeroes :24; /* technically, <47:40> are don't-cares */
+ uint64_t unaddr: 4; /* the hardware ignores <39:36> in Octeon I */
+ uint64_t offset :36;
+ } smem; /* physical mem address */
+
+ struct {
+ uint64_t mem_region :2;
+ uint64_t mbz :13;
+ uint64_t is_io : 1; /* 1 in this case */
+ uint64_t did : 8; /* the hardware ignores this field when is_io==0, else device ID */
+ uint64_t unaddr: 4; /* the hardware ignores <39:36> in Octeon I */
+ uint64_t offset :36;
+ } sio; /* physical IO address */
+
+ struct {
+ uint64_t ones : 49;
+ cvmx_add_win_dec_t csrdec : 2; /* CVMX_ADD_WIN_SCR (0) in this case */
+ uint64_t addr : 13;
+ } sscr; /* scratchpad virtual address - accessed through a window at the end of kseg3 */
+
+ /* there should only be stores to IOBDMA space, no loads */
+ struct {
+ uint64_t ones : 49;
+ cvmx_add_win_dec_t csrdec : 2; /* CVMX_ADD_WIN_DMA (1) in this case */
+ uint64_t unused2: 3;
+ cvmx_add_win_dma_dec_t type : 3;
+ uint64_t addr : 7;
+ } sdma; /* IOBDMA virtual address - accessed through a window at the end of kseg3 */
+
+ struct {
+ uint64_t didspace : 24;
+ uint64_t unused : 40;
+ } sfilldidspace;
+
+} cvmx_addr_t;
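+
+/* Minimal usage sketch (illustrative only; cvmx_addr_decode_example is
+ * a hypothetical helper, not an SDK API): view an address through the
+ * union above and extract the physical address when it is an xkphys
+ * unmapped virtual address. */
+static inline uint64_t cvmx_addr_decode_example(uint64_t address)
+{
+    cvmx_addr_t addr;
+
+    addr.u64 = address;                  /* e.g. 0x8000000010000000ull */
+    if (addr.sxkphys.R == CVMX_MIPS_SPACE_XKPHYS)
+        return addr.sxkphys.pa;          /* 49-bit physical address */
+    return addr.u64;                     /* mapped addresses pass through */
+}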
+
+/* These macros are used by 32-bit applications */
+
+#define CVMX_MIPS32_SPACE_KSEG0 1l
+#define CVMX_ADD_SEG32(segment, add) (((int32_t)segment << 31) | (int32_t)(add))
+
+/* Currently all IOs are performed using XKPHYS addressing. Linux uses the
+ CvmMemCtl register to enable XKPHYS addressing to IO space from user mode.
+ Future OSes may need to change the upper bits of IO addresses. The
+ following define controls the upper two bits for all IO addresses generated
+ by the simple executive library */
+#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
+
+/* These macros simplify the process of creating common IO addresses */
+#define CVMX_ADD_SEG(segment, add) ((((uint64_t)segment) << 62) | (add))
+#ifndef CVMX_ADD_IO_SEG
+#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
+#endif
+#define CVMX_ADDR_DIDSPACE(did) (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))
+#define CVMX_ADDR_DID(did) (CVMX_ADDR_DIDSPACE(did) << 40)
+#define CVMX_FULL_DID(did,subdid) (((did) << 3) | (subdid))
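+
+/* Worked example (illustrative): CVMX_ADD_IO_SEG() places CVMX_IO_SEG
+ * (CVMX_MIPS_SPACE_XKPHYS, value 2) into address bits <63:62>, so a
+ * CSR physical address becomes an xkphys unmapped virtual address:
+ *
+ *   CVMX_ADD_IO_SEG(0x00011800E0000400ull)
+ *     == (2ull << 62) | 0x00011800E0000400ull
+ *     == 0x800011800E0000400ull
+ */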
+
+
+ /* from include/ncb_rsl_id.v */
+#define CVMX_OCT_DID_MIS 0ULL /* misc stuff */
+#define CVMX_OCT_DID_GMX0 1ULL
+#define CVMX_OCT_DID_GMX1 2ULL
+#define CVMX_OCT_DID_PCI 3ULL
+#define CVMX_OCT_DID_KEY 4ULL
+#define CVMX_OCT_DID_FPA 5ULL
+#define CVMX_OCT_DID_DFA 6ULL
+#define CVMX_OCT_DID_ZIP 7ULL
+#define CVMX_OCT_DID_RNG 8ULL
+#define CVMX_OCT_DID_IPD 9ULL
+#define CVMX_OCT_DID_PKT 10ULL
+#define CVMX_OCT_DID_TIM 11ULL
+#define CVMX_OCT_DID_TAG 12ULL
+ /* the rest are not on the IO bus */
+#define CVMX_OCT_DID_L2C 16ULL
+#define CVMX_OCT_DID_LMC 17ULL
+#define CVMX_OCT_DID_SPX0 18ULL
+#define CVMX_OCT_DID_SPX1 19ULL
+#define CVMX_OCT_DID_PIP 20ULL
+#define CVMX_OCT_DID_ASX0 22ULL
+#define CVMX_OCT_DID_ASX1 23ULL
+#define CVMX_OCT_DID_IOB 30ULL
+
+#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT,2ULL)
+#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG,0ULL)
+#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG,1ULL)
+#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG,2ULL)
+#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG,3ULL)
+#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG,4ULL)
+#define CVMX_OCT_DID_TAG_TAG5 CVMX_FULL_DID(CVMX_OCT_DID_TAG,5ULL)
+#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG,7ULL)
+#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB,0ULL)
+#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM,0ULL)
+#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY,0ULL)
+#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI,6ULL)
+#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS,0ULL)
+#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI,0ULL)
+#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD,7ULL)
+#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA,7ULL)
+#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS,7ULL)
+#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP,0ULL)
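+
+/* Worked example (illustrative): a full device ID is the 8-bit value
+ * carried in the did field of the sio/sdma address formats above,
+ * formed as (did << 3) | subdid. For instance,
+ * CVMX_OCT_DID_TAG_SWTAG == CVMX_FULL_DID(12ULL, 0ULL) == 0x60 and
+ * CVMX_OCT_DID_TAG_CSR == CVMX_FULL_DID(12ULL, 7ULL) == 0x67. */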
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+#ifdef CVMX_ABI_N32
+#define UNMAPPED_PTR(x) ( (1U << 31) | x )
+#else
+#define UNMAPPED_PTR(x) ( (1ULL << 63) | x )
+#endif
+#endif
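+
+/* Usage sketch (illustrative): UNMAPPED_PTR() turns a physical address
+ * into an unmapped virtual address by setting the top address bit:
+ * bit 31 under the N32 ABI (a kseg0-style 32-bit pointer), bit 63
+ * otherwise (an xkphys-style 64-bit pointer). For example, under N32
+ * UNMAPPED_PTR(0x10000000) evaluates to 0x90000000. */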
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ADDRESS_H__ */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-address.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-agl-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-agl-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-agl-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,4948 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-agl-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon agl.
+ *
+ * This file is auto-generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_AGL_DEFS_H__
+#define __CVMX_AGL_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_BAD_REG CVMX_AGL_GMX_BAD_REG_FUNC()
+static inline uint64_t CVMX_AGL_GMX_BAD_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_BAD_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000518ull);
+}
+#else
+#define CVMX_AGL_GMX_BAD_REG (CVMX_ADD_IO_SEG(0x00011800E0000518ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_BIST CVMX_AGL_GMX_BIST_FUNC()
+static inline uint64_t CVMX_AGL_GMX_BIST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_BIST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000400ull);
+}
+#else
+#define CVMX_AGL_GMX_BIST (CVMX_ADD_IO_SEG(0x00011800E0000400ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_DRV_CTL CVMX_AGL_GMX_DRV_CTL_FUNC()
+static inline uint64_t CVMX_AGL_GMX_DRV_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_DRV_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00007F0ull);
+}
+#else
+#define CVMX_AGL_GMX_DRV_CTL (CVMX_ADD_IO_SEG(0x00011800E00007F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_INF_MODE CVMX_AGL_GMX_INF_MODE_FUNC()
+static inline uint64_t CVMX_AGL_GMX_INF_MODE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_INF_MODE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00007F8ull);
+}
+#else
+#define CVMX_AGL_GMX_INF_MODE (CVMX_ADD_IO_SEG(0x00011800E00007F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_PRTX_CFG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_PRTX_CFG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048)
+#endif
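+/* Note (illustrative): the indexed AGL CSRs in this file are laid out
+ * with a 2 KiB per-port stride, which the "((offset) & 1) * 2048" term
+ * expresses. For example, CVMX_AGL_GMX_PRTX_CFG(1) evaluates to
+ * CVMX_ADD_IO_SEG(0x00011800E0000810ull): the port-0 base plus one
+ * 2048-byte step. The same pattern, with the same per-model offset
+ * checks, repeats for the remaining indexed accessors below. */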
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM5(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM5(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM_EN(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM_EN(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_DECISION(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_DECISION(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_DECISION(offset) (CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CHK(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CHK(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MAX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MAX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MIN(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MIN(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_IFG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_IFG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_IFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_INT_EN(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_INT_EN(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_INT_REG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_INT_REG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_INT_REG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_JABBER(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_JABBER(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_RX_INBND(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_RX_INBND(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_RX_INBND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) (CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_UDD_SKP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_UDD_SKP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) (CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RX_BP_DROPX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RX_BP_DROPX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_AGL_GMX_RX_BP_DROPX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RX_BP_OFFX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RX_BP_OFFX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_AGL_GMX_RX_BP_OFFX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RX_BP_ONX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_RX_BP_ONX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_AGL_GMX_RX_BP_ONX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_RX_PRT_INFO CVMX_AGL_GMX_RX_PRT_INFO_FUNC()
+static inline uint64_t CVMX_AGL_GMX_RX_PRT_INFO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_RX_PRT_INFO not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004E8ull);
+}
+#else
+#define CVMX_AGL_GMX_RX_PRT_INFO (CVMX_ADD_IO_SEG(0x00011800E00004E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_RX_TX_STATUS CVMX_AGL_GMX_RX_TX_STATUS_FUNC()
+static inline uint64_t CVMX_AGL_GMX_RX_TX_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_RX_TX_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00007E8ull);
+}
+#else
+#define CVMX_AGL_GMX_RX_TX_STATUS (CVMX_ADD_IO_SEG(0x00011800E00007E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_SMACX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_SMACX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_SMACX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_STAT_BP CVMX_AGL_GMX_STAT_BP_FUNC()
+static inline uint64_t CVMX_AGL_GMX_STAT_BP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_STAT_BP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000520ull);
+}
+#else
+#define CVMX_AGL_GMX_STAT_BP (CVMX_ADD_IO_SEG(0x00011800E0000520ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_APPEND(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_APPEND(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_APPEND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_CLK(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_CLK(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_CLK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_MIN_PKT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_MIN_PKT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_MIN_PKT(offset) (CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_TOGO(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_TOGO(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_ZERO(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_ZERO(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_SOFT_PAUSE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_SOFT_PAUSE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) (CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT5(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT5(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT6(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT6(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT7(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT7(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT8(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT8(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT9(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT9(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STATS_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_THRESH(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_THRESH(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_THRESH(offset) (CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_BP CVMX_AGL_GMX_TX_BP_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_BP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_BP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004D0ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_BP (CVMX_ADD_IO_SEG(0x00011800E00004D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_COL_ATTEMPT CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_COL_ATTEMPT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000498ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_COL_ATTEMPT (CVMX_ADD_IO_SEG(0x00011800E0000498ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_IFG CVMX_AGL_GMX_TX_IFG_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_IFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_IFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000488ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_IFG (CVMX_ADD_IO_SEG(0x00011800E0000488ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_INT_EN CVMX_AGL_GMX_TX_INT_EN_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000508ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_INT_EN (CVMX_ADD_IO_SEG(0x00011800E0000508ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_INT_REG CVMX_AGL_GMX_TX_INT_REG_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_INT_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_INT_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000500ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_INT_REG (CVMX_ADD_IO_SEG(0x00011800E0000500ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_JAM CVMX_AGL_GMX_TX_JAM_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_JAM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_JAM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000490ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_JAM (CVMX_ADD_IO_SEG(0x00011800E0000490ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_LFSR CVMX_AGL_GMX_TX_LFSR_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_LFSR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_LFSR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004F8ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_LFSR (CVMX_ADD_IO_SEG(0x00011800E00004F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_OVR_BP CVMX_AGL_GMX_TX_OVR_BP_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_OVR_BP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_OVR_BP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004C8ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_OVR_BP (CVMX_ADD_IO_SEG(0x00011800E00004C8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004A0ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC (CVMX_ADD_IO_SEG(0x00011800E00004A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004A8ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE (CVMX_ADD_IO_SEG(0x00011800E00004A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_PRTX_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_AGL_PRTX_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_AGL_PRTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8)
+#endif
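+
+/* Every CSR in this file follows the pattern above: with
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING defined, the address macro expands to an
+ * inline function that warns when the register does not exist on the running
+ * chip model; otherwise it collapses to the bare I/O-segment address.  A
+ * minimal usage sketch (illustrative only; it assumes the cvmx_read_csr()
+ * accessor declared in cvmx.h, which is not part of this header):
+ *
+ * @verbatim
+ * static uint64_t agl_read_prt_ctl(int port)
+ * {
+ *     // With address checking on, CVMX_AGL_PRTX_CTL(port) also validates
+ *     // 'port' against the current OCTEON model before returning the
+ *     // 64-bit physical address that cvmx_read_csr() dereferences.
+ *     return cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
+ * }
+ * @endverbatim
+ */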
+
+/**
+ * cvmx_agl_gmx_bad_reg
+ *
+ * AGL_GMX_BAD_REG = A collection of things that have gone very, very wrong
+ *
+ *
+ * Notes:
+ * OUT_OVR[0], LOSTSTAT[0], OVRFLW, TXPOP, TXPSH will be reset when MIX0_CTL[RESET] is set to 1.
+ * OUT_OVR[1], LOSTSTAT[1], OVRFLW1, TXPOP1, TXPSH1 will be reset when MIX1_CTL[RESET] is set to 1.
+ * STATOVR will be reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+union cvmx_agl_gmx_bad_reg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_bad_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t txpsh1 : 1; /**< TX FIFO overflow (MII1) */
+ uint64_t txpop1 : 1; /**< TX FIFO underflow (MII1) */
+ uint64_t ovrflw1 : 1; /**< RX FIFO overflow (MII1) */
+ uint64_t txpsh : 1; /**< TX FIFO overflow (MII0) */
+ uint64_t txpop : 1; /**< TX FIFO underflow (MII0) */
+ uint64_t ovrflw : 1; /**< RX FIFO overflow (MII0) */
+ uint64_t reserved_27_31 : 5;
+ uint64_t statovr : 1; /**< TX Statistics overflow */
+ uint64_t reserved_24_25 : 2;
+ uint64_t loststat : 2; /**< TX Statistics data was over-written
+ In MII/RGMII, one bit per port
+ TX Stats are corrupted */
+ uint64_t reserved_4_21 : 18;
+ uint64_t out_ovr : 2; /**< Outbound data FIFO overflow */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 2;
+ uint64_t reserved_4_21 : 18;
+ uint64_t loststat : 2;
+ uint64_t reserved_24_25 : 2;
+ uint64_t statovr : 1;
+ uint64_t reserved_27_31 : 5;
+ uint64_t ovrflw : 1;
+ uint64_t txpop : 1;
+ uint64_t txpsh : 1;
+ uint64_t ovrflw1 : 1;
+ uint64_t txpop1 : 1;
+ uint64_t txpsh1 : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_agl_gmx_bad_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t txpsh1 : 1; /**< TX FIFO overflow (MII1) */
+ uint64_t txpop1 : 1; /**< TX FIFO underflow (MII1) */
+ uint64_t ovrflw1 : 1; /**< RX FIFO overflow (MII1) */
+ uint64_t txpsh : 1; /**< TX FIFO overflow (MII0) */
+ uint64_t txpop : 1; /**< TX FIFO underflow (MII0) */
+ uint64_t ovrflw : 1; /**< RX FIFO overflow (MII0) */
+ uint64_t reserved_27_31 : 5;
+ uint64_t statovr : 1; /**< TX Statistics overflow */
+ uint64_t reserved_23_25 : 3;
+ uint64_t loststat : 1; /**< TX Statistics data was over-written
+ TX Stats are corrupted */
+ uint64_t reserved_4_21 : 18;
+ uint64_t out_ovr : 2; /**< Outbound data FIFO overflow */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 2;
+ uint64_t reserved_4_21 : 18;
+ uint64_t loststat : 1;
+ uint64_t reserved_23_25 : 3;
+ uint64_t statovr : 1;
+ uint64_t reserved_27_31 : 5;
+ uint64_t ovrflw : 1;
+ uint64_t txpop : 1;
+ uint64_t txpsh : 1;
+ uint64_t ovrflw1 : 1;
+ uint64_t txpop1 : 1;
+ uint64_t txpsh1 : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_bad_reg_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_bad_reg_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63 : 29;
+ uint64_t txpsh : 1; /**< TX FIFO overflow */
+ uint64_t txpop : 1; /**< TX FIFO underflow */
+ uint64_t ovrflw : 1; /**< RX FIFO overflow */
+ uint64_t reserved_27_31 : 5;
+ uint64_t statovr : 1; /**< TX Statistics overflow */
+ uint64_t reserved_23_25 : 3;
+ uint64_t loststat : 1; /**< TX Statistics data was over-written
+ TX Stats are corrupted */
+ uint64_t reserved_3_21 : 19;
+ uint64_t out_ovr : 1; /**< Outbound data FIFO overflow */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 1;
+ uint64_t reserved_3_21 : 19;
+ uint64_t loststat : 1;
+ uint64_t reserved_23_25 : 3;
+ uint64_t statovr : 1;
+ uint64_t reserved_27_31 : 5;
+ uint64_t ovrflw : 1;
+ uint64_t txpop : 1;
+ uint64_t txpsh : 1;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_bad_reg_s cn61xx;
+ struct cvmx_agl_gmx_bad_reg_s cn63xx;
+ struct cvmx_agl_gmx_bad_reg_s cn63xxp1;
+ struct cvmx_agl_gmx_bad_reg_s cn66xx;
+ struct cvmx_agl_gmx_bad_reg_s cn68xx;
+ struct cvmx_agl_gmx_bad_reg_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_bad_reg cvmx_agl_gmx_bad_reg_t;
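+
+/* A polling sketch for the error conditions documented above (illustrative;
+ * assumes cvmx_read_csr() and cvmx_dprintf() from the SDK, plus the
+ * CVMX_AGL_GMX_BAD_REG address macro defined earlier in this file):
+ *
+ * @verbatim
+ * cvmx_agl_gmx_bad_reg_t bad;
+ * bad.u64 = cvmx_read_csr(CVMX_AGL_GMX_BAD_REG);
+ * if (bad.s.ovrflw || bad.s.txpop || bad.s.txpsh)
+ *     cvmx_dprintf("AGL: MII0 FIFO overflow/underflow\n");
+ * if (bad.s.statovr)
+ *     cvmx_dprintf("AGL: TX statistics overflow\n");
+ * @endverbatim
+ */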
+
+/**
+ * cvmx_agl_gmx_bist
+ *
+ * AGL_GMX_BIST = GMX BIST Results
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_bist {
+ uint64_t u64;
+ struct cvmx_agl_gmx_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t status : 25; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.fif_bnk0
+ - 1: gmx#.inb.fif_bnk1
+ - 2: gmx#.inb.fif_bnk2
+ - 3: gmx#.inb.fif_bnk3
+ - 4: gmx#.inb.fif_bnk_ext0
+ - 5: gmx#.inb.fif_bnk_ext1
+ - 6: gmx#.inb.fif_bnk_ext2
+ - 7: gmx#.inb.fif_bnk_ext3
+ - 8: gmx#.outb.fif.fif_bnk0
+ - 9: gmx#.outb.fif.fif_bnk1
+ - 10: RAZ
+ - 11: RAZ
+ - 12: gmx#.outb.fif.fif_bnk_ext0
+ - 13: gmx#.outb.fif.fif_bnk_ext1
+ - 14: RAZ
+ - 15: RAZ
+ - 16: RAZ
+ - 17: RAZ
+ - 18: RAZ
+ - 19: RAZ
+ - 20: gmx#.csr.drf20x32m2_bist
+ - 21: gmx#.csr.drf20x48m2_bist
+ - 22: gmx#.outb.stat.drf16x27m1_bist
+ - 23: gmx#.outb.stat.drf40x64m1_bist
+ - 24: RAZ */
+#else
+ uint64_t status : 25;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_agl_gmx_bist_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t status : 10; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.drf128x78m1_bist
+ - 1: gmx#.outb.fif.drf128x71m1_bist
+ - 2: gmx#.csr.gmi0.srf8x64m1_bist
+ - 3: gmx#.csr.gmi1.srf8x64m1_bist
+ - 4: 0
+ - 5: 0
+ - 6: gmx#.csr.drf20x80m1_bist
+ - 7: gmx#.outb.stat.drf16x27m1_bist
+ - 8: gmx#.outb.stat.drf40x64m1_bist
+ - 9: 0 */
+#else
+ uint64_t status : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_bist_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_bist_cn52xx cn56xx;
+ struct cvmx_agl_gmx_bist_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_bist_s cn61xx;
+ struct cvmx_agl_gmx_bist_s cn63xx;
+ struct cvmx_agl_gmx_bist_s cn63xxp1;
+ struct cvmx_agl_gmx_bist_s cn66xx;
+ struct cvmx_agl_gmx_bist_s cn68xx;
+ struct cvmx_agl_gmx_bist_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_bist cvmx_agl_gmx_bist_t;
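+
+/* BIST results are usually checked once after reset; a hedged sketch
+ * (assumes cvmx_read_csr()/cvmx_dprintf() from the SDK and the
+ * CVMX_AGL_GMX_BIST address macro defined earlier in this file):
+ *
+ * @verbatim
+ * cvmx_agl_gmx_bist_t bist;
+ * bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
+ * if (bist.s.status)  // any set bit names a failing memory from the list above
+ *     cvmx_dprintf("AGL BIST failure mask 0x%llx\n",
+ *                  (unsigned long long)bist.s.status);
+ * @endverbatim
+ */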
+
+/**
+ * cvmx_agl_gmx_drv_ctl
+ *
+ * AGL_GMX_DRV_CTL = GMX Drive Control
+ *
+ *
+ * Notes:
+ * NCTL, PCTL, BYP_EN will be reset when MIX0_CTL[RESET] is set to 1.
+ * NCTL1, PCTL1, BYP_EN1 will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_drv_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_drv_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t byp_en1 : 1; /**< Compensation Controller Bypass Enable (MII1) */
+ uint64_t reserved_45_47 : 3;
+ uint64_t pctl1 : 5; /**< AGL PCTL (MII1) */
+ uint64_t reserved_37_39 : 3;
+ uint64_t nctl1 : 5; /**< AGL NCTL (MII1) */
+ uint64_t reserved_17_31 : 15;
+ uint64_t byp_en : 1; /**< Compensation Controller Bypass Enable */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pctl : 5; /**< AGL PCTL */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< AGL NCTL */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t byp_en : 1;
+ uint64_t reserved_17_31 : 15;
+ uint64_t nctl1 : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t pctl1 : 5;
+ uint64_t reserved_45_47 : 3;
+ uint64_t byp_en1 : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } s;
+ struct cvmx_agl_gmx_drv_ctl_s cn52xx;
+ struct cvmx_agl_gmx_drv_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_drv_ctl_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t byp_en : 1; /**< Compensation Controller Bypass Enable */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pctl : 5; /**< AGL PCTL */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< AGL NCTL */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t byp_en : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_drv_ctl_cn56xx cn56xxp1;
+};
+typedef union cvmx_agl_gmx_drv_ctl cvmx_agl_gmx_drv_ctl_t;
+
+/**
+ * cvmx_agl_gmx_inf_mode
+ *
+ * AGL_GMX_INF_MODE = Interface Mode
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_inf_mode {
+ uint64_t u64;
+ struct cvmx_agl_gmx_inf_mode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t en : 1; /**< Interface Enable */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_agl_gmx_inf_mode_s cn52xx;
+ struct cvmx_agl_gmx_inf_mode_s cn52xxp1;
+ struct cvmx_agl_gmx_inf_mode_s cn56xx;
+ struct cvmx_agl_gmx_inf_mode_s cn56xxp1;
+};
+typedef union cvmx_agl_gmx_inf_mode cvmx_agl_gmx_inf_mode_t;
+
+/**
+ * cvmx_agl_gmx_prt#_cfg
+ *
+ * AGL_GMX_PRT_CFG = Port description
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_prtx_cfg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_prtx_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t tx_idle : 1; /**< TX Machine is idle */
+ uint64_t rx_idle : 1; /**< RX Machine is idle */
+ uint64_t reserved_9_11 : 3;
+ uint64_t speed_msb : 1; /**< Link Speed MSB [SPEED_MSB:SPEED]
+ 10 = 10Mbps operation
+ 00 = 100Mbps operation
+ 01 = 1000Mbps operation
+ 11 = Reserved */
+ uint64_t reserved_7_7 : 1;
+ uint64_t burst : 1; /**< Half-Duplex Burst Enable
+ Only valid for 1000Mbps half-duplex operation
+ 0 = burst length of 0x2000 (halfdup / 1000Mbps)
+ 1 = burst length of 0x0 (all other modes) */
+ uint64_t tx_en : 1; /**< Port enable. Must be set for Octane to send
+ RGMII traffic. When this bit is clear on a given
+ port, all packet cycles will appear as
+ inter-frame cycles. */
+ uint64_t rx_en : 1; /**< Port enable. Must be set for Octane to receive
+ RGMII traffic. When this bit is clear on a given
+ port, all packet cycles will appear as
+ inter-frame cycles. */
+ uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
+ 0 = 512 bit times (10/100Mbps operation)
+ 1 = 4096 bit times (1000Mbps operation) */
+ uint64_t duplex : 1; /**< Duplex
+ 0 = Half Duplex (collisions/extensions/bursts)
+ 1 = Full Duplex */
+ uint64_t speed : 1; /**< Link Speed LSB [SPEED_MSB:SPEED]
+ 10 = 10Mbps operation
+ 00 = 100Mbps operation
+ 01 = 1000Mbps operation
+ 11 = Reserved */
+ uint64_t en : 1; /**< Link Enable
+ When EN is clear, packets will not be received
+ or transmitted (including PAUSE and JAM packets).
+ If EN is cleared while a packet is currently
+ being received or transmitted, the packet will
+ be allowed to complete before the bus is idled.
+ On the RX side, subsequent packets in a burst
+ will be ignored. */
+#else
+ uint64_t en : 1;
+ uint64_t speed : 1;
+ uint64_t duplex : 1;
+ uint64_t slottime : 1;
+ uint64_t rx_en : 1;
+ uint64_t tx_en : 1;
+ uint64_t burst : 1;
+ uint64_t reserved_7_7 : 1;
+ uint64_t speed_msb : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t rx_idle : 1;
+ uint64_t tx_idle : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t tx_en : 1; /**< Port enable. Must be set for Octane to send
+ MII traffic. When this bit is clear on a given
+ port, all MII cycles will appear as
+ inter-frame cycles. */
+ uint64_t rx_en : 1; /**< Port enable. Must be set for Octane to receive
+ MII traffic. When this bit is clear on a given
+ port, all MII cycles will appear as
+ inter-frame cycles. */
+ uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
+ 0 = 512 bit times (10/100Mbps operation)
+ 1 = Reserved */
+ uint64_t duplex : 1; /**< Duplex
+ 0 = Half Duplex (collisions/extensions/bursts)
+ 1 = Full Duplex */
+ uint64_t speed : 1; /**< Link Speed
+ 0 = 10/100Mbps operation
+ 1 = Reserved */
+ uint64_t en : 1; /**< Link Enable
+ When EN is clear, packets will not be received
+ or transmitted (including PAUSE and JAM packets).
+ If EN is cleared while a packet is currently
+ being received or transmitted, the packet will
+ be allowed to complete before the bus is idled.
+ On the RX side, subsequent packets in a burst
+ will be ignored. */
+#else
+ uint64_t en : 1;
+ uint64_t speed : 1;
+ uint64_t duplex : 1;
+ uint64_t slottime : 1;
+ uint64_t rx_en : 1;
+ uint64_t tx_en : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xx;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_s cn61xx;
+ struct cvmx_agl_gmx_prtx_cfg_s cn63xx;
+ struct cvmx_agl_gmx_prtx_cfg_s cn63xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_s cn66xx;
+ struct cvmx_agl_gmx_prtx_cfg_s cn68xx;
+ struct cvmx_agl_gmx_prtx_cfg_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_prtx_cfg cvmx_agl_gmx_prtx_cfg_t;
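+
+/* A read-modify-write sketch for the [SPEED_MSB:SPEED] encoding documented
+ * above, here selecting 1000 Mbps full duplex (illustrative; assumes
+ * cvmx_read_csr()/cvmx_write_csr() from cvmx.h and the
+ * CVMX_AGL_GMX_PRTX_CFG(offset) address macro defined earlier in this file):
+ *
+ * @verbatim
+ * cvmx_agl_gmx_prtx_cfg_t cfg;
+ * cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ * cfg.s.speed_msb = 0;  // [SPEED_MSB:SPEED] = 01 -> 1000 Mbps
+ * cfg.s.speed     = 1;
+ * cfg.s.slottime  = 1;  // 4096 bit times, per the SLOTTIME encoding
+ * cfg.s.duplex    = 1;  // full duplex
+ * cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), cfg.u64);
+ * @endverbatim
+ */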
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam0
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam0 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn61xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn66xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn68xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam0 cvmx_agl_gmx_rxx_adr_cam0_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam1
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam1 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn61xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn66xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn68xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam1 cvmx_agl_gmx_rxx_adr_cam1_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam2
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam2 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn61xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn66xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn68xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam2 cvmx_agl_gmx_rxx_adr_cam2_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam3
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam3 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn61xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn66xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn68xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam3 cvmx_agl_gmx_rxx_adr_cam3_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam4
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam4 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn61xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn66xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn68xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam4 cvmx_agl_gmx_rxx_adr_cam4_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam5
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam5 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn61xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn66xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn68xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam5 cvmx_agl_gmx_rxx_adr_cam5_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam_en
+ *
+ * AGL_GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam_en {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t en : 8; /**< CAM Entry Enables */
+#else
+ uint64_t en : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn61xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn66xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn68xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam_en cvmx_agl_gmx_rxx_adr_cam_en_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_ctl
+ *
+ * AGL_GMX_RX_ADR_CTL = Address Filtering Control
+ *
+ *
+ * Notes:
+ * * ALGORITHM
+ * Here is some pseudo code that represents the address filter behavior.
+ *
+ * @verbatim
+ * bool dmac_addr_filter(uint8 prt, uint48 dmac) [
+ * ASSERT(prt >= 0 && prt <= 3);
+ * if (is_bcst(dmac)) // broadcast accept
+ * return (AGL_GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
+ * if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 1) // multicast reject
+ * return REJECT;
+ * if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 2) // multicast accept
+ * return ACCEPT;
+ *
+ * cam_hit = 0;
+ *
+ * for (i=0; i<8; i++) [
+ * if (AGL_GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0)
+ * continue;
+ * uint48 unswizzled_mac_adr = 0x0;
+ * for (j=5; j>=0; j--) [
+ * unswizzled_mac_adr = (unswizzled_mac_adr << 8) | AGL_GMX_RX[prt]_ADR_CAM[j][ADR<i*8+7:i*8>];
+ * ]
+ * if (unswizzled_mac_adr == dmac) [
+ * cam_hit = 1;
+ * break;
+ * ]
+ * ]
+ *
+ * if (cam_hit)
+ * return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
+ * else
+ * return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
+ * ]
+ * @endverbatim
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_adr_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t cam_mode : 1; /**< Allow or deny DMAC address filter
+ 0 = reject the packet on DMAC address match
+ 1 = accept the packet on DMAC address match */
+ uint64_t mcst : 2; /**< Multicast Mode
+ 0 = Use the Address Filter CAM
+ 1 = Force reject all multicast packets
+ 2 = Force accept all multicast packets
+ 3 = Reserved */
+ uint64_t bcst : 1; /**< Accept All Broadcast Packets */
+#else
+ uint64_t bcst : 1;
+ uint64_t mcst : 2;
+ uint64_t cam_mode : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn61xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn66xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn68xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_ctl cvmx_agl_gmx_rxx_adr_ctl_t;
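+
+/* The CAM-match step of the pseudo code above, translated into plain C
+ * (illustrative; cam[0..5] hold the six AGL_GMX_RX_ADR_CAM values and
+ * cam_en holds AGL_GMX_RX_ADR_CAM_EN[EN], both read by the caller):
+ *
+ * @verbatim
+ * static int dmac_cam_hit(const uint64_t cam[6], uint8_t cam_en, uint64_t dmac)
+ * {
+ *     int i, j;
+ *     for (i = 0; i < 8; i++) {
+ *         uint64_t mac = 0;
+ *         if (!(cam_en & (1 << i)))
+ *             continue;
+ *         // Un-swizzle: byte j of entry i lives in bits <i*8+7:i*8> of cam[j]
+ *         for (j = 5; j >= 0; j--)
+ *             mac = (mac << 8) | ((cam[j] >> (i * 8)) & 0xff);
+ *         if (mac == (dmac & 0xffffffffffffull))
+ *             return 1;
+ *     }
+ *     return 0;
+ * }
+ * @endverbatim
+ */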
+
+/**
+ * cvmx_agl_gmx_rx#_decision
+ *
+ * AGL_GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
+ *
+ *
+ * Notes:
+ * As each byte in a packet is received by GMX, the L2 byte count is compared
+ * against the AGL_GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes
+ * from the beginning of the L2 header (DMAC). In normal operation, the L2
+ * header begins after the PREAMBLE+SFD (AGL_GMX_RX_FRM_CTL[PRE_CHK]=1) and any
+ * optional UDD skip data (AGL_GMX_RX_UDD_SKP[LEN]).
+ *
+ * When AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
+ * packet and would require UDD skip length to account for them.
+ *
+ *                                     L2 Size
+ * Port Mode        <=AGL_GMX_RX_DECISION bytes     >AGL_GMX_RX_DECISION bytes
+ *                  (default=24)                    (default=24)
+ *
+ * MII/Full Duplex  accept packet                   apply filters
+ *                  (no filtering is applied)       (accept packet based on DMAC
+ *                                                   and PAUSE packet filters)
+ *
+ * MII/Half Duplex  drop packet                     apply filters
+ *                  (packet is unconditionally      (accept packet based on DMAC)
+ *                   dropped)
+ *
+ * where l2_size = MAX(0, total_packet_size - AGL_GMX_RX_UDD_SKP[LEN] - ((AGL_GMX_RX_FRM_CTL[PRE_CHK]==1)*8))
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_decision {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_decision_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t cnt : 5; /**< The byte count to decide when to accept or filter
+ a packet. */
+#else
+ uint64_t cnt : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_decision_s cn52xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_decision_s cn56xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_decision_s cn61xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn63xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_decision_s cn66xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn68xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_decision cvmx_agl_gmx_rxx_decision_t;
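+
+/* The l2_size expression from the notes above, written out as C so the
+ * clamping at zero is explicit (illustrative helper, not an SDK function):
+ *
+ * @verbatim
+ * static uint64_t agl_l2_size(uint64_t total_packet_size,
+ *                             uint64_t udd_skp_len, int pre_chk)
+ * {
+ *     // PREAMBLE+SFD contribute 8 bytes only when PRE_CHK is set
+ *     uint64_t overhead = udd_skp_len + (pre_chk ? 8 : 0);
+ *     return (total_packet_size > overhead) ? total_packet_size - overhead : 0;
+ * }
+ * @endverbatim
+ */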
+
+/**
+ * cvmx_agl_gmx_rx#_frm_chk
+ *
+ * AGL_GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
+ *
+ *
+ * Notes:
+ * If AGL_GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_frm_chk {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_chk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t niberr : 1; /**< Nibble error */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with packet data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< Carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t reserved_1_1 : 1;
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn61xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn66xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn68xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_frm_chk cvmx_agl_gmx_rxx_frm_chk_t;
+
+/**
+ * cvmx_agl_gmx_rx#_frm_ctl
+ *
+ * AGL_GMX_RX_FRM_CTL = Frame Control
+ *
+ *
+ * Notes:
+ * * PRE_STRP
+ * When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
+ * determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
+ * core as part of the packet.
+ *
+ * In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
+ * size when checking against the MIN and MAX bounds. Furthermore, the bytes
+ * are skipped when locating the start of the L2 header for DMAC and Control
+ * frame recognition.
+ *
+ * * CTL_BCK/CTL_DRP
+ * These bits control how the HW handles incoming PAUSE packets. Here are
+ * the most common modes of operation:
+ * CTL_BCK=1,CTL_DRP=1 - HW does it all
+ * CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames
+ * CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored
+ *
+ * These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode.
+ * Since PAUSE packets only apply to fulldup operation, any PAUSE packet
+ * would constitute an exception which should be handled by the processing
+ * cores. PAUSE packets should not be forwarded.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_frm_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t ptp_mode : 1; /**< Timestamp mode
+ When PTP_MODE is set, a 64-bit timestamp will be
+ prepended to every incoming packet. The timestamp
+ bytes are added to the packet in such a way as to
+ not modify the packet's receive byte count. This
+ implies that the AGL_GMX_RX_JABBER,
+ AGL_GMX_RX_FRM_MIN, AGL_GMX_RX_FRM_MAX,
+ AGL_GMX_RX_DECISION, AGL_GMX_RX_UDD_SKP, and the
+ AGL_GMX_RX_STATS_* do not require any adjustment
+ as they operate on the received packet size.
+ If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
+ uint64_t reserved_11_11 : 1;
+ uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
+ due to PARTIAL packets */
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PRE_STRP should be set to
+ account for the variable nature of the PREAMBLE.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pad_len : 1; /**< When set, disables the length check for non-min
+ sized pkts with padding in the client data */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
+ AGL will begin the frame at the first SFD.
+ PRE_FREE must be set if PRE_ALIGN is set.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped
+ PRE_STRP must be set if PRE_ALIGN is set.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pre_chk : 1; /**< This port is configured to send a valid 802.3
+ PREAMBLE to begin every frame. AGL checks that a
+ valid PREAMBLE is received (based on PRE_FREE).
+ When a problem does occur within the PREAMBLE
+ sequence, the frame is marked as bad and not sent
+ into the core. The AGL_GMX_RX_INT_REG[PCTERR]
+ interrupt is also raised. */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t pre_align : 1;
+ uint64_t null_dis : 1;
+ uint64_t reserved_11_11 : 1;
+ uint64_t ptp_mode : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PREAMBLE can be consumed
+ by the HW so when PRE_ALIGN is set, PRE_FREE,
+ PRE_STRP must be set for correct operation.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pad_len : 1; /**< When set, disables the length check for non-min
+ sized pkts with padding in the client data */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
+ 0 - 254 cycles of PREAMBLE followed by SFD
+ PRE_FREE must be set if PRE_ALIGN is set.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped
+ PRE_STRP must be set if PRE_ALIGN is set.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t pre_align : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn61xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn66xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn68xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_frm_ctl cvmx_agl_gmx_rxx_frm_ctl_t;
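+
+/* The common "HW does it all" PAUSE configuration from the notes above
+ * (CTL_BCK=1, CTL_DRP=1), as a read-modify-write sketch (illustrative;
+ * assumes cvmx_read_csr()/cvmx_write_csr() and the
+ * CVMX_AGL_GMX_RXX_FRM_CTL(offset) address macro defined earlier):
+ *
+ * @verbatim
+ * cvmx_agl_gmx_rxx_frm_ctl_t frm;
+ * frm.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port));
+ * frm.s.ctl_bck = 1;  // forward pause information to the TX block
+ * frm.s.ctl_drp = 1;  // and drop the PAUSE frames themselves
+ * cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), frm.u64);
+ * @endverbatim
+ */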
+
+/**
+ * cvmx_agl_gmx_rx#_frm_max
+ *
+ * AGL_GMX_RX_FRM_MAX = Frame Max length
+ *
+ *
+ * Notes:
+ * When changing the LEN field, be sure that LEN does not exceed
+ * AGL_GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that
+ * are within the maximum length parameter to be rejected because they exceed
+ * the AGL_GMX_RX_JABBER[CNT] limit.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_frm_max {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_max_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t len : 16; /**< Byte count for Max-sized frame check
+ AGL_GMX_RXn_FRM_CHK[MAXERR] enables the check
+ for port n.
+ If enabled, failing packets set the MAXERR
+ interrupt and the MIX opcode is set to OVER_FCS
+ (0x3, if packet has bad FCS) or OVER_ERR (0x4, if
+ packet has good FCS).
+ LEN <= AGL_GMX_RX_JABBER[CNT] */
+#else
+ uint64_t len : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn61xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn66xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn68xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_frm_max cvmx_agl_gmx_rxx_frm_max_t;
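+
+/* Setting FRM_MAX safely means honoring the LEN <= AGL_GMX_RX_JABBER[CNT]
+ * constraint from the notes above; a sketch (illustrative; assumes the
+ * CVMX_AGL_GMX_RXX_FRM_MAX/CVMX_AGL_GMX_RXX_JABBER address macros and the
+ * cvmx_read_csr()/cvmx_write_csr() accessors, with new_len supplied by the
+ * caller):
+ *
+ * @verbatim
+ * cvmx_agl_gmx_rxx_jabber_t jab;
+ * cvmx_agl_gmx_rxx_frm_max_t max;
+ * jab.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_JABBER(port));
+ * max.u64 = 0;
+ * max.s.len = (new_len <= jab.s.cnt) ? new_len : jab.s.cnt;  // clamp to CNT
+ * cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), max.u64);
+ * @endverbatim
+ */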
+
+/**
+ * cvmx_agl_gmx_rx#_frm_min
+ *
+ * AGL_GMX_RX_FRM_MIN = Frame Min length
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_frm_min {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_min_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t len : 16; /**< Byte count for Min-sized frame check
+ AGL_GMX_RXn_FRM_CHK[MINERR] enables the check
+ for port n.
+ If enabled, failing packets set the MINERR
+ interrupt and the MIX opcode is set to UNDER_FCS
+ (0x6, if packet has bad FCS) or UNDER_ERR (0x8,
+ if packet has good FCS). */
+#else
+ uint64_t len : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn61xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn66xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn68xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_frm_min cvmx_agl_gmx_rxx_frm_min_t;
+
+/**
+ * cvmx_agl_gmx_rx#_ifg
+ *
+ * AGL_GMX_RX_IFG = RX Min IFG
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_ifg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_ifg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t ifg : 4; /**< Min IFG (in IFG*8 bits) between packets used to
+ determine IFGERR. Normally IFG is 96 bits.
+ Note in some operating modes, IFG cycles can be
+ inserted or removed in order to achieve clock rate
+ adaptation. For these reasons, the default value
+ is slightly conservative and does not check up to
+ the full 96 bits of IFG. */
+#else
+ uint64_t ifg : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_ifg_s cn52xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_ifg_s cn56xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_ifg_s cn61xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn63xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_ifg_s cn66xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn68xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_ifg cvmx_agl_gmx_rxx_ifg_t;
+
+/**
+ * cvmx_agl_gmx_rx#_int_en
+ *
+ * AGL_GMX_RX_INT_EN = Interrupt Enable
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_int_en {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex | NS */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed | NS */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus | NS */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< Packet reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) | NS */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< Carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< MII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t reserved_1_1 : 1;
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_int_en_s cn61xx;
+ struct cvmx_agl_gmx_rxx_int_en_s cn63xx;
+ struct cvmx_agl_gmx_rxx_int_en_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_int_en_s cn66xx;
+ struct cvmx_agl_gmx_rxx_int_en_s cn68xx;
+ struct cvmx_agl_gmx_rxx_int_en_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_int_en cvmx_agl_gmx_rxx_int_en_t;
+
+/**
+ * cvmx_agl_gmx_rx#_int_reg
+ *
+ * AGL_GMX_RX_INT_REG = Interrupt Register
+ *
+ *
+ * Notes:
+ * (1) exceptions will only be raised to the control processor if the
+ * corresponding bit in the AGL_GMX_RX_INT_EN register is set.
+ *
+ * (2) exception conditions 10:0 can also set the rcv/opcode in the received
+ * packet's workQ entry. The AGL_GMX_RX_FRM_CHK register provides a bit mask
+ * for configuring which conditions set the error.
+ *
+ * (3) in half duplex operation, the expectation is that collisions will appear
+ * as MINERRs.
+ *
+ * (4) JABBER - An RX Jabber error indicates that a packet was received which
+ * is longer than the maximum allowed packet as defined by the
+ * system. GMX will truncate the packet at the JABBER count.
+ * Failure to do so could lead to system instability.
+ *
+ * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
+ * AGL_GMX_RX_FRM_MAX. For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
+ * > AGL_GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
+ *
+ * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < AGL_GMX_RX_FRM_MIN.
+ *
+ * (8) ALNERR - Indicates that the packet received was not an integer number of
+ * bytes. If FCS checking is enabled, ALNERR will only assert if
+ * the FCS is bad. If FCS checking is disabled, ALNERR will
+ * assert in all non-integer frame cases.
+ *
+ * (9) Collisions - Collisions can only occur in half-duplex mode. A collision
+ * is assumed by the receiver when the received
+ * frame < AGL_GMX_RX_FRM_MIN - this is normally a MINERR
+ *
+ * (A) LENERR - Length errors occur when the received packet does not match the
+ * length field. LENERR is only checked for packets between 64
+ * and 1500 bytes. For untagged frames, the length must match
+ * exactly. For tagged frames, the length or length+4 must match.
+ *
+ * (B) PCTERR - checks that the frame begins with a valid PREAMBLE sequence.
+ * Does not check the number of PREAMBLE cycles.
+ *
+ * (C) OVRERR -
+ *
+ * OVRERR is an architectural assertion check internal to GMX to
+ * make sure no assumption was violated. In a correctly operating
+ * system, this interrupt can never fire.
+ *
+ * GMX has an internal arbiter which selects which of 4 ports to
+ * buffer in the main RX FIFO. If 8 bytes are normally buffered,
+ * each port will typically push a tick every 8 cycles when the
+ * packet interface is running as fast as possible, so four ports
+ * collectively push a tick every two cycles. The assumption is
+ * that the inbound module can always consume one tick before the
+ * next is produced; OVRERR asserts when that assumption is
+ * violated.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_int_reg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex | NS */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed | NS */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus | NS */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< Packet reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) | NS */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Packet Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< Carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< MII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t reserved_1_1 : 1;
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn61xx;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn63xx;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn66xx;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn68xx;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_int_reg cvmx_agl_gmx_rxx_int_reg_t;
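+
+/* A minimal interrupt-service sketch for the conditions above (illustrative;
+ * it assumes cvmx_read_csr()/cvmx_write_csr() and treats the INT_REG bits as
+ * write-1-to-clear, an assumption that should be verified against the
+ * hardware manual):
+ *
+ * @verbatim
+ * cvmx_agl_gmx_rxx_int_reg_t isr;
+ * isr.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_INT_REG(port));
+ * isr.u64 &= cvmx_read_csr(CVMX_AGL_GMX_RXX_INT_EN(port));  // enabled only
+ * if (isr.s.ovrerr)
+ *     cvmx_dprintf("AGL: internal RX overflow - should never assert\n");
+ * cvmx_write_csr(CVMX_AGL_GMX_RXX_INT_REG(port), isr.u64);  // acknowledge
+ * @endverbatim
+ */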
+
+/**
+ * cvmx_agl_gmx_rx#_jabber
+ *
+ * AGL_GMX_RX_JABBER = The max size packet after which GMX will truncate
+ *
+ *
+ * Notes:
+ * CNT must be 8-byte aligned such that CNT[2:0] == 0
+ *
+ * The packet that will be sent to the packet input logic will have an
+ * additional 8 bytes if AGL_GMX_RX_FRM_CTL[PRE_CHK] is set and
+ * AGL_GMX_RX_FRM_CTL[PRE_STRP] is clear. The max packet that will be sent is
+ * defined as...
+ *
+ * max_sized_packet = AGL_GMX_RX_JABBER[CNT]+((AGL_GMX_RX_FRM_CTL[PRE_CHK] & !AGL_GMX_RX_FRM_CTL[PRE_STRP])*8)
+ *
+ * Be sure the CNT field value is at least as large as the
+ * AGL_GMX_RX_FRM_MAX[LEN] value. Failure to meet this constraint will cause
+ * packets that are within the AGL_GMX_RX_FRM_MAX[LEN] length to be rejected
+ * because they exceed the CNT limit.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_jabber {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_jabber_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt : 16; /**< Byte count for jabber check
+ Failing packets set the JABBER interrupt and are
+ optionally sent with opcode==JABBER
+ GMX will truncate the packet to CNT bytes
+ CNT >= AGL_GMX_RX_FRM_MAX[LEN] */
+#else
+ uint64_t cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_jabber_s cn52xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_jabber_s cn56xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_jabber_s cn61xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn63xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_jabber_s cn66xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn68xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_jabber cvmx_agl_gmx_rxx_jabber_t;
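+
+/* The CNT[2:0]==0 alignment rule and the CNT >= AGL_GMX_RX_FRM_MAX[LEN]
+ * constraint above, combined into one sketch (illustrative; new_cnt is a
+ * caller-supplied target and the address macros/accessors are assumed as
+ * before):
+ *
+ * @verbatim
+ * cvmx_agl_gmx_rxx_jabber_t jab;
+ * jab.u64 = 0;
+ * jab.s.cnt = (new_cnt + 7) & ~7ull;  // round up to 8-byte alignment
+ * // caller must also ensure jab.s.cnt >= AGL_GMX_RX_FRM_MAX[LEN]
+ * cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), jab.u64);
+ * @endverbatim
+ */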
+
+/**
+ * cvmx_agl_gmx_rx#_pause_drop_time
+ *
+ * AGL_GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_pause_drop_time {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t status : 16; /**< Time extracted from the dropped PAUSE packet */
+#else
+ uint64_t status : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn61xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn66xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn68xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_pause_drop_time cvmx_agl_gmx_rxx_pause_drop_time_t;
+
+/**
+ * cvmx_agl_gmx_rx#_rx_inbnd
+ *
+ * AGL_GMX_RX_INBND = RGMII InBand Link Status
+ *
+ *
+ * Notes:
+ * These fields are only valid if the attached PHY is operating in RGMII mode
+ * and supports the optional in-band status (see section 3.4.1 of the RGMII
+ * specification, version 1.3 for more information).
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_rx_inbnd {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t duplex : 1; /**< RGMII Inbound LinkDuplex | NS
+ 0=half-duplex
+ 1=full-duplex */
+ uint64_t speed : 2; /**< RGMII Inbound LinkSpeed | NS
+ 00=2.5MHz
+ 01=25MHz
+ 10=125MHz
+ 11=Reserved */
+ uint64_t status : 1; /**< RGMII Inbound LinkStatus | NS
+ 0=down
+ 1=up */
+#else
+ uint64_t status : 1;
+ uint64_t speed : 2;
+ uint64_t duplex : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn61xx;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xx;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn66xx;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn68xx;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_rx_inbnd cvmx_agl_gmx_rxx_rx_inbnd_t;
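+
+/* Decoding the in-band status fields above (illustrative helper; the RGMII
+ * clock rates 2.5/25/125 MHz correspond to 10/100/1000 Mbps operation):
+ *
+ * @verbatim
+ * static const char *agl_inbnd_speed(cvmx_agl_gmx_rxx_rx_inbnd_t st)
+ * {
+ *     switch (st.s.speed) {
+ *     case 0:  return "10 Mbps (2.5 MHz)";
+ *     case 1:  return "100 Mbps (25 MHz)";
+ *     case 2:  return "1000 Mbps (125 MHz)";
+ *     default: return "reserved";
+ *     }
+ * }
+ * @endverbatim
+ */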
+
+/**
+ * cvmx_agl_gmx_rx#_stats_ctl
+ *
+ * AGL_GMX_RX_STATS_CTL = RX Stats Control register
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_stats_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t rd_clr : 1; /**< RX Stats registers will clear on reads */
+#else
+ uint64_t rd_clr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn61xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn66xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn68xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_ctl cvmx_agl_gmx_rxx_stats_ctl_t;
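+
+/*
+ * Illustrative sketch (not part of the original header): put the RX stats
+ * block into the read-to-clear mode described above.  Assumes the SDK
+ * accessors cvmx_read_csr()/cvmx_write_csr() and a
+ * CVMX_AGL_GMX_RXX_STATS_CTL(port) address macro from this header.
+ */
+static inline void agl_rx_stats_rd_clr(int port)
+{
+    cvmx_agl_gmx_rxx_stats_ctl_t ctl;
+    ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port));
+    ctl.s.rd_clr = 1;    /* RX stats registers now clear on each read */
+    cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), ctl.u64);
+}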
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_octs {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of received good packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn61xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn66xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn68xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_octs cvmx_agl_gmx_rxx_stats_octs_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs_ctl
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_octs_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of received pause packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn61xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn66xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn68xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_octs_ctl cvmx_agl_gmx_rxx_stats_octs_ctl_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs_dmac
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_octs_dmac {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of filtered dmac packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn61xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn66xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn68xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_octs_dmac cvmx_agl_gmx_rxx_stats_octs_dmac_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs_drp
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_octs_drp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of dropped packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn61xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn66xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn68xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_octs_drp cvmx_agl_gmx_rxx_stats_octs_drp_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts
+ *
+ * AGL_GMX_RX_STATS_PKTS
+ *
+ * Count of good received packets - packets that are not recognized as PAUSE
+ * packets, dropped due to the DMAC filter, dropped due to FIFO full status,
+ * or that have any other error (FCS, Length, etc).
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_pkts {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of received good packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn61xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn66xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn68xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_pkts cvmx_agl_gmx_rxx_stats_pkts_t;
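+
+/*
+ * Illustrative sketch (not part of the original header): the CNT field is
+ * 32 bits and wraps, so a wrap-safe delta between two successive polls
+ * (when RD_CLR is not in use) relies on unsigned arithmetic.  The function
+ * name is hypothetical.
+ */
+static inline uint32_t agl_rx_pkts_delta(uint32_t prev, uint32_t now)
+{
+    /* unsigned subtraction handles a single 32-bit wrap correctly */
+    return now - prev;
+}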
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_bad
+ *
+ * AGL_GMX_RX_STATS_PKTS_BAD
+ *
+ * Count of all packets received with some error that were not dropped
+ * either due to the dmac filter or lack of room in the receive FIFO.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_pkts_bad {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of bad packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn61xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn66xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn68xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_pkts_bad cvmx_agl_gmx_rxx_stats_pkts_bad_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_ctl
+ *
+ * AGL_GMX_RX_STATS_PKTS_CTL
+ *
+ * Count of all packets received that were recognized as Flow Control or
+ * PAUSE packets. PAUSE packets with any kind of error are counted in
+ * AGL_GMX_RX_STATS_PKTS_BAD. Pause packets can be optionally dropped or
+ * forwarded based on the AGL_GMX_RX_FRM_CTL[CTL_DRP] bit. This count
+ * increments regardless of whether the packet is dropped. Pause packets
+ * will never be counted in AGL_GMX_RX_STATS_PKTS. Packets dropped due to the
+ * dmac filter will be counted in AGL_GMX_RX_STATS_PKTS_DMAC and not here.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_pkts_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of received pause packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn61xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn66xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn68xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_pkts_ctl cvmx_agl_gmx_rxx_stats_pkts_ctl_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_dmac
+ *
+ * AGL_GMX_RX_STATS_PKTS_DMAC
+ *
+ * Count of all packets received that were dropped by the dmac filter.
+ * Packets that match the DMAC will be dropped and counted here regardless
+ * of if they were bad packets. These packets will never be counted in
+ * AGL_GMX_RX_STATS_PKTS.
+ *
+ * Some packets that were not able to satisfy the DECISION_CNT may not
+ * actually be dropped by Octeon, but they will be counted here as if they
+ * were dropped.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_pkts_dmac {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of filtered dmac packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn61xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn66xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn68xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_pkts_dmac cvmx_agl_gmx_rxx_stats_pkts_dmac_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_drp
+ *
+ * AGL_GMX_RX_STATS_PKTS_DRP
+ *
+ * Count of all packets received that were dropped due to a full receive
+ * FIFO. This counts good and bad packets received - all packets dropped by
+ * the FIFO. It does not count packets dropped by the dmac or pause packet
+ * filters.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_pkts_drp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of dropped packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn61xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn66xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn68xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_pkts_drp cvmx_agl_gmx_rxx_stats_pkts_drp_t;
+
+/**
+ * cvmx_agl_gmx_rx#_udd_skp
+ *
+ * AGL_GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
+ *
+ *
+ * Notes:
+ * (1) The skip bytes are part of the packet and will be sent down the NCB
+ * packet interface and will be handled by PKI.
+ *
+ * (2) The system can determine if the UDD bytes are included in the FCS check
+ * by using the FCSSEL field - if the FCS check is enabled.
+ *
+ * (3) Assume that the preamble/sfd is always at the start of the frame - even
+ *     before UDD bytes. In most cases there will be no preamble, since this
+ *     will be MII-to-MII communication without a PHY involved.
+ *
+ * (4) We can still do address filtering and control packet filtering if the
+ *     user desires.
+ *
+ * (5) UDD_SKP must be 0 in half-duplex operation unless
+ * AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear. If AGL_GMX_RX_FRM_CTL[PRE_CHK] is set,
+ * then UDD_SKP will normally be 8.
+ *
+ * (6) In all cases, the UDD bytes will be sent down the packet interface as
+ * part of the packet. The UDD bytes are never stripped from the actual
+ * packet.
+ *
+ * (7) If LEN != 0, then AGL_GMX_RX_FRM_CHK[LENERR] will be disabled and AGL_GMX_RX_INT_REG[LENERR] will be zero
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_udd_skp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_udd_skp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t fcssel : 1; /**< Include the skip bytes in the FCS calculation
+ 0 = all skip bytes are included in FCS
+ 1 = the skip bytes are not included in FCS */
+ uint64_t reserved_7_7 : 1;
+ uint64_t len : 7; /**< Amount of User-defined data before the start of
+ the L2 data. Zero means L2 comes first.
+ Max value is 64. */
+#else
+ uint64_t len : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t fcssel : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn52xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn61xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn63xxp1;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn66xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn68xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_udd_skp cvmx_agl_gmx_rxx_udd_skp_t;
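+
+/*
+ * Illustrative sketch (not part of the original header): program an 8-byte
+ * user-defined prefix that is excluded from the FCS check, per notes (2)
+ * and (5) above.  Assumes the SDK accessor cvmx_write_csr() and a
+ * CVMX_AGL_GMX_RXX_UDD_SKP(port) address macro from this header.
+ */
+static inline void agl_rx_udd_skp_set(int port)
+{
+    cvmx_agl_gmx_rxx_udd_skp_t skp;
+    skp.u64 = 0;
+    skp.s.len = 8;       /* 8 UDD bytes before the L2 header */
+    skp.s.fcssel = 1;    /* skip bytes are not included in the FCS */
+    cvmx_write_csr(CVMX_AGL_GMX_RXX_UDD_SKP(port), skp.u64);
+}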
+
+/**
+ * cvmx_agl_gmx_rx_bp_drop#
+ *
+ * AGL_GMX_RX_BP_DROP = FIFO mark for packet drop
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rx_bp_dropx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_dropx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t mark : 6; /**< Number of 8B ticks to reserve in the RX FIFO.
+ When the FIFO exceeds this count, packets will
+ be dropped and not buffered.
+ MARK should typically be programmed to 2.
+ Failure to program correctly can lead to system
+ instability. */
+#else
+ uint64_t mark : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn52xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn61xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn63xxp1;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn66xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn68xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rx_bp_dropx cvmx_agl_gmx_rx_bp_dropx_t;
+
+/**
+ * cvmx_agl_gmx_rx_bp_off#
+ *
+ * AGL_GMX_RX_BP_OFF = Lowater mark for packet drop
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rx_bp_offx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_offx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t mark : 6; /**< Water mark (8B ticks) to deassert backpressure */
+#else
+ uint64_t mark : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn52xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn61xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn63xxp1;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn66xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn68xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rx_bp_offx cvmx_agl_gmx_rx_bp_offx_t;
+
+/**
+ * cvmx_agl_gmx_rx_bp_on#
+ *
+ * AGL_GMX_RX_BP_ON = Hiwater mark for port/interface backpressure
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rx_bp_onx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_onx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t mark : 9; /**< Hiwater mark (8B ticks) for backpressure. */
+#else
+ uint64_t mark : 9;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn52xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn61xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn63xxp1;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn66xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn68xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rx_bp_onx cvmx_agl_gmx_rx_bp_onx_t;
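+
+/*
+ * Illustrative sketch (not part of the original header): program the three
+ * RX FIFO watermarks together.  The ON/OFF values below are hypothetical;
+ * DROP uses the typical value of 2 noted above.  Assumes the SDK accessor
+ * cvmx_write_csr() and the CVMX_AGL_GMX_RX_BP_DROPX/_ONX/_OFFX(port)
+ * address macros from this header.
+ */
+static inline void agl_rx_bp_setup(int port)
+{
+    cvmx_agl_gmx_rx_bp_dropx_t drop;
+    cvmx_agl_gmx_rx_bp_onx_t on;
+    cvmx_agl_gmx_rx_bp_offx_t off;
+
+    drop.u64 = 0;
+    drop.s.mark = 2;     /* typical value; reserve 2 8B ticks before drop */
+    cvmx_write_csr(CVMX_AGL_GMX_RX_BP_DROPX(port), drop.u64);
+
+    on.u64 = 0;
+    on.s.mark = 0x40;    /* hypothetical hiwater: assert backpressure */
+    cvmx_write_csr(CVMX_AGL_GMX_RX_BP_ONX(port), on.u64);
+
+    off.u64 = 0;
+    off.s.mark = 0x20;   /* hypothetical lowater: deassert backpressure */
+    cvmx_write_csr(CVMX_AGL_GMX_RX_BP_OFFX(port), off.u64);
+}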
+
+/**
+ * cvmx_agl_gmx_rx_prt_info
+ *
+ * AGL_GMX_RX_PRT_INFO = state information for the ports
+ *
+ *
+ * Notes:
+ * COMMIT[0], DROP[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * COMMIT[1], DROP[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rx_prt_info {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_prt_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t drop : 2; /**< Port indication that data was dropped */
+ uint64_t reserved_2_15 : 14;
+ uint64_t commit : 2; /**< Port indication that SOP was accepted */
+#else
+ uint64_t commit : 2;
+ uint64_t reserved_2_15 : 14;
+ uint64_t drop : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_prt_info_s cn52xx;
+ struct cvmx_agl_gmx_rx_prt_info_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_prt_info_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t drop : 1; /**< Port indication that data was dropped */
+ uint64_t reserved_1_15 : 15;
+ uint64_t commit : 1; /**< Port indication that SOP was accepted */
+#else
+ uint64_t commit : 1;
+ uint64_t reserved_1_15 : 15;
+ uint64_t drop : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_rx_prt_info_s cn61xx;
+ struct cvmx_agl_gmx_rx_prt_info_s cn63xx;
+ struct cvmx_agl_gmx_rx_prt_info_s cn63xxp1;
+ struct cvmx_agl_gmx_rx_prt_info_s cn66xx;
+ struct cvmx_agl_gmx_rx_prt_info_s cn68xx;
+ struct cvmx_agl_gmx_rx_prt_info_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rx_prt_info cvmx_agl_gmx_rx_prt_info_t;
+
+/**
+ * cvmx_agl_gmx_rx_tx_status
+ *
+ * AGL_GMX_RX_TX_STATUS = GMX RX/TX Status
+ *
+ *
+ * Notes:
+ * RX[0], TX[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * RX[1], TX[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rx_tx_status {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_tx_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t tx : 2; /**< Transmit data since last read */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rx : 2; /**< Receive data since last read */
+#else
+ uint64_t rx : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t tx : 2;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_tx_status_s cn52xx;
+ struct cvmx_agl_gmx_rx_tx_status_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_tx_status_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t tx : 1; /**< Transmit data since last read */
+ uint64_t reserved_1_3 : 3;
+ uint64_t rx : 1; /**< Receive data since last read */
+#else
+ uint64_t rx : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t tx : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_rx_tx_status_s cn61xx;
+ struct cvmx_agl_gmx_rx_tx_status_s cn63xx;
+ struct cvmx_agl_gmx_rx_tx_status_s cn63xxp1;
+ struct cvmx_agl_gmx_rx_tx_status_s cn66xx;
+ struct cvmx_agl_gmx_rx_tx_status_s cn68xx;
+ struct cvmx_agl_gmx_rx_tx_status_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_rx_tx_status cvmx_agl_gmx_rx_tx_status_t;
+
+/**
+ * cvmx_agl_gmx_smac#
+ *
+ * AGL_GMX_SMAC = Packet SMAC
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_smacx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_smacx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t smac : 48; /**< The SMAC field is used for generating and
+ accepting Control Pause packets */
+#else
+ uint64_t smac : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_smacx_s cn52xx;
+ struct cvmx_agl_gmx_smacx_s cn52xxp1;
+ struct cvmx_agl_gmx_smacx_s cn56xx;
+ struct cvmx_agl_gmx_smacx_s cn56xxp1;
+ struct cvmx_agl_gmx_smacx_s cn61xx;
+ struct cvmx_agl_gmx_smacx_s cn63xx;
+ struct cvmx_agl_gmx_smacx_s cn63xxp1;
+ struct cvmx_agl_gmx_smacx_s cn66xx;
+ struct cvmx_agl_gmx_smacx_s cn68xx;
+ struct cvmx_agl_gmx_smacx_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_smacx cvmx_agl_gmx_smacx_t;
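+
+/*
+ * Illustrative sketch (not part of the original header): load the source
+ * MAC used when generating and accepting Control PAUSE packets.  Assumes
+ * the SDK accessor cvmx_write_csr() and a CVMX_AGL_GMX_SMACX(port) address
+ * macro from this header.
+ */
+static inline void agl_smac_set(int port, uint64_t mac48)
+{
+    cvmx_agl_gmx_smacx_t smac;
+    smac.u64 = 0;
+    smac.s.smac = mac48 & 0xffffffffffffull;  /* low 48 bits only */
+    cvmx_write_csr(CVMX_AGL_GMX_SMACX(port), smac.u64);
+}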
+
+/**
+ * cvmx_agl_gmx_stat_bp
+ *
+ * AGL_GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ *
+ *
+ * It has no relationship with the TX FIFO per se. The TX engine sends packets
+ * from PKO and upon completion, sends a command to the TX stats block for an
+ * update based on the packet size. The stats operation can take a few cycles -
+ * normally not enough to be visible given the 64B minimum packet size that
+ * Ethernet convention requires.
+ *
+ * In the rare case in which SW attempts to schedule very small packets, or
+ * the sclk (6xxx) is running very slowly, the stats updates may not happen
+ * in real time and can back up the TX engine.
+ *
+ * This counter is the number of cycles in which the TX engine was stalled.
+ * In normal operation, it should always be zero.
+ */
+union cvmx_agl_gmx_stat_bp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_stat_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t bp : 1; /**< Current TX stats BP state
+ When the TX stats machine cannot update the stats
+ registers quickly enough, the machine has the
+ ability to BP TX datapath. This is a rare event
+ and will not occur in normal operation.
+ 0 = no backpressure is applied
+ 1 = backpressure is applied to TX datapath to
+ allow stat update operations to complete */
+ uint64_t cnt : 16; /**< Number of cycles that BP has been asserted
+ Saturating counter */
+#else
+ uint64_t cnt : 16;
+ uint64_t bp : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_agl_gmx_stat_bp_s cn52xx;
+ struct cvmx_agl_gmx_stat_bp_s cn52xxp1;
+ struct cvmx_agl_gmx_stat_bp_s cn56xx;
+ struct cvmx_agl_gmx_stat_bp_s cn56xxp1;
+ struct cvmx_agl_gmx_stat_bp_s cn61xx;
+ struct cvmx_agl_gmx_stat_bp_s cn63xx;
+ struct cvmx_agl_gmx_stat_bp_s cn63xxp1;
+ struct cvmx_agl_gmx_stat_bp_s cn66xx;
+ struct cvmx_agl_gmx_stat_bp_s cn68xx;
+ struct cvmx_agl_gmx_stat_bp_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_stat_bp cvmx_agl_gmx_stat_bp_t;
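+
+/*
+ * Illustrative sketch (not part of the original header): check whether the
+ * TX stats block has ever stalled the TX datapath.  A nonzero saturating
+ * CNT indicates the rare stall condition described above.  Assumes the SDK
+ * accessor cvmx_read_csr() and a CVMX_AGL_GMX_STAT_BP address macro from
+ * this header.
+ */
+static inline int agl_tx_stats_stalled(void)
+{
+    cvmx_agl_gmx_stat_bp_t bp;
+    bp.u64 = cvmx_read_csr(CVMX_AGL_GMX_STAT_BP);
+    return bp.s.cnt != 0;   /* cycles of backpressure since reset */
+}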
+
+/**
+ * cvmx_agl_gmx_tx#_append
+ *
+ * AGL_GMX_TX_APPEND = Packet TX Append Control
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_append {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_append_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t force_fcs : 1; /**< Append the Ethernet FCS on each pause packet
+ when FCS is clear. Pause packets are normally
+ padded to 60 bytes. If
+ AGL_GMX_TX_MIN_PKT[MIN_SIZE] exceeds 59, then
+ FORCE_FCS will not be used. */
+ uint64_t fcs : 1; /**< Append the Ethernet FCS on each packet */
+ uint64_t pad : 1; /**< Append PAD bytes so the frame is min sized */
+ uint64_t preamble : 1; /**< Prepend the Ethernet preamble on each transfer */
+#else
+ uint64_t preamble : 1;
+ uint64_t pad : 1;
+ uint64_t fcs : 1;
+ uint64_t force_fcs : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_append_s cn52xx;
+ struct cvmx_agl_gmx_txx_append_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_append_s cn56xx;
+ struct cvmx_agl_gmx_txx_append_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_append_s cn61xx;
+ struct cvmx_agl_gmx_txx_append_s cn63xx;
+ struct cvmx_agl_gmx_txx_append_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_append_s cn66xx;
+ struct cvmx_agl_gmx_txx_append_s cn68xx;
+ struct cvmx_agl_gmx_txx_append_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_append cvmx_agl_gmx_txx_append_t;
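+
+/*
+ * Illustrative sketch (not part of the original header): enable the usual
+ * Ethernet framing appends (preamble, pad, FCS) on a port.  Assumes the
+ * SDK accessor cvmx_write_csr() and a CVMX_AGL_GMX_TXX_APPEND(port)
+ * address macro from this header.
+ */
+static inline void agl_tx_append_default(int port)
+{
+    cvmx_agl_gmx_txx_append_t app;
+    app.u64 = 0;
+    app.s.preamble = 1;  /* prepend the Ethernet preamble */
+    app.s.pad = 1;       /* pad frames up to the min size */
+    app.s.fcs = 1;       /* append the Ethernet FCS */
+    cvmx_write_csr(CVMX_AGL_GMX_TXX_APPEND(port), app.u64);
+}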
+
+/**
+ * cvmx_agl_gmx_tx#_clk
+ *
+ * AGL_GMX_TX_CLK = RGMII TX Clock Generation Register
+ *
+ *
+ * Notes:
+ * Normal Programming Values:
+ * (1) RGMII, 1000Mbs (AGL_GMX_PRT_CFG[SPEED]==1), CLK_CNT == 1
+ * (2) RGMII, 10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 50/5
+ * (3) MII, 10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 1
+ *
+ * RGMII Example:
+ * Given a 125MHz PLL reference clock...
+ * CLK_CNT == 1 ==> 125.0MHz TXC clock period (8ns* 1)
+ * CLK_CNT == 5 ==> 25.0MHz TXC clock period (8ns* 5)
+ * CLK_CNT == 50 ==> 2.5MHz TXC clock period (8ns*50)
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_clk {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_clk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t clk_cnt : 6; /**< Controls the RGMII TXC frequency | NS
+ TXC(period) =
+ rgm_ref_clk(period)*CLK_CNT */
+#else
+ uint64_t clk_cnt : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_clk_s cn61xx;
+ struct cvmx_agl_gmx_txx_clk_s cn63xx;
+ struct cvmx_agl_gmx_txx_clk_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_clk_s cn66xx;
+ struct cvmx_agl_gmx_txx_clk_s cn68xx;
+ struct cvmx_agl_gmx_txx_clk_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_clk cvmx_agl_gmx_txx_clk_t;
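+
+/*
+ * Illustrative sketch (not part of the original header): pick CLK_CNT from
+ * the RGMII table above for a 125MHz reference clock.  Assumes the SDK
+ * accessor cvmx_write_csr() and a CVMX_AGL_GMX_TXX_CLK(port) address macro
+ * from this header.
+ */
+static inline void agl_tx_clk_set(int port, int mbps)
+{
+    cvmx_agl_gmx_txx_clk_t clk;
+    clk.u64 = 0;
+    /* 1000Mbs -> 125MHz TXC, 100Mbs -> 25MHz, 10Mbs -> 2.5MHz */
+    clk.s.clk_cnt = (mbps == 1000) ? 1 : (mbps == 100) ? 5 : 50;
+    cvmx_write_csr(CVMX_AGL_GMX_TXX_CLK(port), clk.u64);
+}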
+
+/**
+ * cvmx_agl_gmx_tx#_ctl
+ *
+ * AGL_GMX_TX_CTL = TX Control register
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t xsdef_en : 1; /**< Enables the excessive deferral check for stats
+ and interrupts */
+ uint64_t xscol_en : 1; /**< Enables the excessive collision check for stats
+ and interrupts */
+#else
+ uint64_t xscol_en : 1;
+ uint64_t xsdef_en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_ctl_s cn52xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_ctl_s cn56xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_ctl_s cn61xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn63xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_ctl_s cn66xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn68xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_ctl cvmx_agl_gmx_txx_ctl_t;
+
+/**
+ * cvmx_agl_gmx_tx#_min_pkt
+ *
+ * AGL_GMX_TX_MIN_PKT = Packet TX Min Size Packet (PAD up to min size)
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_min_pkt {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_min_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t min_size : 8; /**< Min frame in bytes before the FCS is applied.
+ Padding is only appended when
+ AGL_GMX_TX_APPEND[PAD] for the corresponding
+ packet port is set. Packets will be padded to
+ MIN_SIZE+1. The reset value will pad to 60 bytes. */
+#else
+ uint64_t min_size : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn52xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn56xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn61xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn63xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn66xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn68xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_min_pkt cvmx_agl_gmx_txx_min_pkt_t;
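+
+/*
+ * Illustrative sketch (not part of the original header): pad transmitted
+ * frames to the conventional 60 bytes before FCS.  Because frames are
+ * padded to MIN_SIZE+1, MIN_SIZE is written as 59.  Assumes the SDK
+ * accessor cvmx_write_csr() and a CVMX_AGL_GMX_TXX_MIN_PKT(port) address
+ * macro from this header.
+ */
+static inline void agl_tx_min_pkt_set(int port)
+{
+    cvmx_agl_gmx_txx_min_pkt_t min;
+    min.u64 = 0;
+    min.s.min_size = 59;  /* pads to 59+1 = 60 bytes, the reset behavior */
+    cvmx_write_csr(CVMX_AGL_GMX_TXX_MIN_PKT(port), min.u64);
+}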
+
+/**
+ * cvmx_agl_gmx_tx#_pause_pkt_interval
+ *
+ * AGL_GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets will be sent
+ *
+ *
+ * Notes:
+ * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
+ * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
+ * designer. It is suggested that TIME be much greater than INTERVAL and
+ * AGL_GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
+ * count and then when the backpressure condition is lifted, a PAUSE packet
+ * with TIME==0 will be sent indicating that Octeon is ready for additional
+ * data.
+ *
+ * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
+ * suggested that TIME and INTERVAL are programmed such that they satisfy the
+ * following rule:
+ *
+ * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
+ *
+ * where largest_pkt_size is the largest packet that the system can send
+ * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
+ * of the PAUSE packet (normally 64B).
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_pause_pkt_interval {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t interval : 16; /**< Arbitrate for a pause packet every (INTERVAL*512)
+ bit-times.
+ Normally, 0 < INTERVAL < AGL_GMX_TX_PAUSE_PKT_TIME.
+ INTERVAL=0 will only send a single PAUSE packet
+ for each backpressure event */
+#else
+ uint64_t interval : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn61xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn66xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn68xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_pause_pkt_interval cvmx_agl_gmx_txx_pause_pkt_interval_t;
+
+/**
+ * cvmx_agl_gmx_tx#_pause_pkt_time
+ *
+ * AGL_GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field
+ *
+ *
+ * Notes:
+ * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
+ * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
+ * designer. It is suggested that TIME be much greater than INTERVAL and
+ * AGL_GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
+ * count and then when the backpressure condition is lifted, a PAUSE packet
+ * with TIME==0 will be sent indicating that Octeon is ready for additional
+ * data.
+ *
+ * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
+ * suggested that TIME and INTERVAL are programmed such that they satisfy the
+ * following rule:
+ *
+ * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
+ *
+ * where largest_pkt_size is the largest packet that the system can send
+ * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
+ * of the PAUSE packet (normally 64B).
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_pause_pkt_time {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< The pause_time field placed in outbound pause pkts
+ pause_time is in 512 bit-times
+ Normally, TIME > AGL_GMX_TX_PAUSE_PKT_INTERVAL */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn61xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn66xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn68xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_pause_pkt_time cvmx_agl_gmx_txx_pause_pkt_time_t;
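+
+/*
+ * Illustrative sketch (not part of the original header): program TIME and
+ * INTERVAL following the rule above for 1518B frames, a 12B IFG and 64B
+ * PAUSE packets.  The unit values are hypothetical; assumes the SDK
+ * accessor cvmx_write_csr() and the
+ * CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME/_INTERVAL(port) macros from this header.
+ */
+static inline void agl_tx_pause_setup(int port)
+{
+    cvmx_agl_gmx_txx_pause_pkt_time_t t;
+    cvmx_agl_gmx_txx_pause_pkt_interval_t i;
+    /* (1518 + 12 + 64) bytes = 12752 bits, i.e. ~25 units of 512 bit-times */
+    t.u64 = 0;
+    t.s.time = 96;           /* hypothetical pause_time */
+    i.u64 = 0;
+    i.s.interval = 96 - 25;  /* INTERVAL <= TIME - (pkt + IFG + pause) */
+    cvmx_write_csr(CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(port), t.u64);
+    cvmx_write_csr(CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(port), i.u64);
+}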
+
+/**
+ * cvmx_agl_gmx_tx#_pause_togo
+ *
+ * AGL_GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_pause_togo {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_togo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< Amount of time remaining to backpressure */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn61xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn66xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn68xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_pause_togo cvmx_agl_gmx_txx_pause_togo_t;
+
+/**
+ * cvmx_agl_gmx_tx#_pause_zero
+ *
+ * AGL_GMX_TX_PAUSE_ZERO = Packet TX Send PAUSE-zero on backpressure clear
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_pause_zero {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_zero_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t send : 1; /**< When the backpressure condition clears, send a
+ PAUSE packet with pause_time of zero to enable
+ the channel */
+#else
+ uint64_t send : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn61xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn66xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn68xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_pause_zero cvmx_agl_gmx_txx_pause_zero_t;
+
+/**
+ * cvmx_agl_gmx_tx#_soft_pause
+ *
+ * AGL_GMX_TX_SOFT_PAUSE = Packet TX Software Pause
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_soft_pause {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_soft_pause_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< Back off the TX bus for (TIME*512) bit-times
+ for full-duplex operation only */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn52xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn56xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn61xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn63xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn66xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn68xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_soft_pause cvmx_agl_gmx_txx_soft_pause_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat0
+ *
+ * AGL_GMX_TX_STAT0 = AGL_GMX_TX_STATS_XSDEF / AGL_GMX_TX_STATS_XSCOL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat0 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t xsdef : 32; /**< Number of packets dropped (never successfully
+ sent) due to excessive deferral */
+ uint64_t xscol : 32; /**< Number of packets dropped (never successfully
+ sent) due to excessive collision. Defined by
+ AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
+#else
+ uint64_t xscol : 32;
+ uint64_t xsdef : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat0_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat0_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat0_s cn61xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_stat0_s cn66xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn68xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat0 cvmx_agl_gmx_txx_stat0_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat1
+ *
+ * AGL_GMX_TX_STAT1 = AGL_GMX_TX_STATS_SCOL / AGL_GMX_TX_STATS_MCOL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat1 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t scol : 32; /**< Number of packets sent with a single collision */
+ uint64_t mcol : 32; /**< Number of packets sent with multiple collisions
+ but < AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
+#else
+ uint64_t mcol : 32;
+ uint64_t scol : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat1_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat1_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat1_s cn61xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_stat1_s cn66xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn68xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat1 cvmx_agl_gmx_txx_stat1_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat2
+ *
+ * AGL_GMX_TX_STAT2 = AGL_GMX_TX_STATS_OCTS
+ *
+ *
+ * Notes:
+ * - Octet counts are the sum of all data transmitted on the wire including
+ * packet data, pad bytes, fcs bytes, pause bytes, and jam bytes. The octet
+ * counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat2 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t octs : 48; /**< Number of total octets sent on the interface.
+ Does not count octets from frames that were
+ truncated due to collisions in halfdup mode. */
+#else
+ uint64_t octs : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat2_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat2_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat2_s cn61xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_stat2_s cn66xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn68xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat2 cvmx_agl_gmx_txx_stat2_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat3
+ *
+ * AGL_GMX_TX_STAT3 = AGL_GMX_TX_STATS_PKTS
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat3 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pkts : 32; /**< Number of total frames sent on the interface.
+ Does not count frames that were truncated due to
+ collisions in halfdup mode. */
+#else
+ uint64_t pkts : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat3_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat3_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat3_s cn61xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_stat3_s cn66xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn68xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat3 cvmx_agl_gmx_txx_stat3_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat4
+ *
+ * AGL_GMX_TX_STAT4 = AGL_GMX_TX_STATS_HIST1 (64) / AGL_GMX_TX_STATS_HIST0 (<64)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat4 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist1 : 32; /**< Number of packets sent with an octet count of 64. */
+ uint64_t hist0 : 32; /**< Number of packets sent with an octet count
+ of < 64. */
+#else
+ uint64_t hist0 : 32;
+ uint64_t hist1 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat4_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat4_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat4_s cn61xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_stat4_s cn66xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn68xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat4 cvmx_agl_gmx_txx_stat4_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat5
+ *
+ * AGL_GMX_TX_STAT5 = AGL_GMX_TX_STATS_HIST3 (128- 255) / AGL_GMX_TX_STATS_HIST2 (65- 127)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat5 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist3 : 32; /**< Number of packets sent with an octet count of
+ 128 - 255. */
+ uint64_t hist2 : 32; /**< Number of packets sent with an octet count of
+ 65 - 127. */
+#else
+ uint64_t hist2 : 32;
+ uint64_t hist3 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat5_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat5_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat5_s cn61xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_stat5_s cn66xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn68xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat5 cvmx_agl_gmx_txx_stat5_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat6
+ *
+ * AGL_GMX_TX_STAT6 = AGL_GMX_TX_STATS_HIST5 (512-1023) / AGL_GMX_TX_STATS_HIST4 (256-511)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat6 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist5 : 32; /**< Number of packets sent with an octet count of
+ 512 - 1023. */
+ uint64_t hist4 : 32; /**< Number of packets sent with an octet count of
+ 256 - 511. */
+#else
+ uint64_t hist4 : 32;
+ uint64_t hist5 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat6_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat6_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat6_s cn61xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_stat6_s cn66xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn68xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat6 cvmx_agl_gmx_txx_stat6_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat7
+ *
+ * AGL_GMX_TX_STAT7 = AGL_GMX_TX_STATS_HIST7 (>1518) / AGL_GMX_TX_STATS_HIST6 (1024-1518)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat7 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist7 : 32; /**< Number of packets sent with an octet count
+ of > 1518. */
+ uint64_t hist6 : 32; /**< Number of packets sent with an octet count of
+ 1024 - 1518. */
+#else
+ uint64_t hist6 : 32;
+ uint64_t hist7 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat7_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat7_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat7_s cn61xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_stat7_s cn66xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn68xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat7 cvmx_agl_gmx_txx_stat7_t;
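+
+/*
+ * Illustrative sketch (not part of the original header): gather the full
+ * TX length histogram spread across STAT4..STAT7.  The hist[] layout is
+ * hypothetical; assumes the SDK accessor cvmx_read_csr() and the
+ * CVMX_AGL_GMX_TXX_STAT4..7(port) address macros from this header.
+ */
+static inline void agl_tx_hist_read(int port, uint32_t hist[8])
+{
+    cvmx_agl_gmx_txx_stat4_t s4;
+    cvmx_agl_gmx_txx_stat5_t s5;
+    cvmx_agl_gmx_txx_stat6_t s6;
+    cvmx_agl_gmx_txx_stat7_t s7;
+    s4.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT4(port));
+    s5.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT5(port));
+    s6.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT6(port));
+    s7.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT7(port));
+    hist[0] = s4.s.hist0;   /* < 64 bytes   */
+    hist[1] = s4.s.hist1;   /* exactly 64   */
+    hist[2] = s5.s.hist2;   /* 65 - 127     */
+    hist[3] = s5.s.hist3;   /* 128 - 255    */
+    hist[4] = s6.s.hist4;   /* 256 - 511    */
+    hist[5] = s6.s.hist5;   /* 512 - 1023   */
+    hist[6] = s7.s.hist6;   /* 1024 - 1518  */
+    hist[7] = s7.s.hist7;   /* > 1518       */
+}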
+
+/**
+ * cvmx_agl_gmx_tx#_stat8
+ *
+ * AGL_GMX_TX_STAT8 = AGL_GMX_TX_STATS_MCST / AGL_GMX_TX_STATS_BCST
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the
+ * packet. GMX assumes that the DMAC lies in the first 6 bytes of the packet
+ * as per the 802.3 frame definition. If the system requires additional data
+ * before the L2 header, then the MCST and BCST counters may not reflect
+ * reality and should be ignored by software.
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat8 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat8_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mcst : 32; /**< Number of packets sent to multicast DMAC.
+ Does not include BCST packets. */
+ uint64_t bcst : 32; /**< Number of packets sent to broadcast DMAC.
+ Does not include MCST packets. */
+#else
+ uint64_t bcst : 32;
+ uint64_t mcst : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat8_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat8_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat8_s cn61xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_stat8_s cn66xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn68xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat8 cvmx_agl_gmx_txx_stat8_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat9
+ *
+ * AGL_GMX_TX_STAT9 = AGL_GMX_TX_STATS_UNDFLW / AGL_GMX_TX_STATS_CTL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat9 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat9_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t undflw : 32; /**< Number of underflow packets */
+ uint64_t ctl : 32; /**< Number of Control packets (PAUSE flow control)
+ generated by GMX. It does not include control
+ packets forwarded or generated by the PPs. */
+#else
+ uint64_t ctl : 32;
+ uint64_t undflw : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat9_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat9_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat9_s cn61xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_stat9_s cn66xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn68xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat9 cvmx_agl_gmx_txx_stat9_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stats_ctl
+ *
+ * AGL_GMX_TX_STATS_CTL = TX Stats Control register
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_stats_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stats_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t rd_clr : 1; /**< Stats registers will clear on reads */
+#else
+ uint64_t rd_clr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn52xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn61xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn63xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn66xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn68xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stats_ctl cvmx_agl_gmx_txx_stats_ctl_t;
+
+/**
+ * cvmx_agl_gmx_tx#_thresh
+ *
+ * AGL_GMX_TX_THRESH = Packet TX Threshold
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_thresh {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_thresh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t cnt : 6; /**< Number of 16B ticks to accumulate in the TX FIFO
+ before sending on the packet interface
+ This register should be large enough to prevent
+ underflow on the packet interface and must never
+ be set below 4. This register cannot exceed the
+ TX FIFO depth, which is 128 8B entries. */
+#else
+ uint64_t cnt : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_thresh_s cn52xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_thresh_s cn56xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_thresh_s cn61xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn63xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn63xxp1;
+ struct cvmx_agl_gmx_txx_thresh_s cn66xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn68xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_txx_thresh cvmx_agl_gmx_txx_thresh_t;
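+
+/*
+ * Illustrative sketch (not part of the original header): program the TX
+ * threshold within the documented bounds (never below 4, never above the
+ * FIFO depth).  Assumes the SDK accessor cvmx_write_csr() and a
+ * CVMX_AGL_GMX_TXX_THRESH(port) address macro from this header.
+ */
+static inline void agl_tx_thresh_set(int port, unsigned cnt)
+{
+    cvmx_agl_gmx_txx_thresh_t th;
+    if (cnt < 4)
+        cnt = 4;     /* must never be set below 4 */
+    if (cnt > 63)
+        cnt = 63;    /* CNT is a 6-bit field */
+    th.u64 = 0;
+    th.s.cnt = cnt;
+    cvmx_write_csr(CVMX_AGL_GMX_TXX_THRESH(port), th.u64);
+}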
+
+/**
+ * cvmx_agl_gmx_tx_bp
+ *
+ * AGL_GMX_TX_BP = Packet TX BackPressure Register
+ *
+ *
+ * Notes:
+ * BP[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * BP[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_tx_bp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t bp : 2; /**< Port BackPressure status
+ 0=Port is available
+ 1=Port should be back pressured */
+#else
+ uint64_t bp : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_bp_s cn52xx;
+ struct cvmx_agl_gmx_tx_bp_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_bp_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t bp : 1; /**< Port BackPressure status
+ 0=Port is available
+ 1=Port should be back pressured */
+#else
+ uint64_t bp : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_bp_s cn61xx;
+ struct cvmx_agl_gmx_tx_bp_s cn63xx;
+ struct cvmx_agl_gmx_tx_bp_s cn63xxp1;
+ struct cvmx_agl_gmx_tx_bp_s cn66xx;
+ struct cvmx_agl_gmx_tx_bp_s cn68xx;
+ struct cvmx_agl_gmx_tx_bp_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_tx_bp cvmx_agl_gmx_tx_bp_t;
+
+/**
+ * cvmx_agl_gmx_tx_col_attempt
+ *
+ * AGL_GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+union cvmx_agl_gmx_tx_col_attempt {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_col_attempt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t limit : 5; /**< Collision Attempts */
+#else
+ uint64_t limit : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn52xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn56xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn61xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn63xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn63xxp1;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn66xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn68xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_tx_col_attempt cvmx_agl_gmx_tx_col_attempt_t;
+
+/**
+ * cvmx_agl_gmx_tx_ifg
+ *
+ * Common
+ *
+ *
+ * AGL_GMX_TX_IFG = Packet TX Interframe Gap
+ *
+ * Notes:
+ * * Programming IFG1 and IFG2.
+ *
+ * For half-duplex systems that require IEEE 802.3 compatibility, IFG1 must
+ * be in the range of 1-8, IFG2 must be in the range of 4-12, and the
+ * IFG1+IFG2 sum must be 12.
+ *
+ * For full-duplex systems that require IEEE 802.3 compatibility, IFG1 must
+ * be in the range of 1-11, IFG2 must be in the range of 1-11, and the
+ * IFG1+IFG2 sum must be 12.
+ *
+ * For all other systems, IFG1 and IFG2 can be any value in the range of
+ * 1-15, allowing for a total possible IFG sum of 2-30.
+ *
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+union cvmx_agl_gmx_tx_ifg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_ifg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t ifg2 : 4; /**< 1/3 of the interframe gap timing
+ If CRS is detected during IFG2, then the
+ interFrameSpacing timer is not reset and a frame
+ is transmitted once the timer expires. */
+ uint64_t ifg1 : 4; /**< 2/3 of the interframe gap timing
+ If CRS is detected during IFG1, then the
+ interFrameSpacing timer is reset and a frame is
+ not transmitted. */
+#else
+ uint64_t ifg1 : 4;
+ uint64_t ifg2 : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_ifg_s cn52xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_ifg_s cn56xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_ifg_s cn61xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn63xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn63xxp1;
+ struct cvmx_agl_gmx_tx_ifg_s cn66xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn68xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_tx_ifg cvmx_agl_gmx_tx_ifg_t;
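+
+/*
+ * Illustrative sketch (not part of the original header): the classic
+ * IEEE 802.3 half-duplex split, IFG1=8 and IFG2=4, which satisfies the
+ * IFG1+IFG2 == 12 rule above.  Assumes the SDK accessor cvmx_write_csr()
+ * and a CVMX_AGL_GMX_TX_IFG address macro from this header.
+ */
+static inline void agl_tx_ifg_default(void)
+{
+    cvmx_agl_gmx_tx_ifg_t ifg;
+    ifg.u64 = 0;
+    ifg.s.ifg1 = 8;   /* 2/3 of the interframe gap */
+    ifg.s.ifg2 = 4;   /* 1/3 of the interframe gap */
+    cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, ifg.u64);
+}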
+
+/**
+ * cvmx_agl_gmx_tx_int_en
+ *
+ * AGL_GMX_TX_INT_EN = Interrupt Enable
+ *
+ *
+ * Notes:
+ * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+union cvmx_agl_gmx_tx_int_en {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t reserved_18_19 : 2;
+ uint64_t late_col : 2; /**< TX Late Collision */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral (halfdup mode only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions (halfdup mode only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_19 : 2;
+ uint64_t ptp_lost : 2;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_int_en_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t late_col : 2; /**< TX Late Collision */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_tx_int_en_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_en_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t late_col : 1; /**< TX Late Collision */
+ uint64_t reserved_13_15 : 3;
+ uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_9_11 : 3;
+ uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_3_7 : 5;
+ uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t xscol : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t xsdef : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t late_col : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_int_en_s cn61xx;
+ struct cvmx_agl_gmx_tx_int_en_s cn63xx;
+ struct cvmx_agl_gmx_tx_int_en_s cn63xxp1;
+ struct cvmx_agl_gmx_tx_int_en_s cn66xx;
+ struct cvmx_agl_gmx_tx_int_en_s cn68xx;
+ struct cvmx_agl_gmx_tx_int_en_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_tx_int_en cvmx_agl_gmx_tx_int_en_t;
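+
+/* Editor's illustrative sketch -- not part of the original SDK header. It
+   shows enabling the two-bit per-port TX underflow and late-collision
+   interrupt enables through this union. The CVMX_AGL_GMX_TX_INT_EN address
+   macro and the cvmx_write_csr() accessor are assumed to be provided
+   elsewhere in the SDK. */
+#if 0 /* example only */
+static inline void example_agl_gmx_tx_int_enable(void)
+{
+    cvmx_agl_gmx_tx_int_en_t en;
+    en.u64 = 0;
+    en.s.undflw = 0x3;     /* TX underflow, both MIX ports */
+    en.s.late_col = 0x3;   /* TX late collision, both MIX ports */
+    cvmx_write_csr(CVMX_AGL_GMX_TX_INT_EN, en.u64);
+}
+#endif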
+
+/**
+ * cvmx_agl_gmx_tx_int_reg
+ *
+ * AGL_GMX_TX_INT_REG = Interrupt Register
+ *
+ *
+ * Notes:
+ * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+union cvmx_agl_gmx_tx_int_reg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t reserved_18_19 : 2;
+ uint64_t late_col : 2; /**< TX Late Collision */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral (halfdup mode only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions (halfdup mode only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_19 : 2;
+ uint64_t ptp_lost : 2;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t late_col : 2; /**< TX Late Collision */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_tx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_reg_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t late_col : 1; /**< TX Late Collision */
+ uint64_t reserved_13_15 : 3;
+ uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_9_11 : 3;
+ uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_3_7 : 5;
+ uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t xscol : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t xsdef : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t late_col : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_int_reg_s cn61xx;
+ struct cvmx_agl_gmx_tx_int_reg_s cn63xx;
+ struct cvmx_agl_gmx_tx_int_reg_s cn63xxp1;
+ struct cvmx_agl_gmx_tx_int_reg_s cn66xx;
+ struct cvmx_agl_gmx_tx_int_reg_s cn68xx;
+ struct cvmx_agl_gmx_tx_int_reg_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_tx_int_reg cvmx_agl_gmx_tx_int_reg_t;
+
+/**
+ * cvmx_agl_gmx_tx_jam
+ *
+ * AGL_GMX_TX_JAM = Packet TX Jam Pattern
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+union cvmx_agl_gmx_tx_jam {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_jam_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t jam : 8; /**< Jam pattern */
+#else
+ uint64_t jam : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_jam_s cn52xx;
+ struct cvmx_agl_gmx_tx_jam_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_jam_s cn56xx;
+ struct cvmx_agl_gmx_tx_jam_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_jam_s cn61xx;
+ struct cvmx_agl_gmx_tx_jam_s cn63xx;
+ struct cvmx_agl_gmx_tx_jam_s cn63xxp1;
+ struct cvmx_agl_gmx_tx_jam_s cn66xx;
+ struct cvmx_agl_gmx_tx_jam_s cn68xx;
+ struct cvmx_agl_gmx_tx_jam_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_tx_jam cvmx_agl_gmx_tx_jam_t;
+
+/**
+ * cvmx_agl_gmx_tx_lfsr
+ *
+ * AGL_GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+union cvmx_agl_gmx_tx_lfsr {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_lfsr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t lfsr : 16; /**< The current state of the LFSR used to feed random
+ numbers to compute truncated binary exponential
+ backoff. */
+#else
+ uint64_t lfsr : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_lfsr_s cn52xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_lfsr_s cn56xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_lfsr_s cn61xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn63xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn63xxp1;
+ struct cvmx_agl_gmx_tx_lfsr_s cn66xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn68xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_tx_lfsr cvmx_agl_gmx_tx_lfsr_t;
+
+/**
+ * cvmx_agl_gmx_tx_ovr_bp
+ *
+ * AGL_GMX_TX_OVR_BP = Packet TX Override BackPressure
+ *
+ *
+ * Notes:
+ * IGN_FULL[0], BP[0], EN[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * IGN_FULL[1], BP[1], EN[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_tx_ovr_bp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_ovr_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t en : 2; /**< Per port Enable back pressure override */
+ uint64_t reserved_6_7 : 2;
+ uint64_t bp : 2; /**< Port BackPressure status to use
+ 0=Port is available
+ 1=Port should be back pressured */
+ uint64_t reserved_2_3 : 2;
+ uint64_t ign_full : 2; /**< Ignore the RX FIFO full when computing BP */
+#else
+ uint64_t ign_full : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t bp : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t en : 2;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn52xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_ovr_bp_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t en : 1; /**< Per port Enable back pressure override */
+ uint64_t reserved_5_7 : 3;
+ uint64_t bp : 1; /**< Port BackPressure status to use
+ 0=Port is available
+ 1=Port should be back pressured */
+ uint64_t reserved_1_3 : 3;
+ uint64_t ign_full : 1; /**< Ignore the RX FIFO full when computing BP */
+#else
+ uint64_t ign_full : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t bp : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t en : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn61xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn63xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn63xxp1;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn66xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn68xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_tx_ovr_bp cvmx_agl_gmx_tx_ovr_bp_t;
+
+/**
+ * cvmx_agl_gmx_tx_pause_pkt_dmac
+ *
+ * AGL_GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+union cvmx_agl_gmx_tx_pause_pkt_dmac {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t dmac : 48; /**< The DMAC field placed in outbound pause packets */
+#else
+ uint64_t dmac : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn61xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn66xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn68xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_tx_pause_pkt_dmac cvmx_agl_gmx_tx_pause_pkt_dmac_t;
+
+/**
+ * cvmx_agl_gmx_tx_pause_pkt_type
+ *
+ * AGL_GMX_TX_PAUSE_PKT_TYPE = Packet TX Pause Packet TYPE field
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+union cvmx_agl_gmx_tx_pause_pkt_type {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t type : 16; /**< The TYPE field placed in outbound pause packets */
+#else
+ uint64_t type : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn61xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn66xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn68xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn68xxp1;
+};
+typedef union cvmx_agl_gmx_tx_pause_pkt_type cvmx_agl_gmx_tx_pause_pkt_type_t;
+
+/**
+ * cvmx_agl_prt#_ctl
+ *
+ * AGL_PRT_CTL = AGL Port Control
+ *
+ *
+ * Notes:
+ * The RGMII timing specification requires that devices transmit clock and
+ * data synchronously. The specification requires external sources (namely
+ * the PC board trace routes) to introduce the appropriate 1.5 to 2.0 ns of
+ * delay.
+ *
+ * To eliminate the need for the PC board delays, the MIX RGMII interface
+ * has optional onboard DLL's for both transmit and receive. For correct
+ * operation, at most one of the transmitter, board, or receiver involved
+ * in an RGMII link should introduce delay. By default/reset,
+ * the MIX RGMII receivers delay the received clock, and the MIX
+ * RGMII transmitters do not delay the transmitted clock. Whether this
+ * default works as-is with a given link partner depends on the behavior
+ * of the link partner and the PC board.
+ *
+ * These are the possible modes of MIX RGMII receive operation:
+ * o AGL_PRTx_CTL[CLKRX_BYP] = 0 (reset value) - The OCTEON MIX RGMII
+ * receive interface introduces clock delay using its internal DLL.
+ * This mode is appropriate if neither the remote
+ * transmitter nor the PC board delays the clock.
+ * o AGL_PRTx_CTL[CLKRX_BYP] = 1, [CLKRX_SET] = 0x0 - The OCTEON MIX
+ * RGMII receive interface introduces no clock delay. This mode
+ * is appropriate if either the remote transmitter or the PC board
+ * delays the clock.
+ *
+ * These are the possible modes of MIX RGMII transmit operation:
+ * o AGL_PRTx_CTL[CLKTX_BYP] = 1, [CLKTX_SET] = 0x0 (reset value) -
+ * The OCTEON MIX RGMII transmit interface introduces no clock
+ * delay. This mode is appropriate if either the remote receiver
+ * or the PC board delays the clock.
+ * o AGL_PRTx_CTL[CLKTX_BYP] = 0 - The OCTEON MIX RGMII transmit
+ * interface introduces clock delay using its internal DLL.
+ * This mode is appropriate if neither the remote receiver
+ * nor the PC board delays the clock.
+ *
+ * AGL_PRT0_CTL will be reset when MIX0_CTL[RESET] is set to 1.
+ * AGL_PRT1_CTL will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_prtx_ctl {
+ uint64_t u64;
+ struct cvmx_agl_prtx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t drv_byp : 1; /**< Bypass the compensation controller and use
+ DRV_NCTL and DRV_PCTL */
+ uint64_t reserved_62_62 : 1;
+ uint64_t cmp_pctl : 6; /**< PCTL drive strength from the compensation ctl */
+ uint64_t reserved_54_55 : 2;
+ uint64_t cmp_nctl : 6; /**< NCTL drive strength from the compensation ctl */
+ uint64_t reserved_46_47 : 2;
+ uint64_t drv_pctl : 6; /**< PCTL drive strength to use in bypass mode
+ Reset value of 19 is for 50 ohm termination */
+ uint64_t reserved_38_39 : 2;
+ uint64_t drv_nctl : 6; /**< NCTL drive strength to use in bypass mode
+ Reset value of 15 is for 50 ohm termination */
+ uint64_t reserved_29_31 : 3;
+ uint64_t clk_set : 5; /**< The clock delay as determined by the DLL */
+ uint64_t clkrx_byp : 1; /**< Bypass the RX clock delay setting
+ Skews RXC from RXD,RXCTL in RGMII mode
+ By default, HW internally shifts the RXC clock
+ to sample RXD,RXCTL assuming clock and data are
+ sourced synchronously from the link partner.
+ In MII mode, the CLKRX_BYP is forced to 1. */
+ uint64_t reserved_21_22 : 2;
+ uint64_t clkrx_set : 5; /**< RX clock delay setting to use in bypass mode
+ Skews RXC from RXD in RGMII mode */
+ uint64_t clktx_byp : 1; /**< Bypass the TX clock delay setting
+ Skews TXC from TXD,TXCTL in RGMII mode
+ By default, clock and data are sourced
+ synchronously.
+ In MII mode, the CLKTX_BYP is forced to 1. */
+ uint64_t reserved_13_14 : 2;
+ uint64_t clktx_set : 5; /**< TX clock delay setting to use in bypass mode
+ Skews TXC from TXD in RGMII mode */
+ uint64_t reserved_5_7 : 3;
+ uint64_t dllrst : 1; /**< DLL Reset */
+ uint64_t comp : 1; /**< Compensation Enable */
+ uint64_t enable : 1; /**< Port Enable */
+ uint64_t clkrst : 1; /**< Clock Tree Reset */
+ uint64_t mode : 1; /**< Port Mode
+ MODE must be set the same for all ports in which
+ AGL_PRTx_CTL[ENABLE] is set.
+ 0=RGMII
+ 1=MII */
+#else
+ uint64_t mode : 1;
+ uint64_t clkrst : 1;
+ uint64_t enable : 1;
+ uint64_t comp : 1;
+ uint64_t dllrst : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t clktx_set : 5;
+ uint64_t reserved_13_14 : 2;
+ uint64_t clktx_byp : 1;
+ uint64_t clkrx_set : 5;
+ uint64_t reserved_21_22 : 2;
+ uint64_t clkrx_byp : 1;
+ uint64_t clk_set : 5;
+ uint64_t reserved_29_31 : 3;
+ uint64_t drv_nctl : 6;
+ uint64_t reserved_38_39 : 2;
+ uint64_t drv_pctl : 6;
+ uint64_t reserved_46_47 : 2;
+ uint64_t cmp_nctl : 6;
+ uint64_t reserved_54_55 : 2;
+ uint64_t cmp_pctl : 6;
+ uint64_t reserved_62_62 : 1;
+ uint64_t drv_byp : 1;
+#endif
+ } s;
+ struct cvmx_agl_prtx_ctl_s cn61xx;
+ struct cvmx_agl_prtx_ctl_s cn63xx;
+ struct cvmx_agl_prtx_ctl_s cn63xxp1;
+ struct cvmx_agl_prtx_ctl_s cn66xx;
+ struct cvmx_agl_prtx_ctl_s cn68xx;
+ struct cvmx_agl_prtx_ctl_s cn68xxp1;
+};
+typedef union cvmx_agl_prtx_ctl cvmx_agl_prtx_ctl_t;
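+
+/* Editor's illustrative sketch -- not part of the original SDK header. It
+   selects the second RX mode from the notes above (no internal RX clock
+   delay) for MIX port 0, for boards where the link partner or PC board
+   already delays the clock. The CVMX_AGL_PRTX_CTL(port) address macro and
+   the cvmx_read_csr()/cvmx_write_csr() accessors are assumed to be
+   provided elsewhere in the SDK. */
+#if 0 /* example only */
+static inline void example_agl_prt0_rx_no_delay(void)
+{
+    cvmx_agl_prtx_ctl_t prt;
+    prt.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(0));
+    prt.s.clkrx_byp = 1;     /* bypass the internal RX DLL */
+    prt.s.clkrx_set = 0x0;   /* introduce no RX clock delay */
+    cvmx_write_csr(CVMX_AGL_PRTX_CTL(0), prt.u64);
+}
+#endif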
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-agl-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-app-hotplug.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-app-hotplug.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-app-hotplug.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,886 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Provides APIs for applications to register for hotplug. It also provides
+ * APIs for requesting shutdown of a running target application.
+ *
+ * <hr>$Revision: $<hr>
+ */
+
+#include "cvmx-app-hotplug.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-debug.h"
+
+//#define DEBUG 1
+
+static cvmx_app_hotplug_global_t *hotplug_global_ptr = 0;
+
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+
+static CVMX_SHARED cvmx_spinlock_t cvmx_app_hotplug_sync_lock = { CVMX_SPINLOCK_UNLOCKED_VAL };
+static CVMX_SHARED cvmx_spinlock_t cvmx_app_hotplug_lock = { CVMX_SPINLOCK_UNLOCKED_VAL };
+static CVMX_SHARED cvmx_app_hotplug_info_t *cvmx_app_hotplug_info_ptr = NULL;
+
+static void __cvmx_app_hotplug_shutdown(int irq_number, uint64_t registers[32], void *user_arg);
+static void __cvmx_app_hotplug_sync(void);
+static void __cvmx_app_hotplug_reset(void);
+
+/* Declaring this array here is a compile time check to ensure that the
+ size of cvmx_app_hotplug_info_t is 1024. If the size is not 1024
+ the size of the array will be -1 and this results in a compilation
+ error */
+char __hotplug_info_check[(sizeof(cvmx_app_hotplug_info_t) == 1024) ? 1 : -1];
+/**
+ * This routine registers an application for hotplug. It installs a handler for
+ * any incoming shutdown request. It also registers a callback routine from the
+ * application. This callback is invoked when the application receives a
+ * shutdown notification.
+ *
+ * This routine only needs to be called once per application.
+ *
+ * @param fn Callback routine from the application.
+ * @param arg Argument to the application callback routine.
+ * @return Return 0 on success, -1 on failure
+ *
+ */
+int cvmx_app_hotplug_register(void(*fn)(void*), void* arg)
+{
+ /* Find the list of applications launched by bootoct utility. */
+
+ if (!(cvmx_app_hotplug_info_ptr = cvmx_app_hotplug_get_info(cvmx_sysinfo_get()->core_mask)))
+ {
+ /* Application not launched by bootoct? */
+ printf("ERROR: cvmx_app_hotplug_register() failed\n");
+ return -1;
+ }
+
+ /* Register the callback */
+ cvmx_app_hotplug_info_ptr->data = CAST64(arg);
+ cvmx_app_hotplug_info_ptr->shutdown_callback = CAST64(fn);
+
+#ifdef DEBUG
+ printf("cvmx_app_hotplug_register(): coremask 0x%x valid %d\n",
+ cvmx_app_hotplug_info_ptr->coremask, cvmx_app_hotplug_info_ptr->valid);
+#endif
+
+ cvmx_interrupt_register(CVMX_IRQ_MBOX0, __cvmx_app_hotplug_shutdown, NULL);
+
+ return 0;
+}
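+
+/* Editor's illustrative sketch -- not part of the original file. Typical
+   startup use of the routine above: the first core registers the shutdown
+   callback once, then every core activates hotplug. The my_shutdown() and
+   my_state names are hypothetical. */
+#if 0 /* example only */
+static void my_shutdown(void *arg)
+{
+    /* quiesce application state before the cores are reset */
+}
+
+static CVMX_SHARED int my_state;
+
+static void example_hotplug_setup(void)
+{
+    if (cvmx_coremask_first_core(cvmx_sysinfo_get()->core_mask))
+        cvmx_app_hotplug_register(my_shutdown, &my_state);
+    cvmx_coremask_barrier_sync(cvmx_sysinfo_get()->core_mask);
+    cvmx_app_hotplug_activate();   /* each core enables the shutdown IRQ */
+}
+#endif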
+
+/**
+ * This routine supersedes the cvmx_app_hotplug_register method. It
+ * registers the application for hotplug, with the full set of CPU hotplug
+ * callbacks specified in cb. The callbacks are documented in
+ * cvmx_app_hotplug_callbacks_t.
+ *
+ * This routine only needs to be called once per application.
+ *
+ * @param cb Callback routines from the application.
+ * @param arg Argument to the application callback routines.
+ * @param app_shutdown When set to 1 the application will invoke core_shutdown
+ on each core. When set to 0 core shutdown will be
+ invoked automatically after invoking the
+ application callback.
+ * @return Return 0 on success, -1 on failure
+ *
+ */
+int cvmx_app_hotplug_register_cb(cvmx_app_hotplug_callbacks_t *cb, void* arg,
+ int app_shutdown)
+{
+ cvmx_app_hotplug_info_t *app_info;
+
+ /* Find the list of applications launched by bootoct utility. */
+ app_info = cvmx_app_hotplug_get_info(cvmx_sysinfo_get()->core_mask);
+ cvmx_app_hotplug_info_ptr = app_info;
+ if (!app_info)
+ {
+ /* Application not launched by bootoct? */
+ printf("ERROR: cvmx_app_hotplug_register_cb() failed\n");
+ return -1;
+ }
+ /* Register the callback */
+ app_info->data = CAST64(arg);
+ app_info->shutdown_callback = CAST64(cb->shutdown_callback);
+ app_info->cores_added_callback = CAST64(cb->cores_added_callback);
+ app_info->cores_removed_callback = CAST64(cb->cores_removed_callback);
+ app_info->unplug_callback = CAST64(cb->unplug_core_callback);
+ app_info->hotplug_start = CAST64(cb->hotplug_start);
+ app_info->app_shutdown = app_shutdown;
+#ifdef DEBUG
+ printf("cvmx_app_hotplug_register(): coremask 0x%x valid %d\n",
+ app_info->coremask, app_info->valid);
+#endif
+
+ cvmx_interrupt_register(CVMX_IRQ_MBOX0, __cvmx_app_hotplug_shutdown, NULL);
+ return 0;
+
+}
+
+void cvmx_app_hotplug_remove_self_from_core_mask(void)
+{
+ int core = cvmx_get_core_num();
+ uint32_t core_mask = 1ull << core;
+
+ cvmx_spinlock_lock(&cvmx_app_hotplug_lock);
+ cvmx_app_hotplug_info_ptr->coremask = cvmx_app_hotplug_info_ptr->coremask & ~core_mask ;
+ cvmx_app_hotplug_info_ptr->hotplug_activated_coremask =
+ cvmx_app_hotplug_info_ptr->hotplug_activated_coremask & ~core_mask ;
+ cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
+}
+
+
+
+/**
+* Returns 1 if the running core is being unplugged, else it returns 0.
+*/
+int is_core_being_unplugged(void)
+{
+ if (cvmx_app_hotplug_info_ptr->unplug_cores &
+ (1ull << cvmx_get_core_num()))
+ return 1;
+ return 0;
+}
+
+
+/**
+ * Activate the current application core for receiving hotplug shutdown requests.
+ *
+ * This routine makes sure that each core belonging to the application is enabled
+ * to receive the shutdown notification and also provides a barrier sync to make
+ * sure that all cores are ready.
+ */
+int cvmx_app_hotplug_activate(void)
+{
+ uint64_t cnt = 0;
+ uint64_t cnt_interval = 10000000;
+
+ while (!cvmx_app_hotplug_info_ptr)
+ {
+ cnt++;
+ if ((cnt % cnt_interval) == 0)
+ printf("waiting for cnt=%llu\n", (unsigned long long)cnt);
+ }
+
+ if (cvmx_app_hotplug_info_ptr->hplugged_cores & (1ull << cvmx_get_core_num()))
+ {
+#ifdef DEBUG
+ printf("core=%d : is being hotplugged \n", cvmx_get_core_num());
+#endif
+ cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+ sys_info_ptr->core_mask |= 1ull << cvmx_get_core_num();
+ }
+ else
+ {
+ __cvmx_app_hotplug_sync();
+ }
+ cvmx_spinlock_lock(&cvmx_app_hotplug_lock);
+ if (!cvmx_app_hotplug_info_ptr)
+ {
+ cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
+ printf("ERROR: This application is not registered for hotplug\n");
+ return -1;
+ }
+ /* Enable the interrupt before we mark the core as activated */
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
+ cvmx_app_hotplug_info_ptr->hotplug_activated_coremask |= (1ull<<cvmx_get_core_num());
+
+#ifdef DEBUG
+ printf("cvmx_app_hotplug_activate(): coremask 0x%x valid %d sizeof %d\n",
+ cvmx_app_hotplug_info_ptr->coremask, cvmx_app_hotplug_info_ptr->valid,
+ (int)sizeof(*cvmx_app_hotplug_info_ptr));
+#endif
+
+ cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
+
+ return 0;
+}
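+
+/* Editor's illustrative sketch -- not part of the original file. A
+   hotplugged core's entry routine (the hotplug_start callback registered
+   via cvmx_app_hotplug_register_cb) might simply activate hotplug and then
+   join the application's per-core work loop. The my_* names are
+   hypothetical. */
+#if 0 /* example only */
+static void my_hotplug_start(void *arg)
+{
+    if (cvmx_app_hotplug_activate())
+        return;                /* activation failed; do not join the app */
+    /* ... enter the application's per-core processing loop ... */
+}
+#endif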
+
+/**
+ * This routine is only required if cvmx_app_hotplug_shutdown_request() was called
+ * with wait=0. This routine waits for the application shutdown to complete.
+ *
+ * @param coremask Coremask the application is running on.
+ * @return 0 on success, -1 on error
+ *
+ */
+int cvmx_app_hotplug_shutdown_complete(uint32_t coremask)
+{
+ cvmx_app_hotplug_info_t *hotplug_info_ptr;
+
+ if (!(hotplug_info_ptr = cvmx_app_hotplug_get_info(coremask)))
+ {
+ printf("\nERROR: Failed to get hotplug info for coremask: 0x%x\n", (unsigned int)coremask);
+ return -1;
+ }
+
+ while(!hotplug_info_ptr->shutdown_done);
+
+ /* Clean up the hotplug info region for this app */
+ bzero(hotplug_info_ptr, sizeof(*hotplug_info_ptr));
+
+ return 0;
+}
+
+/**
+ * Disable recognition of any incoming shutdown request.
+ */
+
+void cvmx_app_hotplug_shutdown_disable(void)
+{
+ cvmx_interrupt_mask_irq(CVMX_IRQ_MBOX0);
+}
+
+/**
+ * Re-enable recognition of incoming shutdown requests.
+ */
+
+void cvmx_app_hotplug_shutdown_enable(void)
+{
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
+}
+
+/**
+* Request shutdown of the currently running core. Should be
+* called by the application when it has been registered with
+* app_shutdown option set to 1.
+*/
+void cvmx_app_hotplug_core_shutdown(void)
+{
+ uint32_t flags;
+ if (cvmx_app_hotplug_info_ptr->shutdown_cores)
+ {
+ cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+ __cvmx_app_hotplug_sync();
+ if (cvmx_coremask_first_core(sys_info_ptr->core_mask))
+ {
+ bzero(cvmx_app_hotplug_info_ptr,
+ sizeof(*cvmx_app_hotplug_info_ptr));
+ #ifdef DEBUG
+ printf("__cvmx_app_hotplug_shutdown(): setting shutdown done! \n");
+ #endif
+ cvmx_app_hotplug_info_ptr->shutdown_done = 1;
+ }
+ /* Tell the debugger that this application is finishing. */
+ cvmx_debug_finish ();
+ flags = cvmx_interrupt_disable_save();
+ __cvmx_app_hotplug_sync();
+ /* Reset the core */
+ __cvmx_app_hotplug_reset();
+ }
+ else
+ {
+ cvmx_sysinfo_remove_self_from_core_mask();
+ cvmx_app_hotplug_remove_self_from_core_mask();
+ flags = cvmx_interrupt_disable_save();
+ __cvmx_app_hotplug_reset();
+ }
+}
+
+/*
+ * ISR for the incoming shutdown request interrupt.
+ */
+static void __cvmx_app_hotplug_shutdown(int irq_number, uint64_t registers[32],
+ void *user_arg)
+{
+ cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+ uint64_t mbox;
+ cvmx_app_hotplug_info_t *ai = cvmx_app_hotplug_info_ptr;
+ int dbg = 0;
+
+#ifdef DEBUG
+ dbg = 1;
+#endif
+ cvmx_interrupt_mask_irq(CVMX_IRQ_MBOX0);
+
+ mbox = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()));
+ /* Clear the interrupt */
+ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), mbox);
+
+ /* Make sure the write above completes */
+ cvmx_read_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()));
+
+
+ if (!cvmx_app_hotplug_info_ptr)
+ {
+ printf("ERROR: Application is not registered for hotplug!\n");
+ return;
+ }
+
+ if (ai->hotplug_activated_coremask != sys_info_ptr->core_mask)
+ {
+ printf("ERROR: Shutdown requested when not all app cores have "
+ "activated hotplug\n" "Application coremask: 0x%x Hotplug "
+ "coremask: 0x%x\n", (unsigned int)sys_info_ptr->core_mask,
+ (unsigned int)ai->hotplug_activated_coremask);
+ return;
+ }
+
+ if (mbox & 1ull)
+ {
+ int core = cvmx_get_core_num();
+ if (dbg)
+ printf("Shutting down application .\n");
+ /* Call the application's own callback function */
+ if (ai->shutdown_callback)
+ {
+ ((void(*)(void*))(long)ai->shutdown_callback)(CASTPTR(void *, ai->data));
+ }
+ else
+ {
+ printf("ERROR : Shutdown callback has not been registered\n");
+ }
+ if (!ai->app_shutdown)
+ {
+ if (dbg)
+ printf("%s : core = %d Invoking app shutdown\n", __FUNCTION__, core);
+ cvmx_app_hotplug_core_shutdown();
+ }
+ }
+ else if (mbox & 2ull)
+ {
+ int core = cvmx_get_core_num();
+ int unplug = is_core_being_unplugged();
+ if (dbg) printf("%s : core=%d Unplug event \n", __FUNCTION__, core);
+
+ if (unplug)
+ {
+ /* Call the application's own callback function */
+ if (ai->unplug_callback)
+ {
+ if (dbg) printf("%s : core=%d Calling unplug callback\n",
+ __FUNCTION__, core);
+ ((void(*)(void*))(long)ai->unplug_callback)(CASTPTR(void *,
+ ai->data));
+ }
+ if (!ai->app_shutdown)
+ {
+ if (dbg) printf("%s : core = %d Invoking app shutdown\n",
+ __FUNCTION__, core);
+ cvmx_app_hotplug_core_shutdown();
+ }
+ }
+ else
+ {
+ if (ai->cores_removed_callback)
+ {
+ if (dbg) printf("%s : core=%d Calling cores removed callback\n",
+ __FUNCTION__, core);
+ ((void(*)(uint32_t, void*))(long)ai->cores_removed_callback)
+ (ai->unplug_cores, CASTPTR(void *, ai->data));
+ }
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
+ }
+ }
+ else if (mbox & 4ull)
+ {
+ int core = cvmx_get_core_num();
+ if (dbg) printf("%s : core=%d Add cores event \n", __FUNCTION__, core);
+
+ if (ai->cores_added_callback)
+ {
+ if (dbg) printf("%s : core=%d Calling cores added callback\n",
+ __FUNCTION__, core);
+ ((void(*)(uint32_t, void*))(long)ai->cores_added_callback)
+ (ai->hplugged_cores, CASTPTR(void *, ai->data));
+ }
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
+ }
+ else
+ {
+ printf("ERROR: unexpected mbox=%llx\n", (unsigned long long)mbox);
+ }
+
+}
+
+void __cvmx_app_hotplug_reset(void)
+{
+#define IDLE_CORE_BLOCK_NAME "idle-core-loop"
+#define HPLUG_MAKE_XKPHYS(x) ((1ULL << 63) | (x))
+ uint64_t reset_addr;
+ const cvmx_bootmem_named_block_desc_t *block_desc;
+
+ block_desc = cvmx_bootmem_find_named_block(IDLE_CORE_BLOCK_NAME);
+ if (!block_desc) {
+ cvmx_dprintf("Named block(%s) is not created\n", IDLE_CORE_BLOCK_NAME);
+ /* loop here, should not happen */
+ __asm__ volatile (
+ ".set noreorder \n"
+ "\tsync \n"
+ "\tnop \n"
+ "1:\twait \n"
+ "\tb 1b \n"
+ "\tnop \n"
+ ".set reorder \n"
+ ::
+ );
+ }
+
+ reset_addr = HPLUG_MAKE_XKPHYS(block_desc->base_addr);
+ asm volatile (" .set push \n"
+ " .set mips64 \n"
+ " .set noreorder \n"
+ " move $2, %[addr] \n"
+ " jr $2 \n"
+ " nop \n"
+ " .set pop "
+ :: [addr] "r"(reset_addr)
+ : "$2");
+
+ /*Should never reach here*/
+ while (1) ;
+
+}
+
+/*
+ * We need a separate sync operation from cvmx_coremask_barrier_sync() to
+ * avoid a deadlock on state.lock, since the application itself may be doing a
+ * cvmx_coremask_barrier_sync().
+ */
+static void __cvmx_app_hotplug_sync(void)
+{
+ static CVMX_SHARED volatile uint32_t sync_coremask = 0;
+ cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+
+ cvmx_spinlock_lock(&cvmx_app_hotplug_sync_lock);
+
+ sync_coremask |= cvmx_coremask_core(cvmx_get_core_num());
+
+ cvmx_spinlock_unlock(&cvmx_app_hotplug_sync_lock);
+
+ while (sync_coremask != sys_info_ptr->core_mask);
+
+ cvmx_spinlock_lock(&cvmx_app_hotplug_sync_lock);
+ sync_coremask = 0;
+ cvmx_spinlock_unlock(&cvmx_app_hotplug_sync_lock);
+
+
+}
+
+#endif /* CVMX_BUILD_FOR_LINUX_USER */
+
+/**
+* Returns 1 if the running core is being hotplugged, else it returns 0.
+*/
+int is_core_being_hot_plugged(void)
+{
+
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+ if (!cvmx_app_hotplug_info_ptr) return 0;
+ if (cvmx_app_hotplug_info_ptr->hplugged_cores &
+ (1ull << cvmx_get_core_num()))
+ return 1;
+ return 0;
+#else
+ return 0;
+#endif
+}
+
+static cvmx_app_hotplug_global_t *cvmx_app_get_hotplug_global_ptr(void)
+{
+ const struct cvmx_bootmem_named_block_desc *block_desc;
+ cvmx_app_hotplug_global_t *hgp;
+
+ if(hotplug_global_ptr != 0) return hotplug_global_ptr;
+
+ block_desc = cvmx_bootmem_find_named_block(CVMX_APP_HOTPLUG_INFO_REGION_NAME);
+ if (!block_desc)
+ {
+ printf("ERROR: Hotplug info region is not setup\n");
+ return NULL;
+ }
+ else
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+ {
+ size_t pg_sz = sysconf(_SC_PAGESIZE), size;
+ off_t offset;
+ char *vaddr;
+ int fd;
+
+ if ((fd = open("/dev/mem", O_RDWR)) == -1) {
+ perror("open");
+ return NULL;
+ }
+
+ /*
+ * We need to mmap() this memory, since this was allocated from the
+ * kernel bootup code and does not reside in the RESERVE32 region.
+ */
+ size = CVMX_APP_HOTPLUG_INFO_REGION_SIZE + pg_sz-1;
+ offset = block_desc->base_addr & ~(pg_sz-1);
+ if ((vaddr = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset))
+ == MAP_FAILED)
+ {
+ perror("mmap");
+ return NULL;
+ }
+
+ hgp = (cvmx_app_hotplug_global_t *)(vaddr + ( block_desc->base_addr & (pg_sz-1)));
+ }
+#else
+ hgp = CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, block_desc->base_addr));
+#endif
+ hotplug_global_ptr = hgp;
+ return hgp;
+
+}
+
+/**
+ * Return the hotplug info structure (cvmx_app_hotplug_info_t) pointer for the
+ * application running on the given coremask.
+ *
+ * @param coremask Coremask of application.
+ * @return Returns hotplug info struct on success, NULL on failure
+ *
+ */
+cvmx_app_hotplug_info_t* cvmx_app_hotplug_get_info(uint32_t coremask)
+{
+ cvmx_app_hotplug_info_t *hip;
+ cvmx_app_hotplug_global_t *hgp;
+ int i;
+ int dbg = 0;
+
+#ifdef DEBUG
+ dbg = 1;
+#endif
+ hgp = cvmx_app_get_hotplug_global_ptr();
+ if (!hgp) return NULL;
+ hip = hgp->hotplug_info_array;
+
+ /* Look for the current app's info */
+ for (i=0; i<CVMX_APP_HOTPLUG_MAX_APPS; i++)
+ {
+ if (hip[i].coremask == coremask)
+ {
+ if (dbg)
+ printf("cvmx_app_hotplug_get_info(): coremask match %d -- coremask 0x%x, valid %d\n", i, (unsigned int)hip[i].coremask, (unsigned int)hip[i].valid);
+ return &hip[i];
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Return the hotplug application index structure for the application running on the
+ * given coremask.
+ *
+ * @param coremask Coremask of application.
+ * @return Returns hotplug application index on success. -1 on failure
+ *
+ */
+int cvmx_app_hotplug_get_index(uint32_t coremask)
+{
+ cvmx_app_hotplug_info_t *hip;
+ cvmx_app_hotplug_global_t *hgp;
+ int i;
+ int dbg = 0;
+
+#ifdef DEBUG
+ dbg = 1;
+#endif
+ hgp = cvmx_app_get_hotplug_global_ptr();
+ if (!hgp) return -1;
+ hip = hgp->hotplug_info_array;
+
+ /* Look for the current app's info */
+ for (i=0; i<CVMX_APP_HOTPLUG_MAX_APPS; i++)
+ {
+ if (hip[i].coremask == coremask)
+ {
+ if (dbg)
+ printf("cvmx_app_hotplug_get_index(): coremask match %d -- coremask 0x%x valid %d\n", i, (unsigned int)hip[i].coremask, (unsigned int)hip[i].valid);
+ return i;
+ }
+ }
+ return -1;
+}
+
+void print_hot_plug_info(cvmx_app_hotplug_info_t* hpinfo)
+{
+ printf("name=%s coremask=%08x hotplugged coremask=%08x valid=%d\n", hpinfo->app_name,
+ (unsigned int)hpinfo->coremask, (unsigned int)hpinfo->hotplug_activated_coremask, (unsigned int)hpinfo->valid);
+}
+
+/**
+ * Return the hotplug info structure (cvmx_app_hotplug_info_t) pointer for the
+ * application with the specified index.
+ *
+ * @param index index of application.
+ * @return Returns hotplug info struct on success, NULL on failure
+ *
+ */
+cvmx_app_hotplug_info_t* cvmx_app_hotplug_get_info_at_index(int index)
+{
+ cvmx_app_hotplug_info_t *hip;
+ cvmx_app_hotplug_global_t *hgp;
+
+ hgp = cvmx_app_get_hotplug_global_ptr();
+ if (!hgp) return NULL;
+ hip = hgp->hotplug_info_array;
+
+#ifdef DEBUG
+ printf("cvmx_app_hotplug_get_info_at_index(): hotplug_info array ptr %p\n",
+ (void *)hip);
+#endif
+ if (index < CVMX_APP_HOTPLUG_MAX_APPS)
+ {
+ if (hip[index].valid)
+ {
+ //print_hot_plug_info( &hip[index] );
+ return &hip[index];
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Determines if SE application at the index specified is hotpluggable.
+ *
+ * @param index index of application.
+ * @return Returns -1 on error.
+ * 0 -> The application is not hotpluggable
+ * 1 -> The application is hotpluggable
+*/
+int is_app_hotpluggable(int index)
+{
+ cvmx_app_hotplug_info_t *ai;
+
+ if (!(ai = cvmx_app_hotplug_get_info_at_index(index)))
+ {
+ printf("\nERROR: Failed to get hotplug info for app at index=%d\n", index);
+ return -1;
+ }
+ if (ai->hotplug_activated_coremask) return 1;
+ return 0;
+}
+
+/**
+ * This routine sends a shutdown request to a running target application.
+ *
+ * @param coremask Coremask the application is running on.
+ * @param wait 1 - Wait for shutdown completion
+ * 0 - Do not wait
+ * @return 0 on success, -1 on error
+ *
+ */
+
+int cvmx_app_hotplug_shutdown_request(uint32_t coremask, int wait)
+{
+ int i;
+ cvmx_app_hotplug_info_t *hotplug_info_ptr;
+
+ if (!(hotplug_info_ptr = cvmx_app_hotplug_get_info(coremask)))
+ {
+ printf("\nERROR: Failed to get hotplug info for coremask: 0x%x\n", (unsigned int)coremask);
+ return -1;
+ }
+ hotplug_info_ptr->shutdown_cores = coremask;
+ if (!hotplug_info_ptr->shutdown_callback)
+ {
+ printf("\nERROR: Target application has not registered for hotplug!\n");
+ return -1;
+ }
+
+ if (hotplug_info_ptr->hotplug_activated_coremask != coremask)
+ {
+ printf("\nERROR: Not all application cores have activated hotplug\n");
+ return -1;
+ }
+
+ /* Send IPIs to all application cores to request shutdown */
+ for (i=0; i<CVMX_MAX_CORES; i++) {
+ if (coremask & (1ull<<i))
+ cvmx_write_csr(CVMX_CIU_MBOX_SETX(i), 1);
+ }
+
+ if (wait)
+ {
+ while (!hotplug_info_ptr->shutdown_done);
+
+ /* Clean up the hotplug info region for this application */
+ bzero(hotplug_info_ptr, sizeof(*hotplug_info_ptr));
+ }
+
+ return 0;
+}
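+
+/* Editor's illustrative sketch -- not part of the original file. How a
+   control application might use the routine above without blocking in it:
+   request the shutdown, do other work, then wait. The 0x0f coremask is
+   hypothetical. */
+#if 0 /* example only */
+static void example_shutdown_app(void)
+{
+    uint32_t target = 0x0f;   /* application running on cores 0-3 */
+    if (cvmx_app_hotplug_shutdown_request(target, 0) == 0)
+    {
+        /* ... other work ... */
+        cvmx_app_hotplug_shutdown_complete(target);
+    }
+}
+#endif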
+
+
+
+/**
+ * This routine invokes the cores_added callbacks.
+ */
+int cvmx_app_hotplug_call_add_cores_callback(int index)
+{
+ cvmx_app_hotplug_info_t *ai;
+ int i;
+ if (!(ai = cvmx_app_hotplug_get_info_at_index(index)))
+ {
+ printf("\nERROR: Failed to get hotplug info for app at index=%d\n", index);
+ return -1;
+ }
+ /* Send IPIs to all application cores to request add_cores callback*/
+ for (i=0; i<CVMX_MAX_CORES; i++) {
+ if (ai->coremask & (1ull<<i))
+ cvmx_write_csr(CVMX_CIU_MBOX_SETX(i), 4);
+ }
+ return 0;
+}
+
+/**
+ * This routine sends a request to a running target application
+ * to unplug a specified set of cores.
+ * @param index is the index of the target application
+ * @param coremask Coremask of the cores to be unplugged from the app.
+ * @param wait 1 - Wait for shutdown completion
+ * 0 - Do not wait
+ * @return 0 on success, -1 on error
+ *
+ */
+int cvmx_app_hotplug_unplug_cores(int index, uint32_t coremask, int wait)
+{
+ cvmx_app_hotplug_info_t *ai;
+ int i;
+
+ if (!(ai = cvmx_app_hotplug_get_info_at_index(index)))
+ {
+ printf("\nERROR: Failed to get hotplug info for app at index=%d\n", index);
+ return -1;
+ }
+ ai->unplug_cores = coremask;
+#if 0
+ if (!ai->shutdown_callback)
+ {
+ printf("\nERROR: Target application has not registered for hotplug!\n");
+ return -1;
+ }
+#endif
+ if ( (ai->coremask | coremask ) != ai->coremask)
+ {
+ printf("\nERROR: Not all cores requested are a part of the app "
+ "r=%08x:%08x\n", (unsigned int)coremask, (unsigned int)ai->coremask);
+ return -1;
+ }
+ if (ai->coremask == coremask)
+ {
+ printf("\nERROR: Trying to remove all cores in app. "
+ "r=%08x:%08x\n", (unsigned int)coremask, (unsigned int)ai->coremask);
+ return -1;
+ }
+ /* Send IPIs to all application cores to request unplug/remove_cores
+ callback */
+ for (i=0; i<CVMX_MAX_CORES; i++) {
+ if (ai->coremask & (1ull<<i))
+ cvmx_write_csr(CVMX_CIU_MBOX_SETX(i), 2);
+ }
+
+#if 0
+ if (wait)
+ {
+ while (!ai->shutdown_done);
+
+ /* Clean up the hotplug info region for this application */
+ bzero(ai, sizeof(*ai));
+ }
+#endif
+ return 0;
+}
+
+/**
+ * Returns 1 if any app is currently being booted, hotplugged, or shut
+ * down. Only one app can be under a boot, hotplug, or shutdown condition
+ * at a time. Before booting an app, this method should be used to check
+ * whether boot or shutdown activity is in progress, and the boot or
+ * shutdown should proceed only when there is no other activity.
+ *
+ */
+int is_app_under_boot_or_shutdown(void)
+{
+ int ret=0;
+ cvmx_app_hotplug_global_t *hgp;
+
+ hgp = cvmx_app_get_hotplug_global_ptr();
+ cvmx_spinlock_lock(&hgp->hotplug_global_lock);
+ if (hgp->app_under_boot || hgp->app_under_shutdown) ret=1;
+ cvmx_spinlock_unlock(&hgp->hotplug_global_lock);
+ return ret;
+
+}
+
+/**
+ * Sets or clears the app_under_boot value. When set, it signifies that an
+ * app is currently being booted or hotplugged with a new core.
+ *
+ *
+ * @param val sets app_under_boot to the specified value. This should be
+ * set to 1 while any app is being booted and cleared after the
+ * application has booted up.
+ *
+ */
+void set_app_unber_boot(int val)
+{
+ cvmx_app_hotplug_global_t *hgp;
+
+ hgp = cvmx_app_get_hotplug_global_ptr();
+ cvmx_spinlock_lock(&hgp->hotplug_global_lock);
+ hgp->app_under_boot = val;
+ cvmx_spinlock_unlock(&hgp->hotplug_global_lock);
+}
+
+/**
+ * Sets or clears the app_under_shutdown value. When set, it signifies that
+ * an app is currently being shut down or that some cores of an app are
+ * being shut down.
+ *
+ * @param val sets the app_under_shutdown to the specified value. This
+ * should be set to 1 while any app is being shutdown and cleared
+ * after the shutdown of the app is complete.
+ *
+ */
+void set_app_under_shutdown(int val)
+{
+ cvmx_app_hotplug_global_t *hgp;
+
+ hgp = cvmx_app_get_hotplug_global_ptr();
+ cvmx_spinlock_lock(&hgp->hotplug_global_lock);
+ hgp->app_under_shutdown = val;
+ cvmx_spinlock_unlock(&hgp->hotplug_global_lock);
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-app-hotplug.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-app-hotplug.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-app-hotplug.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-app-hotplug.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,156 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Header file for the hotplug APIs
+ *
+ * <hr>$Revision: $<hr>
+ */
+
+#ifndef __CVMX_APP_HOTPLUG_H__
+#define __CVMX_APP_HOTPLUG_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#else
+#include "cvmx.h"
+#include "cvmx-coremask.h"
+#include "cvmx-interrupt.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-spinlock.h"
+#endif
+
+#define CVMX_APP_HOTPLUG_MAX_APPS 32
+#define CVMX_APP_HOTPLUG_MAX_APPNAME_LEN 256
+
+/**
+* hotplug_start is the entry point for hot plugged cores.
+* cores_added_callback is a callback invoked when new cores are added to
+* the application. It is invoked on all the old cores that
+* existed before the current set of cores was added.
+* cores_removed_callback is a callback invoked when cores are removed from
+* an application. It is invoked on all the cores that
+* remain after the requested set of cores is removed.
+* shutdown_callback is invoked on all the cores that are part of the app
+* before the application is shut down.
+* unplug_core_callback is invoked, before the cores are unplugged, only on
+* the cores that are being unplugged.
+*/
+typedef struct cvmx_app_hotplug_callbacks
+{
+ void (*hotplug_start)(void *ptr);
+ void (*cores_added_callback) (uint32_t ,void *ptr);
+ void (*cores_removed_callback) (uint32_t,void *ptr);
+ void (*shutdown_callback) (void *ptr);
+ void (*unplug_core_callback) (void *ptr);
+} cvmx_app_hotplug_callbacks_t;
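+
+/* Editor's illustrative sketch -- not part of the original header. A
+   designated-initializer callback table for use with
+   cvmx_app_hotplug_register_cb(); the my_* handler names are
+   hypothetical. */
+#if 0 /* example only */
+static cvmx_app_hotplug_callbacks_t my_callbacks = {
+    .hotplug_start          = my_hotplug_start,   /* entry for new cores */
+    .cores_added_callback   = my_cores_added,
+    .cores_removed_callback = my_cores_removed,
+    .shutdown_callback      = my_shutdown,
+    .unplug_core_callback   = my_unplug,
+};
+/* registered once: cvmx_app_hotplug_register_cb(&my_callbacks, arg, 1); */
+#endif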
+
+/* The size of this struct must stay fixed at 1024 bytes.
+ Additional members should be added towards the end of the
+ structure by adjusting the size of the padding array. */
+typedef struct cvmx_app_hotplug_info
+{
+ char app_name[CVMX_APP_HOTPLUG_MAX_APPNAME_LEN];
+ uint32_t coremask;
+ uint32_t volatile hotplug_activated_coremask;
+ int32_t valid;
+ int32_t volatile shutdown_done;
+ uint64_t shutdown_callback;
+ uint64_t unplug_callback;
+ uint64_t cores_added_callback;
+ uint64_t cores_removed_callback;
+ uint64_t hotplug_start;
+ uint64_t data;
+ uint32_t volatile hplugged_cores;
+ uint32_t shutdown_cores;
+ uint32_t app_shutdown;
+ uint32_t unplug_cores;
+ uint32_t padding[172];
+} cvmx_app_hotplug_info_t;
+
+struct cvmx_app_hotplug_global
+{
+ uint32_t avail_coremask;
+ cvmx_app_hotplug_info_t hotplug_info_array[CVMX_APP_HOTPLUG_MAX_APPS];
+ uint32_t version;
+ cvmx_spinlock_t hotplug_global_lock;
+ int app_under_boot;
+ int app_under_shutdown;
+};
+typedef struct cvmx_app_hotplug_global cvmx_app_hotplug_global_t;
+
+int is_core_being_hot_plugged(void);
+int is_app_under_boot_or_shutdown(void);
+void set_app_unber_boot(int val);
+void set_app_under_shutdown(int val);
+int cvmx_app_hotplug_shutdown_request(uint32_t, int);
+int cvmx_app_hotplug_unplug_cores(int index, uint32_t coremask, int wait);
+cvmx_app_hotplug_info_t* cvmx_app_hotplug_get_info(uint32_t);
+int cvmx_app_hotplug_get_index(uint32_t coremask);
+cvmx_app_hotplug_info_t* cvmx_app_hotplug_get_info_at_index(int index);
+int is_app_hotpluggable(int index);
+int cvmx_app_hotplug_call_add_cores_callback(int index);
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+int cvmx_app_hotplug_register(void(*)(void*), void*);
+int cvmx_app_hotplug_register_cb(cvmx_app_hotplug_callbacks_t *, void*, int);
+int cvmx_app_hotplug_activate(void);
+void cvmx_app_hotplug_core_shutdown(void);
+void cvmx_app_hotplug_shutdown_disable(void);
+void cvmx_app_hotplug_shutdown_enable(void);
+#endif
+
+#define CVMX_APP_HOTPLUG_INFO_REGION_SIZE sizeof(cvmx_app_hotplug_global_t)
+#define CVMX_APP_HOTPLUG_INFO_REGION_NAME "cvmx-app-hotplug-block"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_APP_HOTPLUG_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-app-hotplug.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-app-init-linux.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-app-init-linux.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-app-init-linux.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,447 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ * Simple executive application initialization for Linux user space. This
+ * file should be used instead of cvmx-app-init.c for running simple executive
+ * applications under Linux in userspace. The following are some of the key
+ * points to remember when writing applications to run both under the
+ * standalone simple executive and userspace under Linux.
+ *
+ * -# Application main must be called "appmain" under Linux. Use an ifdef
+ * based on __linux__ to determine the proper name.
+ * -# Be careful to use cvmx_ptr_to_phys() and cvmx_phys_to_ptr(). The simple
+ * executive 1-1 TLB mappings allow you to be sloppy and interchange
+ * hardware addresses with virtual addresses. This isn't true under Linux.
+ * -# If you're talking directly to hardware, be careful. The normal Linux
+ * protections are circumvented. If you do something bad, Linux won't
+ * save you.
+ * -# Most hardware can only be initialized once. Unless you're very careful,
+ * this also means your Linux application can only run once.
+ *
+ * <hr>$Revision: 70129 $<hr>
+ *
+ */
+#define _GNU_SOURCE
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <signal.h>
+#include <sys/statfs.h>
+#include <sys/wait.h>
+#include <sys/sysmips.h>
+#include <sched.h>
+#include <octeon-app-init.h>
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-atomic.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-coremask.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-helper-cfg.h"
+
+int octeon_model_version_check(uint32_t chip_id);
+
+#define OCTEON_ECLOCK_MULT_INPUT_X16 ((int)(33.4*16))
+
+/* Applications using the simple executive libraries under Linux userspace must
+ rename their "main" function to match the prototype below. This allows the
+ simple executive to perform needed memory initialization and process
+ creation before the application runs. */
+extern int appmain(int argc, const char *argv[]);
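+
+/* Editor's illustrative sketch -- not part of the original file. The rename
+   described in the file comment above, so one source file builds both
+   standalone and for Linux userspace. */
+#if 0 /* example only */
+#ifdef __linux__
+int appmain(int argc, const char *argv[])
+#else
+int main(int argc, const char *argv[])
+#endif
+{
+    return 0;
+}
+#endif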
+
+/* These two external addresses provide the beginning and end markers for the
+ CVMX_SHARED section. These are defined by the cvmx-shared.ld linker script.
+ If they aren't defined, you probably forgot to link using this script. */
+extern void __cvmx_shared_start;
+extern void __cvmx_shared_end;
+extern uint64_t linux_mem32_min;
+extern uint64_t linux_mem32_max;
+extern uint64_t linux_mem32_wired;
+extern uint64_t linux_mem32_offset;
+
+/**
+ * This function performs some default initialization of the Octeon executive. It initializes
+ * the cvmx_bootmem memory allocator with the list of physical memory shared by the bootloader.
+ * This function should be called on all cores that will use the bootmem allocator.
+ * Applications which require a different configuration can replace this function with a suitable application
+ * specific one.
+ *
+ * @return 0 on success
+ * -1 on failure
+ */
+int cvmx_user_app_init(void)
+{
+ return 0;
+}
+
+
+/**
+ * Simulator magic is not supported in user mode under Linux.
+ * This version of simprintf simply calls the underlying C
+ * library printf for output. It also makes sure that two
+ * calls to simprintf provide atomic output.
+ *
+ * @param format Format string in the same format as printf.
+ */
+void simprintf(const char *format, ...)
+{
+ CVMX_SHARED static cvmx_spinlock_t simprintf_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
+ va_list ap;
+
+ cvmx_spinlock_lock(&simprintf_lock);
+ printf("SIMPRINTF(%d): ", (int)cvmx_get_core_num());
+ va_start(ap, format);
+ vprintf(format, ap);
+ va_end(ap);
+ cvmx_spinlock_unlock(&simprintf_lock);
+}
+
+
+/**
+ * Setup the CVMX_SHARED data section to be shared across
+ * all processors running this application. A memory mapped
+ * region is allocated using shm_open and mmap. The current
+ * contents of the CVMX_SHARED section are copied into the
+ * region. Then the new region is remapped to replace the
+ * existing CVMX_SHARED data.
+ *
+ * This function will display a message and abort the
+ * application under any error conditions. The Linux tmpfs
+ * filesystem must be mounted under /dev/shm.
+ */
+static void setup_cvmx_shared(void)
+{
+ const char *SHM_NAME = "cvmx_shared";
+ unsigned long shared_size = &__cvmx_shared_end - &__cvmx_shared_start;
+ int fd;
+
+ /* If there isn't any shared data we can skip all this */
+ if (shared_size)
+ {
+ char shm_name[30];
+ printf("CVMX_SHARED: %p-%p\n", &__cvmx_shared_start, &__cvmx_shared_end);
+
+#ifdef __UCLIBC__
+ const char *defaultdir = "/dev/shm/";
+ struct statfs f;
+ int pid;
+ /* The canonical place is /dev/shm. */
+ if (statfs (defaultdir, &f) == 0)
+ {
+ pid = getpid();
+ sprintf (shm_name, "%s%s-%d", defaultdir, SHM_NAME, pid);
+ }
+ else
+ {
+ perror("/dev/shm is not mounted");
+ exit(-1);
+ }
+
+ /* shm_open(), shm_unlink() are not implemented in uClibc. Do the
+ same thing using open() and close() system calls. */
+ fd = open (shm_name, O_RDWR | O_CREAT | O_TRUNC, 0);
+
+ if (fd < 0)
+ {
+ perror("Failed to open CVMX_SHARED(shm_name)");
+ exit(errno);
+ }
+
+ unlink (shm_name);
+#else
+ sprintf(shm_name, "%s-%d", SHM_NAME, getpid());
+ /* Open a new shared memory region for use as CVMX_SHARED */
+ fd = shm_open(shm_name, O_RDWR | O_CREAT | O_TRUNC, 0);
+ if (fd < 0)
+ {
+ perror("Failed to setup CVMX_SHARED(shm_open)");
+ exit(errno);
+ }
+
+ /* We don't want the file on the filesystem. Immediately unlink it so
+ another application can create its own shared region */
+ shm_unlink(shm_name);
+#endif
+
+ /* Resize the region to match the size of CVMX_SHARED */
+ ftruncate(fd, shared_size);
+
+ /* Map the region into some random location temporarily so we can
+ copy the shared data to it */
+ void *ptr = mmap(NULL, shared_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (ptr == MAP_FAILED) /* mmap reports failure as MAP_FAILED, not NULL */
+ {
+ perror("Failed to setup CVMX_SHARED(mmap copy)");
+ exit(errno);
+ }
+
+ /* Copy CVMX_SHARED to the new shared region so we don't lose
+ initializers */
+ memcpy(ptr, &__cvmx_shared_start, shared_size);
+ munmap(ptr, shared_size);
+
+ /* Remap the shared region to replace the old CVMX_SHARED region */
+ ptr = mmap(&__cvmx_shared_start, shared_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
+ if (ptr == MAP_FAILED)
+ {
+ perror("Failed to setup CVMX_SHARED(mmap final)");
+ exit(errno);
+ }
+
+ /* Once mappings are setup, the file handle isn't needed anymore */
+ close(fd);
+ }
+}
+
+
+/**
+ * Shutdown and free the shared CVMX_SHARED region setup by
+ * setup_cvmx_shared.
+ */
+static void shutdown_cvmx_shared(void)
+{
+ unsigned long shared_size = &__cvmx_shared_end - &__cvmx_shared_start;
+ if (shared_size)
+ munmap(&__cvmx_shared_start, shared_size);
+}
+
+
+/**
+ * Setup access to the CONFIG_CAVIUM_RESERVE32 memory section
+ * created by the kernel. This memory is used for shared
+ * hardware buffers with 32 bit userspace applications.
+ */
+static void setup_reserve32(void)
+{
+ if (linux_mem32_min && linux_mem32_max)
+ {
+ int region_size = linux_mem32_max - linux_mem32_min + 1;
+ int mmap_flags = MAP_SHARED;
+ void *linux_mem32_base_ptr = NULL;
+
+ /* Although not strictly necessary, we are going to mmap() the wired
+ TLB region so it is in the process page tables. These pages will
+ never fault in, but they will allow GDB to access the wired
+ region. We need the mappings to exactly match the wired TLB
+ entry. */
+ if (linux_mem32_wired)
+ {
+ mmap_flags |= MAP_FIXED;
+ linux_mem32_base_ptr = CASTPTR(void, (1ull<<31) - region_size);
+ }
+
+ int fd = open("/dev/mem", O_RDWR);
+ if (fd < 0)
+ {
+ perror("ERROR opening /dev/mem");
+ exit(-1);
+ }
+
+ linux_mem32_base_ptr = mmap64(linux_mem32_base_ptr,
+ region_size,
+ PROT_READ | PROT_WRITE,
+ mmap_flags,
+ fd,
+ linux_mem32_min);
+ close(fd);
+
+ if (MAP_FAILED == linux_mem32_base_ptr)
+ {
+ perror("Error mapping reserve32");
+ exit(-1);
+ }
+
+ linux_mem32_offset = CAST64(linux_mem32_base_ptr) - linux_mem32_min;
+ }
+}
+
+
+/**
+ * Main entrypoint of the application. Here we setup shared
+ * memory and fork processes for each cpu. This simulates the
+ * normal simple executive environment of one process per
+ * cpu core.
+ *
+ * @param argc Number of command line arguments
+ * @param argv The command line arguments
+ * @return Return value for the process
+ */
+int main(int argc, const char *argv[])
+{
+ CVMX_SHARED static cvmx_spinlock_t mask_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
+ CVMX_SHARED static int32_t pending_fork;
+ unsigned long cpumask;
+ unsigned long cpu;
+ int firstcpu = 0;
+ int firstcore = 0;
+
+ cvmx_linux_enable_xkphys_access(0);
+ cvmx_sysinfo_linux_userspace_initialize();
+
+ if (sizeof(void*) == 4)
+ {
+ if (linux_mem32_min)
+ setup_reserve32();
+ else
+ {
+ printf("\nFailed to access 32bit shared memory region. Most likely the Kernel\n"
+ "has not been configured for 32bit shared memory access. Check the\n"
+ "kernel configuration.\n"
+ "Aborting...\n\n");
+ exit(-1);
+ }
+ }
+
+ setup_cvmx_shared();
+ cvmx_bootmem_init(cvmx_sysinfo_get()->phy_mem_desc_addr);
+
+ /* Check to make sure the Chip version matches the configured version */
+ octeon_model_version_check(cvmx_get_proc_id());
+
+ /* Initialize configuration to set bpid, pkind, pko_port for all the
+ available ports connected. */
+ __cvmx_helper_cfg_init();
+
+ /* Get the list of logical cpus we should run on */
+ if (sched_getaffinity(0, sizeof(cpumask), (cpu_set_t*)&cpumask))
+ {
+ perror("sched_getaffinity failed");
+ exit(errno);
+ }
+
+ cvmx_sysinfo_t *system_info = cvmx_sysinfo_get();
+
+ cvmx_atomic_set32(&pending_fork, 1);
+
+ /* Get the lowest logical cpu */
+ firstcore = ffsl(cpumask) - 1;
+ cpumask ^= (1ull<<(firstcore));
+ while (1)
+ {
+ if (cpumask == 0)
+ {
+ cpu = firstcore;
+ firstcpu = 1;
+ break;
+ }
+ cpu = ffsl(cpumask) - 1;
+ /* Turn off the bit for this CPU number. We've counted it */
+ cpumask ^= (1ull<<cpu);
+ /* Increment the number of CPUs running this app */
+ cvmx_atomic_add32(&pending_fork, 1);
+ /* Flush all IO streams before the fork. Otherwise any buffered
+ data in the C library will be duplicated. This results in
+ duplicate output from a single print */
+ fflush(NULL);
+ /* Fork a process for the new CPU */
+ int pid = fork();
+ if (pid == 0)
+ {
+ break;
+ }
+ else if (pid == -1)
+ {
+ perror("Fork failed");
+ exit(errno);
+ }
+ }
+
+
+ /* Set affinity to lock me to the correct CPU */
+ cpumask = (1ull<<cpu); /* 64-bit constant so CPU numbers >= 32 work */
+ if (sched_setaffinity(0, sizeof(cpumask), (cpu_set_t*)&cpumask))
+ {
+ perror("sched_setaffinity failed");
+ exit(errno);
+ }
+
+ cvmx_spinlock_lock(&mask_lock);
+ system_info->core_mask |= 1<<cvmx_get_core_num();
+ cvmx_atomic_add32(&pending_fork, -1);
+ if (cvmx_atomic_get32(&pending_fork) == 0)
+ {
+ cvmx_dprintf("Active coremask = 0x%x\n", system_info->core_mask);
+ }
+ if (firstcpu)
+ system_info->init_core = cvmx_get_core_num();
+ cvmx_spinlock_unlock(&mask_lock);
+
+ /* Spinning waiting for forks to complete */
+ while (cvmx_atomic_get32(&pending_fork)) {}
+
+ cvmx_coremask_barrier_sync(system_info->core_mask);
+
+ cvmx_linux_enable_xkphys_access(1);
+
+ int result = appmain(argc, argv);
+
+ /* Wait for all forks to complete. This needs to be the core that started
+ all of the forks. It may not be the lowest numbered core! */
+ if (cvmx_get_core_num() == system_info->init_core)
+ {
+ int num_waits;
+ CVMX_POP(num_waits, system_info->core_mask);
+ num_waits--;
+ while (num_waits--)
+ {
+ if (wait(NULL) == -1)
+ perror("CVMX: Wait for forked child failed\n");
+ }
+ }
+
+ shutdown_cvmx_shared();
+
+ return result;
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-app-init-linux.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-app-init.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-app-init.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-app-init.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,590 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include "executive-config.h"
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-spinlock.h"
+#include <octeon-app-init.h>
+#include "cvmx-sysinfo.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-uart.h"
+#include "cvmx-coremask.h"
+#include "cvmx-core.h"
+#include "cvmx-interrupt.h"
+#include "cvmx-ebt3000.h"
+#include "cvmx-sim-magic.h"
+#include "cvmx-debug.h"
+#include "cvmx-qlm.h"
+#include "cvmx-scratch.h"
+#include "cvmx-helper-cfg.h"
+#include "cvmx-helper-jtag.h"
+#include <octeon_mem_map.h>
+#include "libfdt.h"
+int cvmx_debug_uart = -1;
+
+/**
+ * @file
+ *
+ * Main entry point for all simple executive based programs.
+ */
+
+
+extern void cvmx_interrupt_initialize(void);
+
+
+
+/**
+ * Main entry point for all simple executive based programs.
+ * This is the first C function called. It completes
+ * initialization, calls main, and performs C level cleanup.
+ *
+ * @param app_desc_addr
+ * Address of the application description structure passed
+ * from the boot loader.
+ */
+EXTERN_ASM void __cvmx_app_init(uint64_t app_desc_addr);
+
+
+/**
+ * Set up sysinfo structure from boot descriptor versions 6 and higher.
+ * In these versions, the interesting data is not in the boot info structure
+ * defined by the toolchain, but is in the cvmx_bootinfo structure defined in
+ * the simple exec.
+ *
+ * @param app_desc_ptr
+ * pointer to boot descriptor block
+ *
+ * @param sys_info_ptr
+ * pointer to sysinfo structure to fill in
+ */
+static void process_boot_desc_ver_6(octeon_boot_descriptor_t *app_desc_ptr, cvmx_sysinfo_t *sys_info_ptr)
+{
+ cvmx_bootinfo_t *cvmx_bootinfo_ptr = CASTPTR(cvmx_bootinfo_t, app_desc_ptr->cvmx_desc_vaddr);
+
+ /* copy application information for simple exec use */
+ /* Populate the sys_info structure from the boot descriptor block created by the bootloader.
+ ** The boot descriptor block is put in the top of the heap, so it will be overwritten when the
+ ** heap is fully used. Information that is to be used must be copied before that.
+ ** Applications should only use the sys_info structure, not the boot descriptor
+ */
+ if (cvmx_bootinfo_ptr->major_version == 1)
+ {
+ sys_info_ptr->core_mask = cvmx_bootinfo_ptr->core_mask;
+ sys_info_ptr->heap_base = cvmx_bootinfo_ptr->heap_base;
+ sys_info_ptr->heap_size = cvmx_bootinfo_ptr->heap_end - cvmx_bootinfo_ptr->heap_base;
+ sys_info_ptr->stack_top = cvmx_bootinfo_ptr->stack_top;
+ sys_info_ptr->stack_size = cvmx_bootinfo_ptr->stack_size;
+ sys_info_ptr->init_core = cvmx_get_core_num();
+ sys_info_ptr->phy_mem_desc_addr = cvmx_bootinfo_ptr->phy_mem_desc_addr;
+ sys_info_ptr->exception_base_addr = cvmx_bootinfo_ptr->exception_base_addr;
+ sys_info_ptr->cpu_clock_hz = cvmx_bootinfo_ptr->eclock_hz;
+ sys_info_ptr->dram_data_rate_hz = cvmx_bootinfo_ptr->dclock_hz * 2;
+
+ sys_info_ptr->board_type = cvmx_bootinfo_ptr->board_type;
+ sys_info_ptr->board_rev_major = cvmx_bootinfo_ptr->board_rev_major;
+ sys_info_ptr->board_rev_minor = cvmx_bootinfo_ptr->board_rev_minor;
+ memcpy(sys_info_ptr->mac_addr_base, cvmx_bootinfo_ptr->mac_addr_base, 6);
+ sys_info_ptr->mac_addr_count = cvmx_bootinfo_ptr->mac_addr_count;
+ memcpy(sys_info_ptr->board_serial_number, cvmx_bootinfo_ptr->board_serial_number, CVMX_BOOTINFO_OCTEON_SERIAL_LEN);
+ sys_info_ptr->console_uart_num = 0;
+ if (cvmx_bootinfo_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1)
+ sys_info_ptr->console_uart_num = 1;
+
+ if (cvmx_bootinfo_ptr->dram_size > 32*1024*1024)
+ sys_info_ptr->system_dram_size = (uint64_t)cvmx_bootinfo_ptr->dram_size; /* older bootloaders incorrectly gave this in bytes, so don't convert */
+ else
+ sys_info_ptr->system_dram_size = (uint64_t)cvmx_bootinfo_ptr->dram_size * 1024 * 1024; /* convert from Megabytes to bytes */
+ if (cvmx_bootinfo_ptr->minor_version >= 1)
+ {
+ sys_info_ptr->compact_flash_common_base_addr = cvmx_bootinfo_ptr->compact_flash_common_base_addr;
+ sys_info_ptr->compact_flash_attribute_base_addr = cvmx_bootinfo_ptr->compact_flash_attribute_base_addr;
+ sys_info_ptr->led_display_base_addr = cvmx_bootinfo_ptr->led_display_base_addr;
+ }
+ else if (sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT3000 ||
+ sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT5800 ||
+ sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT5810)
+ {
+ /* Default these variables so that users of the structure behave the same no
+ ** matter what version of the boot info block the bootloader passes */
+ sys_info_ptr->compact_flash_common_base_addr = 0x1d000000 + 0x800;
+ sys_info_ptr->compact_flash_attribute_base_addr = 0x1d010000;
+ if (sys_info_ptr->board_rev_major == 1)
+ sys_info_ptr->led_display_base_addr = 0x1d020000;
+ else
+ sys_info_ptr->led_display_base_addr = 0x1d020000 + 0xf8;
+ }
+ else
+ {
+ sys_info_ptr->compact_flash_common_base_addr = 0;
+ sys_info_ptr->compact_flash_attribute_base_addr = 0;
+ sys_info_ptr->led_display_base_addr = 0;
+ }
+
+ if (cvmx_bootinfo_ptr->minor_version >= 2)
+ {
+ sys_info_ptr->dfa_ref_clock_hz = cvmx_bootinfo_ptr->dfa_ref_clock_hz;
+ sys_info_ptr->bootloader_config_flags = cvmx_bootinfo_ptr->config_flags;
+ }
+ else
+ {
+ sys_info_ptr->dfa_ref_clock_hz = 0;
+ sys_info_ptr->bootloader_config_flags = 0;
+ if (app_desc_ptr->flags & OCTEON_BL_FLAG_DEBUG)
+ sys_info_ptr->bootloader_config_flags |= CVMX_BOOTINFO_CFG_FLAG_DEBUG;
+ if (app_desc_ptr->flags & OCTEON_BL_FLAG_NO_MAGIC)
+ sys_info_ptr->bootloader_config_flags |= CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC;
+ }
+
+ }
+ else
+ {
+ printf("ERROR: Incompatible CVMX descriptor passed by bootloader: %d.%d\n",
+ (int)cvmx_bootinfo_ptr->major_version, (int)cvmx_bootinfo_ptr->minor_version);
+ exit(-1);
+ }
+ if ((cvmx_bootinfo_ptr->minor_version >= 3) && (cvmx_bootinfo_ptr->fdt_addr != 0))
+ {
+ sys_info_ptr->fdt_addr = UNMAPPED_PTR(cvmx_bootinfo_ptr->fdt_addr);
+ if (fdt_check_header((const void *)sys_info_ptr->fdt_addr))
+ {
+ printf("ERROR : Corrupt Device Tree.\n");
+ exit(-1);
+ }
+ printf("Using device tree\n");
+ }
+ else
+ {
+ sys_info_ptr->fdt_addr = 0;
+ }
+}
+
+
+/**
+ * Interrupt handler for calling exit on Control-C interrupts.
+ *
+ * @param irq_number IRQ interrupt number
+ * @param registers CPU registers at the time of the interrupt
+ * @param user_arg Unused user argument
+ */
+static void process_break_interrupt(int irq_number, uint64_t registers[32], void *user_arg)
+{
+ /* Exclude new functionality when building with older toolchains */
+#if OCTEON_APP_INIT_H_VERSION >= 3
+ int uart = irq_number - CVMX_IRQ_UART0;
+ cvmx_uart_lsr_t lsrval;
+
+ /* Check for a Control-C interrupt from the console. This loop will eat
+ all input received on the uart */
+ lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
+ while (lsrval.s.dr)
+ {
+ int c = cvmx_read_csr(CVMX_MIO_UARTX_RBR(uart));
+ if (c == '\003')
+ {
+ register uint64_t tmp;
+
+ /* Wait for another Control-C if right now we have no
+ access to the console. After this point we hold the
+ lock and use a different lock to synchronize between
+ the memfile dumps from different cores. As a
+ consequence regular printfs *don't* work after this
+ point! */
+ if (__octeon_uart_trylock () == 1)
+ return;
+
+ /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also
+ set the MCD0 to be not masked by this core so we know
+ the signal is received by someone */
+ asm volatile (
+ "dmfc0 %0, $22\n"
+ "ori %0, %0, 0x1110\n"
+ "dmtc0 %0, $22\n"
+ : "=r" (tmp));
+ }
+ lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
+ }
+#endif
+}
+
+/**
+ * This is the debug exception handler with "break". Before calling exit to
+ * dump the profile-feedback output it releases the lock on the console.
+ * This way if there is buffered data in stdout it can still be flushed.
+ * stdio is required to flush all output during an fread.
+ */
+
+static void exit_on_break(void)
+{
+#if OCTEON_APP_INIT_H_VERSION >= 4
+ unsigned int coremask = cvmx_sysinfo_get()->core_mask;
+
+ cvmx_coremask_barrier_sync(coremask);
+ if (cvmx_coremask_first_core(coremask))
+ __octeon_uart_unlock();
+#endif
+
+ exit(0);
+}
+
+/* Add string signature to applications so that we can easily tell what
+** Octeon revision they were compiled for. Don't make static to avoid unused
+** variable warning. */
+#define xstr(s) str(s)
+#define str(s) #s
+
+int octeon_model_version_check(uint32_t chip_id);
+
+#define OMS xstr(OCTEON_MODEL)
+char octeon_rev_signature[] =
+#ifdef USE_RUNTIME_MODEL_CHECKS
+ "Compiled for runtime Octeon model checking";
+#else
+ "Compiled for Octeon processor id: "OMS;
+#endif
+
+#define OCTEON_BL_FLAG_HPLUG_CORES (1 << 6)
+void __cvmx_app_init(uint64_t app_desc_addr)
+{
+ /* App descriptor used by bootloader */
+ octeon_boot_descriptor_t *app_desc_ptr = CASTPTR(octeon_boot_descriptor_t, app_desc_addr);
+
+ /* app info structure used by the simple exec */
+ cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+ int breakflag = 0;
+
+ //printf("coremask=%08x flags=%08x \n", app_desc_ptr->core_mask, app_desc_ptr->flags);
+ if (cvmx_coremask_first_core(app_desc_ptr->core_mask))
+ {
+ /* Initialize the bootmem allocator with the descriptor that was provided by
+ * the bootloader
+ * IMPORTANT: All printfs must happen after this since PCI console uses named
+ * blocks.
+ */
+ cvmx_bootmem_init(CASTPTR(cvmx_bootinfo_t, app_desc_ptr->cvmx_desc_vaddr)->phy_mem_desc_addr);
+
+ /* do once per application setup */
+ if (app_desc_ptr->desc_version < 6)
+ {
+ printf("Obsolete bootloader, can't run application\n");
+ exit(-1);
+ }
+ else
+ {
+ /* Handle all newer versions here.... */
+ if (app_desc_ptr->desc_version > 7)
+ {
+ printf("Warning: newer boot descripter version than expected\n");
+ }
+ process_boot_desc_ver_6(app_desc_ptr,sys_info_ptr);
+
+ }
+
+ /*
+ * set up the feature map and config.
+ */
+ octeon_feature_init();
+
+ __cvmx_helper_cfg_init();
+ }
+ /* The flags variable gets copied around in several places; tracing its
+ origins shows that:
+ ** In octeon_setup_boot_desc_block
+ . cvmx_bootinfo_array[core].flags is initialized and the various bits are set
+ . cvmx_bootinfo_array[core].flags gets copied to boot_desc[core].flags
+ . boot_desc is then copied to the end of the application heap, and
+ boot_info_block_array[core].boot_descr_addr is set to point to the boot_desc
+ in the heap.
+ ** In start_app, boot_vect->boot_info_addr->boot_desc_addr is referenced and
+ passed on to octeon_setup_crt0_tlb(), which puts it into r16.
+ ** In crt0.S of the toolchain, r16 is picked up and passed on as a parameter
+ to __cvmx_app_init.
+
+ Note: boot_vect->boot_info_addr points to boot_info_block_array[core]; this
+ pointer is set up in octeon_setup_boot_vector().
+ */
+
+ if (!(app_desc_ptr->flags & OCTEON_BL_FLAG_HPLUG_CORES))
+ cvmx_coremask_barrier_sync(app_desc_ptr->core_mask);
+
+
+ breakflag = sys_info_ptr->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_BREAK;
+
+ /* No need to initialize bootmem, interrupts, interrupt handler and error handler
+ if version does not match. */
+ if (cvmx_coremask_first_core(sys_info_ptr->core_mask))
+ {
+ /* Check to make sure the Chip version matches the configured version */
+ uint32_t chip_id = cvmx_get_proc_id();
+ /* Make sure we can properly run on this chip */
+ octeon_model_version_check(chip_id);
+ }
+ cvmx_interrupt_initialize();
+ if (cvmx_coremask_first_core(sys_info_ptr->core_mask))
+ {
+ int break_uart = 0;
+ unsigned int i;
+
+ if (breakflag && cvmx_debug_booted())
+ {
+ printf("ERROR: Using debug and break together in not supported.\n");
+ while (1)
+ ;
+ }
+
+ /* Search through the arguments for a break=X or a debug=X. */
+ for (i = 0; i < app_desc_ptr->argc; i++)
+ {
+ const char *argv = CASTPTR(const char, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, app_desc_ptr->argv[i]));
+ if (strncmp(argv, "break=", 6) == 0)
+ break_uart = atoi(argv + 6);
+ else if (strncmp(argv, "debug=", 6) == 0)
+ cvmx_debug_uart = atoi(argv + 6);
+ }
+
+ if (breakflag)
+ {
+ int32_t *trampoline = CASTPTR(int32_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, BOOTLOADER_DEBUG_TRAMPOLINE));
+ /* On debug exception, call exit_on_break from all cores. */
+ *trampoline = (int32_t)(long)&exit_on_break;
+ cvmx_uart_enable_intr(break_uart, process_break_interrupt);
+ }
+ }
+ if ( !(app_desc_ptr->flags & OCTEON_BL_FLAG_HPLUG_CORES))
+ cvmx_coremask_barrier_sync(app_desc_ptr->core_mask);
+
+ /* Clear BEV now that we have installed exception handlers. */
+ uint64_t tmp;
+ asm volatile (
+ " .set push \n"
+ " .set mips64 \n"
+ " .set noreorder \n"
+ " .set noat \n"
+ " mfc0 %[tmp], $12, 0 \n"
+ " li $at, 1 << 22 \n"
+ " not $at, $at \n"
+ " and %[tmp], $at \n"
+ " mtc0 %[tmp], $12, 0 \n"
+ " .set pop \n"
+ : [tmp] "=&r" (tmp) : );
+
+ /* Set all cores to stop on MCD0 signals */
+ asm volatile(
+ "dmfc0 %0, $22, 0\n"
+ "or %0, %0, 0x1100\n"
+ "dmtc0 %0, $22, 0\n" : "=r" (tmp));
+
+ CVMX_SYNC;
+ /* Now initialize the debug exception handler as BEV is cleared. */
+ if ((!breakflag) && (!(app_desc_ptr->flags & OCTEON_BL_FLAG_HPLUG_CORES)))
+ cvmx_debug_init();
+
+ /* Synchronise all cores at this point */
+ if ( !(app_desc_ptr->flags & OCTEON_BL_FLAG_HPLUG_CORES))
+ cvmx_coremask_barrier_sync(app_desc_ptr->core_mask);
+
+}
+
+int cvmx_user_app_init(void)
+{
+ uint64_t bist_val;
+ uint64_t mask;
+ int bist_errors = 0;
+ uint64_t tmp;
+ uint64_t base_addr;
+
+
+ /* Put message on LED display */
+ if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
+ ebt3000_str_write("CVMX ");
+
+ /* Check BIST results for COP0 registers, some values only meaningful in pass 2 */
+ CVMX_MF_CACHE_ERR(bist_val);
+ mask = (0x3fULL<<32); // Icache;BHT;AES;HSH/GFM;LRU;register file
+ bist_val &= mask;
+ if (bist_val)
+ {
+ printf("BIST FAILURE: COP0_CACHE_ERR: 0x%llx\n", (unsigned long long)bist_val);
+ bist_errors++;
+ }
+
+ mask = 0xfc00000000000000ull;
+ CVMX_MF_CVM_MEM_CTL(bist_val);
+ bist_val &= mask;
+ if (bist_val)
+ {
+ printf("BIST FAILURE: COP0_CVM_MEM_CTL: 0x%llx\n", (unsigned long long)bist_val);
+ bist_errors++;
+ }
+
+ /* Set up 4 cache lines of local memory, make available from Kernel space */
+ CVMX_MF_CVM_MEM_CTL(tmp);
+ tmp &= ~0x1ffull;
+ tmp |= 0x104ull;
+ /* Set WBTHRESH=4 as per Core-14752 errata in cn63xxp1.X. */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ {
+ tmp &= ~(0xfull << 11);
+ tmp |= 4 << 11;
+ }
+ CVMX_MT_CVM_MEM_CTL(tmp);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_X))
+ {
+ /* Clear the lines of scratch memory configured, for
+ ** 63XX pass 2 errata Core-15169. */
+ uint64_t addr;
+ unsigned num_lines;
+ CVMX_MF_CVM_MEM_CTL(tmp);
+ num_lines = tmp & 0x3f;
+ for (addr = 0; addr < CVMX_CACHE_LINE_SIZE * num_lines; addr += 8)
+ cvmx_scratch_write64(addr, 0);
+ }
+
+#if CVMX_USE_1_TO_1_TLB_MAPPINGS
+
+ /* Check to see if the bootloader is indicating that the application is outside
+ ** of the 0x10000000 - 0x20000000 range, in which case we can't use 1-1 mappings */
+ if (cvmx_sysinfo_get()->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING)
+ {
+ printf("ERROR: 1-1 TLB mappings configured and oversize application loaded.\n");
+ printf("ERROR: Either 1-1 TLB mappings must be disabled or application size reduced.\n");
+ exit(-1);
+ }
+
+ /* Create 1-1 Mappings for all DRAM up to 8 gigs, excluding the low 1 Megabyte. This area
+ ** is reserved for the bootloader and exception vectors. By not mapping this area, NULL pointer
+ ** dereferences will be caught with TLB exceptions. Exception handlers should be written
+ ** using XKPHYS or KSEG0 addresses. */
+#if CVMX_NULL_POINTER_PROTECT
+ /* Exclude low 1 MByte from mapping to detect NULL pointer accesses.
+ ** The only down side of this is it uses more TLB mappings */
+ cvmx_core_add_fixed_tlb_mapping_bits(0x0, 0x0, 0x100000 | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, CVMX_TLB_PAGEMASK_1M);
+ cvmx_core_add_fixed_tlb_mapping(0x200000, 0x200000, 0x300000, CVMX_TLB_PAGEMASK_1M);
+ cvmx_core_add_fixed_tlb_mapping(0x400000, 0x400000, 0x500000, CVMX_TLB_PAGEMASK_1M);
+ cvmx_core_add_fixed_tlb_mapping(0x600000, 0x600000, 0x700000, CVMX_TLB_PAGEMASK_1M);
+
+ cvmx_core_add_fixed_tlb_mapping(0x800000, 0x800000, 0xC00000, CVMX_TLB_PAGEMASK_4M);
+ cvmx_core_add_fixed_tlb_mapping(0x1000000, 0x1000000, 0x1400000, CVMX_TLB_PAGEMASK_4M);
+ cvmx_core_add_fixed_tlb_mapping(0x1800000, 0x1800000, 0x1c00000, CVMX_TLB_PAGEMASK_4M);
+
+ cvmx_core_add_fixed_tlb_mapping(0x2000000, 0x2000000, 0x3000000, CVMX_TLB_PAGEMASK_16M);
+ cvmx_core_add_fixed_tlb_mapping(0x4000000, 0x4000000, 0x5000000, CVMX_TLB_PAGEMASK_16M);
+ cvmx_core_add_fixed_tlb_mapping(0x6000000, 0x6000000, 0x7000000, CVMX_TLB_PAGEMASK_16M);
+#else
+ /* Map entire low 128 Megs, including 0x0 */
+ cvmx_core_add_fixed_tlb_mapping(0x0, 0x0, 0x4000000ULL, CVMX_TLB_PAGEMASK_64M);
+#endif
+ cvmx_core_add_fixed_tlb_mapping(0x8000000ULL, 0x8000000ULL, 0xc000000ULL, CVMX_TLB_PAGEMASK_64M);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ for (base_addr = 0x20000000ULL; base_addr < (cvmx_sysinfo_get()->system_dram_size + 0x10000000ULL); base_addr += 0x20000000ULL)
+ {
+ if (0 > cvmx_core_add_fixed_tlb_mapping(base_addr, base_addr, base_addr + 0x10000000ULL, CVMX_TLB_PAGEMASK_256M))
+ {
+ printf("ERROR adding 1-1 TLB mapping for address 0x%llx\n", (unsigned long long)base_addr);
+ /* Exit from here, as expected memory mappings aren't set
+ up if this fails */
+ exit(-1);
+ }
+ }
+ }
+ else
+ {
+ /* Create 1-1 mapping for next 256 megs
+ ** bottom page is not valid */
+ cvmx_core_add_fixed_tlb_mapping_bits(0x400000000ULL, 0, 0x410000000ULL | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, CVMX_TLB_PAGEMASK_256M);
+
+ /* Map from 0.5 GBytes up to the installed memory size in 512 MByte chunks. If this loop runs out of memory,
+ ** the NULL pointer detection can be disabled to free up more TLB entries. */
+ if (cvmx_sysinfo_get()->system_dram_size > 0x20000000ULL)
+ {
+ for (base_addr = 0x20000000ULL; base_addr <= (cvmx_sysinfo_get()->system_dram_size - 0x20000000ULL); base_addr += 0x20000000ULL)
+ {
+ if (0 > cvmx_core_add_fixed_tlb_mapping(base_addr, base_addr, base_addr + 0x10000000ULL, CVMX_TLB_PAGEMASK_256M))
+ {
+ printf("ERROR adding 1-1 TLB mapping for address 0x%llx\n", (unsigned long long)base_addr);
+ /* Exit from here, as expected memory mappings
+ aren't set up if this fails */
+ exit(-1);
+ }
+ }
+ }
+ }
+#endif
+
+
+ cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+ cvmx_bootmem_init(sys_info_ptr->phy_mem_desc_addr);
+
+ /* Initialize QLM and JTAG settings. Also apply any erratas. */
+ if (cvmx_coremask_first_core(cvmx_sysinfo_get()->core_mask))
+ cvmx_qlm_init();
+
+ return(0);
+}
+
+void __cvmx_app_exit(void)
+{
+ cvmx_debug_finish();
+
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ {
+ CVMX_BREAK;
+ }
+ /* Hang forever, until more appropriate stand alone simple executive
+ exit() is implemented */
+
+ while (1);
+}
+
+
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-app-init.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-app-init.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-app-init.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-app-init.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,515 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ * Header file for simple executive application initialization. This defines
+ * part of the ABI between the bootloader and the application.
+ * <hr>$Revision: 70327 $<hr>
+ *
+ */
+
+#ifndef __CVMX_APP_INIT_H__
+#define __CVMX_APP_INIT_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Current major and minor versions of the CVMX bootinfo block that is passed
+** from the bootloader to the application. This is versioned so that applications
+** can properly handle multiple bootloader versions. */
+#define CVMX_BOOTINFO_MAJ_VER 1
+#define CVMX_BOOTINFO_MIN_VER 3
+
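+/* How an application might reject an incompatible bootloader (an
+ * illustrative sketch, not code shipped in this header; fields newer than
+ * the bootloader's minor version must additionally not be read):
+ *
+ *   static int cvmx_bootinfo_compatible(const struct cvmx_bootinfo *bi)
+ *   {
+ *       // The major version must match exactly; a newer minor version
+ *       // only appends fields, so it stays binary compatible.
+ *       return bi->major_version == CVMX_BOOTINFO_MAJ_VER;
+ *   }
+ */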
+
+#if (CVMX_BOOTINFO_MAJ_VER == 1)
+#define CVMX_BOOTINFO_OCTEON_SERIAL_LEN 20
+/* This structure is populated by the bootloader. For binary
+** compatibility the only changes that should be made are
+** adding members to the end of the structure, and the minor
+** version should be incremented at that time.
+** If an incompatible change is made, the major version
+** must be incremented, and the minor version should be reset
+** to 0.
+*/
+struct cvmx_bootinfo {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t major_version;
+ uint32_t minor_version;
+
+ uint64_t stack_top;
+ uint64_t heap_base;
+ uint64_t heap_end;
+ uint64_t desc_vaddr;
+
+ uint32_t exception_base_addr;
+ uint32_t stack_size;
+ uint32_t flags;
+ uint32_t core_mask;
+ uint32_t dram_size; /**< DRAM size in megabytes */
+ uint32_t phy_mem_desc_addr; /**< physical address of free memory descriptor block*/
+ uint32_t debugger_flags_base_addr; /**< used to pass flags from app to debugger */
+ uint32_t eclock_hz; /**< CPU clock speed, in hz */
+ uint32_t dclock_hz; /**< DRAM clock speed, in hz */
+ uint32_t reserved0;
+ uint16_t board_type;
+ uint8_t board_rev_major;
+ uint8_t board_rev_minor;
+ uint16_t reserved1;
+ uint8_t reserved2;
+ uint8_t reserved3;
+ char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
+ uint8_t mac_addr_base[6];
+ uint8_t mac_addr_count;
+#if (CVMX_BOOTINFO_MIN_VER >= 1)
+ /* Several boards support compact flash on the Octeon boot bus. The CF
+ ** memory spaces may be mapped to different addresses on different boards.
+ ** These are the physical addresses, so care must be taken to use the correct
+ ** XKPHYS/KSEG0 addressing depending on the application's ABI.
+ ** These values will be 0 if CF is not present */
+ uint64_t compact_flash_common_base_addr;
+ uint64_t compact_flash_attribute_base_addr;
+ /* Base address of the LED display (as on EBT3000 board)
+ ** This will be 0 if LED display not present. */
+ uint64_t led_display_base_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 2)
+ uint32_t dfa_ref_clock_hz; /**< DFA reference clock in hz (if applicable)*/
+ uint32_t config_flags; /**< flags indicating various configuration options. These flags supersede
+ ** the 'flags' variable and should be used instead if available */
+#if defined(OCTEON_VENDOR_GEFES)
+ uint32_t dfm_size; /**< DFA Size */
+#endif
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 3)
+ uint64_t fdt_addr; /**< Address of the OF Flattened Device Tree structure describing the board. */
+#endif
+#else /* __BIG_ENDIAN */
+ /*
+ * Little-Endian: When the CPU mode is switched to
+ * little-endian, the view of the structure has some of the
+ * fields swapped.
+ */
+ uint32_t minor_version;
+ uint32_t major_version;
+
+ uint64_t stack_top;
+ uint64_t heap_base;
+ uint64_t heap_end;
+ uint64_t desc_vaddr;
+
+ uint32_t stack_size;
+ uint32_t exception_base_addr;
+
+ uint32_t core_mask;
+ uint32_t flags;
+
+ uint32_t phy_mem_desc_addr;
+ uint32_t dram_size;
+
+ uint32_t eclock_hz;
+ uint32_t debugger_flags_base_addr;
+
+ uint32_t reserved0;
+ uint32_t dclock_hz;
+
+ uint8_t reserved3;
+ uint8_t reserved2;
+ uint16_t reserved1;
+ uint8_t board_rev_minor;
+ uint8_t board_rev_major;
+ uint16_t board_type;
+
+ union cvmx_bootinfo_scramble {
+ /* Must byteswap these four words so that...*/
+ uint64_t s[4];
+ /* ... this structure has the proper data arrangement. */
+ struct {
+ char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
+ uint8_t mac_addr_base[6];
+ uint8_t mac_addr_count;
+ uint8_t pad[5];
+ } le;
+ } scramble1;
+
+#if (CVMX_BOOTINFO_MIN_VER >= 1)
+ uint64_t compact_flash_common_base_addr;
+ uint64_t compact_flash_attribute_base_addr;
+ uint64_t led_display_base_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 2)
+ uint32_t config_flags;
+ uint32_t dfa_ref_clock_hz;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 3)
+ uint64_t fdt_addr;
+#endif
+#endif
+};
+
+typedef struct cvmx_bootinfo cvmx_bootinfo_t;
+
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_HOST (1ull << 0)
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_TARGET (1ull << 1)
+#define CVMX_BOOTINFO_CFG_FLAG_DEBUG (1ull << 2)
+#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC (1ull << 3)
+/* This flag is set if the TLB mappings are not contained in the
+** 0x10000000 - 0x20000000 boot bus region. */
+#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
+#define CVMX_BOOTINFO_CFG_FLAG_BREAK (1ull << 5)
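+/* These flags are tested with plain bit masks against
+ * cvmx_sysinfo_get()->bootloader_config_flags, e.g. (a sketch mirroring the
+ * use in cvmx-app-init.c):
+ *
+ *   if (cvmx_sysinfo_get()->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_DEBUG)
+ *       ... the bootloader requested the debug monitor ...
+ */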
+
+#endif /* (CVMX_BOOTINFO_MAJ_VER == 1) */
+
+
+/* Type defines for board and chip types */
+enum cvmx_board_types_enum {
+ CVMX_BOARD_TYPE_NULL = 0,
+ CVMX_BOARD_TYPE_SIM = 1,
+ CVMX_BOARD_TYPE_EBT3000 = 2,
+ CVMX_BOARD_TYPE_KODAMA = 3,
+ CVMX_BOARD_TYPE_NIAGARA = 4, /* Obsolete, no longer supported */
+ CVMX_BOARD_TYPE_NAC38 = 5, /* Obsolete, no longer supported */
+ CVMX_BOARD_TYPE_THUNDER = 6,
+ CVMX_BOARD_TYPE_TRANTOR = 7, /* Obsolete, no longer supported */
+ CVMX_BOARD_TYPE_EBH3000 = 8,
+ CVMX_BOARD_TYPE_EBH3100 = 9,
+ CVMX_BOARD_TYPE_HIKARI = 10,
+ CVMX_BOARD_TYPE_CN3010_EVB_HS5 = 11,
+ CVMX_BOARD_TYPE_CN3005_EVB_HS5 = 12,
+#if defined(OCTEON_VENDOR_GEFES)
+ CVMX_BOARD_TYPE_TNPA3804 = 13,
+ CVMX_BOARD_TYPE_AT5810 = 14,
+ CVMX_BOARD_TYPE_WNPA3850 = 15,
+ CVMX_BOARD_TYPE_W3860 = 16,
+#else
+ CVMX_BOARD_TYPE_KBP = 13,
+ CVMX_BOARD_TYPE_CN3020_EVB_HS5 = 14, /* Deprecated, CVMX_BOARD_TYPE_CN3010_EVB_HS5 supports the CN3020 */
+ CVMX_BOARD_TYPE_EBT5800 = 15,
+ CVMX_BOARD_TYPE_NICPRO2 = 16,
+#endif
+ CVMX_BOARD_TYPE_EBH5600 = 17,
+ CVMX_BOARD_TYPE_EBH5601 = 18,
+ CVMX_BOARD_TYPE_EBH5200 = 19,
+ CVMX_BOARD_TYPE_BBGW_REF = 20,
+ CVMX_BOARD_TYPE_NIC_XLE_4G = 21,
+ CVMX_BOARD_TYPE_EBT5600 = 22,
+ CVMX_BOARD_TYPE_EBH5201 = 23,
+ CVMX_BOARD_TYPE_EBT5200 = 24,
+ CVMX_BOARD_TYPE_CB5600 = 25,
+ CVMX_BOARD_TYPE_CB5601 = 26,
+ CVMX_BOARD_TYPE_CB5200 = 27,
+ CVMX_BOARD_TYPE_GENERIC = 28, /* Special 'generic' board type, supports many boards */
+ CVMX_BOARD_TYPE_EBH5610 = 29,
+ CVMX_BOARD_TYPE_LANAI2_A = 30,
+ CVMX_BOARD_TYPE_LANAI2_U = 31,
+ CVMX_BOARD_TYPE_EBB5600 = 32,
+ CVMX_BOARD_TYPE_EBB6300 = 33,
+ CVMX_BOARD_TYPE_NIC_XLE_10G = 34,
+ CVMX_BOARD_TYPE_LANAI2_G = 35,
+ CVMX_BOARD_TYPE_EBT5810 = 36,
+ CVMX_BOARD_TYPE_NIC10E = 37,
+ CVMX_BOARD_TYPE_EP6300C = 38,
+ CVMX_BOARD_TYPE_EBB6800 = 39,
+ CVMX_BOARD_TYPE_NIC4E = 40,
+ CVMX_BOARD_TYPE_NIC2E = 41,
+ CVMX_BOARD_TYPE_EBB6600 = 42,
+ CVMX_BOARD_TYPE_REDWING = 43,
+ CVMX_BOARD_TYPE_NIC68_4 = 44,
+ CVMX_BOARD_TYPE_NIC10E_66 = 45,
+ CVMX_BOARD_TYPE_EBB6100 = 46,
+ CVMX_BOARD_TYPE_EVB7100 = 47,
+ CVMX_BOARD_TYPE_MAX,
+ /* NOTE: 256-257 are being used by a customer. */
+
+ /* The range from CVMX_BOARD_TYPE_MAX to CVMX_BOARD_TYPE_CUST_DEFINED_MIN is reserved
+ ** for future SDK use. */
+
+ /* Set aside a range for customer boards. These numbers are managed
+ ** by Cavium.
+ */
+ CVMX_BOARD_TYPE_CUST_DEFINED_MIN = 10000,
+ CVMX_BOARD_TYPE_CUST_WSX16 = 10001,
+ CVMX_BOARD_TYPE_CUST_NS0216 = 10002,
+ CVMX_BOARD_TYPE_CUST_NB5 = 10003,
+ CVMX_BOARD_TYPE_CUST_WMR500 = 10004,
+ CVMX_BOARD_TYPE_CUST_ITB101 = 10005,
+ CVMX_BOARD_TYPE_CUST_NTE102 = 10006,
+ CVMX_BOARD_TYPE_CUST_AGS103 = 10007,
+#if !defined(OCTEON_VENDOR_LANNER)
+ CVMX_BOARD_TYPE_CUST_GST104 = 10008,
+#else
+ CVMX_BOARD_TYPE_CUST_LANNER_MR955= 10008,
+#endif
+ CVMX_BOARD_TYPE_CUST_GCT105 = 10009,
+ CVMX_BOARD_TYPE_CUST_AGS106 = 10010,
+ CVMX_BOARD_TYPE_CUST_SGM107 = 10011,
+ CVMX_BOARD_TYPE_CUST_GCT108 = 10012,
+ CVMX_BOARD_TYPE_CUST_AGS109 = 10013,
+ CVMX_BOARD_TYPE_CUST_GCT110 = 10014,
+ CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER = 10015,
+ CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER= 10016,
+ CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX = 10017,
+ CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX = 10018,
+ CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX= 10019,
+ CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX= 10020,
+#if defined(OCTEON_VENDOR_LANNER)
+ CVMX_BOARD_TYPE_CUST_LANNER_MR730 = 10021,
+#else
+ CVMX_BOARD_TYPE_CUST_L2_ZINWELL = 10021,
+#endif
+ CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
+
+ /* Set aside a range for customer private use. The SDK won't
+ ** use any numbers in this range. */
+ CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
+#if defined(OCTEON_VENDOR_LANNER)
+ CVMX_BOARD_TYPE_CUST_LANNER_MR320= 20002,
+ CVMX_BOARD_TYPE_CUST_LANNER_MR321X=20007,
+#endif
+#if defined(OCTEON_VENDOR_UBIQUITI)
+ CVMX_BOARD_TYPE_CUST_UBIQUITI_E100=20002,
+#endif
+#if defined(OCTEON_VENDOR_RADISYS)
+ CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE=20002,
+#endif
+#if defined(OCTEON_VENDOR_GEFES)
+ CVMX_BOARD_TYPE_CUST_TNPA5804 = 20005,
+ CVMX_BOARD_TYPE_CUST_W5434 = 20006,
+ CVMX_BOARD_TYPE_CUST_W5650 = 20007,
+ CVMX_BOARD_TYPE_CUST_W5800 = 20008,
+ CVMX_BOARD_TYPE_CUST_W5651X = 20009,
+ CVMX_BOARD_TYPE_CUST_TNPA5651X = 20010,
+ CVMX_BOARD_TYPE_CUST_TNPA56X4 = 20011,
+ CVMX_BOARD_TYPE_CUST_W63XX = 20013,
+#endif
+ CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
+
+
+ /* Range for IO modules */
+ CVMX_BOARD_TYPE_MODULE_MIN = 30001,
+ CVMX_BOARD_TYPE_MODULE_PCIE_RC_4X = 30002,
+ CVMX_BOARD_TYPE_MODULE_PCIE_EP_4X = 30003,
+ CVMX_BOARD_TYPE_MODULE_SGMII_MARVEL = 30004,
+ CVMX_BOARD_TYPE_MODULE_SFPPLUS_BCM = 30005,
+ CVMX_BOARD_TYPE_MODULE_SRIO = 30006,
+ CVMX_BOARD_TYPE_MODULE_EBB5600_QLM0 = 30007,
+ CVMX_BOARD_TYPE_MODULE_EBB5600_QLM1 = 30008,
+ CVMX_BOARD_TYPE_MODULE_EBB5600_QLM2 = 30009,
+ CVMX_BOARD_TYPE_MODULE_EBB5600_QLM3 = 30010,
+ CVMX_BOARD_TYPE_MODULE_MAX = 31000
+
+ /* The remaining range is reserved for future use. */
+};
+enum cvmx_chip_types_enum {
+ CVMX_CHIP_TYPE_NULL = 0,
+ CVMX_CHIP_SIM_TYPE_DEPRECATED = 1,
+ CVMX_CHIP_TYPE_OCTEON_SAMPLE = 2,
+ CVMX_CHIP_TYPE_MAX
+};
+
+/* Compatibility alias for NAC38 name change, planned to be removed from SDK 1.7 */
+#define CVMX_BOARD_TYPE_NAO38 CVMX_BOARD_TYPE_NAC38
+
+/* Functions to return string based on type */
+#define ENUM_BRD_TYPE_CASE(x) case x: return(#x + 16); /* Skip CVMX_BOARD_TYPE_ */
+static inline const char *cvmx_board_type_to_string(enum cvmx_board_types_enum type)
+{
+ switch (type)
+ {
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NULL)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SIM)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT3000)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KODAMA)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIAGARA)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NAC38)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_THUNDER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_TRANTOR)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3000)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3100)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_HIKARI)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3010_EVB_HS5)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3005_EVB_HS5)
+#if defined(OCTEON_VENDOR_GEFES)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_TNPA3804)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_AT5810)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_WNPA3850)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_W3860)
+#else
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KBP)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3020_EVB_HS5)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5800)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NICPRO2)
+#endif
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5601)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5200)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_BBGW_REF)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_4G)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5201)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5200)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5601)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5200)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_GENERIC)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5610)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_A)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_U)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB5600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6300)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_10G)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_G)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5810)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EP6300C)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6800)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC4E)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC2E)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_REDWING)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC68_4)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E_66)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6100)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EVB7100)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX)
+
+ /* Customer boards listed here */
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MIN)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WSX16)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NS0216)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NB5)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WMR500)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_ITB101)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NTE102)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS103)
+#if !defined(OCTEON_VENDOR_LANNER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GST104)
+#else
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_LANNER_MR955)
+#endif
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT105)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS106)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_SGM107)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT108)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS109)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT110)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX)
+#if defined(OCTEON_VENDOR_LANNER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_LANNER_MR730)
+#else
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ZINWELL)
+#endif
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MAX)
+
+ /* Customer private range */
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN)
+#if defined(OCTEON_VENDOR_LANNER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_LANNER_MR320)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_LANNER_MR321X)
+#endif
+#if defined(OCTEON_VENDOR_UBIQUITI)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_UBIQUITI_E100)
+#endif
+#if defined(OCTEON_VENDOR_RADISYS)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE)
+#endif
+#if defined(OCTEON_VENDOR_GEFES)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_TNPA5804)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_W5434)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_W5650)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_W5800)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_W5651X)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_TNPA5651X)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_TNPA56X4)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_W63XX)
+#endif
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
+
+ /* Module range */
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_MIN)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_PCIE_RC_4X)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_PCIE_EP_4X)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_SGMII_MARVEL)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_SFPPLUS_BCM)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_SRIO)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM0)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM1)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM2)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM3)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_MAX)
+ }
+ return "Unsupported Board";
+}
+
+#define ENUM_CHIP_TYPE_CASE(x) case x: return(#x + 15); /* Skip CVMX_CHIP_TYPE_ */
+static inline const char *cvmx_chip_type_to_string(enum cvmx_chip_types_enum type)
+{
+ switch (type)
+ {
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_NULL)
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_SIM_TYPE_DEPRECATED)
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_OCTEON_SAMPLE)
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_MAX)
+ }
+ return "Unsupported Chip";
+}
+
+
+extern int cvmx_debug_uart;
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_APP_INIT_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-app-init.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-asm.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-asm.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-asm.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,668 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file defines ASM primitives for the executive.
+
+ * <hr>$Revision: 70030 $<hr>
+ *
+ *
+ */
+#ifndef __CVMX_ASM_H__
+#define __CVMX_ASM_H__
+
+#define CVMX_MAX_CORES (32)
+
+#define COP0_INDEX $0,0 /* TLB read/write index */
+#define COP0_RANDOM $1,0 /* TLB random index */
+#define COP0_ENTRYLO0 $2,0 /* TLB entryLo0 */
+#define COP0_ENTRYLO1 $3,0 /* TLB entryLo1 */
+#define COP0_CONTEXT $4,0 /* Context */
+#define COP0_PAGEMASK $5,0 /* TLB pagemask */
+#define COP0_PAGEGRAIN $5,1 /* TLB config for max page sizes */
+#define COP0_WIRED $6,0 /* TLB number of wired entries */
+#define COP0_HWRENA $7,0 /* rdhw instruction enable per register */
+#define COP0_BADVADDR $8,0 /* Bad virtual address */
+#define COP0_COUNT $9,0 /* Mips count register */
+#define COP0_CVMCOUNT $9,6 /* Cavium count register */
+#define COP0_CVMCTL $9,7 /* Cavium control */
+#define COP0_ENTRYHI $10,0 /* TLB entryHi */
+#define COP0_COMPARE $11,0 /* Mips compare register */
+#define COP0_POWTHROTTLE $11,6 /* Power throttle register */
+#define COP0_CVMMEMCTL $11,7 /* Cavium memory control */
+#define COP0_STATUS $12,0 /* Mips status register */
+#define COP0_INTCTL $12,1 /* Useless (Vectored interrupts) */
+#define COP0_SRSCTL $12,2 /* Useless (Shadow registers) */
+#define COP0_CAUSE $13,0 /* Mips cause register */
+#define COP0_EPC $14,0 /* Exception program counter */
+#define COP0_PRID $15,0 /* Processor ID */
+#define COP0_EBASE $15,1 /* Exception base */
+#define COP0_CONFIG $16,0 /* Misc config options */
+#define COP0_CONFIG1 $16,1 /* Misc config options */
+#define COP0_CONFIG2 $16,2 /* Misc config options */
+#define COP0_CONFIG3 $16,3 /* Misc config options */
+#define COP0_WATCHLO0 $18,0 /* Address watch registers */
+#define COP0_WATCHLO1 $18,1 /* Address watch registers */
+#define COP0_WATCHHI0 $19,0 /* Address watch registers */
+#define COP0_WATCHHI1 $19,1 /* Address watch registers */
+#define COP0_XCONTEXT $20,0 /* OS context */
+#define COP0_MULTICOREDEBUG $22,0 /* Cavium debug */
+#define COP0_DEBUG $23,0 /* Debug status */
+#define COP0_DEPC $24,0 /* Debug PC */
+#define COP0_PERFCONTROL0 $25,0 /* Performance counter control */
+#define COP0_PERFCONTROL1 $25,2 /* Performance counter control */
+#define COP0_PERFVALUE0 $25,1 /* Performance counter */
+#define COP0_PERFVALUE1 $25,3 /* Performance counter */
+#define COP0_CACHEERRI $27,0 /* I cache error status */
+#define COP0_CACHEERRD $27,1 /* D cache error status */
+#define COP0_TAGLOI $28,0 /* I cache tagLo */
+#define COP0_TAGLOD $28,2 /* D cache tagLo */
+#define COP0_DATALOI $28,1 /* I cache dataLo */
+#define COP0_DATALOD $28,3 /* D cache dataLo */
+#define COP0_TAGHI $29,2 /* ? */
+#define COP0_DATAHII $29,1 /* ? */
+#define COP0_DATAHID $29,3 /* ? */
+#define COP0_ERROREPC $30,0 /* Error PC */
+#define COP0_DESAVE $31,0 /* Debug scratch area */
+
+/* This header file can be included from a .S file. Keep non-preprocessor
+ things under !__ASSEMBLER__. */
+#ifndef __ASSEMBLER__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* turn the variable name into a string */
+#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
+#define CVMX_TMP_STR2(x) #x
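+/* Example of the two-level expansion: given "#define OFF 8",
+ * CVMX_TMP_STR(OFF) yields "8", whereas stringifying directly with #OFF
+ * would yield "OFF". The CVMX_TMP_STR2 level forces macro expansion of the
+ * argument before stringification; the asm macros below rely on this to
+ * splice numeric offsets into instruction strings. */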
+
+/* Octeon2 models need only a single syncw, so flag Octeon2 builds here. */
+#ifdef _MIPS_ARCH_OCTEON2
+#define CVMX_CAVIUM_OCTEON2 1
+#endif
+
+/* other useful stuff */
+#define CVMX_BREAK asm volatile ("break")
+#define CVMX_SYNC asm volatile ("sync" : : :"memory")
+/* String version of SYNCW macro for using in inline asm constructs */
+#define CVMX_SYNCW_STR_OCTEON2 "syncw\n"
+#ifdef CVMX_CAVIUM_OCTEON2
+ #define CVMX_SYNCW_STR CVMX_SYNCW_STR_OCTEON2
+#else
+ #define CVMX_SYNCW_STR "syncw\nsyncw\n"
+#endif /* CVMX_CAVIUM_OCTEON2 */
+
+#ifdef __OCTEON__
+ #define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : :"memory")
+ /* We actually use two syncw instructions in a row when we need a write
+ memory barrier. This is because the CN3XXX series of Octeons have
+ errata Core-401. This can cause a single syncw to not enforce
+ ordering under very rare conditions. Even if it is rare, better safe
+ than sorry */
+ #define CVMX_SYNCW_OCTEON2 asm volatile ("syncw\n" : : :"memory")
+ #ifdef CVMX_CAVIUM_OCTEON2
+ #define CVMX_SYNCW CVMX_SYNCW_OCTEON2
+ #else
+ #define CVMX_SYNCW asm volatile ("syncw\nsyncw\n" : : :"memory")
+ #endif /* CVMX_CAVIUM_OCTEON2 */
+#if defined(VXWORKS) || defined(__linux__)
+ /* Define new sync instructions to be normal SYNC instructions for
+ operating systems that use threads */
+ #define CVMX_SYNCWS CVMX_SYNCW
+ #define CVMX_SYNCS CVMX_SYNC
+ #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+ #define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW_OCTEON2
+ #define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR_OCTEON2
+#else
+ #if defined(CVMX_BUILD_FOR_TOOLCHAIN)
+ /* While building simple exec toolchain, always use syncw to
+ support all Octeon models. */
+ #define CVMX_SYNCWS CVMX_SYNCW
+ #define CVMX_SYNCS CVMX_SYNC
+ #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+ #define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW_OCTEON2
+ #define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR_OCTEON2
+ #else
+ /* Again, just like syncw, we may need two syncws instructions in a row due
+ to errata Core-401. Only one syncws is required for Octeon2 models */
+ #define CVMX_SYNCS asm volatile ("syncs" : : :"memory")
+ #define CVMX_SYNCWS_OCTEON2 asm volatile ("syncws\n" : : :"memory")
+ #define CVMX_SYNCWS_STR_OCTEON2 "syncws\n"
+ #ifdef CVMX_CAVIUM_OCTEON2
+ #define CVMX_SYNCWS CVMX_SYNCWS_OCTEON2
+ #define CVMX_SYNCWS_STR CVMX_SYNCWS_STR_OCTEON2
+ #else
+ #define CVMX_SYNCWS asm volatile ("syncws\nsyncws\n" : : :"memory")
+ #define CVMX_SYNCWS_STR "syncws\nsyncws\n"
+ #endif /* CVMX_CAVIUM_OCTEON2 */
+ #endif
+#endif
+#else /* !__OCTEON__ */
+ /* Not using a Cavium compiler, always use the slower sync so the assembler stays happy */
+ #define CVMX_SYNCIOBDMA asm volatile ("sync" : : :"memory")
+ #define CVMX_SYNCW asm volatile ("sync" : : :"memory")
+ #define CVMX_SYNCWS CVMX_SYNCW
+ #define CVMX_SYNCS CVMX_SYNC
+ #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+ #define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW
+ #define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR
+#endif
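+
+/* Example (illustrative sketch; the buffer/flag names are hypothetical):
+   a typical producer-side use of the write barrier above. */
+#if 0
+static volatile uint64_t flag;
+static uint64_t buffer[16];
+
+static void producer(void)
+{
+    buffer[0] = 0x1234;   /* fill the payload */
+    CVMX_SYNCW;           /* order the payload store before the flag store */
+    flag = 1;             /* another core may now read buffer[] */
+}
+#endif
+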
+#define CVMX_SYNCI(address, offset) asm volatile ("synci " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address) )
+#define CVMX_PREFETCH0(address) CVMX_PREFETCH(address, 0)
+#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)
+// a normal prefetch
+#define CVMX_PREFETCH(address, offset) CVMX_PREFETCH_PREF0(address, offset)
+// normal prefetches that use the pref instruction
+#define CVMX_PREFETCH_PREFX(X, address, offset) asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))
+#define CVMX_PREFETCH_PREF0(address, offset) CVMX_PREFETCH_PREFX(0, address, offset)
+#define CVMX_PREFETCH_PREF1(address, offset) CVMX_PREFETCH_PREFX(1, address, offset)
+#define CVMX_PREFETCH_PREF6(address, offset) CVMX_PREFETCH_PREFX(6, address, offset)
+#define CVMX_PREFETCH_PREF7(address, offset) CVMX_PREFETCH_PREFX(7, address, offset)
+// prefetch into L1, do not put the block in the L2
+#define CVMX_PREFETCH_NOTL2(address, offset) CVMX_PREFETCH_PREFX(4, address, offset)
+#define CVMX_PREFETCH_NOTL22(address, offset) CVMX_PREFETCH_PREFX(5, address, offset)
+// prefetch into L2, do not put the block in the L1
+#define CVMX_PREFETCH_L2(address, offset) CVMX_PREFETCH_PREFX(28, address, offset)
+// CVMX_PREPARE_FOR_STORE makes each byte of the block unpredictable (actually old value or zero) until
+// that byte is stored to (by this or another processor). Note that the value of each byte is not only
+// unpredictable, but may also change again - up until the point when one of the cores stores to the
+// byte.
+#define CVMX_PREPARE_FOR_STORE(address, offset) CVMX_PREFETCH_PREFX(30, address, offset)
+// This is a command headed to the L2 controller to tell it to clear its dirty bit for a
+// block. Basically, SW is telling HW that the current version of the block will not be
+// used.
+#define CVMX_DONT_WRITE_BACK(address, offset) CVMX_PREFETCH_PREFX(29, address, offset)
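+
+/* Example (illustrative sketch; function and variable names are
+   hypothetical): walking a buffer while prefetching one 128-byte cache
+   line ahead with the macros above. */
+#if 0
+static uint64_t sum_lines(const uint64_t *p, int lines)
+{
+    uint64_t sum = 0;
+    int i, j;
+    for (i = 0; i < lines; i++, p += 16) {
+        CVMX_PREFETCH128(p);          /* start fetching the next line */
+        for (j = 0; j < 16; j++)
+            sum += p[j];
+    }
+    return sum;
+}
+#endif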
+
+#define CVMX_ICACHE_INVALIDATE { CVMX_SYNC; asm volatile ("synci 0($0)" : : ); } // flush stores, invalidate entire icache
+#define CVMX_ICACHE_INVALIDATE2 { CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); } // flush stores, invalidate entire icache
+#define CVMX_DCACHE_INVALIDATE { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); } // complete prefetches, invalidate entire dcache
+
+#define CVMX_CACHE(op, address, offset) asm volatile ("cache " CVMX_TMP_STR(op) ", " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address) )
+#define CVMX_CACHE_LCKL2(address, offset) CVMX_CACHE(31, address, offset) // fetch and lock the state.
+#define CVMX_CACHE_WBIL2(address, offset) CVMX_CACHE(23, address, offset) // unlock the state.
+#define CVMX_CACHE_WBIL2I(address, offset) CVMX_CACHE(3, address, offset) // invalidate the cache block and clear the USED bits for the block
+#define CVMX_CACHE_LTGL2I(address, offset) CVMX_CACHE(7, address, offset) // load virtual tag and data for the L2 cache block into L2C_TAD0_TAG register
+
+/* new instruction to make RC4 run faster */
+#define CVMX_BADDU(result, input1, input2) asm ("baddu %[rd],%[rs],%[rt]" : [rd] "=d" (result) : [rs] "d" (input1) , [rt] "d" (input2))
+
+// misc v2 stuff
+#define CVMX_ROTR(result, input1, shiftconst) asm ("rotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
+#define CVMX_ROTRV(result, input1, input2) asm ("rotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))
+#define CVMX_DROTR(result, input1, shiftconst) asm ("drotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
+#define CVMX_DROTRV(result, input1, input2) asm ("drotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))
+#define CVMX_SEB(result, input1) asm ("seb %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
+#define CVMX_SEH(result, input1) asm ("seh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
+#define CVMX_DSBH(result, input1) asm ("dsbh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
+#define CVMX_DSHD(result, input1) asm ("dshd %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
+#define CVMX_WSBH(result, input1) asm ("wsbh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
+
+// Endian swap
+#define CVMX_ES64(result, input) \
+ do {\
+ CVMX_DSBH(result, input); \
+ CVMX_DSHD(result, result); \
+ } while (0)
+#define CVMX_ES32(result, input) \
+ do {\
+ CVMX_WSBH(result, input); \
+ CVMX_ROTR(result, result, 16); \
+ } while (0)
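+
+/* Example (illustrative sketch): CVMX_ES64 is a full 8-byte reversal,
+   since dsbh swaps the bytes within each halfword and dshd then swaps
+   the halfwords themselves. */
+#if 0
+static uint64_t swap64(uint64_t v)
+{
+    uint64_t r;
+    CVMX_ES64(r, v);   /* e.g. 0x0102030405060708 -> 0x0807060504030201 */
+    return r;
+}
+#endif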
+
+
+/* extract and insert - NOTE that pos and len variables must be constants! */
+/* the P variants take len rather than lenm1 */
+/* the M1 variants take lenm1 rather than len */
+#define CVMX_EXTS(result,input,pos,lenm1) asm ("exts %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
+#define CVMX_EXTSP(result,input,pos,len) CVMX_EXTS(result,input,pos,(len)-1)
+
+#define CVMX_DEXT(result,input,pos,len) asm ("dext %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len) : [rt] "=d" (result) : [rs] "d" (input))
+#define CVMX_DEXTM1(result,input,pos,lenm1) CVMX_DEXT(result,input,pos,(lenm1)+1)
+
+#define CVMX_EXT(result,input,pos,len) asm ("ext %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len) : [rt] "=d" (result) : [rs] "d" (input))
+#define CVMX_EXTM1(result,input,pos,lenm1) CVMX_EXT(result,input,pos,(lenm1)+1)
+
+// removed
+// #define CVMX_EXTU(result,input,pos,lenm1) asm ("extu %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
+// #define CVMX_EXTUP(result,input,pos,len) CVMX_EXTU(result,input,pos,(len)-1)
+
+#define CVMX_CINS(result,input,pos,lenm1) asm ("cins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
+#define CVMX_CINSP(result,input,pos,len) CVMX_CINS(result,input,pos,(len)-1)
+
+#define CVMX_DINS(result,input,pos,len) asm ("dins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
+#define CVMX_DINSM1(result,input,pos,lenm1) CVMX_DINS(result,input,pos,(lenm1)+1)
+#define CVMX_DINSC(result,pos,len) asm ("dins %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): "[rt]" (result))
+#define CVMX_DINSCM1(result,pos,lenm1) CVMX_DINSC(result,pos,(lenm1)+1)
+
+#define CVMX_INS(result,input,pos,len) asm ("ins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
+#define CVMX_INSM1(result,input,pos,lenm1) CVMX_INS(result,input,pos,(lenm1)+1)
+#define CVMX_INSC(result,pos,len) asm ("ins %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): "[rt]" (result))
+#define CVMX_INSCM1(result,pos,lenm1) CVMX_INSC(result,pos,(lenm1)+1)
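+
+/* Example (illustrative sketch; the field position is hypothetical):
+   reading and rewriting a 4-bit field at bits <11:8>. As noted above,
+   pos and len must be compile-time constants because they are pasted
+   into the instruction string. */
+#if 0
+static uint64_t update_field(uint64_t word, uint64_t field)
+{
+    uint64_t old;
+    CVMX_DEXT(old, word, 8, 4);     /* old = word<11:8> */
+    (void)old;
+    CVMX_DINS(word, field, 8, 4);   /* word<11:8> = field<3:0> */
+    return word;
+}
+#endif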
+
+// removed
+// #define CVMX_INS0(result,input,pos,lenm1) asm("ins0 %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
+// #define CVMX_INS0P(result,input,pos,len) CVMX_INS0(result,input,pos,(len)-1)
+// #define CVMX_INS0C(result,pos,lenm1) asm ("ins0 %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : "[rt]" (result))
+// #define CVMX_INS0CP(result,pos,len) CVMX_INS0C(result,pos,(len)-1)
+
+#define CVMX_CLZ(result, input) asm ("clz %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_DCLZ(result, input) asm ("dclz %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_CLO(result, input) asm ("clo %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_DCLO(result, input) asm ("dclo %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_POP(result, input) asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_DPOP(result, input) asm ("dpop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
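+
+/* Example (illustrative sketch): population count and leading-zero
+   count on a 64-bit mask using the macros above. */
+#if 0
+static void mask_stats(uint64_t mask, uint64_t *bits, uint64_t *lz)
+{
+    CVMX_DPOP(*bits, mask);   /* number of set bits */
+    CVMX_DCLZ(*lz, mask);     /* leading zeros; 64 when mask == 0 */
+}
+#endif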
+
+#ifdef CVMX_ABI_O32
+
+  /* rdhwr $31 is the 64-bit CvmCount register; it needs to be split
+     into one or two (depending on the width of the result) properly
+     sign-extended registers. All other registers are 32 bits wide
+     and already properly sign extended. */
+# define CVMX_RDHWRX(result, regstr, ASM_STMT) ({ \
+ if (regstr == 31) { \
+ if (sizeof(result) == 8) { \
+ ASM_STMT (".set\tpush\n" \
+ "\t.set\tmips64r2\n" \
+ "\trdhwr\t%L0,$31\n" \
+ "\tdsra\t%M0,%L0,32\n" \
+ "\tsll\t%L0,%L0,0\n" \
+ "\t.set\tpop": "=d"(result)); \
+ } else { \
+ unsigned long _v; \
+ ASM_STMT ("rdhwr\t%0,$31\n" \
+ "\tsll\t%0,%0,0" : "=d"(_v)); \
+ result = (__typeof(result))_v; \
+ } \
+ } else { \
+ unsigned long _v; \
+ ASM_STMT ("rdhwr\t%0,$" CVMX_TMP_STR(regstr) : "=d"(_v)); \
+ result = (__typeof(result))_v; \
+ }})
+
+
+
+# define CVMX_RDHWR(result, regstr) CVMX_RDHWRX(result, regstr, asm volatile)
+# define CVMX_RDHWRNV(result, regstr) CVMX_RDHWRX(result, regstr, asm)
+#else
+# define CVMX_RDHWR(result, regstr) asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
+# define CVMX_RDHWRNV(result, regstr) asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
+#endif
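+
+/* Example (illustrative sketch): timing a code region with the 64-bit
+   CvmCount cycle counter (hardware register $31), which works in both
+   the O32 and native cases above. */
+#if 0
+static uint64_t time_region(void)
+{
+    uint64_t start, stop;
+    CVMX_RDHWR(start, 31);
+    /* ... work to be timed ... */
+    CVMX_RDHWR(stop, 31);
+    return stop - start;
+}
+#endif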
+
+// some new cop0-like stuff
+#define CVMX_DI(result) asm volatile ("di %[rt]" : [rt] "=d" (result))
+#define CVMX_DI_NULL asm volatile ("di")
+#define CVMX_EI(result) asm volatile ("ei %[rt]" : [rt] "=d" (result))
+#define CVMX_EI_NULL asm volatile ("ei")
+#define CVMX_EHB asm volatile ("ehb")
+
+/* mul stuff */
+#define CVMX_MTM0(m) asm volatile ("mtm0 %[rs]" : : [rs] "d" (m))
+#define CVMX_MTM1(m) asm volatile ("mtm1 %[rs]" : : [rs] "d" (m))
+#define CVMX_MTM2(m) asm volatile ("mtm2 %[rs]" : : [rs] "d" (m))
+#define CVMX_MTP0(p) asm volatile ("mtp0 %[rs]" : : [rs] "d" (p))
+#define CVMX_MTP1(p) asm volatile ("mtp1 %[rs]" : : [rs] "d" (p))
+#define CVMX_MTP2(p) asm volatile ("mtp2 %[rs]" : : [rs] "d" (p))
+#define CVMX_VMULU(dest,mpcand,accum) asm volatile ("vmulu %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))
+#define CVMX_VMM0(dest,mpcand,accum) asm volatile ("vmm0 %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))
+#define CVMX_V3MULU(dest,mpcand,accum) asm volatile ("v3mulu %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))
+
+/* branch stuff */
+// these are hard to make work because the compiler does not realize that the
+// instruction is a branch, so it may optimize away the label
+// the labels to these next two macros must not include a ":" at the end
+#define CVMX_BBIT1(var, pos, label) asm volatile ("bbit1 %[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(label) : : [rs] "d" (var))
+#define CVMX_BBIT0(var, pos, label) asm volatile ("bbit0 %[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(label) : : [rs] "d" (var))
+// the label to this macro must include a ":" at the end
+#define CVMX_ASM_LABEL(label) label \
+ asm volatile (CVMX_TMP_STR(label) : : )
+
+//
+// Low-latency memory stuff
+//
+// set can be 0-1
+#define CVMX_MT_LLM_READ_ADDR(set,val) asm volatile ("dmtc2 %[rt],0x0400+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
+#define CVMX_MT_LLM_WRITE_ADDR_INTERNAL(set,val) asm volatile ("dmtc2 %[rt],0x0401+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
+#define CVMX_MT_LLM_READ64_ADDR(set,val) asm volatile ("dmtc2 %[rt],0x0404+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
+#define CVMX_MT_LLM_WRITE64_ADDR_INTERNAL(set,val) asm volatile ("dmtc2 %[rt],0x0405+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
+#define CVMX_MT_LLM_DATA(set,val) asm volatile ("dmtc2 %[rt],0x0402+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
+#define CVMX_MF_LLM_DATA(set,val) asm volatile ("dmfc2 %[rt],0x0402+(8*(" CVMX_TMP_STR(set) "))" : [rt] "=d" (val) : )
+
+
+// load linked, store conditional
+#define CVMX_LL(dest, address, offset) asm volatile ("ll %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (dest) : [rbase] "d" (address) )
+#define CVMX_LLD(dest, address, offset) asm volatile ("lld %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (dest) : [rbase] "d" (address) )
+#define CVMX_SC(srcdest, address, offset) asm volatile ("sc %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
+#define CVMX_SCD(srcdest, address, offset) asm volatile ("scd %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
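+
+/* Example (illustrative sketch): the classic load-linked/store-conditional
+   retry loop, here as a 64-bit atomic add. SCD leaves 1 in its register
+   on success and 0 if the reservation was lost. */
+#if 0
+static void atomic_add64(uint64_t *ptr, uint64_t inc)
+{
+    uint64_t tmp;
+    do {
+        CVMX_LLD(tmp, ptr, 0);    /* tmp = *ptr, open the reservation */
+        tmp += inc;
+        CVMX_SCD(tmp, ptr, 0);    /* *ptr = tmp if still reserved */
+    } while (!tmp);               /* retry if another core intervened */
+}
+#endif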
+
+// load/store word left/right
+#define CVMX_LWR(srcdest, address, offset) asm volatile ("lwr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
+#define CVMX_LWL(srcdest, address, offset) asm volatile ("lwl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
+#define CVMX_LDR(srcdest, address, offset) asm volatile ("ldr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
+#define CVMX_LDL(srcdest, address, offset) asm volatile ("ldl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
+
+#define CVMX_SWR(src, address, offset) asm volatile ("swr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
+#define CVMX_SWL(src, address, offset) asm volatile ("swl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
+#define CVMX_SDR(src, address, offset) asm volatile ("sdr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
+#define CVMX_SDL(src, address, offset) asm volatile ("sdl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
+
+
+
+//
+// Useful crypto ASM's
+//
+
+// CRC
+
+#define CVMX_MT_CRC_POLYNOMIAL(val) asm volatile ("dmtc2 %[rt],0x4200" : : [rt] "d" (val))
+#define CVMX_MT_CRC_IV(val) asm volatile ("dmtc2 %[rt],0x0201" : : [rt] "d" (val))
+#define CVMX_MT_CRC_LEN(val) asm volatile ("dmtc2 %[rt],0x1202" : : [rt] "d" (val))
+#define CVMX_MT_CRC_BYTE(val) asm volatile ("dmtc2 %[rt],0x0204" : : [rt] "d" (val))
+#define CVMX_MT_CRC_HALF(val) asm volatile ("dmtc2 %[rt],0x0205" : : [rt] "d" (val))
+#define CVMX_MT_CRC_WORD(val) asm volatile ("dmtc2 %[rt],0x0206" : : [rt] "d" (val))
+#define CVMX_MT_CRC_DWORD(val) asm volatile ("dmtc2 %[rt],0x1207" : : [rt] "d" (val))
+#define CVMX_MT_CRC_VAR(val) asm volatile ("dmtc2 %[rt],0x1208" : : [rt] "d" (val))
+#define CVMX_MT_CRC_POLYNOMIAL_REFLECT(val) asm volatile ("dmtc2 %[rt],0x4210" : : [rt] "d" (val))
+#define CVMX_MT_CRC_IV_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0211" : : [rt] "d" (val))
+#define CVMX_MT_CRC_BYTE_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0214" : : [rt] "d" (val))
+#define CVMX_MT_CRC_HALF_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0215" : : [rt] "d" (val))
+#define CVMX_MT_CRC_WORD_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0216" : : [rt] "d" (val))
+#define CVMX_MT_CRC_DWORD_REFLECT(val) asm volatile ("dmtc2 %[rt],0x1217" : : [rt] "d" (val))
+#define CVMX_MT_CRC_VAR_REFLECT(val) asm volatile ("dmtc2 %[rt],0x1218" : : [rt] "d" (val))
+
+#define CVMX_MF_CRC_POLYNOMIAL(val) asm volatile ("dmfc2 %[rt],0x0200" : [rt] "=d" (val) : )
+#define CVMX_MF_CRC_IV(val) asm volatile ("dmfc2 %[rt],0x0201" : [rt] "=d" (val) : )
+#define CVMX_MF_CRC_IV_REFLECT(val) asm volatile ("dmfc2 %[rt],0x0203" : [rt] "=d" (val) : )
+#define CVMX_MF_CRC_LEN(val) asm volatile ("dmfc2 %[rt],0x0202" : [rt] "=d" (val) : )
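+
+/* Example (rough sketch; the exact polynomial register format and the
+   reflection requirements are assumptions to verify against the
+   hardware manual): a plausible CRC flow is to load the polynomial and
+   IV, feed data, then read the updated IV back. */
+#if 0
+static uint32_t crc32_dwords(const uint64_t *data, int n, uint32_t iv)
+{
+    uint64_t result;
+    int i;
+    CVMX_MT_CRC_POLYNOMIAL(0x04c11db7);   /* CRC-32 poly; format per HRM */
+    CVMX_MT_CRC_IV(iv);
+    for (i = 0; i < n; i++)
+        CVMX_MT_CRC_DWORD(data[i]);
+    CVMX_MF_CRC_IV(result);
+    return (uint32_t)result;
+}
+#endif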
+
+// MD5 and SHA-1
+
+// pos can be 0-6
+#define CVMX_MT_HSH_DAT(val,pos) asm volatile ("dmtc2 %[rt],0x0040+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_HSH_DATZ(pos) asm volatile ("dmtc2 $0,0x0040+" CVMX_TMP_STR(pos) : : )
+// pos can be 0-14
+#define CVMX_MT_HSH_DATW(val,pos) asm volatile ("dmtc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_HSH_DATWZ(pos) asm volatile ("dmtc2 $0,0x0240+" CVMX_TMP_STR(pos) : : )
+#define CVMX_MT_HSH_STARTMD5(val) asm volatile ("dmtc2 %[rt],0x4047" : : [rt] "d" (val))
+#define CVMX_MT_HSH_STARTSHA(val) asm volatile ("dmtc2 %[rt],0x4057" : : [rt] "d" (val))
+#define CVMX_MT_HSH_STARTSHA256(val) asm volatile ("dmtc2 %[rt],0x404f" : : [rt] "d" (val))
+#define CVMX_MT_HSH_STARTSHA512(val) asm volatile ("dmtc2 %[rt],0x424f" : : [rt] "d" (val))
+// pos can be 0-3
+#define CVMX_MT_HSH_IV(val,pos) asm volatile ("dmtc2 %[rt],0x0048+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+// pos can be 0-7
+#define CVMX_MT_HSH_IVW(val,pos) asm volatile ("dmtc2 %[rt],0x0250+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+
+// pos can be 0-6
+#define CVMX_MF_HSH_DAT(val,pos) asm volatile ("dmfc2 %[rt],0x0040+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-14
+#define CVMX_MF_HSH_DATW(val,pos) asm volatile ("dmfc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-3
+#define CVMX_MF_HSH_IV(val,pos) asm volatile ("dmfc2 %[rt],0x0048+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-7
+#define CVMX_MF_HSH_IVW(val,pos) asm volatile ("dmfc2 %[rt],0x0250+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
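+
+/* Example (rough sketch patterned on common Octeon MD5 usage; details
+   should be checked against the hardware manual): the 128-bit MD5 state
+   lives in the IV registers, the first seven dwords of a 64-byte block
+   go in via MT_HSH_DAT, and the STARTMD5 write carries the final dword.
+   Positions must be literal constants because they are stringified. */
+#if 0
+static void md5_block(uint64_t state[2], const uint64_t block[8])
+{
+    CVMX_MT_HSH_IV(state[0], 0);
+    CVMX_MT_HSH_IV(state[1], 1);
+    CVMX_MT_HSH_DAT(block[0], 0);
+    CVMX_MT_HSH_DAT(block[1], 1);
+    CVMX_MT_HSH_DAT(block[2], 2);
+    CVMX_MT_HSH_DAT(block[3], 3);
+    CVMX_MT_HSH_DAT(block[4], 4);
+    CVMX_MT_HSH_DAT(block[5], 5);
+    CVMX_MT_HSH_DAT(block[6], 6);
+    CVMX_MT_HSH_STARTMD5(block[7]);   /* last dword starts the transform */
+    CVMX_MF_HSH_IV(state[0], 0);      /* read back the updated state */
+    CVMX_MF_HSH_IV(state[1], 1);
+}
+#endif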
+
+// 3DES
+
+// pos can be 0-2
+#define CVMX_MT_3DES_KEY(val,pos) asm volatile ("dmtc2 %[rt],0x0080+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_3DES_IV(val) asm volatile ("dmtc2 %[rt],0x0084" : : [rt] "d" (val))
+#define CVMX_MT_3DES_ENC_CBC(val) asm volatile ("dmtc2 %[rt],0x4088" : : [rt] "d" (val))
+#define CVMX_MT_3DES_ENC(val) asm volatile ("dmtc2 %[rt],0x408a" : : [rt] "d" (val))
+#define CVMX_MT_3DES_DEC_CBC(val) asm volatile ("dmtc2 %[rt],0x408c" : : [rt] "d" (val))
+#define CVMX_MT_3DES_DEC(val) asm volatile ("dmtc2 %[rt],0x408e" : : [rt] "d" (val))
+#define CVMX_MT_3DES_RESULT(val) asm volatile ("dmtc2 %[rt],0x0098" : : [rt] "d" (val))
+
+// pos can be 0-2
+#define CVMX_MF_3DES_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0080+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+#define CVMX_MF_3DES_IV(val) asm volatile ("dmfc2 %[rt],0x0084" : [rt] "=d" (val) : )
+#define CVMX_MF_3DES_RESULT(val) asm volatile ("dmfc2 %[rt],0x0088" : [rt] "=d" (val) : )
+
+// KASUMI
+
+// pos can be 0-1
+#define CVMX_MT_KAS_KEY(val,pos) CVMX_MT_3DES_KEY(val,pos)
+#define CVMX_MT_KAS_ENC_CBC(val) asm volatile ("dmtc2 %[rt],0x4089" : : [rt] "d" (val))
+#define CVMX_MT_KAS_ENC(val) asm volatile ("dmtc2 %[rt],0x408b" : : [rt] "d" (val))
+#define CVMX_MT_KAS_RESULT(val) CVMX_MT_3DES_RESULT(val)
+
+// pos can be 0-1
+#define CVMX_MF_KAS_KEY(val,pos) CVMX_MF_3DES_KEY(val,pos)
+#define CVMX_MF_KAS_RESULT(val) CVMX_MF_3DES_RESULT(val)
+
+// AES
+
+#define CVMX_MT_AES_ENC_CBC0(val) asm volatile ("dmtc2 %[rt],0x0108" : : [rt] "d" (val))
+#define CVMX_MT_AES_ENC_CBC1(val) asm volatile ("dmtc2 %[rt],0x3109" : : [rt] "d" (val))
+#define CVMX_MT_AES_ENC0(val) asm volatile ("dmtc2 %[rt],0x010a" : : [rt] "d" (val))
+#define CVMX_MT_AES_ENC1(val) asm volatile ("dmtc2 %[rt],0x310b" : : [rt] "d" (val))
+#define CVMX_MT_AES_DEC_CBC0(val) asm volatile ("dmtc2 %[rt],0x010c" : : [rt] "d" (val))
+#define CVMX_MT_AES_DEC_CBC1(val) asm volatile ("dmtc2 %[rt],0x310d" : : [rt] "d" (val))
+#define CVMX_MT_AES_DEC0(val) asm volatile ("dmtc2 %[rt],0x010e" : : [rt] "d" (val))
+#define CVMX_MT_AES_DEC1(val) asm volatile ("dmtc2 %[rt],0x310f" : : [rt] "d" (val))
+// pos can be 0-3
+#define CVMX_MT_AES_KEY(val,pos) asm volatile ("dmtc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_AES_IV(val,pos) asm volatile ("dmtc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_AES_KEYLENGTH(val) asm volatile ("dmtc2 %[rt],0x0110" : : [rt] "d" (val)) // write the keylen
+// pos can be 0-1
+#define CVMX_MT_AES_RESULT(val,pos) asm volatile ("dmtc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+
+// pos can be 0-1
+#define CVMX_MF_AES_RESULT(val,pos) asm volatile ("dmfc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-1
+#define CVMX_MF_AES_IV(val,pos) asm volatile ("dmfc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-3
+#define CVMX_MF_AES_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+#define CVMX_MF_AES_KEYLENGTH(val) asm volatile ("dmfc2 %[rt],0x0110" : [rt] "=d" (val) : ) // read the keylen
+#define CVMX_MF_AES_DAT0(val) asm volatile ("dmfc2 %[rt],0x0111" : [rt] "=d" (val) : ) // first piece of input data
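+
+/* Example (rough sketch; the KEYLENGTH encoding of "64-bit words minus
+   one" is an assumption to verify against the hardware manual):
+   AES-128-CBC encryption of a single 16-byte block. */
+#if 0
+static void aes128_cbc_encrypt_block(const uint64_t key[2], uint64_t iv[2],
+                                     uint64_t blk[2])
+{
+    CVMX_MT_AES_KEY(key[0], 0);
+    CVMX_MT_AES_KEY(key[1], 1);
+    CVMX_MT_AES_KEYLENGTH(1);       /* assumed: 2 dwords - 1 for AES-128 */
+    CVMX_MT_AES_IV(iv[0], 0);
+    CVMX_MT_AES_IV(iv[1], 1);
+    CVMX_MT_AES_ENC_CBC0(blk[0]);
+    CVMX_MT_AES_ENC_CBC1(blk[1]);   /* second write starts the operation */
+    CVMX_MF_AES_RESULT(blk[0], 0);
+    CVMX_MF_AES_RESULT(blk[1], 1);
+}
+#endif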
+
+// GFM
+
+// pos can be 0-1
+#define CVMX_MF_GFM_MUL(val,pos) asm volatile ("dmfc2 %[rt],0x0258+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+#define CVMX_MF_GFM_POLY(val) asm volatile ("dmfc2 %[rt],0x025e" : [rt] "=d" (val) : )
+// pos can be 0-1
+#define CVMX_MF_GFM_RESINP(val,pos) asm volatile ("dmfc2 %[rt],0x025a+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-1
+#define CVMX_MF_GFM_RESINP_REFLECT(val,pos) asm volatile ("dmfc2 %[rt],0x005a+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+
+// pos can be 0-1
+#define CVMX_MT_GFM_MUL(val,pos) asm volatile ("dmtc2 %[rt],0x0258+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_GFM_POLY(val) asm volatile ("dmtc2 %[rt],0x025e" : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_GFM_RESINP(val,pos) asm volatile ("dmtc2 %[rt],0x025a+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_GFM_XOR0(val) asm volatile ("dmtc2 %[rt],0x025c" : : [rt] "d" (val))
+#define CVMX_MT_GFM_XORMUL1(val) asm volatile ("dmtc2 %[rt],0x425d" : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_GFM_MUL_REFLECT(val,pos)    asm volatile ("dmtc2 %[rt],0x0058+" CVMX_TMP_STR(pos) :                 : [rt] "d" (val))
+#define CVMX_MT_GFM_XOR0_REFLECT(val) asm volatile ("dmtc2 %[rt],0x005c" : : [rt] "d" (val))
+#define CVMX_MT_GFM_XORMUL1_REFLECT(val) asm volatile ("dmtc2 %[rt],0x405d" : : [rt] "d" (val))
+
+// SNOW 3G
+
+// pos can be 0-7
+#define CVMX_MF_SNOW3G_LFSR(val,pos) asm volatile ("dmfc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-2
+#define CVMX_MF_SNOW3G_FSM(val,pos) asm volatile ("dmfc2 %[rt],0x0251+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+#define CVMX_MF_SNOW3G_RESULT(val) asm volatile ("dmfc2 %[rt],0x0250" : [rt] "=d" (val) : )
+
+// pos can be 0-7
+#define CVMX_MT_SNOW3G_LFSR(val,pos) asm volatile ("dmtc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+// pos can be 0-2
+#define CVMX_MT_SNOW3G_FSM(val,pos) asm volatile ("dmtc2 %[rt],0x0251+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_SNOW3G_RESULT(val) asm volatile ("dmtc2 %[rt],0x0250" : : [rt] "d" (val))
+#define CVMX_MT_SNOW3G_START(val) asm volatile ("dmtc2 %[rt],0x404d" : : [rt] "d" (val))
+#define CVMX_MT_SNOW3G_MORE(val) asm volatile ("dmtc2 %[rt],0x404e" : : [rt] "d" (val))
+
+// SMS4
+
+// pos can be 0-1
+#define CVMX_MF_SMS4_IV(val,pos)      asm volatile ("dmfc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-1
+#define CVMX_MF_SMS4_KEY(val,pos)     asm volatile ("dmfc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-1
+#define CVMX_MF_SMS4_RESINP(val,pos)  asm volatile ("dmfc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+#define CVMX_MT_SMS4_DEC_CBC0(val) asm volatile ("dmtc2 %[rt],0x010c" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_DEC_CBC1(val) asm volatile ("dmtc2 %[rt],0x311d" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_DEC0(val) asm volatile ("dmtc2 %[rt],0x010e" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_DEC1(val) asm volatile ("dmtc2 %[rt],0x311f" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_ENC_CBC0(val) asm volatile ("dmtc2 %[rt],0x0108" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_ENC_CBC1(val) asm volatile ("dmtc2 %[rt],0x3119" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_ENC0(val) asm volatile ("dmtc2 %[rt],0x010a" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_ENC1(val) asm volatile ("dmtc2 %[rt],0x311b" : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_SMS4_IV(val,pos)      asm volatile ("dmtc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_SMS4_KEY(val,pos)     asm volatile ("dmtc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_SMS4_RESINP(val,pos)  asm volatile ("dmtc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+
+/* check_ordering stuff */
+#if 0
+#define CVMX_MF_CHORD(dest) asm volatile ("dmfc2 %[rt],0x400" : [rt] "=d" (dest) : )
+#else
+#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
+#endif
+
+#if 0
+#define CVMX_MF_CYCLE(dest) asm volatile ("dmfc0 %[rt],$9,6" : [rt] "=d" (dest) : ) // Use (64-bit) CvmCount register rather than Count
+#else
+#define CVMX_MF_CYCLE(dest) CVMX_RDHWR(dest, 31) /* reads the current (64-bit) CvmCount value */
+#endif
+
+#define CVMX_MT_CYCLE(src) asm volatile ("dmtc0 %[rt],$9,6" :: [rt] "d" (src))
+
+#define VASTR(...) #__VA_ARGS__
+
+#define CVMX_MF_COP0(val, cop0) asm volatile ("dmfc0 %[rt]," VASTR(cop0) : [rt] "=d" (val));
+#define CVMX_MT_COP0(val, cop0) asm volatile ("dmtc0 %[rt]," VASTR(cop0) : : [rt] "d" (val));
+
+#define CVMX_MF_CACHE_ERR(val) CVMX_MF_COP0(val, COP0_CACHEERRI)
+#define CVMX_MF_DCACHE_ERR(val) CVMX_MF_COP0(val, COP0_CACHEERRD)
+#define CVMX_MF_CVM_MEM_CTL(val) CVMX_MF_COP0(val, COP0_CVMMEMCTL)
+#define CVMX_MF_CVM_CTL(val) CVMX_MF_COP0(val, COP0_CVMCTL)
+#define CVMX_MT_CACHE_ERR(val) CVMX_MT_COP0(val, COP0_CACHEERRI)
+#define CVMX_MT_DCACHE_ERR(val) CVMX_MT_COP0(val, COP0_CACHEERRD)
+#define CVMX_MT_CVM_MEM_CTL(val) CVMX_MT_COP0(val, COP0_CVMMEMCTL)
+#define CVMX_MT_CVM_CTL(val) CVMX_MT_COP0(val, COP0_CVMCTL)
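+
+/* Example (illustrative sketch): the accessors expand the two-part
+   register names defined near the top of this file, e.g. reading the
+   error PC after a cache-error exception. */
+#if 0
+static uint64_t read_error_pc(void)
+{
+    uint64_t epc;
+    CVMX_MF_COP0(epc, COP0_ERROREPC);
+    return epc;
+}
+#endif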
+
+/* Macros for TLB */
+#define CVMX_TLBWI asm volatile ("tlbwi" : : )
+#define CVMX_TLBWR asm volatile ("tlbwr" : : )
+#define CVMX_TLBR asm volatile ("tlbr" : : )
+#define CVMX_TLBP asm volatile ("tlbp" : : )
+#define CVMX_MT_ENTRY_HIGH(val) asm volatile ("dmtc0 %[rt],$10,0" : : [rt] "d" (val))
+#define CVMX_MT_ENTRY_LO_0(val) asm volatile ("dmtc0 %[rt],$2,0" : : [rt] "d" (val))
+#define CVMX_MT_ENTRY_LO_1(val) asm volatile ("dmtc0 %[rt],$3,0" : : [rt] "d" (val))
+#define CVMX_MT_PAGEMASK(val) asm volatile ("mtc0 %[rt],$5,0" : : [rt] "d" (val))
+#define CVMX_MT_PAGEGRAIN(val) asm volatile ("mtc0 %[rt],$5,1" : : [rt] "d" (val))
+#define CVMX_MT_TLB_INDEX(val) asm volatile ("mtc0 %[rt],$0,0" : : [rt] "d" (val))
+#define CVMX_MT_TLB_CONTEXT(val) asm volatile ("dmtc0 %[rt],$4,0" : : [rt] "d" (val))
+#define CVMX_MT_TLB_WIRED(val) asm volatile ("mtc0 %[rt],$6,0" : : [rt] "d" (val))
+#define CVMX_MT_TLB_RANDOM(val) asm volatile ("mtc0 %[rt],$1,0" : : [rt] "d" (val))
+#define CVMX_MF_ENTRY_LO_0(val) asm volatile ("dmfc0 %[rt],$2,0" : [rt] "=d" (val):)
+#define CVMX_MF_ENTRY_LO_1(val) asm volatile ("dmfc0 %[rt],$3,0" : [rt] "=d" (val):)
+#define CVMX_MF_ENTRY_HIGH(val) asm volatile ("dmfc0 %[rt],$10,0" : [rt] "=d" (val):)
+#define CVMX_MF_PAGEMASK(val) asm volatile ("mfc0 %[rt],$5,0" : [rt] "=d" (val):)
+#define CVMX_MF_PAGEGRAIN(val) asm volatile ("mfc0 %[rt],$5,1" : [rt] "=d" (val):)
+#define CVMX_MF_TLB_WIRED(val) asm volatile ("mfc0 %[rt],$6,0" : [rt] "=d" (val):)
+#define CVMX_MF_TLB_INDEX(val) asm volatile ("mfc0 %[rt],$0,0" : [rt] "=d" (val):)
+#define CVMX_MF_TLB_RANDOM(val) asm volatile ("mfc0 %[rt],$1,0" : [rt] "=d" (val):)
+#define TLB_DIRTY (0x1ULL<<2)
+#define TLB_VALID (0x1ULL<<1)
+#define TLB_GLOBAL (0x1ULL<<0)
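+
+/* Example (illustrative sketch; the EntryHi/EntryLo field packing is
+   assumed to follow the MIPS64 privileged spec and is not shown here):
+   writing one indexed TLB entry with the macros above. */
+#if 0
+static void tlb_write_indexed(int index, uint64_t entryhi,
+                              uint64_t lo0, uint64_t lo1, uint64_t pagemask)
+{
+    CVMX_MT_TLB_INDEX(index);
+    CVMX_MT_ENTRY_HIGH(entryhi);                      /* VPN2 | ASID */
+    CVMX_MT_ENTRY_LO_0(lo0 | TLB_VALID | TLB_DIRTY);  /* even page */
+    CVMX_MT_ENTRY_LO_1(lo1 | TLB_VALID | TLB_DIRTY);  /* odd page */
+    CVMX_MT_PAGEMASK(pagemask);
+    CVMX_TLBWI;                                       /* commit at index */
+}
+#endif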
+
+
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+/* Macros to PUSH and POP Octeon2 ISA. */
+#define CVMX_PUSH_OCTEON2 asm volatile (".set push\n.set arch=octeon2")
+#define CVMX_POP_OCTEON2 asm volatile (".set pop")
+#endif
+
+/* assembler macros to guarantee byte loads/stores are used */
+/* for an unaligned 16-bit access (these use AT register) */
+/* we need the hidden argument (__a) so that GCC gets the dependencies right */
+#define CVMX_LOADUNA_INT16(result, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("ulh %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : [rbase] "d" (__a), "m"(__a[offset]), "m"(__a[offset + 1])); }
+#define CVMX_LOADUNA_UINT16(result, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("ulhu %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : [rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1])); }
+#define CVMX_STOREUNA_INT16(data, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("ush %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : "=m"(__a[offset + 0]), "=m"(__a[offset + 1]): [rsrc] "d" (data), [rbase] "d" (__a)); }
+
+#define CVMX_LOADUNA_INT32(result, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("ulw %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : \
+ [rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1]), "m"(__a[offset + 2]), "m"(__a[offset + 3])); }
+#define CVMX_STOREUNA_INT32(data, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("usw %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : \
+ "=m"(__a[offset + 0]), "=m"(__a[offset + 1]), "=m"(__a[offset + 2]), "=m"(__a[offset + 3]) : \
+ [rsrc] "d" (data), [rbase] "d" (__a)); }
+
+#define CVMX_LOADUNA_INT64(result, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("uld %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : \
+ [rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1]), "m"(__a[offset + 2]), "m"(__a[offset + 3]), \
+ "m"(__a[offset + 4]), "m"(__a[offset + 5]), "m"(__a[offset + 6]), "m"(__a[offset + 7])); }
+#define CVMX_STOREUNA_INT64(data, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("usd %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : \
+ "=m"(__a[offset + 0]), "=m"(__a[offset + 1]), "=m"(__a[offset + 2]), "=m"(__a[offset + 3]), \
+ "=m"(__a[offset + 4]), "=m"(__a[offset + 5]), "=m"(__a[offset + 6]), "=m"(__a[offset + 7]) : \
+ [rsrc] "d" (data), [rbase] "d" (__a)); }
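+
+/* Example (illustrative sketch): reading a 32-bit field at an arbitrary
+   byte offset, e.g. out of a packet header, without risking an
+   address-error exception. */
+#if 0
+static uint32_t get_unaligned32(const void *p)
+{
+    uint32_t v;
+    CVMX_LOADUNA_INT32(v, p, 0);   /* ulw copes with any alignment */
+    return v;
+}
+#endif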
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __CVMX_ASM_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-asm.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-asx0-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-asx0-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-asx0-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,143 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-asx0-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon asx0.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_ASX0_DEFS_H__
+#define __CVMX_ASX0_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ASX0_DBG_DATA_DRV CVMX_ASX0_DBG_DATA_DRV_FUNC()
+static inline uint64_t CVMX_ASX0_DBG_DATA_DRV_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_ASX0_DBG_DATA_DRV not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800B0000208ull);
+}
+#else
+#define CVMX_ASX0_DBG_DATA_DRV (CVMX_ADD_IO_SEG(0x00011800B0000208ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ASX0_DBG_DATA_ENABLE CVMX_ASX0_DBG_DATA_ENABLE_FUNC()
+static inline uint64_t CVMX_ASX0_DBG_DATA_ENABLE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_ASX0_DBG_DATA_ENABLE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800B0000200ull);
+}
+#else
+#define CVMX_ASX0_DBG_DATA_ENABLE (CVMX_ADD_IO_SEG(0x00011800B0000200ull))
+#endif
+
+/**
+ * cvmx_asx0_dbg_data_drv
+ *
+ * ASX_DBG_DATA_DRV
+ *
+ */
+union cvmx_asx0_dbg_data_drv {
+ uint64_t u64;
+ struct cvmx_asx0_dbg_data_drv_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t pctl : 5; /**< These bits control the driving strength of the dbg
+ interface. */
+ uint64_t nctl : 4; /**< These bits control the driving strength of the dbg
+ interface. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 5;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_asx0_dbg_data_drv_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< These bits control the driving strength of the dbg
+ interface. */
+ uint64_t nctl : 4; /**< These bits control the driving strength of the dbg
+ interface. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn38xx;
+ struct cvmx_asx0_dbg_data_drv_cn38xx cn38xxp2;
+ struct cvmx_asx0_dbg_data_drv_s cn58xx;
+ struct cvmx_asx0_dbg_data_drv_s cn58xxp1;
+};
+typedef union cvmx_asx0_dbg_data_drv cvmx_asx0_dbg_data_drv_t;
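+
+/* Example (illustrative sketch, using the SDK's cvmx_read_csr and
+   cvmx_write_csr accessors): the usual read-modify-write pattern for
+   these generated CSR unions. */
+#if 0
+static void set_dbg_drive_strength(int pctl, int nctl)
+{
+    cvmx_asx0_dbg_data_drv_t drv;
+    drv.u64 = cvmx_read_csr(CVMX_ASX0_DBG_DATA_DRV);
+    drv.s.pctl = pctl;
+    drv.s.nctl = nctl;
+    cvmx_write_csr(CVMX_ASX0_DBG_DATA_DRV, drv.u64);
+}
+#endif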
+
+/**
+ * cvmx_asx0_dbg_data_enable
+ *
+ * ASX_DBG_DATA_ENABLE
+ *
+ */
+union cvmx_asx0_dbg_data_enable {
+ uint64_t u64;
+ struct cvmx_asx0_dbg_data_enable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+	uint64_t en                           : 1;  /**< A 1->0 transition turns the dbg interface OFF. */
+#else
+ uint64_t en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asx0_dbg_data_enable_s cn38xx;
+ struct cvmx_asx0_dbg_data_enable_s cn38xxp2;
+ struct cvmx_asx0_dbg_data_enable_s cn58xx;
+ struct cvmx_asx0_dbg_data_enable_s cn58xxp1;
+};
+typedef union cvmx_asx0_dbg_data_enable cvmx_asx0_dbg_data_enable_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-asx0-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-asxx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-asxx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-asxx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1319 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-asxx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon asxx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_ASXX_DEFS_H__
+#define __CVMX_ASXX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_GMII_RX_CLK_SET(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_ASXX_GMII_RX_CLK_SET(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000180ull);
+}
+#else
+#define CVMX_ASXX_GMII_RX_CLK_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000180ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_GMII_RX_DAT_SET(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_ASXX_GMII_RX_DAT_SET(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000188ull);
+}
+#else
+#define CVMX_ASXX_GMII_RX_DAT_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000188ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_INT_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_INT_EN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000018ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000018ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_INT_REG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_INT_REG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000010ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000010ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_MII_RX_DAT_SET(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_ASXX_MII_RX_DAT_SET(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000190ull);
+}
+#else
+#define CVMX_ASXX_MII_RX_DAT_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000190ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_PRT_LOOP(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_PRT_LOOP(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000040ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_PRT_LOOP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000040ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_BYPASS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_BYPASS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000248ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_BYPASS(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000248ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_BYPASS_SETTING(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_BYPASS_SETTING(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000250ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_BYPASS_SETTING(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000250ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_COMP(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_COMP(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000220ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_COMP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000220ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_DATA_DRV(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_DATA_DRV(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000218ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_DATA_DRV(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000218ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_FCRAM_MODE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_FCRAM_MODE(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000210ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_FCRAM_MODE(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000210ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_NCTL_STRONG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_NCTL_STRONG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000230ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_NCTL_STRONG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000230ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_NCTL_WEAK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_NCTL_WEAK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000240ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_NCTL_WEAK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000240ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_PCTL_STRONG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_PCTL_STRONG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000228ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_PCTL_STRONG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000228ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_PCTL_WEAK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_PCTL_WEAK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000238ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_PCTL_WEAK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000238ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_SETTING(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_SETTING(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000258ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_SETTING(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000258ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_CLK_SETX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_ASXX_RX_CLK_SETX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
+}
+#else
+#define CVMX_ASXX_RX_CLK_SETX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_PRT_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_PRT_EN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000000ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000000ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_WOL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000100ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RX_WOL(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000100ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_WOL_MSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL_MSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000108ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RX_WOL_MSK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000108ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_WOL_POWOK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL_POWOK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000118ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RX_WOL_POWOK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000118ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_WOL_SIG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL_SIG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000110ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RX_WOL_SIG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000110ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_TX_CLK_SETX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_ASXX_TX_CLK_SETX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
+}
+#else
+#define CVMX_ASXX_TX_CLK_SETX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_TX_COMP_BYP(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_TX_COMP_BYP(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000068ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_TX_COMP_BYP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000068ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_TX_HI_WATERX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_ASXX_TX_HI_WATERX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
+}
+#else
+#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_TX_PRT_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_TX_PRT_EN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_TX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
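+
+/* Example (illustrative sketch; the port mask value is hypothetical):
+   block_id selects ASX interface 0 or 1, so enabling RX and TX on the
+   first four ports of an interface looks like this. */
+#if 0
+static void asx_enable_ports(int interface)
+{
+    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), 0xf);
+    cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), 0xf);
+}
+#endif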
+
+/**
+ * cvmx_asx#_gmii_rx_clk_set
+ *
+ * ASX_GMII_RX_CLK_SET = GMII Clock delay setting
+ *
+ */
+union cvmx_asxx_gmii_rx_clk_set {
+ uint64_t u64;
+ struct cvmx_asxx_gmii_rx_clk_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the RXCLK (GMII receive clk)
+ delay line. The intrinsic delay can range from
+ 50ps to 80ps per tap. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn30xx;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn31xx;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn50xx;
+};
+typedef union cvmx_asxx_gmii_rx_clk_set cvmx_asxx_gmii_rx_clk_set_t;
+
+/**
+ * cvmx_asx#_gmii_rx_dat_set
+ *
+ * ASX_GMII_RX_DAT_SET = GMII Clock delay setting
+ *
+ */
+union cvmx_asxx_gmii_rx_dat_set {
+ uint64_t u64;
+ struct cvmx_asxx_gmii_rx_dat_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the RXD (GMII receive data)
+ delay lines. The intrinsic delay can range from
+ 50ps to 80ps per tap. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn30xx;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn31xx;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn50xx;
+};
+typedef union cvmx_asxx_gmii_rx_dat_set cvmx_asxx_gmii_rx_dat_set_t;
+
+/**
+ * cvmx_asx#_int_en
+ *
+ * ASX_INT_EN = Interrupt Enable
+ *
+ */
+union cvmx_asxx_int_en {
+ uint64_t u64;
+ struct cvmx_asxx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+	uint64_t txpsh                        : 4;  /**< TX FIFO overflow on RGMII port */
+	uint64_t txpop                        : 4;  /**< TX FIFO underflow on RGMII port */
+	uint64_t ovrflw                       : 4;  /**< RX FIFO overflow on RGMII port */
+#else
+ uint64_t ovrflw : 4;
+ uint64_t txpop : 4;
+ uint64_t txpsh : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_asxx_int_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+	uint64_t txpsh                        : 3;  /**< TX FIFO overflow on RGMII port */
+	uint64_t reserved_7_7                 : 1;
+	uint64_t txpop                        : 3;  /**< TX FIFO underflow on RGMII port */
+	uint64_t reserved_3_3                 : 1;
+	uint64_t ovrflw                       : 3;  /**< RX FIFO overflow on RGMII port */
+#else
+ uint64_t ovrflw : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t txpop : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t txpsh : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_int_en_cn30xx cn31xx;
+ struct cvmx_asxx_int_en_s cn38xx;
+ struct cvmx_asxx_int_en_s cn38xxp2;
+ struct cvmx_asxx_int_en_cn30xx cn50xx;
+ struct cvmx_asxx_int_en_s cn58xx;
+ struct cvmx_asxx_int_en_s cn58xxp1;
+};
+typedef union cvmx_asxx_int_en cvmx_asxx_int_en_t;
+
+/**
+ * cvmx_asx#_int_reg
+ *
+ * ASX_INT_REG = Interrupt Register
+ *
+ */
+union cvmx_asxx_int_reg {
+ uint64_t u64;
+ struct cvmx_asxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+	uint64_t txpsh                        : 4;  /**< TX FIFO overflow on RGMII port */
+	uint64_t txpop                        : 4;  /**< TX FIFO underflow on RGMII port */
+	uint64_t ovrflw                       : 4;  /**< RX FIFO overflow on RGMII port */
+#else
+ uint64_t ovrflw : 4;
+ uint64_t txpop : 4;
+ uint64_t txpsh : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_asxx_int_reg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t txpsh : 3; /**< TX FIFO overflow on RGMII port */
+ uint64_t reserved_7_7 : 1;
+ uint64_t txpop : 3; /**< TX FIFO underflow on RGMII port */
+ uint64_t reserved_3_3 : 1;
+ uint64_t ovrflw : 3; /**< RX FIFO overflow on RGMII port */
+#else
+ uint64_t ovrflw : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t txpop : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t txpsh : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_int_reg_cn30xx cn31xx;
+ struct cvmx_asxx_int_reg_s cn38xx;
+ struct cvmx_asxx_int_reg_s cn38xxp2;
+ struct cvmx_asxx_int_reg_cn30xx cn50xx;
+ struct cvmx_asxx_int_reg_s cn58xx;
+ struct cvmx_asxx_int_reg_s cn58xxp1;
+};
+typedef union cvmx_asxx_int_reg cvmx_asxx_int_reg_t;
+
+/**
+ * cvmx_asx#_mii_rx_dat_set
+ *
+ * ASX_MII_RX_DAT_SET = MII Data delay setting
+ *
+ */
+union cvmx_asxx_mii_rx_dat_set {
+ uint64_t u64;
+ struct cvmx_asxx_mii_rx_dat_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the RXD (MII receive data)
+ delay lines. The intrinsic delay can range from
+ 50ps to 80ps per tap. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_mii_rx_dat_set_s cn30xx;
+ struct cvmx_asxx_mii_rx_dat_set_s cn50xx;
+};
+typedef union cvmx_asxx_mii_rx_dat_set cvmx_asxx_mii_rx_dat_set_t;
+
+/**
+ * cvmx_asx#_prt_loop
+ *
+ * ASX_PRT_LOOP = Internal Loopback mode - TX FIFO output goes into RX FIFO (and maybe pins)
+ *
+ */
+union cvmx_asxx_prt_loop {
+ uint64_t u64;
+ struct cvmx_asxx_prt_loop_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t ext_loop : 4; /**< External Loopback Enable
+ 0 = No Loopback (TX FIFO is filled by RGMII)
+ 1 = RX FIFO drives the TX FIFO
+ - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
+ - GMX_PRT_CFG[SPEED] must be 1 (GigE speed)
+ - core clock > 250MHZ
+ - rxc must not deviate from the +-50ppm
+ - if txc>rxc, idle cycle may drop over time */
+ uint64_t int_loop : 4; /**< Internal Loopback Enable
+ 0 = No Loopback (RX FIFO is filled by RGMII pins)
+ 1 = TX FIFO drives the RX FIFO
+ Note, in internal loop-back mode, the RGMII link
+ status is not used (since there is no real PHY).
+ Software cannot use the inband status. */
+#else
+ uint64_t int_loop : 4;
+ uint64_t ext_loop : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_asxx_prt_loop_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t ext_loop : 3; /**< External Loopback Enable
+ 0 = No Loopback (TX FIFO is filled by RGMII)
+ 1 = RX FIFO drives the TX FIFO
+ - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
+ - GMX_PRT_CFG[SPEED] must be 1 (GigE speed)
+ - core clock > 250MHZ
+ - rxc must not deviate from the +-50ppm
+ - if txc>rxc, idle cycle may drop over time */
+ uint64_t reserved_3_3 : 1;
+ uint64_t int_loop : 3; /**< Internal Loopback Enable
+ 0 = No Loopback (RX FIFO is filled by RGMII pins)
+ 1 = TX FIFO drives the RX FIFO
+ - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
+ - GMX_PRT_CFG[SPEED] must be 1 (GigE speed)
+ - GMX_TX_CLK[CLK_CNT] must be 1
+ Note, in internal loop-back mode, the RGMII link
+ status is not used (since there is no real PHY).
+ Software cannot use the inband status. */
+#else
+ uint64_t int_loop : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t ext_loop : 3;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_prt_loop_cn30xx cn31xx;
+ struct cvmx_asxx_prt_loop_s cn38xx;
+ struct cvmx_asxx_prt_loop_s cn38xxp2;
+ struct cvmx_asxx_prt_loop_cn30xx cn50xx;
+ struct cvmx_asxx_prt_loop_s cn58xx;
+ struct cvmx_asxx_prt_loop_s cn58xxp1;
+};
+typedef union cvmx_asxx_prt_loop cvmx_asxx_prt_loop_t;
+
+/**
+ * cvmx_asx#_rld_bypass
+ *
+ * ASX_RLD_BYPASS
+ *
+ */
+union cvmx_asxx_rld_bypass {
+ uint64_t u64;
+ struct cvmx_asxx_rld_bypass_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t bypass : 1; /**< When set, the rld_dll setting is bypassed with
+ ASX_RLD_BYPASS_SETTING */
+#else
+ uint64_t bypass : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asxx_rld_bypass_s cn38xx;
+ struct cvmx_asxx_rld_bypass_s cn38xxp2;
+ struct cvmx_asxx_rld_bypass_s cn58xx;
+ struct cvmx_asxx_rld_bypass_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_bypass cvmx_asxx_rld_bypass_t;
+
+/**
+ * cvmx_asx#_rld_bypass_setting
+ *
+ * ASX_RLD_BYPASS_SETTING
+ *
+ */
+union cvmx_asxx_rld_bypass_setting {
+ uint64_t u64;
+ struct cvmx_asxx_rld_bypass_setting_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< The rld_dll setting bypass value */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_bypass_setting_s cn38xx;
+ struct cvmx_asxx_rld_bypass_setting_s cn38xxp2;
+ struct cvmx_asxx_rld_bypass_setting_s cn58xx;
+ struct cvmx_asxx_rld_bypass_setting_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_bypass_setting cvmx_asxx_rld_bypass_setting_t;
+
+/**
+ * cvmx_asx#_rld_comp
+ *
+ * ASX_RLD_COMP
+ *
+ */
+union cvmx_asxx_rld_comp {
+ uint64_t u64;
+ struct cvmx_asxx_rld_comp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t pctl : 5; /**< PCTL Compensation Value
+ These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+ uint64_t nctl : 4; /**< These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 5;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_asxx_rld_comp_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+ uint64_t nctl : 4; /**< These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn38xx;
+ struct cvmx_asxx_rld_comp_cn38xx cn38xxp2;
+ struct cvmx_asxx_rld_comp_s cn58xx;
+ struct cvmx_asxx_rld_comp_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_comp cvmx_asxx_rld_comp_t;
+
+/**
+ * cvmx_asx#_rld_data_drv
+ *
+ * ASX_RLD_DATA_DRV
+ *
+ */
+union cvmx_asxx_rld_data_drv {
+ uint64_t u64;
+ struct cvmx_asxx_rld_data_drv_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< These bits specify a driving strength (positive
+ integer) for the RLD I/Os when the built-in
+ compensation circuit is bypassed. */
+ uint64_t nctl : 4; /**< These bits specify a driving strength (positive
+ integer) for the RLD I/Os when the built-in
+ compensation circuit is bypassed. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_asxx_rld_data_drv_s cn38xx;
+ struct cvmx_asxx_rld_data_drv_s cn38xxp2;
+ struct cvmx_asxx_rld_data_drv_s cn58xx;
+ struct cvmx_asxx_rld_data_drv_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_data_drv cvmx_asxx_rld_data_drv_t;
+
+/**
+ * cvmx_asx#_rld_fcram_mode
+ *
+ * ASX_RLD_FCRAM_MODE
+ *
+ */
+union cvmx_asxx_rld_fcram_mode {
+ uint64_t u64;
+ struct cvmx_asxx_rld_fcram_mode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t mode : 1; /**< Memory Mode
+ - 0: RLDRAM
+ - 1: FCRAM */
+#else
+ uint64_t mode : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asxx_rld_fcram_mode_s cn38xx;
+ struct cvmx_asxx_rld_fcram_mode_s cn38xxp2;
+};
+typedef union cvmx_asxx_rld_fcram_mode cvmx_asxx_rld_fcram_mode_t;
+
+/**
+ * cvmx_asx#_rld_nctl_strong
+ *
+ * ASX_RLD_NCTL_STRONG
+ *
+ */
+union cvmx_asxx_rld_nctl_strong {
+ uint64_t u64;
+ struct cvmx_asxx_rld_nctl_strong_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t nctl : 5; /**< Duke's drive control */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_nctl_strong_s cn38xx;
+ struct cvmx_asxx_rld_nctl_strong_s cn38xxp2;
+ struct cvmx_asxx_rld_nctl_strong_s cn58xx;
+ struct cvmx_asxx_rld_nctl_strong_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_nctl_strong cvmx_asxx_rld_nctl_strong_t;
+
+/**
+ * cvmx_asx#_rld_nctl_weak
+ *
+ * ASX_RLD_NCTL_WEAK
+ *
+ */
+union cvmx_asxx_rld_nctl_weak {
+ uint64_t u64;
+ struct cvmx_asxx_rld_nctl_weak_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t nctl : 5; /**< UNUSED (not needed for CN58XX) */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_nctl_weak_s cn38xx;
+ struct cvmx_asxx_rld_nctl_weak_s cn38xxp2;
+ struct cvmx_asxx_rld_nctl_weak_s cn58xx;
+ struct cvmx_asxx_rld_nctl_weak_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_nctl_weak cvmx_asxx_rld_nctl_weak_t;
+
+/**
+ * cvmx_asx#_rld_pctl_strong
+ *
+ * ASX_RLD_PCTL_STRONG
+ *
+ */
+union cvmx_asxx_rld_pctl_strong {
+ uint64_t u64;
+ struct cvmx_asxx_rld_pctl_strong_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t pctl : 5; /**< Duke's drive control */
+#else
+ uint64_t pctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_pctl_strong_s cn38xx;
+ struct cvmx_asxx_rld_pctl_strong_s cn38xxp2;
+ struct cvmx_asxx_rld_pctl_strong_s cn58xx;
+ struct cvmx_asxx_rld_pctl_strong_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_pctl_strong cvmx_asxx_rld_pctl_strong_t;
+
+/**
+ * cvmx_asx#_rld_pctl_weak
+ *
+ * ASX_RLD_PCTL_WEAK
+ *
+ */
+union cvmx_asxx_rld_pctl_weak {
+ uint64_t u64;
+ struct cvmx_asxx_rld_pctl_weak_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t pctl : 5; /**< UNUSED (not needed for CN58XX) */
+#else
+ uint64_t pctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_pctl_weak_s cn38xx;
+ struct cvmx_asxx_rld_pctl_weak_s cn38xxp2;
+ struct cvmx_asxx_rld_pctl_weak_s cn58xx;
+ struct cvmx_asxx_rld_pctl_weak_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_pctl_weak cvmx_asxx_rld_pctl_weak_t;
+
+/**
+ * cvmx_asx#_rld_setting
+ *
+ * ASX_RLD_SETTING
+ *
+ */
+union cvmx_asxx_rld_setting {
+ uint64_t u64;
+ struct cvmx_asxx_rld_setting_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t dfaset : 5; /**< RLD ClkGen DLL Setting(debug) */
+ uint64_t dfalag : 1; /**< RLD ClkGen DLL Lag Error(debug) */
+ uint64_t dfalead : 1; /**< RLD ClkGen DLL Lead Error(debug) */
+ uint64_t dfalock : 1; /**< RLD ClkGen DLL Lock acquisition(debug) */
+ uint64_t setting : 5; /**< RLDCK90 DLL Setting(debug) */
+#else
+ uint64_t setting : 5;
+ uint64_t dfalock : 1;
+ uint64_t dfalead : 1;
+ uint64_t dfalag : 1;
+ uint64_t dfaset : 5;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_asxx_rld_setting_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< This is the read-only true rld dll_setting. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn38xx;
+ struct cvmx_asxx_rld_setting_cn38xx cn38xxp2;
+ struct cvmx_asxx_rld_setting_s cn58xx;
+ struct cvmx_asxx_rld_setting_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_setting cvmx_asxx_rld_setting_t;
+
+/**
+ * cvmx_asx#_rx_clk_set#
+ *
+ * ASX_RX_CLK_SET = RGMII Clock delay setting
+ *
+ *
+ * Notes:
+ * Setting to place on the open-loop RXC (RGMII receive clk)
+ * delay line, which can delay the received clock. This
+ * can be used if the board and/or transmitting device
+ * has not otherwise delayed the clock.
+ *
+ * A value of SETTING=0 disables the delay line. The delay
+ * line should be disabled unless the transmitter or board
+ * does not delay the clock.
+ *
+ * Note that this delay line provides only a coarse control
+ * over the delay. Generally, it can only reliably provide
+ * a delay in the range 1.25-2.5ns, which may not be adequate
+ * for some system applications.
+ *
+ * The open loop delay line selects
+ * from among a series of tap positions. Each incremental
+ * tap position adds a delay of 50ps to 135ps per tap, depending
+ * on the chip, its temperature, and the voltage.
+ * To achieve from 1.25-2.5ns of delay on the received
+ * clock, a fixed value of SETTING=24 may work.
+ * For more precision, we recommend the following settings
+ * based on the chip voltage:
+ *
+ * VDD SETTING
+ * -----------------------------
+ * 1.0 18
+ * 1.05 19
+ * 1.1 21
+ * 1.15 22
+ * 1.2 23
+ * 1.25 24
+ * 1.3 25
+ */
+union cvmx_asxx_rx_clk_setx {
+ uint64_t u64;
+ struct cvmx_asxx_rx_clk_setx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the open-loop RXC delay line */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rx_clk_setx_s cn30xx;
+ struct cvmx_asxx_rx_clk_setx_s cn31xx;
+ struct cvmx_asxx_rx_clk_setx_s cn38xx;
+ struct cvmx_asxx_rx_clk_setx_s cn38xxp2;
+ struct cvmx_asxx_rx_clk_setx_s cn50xx;
+ struct cvmx_asxx_rx_clk_setx_s cn58xx;
+ struct cvmx_asxx_rx_clk_setx_s cn58xxp1;
+};
+typedef union cvmx_asxx_rx_clk_setx cvmx_asxx_rx_clk_setx_t;
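
The VDD table in the notes above maps directly to code; a sketch, with
millivolt thresholds inferred from the table (illustrative only):

    static int example_rxc_setting_for_vdd(int vdd_mv)
    {
        if (vdd_mv <= 1000) return 18;
        if (vdd_mv <= 1050) return 19;
        if (vdd_mv <= 1100) return 21;
        if (vdd_mv <= 1150) return 22;
        if (vdd_mv <= 1200) return 23;
        if (vdd_mv <= 1250) return 24;
        return 25;                      /* 1.3V and above */
    }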
+
+/**
+ * cvmx_asx#_rx_prt_en
+ *
+ * ASX_RX_PRT_EN = RGMII Port Enable
+ *
+ */
+union cvmx_asxx_rx_prt_en {
+ uint64_t u64;
+ struct cvmx_asxx_rx_prt_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t prt_en : 4; /**< Port enable. Must be set for Octane to receive
+ RGMII traffic. When this bit is clear on a given
+ port, all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_asxx_rx_prt_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t prt_en : 3; /**< Port enable. Must be set for Octane to receive
+ RGMII traffic. When this bit is clear on a given
+ port, all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_rx_prt_en_cn30xx cn31xx;
+ struct cvmx_asxx_rx_prt_en_s cn38xx;
+ struct cvmx_asxx_rx_prt_en_s cn38xxp2;
+ struct cvmx_asxx_rx_prt_en_cn30xx cn50xx;
+ struct cvmx_asxx_rx_prt_en_s cn58xx;
+ struct cvmx_asxx_rx_prt_en_s cn58xxp1;
+};
+typedef union cvmx_asxx_rx_prt_en cvmx_asxx_rx_prt_en_t;
+
+/**
+ * cvmx_asx#_rx_wol
+ *
+ * ASX_RX_WOL = RGMII RX Wake on LAN status register
+ *
+ */
+union cvmx_asxx_rx_wol {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t status : 1; /**< Copy of PMCSR[15] - PME_status */
+ uint64_t enable : 1; /**< Copy of PMCSR[8] - PME_enable */
+#else
+ uint64_t enable : 1;
+ uint64_t status : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_s cn38xx;
+ struct cvmx_asxx_rx_wol_s cn38xxp2;
+};
+typedef union cvmx_asxx_rx_wol cvmx_asxx_rx_wol_t;
+
+/**
+ * cvmx_asx#_rx_wol_msk
+ *
+ * ASX_RX_WOL_MSK = RGMII RX Wake on LAN byte mask
+ *
+ */
+union cvmx_asxx_rx_wol_msk {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t msk : 64; /**< Bytes to include in the CRC signature */
+#else
+ uint64_t msk : 64;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_msk_s cn38xx;
+ struct cvmx_asxx_rx_wol_msk_s cn38xxp2;
+};
+typedef union cvmx_asxx_rx_wol_msk cvmx_asxx_rx_wol_msk_t;
+
+/**
+ * cvmx_asx#_rx_wol_powok
+ *
+ * ASX_RX_WOL_POWOK = RGMII RX Wake on LAN Power OK
+ *
+ */
+union cvmx_asxx_rx_wol_powok {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_powok_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t powerok : 1; /**< Power OK */
+#else
+ uint64_t powerok : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_powok_s cn38xx;
+ struct cvmx_asxx_rx_wol_powok_s cn38xxp2;
+};
+typedef union cvmx_asxx_rx_wol_powok cvmx_asxx_rx_wol_powok_t;
+
+/**
+ * cvmx_asx#_rx_wol_sig
+ *
+ * ASX_RX_WOL_SIG = RGMII RX Wake on LAN CRC signature
+ *
+ */
+union cvmx_asxx_rx_wol_sig {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_sig_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t sig : 32; /**< CRC signature */
+#else
+ uint64_t sig : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_sig_s cn38xx;
+ struct cvmx_asxx_rx_wol_sig_s cn38xxp2;
+};
+typedef union cvmx_asxx_rx_wol_sig cvmx_asxx_rx_wol_sig_t;
+
+/**
+ * cvmx_asx#_tx_clk_set#
+ *
+ * ASX_TX_CLK_SET = RGMII Clock delay setting
+ *
+ *
+ * Notes:
+ * Setting to place on the open-loop TXC (RGMII transmit clk)
+ * delay line, which can delay the transmitted clock. This
+ * can be used if the board and/or transmitting device
+ * has not otherwise delayed the clock.
+ *
+ * A value of SETTING=0 disables the delay line. The delay
+ * line should be disabled unless the transmitter or board
+ * does not delay the clock.
+ *
+ * Note that this delay line provides only a coarse control
+ * over the delay. Generally, it can only reliably provide
+ * a delay in the range 1.25-2.5ns, which may not be adequate
+ * for some system applications.
+ *
+ * The open loop delay line selects
+ * from among a series of tap positions. Each incremental
+ * tap position adds a delay of 50ps to 135ps per tap, depending
+ * on the chip, its temperature, and the voltage.
+ * To achieve from 1.25-2.5ns of delay on the transmitted
+ * clock, a fixed value of SETTING=24 may work.
+ * For more precision, we recommend the following settings
+ * based on the chip voltage:
+ *
+ * VDD SETTING
+ * -----------------------------
+ * 1.0 18
+ * 1.05 19
+ * 1.1 21
+ * 1.15 22
+ * 1.2 23
+ * 1.25 24
+ * 1.3 25
+ */
+union cvmx_asxx_tx_clk_setx {
+ uint64_t u64;
+ struct cvmx_asxx_tx_clk_setx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the open-loop TXC delay line */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_tx_clk_setx_s cn30xx;
+ struct cvmx_asxx_tx_clk_setx_s cn31xx;
+ struct cvmx_asxx_tx_clk_setx_s cn38xx;
+ struct cvmx_asxx_tx_clk_setx_s cn38xxp2;
+ struct cvmx_asxx_tx_clk_setx_s cn50xx;
+ struct cvmx_asxx_tx_clk_setx_s cn58xx;
+ struct cvmx_asxx_tx_clk_setx_s cn58xxp1;
+};
+typedef union cvmx_asxx_tx_clk_setx cvmx_asxx_tx_clk_setx_t;
+
+/**
+ * cvmx_asx#_tx_comp_byp
+ *
+ * ASX_TX_COMP_BYP = RGMII Clock delay setting
+ *
+ */
+union cvmx_asxx_tx_comp_byp {
+ uint64_t u64;
+ struct cvmx_asxx_tx_comp_byp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_asxx_tx_comp_byp_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t bypass : 1; /**< Compensation bypass */
+ uint64_t pctl : 4; /**< PCTL Compensation Value (see Duke) */
+ uint64_t nctl : 4; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t bypass : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx;
+ struct cvmx_asxx_tx_comp_byp_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< PCTL Compensation Value (see Duke) */
+ uint64_t nctl : 4; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn38xx;
+ struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2;
+ struct cvmx_asxx_tx_comp_byp_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t bypass : 1; /**< Compensation bypass */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pctl : 5; /**< PCTL Compensation Value (see Duke) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t bypass : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn50xx;
+ struct cvmx_asxx_tx_comp_byp_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t pctl : 5; /**< PCTL Compensation Value (see Duke) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } cn58xx;
+ struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1;
+};
+typedef union cvmx_asxx_tx_comp_byp cvmx_asxx_tx_comp_byp_t;
+
+/**
+ * cvmx_asx#_tx_hi_water#
+ *
+ * ASX_TX_HI_WATER = RGMII TX FIFO Hi WaterMark
+ *
+ */
+union cvmx_asxx_tx_hi_waterx {
+ uint64_t u64;
+ struct cvmx_asxx_tx_hi_waterx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mark : 4; /**< TX FIFO HiWatermark to stall GMX
+ Value of 0 maps to 16
+ Reset value changed from 10 in pass1
+ Pass1 settings (assuming 125 tclk)
+ - 325-375: 12
+ - 375-437: 11
+ - 437-550: 10
+ - 550-687: 9 */
+#else
+ uint64_t mark : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t mark : 3; /**< TX FIFO HiWatermark to stall GMX
+ Value 0 maps to 8. */
+#else
+ uint64_t mark : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn38xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn38xxp2;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn58xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn58xxp1;
+};
+typedef union cvmx_asxx_tx_hi_waterx cvmx_asxx_tx_hi_waterx_t;
+
+/**
+ * cvmx_asx#_tx_prt_en
+ *
+ * ASX_TX_PRT_EN = RGMII Port Enable
+ *
+ */
+union cvmx_asxx_tx_prt_en {
+ uint64_t u64;
+ struct cvmx_asxx_tx_prt_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t prt_en : 4; /**< Port enable. Must be set for Octane to send
+ RGMII traffic. When this bit is clear on a given
+ port, all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_asxx_tx_prt_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t prt_en : 3; /**< Port enable. Must be set for Octane to send
+ RGMII traffic. When this bit is clear on a given
+ port, all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_tx_prt_en_cn30xx cn31xx;
+ struct cvmx_asxx_tx_prt_en_s cn38xx;
+ struct cvmx_asxx_tx_prt_en_s cn38xxp2;
+ struct cvmx_asxx_tx_prt_en_cn30xx cn50xx;
+ struct cvmx_asxx_tx_prt_en_s cn58xx;
+ struct cvmx_asxx_tx_prt_en_s cn58xxp1;
+};
+typedef union cvmx_asxx_tx_prt_en cvmx_asxx_tx_prt_en_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-asxx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-atomic.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-atomic.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-atomic.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,771 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file provides atomic operations
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ *
+ */
+
+
+#ifndef __CVMX_ATOMIC_H__
+#define __CVMX_ATOMIC_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
+static inline void cvmx_atomic_add32_nosync(int32_t *ptr, int32_t incr)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ {
+ uint32_t tmp;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[tmp], %[val] \n"
+ " addu %[tmp], %[inc] \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp)
+ : [inc] "r" (incr)
+ : "memory");
+ }
+ else
+ {
+ __asm__ __volatile__(
+ " saa %[inc], (%[base]) \n"
+ : "+m" (*ptr)
+ : [inc] "r" (incr), [base] "r" (ptr)
+ : "memory");
+ }
+}
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
+static inline void cvmx_atomic_add32(int32_t *ptr, int32_t incr)
+{
+ CVMX_SYNCWS;
+ cvmx_atomic_add32_nosync(ptr, incr);
+ CVMX_SYNCWS;
+}
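
A usage sketch (names are illustrative): the synchronizing form above is the
safe default for shared counters, while the _nosync variant is reserved for
code with no ordering requirements:

    static CVMX_SHARED int32_t packets_seen;    /* shared across cores */

    static void example_count_packet(void)
    {
        cvmx_atomic_add32(&packets_seen, 1);    /* ordered on both sides */
    }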
+
+/**
+ * Atomically sets a 32 bit (aligned) memory location to a value
+ *
+ * @param ptr address of memory to set
+ * @param value value to set memory location to.
+ */
+static inline void cvmx_atomic_set32(int32_t *ptr, int32_t value)
+{
+ CVMX_SYNCWS;
+ *ptr = value;
+ CVMX_SYNCWS;
+}
+
+/**
+ * Returns the current value of a 32 bit (aligned) memory
+ * location.
+ *
+ * @param ptr Address of memory to get
+ * @return Value of the memory
+ */
+static inline int32_t cvmx_atomic_get32(int32_t *ptr)
+{
+ return *(volatile int32_t *)ptr;
+}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
+static inline void cvmx_atomic_add64_nosync(int64_t *ptr, int64_t incr)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ {
+ uint64_t tmp;
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: lld %[tmp], %[val] \n"
+ " daddu %[tmp], %[inc] \n"
+ " scd %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp)
+ : [inc] "r" (incr)
+ : "memory");
+ }
+ else
+ {
+ __asm__ __volatile__(
+ " saad %[inc], (%[base]) \n"
+ : "+m" (*ptr)
+ : [inc] "r" (incr), [base] "r" (ptr)
+ : "memory");
+ }
+}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
+static inline void cvmx_atomic_add64(int64_t *ptr, int64_t incr)
+{
+ CVMX_SYNCWS;
+ cvmx_atomic_add64_nosync(ptr, incr);
+ CVMX_SYNCWS;
+}
+
+/**
+ * Atomically sets a 64 bit (aligned) memory location to a value
+ *
+ * @param ptr address of memory to set
+ * @param value value to set memory location to.
+ */
+static inline void cvmx_atomic_set64(int64_t *ptr, int64_t value)
+{
+ CVMX_SYNCWS;
+ *ptr = value;
+ CVMX_SYNCWS;
+}
+
+/**
+ * Returns the current value of a 64 bit (aligned) memory
+ * location.
+ *
+ * @param ptr Address of memory to get
+ * @return Value of the memory
+ */
+static inline int64_t cvmx_atomic_get64(int64_t *ptr)
+{
+ return *(volatile int64_t *)ptr;
+}
+
+/**
+ * Atomically compares the old value with the value at ptr, and if they match,
+ * stores new_val to ptr.
+ * If *ptr and old_val don't match, the function returns failure immediately.
+ * If *ptr and old_val match, the function spins until *ptr is atomically updated
+ * to new_val, or until *ptr and old_val no longer match.
+ *
+ * Does no memory synchronization.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
+static inline uint32_t cvmx_atomic_compare_and_store32_nosync(uint32_t *ptr, uint32_t old_val, uint32_t new_val)
+{
+ uint32_t tmp, ret;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[tmp], %[val] \n"
+ " li %[ret], 0 \n"
+ " bne %[tmp], %[old], 2f \n"
+ " move %[tmp], %[new_val] \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[ret], 1 \n"
+ "2: nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [old] "r" (old_val), [new_val] "r" (new_val)
+ : "memory");
+
+ return(ret);
+
+}
+
+/**
+ * Atomically compares the old value with the value at ptr, and if they match,
+ * stores new_val to ptr.
+ * If *ptr and old_val don't match, the function returns failure immediately.
+ * If *ptr and old_val match, the function spins until *ptr is atomically updated
+ * to new_val, or until *ptr and old_val no longer match.
+ *
+ * Does memory synchronization that is required to use this as a locking primitive.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
+static inline uint32_t cvmx_atomic_compare_and_store32(uint32_t *ptr, uint32_t old_val, uint32_t new_val)
+{
+ uint32_t ret;
+ CVMX_SYNCWS;
+ ret = cvmx_atomic_compare_and_store32_nosync(ptr, old_val, new_val);
+ CVMX_SYNCWS;
+ return ret;
+
+
+}
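
Because this form carries the required synchronization, it can back a trivial
lock; a sketch only (the executive ships a real cvmx_spinlock_t, and all
names here are illustrative):

    static void example_lock(uint32_t *lock)    /* 0 = free, 1 = held */
    {
        while (!cvmx_atomic_compare_and_store32(lock, 0, 1))
            ;   /* spin until the 0 -> 1 transition succeeds */
    }

    static void example_unlock(uint32_t *lock)
    {
        cvmx_atomic_set32((int32_t *)lock, 0);  /* synchronizing store */
    }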
+
+/**
+ * Atomically compares the old value with the value at ptr, and if they match,
+ * stores new_val to ptr.
+ * If *ptr and old_val don't match, the function returns failure immediately.
+ * If *ptr and old_val match, the function spins until *ptr is atomically updated
+ * to new_val, or until *ptr and old_val no longer match.
+ *
+ * Does no memory synchronization.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
+static inline uint64_t cvmx_atomic_compare_and_store64_nosync(uint64_t *ptr, uint64_t old_val, uint64_t new_val)
+{
+ uint64_t tmp, ret;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: lld %[tmp], %[val] \n"
+ " li %[ret], 0 \n"
+ " bne %[tmp], %[old], 2f \n"
+ " move %[tmp], %[new_val] \n"
+ " scd %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[ret], 1 \n"
+ "2: nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [old] "r" (old_val), [new_val] "r" (new_val)
+ : "memory");
+
+ return(ret);
+
+}
+
+/**
+ * Atomically compares the old value with the value at ptr, and if they match,
+ * stores new_val to ptr.
+ * If *ptr and old_val don't match, the function returns failure immediately.
+ * If *ptr and old_val match, the function spins until *ptr is atomically updated
+ * to new_val, or until *ptr and old_val no longer match.
+ *
+ * Does memory synchronization that is required to use this as a locking primitive.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
+static inline uint64_t cvmx_atomic_compare_and_store64(uint64_t *ptr, uint64_t old_val, uint64_t new_val)
+{
+ uint64_t ret;
+ CVMX_SYNCWS;
+ ret = cvmx_atomic_compare_and_store64_nosync(ptr, old_val, new_val);
+ CVMX_SYNCWS;
+ return ret;
+}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int64_t cvmx_atomic_fetch_and_add64_nosync(int64_t *ptr, int64_t incr)
+{
+ uint64_t tmp, ret;
+
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ {
+ CVMX_PUSH_OCTEON2;
+ if (__builtin_constant_p(incr) && incr == 1)
+ {
+ __asm__ __volatile__(
+ "laid %0,(%2)"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory");
+ }
+ else if (__builtin_constant_p(incr) && incr == -1)
+ {
+ __asm__ __volatile__(
+ "ladd %0,(%2)"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory");
+ }
+ else
+ {
+ __asm__ __volatile__(
+ "laad %0,(%2),%3"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr), "r" (incr) : "memory");
+ }
+ CVMX_POP_OCTEON2;
+ }
+ else
+ {
+#endif
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: lld %[tmp], %[val] \n"
+ " move %[ret], %[tmp] \n"
+ " daddu %[tmp], %[inc] \n"
+ " scd %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [inc] "r" (incr)
+ : "memory");
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+ }
+#endif
+
+ return (ret);
+}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int64_t cvmx_atomic_fetch_and_add64(int64_t *ptr, int64_t incr)
+{
+ uint64_t ret;
+ CVMX_SYNCWS;
+ ret = cvmx_atomic_fetch_and_add64_nosync(ptr, incr);
+ CVMX_SYNCWS;
+ return ret;
+}
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int32_t cvmx_atomic_fetch_and_add32_nosync(int32_t *ptr, int32_t incr)
+{
+ uint32_t tmp, ret;
+
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ {
+ CVMX_PUSH_OCTEON2;
+ if (__builtin_constant_p(incr) && incr == 1)
+ {
+ __asm__ __volatile__(
+ "lai %0,(%2)"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory");
+ }
+ else if (__builtin_constant_p(incr) && incr == -1)
+ {
+ __asm__ __volatile__(
+ "lad %0,(%2)"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory");
+ }
+ else
+ {
+ __asm__ __volatile__(
+ "laa %0,(%2),%3"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr), "r" (incr) : "memory");
+ }
+ CVMX_POP_OCTEON2;
+ }
+ else
+ {
+#endif
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[tmp], %[val] \n"
+ " move %[ret], %[tmp] \n"
+ " addu %[tmp], %[inc] \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [inc] "r" (incr)
+ : "memory");
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+ }
+#endif
+
+ return (ret);
+}
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int32_t cvmx_atomic_fetch_and_add32(int32_t *ptr, int32_t incr)
+{
+ uint32_t ret;
+ CVMX_SYNCWS;
+ ret = cvmx_atomic_fetch_and_add32_nosync(ptr, incr);
+ CVMX_SYNCWS;
+ return ret;
+}
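
A usage sketch: since every caller observes a distinct previous value,
fetch-and-add hands out unique indices without locks (names illustrative):

    static CVMX_SHARED int32_t next_index;

    static int32_t example_alloc_index(void)
    {
        return cvmx_atomic_fetch_and_add32(&next_index, 1);
    }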
+
+/**
+ * Atomically set bits in a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to set
+ *
+ * @return Value of memory location before setting bits
+ */
+static inline uint64_t cvmx_atomic_fetch_and_bset64_nosync(uint64_t *ptr, uint64_t mask)
+{
+ uint64_t tmp, ret;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: lld %[tmp], %[val] \n"
+ " move %[ret], %[tmp] \n"
+ " or %[tmp], %[msk] \n"
+ " scd %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [msk] "r" (mask)
+ : "memory");
+
+ return (ret);
+}
+
+/**
+ * Atomically set bits in a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to set
+ *
+ * @return Value of memory location before setting bits
+ */
+static inline uint32_t cvmx_atomic_fetch_and_bset32_nosync(uint32_t *ptr, uint32_t mask)
+{
+ uint32_t tmp, ret;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[tmp], %[val] \n"
+ " move %[ret], %[tmp] \n"
+ " or %[tmp], %[msk] \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [msk] "r" (mask)
+ : "memory");
+
+ return (ret);
+}
+
+/**
+ * Atomically clear bits in a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to clear
+ *
+ * @return Value of memory location before clearing bits
+ */
+static inline uint64_t cvmx_atomic_fetch_and_bclr64_nosync(uint64_t *ptr, uint64_t mask)
+{
+ uint64_t tmp, ret;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ " nor %[msk], 0 \n"
+ "1: lld %[tmp], %[val] \n"
+ " move %[ret], %[tmp] \n"
+ " and %[tmp], %[msk] \n"
+ " scd %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret), [msk] "+r" (mask)
+ : : "memory");
+
+ return (ret);
+}
+
+/**
+ * Atomically clear bits in a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to clear
+ *
+ * @return Value of memory location before clearing bits
+ */
+static inline uint32_t cvmx_atomic_fetch_and_bclr32_nosync(uint32_t *ptr, uint32_t mask)
+{
+ uint32_t tmp, ret;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ " nor %[msk], 0 \n"
+ "1: ll %[tmp], %[val] \n"
+ " move %[ret], %[tmp] \n"
+ " and %[tmp], %[msk] \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret), [msk] "+r" (mask)
+ : : "memory");
+
+ return (ret);
+}
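
A sketch of the set/clear pair maintaining a shared flag word; per the
_nosync comments above there are no ordering guarantees (names illustrative):

    #define EXAMPLE_FLAG_READY (1u << 0)

    static CVMX_SHARED uint32_t example_flags;

    static int example_mark_ready(void)
    {
        /* nonzero if the flag was already set by someone else */
        return (cvmx_atomic_fetch_and_bset32_nosync(&example_flags,
                    EXAMPLE_FLAG_READY) & EXAMPLE_FLAG_READY) != 0;
    }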
+
+/**
+ * Atomically swaps value in 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param new_val new value to write
+ *
+ * @return Value of memory location before swap operation
+ */
+static inline uint64_t cvmx_atomic_swap64_nosync(uint64_t *ptr, uint64_t new_val)
+{
+ uint64_t tmp, ret;
+
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ {
+ CVMX_PUSH_OCTEON2;
+ if (__builtin_constant_p(new_val) && new_val == 0)
+ {
+ __asm__ __volatile__(
+ "lacd %0,(%1)"
+ : "=r" (ret) : "r" (ptr) : "memory");
+ }
+ else if (__builtin_constant_p(new_val) && new_val == ~0ull)
+ {
+ __asm__ __volatile__(
+ "lasd %0,(%1)"
+ : "=r" (ret) : "r" (ptr) : "memory");
+ }
+ else
+ {
+ __asm__ __volatile__(
+ "lawd %0,(%1),%2"
+ : "=r" (ret) : "r" (ptr), "r" (new_val) : "memory");
+ }
+ CVMX_POP_OCTEON2;
+ }
+ else
+ {
+#endif
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: lld %[ret], %[val] \n"
+ " move %[tmp], %[new_val] \n"
+ " scd %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [new_val] "r" (new_val)
+ : "memory");
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+ }
+#endif
+
+ return (ret);
+}
+
+/**
+ * Atomically swaps value in 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param new_val new value to write
+ *
+ * @return Value of memory location before swap operation
+ */
+static inline uint32_t cvmx_atomic_swap32_nosync(uint32_t *ptr, uint32_t new_val)
+{
+ uint32_t tmp, ret;
+
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ {
+ CVMX_PUSH_OCTEON2;
+ if (__builtin_constant_p(new_val) && new_val == 0)
+ {
+ __asm__ __volatile__(
+ "lac %0,(%1)"
+ : "=r" (ret) : "r" (ptr) : "memory");
+ }
+ else if (__builtin_constant_p(new_val) && new_val == ~0u)
+ {
+ __asm__ __volatile__(
+ "las %0,(%1)"
+ : "=r" (ret) : "r" (ptr) : "memory");
+ }
+ else
+ {
+ __asm__ __volatile__(
+ "law %0,(%1),%2"
+ : "=r" (ret) : "r" (ptr), "r" (new_val) : "memory");
+ }
+ CVMX_POP_OCTEON2;
+ }
+ else
+ {
+#endif
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[ret], %[val] \n"
+ " move %[tmp], %[new_val] \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [new_val] "r" (new_val)
+ : "memory");
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+ }
+#endif
+
+ return (ret);
+}
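
A sketch of a typical swap use: draining a single-entry mailbox, where the
core that reads back a non-zero value owns the old entry (names illustrative):

    static CVMX_SHARED uint64_t work_slot;      /* 0 means empty */

    static uint64_t example_claim_work(void)
    {
        return cvmx_atomic_swap64_nosync(&work_slot, 0);
    }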
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ATOMIC_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-atomic.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-bootloader.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-bootloader.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-bootloader.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,153 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+#ifndef __CVMX_BOOTLOADER__
+#define __CVMX_BOOTLOADER__
+
+
+
+/**
+ * @file
+ *
+ * Bootloader definitions that are shared with other programs
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+
+/* The bootloader_header_t structure defines the header that is present
+** at the start of binary u-boot images. This header is used to locate the bootloader
+** image in NAND, and also to allow verification of images for normal NOR booting.
+** This structure is placed at the beginning of a bootloader binary image, and remains
+** in the executable code.
+*/
+#define BOOTLOADER_HEADER_MAGIC 0x424f4f54 /* "BOOT" in ASCII */
+
+#define BOOTLOADER_HEADER_COMMENT_LEN 64
+#define BOOTLOADER_HEADER_VERSION_LEN 64
+#define BOOTLOADER_HEADER_MAX_SIZE 0x200 /* limited by the space to the next exception handler */
+
+#define BOOTLOADER_HEADER_CURRENT_MAJOR_REV 1
+#define BOOTLOADER_HEADER_CURRENT_MINOR_REV 2
+/* Revision history
+* 1.1 Initial released revision. (SDK 1.9)
+* 1.2 TLB based relocatable image (SDK 2.0)
+*
+*
+*/
+
+/* offsets to struct bootloader_header fields for assembly use */
+#define GOT_ADDRESS_OFFSET 48
+
+#define LOOKUP_STEP (64*1024)
+
+#ifndef __ASSEMBLY__
+typedef struct bootloader_header
+{
+ uint32_t jump_instr; /* Jump to executable code following the
+ ** header. This allows this header to
+ ** be (and remain) part of the executable image.
+ */
+ uint32_t nop_instr; /* Must be 0x0 */
+ uint32_t magic; /* Magic number to identify header */
+ uint32_t hcrc; /* CRC of all of header excluding this field */
+
+ uint16_t hlen; /* Length of header in bytes */
+ uint16_t maj_rev; /* Major revision */
+ uint16_t min_rev; /* Minor revision */
+ uint16_t board_type; /* Board type that the image is for */
+
+ uint32_t dlen; /* Length of data (immediately following header) in bytes */
+ uint32_t dcrc; /* CRC of data */
+ uint64_t address; /* MIPS virtual address */
+ uint32_t flags;
+ uint16_t image_type; /* Defined in bootloader_image_t enum */
+ uint16_t resv0; /* pad */
+
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ uint32_t reserved4;
+
+ char comment_string[BOOTLOADER_HEADER_COMMENT_LEN]; /* Optional, for descriptive purposes */
+ char version_string[BOOTLOADER_HEADER_VERSION_LEN]; /* Optional, for descriptive purposes */
+} __attribute__((packed)) bootloader_header_t;
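
A sketch of how a loader might sanity-check a candidate header found during a
scan; a full check would also verify hcrc (over the header) and dcrc (over
the dlen data bytes). The helper name is illustrative:

    static int example_header_plausible(const bootloader_header_t *h)
    {
        return h->nop_instr == 0x0 &&
               h->magic == BOOTLOADER_HEADER_MAGIC &&
               h->hlen <= BOOTLOADER_HEADER_MAX_SIZE;
    }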
+
+
+
+/* Defines for flag field */
+#define BL_HEADER_FLAG_FAILSAFE (1)
+
+
+typedef enum
+{
+ BL_HEADER_IMAGE_UNKNOWN = 0x0,
+ BL_HEADER_IMAGE_STAGE2, /* Binary bootloader stage2 image (NAND boot) */
+ BL_HEADER_IMAGE_STAGE3, /* Binary bootloader stage3 image (NAND boot)*/
+ BL_HEADER_IMAGE_NOR, /* Binary bootloader for NOR boot */
+ BL_HEADER_IMAGE_PCIBOOT, /* Binary bootloader for PCI boot */
+ BL_HEADER_IMAGE_UBOOT_ENV, /* Environment for u-boot */
+ BL_HEADER_IMAGE_MAX,
+ /* Range for customer private use. Will not be used by Cavium Inc. */
+ BL_HEADER_IMAGE_CUST_RESERVED_MIN = 0x1000,
+ BL_HEADER_IMAGE_CUST_RESERVED_MAX = 0x1fff
+} bootloader_image_t;
+
+#endif /* __ASSEMBLY__ */
+
+/* Maximum address searched for NAND boot images and environments. This is used
+** by stage1 and stage2. */
+#define MAX_NAND_SEARCH_ADDR 0x400000
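
These constants imply the scan sketched below: probe every LOOKUP_STEP bytes
up to MAX_NAND_SEARCH_ADDR. example_read_header() is a hypothetical stage-1
NAND read, and example_header_plausible() is the check sketched earlier:

    static uint64_t example_find_image(void)
    {
        uint64_t addr;
        bootloader_header_t h;
        for (addr = 0; addr < MAX_NAND_SEARCH_ADDR; addr += LOOKUP_STEP)
        {
            example_read_header(addr, &h);      /* hypothetical reader */
            if (example_header_plausible(&h))
                return addr;                    /* candidate found */
        }
        return 0;                               /* nothing found */
    }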
+
+/* Maximum address to look for start of normal bootloader */
+#define MAX_NOR_SEARCH_ADDR 0x200000
+
+/* Defines for RAM based environment set by the host or the previous bootloader
+** in a chain boot configuration. */
+
+#define U_BOOT_RAM_ENV_ADDR (0x1000)
+#define U_BOOT_RAM_ENV_SIZE (0x1000)
+#define U_BOOT_RAM_ENV_CRC_SIZE (0x4)
+#define U_BOOT_RAM_ENV_ADDR_2 (U_BOOT_RAM_ENV_ADDR + U_BOOT_RAM_ENV_SIZE)
+
+#endif /* __CVMX_BOOTLOADER__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-bootloader.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-bootmem.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-bootmem.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-bootmem.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1188 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ * Simple allocate only memory allocator. Used to allocate memory at application
+ * start time.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#endif
+#include "cvmx.h"
+#include "cvmx-bootmem.h"
+#endif
+typedef uint32_t cvmx_spinlock_t;
+
+
+//#define DEBUG
+
+#define ULL unsigned long long
+#undef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
+#undef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+#define ALIGN_ADDR_UP(addr, align) (((addr) + (~(align))) & (align))
+
+/**
+ * This is the physical location of a cvmx_bootmem_desc_t
+ * structure in Octeon's memory. Note that due to addressing
+ * limits or the runtime environment it might not be possible to
+ * create a C pointer to this structure.
+ */
+static CVMX_SHARED uint64_t cvmx_bootmem_desc_addr = 0;
+
+/**
+ * This macro returns the size of a member of a structure.
+ * Logically it is the same as "sizeof(s::field)" in C++, but
+ * C lacks the "::" operator.
+ */
+#define SIZEOF_FIELD(s, field) sizeof(((s*)NULL)->field)
+
+/**
+ * This macro returns a member of the cvmx_bootmem_desc_t
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the cvmx_bootmem_desc_t to read.
+ * Regardless of the type of the field, the return type is always
+ * a uint64_t.
+ */
+#define CVMX_BOOTMEM_DESC_GET_FIELD(field) \
+ __cvmx_bootmem_desc_get(cvmx_bootmem_desc_addr, \
+ offsetof(cvmx_bootmem_desc_t, field), \
+ SIZEOF_FIELD(cvmx_bootmem_desc_t, field))
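
A sketch of the macro in use; major_version is a member declared in
cvmx-bootmem.h, and the expected value here is illustrative:

    static int example_desc_version_ok(void)
    {
        /* reads one member of the (possibly remote) descriptor */
        return CVMX_BOOTMEM_DESC_GET_FIELD(major_version) == 3;
    }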
+
+/**
+ * This macro writes a member of the cvmx_bootmem_desc_t
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the cvmx_bootmem_desc_t to write.
+ */
+#define CVMX_BOOTMEM_DESC_SET_FIELD(field, value) \
+ __cvmx_bootmem_desc_set(cvmx_bootmem_desc_addr, \
+ offsetof(cvmx_bootmem_desc_t, field), \
+ SIZEOF_FIELD(cvmx_bootmem_desc_t, field), value)
+
+/**
+ * This macro returns a member of the
+ * cvmx_bootmem_named_block_desc_t structure. These members can't
+ * be directly addressed as they might be in memory not directly
+ * reachable. In the case where bootmem is compiled with
+ * LINUX_HOST, the structure itself might be located on a remote
+ * Octeon. The argument "field" is the member name of the
+ * cvmx_bootmem_named_block_desc_t to read. Regardless of the type
+ * of the field, the return type is always a uint64_t. The "addr"
+ * parameter is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_GET_FIELD(addr, field) \
+ __cvmx_bootmem_desc_get(addr, \
+ offsetof(cvmx_bootmem_named_block_desc_t, field), \
+ SIZEOF_FIELD(cvmx_bootmem_named_block_desc_t, field))
+
+/**
+ * This macro writes a member of the cvmx_bootmem_named_block_desc_t
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the
+ * cvmx_bootmem_named_block_desc_t to write. The "addr" parameter
+ * is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_SET_FIELD(addr, field, value) \
+ __cvmx_bootmem_desc_set(addr, \
+ offsetof(cvmx_bootmem_named_block_desc_t, field), \
+ SIZEOF_FIELD(cvmx_bootmem_named_block_desc_t, field), value)
+
+/**
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to read only the needed memory.
+ *
+ * @param base 64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ * accessed.
+ * @param size Size of the structure member.
+ *
+ * @return Value of the structure member promoted into a uint64_t.
+ */
+static inline uint64_t __cvmx_bootmem_desc_get(uint64_t base, int offset, int size)
+{
+ base = (1ull << 63) | (base + offset);
+ switch (size)
+ {
+ case 4:
+ return cvmx_read64_uint32(base);
+ case 8:
+ return cvmx_read64_uint64(base);
+ default:
+ return 0;
+ }
+}
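+
+/* The (1ull << 63) OR above forms a MIPS XKPHYS address, which allows this
+** code to access any physical address directly, without a TLB mapping; the
+** locking code below relies on the same trick. */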
+
+/**
+ * This function is the implementation of the set macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to write only the needed memory.
+ *
+ * @param base 64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ * accessed.
+ * @param size Size of the structure member.
+ * @param value Value to write into the structure
+ */
+static inline void __cvmx_bootmem_desc_set(uint64_t base, int offset, int size, uint64_t value)
+{
+ base = (1ull << 63) | (base + offset);
+ switch (size)
+ {
+ case 4:
+ cvmx_write64_uint32(base, value);
+ break;
+ case 8:
+ cvmx_write64_uint64(base, value);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * This function retrieves the string name of a named block. It is
+ * more complicated than a simple memcpy() since the named block
+ * descriptor may not be directly accessible.
+ *
+ * @param addr Physical address of the named block descriptor
+ * @param str String to receive the named block string name
+ * @param len Length of the string buffer, which must match the length
+ * stored in the bootmem descriptor.
+ */
+static void CVMX_BOOTMEM_NAMED_GET_NAME(uint64_t addr, char *str, int len)
+{
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+ int l = len;
+ char *ptr = str;
+ addr |= (1ull << 63);
+ addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
+ while (l--)
+ *ptr++ = cvmx_read64_uint8(addr++);
+    str[len - 1] = 0;  /* stored names are null terminated within len bytes; stay in bounds */
+#else
+ extern void octeon_remote_read_mem(void *buffer, uint64_t physical_address, int length);
+ addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
+ octeon_remote_read_mem(str, addr, len);
+    str[len - 1] = 0;  /* terminate within the buffer rather than one past it */
+#endif
+}
+
+/**
+ * This function stores the string name of a named block. It is
+ * more complicated than a simple memcpy() since the named block
+ * descriptor may not be directly accessible.
+ *
+ * @param addr Physical address of the named block descriptor
+ * @param str String to store into the named block string name
+ * @param len Length of the string buffer, which must match the length
+ * stored in the bootmem descriptor.
+ */
+static void CVMX_BOOTMEM_NAMED_SET_NAME(uint64_t addr, const char *str, int len)
+{
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+ int l = len;
+ addr |= (1ull << 63);
+ addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
+ while (l--)
+ {
+ if (l)
+ cvmx_write64_uint8(addr++, *str++);
+ else
+ cvmx_write64_uint8(addr++, 0);
+ }
+#else
+ extern void octeon_remote_write_mem(uint64_t physical_address, const void *buffer, int length);
+ char zero = 0;
+ addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
+ octeon_remote_write_mem(addr, str, len-1);
+ octeon_remote_write_mem(addr+len-1, &zero, 1);
+#endif
+}
+
+/* See header file for descriptions of functions */
+
+/* Wrapper functions are provided for reading/writing the size and next block
+** values, as these may not be directly addressable (in 32 bit applications, for instance).
+*/
+/* Offsets of data elements in bootmem list, must match cvmx_bootmem_block_header_t */
+#define NEXT_OFFSET 0
+#define SIZE_OFFSET 8
+static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
+{
+ cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
+}
+static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
+{
+ cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
+}
+static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
+{
+ return(cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63)));
+}
+static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
+{
+ return(cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63)));
+}
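+
+/* Example (illustrative sketch): walking the free list with the wrappers
+** above, much as cvmx_bootmem_phy_available_mem() does further down.
+**
+**     uint64_t addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
+**     while (addr)
+**     {
+**         uint64_t size = cvmx_bootmem_phy_get_size(addr);
+**         addr = cvmx_bootmem_phy_get_next(addr);
+**     }
+*/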
+
+/**
+ * Check the version information on the bootmem descriptor
+ *
+ * @param exact_match
+ * Exact major version to check against. A zero means
+ * check that the version supports named blocks.
+ *
+ * @return Zero if the version is correct. Negative if the version is
+ * incorrect. Failures also cause a message to be displayed.
+ */
+static int __cvmx_bootmem_check_version(int exact_match)
+{
+ int major_version;
+#ifdef CVMX_BUILD_FOR_LINUX_HOST
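+    /* 0x48100 is presumably the fixed physical location where the bootloader
+    ** publishes the bootmem descriptor address for host-mode tools; it is
+    ** read here on first use. */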
+ if (!cvmx_bootmem_desc_addr)
+ cvmx_bootmem_desc_addr = cvmx_read64_uint64(0x48100);
+#endif
+ major_version = CVMX_BOOTMEM_DESC_GET_FIELD(major_version);
+ if ((major_version > 3) || (exact_match && major_version != exact_match))
+ {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: 0x%llx\n",
+ major_version, (int)CVMX_BOOTMEM_DESC_GET_FIELD(minor_version),
+ (ULL)cvmx_bootmem_desc_addr);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+/**
+ * Get the low level bootmem descriptor lock. If no locking
+ * is specified in the flags, then nothing is done.
+ *
+ * @param flags CVMX_BOOTMEM_FLAG_NO_LOCKING means this function should do
+ * nothing. This is used to support nested bootmem calls.
+ */
+static inline void __cvmx_bootmem_lock(uint32_t flags)
+{
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ {
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+        /* Unfortunately we can't use the normal cvmx-spinlock code, as the
+           memory for the bootmem descriptor may not be accessible via a C
+ pointer. We use a 64bit XKPHYS address to access the memory
+ directly */
+ uint64_t lock_addr = (1ull << 63) | (cvmx_bootmem_desc_addr + offsetof(cvmx_bootmem_desc_t, lock));
+ unsigned int tmp;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[tmp], 0(%[addr])\n"
+ " bnez %[tmp], 1b \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], 0(%[addr])\n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [tmp] "=&r" (tmp)
+ : [addr] "r" (lock_addr)
+ : "memory");
+#endif
+ }
+}
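+
+/* The inline assembly above is a standard MIPS ll/sc spin loop: load-linked
+** the lock word, spin while it is non-zero, then store-conditional a 1 and
+** retry from the top if the store fails. */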
+
+/**
+ * Release the low level bootmem descriptor lock. If no locking
+ * is specified in the flags, then nothing is done.
+ *
+ * @param flags CVMX_BOOTMEM_FLAG_NO_LOCKING means this function should do
+ * nothing. This is used to support nested bootmem calls.
+ */
+static inline void __cvmx_bootmem_unlock(uint32_t flags)
+{
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ {
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+        /* Unfortunately we can't use the normal cvmx-spinlock code, as the
+           memory for the bootmem descriptor may not be accessible via a C
+ pointer. We use a 64bit XKPHYS address to access the memory
+ directly */
+ uint64_t lock_addr = (1ull << 63) | (cvmx_bootmem_desc_addr + offsetof(cvmx_bootmem_desc_t, lock));
+
+ CVMX_SYNCW;
+ __asm__ __volatile__("sw $0, 0(%[addr])\n"
+ :: [addr] "r" (lock_addr)
+ : "memory");
+ CVMX_SYNCW;
+#endif
+ }
+}
+
+/* Some of the cvmx-bootmem functions dealing with C pointers are not supported
+ when we are compiling for CVMX_BUILD_FOR_LINUX_HOST. This ifndef removes
+ these functions when they aren't needed */
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+/* This function takes an address range and adjusts it as necessary to
+** match the ABI that is currently being used. This is required to ensure
+** that bootmem_alloc* functions only return valid pointers for 32 bit ABIs */
+static int __cvmx_validate_mem_range(uint64_t *min_addr_ptr, uint64_t *max_addr_ptr)
+{
+
+#if defined(__linux__) && defined(CVMX_ABI_N32)
+ {
+ extern uint64_t linux_mem32_min;
+ extern uint64_t linux_mem32_max;
+ /* For 32 bit Linux apps, we need to restrict the allocations to the range
+ ** of memory configured for access from userspace. Also, we need to add mappings
+ ** for the data structures that we access.*/
+
+ /* Narrow range requests to be bounded by the 32 bit limits. octeon_phy_mem_block_alloc()
+ ** will reject inconsistent req_size/range requests, so we don't repeat those checks here.
+ ** If max unspecified, set to 32 bit maximum. */
+ *min_addr_ptr = MIN(MAX(*min_addr_ptr, linux_mem32_min), linux_mem32_max);
+ if (!*max_addr_ptr)
+ *max_addr_ptr = linux_mem32_max;
+ else
+ *max_addr_ptr = MAX(MIN(*max_addr_ptr, linux_mem32_max), linux_mem32_min);
+ }
+#elif defined(CVMX_ABI_N32)
+ {
+ uint32_t max_phys = 0x0FFFFFFF; /* Max physical address when 1-1 mappings not used */
+#if CVMX_USE_1_TO_1_TLB_MAPPINGS
+ max_phys = 0x7FFFFFFF;
+#endif
+        /* We are running a standalone simple executive, so we need to limit the range
+ ** that we allocate from */
+
+ /* Narrow range requests to be bounded by the 32 bit limits. octeon_phy_mem_block_alloc()
+ ** will reject inconsistent req_size/range requests, so we don't repeat those checks here.
+ ** If max unspecified, set to 32 bit maximum. */
+ *min_addr_ptr = MIN(MAX(*min_addr_ptr, 0x0), max_phys);
+ if (!*max_addr_ptr)
+ *max_addr_ptr = max_phys;
+ else
+ *max_addr_ptr = MAX(MIN(*max_addr_ptr, max_phys), 0x0);
+ }
+#endif
+
+ return 0;
+}
+
+
+void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, uint64_t min_addr, uint64_t max_addr)
+{
+ int64_t address;
+
+ __cvmx_validate_mem_range(&min_addr, &max_addr);
+ address = cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);
+
+ if (address > 0)
+ return cvmx_phys_to_ptr(address);
+ else
+ return NULL;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_bootmem_alloc_range);
+#endif
+
+void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address, uint64_t alignment)
+{
+ return cvmx_bootmem_alloc_range(size, alignment, address, address + size);
+}
+
+
+void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
+{
+ return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_bootmem_alloc);
+#endif
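+
+/* Example (illustrative): a typical allocation; the size and alignment used
+** here are arbitrary.
+**
+**     void *buf = cvmx_bootmem_alloc(1 << 20, 128);
+**     if (buf == NULL)
+**         cvmx_dprintf("bootmem allocation failed\n");
+*/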
+
+void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name, void (*init)(void*))
+{
+ int64_t addr;
+ void *ptr;
+ uint64_t named_block_desc_addr;
+
+ __cvmx_bootmem_lock(0);
+
+ __cvmx_validate_mem_range(&min_addr, &max_addr);
+ named_block_desc_addr = cvmx_bootmem_phy_named_block_find(name, CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ if (named_block_desc_addr)
+ {
+ addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr, base_addr);
+ __cvmx_bootmem_unlock(0);
+ return cvmx_phys_to_ptr(addr);
+ }
+
+ addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, align, name, CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ if (addr < 0)
+ {
+ __cvmx_bootmem_unlock(0);
+ return NULL;
+ }
+ ptr = cvmx_phys_to_ptr(addr);
+ init(ptr);
+ __cvmx_bootmem_unlock(0);
+ return ptr;
+}
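+
+/* Example (illustrative): allocate-or-find a shared named block exactly
+** once; the block name and init callback below are hypothetical.
+**
+**     void *p = cvmx_bootmem_alloc_named_range_once(4096, 0, 0, 128,
+**                                                   "my_block", my_init);
+*/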
+
+static void *cvmx_bootmem_alloc_named_range_flags(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name, uint32_t flags)
+{
+ int64_t addr;
+
+ __cvmx_validate_mem_range(&min_addr, &max_addr);
+ addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, align, name, flags);
+ if (addr >= 0)
+ return cvmx_phys_to_ptr(addr);
+ else
+ return NULL;
+
+}
+
+void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name)
+{
+ return cvmx_bootmem_alloc_named_range_flags(size, min_addr, max_addr, align, name, 0);
+}
+
+void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, const char *name)
+{
+ return(cvmx_bootmem_alloc_named_range(size, address, address + size, 0, name));
+}
+
+void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, const char *name)
+{
+ return(cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name));
+}
+
+void *cvmx_bootmem_alloc_named_flags(uint64_t size, uint64_t alignment, const char *name, uint32_t flags)
+{
+ return cvmx_bootmem_alloc_named_range_flags(size, 0, 0, alignment, name, flags);
+}
+
+int cvmx_bootmem_free_named(const char *name)
+{
+ return(cvmx_bootmem_phy_named_block_free(name, 0));
+}
+#endif
+
+const cvmx_bootmem_named_block_desc_t *cvmx_bootmem_find_named_block(const char *name)
+{
+ /* FIXME: Returning a single static object is probably a bad thing */
+ static cvmx_bootmem_named_block_desc_t desc;
+ uint64_t named_addr = cvmx_bootmem_phy_named_block_find(name, 0);
+ if (named_addr)
+ {
+ desc.base_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr, base_addr);
+ desc.size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr, size);
+ strncpy(desc.name, name, sizeof(desc.name));
+ desc.name[sizeof(desc.name)-1] = 0;
+ return &desc;
+ }
+ else
+ return NULL;
+}
+
+void cvmx_bootmem_print_named(void)
+{
+ cvmx_bootmem_phy_named_block_print();
+}
+
+int cvmx_bootmem_init(uint64_t mem_desc_addr)
+{
+ /* Verify that the size of cvmx_spinlock_t meets our assumptions */
+ if (sizeof(cvmx_spinlock_t) != 4)
+ {
+ cvmx_dprintf("ERROR: Unexpected size of cvmx_spinlock_t\n");
+ return(-1);
+ }
+ if (!cvmx_bootmem_desc_addr)
+ cvmx_bootmem_desc_addr = mem_desc_addr;
+ return(0);
+}
+
+
+uint64_t cvmx_bootmem_available_mem(uint64_t min_block_size)
+{
+ return(cvmx_bootmem_phy_available_mem(min_block_size));
+}
+
+
+
+
+
+/*********************************************************************
+** The cvmx_bootmem_phy* functions below return 64 bit physical addresses,
+** and expose more features than the cvmx_bootmem_* functions above. These are
+** required for full memory space access in 32 bit applications, as well as for
+** using some advanced features.
+** Most applications should not need to use these.
+**
+**/
+
+
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t address_max, uint64_t alignment, uint32_t flags)
+{
+
+ uint64_t head_addr;
+ uint64_t ent_addr;
+    uint64_t prev_addr = 0; /* points to previous list entry; zero when the current entry is the head of the list */
+ uint64_t new_ent_addr = 0;
+ uint64_t desired_min_addr;
+ uint64_t alignment_mask = ~(alignment - 1);
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
+ (ULL)req_size, (ULL)address_min, (ULL)address_max, (ULL)alignment);
+#endif
+
+ if (__cvmx_bootmem_check_version(0))
+ goto error_out;
+
+ /* Do a variety of checks to validate the arguments. The allocator code will later assume
+ ** that these checks have been made. We validate that the requested constraints are not
+ ** self-contradictory before we look through the list of available memory
+ */
+
+ /* 0 is not a valid req_size for this allocator */
+ if (!req_size)
+ goto error_out;
+
+    /* Round req_size up to a multiple of the minimum alignment size */
+ req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) & ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+
+ /* Enforce minimum alignment (this also keeps the minimum free block
+    ** req_size the same as the alignment req_size) */
+ if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
+ {
+ alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
+ }
+ alignment_mask = ~(alignment - 1);
+
+ /* Adjust address minimum based on requested alignment (round up to meet alignment). Do this here so we can
+ ** reject impossible requests up front. (NOP for address_min == 0) */
+ if (alignment)
+ address_min = (address_min + (alignment - 1)) & ~(alignment - 1);
+
+ /* Convert !0 address_min and 0 address_max to special case of range that specifies an exact
+    ** memory block to allocate. Do this before other checks and adjustments so that this transformation will be validated */
+ if (address_min && !address_max)
+ address_max = address_min + req_size;
+ else if (!address_min && !address_max)
+ address_max = ~0ull; /* If no limits given, use max limits */
+
+ /* Reject inconsistent args. We have adjusted these, so this may fail due to our internal changes
+ ** even if this check would pass for the values the user supplied. */
+ if (req_size > address_max - address_min)
+ goto error_out;
+
+ /* Walk through the list entries - first fit found is returned */
+
+ __cvmx_bootmem_lock(flags);
+ head_addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
+ ent_addr = head_addr;
+ while (ent_addr)
+ {
+ uint64_t usable_base, usable_max;
+ uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);
+
+ if (cvmx_bootmem_phy_get_next(ent_addr) && ent_addr > cvmx_bootmem_phy_get_next(ent_addr))
+ {
+ cvmx_dprintf("Internal bootmem_alloc() error: ent: 0x%llx, next: 0x%llx\n",
+ (ULL)ent_addr, (ULL)cvmx_bootmem_phy_get_next(ent_addr));
+ goto error_out;
+ }
+
+        /* Determine if this is an entry that can satisfy the request */
+ /* Check to make sure entry is large enough to satisfy request */
+ usable_base = ALIGN_ADDR_UP(MAX(address_min, ent_addr), alignment_mask);
+ usable_max = MIN(address_max, ent_addr + ent_size);
+ /* We should be able to allocate block at address usable_base */
+
+ desired_min_addr = usable_base;
+
+ /* Determine if request can be satisfied from the current entry */
+ if ((((ent_addr + ent_size) > usable_base && ent_addr < address_max))
+ && req_size <= usable_max - usable_base)
+ {
+ /* We have found an entry that has room to satisfy the request, so allocate it from this entry */
+
+            /* If CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from the end of this block
+ ** rather than the beginning */
+ if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC)
+ {
+ desired_min_addr = usable_max - req_size;
+ /* Align desired address down to required alignment */
+ desired_min_addr &= alignment_mask;
+ }
+
+ /* Match at start of entry */
+ if (desired_min_addr == ent_addr)
+ {
+ if (req_size < ent_size)
+ {
+ /* big enough to create a new block from top portion of block */
+ new_ent_addr = ent_addr + req_size;
+ cvmx_bootmem_phy_set_next(new_ent_addr, cvmx_bootmem_phy_get_next(ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr, ent_size - req_size);
+
+ /* Adjust next pointer as following code uses this */
+ cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
+ }
+
+ /* adjust prev ptr or head to remove this entry from list */
+ if (prev_addr)
+ {
+ cvmx_bootmem_phy_set_next(prev_addr, cvmx_bootmem_phy_get_next(ent_addr));
+ }
+ else
+ {
+ /* head of list being returned, so update head ptr */
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, cvmx_bootmem_phy_get_next(ent_addr));
+ }
+ __cvmx_bootmem_unlock(flags);
+ return(desired_min_addr);
+ }
+
+
+ /* block returned doesn't start at beginning of entry, so we know
+ ** that we will be splitting a block off the front of this one. Create a new block
+ ** from the beginning, add to list, and go to top of loop again.
+ **
+ ** create new block from high portion of block, so that top block
+ ** starts at desired addr
+ **/
+ new_ent_addr = desired_min_addr;
+ cvmx_bootmem_phy_set_next(new_ent_addr, cvmx_bootmem_phy_get_next(ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr, cvmx_bootmem_phy_get_size(ent_addr) - (desired_min_addr - ent_addr));
+ cvmx_bootmem_phy_set_size(ent_addr, desired_min_addr - ent_addr);
+ cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
+ /* Loop again to handle actual alloc from new block */
+ }
+
+ prev_addr = ent_addr;
+ ent_addr = cvmx_bootmem_phy_get_next(ent_addr);
+ }
+error_out:
+ /* We didn't find anything, so return error */
+ __cvmx_bootmem_unlock(flags);
+ return(-1);
+}
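+
+/* Summary of cvmx_bootmem_phy_alloc() above: a first-fit walk of the
+** address-sorted, singly linked free list. A match at the start of an entry
+** shrinks or removes that entry; a match inside an entry first splits it,
+** and the next loop iteration completes the allocation from the newly
+** created block. */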
+
+
+
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
+{
+ uint64_t cur_addr;
+ uint64_t prev_addr = 0; /* zero is invalid */
+ int retval = 0;
+
+#ifdef DEBUG
+ cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n", (ULL)phy_addr, (ULL)size);
+#endif
+ if (__cvmx_bootmem_check_version(0))
+ return(0);
+
+ /* 0 is not a valid size for this allocator */
+ if (!size)
+ return(0);
+
+
+ __cvmx_bootmem_lock(flags);
+ cur_addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
+ if (cur_addr == 0 || phy_addr < cur_addr)
+ {
+ /* add at front of list - special case with changing head ptr */
+ if (cur_addr && phy_addr + size > cur_addr)
+ goto bootmem_free_done; /* error, overlapping section */
+ else if (phy_addr + size == cur_addr)
+ {
+ /* Add to front of existing first block */
+ cvmx_bootmem_phy_set_next(phy_addr, cvmx_bootmem_phy_get_next(cur_addr));
+ cvmx_bootmem_phy_set_size(phy_addr, cvmx_bootmem_phy_get_size(cur_addr) + size);
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, phy_addr);
+
+ }
+ else
+ {
+ /* New block before first block */
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr); /* OK if cur_addr is 0 */
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, phy_addr);
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* Find place in list to add block */
+ while (cur_addr && phy_addr > cur_addr)
+ {
+ prev_addr = cur_addr;
+ cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
+ }
+
+ if (!cur_addr)
+ {
+ /* We have reached the end of the list, add on to end, checking
+ ** to see if we need to combine with last block
+ **/
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) == phy_addr)
+ {
+ cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size(prev_addr) + size);
+ }
+ else
+ {
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, 0);
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ }
+ else
+ {
+ /* insert between prev and cur nodes, checking for merge with either/both */
+
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) == phy_addr)
+ {
+ /* Merge with previous */
+ cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size(prev_addr) + size);
+ if (phy_addr + size == cur_addr)
+ {
+ /* Also merge with current */
+ cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size(cur_addr) + cvmx_bootmem_phy_get_size(prev_addr));
+ cvmx_bootmem_phy_set_next(prev_addr, cvmx_bootmem_phy_get_next(cur_addr));
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ }
+ else if (phy_addr + size == cur_addr)
+ {
+ /* Merge with current */
+ cvmx_bootmem_phy_set_size(phy_addr, cvmx_bootmem_phy_get_size(cur_addr) + size);
+ cvmx_bootmem_phy_set_next(phy_addr, cvmx_bootmem_phy_get_next(cur_addr));
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* It is a standalone block, add in between prev and cur */
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+
+
+ }
+ retval = 1;
+
+bootmem_free_done:
+ __cvmx_bootmem_unlock(flags);
+ return(retval);
+
+}
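+
+/* Summary of __cvmx_bootmem_phy_free() above: insert the block into the
+** address-sorted free list, merging with the previous and/or following
+** entry whenever the regions are exactly adjacent, so the list never holds
+** two contiguous free blocks. */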
+
+
+
+void cvmx_bootmem_phy_list_print(void)
+{
+ uint64_t addr;
+
+ addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
+ cvmx_dprintf("\n\n\nPrinting bootmem block list, descriptor: 0x%llx, head is 0x%llx\n",
+ (ULL)cvmx_bootmem_desc_addr, (ULL)addr);
+ cvmx_dprintf("Descriptor version: %d.%d\n",
+ (int)CVMX_BOOTMEM_DESC_GET_FIELD(major_version),
+ (int)CVMX_BOOTMEM_DESC_GET_FIELD(minor_version));
+ if (CVMX_BOOTMEM_DESC_GET_FIELD(major_version) > 3)
+ {
+ cvmx_dprintf("Warning: Bootmem descriptor version is newer than expected\n");
+ }
+ if (!addr)
+ {
+ cvmx_dprintf("mem list is empty!\n");
+ }
+ while (addr)
+ {
+ cvmx_dprintf("Block address: 0x%08llx, size: 0x%08llx, next: 0x%08llx\n",
+ (ULL)addr,
+ (ULL)cvmx_bootmem_phy_get_size(addr),
+ (ULL)cvmx_bootmem_phy_get_next(addr));
+ addr = cvmx_bootmem_phy_get_next(addr);
+ }
+ cvmx_dprintf("\n\n");
+
+}
+
+
+uint64_t cvmx_bootmem_phy_available_mem(uint64_t min_block_size)
+{
+ uint64_t addr;
+
+ uint64_t available_mem = 0;
+
+ __cvmx_bootmem_lock(0);
+ addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
+ while (addr)
+ {
+ if (cvmx_bootmem_phy_get_size(addr) >= min_block_size)
+ available_mem += cvmx_bootmem_phy_get_size(addr);
+ addr = cvmx_bootmem_phy_get_next(addr);
+ }
+ __cvmx_bootmem_unlock(0);
+ return(available_mem);
+
+}
+
+
+
+uint64_t cvmx_bootmem_phy_named_block_find(const char *name, uint32_t flags)
+{
+ uint64_t result = 0;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
+#endif
+ __cvmx_bootmem_lock(flags);
+ if (!__cvmx_bootmem_check_version(3))
+ {
+ int i;
+ uint64_t named_block_array_addr = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_array_addr);
+ int num_blocks = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_num_blocks);
+ int name_length = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len);
+ uint64_t named_addr = named_block_array_addr;
+ for (i = 0; i < num_blocks; i++)
+ {
+ uint64_t named_size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr, size);
+ if (name && named_size)
+ {
+ char name_tmp[name_length];
+ CVMX_BOOTMEM_NAMED_GET_NAME(named_addr, name_tmp, name_length);
+ if (!strncmp(name, name_tmp, name_length - 1))
+ {
+ result = named_addr;
+ break;
+ }
+ }
+ else if (!name && !named_size)
+ {
+ result = named_addr;
+ break;
+ }
+ named_addr += sizeof(cvmx_bootmem_named_block_desc_t);
+ }
+ }
+ __cvmx_bootmem_unlock(flags);
+ return result;
+}
+
+int cvmx_bootmem_phy_named_block_free(const char *name, uint32_t flags)
+{
+ uint64_t named_block_addr;
+
+ if (__cvmx_bootmem_check_version(3))
+ return(0);
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
+#endif
+
+ /* Take lock here, as name lookup/block free/name free need to be atomic */
+ __cvmx_bootmem_lock(flags);
+
+ named_block_addr = cvmx_bootmem_phy_named_block_find(name, CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (named_block_addr)
+ {
+ uint64_t named_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, base_addr);
+ uint64_t named_size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, size);
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s, base: 0x%llx, size: 0x%llx\n",
+ name, (ULL)named_addr, (ULL)named_size);
+#endif
+ __cvmx_bootmem_phy_free(named_addr, named_size, CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ /* Set size to zero to indicate block not used. */
+ CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_addr, size, 0);
+ }
+ __cvmx_bootmem_unlock(flags);
+ return(!!named_block_addr); /* 0 on failure, 1 on success */
+}
+
+
+
+
+
+int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, const char *name, uint32_t flags)
+{
+ int64_t addr_allocated;
+ uint64_t named_block_desc_addr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: 0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
+ (ULL)size,
+ (ULL)min_addr,
+ (ULL)max_addr,
+ (ULL)alignment,
+ name);
+#endif
+
+ if (__cvmx_bootmem_check_version(3))
+ return(-1);
+
+ /* Take lock here, as name lookup/block alloc/name add need to be atomic */
+
+ __cvmx_bootmem_lock(flags);
+
+ named_block_desc_addr = cvmx_bootmem_phy_named_block_find(name, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (named_block_desc_addr)
+ {
+ __cvmx_bootmem_unlock(flags);
+ return(-1);
+ }
+
+ /* Get pointer to first available named block descriptor */
+ named_block_desc_addr = cvmx_bootmem_phy_named_block_find(NULL, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (!named_block_desc_addr)
+ {
+ __cvmx_bootmem_unlock(flags);
+ return(-1);
+ }
+
+    /* Round size up to a multiple of the minimum alignment size.
+    ** We need the actual size allocated to allow for blocks to be coalesced
+ ** when they are freed. The alloc routine does the same rounding up
+ ** on all allocations. */
+ size = (size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) & ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+ addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (addr_allocated >= 0)
+ {
+ CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_desc_addr, base_addr, addr_allocated);
+ CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_desc_addr, size, size);
+ CVMX_BOOTMEM_NAMED_SET_NAME(named_block_desc_addr, name, CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len));
+ }
+
+ __cvmx_bootmem_unlock(flags);
+ return(addr_allocated);
+}
+
+
+
+
+void cvmx_bootmem_phy_named_block_print(void)
+{
+ int i;
+ int printed = 0;
+
+ uint64_t named_block_array_addr = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_array_addr);
+ int num_blocks = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_num_blocks);
+ int name_length = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len);
+ uint64_t named_block_addr = named_block_array_addr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_print, desc addr: 0x%llx\n",
+ (ULL)cvmx_bootmem_desc_addr);
+#endif
+ if (__cvmx_bootmem_check_version(3))
+ return;
+ cvmx_dprintf("List of currently allocated named bootmem blocks:\n");
+ for (i = 0; i < num_blocks; i++)
+ {
+ uint64_t named_size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, size);
+ if (named_size)
+ {
+ char name_tmp[name_length];
+ uint64_t named_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, base_addr);
+ CVMX_BOOTMEM_NAMED_GET_NAME(named_block_addr, name_tmp, name_length);
+ printed++;
+ cvmx_dprintf("Name: %s, address: 0x%08llx, size: 0x%08llx, index: %d\n",
+ name_tmp, (ULL)named_addr, (ULL)named_size, i);
+ }
+ named_block_addr += sizeof(cvmx_bootmem_named_block_desc_t);
+ }
+ if (!printed)
+ {
+ cvmx_dprintf("No named bootmem blocks exist.\n");
+ }
+
+}
+
+
+int64_t cvmx_bootmem_phy_mem_list_init(uint64_t mem_size, uint32_t low_reserved_bytes, cvmx_bootmem_desc_t *desc_buffer)
+{
+ uint64_t cur_block_addr;
+ int64_t addr;
+ int i;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_mem_list_init (arg desc ptr: %p, cvmx_bootmem_desc: 0x%llx)\n",
+ desc_buffer, (ULL)cvmx_bootmem_desc_addr);
+#endif
+
+ /* Descriptor buffer needs to be in 32 bit addressable space to be compatible with
+ ** 32 bit applications */
+ if (!desc_buffer)
+ {
+ cvmx_dprintf("ERROR: no memory for cvmx_bootmem descriptor provided\n");
+ return 0;
+ }
+
+ if (mem_size > OCTEON_MAX_PHY_MEM_SIZE)
+ {
+ mem_size = OCTEON_MAX_PHY_MEM_SIZE;
+ cvmx_dprintf("ERROR: requested memory size too large, truncating to maximum size\n");
+ }
+
+ if (cvmx_bootmem_desc_addr)
+ return 1;
+
+ /* Initialize cvmx pointer to descriptor */
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+ cvmx_bootmem_init(cvmx_ptr_to_phys(desc_buffer));
+#else
+ cvmx_bootmem_init((unsigned long)desc_buffer);
+#endif
+
+ /* Fill the bootmem descriptor */
+ CVMX_BOOTMEM_DESC_SET_FIELD(lock, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(flags, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(major_version, CVMX_BOOTMEM_DESC_MAJ_VER);
+ CVMX_BOOTMEM_DESC_SET_FIELD(minor_version, CVMX_BOOTMEM_DESC_MIN_VER);
+ CVMX_BOOTMEM_DESC_SET_FIELD(app_data_addr, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(app_data_size, 0);
+
+ /* Set up global pointer to start of list, exclude low 64k for exception vectors, space for global descriptor */
+ cur_block_addr = (OCTEON_DDR0_BASE + low_reserved_bytes);
+
+ if (mem_size <= OCTEON_DDR0_SIZE)
+ {
+ __cvmx_bootmem_phy_free(cur_block_addr, mem_size - low_reserved_bytes, 0);
+ goto frees_done;
+ }
+
+ __cvmx_bootmem_phy_free(cur_block_addr, OCTEON_DDR0_SIZE - low_reserved_bytes, 0);
+
+ mem_size -= OCTEON_DDR0_SIZE;
+
+ /* Add DDR2 block next if present */
+ if (mem_size > OCTEON_DDR1_SIZE)
+ {
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
+ __cvmx_bootmem_phy_free(OCTEON_DDR2_BASE, mem_size - OCTEON_DDR1_SIZE, 0);
+ }
+ else
+ {
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
+
+ }
+frees_done:
+
+ /* Initialize the named block structure */
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_name_len, CVMX_BOOTMEM_NAME_LEN);
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_num_blocks, CVMX_BOOTMEM_NUM_NAMED_BLOCKS);
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_array_addr, 0);
+
+ /* Allocate this near the top of the low 256 MBytes of memory */
+    addr = cvmx_bootmem_phy_alloc(CVMX_BOOTMEM_NUM_NAMED_BLOCKS * sizeof(cvmx_bootmem_named_block_desc_t), 0, 0x10000000, 0, CVMX_BOOTMEM_FLAG_END_ALLOC);
+ if (addr >= 0)
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_array_addr, addr);
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_mem_list_init: named_block_array_addr: 0x%llx)\n",
+ (ULL)addr);
+#endif
+    if (addr < 0)  /* cvmx_bootmem_phy_alloc() returns -1 on failure */
+ {
+ cvmx_dprintf("FATAL ERROR: unable to allocate memory for bootmem descriptor!\n");
+ return(0);
+ }
+ for (i=0; i<CVMX_BOOTMEM_NUM_NAMED_BLOCKS; i++)
+ {
+ CVMX_BOOTMEM_NAMED_SET_FIELD(addr, base_addr, 0);
+ CVMX_BOOTMEM_NAMED_SET_FIELD(addr, size, 0);
+ addr += sizeof(cvmx_bootmem_named_block_desc_t);
+ }
+
+ return(1);
+}
+
+
+void cvmx_bootmem_lock(void)
+{
+ __cvmx_bootmem_lock(0);
+}
+
+void cvmx_bootmem_unlock(void)
+{
+ __cvmx_bootmem_unlock(0);
+}
+
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+void *__cvmx_bootmem_internal_get_desc_ptr(void)
+{
+ return cvmx_phys_to_ptr(cvmx_bootmem_desc_addr);
+}
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-bootmem.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-bootmem.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-bootmem.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-bootmem.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,489 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ * Simple allocate only memory allocator. Used to allocate memory at application
+ * start time.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifndef __CVMX_BOOTMEM_H__
+#define __CVMX_BOOTMEM_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_BOOTMEM_NAME_LEN 128 /* Must be multiple of 8, changing breaks ABI */
+#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64 /* Can change without breaking ABI */
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull) /* minimum alignment of bootmem alloced blocks */
+
+/* Flags for cvmx_bootmem_phy_mem* functions */
+#define CVMX_BOOTMEM_FLAG_END_ALLOC (1 << 0) /* Allocate from end of block instead of beginning */
+#define CVMX_BOOTMEM_FLAG_NO_LOCKING (1 << 1) /* Don't do any locking. */
+
+/* Real physical addresses of memory regions */
+#define OCTEON_DDR0_BASE (0x0ULL)
+#define OCTEON_DDR0_SIZE (0x010000000ULL)
+#define OCTEON_DDR1_BASE ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 0x20000000ULL : 0x410000000ULL)
+#define OCTEON_DDR1_SIZE (0x010000000ULL)
+#define OCTEON_DDR2_BASE ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 0x30000000ULL : 0x20000000ULL)
+#define OCTEON_DDR2_SIZE ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 0x7d0000000ULL : 0x3e0000000ULL)
+#define OCTEON_MAX_PHY_MEM_SIZE ((OCTEON_IS_MODEL(OCTEON_CN68XX)) ? 128*1024*1024*1024ULL : (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 32*1024*1024*1024ull : 16*1024*1024*1024ULL)
+
+/* First bytes of each free physical block of memory contain this structure,
+ * which is used to maintain the free memory list. Since the bootloader is
+ * only 32 bits, the application init code converts addresses to full
+ * 64 bit addresses before the application starts.
+ */
+typedef struct
+{
+ /* Note: these are referenced from assembly routines in the bootloader, so this structure
+ ** should not be changed without changing those routines as well. */
+ uint64_t next_block_addr;
+ uint64_t size;
+
+} cvmx_bootmem_block_header_t;
+
+
+/* Structure for named memory blocks
+** Number of descriptors
+** available can be changed without affecting compatibility,
+** but name length changes require a bump in the bootmem
+** descriptor version
+** Note: This structure must be naturally 64 bit aligned, as a single
+** memory image will be used by both 32 and 64 bit programs.
+*/
+struct cvmx_bootmem_named_block_desc
+{
+ uint64_t base_addr; /**< Base address of named block */
+ uint64_t size; /**< Size actually allocated for named block (may differ from requested) */
+ char name[CVMX_BOOTMEM_NAME_LEN]; /**< name of named block */
+};
+
+typedef struct cvmx_bootmem_named_block_desc cvmx_bootmem_named_block_desc_t;
+
+/* Current descriptor versions */
+#define CVMX_BOOTMEM_DESC_MAJ_VER 3 /* CVMX bootmem descriptor major version */
+#define CVMX_BOOTMEM_DESC_MIN_VER 0 /* CVMX bootmem descriptor minor version */
+
+/* First three members of cvmx_bootmem_desc_t are left in original
+** positions for backwards compatibility.
+*/
+typedef struct
+{
+ uint32_t lock; /**< spinlock to control access to list */
+ uint32_t flags; /**< flags for indicating various conditions */
+ uint64_t head_addr;
+
+    uint32_t major_version; /**< incremented when incompatible changes are made */
+    uint32_t minor_version; /**< incremented when compatible changes are made; reset to zero when the major version is incremented */
+ uint64_t app_data_addr;
+ uint64_t app_data_size;
+
+ uint32_t named_block_num_blocks; /**< number of elements in named blocks array */
+ uint32_t named_block_name_len; /**< length of name array in bootmem blocks */
+ uint64_t named_block_array_addr; /**< address of named memory block descriptors */
+
+} cvmx_bootmem_desc_t;
+
+
+/**
+ * Initialize the boot alloc memory structures. This is
+ * normally called inside of cvmx_user_app_init()
+ *
+ * @param mem_desc_addr Address of the free memory list
+ * @return 0 on success, -1 on failure
+ */
+extern int cvmx_bootmem_init(uint64_t mem_desc_addr);
+
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader.
+ * This is an allocate-only algorithm, so freeing memory is not possible.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param alignment Alignment required - must be power of 2
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment);
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader at a specific
+ * address. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated at the specified address.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param address Physical address to allocate memory at. If this memory is not
+ * available, the allocation fails.
+ * @param alignment Alignment required - must be power of 2
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address, uint64_t alignment);
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader within a specified
+ * address range. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated in the requested range.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param min_addr defines the minimum address of the range
+ * @param max_addr defines the maximum address of the range
+ * @param alignment Alignment required - must be power of 2
+ * @param flags Flags to control options for the allocation.
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_range_flags(uint64_t size, uint64_t alignment, uint64_t min_addr, uint64_t max_addr, uint32_t flags);
+
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader within a specified
+ * address range. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated in the requested range.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param min_addr defines the minimum address of the range
+ * @param max_addr defines the maximum address of the range
+ * @param alignment Alignment required - must be power of 2
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, uint64_t min_addr, uint64_t max_addr);
+
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_desc_t structure)
+ * Named blocks can later be freed.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param alignment Alignment required - must be power of 2
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, const char *name);
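+
+/*
+ * Example (illustrative): the size, alignment and name below are arbitrary.
+ *
+ *   void *p = cvmx_bootmem_alloc_named(4096, 128, "my_block");
+ */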
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_desc_t structure)
+ * Named blocks can later be freed.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param alignment Alignment required - must be power of 2
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_named_flags(uint64_t size, uint64_t alignment, const char *name, uint32_t flags);
+
+
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_desc_t structure)
+ * Named blocks can later be freed.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param address Physical address to allocate memory at. If this memory is not
+ * available, the allocation fails.
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, const char *name);
+
+
+
+/**
+ * Allocate a block of memory from a specific range of the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_desc_t structure)
+ * Named blocks can later be freed.
+ * If request cannot be satisfied within the address range specified, NULL is returned
+ *
+ * @param size Size in bytes of block to allocate
+ * @param min_addr minimum address of range
+ * @param max_addr maximum address of range
+ * @param align Alignment of memory to be allocated. (must be a power of 2)
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name);
+
+/**
+ * Allocate if needed a block of memory from a specific range of the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_desc_t structure)
+ * Named blocks can later be freed.
+ * If the requested named block is already allocated, the pointer to the existing block is returned.
+ * If request cannot be satisfied within the address range specified, NULL is returned
+ *
+ * @param size Size in bytes of block to allocate
+ * @param min_addr minimum address of range
+ * @param max_addr maximum address of range
+ * @param align Alignment of memory to be allocated. (must be a power of 2)
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ * @param init Initialization function
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name, void (*init)(void*));
+
+/**
+ * Frees a previously allocated named bootmem block.
+ *
+ * @param name name of block to free
+ *
+ * @return 0 on failure,
+ * !0 on success
+ */
+extern int cvmx_bootmem_free_named(const char *name);
+
+
+/**
+ * Finds a named bootmem block by name.
+ *
+ * @param name name of block to find
+ *
+ * @return pointer to named block descriptor on success
+ * NULL on failure
+ */
+const cvmx_bootmem_named_block_desc_t *cvmx_bootmem_find_named_block(const char *name);
+
+
+
+/**
+ * Returns the size of available memory in bytes, only
+ * counting blocks that are at least as big as the minimum block
+ * size.
+ *
+ * @param min_block_size
+ * Minimum block size to count in total.
+ *
+ * @return Number of bytes available for allocation that meet the block size requirement
+ */
+uint64_t cvmx_bootmem_available_mem(uint64_t min_block_size);
+
+
+
+/**
+ * Prints out the list of named blocks that have been allocated
+ * along with their addresses and sizes.
+ * This is primarily used for debugging purposes
+ */
+void cvmx_bootmem_print_named(void);
+
+
+/**
+ * Allocates a block of physical memory from the free list, at (optional) requested address and alignment.
+ *
+ * @param req_size size of region to allocate. All requests are rounded up to a multiple of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes
+ * @param address_min
+ * Minimum address that block can occupy.
+ * @param address_max
+ *                   Specifies the maximum address (inclusive) that the allocation can use; the allocated block must end at or below this address.
+ * @param alignment Requested alignment of the block. If this alignment cannot be met, the allocation fails.
+ * This must be a power of 2.
+ * (Note: Alignment of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and internally enforced. Requested alignments of
+ * less than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return physical address of block allocated, or -1 on failure
+ */
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t address_max, uint64_t alignment, uint32_t flags);
+
+
+
+/**
+ * Allocates a named block of physical memory from the free list, at (optional) requested address and alignment.
+ *
+ * @param size size of region to allocate. All requests are rounded up to a multiple of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes
+ * @param min_addr
+ * Minimum address that block can occupy.
+ * @param max_addr
+ *                   Specifies the maximum address (inclusive) that the allocation can use; the allocated block must end at or below this address.
+ * @param alignment Requested alignment of the block. If this alignment cannot be met, the allocation fails.
+ * This must be a power of 2.
+ * (Note: Alignment of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and internally enforced. Requested alignments of
+ * less than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * @param name name to assign to named block
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return physical address of block allocated, or -1 on failure
+ */
+int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, const char *name, uint32_t flags);
+
+
+/**
+ * Finds a named memory block by name.
+ * Also used for finding an unused entry in the named block table.
+ *
+ * @param name Name of memory block to find.
+ * If NULL pointer given, then finds unused descriptor, if available.
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return Physical address of the memory block descriptor, zero if not
+ * found. If zero returned when name parameter is NULL, then no
+ * memory block descriptors are available.
+ */
+uint64_t cvmx_bootmem_phy_named_block_find(const char *name, uint32_t flags);
+
+
+/**
+ * Returns the size of available memory in bytes, only
+ * counting blocks that are at least as big as the minimum block
+ * size.
+ *
+ * @param min_block_size
+ * Minimum block size to count in total.
+ *
+ * @return Number of bytes available for allocation that meet the block size requirement
+ */
+uint64_t cvmx_bootmem_phy_available_mem(uint64_t min_block_size);
+
+/**
+ * Frees a named block.
+ *
+ * @param name name of block to free
+ * @param flags flags for passing options
+ *
+ * @return 0 on failure
+ * 1 on success
+ */
+int cvmx_bootmem_phy_named_block_free(const char *name, uint32_t flags);
+
+/**
+ * Frees a block to the bootmem allocator list. This must
+ * be used with care, as the size provided must match the size
+ * of the block that was allocated, or the list will become
+ * corrupted.
+ *
+ * IMPORTANT: This is only intended to be used as part of named block
+ * frees and initial population of the free memory list.
+ *
+ *
+ * @param phy_addr physical address of block
+ * @param size size of block in bytes.
+ * @param flags flags for passing options
+ *
+ * @return 1 on success,
+ * 0 on failure
+ */
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags);
+
+
+/**
+ * Prints the list of currently allocated named blocks
+ *
+ */
+void cvmx_bootmem_phy_named_block_print(void);
+
+
+/**
+ * Prints the list of available memory.
+ *
+ */
+void cvmx_bootmem_phy_list_print(void);
+
+
+
+/**
+ * This function initializes the free memory list used by cvmx_bootmem.
+ * This must be called before any allocations can be done.
+ *
+ * @param mem_size Total memory available, in bytes
+ * @param low_reserved_bytes
+ * Number of bytes to reserve (leave out of free list) at address 0x0.
+ * @param desc_buffer
+ * Buffer for the bootmem descriptor. This must be a 32 bit addressable
+ * address.
+ *
+ * @return 1 on success
+ * 0 on failure
+ */
+int64_t cvmx_bootmem_phy_mem_list_init(uint64_t mem_size, uint32_t low_reserved_bytes, cvmx_bootmem_desc_t *desc_buffer);
+
+/**
+ * Locks the bootmem allocator. This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_lock(void);
+
+/**
+ * Unlocks the bootmem allocator. This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_unlock(void);
+
+/**
+ * Internal use function to get the current descriptor pointer.
+ */
+void *__cvmx_bootmem_internal_get_desc_ptr(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_BOOTMEM_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-bootmem.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ciu-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ciu-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ciu-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,13084 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-ciu-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon ciu.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_CIU_DEFS_H__
+#define __CVMX_CIU_DEFS_H__
+
+#define CVMX_CIU_BIST (CVMX_ADD_IO_SEG(0x0001070000000730ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_BLOCK_INT CVMX_CIU_BLOCK_INT_FUNC()
+static inline uint64_t CVMX_CIU_BLOCK_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_CIU_BLOCK_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000007C0ull);
+}
+#else
+#define CVMX_CIU_BLOCK_INT (CVMX_ADD_IO_SEG(0x00010700000007C0ull))
+#endif
+#define CVMX_CIU_DINT (CVMX_ADD_IO_SEG(0x0001070000000720ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_IOX_INT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_CIU_EN2_IOX_INT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000A600ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_CIU_EN2_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x000107000000A600ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_IOX_INT_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_CIU_EN2_IOX_INT_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000CE00ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_CIU_EN2_IOX_INT_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CE00ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_IOX_INT_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_CIU_EN2_IOX_INT_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000AE00ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_CIU_EN2_IOX_INT_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AE00ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_PPX_IP2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_EN2_PPX_IP2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000A000ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_EN2_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x000107000000A000ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_PPX_IP2_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_EN2_PPX_IP2_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000C800ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_EN2_PPX_IP2_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000C800ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_PPX_IP2_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_EN2_PPX_IP2_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000A800ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_EN2_PPX_IP2_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000A800ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_PPX_IP3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_EN2_PPX_IP3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000A200ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_EN2_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x000107000000A200ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_PPX_IP3_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_EN2_PPX_IP3_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000CA00ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_EN2_PPX_IP3_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CA00ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_PPX_IP3_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_EN2_PPX_IP3_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000AA00ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_EN2_PPX_IP3_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AA00ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_PPX_IP4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_EN2_PPX_IP4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000A400ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_EN2_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x000107000000A400ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_PPX_IP4_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_EN2_PPX_IP4_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000CC00ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_EN2_PPX_IP4_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CC00ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_EN2_PPX_IP4_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_EN2_PPX_IP4_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000107000000AC00ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_EN2_PPX_IP4_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AC00ull) + ((offset) & 15) * 8)
+#endif
+#define CVMX_CIU_FUSE (CVMX_ADD_IO_SEG(0x0001070000000728ull))
+#define CVMX_CIU_GSTOP (CVMX_ADD_IO_SEG(0x0001070000000710ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_INT33_SUM0 CVMX_CIU_INT33_SUM0_FUNC()
+static inline uint64_t CVMX_CIU_INT33_SUM0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_CIU_INT33_SUM0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000110ull);
+}
+#else
+#define CVMX_CIU_INT33_SUM0 (CVMX_ADD_IO_SEG(0x0001070000000110ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 19) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN0(offset) (CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN0_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 19) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN0_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000002200ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002200ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN0_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 19) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN0_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000006200ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006200ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 19) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000208ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN1(offset) (CVMX_ADD_IO_SEG(0x0001070000000208ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN1_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 19) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN1_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000002208ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002208ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN1_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 19) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN1_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000006208ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006208ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000C80ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_0(offset) (CVMX_ADD_IO_SEG(0x0001070000000C80ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_0_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_0_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000002C80ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C80ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_0_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_0_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000006C80ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C80ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000C88ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_1(offset) (CVMX_ADD_IO_SEG(0x0001070000000C88ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_1_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_1_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000002C88ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C88ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_1_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_1_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000006C88ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C88ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_SUM0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 19) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7) || (offset == 32)))))
+ cvmx_warn("CVMX_CIU_INTX_SUM0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000000ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_CIU_INTX_SUM0(offset) (CVMX_ADD_IO_SEG(0x0001070000000000ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_SUM4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_INTX_SUM4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000C00ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_INTX_SUM4(offset) (CVMX_ADD_IO_SEG(0x0001070000000C00ull) + ((offset) & 15) * 8)
+#endif
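+
+/* Illustrative usage sketch, not part of the auto-generated definitions:
+ * each core has paired CIU_INT<n>_SUM0 entries; indexing by core*2 to
+ * reach the IP2 summary follows the convention used elsewhere in the SDK,
+ * but treat that arithmetic here as an assumption. Assumes cvmx_read_csr()
+ * and cvmx_get_core_num() are available; the helper name is hypothetical.
+ */
+static inline uint64_t __example_read_ip2_sum0(void)
+{
+ unsigned long core = cvmx_get_core_num();
+ /* Entry core*2 is the IP2 summary; core*2 + 1 would be IP3. */
+ return cvmx_read_csr(CVMX_CIU_INTX_SUM0(core * 2));
+}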
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_INT_DBG_SEL CVMX_CIU_INT_DBG_SEL_FUNC()
+static inline uint64_t CVMX_CIU_INT_DBG_SEL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_CIU_INT_DBG_SEL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000007D0ull);
+}
+#else
+#define CVMX_CIU_INT_DBG_SEL (CVMX_ADD_IO_SEG(0x00010700000007D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_INT_SUM1 CVMX_CIU_INT_SUM1_FUNC()
+static inline uint64_t CVMX_CIU_INT_SUM1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_CIU_INT_SUM1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000108ull);
+}
+#else
+#define CVMX_CIU_INT_SUM1 (CVMX_ADD_IO_SEG(0x0001070000000108ull))
+#endif
+static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned long offset)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ if ((offset == 0))
+ return CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 0) * 8;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 3))
+ return CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 3) * 8;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 1) * 8;
+ break;
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 15))
+ return CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 11))
+ return CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 9))
+ return CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 5))
+ return CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 7) * 8;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 31))
+ return CVMX_ADD_IO_SEG(0x0001070100100600ull) + ((offset) & 31) * 8;
+ break;
+ }
+ cvmx_warn("CVMX_CIU_MBOX_CLRX (offset = %lu) not supported on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 3) * 8;
+}
+static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned long offset)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ if ((offset == 0))
+ return CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 0) * 8;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 3))
+ return CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 3) * 8;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 1) * 8;
+ break;
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 15))
+ return CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 11))
+ return CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 9))
+ return CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 5))
+ return CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 7) * 8;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 31))
+ return CVMX_ADD_IO_SEG(0x0001070100100400ull) + ((offset) & 31) * 8;
+ break;
+ }
+ cvmx_warn("CVMX_CIU_MBOX_SETX (offset = %lu) not supported on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 3) * 8;
+}
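+
+/* Illustrative usage sketch, not part of the auto-generated definitions:
+ * writing a mask to CIU_MBOX_SET(core) raises the corresponding mailbox
+ * bits for that core, and writing the same mask to CIU_MBOX_CLR(core)
+ * acknowledges them, which is the usual software-IPI pattern. Assumes
+ * cvmx_write_csr() is available; the helper names are hypothetical.
+ */
+static inline void __example_mbox_signal(unsigned long core, uint64_t mask)
+{
+ cvmx_write_csr(CVMX_CIU_MBOX_SETX(core), mask); /* raise mailbox bits */
+}
+static inline void __example_mbox_ack(unsigned long core, uint64_t mask)
+{
+ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(core), mask); /* clear mailbox bits */
+}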
+#define CVMX_CIU_NMI (CVMX_ADD_IO_SEG(0x0001070000000718ull))
+#define CVMX_CIU_PCI_INTA (CVMX_ADD_IO_SEG(0x0001070000000750ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_PP_BIST_STAT CVMX_CIU_PP_BIST_STAT_FUNC()
+static inline uint64_t CVMX_CIU_PP_BIST_STAT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_CIU_PP_BIST_STAT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000007E0ull);
+}
+#else
+#define CVMX_CIU_PP_BIST_STAT (CVMX_ADD_IO_SEG(0x00010700000007E0ull))
+#endif
+#define CVMX_CIU_PP_DBG (CVMX_ADD_IO_SEG(0x0001070000000708ull))
+static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ if ((offset == 0))
+ return CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 0) * 8;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 3))
+ return CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 3) * 8;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 1) * 8;
+ break;
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 15))
+ return CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 11))
+ return CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 9))
+ return CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 5))
+ return CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 7) * 8;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 31))
+ return CVMX_ADD_IO_SEG(0x0001070100100200ull) + ((offset) & 31) * 8;
+ break;
+ }
+ cvmx_warn("CVMX_CIU_PP_POKEX (offset = %lu) not supported on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 3) * 8;
+}
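+
+/* Illustrative usage sketch, not part of the auto-generated definitions:
+ * a write to a core's CIU_PP_POKE entry restarts that core's watchdog
+ * countdown, so a periodic poke keeps the watchdog from firing. Assumes
+ * cvmx_write_csr() is available; the helper name is hypothetical.
+ */
+static inline void __example_wdog_poke(unsigned long core)
+{
+ /* The store itself is the poke; the written value does not matter. */
+ cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1);
+}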
+#define CVMX_CIU_PP_RST (CVMX_ADD_IO_SEG(0x0001070000000700ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM0 CVMX_CIU_QLM0_FUNC()
+static inline uint64_t CVMX_CIU_QLM0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_CIU_QLM0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000780ull);
+}
+#else
+#define CVMX_CIU_QLM0 (CVMX_ADD_IO_SEG(0x0001070000000780ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM1 CVMX_CIU_QLM1_FUNC()
+static inline uint64_t CVMX_CIU_QLM1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_CIU_QLM1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000788ull);
+}
+#else
+#define CVMX_CIU_QLM1 (CVMX_ADD_IO_SEG(0x0001070000000788ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM2 CVMX_CIU_QLM2_FUNC()
+static inline uint64_t CVMX_CIU_QLM2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_CIU_QLM2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000790ull);
+}
+#else
+#define CVMX_CIU_QLM2 (CVMX_ADD_IO_SEG(0x0001070000000790ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM3 CVMX_CIU_QLM3_FUNC()
+static inline uint64_t CVMX_CIU_QLM3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_CIU_QLM3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000798ull);
+}
+#else
+#define CVMX_CIU_QLM3 (CVMX_ADD_IO_SEG(0x0001070000000798ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM4 CVMX_CIU_QLM4_FUNC()
+static inline uint64_t CVMX_CIU_QLM4_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_CIU_QLM4 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000007A0ull);
+}
+#else
+#define CVMX_CIU_QLM4 (CVMX_ADD_IO_SEG(0x00010700000007A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM_DCOK CVMX_CIU_QLM_DCOK_FUNC()
+static inline uint64_t CVMX_CIU_QLM_DCOK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_CIU_QLM_DCOK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000760ull);
+}
+#else
+#define CVMX_CIU_QLM_DCOK (CVMX_ADD_IO_SEG(0x0001070000000760ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM_JTGC CVMX_CIU_QLM_JTGC_FUNC()
+static inline uint64_t CVMX_CIU_QLM_JTGC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_CIU_QLM_JTGC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000768ull);
+}
+#else
+#define CVMX_CIU_QLM_JTGC (CVMX_ADD_IO_SEG(0x0001070000000768ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM_JTGD CVMX_CIU_QLM_JTGD_FUNC()
+static inline uint64_t CVMX_CIU_QLM_JTGD_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_CIU_QLM_JTGD not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000770ull);
+}
+#else
+#define CVMX_CIU_QLM_JTGD (CVMX_ADD_IO_SEG(0x0001070000000770ull))
+#endif
+#define CVMX_CIU_SOFT_BIST (CVMX_ADD_IO_SEG(0x0001070000000738ull))
+#define CVMX_CIU_SOFT_PRST (CVMX_ADD_IO_SEG(0x0001070000000748ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_SOFT_PRST1 CVMX_CIU_SOFT_PRST1_FUNC()
+static inline uint64_t CVMX_CIU_SOFT_PRST1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_CIU_SOFT_PRST1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000758ull);
+}
+#else
+#define CVMX_CIU_SOFT_PRST1 (CVMX_ADD_IO_SEG(0x0001070000000758ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_SOFT_PRST2 CVMX_CIU_SOFT_PRST2_FUNC()
+static inline uint64_t CVMX_CIU_SOFT_PRST2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_CIU_SOFT_PRST2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000007D8ull);
+}
+#else
+#define CVMX_CIU_SOFT_PRST2 (CVMX_ADD_IO_SEG(0x00010700000007D8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_SOFT_PRST3 CVMX_CIU_SOFT_PRST3_FUNC()
+static inline uint64_t CVMX_CIU_SOFT_PRST3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_CIU_SOFT_PRST3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000007E0ull);
+}
+#else
+#define CVMX_CIU_SOFT_PRST3 (CVMX_ADD_IO_SEG(0x00010700000007E0ull))
+#endif
+#define CVMX_CIU_SOFT_RST (CVMX_ADD_IO_SEG(0x0001070000000740ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_SUM1_IOX_INT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_CIU_SUM1_IOX_INT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000008600ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_CIU_SUM1_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070000008600ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_SUM1_PPX_IP2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_SUM1_PPX_IP2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000008000ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_SUM1_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070000008000ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_SUM1_PPX_IP3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_SUM1_PPX_IP3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000008200ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_SUM1_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070000008200ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_SUM1_PPX_IP4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_SUM1_PPX_IP4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000008400ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_SUM1_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070000008400ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_SUM2_IOX_INT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_CIU_SUM2_IOX_INT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000008E00ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_CIU_SUM2_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070000008E00ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_SUM2_PPX_IP2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_SUM2_PPX_IP2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000008800ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_SUM2_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070000008800ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_SUM2_PPX_IP3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_SUM2_PPX_IP3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000008A00ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_SUM2_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070000008A00ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_SUM2_PPX_IP4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_SUM2_PPX_IP4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000008C00ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_SUM2_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070000008C00ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_TIMX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 9)))))
+ cvmx_warn("CVMX_CIU_TIMX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000480ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001070000000480ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_TIM_MULTI_CAST CVMX_CIU_TIM_MULTI_CAST_FUNC()
+static inline uint64_t CVMX_CIU_TIM_MULTI_CAST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_CIU_TIM_MULTI_CAST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x000107000000C200ull);
+}
+#else
+#define CVMX_CIU_TIM_MULTI_CAST (CVMX_ADD_IO_SEG(0x000107000000C200ull))
+#endif
+static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ if ((offset == 0))
+ return CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 0) * 8;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 3))
+ return CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 3) * 8;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 1) * 8;
+ break;
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 15))
+ return CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 11))
+ return CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 9))
+ return CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 15) * 8;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 5))
+ return CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 7) * 8;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 31))
+ return CVMX_ADD_IO_SEG(0x0001070100100000ull) + ((offset) & 31) * 8;
+ break;
+ }
+ cvmx_warn("CVMX_CIU_WDOGX (offset = %lu) not supported on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 3) * 8;
+}
+
+/**
+ * cvmx_ciu_bist
+ */
+union cvmx_ciu_bist {
+ uint64_t u64;
+ struct cvmx_ciu_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t bist : 7; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_ciu_bist_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t bist : 4; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_bist_cn30xx cn31xx;
+ struct cvmx_ciu_bist_cn30xx cn38xx;
+ struct cvmx_ciu_bist_cn30xx cn38xxp2;
+ struct cvmx_ciu_bist_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t bist : 2; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn50xx;
+ struct cvmx_ciu_bist_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t bist : 3; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_bist_cn52xx cn52xxp1;
+ struct cvmx_ciu_bist_cn30xx cn56xx;
+ struct cvmx_ciu_bist_cn30xx cn56xxp1;
+ struct cvmx_ciu_bist_cn30xx cn58xx;
+ struct cvmx_ciu_bist_cn30xx cn58xxp1;
+ struct cvmx_ciu_bist_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t bist : 6; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_bist_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t bist : 5; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_bist_cn63xx cn63xxp1;
+ struct cvmx_ciu_bist_cn61xx cn66xx;
+ struct cvmx_ciu_bist_s cn68xx;
+ struct cvmx_ciu_bist_s cn68xxp1;
+ struct cvmx_ciu_bist_cn61xx cnf71xx;
+};
+typedef union cvmx_ciu_bist cvmx_ciu_bist_t;
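+
+/* Illustrative usage sketch, not part of the auto-generated definitions:
+ * the union overlays the raw 64-bit CSR value with per-model bitfields,
+ * so a read can be decoded through the common .s view. Assumes
+ * cvmx_read_csr() is available; the helper name is hypothetical.
+ */
+static inline int __example_ciu_bist_failed(void)
+{
+ cvmx_ciu_bist_t bist;
+ bist.u64 = cvmx_read_csr(CVMX_CIU_BIST);
+ /* Any set bit flags a memory that failed BIST. */
+ return bist.s.bist != 0;
+}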
+
+/**
+ * cvmx_ciu_block_int
+ *
+ * CIU_BLOCK_INT = CIU Blocks Interrupt
+ *
+ * The interrupt lines from the various chip blocks.
+ */
+union cvmx_ciu_block_int {
+ uint64_t u64;
+ struct cvmx_ciu_block_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_43_59 : 17;
+ uint64_t ptp : 1; /**< PTP interrupt
+ See CIU_INT_SUM1[PTP] */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t dfm : 1; /**< DFM interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_34_39 : 6;
+ uint64_t srio1 : 1; /**< SRIO1 interrupt
+ See SRIO1_INT_REG */
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t reserved_31_31 : 1;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t reserved_29_29 : 1;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_27_27 : 1;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t reserved_24_24 : 1;
+ uint64_t asxpcs1 : 1; /**< See PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t asxpcs0 : 1; /**< See PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t reserved_21_21 : 1;
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t reserved_18_19 : 2;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_15_15 : 1;
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t reserved_8_8 : 1;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t gmx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG */
+ uint64_t gmx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+#else
+ uint64_t mio : 1;
+ uint64_t gmx0 : 1;
+ uint64_t gmx1 : 1;
+ uint64_t sli : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t tim : 1;
+ uint64_t pow : 1;
+ uint64_t usb : 1;
+ uint64_t rad : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t l2c : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_18_19 : 2;
+ uint64_t pip : 1;
+ uint64_t reserved_21_21 : 1;
+ uint64_t asxpcs0 : 1;
+ uint64_t asxpcs1 : 1;
+ uint64_t reserved_24_24 : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t agl : 1;
+ uint64_t reserved_29_29 : 1;
+ uint64_t iob : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfm : 1;
+ uint64_t dpi : 1;
+ uint64_t ptp : 1;
+ uint64_t reserved_43_59 : 17;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_ciu_block_int_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63 : 21;
+ uint64_t ptp : 1; /**< PTP interrupt
+ See CIU_INT_SUM1[PTP] */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t reserved_31_40 : 10;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t reserved_29_29 : 1;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_27_27 : 1;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t reserved_24_24 : 1;
+ uint64_t asxpcs1 : 1; /**< See PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t asxpcs0 : 1; /**< See PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t reserved_21_21 : 1;
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t reserved_18_19 : 2;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_15_15 : 1;
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t reserved_8_8 : 1;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t gmx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG */
+ uint64_t gmx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+#else
+ uint64_t mio : 1;
+ uint64_t gmx0 : 1;
+ uint64_t gmx1 : 1;
+ uint64_t sli : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t tim : 1;
+ uint64_t pow : 1;
+ uint64_t usb : 1;
+ uint64_t rad : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t l2c : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_18_19 : 2;
+ uint64_t pip : 1;
+ uint64_t reserved_21_21 : 1;
+ uint64_t asxpcs0 : 1;
+ uint64_t asxpcs1 : 1;
+ uint64_t reserved_24_24 : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t agl : 1;
+ uint64_t reserved_29_29 : 1;
+ uint64_t iob : 1;
+ uint64_t reserved_31_40 : 10;
+ uint64_t dpi : 1;
+ uint64_t ptp : 1;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_block_int_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63 : 21;
+ uint64_t ptp : 1; /**< PTP interrupt
+ See CIU_INT_SUM1[PTP] */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t dfm : 1; /**< DFM interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_34_39 : 6;
+ uint64_t srio1 : 1; /**< SRIO1 interrupt
+ See SRIO1_INT_REG, SRIO1_INT2_REG */
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t reserved_31_31 : 1;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t reserved_29_29 : 1;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_27_27 : 1;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t reserved_23_24 : 2;
+ uint64_t asxpcs0 : 1; /**< See PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t reserved_21_21 : 1;
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t reserved_18_19 : 2;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_15_15 : 1;
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t reserved_8_8 : 1;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t reserved_2_2 : 1;
+ uint64_t gmx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+#else
+ uint64_t mio : 1;
+ uint64_t gmx0 : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t sli : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t tim : 1;
+ uint64_t pow : 1;
+ uint64_t usb : 1;
+ uint64_t rad : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t l2c : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_18_19 : 2;
+ uint64_t pip : 1;
+ uint64_t reserved_21_21 : 1;
+ uint64_t asxpcs0 : 1;
+ uint64_t reserved_23_24 : 2;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t agl : 1;
+ uint64_t reserved_29_29 : 1;
+ uint64_t iob : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfm : 1;
+ uint64_t dpi : 1;
+ uint64_t ptp : 1;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_block_int_cn63xx cn63xxp1;
+ struct cvmx_ciu_block_int_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_43_59 : 17;
+ uint64_t ptp : 1; /**< PTP interrupt
+ See CIU_INT_SUM1[PTP] */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t dfm : 1; /**< DFM interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_33_39 : 7;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t reserved_31_31 : 1;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t reserved_29_29 : 1;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_27_27 : 1;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t reserved_24_24 : 1;
+ uint64_t asxpcs1 : 1; /**< See PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t asxpcs0 : 1; /**< See PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t reserved_21_21 : 1;
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t reserved_18_19 : 2;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_15_15 : 1;
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t reserved_8_8 : 1;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t gmx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG */
+ uint64_t gmx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+#else
+ uint64_t mio : 1;
+ uint64_t gmx0 : 1;
+ uint64_t gmx1 : 1;
+ uint64_t sli : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t tim : 1;
+ uint64_t pow : 1;
+ uint64_t usb : 1;
+ uint64_t rad : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t l2c : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_18_19 : 2;
+ uint64_t pip : 1;
+ uint64_t reserved_21_21 : 1;
+ uint64_t asxpcs0 : 1;
+ uint64_t asxpcs1 : 1;
+ uint64_t reserved_24_24 : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t agl : 1;
+ uint64_t reserved_29_29 : 1;
+ uint64_t iob : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t dfm : 1;
+ uint64_t dpi : 1;
+ uint64_t ptp : 1;
+ uint64_t reserved_43_59 : 17;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_block_int_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63 : 21;
+ uint64_t ptp : 1; /**< PTP interrupt
+ See CIU_INT_SUM1[PTP] */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t reserved_31_40 : 10;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t reserved_27_29 : 3;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t reserved_23_24 : 2;
+ uint64_t asxpcs0 : 1; /**< See PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t reserved_21_21 : 1;
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t reserved_18_19 : 2;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_15_15 : 1;
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t reserved_6_8 : 3;
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t reserved_2_2 : 1;
+ uint64_t gmx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+#else
+ uint64_t mio : 1;
+ uint64_t gmx0 : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t sli : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t reserved_6_8 : 3;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t tim : 1;
+ uint64_t pow : 1;
+ uint64_t usb : 1;
+ uint64_t rad : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t l2c : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_18_19 : 2;
+ uint64_t pip : 1;
+ uint64_t reserved_21_21 : 1;
+ uint64_t asxpcs0 : 1;
+ uint64_t reserved_23_24 : 2;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_27_29 : 3;
+ uint64_t iob : 1;
+ uint64_t reserved_31_40 : 10;
+ uint64_t dpi : 1;
+ uint64_t ptp : 1;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_block_int cvmx_ciu_block_int_t;
+
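+/**
+ * Usage sketch (illustrative only, not part of the generated register
+ * definitions): a dispatcher can read CIU_BLOCK_INT once and test the
+ * per-block bits to decide which unit's own interrupt-summary CSR to read
+ * next. The CVMX_CIU_BLOCK_INT address macro and cvmx_read_csr() are
+ * assumed to be provided elsewhere in this SDK; field names follow the
+ * cnf71xx layout above, and the handlers are hypothetical.
+ *
+ * @code
+ * cvmx_ciu_block_int_t blk;
+ * blk.u64 = cvmx_read_csr(CVMX_CIU_BLOCK_INT);
+ * if (blk.cnf71xx.mio)
+ *     handle_mio_boot_error();    // hypothetical; see MIO_BOOT_ERR
+ * if (blk.cnf71xx.gmx0)
+ *     handle_gmx0_interrupt();    // hypothetical; see GMX0_RX*_INT_REG
+ * @endcode
+ */
+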
+/**
+ * cvmx_ciu_dint
+ */
+union cvmx_ciu_dint {
+ uint64_t u64;
+ struct cvmx_ciu_dint_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t dint : 32; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu_dint_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t dint : 1; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_dint_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t dint : 2; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_dint_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t dint : 16; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_dint_cn38xx cn38xxp2;
+ struct cvmx_ciu_dint_cn31xx cn50xx;
+ struct cvmx_ciu_dint_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t dint : 4; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_dint_cn52xx cn52xxp1;
+ struct cvmx_ciu_dint_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t dint : 12; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_dint_cn56xx cn56xxp1;
+ struct cvmx_ciu_dint_cn38xx cn58xx;
+ struct cvmx_ciu_dint_cn38xx cn58xxp1;
+ struct cvmx_ciu_dint_cn52xx cn61xx;
+ struct cvmx_ciu_dint_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t dint : 6; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_dint_cn63xx cn63xxp1;
+ struct cvmx_ciu_dint_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t dint : 10; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_dint_s cn68xx;
+ struct cvmx_ciu_dint_s cn68xxp1;
+ struct cvmx_ciu_dint_cn52xx cnf71xx;
+};
+typedef union cvmx_ciu_dint cvmx_ciu_dint_t;
+
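+/**
+ * Usage sketch (illustrative only): DINT is a one-bit-per-PP pulse, so
+ * writing a mask sends a debug interrupt to the selected cores. The
+ * CVMX_CIU_DINT address macro and cvmx_write_csr() are assumed to be
+ * provided elsewhere in this SDK.
+ *
+ * @code
+ * cvmx_ciu_dint_t dint;
+ * dint.u64 = 0;
+ * dint.s.dint = (1u << 0) | (1u << 2);   // pulse DINT to PP0 and PP2
+ * cvmx_write_csr(CVMX_CIU_DINT, dint.u64);
+ * @endcode
+ */
+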
+/**
+ * cvmx_ciu_en2_io#_int
+ *
+ * Notes:
+ * Neither these SUM2 CSRs nor the CIU_TIM4-9 timers existed prior to pass 1.2.
+ *
+ */
+union cvmx_ciu_en2_iox_int {
+ uint64_t u64;
+ struct cvmx_ciu_en2_iox_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_iox_int_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_iox_int_cn61xx cn66xx;
+ struct cvmx_ciu_en2_iox_int_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_iox_int cvmx_ciu_en2_iox_int_t;
+
+/**
+ * cvmx_ciu_en2_io#_int_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_EN2_PP(IO)X_IPx(INT) register; reads back the
+ * corresponding CIU_EN2_PP(IO)X_IPx(INT) value.
+ */
+union cvmx_ciu_en2_iox_int_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_en2_iox_int_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< Write 1 to clear ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< Write 1 to clear EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< Write 1 to clear General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_iox_int_w1c_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< Write 1 to clear General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_iox_int_w1c_cn61xx cn66xx;
+ struct cvmx_ciu_en2_iox_int_w1c_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_iox_int_w1c cvmx_ciu_en2_iox_int_w1c_t;
+
+/**
+ * cvmx_ciu_en2_io#_int_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_EN2_PP(IO)X_IPx(INT) register; reads back the
+ * corresponding CIU_EN2_PP(IO)X_IPx(INT) value.
+ */
+union cvmx_ciu_en2_iox_int_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_en2_iox_int_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< Write 1 to set ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< Write 1 to set EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< Write 1 to set General timer 4-9 interrupt enables */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_iox_int_w1s_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< Write 1 to set General timer 4-9 interrupt enables */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_iox_int_w1s_cn61xx cn66xx;
+ struct cvmx_ciu_en2_iox_int_w1s_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_iox_int_w1s cvmx_ciu_en2_iox_int_w1s_t;
+
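+/**
+ * Usage sketch (illustrative only): the _W1C/_W1S aliases let software set
+ * or clear individual enable bits without a read-modify-write race against
+ * other writers of the same register. The CVMX_CIU_EN2_IOX_INT_W1S(offset)
+ * and CVMX_CIU_EN2_IOX_INT_W1C(offset) address macros are assumed to be
+ * provided elsewhere in this SDK.
+ *
+ * @code
+ * cvmx_ciu_en2_iox_int_w1s_t set;
+ * set.u64 = 0;
+ * set.s.timer = 1 << 0;   // TIMER bit 0 corresponds to general timer 4
+ * cvmx_write_csr(CVMX_CIU_EN2_IOX_INT_W1S(0), set.u64);
+ *
+ * cvmx_ciu_en2_iox_int_w1c_t clr;
+ * clr.u64 = 0;
+ * clr.s.timer = 1 << 0;   // later, atomically disable the same enable
+ * cvmx_write_csr(CVMX_CIU_EN2_IOX_INT_W1C(0), clr.u64);
+ * @endcode
+ */
+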
+/**
+ * cvmx_ciu_en2_pp#_ip2
+ *
+ * Notes:
+ * Neither these SUM2 CSRs nor the CIU_TIM4-9 timers existed prior to pass 1.2.
+ *
+ */
+union cvmx_ciu_en2_ppx_ip2 {
+ uint64_t u64;
+ struct cvmx_ciu_en2_ppx_ip2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_ppx_ip2_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_ppx_ip2_cn61xx cn66xx;
+ struct cvmx_ciu_en2_ppx_ip2_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_ppx_ip2 cvmx_ciu_en2_ppx_ip2_t;
+
+/**
+ * cvmx_ciu_en2_pp#_ip2_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_EN2_PP(IO)X_IPx(INT) register; reads back the
+ * corresponding CIU_EN2_PP(IO)X_IPx(INT) value.
+ */
+union cvmx_ciu_en2_ppx_ip2_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_en2_ppx_ip2_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< Write 1 to clear ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< Write 1 to clear EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< Write 1 to clear General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_ppx_ip2_w1c_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< Write 1 to clear General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_ppx_ip2_w1c_cn61xx cn66xx;
+ struct cvmx_ciu_en2_ppx_ip2_w1c_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_ppx_ip2_w1c cvmx_ciu_en2_ppx_ip2_w1c_t;
+
+/**
+ * cvmx_ciu_en2_pp#_ip2_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_EN2_PP(IO)X_IPx(INT) register; reads back the
+ * corresponding CIU_EN2_PP(IO)X_IPx(INT) value.
+ */
+union cvmx_ciu_en2_ppx_ip2_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_en2_ppx_ip2_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< Write 1 to set ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< Write 1 to set EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< Write 1 to set General timer 4-9 interrupt enables */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_ppx_ip2_w1s_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< Write 1 to set General timer 4-9 interrupt enables */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_ppx_ip2_w1s_cn61xx cn66xx;
+ struct cvmx_ciu_en2_ppx_ip2_w1s_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_ppx_ip2_w1s cvmx_ciu_en2_ppx_ip2_w1s_t;
+
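+/**
+ * Usage sketch (illustrative only): the PP registers follow the same
+ * W1S/W1C pattern as the IO ones above, but are indexed per core, so each
+ * PP's IP2 enables can be adjusted independently. The
+ * CVMX_CIU_EN2_PPX_IP2_W1S(offset) address macro and cvmx_get_core_num()
+ * are assumed to be provided elsewhere in this SDK.
+ *
+ * @code
+ * cvmx_ciu_en2_ppx_ip2_w1s_t set;
+ * set.u64 = 0;
+ * set.s.timer = 0x3;   // route general timers 4 and 5 to this core's IP2
+ * cvmx_write_csr(CVMX_CIU_EN2_PPX_IP2_W1S(cvmx_get_core_num()), set.u64);
+ * @endcode
+ */
+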
+/**
+ * cvmx_ciu_en2_pp#_ip3
+ *
+ * Notes:
+ * Neither these SUM2 CSRs nor the CIU_TIM4-9 timers existed prior to pass 1.2.
+ *
+ */
+union cvmx_ciu_en2_ppx_ip3 {
+ uint64_t u64;
+ struct cvmx_ciu_en2_ppx_ip3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_ppx_ip3_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_ppx_ip3_cn61xx cn66xx;
+ struct cvmx_ciu_en2_ppx_ip3_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_ppx_ip3 cvmx_ciu_en2_ppx_ip3_t;
+
+/**
+ * cvmx_ciu_en2_pp#_ip3_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_EN2_PP(IO)X_IPx(INT) register; reads back the
+ * corresponding CIU_EN2_PP(IO)X_IPx(INT) value.
+ */
+union cvmx_ciu_en2_ppx_ip3_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_en2_ppx_ip3_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< Write 1 to clear ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< Write 1 to clear EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< Write 1 to clear General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_ppx_ip3_w1c_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< Write 1 to clear General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_ppx_ip3_w1c_cn61xx cn66xx;
+ struct cvmx_ciu_en2_ppx_ip3_w1c_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_ppx_ip3_w1c cvmx_ciu_en2_ppx_ip3_w1c_t;
+
+/**
+ * cvmx_ciu_en2_pp#_ip3_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_EN2_PP(IO)X_IPx(INT) register; reads back the
+ * corresponding CIU_EN2_PP(IO)X_IPx(INT) value.
+ */
+union cvmx_ciu_en2_ppx_ip3_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_en2_ppx_ip3_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< Write 1 to set ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< Write 1 to set EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< Write 1 to set General timer 4-9 interrupt enables */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_ppx_ip3_w1s_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< Write 1 to set General timer 4-9 interrupt enables */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_ppx_ip3_w1s_cn61xx cn66xx;
+ struct cvmx_ciu_en2_ppx_ip3_w1s_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_ppx_ip3_w1s cvmx_ciu_en2_ppx_ip3_w1s_t;
+
+/**
+ * cvmx_ciu_en2_pp#_ip4
+ *
+ * Notes:
+ * Neither these SUM2 CSRs nor the CIU_TIM4-9 timers existed prior to pass 1.2.
+ *
+ */
+union cvmx_ciu_en2_ppx_ip4 {
+ uint64_t u64;
+ struct cvmx_ciu_en2_ppx_ip4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_ppx_ip4_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_ppx_ip4_cn61xx cn66xx;
+ struct cvmx_ciu_en2_ppx_ip4_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_ppx_ip4 cvmx_ciu_en2_ppx_ip4_t;
+
+/**
+ * cvmx_ciu_en2_pp#_ip4_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_EN2_PP(IO)X_IPx(INT) register; reads back the
+ * corresponding CIU_EN2_PP(IO)X_IPx(INT) value.
+ */
+union cvmx_ciu_en2_ppx_ip4_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_en2_ppx_ip4_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< Write 1 to clear ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< Write 1 to clear EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< Write 1 to clear General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_ppx_ip4_w1c_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< Write 1 to clear General timer 4-9 interrupt enable */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_ppx_ip4_w1c_cn61xx cn66xx;
+ struct cvmx_ciu_en2_ppx_ip4_w1c_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_ppx_ip4_w1c cvmx_ciu_en2_ppx_ip4_w1c_t;
+
+/**
+ * cvmx_ciu_en2_pp#_ip4_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_EN2_PP(IO)X_IPx(INT) register; reads back the
+ * corresponding CIU_EN2_PP(IO)X_IPx(INT) value.
+ */
+union cvmx_ciu_en2_ppx_ip4_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_en2_ppx_ip4_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< Write 1 to set ENDOR PHY interrupts enable */
+ uint64_t eoi : 1; /**< Write 1 to set EOI rsl interrupt enable */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< Write 1 to set General timer 4-9 interrupt enables */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_en2_ppx_ip4_w1s_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< Write 1 to set General timer 4-9 interrupt enables */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_en2_ppx_ip4_w1s_cn61xx cn66xx;
+ struct cvmx_ciu_en2_ppx_ip4_w1s_s cnf71xx;
+};
+typedef union cvmx_ciu_en2_ppx_ip4_w1s cvmx_ciu_en2_ppx_ip4_w1s_t;
+
+/**
+ * cvmx_ciu_fuse
+ */
+union cvmx_ciu_fuse {
+ uint64_t u64;
+ struct cvmx_ciu_fuse_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t fuse : 32; /**< Physical PP is present */
+#else
+ uint64_t fuse : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu_fuse_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t fuse : 1; /**< Physical PP is present */
+#else
+ uint64_t fuse : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_fuse_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t fuse : 2; /**< Physical PP is present */
+#else
+ uint64_t fuse : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_fuse_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t fuse : 16; /**< Physical PP is present */
+#else
+ uint64_t fuse : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_fuse_cn38xx cn38xxp2;
+ struct cvmx_ciu_fuse_cn31xx cn50xx;
+ struct cvmx_ciu_fuse_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t fuse : 4; /**< Physical PP is present */
+#else
+ uint64_t fuse : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_fuse_cn52xx cn52xxp1;
+ struct cvmx_ciu_fuse_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t fuse : 12; /**< Physical PP is present */
+#else
+ uint64_t fuse : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_fuse_cn56xx cn56xxp1;
+ struct cvmx_ciu_fuse_cn38xx cn58xx;
+ struct cvmx_ciu_fuse_cn38xx cn58xxp1;
+ struct cvmx_ciu_fuse_cn52xx cn61xx;
+ struct cvmx_ciu_fuse_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t fuse : 6; /**< Physical PP is present */
+#else
+ uint64_t fuse : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_fuse_cn63xx cn63xxp1;
+ struct cvmx_ciu_fuse_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t fuse : 10; /**< Physical PP is present */
+#else
+ uint64_t fuse : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_fuse_s cn68xx;
+ struct cvmx_ciu_fuse_s cn68xxp1;
+ struct cvmx_ciu_fuse_cn52xx cnf71xx;
+};
+typedef union cvmx_ciu_fuse cvmx_ciu_fuse_t;
+
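+/**
+ * Usage sketch (illustrative only): FUSE holds one bit per physical PP, so
+ * a population count of the field gives the number of cores physically
+ * present. The CVMX_CIU_FUSE address macro and cvmx_read_csr() are assumed
+ * to be provided elsewhere in this SDK.
+ *
+ * @code
+ * cvmx_ciu_fuse_t fuse;
+ * uint32_t mask;
+ * int cores = 0;
+ * fuse.u64 = cvmx_read_csr(CVMX_CIU_FUSE);
+ * for (mask = fuse.s.fuse; mask; mask >>= 1)
+ *     cores += mask & 1;
+ * @endcode
+ */
+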
+/**
+ * cvmx_ciu_gstop
+ */
+union cvmx_ciu_gstop {
+ uint64_t u64;
+ struct cvmx_ciu_gstop_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t gstop : 1; /**< GSTOP bit */
+#else
+ uint64_t gstop : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_gstop_s cn30xx;
+ struct cvmx_ciu_gstop_s cn31xx;
+ struct cvmx_ciu_gstop_s cn38xx;
+ struct cvmx_ciu_gstop_s cn38xxp2;
+ struct cvmx_ciu_gstop_s cn50xx;
+ struct cvmx_ciu_gstop_s cn52xx;
+ struct cvmx_ciu_gstop_s cn52xxp1;
+ struct cvmx_ciu_gstop_s cn56xx;
+ struct cvmx_ciu_gstop_s cn56xxp1;
+ struct cvmx_ciu_gstop_s cn58xx;
+ struct cvmx_ciu_gstop_s cn58xxp1;
+ struct cvmx_ciu_gstop_s cn61xx;
+ struct cvmx_ciu_gstop_s cn63xx;
+ struct cvmx_ciu_gstop_s cn63xxp1;
+ struct cvmx_ciu_gstop_s cn66xx;
+ struct cvmx_ciu_gstop_s cn68xx;
+ struct cvmx_ciu_gstop_s cn68xxp1;
+ struct cvmx_ciu_gstop_s cnf71xx;
+};
+typedef union cvmx_ciu_gstop cvmx_ciu_gstop_t;
+
+/**
+ * cvmx_ciu_int#_en0
+ *
+ * Notes:
+ * CIU_INT0_EN0: PP0/IP2
+ * CIU_INT1_EN0: PP0/IP3
+ * CIU_INT2_EN0: PP1/IP2
+ * CIU_INT3_EN0: PP1/IP3
+ * CIU_INT4_EN0: PP2/IP2
+ * CIU_INT5_EN0: PP2/IP3
+ * CIU_INT6_EN0: PP3/IP2
+ * CIU_INT7_EN0: PP3/IP3
+ * .....
+ *
+ * (hole)
+ * CIU_INT32_EN0: IO 0
+ * CIU_INT33_EN0: IO 1
+ */
+union cvmx_ciu_intx_en0 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt enable */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt enable */
+ uint64_t powiq : 1; /**< POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< General timer interrupt enables */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt enable */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt enable */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< RML Interrupt enable */
+ uint64_t twsi : 1; /**< TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCIe MSI enables */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Two UART interrupt enables */
+ uint64_t mbox : 2; /**< Two mailbox/PCIe interrupt enables */
+ uint64_t gpio : 16; /**< 16 GPIO interrupt enables */
+ uint64_t workq : 16; /**< 16 work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en0_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t reserved_47_47 : 1;
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t reserved_47_47 : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_intx_en0_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_intx_en0_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_intx_en0_cn38xx cn38xxp2;
+ struct cvmx_ciu_intx_en0_cn30xx cn50xx;
+ struct cvmx_ciu_intx_en0_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_en0_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en0_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en0_cn38xx cn58xx;
+ struct cvmx_ciu_intx_en0_cn38xx cn58xxp1;
+ struct cvmx_ciu_intx_en0_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt enable */
+ uint64_t mii : 1; /**< RGMII/MIX Interface 0 Interrupt enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt enable */
+ uint64_t powiq : 1; /**< POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt enable */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< RML Interrupt enable */
+ uint64_t twsi : 1; /**< TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCIe MSI enables */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Two UART interrupt enables */
+ uint64_t mbox : 2; /**< Two mailbox/PCIe interrupt enables */
+ uint64_t gpio : 16; /**< 16 GPIO interrupt enables */
+ uint64_t workq : 16; /**< 16 work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en0_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_cn52xx cn63xxp1;
+ struct cvmx_ciu_intx_en0_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt enable */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt enable */
+ uint64_t powiq : 1; /**< POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt enable */
+ uint64_t reserved_57_57 : 1;
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt enable */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< RML Interrupt enable */
+ uint64_t twsi : 1; /**< TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Two UART interrupt enables */
+ uint64_t mbox : 2; /**< Two mailbox/PCIe/sRIO interrupt enables */
+ uint64_t gpio : 16; /**< 16 GPIO interrupt enables */
+ uint64_t workq : 16; /**< 16 work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_57 : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en0_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt enable */
+ uint64_t powiq : 1; /**< POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt enable */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< RML Interrupt enable */
+ uint64_t twsi : 1; /**< TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCIe MSI enables */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Two UART interrupt enables */
+ uint64_t mbox : 2; /**< Two mailbox/PCIe interrupt enables */
+ uint64_t gpio : 16; /**< 16 GPIO interrupt enables */
+ uint64_t workq : 16; /**< 16 work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en0 cvmx_ciu_intx_en0_t;
+
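+/**
+ * Usage sketch (illustrative only): per the Notes above, EN0 register n
+ * serves PP (n/2) at IP (2 + n%2) below the hole, and IO units start at
+ * n = 32. A hypothetical helper for the core mapping:
+ *
+ * @code
+ * static inline int ciu_en0_index(int pp, int ip)   // ip is 2 or 3
+ * {
+ *     return 2 * pp + (ip - 2);
+ * }
+ * // ciu_en0_index(1, 3) == 3, matching "CIU_INT3_EN0: PP1/IP3" above
+ * @endcode
+ */
+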
+/**
+ * cvmx_ciu_int#_en0_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTx_EN0 register; reads back the corresponding CIU_INTx_EN0 value.
+ *
+ */
+union cvmx_ciu_intx_en0_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to clear Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to clear POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to clear 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to clear MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to clear PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to clear General timer interrupt enables */
+ uint64_t key_zero : 1; /**< Write 1 to clear Key Zeroization interrupt enable */
+ uint64_t ipd_drp : 1; /**< Write 1 to clear IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to clear GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to clear Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to clear RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to clear TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to clear PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to clear PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to clear UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to clear mailbox/PCIe interrupt
+ enables */
+ uint64_t gpio : 16; /**< Write 1s to clear GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to clear work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_w1c_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en0_w1c_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en0_w1c_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to clear Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to clear RGMII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to clear POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to clear 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to clear MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to clear PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to clear General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to clear IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to clear GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to clear Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to clear RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to clear TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to clear PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to clear PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to clear UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to clear mailbox/PCIe interrupt
+ enables */
+ uint64_t gpio : 16; /**< Write 1s to clear GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to clear work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xxp1;
+ struct cvmx_ciu_intx_en0_w1c_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to clear Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to clear POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< Write 1 to clear 2nd TWSI Interrupt */
+ uint64_t mpi : 1; /**< Write 1 to clear MPI/SPI interrupt */
+ uint64_t reserved_57_57 : 1;
+ uint64_t usb : 1; /**< Write 1 to clear USB EHCI or OHCI Interrupt */
+ uint64_t timer : 4; /**< Write 1 to clear General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to clear IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to clear GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to clear Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to clear RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to clear TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to clear PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to clear PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to clear UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to clear mailbox/PCIe/sRIO interrupt
+ enables */
+ uint64_t gpio : 16; /**< Write 1s to clear GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to clear work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_57 : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en0_w1c_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to clear Boot bus DMA engines Interrupt
+ enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t ipdppthr : 1; /**< Write 1 to clear IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to clear POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to clear 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to clear MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to clear PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to clear General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to clear IPD QOS packet drop interrupt
+ enable */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< Write 1 to clear GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to clear Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to clear RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to clear TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to clear PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to clear PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to clear UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to clear mailbox/PCIe interrupt
+ enables */
+ uint64_t gpio : 16; /**< Write 1s to clear GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to clear work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en0_w1c cvmx_ciu_intx_en0_w1c_t;
+
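+/**
+ * Usage sketch (illustrative only): as with the EN2 aliases, writing a mask
+ * to this W1C view clears just the selected EN0 bits, leaving the rest
+ * untouched. The CVMX_CIU_INTX_EN0_W1C(offset) address macro is assumed to
+ * be provided elsewhere in this SDK.
+ *
+ * @code
+ * cvmx_ciu_intx_en0_w1c_t clr;
+ * clr.u64 = 0;
+ * clr.s.gpio = 1 << 5;   // stop routing GPIO 5 to PP0/IP2 (EN0 index 0)
+ * cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(0), clr.u64);
+ * @endcode
+ */
+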
+/**
+ * cvmx_ciu_int#_en0_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_INTx_EN0 register; reads back the corresponding CIU_INTx_EN0 value.
+ *
+ */
+union cvmx_ciu_intx_en0_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to set Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to set RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to set IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to set POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to set 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to set MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to set PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to set General timer interrupt enables */
+ uint64_t key_zero : 1; /**< Write 1 to set Key Zeroization interrupt enable */
+ uint64_t ipd_drp : 1; /**< Write 1 to set IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to set GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to set Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to set RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to set TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to set PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to set PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to set UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to set mailbox/PCIe interrupt
+ enables */
+ uint64_t gpio : 16; /**< Write 1s to set GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to set work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_w1s_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en0_w1s_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en0_w1s_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to set Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to set RGMII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to set IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to set POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to set 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to set MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to set PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to set General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to set IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to set GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to set Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to set RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to set TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to set PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to set PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to set UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to set mailbox/PCIe interrupt
+ enables */
+ uint64_t gpio : 16; /**< Write 1s to set GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to set work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xxp1;
+ struct cvmx_ciu_intx_en0_w1s_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to set Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to set RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to set IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to set POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to set 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to set MPI/SPI interrupt enable */
+ uint64_t reserved_57_57 : 1;
+ uint64_t usb : 1; /**< Write 1 to set USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to set General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to set IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to set GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to set Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to set RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to set TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to set PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to set PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to set UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to set mailbox/PCIe/sRIO interrupt
+ enables */
+ uint64_t gpio : 16; /**< Write 1s to set GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to set work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_57 : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en0_w1s_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to set Boot bus DMA engines Interrupt
+ enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t ipdppthr : 1; /**< Write 1 to set IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to set POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to set 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to set MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to set PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to set General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to set IPD QOS packet drop interrupt
+ enable */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< Write 1 to set GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to set Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to set RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to set TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to set PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to set PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to set UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to set mailbox/PCIe interrupt
+ enables */
+ uint64_t gpio : 16; /**< Write 1s to set GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to set work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en0_w1s cvmx_ciu_intx_en0_w1s_t;
+
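+/**
+ * Example (illustrative sketch, not part of the original SDK sources):
+ * setting enables through the write-1-to-set alias avoids a
+ * read-modify-write of CIU_INT#_EN0.  cvmx_write_csr() and the
+ * CVMX_CIU_INTX_EN0_W1S() address macro are assumed to be available
+ * from the usual cvmx-csr headers.
+ *
+ * @verbatim
+ * cvmx_ciu_intx_en0_w1s_t en0;
+ * en0.u64 = 0;
+ * en0.s.timer = 0xf;    // write 1s to set all four timer enables
+ * en0.s.uart = 0x3;     // write 1s to set both UART enables
+ * cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(0), en0.u64);
+ * @endverbatim
+ */
+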
+/**
+ * cvmx_ciu_int#_en1
+ *
+ * Notes:
+ * Enables for CIU_SUM1_PPX_IPx or CIU_SUM1_IOX_INT
+ * CIU_INT0_EN1: PP0/IP2
+ * CIU_INT1_EN1: PP0/IP3
+ * CIU_INT2_EN1: PP1/IP2
+ * CIU_INT3_EN1: PP1/IP3
+ * CIU_INT4_EN1: PP2/IP2
+ * CIU_INT5_EN1: PP2/IP3
+ * CIU_INT6_EN1: PP3/IP2
+ * CIU_INT7_EN1: PP3/IP3
+ * .....
+ *
+ * (hole)
+ * CIU_INT32_EN1: IO0
+ * CIU_INT33_EN1: IO1
+ *
+ * @verbatim
+ * PPx/IP2 will be raised when...
+ *
+ * n = x*2
+ * PPx/IP2 = |([CIU_SUM2_PPx_IP2,CIU_SUM1_PPx_IP2, CIU_INTn_SUM0] & [CIU_EN2_PPx_IP2,CIU_INTn_EN1, CIU_INTn_EN0])
+ *
+ * PPx/IP3 will be raised when...
+ *
+ * n = x*2 + 1
+ * PPx/IP3 = |([CIU_SUM2_PPx_IP3,CIU_SUM1_PPx_IP3, CIU_INTn_SUM0] & [CIU_EN2_PPx_IP3,CIU_INTn_EN1, CIU_INTn_EN0])
+ *
+ * PCI/INT will be raised when...
+ *
+ * PCI/INT = |([CIU_SUM2_IO0_INT,CIU_SUM1_IO0_INT, CIU_INT32_SUM0] & [CIU_EN2_IO0_INT,CIU_INT32_EN1, CIU_INT32_EN0])
+ * PCI/INT = |([CIU_SUM2_IO1_INT,CIU_SUM1_IO1_INT, CIU_INT33_SUM0] & [CIU_EN2_IO1_INT,CIU_INT33_EN1, CIU_INT33_EN0])
+ * @endverbatim
+ */
+union cvmx_ciu_intx_en1 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t agl : 1; /**< AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< DFA interrupt enable */
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t zip : 1; /**< ZIP interrupt enable */
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt enable */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt enable */
+ uint64_t usb1 : 1; /**< Second USB interrupt enable */
+ uint64_t uart2 : 1; /**< Third UART interrupt enable */
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en1_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t wdog : 1; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_intx_en1_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t wdog : 2; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_intx_en1_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_intx_en1_cn38xx cn38xxp2;
+ struct cvmx_ciu_intx_en1_cn31xx cn50xx;
+ struct cvmx_ciu_intx_en1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn52xxp1;
+ struct cvmx_ciu_intx_en1_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en1_cn38xx cn58xx;
+ struct cvmx_ciu_intx_en1_cn38xx cn58xxp1;
+ struct cvmx_ciu_intx_en1_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t agl : 1; /**< AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< DFA interrupt enable */
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t zip : 1; /**< ZIP interrupt enable */
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt enable */
+ uint64_t mii1 : 1; /**< RGMII/MIX Interface 1 Interrupt enable */
+ uint64_t reserved_4_17 : 14;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_17 : 14;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en1_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t agl : 1; /**< AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< DFA interrupt enable */
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t zip : 1; /**< ZIP interrupt enable */
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt enable */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt enable */
+ uint64_t reserved_6_17 : 12;
+ uint64_t wdog : 6; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 6;
+ uint64_t reserved_6_17 : 12;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_cn63xx cn63xxp1;
+ struct cvmx_ciu_intx_en1_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t agl : 1; /**< AGL interrupt enable */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< DFA interrupt enable */
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t zip : 1; /**< ZIP interrupt enable */
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt enable */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt enable */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en1_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t reserved_41_46 : 6;
+ uint64_t dpi_dma : 1; /**< DPI_DMA interrupt enable */
+ uint64_t reserved_37_39 : 3;
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t reserved_32_32 : 1;
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t reserved_28_28 : 1;
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt enable */
+ uint64_t reserved_4_18 : 15;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_18 : 15;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_28_28 : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_32_32 : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_46 : 6;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en1 cvmx_ciu_intx_en1_t;
+
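+/**
+ * Example (illustrative sketch, not part of the original SDK sources):
+ * evaluating the PP#/IP2 formula from the CIU_INT#_EN1 notes above,
+ * using only the SUM0/EN0 and SUM1/EN1 terms (the SUM2/EN2 terms are
+ * elided).  cvmx_read_csr() and the CVMX_CIU_INTX_SUM0(),
+ * CVMX_CIU_INTX_EN0(), CVMX_CIU_SUM1_PPX_IP2() and CVMX_CIU_INTX_EN1()
+ * address macros are assumed to follow the usual cvmx-csr naming.
+ *
+ * @verbatim
+ * int pp_ip2_pending(int x)      // x = PP (core) number
+ * {
+ *     int n = x * 2;             // EN0/EN1 register index for PPx/IP2
+ *     uint64_t sum0 = cvmx_read_csr(CVMX_CIU_INTX_SUM0(n));
+ *     uint64_t en0  = cvmx_read_csr(CVMX_CIU_INTX_EN0(n));
+ *     uint64_t sum1 = cvmx_read_csr(CVMX_CIU_SUM1_PPX_IP2(x));
+ *     uint64_t en1  = cvmx_read_csr(CVMX_CIU_INTX_EN1(n));
+ *     return ((sum0 & en0) | (sum1 & en1)) != 0;
+ * }
+ * @endverbatim
+ */
+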
+/**
+ * cvmx_ciu_int#_en1_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTX_EN1 register; reads back the corresponding CIU_INTX_EN1 value.
+ *
+ */
+union cvmx_ciu_intx_en1_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< Write 1 to clear SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< Write 1 to clear SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< Write 1 to clear DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to clear SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to clear SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< Write 1 to clear DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< Write 1 to clear GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to clear DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to clear ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear EMMC Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 1
+ Interrupt enable */
+ uint64_t usb1 : 1; /**< Write 1 to clear second USB interrupt enable */
+ uint64_t uart2 : 1; /**< Write 1 to clear third UART interrupt enable */
+ uint64_t wdog : 16; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en1_w1c_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_w1c_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_w1c_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en1_w1c_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< Write 1 to clear DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< Write 1 to clear GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to clear DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to clear ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear EMMC Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to clear RGMII/MIX Interface 1
+ Interrupt enable */
+ uint64_t reserved_4_17 : 14;
+ uint64_t wdog : 4; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_17 : 14;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en1_w1c_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< Write 1 to clear DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to clear SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to clear SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to clear DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to clear ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 1
+ Interrupt enable */
+ uint64_t reserved_6_17 : 12;
+ uint64_t wdog : 6; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 6;
+ uint64_t reserved_6_17 : 12;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_w1c_cn63xx cn63xxp1;
+ struct cvmx_ciu_intx_en1_w1c_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< Write 1 to clear SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< Write 1 to clear SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< Write 1 to clear DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< Write 1 to clear SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL interrupt enable */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< Write 1 to clear GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to clear DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to clear ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 1
+ Interrupt enable */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en1_w1c_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t reserved_41_46 : 6;
+ uint64_t dpi_dma : 1; /**< Write 1 to clear DPI_DMA interrupt enable */
+ uint64_t reserved_37_39 : 3;
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t reserved_32_32 : 1;
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t reserved_28_28 : 1;
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear EMMC Flash Controller interrupt
+ enable */
+ uint64_t reserved_4_18 : 15;
+ uint64_t wdog : 4; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_18 : 15;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_28_28 : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_32_32 : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_46 : 6;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en1_w1c cvmx_ciu_intx_en1_w1c_t;
+
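+/**
+ * Example (illustrative sketch, not part of the original SDK sources):
+ * the W1C alias clears individual enables without touching the other
+ * CIU_INT#_EN1 bits.  Here "core" is the watchdog bit to clear and "n"
+ * the EN1 register index; cvmx_write_csr() and CVMX_CIU_INTX_EN1_W1C()
+ * are assumed to come from the usual cvmx-csr headers.
+ *
+ * @verbatim
+ * cvmx_ciu_intx_en1_w1c_t en1;
+ * en1.u64 = 0;
+ * en1.s.wdog = 1ull << core;   // clear only this core's watchdog enable
+ * cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(n), en1.u64);
+ * @endverbatim
+ */
+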
+/**
+ * cvmx_ciu_int#_en1_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_INTX_EN1 register; reads back the corresponding CIU_INTX_EN1 value.
+ *
+ */
+union cvmx_ciu_intx_en1_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< Write 1 to set SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< Write 1 to set SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< Write 1 to set DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to set SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to set SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to set AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< Write 1 to set DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< Write 1 to set GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to set DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to set ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set EMMC Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to set RGMII/MII/MIX Interface 1 Interrupt
+ enable */
+ uint64_t usb1 : 1; /**< Write 1 to set second USB interrupt enable */
+ uint64_t uart2 : 1; /**< Write 1 to set third UART interrupt enable */
+ uint64_t wdog : 16; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en1_w1s_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_w1s_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_w1s_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en1_w1s_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to set AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< Write 1 to set DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< Write 1 to set GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to set DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to set ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set EMMC Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to set RGMII/MIX Interface 1 Interrupt
+ enable */
+ uint64_t reserved_4_17 : 14;
+ uint64_t wdog : 4; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_17 : 14;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en1_w1s_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< Write 1 to set DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to set SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to set SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to set AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to set DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to set ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to set RGMII/MII/MIX Interface 1 Interrupt
+ enable */
+ uint64_t reserved_6_17 : 12;
+ uint64_t wdog : 6; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 6;
+ uint64_t reserved_6_17 : 12;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_w1s_cn63xx cn63xxp1;
+ struct cvmx_ciu_intx_en1_w1s_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< Write 1 to set SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< Write 1 to set SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< Write 1 to set DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< Write 1 to set SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to set AGL interrupt enable */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< Write 1 to set GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to set DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to set ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to set RGMII/MII/MIX Interface 1 Interrupt
+ enable */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en1_w1s_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t reserved_41_46 : 6;
+ uint64_t dpi_dma : 1; /**< Write 1 to set DPI_DMA interrupt enable */
+ uint64_t reserved_37_39 : 3;
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t reserved_32_32 : 1;
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t reserved_28_28 : 1;
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set EMMC Flash Controller interrupt
+ enable */
+ uint64_t reserved_4_18 : 15;
+ uint64_t wdog : 4; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_18 : 15;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_28_28 : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_32_32 : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_46 : 6;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en1_w1s cvmx_ciu_intx_en1_w1s_t;
+
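+/**
+ * Example (illustrative sketch, not part of the original SDK sources):
+ * paired with the W1C alias above, the W1S alias lets two cores enable
+ * and disable CIU_INT#_EN1 bits concurrently without a read-modify-write
+ * race.  "n" is the EN1 register index; cvmx_write_csr() and
+ * CVMX_CIU_INTX_EN1_W1S() are assumed to come from the usual cvmx-csr
+ * headers.
+ *
+ * @verbatim
+ * cvmx_ciu_intx_en1_w1s_t en1;
+ * en1.u64 = 0;
+ * en1.s.mii1 = 1;   // set the RGMII/MII/MIX interface 1 enable
+ * cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(n), en1.u64);
+ * @endverbatim
+ */
+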
+/**
+ * cvmx_ciu_int#_en4_0
+ *
+ * Notes:
+ * CIU_INT0_EN4_0: PP0 /IP4
+ * CIU_INT1_EN4_0: PP1 /IP4
+ * ...
+ * CIU_INT3_EN4_0: PP3 /IP4
+ */
+union cvmx_ciu_intx_en4_0 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt enable */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt enable */
+ uint64_t powiq : 1; /**< POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< General timer interrupt enables */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt enable */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< RML Interrupt enable */
+ uint64_t twsi : 1; /**< TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCIe MSI enables */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Two UART interrupt enables */
+ uint64_t mbox : 2; /**< Two mailbox interrupt enables */
+ uint64_t gpio : 16; /**< 16 GPIO interrupt enables */
+ uint64_t workq : 16; /**< 16 work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_0_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t reserved_47_47 : 1;
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t reserved_47_47 : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn50xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_en4_0_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_0_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en4_0_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1;
+ struct cvmx_ciu_intx_en4_0_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt enable */
+ uint64_t mii : 1; /**< RGMII/MIX Interface 0 Interrupt enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt enable */
+ uint64_t powiq : 1; /**< POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt enable */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< RML Interrupt enable */
+ uint64_t twsi : 1; /**< TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCIe MSI enables */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Two UART interrupt enables */
+ uint64_t mbox : 2; /**< Two mailbox interrupt enables */
+ uint64_t gpio : 16; /**< 16 GPIO interrupt enables */
+ uint64_t workq : 16; /**< 16 work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn63xxp1;
+ struct cvmx_ciu_intx_en4_0_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt enable */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt enable */
+ uint64_t powiq : 1; /**< POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt enable */
+ uint64_t reserved_57_57 : 1;
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt enable */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< RML Interrupt enable */
+ uint64_t twsi : 1; /**< TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Two UART interrupt enables */
+ uint64_t mbox : 2; /**< Two mailbox interrupt enables */
+ uint64_t gpio : 16; /**< 16 GPIO interrupt enables */
+ uint64_t workq : 16; /**< 16 work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_57 : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en4_0_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt enable */
+ uint64_t powiq : 1; /**< POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt enable */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< RML Interrupt enable */
+ uint64_t twsi : 1; /**< TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCIe MSI enables */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Two UART interrupt enables */
+ uint64_t mbox : 2; /**< Two mailbox interrupt enables */
+ uint64_t gpio : 16; /**< 16 GPIO interrupt enables */
+ uint64_t workq : 16; /**< 16 work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en4_0 cvmx_ciu_intx_en4_0_t;
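+
+/* Illustrative usage, not part of the generated header: per the notes above,
+ * register index x selects the target PP, so core N programs CIU_INTN_EN4_0
+ * for its own IP4 line.  A plain read-modify-write looks like the sketch
+ * below; the names CVMX_CIU_INTX_EN4_0(), cvmx_read_csr(), cvmx_write_csr()
+ * and cvmx_get_core_num() are assumed from the surrounding SDK.  The W1C/W1S
+ * aliases defined next avoid the race window in this sequence.
+ *
+ *     unsigned core = cvmx_get_core_num();
+ *     cvmx_ciu_intx_en4_0_t en4_0;
+ *     en4_0.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN4_0(core));
+ *     en4_0.s.gpio |= 1 << 3;   // enable GPIO line 3 on this core's IP4
+ *     cvmx_write_csr(CVMX_CIU_INTX_EN4_0(core), en4_0.u64);
+ */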
+
+/**
+ * cvmx_ciu_int#_en4_0_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTx_EN4_0 register; reads back the corresponding CIU_INTx_EN4_0 value.
+ *
+ */
+union cvmx_ciu_intx_en4_0_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to clear Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to clear POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to clear 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to clear MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to clear PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to clear General timer interrupt enables */
+ uint64_t key_zero : 1; /**< Write 1 to clear Key Zeroization interrupt enable */
+ uint64_t ipd_drp : 1; /**< Write 1 to clear IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to clear GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to clear Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to clear RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to clear TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to clear PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to clear PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to clear UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to clear mailbox interrupt enables */
+ uint64_t gpio : 16; /**< Write 1s to clear GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to clear work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to clear Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to clear RGMII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to clear POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to clear 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to clear MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to clear PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to clear General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to clear IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to clear GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to clear Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to clear RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to clear TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to clear PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to clear PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to clear UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to clear mailbox interrupt enables */
+ uint64_t gpio : 16; /**< Write 1s to clear GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to clear work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xxp1;
+ struct cvmx_ciu_intx_en4_0_w1c_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to clear Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to clear POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< Write 1 to clear 2nd TWSI Interrupt */
+ uint64_t mpi : 1; /**< Write 1 to clear MPI/SPI interrupt */
+ uint64_t reserved_57_57 : 1;
+ uint64_t usb : 1; /**< Write 1 to clear USB EHCI or OHCI Interrupt */
+ uint64_t timer : 4; /**< Write 1 to clear General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to clear IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to clear GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to clear Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to clear RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to clear TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to clear PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to clear PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to clear UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to clear mailbox interrupt enables */
+ uint64_t gpio : 16; /**< Write 1s to clear GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to clear work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_57 : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to clear Boot bus DMA engines Interrupt
+ enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t ipdppthr : 1; /**< Write 1 to clear IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to clear POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to clear 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to clear MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to clear PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to clear General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to clear IPD QOS packet drop interrupt
+ enable */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< Write 1 to clear GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to clear Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to clear RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to clear TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to clear PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to clear PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to clear UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to clear mailbox interrupt enables */
+ uint64_t gpio : 16; /**< Write 1s to clear GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to clear work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en4_0_w1c cvmx_ciu_intx_en4_0_w1c_t;
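+
+/* Illustrative usage, not part of the generated header: writing 1s through
+ * the W1C alias clears just those enables atomically, and a read of the same
+ * address returns the current CIU_INTX_EN4_0 value, so the result can be
+ * checked without touching the base register.  Macro and accessor names are
+ * assumptions, as in the sketch above.
+ *
+ *     unsigned core = cvmx_get_core_num();
+ *     cvmx_ciu_intx_en4_0_w1c_t w1c;
+ *     w1c.u64 = 0;
+ *     w1c.s.timer = 0xf;   // clear all four general timer enables
+ *     cvmx_write_csr(CVMX_CIU_INTX_EN4_0_W1C(core), w1c.u64);
+ *     w1c.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN4_0_W1C(core)); // post-clear state
+ */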
+
+/**
+ * cvmx_ciu_int#_en4_0_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_INTX_EN4_0 register; reads back the corresponding CIU_INTX_EN4_0 value.
+ *
+ */
+union cvmx_ciu_intx_en4_0_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to set Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to set RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to set IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to set POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to set 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to set MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to set PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to set General timer interrupt enables */
+ uint64_t key_zero : 1; /**< Write 1 to set Key Zeroization interrupt enable */
+ uint64_t ipd_drp : 1; /**< Write 1 to set IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to set GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to set Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to set RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to set TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to set PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to set PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to set UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to set mailbox interrupt enables */
+ uint64_t gpio : 16; /**< Write 1s to set GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to set work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to set Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to set RGMII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to set IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to set POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to set 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to set MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to set PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to set General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to set IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to set GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to set Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to set RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to set TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to set PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to set PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to set UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to set mailbox interrupt enables */
+ uint64_t gpio : 16; /**< Write 1s to set GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to set work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xxp1;
+ struct cvmx_ciu_intx_en4_0_w1s_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to set Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to set RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to set IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to set POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< Write 1 to set 2nd TWSI Interrupt */
+ uint64_t mpi : 1; /**< Write 1 to set MPI/SPI interrupt */
+ uint64_t reserved_57_57 : 1;
+ uint64_t usb : 1; /**< Write 1 to set USB EHCI or OHCI Interrupt */
+ uint64_t timer : 4; /**< Write 1 to set General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to set IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to set GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to set Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to set RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to set TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to set PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to set PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to set UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to set mailbox interrupt enables */
+ uint64_t gpio : 16; /**< Write 1s to set GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to set work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_57 : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Write 1 to set Boot bus DMA engines Interrupt
+ enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t ipdppthr : 1; /**< Write 1 to set IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to set POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< Write 1 to set 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< Write 1 to set MPI/SPI interrupt enable */
+ uint64_t pcm : 1; /**< Write 1 to set PCM/TDM interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< Write 1 to set General timer interrupt enables */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< Write 1 to set IPD QOS packet drop interrupt
+ enable */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< Write 1 to set GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to set Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to set RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to set TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to set PCIe MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to set PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to set UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to set mailbox interrupt enables */
+ uint64_t gpio : 16; /**< Write 1s to set GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to set work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en4_0_w1s cvmx_ciu_intx_en4_0_w1s_t;
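+
+/* Illustrative usage, not part of the generated header: the set-side
+ * counterpart of the W1C sketch above, under the same assumed names.
+ *
+ *     unsigned core = cvmx_get_core_num();
+ *     cvmx_ciu_intx_en4_0_w1s_t w1s;
+ *     w1s.u64 = 0;
+ *     w1s.s.timer = 0x1;   // re-enable general timer 0 only
+ *     cvmx_write_csr(CVMX_CIU_INTX_EN4_0_W1S(core), w1s.u64);
+ */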
+
+/**
+ * cvmx_ciu_int#_en4_1
+ *
+ * Notes:
+ * PPx/IP4 will be raised when...
+ * PPx/IP4 = |([CIU_SUM1_PPx_IP4, CIU_INTx_SUM4] & [CIU_INTx_EN4_1, CIU_INTx_EN4_0])
+ */
+union cvmx_ciu_intx_en4_1 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t agl : 1; /**< AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< DFA interrupt enable */
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t zip : 1; /**< ZIP interrupt enable */
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt enable */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt enable */
+ uint64_t usb1 : 1; /**< Second USB Interrupt enable */
+ uint64_t uart2 : 1; /**< Third UART interrupt enable */
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_1_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t wdog : 2; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn50xx;
+ struct cvmx_ciu_intx_en4_1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_1_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn52xxp1;
+ struct cvmx_ciu_intx_en4_1_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_1_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en4_1_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1;
+ struct cvmx_ciu_intx_en4_1_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t agl : 1; /**< AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< DFA interrupt enable */
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t zip : 1; /**< ZIP interrupt enable */
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt enable */
+ uint64_t mii1 : 1; /**< RGMII/MIX Interface 1 Interrupt enable */
+ uint64_t reserved_4_17 : 14;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_17 : 14;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en4_1_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t agl : 1; /**< AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< DFA interrupt enable */
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t zip : 1; /**< ZIP interrupt enable */
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt enable */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt enable */
+ uint64_t reserved_6_17 : 12;
+ uint64_t wdog : 6; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 6;
+ uint64_t reserved_6_17 : 12;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_intx_en4_1_cn63xx cn63xxp1;
+ struct cvmx_ciu_intx_en4_1_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t agl : 1; /**< AGL interrupt enable */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< DFA interrupt enable */
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t zip : 1; /**< ZIP interrupt enable */
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt enable */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt enable */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en4_1_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t reserved_41_46 : 6;
+ uint64_t dpi_dma : 1; /**< DPI_DMA interrupt enable */
+ uint64_t reserved_37_39 : 3;
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t reserved_32_32 : 1;
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t reserved_28_28 : 1;
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt enable */
+ uint64_t reserved_4_18 : 15;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_18 : 15;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_28_28 : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_32_32 : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_46 : 6;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en4_1 cvmx_ciu_intx_en4_1_t;
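+
+/* Illustrative reading of the formula in the notes above, not part of the
+ * generated header: PPx/IP4 asserts when any summary bit ANDed with its
+ * enable bit is set across the two 64-bit halves.  Software can evaluate
+ * the same condition as sketched below; CVMX_CIU_SUM1_PPX_IP4(),
+ * CVMX_CIU_INTX_SUM4() and the EN4 address macros are assumed to be defined
+ * elsewhere in this header.
+ *
+ *     unsigned core = cvmx_get_core_num();
+ *     uint64_t sum4 = cvmx_read_csr(CVMX_CIU_INTX_SUM4(core));
+ *     uint64_t sum1 = cvmx_read_csr(CVMX_CIU_SUM1_PPX_IP4(core));
+ *     uint64_t en0  = cvmx_read_csr(CVMX_CIU_INTX_EN4_0(core));
+ *     uint64_t en1  = cvmx_read_csr(CVMX_CIU_INTX_EN4_1(core));
+ *     int ip4_pending = ((sum4 & en0) | (sum1 & en1)) != 0;
+ */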
+
+/**
+ * cvmx_ciu_int#_en4_1_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTX_EN4_1 register; reads back the corresponding CIU_INTX_EN4_1 value.
+ *
+ */
+union cvmx_ciu_intx_en4_1_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_1_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< Write 1 to clear SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< Write 1 to clear SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< Write 1 to clear DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to clear SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to clear SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< Write 1 to clear DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< Write 1 to clear GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to clear DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to clear ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear EMMC Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 1
+ Interrupt enable */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_1_w1c_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< Write 1 to clear DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< Write 1 to clear GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to clear DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to clear ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear EMMC Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to clear RGMII/MIX Interface 1
+ Interrupt enable */
+ uint64_t reserved_4_17 : 14;
+ uint64_t wdog : 4; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_17 : 14;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< Write 1 to clear DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to clear SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to clear SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to clear DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to clear ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 1
+ Interrupt enable */
+ uint64_t reserved_6_17 : 12;
+ uint64_t wdog : 6; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 6;
+ uint64_t reserved_6_17 : 12;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn63xx cn63xxp1;
+ struct cvmx_ciu_intx_en4_1_w1c_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< Write 1 to clear SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< Write 1 to clear SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< Write 1 to clear DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< Write 1 to clear SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL interrupt enable */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< Write 1 to clear GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to clear DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to clear ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 1
+ Interrupt enable */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t reserved_41_46 : 6;
+ uint64_t dpi_dma : 1; /**< Write 1 to clear DPI_DMA interrupt enable */
+ uint64_t reserved_37_39 : 3;
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t reserved_32_32 : 1;
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t reserved_28_28 : 1;
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear EMMC Flash Controller interrupt
+ enable */
+ uint64_t reserved_4_18 : 15;
+ uint64_t wdog : 4; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_18 : 15;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_28_28 : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_32_32 : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_46 : 6;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en4_1_w1c cvmx_ciu_intx_en4_1_w1c_t;
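+
+/*
+ * Usage note (a minimal sketch, not normative): the write-1-to-clear alias
+ * clears exactly the enable bits written as 1 and leaves 0 bits untouched,
+ * so no read-modify-write is needed and concurrent updates from other
+ * cores are not lost.  A CVMX_CIU_INTX_EN4_1_W1C(offset) address macro
+ * from the companion cvmx headers is assumed here.
+ *
+ *   cvmx_ciu_intx_en4_1_w1c_t w1c;
+ *   w1c.u64 = 0;
+ *   w1c.s.ptp = 1;       /* disable only the PTP interrupt */
+ *   cvmx_write_csr(CVMX_CIU_INTX_EN4_1_W1C(0), w1c.u64);
+ */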
+
+/**
+ * cvmx_ciu_int#_en4_1_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_INTX_EN4_1 register; reads back the corresponding CIU_INTX_EN4_1 value.
+ *
+ */
+union cvmx_ciu_intx_en4_1_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_1_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< Write 1 to set SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< Write 1 to set SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< Write 1 to set DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to set SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to set SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to set AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< Write 1 to set DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< Write 1 to set GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to set DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to set ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set EMMC Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to set RGMII/MII/MIX Interface 1 Interrupt
+ enable */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_1_w1s_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to set AGL interrupt enable */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< Write 1 to set DPI_DMA interrupt enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< Write 1 to set GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to set DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to set ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set EMMC Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to set RGMII/MIX Interface 1 Interrupt
+ enable */
+ uint64_t reserved_4_17 : 14;
+ uint64_t wdog : 4; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_17 : 14;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< Write 1 to set DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to set SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to set SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to set AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to set DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to set ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to set RGMII/MII/MIX Interface 1 Interrupt
+ enable */
+ uint64_t reserved_6_17 : 12;
+ uint64_t wdog : 6; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 6;
+ uint64_t reserved_6_17 : 12;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn63xx cn63xxp1;
+ struct cvmx_ciu_intx_en4_1_w1s_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< Write 1 to set SRIO3 interrupt enable */
+ uint64_t srio2 : 1; /**< Write 1 to set SRIO2 interrupt enable */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< Write 1 to set DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< Write 1 to set SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to set AGL interrupt enable */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< Write 1 to set GMX1 interrupt enable */
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to set DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to set ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to set RGMII/MII/MIX Interface 1 Interrupt
+ enable */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t reserved_41_46 : 6;
+ uint64_t dpi_dma : 1; /**< Write 1 to set DPI_DMA interrupt enable */
+ uint64_t reserved_37_39 : 3;
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t reserved_32_32 : 1;
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t reserved_28_28 : 1;
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set EMMC Flash Controller interrupt
+ enable */
+ uint64_t reserved_4_18 : 15;
+ uint64_t wdog : 4; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_18 : 15;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_28_28 : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_32_32 : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_46 : 6;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_en4_1_w1s cvmx_ciu_intx_en4_1_w1s_t;
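+
+/*
+ * Usage note (a minimal sketch, not normative): the write-1-to-set alias is
+ * the counterpart of the _w1c form above; bits written as 1 are set and
+ * bits written as 0 are untouched.  A CVMX_CIU_INTX_EN4_1_W1S(offset)
+ * address macro from the companion cvmx headers is assumed here.
+ *
+ *   cvmx_ciu_intx_en4_1_w1s_t w1s;
+ *   w1s.u64 = 0;
+ *   w1s.s.ptp = 1;       /* enable only the PTP interrupt */
+ *   cvmx_write_csr(CVMX_CIU_INTX_EN4_1_W1S(0), w1s.u64);
+ */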
+
+/**
+ * cvmx_ciu_int#_sum0
+ */
+union cvmx_ciu_intx_sum0 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_sum0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt
+ See MIX0_ISR */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt, Set when MPI transaction
+ finished, see MPI_CFG[INT_ENA] and MPI_STS[BUSY] */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer 0-3 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs, writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is per
+ cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_SUM2_*[TIMER] field implement all 10 CIU_TIM*
+ interrupts. */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t gmx_drp : 2; /**< GMX0/1 packet drop interrupt
+ Set any time corresponding GMX0/1 drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This interrupt will assert if any bit within
+ CIU_BLOCK_INT is asserted. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM1_PPX_IPx (CIU_SUM1_IOX_INT) bit is set
+ and corresponding enable bit in CIU_INTx_EN is set
+ PPs use CIU_INTx_SUM0 where x=0-7
+ PCIe uses the CIU_INTx_SUM0 where x=32-33.
+ Note that WDOG_SUM only summarizes the SUM1/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-11
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCIe internal interrupts for entries 32-33
+ which equal CIU_PCI_INTA[INT] */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ When GPIO_MULTI_CAST[EN] == 1
+ Write 1 to clear either the per PP or common GPIO
+ edge-triggered interrupts, depending on mode.
+ See GPIO_MULTI_CAST for all details.
+ When GPIO_MULTI_CAST[EN] == 0
+ Read Only, retain the same behavior as o63. */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_sum0_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t reserved_47_47 : 1;
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ PPs use CIU_INTx_SUM0 where x=0-1.
+ PCI uses the CIU_INTx_SUM0 where x=32.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t reserved_47_47 : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_intx_sum0_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ PPs use CIU_INTx_SUM0 where x=0-3.
+ PCI uses the CIU_INTx_SUM0 where x=32.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_intx_sum0_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt
+ KEY_ZERO will be set when the external ZERO_KEYS
+ pin is sampled high. KEY_ZERO is cleared by SW */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ PPs use CIU_INTx_SUM0 where x=0-31.
+ PCI uses the CIU_INTx_SUM0 where x=32.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_intx_sum0_cn38xx cn38xxp2;
+ struct cvmx_ciu_intx_sum0_cn30xx cn50xx;
+ struct cvmx_ciu_intx_sum0_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_INT_SUM1 bit is set and corresponding
+ enable bit in CIU_INTx_EN is set, where x
+ is the same as x in this CIU_INTx_SUM0.
+ PPs use CIU_INTx_SUM0 where x=0-7.
+ PCI uses the CIU_INTx_SUM0 where x=32.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3
+ Note that WDOG_SUM only summarizes the SUM/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ Refer to "Receiving Message-Signalled
+ Interrupts" in the PCIe chapter of the spec */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the PCIe chapter of the spec */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-7
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_sum0_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_sum0_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt
+ KEY_ZERO will be set when the external ZERO_KEYS
+ pin is sampled high. KEY_ZERO is cleared by SW */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ PPs use CIU_INTx_SUM0 where x=0-23.
+ PCI uses the CIU_INTx_SUM0 where x=32.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ Refer to "Receiving Message-Signalled
+ Interrupts" in the PCIe chapter of the spec */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the PCIe chapter of the spec */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-23
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_sum0_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_sum0_cn38xx cn58xx;
+ struct cvmx_ciu_intx_sum0_cn38xx cn58xxp1;
+ struct cvmx_ciu_intx_sum0_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mii : 1; /**< RGMII/MIX Interface 0 Interrupt
+ See MIX0_ISR */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt, Set when MPI transaction
+ finished, see MPI_CFG[INT_ENA] and MPI_STS[BUSY] */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer 0-3 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs, writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is per
+ cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_SUM2_*[TIMER] field implement all 10 CIU_TIM*
+ interrupts. */
+ uint64_t sum2 : 1; /**< SUM2&EN2 SUMMARY bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM2_PPX_IPx (CIU_SUM2_IOX_INT) bit is set
+ and corresponding enable bit in CIU_EN2_PPX_IPx
+ (CIU_EN2_IOX_INT) is set.
+ Note that SUM2 only summarizes the SUM2/EN2
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t gmx_drp : 2; /**< GMX0/1 packet drop interrupt
+ Set any time corresponding GMX0/1 drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This interrupt will assert if any bit within
+ CIU_BLOCK_INT is asserted. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM1_PPX_IPx (CIU_SUM1_IOX_INT) bit is set
+ and corresponding enable bit in CIU_INTx_EN is set
+ PPs use CIU_INTx_SUM0 where x=0-7
+ PCIe uses the CIU_INTx_SUM0 where x=32-33.
+ Note that WDOG_SUM only summarizes the SUM1/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-11
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCIe internal interrupts for entries 32-33
+ which equal CIU_PCI_INTA[INT] */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ When GPIO_MULTI_CAST[EN] == 1
+ Write 1 to clear either the per PP or common GPIO
+ edge-triggered interrupts, depending on mode.
+ See GPIO_MULTI_CAST for all details.
+ When GPIO_MULTI_CAST[EN] == 0
+ Read Only, retain the same behavior as o63. */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t sum2 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_sum0_cn52xx cn63xx;
+ struct cvmx_ciu_intx_sum0_cn52xx cn63xxp1;
+ struct cvmx_ciu_intx_sum0_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt
+ See MIX0_ISR */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt, Set when MPI transaction
+ finished, see MPI_CFG[INT_ENA] and MPI_STS[BUSY] */
+ uint64_t reserved_57_57 : 1;
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer 0-3 interrupts.
+ Prior to pass 1.2 or
+ when CIU_TIM_MULTI_CAST[EN]==0, this interrupt is
+ common for all PP/IRQs, writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is per
+ cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_SUM2_*[TIMER] field implement all 10 CIU_TIM*
+ interrupts. */
+ uint64_t sum2 : 1; /**< SUM2&EN2 SUMMARY bit
+ In pass 1.2 and subsequent passes,
+ this read-only bit reads as a one whenever any
+ CIU_SUM2_PPX_IPx (CIU_SUM2_IOX_INT) bit is set
+ and corresponding enable bit in CIU_EN2_PPX_IPx
+ (CIU_EN2_IOX_INT) is set.
+ Note that SUM2 only summarizes the SUM2/EN2
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts.
+ Prior to pass 1.2, SUM2 did not exist and this
+ bit reads as zero. */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t gmx_drp : 2; /**< GMX0/1 packet drop interrupt
+ Set any time corresponding GMX0/1 drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This interrupt will assert if any bit within
+ CIU_BLOCK_INT is asserted. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM1_PPX_IPx (CIU_SUM1_IOX_INT) bit is set
+ and corresponding enable bit in CIU_INTx_EN is set
+ PPs use CIU_INTx_SUM0 where x=0-19
+ PCIe/sRIO uses the CIU_INTx_SUM0 where x=32-33.
+ Note that WDOG_SUM only summarizes the SUM1/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-11
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCIe/sRIO internal interrupts for entries 32-33
+ which equal CIU_PCI_INTA[INT] */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t sum2 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_57 : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_sum0_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt, Set when MPI transaction
+ finished, see MPI_CFG[INT_ENA] and MPI_STS[BUSY] */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer 0-3 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs, writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is per
+ cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_SUM2_*[TIMER] field implement all 10 CIU_TIM*
+ interrupts. */
+ uint64_t sum2 : 1; /**< SUM2&EN2 SUMMARY bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM2_PPX_IPx (CIU_SUM2_IOX_INT) bit is set
+ and corresponding enable bit in CIU_EN2_PPX_IPx
+ (CIU_EN2_IOX_INT) is set.
+ Note that SUM2 only summarizes the SUM2/EN2
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX0/1 packet drop interrupt
+ Set any time corresponding GMX0/1 drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This interrupt will assert if any bit within
+ CIU_BLOCK_INT is asserted. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM1_PPX_IPx (CIU_SUM1_IOX_INT) bit is set
+ and corresponding enable bit in CIU_INTx_EN is set
+ PPs use CIU_INTx_SUM0 where x=0-7
+ PCIe uses the CIU_INTx_SUM0 where x=32-33.
+ Note that WDOG_SUM only summarizes the SUM1/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-11
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCIe internal interrupts for entries 32-33
+ which equal CIU_PCI_INTA[INT] */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ When GPIO_MULTI_CAST[EN] == 1
+ Write 1 to clear either the per PP or common GPIO
+ edge-triggered interrupts, depending on mode.
+ See GPIO_MULTI_CAST for all details.
+ When GPIO_MULTI_CAST[EN] == 0
+ Read Only, retain the same behavior as o63. */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t sum2 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_sum0 cvmx_ciu_intx_sum0_t;
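+
+/* Usage sketch (illustrative, not part of the imported SDK file): a CSR
+ * value is read into the union's u64 view and decoded through the common
+ * bitfield layout. cvmx_read_csr(), cvmx_dprintf() and the address macro
+ * CVMX_CIU_INTX_SUM0() are assumed to be available from the usual cvmx
+ * headers; index 0 selects the PP0/IP2 view of the register.
+ */
+static inline void cvmx_ciu_sum0_example(void)
+{
+    cvmx_ciu_intx_sum0_t sum0;
+
+    /* Read the 64-bit CSR and view it through the generic layout. */
+    sum0.u64 = cvmx_read_csr(CVMX_CIU_INTX_SUM0(0));
+
+    if (sum0.s.workq)   /* one bit per POW work-queue group */
+        cvmx_dprintf("workq pending: 0x%04x\n", (unsigned)sum0.s.workq);
+    if (sum0.s.timer)   /* general timers 0-3 */
+        cvmx_dprintf("timer pending: 0x%x\n", (unsigned)sum0.s.timer);
+}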
+
+/**
+ * cvmx_ciu_int#_sum4
+ */
+union cvmx_ciu_intx_sum4 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_sum4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt
+ See MIX0_ISR */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer 0-3 interrupts
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' from any PP/IRQ
+ clears all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is per
+ cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_SUM2_*[TIMER] field implements all 10 CIU_TIM*
+ interrupts. */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt
+ Set any time corresponding GMX drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This bit is set when any bit is set in
+ CIU_BLOCK_INT. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM1_PPX_IPx (CIU_SUM1_IOX_INT) bit is set
+ and the corresponding enable bit in CIU_INTx_EN is set.
+ PPs use CIU_INTx_SUM0 where x=0-19;
+ PCIe uses CIU_INTx_SUM0 where x=32-33.
+ Note that WDOG_SUM only summarizes the SUM1/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-5
+ [33] is the or of <31:16>
+ [32] is the or of <15:0> */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ When GPIO_MULTI_CAST[EN] == 1
+ Write 1 to clear either the per PP interrupt or
+ common GPIO interrupt for all PP/IOs, depending
+ on the mode setting. This applies to all 16 GPIOs.
+ See GPIO_MULTI_CAST for all details.
+ When GPIO_MULTI_CAST[EN] == 0,
+ read-only; retains the same behavior as o63. */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_sum4_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t reserved_47_47 : 1;
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ PPs use CIU_INTx_SUM4 where x=0-1. */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t reserved_47_47 : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn50xx;
+ struct cvmx_ciu_intx_sum4_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< SUM1&EN4_1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_INT_SUM1 bit is set and corresponding
+ enable bit in CIU_INTx_EN4_1 is set, where x
+ is the same as x in this CIU_INTx_SUM4.
+ PPs use CIU_INTx_SUM4 for IP4, where x=PPid.
+ Note that WDOG_SUM only summarizes the SUM/EN4_1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ Refer to "Receiving Message-Signalled
+ Interrupts" in the PCIe chapter of the spec */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the PCIe chapter of the spec */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-3
+ [33] is the or of <31:16>
+ [32] is the or of <15:0> */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_sum4_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_sum4_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt
+ KEY_ZERO will be set when the external ZERO_KEYS
+ pin is sampled high. KEY_ZERO is cleared by SW */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ These registers report WDOG to IP4 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ Refer to "Receiving Message-Signalled
+ Interrupts" in the PCIe chapter of the spec */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the PCIe chapter of the spec */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-11
+ [33] is the or of <31:16>
+ [32] is the or of <15:0> */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_sum4_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_sum4_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt
+ KEY_ZERO will be set when the external ZERO_KEYS
+ pin is sampled high. KEY_ZERO is cleared by SW */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ These registers report WDOG to IP4 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_sum4_cn58xx cn58xxp1;
+ struct cvmx_ciu_intx_sum4_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mii : 1; /**< RGMII/MIX Interface 0 Interrupt
+ See MIX0_ISR */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer 0-3 interrupts
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' from any PP/IRQ
+ clears all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is per
+ cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_SUM2_*[TIMER] field implements all 10 CIU_TIM*
+ interrupts. */
+ uint64_t sum2 : 1; /**< SUM2&EN2 SUMMARY bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM2_PPX_IPx (CIU_SUM2_IOX_INT) bit is set
+ and corresponding enable bit in CIU_EN2_PPX_IPx
+ (CIU_EN2_IOX_INT) is set.
+ Note that SUM2 only summarizes the SUM2/EN2
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt
+ Set any time corresponding GMX drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This bit is set when any bit is set in
+ CIU_BLOCK_INT. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM1_PPX_IPx (CIU_SUM1_IOX_INT) bit is set
+ and the corresponding enable bit in CIU_INTx_EN is set.
+ PPs use CIU_INTx_SUM0 where x=0-19;
+ PCIe uses CIU_INTx_SUM0 where x=32-33.
+ Note that WDOG_SUM only summarizes the SUM1/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-5
+ [33] is the or of <31:16>
+ [32] is the or of <15:0> */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ When GPIO_MULTI_CAST[EN] == 1
+ Write 1 to clear either the per PP interrupt or
+ common GPIO interrupt for all PP/IOs, depending
+ on the mode setting. This applies to all 16 GPIOs.
+ See GPIO_MULTI_CAST for all details.
+ When GPIO_MULTI_CAST[EN] == 0,
+ read-only; retains the same behavior as o63. */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t sum2 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_intx_sum4_cn52xx cn63xx;
+ struct cvmx_ciu_intx_sum4_cn52xx cn63xxp1;
+ struct cvmx_ciu_intx_sum4_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt
+ See MIX0_ISR */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t reserved_57_57 : 1;
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer 0-3 interrupts.
+ Prior to pass 1.2 or
+ when CIU_TIM_MULTI_CAST[EN]==0, this interrupt is
+ common for all PP/IRQs; writing '1' from any PP/IRQ
+ clears all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is per
+ cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_SUM2_*[TIMER] field implements all 10 CIU_TIM*
+ interrupts. */
+ uint64_t sum2 : 1; /**< SUM2&EN2 SUMMARY bit
+ In pass 1.2 and subsequent passes,
+ this read-only bit reads as a one whenever any
+ CIU_SUM2_PPX_IPx (CIU_SUM2_IOX_INT) bit is set
+ and corresponding enable bit in CIU_EN2_PPX_IPx
+ (CIU_EN2_IOX_INT) is set.
+ Note that SUM2 only summarizes the SUM2/EN2
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts.
+ Prior to pass 1.2, SUM2 did not exist and this
+ bit reads as zero. */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt
+ Set any time corresponding GMX drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This bit is set when any bit is set in
+ CIU_BLOCK_INT. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM1_PPX_IPx (CIU_SUM1_IOX_INT) bit is set
+ and the corresponding enable bit in CIU_INTx_EN is set.
+ PPs use CIU_INTx_SUM0 where x=0-19;
+ PCIe/sRIO uses CIU_INTx_SUM0 where x=32-33.
+ Note that WDOG_SUM only summarizes the SUM1/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-5
+ [33] is the or of <31:16>
+ [32] is the or of <15:0> */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t sum2 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_57 : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_intx_sum4_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer 0-3 interrupts
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' from any PP/IRQ
+ clears all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is per
+ cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_SUM2_*[TIMER] field implements all 10 CIU_TIM*
+ interrupts. */
+ uint64_t sum2 : 1; /**< SUM2&EN2 SUMMARY bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM2_PPX_IPx (CIU_SUM2_IOX_INT) bit is set
+ and corresponding enable bit in CIU_EN2_PPX_IPx
+ (CIU_EN2_IOX_INT) is set.
+ Note that SUM2 only summarizes the SUM2/EN2
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop interrupt
+ Set any time corresponding GMX drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This bit is set when any bit is set in
+ CIU_BLOCK_INT. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM1_PPX_IPx (CIU_SUM1_IOX_INT) bit is set
+ and the corresponding enable bit in CIU_INTx_EN is set.
+ PPs use CIU_INTx_SUM0 where x=0-19;
+ PCIe uses CIU_INTx_SUM0 where x=32-33.
+ Note that WDOG_SUM only summarizes the SUM1/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-5
+ [33] is the or of <31:16>
+ [32] is the or of <15:0> */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ When GPIO_MULTI_CAST[EN] == 1
+ Write 1 to clear either the per PP interrupt or
+ common GPIO interrupt for all PP/IOs, depending
+ on the mode setting. This applies to all 16 GPIOs.
+ See GPIO_MULTI_CAST for all details.
+ When GPIO_MULTI_CAST[EN] == 0,
+ read-only; retains the same behavior as o63. */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t sum2 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_intx_sum4 cvmx_ciu_intx_sum4_t;
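+
+/* Model-dispatch sketch (illustrative, not SDK code): the per-model member
+ * structs matter where a field changed width between chips; GMX_DRP, for
+ * example, is two bits on cn66xx but one bit on cnf71xx. OCTEON_IS_MODEL()
+ * and the OCTEON_CN66XX/OCTEON_CNF71XX model codes are assumed to come
+ * from octeon-model.h.
+ */
+static inline int cvmx_ciu_sum4_gmx_drp(uint64_t csr_value)
+{
+    cvmx_ciu_intx_sum4_t sum4;
+
+    sum4.u64 = csr_value;
+    if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+        return sum4.cn66xx.gmx_drp != 0;  /* 2-bit field, one per GMX */
+    if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
+        return sum4.cnf71xx.gmx_drp != 0; /* single GMX on this part */
+    return sum4.s.gmx_drp != 0;           /* generic layout otherwise */
+}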
+
+/**
+ * cvmx_ciu_int33_sum0
+ */
+union cvmx_ciu_int33_sum0 {
+ uint64_t u64;
+ struct cvmx_ciu_int33_sum0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt
+ See MIX0_ISR */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer 0-3 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' from any PP/IRQ
+ clears all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is per
+ cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_SUM2_*[TIMER] field implements all 10 CIU_TIM*
+ interrupts. */
+ uint64_t sum2 : 1; /**< SUM2&EN2 SUMMARY bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM2_PPX_IPx (CIU_SUM2_IOX_INT) bit is set
+ and corresponding enable bit in CIU_EN2_PPX_IPx
+ (CIU_EN2_IOX_INT) is set.
+ Note that SUM2 only summarizes the SUM2/EN2
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt
+ Set any time corresponding GMX drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This interrupt will assert if any bit within
+ CIU_BLOCK_INT is asserted. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM1_PPX_IPx bit is set and corresponding
+ enable bit in CIU_INTx_EN is set, where x
+ is the same as x in this CIU_INTx_SUM0.
+ PPs use CIU_INTx_SUM0 where x=0-7.
+ PCIe uses the CIU_INTx_SUM0 where x=32-33.
+ Note that WDOG_SUM only summarizes the SUM1/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< A read-only copy of CIU_PCI_INTA[INT] */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ When GPIO_MULTI_CAST[EN] == 1
+ Write 1 to clear either the per PP or common GPIO
+ edge-triggered interrupts, depending on mode.
+ See GPIO_MULTI_CAST for all details.
+ When GPIO_MULTI_CAST[EN] == 0,
+ read-only; retains the same behavior as o63. */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t sum2 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_int33_sum0_s cn61xx;
+ struct cvmx_ciu_int33_sum0_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt
+ See MIX0_ISR */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer interrupts
+ Set any time the corresponding CIU timer expires */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop interrupt
+ Set any time corresponding GMX drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This interrupt will assert if any bit within
+ CIU_BLOCK_INT is asserted. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_INT_SUM1 bit is set and corresponding
+ enable bit in CIU_INTx_EN is set, where x
+ is the same as x in this CIU_INTx_SUM0.
+ PPs use CIU_INTx_SUM0 where x=0-11.
+ PCIe/sRIO uses the CIU_INTx_SUM0 where x=32-33.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3
+ Note that WDOG_SUM only summarizes the SUM/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< A read-only copy of CIU_PCI_INTA[INT] */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_int33_sum0_cn63xx cn63xxp1;
+ struct cvmx_ciu_int33_sum0_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt
+ See MIX0_ISR */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t reserved_57_57 : 1;
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer 0-3 interrupts.
+ Prior to pass 1.2 or
+ when CIU_TIM_MULTI_CAST[EN]==0, this interrupt is
+ common for all PP/IRQs; writing '1' from any PP/IRQ
+ clears all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is per
+ cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_SUM2_*[TIMER] field implements all 10 CIU_TIM*
+ interrupts. */
+ uint64_t sum2 : 1; /**< SUM2&EN2 SUMMARY bit
+ In pass 1.2 and subsequent passes,
+ this read-only bit reads as a one whenever any
+ CIU_SUM2_PPX_IPx (CIU_SUM2_IOX_INT) bit is set
+ and corresponding enable bit in CIU_EN2_PPX_IPx
+ (CIU_EN2_IOX_INT) is set.
+ Note that SUM2 only summarizes the SUM2/EN2
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts.
+ Prior to pass 1.2, SUM2 did not exist and this
+ bit reads as zero. */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt
+ Set any time corresponding GMX drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This interrupt will assert if any bit within
+ CIU_BLOCK_INT is asserted. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM1_PPX_IPx (CIU_SUM1_IOX_INT) bit is set
+ and the corresponding enable bit in CIU_INTx_EN is set.
+ PPs use CIU_INTx_SUM0 where x=0-19;
+ PCIe/sRIO uses CIU_INTx_SUM0 where x=32-33.
+ Note that WDOG_SUM only summarizes the SUM1/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< A read-only copy of CIU_PCI_INTA[INT] */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t sum2 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_57 : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_int33_sum0_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT* */
+ uint64_t powiq : 1; /**< POW IQ interrupt
+ See POW_IQ_INT */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt
+ See MIO_TWS1_INT */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t timer : 4; /**< General timer 0-3 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' from any PP/IRQ
+ clears all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is per
+ cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_SUM2_*[TIMER] field implements all 10 CIU_TIM*
+ interrupts. */
+ uint64_t sum2 : 1; /**< SUM2&EN2 SUMMARY bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM2_PPX_IPx (CIU_SUM2_IOX_INT) bit is set
+ and corresponding enable bit in CIU_EN2_PPX_IPx
+ (CIU_EN2_IOX_INT) is set.
+ Note that SUM2 only summarizes the SUM2/EN2
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop interrupt
+ Set any time corresponding GMX drops a packet */
+ uint64_t trace : 1; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t rml : 1; /**< RML Interrupt
+ This interrupt will assert if any bit within
+ CIU_BLOCK_INT is asserted. */
+ uint64_t twsi : 1; /**< TWSI Interrupt
+ See MIO_TWS0_INT */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_SUM1_PPX_IPx bit is set and corresponding
+ enable bit in CIU_INTx_EN is set, where x
+ is the same as x in this CIU_INTx_SUM0.
+ PPs use CIU_INTx_SUM0 where x=0-7.
+ PCIe uses the CIU_INTx_SUM0 where x=32-33.
+ Note that WDOG_SUM only summarizes the SUM1/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCIe MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ PCI_INT<3> = INTD
+ PCI_INT<2> = INTC
+ PCI_INT<1> = INTB
+ PCI_INT<0> = INTA */
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t mbox : 2; /**< A read-only copy of CIU_PCI_INTA[INT] */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ When GPIO_MULTI_CAST[EN] == 1
+ Write 1 to clear either the per PP or common GPIO
+ edge-triggered interrupts, depending on mode.
+ See GPIO_MULTI_CAST for all details.
+ When GPIO_MULTI_CAST[EN] == 0,
+ read-only; retains the same behavior as o63. */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ See POW_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t sum2 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_int33_sum0 cvmx_ciu_int33_sum0_t;
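+
+/* Polling sketch (illustrative): for the PCIe views of this register the
+ * MBOX field is documented above as a read-only copy of CIU_PCI_INTA[INT],
+ * so emulated INTA can be tested with a plain field check. The address
+ * macro CVMX_CIU_INT33_SUM0 is assumed to be defined with the other CIU
+ * CSR addresses.
+ */
+static inline int cvmx_ciu_pcie_inta_pending(void)
+{
+    cvmx_ciu_int33_sum0_t sum;
+
+    sum.u64 = cvmx_read_csr(CVMX_CIU_INT33_SUM0);
+    return sum.s.mbox != 0; /* read-only copy of CIU_PCI_INTA[INT] */
+}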
+
+/**
+ * cvmx_ciu_int_dbg_sel
+ */
+union cvmx_ciu_int_dbg_sel {
+ uint64_t u64;
+ struct cvmx_ciu_int_dbg_sel_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t sel : 3; /**< Selects whether all or a specific interrupt is
+ presented on the debug port.
+ 0=erst_n
+ 1=start_bist
+ 2=toggle at sclk/2 freq
+ 3=All PP interrupt bits are ORed together
+ 4=Only the selected virtual PP/IRQ is selected */
+ uint64_t reserved_10_15 : 6;
+ uint64_t irq : 2; /**< Which IRQ to select
+ 0=IRQ2
+ 1=IRQ3
+ 2=IRQ4 */
+ uint64_t reserved_5_7 : 3;
+ uint64_t pp : 5; /**< Which PP to select */
+#else
+ uint64_t pp : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t irq : 2;
+ uint64_t reserved_10_15 : 6;
+ uint64_t sel : 3;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_ciu_int_dbg_sel_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t sel : 3; /**< Selects whether all or a specific interrupt is
+ presented on the debug port.
+ 0=erst_n
+ 1=start_bist
+ 2=toggle at sclk/2 freq
+ 3=All PP interrupt bits are ORed together
+ 4=Only the selected virtual PP/IRQ is selected */
+ uint64_t reserved_10_15 : 6;
+ uint64_t irq : 2; /**< Which IRQ to select
+ 0=IRQ2
+ 1=IRQ3
+ 2=IRQ4 */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pp : 4; /**< Which PP to select */
+#else
+ uint64_t pp : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t irq : 2;
+ uint64_t reserved_10_15 : 6;
+ uint64_t sel : 3;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_int_dbg_sel_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t sel : 3; /**< Selects whether all or a specific interrupt is
+ presented on the debug port.
+ 0=erst_n
+ 1=start_bist
+ 2=toggle at sclk/2 freq
+ 3=All PP interrupt bits are ORed together
+ 4=Only the selected physical PP/IRQ is selected */
+ uint64_t reserved_10_15 : 6;
+ uint64_t irq : 2; /**< Which IRQ to select
+ 0=IRQ2
+ 1=IRQ3
+ 2=IRQ4 */
+ uint64_t reserved_3_7 : 5;
+ uint64_t pp : 3; /**< Which PP to select */
+#else
+ uint64_t pp : 3;
+ uint64_t reserved_3_7 : 5;
+ uint64_t irq : 2;
+ uint64_t reserved_10_15 : 6;
+ uint64_t sel : 3;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_int_dbg_sel_cn61xx cn66xx;
+ struct cvmx_ciu_int_dbg_sel_s cn68xx;
+ struct cvmx_ciu_int_dbg_sel_s cn68xxp1;
+ struct cvmx_ciu_int_dbg_sel_cn61xx cnf71xx;
+};
+typedef union cvmx_ciu_int_dbg_sel cvmx_ciu_int_dbg_sel_t;
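+
+/* Programming sketch (illustrative): routing a single PP/IRQ onto the
+ * debug port. Per the field comments above, SEL=4 presents only the
+ * selected PP/IRQ and IRQ encodes 0=IRQ2, 1=IRQ3, 2=IRQ4. The address
+ * macro CVMX_CIU_INT_DBG_SEL and cvmx_write_csr() are assumed available.
+ */
+static inline void cvmx_ciu_dbg_select(unsigned int pp, unsigned int irq)
+{
+    cvmx_ciu_int_dbg_sel_t dbg;
+
+    dbg.u64 = 0;
+    dbg.s.sel = 4;   /* present only the selected PP/IRQ */
+    dbg.s.irq = irq; /* 0=IRQ2, 1=IRQ3, 2=IRQ4 */
+    dbg.s.pp = pp;   /* PP number; field width varies per model */
+    cvmx_write_csr(CVMX_CIU_INT_DBG_SEL, dbg.u64);
+}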
+
+/**
+ * cvmx_ciu_int_sum1
+ */
+union cvmx_ciu_int_sum1 {
+ uint64_t u64;
+ struct cvmx_ciu_int_sum1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM Interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t srio1 : 1; /**< SRIO1 interrupt
+ See SRIO1_INT_REG */
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_int_sum1_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t wdog : 1; /**< 1 watchdog interrupt */
+#else
+ uint64_t wdog : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_int_sum1_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t wdog : 2; /**< 2 watchdog interrupts */
+#else
+ uint64_t wdog : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_int_sum1_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< 16 watchdog interrupts */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_int_sum1_cn38xx cn38xxp2;
+ struct cvmx_ciu_int_sum1_cn31xx cn50xx;
+ struct cvmx_ciu_int_sum1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< 4 watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_int_sum1_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< 4 watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn52xxp1;
+ struct cvmx_ciu_int_sum1_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< 12 watchdog interrupts */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_int_sum1_cn56xx cn56xxp1;
+ struct cvmx_ciu_int_sum1_cn38xx cn58xx;
+ struct cvmx_ciu_int_sum1_cn38xx cn58xxp1;
+ struct cvmx_ciu_int_sum1_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t mii1 : 1; /**< RGMII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_4_17 : 14;
+ uint64_t wdog : 4; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_17 : 14;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_int_sum1_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< DFM Interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t srio1 : 1; /**< SRIO1 interrupt
+ See SRIO1_INT_REG, SRIO1_INT2_REG */
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt
+ See NDF_INT */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_6_17 : 12;
+ uint64_t wdog : 6; /**< 6 watchdog interrupts */
+#else
+ uint64_t wdog : 6;
+ uint64_t reserved_6_17 : 12;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_int_sum1_cn63xx cn63xxp1;
+ struct cvmx_ciu_int_sum1_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM Interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt
+ See NDF_INT */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< 10 watchdog interrupts */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_int_sum1_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t reserved_37_46 : 10;
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t reserved_32_32 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t reserved_28_28 : 1;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t reserved_4_18 : 15;
+ uint64_t wdog : 4; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_18 : 15;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_28_28 : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_32_32 : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_46 : 10;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_int_sum1 cvmx_ciu_int_sum1_t;
+
+/**
+ * cvmx_ciu_mbox_clr#
+ */
+union cvmx_ciu_mbox_clrx {
+ uint64_t u64;
+ struct cvmx_ciu_mbox_clrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bits : 32; /**< On writes, clear the corresponding bit in the
+ MBOX register; on reads, return the MBOX register */
+#else
+ uint64_t bits : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu_mbox_clrx_s cn30xx;
+ struct cvmx_ciu_mbox_clrx_s cn31xx;
+ struct cvmx_ciu_mbox_clrx_s cn38xx;
+ struct cvmx_ciu_mbox_clrx_s cn38xxp2;
+ struct cvmx_ciu_mbox_clrx_s cn50xx;
+ struct cvmx_ciu_mbox_clrx_s cn52xx;
+ struct cvmx_ciu_mbox_clrx_s cn52xxp1;
+ struct cvmx_ciu_mbox_clrx_s cn56xx;
+ struct cvmx_ciu_mbox_clrx_s cn56xxp1;
+ struct cvmx_ciu_mbox_clrx_s cn58xx;
+ struct cvmx_ciu_mbox_clrx_s cn58xxp1;
+ struct cvmx_ciu_mbox_clrx_s cn61xx;
+ struct cvmx_ciu_mbox_clrx_s cn63xx;
+ struct cvmx_ciu_mbox_clrx_s cn63xxp1;
+ struct cvmx_ciu_mbox_clrx_s cn66xx;
+ struct cvmx_ciu_mbox_clrx_s cn68xx;
+ struct cvmx_ciu_mbox_clrx_s cn68xxp1;
+ struct cvmx_ciu_mbox_clrx_s cnf71xx;
+};
+typedef union cvmx_ciu_mbox_clrx cvmx_ciu_mbox_clrx_t;
+
+/**
+ * cvmx_ciu_mbox_set#
+ */
+union cvmx_ciu_mbox_setx {
+ uint64_t u64;
+ struct cvmx_ciu_mbox_setx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bits : 32; /**< On writes, set the corresponding bit in the
+ MBOX register; on reads, return the MBOX register */
+#else
+ uint64_t bits : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu_mbox_setx_s cn30xx;
+ struct cvmx_ciu_mbox_setx_s cn31xx;
+ struct cvmx_ciu_mbox_setx_s cn38xx;
+ struct cvmx_ciu_mbox_setx_s cn38xxp2;
+ struct cvmx_ciu_mbox_setx_s cn50xx;
+ struct cvmx_ciu_mbox_setx_s cn52xx;
+ struct cvmx_ciu_mbox_setx_s cn52xxp1;
+ struct cvmx_ciu_mbox_setx_s cn56xx;
+ struct cvmx_ciu_mbox_setx_s cn56xxp1;
+ struct cvmx_ciu_mbox_setx_s cn58xx;
+ struct cvmx_ciu_mbox_setx_s cn58xxp1;
+ struct cvmx_ciu_mbox_setx_s cn61xx;
+ struct cvmx_ciu_mbox_setx_s cn63xx;
+ struct cvmx_ciu_mbox_setx_s cn63xxp1;
+ struct cvmx_ciu_mbox_setx_s cn66xx;
+ struct cvmx_ciu_mbox_setx_s cn68xx;
+ struct cvmx_ciu_mbox_setx_s cn68xxp1;
+ struct cvmx_ciu_mbox_setx_s cnf71xx;
+};
+typedef union cvmx_ciu_mbox_setx cvmx_ciu_mbox_setx_t;
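+
+/* Usage sketch (editor's illustration, not part of the generated
+ * definitions): signal core 3 through its mailbox and later acknowledge
+ * the bit. Assumes the CVMX_CIU_MBOX_SETX()/CVMX_CIU_MBOX_CLRX() address
+ * macros defined elsewhere in this header and the cvmx_read_csr()/
+ * cvmx_write_csr() accessors from cvmx.h. */
+static inline void ciu_mbox_example(void)
+{
+    cvmx_ciu_mbox_setx_t mbox;
+
+    /* Writing a 1 to a BITS position sets that bit in core 3's MBOX
+       register, raising its mailbox interrupt. */
+    mbox.u64 = 0;
+    mbox.s.bits = 1u << 0;
+    cvmx_write_csr(CVMX_CIU_MBOX_SETX(3), mbox.u64);
+
+    /* Reads return the current MBOX value; writing the same mask to the
+       CLR register acknowledges (clears) those bits. */
+    mbox.u64 = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(3));
+    cvmx_write_csr(CVMX_CIU_MBOX_CLRX(3), mbox.u64);
+}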
+
+/**
+ * cvmx_ciu_nmi
+ */
+union cvmx_ciu_nmi {
+ uint64_t u64;
+ struct cvmx_ciu_nmi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t nmi : 32; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu_nmi_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t nmi : 1; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_nmi_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t nmi : 2; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_nmi_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t nmi : 16; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_nmi_cn38xx cn38xxp2;
+ struct cvmx_ciu_nmi_cn31xx cn50xx;
+ struct cvmx_ciu_nmi_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t nmi : 4; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_nmi_cn52xx cn52xxp1;
+ struct cvmx_ciu_nmi_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t nmi : 12; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_nmi_cn56xx cn56xxp1;
+ struct cvmx_ciu_nmi_cn38xx cn58xx;
+ struct cvmx_ciu_nmi_cn38xx cn58xxp1;
+ struct cvmx_ciu_nmi_cn52xx cn61xx;
+ struct cvmx_ciu_nmi_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t nmi : 6; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_nmi_cn63xx cn63xxp1;
+ struct cvmx_ciu_nmi_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t nmi : 10; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_nmi_s cn68xx;
+ struct cvmx_ciu_nmi_s cn68xxp1;
+ struct cvmx_ciu_nmi_cn52xx cnf71xx;
+};
+typedef union cvmx_ciu_nmi cvmx_ciu_nmi_t;
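+
+/* Usage sketch (editor's illustration): pulse an NMI to core 1 by setting
+ * the corresponding bit of the NMI vector. Assumes the CVMX_CIU_NMI address
+ * macro from this header and cvmx_write_csr() from cvmx.h. */
+static inline void ciu_nmi_core1(void)
+{
+    cvmx_ciu_nmi_t nmi;
+
+    nmi.u64 = 0;
+    nmi.s.nmi = 1u << 1; /* one bit per PP; bit 1 targets core 1 */
+    cvmx_write_csr(CVMX_CIU_NMI, nmi.u64);
+}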
+
+/**
+ * cvmx_ciu_pci_inta
+ */
+union cvmx_ciu_pci_inta {
+ uint64_t u64;
+ struct cvmx_ciu_pci_inta_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t intr : 2; /**< PCIe interrupt
+ These bits are observed in CIU_INTX_SUM0<33:32>
+ where X=32-33 */
+#else
+ uint64_t intr : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_ciu_pci_inta_s cn30xx;
+ struct cvmx_ciu_pci_inta_s cn31xx;
+ struct cvmx_ciu_pci_inta_s cn38xx;
+ struct cvmx_ciu_pci_inta_s cn38xxp2;
+ struct cvmx_ciu_pci_inta_s cn50xx;
+ struct cvmx_ciu_pci_inta_s cn52xx;
+ struct cvmx_ciu_pci_inta_s cn52xxp1;
+ struct cvmx_ciu_pci_inta_s cn56xx;
+ struct cvmx_ciu_pci_inta_s cn56xxp1;
+ struct cvmx_ciu_pci_inta_s cn58xx;
+ struct cvmx_ciu_pci_inta_s cn58xxp1;
+ struct cvmx_ciu_pci_inta_s cn61xx;
+ struct cvmx_ciu_pci_inta_s cn63xx;
+ struct cvmx_ciu_pci_inta_s cn63xxp1;
+ struct cvmx_ciu_pci_inta_s cn66xx;
+ struct cvmx_ciu_pci_inta_s cn68xx;
+ struct cvmx_ciu_pci_inta_s cn68xxp1;
+ struct cvmx_ciu_pci_inta_s cnf71xx;
+};
+typedef union cvmx_ciu_pci_inta cvmx_ciu_pci_inta_t;
+
+/**
+ * cvmx_ciu_pp_bist_stat
+ */
+union cvmx_ciu_pp_bist_stat {
+ uint64_t u64;
+ struct cvmx_ciu_pp_bist_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pp_bist : 32; /**< Physical PP BIST status */
+#else
+ uint64_t pp_bist : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu_pp_bist_stat_s cn68xx;
+ struct cvmx_ciu_pp_bist_stat_s cn68xxp1;
+};
+typedef union cvmx_ciu_pp_bist_stat cvmx_ciu_pp_bist_stat_t;
+
+/**
+ * cvmx_ciu_pp_dbg
+ */
+union cvmx_ciu_pp_dbg {
+ uint64_t u64;
+ struct cvmx_ciu_pp_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ppdbg : 32; /**< Debug[DM] value for each PP
+ whether the PP's are in debug mode or not */
+#else
+ uint64_t ppdbg : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu_pp_dbg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t ppdbg : 1; /**< Debug[DM] value for each PP
+ whether the PP's are in debug mode or not */
+#else
+ uint64_t ppdbg : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_pp_dbg_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t ppdbg : 2; /**< Debug[DM] value for each PP
+ whether the PP's are in debug mode or not */
+#else
+ uint64_t ppdbg : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_pp_dbg_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t ppdbg : 16; /**< Debug[DM] value for each PP
+ whether the PP's are in debug mode or not */
+#else
+ uint64_t ppdbg : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_pp_dbg_cn38xx cn38xxp2;
+ struct cvmx_ciu_pp_dbg_cn31xx cn50xx;
+ struct cvmx_ciu_pp_dbg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t ppdbg : 4; /**< Debug[DM] value for each PP
+ whether the PP's are in debug mode or not */
+#else
+ uint64_t ppdbg : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_pp_dbg_cn52xx cn52xxp1;
+ struct cvmx_ciu_pp_dbg_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t ppdbg : 12; /**< Debug[DM] value for each PP
+ whether the PP's are in debug mode or not */
+#else
+ uint64_t ppdbg : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_pp_dbg_cn56xx cn56xxp1;
+ struct cvmx_ciu_pp_dbg_cn38xx cn58xx;
+ struct cvmx_ciu_pp_dbg_cn38xx cn58xxp1;
+ struct cvmx_ciu_pp_dbg_cn52xx cn61xx;
+ struct cvmx_ciu_pp_dbg_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t ppdbg : 6; /**< Debug[DM] value for each PP
+ whether the PP's are in debug mode or not */
+#else
+ uint64_t ppdbg : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_pp_dbg_cn63xx cn63xxp1;
+ struct cvmx_ciu_pp_dbg_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t ppdbg : 10; /**< Debug[DM] value for each PP
+ whether the PP's are in debug mode or not */
+#else
+ uint64_t ppdbg : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_pp_dbg_s cn68xx;
+ struct cvmx_ciu_pp_dbg_s cn68xxp1;
+ struct cvmx_ciu_pp_dbg_cn52xx cnf71xx;
+};
+typedef union cvmx_ciu_pp_dbg cvmx_ciu_pp_dbg_t;
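+
+/* Usage sketch (editor's illustration): test whether a given core is halted
+ * in debug mode by reading its mirrored Debug[DM] bit. Assumes the
+ * CVMX_CIU_PP_DBG address macro from this header and cvmx_read_csr() from
+ * cvmx.h. */
+static inline int ciu_pp_in_debug_mode(unsigned int core)
+{
+    cvmx_ciu_pp_dbg_t dbg;
+
+    dbg.u64 = cvmx_read_csr(CVMX_CIU_PP_DBG);
+    return (dbg.s.ppdbg >> core) & 1;
+}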
+
+/**
+ * cvmx_ciu_pp_poke#
+ *
+ * Notes:
+ * Any write to a CIU_PP_POKE register clears any pending interrupt generated
+ * by the associated watchdog, resets the CIU_WDOG[STATE] field, and sets
+ * CIU_WDOG[CNT] to (CIU_WDOG[LEN] << 8).
+ *
+ * Reads of this register return the associated CIU_WDOG register.
+ */
+union cvmx_ciu_pp_pokex {
+ uint64_t u64;
+ struct cvmx_ciu_pp_pokex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t poke : 64; /**< Reserved */
+#else
+ uint64_t poke : 64;
+#endif
+ } s;
+ struct cvmx_ciu_pp_pokex_s cn30xx;
+ struct cvmx_ciu_pp_pokex_s cn31xx;
+ struct cvmx_ciu_pp_pokex_s cn38xx;
+ struct cvmx_ciu_pp_pokex_s cn38xxp2;
+ struct cvmx_ciu_pp_pokex_s cn50xx;
+ struct cvmx_ciu_pp_pokex_s cn52xx;
+ struct cvmx_ciu_pp_pokex_s cn52xxp1;
+ struct cvmx_ciu_pp_pokex_s cn56xx;
+ struct cvmx_ciu_pp_pokex_s cn56xxp1;
+ struct cvmx_ciu_pp_pokex_s cn58xx;
+ struct cvmx_ciu_pp_pokex_s cn58xxp1;
+ struct cvmx_ciu_pp_pokex_s cn61xx;
+ struct cvmx_ciu_pp_pokex_s cn63xx;
+ struct cvmx_ciu_pp_pokex_s cn63xxp1;
+ struct cvmx_ciu_pp_pokex_s cn66xx;
+ struct cvmx_ciu_pp_pokex_s cn68xx;
+ struct cvmx_ciu_pp_pokex_s cn68xxp1;
+ struct cvmx_ciu_pp_pokex_s cnf71xx;
+};
+typedef union cvmx_ciu_pp_pokex cvmx_ciu_pp_pokex_t;
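+
+/* Usage sketch (editor's illustration): service the watchdog of a given
+ * core. Per the notes above, any write clears a pending watchdog interrupt,
+ * resets CIU_WDOG[STATE], and reloads CIU_WDOG[CNT]; the written value is
+ * ignored. Assumes the CVMX_CIU_PP_POKEX() address macro from this header
+ * and cvmx_write_csr() from cvmx.h. */
+static inline void ciu_wdog_poke(unsigned int core)
+{
+    cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1); /* the write itself is the poke */
+}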
+
+/**
+ * cvmx_ciu_pp_rst
+ *
+ * Contains the reset control for each PP: a value of '1' holds the PP in reset, '0' releases it.
+ * Resets to 0xf when PCI boot is enabled, 0xe otherwise.
+ */
+union cvmx_ciu_pp_rst {
+ uint64_t u64;
+ struct cvmx_ciu_pp_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rst : 31; /**< PP Rst for PP's 31-1 */
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t rst : 31;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu_pp_rst_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_pp_rst_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t rst : 1; /**< PP Rst for PP1 */
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t rst : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_pp_rst_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t rst : 15; /**< PP Rst for PP's 15-1 */
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t rst : 15;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_pp_rst_cn38xx cn38xxp2;
+ struct cvmx_ciu_pp_rst_cn31xx cn50xx;
+ struct cvmx_ciu_pp_rst_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t rst : 3; /**< PP Rst for PP's 3-1 */
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t rst : 3;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_pp_rst_cn52xx cn52xxp1;
+ struct cvmx_ciu_pp_rst_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t rst : 11; /**< PP Rst for PP's 11-1 */
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t rst : 11;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_pp_rst_cn56xx cn56xxp1;
+ struct cvmx_ciu_pp_rst_cn38xx cn58xx;
+ struct cvmx_ciu_pp_rst_cn38xx cn58xxp1;
+ struct cvmx_ciu_pp_rst_cn52xx cn61xx;
+ struct cvmx_ciu_pp_rst_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t rst : 5; /**< PP Rst for PP's 5-1 */
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t rst : 5;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_pp_rst_cn63xx cn63xxp1;
+ struct cvmx_ciu_pp_rst_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t rst : 9; /**< PP Rst for PP's 9-1 */
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t rst : 9;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_pp_rst_s cn68xx;
+ struct cvmx_ciu_pp_rst_s cn68xxp1;
+ struct cvmx_ciu_pp_rst_cn52xx cnf71xx;
+};
+typedef union cvmx_ciu_pp_rst cvmx_ciu_pp_rst_t;
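+
+/* Usage sketch (editor's illustration): release a core from reset by
+ * clearing its bit; bit 0 is RST0 for PP0 and bits 31:1 cover the remaining
+ * PPs. Assumes the CVMX_CIU_PP_RST address macro from this header and the
+ * cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h. */
+static inline void ciu_pp_release(unsigned int core)
+{
+    uint64_t rst = cvmx_read_csr(CVMX_CIU_PP_RST);
+
+    rst &= ~(1ull << core); /* '1' holds the PP in reset, '0' releases it */
+    cvmx_write_csr(CVMX_CIU_PP_RST, rst);
+}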
+
+/**
+ * cvmx_ciu_qlm0
+ *
+ * Notes:
+ * This register is only reset by cold reset.
+ *
+ */
+union cvmx_ciu_qlm0 {
+ uint64_t u64;
+ struct cvmx_ciu_qlm0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t g2bypass : 1; /**< QLM0 PCIE Gen2 tx bypass enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t g2deemph : 5; /**< QLM0 PCIE Gen2 tx bypass de-emphasis value */
+ uint64_t reserved_45_47 : 3;
+ uint64_t g2margin : 5; /**< QLM0 PCIE Gen2 tx bypass margin (amplitude) value */
+ uint64_t reserved_32_39 : 8;
+ uint64_t txbypass : 1; /**< QLM0 transmitter bypass enable */
+ uint64_t reserved_21_30 : 10;
+ uint64_t txdeemph : 5; /**< QLM0 transmitter bypass de-emphasis value */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txmargin : 5; /**< QLM0 transmitter bypass margin (amplitude) value */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lane_en : 4; /**< QLM0 lane enable mask */
+#else
+ uint64_t lane_en : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t txmargin : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t txdeemph : 5;
+ uint64_t reserved_21_30 : 10;
+ uint64_t txbypass : 1;
+ uint64_t reserved_32_39 : 8;
+ uint64_t g2margin : 5;
+ uint64_t reserved_45_47 : 3;
+ uint64_t g2deemph : 5;
+ uint64_t reserved_53_62 : 10;
+ uint64_t g2bypass : 1;
+#endif
+ } s;
+ struct cvmx_ciu_qlm0_s cn61xx;
+ struct cvmx_ciu_qlm0_s cn63xx;
+ struct cvmx_ciu_qlm0_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t txbypass : 1; /**< QLM0 transmitter bypass enable */
+ uint64_t reserved_20_30 : 11;
+ uint64_t txdeemph : 4; /**< QLM0 transmitter bypass de-emphasis value */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txmargin : 5; /**< QLM0 transmitter bypass margin (amplitude) value */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lane_en : 4; /**< QLM0 lane enable mask */
+#else
+ uint64_t lane_en : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t txmargin : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t txdeemph : 4;
+ uint64_t reserved_20_30 : 11;
+ uint64_t txbypass : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn63xxp1;
+ struct cvmx_ciu_qlm0_s cn66xx;
+ struct cvmx_ciu_qlm0_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t txbypass : 1; /**< QLMx transmitter bypass enable */
+ uint64_t reserved_21_30 : 10;
+ uint64_t txdeemph : 5; /**< QLMx transmitter bypass de-emphasis value */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txmargin : 5; /**< QLMx transmitter bypass margin (amplitude) value */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lane_en : 4; /**< QLMx lane enable mask */
+#else
+ uint64_t lane_en : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t txmargin : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t txdeemph : 5;
+ uint64_t reserved_21_30 : 10;
+ uint64_t txbypass : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn68xx;
+ struct cvmx_ciu_qlm0_cn68xx cn68xxp1;
+ struct cvmx_ciu_qlm0_s cnf71xx;
+};
+typedef union cvmx_ciu_qlm0 cvmx_ciu_qlm0_t;
+
+/**
+ * cvmx_ciu_qlm1
+ *
+ * Notes:
+ * This register is only reset by cold reset.
+ *
+ */
+union cvmx_ciu_qlm1 {
+ uint64_t u64;
+ struct cvmx_ciu_qlm1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t g2bypass : 1; /**< QLM1 PCIE Gen2 tx bypass enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t g2deemph : 5; /**< QLM1 PCIE Gen2 tx bypass de-emphasis value */
+ uint64_t reserved_45_47 : 3;
+ uint64_t g2margin : 5; /**< QLM1 PCIE Gen2 tx bypass margin (amplitude) value */
+ uint64_t reserved_32_39 : 8;
+ uint64_t txbypass : 1; /**< QLM1 transmitter bypass enable */
+ uint64_t reserved_21_30 : 10;
+ uint64_t txdeemph : 5; /**< QLM1 transmitter bypass de-emphasis value */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txmargin : 5; /**< QLM1 transmitter bypass margin (amplitude) value */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lane_en : 4; /**< QLM1 lane enable mask */
+#else
+ uint64_t lane_en : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t txmargin : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t txdeemph : 5;
+ uint64_t reserved_21_30 : 10;
+ uint64_t txbypass : 1;
+ uint64_t reserved_32_39 : 8;
+ uint64_t g2margin : 5;
+ uint64_t reserved_45_47 : 3;
+ uint64_t g2deemph : 5;
+ uint64_t reserved_53_62 : 10;
+ uint64_t g2bypass : 1;
+#endif
+ } s;
+ struct cvmx_ciu_qlm1_s cn61xx;
+ struct cvmx_ciu_qlm1_s cn63xx;
+ struct cvmx_ciu_qlm1_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t txbypass : 1; /**< QLM1 transmitter bypass enable */
+ uint64_t reserved_20_30 : 11;
+ uint64_t txdeemph : 4; /**< QLM1 transmitter bypass de-emphasis value */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txmargin : 5; /**< QLM1 transmitter bypass margin (amplitude) value */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lane_en : 4; /**< QLM1 lane enable mask */
+#else
+ uint64_t lane_en : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t txmargin : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t txdeemph : 4;
+ uint64_t reserved_20_30 : 11;
+ uint64_t txbypass : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn63xxp1;
+ struct cvmx_ciu_qlm1_s cn66xx;
+ struct cvmx_ciu_qlm1_s cn68xx;
+ struct cvmx_ciu_qlm1_s cn68xxp1;
+ struct cvmx_ciu_qlm1_s cnf71xx;
+};
+typedef union cvmx_ciu_qlm1 cvmx_ciu_qlm1_t;
+
+/**
+ * cvmx_ciu_qlm2
+ *
+ * Notes:
+ * This register is only reset by cold reset.
+ *
+ */
+union cvmx_ciu_qlm2 {
+ uint64_t u64;
+ struct cvmx_ciu_qlm2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t g2bypass : 1; /**< QLMx PCIE Gen2 tx bypass enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t g2deemph : 5; /**< QLMx PCIE Gen2 tx bypass de-emphasis value */
+ uint64_t reserved_45_47 : 3;
+ uint64_t g2margin : 5; /**< QLMx PCIE Gen2 tx bypass margin (amplitude) value */
+ uint64_t reserved_32_39 : 8;
+ uint64_t txbypass : 1; /**< QLM2 transmitter bypass enable */
+ uint64_t reserved_21_30 : 10;
+ uint64_t txdeemph : 5; /**< QLM2 transmitter bypass de-emphasis value */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txmargin : 5; /**< QLM2 transmitter bypass margin (amplitude) value */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lane_en : 4; /**< QLM2 lane enable mask */
+#else
+ uint64_t lane_en : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t txmargin : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t txdeemph : 5;
+ uint64_t reserved_21_30 : 10;
+ uint64_t txbypass : 1;
+ uint64_t reserved_32_39 : 8;
+ uint64_t g2margin : 5;
+ uint64_t reserved_45_47 : 3;
+ uint64_t g2deemph : 5;
+ uint64_t reserved_53_62 : 10;
+ uint64_t g2bypass : 1;
+#endif
+ } s;
+ struct cvmx_ciu_qlm2_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t txbypass : 1; /**< QLM2 transmitter bypass enable */
+ uint64_t reserved_21_30 : 10;
+ uint64_t txdeemph : 5; /**< QLM2 transmitter bypass de-emphasis value */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txmargin : 5; /**< QLM2 transmitter bypass margin (amplitude) value */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lane_en : 4; /**< QLM2 lane enable mask */
+#else
+ uint64_t lane_en : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t txmargin : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t txdeemph : 5;
+ uint64_t reserved_21_30 : 10;
+ uint64_t txbypass : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_qlm2_cn61xx cn63xx;
+ struct cvmx_ciu_qlm2_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t txbypass : 1; /**< QLM2 transmitter bypass enable */
+ uint64_t reserved_20_30 : 11;
+ uint64_t txdeemph : 4; /**< QLM2 transmitter bypass de-emphasis value */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txmargin : 5; /**< QLM2 transmitter bypass margin (amplitude) value */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lane_en : 4; /**< QLM2 lane enable mask */
+#else
+ uint64_t lane_en : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t txmargin : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t txdeemph : 4;
+ uint64_t reserved_20_30 : 11;
+ uint64_t txbypass : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn63xxp1;
+ struct cvmx_ciu_qlm2_cn61xx cn66xx;
+ struct cvmx_ciu_qlm2_s cn68xx;
+ struct cvmx_ciu_qlm2_s cn68xxp1;
+ struct cvmx_ciu_qlm2_cn61xx cnf71xx;
+};
+typedef union cvmx_ciu_qlm2 cvmx_ciu_qlm2_t;
+
+/**
+ * cvmx_ciu_qlm3
+ *
+ * Notes:
+ * This register is only reset by cold reset.
+ *
+ */
+union cvmx_ciu_qlm3 {
+ uint64_t u64;
+ struct cvmx_ciu_qlm3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t g2bypass : 1; /**< QLMx PCIE Gen2 tx bypass enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t g2deemph : 5; /**< QLMx PCIE Gen2 tx bypass de-emphasis value */
+ uint64_t reserved_45_47 : 3;
+ uint64_t g2margin : 5; /**< QLMx PCIE Gen2 tx bypass margin (amplitude) value */
+ uint64_t reserved_32_39 : 8;
+ uint64_t txbypass : 1; /**< QLMx transmitter bypass enable */
+ uint64_t reserved_21_30 : 10;
+ uint64_t txdeemph : 5; /**< QLMx transmitter bypass de-emphasis value */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txmargin : 5; /**< QLMx transmitter bypass margin (amplitude) value */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lane_en : 4; /**< QLMx lane enable mask */
+#else
+ uint64_t lane_en : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t txmargin : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t txdeemph : 5;
+ uint64_t reserved_21_30 : 10;
+ uint64_t txbypass : 1;
+ uint64_t reserved_32_39 : 8;
+ uint64_t g2margin : 5;
+ uint64_t reserved_45_47 : 3;
+ uint64_t g2deemph : 5;
+ uint64_t reserved_53_62 : 10;
+ uint64_t g2bypass : 1;
+#endif
+ } s;
+ struct cvmx_ciu_qlm3_s cn68xx;
+ struct cvmx_ciu_qlm3_s cn68xxp1;
+};
+typedef union cvmx_ciu_qlm3 cvmx_ciu_qlm3_t;
+
+/**
+ * cvmx_ciu_qlm4
+ *
+ * Notes:
+ * This register is only reset by cold reset.
+ *
+ */
+union cvmx_ciu_qlm4 {
+ uint64_t u64;
+ struct cvmx_ciu_qlm4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t g2bypass : 1; /**< QLMx PCIE Gen2 tx bypass enable */
+ uint64_t reserved_53_62 : 10;
+ uint64_t g2deemph : 5; /**< QLMx PCIE Gen2 tx bypass de-emphasis value */
+ uint64_t reserved_45_47 : 3;
+ uint64_t g2margin : 5; /**< QLMx PCIE Gen2 tx bypass margin (amplitude) value */
+ uint64_t reserved_32_39 : 8;
+ uint64_t txbypass : 1; /**< QLMx transmitter bypass enable */
+ uint64_t reserved_21_30 : 10;
+ uint64_t txdeemph : 5; /**< QLMx transmitter bypass de-emphasis value */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txmargin : 5; /**< QLMx transmitter bypass margin (amplitude) value */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lane_en : 4; /**< QLMx lane enable mask */
+#else
+ uint64_t lane_en : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t txmargin : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t txdeemph : 5;
+ uint64_t reserved_21_30 : 10;
+ uint64_t txbypass : 1;
+ uint64_t reserved_32_39 : 8;
+ uint64_t g2margin : 5;
+ uint64_t reserved_45_47 : 3;
+ uint64_t g2deemph : 5;
+ uint64_t reserved_53_62 : 10;
+ uint64_t g2bypass : 1;
+#endif
+ } s;
+ struct cvmx_ciu_qlm4_s cn68xx;
+ struct cvmx_ciu_qlm4_s cn68xxp1;
+};
+typedef union cvmx_ciu_qlm4 cvmx_ciu_qlm4_t;
+
+/**
+ * cvmx_ciu_qlm_dcok
+ */
+union cvmx_ciu_qlm_dcok {
+ uint64_t u64;
+ struct cvmx_ciu_qlm_dcok_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t qlm_dcok : 4; /**< Re-assert dcok for each QLM. The value in this
+ field is "anded" with the pll_dcok pin and then
+ sent to each QLM (0..3). */
+#else
+ uint64_t qlm_dcok : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu_qlm_dcok_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t qlm_dcok : 2; /**< Re-assert dcok for each QLM. The value in this
+ field is "anded" with the pll_dcok pin and then
+ sent to each QLM (0..1). */
+#else
+ uint64_t qlm_dcok : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_qlm_dcok_cn52xx cn52xxp1;
+ struct cvmx_ciu_qlm_dcok_s cn56xx;
+ struct cvmx_ciu_qlm_dcok_s cn56xxp1;
+};
+typedef union cvmx_ciu_qlm_dcok cvmx_ciu_qlm_dcok_t;
+
+/**
+ * cvmx_ciu_qlm_jtgc
+ */
+union cvmx_ciu_qlm_jtgc {
+ uint64_t u64;
+ struct cvmx_ciu_qlm_jtgc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t bypass_ext : 1; /**< BYPASS field extension to select QLM 4.
+ Selects which QLM JTAG shift chains are bypassed
+ by the QLM JTAG data register (CIU_QLM_JTGD) (one
+ bit per QLM) */
+ uint64_t reserved_11_15 : 5;
+ uint64_t clk_div : 3; /**< Clock divider for QLM JTAG operations. eclk is
+ divided by 2^(CLK_DIV + 2) */
+ uint64_t reserved_7_7 : 1;
+ uint64_t mux_sel : 3; /**< Selects which QLM JTAG shift out is shifted into
+ the QLM JTAG shift register: CIU_QLM_JTGD[SHFT_REG] */
+ uint64_t bypass : 4; /**< Selects which QLM JTAG shift chains are bypassed
+ by the QLM JTAG data register (CIU_QLM_JTGD) (one
+ bit per QLM) */
+#else
+ uint64_t bypass : 4;
+ uint64_t mux_sel : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t clk_div : 3;
+ uint64_t reserved_11_15 : 5;
+ uint64_t bypass_ext : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_ciu_qlm_jtgc_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t clk_div : 3; /**< Clock divider for QLM JTAG operations. eclk is
+ divided by 2^(CLK_DIV + 2) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t mux_sel : 1; /**< Selects which QLM JTAG shift out is shifted into
+ the QLM JTAG shift register: CIU_QLM_JTGD[SHFT_REG] */
+ uint64_t reserved_2_3 : 2;
+ uint64_t bypass : 2; /**< Selects which QLM JTAG shift chains are bypassed
+ by the QLM JTAG data register (CIU_QLM_JTGD) (one
+ bit per QLM) */
+#else
+ uint64_t bypass : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t mux_sel : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t clk_div : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_qlm_jtgc_cn52xx cn52xxp1;
+ struct cvmx_ciu_qlm_jtgc_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t clk_div : 3; /**< Clock divider for QLM JTAG operations. eclk is
+ divided by 2^(CLK_DIV + 2) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t mux_sel : 2; /**< Selects which QLM JTAG shift out is shifted into
+ the QLM JTAG shift register: CIU_QLM_JTGD[SHFT_REG] */
+ uint64_t bypass : 4; /**< Selects which QLM JTAG shift chains are bypassed
+ by the QLM JTAG data register (CIU_QLM_JTGD) (one
+ bit per QLM) */
+#else
+ uint64_t bypass : 4;
+ uint64_t mux_sel : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t clk_div : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_qlm_jtgc_cn56xx cn56xxp1;
+ struct cvmx_ciu_qlm_jtgc_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t clk_div : 3; /**< Clock divider for QLM JTAG operations. eclk is
+ divided by 2^(CLK_DIV + 2) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t mux_sel : 2; /**< Selects which QLM JTAG shift out is shifted into
+ the QLM JTAG shift register: CIU_QLM_JTGD[SHFT_REG] */
+ uint64_t reserved_3_3 : 1;
+ uint64_t bypass : 3; /**< Selects which QLM JTAG shift chains are bypassed
+ by the QLM JTAG data register (CIU_QLM_JTGD) (one
+ bit per QLM) */
+#else
+ uint64_t bypass : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t mux_sel : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t clk_div : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_qlm_jtgc_cn61xx cn63xx;
+ struct cvmx_ciu_qlm_jtgc_cn61xx cn63xxp1;
+ struct cvmx_ciu_qlm_jtgc_cn61xx cn66xx;
+ struct cvmx_ciu_qlm_jtgc_s cn68xx;
+ struct cvmx_ciu_qlm_jtgc_s cn68xxp1;
+ struct cvmx_ciu_qlm_jtgc_cn61xx cnf71xx;
+};
+typedef union cvmx_ciu_qlm_jtgc cvmx_ciu_qlm_jtgc_t;
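+
+/* Worked example (editor's illustration): per the CLK_DIV description, the
+ * QLM JTAG clock is eclk / 2^(CLK_DIV + 2), so CLK_DIV = 3 on an 800 MHz
+ * eclk gives 800 / 32 = 25 MHz. A helper returning the divisor, assuming
+ * the CVMX_CIU_QLM_JTGC address macro from this header and cvmx_read_csr()
+ * from cvmx.h: */
+static inline unsigned int ciu_qlm_jtag_divisor(void)
+{
+    cvmx_ciu_qlm_jtgc_t jtgc;
+
+    jtgc.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGC);
+    return 1u << (jtgc.s.clk_div + 2); /* eclk / 2^(CLK_DIV + 2) */
+}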
+
+/**
+ * cvmx_ciu_qlm_jtgd
+ */
+union cvmx_ciu_qlm_jtgd {
+ uint64_t u64;
+ struct cvmx_ciu_qlm_jtgd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t capture : 1; /**< Perform JTAG capture operation (self-clearing when
+ op completes) */
+ uint64_t shift : 1; /**< Perform JTAG shift operation (self-clearing when
+ op completes) */
+ uint64_t update : 1; /**< Perform JTAG update operation (self-clearing when
+ op completes) */
+ uint64_t reserved_45_60 : 16;
+ uint64_t select : 5; /**< Selects which QLM JTAG shift chains the JTAG
+ operations are performed on */
+ uint64_t reserved_37_39 : 3;
+ uint64_t shft_cnt : 5; /**< QLM JTAG shift count (encoded in -1 notation) */
+ uint64_t shft_reg : 32; /**< QLM JTAG shift register */
+#else
+ uint64_t shft_reg : 32;
+ uint64_t shft_cnt : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t select : 5;
+ uint64_t reserved_45_60 : 16;
+ uint64_t update : 1;
+ uint64_t shift : 1;
+ uint64_t capture : 1;
+#endif
+ } s;
+ struct cvmx_ciu_qlm_jtgd_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t capture : 1; /**< Perform JTAG capture operation (self-clearing when
+ op completes) */
+ uint64_t shift : 1; /**< Perform JTAG shift operation (self-clearing when
+ op completes) */
+ uint64_t update : 1; /**< Perform JTAG update operation (self-clearing when
+ op completes) */
+ uint64_t reserved_42_60 : 19;
+ uint64_t select : 2; /**< Selects which QLM JTAG shift chains the JTAG
+ operations are performed on */
+ uint64_t reserved_37_39 : 3;
+ uint64_t shft_cnt : 5; /**< QLM JTAG shift count (encoded in -1 notation) */
+ uint64_t shft_reg : 32; /**< QLM JTAG shift register */
+#else
+ uint64_t shft_reg : 32;
+ uint64_t shft_cnt : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t select : 2;
+ uint64_t reserved_42_60 : 19;
+ uint64_t update : 1;
+ uint64_t shift : 1;
+ uint64_t capture : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_qlm_jtgd_cn52xx cn52xxp1;
+ struct cvmx_ciu_qlm_jtgd_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t capture : 1; /**< Perform JTAG capture operation (self-clearing when
+ op completes) */
+ uint64_t shift : 1; /**< Perform JTAG shift operation (self-clearing when
+ op completes) */
+ uint64_t update : 1; /**< Perform JTAG update operation (self-clearing when
+ op completes) */
+ uint64_t reserved_44_60 : 17;
+ uint64_t select : 4; /**< Selects which QLM JTAG shift chains the JTAG
+ operations are performed on */
+ uint64_t reserved_37_39 : 3;
+ uint64_t shft_cnt : 5; /**< QLM JTAG shift count (encoded in -1 notation) */
+ uint64_t shft_reg : 32; /**< QLM JTAG shift register */
+#else
+ uint64_t shft_reg : 32;
+ uint64_t shft_cnt : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t select : 4;
+ uint64_t reserved_44_60 : 17;
+ uint64_t update : 1;
+ uint64_t shift : 1;
+ uint64_t capture : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_qlm_jtgd_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t capture : 1; /**< Perform JTAG capture operation (self-clearing when
+ op completes) */
+ uint64_t shift : 1; /**< Perform JTAG shift operation (self-clearing when
+ op completes) */
+ uint64_t update : 1; /**< Perform JTAG update operation (self-clearing when
+ op completes) */
+ uint64_t reserved_37_60 : 24;
+ uint64_t shft_cnt : 5; /**< QLM JTAG shift count (encoded in -1 notation) */
+ uint64_t shft_reg : 32; /**< QLM JTAG shift register */
+#else
+ uint64_t shft_reg : 32;
+ uint64_t shft_cnt : 5;
+ uint64_t reserved_37_60 : 24;
+ uint64_t update : 1;
+ uint64_t shift : 1;
+ uint64_t capture : 1;
+#endif
+ } cn56xxp1;
+ struct cvmx_ciu_qlm_jtgd_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t capture : 1; /**< Perform JTAG capture operation (self-clearing when
+ op completes) */
+ uint64_t shift : 1; /**< Perform JTAG shift operation (self-clearing when
+ op completes) */
+ uint64_t update : 1; /**< Perform JTAG update operation (self-clearing when
+ op completes) */
+ uint64_t reserved_43_60 : 18;
+ uint64_t select : 3; /**< Selects which QLM JTAG shift chains the JTAG
+ operations are performed on */
+ uint64_t reserved_37_39 : 3;
+ uint64_t shft_cnt : 5; /**< QLM JTAG shift count (encoded in -1 notation) */
+ uint64_t shft_reg : 32; /**< QLM JTAG shift register */
+#else
+ uint64_t shft_reg : 32;
+ uint64_t shft_cnt : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t select : 3;
+ uint64_t reserved_43_60 : 18;
+ uint64_t update : 1;
+ uint64_t shift : 1;
+ uint64_t capture : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_qlm_jtgd_cn61xx cn63xx;
+ struct cvmx_ciu_qlm_jtgd_cn61xx cn63xxp1;
+ struct cvmx_ciu_qlm_jtgd_cn61xx cn66xx;
+ struct cvmx_ciu_qlm_jtgd_s cn68xx;
+ struct cvmx_ciu_qlm_jtgd_s cn68xxp1;
+ struct cvmx_ciu_qlm_jtgd_cn61xx cnf71xx;
+};
+typedef union cvmx_ciu_qlm_jtgd cvmx_ciu_qlm_jtgd_t;
+
+/**
+ * cvmx_ciu_soft_bist
+ */
+union cvmx_ciu_soft_bist {
+ uint64_t u64;
+ struct cvmx_ciu_soft_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_bist : 1; /**< Reserved */
+#else
+ uint64_t soft_bist : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_soft_bist_s cn30xx;
+ struct cvmx_ciu_soft_bist_s cn31xx;
+ struct cvmx_ciu_soft_bist_s cn38xx;
+ struct cvmx_ciu_soft_bist_s cn38xxp2;
+ struct cvmx_ciu_soft_bist_s cn50xx;
+ struct cvmx_ciu_soft_bist_s cn52xx;
+ struct cvmx_ciu_soft_bist_s cn52xxp1;
+ struct cvmx_ciu_soft_bist_s cn56xx;
+ struct cvmx_ciu_soft_bist_s cn56xxp1;
+ struct cvmx_ciu_soft_bist_s cn58xx;
+ struct cvmx_ciu_soft_bist_s cn58xxp1;
+ struct cvmx_ciu_soft_bist_s cn61xx;
+ struct cvmx_ciu_soft_bist_s cn63xx;
+ struct cvmx_ciu_soft_bist_s cn63xxp1;
+ struct cvmx_ciu_soft_bist_s cn66xx;
+ struct cvmx_ciu_soft_bist_s cn68xx;
+ struct cvmx_ciu_soft_bist_s cn68xxp1;
+ struct cvmx_ciu_soft_bist_s cnf71xx;
+};
+typedef union cvmx_ciu_soft_bist cvmx_ciu_soft_bist_t;
+
+/**
+ * cvmx_ciu_soft_prst
+ */
+union cvmx_ciu_soft_prst {
+ uint64_t u64;
+ struct cvmx_ciu_soft_prst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t host64 : 1; /**< PCX Host Mode Device Capability (0=32b/1=64b) */
+ uint64_t npi : 1; /**< When PCI soft reset is asserted, also reset the
+ NPI and PNI logic */
+ uint64_t soft_prst : 1; /**< Resets the PCIe logic in all modes, not just
+ RC mode. The reset value is based on the
+ corresponding MIO_RST_CTL[PRTMODE] CSR field:
+ If PRTMODE == 0, then SOFT_PRST resets to 0
+ If PRTMODE != 0, then SOFT_PRST resets to 1
+ When OCTEON is configured to drive the PERST*_L
+ chip pin (ie. MIO_RST_CTL0[RST_DRV] is set), this
+ controls the PERST*_L chip pin. */
+#else
+ uint64_t soft_prst : 1;
+ uint64_t npi : 1;
+ uint64_t host64 : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_ciu_soft_prst_s cn30xx;
+ struct cvmx_ciu_soft_prst_s cn31xx;
+ struct cvmx_ciu_soft_prst_s cn38xx;
+ struct cvmx_ciu_soft_prst_s cn38xxp2;
+ struct cvmx_ciu_soft_prst_s cn50xx;
+ struct cvmx_ciu_soft_prst_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_prst : 1; /**< Reset the PCI bus. Only works when OCTEON is
+ configured as a HOST. When OCTEON is a PCI host
+ (i.e. when PCI_HOST_MODE = 1), this controls
+ PCI_RST_L. Refer to section 10.11.1. */
+#else
+ uint64_t soft_prst : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn52xxp1;
+ struct cvmx_ciu_soft_prst_cn52xx cn56xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn56xxp1;
+ struct cvmx_ciu_soft_prst_s cn58xx;
+ struct cvmx_ciu_soft_prst_s cn58xxp1;
+ struct cvmx_ciu_soft_prst_cn52xx cn61xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn63xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn63xxp1;
+ struct cvmx_ciu_soft_prst_cn52xx cn66xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn68xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn68xxp1;
+ struct cvmx_ciu_soft_prst_cn52xx cnf71xx;
+};
+typedef union cvmx_ciu_soft_prst cvmx_ciu_soft_prst_t;
+
+/**
+ * cvmx_ciu_soft_prst1
+ */
+union cvmx_ciu_soft_prst1 {
+ uint64_t u64;
+ struct cvmx_ciu_soft_prst1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_prst : 1; /**< Resets the PCIe logic in all modes, not just
+ RC mode. The reset value is based on the
+ corresponding MIO_RST_CTL[PRTMODE] CSR field:
+ If PRTMODE == 0, then SOFT_PRST resets to 0
+ If PRTMODE != 0, then SOFT_PRST resets to 1
+ In o61, this PRST initial value is always '1', as
+ PEM1 always runs in host mode. */
+#else
+ uint64_t soft_prst : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_soft_prst1_s cn52xx;
+ struct cvmx_ciu_soft_prst1_s cn52xxp1;
+ struct cvmx_ciu_soft_prst1_s cn56xx;
+ struct cvmx_ciu_soft_prst1_s cn56xxp1;
+ struct cvmx_ciu_soft_prst1_s cn61xx;
+ struct cvmx_ciu_soft_prst1_s cn63xx;
+ struct cvmx_ciu_soft_prst1_s cn63xxp1;
+ struct cvmx_ciu_soft_prst1_s cn66xx;
+ struct cvmx_ciu_soft_prst1_s cn68xx;
+ struct cvmx_ciu_soft_prst1_s cn68xxp1;
+ struct cvmx_ciu_soft_prst1_s cnf71xx;
+};
+typedef union cvmx_ciu_soft_prst1 cvmx_ciu_soft_prst1_t;
+
+/**
+ * cvmx_ciu_soft_prst2
+ */
+union cvmx_ciu_soft_prst2 {
+ uint64_t u64;
+ struct cvmx_ciu_soft_prst2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_prst : 1; /**< Resets the sRIO logic in all modes, not just
+ RC mode. The reset value is based on the
+ corresponding MIO_RST_CNTL[PRTMODE] CSR field:
+ If PRTMODE == 0, then SOFT_PRST resets to 0
+ If PRTMODE != 0, then SOFT_PRST resets to 1 */
+#else
+ uint64_t soft_prst : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_soft_prst2_s cn66xx;
+};
+typedef union cvmx_ciu_soft_prst2 cvmx_ciu_soft_prst2_t;
+
+/**
+ * cvmx_ciu_soft_prst3
+ */
+union cvmx_ciu_soft_prst3 {
+ uint64_t u64;
+ struct cvmx_ciu_soft_prst3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_prst : 1; /**< Resets the sRIO logic in all modes, not just
+ RC mode. The reset value is based on the
+ corresponding MIO_RST_CNTL[PRTMODE] CSR field:
+ If PRTMODE == 0, then SOFT_PRST resets to 0
+ If PRTMODE != 0, then SOFT_PRST resets to 1 */
+#else
+ uint64_t soft_prst : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_soft_prst3_s cn66xx;
+};
+typedef union cvmx_ciu_soft_prst3 cvmx_ciu_soft_prst3_t;
+
+/**
+ * cvmx_ciu_soft_rst
+ */
+union cvmx_ciu_soft_rst {
+ uint64_t u64;
+ struct cvmx_ciu_soft_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_rst : 1; /**< Resets Octeon.
+ When soft resetting Octeon from a remote PCIe
+ host, always read CIU_SOFT_RST (and wait for the
+ result) before writing SOFT_RST to '1'. */
+#else
+ uint64_t soft_rst : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_soft_rst_s cn30xx;
+ struct cvmx_ciu_soft_rst_s cn31xx;
+ struct cvmx_ciu_soft_rst_s cn38xx;
+ struct cvmx_ciu_soft_rst_s cn38xxp2;
+ struct cvmx_ciu_soft_rst_s cn50xx;
+ struct cvmx_ciu_soft_rst_s cn52xx;
+ struct cvmx_ciu_soft_rst_s cn52xxp1;
+ struct cvmx_ciu_soft_rst_s cn56xx;
+ struct cvmx_ciu_soft_rst_s cn56xxp1;
+ struct cvmx_ciu_soft_rst_s cn58xx;
+ struct cvmx_ciu_soft_rst_s cn58xxp1;
+ struct cvmx_ciu_soft_rst_s cn61xx;
+ struct cvmx_ciu_soft_rst_s cn63xx;
+ struct cvmx_ciu_soft_rst_s cn63xxp1;
+ struct cvmx_ciu_soft_rst_s cn66xx;
+ struct cvmx_ciu_soft_rst_s cn68xx;
+ struct cvmx_ciu_soft_rst_s cn68xxp1;
+ struct cvmx_ciu_soft_rst_s cnf71xx;
+};
+typedef union cvmx_ciu_soft_rst cvmx_ciu_soft_rst_t;
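+
+/* Usage sketch (editor's illustration): soft-reset the chip following the
+ * note above -- read CIU_SOFT_RST and consume the result before writing
+ * SOFT_RST to '1', so earlier CSR writes are flushed first. Assumes the
+ * CVMX_CIU_SOFT_RST address macro from this header and the cvmx_read_csr()/
+ * cvmx_write_csr() accessors from cvmx.h. */
+static inline void ciu_soft_reset(void)
+{
+    cvmx_ciu_soft_rst_t rst;
+
+    rst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_RST); /* serializing read */
+    rst.s.soft_rst = 1;
+    cvmx_write_csr(CVMX_CIU_SOFT_RST, rst.u64); /* chip resets here */
+}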
+
+/**
+ * cvmx_ciu_sum1_io#_int
+ *
+ * Notes:
+ * SUM1 becomes per-IPx in o65/6 and later. Only field <40> DPI_DMA will have a
+ * different value per PP(IP) for $CIU_SUM1_PPx_IPy, and <40> DPI_DMA will always
+ * be zero for $CIU_SUM1_IOX_INT. All other fields ([63:41] and [39:0]) are
+ * identical across PPs, with the same value as $CIU_INT_SUM1.
+ * A write to any IRQ's PTP field clears the PTP field for all IRQs.
+ */
+union cvmx_ciu_sum1_iox_int {
+ uint64_t u64;
+ struct cvmx_ciu_sum1_iox_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM Interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_sum1_iox_int_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t mii1 : 1; /**< RGMII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_4_17 : 14;
+ uint64_t wdog : 4; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_17 : 14;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_sum1_iox_int_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM Interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt
+ See NDF_INT */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< 10 watchdog interrupts */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_sum1_iox_int_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t reserved_41_46 : 6;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_37_39 : 3;
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t reserved_32_32 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t reserved_28_28 : 1;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t reserved_4_18 : 15;
+ uint64_t wdog : 4; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_18 : 15;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_28_28 : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_32_32 : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_46 : 6;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_sum1_iox_int cvmx_ciu_sum1_iox_int_t;
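+
+/* Usage sketch (editor's illustration): poll the IO-side SUM1 and
+ * acknowledge a PTP event. Per the notes above, writing the PTP field in
+ * any one SUM1 view clears PTP in all of them. Assumes the
+ * CVMX_CIU_SUM1_IOX_INT() address macro from this header and the
+ * cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h. */
+static inline void ciu_sum1_ack_ptp(void)
+{
+    cvmx_ciu_sum1_iox_int_t sum1;
+
+    sum1.u64 = cvmx_read_csr(CVMX_CIU_SUM1_IOX_INT(0));
+    if (sum1.s.ptp) {
+        sum1.u64 = 0;
+        sum1.s.ptp = 1; /* write-1-to-clear, per the notes above */
+        cvmx_write_csr(CVMX_CIU_SUM1_IOX_INT(0), sum1.u64);
+    }
+}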
+
+/**
+ * cvmx_ciu_sum1_pp#_ip2
+ *
+ * Notes:
+ * SUM1 becomes per-IPx in o65/6 and later. Only field <40> DPI_DMA will have a
+ * different value per PP(IP) for $CIU_SUM1_PPx_IPy, and <40> DPI_DMA will always
+ * be zero for $CIU_SUM1_IOX_INT. All other fields ([63:41] and [39:0]) are
+ * identical across PPs, with the same value as $CIU_INT_SUM1.
+ * A write to any IRQ's PTP field clears the PTP field for all IRQs.
+ */
+union cvmx_ciu_sum1_ppx_ip2 {
+ uint64_t u64;
+ struct cvmx_ciu_sum1_ppx_ip2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM Interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_sum1_ppx_ip2_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t mii1 : 1; /**< RGMII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_4_17 : 14;
+ uint64_t wdog : 4; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_17 : 14;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_sum1_ppx_ip2_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM Interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt
+ See NDF_INT */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< 10 watchdog interrupts */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_sum1_ppx_ip2_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t reserved_41_46 : 6;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_37_39 : 3;
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t reserved_32_32 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t reserved_28_28 : 1;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t reserved_4_18 : 15;
+ uint64_t wdog : 4; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_18 : 15;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_28_28 : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_32_32 : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_46 : 6;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_sum1_ppx_ip2 cvmx_ciu_sum1_ppx_ip2_t;
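+
+/* Illustrative sketch (not part of the generated register definitions):
+ * decoding a SUM1 value through this union. CVMX_CIU_SUM1_PPX_IP2() is
+ * assumed to be the address macro defined earlier in this header, and
+ * cvmx_read_csr()/cvmx_write_csr() come from cvmx-access.h. Guarded with
+ * #if 0 so it is never compiled.
+ */
+#if 0
+static void example_sum1_ptp_ack(unsigned long core)
+{
+    cvmx_ciu_sum1_ppx_ip2_t sum1;
+
+    sum1.u64 = cvmx_read_csr(CVMX_CIU_SUM1_PPX_IP2(core));
+    if (sum1.s.ptp) {
+        cvmx_ciu_sum1_ppx_ip2_t clr;
+        clr.u64 = 0;
+        clr.s.ptp = 1;
+        /* Per the note above, this clears PTP for every PP/IRQ copy. */
+        cvmx_write_csr(CVMX_CIU_SUM1_PPX_IP2(core), clr.u64);
+    }
+}
+#endif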
+
+/**
+ * cvmx_ciu_sum1_pp#_ip3
+ *
+ * Notes:
+ * SUM1 becomes per IPx in o65/6 and afterwards. Only field <40> DPI_DMA will have
+ * a different value per PP(IP) for $CIU_SUM1_PPx_IPy, and <40> DPI_DMA will always
+ * be zero for $CIU_SUM1_IOX_INT. All other fields ([63:41] and [39:0]) have identical
+ * values across PPs, the same value as $CIU_INT_SUM1.
+ * A write to any IRQ's PTP field will clear the PTP field for all IRQs.
+ */
+union cvmx_ciu_sum1_ppx_ip3 {
+ uint64_t u64;
+ struct cvmx_ciu_sum1_ppx_ip3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM Interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_sum1_ppx_ip3_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t mii1 : 1; /**< RGMII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_4_17 : 14;
+ uint64_t wdog : 4; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_17 : 14;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_sum1_ppx_ip3_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM Interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt
+ See NDF_INT */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< 10 watchdog interrupts */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_sum1_ppx_ip3_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t reserved_41_46 : 6;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_37_39 : 3;
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t reserved_32_32 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t reserved_28_28 : 1;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t reserved_4_18 : 15;
+ uint64_t wdog : 4; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_18 : 15;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_28_28 : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_32_32 : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_46 : 6;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_sum1_ppx_ip3 cvmx_ciu_sum1_ppx_ip3_t;
+
+/**
+ * cvmx_ciu_sum1_pp#_ip4
+ *
+ * Notes:
+ * SUM1 becomes per IPx in o65/6 and afterwards. Only field <40> DPI_DMA will have
+ * a different value per PP(IP) for $CIU_SUM1_PPx_IPy, and <40> DPI_DMA will always
+ * be zero for $CIU_SUM1_IOX_INT. All other fields ([63:41] and [39:0]) have identical
+ * values across PPs, the same value as $CIU_INT_SUM1.
+ * A write to any IRQ's PTP field will clear the PTP field for all IRQs.
+ */
+union cvmx_ciu_sum1_ppx_ip4 {
+ uint64_t u64;
+ struct cvmx_ciu_sum1_ppx_ip4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM Interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_sum1_ppx_ip4_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_41_45 : 5;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_38_39 : 2;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t mii1 : 1; /**< RGMII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_4_17 : 14;
+ uint64_t wdog : 4; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_17 : 14;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_39 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_45 : 5;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_sum1_ppx_ip4_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_62_62 : 1;
+ uint64_t srio3 : 1; /**< SRIO3 interrupt
+ See SRIO3_INT_REG, SRIO3_INT2_REG */
+ uint64_t srio2 : 1; /**< SRIO2 interrupt
+ See SRIO2_INT_REG, SRIO2_INT2_REG */
+ uint64_t reserved_57_59 : 3;
+ uint64_t dfm : 1; /**< DFM Interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_51_51 : 1;
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG, SRIO0_INT2_REG */
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_38_45 : 8;
+ uint64_t agx1 : 1; /**< GMX1 interrupt
+ See GMX1_RX*_INT_REG, GMX1_TX_INT_REG,
+ PCS1_INT*_REG, PCSX1_INT_REG */
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt
+ See NDF_INT */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt
+ See MIX1_ISR */
+ uint64_t reserved_10_17 : 8;
+ uint64_t wdog : 10; /**< 10 watchdog interrupts */
+#else
+ uint64_t wdog : 10;
+ uint64_t reserved_10_17 : 8;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t agx1 : 1;
+ uint64_t reserved_38_45 : 8;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_59 : 3;
+ uint64_t srio2 : 1;
+ uint64_t srio3 : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t rst : 1;
+#endif
+ } cn66xx;
+ struct cvmx_ciu_sum1_ppx_ip4_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_53_62 : 10;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t reserved_50_51 : 2;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t reserved_41_46 : 6;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ TBD, See DPI DMA instruction completion */
+ uint64_t reserved_37_39 : 3;
+ uint64_t agx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG,
+ PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t reserved_32_32 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t reserved_28_28 : 1;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< EMMC Flash Controller interrupt
+ See EMMC interrupt */
+ uint64_t reserved_4_18 : 15;
+ uint64_t wdog : 4; /**< Per PP watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_18 : 15;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_28_28 : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_32_32 : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_41_46 : 6;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_50_51 : 2;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_62 : 10;
+ uint64_t rst : 1;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_ciu_sum1_ppx_ip4 cvmx_ciu_sum1_ppx_ip4_t;
+
+/**
+ * cvmx_ciu_sum2_io#_int
+ *
+ * Notes:
+ * These SUM2 CSRs and CIU_TIM4-9 did not exist prior to pass 1.2.
+ *
+ */
+union cvmx_ciu_sum2_iox_int {
+ uint64_t u64;
+ struct cvmx_ciu_sum2_iox_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< ENDOR PHY interrupts, see ENDOR interrupt status
+ register ENDOR_RSTCLK_INTR0(1)_STATUS for details */
+ uint64_t eoi : 1; /**< EOI rsl interrupt, see EOI_INT_STA */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< General timer 4-9 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is done
+ per cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_INT*_SUM0/4[TIMER] field implements all 10
+ CIU_TIM* interrupts. */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_sum2_iox_int_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< General timer 4-9 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is done
+ per cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_INT*_SUM0/4[TIMER] field implements all 10
+ CIU_TIM* interrupts. */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_sum2_iox_int_cn61xx cn66xx;
+ struct cvmx_ciu_sum2_iox_int_s cnf71xx;
+};
+typedef union cvmx_ciu_sum2_iox_int cvmx_ciu_sum2_iox_int_t;
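+
+/* Illustrative sketch (not part of the generated register definitions):
+ * bits <9:4> of SUM2 map to CIU_TIM4..CIU_TIM9, so TIMER bit n pending
+ * means timer 4+n fired. CVMX_CIU_SUM2_IOX_INT() is assumed to be the
+ * address macro defined earlier in this header; cvmx_read_csr() comes
+ * from cvmx-access.h. Guarded with #if 0 so it is never compiled.
+ */
+#if 0
+static int example_first_pending_sum2_timer(unsigned long io)
+{
+    cvmx_ciu_sum2_iox_int_t sum2;
+    int n;
+
+    sum2.u64 = cvmx_read_csr(CVMX_CIU_SUM2_IOX_INT(io));
+    for (n = 0; n < 6; n++)
+        if (sum2.s.timer & (1ull << n))
+            return 4 + n;   /* number of the pending CIU_TIM */
+    return -1;              /* none of TIM4..TIM9 pending */
+}
+#endif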
+
+/**
+ * cvmx_ciu_sum2_pp#_ip2
+ *
+ * Notes:
+ * These SUM2 CSRs and CIU_TIM4-9 did not exist prior to pass 1.2.
+ *
+ */
+union cvmx_ciu_sum2_ppx_ip2 {
+ uint64_t u64;
+ struct cvmx_ciu_sum2_ppx_ip2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< ENDOR PHY interrupts, see ENDOR interrupt status
+ register ENDOR_RSTCLK_INTR0(1)_STATUS for details */
+ uint64_t eoi : 1; /**< EOI rsl interrupt, see EOI_INT_STA */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< General timer 4-9 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is done
+ per cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_INT*_SUM0/4[TIMER] field implements all 10
+ CIU_TIM* interrupts. */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_sum2_ppx_ip2_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< General timer 4-9 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is done
+ per cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_INT*_SUM0/4[TIMER] field implements all 10
+ CIU_TIM* interrupts. */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_sum2_ppx_ip2_cn61xx cn66xx;
+ struct cvmx_ciu_sum2_ppx_ip2_s cnf71xx;
+};
+typedef union cvmx_ciu_sum2_ppx_ip2 cvmx_ciu_sum2_ppx_ip2_t;
+
+/**
+ * cvmx_ciu_sum2_pp#_ip3
+ *
+ * Notes:
+ * These SUM2 CSRs and CIU_TIM4-9 did not exist prior to pass 1.2.
+ *
+ */
+union cvmx_ciu_sum2_ppx_ip3 {
+ uint64_t u64;
+ struct cvmx_ciu_sum2_ppx_ip3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< ENDOR PHY interrupts, see ENDOR interrupt status
+ register ENDOR_RSTCLK_INTR0(1)_STATUS for details */
+ uint64_t eoi : 1; /**< EOI rsl interrupt, see EOI_INT_STA */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< General timer 4-9 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is done
+ per cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_INT*_SUM0/4[TIMER] field implements all 10
+ CIU_TIM* interrupts. */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_sum2_ppx_ip3_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< General timer 4-9 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is done
+ per cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_INT*_SUM0/4[TIMER] field implements all 10
+ CIU_TIM* interrupts. */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_sum2_ppx_ip3_cn61xx cn66xx;
+ struct cvmx_ciu_sum2_ppx_ip3_s cnf71xx;
+};
+typedef union cvmx_ciu_sum2_ppx_ip3 cvmx_ciu_sum2_ppx_ip3_t;
+
+/**
+ * cvmx_ciu_sum2_pp#_ip4
+ *
+ * Notes:
+ * These SUM2 CSRs and CIU_TIM4-9 did not exist prior to pass 1.2.
+ *
+ */
+union cvmx_ciu_sum2_ppx_ip4 {
+ uint64_t u64;
+ struct cvmx_ciu_sum2_ppx_ip4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t endor : 2; /**< ENDOR PHY interrupts, see ENDOR interrupt status
+ register ENDOR_RSTCLK_INTR0(1)_STATUS for details */
+ uint64_t eoi : 1; /**< EOI rsl interrupt, see EOI_INT_STA */
+ uint64_t reserved_10_11 : 2;
+ uint64_t timer : 6; /**< General timer 4-9 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is done
+ per cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_INT*_SUM0/4[TIMER] field implements all 10
+ CIU_TIM* interrupts. */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_11 : 2;
+ uint64_t eoi : 1;
+ uint64_t endor : 2;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_ciu_sum2_ppx_ip4_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t timer : 6; /**< General timer 4-9 interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 0, this interrupt is
+ common for all PP/IRQs; writing '1' to any PP/IRQ
+ will clear all TIMERx(x=0..9) interrupts.
+ When CIU_TIM_MULTI_CAST[EN] == 1, TIMERx(x=0..9)
+ are set at the same time, but clearing is done
+ per cnMIPS core. See CIU_TIM_MULTI_CAST for details.
+ The combination of this field and the
+ CIU_INT*_SUM0/4[TIMER] field implements all 10
+ CIU_TIM* interrupts. */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t timer : 6;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_ciu_sum2_ppx_ip4_cn61xx cn66xx;
+ struct cvmx_ciu_sum2_ppx_ip4_s cnf71xx;
+};
+typedef union cvmx_ciu_sum2_ppx_ip4 cvmx_ciu_sum2_ppx_ip4_t;
+
+/**
+ * cvmx_ciu_tim#
+ *
+ * Notes:
+ * CIU_TIM4-9 did not exist prior to pass 1.2
+ *
+ */
+union cvmx_ciu_timx {
+ uint64_t u64;
+ struct cvmx_ciu_timx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t one_shot : 1; /**< One-shot mode */
+ uint64_t len : 36; /**< Timeout length in core clock cycles
+ Periodic interrupts will occur every LEN+1 core
+ clock cycles when ONE_SHOT==0
+ Timer disabled when LEN==0 */
+#else
+ uint64_t len : 36;
+ uint64_t one_shot : 1;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } s;
+ struct cvmx_ciu_timx_s cn30xx;
+ struct cvmx_ciu_timx_s cn31xx;
+ struct cvmx_ciu_timx_s cn38xx;
+ struct cvmx_ciu_timx_s cn38xxp2;
+ struct cvmx_ciu_timx_s cn50xx;
+ struct cvmx_ciu_timx_s cn52xx;
+ struct cvmx_ciu_timx_s cn52xxp1;
+ struct cvmx_ciu_timx_s cn56xx;
+ struct cvmx_ciu_timx_s cn56xxp1;
+ struct cvmx_ciu_timx_s cn58xx;
+ struct cvmx_ciu_timx_s cn58xxp1;
+ struct cvmx_ciu_timx_s cn61xx;
+ struct cvmx_ciu_timx_s cn63xx;
+ struct cvmx_ciu_timx_s cn63xxp1;
+ struct cvmx_ciu_timx_s cn66xx;
+ struct cvmx_ciu_timx_s cn68xx;
+ struct cvmx_ciu_timx_s cn68xxp1;
+ struct cvmx_ciu_timx_s cnf71xx;
+};
+typedef union cvmx_ciu_timx cvmx_ciu_timx_t;
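+
+/* Illustrative sketch (not part of the generated register definitions):
+ * arming a periodic general timer. With ONE_SHOT==0 the interrupt fires
+ * every LEN+1 core clocks, and LEN==0 disables the timer, per the field
+ * description above. CVMX_CIU_TIMX() is assumed to be the address macro
+ * defined earlier in this header; cvmx_write_csr() comes from
+ * cvmx-access.h. Guarded with #if 0 so it is never compiled.
+ */
+#if 0
+static void example_arm_periodic_timer(unsigned long tim, uint64_t cycles)
+{
+    cvmx_ciu_timx_t t;
+
+    t.u64 = 0;
+    t.s.one_shot = 0;                   /* periodic mode */
+    t.s.len = cycles ? cycles - 1 : 0;  /* period is LEN+1 core clocks */
+    cvmx_write_csr(CVMX_CIU_TIMX(tim), t.u64);
+}
+#endif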
+
+/**
+ * cvmx_ciu_tim_multi_cast
+ *
+ * Notes:
+ * This register does not exist prior to pass 1.2 silicon. Those earlier chip passes operate as if
+ * EN==0.
+ */
+union cvmx_ciu_tim_multi_cast {
+ uint64_t u64;
+ struct cvmx_ciu_tim_multi_cast_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t en : 1; /**< General Timer Interrupt Multi-Cast mode:
+ - 0: Timer interrupt is common for all PP/IRQs.
+ - 1: Timer interrupts are set at the same time for
+ all PP/IRQs, but interrupt clearing must be
+ done individually, per cnMIPS core.
+ Timer interrupts for IOs (X=32,33) will always use
+ common interrupts. Clearing any of the I/O
+ interrupts will clear the common interrupt. */
+#else
+ uint64_t en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_tim_multi_cast_s cn61xx;
+ struct cvmx_ciu_tim_multi_cast_s cn66xx;
+ struct cvmx_ciu_tim_multi_cast_s cnf71xx;
+};
+typedef union cvmx_ciu_tim_multi_cast cvmx_ciu_tim_multi_cast_t;
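+
+/* Illustrative sketch (not part of the generated register definitions):
+ * switching the general timers to multi-cast (per-core) clearing on
+ * pass 1.2+ silicon. CVMX_CIU_TIM_MULTI_CAST is assumed to be the
+ * address macro defined earlier in this header; cvmx_write_csr() comes
+ * from cvmx-access.h. Guarded with #if 0 so it is never compiled.
+ */
+#if 0
+static void example_enable_tim_multi_cast(void)
+{
+    cvmx_ciu_tim_multi_cast_t mc;
+
+    mc.u64 = 0;
+    mc.s.en = 1;    /* timer interrupts now clear per cnMIPS core */
+    cvmx_write_csr(CVMX_CIU_TIM_MULTI_CAST, mc.u64);
+}
+#endif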
+
+/**
+ * cvmx_ciu_wdog#
+ */
+union cvmx_ciu_wdogx {
+ uint64_t u64;
+ struct cvmx_ciu_wdogx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t gstopen : 1; /**< GSTOPEN */
+ uint64_t dstop : 1; /**< DSTOP */
+ uint64_t cnt : 24; /**< Number of 256-cycle intervals until next watchdog
+ expiration. Cleared on write to associated
+ CIU_PP_POKE register. */
+ uint64_t len : 16; /**< Watchdog time expiration length
+ The 16 bits of LEN represent the most significant
+ bits of a 24-bit decrementer that decrements
+ every 256 cycles.
+ LEN must be set > 0 */
+ uint64_t state : 2; /**< Watchdog state:
+ the number of watchdog time expirations since last
+ PP poke. Cleared on write to associated
+ CIU_PP_POKE register. */
+ uint64_t mode : 2; /**< Watchdog mode
+ 0 = Off
+ 1 = Interrupt Only
+ 2 = Interrupt + NMI
+ 3 = Interrupt + NMI + Soft-Reset */
+#else
+ uint64_t mode : 2;
+ uint64_t state : 2;
+ uint64_t len : 16;
+ uint64_t cnt : 24;
+ uint64_t dstop : 1;
+ uint64_t gstopen : 1;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } s;
+ struct cvmx_ciu_wdogx_s cn30xx;
+ struct cvmx_ciu_wdogx_s cn31xx;
+ struct cvmx_ciu_wdogx_s cn38xx;
+ struct cvmx_ciu_wdogx_s cn38xxp2;
+ struct cvmx_ciu_wdogx_s cn50xx;
+ struct cvmx_ciu_wdogx_s cn52xx;
+ struct cvmx_ciu_wdogx_s cn52xxp1;
+ struct cvmx_ciu_wdogx_s cn56xx;
+ struct cvmx_ciu_wdogx_s cn56xxp1;
+ struct cvmx_ciu_wdogx_s cn58xx;
+ struct cvmx_ciu_wdogx_s cn58xxp1;
+ struct cvmx_ciu_wdogx_s cn61xx;
+ struct cvmx_ciu_wdogx_s cn63xx;
+ struct cvmx_ciu_wdogx_s cn63xxp1;
+ struct cvmx_ciu_wdogx_s cn66xx;
+ struct cvmx_ciu_wdogx_s cn68xx;
+ struct cvmx_ciu_wdogx_s cn68xxp1;
+ struct cvmx_ciu_wdogx_s cnf71xx;
+};
+typedef union cvmx_ciu_wdogx cvmx_ciu_wdogx_t;
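+
+/* Illustrative sketch (not part of the generated register definitions):
+ * starting a per-PP watchdog in interrupt-only mode. LEN holds the upper
+ * 16 bits of the 24-bit decrementer (which counts every 256 cycles) and
+ * must be nonzero; CNT and STATE are cleared by writing the core's
+ * CIU_PP_POKE register. CVMX_CIU_WDOGX() and CVMX_CIU_PP_POKEX() are
+ * assumed to be address macros defined earlier in this header;
+ * cvmx_write_csr() comes from cvmx-access.h. Guarded with #if 0 so it is
+ * never compiled.
+ */
+#if 0
+static void example_start_wdog(unsigned long core, uint16_t len)
+{
+    cvmx_ciu_wdogx_t wd;
+
+    wd.u64 = 0;
+    wd.s.mode = 1;  /* 1 = Interrupt Only */
+    wd.s.len = len; /* must be > 0 */
+    cvmx_write_csr(CVMX_CIU_WDOGX(core), wd.u64);
+    cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1); /* initial poke */
+}
+#endif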
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ciu-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ciu2-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ciu2-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ciu2-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,10671 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-ciu2-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon ciu2.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_CIU2_DEFS_H__
+#define __CVMX_CIU2_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_ACK_IOX_INT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_ACK_IOX_INT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080C0800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_ACK_IOX_INT(block_id) (CVMX_ADD_IO_SEG(0x00010701080C0800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
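+/* Usage note (not part of the generated file): when the header is built
+ * with CVMX_ENABLE_CSR_ADDRESS_CHECKING, an out-of-range block_id makes
+ * the inline version warn via cvmx_warn(); otherwise the raw macro
+ * computes the same address unchecked. Call sites look identical either
+ * way, e.g. (illustrative only, assuming cvmx_read_csr()):
+ */
+#if 0
+static void example_read_io_ack(void)
+{
+    uint64_t ack = cvmx_read_csr(CVMX_CIU2_ACK_IOX_INT(0)); /* IO block 0 */
+    (void)ack;
+}
+#endif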
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_ACK_PPX_IP2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_ACK_PPX_IP2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000C0000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_ACK_PPX_IP2(block_id) (CVMX_ADD_IO_SEG(0x00010701000C0000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_ACK_PPX_IP3(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_ACK_PPX_IP3(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000C0200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_ACK_PPX_IP3(block_id) (CVMX_ADD_IO_SEG(0x00010701000C0200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_ACK_PPX_IP4(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_ACK_PPX_IP4(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000C0400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_ACK_PPX_IP4(block_id) (CVMX_ADD_IO_SEG(0x00010701000C0400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108097800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108097800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_GPIO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_GPIO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080B7800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B7800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_GPIO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_GPIO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080A7800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A7800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
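+/* Usage note (not part of the generated file): each enable register comes
+ * with _W1S (write-1-to-set) and _W1C (write-1-to-clear) companions, so a
+ * single bit can be changed without a read-modify-write of the whole
+ * mask. A minimal sketch, assuming cvmx_write_csr() from cvmx-access.h:
+ */
+#if 0
+static void example_enable_gpio_irq_bit(unsigned long io, int bit)
+{
+    /* Only the bits written as 1 are set; all others are untouched. */
+    cvmx_write_csr(CVMX_CIU2_EN_IOX_INT_GPIO_W1S(io), 1ull << bit);
+}
+#endif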
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108094800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070108094800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_IO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_IO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080B4800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B4800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_IO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_IO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080A4800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A4800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_MBOX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_MBOX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108098800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070108098800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_MBOX_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_MBOX_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080B8800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B8800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_MBOX_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_MBOX_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080A8800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A8800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108095800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070108095800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_MEM_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_MEM_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080B5800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B5800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_MEM_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_MEM_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080A5800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A5800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108093800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108093800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_MIO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_MIO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080B3800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B3800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_MIO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_MIO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080A3800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A3800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108096800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070108096800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_PKT_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_PKT_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080B6800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B6800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_PKT_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_PKT_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080A6800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A6800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108092800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070108092800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_RML_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_RML_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080B2800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B2800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_RML_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_RML_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080A2800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A2800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108091800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070108091800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_WDOG_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_WDOG_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080B1800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B1800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_WDOG_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_WDOG_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080A1800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A1800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108090800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070108090800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_WRKQ_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_WRKQ_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080B0800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B0800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_IOX_INT_WRKQ_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_EN_IOX_INT_WRKQ_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701080A0800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_IOX_INT_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A0800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
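+/*
+ * CIU2_EN_PP(0..31)_IP(2..4)_* are the per-core enable masks: block_id
+ * selects the target PP (core) and IP2/IP3/IP4 the MIPS interrupt line the
+ * sources are routed to, again with _W1C/_W1S companions.  Minimal usage
+ * sketch (hypothetical 'core' and 'mask' values): to enable work-queue
+ * interrupts on a core's IP2 line,
+ *     cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_WRKQ(core), mask);
+ * using the SDK's generic 64-bit CSR write accessor.
+ */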
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100097000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100097000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_GPIO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_GPIO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B7000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B7000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_GPIO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_GPIO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A7000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A7000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100094000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100094000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_IO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_IO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B4000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B4000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_IO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_IO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A4000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A4000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_MBOX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_MBOX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100098000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100098000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_MBOX_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_MBOX_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B8000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B8000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_MBOX_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_MBOX_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A8000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A8000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100095000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100095000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_MEM_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_MEM_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B5000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B5000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_MEM_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_MEM_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A5000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A5000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100093000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100093000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_MIO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_MIO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B3000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B3000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_MIO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_MIO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A3000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A3000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100096000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100096000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_PKT_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_PKT_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B6000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B6000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_PKT_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_PKT_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A6000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A6000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100092000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100092000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_RML_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_RML_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B2000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B2000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_RML_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_RML_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A2000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A2000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100091000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100091000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_WDOG_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_WDOG_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B1000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B1000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_WDOG_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_WDOG_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A1000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A1000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100090000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100090000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B0000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B0000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A0000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A0000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100097200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100097200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_GPIO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_GPIO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B7200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B7200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_GPIO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_GPIO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A7200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A7200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100094200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100094200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_IO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_IO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B4200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B4200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_IO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_IO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A4200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A4200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_MBOX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_MBOX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100098200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100098200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B8200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B8200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A8200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A8200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100095200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100095200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_MEM_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_MEM_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B5200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B5200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_MEM_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_MEM_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A5200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A5200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100093200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100093200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_MIO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_MIO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B3200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B3200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_MIO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_MIO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A3200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A3200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100096200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100096200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_PKT_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_PKT_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B6200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B6200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_PKT_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_PKT_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A6200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A6200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100092200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100092200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_RML_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_RML_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B2200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B2200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_RML_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_RML_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A2200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A2200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100091200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100091200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_WDOG_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_WDOG_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B1200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B1200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_WDOG_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_WDOG_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A1200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A1200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100090200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100090200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_WRKQ_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_WRKQ_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B0200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B0200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP3_WRKQ_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP3_WRKQ_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A0200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP3_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A0200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100097400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100097400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_GPIO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_GPIO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B7400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B7400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_GPIO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_GPIO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A7400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A7400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100094400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100094400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_IO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_IO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B4400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B4400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_IO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_IO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A4400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A4400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_MBOX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_MBOX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100098400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100098400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_MBOX_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_MBOX_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B8400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B8400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_MBOX_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_MBOX_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A8400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A8400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100095400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100095400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_MEM_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_MEM_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B5400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B5400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_MEM_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_MEM_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A5400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A5400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100093400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100093400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_MIO_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_MIO_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B3400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B3400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_MIO_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_MIO_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A3400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A3400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100096400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100096400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_PKT_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_PKT_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B6400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B6400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_PKT_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_PKT_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A6400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A6400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100092400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100092400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_RML_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_RML_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B2400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B2400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_RML_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_RML_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A2400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A2400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100091400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100091400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_WDOG_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_WDOG_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B1400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B1400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_WDOG_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_WDOG_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A1400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A1400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100090400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100090400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_WRKQ_W1C(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_WRKQ_W1C(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000B0400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B0400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_EN_PPX_IP4_WRKQ_W1S(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_EN_PPX_IP4_WRKQ_W1S(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000A0400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_EN_PPX_IP4_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A0400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
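+/*
+ * The CIU2_INTR_* registers are singletons; under address checking each bare
+ * name is remapped to a _FUNC() wrapper so it still reads as an rvalue.
+ */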
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU2_INTR_CIU_READY CVMX_CIU2_INTR_CIU_READY_FUNC()
+static inline uint64_t CVMX_CIU2_INTR_CIU_READY_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_CIU2_INTR_CIU_READY not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070100102008ull);
+}
+#else
+#define CVMX_CIU2_INTR_CIU_READY (CVMX_ADD_IO_SEG(0x0001070100102008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU2_INTR_RAM_ECC_CTL CVMX_CIU2_INTR_RAM_ECC_CTL_FUNC()
+static inline uint64_t CVMX_CIU2_INTR_RAM_ECC_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_CIU2_INTR_RAM_ECC_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070100102010ull);
+}
+#else
+#define CVMX_CIU2_INTR_RAM_ECC_CTL (CVMX_ADD_IO_SEG(0x0001070100102010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU2_INTR_RAM_ECC_ST CVMX_CIU2_INTR_RAM_ECC_ST_FUNC()
+static inline uint64_t CVMX_CIU2_INTR_RAM_ECC_ST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_CIU2_INTR_RAM_ECC_ST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070100102018ull);
+}
+#else
+#define CVMX_CIU2_INTR_RAM_ECC_ST (CVMX_ADD_IO_SEG(0x0001070100102018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU2_INTR_SLOWDOWN CVMX_CIU2_INTR_SLOWDOWN_FUNC()
+static inline uint64_t CVMX_CIU2_INTR_SLOWDOWN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_CIU2_INTR_SLOWDOWN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070100102000ull);
+}
+#else
+#define CVMX_CIU2_INTR_SLOWDOWN (CVMX_ADD_IO_SEG(0x0001070100102000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_MSIRED_PPX_IP2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_MSIRED_PPX_IP2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000C1000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_MSIRED_PPX_IP2(block_id) (CVMX_ADD_IO_SEG(0x00010701000C1000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_MSIRED_PPX_IP3(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_MSIRED_PPX_IP3(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000C1200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_MSIRED_PPX_IP3(block_id) (CVMX_ADD_IO_SEG(0x00010701000C1200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_MSIRED_PPX_IP4(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_MSIRED_PPX_IP4(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00010701000C1400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_MSIRED_PPX_IP4(block_id) (CVMX_ADD_IO_SEG(0x00010701000C1400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
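+/*
+ * CVMX_CIU2_MSI_RCVX/SELX index individual MSI vectors (0-255) at an 8-byte
+ * stride rather than the per-block 0x200000 stride used above.
+ */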
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_MSI_RCVX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 255)))))
+ cvmx_warn("CVMX_CIU2_MSI_RCVX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010701000C2000ull) + ((offset) & 255) * 8;
+}
+#else
+#define CVMX_CIU2_MSI_RCVX(offset) (CVMX_ADD_IO_SEG(0x00010701000C2000ull) + ((offset) & 255) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_MSI_SELX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 255)))))
+ cvmx_warn("CVMX_CIU2_MSI_SELX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010701000C3000ull) + ((offset) & 255) * 8;
+}
+#else
+#define CVMX_CIU2_MSI_SELX(offset) (CVMX_ADD_IO_SEG(0x00010701000C3000ull) + ((offset) & 255) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_IOX_INT_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_RAW_IOX_INT_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108047800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_IOX_INT_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108047800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_IOX_INT_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_RAW_IOX_INT_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108044800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_IOX_INT_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070108044800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_IOX_INT_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_RAW_IOX_INT_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108045800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_IOX_INT_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070108045800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_IOX_INT_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_RAW_IOX_INT_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108043800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_IOX_INT_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108043800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_IOX_INT_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_RAW_IOX_INT_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108046800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_IOX_INT_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070108046800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_IOX_INT_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_RAW_IOX_INT_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108042800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_IOX_INT_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070108042800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_IOX_INT_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_RAW_IOX_INT_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108041800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_IOX_INT_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070108041800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_IOX_INT_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_RAW_IOX_INT_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108040800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_IOX_INT_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070108040800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP2_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP2_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100047000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP2_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100047000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP2_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP2_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100044000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP2_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100044000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP2_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP2_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100045000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP2_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100045000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP2_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP2_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100043000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP2_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100043000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP2_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP2_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100046000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP2_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100046000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP2_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP2_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100042000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP2_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100042000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP2_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP2_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100041000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP2_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100041000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP2_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP2_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100040000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100040000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP3_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP3_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100047200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP3_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100047200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP3_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP3_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100044200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP3_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100044200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP3_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP3_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100045200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP3_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100045200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP3_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP3_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100043200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP3_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100043200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP3_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP3_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100046200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP3_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100046200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP3_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP3_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100042200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP3_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100042200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP3_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP3_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100041200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP3_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100041200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP3_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP3_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100040200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP3_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100040200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP4_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP4_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100047400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP4_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100047400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP4_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP4_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100044400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP4_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100044400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP4_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP4_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100045400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP4_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100045400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP4_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP4_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100043400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP4_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100043400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP4_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP4_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100046400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP4_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100046400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP4_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP4_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100042400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP4_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100042400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP4_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP4_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100041400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP4_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100041400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_RAW_PPX_IP4_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_RAW_PPX_IP4_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100040400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_RAW_PPX_IP4_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100040400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_IOX_INT_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_SRC_IOX_INT_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108087800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_IOX_INT_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108087800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_IOX_INT_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_SRC_IOX_INT_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108084800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_IOX_INT_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070108084800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_IOX_INT_MBOX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_SRC_IOX_INT_MBOX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108088800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_IOX_INT_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070108088800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_IOX_INT_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_SRC_IOX_INT_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108085800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_IOX_INT_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070108085800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_IOX_INT_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_SRC_IOX_INT_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108083800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_IOX_INT_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108083800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_IOX_INT_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_SRC_IOX_INT_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108086800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_IOX_INT_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070108086800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_IOX_INT_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_SRC_IOX_INT_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108082800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_IOX_INT_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070108082800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_IOX_INT_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_SRC_IOX_INT_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108081800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_IOX_INT_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070108081800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_IOX_INT_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_CIU2_SRC_IOX_INT_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070108080800ull) + ((block_id) & 1) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_IOX_INT_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070108080800ull) + ((block_id) & 1) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP2_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP2_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100087000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP2_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100087000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP2_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP2_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100084000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP2_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100084000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP2_MBOX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP2_MBOX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100088000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP2_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100088000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP2_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP2_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100085000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP2_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100085000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP2_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP2_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100083000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP2_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100083000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP2_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP2_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100086000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP2_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100086000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP2_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP2_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100082000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP2_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100082000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP2_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP2_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100081000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP2_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100081000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP2_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP2_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100080000ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100080000ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP3_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP3_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100087200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP3_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100087200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP3_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP3_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100084200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP3_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100084200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP3_MBOX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP3_MBOX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100088200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP3_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100088200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP3_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP3_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100085200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP3_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100085200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP3_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP3_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100083200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP3_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100083200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP3_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP3_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100086200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP3_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100086200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP3_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP3_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100082200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP3_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100082200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP3_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP3_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100081200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP3_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100081200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP3_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP3_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100080200ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP3_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100080200ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP4_GPIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP4_GPIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100087400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP4_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100087400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP4_IO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP4_IO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100084400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP4_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100084400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP4_MBOX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP4_MBOX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100088400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP4_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100088400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP4_MEM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP4_MEM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100085400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP4_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100085400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP4_MIO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP4_MIO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100083400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP4_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100083400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP4_PKT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP4_PKT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100086400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP4_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100086400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP4_RML(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP4_RML(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100082400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP4_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100082400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP4_WDOG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP4_WDOG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100081400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP4_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100081400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SRC_PPX_IP4_WRKQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 31)))))
+ cvmx_warn("CVMX_CIU2_SRC_PPX_IP4_WRKQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001070100080400ull) + ((block_id) & 31) * 0x200000ull;
+}
+#else
+#define CVMX_CIU2_SRC_PPX_IP4_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100080400ull) + ((block_id) & 31) * 0x200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SUM_IOX_INT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_CIU2_SUM_IOX_INT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070100000800ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_CIU2_SUM_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070100000800ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SUM_PPX_IP2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_CIU2_SUM_PPX_IP2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070100000000ull) + ((offset) & 31) * 8;
+}
+#else
+#define CVMX_CIU2_SUM_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070100000000ull) + ((offset) & 31) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SUM_PPX_IP3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_CIU2_SUM_PPX_IP3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070100000200ull) + ((offset) & 31) * 8;
+}
+#else
+#define CVMX_CIU2_SUM_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070100000200ull) + ((offset) & 31) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU2_SUM_PPX_IP4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_CIU2_SUM_PPX_IP4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070100000400ull) + ((offset) & 31) * 8;
+}
+#else
+#define CVMX_CIU2_SUM_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070100000400ull) + ((offset) & 31) * 8)
+#endif
+
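+/* Usage sketch (illustrative, not generated): each CVMX_CIU2_* accessor above
+ * only computes a CSR address; the actual access goes through the SDK's
+ * cvmx_read_csr()/cvmx_write_csr(). For example, to fetch the interrupt
+ * summary for the current core's IP2 line:
+ *
+ *     uint64_t sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(cvmx_get_core_num()));
+ *
+ * With CVMX_ENABLE_CSR_ADDRESS_CHECKING set, an out-of-range argument is
+ * reported via cvmx_warn() before the index is masked into range.
+ */
+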
+/**
+ * cvmx_ciu2_ack_io#_int
+ */
+union cvmx_ciu2_ack_iox_int {
+ uint64_t u64;
+ struct cvmx_ciu2_ack_iox_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t ack : 1; /**< Read to clear the corresponding interrupt to
+ PP/IO. Without this read the interrupt will not
+ deassert until the next CIU interrupt scan, up to
+ 200 cycles away. */
+#else
+ uint64_t ack : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu2_ack_iox_int_s cn68xx;
+ struct cvmx_ciu2_ack_iox_int_s cn68xxp1;
+};
+typedef union cvmx_ciu2_ack_iox_int cvmx_ciu2_ack_iox_int_t;
+
+/**
+ * cvmx_ciu2_ack_pp#_ip2
+ *
+ * CIU2_ACK_PPX_IPx (Pass 2)
+ *
+ */
+union cvmx_ciu2_ack_ppx_ip2 {
+ uint64_t u64;
+ struct cvmx_ciu2_ack_ppx_ip2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t ack : 1; /**< Read to clear the corresponding interrupt to
+ PP/IO. Without this read the interrupt will not
+ deassert until the next CIU interrupt scan, up to
+ 200 cycles away. */
+#else
+ uint64_t ack : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu2_ack_ppx_ip2_s cn68xx;
+ struct cvmx_ciu2_ack_ppx_ip2_s cn68xxp1;
+};
+typedef union cvmx_ciu2_ack_ppx_ip2 cvmx_ciu2_ack_ppx_ip2_t;
+
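+/* Usage sketch (illustrative): the CIU2_ACK_* registers are read-to-clear, per
+ * the field description above, so a handler performs the acknowledging read as
+ * soon as it decides to service the interrupt. The CVMX_CIU2_ACK_PPX_IP2()
+ * address macro is assumed to be defined earlier in this file:
+ *
+ *     cvmx_ciu2_ack_ppx_ip2_t ack;
+ *     ack.u64 = cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(cvmx_get_core_num()));
+ *     // the read itself deasserts the interrupt line
+ */
+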
+/**
+ * cvmx_ciu2_ack_pp#_ip3
+ */
+union cvmx_ciu2_ack_ppx_ip3 {
+ uint64_t u64;
+ struct cvmx_ciu2_ack_ppx_ip3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t ack : 1; /**< Read to clear the corresponding interrupt to
+ PP/IO. Without this read the interrupt will not
+ deassert until the next CIU interrupt scan, up to
+ 200 cycles away. */
+#else
+ uint64_t ack : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu2_ack_ppx_ip3_s cn68xx;
+ struct cvmx_ciu2_ack_ppx_ip3_s cn68xxp1;
+};
+typedef union cvmx_ciu2_ack_ppx_ip3 cvmx_ciu2_ack_ppx_ip3_t;
+
+/**
+ * cvmx_ciu2_ack_pp#_ip4
+ */
+union cvmx_ciu2_ack_ppx_ip4 {
+ uint64_t u64;
+ struct cvmx_ciu2_ack_ppx_ip4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t ack : 1; /**< Read to clear the corresponding interrupt to
+ PP/IO. Without this read the interrupt will not
+ deassert until the next CIU interrupt scan, up to
+ 200 cycles away. */
+#else
+ uint64_t ack : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu2_ack_ppx_ip4_s cn68xx;
+ struct cvmx_ciu2_ack_ppx_ip4_s cn68xxp1;
+};
+typedef union cvmx_ciu2_ack_ppx_ip4 cvmx_ciu2_ack_ppx_ip4_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_gpio
+ */
+union cvmx_ciu2_en_iox_int_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupt-enable */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_gpio_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_gpio cvmx_ciu2_en_iox_int_gpio_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_gpio_w1c
+ */
+union cvmx_ciu2_en_iox_int_gpio_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_gpio_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< Write 1 to clear CIU2_EN_xx_yy_GPIO[GPIO] */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_gpio_w1c_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_gpio_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_gpio_w1c cvmx_ciu2_en_iox_int_gpio_w1c_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_gpio_w1s
+ */
+union cvmx_ciu2_en_iox_int_gpio_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_gpio_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupt-enable; write 1 to enable CIU2_EN_xx_yy_GPIO[GPIO] */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_gpio_w1s_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_gpio_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_gpio_w1s cvmx_ciu2_en_iox_int_gpio_w1s_t;
+
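+/* Usage sketch (illustrative): the _W1C/_W1S aliases of each enable register
+ * let software clear or set individual bits without a read-modify-write, which
+ * matters when several cores share the enables. Assuming the corresponding
+ * CVMX_CIU2_EN_IOX_INT_GPIO_W1S() address macro from earlier in this file:
+ *
+ *     cvmx_ciu2_en_iox_int_gpio_w1s_t en;
+ *     en.u64 = 0;
+ *     en.s.gpio = 1 << 3;  // enable only GPIO interrupt 3
+ *     cvmx_write_csr(CVMX_CIU2_EN_IOX_INT_GPIO_W1S(0), en.u64);
+ */
+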
+/**
+ * cvmx_ciu2_en_io#_int_io
+ */
+union cvmx_ciu2_en_iox_int_io {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt-enable */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA interrupt-enable */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit interrupt-enable
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI interrupt-enable */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D interrupt-enable */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_io_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_io cvmx_ciu2_en_iox_int_io_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_io_w1c
+ */
+union cvmx_ciu2_en_iox_int_io_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_io_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< Write 1 to clear CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_io_w1c_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_io_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_io_w1c cvmx_ciu2_en_iox_int_io_w1c_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_io_w1s
+ */
+union cvmx_ciu2_en_iox_int_io_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_io_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< Write 1 to enable CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_io_w1s_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_io_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_io_w1s cvmx_ciu2_en_iox_int_io_w1s_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_mbox
+ */
+union cvmx_ciu2_en_iox_int_mbox {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_mbox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Mailbox interrupt-enable, use with CIU2_MBOX
+ to generate CIU2_SRC_xx_yy_MBOX */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_mbox_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_mbox_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_mbox cvmx_ciu2_en_iox_int_mbox_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_mbox_w1c
+ */
+union cvmx_ciu2_en_iox_int_mbox_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_mbox_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MBOX[MBOX] */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_mbox_w1c_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_mbox_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_mbox_w1c cvmx_ciu2_en_iox_int_mbox_w1c_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_mbox_w1s
+ */
+union cvmx_ciu2_en_iox_int_mbox_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_mbox_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MBOX[MBOX] */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_mbox_w1s_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_mbox_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_mbox_w1s cvmx_ciu2_en_iox_int_mbox_w1s_t;
+
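+/* Usage sketch (illustrative): per the MBOX description above, a pending
+ * mailbox interrupt is visible in CIU2_SRC_xx_yy_MBOX only when the matching
+ * CIU2_EN bit is set. Checking which enabled mailboxes fired for IO block 0,
+ * using the CVMX_CIU2_SRC_IOX_INT_MBOX() macro defined earlier in this file:
+ *
+ *     uint64_t pending = cvmx_read_csr(CVMX_CIU2_SRC_IOX_INT_MBOX(0));
+ *     if (pending & 0x1)
+ *         ;  // service mailbox 0
+ */
+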
+/**
+ * cvmx_ciu2_en_io#_int_mem
+ */
+union cvmx_ciu2_en_iox_int_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt-enable */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_mem_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_mem cvmx_ciu2_en_iox_int_mem_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_mem_w1c
+ */
+union cvmx_ciu2_en_iox_int_mem_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_mem_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_mem_w1c_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_mem_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_mem_w1c cvmx_ciu2_en_iox_int_mem_w1c_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_mem_w1s
+ */
+union cvmx_ciu2_en_iox_int_mem_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_mem_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_mem_w1s_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_mem_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_mem_w1s cvmx_ciu2_en_iox_int_mem_w1s_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_mio
+ */
+union cvmx_ciu2_en_iox_int_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt-enable */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt-enable */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB HCI Interrupt-enable */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt-enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< UART interrupt-enable (two UARTs) */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x interrupt-enable */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines interrupt-enable */
+ uint64_t mio : 1; /**< MIO boot interrupt-enable */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt-enable */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupt-enable */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt-enable */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt-enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt-enable */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_mio_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_mio cvmx_ciu2_en_iox_int_mio_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_mio_w1c
+ */
+union cvmx_ciu2_en_iox_int_mio_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_mio_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_mio_w1c_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_mio_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_mio_w1c cvmx_ciu2_en_iox_int_mio_w1c_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_mio_w1s
+ */
+union cvmx_ciu2_en_iox_int_mio_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_mio_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_mio_w1s_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_mio_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_mio_w1s cvmx_ciu2_en_iox_int_mio_w1s_t;
+
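+/* Usage sketch (illustrative): the unions make individual causes addressable
+ * by name on either endianness, since the #ifdef __BIG_ENDIAN_BITFIELD halves
+ * above keep the bit positions consistent. Polling for a UART cause, with the
+ * CVMX_CIU2_EN_IOX_INT_MIO() address macro assumed from earlier in this file:
+ *
+ *     cvmx_ciu2_en_iox_int_mio_t mio;
+ *     mio.u64 = cvmx_read_csr(CVMX_CIU2_EN_IOX_INT_MIO(0));
+ *     if (mio.s.uart)
+ *         ;  // at least one UART interrupt is enabled
+ */
+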
+/**
+ * cvmx_ciu2_en_io#_int_pkt
+ */
+union cvmx_ciu2_en_iox_int_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x interrupt-enable */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt-enable */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt-enable */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt-enable */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_pkt_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x interrupt-enable */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt-enable */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt-enable */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt-enable */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_pkt cvmx_ciu2_en_iox_int_pkt_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_pkt_w1c
+ */
+union cvmx_ciu2_en_iox_int_pkt_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_pkt_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_pkt_w1c_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_pkt_w1c_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_pkt_w1c cvmx_ciu2_en_iox_int_pkt_w1c_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_pkt_w1s
+ */
+union cvmx_ciu2_en_iox_int_pkt_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_pkt_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_pkt_w1s_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_pkt_w1s_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_pkt_w1s cvmx_ciu2_en_iox_int_pkt_w1s_t;
+
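+/* Usage sketch (illustrative): registers whose layout changed between chip
+ * passes carry a per-model struct (the cn68xxp1 view above lacks ILK_DRP).
+ * Code that must run on pass 1 can select the view at runtime with
+ * OCTEON_IS_MODEL(); the address macro and pass-1 model name are assumed:
+ *
+ *     cvmx_ciu2_en_iox_int_pkt_t pkt;
+ *     pkt.u64 = cvmx_read_csr(CVMX_CIU2_EN_IOX_INT_PKT(0));
+ *     if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1))  // model name assumed
+ *         ;  // use pkt.cn68xxp1 fields; no ILK_DRP on pass 1
+ *     else
+ *         ;  // use pkt.s / pkt.cn68xx fields
+ */
+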
+/**
+ * cvmx_ciu2_en_io#_int_rml
+ */
+union cvmx_ciu2_en_iox_int_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt-enable */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA interrupt-enable */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt-enable */
+ uint64_t sli : 1; /**< SLI interrupt-enable */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt-enable */
+ uint64_t rad : 1; /**< RAD interrupt-enable */
+ uint64_t tim : 1; /**< TIM interrupt-enable */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt-enable */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt-enable */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt-enable */
+ uint64_t pip : 1; /**< PIP interrupt-enable */
+ uint64_t ipd : 1; /**< IPD interrupt-enable */
+ uint64_t fpa : 1; /**< FPA interrupt-enable */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt-enable */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_rml_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt-enable */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt-enable */
+ uint64_t sli : 1; /**< SLI interrupt-enable */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt-enable */
+ uint64_t rad : 1; /**< RAD interrupt-enable */
+ uint64_t tim : 1; /**< TIM interrupt-enable */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt-enable */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt-enable */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt-enable */
+ uint64_t pip : 1; /**< PIP interrupt-enable */
+ uint64_t ipd : 1; /**< IPD interrupt-enable */
+ uint64_t fpa : 1; /**< FPA interrupt-enable */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt-enable */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_rml cvmx_ciu2_en_iox_int_rml_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_rml_w1c
+ */
+union cvmx_ciu2_en_iox_int_rml_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_rml_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI_DMA] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_rml_w1c_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_rml_w1c_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_rml_w1c cvmx_ciu2_en_iox_int_rml_w1c_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_rml_w1s
+ */
+union cvmx_ciu2_en_iox_int_rml_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_rml_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI_DMA] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_rml_w1s_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_rml_w1s_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_rml_w1s cvmx_ciu2_en_iox_int_rml_w1s_t;
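
The RML registers follow the same triple used throughout this block: a plain read/write enable, a _W1C alias that clears any bit written as 1, and a _W1S alias that sets it, so individual enables can be flipped without a read-modify-write race. A sketch under the same macro assumptions as above:

    #include "cvmx.h"

    /* Disable the DPI enable and turn on the L2C enable for IO unit 0,
     * touching no other RML bits and never reading the register. */
    static void rml_retarget_enables(void)
    {
        cvmx_ciu2_en_iox_int_rml_w1c_t clr;
        cvmx_ciu2_en_iox_int_rml_w1s_t set;

        clr.u64 = 0;
        clr.s.dpi = 1;  /* W1C: ones clear, zeros are ignored */
        cvmx_write_csr(CVMX_CIU2_EN_IOX_INT_RML_W1C(0), clr.u64);

        set.u64 = 0;
        set.s.l2c = 1;  /* W1S: ones set, zeros are ignored */
        cvmx_write_csr(CVMX_CIU2_EN_IOX_INT_RML_W1S(0), set.u64);
    }
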
+
+/**
+ * cvmx_ciu2_en_io#_int_wdog
+ */
+union cvmx_ciu2_en_iox_int_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupt-enable */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_wdog_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_wdog cvmx_ciu2_en_iox_int_wdog_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_wdog_w1c
+ */
+union cvmx_ciu2_en_iox_int_wdog_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_wdog_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< Write 1 to clear CIU2_EN_xx_yy_WDOG[WDOG] */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_wdog_w1c_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_wdog_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_wdog_w1c cvmx_ciu2_en_iox_int_wdog_w1c_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_wdog_w1s
+ */
+union cvmx_ciu2_en_iox_int_wdog_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_wdog_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< Write 1 to enable CIU2_EN_xx_yy_WDOG[WDOG] */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_wdog_w1s_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_wdog_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_wdog_w1s cvmx_ciu2_en_iox_int_wdog_w1s_t;
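
The 32-bit WDOG field carries one enable per core, so enabling a single core's watchdog interrupt is a one-bit W1S write. Sketch, with the core index assumed to be below 32 and the macro names as above:

    #include "cvmx.h"

    static void wdog_irq_enable_core(unsigned int core)
    {
        cvmx_ciu2_en_iox_int_wdog_w1s_t w;

        w.u64 = 0;
        w.s.wdog = 1u << core;  /* bit <core> of the per-core enable mask */
        cvmx_write_csr(CVMX_CIU2_EN_IOX_INT_WDOG_W1S(0), w.u64);
    }
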
+
+/**
+ * cvmx_ciu2_en_io#_int_wrkq
+ */
+union cvmx_ciu2_en_iox_int_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work queue interrupt-enable */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_wrkq_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_wrkq cvmx_ciu2_en_iox_int_wrkq_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_wrkq_w1c
+ */
+union cvmx_ciu2_en_iox_int_wrkq_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_wrkq_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< Write 1 to clear CIU2_EN_xx_yy_WRKQ[WORKQ]
+ For W1C bits, write 1 to clear the corresponding
+ CIU2_EN_xx_yy_WRKQ bit; write 0 to retain the previous value */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_wrkq_w1c_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_wrkq_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_wrkq_w1c cvmx_ciu2_en_iox_int_wrkq_w1c_t;
+
+/**
+ * cvmx_ciu2_en_io#_int_wrkq_w1s
+ */
+union cvmx_ciu2_en_iox_int_wrkq_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_iox_int_wrkq_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< Write 1 to enable CIU2_EN_xx_yy_WRKQ[WORKQ]
+ 1 bit/group. For all W1S bits, write 1 to enable the
+ corresponding CIU2_EN_xx_yy_WRKQ[WORKQ] bit;
+ write 0 to retain the previous value. */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_iox_int_wrkq_w1s_s cn68xx;
+ struct cvmx_ciu2_en_iox_int_wrkq_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_iox_int_wrkq_w1s cvmx_ciu2_en_iox_int_wrkq_w1s_t;
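
WRKQ is the one register here whose field spans all 64 bits, one enable per SSO group, so the mask can be built directly in u64. Sketch, same assumptions:

    #include "cvmx.h"

    static void wrkq_irq_enable_group(unsigned int group)  /* group < 64 */
    {
        cvmx_ciu2_en_iox_int_wrkq_w1s_t w;

        w.u64 = 1ull << group;  /* workq occupies the whole register */
        cvmx_write_csr(CVMX_CIU2_EN_IOX_INT_WRKQ_W1S(0), w.u64);
    }
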
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_gpio
+ */
+union cvmx_ciu2_en_ppx_ip2_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupt-enable */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_gpio_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_gpio cvmx_ciu2_en_ppx_ip2_gpio_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_gpio_w1c
+ */
+union cvmx_ciu2_en_ppx_ip2_gpio_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_gpio_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< Write 1 to clear CIU2_EN_xx_yy_GPIO[GPIO] */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_gpio_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_gpio_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_gpio_w1c cvmx_ciu2_en_ppx_ip2_gpio_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_gpio_w1s
+ */
+union cvmx_ciu2_en_ppx_ip2_gpio_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_gpio_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupt-enable; write 1 to enable CIU2_EN_xx_yy_GPIO[GPIO] */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_gpio_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_gpio_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_gpio_w1s cvmx_ciu2_en_ppx_ip2_gpio_w1s_t;
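
From here on the registers are per virtual core (the pp# in the name) and per interrupt level (ip2/ip3/ip4), selected by the macro argument. A sketch that reads back which of the 16 GPIO enables are currently set for one core, assuming CVMX_CIU2_EN_PPX_IP2_GPIO() and cvmx_read_csr() as before:

    #include "cvmx.h"

    static int gpio_irq_enabled(unsigned int core, unsigned int pin) /* pin < 16 */
    {
        cvmx_ciu2_en_ppx_ip2_gpio_t gpio;

        gpio.u64 = cvmx_read_csr(CVMX_CIU2_EN_PPX_IP2_GPIO(core));
        return (gpio.s.gpio >> pin) & 1;
    }
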
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_io
+ */
+union cvmx_ciu2_en_ppx_ip2_io {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt-enable */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA interrupt-enable */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit interrupt-enable
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI interrupt-enable */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D interrupt-enable */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_io_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_io cvmx_ciu2_en_ppx_ip2_io_t;
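
The IO enable word mixes several widths (4 PCIe INTx bits, 4 MSI bits, a 1-bit MSI summary, 2 legacy INTA bits, 2 PEM bits); pulling one lane out is just a shift within the named field. A hypothetical helper, same assumptions:

    #include "cvmx.h"

    static int pcie_intx_irq_enabled(unsigned int core, unsigned int intx) /* intx 0..3 */
    {
        cvmx_ciu2_en_ppx_ip2_io_t io;

        io.u64 = cvmx_read_csr(CVMX_CIU2_EN_PPX_IP2_IO(core));
        return (io.s.pci_intr >> intx) & 1;  /* PCIe INTA..INTD enables */
    }
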
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_io_w1c
+ */
+union cvmx_ciu2_en_ppx_ip2_io_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_io_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< Write 1 to clear CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_io_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_io_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_io_w1c cvmx_ciu2_en_ppx_ip2_io_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_io_w1s
+ */
+union cvmx_ciu2_en_ppx_ip2_io_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_io_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< Write 1 to enable CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_io_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_io_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_io_w1s cvmx_ciu2_en_ppx_ip2_io_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_mbox
+ */
+union cvmx_ciu2_en_ppx_ip2_mbox {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_mbox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Mailbox interrupt-enable, use with CIU2_MBOX
+ to generate CIU2_SRC_xx_yy_MBOX */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_mbox_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_mbox_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_mbox cvmx_ciu2_en_ppx_ip2_mbox_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_mbox_w1c
+ */
+union cvmx_ciu2_en_ppx_ip2_mbox_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_mbox_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MBOX[MBOX] */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_mbox_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_mbox_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_mbox_w1c cvmx_ciu2_en_ppx_ip2_mbox_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_mbox_w1s
+ */
+union cvmx_ciu2_en_ppx_ip2_mbox_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_mbox_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MBOX[MBOX] */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_mbox_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_mbox_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_mbox_w1s cvmx_ciu2_en_ppx_ip2_mbox_w1s_t;
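
Per the field comment, these four enables gate CIU2_MBOX state into CIU2_SRC_xx_yy_MBOX, which is how one core raises an interrupt on another. A sketch that arms mailbox bit 0 on a target core (macro assumption as before; actually signalling is done through the separate CIU2_MBOX registers, not shown here):

    #include "cvmx.h"

    static void mbox_irq_enable_bit0(unsigned int target_core)
    {
        cvmx_ciu2_en_ppx_ip2_mbox_w1s_t mbox;

        mbox.u64 = 0;
        mbox.s.mbox = 0x1;  /* enable mailbox bit 0 only */
        cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_MBOX_W1S(target_core), mbox.u64);
    }
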
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_mem
+ */
+union cvmx_ciu2_en_ppx_ip2_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt-enable */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_mem_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_mem cvmx_ciu2_en_ppx_ip2_mem_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_mem_w1c
+ */
+union cvmx_ciu2_en_ppx_ip2_mem_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_mem_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_mem_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_mem_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_mem_w1c cvmx_ciu2_en_ppx_ip2_mem_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_mem_w1s
+ */
+union cvmx_ciu2_en_ppx_ip2_mem_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_mem_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_mem_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_mem_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_mem_w1s cvmx_ciu2_en_ppx_ip2_mem_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_mio
+ */
+union cvmx_ciu2_en_ppx_ip2_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt-enable */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt-enable */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB HCI interrupt-enable */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt-enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Two UART interrupt-enables */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x interrupt-enable */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines interrupt-enable */
+ uint64_t mio : 1; /**< MIO boot interrupt-enable */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt-enable */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupt-enable */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt-enable */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt-enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt-enable */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_mio_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_mio cvmx_ciu2_en_ppx_ip2_mio_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_mio_w1c
+ */
+union cvmx_ciu2_en_ppx_ip2_mio_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_mio_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_mio_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_mio_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_mio_w1c cvmx_ciu2_en_ppx_ip2_mio_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_mio_w1s
+ */
+union cvmx_ciu2_en_ppx_ip2_mio_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_mio_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_mio_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_mio_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_mio_w1s cvmx_ciu2_en_ppx_ip2_mio_w1s_t;
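
MIO is the catch-all word for the slow peripherals, so a typical console bring-up enables a couple of unrelated fields in one W1S write. Sketch, same assumptions:

    #include "cvmx.h"

    static void mio_irq_enable_console(unsigned int core)
    {
        cvmx_ciu2_en_ppx_ip2_mio_w1s_t mio;

        mio.u64 = 0;
        mio.s.uart = 0x3;   /* both UART enables */
        mio.s.timer = 0x1;  /* general timer 0 */
        cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_MIO_W1S(core), mio.u64);
    }
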
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_pkt
+ */
+union cvmx_ciu2_en_ppx_ip2_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x interrupt-enable */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt-enable */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt-enable */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt-enable */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_pkt_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x interrupt-enable */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt-enable */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt-enable */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt-enable */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_pkt cvmx_ciu2_en_ppx_ip2_pkt_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_pkt_w1c
+ */
+union cvmx_ciu2_en_ppx_ip2_pkt_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_pkt_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_pkt_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_pkt_w1c_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_pkt_w1c cvmx_ciu2_en_ppx_ip2_pkt_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_pkt_w1s
+ */
+union cvmx_ciu2_en_ppx_ip2_pkt_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_pkt_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_pkt_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_pkt_w1s_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_pkt_w1s cvmx_ciu2_en_ppx_ip2_pkt_w1s_t;
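
The cn68xxp1 view above differs from the cn68xx one only in dropping ILK_DRP (bits 53:52), so code touching that field should be guarded by silicon pass. Sketch, assuming OCTEON_IS_MODEL()/OCTEON_CN68XX_PASS1_X from octeon-model.h in addition to the earlier macros:

    #include "cvmx.h"

    static void pkt_irq_enable_ilk_drop(unsigned int core)
    {
        cvmx_ciu2_en_ppx_ip2_pkt_w1s_t pkt;

        if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X))
            return;          /* pass 1 layout: bits 49..63 reserved, no ILK_DRP */

        pkt.u64 = 0;
        pkt.s.ilk_drp = 0x3; /* both ILK packet-drop enables */
        cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_PKT_W1S(core), pkt.u64);
    }
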
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_rml
+ */
+union cvmx_ciu2_en_ppx_ip2_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt-enable */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA interrupt-enable */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt-enable */
+ uint64_t sli : 1; /**< SLI interrupt-enable */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt-enable */
+ uint64_t rad : 1; /**< RAD interrupt-enable */
+ uint64_t tim : 1; /**< TIM interrupt-enable */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt-enable */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt-enable */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt-enable */
+ uint64_t pip : 1; /**< PIP interrupt-enable */
+ uint64_t ipd : 1; /**< IPD interrupt-enable */
+ uint64_t fpa : 1; /**< FPA interrupt-enable */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt-enable */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_rml_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt-enable */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt-enable */
+ uint64_t sli : 1; /**< SLI interrupt-enable */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt-enable */
+ uint64_t rad : 1; /**< RAD interrupt-enable */
+ uint64_t tim : 1; /**< TIM interrupt-enable */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt-enable */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt-enable */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt-enable */
+ uint64_t pip : 1; /**< PIP interrupt-enable */
+ uint64_t ipd : 1; /**< IPD interrupt-enable */
+ uint64_t fpa : 1; /**< FPA interrupt-enable */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt-enable */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_rml cvmx_ciu2_en_ppx_ip2_rml_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_rml_w1c
+ */
+union cvmx_ciu2_en_ppx_ip2_rml_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_rml_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI_DMA] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_rml_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_rml_w1c_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_rml_w1c cvmx_ciu2_en_ppx_ip2_rml_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_rml_w1s
+ */
+union cvmx_ciu2_en_ppx_ip2_rml_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_rml_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI_DMA] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_rml_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_rml_w1s_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_rml_w1s cvmx_ciu2_en_ppx_ip2_rml_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_wdog
+ */
+union cvmx_ciu2_en_ppx_ip2_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupt-enable */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_wdog_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_wdog cvmx_ciu2_en_ppx_ip2_wdog_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_wdog_w1c
+ */
+union cvmx_ciu2_en_ppx_ip2_wdog_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_wdog_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< Write 1 to clear CIU2_EN_xx_yy_WDOG[WDOG] */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_wdog_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_wdog_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_wdog_w1c cvmx_ciu2_en_ppx_ip2_wdog_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_wdog_w1s
+ */
+union cvmx_ciu2_en_ppx_ip2_wdog_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_wdog_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< Write 1 to enable CIU2_EN_xx_yy_WDOG[WDOG] */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_wdog_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_wdog_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_wdog_w1s cvmx_ciu2_en_ppx_ip2_wdog_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_wrkq
+ */
+union cvmx_ciu2_en_ppx_ip2_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work queue interrupt-enable */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_wrkq_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_wrkq cvmx_ciu2_en_ppx_ip2_wrkq_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_wrkq_w1c
+ */
+union cvmx_ciu2_en_ppx_ip2_wrkq_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_wrkq_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< Write 1 to clear CIU2_EN_xx_yy_WRKQ[WORKQ]
+ For W1C bits, write 1 to clear the corresponding
+ CIU2_EN_xx_yy_WRKQ bit; write 0 to retain the previous value */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_wrkq_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_wrkq_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_wrkq_w1c cvmx_ciu2_en_ppx_ip2_wrkq_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip2_wrkq_w1s
+ */
+union cvmx_ciu2_en_ppx_ip2_wrkq_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip2_wrkq_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< Write 1 to enable CIU2_EN_xx_yy_WRKQ[WORKQ]
+ 1 bit/group. For all W1S bits, write 1 to enable the
+ corresponding CIU2_EN_xx_yy_WRKQ[WORKQ] bit;
+ write 0 to retain the previous value. */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip2_wrkq_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip2_wrkq_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip2_wrkq_w1s cvmx_ciu2_en_ppx_ip2_wrkq_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_gpio
+ */
+union cvmx_ciu2_en_ppx_ip3_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupt-enable */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_gpio_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_gpio cvmx_ciu2_en_ppx_ip3_gpio_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_gpio_w1c
+ */
+union cvmx_ciu2_en_ppx_ip3_gpio_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_gpio_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< Write 1 to clear CIU2_EN_xx_yy_GPIO[GPIO] */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_gpio_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_gpio_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_gpio_w1c cvmx_ciu2_en_ppx_ip3_gpio_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_gpio_w1s
+ */
+union cvmx_ciu2_en_ppx_ip3_gpio_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_gpio_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupt-enable; write 1 to enable CIU2_EN_xx_yy_GPIO[GPIO] */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_gpio_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_gpio_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_gpio_w1s cvmx_ciu2_en_ppx_ip3_gpio_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_io
+ */
+union cvmx_ciu2_en_ppx_ip3_io {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt-enable */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA interrupt-enable */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit interrupt-enable
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI interrupt-enable */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D interrupt-enable */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_io_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_io cvmx_ciu2_en_ppx_ip3_io_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_io_w1c
+ */
+union cvmx_ciu2_en_ppx_ip3_io_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_io_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< Write 1 to clear CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_io_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_io_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_io_w1c cvmx_ciu2_en_ppx_ip3_io_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_io_w1s
+ */
+union cvmx_ciu2_en_ppx_ip3_io_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_io_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< Write 1 to enable CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_io_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_io_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_io_w1s cvmx_ciu2_en_ppx_ip3_io_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_mbox
+ */
+union cvmx_ciu2_en_ppx_ip3_mbox {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_mbox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Mailbox interrupt-enable, use with CIU2_MBOX
+ to generate CIU2_SRC_xx_yy_MBOX */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_mbox_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_mbox_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_mbox cvmx_ciu2_en_ppx_ip3_mbox_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_mbox_w1c
+ */
+union cvmx_ciu2_en_ppx_ip3_mbox_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_mbox_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MBOX[MBOX] */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_mbox_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_mbox_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_mbox_w1c cvmx_ciu2_en_ppx_ip3_mbox_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_mbox_w1s
+ */
+union cvmx_ciu2_en_ppx_ip3_mbox_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_mbox_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MBOX[MBOX] */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_mbox_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_mbox_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_mbox_w1s cvmx_ciu2_en_ppx_ip3_mbox_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_mem
+ */
+union cvmx_ciu2_en_ppx_ip3_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt-enable */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_mem_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_mem cvmx_ciu2_en_ppx_ip3_mem_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_mem_w1c
+ */
+union cvmx_ciu2_en_ppx_ip3_mem_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_mem_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_mem_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_mem_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_mem_w1c cvmx_ciu2_en_ppx_ip3_mem_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_mem_w1s
+ */
+union cvmx_ciu2_en_ppx_ip3_mem_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_mem_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_mem_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_mem_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_mem_w1s cvmx_ciu2_en_ppx_ip3_mem_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_mio
+ */
+union cvmx_ciu2_en_ppx_ip3_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt-enable */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt-enable */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB HCI interrupt-enable */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt-enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Two UART interrupt-enable */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x interrupt-enable */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines interrupt-enable */
+ uint64_t mio : 1; /**< MIO boot interrupt-enable */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt-enable */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupt-enable */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt-enable */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt-enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt-enable */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_mio_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_mio cvmx_ciu2_en_ppx_ip3_mio_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_mio_w1c
+ */
+union cvmx_ciu2_en_ppx_ip3_mio_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_mio_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_mio_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_mio_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_mio_w1c cvmx_ciu2_en_ppx_ip3_mio_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_mio_w1s
+ */
+union cvmx_ciu2_en_ppx_ip3_mio_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_mio_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_mio_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_mio_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_mio_w1s cvmx_ciu2_en_ppx_ip3_mio_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_pkt
+ */
+union cvmx_ciu2_en_ppx_ip3_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x interrupt-enable */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt-enable */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt-enable */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt-enable */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_pkt_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x interrupt-enable */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt-enable */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt-enable */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt-enable */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_pkt cvmx_ciu2_en_ppx_ip3_pkt_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_pkt_w1c
+ */
+union cvmx_ciu2_en_ppx_ip3_pkt_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_pkt_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_pkt_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_pkt_w1c_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_pkt_w1c cvmx_ciu2_en_ppx_ip3_pkt_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_pkt_w1s
+ */
+union cvmx_ciu2_en_ppx_ip3_pkt_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_pkt_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_pkt_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_pkt_w1s_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_pkt_w1s cvmx_ciu2_en_ppx_ip3_pkt_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_rml
+ */
+union cvmx_ciu2_en_ppx_ip3_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt-enable */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA interrupt-enable */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt-enable */
+ uint64_t sli : 1; /**< SLI interrupt-enable */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt-enable */
+ uint64_t rad : 1; /**< RAD interrupt-enable */
+ uint64_t tim : 1; /**< TIM interrupt-enable */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt-enable */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt-enable */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt-enable */
+ uint64_t pip : 1; /**< PIP interrupt-enable */
+ uint64_t ipd : 1; /**< IPD interrupt-enable */
+ uint64_t fpa : 1; /**< FPA interrupt-enable */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt-enable */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_rml_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt-enable */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt-enable */
+ uint64_t sli : 1; /**< SLI interrupt-enable */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt-enable */
+ uint64_t rad : 1; /**< RAD interrupt-enable */
+ uint64_t tim : 1; /**< TIM interrupt-enable */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt-enable */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt-enable */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt-enable */
+ uint64_t pip : 1; /**< PIP interrupt-enable */
+ uint64_t ipd : 1; /**< IPD interrupt-enable */
+ uint64_t fpa : 1; /**< FPA interrupt-enable */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt-enable */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_rml cvmx_ciu2_en_ppx_ip3_rml_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_rml_w1c
+ */
+union cvmx_ciu2_en_ppx_ip3_rml_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_rml_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI_DMA] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_rml_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_rml_w1c_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_rml_w1c cvmx_ciu2_en_ppx_ip3_rml_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_rml_w1s
+ */
+union cvmx_ciu2_en_ppx_ip3_rml_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_rml_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI_DMA] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_rml_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_rml_w1s_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_rml_w1s cvmx_ciu2_en_ppx_ip3_rml_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_wdog
+ */
+union cvmx_ciu2_en_ppx_ip3_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupt-enable bits */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_wdog_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_wdog cvmx_ciu2_en_ppx_ip3_wdog_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_wdog_w1c
+ */
+union cvmx_ciu2_en_ppx_ip3_wdog_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_wdog_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< Write 1 to clear CIU2_EN_xx_yy_WDOG[WDOG] */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_wdog_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_wdog_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_wdog_w1c cvmx_ciu2_en_ppx_ip3_wdog_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_wdog_w1s
+ */
+union cvmx_ciu2_en_ppx_ip3_wdog_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_wdog_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< Write 1 to enable CIU2_EN_xx_yy_WDOG[WDOG] */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_wdog_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_wdog_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_wdog_w1s cvmx_ciu2_en_ppx_ip3_wdog_w1s_t;
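+
+/* Editor's note: WDOG packs one enable bit per core into bits 31:0, so a
+ * coremask maps directly onto the field.  Sketch under the same assumptions
+ * as the earlier examples (address-macro naming, cvmx_write_csr from
+ * cvmx.h).
+ */
+#if 0
+static inline void
+example_enable_wdog_ints(unsigned int core, uint32_t coremask)
+{
+	cvmx_ciu2_en_ppx_ip3_wdog_w1s_t w;
+
+	w.u64 = 0;
+	w.s.wdog = coremask;	/* one W1S bit per watchdog */
+	cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_WDOG_W1S(core), w.u64);
+}
+#endif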
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_wrkq
+ */
+union cvmx_ciu2_en_ppx_ip3_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work-queue interrupt-enable bits */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_wrkq_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_wrkq cvmx_ciu2_en_ppx_ip3_wrkq_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_wrkq_w1c
+ */
+union cvmx_ciu2_en_ppx_ip3_wrkq_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_wrkq_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< Write 1 to clear CIU2_EN_xx_yy_WRKQ[WORKQ].
+ For W1C bits, write 1 to clear the corresponding
+ CIU2_EN_xx_yy_WRKQ[WORKQ] bit; write 0 to retain the previous value. */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_wrkq_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_wrkq_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_wrkq_w1c cvmx_ciu2_en_ppx_ip3_wrkq_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip3_wrkq_w1s
+ */
+union cvmx_ciu2_en_ppx_ip3_wrkq_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip3_wrkq_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< Write 1 to enable CIU2_EN_xx_yy_WRKQ[WORKQ],
+ 1 bit per group. For all W1S bits, write 1 to enable
+ the corresponding CIU2_EN_xx_yy_WRKQ[WORKQ] bit;
+ write 0 to retain the previous value. */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip3_wrkq_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip3_wrkq_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip3_wrkq_w1s cvmx_ciu2_en_ppx_ip3_wrkq_w1s_t;
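+
+/* Editor's note: WRKQ is the one register in this family with no reserved
+ * bits -- all 64 bits are work-queue (one per group) enables -- so the
+ * field can be driven straight from a 64-bit group mask.  Same assumptions
+ * as the earlier sketches.
+ */
+#if 0
+static inline void
+example_enable_wrkq_groups(unsigned int core, uint64_t group_mask)
+{
+	cvmx_ciu2_en_ppx_ip3_wrkq_w1s_t w;
+
+	w.u64 = 0;
+	w.s.workq = group_mask;	/* 1 bits enable; 0 bits are no-ops */
+	cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_WRKQ_W1S(core), w.u64);
+}
+#endif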
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_gpio
+ */
+union cvmx_ciu2_en_ppx_ip4_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupt-enable bits */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_gpio_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_gpio cvmx_ciu2_en_ppx_ip4_gpio_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_gpio_w1c
+ */
+union cvmx_ciu2_en_ppx_ip4_gpio_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_gpio_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< Write 1 to clear CIU2_EN_xx_yy_GPIO[GPIO] */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_gpio_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_gpio_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_gpio_w1c cvmx_ciu2_en_ppx_ip4_gpio_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_gpio_w1s
+ */
+union cvmx_ciu2_en_ppx_ip4_gpio_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_gpio_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< Write 1 to enable CIU2_EN_xx_yy_GPIO[GPIO] */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_gpio_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_gpio_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_gpio_w1s cvmx_ciu2_en_ppx_ip4_gpio_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_io
+ */
+union cvmx_ciu2_en_ppx_ip4_io {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt-enable */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA interrupt-enable */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit interrupt-enable
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI interrupt-enable */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D interrupt-enable */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_io_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_io cvmx_ciu2_en_ppx_ip4_io_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_io_w1c
+ */
+union cvmx_ciu2_en_ppx_ip4_io_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_io_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< Write 1 to clear CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< Write 1 to clear CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_io_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_io_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_io_w1c cvmx_ciu2_en_ppx_ip4_io_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_io_w1s
+ */
+union cvmx_ciu2_en_ppx_ip4_io_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_io_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< Write 1 to enable CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< Write 1 to enable CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_io_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_io_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_io_w1s cvmx_ciu2_en_ppx_ip4_io_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_mbox
+ */
+union cvmx_ciu2_en_ppx_ip4_mbox {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_mbox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Mailbox interrupt-enable, use with CIU2_MBOX
+ to generate CIU2_SRC_xx_yy_MBOX */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_mbox_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_mbox_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_mbox cvmx_ciu2_en_ppx_ip4_mbox_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_mbox_w1c
+ */
+union cvmx_ciu2_en_ppx_ip4_mbox_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_mbox_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MBOX[MBOX] */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_mbox_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_mbox_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_mbox_w1c cvmx_ciu2_en_ppx_ip4_mbox_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_mbox_w1s
+ */
+union cvmx_ciu2_en_ppx_ip4_mbox_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_mbox_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MBOX[MBOX] */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_mbox_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_mbox_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_mbox_w1s cvmx_ciu2_en_ppx_ip4_mbox_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_mem
+ */
+union cvmx_ciu2_en_ppx_ip4_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt-enable */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_mem_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_mem cvmx_ciu2_en_ppx_ip4_mem_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_mem_w1c
+ */
+union cvmx_ciu2_en_ppx_ip4_mem_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_mem_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_mem_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_mem_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_mem_w1c cvmx_ciu2_en_ppx_ip4_mem_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_mem_w1s
+ */
+union cvmx_ciu2_en_ppx_ip4_mem_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_mem_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_mem_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_mem_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_mem_w1s cvmx_ciu2_en_ppx_ip4_mem_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_mio
+ */
+union cvmx_ciu2_en_ppx_ip4_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt-enable */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt-enable */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB HCI interrupt-enable */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt-enable */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Two UART interrupt-enable */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x interrupt-enable */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines interrupt-enable */
+ uint64_t mio : 1; /**< MIO boot interrupt-enable */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt-enable */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupt-enable */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt-enable */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt-enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt-enable */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_mio_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_mio cvmx_ciu2_en_ppx_ip4_mio_t;
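+
+/* Editor's note: the plain (non-_W1C/_W1S) registers read back the current
+ * enable state, so the same bitfield view also decodes reads.  Sketch;
+ * CVMX_CIU2_EN_PPX_IP4_MIO() is the assumed address macro and
+ * cvmx_read_csr() comes from cvmx.h.
+ */
+#if 0
+static inline int
+example_uart_ip4_enabled(unsigned int core, unsigned int uart)
+{
+	cvmx_ciu2_en_ppx_ip4_mio_t mio;
+
+	mio.u64 = cvmx_read_csr(CVMX_CIU2_EN_PPX_IP4_MIO(core));
+	return (mio.s.uart >> uart) & 1;	/* UART field is 2 bits wide */
+}
+#endif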
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_mio_w1c
+ */
+union cvmx_ciu2_en_ppx_ip4_mio_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_mio_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_mio_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_mio_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_mio_w1c cvmx_ciu2_en_ppx_ip4_mio_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_mio_w1s
+ */
+union cvmx_ciu2_en_ppx_ip4_mio_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_mio_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< Write 1 to enable CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_mio_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_mio_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_mio_w1s cvmx_ciu2_en_ppx_ip4_mio_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_pkt
+ */
+union cvmx_ciu2_en_ppx_ip4_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x interrupt-enable */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt-enable */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt-enable */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt-enable */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_pkt_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x interrupt-enable */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt-enable */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt-enable */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt-enable */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_pkt cvmx_ciu2_en_ppx_ip4_pkt_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_pkt_w1c
+ */
+union cvmx_ciu2_en_ppx_ip4_pkt_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_pkt_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_pkt_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_pkt_w1c_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to clear CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_pkt_w1c cvmx_ciu2_en_ppx_ip4_pkt_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_pkt_w1s
+ */
+union cvmx_ciu2_en_ppx_ip4_pkt_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_pkt_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_pkt_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_pkt_w1s_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< Write 1 to enable CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_pkt_w1s cvmx_ciu2_en_ppx_ip4_pkt_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_rml
+ */
+union cvmx_ciu2_en_ppx_ip4_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt-enable */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA interrupt-enable */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt-enable */
+ uint64_t sli : 1; /**< SLI interrupt-enable */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt-enable */
+ uint64_t rad : 1; /**< RAD interrupt-enable */
+ uint64_t tim : 1; /**< TIM interrupt-enable */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt-enable */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt-enable */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt-enable */
+ uint64_t pip : 1; /**< PIP interrupt-enable */
+ uint64_t ipd : 1; /**< IPD interrupt-enable */
+ uint64_t fpa : 1; /**< FPA interrupt-enable */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt-enable */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_rml_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt-enable */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt-enable */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt-enable */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt-enable */
+ uint64_t sli : 1; /**< SLI interrupt-enable */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt-enable */
+ uint64_t rad : 1; /**< RAD interrupt-enable */
+ uint64_t tim : 1; /**< TIM interrupt-enable */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt-enable */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt-enable */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt-enable */
+ uint64_t pip : 1; /**< PIP interrupt-enable */
+ uint64_t ipd : 1; /**< IPD interrupt-enable */
+ uint64_t fpa : 1; /**< FPA interrupt-enable */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt-enable */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_rml cvmx_ciu2_en_ppx_ip4_rml_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_rml_w1c
+ */
+union cvmx_ciu2_en_ppx_ip4_rml_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_rml_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI_DMA] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_rml_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_rml_w1c_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to clear CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_rml_w1c cvmx_ciu2_en_ppx_ip4_rml_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_rml_w1s
+ */
+union cvmx_ciu2_en_ppx_ip4_rml_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_rml_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI_DMA] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_rml_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_rml_w1s_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< Write 1 to enable CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_rml_w1s cvmx_ciu2_en_ppx_ip4_rml_w1s_t;
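+
+/*
+ * Illustrative usage sketch (editor-added, not part of the original SDK
+ * header): disabling and re-enabling one RML cause through the W1C/W1S
+ * register pair defined above. The CSR addresses and the write_csr64()
+ * helper are placeholders; the SDK's own CSR accessor (cvmx_write_csr())
+ * with the address macros generated for these registers would be used in
+ * real code.
+ */
+extern void write_csr64(uint64_t addr, uint64_t val); /* hypothetical */
+
+static inline void ciu2_rml_dfa_toggle(uint64_t w1c_addr, uint64_t w1s_addr)
+{
+    cvmx_ciu2_en_ppx_ip4_rml_w1c_t clr;
+    cvmx_ciu2_en_ppx_ip4_rml_w1s_t set;
+
+    clr.u64 = 0;                /* 0 bits leave the enables unchanged */
+    clr.s.dfa = 1;              /* writing 1 clears the DFA enable */
+    write_csr64(w1c_addr, clr.u64);
+
+    set.u64 = 0;
+    set.s.dfa = 1;              /* writing 1 sets the DFA enable again */
+    write_csr64(w1s_addr, set.u64);
+}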
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_wdog
+ */
+union cvmx_ciu2_en_ppx_ip4_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupt-enable bits */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_wdog_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_wdog cvmx_ciu2_en_ppx_ip4_wdog_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_wdog_w1c
+ */
+union cvmx_ciu2_en_ppx_ip4_wdog_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_wdog_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< Write 1 to clear CIU2_EN_xx_yy_WDOG[WDOG] */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_wdog_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_wdog_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_wdog_w1c cvmx_ciu2_en_ppx_ip4_wdog_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_wdog_w1s
+ */
+union cvmx_ciu2_en_ppx_ip4_wdog_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_wdog_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< Write 1 to enable CIU2_EN_xx_yy_WDOG[WDOG] */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_wdog_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_wdog_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_wdog_w1s cvmx_ciu2_en_ppx_ip4_wdog_w1s_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_wrkq
+ */
+union cvmx_ciu2_en_ppx_ip4_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work queue interrupt-enable bits */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_wrkq_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_wrkq cvmx_ciu2_en_ppx_ip4_wrkq_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_wrkq_w1c
+ */
+union cvmx_ciu2_en_ppx_ip4_wrkq_w1c {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_wrkq_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< Write 1 to clear CIU2_EN_xx_yy_WRKQ[WORKQ]
+ For W1C bits, write 1 to clear the corresponding
+ CIU2_EN_xx_yy_WRKQ bit; write 0 to retain the previous value */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_wrkq_w1c_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_wrkq_w1c_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_wrkq_w1c cvmx_ciu2_en_ppx_ip4_wrkq_w1c_t;
+
+/**
+ * cvmx_ciu2_en_pp#_ip4_wrkq_w1s
+ */
+union cvmx_ciu2_en_ppx_ip4_wrkq_w1s {
+ uint64_t u64;
+ struct cvmx_ciu2_en_ppx_ip4_wrkq_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< Write 1 to enable CIU2_EN_xx_yy_WRKQ[WORKQ]
+ 1 bit/group. For all W1S bits, write 1 to enable the
+ corresponding CIU2_EN_xx_yy_WRKQ[WORKQ] bit;
+ write 0 to retain the previous value. */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_en_ppx_ip4_wrkq_w1s_s cn68xx;
+ struct cvmx_ciu2_en_ppx_ip4_wrkq_w1s_s cn68xxp1;
+};
+typedef union cvmx_ciu2_en_ppx_ip4_wrkq_w1s cvmx_ciu2_en_ppx_ip4_wrkq_w1s_t;
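+
+/*
+ * Illustrative sketch (editor-added): computing the W1S value that enables
+ * the work-queue interrupt for a single SSO group. One bit per group; bits
+ * written as 0 retain their previous value, so no read-modify-write of the
+ * enable register is needed.
+ */
+static inline uint64_t ciu2_wrkq_enable_group(unsigned int group)
+{
+    cvmx_ciu2_en_ppx_ip4_wrkq_w1s_t w1s;
+
+    w1s.u64 = 0;
+    w1s.s.workq = 1ull << (group & 63); /* only the chosen group changes */
+    return w1s.u64; /* value to write to the per-PP/IP WRKQ_W1S address */
+}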
+
+/**
+ * cvmx_ciu2_intr_ciu_ready
+ */
+union cvmx_ciu2_intr_ciu_ready {
+ uint64_t u64;
+ struct cvmx_ciu2_intr_ciu_ready_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t ready : 1; /**< Because IRQ updates are delayed and may take
+ about 200 sclk cycles, software should read this
+ register after servicing interrupts and wait for
+ the response before re-enabling interrupt watching;
+ otherwise the outdated interrupt will show up again.
+ The read returns only when all interrupts have been
+ serviced, and the read-back data is always zero.
+ In o68 pass 2, CIU_READY is replaced by CIU2_ACK and
+ becomes an internal debug feature. */
+#else
+ uint64_t ready : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu2_intr_ciu_ready_s cn68xx;
+ struct cvmx_ciu2_intr_ciu_ready_s cn68xxp1;
+};
+typedef union cvmx_ciu2_intr_ciu_ready cvmx_ciu2_intr_ciu_ready_t;
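+
+/*
+ * Illustrative sketch (editor-added): per the READY description above,
+ * read this register after servicing interrupts and before re-enabling
+ * them, so the ~200-sclk IRQ-update delay cannot replay a stale
+ * interrupt. read_csr64() is a placeholder for the SDK's CSR accessor.
+ */
+extern uint64_t read_csr64(uint64_t addr); /* hypothetical */
+
+static inline void ciu2_wait_ready(uint64_t ciu_ready_addr)
+{
+    cvmx_ciu2_intr_ciu_ready_t r;
+
+    /* The read stalls until all interrupt updates have completed; the
+     * returned data is always zero, so only the read itself matters. */
+    r.u64 = read_csr64(ciu_ready_addr);
+    (void)r;
+}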
+
+/**
+ * cvmx_ciu2_intr_ram_ecc_ctl
+ */
+union cvmx_ciu2_intr_ram_ecc_ctl {
+ uint64_t u64;
+ struct cvmx_ciu2_intr_ram_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t flip_synd : 2; /**< Testing feature. Flips the syndrome to generate a
+ single- or double-bit error. FLIP_SYND[0] generates
+ even-numbered bit errors; FLIP_SYND[1] generates
+ odd-numbered bit errors */
+ uint64_t ecc_ena : 1; /**< ECC enable: when set, enables the 9-bit ECC
+ check/correct logic for the CIU interrupt-enable RAM.
+ With ECC enabled, the ECC code is generated and
+ written to the memory and then later, on reads, used
+ to check and correct single-bit errors and detect
+ double-bit errors. */
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t flip_synd : 2;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_ciu2_intr_ram_ecc_ctl_s cn68xx;
+ struct cvmx_ciu2_intr_ram_ecc_ctl_s cn68xxp1;
+};
+typedef union cvmx_ciu2_intr_ram_ecc_ctl cvmx_ciu2_intr_ram_ecc_ctl_t;
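+
+/*
+ * Illustrative sketch (editor-added): building a CIU2_INTR_RAM_ECC_CTL
+ * value that enables ECC on the interrupt-enable RAM and, purely for
+ * testing, injects an error by flipping syndrome bits. Assumes (per the
+ * field comments) that one FLIP_SYND bit yields a single-bit error and
+ * both together yield a double-bit error.
+ */
+static inline uint64_t ciu2_ecc_ctl_value(int inject_double_bit)
+{
+    cvmx_ciu2_intr_ram_ecc_ctl_t ctl;
+
+    ctl.u64 = 0;
+    ctl.s.ecc_ena = 1;                        /* check/correct on reads */
+    ctl.s.flip_synd = inject_double_bit ? 0x3 /* even+odd flips: DBE */
+                                        : 0x1;/* one flip: SBE */
+    return ctl.u64;
+}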
+
+/**
+ * cvmx_ciu2_intr_ram_ecc_st
+ */
+union cvmx_ciu2_intr_ram_ecc_st {
+ uint64_t u64;
+ struct cvmx_ciu2_intr_ram_ecc_st_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t addr : 7; /**< Latches the address of the latest SBE/DBE occurrence.
+ Only values 0-98 are used, indicating the 98
+ different IRQs. Software can read the corresponding
+ corrected value from CIU2_EN_PPX_IPx_*** or
+ CIU2_EN_IOX_INT_*** and rewrite it to the same
+ address to correct the bit error */
+ uint64_t reserved_13_15 : 3;
+ uint64_t syndrom : 9; /**< Reports the latest error syndrome */
+ uint64_t reserved_2_3 : 2;
+ uint64_t dbe : 1; /**< Double bit error observed. Write '1' to clear */
+ uint64_t sbe : 1; /**< Single bit error observed. Write '1' to clear */
+#else
+ uint64_t sbe : 1;
+ uint64_t dbe : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t syndrom : 9;
+ uint64_t reserved_13_15 : 3;
+ uint64_t addr : 7;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_ciu2_intr_ram_ecc_st_s cn68xx;
+ struct cvmx_ciu2_intr_ram_ecc_st_s cn68xxp1;
+};
+typedef union cvmx_ciu2_intr_ram_ecc_st cvmx_ciu2_intr_ram_ecc_st_t;
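+
+/*
+ * Illustrative sketch (editor-added): handling an enable-RAM ECC event.
+ * ADDR identifies which of the 98 IRQ enable entries failed; per the
+ * field comment, a single-bit error is scrubbed by reading the corrected
+ * CIU2_EN_* value and rewriting it to the same address. The CSR accessors
+ * and the scrub step are placeholders.
+ */
+extern uint64_t read_csr64(uint64_t addr);              /* hypothetical */
+extern void write_csr64(uint64_t addr, uint64_t val);   /* hypothetical */
+
+static inline void ciu2_ecc_ack(uint64_t ecc_st_addr)
+{
+    cvmx_ciu2_intr_ram_ecc_st_t st;
+
+    st.u64 = read_csr64(ecc_st_addr);
+    if (st.s.sbe || st.s.dbe) {
+        unsigned int entry = st.s.addr; /* failing enable-RAM entry */
+        /* ... read the matching CIU2_EN_* register and write it back
+         * to scrub a single-bit error ... */
+        (void)entry;
+
+        st.u64 = 0;
+        st.s.sbe = 1;                   /* both flags are write-1-to-clear */
+        st.s.dbe = 1;
+        write_csr64(ecc_st_addr, st.u64);
+    }
+}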
+
+/**
+ * cvmx_ciu2_intr_slowdown
+ */
+union cvmx_ciu2_intr_slowdown {
+ uint64_t u64;
+ struct cvmx_ciu2_intr_slowdown_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t ctl : 3; /**< Slows down the CIU interrupt-walker processing time.
+ IRQ2/3/4 for all 32 PPs are sent to the PP (MRC) over
+ a serial bus to reduce global routing. There is
+ no backpressure mechanism designed for this scheme,
+ so it is only a problem when sclk is faster; this
+ control processes 1 interrupt per 2^(CTL) sclks.
+ The clock-rate ratio each setting can handle:
+ SLOWDOWN   max sclk_freq/aclk_freq ratio
+ 0          3
+ 1          6
+ n          3*2^(n) */
+#else
+ uint64_t ctl : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_ciu2_intr_slowdown_s cn68xx;
+ struct cvmx_ciu2_intr_slowdown_s cn68xxp1;
+};
+typedef union cvmx_ciu2_intr_slowdown cvmx_ciu2_intr_slowdown_t;
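+
+/*
+ * Illustrative sketch (editor-added): per the CTL table above, a setting
+ * of n lets the walker tolerate an sclk_freq/aclk_freq ratio of up to
+ * 3*2^n (0 -> 3, 1 -> 6, ...), since it processes one interrupt every
+ * 2^CTL sclks.
+ */
+static inline unsigned int ciu2_slowdown_max_ratio(unsigned int ctl)
+{
+    return 3u << (ctl & 0x7); /* CTL is a 3-bit field: 3 * 2^CTL */
+}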
+
+/**
+ * cvmx_ciu2_msi_rcv#
+ *
+ * CIU2_MSI_RCV Received MSI state bits (Pass 2)
+ *
+ */
+union cvmx_ciu2_msi_rcvx {
+ uint64_t u64;
+ struct cvmx_ciu2_msi_rcvx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t msi_rcv : 1; /**< MSI state bit, set on MSI delivery or by software;
+ write 1 to set or write 0 to clear.
+ This register is used to create the
+ CIU2_RAW_xx_yy_IO[MSIRED] interrupt. See also
+ SLI_MSI_RCV. */
+#else
+ uint64_t msi_rcv : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu2_msi_rcvx_s cn68xx;
+ struct cvmx_ciu2_msi_rcvx_s cn68xxp1;
+};
+typedef union cvmx_ciu2_msi_rcvx cvmx_ciu2_msi_rcvx_t;
+
+/**
+ * cvmx_ciu2_msi_sel#
+ *
+ * CIU2_MSI_SEL Received MSI SEL enable (Pass 2)
+ *
+ */
+union cvmx_ciu2_msi_selx {
+ uint64_t u64;
+ struct cvmx_ciu2_msi_selx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t pp_num : 5; /**< Processor number to receive this MSI interrupt */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ip_num : 2; /**< Interrupt priority level to receive this MSI
+ interrupt (00=IP2, 01=IP3, 10=IP4, 11=rsvd) */
+ uint64_t reserved_1_3 : 3;
+ uint64_t en : 1; /**< Enable interrupt delivery.
+ Must be set for PP_NUM and IP_NUM to have effect. */
+#else
+ uint64_t en : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t ip_num : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t pp_num : 5;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_ciu2_msi_selx_s cn68xx;
+ struct cvmx_ciu2_msi_selx_s cn68xxp1;
+};
+typedef union cvmx_ciu2_msi_selx cvmx_ciu2_msi_selx_t;
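+
+/*
+ * Illustrative sketch (editor-added): routing one MSI to a core and
+ * priority level. IP_NUM encoding follows the field comment (00=IP2,
+ * 01=IP3, 10=IP4); EN must be set for PP_NUM/IP_NUM to take effect.
+ */
+static inline uint64_t ciu2_msi_sel_value(unsigned int pp, unsigned int ip)
+{
+    cvmx_ciu2_msi_selx_t sel;
+
+    sel.u64 = 0;
+    sel.s.pp_num = pp & 0x1f; /* target processor, 0-31 */
+    sel.s.ip_num = ip & 0x3;  /* 0=IP2, 1=IP3, 2=IP4 */
+    sel.s.en = 1;             /* enable delivery of this MSI */
+    return sel.u64;
+}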
+
+/**
+ * cvmx_ciu2_msired_pp#_ip2
+ *
+ * CIU2_MSIRED_PPX_IPx (Pass 2)
+ * Contains reduced MSI interrupt numbers for delivery to software.
+ * Note MSIRED delivery can only be made to PPs, not to IO; thus there are no CIU2_MSIRED_IO registers.
+ */
+union cvmx_ciu2_msired_ppx_ip2 {
+ uint64_t u64;
+ struct cvmx_ciu2_msired_ppx_ip2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t intr : 1; /**< Interrupt pending */
+ uint64_t reserved_17_19 : 3;
+ uint64_t newint : 1; /**< New interrupt to be delivered.
+ Internal state, for diagnostic use only. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t msi_num : 8; /**< MSI number causing this interrupt.
+ If multiple MSIs are pending to the same PP and IP,
+ then this contains the numerically lowest MSI number */
+#else
+ uint64_t msi_num : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t newint : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t intr : 1;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } s;
+ struct cvmx_ciu2_msired_ppx_ip2_s cn68xx;
+ struct cvmx_ciu2_msired_ppx_ip2_s cn68xxp1;
+};
+typedef union cvmx_ciu2_msired_ppx_ip2 cvmx_ciu2_msired_ppx_ip2_t;
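+
+/*
+ * Illustrative sketch (editor-added): in an IP2 handler, decode the
+ * MSIRED word to find the lowest-numbered pending MSI for this PP, or
+ * -1 if none is pending.
+ */
+static inline int ciu2_msired_pending(cvmx_ciu2_msired_ppx_ip2_t red)
+{
+    return red.s.intr ? (int)red.s.msi_num : -1;
+}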
+
+/**
+ * cvmx_ciu2_msired_pp#_ip3
+ */
+union cvmx_ciu2_msired_ppx_ip3 {
+ uint64_t u64;
+ struct cvmx_ciu2_msired_ppx_ip3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t intr : 1; /**< Interrupt pending */
+ uint64_t reserved_17_19 : 3;
+ uint64_t newint : 1; /**< New interrupt to be delivered.
+ Internal state, for diagnostic use only. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t msi_num : 8; /**< MSI number causing this interrupt.
+ If multiple MSIs are pending to the same PP and IP,
+ then this contains the numerically lowest MSI number */
+#else
+ uint64_t msi_num : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t newint : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t intr : 1;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } s;
+ struct cvmx_ciu2_msired_ppx_ip3_s cn68xx;
+ struct cvmx_ciu2_msired_ppx_ip3_s cn68xxp1;
+};
+typedef union cvmx_ciu2_msired_ppx_ip3 cvmx_ciu2_msired_ppx_ip3_t;
+
+/**
+ * cvmx_ciu2_msired_pp#_ip4
+ */
+union cvmx_ciu2_msired_ppx_ip4 {
+ uint64_t u64;
+ struct cvmx_ciu2_msired_ppx_ip4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t intr : 1; /**< Interrupt pending */
+ uint64_t reserved_17_19 : 3;
+ uint64_t newint : 1; /**< New interrupt to be delivered.
+ Internal state, for diagnostic use only. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t msi_num : 8; /**< MSI number causing this interrupt.
+ If multiple MSIs are pending to the same PP and IP,
+ then this contains the numerically lowest MSI number */
+#else
+ uint64_t msi_num : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t newint : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t intr : 1;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } s;
+ struct cvmx_ciu2_msired_ppx_ip4_s cn68xx;
+ struct cvmx_ciu2_msired_ppx_ip4_s cn68xxp1;
+};
+typedef union cvmx_ciu2_msired_ppx_ip4 cvmx_ciu2_msired_ppx_ip4_t;
+
+/**
+ * cvmx_ciu2_raw_io#_int_gpio
+ */
+union cvmx_ciu2_raw_iox_int_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_iox_int_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ For GPIO, all 98 RAW readouts will be the same value */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_iox_int_gpio_s cn68xx;
+ struct cvmx_ciu2_raw_iox_int_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_iox_int_gpio cvmx_ciu2_raw_iox_int_gpio_t;
+
+/**
+ * cvmx_ciu2_raw_io#_int_io
+ */
+union cvmx_ciu2_raw_iox_int_io {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_iox_int_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt
+ See PEMx_INT_SUM (enabled by PEMx_INT_ENB) */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA software enable
+ See CIU_PCI_INTA */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit, a copy of
+ CIU2_MSIRED_PPx_IPy.INT; for all IO interrupts
+ CIU2_RAW_IOX_INT_IO[MSIRED] is always zero.
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D
+ PCI_INTR[3] = INTD
+ PCI_INTR[2] = INTC
+ PCI_INTR[1] = INTB
+ PCI_INTR[0] = INTA
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ For IO, all 98 RAW readouts will be different */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_iox_int_io_s cn68xx;
+ struct cvmx_ciu2_raw_iox_int_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_iox_int_io cvmx_ciu2_raw_iox_int_io_t;
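+
+/*
+ * Illustrative sketch (editor-added): decoding the emulated PCIe INTA-INTD
+ * bits of a RAW IO cause word, using the PCI_INTR[0]=INTA ...
+ * PCI_INTR[3]=INTD mapping documented above.
+ */
+static inline unsigned int ciu2_pci_intx_pending(cvmx_ciu2_raw_iox_int_io_t raw,
+                                                 unsigned int intx /* 0=A..3=D */)
+{
+    return (raw.s.pci_intr >> (intx & 0x3)) & 1u;
+}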
+
+/**
+ * cvmx_ciu2_raw_io#_int_mem
+ */
+union cvmx_ciu2_raw_iox_int_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_iox_int_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt
+ See LMC*_INT
+ For MEM, all 98 RAW readouts will be the same value */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_iox_int_mem_s cn68xx;
+ struct cvmx_ciu2_raw_iox_int_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_iox_int_mem cvmx_ciu2_raw_iox_int_mem_t;
+
+/**
+ * cvmx_ciu2_raw_io#_int_mio
+ */
+union cvmx_ciu2_raw_iox_int_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_iox_int_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt
+ See UCTL*_INT_REG */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x Interrupt
+ See MIO_TWSx_INT */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt
+ See NDF_INT */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupts
+ Set any time the corresponding CIU timer expires */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt
+ See SSO_IQ_INT */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT*
+ For MIO, all 98 RAW readouts will be the same value */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_iox_int_mio_s cn68xx;
+ struct cvmx_ciu2_raw_iox_int_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_iox_int_mio cvmx_ciu2_raw_iox_int_mio_t;
+
+/**
+ * cvmx_ciu2_raw_io#_int_pkt
+ */
+union cvmx_ciu2_raw_iox_int_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_iox_int_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupt pulse */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupts */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts
+ See MIX*_ISR */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX 0-4 packet drop interrupt pulse
+ Set any time the corresponding GMX drops a packet */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX 0-4 interrupt
+ See GMX*_RX*_INT_REG, GMX*_TX_INT_REG,
+ PCS0_INT*_REG, PCSX*_INT_REG
+ For PKT, all 98 RAW readouts will be the same value */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_iox_int_pkt_s cn68xx;
+ struct cvmx_ciu2_raw_iox_int_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupts */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts
+ See MIX*_ISR */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX 0-4 packet drop interrupt pulse
+ Set any time the corresponding GMX drops a packet */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX 0-4 interrupt
+ See GMX*_RX*_INT_REG, GMX*_TX_INT_REG,
+ PCS0_INT*_REG, PCSX*_INT_REG
+ For PKT, all 98 RAW readouts will be the same value */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_iox_int_pkt cvmx_ciu2_raw_iox_int_pkt_t;
+
+/**
+ * cvmx_ciu2_raw_io#_int_rml
+ */
+union cvmx_ciu2_raw_iox_int_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_iox_int_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ See DPI DMA instruction completion */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_INT_ECCERR, TIM_INT0 */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_INT_REG */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt
+ See SSO_ERR */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM
+ For RML, all 98 RAW readouts will be the same value */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_iox_int_rml_s cn68xx;
+ struct cvmx_ciu2_raw_iox_int_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_INT_ECCERR, TIM_INT0 */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_INT_REG */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt
+ See SSO_ERR */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM
+ For RML, all 98 RAW readouts will be the same value */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_iox_int_rml cvmx_ciu2_raw_iox_int_rml_t;
+
+/**
+ * cvmx_ciu2_raw_io#_int_wdog
+ */
+union cvmx_ciu2_raw_iox_int_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_iox_int_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupts
+ For WDOG, all 98 RAW readouts will be the same value */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_iox_int_wdog_s cn68xx;
+ struct cvmx_ciu2_raw_iox_int_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_iox_int_wdog cvmx_ciu2_raw_iox_int_wdog_t;
+
+/**
+ * cvmx_ciu2_raw_io#_int_wrkq
+ */
+union cvmx_ciu2_raw_iox_int_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_iox_int_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work queue interrupts
+ See SSO_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the SSO.
+ For WRKQ, all 98 RAW readouts will be the same value */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_iox_int_wrkq_s cn68xx;
+ struct cvmx_ciu2_raw_iox_int_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_iox_int_wrkq cvmx_ciu2_raw_iox_int_wrkq_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip2_gpio
+ */
+union cvmx_ciu2_raw_ppx_ip2_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip2_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ For GPIO, all 98 RAW readouts will be the same value */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip2_gpio_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip2_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip2_gpio cvmx_ciu2_raw_ppx_ip2_gpio_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip2_io
+ */
+union cvmx_ciu2_raw_ppx_ip2_io {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip2_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt
+ See PEMx_INT_SUM (enabled by PEMx_INT_ENB) */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA software enable
+ See CIU_PCI_INTA */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit, a copy of
+ CIU2_MSIRED_PPx_IPy.INT; for all IO interrupts
+ CIU2_RAW_IOX_INT_IO[MSIRED] is always zero.
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D
+ PCI_INTR[3] = INTD
+ PCI_INTR[2] = INTC
+ PCI_INTR[1] = INTB
+ PCI_INTR[0] = INTA
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ For IO, all 98 RAW readouts will be different */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip2_io_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip2_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip2_io cvmx_ciu2_raw_ppx_ip2_io_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip2_mem
+ */
+union cvmx_ciu2_raw_ppx_ip2_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip2_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt
+ See LMC*_INT
+ For MEM, all 98 RAW readouts will be the same value */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip2_mem_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip2_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip2_mem cvmx_ciu2_raw_ppx_ip2_mem_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip2_mio
+ */
+union cvmx_ciu2_raw_ppx_ip2_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip2_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt
+ See UCTL*_INT_REG */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x Interrupt
+ See MIO_TWSx_INT */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt
+ See NDF_INT */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupts
+ Set any time the corresponding CIU timer expires */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt
+ See SSO_IQ_INT */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT*
+ For MIO, all 98 RAW readouts will be the same value */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip2_mio_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip2_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip2_mio cvmx_ciu2_raw_ppx_ip2_mio_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip2_pkt
+ */
+union cvmx_ciu2_raw_ppx_ip2_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip2_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupt pulse */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupts */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts
+ See MIX*_ISR */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX 0-4 packet drop interrupt pulse
+ Set any time the corresponding GMX drops a packet */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX 0-4 interrupt
+ See GMX*_RX*_INT_REG, GMX*_TX_INT_REG,
+ PCS0_INT*_REG, PCSX*_INT_REG
+ For PKT, all 98 RAW readouts will be the same value */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip2_pkt_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip2_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupts */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts
+ See MIX*_ISR */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX 0-4 packet drop interrupt pulse
+ Set any time the corresponding GMX drops a packet */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX 0-4 interrupt
+ See GMX*_RX*_INT_REG, GMX*_TX_INT_REG,
+ PCS0_INT*_REG, PCSX*_INT_REG
+ For PKT, all 98 RAW readouts will be the same value */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip2_pkt cvmx_ciu2_raw_ppx_ip2_pkt_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip2_rml
+ */
+union cvmx_ciu2_raw_ppx_ip2_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip2_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ See DPI DMA instruction completion */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_INT_ECCERR, TIM_INT0 */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_INT_REG */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt
+ See SSO_ERR */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM
+ For RML, all 98 RAW readouts will be the same value */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip2_rml_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip2_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_INT_ECCERR, TIM_INT0 */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_INT_REG */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt
+ See SSO_ERR */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM
+ For RML, all 98 RAW readouts will be the same value */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip2_rml cvmx_ciu2_raw_ppx_ip2_rml_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip2_wdog
+ */
+union cvmx_ciu2_raw_ppx_ip2_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip2_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupts
+ For WDOG, all 98 RAW readouts will be the same value */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip2_wdog_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip2_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip2_wdog cvmx_ciu2_raw_ppx_ip2_wdog_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip2_wrkq
+ */
+union cvmx_ciu2_raw_ppx_ip2_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip2_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work queue interrupts
+ See SSO_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the SSO.
+ For WRKQ, all 98 RAW readouts will be the same value */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip2_wrkq_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip2_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip2_wrkq cvmx_ciu2_raw_ppx_ip2_wrkq_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip3_gpio
+ */
+union cvmx_ciu2_raw_ppx_ip3_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip3_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ For GPIO, all 98 RAW readouts will be the same value */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip3_gpio_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip3_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip3_gpio cvmx_ciu2_raw_ppx_ip3_gpio_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip3_io
+ */
+union cvmx_ciu2_raw_ppx_ip3_io {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip3_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt
+ See PEMx_INT_SUM (enabled by PEMx_INT_ENB) */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA software enable
+ See CIU_PCI_INTA */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit, a copy of
+ CIU2_MSIRED_PPx_IPy.INT; for all IO interrupts
+ CIU2_RAW_IOX_INT_IO[MSIRED] is always zero.
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D
+ PCI_INTR[3] = INTD
+ PCI_INTR[2] = INTC
+ PCI_INTR[1] = INTB
+ PCI_INTR[0] = INTA
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ For IO, all 98 RAW readouts will be different */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip3_io_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip3_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip3_io cvmx_ciu2_raw_ppx_ip3_io_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip3_mem
+ */
+union cvmx_ciu2_raw_ppx_ip3_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip3_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt
+ See LMC*_INT
+ For MEM, all 98 RAW readouts will be the same value */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip3_mem_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip3_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip3_mem cvmx_ciu2_raw_ppx_ip3_mem_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip3_mio
+ */
+union cvmx_ciu2_raw_ppx_ip3_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip3_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt
+ See UCTL*_INT_REG */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x Interrupt
+ See MIO_TWSx_INT */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt
+ See NDF_INT */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupts
+ Set any time the corresponding CIU timer expires */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt
+ See SSO_IQ_INT */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT*
+ For MIO, all 98 RAW readouts will be the same value */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip3_mio_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip3_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip3_mio cvmx_ciu2_raw_ppx_ip3_mio_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip3_pkt
+ */
+union cvmx_ciu2_raw_ppx_ip3_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip3_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupt pulse */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupts */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts
+ See MIX*_ISR */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX 0-4 packet drop interrupt pulse
+ Set any time the corresponding GMX drops a packet */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX 0-4 interrupt
+ See GMX*_RX*_INT_REG, GMX*_TX_INT_REG,
+ PCS0_INT*_REG, PCSX*_INT_REG
+ For PKT, all 98 RAW readouts will be the same value */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip3_pkt_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip3_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupts */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts
+ See MIX*_ISR */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX 0-4 packet drop interrupt pulse
+ Set any time the corresponding GMX drops a packet */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX 0-4 interrupt
+ See GMX*_RX*_INT_REG, GMX*_TX_INT_REG,
+ PCS0_INT*_REG, PCSX*_INT_REG
+ For PKT, all 98 RAW readouts will be the same value */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip3_pkt cvmx_ciu2_raw_ppx_ip3_pkt_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip3_rml
+ */
+union cvmx_ciu2_raw_ppx_ip3_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip3_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ See DPI DMA instruction completion */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_INT_ECCERR, TIM_INT0 */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_INT_REG */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt
+ See SSO_ERR */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM
+ For RML, all 98 RAW readouts will be the same value */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip3_rml_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip3_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_INT_ECCERR, TIM_INT0 */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_INT_REG */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt
+ See SSO_ERR */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM
+ For RML, all 98 RAW readouts will be the same value */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip3_rml cvmx_ciu2_raw_ppx_ip3_rml_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip3_wdog
+ */
+union cvmx_ciu2_raw_ppx_ip3_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip3_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupts
+ For WDOG, all 98 RAW readouts will be the same value */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip3_wdog_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip3_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip3_wdog cvmx_ciu2_raw_ppx_ip3_wdog_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip3_wrkq
+ */
+union cvmx_ciu2_raw_ppx_ip3_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip3_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work queue interrupts
+ See SSO_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the SSO.
+ For WRKQ, all 98 RAW readouts will be the same value */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip3_wrkq_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip3_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip3_wrkq cvmx_ciu2_raw_ppx_ip3_wrkq_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip4_gpio
+ */
+union cvmx_ciu2_raw_ppx_ip4_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip4_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupts
+ For GPIO, all 98 RAW readouts will be the same value */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip4_gpio_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip4_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip4_gpio cvmx_ciu2_raw_ppx_ip4_gpio_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip4_io
+ */
+union cvmx_ciu2_raw_ppx_ip4_io {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip4_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt
+ See PEMx_INT_SUM (enabled by PEMx_INT_ENB) */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA software enable
+ See CIU_PCI_INTA */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit, a copy of
+ CIU2_MSIRED_PPx_IPy.INT; for all IO interrupts
+ CIU2_RAW_IOX_INT_IO[MSIRED] is always zero.
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI
+ See SLI_MSI_RCVn for bit <40+n> */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D
+ PCI_INTR[3] = INTD
+ PCI_INTR[2] = INTC
+ PCI_INTR[1] = INTB
+ PCI_INTR[0] = INTA
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the SLI chapter of the spec
+ For IO, all 98 RAW readouts will be different */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip4_io_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip4_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip4_io cvmx_ciu2_raw_ppx_ip4_io_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip4_mem
+ */
+union cvmx_ciu2_raw_ppx_ip4_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip4_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt
+ See LMC*_INT
+ For MEM, all 98 RAW readouts will be the same value */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip4_mem_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip4_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip4_mem cvmx_ciu2_raw_ppx_ip4_mem_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip4_mio
+ */
+union cvmx_ciu2_raw_ppx_ip4_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip4_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt
+ See MIO_RST_INT */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt
+ Set when HW decrements MIO_PTP_EVT_CNT to zero */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB EHCI or OHCI Interrupt
+ See UAHC0_EHCI_USBSTS, UAHC0_OHCI0_HCINTERRUPTSTATUS */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt
+ See UCTL*_INT_REG */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Two UART interrupts
+ See MIO_UARTn_IIR[IID] for bit <34+n> */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x Interrupt
+ See MIO_TWSx_INT */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt
+ See MIO_BOOT_DMA_INT*, MIO_NDF_DMA_INT */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt
+ See NDF_INT */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupts
+ Set any time the corresponding CIU timer expires */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt
+ Set any time PIP/IPD drops a packet */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt
+ See SSO_IQ_INT */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt
+ See IPD_PORT_QOS_INT*
+ For MIO, all 98 RAW readouts will be the same value */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip4_mio_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip4_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip4_mio cvmx_ciu2_raw_ppx_ip4_mio_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip4_pkt
+ */
+union cvmx_ciu2_raw_ppx_ip4_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip4_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupt pulse */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupts */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts
+ See MIX*_ISR */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX 0-4 packet drop interrupt pulse
+ Set any time corresponding GMX drops a packet */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX 0-4 interrupt
+ See GMX*_RX*_INT_REG, GMX*_TX_INT_REG,
+ PCS0_INT*_REG, PCSX*_INT_REG
+ For PKT, all 98 RAW readouts will be the same value */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip4_pkt_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip4_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupts */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts
+ See MIX*_ISR */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX 0-4 packet drop interrupt pulse
+ Set any time corresponding GMX drops a packet */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX 0-4 interrupt
+ See GMX*_RX*_INT_REG, GMX*_TX_INT_REG,
+ PCS0_INT*_REG, PCSX*_INT_REG
+ For PKT, all 98 RAW readouts will be the same value */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip4_pkt cvmx_ciu2_raw_ppx_ip4_pkt_t;
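+
+ /* Illustrative sketch (not part of the SDK): the cn68xxp1 view above
+    lacks the ILK_DRP field that the cn68xx view carries in bits <53:52>,
+    so code consulting ILK_DRP must know the chip pass. The helper and
+    its is_pass1 flag are assumptions for illustration. */
+ static inline int example_ilk_drp_pending(cvmx_ciu2_raw_ppx_ip4_pkt_t pkt,
+                                           int is_pass1)
+ {
+     /* On pass 1, bits <53:52> are reserved, so report no ILK drops. */
+     return is_pass1 ? 0 : (pkt.s.ilk_drp != 0);
+ }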
+
+/**
+ * cvmx_ciu2_raw_pp#_ip4_rml
+ */
+union cvmx_ciu2_raw_ppx_ip4_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip4_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ See DPI DMA instruction completion */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_INT_ECCERR, TIM_INT0 */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_INT_REG */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt
+ See SSO_ERR */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM
+ For RML, all 98 RAW readouts will be the same value */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip4_rml_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip4_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt
+ See TRA_INT_STATUS */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_INT_ECCERR, TIM_INT0 */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_INT_REG */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt
+ See SSO_ERR */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM
+ For RML, all 98 RAW readouts will be the same value */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip4_rml cvmx_ciu2_raw_ppx_ip4_rml_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip4_wdog
+ */
+union cvmx_ciu2_raw_ppx_ip4_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip4_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupts
+ For WDOG, all 98 RAW readouts will be the same value */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip4_wdog_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip4_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip4_wdog cvmx_ciu2_raw_ppx_ip4_wdog_t;
+
+/**
+ * cvmx_ciu2_raw_pp#_ip4_wrkq
+ */
+union cvmx_ciu2_raw_ppx_ip4_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_raw_ppx_ip4_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work queue interrupts
+ See SSO_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the SSO.
+ For WRKQ, all 98 RAW readouts will be the same value */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_raw_ppx_ip4_wrkq_s cn68xx;
+ struct cvmx_ciu2_raw_ppx_ip4_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_raw_ppx_ip4_wrkq cvmx_ciu2_raw_ppx_ip4_wrkq_t;
+
+/**
+ * cvmx_ciu2_src_io#_int_gpio
+ */
+union cvmx_ciu2_src_iox_int_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_src_iox_int_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupts source */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_src_iox_int_gpio_s cn68xx;
+ struct cvmx_ciu2_src_iox_int_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_iox_int_gpio cvmx_ciu2_src_iox_int_gpio_t;
+
+/**
+ * cvmx_ciu2_src_io#_int_io
+ */
+union cvmx_ciu2_src_iox_int_io {
+ uint64_t u64;
+ struct cvmx_ciu2_src_iox_int_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt source
+ CIU2_RAW_IO[PEM] & CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA source
+ CIU2_RAW_IO[PCI_INTA] & CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit source
+ CIU2_RAW_IO[MSIRED] & CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI source
+ CIU2_RAW_IO[PCI_MSI] & CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D interrupt source
+ CIU2_RAW_IO[PCI_INTR] & CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_src_iox_int_io_s cn68xx;
+ struct cvmx_ciu2_src_iox_int_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_iox_int_io cvmx_ciu2_src_iox_int_io_t;
+
+/**
+ * cvmx_ciu2_src_io#_int_mbox
+ */
+union cvmx_ciu2_src_iox_int_mbox {
+ uint64_t u64;
+ struct cvmx_ciu2_src_iox_int_mbox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Mailbox interrupt Source (RAW & ENABLE)
+ For CIU2_SRC_PPX_IPx_MBOX:
+ Four mailbox interrupts for entries 0-31
+ RAW & ENABLE
+ [3] is the OR of <31:24> of CIU2_MBOX
+ [2] is the OR of <23:16> of CIU2_MBOX
+ [1] is the OR of <15:8> of CIU2_MBOX
+ [0] is the OR of <7:0> of CIU2_MBOX
+ CIU2_MBOX value can be read out via CSR address
+ CIU_MBOX_SET/CLR
+ For CIU2_SRC_IOX_INT_MBOX:
+ always zero */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_src_iox_int_mbox_s cn68xx;
+ struct cvmx_ciu2_src_iox_int_mbox_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_iox_int_mbox cvmx_ciu2_src_iox_int_mbox_t;
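+
+ /* Illustrative sketch (not part of the SDK): per the comment above,
+    each MBOX summary bit is the OR of one byte lane of CIU2_MBOX.
+    The function name is an assumption for illustration. */
+ static inline uint64_t example_mbox_summary(uint32_t ciu2_mbox)
+ {
+     uint64_t mbox = 0;
+     int i;
+
+     for (i = 0; i < 4; i++)
+         if ((ciu2_mbox >> (8 * i)) & 0xff)
+             mbox |= 1ull << i; /* [i] = OR of CIU2_MBOX<8i+7:8i> */
+     return mbox;               /* matches the MBOX<3:0> field layout */
+ }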
+
+/**
+ * cvmx_ciu2_src_io#_int_mem
+ */
+union cvmx_ciu2_src_iox_int_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_src_iox_int_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt source
+ CIU2_RAW_MEM[LMC] & CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_src_iox_int_mem_s cn68xx;
+ struct cvmx_ciu2_src_iox_int_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_iox_int_mem cvmx_ciu2_src_iox_int_mem_t;
+
+/**
+ * cvmx_ciu2_src_io#_int_mio
+ */
+union cvmx_ciu2_src_iox_int_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_src_iox_int_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt source
+ CIU2_RAW_MIO[RST] & CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt source
+ CIU2_RAW_MIO[PTP] & CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB HCI Interrupt source
+ CIU2_RAW_MIO[USB_HCI] & CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt source
+ CIU2_RAW_MIO[USB_UCTL] & CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Two UART interrupts source
+ CIU2_RAW_MIO[UART] & CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x Interrupt source
+ CIU2_RAW_MIO[TWSI] & CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt source
+ CIU2_RAW_MIO[BOOTDMA] & CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< MIO boot interrupt source
+ CIU2_RAW_MIO[MIO] & CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt source
+ CIU2_RAW_MIO[NAND] & CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupts source
+ CIU2_RAW_MIO[TIMER] & CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt source
+ CIU2_RAW_MIO[IPD_DRP] & CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt source
+ CIU2_RAW_MIO[SSOIQ] & CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< IPD per-port cnt threshold interrupt source
+ CIU2_RAW_MIO[IPDPPTHR] & CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_src_iox_int_mio_s cn68xx;
+ struct cvmx_ciu2_src_iox_int_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_iox_int_mio cvmx_ciu2_src_iox_int_mio_t;
+
+/**
+ * cvmx_ciu2_src_io#_int_pkt
+ */
+union cvmx_ciu2_src_iox_int_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_src_iox_int_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupts source
+ CIU2_RAW_PKT[ILK_DRP] & CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupts source
+ CIU2_RAW_PKT[ILK] & CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts source
+ CIU2_RAW_PKT[MII] & CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt source
+ CIU2_RAW_PKT[AGL] & CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt, RAW & ENABLE
+ CIU2_RAW_PKT[GMX_DRP] & CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt source
+ CIU2_RAW_PKT[AGX] & CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_src_iox_int_pkt_s cn68xx;
+ struct cvmx_ciu2_src_iox_int_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupts source
+ CIU2_RAW_PKT[ILK] & CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts source
+ CIU2_RAW_PKT[MII] & CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt source
+ CIU2_RAW_PKT[AGL] & CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt, RAW & ENABLE
+ CIU2_RAW_PKT[GMX_DRP] & CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt source
+ CIU2_RAW_PKT[AGX] & CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_src_iox_int_pkt cvmx_ciu2_src_iox_int_pkt_t;
+
+/**
+ * cvmx_ciu2_src_io#_int_rml
+ */
+union cvmx_ciu2_src_iox_int_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_src_iox_int_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt source
+ CIU2_RAW_RML[TRACE] & CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt source
+ CIU2_RAW_RML[L2C] & CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt source
+ CIU2_RAW_RML[DFA] & CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ See DPI DMA instruction completion */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt source
+ CIU2_RAW_RML[DPI] & CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< SLI interrupt source
+ CIU2_RAW_RML[SLI] & CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt source
+ CIU2_RAW_RML[KEY] & CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< RAD interrupt source
+ CIU2_RAW_RML[RAD] & CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< TIM interrupt source
+ CIU2_RAW_RML[TIM] & CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt source
+ CIU2_RAW_RML[ZIP] & CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt source
+ CIU2_RAW_RML[SSO] & CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt source
+ CIU2_RAW_RML[PKO] & CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< PIP interrupt source
+ CIU2_RAW_RML[PIP] & CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< IPD interrupt source
+ CIU2_RAW_RML[IPD] & CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< FPA interrupt source
+ CIU2_RAW_RML[FPA] & CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt source
+ CIU2_RAW_RML[IOB] & CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_src_iox_int_rml_s cn68xx;
+ struct cvmx_ciu2_src_iox_int_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt source
+ CIU2_RAW_RML[TRACE] & CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt source
+ CIU2_RAW_RML[L2C] & CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt source
+ CIU2_RAW_RML[DFA] & CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt source
+ CIU2_RAW_RML[DPI] & CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< SLI interrupt source
+ CIU2_RAW_RML[SLI] & CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt source
+ CIU2_RAW_RML[KEY] & CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< RAD interrupt source
+ CIU2_RAW_RML[RAD] & CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< TIM interrupt source
+ CIU2_RAW_RML[TIM] & CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt source
+ CIU2_RAW_RML[ZIP] & CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt source
+ CIU2_RAW_RML[SSO] & CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt source
+ CIU2_RAW_RML[PKO] & CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< PIP interrupt source
+ CIU2_RAW_RML[PIP] & CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< IPD interrupt source
+ CIU2_RAW_RML[IPD] & CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< FPA interrupt source
+ CIU2_RAW_RML[FPA] & CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt source
+ CIU2_RAW_RML[IOB] & CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_src_iox_int_rml cvmx_ciu2_src_iox_int_rml_t;
+
+/**
+ * cvmx_ciu2_src_io#_int_wdog
+ */
+union cvmx_ciu2_src_iox_int_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_src_iox_int_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupts source
+ CIU2_RAW_WDOG & CIU2_EN_xx_yy_WDOG */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_src_iox_int_wdog_s cn68xx;
+ struct cvmx_ciu2_src_iox_int_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_iox_int_wdog cvmx_ciu2_src_iox_int_wdog_t;
+
+/**
+ * cvmx_ciu2_src_io#_int_wrkq
+ */
+union cvmx_ciu2_src_iox_int_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_src_iox_int_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work queue intr source,
+ CIU2_RAW_WRKQ & CIU2_EN_xx_yy_WRKQ */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_src_iox_int_wrkq_s cn68xx;
+ struct cvmx_ciu2_src_iox_int_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_iox_int_wrkq cvmx_ciu2_src_iox_int_wrkq_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip2_gpio
+ */
+union cvmx_ciu2_src_ppx_ip2_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip2_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupts source */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip2_gpio_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip2_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip2_gpio cvmx_ciu2_src_ppx_ip2_gpio_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip2_io
+ */
+union cvmx_ciu2_src_ppx_ip2_io {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip2_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt source
+ CIU2_RAW_IO[PEM] & CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA source
+ CIU2_RAW_IO[PCI_INTA] & CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit source
+ CIU2_RAW_IO[MSIRED] & CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI source
+ CIU2_RAW_IO[PCI_MSI] & CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D interrupt source
+ CIU2_RAW_IO[PCI_INTR] & CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip2_io_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip2_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip2_io cvmx_ciu2_src_ppx_ip2_io_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip2_mbox
+ */
+union cvmx_ciu2_src_ppx_ip2_mbox {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip2_mbox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Mailbox interrupt Source (RAW & ENABLE)
+ For CIU2_SRC_PPX_IPx_MBOX:
+ Four mailbox interrupts for entries 0-31
+ RAW & ENABLE
+ [3] is the OR of <31:24> of CIU2_MBOX
+ [2] is the OR of <23:16> of CIU2_MBOX
+ [1] is the OR of <15:8> of CIU2_MBOX
+ [0] is the OR of <7:0> of CIU2_MBOX
+ CIU2_MBOX value can be read out via CSR address
+ CIU_MBOX_SET/CLR
+ For CIU2_SRC_IOX_INT_MBOX:
+ always zero */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip2_mbox_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip2_mbox_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip2_mbox cvmx_ciu2_src_ppx_ip2_mbox_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip2_mem
+ */
+union cvmx_ciu2_src_ppx_ip2_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip2_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt source
+ CIU2_RAW_MEM[LMC] & CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip2_mem_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip2_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip2_mem cvmx_ciu2_src_ppx_ip2_mem_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip2_mio
+ */
+union cvmx_ciu2_src_ppx_ip2_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip2_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt source
+ CIU2_RAW_MIO[RST] & CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt source
+ CIU2_RAW_MIO[PTP] & CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB HCI Interrupt source
+ CIU2_RAW_MIO[USB_HCI] & CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt source
+ CIU2_RAW_MIO[USB_UCTL] & CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Two UART interrupts source
+ CIU2_RAW_MIO[UART] & CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x Interrupt source
+ CIU2_RAW_MIO[TWSI] & CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt source
+ CIU2_RAW_MIO[BOOTDMA] & CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< MIO boot interrupt source
+ CIU2_RAW_MIO[MIO] & CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt source
+ CIU2_RAW_MIO[NAND] & CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupts source
+ CIU2_RAW_MIO[TIMER] & CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt source
+ CIU2_RAW_MIO[IPD_DRP] & CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt source
+ CIU2_RAW_MIO[SSOIQ] & CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< IPD per-port cnt threshold interrupt source
+ CIU2_RAW_MIO[IPDPPTHR] & CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip2_mio_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip2_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip2_mio cvmx_ciu2_src_ppx_ip2_mio_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip2_pkt
+ */
+union cvmx_ciu2_src_ppx_ip2_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip2_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupts source
+ CIU2_RAW_PKT[ILK_DRP] & CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupts source
+ CIU2_RAW_PKT[ILK] & CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts source
+ CIU2_RAW_PKT[MII] & CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt source
+ CIU2_RAW_PKT[AGL] & CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt, RAW & ENABLE
+ CIU2_RAW_PKT[GMX_DRP] & CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt source
+ CIU2_RAW_PKT[AGX] & CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip2_pkt_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip2_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupts source
+ CIU2_RAW_PKT[ILK] & CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts source
+ CIU2_RAW_PKT[MII] & CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt source
+ CIU2_RAW_PKT[AGL] & CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt, RAW & ENABLE
+ CIU2_RAW_PKT[GMX_DRP] & CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt source
+ CIU2_RAW_PKT[AGX] & CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip2_pkt cvmx_ciu2_src_ppx_ip2_pkt_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip2_rml
+ */
+union cvmx_ciu2_src_ppx_ip2_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip2_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt source
+ CIU2_RAW_RML[TRACE] & CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt source
+ CIU2_RAW_RML[L2C] & CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt source
+ CIU2_RAW_RML[DFA] & CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ See DPI DMA instruction completion */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt source
+ CIU2_RAW_RML[DPI] & CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< SLI interrupt source
+ CIU2_RAW_RML[SLI] & CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt source
+ CIU2_RAW_RML[KEY] & CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< RAD interrupt source
+ CIU2_RAW_RML[RAD] & CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< TIM interrupt source
+ CIU2_RAW_RML[TIM] & CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt source
+ CIU2_RAW_RML[ZIP] & CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt source
+ CIU2_RAW_RML[SSO] & CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt source
+ CIU2_RAW_RML[PKO] & CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< PIP interrupt source
+ CIU2_RAW_RML[PIP] & CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< IPD interrupt source
+ CIU2_RAW_RML[IPD] & CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< FPA interrupt source
+ CIU2_RAW_RML[FPA] & CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt source
+ CIU2_RAW_RML[IOB] & CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip2_rml_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip2_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt source
+ CIU2_RAW_RML[TRACE] & CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt source
+ CIU2_RAW_RML[L2C] & CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt source
+ CIU2_RAW_RML[DFA] & CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt source
+ CIU2_RAW_RML[DPI] & CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< SLI interrupt source
+ CIU2_RAW_RML[SLI] & CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt source
+ CIU2_RAW_RML[KEY] & CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< RAD interrupt source
+ CIU2_RAW_RML[RAD] & CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< TIM interrupt source
+ CIU2_RAW_RML[TIM] & CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt source
+ CIU2_RAW_RML[ZIP] & CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt source
+ CIU2_RAW_RML[SSO] & CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt source
+ CIU2_RAW_RML[PKO] & CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< PIP interrupt source
+ CIU2_RAW_RML[PIP] & CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< IPD interrupt source
+ CIU2_RAW_RML[IPD] & CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< FPA interrupt source
+ CIU2_RAW_RML[FPA] & CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt source
+ CIU2_RAW_RML[IOB] & CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip2_rml cvmx_ciu2_src_ppx_ip2_rml_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip2_wdog
+ */
+union cvmx_ciu2_src_ppx_ip2_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip2_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupts source
+ CIU2_RAW_WDOG & CIU2_EN_xx_yy_WDOG */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip2_wdog_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip2_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip2_wdog cvmx_ciu2_src_ppx_ip2_wdog_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip2_wrkq
+ *
+ * All SRC values are generated by ANDing the raw value (CIU2_RAW_XXX) with CIU2_EN_PPX_IPx_XXX
+ *
+ */
+union cvmx_ciu2_src_ppx_ip2_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip2_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work queue intr source,
+ CIU2_RAW_WRKQ & CIU2_EN_xx_yy_WRKQ */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip2_wrkq_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip2_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip2_wrkq cvmx_ciu2_src_ppx_ip2_wrkq_t;
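+
+ /* Illustrative sketch (not part of the SDK): as noted above, every SRC
+    value is the AND of the RAW and ENABLE registers, so it can be
+    reconstructed in software from separate RAW/EN reads. The function
+    name is an assumption for illustration. */
+ static inline uint64_t example_src_wrkq(uint64_t raw_wrkq, uint64_t en_wrkq)
+ {
+     cvmx_ciu2_src_ppx_ip2_wrkq_t src;
+
+     src.u64 = raw_wrkq & en_wrkq; /* CIU2_RAW_WRKQ & CIU2_EN_PPx_IP2_WRKQ */
+     return src.s.workq;
+ }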
+
+/**
+ * cvmx_ciu2_src_pp#_ip3_gpio
+ */
+union cvmx_ciu2_src_ppx_ip3_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip3_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupts source */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip3_gpio_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip3_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip3_gpio cvmx_ciu2_src_ppx_ip3_gpio_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip3_io
+ */
+union cvmx_ciu2_src_ppx_ip3_io {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip3_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt source
+ CIU2_RAW_IO[PEM] & CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA source
+ CIU2_RAW_IO[PCI_INTA] & CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit source
+ CIU2_RAW_IO[MSIRED] & CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI source
+ CIU2_RAW_IO[PCI_MSI] & CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D interrupt source
+ CIU2_RAW_IO[PCI_INTR] & CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip3_io_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip3_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip3_io cvmx_ciu2_src_ppx_ip3_io_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip3_mbox
+ */
+union cvmx_ciu2_src_ppx_ip3_mbox {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip3_mbox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Mailbox interrupt Source (RAW & ENABLE)
+ For CIU2_SRC_PPX_IPx_MBOX:
+ Four mailbox interrupts for entries 0-31
+ RAW & ENABLE
+ [3] is the OR of <31:24> of CIU2_MBOX
+ [2] is the OR of <23:16> of CIU2_MBOX
+ [1] is the OR of <15:8> of CIU2_MBOX
+ [0] is the OR of <7:0> of CIU2_MBOX
+ CIU2_MBOX value can be read out via CSR address
+ CIU_MBOX_SET/CLR
+ For CIU2_SRC_IOX_INT_MBOX:
+ always zero */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip3_mbox_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip3_mbox_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip3_mbox cvmx_ciu2_src_ppx_ip3_mbox_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip3_mem
+ */
+union cvmx_ciu2_src_ppx_ip3_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip3_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt source
+ CIU2_RAW_MEM[LMC] & CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip3_mem_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip3_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip3_mem cvmx_ciu2_src_ppx_ip3_mem_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip3_mio
+ */
+union cvmx_ciu2_src_ppx_ip3_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip3_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt source
+ CIU2_RAW_MIO[RST] & CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt source
+ CIU2_RAW_MIO[PTP] & CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB HCI Interrupt source
+ CIU2_RAW_MIO[USB_HCI] & CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt source
+ CIU2_RAW_MIO[USB_UCTL] & CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Two UART interrupts source
+ CIU2_RAW_MIO[UART] & CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x Interrupt source
+ CIU2_RAW_MIO[TWSI] & CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt source
+ CIU2_RAW_MIO[BOOTDMA] & CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< MIO boot interrupt source
+ CIU2_RAW_MIO[MIO] & CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt source
+ CIU2_RAW_MIO[NAND] & CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupts source
+ CIU2_RAW_MIO[TIMER] & CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt source
+ CIU2_RAW_MIO[IPD_DRP] & CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt source
+ CIU2_RAW_MIO[SSOIQ] & CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< IPD per-port cnt threshold interrupt source
+ CIU2_RAW_MIO[IPDPPTHR] & CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip3_mio_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip3_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip3_mio cvmx_ciu2_src_ppx_ip3_mio_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip3_pkt
+ */
+union cvmx_ciu2_src_ppx_ip3_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip3_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupts source
+ CIU2_RAW_PKT[ILK_DRP] & CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupts source
+ CIU2_RAW_PKT[ILK] & CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts source
+ CIU2_RAW_PKT[MII] & CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt source
+ CIU2_RAW_PKT[AGL] & CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt, RAW & ENABLE
+ CIU2_RAW_PKT[GMX_DRP] & CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt source
+ CIU2_RAW_PKT[AGX] & CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip3_pkt_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip3_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupts source
+ CIU2_RAW_PKT[ILK] & CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts source
+ CIU2_RAW_PKT[MII] & CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt source
+ CIU2_RAW_PKT[AGL] & CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt, RAW & ENABLE
+ CIU2_RAW_PKT[GMX_DRP] & CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt source
+ CIU2_RAW_PKT[AGX] & CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip3_pkt cvmx_ciu2_src_ppx_ip3_pkt_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip3_rml
+ */
+union cvmx_ciu2_src_ppx_ip3_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip3_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt source
+ CIU2_RAW_RML[TRACE] & CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt source
+ CIU2_RAW_RML[L2C] & CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt source
+ CIU2_RAW_RML[DFA] & CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ See DPI DMA instruction completion */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt source
+ CIU2_RAW_RML[DPI] & CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< SLI interrupt source
+ CIU2_RAW_RML[SLI] & CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt source
+ CIU2_RAW_RML[KEY] & CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< RAD interrupt source
+ CIU2_RAW_RML[RAD] & CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< TIM interrupt source
+ CIU2_RAW_RML[TIM] & CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt source
+ CIU2_RAW_RML[ZIP] & CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt source
+ CIU2_RAW_RML[SSO] & CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt source
+ CIU2_RAW_RML[PKO] & CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< PIP interrupt source
+ CIU2_RAW_RML[PIP] & CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< IPD interrupt source
+ CIU2_RAW_RML[IPD] & CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< FPA interrupt source
+ CIU2_RAW_RML[FPA] & CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt source
+ CIU2_RAW_RML[IOB] & CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip3_rml_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip3_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt source
+ CIU2_RAW_RML[TRACE] & CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt source
+ CIU2_RAW_RML[L2C] & CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt source
+ CIU2_RAW_RML[DFA] & CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt source
+ CIU2_RAW_RML[DPI] & CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< SLI interrupt source
+ CIU2_RAW_RML[SLI] & CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt source
+ CIU2_RAW_RML[KEY] & CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< RAD interrupt source
+ CIU2_RAW_RML[RAD] & CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< TIM interrupt source
+ CIU2_RAW_RML[TIM] & CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt source
+ CIU2_RAW_RML[ZIP] & CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt source
+ CIU2_RAW_RML[SSO] & CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt source
+ CIU2_RAW_RML[PKO] & CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< PIP interrupt source
+ CIU2_RAW_RML[PIP] & CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< IPD interrupt source
+ CIU2_RAW_RML[IPD] & CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< FPA interrupt source
+ CIU2_RAW_RML[FPA] & CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt source
+ CIU2_RAW_RML[IOB] & CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip3_rml cvmx_ciu2_src_ppx_ip3_rml_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip3_wdog
+ */
+union cvmx_ciu2_src_ppx_ip3_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip3_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupts source
+ CIU2_RAW_WDOG & CIU2_EN_xx_yy_WDOG */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip3_wdog_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip3_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip3_wdog cvmx_ciu2_src_ppx_ip3_wdog_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip3_wrkq
+ */
+union cvmx_ciu2_src_ppx_ip3_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip3_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work queue intr source,
+ CIU2_RAW_WRKQ & CIU2_EN_xx_yy_WRKQ */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip3_wrkq_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip3_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip3_wrkq cvmx_ciu2_src_ppx_ip3_wrkq_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip4_gpio
+ */
+union cvmx_ciu2_src_ppx_ip4_gpio {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip4_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t gpio : 16; /**< 16 GPIO interrupts source */
+#else
+ uint64_t gpio : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip4_gpio_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip4_gpio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip4_gpio cvmx_ciu2_src_ppx_ip4_gpio_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip4_io
+ */
+union cvmx_ciu2_src_ppx_ip4_io {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip4_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t pem : 2; /**< PEMx interrupt source
+ CIU2_RAW_IO[PEM] & CIU2_EN_xx_yy_IO[PEM] */
+ uint64_t reserved_18_31 : 14;
+ uint64_t pci_inta : 2; /**< PCI_INTA source
+ CIU2_RAW_IO[PCI_INTA] & CIU2_EN_xx_yy_IO[PCI_INTA] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t msired : 1; /**< MSI summary bit source
+ CIU2_RAW_IO[MSIRED] & CIU2_EN_xx_yy_IO[MSIRED]
+ This bit may not be functional in pass 1. */
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI source
+ CIU2_RAW_IO[PCI_MSI] & CIU2_EN_xx_yy_IO[PCI_MSI] */
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_intr : 4; /**< PCIe INTA/B/C/D interrupt source
+ CIU2_RAW_IO[PCI_INTR] & CIU2_EN_xx_yy_IO[PCI_INTR] */
+#else
+ uint64_t pci_intr : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t pci_msi : 4;
+ uint64_t msired : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pci_inta : 2;
+ uint64_t reserved_18_31 : 14;
+ uint64_t pem : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip4_io_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip4_io_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip4_io cvmx_ciu2_src_ppx_ip4_io_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip4_mbox
+ */
+union cvmx_ciu2_src_ppx_ip4_mbox {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip4_mbox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mbox : 4; /**< Mailbox interrupt Source (RAW & ENABLE)
+ For CIU2_SRC_PPX_IPx_MBOX:
+ Four mailbox interrupts for entries 0-31
+ RAW & ENABLE
+ [3] is the OR of <31:24> of CIU2_MBOX
+ [2] is the OR of <23:16> of CIU2_MBOX
+ [1] is the OR of <15:8> of CIU2_MBOX
+ [0] is the OR of <7:0> of CIU2_MBOX
+ CIU2_MBOX value can be read out via CSR address
+ CIU_MBOX_SET/CLR
+ For CIU2_SRC_IOX_INT_MBOX:
+ always zero */
+#else
+ uint64_t mbox : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip4_mbox_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip4_mbox_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip4_mbox cvmx_ciu2_src_ppx_ip4_mbox_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip4_mem
+ */
+union cvmx_ciu2_src_ppx_ip4_mem {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip4_mem_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lmc : 4; /**< LMC* interrupt source
+ CIU2_RAW_MEM[LMC] & CIU2_EN_xx_yy_MEM[LMC] */
+#else
+ uint64_t lmc : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip4_mem_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip4_mem_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip4_mem cvmx_ciu2_src_ppx_ip4_mem_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip4_mio
+ */
+union cvmx_ciu2_src_ppx_ip4_mio {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip4_mio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rst : 1; /**< MIO RST interrupt source
+ CIU2_RAW_MIO[RST] & CIU2_EN_xx_yy_MIO[RST] */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ptp : 1; /**< PTP interrupt source
+ CIU2_RAW_MIO[PTP] & CIU2_EN_xx_yy_MIO[PTP] */
+ uint64_t reserved_45_47 : 3;
+ uint64_t usb_hci : 1; /**< USB HCI Interrupt source
+ CIU2_RAW_MIO[USB_HCI] & CIU2_EN_xx_yy_MIO[USB_HCI] */
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_uctl : 1; /**< USB UCTL* interrupt source
+ CIU2_RAW_MIO[USB_UCTL] & CIU2_EN_xx_yy_MIO[USB_UCTL] */
+ uint64_t reserved_38_39 : 2;
+ uint64_t uart : 2; /**< Two UART interrupts source
+ CIU2_RAW_MIO[UART] & CIU2_EN_xx_yy_MIO[UART] */
+ uint64_t reserved_34_35 : 2;
+ uint64_t twsi : 2; /**< TWSI x Interrupt source
+ CIU2_RAW_MIO[TWSI] & CIU2_EN_xx_yy_MIO[TWSI] */
+ uint64_t reserved_19_31 : 13;
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt source
+ CIU2_RAW_MIO[BOOTDMA] & CIU2_EN_xx_yy_MIO[BOOTDMA] */
+ uint64_t mio : 1; /**< MIO boot interrupt source
+ CIU2_RAW_MIO[MIO] & CIU2_EN_xx_yy_MIO[MIO] */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt source
+ CIU2_RAW_MIO[NAND] & CIU2_EN_xx_yy_MIO[NAND] */
+ uint64_t reserved_12_15 : 4;
+ uint64_t timer : 4; /**< General timer interrupts source
+ CIU2_RAW_MIO[TIMER] & CIU2_EN_xx_yy_MIO[TIMER] */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt source
+ CIU2_RAW_MIO[IPD_DRP] & CIU2_EN_xx_yy_MIO[IPD_DRP] */
+ uint64_t ssoiq : 1; /**< SSO IQ interrupt source
+ CIU2_RAW_MIO[SSOIQ] & CIU2_EN_xx_yy_MIO[SSOIQ] */
+ uint64_t ipdppthr : 1; /**< IPD per-port cnt threshold interrupt source
+ CIU2_RAW_MIO[IPDPPTHR] & CIU2_EN_xx_yy_MIO[IPDPPTHR] */
+#else
+ uint64_t ipdppthr : 1;
+ uint64_t ssoiq : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t timer : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t bootdma : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t twsi : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t uart : 2;
+ uint64_t reserved_38_39 : 2;
+ uint64_t usb_uctl : 1;
+ uint64_t reserved_41_43 : 3;
+ uint64_t usb_hci : 1;
+ uint64_t reserved_45_47 : 3;
+ uint64_t ptp : 1;
+ uint64_t reserved_49_62 : 14;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip4_mio_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip4_mio_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip4_mio cvmx_ciu2_src_ppx_ip4_mio_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip4_pkt
+ */
+union cvmx_ciu2_src_ppx_ip4_pkt {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip4_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t ilk_drp : 2; /**< ILK Packet Drop interrupts source
+ CIU2_RAW_PKT[ILK_DRP] & CIU2_EN_xx_yy_PKT[ILK_DRP] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk : 1; /**< ILK interface interrupts source
+ CIU2_RAW_PKT[ILK] & CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts source
+ CIU2_RAW_PKT[MII] & CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt source
+ CIU2_RAW_PKT[AGL] & CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt, RAW & ENABLE
+ CIU2_RAW_PKT[GMX_DRP] & CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt source
+ CIU2_RAW_PKT[AGX] & CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t ilk_drp : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip4_pkt_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip4_pkt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ilk : 1; /**< ILK interface interrupts source
+ CIU2_RAW_PKT[ILK] & CIU2_EN_xx_yy_PKT[ILK] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface x Interrupts source
+ CIU2_RAW_PKT[MII] & CIU2_EN_xx_yy_PKT[MII] */
+ uint64_t reserved_33_39 : 7;
+ uint64_t agl : 1; /**< AGL interrupt source
+ CIU2_RAW_PKT[AGL] & CIU2_EN_xx_yy_PKT[AGL] */
+ uint64_t reserved_13_31 : 19;
+ uint64_t gmx_drp : 5; /**< GMX packet drop interrupt, RAW & ENABLE
+ CIU2_RAW_PKT[GMX_DRP] & CIU2_EN_xx_yy_PKT[GMX_DRP] */
+ uint64_t reserved_5_7 : 3;
+ uint64_t agx : 5; /**< GMX interrupt source
+ CIU2_RAW_PKT[AGX] & CIU2_EN_xx_yy_PKT[AGX] */
+#else
+ uint64_t agx : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gmx_drp : 5;
+ uint64_t reserved_13_31 : 19;
+ uint64_t agl : 1;
+ uint64_t reserved_33_39 : 7;
+ uint64_t mii : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t ilk : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip4_pkt cvmx_ciu2_src_ppx_ip4_pkt_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip4_rml
+ */
+union cvmx_ciu2_src_ppx_ip4_rml {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip4_rml_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt source
+ CIU2_RAW_RML[TRACE] & CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt source
+ CIU2_RAW_RML[L2C] & CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt source
+ CIU2_RAW_RML[DFA] & CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dpi_dma : 1; /**< DPI DMA instruction completion interrupt
+ See DPI DMA instruction completion */
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi : 1; /**< DPI interrupt source
+ CIU2_RAW_RML[DPI] & CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< SLI interrupt source
+ CIU2_RAW_RML[SLI] & CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt source
+ CIU2_RAW_RML[KEY] & CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< RAD interrupt source
+ CIU2_RAW_RML[RAD] & CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< TIM interrupt source
+ CIU2_RAW_RML[TIM] & CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt source
+ CIU2_RAW_RML[ZIP] & CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt source
+ CIU2_RAW_RML[SSO] & CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt source
+ CIU2_RAW_RML[PKO] & CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< PIP interrupt source
+ CIU2_RAW_RML[PIP] & CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< IPD interrupt source
+ CIU2_RAW_RML[IPD] & CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< FPA interrupt source
+ CIU2_RAW_RML[FPA] & CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt source
+ CIU2_RAW_RML[IOB] & CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_35 : 2;
+ uint64_t dpi_dma : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip4_rml_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip4_rml_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t trace : 4; /**< Trace buffer interrupt source
+ CIU2_RAW_RML[TRACE] & CIU2_EN_xx_yy_RML[TRACE] */
+ uint64_t reserved_49_51 : 3;
+ uint64_t l2c : 1; /**< L2C interrupt source
+ CIU2_RAW_RML[L2C] & CIU2_EN_xx_yy_RML[L2C] */
+ uint64_t reserved_41_47 : 7;
+ uint64_t dfa : 1; /**< DFA interrupt source
+ CIU2_RAW_RML[DFA] & CIU2_EN_xx_yy_RML[DFA] */
+ uint64_t reserved_34_39 : 6;
+ uint64_t dpi : 1; /**< DPI interrupt source
+ CIU2_RAW_RML[DPI] & CIU2_EN_xx_yy_RML[DPI] */
+ uint64_t sli : 1; /**< SLI interrupt source
+ CIU2_RAW_RML[SLI] & CIU2_EN_xx_yy_RML[SLI] */
+ uint64_t reserved_31_31 : 1;
+ uint64_t key : 1; /**< KEY interrupt source
+ CIU2_RAW_RML[KEY] & CIU2_EN_xx_yy_RML[KEY] */
+ uint64_t rad : 1; /**< RAD interrupt source
+ CIU2_RAW_RML[RAD] & CIU2_EN_xx_yy_RML[RAD] */
+ uint64_t tim : 1; /**< TIM interrupt source
+ CIU2_RAW_RML[TIM] & CIU2_EN_xx_yy_RML[TIM] */
+ uint64_t reserved_25_27 : 3;
+ uint64_t zip : 1; /**< ZIP interrupt source
+ CIU2_RAW_RML[ZIP] & CIU2_EN_xx_yy_RML[ZIP] */
+ uint64_t reserved_17_23 : 7;
+ uint64_t sso : 1; /**< SSO err interrupt source
+ CIU2_RAW_RML[SSO] & CIU2_EN_xx_yy_RML[SSO] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pko : 1; /**< PKO interrupt source
+ CIU2_RAW_RML[PKO] & CIU2_EN_xx_yy_RML[PKO] */
+ uint64_t pip : 1; /**< PIP interrupt source
+ CIU2_RAW_RML[PIP] & CIU2_EN_xx_yy_RML[PIP] */
+ uint64_t ipd : 1; /**< IPD interrupt source
+ CIU2_RAW_RML[IPD] & CIU2_EN_xx_yy_RML[IPD] */
+ uint64_t fpa : 1; /**< FPA interrupt source
+ CIU2_RAW_RML[FPA] & CIU2_EN_xx_yy_RML[FPA] */
+ uint64_t reserved_1_3 : 3;
+ uint64_t iob : 1; /**< IOB interrupt source
+ CIU2_RAW_RML[IOB] & CIU2_EN_xx_yy_RML[IOB] */
+#else
+ uint64_t iob : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t fpa : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t sso : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t zip : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfa : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t l2c : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t trace : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip4_rml cvmx_ciu2_src_ppx_ip4_rml_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip4_wdog
+ */
+union cvmx_ciu2_src_ppx_ip4_wdog {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip4_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wdog : 32; /**< 32 watchdog interrupts source
+ CIU2_RAW_WDOG & CIU2_EN_xx_yy_WDOG */
+#else
+ uint64_t wdog : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip4_wdog_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip4_wdog_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip4_wdog cvmx_ciu2_src_ppx_ip4_wdog_t;
+
+/**
+ * cvmx_ciu2_src_pp#_ip4_wrkq
+ */
+union cvmx_ciu2_src_ppx_ip4_wrkq {
+ uint64_t u64;
+ struct cvmx_ciu2_src_ppx_ip4_wrkq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t workq : 64; /**< 64 work queue intr source,
+ CIU2_RAW_WRKQ & CIU2_EN_xx_yy_WRKQ */
+#else
+ uint64_t workq : 64;
+#endif
+ } s;
+ struct cvmx_ciu2_src_ppx_ip4_wrkq_s cn68xx;
+ struct cvmx_ciu2_src_ppx_ip4_wrkq_s cn68xxp1;
+};
+typedef union cvmx_ciu2_src_ppx_ip4_wrkq cvmx_ciu2_src_ppx_ip4_wrkq_t;
+
+/**
+ * cvmx_ciu2_sum_io#_int
+ */
+union cvmx_ciu2_sum_iox_int {
+ uint64_t u64;
+ struct cvmx_ciu2_sum_iox_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mbox : 4; /**< MBOX interrupt summary
+ Direct connect to CIU2_SRC_*_MBOX[MBOX]
+ See CIU_MBOX_SET/CLR / CIU2_SRC_*_MBOX */
+ uint64_t reserved_8_59 : 52;
+ uint64_t gpio : 1; /**< GPIO interrupt summary,
+ Report ORed result of CIU2_SRC_*_GPIO[63:0]
+ See CIU2_RAW_GPIO / CIU2_SRC_*_GPIO */
+ uint64_t pkt : 1; /**< Packet I/O interrupt summary
+ Report ORed result of CIU2_SRC_*_PKT[63:0]
+ See CIU2_RAW_PKT / CIU2_SRC_*_PKT */
+ uint64_t mem : 1; /**< MEM interrupt Summary
+ Report ORed result of CIU2_SRC_*_MEM[63:0]
+ See CIU2_RAW_MEM / CIU2_SRC_*_MEM */
+ uint64_t io : 1; /**< I/O interrupt summary
+ Report ORed result of CIU2_SRC_*_IO[63:0]
+ See CIU2_RAW_IO / CIU2_SRC_*_IO */
+ uint64_t mio : 1; /**< MIO interrupt summary
+ Report ORed result of CIU2_SRC_*_MIO[63:0]
+ See CIU2_RAW_MIO / CIU2_SRC_*_MIO */
+ uint64_t rml : 1; /**< RML Interrupt
+ Report ORed result of CIU2_SRC_*_RML[63:0]
+ See CIU2_RAW_RML / CIU2_SRC_*_RML */
+ uint64_t wdog : 1; /**< WDOG summary bit
+ Report ORed result of CIU2_SRC_*_WDOG[63:0]
+ See CIU2_RAW_WDOG / CIU2_SRC_*_WDOG
+ This read-only bit reads as a one whenever
+ CIU2_RAW_WDOG bit is set and corresponding
+ enable bit in CIU2_EN_PPx_IPy_WDOG or
+ CIU2_EN_IOx_INT_WDOG is set, where x and y are
+ the same x and y in the CIU2_SUM_PPx_IPy or
+ CIU2_SUM_IOx_INT registers.
+ Alternatively, the CIU2_SRC_PPx_IPy_WDOG and
+ CIU2_SRC_IOx_INT_WDOG registers can be used. */
+ uint64_t workq : 1; /**< 64 work queue interrupts
+ Report ORed result of CIU2_SRC_*_WRKQ[63:0]
+ See CIU2_RAW_WRKQ / CIU2_SRC_*_WRKQ
+ See SSO_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the SSO. */
+#else
+ uint64_t workq : 1;
+ uint64_t wdog : 1;
+ uint64_t rml : 1;
+ uint64_t mio : 1;
+ uint64_t io : 1;
+ uint64_t mem : 1;
+ uint64_t pkt : 1;
+ uint64_t gpio : 1;
+ uint64_t reserved_8_59 : 52;
+ uint64_t mbox : 4;
+#endif
+ } s;
+ struct cvmx_ciu2_sum_iox_int_s cn68xx;
+ struct cvmx_ciu2_sum_iox_int_s cn68xxp1;
+};
+typedef union cvmx_ciu2_sum_iox_int cvmx_ciu2_sum_iox_int_t;
+
+/**
+ * cvmx_ciu2_sum_pp#_ip2
+ */
+union cvmx_ciu2_sum_ppx_ip2 {
+ uint64_t u64;
+ struct cvmx_ciu2_sum_ppx_ip2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mbox : 4; /**< MBOX interrupt summary
+ Direct connect to CIU2_SRC_*_MBOX[MBOX]
+ See CIU_MBOX_SET/CLR / CIU2_SRC_*_MBOX */
+ uint64_t reserved_8_59 : 52;
+ uint64_t gpio : 1; /**< GPIO interrupt summary,
+ Report ORed result of CIU2_SRC_*_GPIO[63:0]
+ See CIU2_RAW_GPIO / CIU2_SRC_*_GPIO */
+ uint64_t pkt : 1; /**< Packet I/O interrupt summary
+ Report ORed result of CIU2_SRC_*_PKT[63:0]
+ See CIU2_RAW_PKT / CIU2_SRC_*_PKT */
+ uint64_t mem : 1; /**< MEM interrupt Summary
+ Report ORed result of CIU2_SRC_*_MEM[63:0]
+ See CIU2_RAW_MEM / CIU2_SRC_*_MEM */
+ uint64_t io : 1; /**< I/O interrupt summary
+ Report ORed result of CIU2_SRC_*_IO[63:0]
+ See CIU2_RAW_IO / CIU2_SRC_*_IO */
+ uint64_t mio : 1; /**< MIO interrupt summary
+ Report ORed result of CIU2_SRC_*_MIO[63:0]
+ See CIU2_RAW_MIO / CIU2_SRC_*_MIO */
+ uint64_t rml : 1; /**< RML Interrupt
+ Report ORed result of CIU2_SRC_*_RML[63:0]
+ See CIU2_RAW_RML / CIU2_SRC_*_RML */
+ uint64_t wdog : 1; /**< WDOG summary bit
+ Report ORed result of CIU2_SRC_*_WDOG[63:0]
+ See CIU2_RAW_WDOG / CIU2_SRC_*_WDOG
+ This read-only bit reads as a one whenever
+ CIU2_RAW_WDOG bit is set and corresponding
+ enable bit in CIU2_EN_PPx_IPy_WDOG or
+ CIU2_EN_IOx_INT_WDOG is set, where x and y are
+ the same x and y in the CIU2_SUM_PPx_IPy or
+ CIU2_SUM_IOx_INT registers.
+ Alternatively, the CIU2_SRC_PPx_IPy_WDOG and
+ CIU2_SRC_IOx_INT_WDOG registers can be used. */
+ uint64_t workq : 1; /**< 64 work queue interrupts
+ Report ORed result of CIU2_SRC_*_WRKQ[63:0]
+ See CIU2_RAW_WRKQ / CIU2_SRC_*_WRKQ
+ See SSO_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the SSO. */
+#else
+ uint64_t workq : 1;
+ uint64_t wdog : 1;
+ uint64_t rml : 1;
+ uint64_t mio : 1;
+ uint64_t io : 1;
+ uint64_t mem : 1;
+ uint64_t pkt : 1;
+ uint64_t gpio : 1;
+ uint64_t reserved_8_59 : 52;
+ uint64_t mbox : 4;
+#endif
+ } s;
+ struct cvmx_ciu2_sum_ppx_ip2_s cn68xx;
+ struct cvmx_ciu2_sum_ppx_ip2_s cn68xxp1;
+};
+typedef union cvmx_ciu2_sum_ppx_ip2 cvmx_ciu2_sum_ppx_ip2_t;
+
+/**
+ * cvmx_ciu2_sum_pp#_ip3
+ */
+union cvmx_ciu2_sum_ppx_ip3 {
+ uint64_t u64;
+ struct cvmx_ciu2_sum_ppx_ip3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mbox : 4; /**< MBOX interrupt summary
+ Direct connect to CIU2_SRC_*_MBOX[MBOX]
+ See CIU_MBOX_SET/CLR / CIU2_SRC_*_MBOX */
+ uint64_t reserved_8_59 : 52;
+ uint64_t gpio : 1; /**< GPIO interrupt summary,
+ Report ORed result of CIU2_SRC_*_GPIO[63:0]
+ See CIU2_RAW_GPIO / CIU2_SRC_*_GPIO */
+ uint64_t pkt : 1; /**< Packet I/O interrupt summary
+ Report ORed result of CIU2_SRC_*_PKT[63:0]
+ See CIU2_RAW_PKT / CIU2_SRC_*_PKT */
+ uint64_t mem : 1; /**< MEM interrupt Summary
+ Report ORed result of CIU2_SRC_*_MEM[63:0]
+ See CIU2_RAW_MEM / CIU2_SRC_*_MEM */
+ uint64_t io : 1; /**< I/O interrupt summary
+ Report ORed result of CIU2_SRC_*_IO[63:0]
+ See CIU2_RAW_IO / CIU2_SRC_*_IO */
+ uint64_t mio : 1; /**< MIO interrupt summary
+ Report ORed result of CIU2_SRC_*_MIO[63:0]
+ See CIU2_RAW_MIO / CIU2_SRC_*_MIO */
+ uint64_t rml : 1; /**< RML Interrupt
+ Report ORed result of CIU2_SRC_*_RML[63:0]
+ See CIU2_RAW_RML / CIU2_SRC_*_RML */
+ uint64_t wdog : 1; /**< WDOG summary bit
+ Report ORed result of CIU2_SRC_*_WDOG[63:0]
+ See CIU2_RAW_WDOG / CIU2_SRC_*_WDOG
+ This read-only bit reads as a one whenever
+ CIU2_RAW_WDOG bit is set and corresponding
+ enable bit in CIU2_EN_PPx_IPy_WDOG or
+ CIU2_EN_IOx_INT_WDOG is set, where x and y are
+ the same x and y in the CIU2_SUM_PPx_IPy or
+ CIU2_SUM_IOx_INT registers.
+ Alternatively, the CIU2_SRC_PPx_IPy_WDOG and
+ CIU2_SRC_IOx_INT_WDOG registers can be used. */
+ uint64_t workq : 1; /**< 64 work queue interrupts
+ Report ORed result of CIU2_SRC_*_WRKQ[63:0]
+ See CIU2_RAW_WRKQ / CIU2_SRC_*_WRKQ
+ See SSO_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the SSO. */
+#else
+ uint64_t workq : 1;
+ uint64_t wdog : 1;
+ uint64_t rml : 1;
+ uint64_t mio : 1;
+ uint64_t io : 1;
+ uint64_t mem : 1;
+ uint64_t pkt : 1;
+ uint64_t gpio : 1;
+ uint64_t reserved_8_59 : 52;
+ uint64_t mbox : 4;
+#endif
+ } s;
+ struct cvmx_ciu2_sum_ppx_ip3_s cn68xx;
+ struct cvmx_ciu2_sum_ppx_ip3_s cn68xxp1;
+};
+typedef union cvmx_ciu2_sum_ppx_ip3 cvmx_ciu2_sum_ppx_ip3_t;
+
+/**
+ * cvmx_ciu2_sum_pp#_ip4
+ */
+union cvmx_ciu2_sum_ppx_ip4 {
+ uint64_t u64;
+ struct cvmx_ciu2_sum_ppx_ip4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mbox : 4; /**< MBOX interrupt summary
+ Direct connect to CIU2_SRC_*_MBOX[MBOX]
+ See CIU_MBOX_SET/CLR / CIU2_SRC_*_MBOX */
+ uint64_t reserved_8_59 : 52;
+ uint64_t gpio : 1; /**< GPIO interrupt summary,
+ Report ORed result of CIU2_SRC_*_GPIO[63:0]
+ See CIU2_RAW_GPIO / CIU2_SRC_*_GPIO */
+ uint64_t pkt : 1; /**< Packet I/O interrupt summary
+ Report ORed result of CIU2_SRC_*_PKT[63:0]
+ See CIU2_RAW_PKT / CIU2_SRC_*_PKT */
+ uint64_t mem : 1; /**< MEM interrupt Summary
+ Report ORed result of CIU2_SRC_*_MEM[63:0]
+ See CIU2_RAW_MEM / CIU2_SRC_*_MEM */
+ uint64_t io : 1; /**< I/O interrupt summary
+ Report ORed result of CIU2_SRC_*_IO[63:0]
+ See CIU2_RAW_IO / CIU2_SRC_*_IO */
+ uint64_t mio : 1; /**< MIO interrupt summary
+ Report ORed result of CIU2_SRC_*_MIO[63:0]
+ See CIU2_RAW_MIO / CIU2_SRC_*_MIO */
+ uint64_t rml : 1; /**< RML Interrupt
+ Report ORed result of CIU2_SRC_*_RML[63:0]
+ See CIU2_RAW_RML / CIU2_SRC_*_RML */
+ uint64_t wdog : 1; /**< WDOG summary bit
+ Report ORed result of CIU2_SRC_*_WDOG[63:0]
+ See CIU2_RAW_WDOG / CIU2_SRC_*_WDOG
+ This read-only bit reads as a one whenever
+ CIU2_RAW_WDOG bit is set and corresponding
+ enable bit in CIU2_EN_PPx_IPy_WDOG or
+ CIU2_EN_IOx_INT_WDOG is set, where x and y are
+ the same x and y in the CIU2_SUM_PPx_IPy or
+ CIU2_SUM_IOx_INT registers.
+ Alternatively, the CIU2_SRC_PPx_IPy_WDOG and
+ CIU2_SRC_IOx_INT_WDOG registers can be used. */
+ uint64_t workq : 1; /**< 64 work queue interrupts
+ Report ORed result of CIU2_SRC_*_WRKQ[63:0]
+ See CIU2_RAW_WRKQ / CIU2_SRC_*_WRKQ
+ See SSO_WQ_INT[WQ_INT]
+ 1 bit/group. A copy of the R/W1C bit in the SSO. */
+#else
+ uint64_t workq : 1;
+ uint64_t wdog : 1;
+ uint64_t rml : 1;
+ uint64_t mio : 1;
+ uint64_t io : 1;
+ uint64_t mem : 1;
+ uint64_t pkt : 1;
+ uint64_t gpio : 1;
+ uint64_t reserved_8_59 : 52;
+ uint64_t mbox : 4;
+#endif
+ } s;
+ struct cvmx_ciu2_sum_ppx_ip4_s cn68xx;
+ struct cvmx_ciu2_sum_ppx_ip4_s cn68xxp1;
+};
+typedef union cvmx_ciu2_sum_ppx_ip4 cvmx_ciu2_sum_ppx_ip4_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ciu2-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
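
These generated unions are decoded by assigning a raw 64-bit CSR value to the u64 member and reading the named bitfields. A minimal sketch, assuming the raw value was obtained with cvmx_read_csr() and the register's address macro (defined elsewhere in this header, outside the hunks shown):

    #include "cvmx.h"
    #include "cvmx-ciu2-defs.h"

    /* Decode an already-read CIU2_SUM_PP#_IP2 value using the union above. */
    static void decode_ciu2_sum(uint64_t raw)
    {
        cvmx_ciu2_sum_ppx_ip2_t sum;
        sum.u64 = raw;              /* overlay the raw CSR value */
        if (sum.s.workq)
            cvmx_dprintf("work-queue interrupt pending\n");
        if (sum.s.wdog)
            cvmx_dprintf("watchdog interrupt pending\n");
        if (sum.s.rml)
            cvmx_dprintf("RML interrupt pending\n");
        if (sum.s.mbox)
            cvmx_dprintf("mailbox interrupts pending: 0x%x\n", (unsigned)sum.s.mbox);
    }
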
Added: trunk/sys/contrib/octeon-sdk/cvmx-clock.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-clock.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-clock.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,143 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to Core, IO and DDR Clock.
+ *
+ * <hr>$Revision: 45089 $<hr>
+*/
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-dbg-defs.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#endif
+#include "cvmx.h"
+#endif
+
+#ifndef CVMX_BUILD_FOR_UBOOT
+static uint64_t rate_eclk = 0;
+static uint64_t rate_sclk = 0;
+static uint64_t rate_dclk = 0;
+#endif
+
+/**
+ * Get clock rate based on the clock type.
+ *
+ * @param clock - Enumeration of the clock type.
+ * @return - The clock rate, in Hz.
+ */
+uint64_t cvmx_clock_get_rate(cvmx_clock_t clock)
+{
+ const uint64_t REF_CLOCK = 50000000;
+
+#ifdef CVMX_BUILD_FOR_UBOOT
+ uint64_t rate_eclk = 0;
+ uint64_t rate_sclk = 0;
+ uint64_t rate_dclk = 0;
+#endif
+
+ if (cvmx_unlikely(!rate_eclk))
+ {
+ /* Note: The order of these checks is important.
+ ** octeon_has_feature(OCTEON_FEATURE_PCIE) is true for both 6XXX
+ ** and 52XX/56XX, so OCTEON_FEATURE_NPEI _must_ be checked first */
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_npei_dbg_data_t npei_dbg_data;
+ npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ rate_eclk = REF_CLOCK * npei_dbg_data.s.c_mul;
+ rate_sclk = rate_eclk;
+ }
+ else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ {
+ cvmx_mio_rst_boot_t mio_rst_boot;
+ mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+ rate_eclk = REF_CLOCK * mio_rst_boot.s.c_mul;
+ rate_sclk = REF_CLOCK * mio_rst_boot.s.pnr_mul;
+ }
+ else
+ {
+ cvmx_dbg_data_t dbg_data;
+ dbg_data.u64 = cvmx_read_csr(CVMX_DBG_DATA);
+ rate_eclk = REF_CLOCK * dbg_data.s.c_mul;
+ rate_sclk = rate_eclk;
+ }
+ }
+
+ switch (clock)
+ {
+ case CVMX_CLOCK_SCLK:
+ case CVMX_CLOCK_TIM:
+ case CVMX_CLOCK_IPD:
+ return rate_sclk;
+
+ case CVMX_CLOCK_RCLK:
+ case CVMX_CLOCK_CORE:
+ return rate_eclk;
+
+ case CVMX_CLOCK_DDR:
+#if !defined(CVMX_BUILD_FOR_LINUX_HOST) && !defined(CVMX_BUILD_FOR_TOOLCHAIN)
+ if (cvmx_unlikely(!rate_dclk))
+ rate_dclk = cvmx_sysinfo_get()->dram_data_rate_hz;
+#endif
+ return rate_dclk;
+ }
+
+ cvmx_dprintf("cvmx_clock_get_rate: Unknown clock type\n");
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_clock_get_rate);
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-clock.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
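
As a usage sketch for the function above (the helper is hypothetical, not part of the SDK), the returned rate converts cycle deltas into wall-clock time:

    /* Hypothetical helper: convert a core-clock cycle delta to microseconds.
       Assumes cvmx_clock_get_rate() returned a non-zero rate. */
    static uint64_t cycles_to_usec(uint64_t cycles)
    {
        uint64_t rate = cvmx_clock_get_rate(CVMX_CLOCK_CORE);   /* Hz */
        return (cycles * 1000000ull) / rate;
    }
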
Added: trunk/sys/contrib/octeon-sdk/cvmx-clock.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-clock.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-clock.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,140 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to Core, IO and DDR Clock.
+ *
+ * <hr>$Revision: 45089 $<hr>
+*/
+
+#ifndef __CVMX_CLOCK_H__
+#define __CVMX_CLOCK_H__
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-lmcx-defs.h>
+#else
+#include "cvmx.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Enumeration of different Clocks in Octeon.
+ */
+typedef enum{
+ CVMX_CLOCK_RCLK, /**< Clock used by cores, coherent bus and L2 cache. */
+ CVMX_CLOCK_SCLK, /**< Clock used by IO blocks. */
+ CVMX_CLOCK_DDR, /**< Clock used by DRAM */
+ CVMX_CLOCK_CORE, /**< Alias for CVMX_CLOCK_RCLK */
+ CVMX_CLOCK_TIM, /**< Alias for CVMX_CLOCK_SCLK */
+ CVMX_CLOCK_IPD, /**< Alias for CVMX_CLOCK_SCLK */
+} cvmx_clock_t;
+
+/**
+ * Get cycle count based on the clock type.
+ *
+ * @param clock - Enumeration of the clock type.
+ * @return - The number of cycles counted so far.
+ */
+static inline uint64_t cvmx_clock_get_count(cvmx_clock_t clock)
+{
+ switch(clock)
+ {
+ case CVMX_CLOCK_RCLK:
+ case CVMX_CLOCK_CORE:
+ {
+#ifndef __mips__
+ return cvmx_read_csr(CVMX_IPD_CLK_COUNT);
+#elif defined(CVMX_ABI_O32)
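+ /* The O32 ABI cannot return a 64-bit value in a single register, so
+ read the 64-bit cycle count with rdhwr, split it into two 32-bit
+ halves in the asm below, and reassemble them in C. */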
+ uint32_t tmp_low, tmp_hi;
+
+ asm volatile (
+ " .set push \n"
+ " .set mips64r2 \n"
+ " .set noreorder \n"
+ " rdhwr %[tmpl], $31 \n"
+ " dsrl %[tmph], %[tmpl], 32 \n"
+ " sll %[tmpl], 0 \n"
+ " sll %[tmph], 0 \n"
+ " .set pop \n"
+ : [tmpl] "=&r" (tmp_low), [tmph] "=&r" (tmp_hi) : );
+
+ return(((uint64_t)tmp_hi << 32) + tmp_low);
+#else
+ uint64_t cycle;
+ CVMX_RDHWR(cycle, 31);
+ return(cycle);
+#endif
+ }
+
+ case CVMX_CLOCK_SCLK:
+ case CVMX_CLOCK_TIM:
+ case CVMX_CLOCK_IPD:
+ return cvmx_read_csr(CVMX_IPD_CLK_COUNT);
+
+ case CVMX_CLOCK_DDR:
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ return cvmx_read_csr(CVMX_LMCX_DCLK_CNT(0));
+ else
+ return ((cvmx_read_csr(CVMX_LMCX_DCLK_CNT_HI(0)) << 32) | cvmx_read_csr(CVMX_LMCX_DCLK_CNT_LO(0)));
+ }
+
+ cvmx_dprintf("cvmx_clock_get_count: Unknown clock type\n");
+ return 0;
+}
+
+extern uint64_t cvmx_clock_get_rate(cvmx_clock_t clock);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_CLOCK_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-clock.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
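
The two primitives above combine naturally into a busy-wait delay; a minimal sketch, assuming it runs on an Octeon core (the helper name is hypothetical):

    /* Hypothetical helper: spin the calling core for 'usec' microseconds
       using cvmx_clock_get_count() and cvmx_clock_get_rate(). */
    static void delay_usec(uint64_t usec)
    {
        uint64_t rate = cvmx_clock_get_rate(CVMX_CLOCK_CORE);
        uint64_t done = cvmx_clock_get_count(CVMX_CLOCK_CORE)
                        + (usec * rate) / 1000000ull;
        while (cvmx_clock_get_count(CVMX_CLOCK_CORE) < done)
            ;   /* spin */
    }
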
Added: trunk/sys/contrib/octeon-sdk/cvmx-cmd-queue.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-cmd-queue.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-cmd-queue.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,340 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-dpi-defs.h>
+#include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-cmd-queue.h>
+#else
+#include "cvmx.h"
+#include "cvmx-bootmem.h"
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "cvmx-config.h"
+#endif
+#include "cvmx-fpa.h"
+#include "cvmx-cmd-queue.h"
+#endif
+
+
+/**
+ * Applications use this pointer to access the global queue
+ * state. It points to a bootmem named block.
+ */
+CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(__cvmx_cmd_queue_state_ptr);
+#endif
+
+/**
+ * @INTERNAL
+ * Initialize the Global queue state pointer.
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
+{
+ char *alloc_name = "cvmx_cmd_queues";
+#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
+ extern uint64_t octeon_reserve32_memory;
+#endif
+
+ if (cvmx_likely(__cvmx_cmd_queue_state_ptr))
+ return CVMX_CMD_QUEUE_SUCCESS;
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
+ if (octeon_reserve32_memory)
+ __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
+ octeon_reserve32_memory,
+ octeon_reserve32_memory + (CONFIG_CAVIUM_RESERVE32<<20) - 1,
+ 128, alloc_name);
+ else
+#endif
+ __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr), 128, alloc_name);
+#else
+ __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr), 128, alloc_name);
+#endif
+ if (__cvmx_cmd_queue_state_ptr)
+ memset(__cvmx_cmd_queue_state_ptr, 0, sizeof(*__cvmx_cmd_queue_state_ptr));
+ else
+ {
+ const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(alloc_name);
+ if (block_desc)
+ __cvmx_cmd_queue_state_ptr = cvmx_phys_to_ptr(block_desc->base_addr);
+ else
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n", alloc_name);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ }
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @param queue_id Hardware command queue to initialize.
+ * @param max_depth Maximum outstanding commands that can be queued.
+ * @param fpa_pool FPA pool the command queues should come from.
+ * @param pool_size Size of each buffer in the FPA pool (bytes)
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth, int fpa_pool, int pool_size)
+{
+ __cvmx_cmd_queue_state_t *qstate;
+ cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
+ if (result != CVMX_CMD_QUEUE_SUCCESS)
+ return result;
+
+ qstate = __cvmx_cmd_queue_get_state(queue_id);
+ if (qstate == NULL)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ /* We artificially limit max_depth to 1<<20 words. It is an arbitrary limit */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH)
+ {
+ if ((max_depth < 0) || (max_depth > 1<<20))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ else if (max_depth != 0)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ if ((fpa_pool < 0) || (fpa_pool > 7))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ if ((pool_size < 128) || (pool_size > 65536))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ /* See if someone else has already initialized the queue */
+ if (qstate->base_ptr_div128)
+ {
+ if (max_depth != (int)qstate->max_depth)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different max_depth (%d).\n", (int)qstate->max_depth);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ if (fpa_pool != qstate->fpa_pool)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different FPA pool (%u).\n", qstate->fpa_pool);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ if ((pool_size>>3)-1 != qstate->pool_size_m1)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different FPA pool size (%u).\n", (qstate->pool_size_m1+1)<<3);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ CVMX_SYNCWS;
+ return CVMX_CMD_QUEUE_ALREADY_SETUP;
+ }
+ else
+ {
+ cvmx_fpa_ctl_status_t status;
+ void *buffer;
+
+ status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+ if (!status.s.enb)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: FPA is not enabled.\n");
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ buffer = cvmx_fpa_alloc(fpa_pool);
+ if (buffer == NULL)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Unable to allocate initial buffer.\n");
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+
+ memset(qstate, 0, sizeof(*qstate));
+ qstate->max_depth = max_depth;
+ qstate->fpa_pool = fpa_pool;
+ qstate->pool_size_m1 = (pool_size>>3)-1;
+ qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
+ /* We zeroed the now serving field so we need to also zero the ticket */
+ __cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
+ CVMX_SYNCWS;
+ return CVMX_CMD_QUEUE_SUCCESS;
+ }
+}
+
+
+/**
+ * Shut down a queue and free its command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @param queue_id Queue to shutdown
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+ if (qptr == NULL)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to get queue information.\n");
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ if (cvmx_cmd_queue_length(queue_id) > 0)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still has data in it.\n");
+ return CVMX_CMD_QUEUE_FULL;
+ }
+
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+ if (qptr->base_ptr_div128)
+ {
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7), qptr->fpa_pool, 0);
+ qptr->base_ptr_div128 = 0;
+ }
+ __cvmx_cmd_queue_unlock(qptr);
+
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
+ *
+ * @param queue_id Hardware command queue to query
+ *
+ * @return Number of outstanding commands
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
+{
+ if (CVMX_ENABLE_PARAMETER_CHECKING)
+ {
+ if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ /* The cast is here so gcc will check that all values in the
+ cvmx_cmd_queue_id_t enumeration are handled */
+ switch ((cvmx_cmd_queue_id_t)(queue_id & 0xff0000))
+ {
+ case CVMX_CMD_QUEUE_PKO_BASE:
+ /* FIXME: Need atomic lock on CVMX_PKO_REG_READ_IDX. Right now we
+ are normally called with the queue lock, so that is a SLIGHT
+ amount of protection */
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ {
+ cvmx_pko_mem_debug9_t debug9;
+ debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
+ return debug9.cn38xx.doorbell;
+ }
+ else
+ {
+ cvmx_pko_mem_debug8_t debug8;
+ debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ return debug8.cn68xx.doorbell;
+ else
+ return debug8.cn58xx.doorbell;
+ }
+ case CVMX_CMD_QUEUE_ZIP:
+ case CVMX_CMD_QUEUE_DFA:
+ case CVMX_CMD_QUEUE_RAID:
+ // FIXME: Implement other lengths
+ return 0;
+ case CVMX_CMD_QUEUE_DMA_BASE:
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_npei_dmax_counts_t dmax_counts;
+ dmax_counts.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS(queue_id & 0x7));
+ return dmax_counts.s.dbell;
+ }
+ else
+ {
+ cvmx_dpi_dmax_counts_t dmax_counts;
+ dmax_counts.u64 = cvmx_read_csr(CVMX_DPI_DMAX_COUNTS(queue_id & 0x7));
+ return dmax_counts.s.dbell;
+ }
+ case CVMX_CMD_QUEUE_END:
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+}
+
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routines access to the low-level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @param queue_id Command queue to query
+ *
+ * @return Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+ if (qptr && qptr->base_ptr_div128)
+ return cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
+ else
+ return NULL;
+}
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-cmd-queue.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
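
A hedged sketch of the initialization path above (the FPA pool number and buffer size are hypothetical; real values come from the application's cvmx-config settings):

    /* Set up DMA engine queue 0. Pool 3 and the 1024-byte buffer size are
       illustrative assumptions, not SDK defaults. */
    cvmx_cmd_queue_result_t r;
    r = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_DMA(0),
                                  0,      /* 0 = no max-depth limit */
                                  3,      /* hypothetical FPA pool */
                                  1024);  /* hypothetical buffer size, bytes */
    if (r != CVMX_CMD_QUEUE_SUCCESS && r != CVMX_CMD_QUEUE_ALREADY_SETUP)
        cvmx_dprintf("command queue init failed: %d\n", r);
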
Added: trunk/sys/contrib/octeon-sdk/cvmx-cmd-queue.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-cmd-queue.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-cmd-queue.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,615 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ *
+ * The common command queue infrastructure abstracts out the
+ * software necessary for adding to Octeon's chained queue
+ * structures. These structures are used for commands to the
+ * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
+ * hardware unit takes commands and CSRs of different types,
+ * they all use basic linked command buffers to store the
+ * pending request. In general, users of the CVMX API don't
+ * call cvmx-cmd-queue functions directly. Instead the hardware
+ * unit specific wrapper should be used. The wrappers perform
+ * unit specific validation and CSR writes to submit the
+ * commands.
+ *
+ * Even though most software will never directly interact with
+ * cvmx-cmd-queue, knowledge of its internal workings can help
+ * in diagnosing performance problems and help with debugging.
+ *
+ * Command queue pointers are stored in a global named block
+ * called "cvmx_cmd_queues". Except for the PKO queues, each
+ * hardware queue is stored in its own cache line to reduce SMP
+ * contention on spin locks. The PKO queues are stored such that
+ * queues whose numbers differ by 16 are adjacent in memory. This scheme
+ * keeps queues in separate cache lines when there are
+ * few queues per port. With 16 queues per port,
+ * the first queue for each port is in the same cache area. The
+ * second queues for each port are in another area, etc. This
+ * allows software to implement very efficient lockless PKO with
+ * 16 queues per port using a minimum of cache lines per core.
+ * All queues for a given core will be isolated in the same
+ * cache area.
+ *
+ * In addition to the memory pointer layout, cvmx-cmd-queue
+ * provides an optimized fair ll/sc locking mechanism for the
+ * queues. The lock uses a "ticket / now serving" model to
+ * maintain fair order on contended locks. In addition, it uses
+ * predicted locking time to limit cache contention. When a core
+ * knows it must wait in line for a lock, it spins on the
+ * internal cycle counter so that it generates no bus
+ * traffic while waiting.
+ *
+ * <hr> $Revision: 70030 $ <hr>
+ */
+
+#ifndef __CVMX_CMD_QUEUE_H__
+#define __CVMX_CMD_QUEUE_H__
+
+#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#endif
+
+#include "cvmx-fpa.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * By default we disable the max depth support. Most programs
+ * don't use it and it slows down the command queue processing
+ * significantly.
+ */
+#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
+#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
+#endif
+
+/**
+ * Enumeration representing all hardware blocks that use command
+ * queues. Each hardware block has up to 65536 sub identifiers for
+ * multiple command queues. Not all chips support all hardware
+ * units.
+ */
+typedef enum
+{
+ CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
+#define CVMX_CMD_QUEUE_PKO(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
+ CVMX_CMD_QUEUE_ZIP = 0x10000,
+#define CVMX_CMD_QUEUE_ZIP_QUE(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_ZIP + (0xffff&(queue))))
+ CVMX_CMD_QUEUE_DFA = 0x20000,
+ CVMX_CMD_QUEUE_RAID = 0x30000,
+ CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
+#define CVMX_CMD_QUEUE_DMA(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
+ CVMX_CMD_QUEUE_END = 0x50000,
+} cvmx_cmd_queue_id_t;
+
+/**
+ * Command write operations can fail if the command queue needs
+ * a new buffer and the associated FPA pool is empty. It can also
+ * fail if the number of queued command words reaches the maximum
+ * set at initialization.
+ */
+typedef enum
+{
+ CVMX_CMD_QUEUE_SUCCESS = 0,
+ CVMX_CMD_QUEUE_NO_MEMORY = -1,
+ CVMX_CMD_QUEUE_FULL = -2,
+ CVMX_CMD_QUEUE_INVALID_PARAM = -3,
+ CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
+} cvmx_cmd_queue_result_t;
+
+typedef struct
+{
+ uint8_t now_serving; /**< You hold the lock when this equals your ticket */
+ uint64_t unused1 : 24;
+ uint32_t max_depth; /**< Maximum outstanding command words */
+ uint64_t fpa_pool : 3; /**< FPA pool buffers come from */
+ uint64_t base_ptr_div128: 29; /**< Top of command buffer pointer shifted right by 7 */
+ uint64_t unused2 : 6;
+ uint64_t pool_size_m1 : 13; /**< FPA buffer size in 64bit words minus 1 */
+ uint64_t index : 13; /**< Number of commands already used in buffer */
+} __cvmx_cmd_queue_state_t;
+
+/**
+ * This structure contains the global state of all command queues.
+ * It is stored in a bootmem named block and shared by all
+ * applications running on Octeon. Tickets are stored in a different
+ * cache line than the queue information to reduce contention on the
+ * ll/sc used to get a ticket. Otherwise, updates to the queue state
+ * would cause the ll/sc to fail quite often.
+ */
+typedef struct
+{
+ uint64_t ticket[(CVMX_CMD_QUEUE_END>>16) * 256];
+ __cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END>>16) * 256];
+} __cvmx_cmd_queue_all_state_t;
+
+extern CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @param queue_id Hardware command queue to initialize.
+ * @param max_depth Maximum outstanding commands that can be queued.
+ * @param fpa_pool FPA pool the command queues should come from.
+ * @param pool_size Size of each buffer in the FPA pool (bytes)
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth, int fpa_pool, int pool_size);
+
+/**
+ * Shut down a queue and free its command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @param queue_id Queue to shutdown
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
+ *
+ * @param queue_id Hardware command queue to query
+ *
+ * @return Number of outstanding commands
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routines access to the low-level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @param queue_id Command queue to query
+ *
+ * @return Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * @INTERNAL
+ * Get the index into the state arrays for the supplied queue id.
+ *
+ * @param queue_id Queue ID to get an index for
+ *
+ * @return Index into the state arrays
+ */
+static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
+{
+ /* Warning: This code currently only works with devices that have 256 queues
+ or fewer. Devices with more than 16 queues are laid out in memory to allow
+ cores quick access to every 16th queue. This reduces cache thrashing
+ when you are running 16 queues per port to support lockless operation */
+ int unit = queue_id>>16;
+ int q = (queue_id >> 4) & 0xf;
+ int core = queue_id & 0xf;
+ return unit*256 + core*16 + q;
+}
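+/* Worked example (illustrative): CVMX_CMD_QUEUE_PKO(18) is queue_id
+   0x00012, so unit = 0, q = (0x12 >> 4) & 0xf = 1, core = 0x12 & 0xf = 2,
+   giving index 0*256 + 2*16 + 1 = 33. PKO queue 2 maps to index 32, so
+   queues whose numbers differ by 16 end up adjacent, as described above. */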
+
+
+/**
+ * @INTERNAL
+ * Lock the supplied queue so nobody else is updating it at the same
+ * time as us.
+ *
+ * @param queue_id Queue ID to lock
+ * @param qptr Pointer to the queue's global state
+ */
+static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id, __cvmx_cmd_queue_state_t *qptr)
+{
+ int tmp;
+ int my_ticket;
+ CVMX_PREFETCH(qptr, 0);
+ asm volatile (
+ ".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %[my_ticket], %[ticket_ptr]\n" /* Atomic add one to ticket_ptr */
+ "li %[ticket], 1\n" /* and store the original value */
+ "baddu %[ticket], %[my_ticket]\n" /* in my_ticket */
+ "sc %[ticket], %[ticket_ptr]\n"
+ "beqz %[ticket], 1b\n"
+ " nop\n"
+ "lbu %[ticket], %[now_serving]\n" /* Load the current now_serving ticket */
+ "2:\n"
+ "beq %[ticket], %[my_ticket], 4f\n" /* Jump out if now_serving == my_ticket */
+ " subu %[ticket], %[my_ticket], %[ticket]\n" /* Find out how many tickets are in front of me */
+ "subu %[ticket], 1\n" /* Use tickets in front of me minus one to delay */
+ "cins %[ticket], %[ticket], 5, 7\n" /* Delay will be ((tickets in front)-1)*32 loops */
+ "3:\n"
+ "bnez %[ticket], 3b\n" /* Loop here until our ticket might be up */
+ " subu %[ticket], 1\n"
+ "b 2b\n" /* Jump back up to check out ticket again */
+ " lbu %[ticket], %[now_serving]\n" /* Load the current now_serving ticket */
+ "4:\n"
+ ".set pop\n"
+ : [ticket_ptr] "=m" (__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+ [now_serving] "=m" (qptr->now_serving),
+ [ticket] "=r" (tmp),
+ [my_ticket] "=r" (my_ticket)
+ );
+}
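+/* For reference, the asm above implements this ticket-lock logic (plain-C
+   sketch only; in the real code the ll/sc pair makes the ticket increment
+   atomic, and spin_for is a stand-in for the cycle-count delay loop that
+   keeps waiting cores off the bus):
+
+       my_ticket = ticket[index];        // ll
+       ticket[index] = my_ticket + 1;    // baddu + sc, retried on failure
+       while (qptr->now_serving != my_ticket)
+           spin_for(32 * (uint8_t)(my_ticket - qptr->now_serving - 1));
+*/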
+
+
+/**
+ * @INTERNAL
+ * Unlock the queue, flushing all writes.
+ *
+ * @param qptr Queue to unlock
+ */
+static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
+{
+ uint8_t ns;
+
+ ns = qptr->now_serving + 1;
+ CVMX_SYNCWS; /* Order queue manipulation with respect to the unlock. */
+ qptr->now_serving = ns;
+ CVMX_SYNCWS; /* nudge out the unlock. */
+}
+
+
+/**
+ * @INTERNAL
+ * Get the queue state structure for the given queue id
+ *
+ * @param queue_id Queue id to get
+ *
+ * @return Queue structure or NULL on failure
+ */
+static inline __cvmx_cmd_queue_state_t *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
+{
+ if (CVMX_ENABLE_PARAMETER_CHECKING)
+ {
+ if (cvmx_unlikely(queue_id >= CVMX_CMD_QUEUE_END))
+ return NULL;
+ if (cvmx_unlikely((queue_id & 0xffff) >= 256))
+ return NULL;
+ }
+ return &__cvmx_cmd_queue_state_ptr->state[__cvmx_cmd_queue_get_index(queue_id)];
+}
+
+
+/**
+ * Write an arbitrary number of command words to a command queue.
+ * This is a generic function; the fixed-count variants
+ * (cvmx_cmd_queue_write2/write3) yield higher performance.
+ *
+ * @param queue_id Hardware command queue to write to
+ * @param use_locking
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
+ * @param cmd_count Number of command words to write
+ * @param cmds Array of commands to write
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t queue_id, int use_locking, int cmd_count, uint64_t *cmds)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+ if (CVMX_ENABLE_PARAMETER_CHECKING)
+ {
+ if (cvmx_unlikely(qptr == NULL))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ if (cvmx_unlikely((cmd_count < 1) || (cmd_count > 32)))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ if (cvmx_unlikely(cmds == NULL))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ /* Make sure nobody else is updating the same queue */
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+
+ /* If a max queue length was specified then make sure we don't
+ exceed it. If any part of the command would be below the limit
+ we allow it */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
+ {
+ if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
+ {
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_FULL;
+ }
+ }
+
+ /* Normally there is plenty of room in the current buffer for the command */
+ if (cvmx_likely(qptr->index + cmd_count < qptr->pool_size_m1))
+ {
+ uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
+ ptr += qptr->index;
+ qptr->index += cmd_count;
+ while (cmd_count--)
+ *ptr++ = *cmds++;
+ }
+ else
+ {
+ uint64_t *ptr;
+ int count;
+ /* We need a new command buffer. Fail if there isn't one available */
+ uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
+ if (cvmx_unlikely(new_buffer == NULL))
+ {
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
+ /* Figure out how many command words will fit in this buffer. One
+ location will be needed for the next buffer pointer */
+ count = qptr->pool_size_m1 - qptr->index;
+ ptr += qptr->index;
+ cmd_count-=count;
+ while (count--)
+ *ptr++ = *cmds++;
+ *ptr = cvmx_ptr_to_phys(new_buffer);
+ /* The current buffer is full and has a link to the next buffer. Time
+ to write the rest of the commands into the new buffer */
+ qptr->base_ptr_div128 = *ptr >> 7;
+ qptr->index = cmd_count;
+ ptr = new_buffer;
+ while (cmd_count--)
+ *ptr++ = *cmds++;
+ }
+
+ /* All updates are complete. Release the lock and return */
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
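+/* Illustrative caller (queue choice and command words are hypothetical;
+   each hardware block defines its own command format):
+
+       uint64_t cmd[3] = { header_word, buffer_ptr, length_word };
+       if (cvmx_cmd_queue_write(CVMX_CMD_QUEUE_RAID, 1, 3, cmd)
+           != CVMX_CMD_QUEUE_SUCCESS)
+           ; // handle a full queue or an empty FPA pool
+*/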
+
+
+/**
+ * Simple function to write two command words to a command
+ * queue.
+ *
+ * @param queue_id Hardware command queue to write to
+ * @param use_locking
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
+ * @param cmd1 Command
+ * @param cmd2 Command
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+ if (CVMX_ENABLE_PARAMETER_CHECKING)
+ {
+ if (cvmx_unlikely(qptr == NULL))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ /* Make sure nobody else is updating the same queue */
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+
+ /* If a max queue length was specified then make sure we don't
+ exceed it. If any part of the command would be below the limit
+ we allow it */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
+ {
+ if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
+ {
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_FULL;
+ }
+ }
+
+ /* Normally there is plenty of room in the current buffer for the command */
+ if (cvmx_likely(qptr->index + 2 < qptr->pool_size_m1))
+ {
+ uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
+ ptr += qptr->index;
+ qptr->index += 2;
+ ptr[0] = cmd1;
+ ptr[1] = cmd2;
+ }
+ else
+ {
+ uint64_t *ptr;
+ /* Figure out how many command words will fit in this buffer. One
+ location will be needed for the next buffer pointer */
+ int count = qptr->pool_size_m1 - qptr->index;
+ /* We need a new command buffer. Fail if there isn't one available */
+ uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
+ if (cvmx_unlikely(new_buffer == NULL))
+ {
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ count--;
+ ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
+ ptr += qptr->index;
+ *ptr++ = cmd1;
+ if (cvmx_likely(count))
+ *ptr++ = cmd2;
+ *ptr = cvmx_ptr_to_phys(new_buffer);
+ /* The current buffer is full and has a link to the next buffer. Time
+ to write the rest of the commands into the new buffer */
+ qptr->base_ptr_div128 = *ptr >> 7;
+ qptr->index = 0;
+ if (cvmx_unlikely(count == 0))
+ {
+ qptr->index = 1;
+ new_buffer[0] = cmd2;
+ }
+ }
+
+ /* All updates are complete. Release the lock and return */
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+
+/**
+ * Simple function to write three command words to a command
+ * queue.
+ *
+ * @param queue_id Hardware command queue to write to
+ * @param use_locking
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
+ * @param cmd1 Command
+ * @param cmd2 Command
+ * @param cmd3 Command
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2, uint64_t cmd3)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+ if (CVMX_ENABLE_PARAMETER_CHECKING)
+ {
+ if (cvmx_unlikely(qptr == NULL))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ /* Make sure nobody else is updating the same queue */
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+
+ /* If a max queue length was specified then make sure we don't
+ exceed it. If any part of the command would be below the limit
+ we allow it */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
+ {
+ if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
+ {
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_FULL;
+ }
+ }
+
+ /* Normally there is plenty of room in the current buffer for the command */
+ if (cvmx_likely(qptr->index + 3 < qptr->pool_size_m1))
+ {
+ uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
+ ptr += qptr->index;
+ qptr->index += 3;
+ ptr[0] = cmd1;
+ ptr[1] = cmd2;
+ ptr[2] = cmd3;
+ }
+ else
+ {
+ uint64_t *ptr;
+ /* Figure out how many command words will fit in this buffer. One
+ location will be needed for the next buffer pointer */
+ int count = qptr->pool_size_m1 - qptr->index;
+ /* We need a new command buffer. Fail if there isn't one available */
+ uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
+ if (cvmx_unlikely(new_buffer == NULL))
+ {
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ count--;
+ ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
+ ptr += qptr->index;
+ *ptr++ = cmd1;
+ if (count)
+ {
+ *ptr++ = cmd2;
+ if (count > 1)
+ *ptr++ = cmd3;
+ }
+ *ptr = cvmx_ptr_to_phys(new_buffer);
+ /* The current buffer is full and has a link to the next buffer. Time
+ to write the rest of the commands into the new buffer */
+ qptr->base_ptr_div128 = *ptr >> 7;
+ qptr->index = 0;
+ ptr = new_buffer;
+ if (count == 0)
+ {
+ *ptr++ = cmd2;
+ qptr->index++;
+ }
+ if (count < 2)
+ {
+ *ptr++ = cmd3;
+ qptr->index++;
+ }
+ }
+
+ /* All updates are complete. Release the lock and return */
+ if (cvmx_likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
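+
+/* Illustrative note (editor's addition): for the common one- to three-word
+ * commands, the fixed-count variants avoid the copy loop of
+ * cvmx_cmd_queue_write(), e.g.
+ *
+ *   cvmx_cmd_queue_write2(queue, 1, word0, word1);
+ */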
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_CMD_QUEUE_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-cmd-queue.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-cn3010-evb-hs5.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-cn3010-evb-hs5.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-cn3010-evb-hs5.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,217 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the EBH-30xx specific devices
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#include <time.h>
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-cn3010-evb-hs5.h"
+#include "cvmx-twsi.h"
+
+
+static inline uint8_t bin2bcd(uint8_t bin)
+{
+ return (bin / 10) << 4 | (bin % 10);
+}
+
+static inline uint8_t bcd2bin(uint8_t bcd)
+{
+ return (bcd >> 4) * 10 + (bcd & 0xf);
+}
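+
+/* Worked example (editor's addition): bin2bcd(59) == 0x59 and
+ * bcd2bin(0x59) == 59; each BCD nibble holds one decimal digit, which is
+ * the encoding the DS1337 registers below use. */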
+
+#define TM_CHECK(_expr, _msg) \
+ do { \
+ if (_expr) { \
+ cvmx_dprintf("Warning: RTC has invalid %s field\n", (_msg)); \
+ rc = -1; \
+ } \
+ } while(0)
+
+static int validate_tm_struct(struct tm * tms)
+{
+ int rc = 0;
+
+ if (!tms)
+ return -1;
+
+ TM_CHECK(tms->tm_sec < 0 || tms->tm_sec > 60, "second"); /* + Leap sec */
+ TM_CHECK(tms->tm_min < 0 || tms->tm_min > 59, "minute");
+ TM_CHECK(tms->tm_hour < 0 || tms->tm_hour > 23, "hour");
+ TM_CHECK(tms->tm_mday < 1 || tms->tm_mday > 31, "day");
+ TM_CHECK(tms->tm_wday < 0 || tms->tm_wday > 6, "day of week");
+ TM_CHECK(tms->tm_mon < 0 || tms->tm_mon > 11, "month");
+ TM_CHECK(tms->tm_year < 0 || tms->tm_year > 200,"year");
+
+ return rc;
+}
+
+/*
+ * Board-specific RTC read
+ * Time is expressed in seconds from epoch (Jan 1 1970 at 00:00:00 UTC)
+ * and converted internally to calendar format.
+ */
+uint32_t cvmx_rtc_ds1337_read(void)
+{
+ int i, retry;
+ uint32_t time;
+ uint8_t reg[8];
+ uint8_t sec;
+ struct tm tms;
+
+
+ memset(&reg, 0, sizeof(reg));
+ memset(&tms, 0, sizeof(struct tm));
+
+ for(retry=0; retry<2; retry++)
+ {
+ /* Lockless read: detects the infrequent roll-over and retries */
+ reg[0] = cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0);
+ for(i=1; i<7; i++)
+ reg[i] = cvmx_twsi_read8_cur_addr(CVMX_RTC_DS1337_ADDR);
+
+ sec = cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0);
+ if ((sec & 0xf) == (reg[0] & 0xf))
+ break; /* Time did not roll-over, value is correct */
+ }
+
+ tms.tm_sec = bcd2bin(reg[0] & 0x7f);
+ tms.tm_min = bcd2bin(reg[1] & 0x7f);
+ tms.tm_hour = bcd2bin(reg[2] & 0x3f);
+ if ((reg[2] & 0x40) && (reg[2] & 0x20)) /* AM/PM format and is PM time */
+ {
+ tms.tm_hour = (tms.tm_hour + 12) % 24;
+ }
+ tms.tm_wday = (reg[3] & 0x7) - 1; /* Day of week field is 0..6 */
+ tms.tm_mday = bcd2bin(reg[4] & 0x3f);
+ tms.tm_mon = bcd2bin(reg[5] & 0x1f) - 1; /* Month field is 0..11 */
+ tms.tm_year = ((reg[5] & 0x80) ? 100 : 0) + bcd2bin(reg[6]);
+
+
+ if (validate_tm_struct(&tms))
+ cvmx_dprintf("Warning: RTC calendar is not configured properly\n");
+
+ time = mktime(&tms);
+
+ return time;
+}
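+
+/* Illustrative usage sketch (editor's addition): read the wall clock and
+ * advance it by one minute.
+ *
+ *   uint32_t now = cvmx_rtc_ds1337_read();
+ *   cvmx_rtc_ds1337_write(now + 60);
+ */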
+
+/*
+ * Board-specific RTC write
+ * Time is expected in seconds from epoch (Jan 1 1970 at 00:00:00 UTC)
+ */
+int cvmx_rtc_ds1337_write(uint32_t time)
+{
+ int i, rc, retry;
+ struct tm tms;
+ uint8_t reg[8];
+ uint8_t sec;
+ time_t time_from_epoch = time;
+
+
+ localtime_r(&time_from_epoch, &tms);
+
+ if (validate_tm_struct(&tms))
+ {
+ cvmx_dprintf("Error: RTC was passed wrong calendar values, write failed\n");
+ goto tm_invalid;
+ }
+
+ reg[0] = bin2bcd(tms.tm_sec);
+ reg[1] = bin2bcd(tms.tm_min);
+ reg[2] = bin2bcd(tms.tm_hour); /* Force 0..23 format even if using AM/PM */
+ reg[3] = bin2bcd(tms.tm_wday + 1);
+ reg[4] = bin2bcd(tms.tm_mday);
+ reg[5] = bin2bcd(tms.tm_mon + 1);
+ if (tms.tm_year >= 100) /* Set century bit */
+ {
+ reg[5] |= 0x80;
+ }
+ reg[6] = bin2bcd(tms.tm_year % 100);
+
+ /* Lockless write: detects the infrequent roll-over and retries */
+ for(retry=0; retry<2; retry++)
+ {
+ rc = 0;
+ for(i=0; i<7; i++)
+ {
+ rc |= cvmx_twsi_write8(CVMX_RTC_DS1337_ADDR, i, reg[i]);
+ }
+
+ sec = cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0);
+ if ((sec & 0xf) == (reg[0] & 0xf))
+ break; /* Time did not roll-over, value is correct */
+ }
+
+ return (rc ? -1 : 0);
+
+ tm_invalid:
+ return -1;
+}
+
+#ifdef CVMX_RTC_DEBUG
+
+void cvmx_rtc_ds1337_dump_state(void)
+{
+ int i = 0;
+
+ printf("RTC:\n");
+ printf("%d : %02X ", i, cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0));
+ for(i=1; i<16; i++) {
+ printf("%02X ", cvmx_twsi_read8_cur_addr(CVMX_RTC_DS1337_ADDR));
+ }
+ printf("\n");
+}
+
+#endif /* CVMX_RTC_DEBUG */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-cn3010-evb-hs5.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-cn3010-evb-hs5.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-cn3010-evb-hs5.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-cn3010-evb-hs5.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,72 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+#ifndef __CVMX_CN3010_EVB_HS5_H__
+#define __CVMX_CN3010_EVB_HS5_H__
+
+/**
+ * @file
+ *
+ * Interface to the EBH-30xx specific devices
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_RTC_DS1337_ADDR (0x68)
+
+uint32_t cvmx_rtc_ds1337_read(void);
+int cvmx_rtc_ds1337_write(uint32_t time);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_CN3010_EVB_HS5_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-cn3010-evb-hs5.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-compactflash.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-compactflash.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-compactflash.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,434 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-compactflash.h"
+
+
+#ifndef MAX
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#endif
+#define FLASH_RoundUP(_Dividend, _Divisor) (((_Dividend)+(_Divisor-1))/(_Divisor))
+/**
+ * Convert nanosecond based time to setting used in the
+ * boot bus timing register, based on timing multiple
+ *
+ *
+ */
+static uint32_t ns_to_tim_reg(int tim_mult, uint32_t nsecs)
+{
+ uint32_t val;
+
+ /* Compute # of eclock periods to get desired duration in nanoseconds */
+ val = FLASH_RoundUP(nsecs * (cvmx_clock_get_rate(CVMX_CLOCK_SCLK)/1000000), 1000);
+
+ /* Factor in timing multiple, if not 1 */
+ if (tim_mult != 1)
+ val = FLASH_RoundUP(val, tim_mult);
+
+ return (val);
+}
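+
+/* Worked example (editor's addition, assuming an 800 MHz SCLK): 60 ns is
+ * FLASH_RoundUP(60 * 800, 1000) = 48 eclocks, and with tim_mult == 4 the
+ * returned setting is FLASH_RoundUP(48, 4) = 12. */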
+
+uint64_t cvmx_compactflash_generate_dma_tim(int tim_mult, uint16_t *ident_data, int *mwdma_mode_ptr)
+{
+
+ cvmx_mio_boot_dma_timx_t dma_tim;
+ int oe_a;
+ int oe_n;
+ int dma_acks;
+ int dma_ackh;
+ int dma_arq;
+ int pause;
+ int To,Tkr,Td;
+ int mwdma_mode = -1;
+ uint16_t word53_field_valid;
+ uint16_t word63_mwdma;
+ uint16_t word163_adv_timing_info;
+
+ if (!ident_data)
+ return 0;
+
+ word53_field_valid = ident_data[53];
+ word63_mwdma = ident_data[63];
+ word163_adv_timing_info = ident_data[163];
+
+ dma_tim.u64 = 0;
+
+ /* Check for basic MWDMA modes */
+ if (word53_field_valid & 0x2)
+ {
+ if (word63_mwdma & 0x4)
+ mwdma_mode = 2;
+ else if (word63_mwdma & 0x2)
+ mwdma_mode = 1;
+ else if (word63_mwdma & 0x1)
+ mwdma_mode = 0;
+ }
+
+ /* Check for advanced MWDMA modes */
+ switch ((word163_adv_timing_info >> 3) & 0x7)
+ {
+ case 1:
+ mwdma_mode = 3;
+ break;
+ case 2:
+ mwdma_mode = 4;
+ break;
+ default:
+ break;
+
+ }
+ /* DMA is not supported by this card */
+ if (mwdma_mode < 0)
+ return 0;
+
+ /* Now set up the DMA timing */
+ switch (tim_mult)
+ {
+ case 1:
+ dma_tim.s.tim_mult = 1;
+ break;
+ case 2:
+ dma_tim.s.tim_mult = 2;
+ break;
+ case 4:
+ dma_tim.s.tim_mult = 0;
+ break;
+ case 8:
+ dma_tim.s.tim_mult = 3;
+ break;
+ default:
+ cvmx_dprintf("ERROR: invalid boot bus dma tim_mult setting\n");
+ break;
+ }
+
+
+ switch (mwdma_mode)
+ {
+ case 4:
+ To = 80;
+ Td = 55;
+ Tkr = 20;
+
+ oe_a = Td + 20; // Td (seems to need more margin here)
+ oe_n = MAX(To - oe_a, Tkr); // Tkr from cf spec, lengthened to meet To
+
+ // oe_n + oe_h must be >= To (cycle time)
+ dma_acks = 0; //Ti
+ dma_ackh = 5; // Tj
+
+ dma_arq = 8; // not spec'ed, value in eclocks, not affected by tim_mult
+ pause = 25 - dma_arq * 1000/(cvmx_clock_get_rate(CVMX_CLOCK_SCLK)/1000000); // Tz
+ break;
+ case 3:
+ To = 100;
+ Td = 65;
+ Tkr = 20;
+
+ oe_a = Td + 20; // Td (seems to need more margin here)
+ oe_n = MAX(To - oe_a, Tkr); // Tkr from cf spec, lengthened to meet To
+
+ // oe_n + oe_h must be >= To (cycle time)
+ dma_acks = 0; //Ti
+ dma_ackh = 5; // Tj
+
+ dma_arq = 8; // not spec'ed, value in eclocks, not affected by tim_mult
+ pause = 25 - dma_arq * 1000/(cvmx_clock_get_rate(CVMX_CLOCK_SCLK)/1000000); // Tz
+ break;
+ case 2:
+ // +20 works
+ // +10 works
+ // + 10 + 0 fails
+ // n=40, a=80 works
+ To = 120;
+ Td = 70;
+ Tkr = 25;
+
+ // oe_a 0 fudge doesn't work; 10 seems to
+ oe_a = Td + 20 + 10; // Td (seems to need more margin here)
+ oe_n = MAX(To - oe_a, Tkr) + 10; // Tkr from cf spec, lengthened to meet To
+ // oe_n 0 fudge fails;;; 10 boots
+
+ // 20 ns fudge needed on dma_acks
+ // oe_n + oe_h must be >= To (cycle time)
+ dma_acks = 0 + 20; //Ti
+ dma_ackh = 5; // Tj
+
+ dma_arq = 8; // not spec'ed, value in eclocks, not affected by tim_mult
+ pause = 25 - dma_arq * 1000/(cvmx_clock_get_rate(CVMX_CLOCK_SCLK)/1000000); // Tz
+ // no fudge needed on pause
+
+ break;
+ case 1:
+ case 0:
+ default:
+ cvmx_dprintf("ERROR: Unsupported DMA mode: %d\n", mwdma_mode);
+ return(-1);
+ break;
+ }
+
+ if (mwdma_mode_ptr)
+ *mwdma_mode_ptr = mwdma_mode;
+
+ dma_tim.s.dmack_pi = 1;
+
+ dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
+ dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);
+
+ dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, dma_acks);
+ dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);
+
+ dma_tim.s.dmarq = dma_arq;
+ dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);
+
+ dma_tim.s.rd_dly = 0; /* Sample right on edge */
+
+ /* writes only */
+ dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
+ dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);
+
+#if 0
+ cvmx_dprintf("ns to ticks (mult %d) of %d is: %d\n", TIM_MULT, 60, ns_to_tim_reg(60));
+ cvmx_dprintf("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
+ dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s, dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
+#endif
+
+ return(dma_tim.u64);
+
+
+}
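+
+/* Illustrative usage sketch (editor's addition). ident_data would be the
+ * 256-word response of an ATA IDENTIFY DEVICE command; the destination CSR
+ * shown is assumed to be the bootbus DMA timing register for the region in
+ * use.
+ *
+ *   int mwdma_mode;
+ *   uint64_t tim = cvmx_compactflash_generate_dma_tim(4, ident_data,
+ *                                                     &mwdma_mode);
+ *   if (tim != 0)
+ *       cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(0), tim);
+ */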
+
+
+/**
+ * Setup timing and region config to support a specific IDE PIO
+ * mode over the bootbus.
+ *
+ * @param cs0 Bootbus region number connected to CS0 on the IDE device
+ * @param cs1 Bootbus region number connected to CS1 on the IDE device
+ * @param pio_mode PIO mode to set (0-6)
+ */
+void cvmx_compactflash_set_piomode(int cs0, int cs1, int pio_mode)
+{
+ cvmx_mio_boot_reg_cfgx_t mio_boot_reg_cfg;
+ cvmx_mio_boot_reg_timx_t mio_boot_reg_tim;
+ int cs;
+ int clocks_us; /* Number of clock cycles per microsec */
+ int tim_mult;
+ int use_iordy; /* Set for PIO0-4, not set for PIO5-6 */
+ int t1; /* These t names are timing parameters from the ATA spec */
+ int t2;
+ int t2i;
+ int t4;
+ int t6;
+ int t6z;
+ int t9;
+
+ /* PIO modes 0-4 all allow the device to deassert IORDY to slow down
+ the host */
+ use_iordy = 1;
+
+ /* Use the PIO mode to determine timing parameters */
+ switch(pio_mode) {
+ case 6:
+ /* The CF spec says IORDY should be ignored in PIO mode 6 */
+ use_iordy = 0;
+ t1 = 10;
+ t2 = 55;
+ t2i = 20;
+ t4 = 5;
+ t6 = 5;
+ t6z = 20;
+ t9 = 10;
+ break;
+ case 5:
+ /* The CF spec says IORDY should be ignored in PIO mode 5 */
+ use_iordy = 0;
+ t1 = 15;
+ t2 = 65;
+ t2i = 25;
+ t4 = 5;
+ t6 = 5;
+ t6z = 20;
+ t9 = 10;
+ break;
+ case 4:
+ t1 = 25;
+ t2 = 70;
+ t2i = 25;
+ t4 = 10;
+ t6 = 5;
+ t6z = 30;
+ t9 = 10;
+ break;
+ case 3:
+ t1 = 30;
+ t2 = 80;
+ t2i = 70;
+ t4 = 10;
+ t6 = 5;
+ t6z = 30;
+ t9 = 10;
+ break;
+ case 2:
+ t1 = 30;
+ t2 = 100;
+ t2i = 0;
+ t4 = 15;
+ t6 = 5;
+ t6z = 30;
+ t9 = 10;
+ break;
+ case 1:
+ t1 = 50;
+ t2 = 125;
+ t2i = 0;
+ t4 = 20;
+ t6 = 5;
+ t6z = 30;
+ t9 = 15;
+ break;
+ default:
+ t1 = 70;
+ t2 = 165;
+ t2i = 0;
+ t4 = 30;
+ t6 = 5;
+ t6z = 30;
+ t9 = 20;
+ break;
+ }
+ /* Compute the number of clock cycles per microsecond, rounding up */
+ clocks_us = FLASH_RoundUP(cvmx_clock_get_rate(CVMX_CLOCK_SCLK), 1000000);
+
+ /* Convert the times to clock cycles, rounding up. Octeon parameters use
+ minus-one notation, so subtract one after the conversion */
+ t1 = FLASH_RoundUP(t1 * clocks_us, 1000);
+ if (t1)
+ t1--;
+ t2 = FLASH_RoundUP(t2 * clocks_us, 1000);
+ if (t2)
+ t2--;
+ t2i = FLASH_RoundUP(t2i * clocks_us, 1000);
+ if (t2i)
+ t2i--;
+ t4 = FLASH_RoundUP(t4 * clocks_us, 1000);
+ if (t4)
+ t4--;
+ t6 = FLASH_RoundUP(t6 * clocks_us, 1000);
+ if (t6)
+ t6--;
+ t6z = FLASH_RoundUP(t6z * clocks_us, 1000);
+ if (t6z)
+ t6z--;
+ t9 = FLASH_RoundUP(t9 * clocks_us, 1000);
+ if (t9)
+ t9--;
+
+ /* Start using a scale factor of one cycle. Keep doubling it until
+ the parameters fit in their fields. Since t2 is the largest number,
+ we only need to check it */
+ tim_mult = 1;
+ while (t2 >= 1<<6)
+ {
+ t1 = FLASH_RoundUP(t1, 2);
+ t2 = FLASH_RoundUP(t2, 2);
+ t2i = FLASH_RoundUP(t2i, 2);
+ t4 = FLASH_RoundUP(t4, 2);
+ t6 = FLASH_RoundUP(t6, 2);
+ t6z = FLASH_RoundUP(t6z, 2);
+ t9 = FLASH_RoundUP(t9, 2);
+ tim_mult *= 2;
+ }
+
+ cs = cs0;
+ do {
+ mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+ mio_boot_reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */
+ switch(tim_mult) {
+ case 1:
+ mio_boot_reg_cfg.s.tim_mult = 1;
+ break;
+ case 2:
+ mio_boot_reg_cfg.s.tim_mult = 2;
+ break;
+ case 4:
+ mio_boot_reg_cfg.s.tim_mult = 0;
+ break;
+ case 8:
+ default:
+ mio_boot_reg_cfg.s.tim_mult = 3;
+ break;
+ }
+ mio_boot_reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */
+ mio_boot_reg_cfg.s.sam = 0; /* Don't combine write and output enable */
+ mio_boot_reg_cfg.s.we_ext = 0; /* No write enable extension */
+ mio_boot_reg_cfg.s.oe_ext = 0; /* No read enable extension */
+ mio_boot_reg_cfg.s.en = 1; /* Enable this region */
+ mio_boot_reg_cfg.s.orbit = 0; /* Don't combine with previous region */
+ mio_boot_reg_cfg.s.width = 1; /* 16 bits wide */
+ cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), mio_boot_reg_cfg.u64);
+ if(cs == cs0)
+ cs = cs1;
+ else
+ cs = cs0;
+ } while(cs != cs0);
+
+ mio_boot_reg_tim.u64 = 0;
+ mio_boot_reg_tim.s.pagem = 0; /* Disable page mode */
+ mio_boot_reg_tim.s.waitm = use_iordy; /* Enable dynamic timing */
+ mio_boot_reg_tim.s.pages = 0; /* Pages are disabled */
+ mio_boot_reg_tim.s.ale = 8; /* If someone uses ALE, this seems to work */
+ mio_boot_reg_tim.s.page = 0; /* Not used */
+ mio_boot_reg_tim.s.wait = 0; /* Time after IORDY to continue to assert the data */
+ mio_boot_reg_tim.s.pause = 0; /* Time after CE that signals stay valid */
+ mio_boot_reg_tim.s.wr_hld = t9; /* How long to hold after a write */
+ mio_boot_reg_tim.s.rd_hld = t9; /* How long to wait after a read for device to tristate */
+ mio_boot_reg_tim.s.we = t2; /* How long write enable is asserted */
+ mio_boot_reg_tim.s.oe = t2; /* How long read enable is asserted */
+ mio_boot_reg_tim.s.ce = t1; /* Time after CE that read/write starts */
+ mio_boot_reg_tim.s.adr = 1; /* Time before CE that address is valid */
+
+ /* Program the bootbus region timing for both chip selects */
+ cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs0), mio_boot_reg_tim.u64);
+ cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs1), mio_boot_reg_tim.u64);
+}
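+
+/* Illustrative usage sketch (editor's addition): program bootbus regions
+ * 0 (CS0) and 1 (CS1) for PIO mode 4.
+ *
+ *   cvmx_compactflash_set_piomode(0, 1, 4);
+ */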
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-compactflash.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-compactflash.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-compactflash.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-compactflash.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,79 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+#ifndef __CVMX_COMPACTFLASH_H__
+#define __CVMX_COMPACTFLASH_H__
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * This function takes values from the compact flash device
+ * identify response, and returns the appropriate value to write
+ * into the boot bus DMA timing register.
+ *
+ * @param tim_mult Eclock timing multiple to use
+ * @param ident_data Data returned by the 'identify' command. This is used to
+ * determine the DMA modes supported by the card, if any.
+ * @param mwdma_mode_ptr
+ * Optional pointer to return MWDMA mode in
+ *
+ * @return 64 bit value to write to DMA timing register
+ */
+extern uint64_t cvmx_compactflash_generate_dma_tim(int tim_mult, uint16_t *ident_data, int *mwdma_mode_ptr);
+
+/**
+ * Setup timing and region config to support a specific IDE PIO
+ * mode over the bootbus.
+ *
+ * @param cs0 Bootbus region number connected to CS0 on the IDE device
+ * @param cs1 Bootbus region number connected to CS1 on the IDE device
+ * @param pio_mode PIO mode to set (0-6)
+ */
+extern void cvmx_compactflash_set_piomode(int cs0, int cs1, int pio_mode);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __CVMX_COMPACTFLASH_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-compactflash.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-core.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-core.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-core.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,163 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Module to support operations on a core, such as TLB configuration.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-core.h>
+#else
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-core.h"
+#endif
+
+
+/**
+ * Adds a wired TLB entry, and returns the index of the entry added.
+ * Parameters are written to TLB registers without further processing.
+ *
+ * @param hi HI register value
+ * @param lo0 lo0 register value
+ * @param lo1 lo1 register value
+ * @param page_mask pagemask register value
+ *
+ * @return Success: TLB index used (0-31 Octeon, 0-63 Octeon+, or 0-127
+ * Octeon2). Failure: -1
+ */
+int cvmx_core_add_wired_tlb_entry(uint64_t hi, uint64_t lo0, uint64_t lo1, cvmx_tlb_pagemask_t page_mask)
+{
+ uint32_t index;
+
+ CVMX_MF_TLB_WIRED(index);
+ if (index >= (unsigned int)cvmx_core_get_tlb_entries())
+ {
+ return(-1);
+ }
+ CVMX_MT_ENTRY_HIGH(hi);
+ CVMX_MT_ENTRY_LO_0(lo0);
+ CVMX_MT_ENTRY_LO_1(lo1);
+ CVMX_MT_PAGEMASK(page_mask);
+ CVMX_MT_TLB_INDEX(index);
+ CVMX_MT_TLB_WIRED(index + 1);
+ CVMX_EHB;
+ CVMX_TLBWI;
+ CVMX_EHB;
+ return(index);
+}
+
+
+
+/**
+ * Adds a fixed (wired) TLB mapping. Returns TLB index used or -1 on error.
+ * This is a wrapper around cvmx_core_add_wired_tlb_entry()
+ *
+ * @param vaddr Virtual address to map
+ * @param page0_addr page 0 physical address, with low 3 bits representing the DIRTY, VALID, and GLOBAL bits
+ * @param page1_addr page1 physical address, with low 3 bits representing the DIRTY, VALID, and GLOBAL bits
+ * @param page_mask page mask.
+ *
+ * @return Success: TLB index used (0-31)
+ * Failure: -1
+ */
+int cvmx_core_add_fixed_tlb_mapping_bits(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask)
+{
+
+ if ((vaddr & (page_mask | 0x7ff))
+ || ((page0_addr & ~0x7ULL) & ((page_mask | 0x7ff) >> 1))
+ || ((page1_addr & ~0x7ULL) & ((page_mask | 0x7ff) >> 1)))
+ {
+ cvmx_dprintf("Error adding tlb mapping: invalid address alignment at vaddr: 0x%llx\n", (unsigned long long)vaddr);
+ return(-1);
+ }
+
+
+ return(cvmx_core_add_wired_tlb_entry(vaddr,
+ (page0_addr >> 6) | (page0_addr & 0x7),
+ (page1_addr >> 6) | (page1_addr & 0x7),
+ page_mask));
+
+}
+/**
+ * Adds a fixed (wired) TLB mapping. Returns TLB index used or -1 on error.
+ * Assumes both pages are valid. Use cvmx_core_add_fixed_tlb_mapping_bits for more control.
+ * This is a wrapper around cvmx_core_add_wired_tlb_entry()
+ *
+ * @param vaddr Virtual address to map
+ * @param page0_addr page 0 physical address
+ * @param page1_addr page 1 physical address
+ * @param page_mask page mask.
+ *
+ * @return Success: TLB index used (0-31)
+ * Failure: -1
+ */
+int cvmx_core_add_fixed_tlb_mapping(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask)
+{
+
+ return(cvmx_core_add_fixed_tlb_mapping_bits(vaddr, page0_addr | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, page1_addr | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, page_mask));
+
+}
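+
+/* Illustrative usage sketch (editor's addition; the addresses are made up
+ * but satisfy the alignment checks above): wire a 4MB page pair so virtual
+ * 0x80000000 maps physical 0x20000000 and 0x20400000.
+ *
+ *   int idx = cvmx_core_add_fixed_tlb_mapping(0x80000000ull, 0x20000000ull,
+ *                                             0x20400000ull,
+ *                                             CVMX_TLB_PAGEMASK_4M);
+ *   if (idx < 0)
+ *       cvmx_dprintf("no wired TLB entry available\n");
+ */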
+
+/**
+ * Return number of TLB entries.
+ */
+int cvmx_core_get_tlb_entries(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return 32;
+ else if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ return 64;
+ else
+ return 128;
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-core.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-core.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-core.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-core.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,189 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Module to support operations on a core, such as TLB configuration.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+
+#ifndef __CVMX_CORE_H__
+#define __CVMX_CORE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * The types of performance counters supported per cpu
+ */
+typedef enum
+{
+ CVMX_CORE_PERF_NONE = 0, /**< Turn off the performance counter */
+ CVMX_CORE_PERF_CLK = 1, /**< Conditionally clocked cycles (as opposed to count/cvm_count which count even with no clocks) */
+ CVMX_CORE_PERF_ISSUE = 2, /**< Instructions issued but not retired */
+ CVMX_CORE_PERF_RET = 3, /**< Instructions retired */
+ CVMX_CORE_PERF_NISSUE = 4, /**< Cycles no issue */
+ CVMX_CORE_PERF_SISSUE = 5, /**< Cycles single issue */
+ CVMX_CORE_PERF_DISSUE = 6, /**< Cycles dual issue */
+ CVMX_CORE_PERF_IFI = 7, /**< Cycle ifetch issued (but not necessarily commit to pp_mem) */
+ CVMX_CORE_PERF_BR = 8, /**< Branches retired */
+ CVMX_CORE_PERF_BRMIS = 9, /**< Branch mispredicts */
+ CVMX_CORE_PERF_J = 10, /**< Jumps retired */
+ CVMX_CORE_PERF_JMIS = 11, /**< Jumps mispredicted */
+ CVMX_CORE_PERF_REPLAY = 12, /**< Mem Replays */
+ CVMX_CORE_PERF_IUNA = 13, /**< Cycles idle due to unaligned_replays */
+ CVMX_CORE_PERF_TRAP = 14, /**< trap_6a signal */
+ CVMX_CORE_PERF_UULOAD = 16, /**< Unexpected unaligned loads (REPUN=1) */
+ CVMX_CORE_PERF_UUSTORE = 17, /**< Unexpected unaligned store (REPUN=1) */
+ CVMX_CORE_PERF_ULOAD = 18, /**< Unaligned loads (REPUN=1 or USEUN=1) */
+ CVMX_CORE_PERF_USTORE = 19, /**< Unaligned store (REPUN=1 or USEUN=1) */
+ CVMX_CORE_PERF_EC = 20, /**< Exec clocks(must set CvmCtl[DISCE] for accurate timing) */
+ CVMX_CORE_PERF_MC = 21, /**< Mul clocks(must set CvmCtl[DISCE] for accurate timing) */
+ CVMX_CORE_PERF_CC = 22, /**< Crypto clocks(must set CvmCtl[DISCE] for accurate timing) */
+ CVMX_CORE_PERF_CSRC = 23, /**< Issue_csr clocks(must set CvmCtl[DISCE] for accurate timing) */
+ CVMX_CORE_PERF_CFETCH = 24, /**< Icache committed fetches (demand+prefetch) */
+ CVMX_CORE_PERF_CPREF = 25, /**< Icache committed prefetches */
+ CVMX_CORE_PERF_ICA = 26, /**< Icache aliases */
+ CVMX_CORE_PERF_II = 27, /**< Icache invalidates */
+ CVMX_CORE_PERF_IP = 28, /**< Icache parity error */
+ CVMX_CORE_PERF_CIMISS = 29, /**< Cycles idle due to imiss (must set CvmCtl[DISCE] for accurate timing) */
+ CVMX_CORE_PERF_WBUF = 32, /**< Number of write buffer entries created */
+ CVMX_CORE_PERF_WDAT = 33, /**< Number of write buffer data cycles used (may need to set CvmCtl[DISCE] for accurate counts) */
+ CVMX_CORE_PERF_WBUFLD = 34, /**< Number of write buffer entries forced out by loads */
+ CVMX_CORE_PERF_WBUFFL = 35, /**< Number of cycles that there was no available write buffer entry (may need to set CvmCtl[DISCE] and CvmMemCtl[MCLK] for accurate counts) */
+ CVMX_CORE_PERF_WBUFTR = 36, /**< Number of stores that found no available write buffer entries */
+ CVMX_CORE_PERF_BADD = 37, /**< Number of address bus cycles used (may need to set CvmCtl[DISCE] for accurate counts) */
+ CVMX_CORE_PERF_BADDL2 = 38, /**< Number of address bus cycles not reflected (i.e. destined for L2) (may need to set CvmCtl[DISCE] for accurate counts) */
+ CVMX_CORE_PERF_BFILL = 39, /**< Number of fill bus cycles used (may need to set CvmCtl[DISCE] for accurate counts) */
+ CVMX_CORE_PERF_DDIDS = 40, /**< Number of Dstream DIDs created */
+ CVMX_CORE_PERF_IDIDS = 41, /**< Number of Istream DIDs created */
+ CVMX_CORE_PERF_DIDNA = 42, /**< Number of cycles that no DIDs were available (may need to set CvmCtl[DISCE] and CvmMemCtl[MCLK] for accurate counts) */
+ CVMX_CORE_PERF_LDS = 43, /**< Number of load issues */
+ CVMX_CORE_PERF_LMLDS = 44, /**< Number of local memory load */
+ CVMX_CORE_PERF_IOLDS = 45, /**< Number of I/O load issues */
+ CVMX_CORE_PERF_DMLDS = 46, /**< Number of loads that were not prefetches and missed in the cache */
+ CVMX_CORE_PERF_STS = 48, /**< Number of store issues */
+ CVMX_CORE_PERF_LMSTS = 49, /**< Number of local memory store issues */
+ CVMX_CORE_PERF_IOSTS = 50, /**< Number of I/O store issues */
+ CVMX_CORE_PERF_IOBDMA = 51, /**< Number of IOBDMAs */
+ CVMX_CORE_PERF_DTLB = 53, /**< Number of dstream TLB refill, invalid, or modified exceptions */
+ CVMX_CORE_PERF_DTLBAD = 54, /**< Number of dstream TLB address errors */
+ CVMX_CORE_PERF_ITLB = 55, /**< Number of istream TLB refill, invalid, or address error exceptions */
+ CVMX_CORE_PERF_SYNC = 56, /**< Number of SYNC stall cycles (may need to set CvmCtl[DISCE] for accurate counts) */
+ CVMX_CORE_PERF_SYNCIOB = 57, /**< Number of SYNCIOBDMA stall cycles (may need to set CvmCtl[DISCE] for accurate counts) */
+ CVMX_CORE_PERF_SYNCW = 58, /**< Number of SYNCWs */
+ /* Added in CN63XX */
+ CVMX_CORE_PERF_ERETMIS = 64, /**< D/eret mispredicts */
+ CVMX_CORE_PERF_LIKMIS = 65, /**< Branch likely mispredicts */
+ CVMX_CORE_PERF_HAZTR = 66, /**< Hazard traps due to *MTC0 to CvmCtl, Perf counter control, EntryHi, or CvmMemCtl registers */
+ CVMX_CORE_PERF_MAX /**< This not a counter, just a marker for the highest number */
+} cvmx_core_perf_t;
+
+/**
+ * Bit description of the COP0 counter control register
+ */
+typedef union
+{
+ uint32_t u32;
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t m : 1; /**< Set to 1 for sel 0 and 0 for sel 2, indicating there are two performance counters */
+ uint32_t w : 1; /**< Set to 1 indicating counters are 64 bit */
+ uint32_t reserved_11_29 :15;
+ cvmx_core_perf_t event :10; /**< Selects the event to be counted by the corresponding Counter Register */
+ uint32_t ie : 1; /**< Interrupt Enable */
+ uint32_t u : 1; /**< Count in user mode */
+ uint32_t s : 1; /**< Count in supervisor mode */
+ uint32_t k : 1; /**< Count in kernel mode */
+ uint32_t ex : 1; /**< Count in exception context */
+#else
+ uint32_t ex : 1;
+ uint32_t k : 1;
+ uint32_t s : 1;
+ uint32_t u : 1;
+ uint32_t ie : 1;
+ uint32_t event :10;
+ uint32_t reserved_11_29 :15;
+ uint32_t w : 1;
+ uint32_t m : 1;
+#endif
+ } s;
+} cvmx_core_perf_control_t;
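+
+/* Illustrative usage sketch (editor's addition; writing the value to the
+ * COP0 performance-counter control register is left to code not shown
+ * here): count retired instructions in both kernel and user mode.
+ *
+ *   cvmx_core_perf_control_t ctl;
+ *   ctl.u32 = 0;
+ *   ctl.s.event = CVMX_CORE_PERF_RET;
+ *   ctl.s.k = 1;
+ *   ctl.s.u = 1;
+ */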
+
+typedef enum {
+ CVMX_TLB_PAGEMASK_4K = 0x3 << 11,
+ CVMX_TLB_PAGEMASK_16K = 0xF << 11,
+ CVMX_TLB_PAGEMASK_64K = 0x3F << 11,
+ CVMX_TLB_PAGEMASK_256K = 0xFF << 11,
+ CVMX_TLB_PAGEMASK_1M = 0x3FF << 11,
+ CVMX_TLB_PAGEMASK_4M = 0xFFF << 11,
+ CVMX_TLB_PAGEMASK_16M = 0x3FFF << 11,
+ CVMX_TLB_PAGEMASK_64M = 0xFFFF << 11,
+ CVMX_TLB_PAGEMASK_256M = 0x3FFFF << 11,
+} cvmx_tlb_pagemask_t;
+
+
+int cvmx_core_add_wired_tlb_entry(uint64_t hi, uint64_t lo0, uint64_t lo1, cvmx_tlb_pagemask_t page_mask);
+
+
+int cvmx_core_add_fixed_tlb_mapping(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask);
+int cvmx_core_add_fixed_tlb_mapping_bits(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask);
+
+/**
+ * Return number of TLB entries.
+ */
+int cvmx_core_get_tlb_entries(void);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_CORE_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-core.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-coremask.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-coremask.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-coremask.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,135 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Module to support operations on bitmap of cores. Coremask can be used to
+ * select a specific core, a group of cores, or all available cores, for
+ * initialization and differentiation of roles within a single shared binary
+ * executable image.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-coremask.h"
+
+
+#define CVMX_COREMASK_MAX_SYNCS 20 /* maximum number of coremasks for barrier sync */
+
+/**
+ * This structure defines the private state maintained by coremask module.
+ *
+ */
+CVMX_SHARED static struct {
+
+ cvmx_spinlock_t lock; /**< mutex spinlock */
+
+ struct {
+
+ unsigned int coremask; /**< coremask specified for barrier */
+ unsigned int checkin; /**< bitmask of cores checking in */
+ volatile unsigned int exit; /**< variable to poll for exit condition */
+
+ } s[CVMX_COREMASK_MAX_SYNCS];
+
+} state = {
+
+ { CVMX_SPINLOCK_UNLOCKED_VAL },
+
+ { { 0, 0, 0 } },
+};
+
+
+/**
+ * Wait (stall) until all cores in the given coremask have reached this point
+ * in the program execution before proceeding.
+ *
+ * @param coremask the group of cores performing the barrier sync
+ *
+ */
+void cvmx_coremask_barrier_sync(unsigned int coremask)
+{
+ int i;
+ unsigned int target;
+
+ assert(coremask != 0);
+
+ cvmx_spinlock_lock(&state.lock);
+
+ for (i = 0; i < CVMX_COREMASK_MAX_SYNCS; i++) {
+
+ if (state.s[i].coremask == 0) {
+ /* end of existing coremask list, create new entry, fall-thru */
+ state.s[i].coremask = coremask;
+ }
+
+ if (state.s[i].coremask == coremask) {
+
+ target = state.s[i].exit + 1; /* wrap-around at 32b */
+
+ state.s[i].checkin |= cvmx_coremask_core(cvmx_get_core_num());
+ if (state.s[i].checkin == coremask) {
+ state.s[i].checkin = 0;
+ state.s[i].exit = target; /* signal exit condition */
+ }
+ cvmx_spinlock_unlock(&state.lock);
+
+ while (state.s[i].exit != target)
+ ;
+
+ return;
+ }
+ }
+
+ /* error condition - coremask array overflowed */
+ cvmx_spinlock_unlock(&state.lock);
+ assert(0);
+}
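+
+/* Illustrative usage sketch (editor's addition): the first four cores
+ * rendezvous before continuing.
+ *
+ *   if (cvmx_coremask_is_member(0xf))
+ *       cvmx_coremask_barrier_sync(0xf);
+ */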
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-coremask.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-coremask.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-coremask.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-coremask.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,320 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Module to support operations on bitmap of cores. Coremask can be used to
+ * select a specific core, a group of cores, or all available cores, for
+ * initialization and differentiation of roles within a single shared binary
+ * executable image.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+
+#ifndef __CVMX_COREMASK_H__
+#define __CVMX_COREMASK_H__
+
+#include "cvmx-asm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint64_t cvmx_coremask_holder_t; /* basic type to hold the
+ coremask bits */
+
+#define CVMX_COREMASK_HLDRSZ ((int)(sizeof(cvmx_coremask_holder_t) * 8))
+ /* bits per holder */
+
+#define CVMX_COREMASK_BMPSZ ((int)(CVMX_MAX_CORES / CVMX_COREMASK_HLDRSZ + 1))
+ /* bit map size */
+
+/*
+ * This macro pair implements a way to iterate over the active cores in the mask.
+ * @param fec_pcm points to the coremask.
+ * @param fec_ppid is the active core's id.
+ */
+#define CVMX_COREMASK_FOR_EACH_CORE_BEGIN(fec_pcm, fec_ppid) \
+ do { \
+ int fec_i, fec_j; \
+ \
+ for (fec_i = 0; fec_i < CVMX_COREMASK_BMPSZ; fec_i++) \
+ { \
+ for (fec_j = 0; fec_j < CVMX_COREMASK_HLDRSZ; fec_j++) \
+ { \
+ if (((cvmx_coremask_holder_t)1 << fec_j) & \
+ (fec_pcm)->coremask_bitmap[fec_i]) \
+ { \
+ fec_ppid = fec_i * CVMX_COREMASK_HLDRSZ + fec_j;
+
+
+#define CVMX_COREMASK_FOR_EACH_CORE_END \
+ } \
+ } \
+ } \
+ } while (0)
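+
+/* Illustrative usage sketch (editor's addition; "cm" stands for a
+ * struct cvmx_coremask populated elsewhere):
+ *
+ *   int ppid;
+ *   CVMX_COREMASK_FOR_EACH_CORE_BEGIN(&cm, ppid)
+ *       cvmx_dprintf("core %d is in the mask\n", ppid);
+ *   CVMX_COREMASK_FOR_EACH_CORE_END;
+ */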
+
+struct cvmx_coremask {
+ /*
+ * Big-endian. Array elements with larger indices represent cores with
+ * larger IDs, as do more-significant bits within a cvmx_coremask_holder_t
+ * and within a byte.
+ */
+ cvmx_coremask_holder_t coremask_bitmap[CVMX_COREMASK_BMPSZ];
+};
+
+/*
+ * Is ``core'' set in the coremask?
+ *
+ * @param pcm is the pointer to the coremask.
+ * @param core
+ * @return 1 if core is set and 0 if not.
+ */
+static inline int cvmx_coremask_is_set_core(struct cvmx_coremask *pcm,
+ int core)
+{
+ int n, i;
+
+ n = core % CVMX_COREMASK_HLDRSZ;
+ i = core / CVMX_COREMASK_HLDRSZ;
+
+ return (int)((pcm->coremask_bitmap[i] & (1ull << n)) != 0);
+}
+
+/*
+ * Set ``core'' in the coremask.
+ *
+ * @param pcm is the pointer to the coremask.
+ * @param core
+ * @return 0.
+ */
+static inline int cvmx_coremask_set_core(struct cvmx_coremask *pcm,
+ int core)
+{
+ int n, i;
+
+ n = core % CVMX_COREMASK_HLDRSZ;
+ i = core / CVMX_COREMASK_HLDRSZ;
+ pcm->coremask_bitmap[i] |= (1ull << n);
+
+ return 0;
+}
+
+/*
+ * Clear ``core'' from the coremask.
+ *
+ * @param pcm is the pointer to the coremask.
+ * @param core
+ * @return 0.
+ */
+static inline int cvmx_coremask_clear_core(struct cvmx_coremask *pcm,
+ int core)
+{
+ int n, i;
+
+ n = core % CVMX_COREMASK_HLDRSZ;
+ i = core / CVMX_COREMASK_HLDRSZ;
+ pcm->coremask_bitmap[i] &= ~(1ull << n);
+
+ return 0;
+}
+
+/*
+ * Clear the coremask.
+ *
+ * @param pcm is the pointer to the coremask.
+ * @return 0.
+ */
+static inline int cvmx_coremask_clear_all(struct cvmx_coremask *pcm)
+{
+ int i;
+
+ for (i = 0; i < CVMX_COREMASK_BMPSZ; i++)
+ pcm->coremask_bitmap[i] = 0;
+
+ return 0;
+}
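+
+/*
+ * Illustrative sketch (not part of the original header) combining the
+ * accessors above; assert() is used only for illustration:
+ *
+ *     struct cvmx_coremask cm;
+ *
+ *     cvmx_coremask_clear_all(&cm);
+ *     cvmx_coremask_set_core(&cm, 3);    // holder 3/64 = 0, bit 3%64 = 3
+ *     assert(cvmx_coremask_is_set_core(&cm, 3));
+ *     cvmx_coremask_clear_core(&cm, 3);
+ */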
+
+/*
+ * Is the current core the first in the coremask?
+ *
+ * @param pcm is the pointer to the coremask.
+ * @return 1 for yes and 0 for no.
+ */
+static inline int cvmx_coremask_first_core_bmp(struct cvmx_coremask *pcm)
+{
+ int n, i;
+
+ n = (int) cvmx_get_core_num();
+ for (i = 0; i < CVMX_COREMASK_BMPSZ; i++)
+ {
+ if (pcm->coremask_bitmap[i])
+ {
+ if (n == 0 && pcm->coremask_bitmap[i] & 1)
+ return 1;
+
+ if (n >= CVMX_COREMASK_HLDRSZ)
+ return 0;
+
+ return ((((1ull << n) - 1) & pcm->coremask_bitmap[i]) == 0);
+ }
+ else
+ n -= CVMX_COREMASK_HLDRSZ;
+ }
+
+ return 0;
+}
+
+/*
+ * Is the current core a member of the coremask?
+ *
+ * @param pcm is the pointer to the coremask.
+ * @return 1 for yes and 0 for no.
+ */
+static inline int cvmx_coremask_is_member_bmp(struct cvmx_coremask *pcm)
+{
+ return cvmx_coremask_is_set_core(pcm, (int)cvmx_get_core_num());
+}
+
+/*
+ * A coremask here is simply an unsigned int (32 bits).
+ *
+ * NOTE: this representation supports at most 32 cores.
+ *
+ * The union of two coremasks is simply their bitwise-or;
+ * the intersection is simply their bitwise-and.
+ *
+ */
+
+#define CVMX_COREMASK_MAX 0xFFFFFFFFu /* maximum supported mask */
+
+
+/**
+ * Compute coremask for a specific core.
+ *
+ * @param core_id The core ID
+ *
+ * @return coremask for a specific core
+ *
+ */
+static inline unsigned int cvmx_coremask_core(unsigned int core_id)
+{
+ return (1u << core_id);
+}
+
+/**
+ * Compute coremask for num_cores cores starting with core 0.
+ *
+ * @param num_cores number of cores
+ *
+ * @return coremask for num_cores cores
+ *
+ */
+static inline unsigned int cvmx_coremask_numcores(unsigned int num_cores)
+{
+ return (CVMX_COREMASK_MAX >> (CVMX_MAX_CORES - num_cores));
+}
+
+/**
+ * Compute coremask for a range of cores from core low to core high.
+ *
+ * @param low first core in the range
+ * @param high last core in the range
+ *
+ * @return coremask for the range of cores
+ *
+ */
+static inline unsigned int cvmx_coremask_range(unsigned int low, unsigned int high)
+{
+ return ((CVMX_COREMASK_MAX >> (CVMX_MAX_CORES - 1 - high + low)) << low);
+}
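+
+/*
+ * Worked example (not in the original header), assuming CVMX_MAX_CORES
+ * is 32: cvmx_coremask_range(2, 5) evaluates to
+ * (0xFFFFFFFF >> (32 - 1 - 5 + 2)) << 2 = (0xFFFFFFFF >> 28) << 2
+ * = 0xF << 2 = 0x3C, i.e. cores 2 through 5.  Likewise
+ * cvmx_coremask_numcores(4) = 0xFFFFFFFF >> (32 - 4) = 0xF, cores 0-3.
+ */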
+
+
+/**
+ * Test to see if current core is a member of coremask.
+ *
+ * @param coremask the coremask to test against
+ *
+ * @return 1 if current core is a member of coremask, 0 otherwise
+ *
+ */
+static inline int cvmx_coremask_is_member(unsigned int coremask)
+{
+ return ((cvmx_coremask_core(cvmx_get_core_num()) & coremask) != 0);
+}
+
+/**
+ * Test to see if current core is first core in coremask.
+ *
+ * @param coremask the coremask to test against
+ *
+ * @return 1 if current core is first core in the coremask, 0 otherwise
+ *
+ */
+static inline int cvmx_coremask_first_core(unsigned int coremask)
+{
+ return cvmx_coremask_is_member(coremask)
+ && ((cvmx_get_core_num() == 0) ||
+ ((cvmx_coremask_numcores(cvmx_get_core_num()) & coremask) == 0));
+}
+
+/**
+ * Wait (stall) until all cores in the given coremask have reached this point
+ * in the program execution before proceeding.
+ *
+ * @param coremask the group of cores performing the barrier sync
+ *
+ */
+extern void cvmx_coremask_barrier_sync(unsigned int coremask);
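+
+/*
+ * Illustrative sketch (not part of the original header) of the usual
+ * boot-time pattern built from the two routines above. app_init() is a
+ * hypothetical application setup function, and coremask is assumed to
+ * hold the mask of cores the application was started on:
+ *
+ *     if (cvmx_coremask_first_core(coremask))
+ *         app_init();                        // one core performs the setup
+ *     cvmx_coremask_barrier_sync(coremask);  // the others wait for it
+ */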
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_COREMASK_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-coremask.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-crypto.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-crypto.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-crypto.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,79 @@
+/* $MidnightBSD$ */
+
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Helper utilities for enabling crypto.
+ *
+ * <hr>$Revision: $<hr>
+ */
+#include "executive-config.h"
+#include "cvmx-config.h"
+#include "cvmx.h"
+
+int cvmx_crypto_dormant_enable(uint64_t key)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_CRYPTO))
+ return 1;
+
+ if (octeon_has_feature(OCTEON_FEATURE_DORM_CRYPTO)) {
+ cvmx_rnm_eer_key_t v;
+ v.s.key = key;
+ cvmx_write_csr(CVMX_RNM_EER_KEY, v.u64);
+ }
+
+ return octeon_has_feature(OCTEON_FEATURE_CRYPTO);
+}
+
+uint64_t cvmx_crypto_dormant_dbg(void)
+{
+ cvmx_rnm_eer_dbg_t dbg;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_DORM_CRYPTO))
+ return 0;
+
+ dbg.u64 = cvmx_read_csr(CVMX_RNM_EER_DBG);
+ return dbg.s.dat;
+}
+
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-crypto.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-crypto.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-crypto.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-crypto.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,71 @@
+/* $MidnightBSD$ */
+
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Helper utilities for enabling crypto.
+ *
+ * <hr>$Revision: $<hr>
+ */
+
+#ifndef __CVMX_CRYPTO_H__
+#define __CVMX_CRYPTO_H__
+/**
+ * Enable the dormant crypto functions. If crypto is not already
+ * enabled and it is possible to enable it, write the enable key.
+ *
+ * @param key The dormant crypto enable key value.
+ *
+ * @return true if crypto is (or has been) enabled.
+ */
+extern int cvmx_crypto_dormant_enable(uint64_t key);
+
+/**
+ * Read the crypto dormant debug value.
+ *
+ * @return The RNM_EER_DBG.DAT value, or zero if the feature is not
+ * enabled.
+ */
+extern uint64_t cvmx_crypto_dormant_dbg(void);
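+
+/*
+ * Illustrative usage sketch (not part of the original header).
+ * MY_CRYPTO_KEY stands in for a chip-specific enable key, which this
+ * file does not define:
+ *
+ *     if (!cvmx_crypto_dormant_enable(MY_CRYPTO_KEY))
+ *         cvmx_dprintf("crypto still disabled, dbg=0x%016llx\n",
+ *                      (unsigned long long)cvmx_crypto_dormant_dbg());
+ */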
+
+#endif /* __CVMX_CRYPTO_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-crypto.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-csr-enums.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-csr-enums.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-csr-enums.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,186 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ * Definitions for enumerations used with Octeon CSRs.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+#ifndef __CVMX_CSR_ENUMS_H__
+#define __CVMX_CSR_ENUMS_H__
+
+typedef enum {
+ CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
+ CVMX_IPD_OPC_MODE_STF = 1LL, /* All blocks into L2 */
+ CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
+ CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
+} cvmx_ipd_mode_t;
+
+
+/**
+ * Enumeration representing the amount of packet processing
+ * and validation performed by the input hardware.
+ */
+typedef enum
+{
+ CVMX_PIP_PORT_CFG_MODE_NONE = 0ull, /**< Packet input doesn't perform any
+ processing of the input packet. */
+ CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,/**< Full packet processing is performed
+ with pointer starting at the L2
+ (ethernet MAC) header. */
+ CVMX_PIP_PORT_CFG_MODE_SKIPIP = 2ull /**< Input packets are assumed to be IP.
+                                            Results for non-IP packets are
+                                            undefined. Pointers reference the
+ beginning of the IP header. */
+} cvmx_pip_port_parse_mode_t;
+
+
+/**
+ * This enumeration controls how a QoS watcher matches a packet.
+ *
+ * @deprecated This enumeration was used with cvmx_pip_config_watcher which has
+ * been deprecated.
+ */
+typedef enum
+{
+    CVMX_PIP_QOS_WATCH_DISABLE = 0ull,  /**< QoS watcher is disabled */
+ CVMX_PIP_QOS_WATCH_PROTNH = 1ull, /**< QoS watcher will match based on the IP protocol */
+ CVMX_PIP_QOS_WATCH_TCP = 2ull, /**< QoS watcher will match TCP packets to a specific destination port */
+ CVMX_PIP_QOS_WATCH_UDP = 3ull /**< QoS watcher will match UDP packets to a specific destination port */
+} cvmx_pip_qos_watch_types;
+
+/**
+ * This enumeration is used in PIP tag config to control how
+ * POW tags are generated by the hardware.
+ */
+typedef enum
+{
+ CVMX_PIP_TAG_MODE_TUPLE = 0ull, /**< Always use tuple tag algorithm. This is the only mode supported on Pass 1 */
+ CVMX_PIP_TAG_MODE_MASK = 1ull, /**< Always use mask tag algorithm */
+ CVMX_PIP_TAG_MODE_IP_OR_MASK = 2ull, /**< If packet is IP, use tuple else use mask */
+ CVMX_PIP_TAG_MODE_TUPLE_XOR_MASK = 3ull /**< tuple XOR mask */
+} cvmx_pip_tag_mode_t;
+
+/**
+ * Tag type definitions
+ */
+typedef enum
+{
+ CVMX_POW_TAG_TYPE_ORDERED = 0L, /**< Tag ordering is maintained */
+ CVMX_POW_TAG_TYPE_ATOMIC = 1L, /**< Tag ordering is maintained, and at most one PP has the tag */
+ CVMX_POW_TAG_TYPE_NULL = 2L, /**< The work queue entry from the order
+ - NEVER tag switch from NULL to NULL */
+ CVMX_POW_TAG_TYPE_NULL_NULL = 3L /**< A tag switch to NULL, and there is no space reserved in POW
+ - NEVER tag switch to NULL_NULL
+ - NEVER tag switch from NULL_NULL
+ - NULL_NULL is entered at the beginning of time and on a deschedule.
+ - NULL_NULL can be exited by a new work request. A NULL_SWITCH load can also switch the state to NULL */
+} cvmx_pow_tag_type_t;
+
+
+/**
+ * LCR bits 0 and 1 control the number of bits per character. See the following table for encodings:
+ *
+ * - 00 = 5 bits (bits 0-4 sent)
+ * - 01 = 6 bits (bits 0-5 sent)
+ * - 10 = 7 bits (bits 0-6 sent)
+ * - 11 = 8 bits (all bits sent)
+ */
+typedef enum
+{
+ CVMX_UART_BITS5 = 0,
+ CVMX_UART_BITS6 = 1,
+ CVMX_UART_BITS7 = 2,
+ CVMX_UART_BITS8 = 3
+} cvmx_uart_bits_t;
+
+
+/**
+ * Interrupt Priority Interrupt Interrupt Interrupt
+ * ID Level Type Source Reset By
+ * ---------------------------------------------------------------------------------------------------------------------------------
+ * 0001 - None None -
+ *
+ * 0110 Highest Receiver Line Overrun, parity, or framing errors or break Reading the Line Status Register
+ * Status interrupt
+ *
+ * 0100 Second Received Data Receiver data available (FIFOs disabled) or Reading the Receiver Buffer Register
+ * Available RX FIFO trigger level reached (FIFOs (FIFOs disabled) or the FIFO drops below
+ * enabled) the trigger level (FIFOs enabled)
+ *
+ * 1100 Second Character No characters in or out of the RX FIFO Reading the Receiver Buffer Register
+ * Timeout during the last 4 character times and there
+ * Indication is at least 1 character in it during this
+ * time
+ *
+ * 0010 Third Transmitter Transmitter Holding Register Empty Reading the Interrupt Identity Register
+ * Holding (Programmable THRE Mode disabled) or TX (if source of interrupt) or writing into
+ * Register FIFO at or below threshold (Programmable THR (FIFOs or THRE Mode disabled) or TX
+ * Empty THRE Mode enabled) FIFO above threshold (FIFOs and THRE
+ * Mode enabled)
+ *
+ * 0000 Fourth Modem Status Clear To Send (CTS) or Data Set Ready (DSR) Reading the Modem Status Register
+ * Changed or Ring Indicator (RI) or Data Center
+ * Detect (DCD) changed
+ *
+ * 0111 Fifth Busy Detect Software has tried to write to the Line Reading the UART Status Register
+ * Indication Control Register while the BUSY bit of the
+ * UART Status Register was set
+ */
+typedef enum
+{
+ CVMX_UART_IID_NONE = 1,
+ CVMX_UART_IID_RX_ERROR = 6,
+ CVMX_UART_IID_RX_DATA = 4,
+ CVMX_UART_IID_RX_TIMEOUT = 12,
+ CVMX_UART_IID_TX_EMPTY = 2,
+ CVMX_UART_IID_MODEM = 0,
+ CVMX_UART_IID_BUSY = 7
+} cvmx_uart_iid_t;
+
+#endif /* __CVMX_CSR_ENUMS_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-csr-enums.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-csr-typedefs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-csr-typedefs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-csr-typedefs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,118 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Octeon. Include cvmx-csr.h instead of this file directly.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision: 69515 $<hr>
+ *
+ */
+#ifndef __CVMX_CSR_TYPEDEFS_H__
+#define __CVMX_CSR_TYPEDEFS_H__
+#include "cvmx-agl-defs.h"
+#include "cvmx-asxx-defs.h"
+#include "cvmx-asx0-defs.h"
+#include "cvmx-ciu2-defs.h"
+#include "cvmx-ciu-defs.h"
+#include "cvmx-dbg-defs.h"
+#include "cvmx-dfa-defs.h"
+#include "cvmx-dfm-defs.h"
+#include "cvmx-dpi-defs.h"
+#include "cvmx-endor-defs.h"
+#include "cvmx-eoi-defs.h"
+#include "cvmx-fpa-defs.h"
+#include "cvmx-gmxx-defs.h"
+#include "cvmx-gpio-defs.h"
+#include "cvmx-ilk-defs.h"
+#include "cvmx-iob1-defs.h"
+#include "cvmx-iob-defs.h"
+#include "cvmx-ipd-defs.h"
+#include "cvmx-key-defs.h"
+#include "cvmx-l2c-defs.h"
+#include "cvmx-l2d-defs.h"
+#include "cvmx-l2t-defs.h"
+#include "cvmx-led-defs.h"
+#include "cvmx-lmcx-defs.h"
+#include "cvmx-mio-defs.h"
+#include "cvmx-mixx-defs.h"
+#include "cvmx-mpi-defs.h"
+#include "cvmx-ndf-defs.h"
+#include "cvmx-npei-defs.h"
+#include "cvmx-npi-defs.h"
+#include "cvmx-pci-defs.h"
+#include "cvmx-pcieepx-defs.h"
+#include "cvmx-pciercx-defs.h"
+#include "cvmx-pcmx-defs.h"
+#include "cvmx-pcm-defs.h"
+#include "cvmx-pcsx-defs.h"
+#include "cvmx-pcsxx-defs.h"
+#include "cvmx-pemx-defs.h"
+#include "cvmx-pescx-defs.h"
+#include "cvmx-pip-defs.h"
+#include "cvmx-pko-defs.h"
+#include "cvmx-pow-defs.h"
+#include "cvmx-rad-defs.h"
+#include "cvmx-rnm-defs.h"
+#include "cvmx-sli-defs.h"
+#include "cvmx-smix-defs.h"
+#include "cvmx-smi-defs.h"
+#include "cvmx-spxx-defs.h"
+#include "cvmx-spx0-defs.h"
+#include "cvmx-sriox-defs.h"
+#include "cvmx-sriomaintx-defs.h"
+#include "cvmx-srxx-defs.h"
+#include "cvmx-sso-defs.h"
+#include "cvmx-stxx-defs.h"
+#include "cvmx-tim-defs.h"
+#include "cvmx-trax-defs.h"
+#include "cvmx-uahcx-defs.h"
+#include "cvmx-uctlx-defs.h"
+#include "cvmx-usbcx-defs.h"
+#include "cvmx-usbnx-defs.h"
+#include "cvmx-zip-defs.h"
+
+#include "cvmx-pexp-defs.h"
+#endif /* __CVMX_CSR_TYPEDEFS_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-csr-typedefs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-csr.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-csr.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-csr.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,224 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Octeon.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+#ifndef __CVMX_CSR_H__
+#define __CVMX_CSR_H__
+
+#ifndef CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENABLE_CSR_ADDRESS_CHECKING 0
+#endif
+
+#include "cvmx-platform.h"
+#include "cvmx-csr-enums.h"
+#include "cvmx-csr-typedefs.h"
+
+/* Map the HW names to the SDK historical names */
+typedef cvmx_ciu_intx_en1_t cvmx_ciu_int1_t;
+typedef cvmx_ciu_intx_sum0_t cvmx_ciu_intx0_t;
+typedef cvmx_ciu_mbox_setx_t cvmx_ciu_mbox_t;
+typedef cvmx_fpa_fpfx_marks_t cvmx_fpa_fpf_marks_t;
+typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que0_page_index_t;
+typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que1_page_index_t;
+typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que2_page_index_t;
+typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que3_page_index_t;
+typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que4_page_index_t;
+typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que5_page_index_t;
+typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que6_page_index_t;
+typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que7_page_index_t;
+typedef cvmx_ipd_1st_mbuff_skip_t cvmx_ipd_mbuff_first_skip_t;
+typedef cvmx_ipd_1st_next_ptr_back_t cvmx_ipd_first_next_ptr_back_t;
+typedef cvmx_ipd_packet_mbuff_size_t cvmx_ipd_mbuff_size_t;
+typedef cvmx_ipd_qosx_red_marks_t cvmx_ipd_qos_red_marks_t;
+typedef cvmx_ipd_wqe_fpa_queue_t cvmx_ipd_wqe_fpa_pool_t;
+typedef cvmx_l2c_pfcx_t cvmx_l2c_pfc0_t;
+typedef cvmx_l2c_pfcx_t cvmx_l2c_pfc1_t;
+typedef cvmx_l2c_pfcx_t cvmx_l2c_pfc2_t;
+typedef cvmx_l2c_pfcx_t cvmx_l2c_pfc3_t;
+typedef cvmx_lmcx_bist_ctl_t cvmx_lmc_bist_ctl_t;
+typedef cvmx_lmcx_bist_result_t cvmx_lmc_bist_result_t;
+typedef cvmx_lmcx_comp_ctl_t cvmx_lmc_comp_ctl_t;
+typedef cvmx_lmcx_ctl_t cvmx_lmc_ctl_t;
+typedef cvmx_lmcx_ctl1_t cvmx_lmc_ctl1_t;
+typedef cvmx_lmcx_dclk_cnt_hi_t cvmx_lmc_dclk_cnt_hi_t;
+typedef cvmx_lmcx_dclk_cnt_lo_t cvmx_lmc_dclk_cnt_lo_t;
+typedef cvmx_lmcx_dclk_ctl_t cvmx_lmc_dclk_ctl_t;
+typedef cvmx_lmcx_ddr2_ctl_t cvmx_lmc_ddr2_ctl_t;
+typedef cvmx_lmcx_delay_cfg_t cvmx_lmc_delay_cfg_t;
+typedef cvmx_lmcx_dll_ctl_t cvmx_lmc_dll_ctl_t;
+typedef cvmx_lmcx_dual_memcfg_t cvmx_lmc_dual_memcfg_t;
+typedef cvmx_lmcx_ecc_synd_t cvmx_lmc_ecc_synd_t;
+typedef cvmx_lmcx_fadr_t cvmx_lmc_fadr_t;
+typedef cvmx_lmcx_ifb_cnt_hi_t cvmx_lmc_ifb_cnt_hi_t;
+typedef cvmx_lmcx_ifb_cnt_lo_t cvmx_lmc_ifb_cnt_lo_t;
+typedef cvmx_lmcx_mem_cfg0_t cvmx_lmc_mem_cfg0_t;
+typedef cvmx_lmcx_mem_cfg1_t cvmx_lmc_mem_cfg1_t;
+typedef cvmx_lmcx_wodt_ctl0_t cvmx_lmc_odt_ctl_t;
+typedef cvmx_lmcx_ops_cnt_hi_t cvmx_lmc_ops_cnt_hi_t;
+typedef cvmx_lmcx_ops_cnt_lo_t cvmx_lmc_ops_cnt_lo_t;
+typedef cvmx_lmcx_pll_bwctl_t cvmx_lmc_pll_bwctl_t;
+typedef cvmx_lmcx_pll_ctl_t cvmx_lmc_pll_ctl_t;
+typedef cvmx_lmcx_pll_status_t cvmx_lmc_pll_status_t;
+typedef cvmx_lmcx_read_level_ctl_t cvmx_lmc_read_level_ctl_t;
+typedef cvmx_lmcx_read_level_dbg_t cvmx_lmc_read_level_dbg_t;
+typedef cvmx_lmcx_read_level_rankx_t cvmx_lmc_read_level_rankx_t;
+typedef cvmx_lmcx_rodt_comp_ctl_t cvmx_lmc_rodt_comp_ctl_t;
+typedef cvmx_lmcx_rodt_ctl_t cvmx_lmc_rodt_ctl_t;
+typedef cvmx_lmcx_wodt_ctl0_t cvmx_lmc_wodt_ctl_t;
+typedef cvmx_lmcx_wodt_ctl0_t cvmx_lmc_wodt_ctl0_t;
+typedef cvmx_lmcx_wodt_ctl1_t cvmx_lmc_wodt_ctl1_t;
+typedef cvmx_mio_boot_reg_cfgx_t cvmx_mio_boot_reg_cfg0_t;
+typedef cvmx_mio_boot_reg_timx_t cvmx_mio_boot_reg_tim0_t;
+typedef cvmx_mio_twsx_int_t cvmx_mio_tws_int_t;
+typedef cvmx_mio_twsx_sw_twsi_t cvmx_mio_tws_sw_twsi_t;
+typedef cvmx_mio_twsx_sw_twsi_ext_t cvmx_mio_tws_sw_twsi_ext_t;
+typedef cvmx_mio_twsx_twsi_sw_t cvmx_mio_tws_twsi_sw_t;
+typedef cvmx_npi_base_addr_inputx_t cvmx_npi_base_addr_input_t;
+typedef cvmx_npi_base_addr_outputx_t cvmx_npi_base_addr_output_t;
+typedef cvmx_npi_buff_size_outputx_t cvmx_npi_buff_size_output_t;
+typedef cvmx_npi_dma_highp_counts_t cvmx_npi_dma_counts_t;
+typedef cvmx_npi_dma_highp_naddr_t cvmx_npi_dma_naddr_t;
+typedef cvmx_npi_highp_dbell_t cvmx_npi_dbell_t;
+typedef cvmx_npi_highp_ibuff_saddr_t cvmx_npi_dma_ibuff_saddr_t;
+typedef cvmx_npi_mem_access_subidx_t cvmx_npi_mem_access_subid_t;
+typedef cvmx_npi_num_desc_outputx_t cvmx_npi_num_desc_output_t;
+typedef cvmx_npi_px_dbpair_addr_t cvmx_npi_dbpair_addr_t;
+typedef cvmx_npi_px_instr_addr_t cvmx_npi_instr_addr_t;
+typedef cvmx_npi_px_instr_cnts_t cvmx_npi_instr_cnts_t;
+typedef cvmx_npi_px_pair_cnts_t cvmx_npi_pair_cnts_t;
+typedef cvmx_npi_size_inputx_t cvmx_npi_size_input_t;
+typedef cvmx_pci_dbellx_t cvmx_pci_dbell_t;
+typedef cvmx_pci_dma_cntx_t cvmx_pci_dma_cnt_t;
+typedef cvmx_pci_dma_int_levx_t cvmx_pci_dma_int_lev_t;
+typedef cvmx_pci_dma_timex_t cvmx_pci_dma_time_t;
+typedef cvmx_pci_instr_countx_t cvmx_pci_instr_count_t;
+typedef cvmx_pci_pkt_creditsx_t cvmx_pci_pkt_credits_t;
+typedef cvmx_pci_pkts_sent_int_levx_t cvmx_pci_pkts_sent_int_lev_t;
+typedef cvmx_pci_pkts_sent_timex_t cvmx_pci_pkts_sent_time_t;
+typedef cvmx_pci_pkts_sentx_t cvmx_pci_pkts_sent_t;
+typedef cvmx_pip_prt_cfgx_t cvmx_pip_port_cfg_t;
+typedef cvmx_pip_prt_tagx_t cvmx_pip_port_tag_cfg_t;
+typedef cvmx_pip_qos_watchx_t cvmx_pip_port_watcher_cfg_t;
+typedef cvmx_pko_mem_queue_ptrs_t cvmx_pko_queue_cfg_t;
+typedef cvmx_pko_reg_cmd_buf_t cvmx_pko_pool_cfg_t;
+typedef cvmx_smix_clk_t cvmx_smi_clk_t;
+typedef cvmx_smix_cmd_t cvmx_smi_cmd_t;
+typedef cvmx_smix_en_t cvmx_smi_en_t;
+typedef cvmx_smix_rd_dat_t cvmx_smi_rd_dat_t;
+typedef cvmx_smix_wr_dat_t cvmx_smi_wr_dat_t;
+typedef cvmx_tim_reg_flags_t cvmx_tim_control_t;
+
+/* The CSRs for bootbus region zero used to be independent of the
+   other regions 1-7. As of SDK 1.7.0 these were combined. These macros
+   are for backwards compatibility */
+#define CVMX_MIO_BOOT_REG_CFG0 CVMX_MIO_BOOT_REG_CFGX(0)
+#define CVMX_MIO_BOOT_REG_TIM0 CVMX_MIO_BOOT_REG_TIMX(0)
+
+/* The CN3XXX and CN58XX chips did not originally have an LMC number
+   passed to the address macros. These are here to supply backwards
+   compatibility with old code. Code should really use the new addresses
+   with bus arguments for support on other chips */
+#define CVMX_LMC_BIST_CTL CVMX_LMCX_BIST_CTL(0)
+#define CVMX_LMC_BIST_RESULT CVMX_LMCX_BIST_RESULT(0)
+#define CVMX_LMC_COMP_CTL CVMX_LMCX_COMP_CTL(0)
+#define CVMX_LMC_CTL CVMX_LMCX_CTL(0)
+#define CVMX_LMC_CTL1 CVMX_LMCX_CTL1(0)
+#define CVMX_LMC_DCLK_CNT_HI CVMX_LMCX_DCLK_CNT_HI(0)
+#define CVMX_LMC_DCLK_CNT_LO CVMX_LMCX_DCLK_CNT_LO(0)
+#define CVMX_LMC_DCLK_CTL CVMX_LMCX_DCLK_CTL(0)
+#define CVMX_LMC_DDR2_CTL CVMX_LMCX_DDR2_CTL(0)
+#define CVMX_LMC_DELAY_CFG CVMX_LMCX_DELAY_CFG(0)
+#define CVMX_LMC_DLL_CTL CVMX_LMCX_DLL_CTL(0)
+#define CVMX_LMC_DUAL_MEMCFG CVMX_LMCX_DUAL_MEMCFG(0)
+#define CVMX_LMC_ECC_SYND CVMX_LMCX_ECC_SYND(0)
+#define CVMX_LMC_FADR CVMX_LMCX_FADR(0)
+#define CVMX_LMC_IFB_CNT_HI CVMX_LMCX_IFB_CNT_HI(0)
+#define CVMX_LMC_IFB_CNT_LO CVMX_LMCX_IFB_CNT_LO(0)
+#define CVMX_LMC_MEM_CFG0 CVMX_LMCX_MEM_CFG0(0)
+#define CVMX_LMC_MEM_CFG1 CVMX_LMCX_MEM_CFG1(0)
+#define CVMX_LMC_OPS_CNT_HI CVMX_LMCX_OPS_CNT_HI(0)
+#define CVMX_LMC_OPS_CNT_LO CVMX_LMCX_OPS_CNT_LO(0)
+#define CVMX_LMC_PLL_BWCTL CVMX_LMCX_PLL_BWCTL(0)
+#define CVMX_LMC_PLL_CTL CVMX_LMCX_PLL_CTL(0)
+#define CVMX_LMC_PLL_STATUS CVMX_LMCX_PLL_STATUS(0)
+#define CVMX_LMC_READ_LEVEL_CTL CVMX_LMCX_READ_LEVEL_CTL(0)
+#define CVMX_LMC_READ_LEVEL_DBG CVMX_LMCX_READ_LEVEL_DBG(0)
+#define CVMX_LMC_READ_LEVEL_RANKX CVMX_LMCX_READ_LEVEL_RANKX(0)
+#define CVMX_LMC_RODT_COMP_CTL CVMX_LMCX_RODT_COMP_CTL(0)
+#define CVMX_LMC_RODT_CTL CVMX_LMCX_RODT_CTL(0)
+#define CVMX_LMC_WODT_CTL CVMX_LMCX_WODT_CTL0(0)
+#define CVMX_LMC_WODT_CTL0 CVMX_LMCX_WODT_CTL0(0)
+#define CVMX_LMC_WODT_CTL1 CVMX_LMCX_WODT_CTL1(0)
+
+/* The CN3XXX and CN58XX chips did not originally have a TWSI bus number
+   passed to the address macros. These are here to supply backwards
+   compatibility with old code. Code should really use the new addresses
+   with bus arguments for support on other chips */
+#define CVMX_MIO_TWS_INT CVMX_MIO_TWSX_INT(0)
+#define CVMX_MIO_TWS_SW_TWSI CVMX_MIO_TWSX_SW_TWSI(0)
+#define CVMX_MIO_TWS_SW_TWSI_EXT CVMX_MIO_TWSX_SW_TWSI_EXT(0)
+#define CVMX_MIO_TWS_TWSI_SW CVMX_MIO_TWSX_TWSI_SW(0)
+
+/* The CN3XXX and CN58XX chips did not originally have an SMI/MDIO bus number
+   passed to the address macros. These are here to supply backwards
+   compatibility with old code. Code should really use the new addresses
+   with bus arguments for support on other chips */
+#define CVMX_SMI_CLK CVMX_SMIX_CLK(0)
+#define CVMX_SMI_CMD CVMX_SMIX_CMD(0)
+#define CVMX_SMI_EN CVMX_SMIX_EN(0)
+#define CVMX_SMI_RD_DAT CVMX_SMIX_RD_DAT(0)
+#define CVMX_SMI_WR_DAT CVMX_SMIX_WR_DAT(0)
+
+#endif /* __CVMX_CSR_H__ */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-csr.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-dbg-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-dbg-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-dbg-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,152 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-dbg-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon dbg.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_DBG_DEFS_H__
+#define __CVMX_DBG_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DBG_DATA CVMX_DBG_DATA_FUNC()
+static inline uint64_t CVMX_DBG_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DBG_DATA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000001E8ull);
+}
+#else
+#define CVMX_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F00000001E8ull))
+#endif
+
+/**
+ * cvmx_dbg_data
+ *
+ * DBG_DATA = Debug Data Register
+ *
+ * Value returned on the debug-data lines from the RSLs
+ */
+union cvmx_dbg_data {
+ uint64_t u64;
+ struct cvmx_dbg_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t c_mul : 5;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_dbg_data_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t pll_mul : 3; /**< pll_mul pins sampled at DCOK assertion */
+ uint64_t reserved_23_27 : 5;
+ uint64_t c_mul : 5; /**< Core PLL multiplier sampled at DCOK assertion */
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t c_mul : 5;
+ uint64_t reserved_23_27 : 5;
+ uint64_t pll_mul : 3;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn30xx;
+ struct cvmx_dbg_data_cn30xx cn31xx;
+ struct cvmx_dbg_data_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t d_mul : 4; /**< D_MUL pins sampled on DCOK assertion */
+ uint64_t dclk_mul2 : 1; /**< Should always be set for fast DDR-II operation */
+ uint64_t cclk_div2 : 1; /**< Should always be clear for fast core clock */
+ uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t c_mul : 5;
+ uint64_t cclk_div2 : 1;
+ uint64_t dclk_mul2 : 1;
+ uint64_t d_mul : 4;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn38xx;
+ struct cvmx_dbg_data_cn38xx cn38xxp2;
+ struct cvmx_dbg_data_cn30xx cn50xx;
+ struct cvmx_dbg_data_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t rem : 6; /**< Remaining debug_select pins sampled at DCOK */
+ uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t c_mul : 5;
+ uint64_t rem : 6;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn58xx;
+ struct cvmx_dbg_data_cn58xx cn58xxp1;
+};
+typedef union cvmx_dbg_data cvmx_dbg_data_t;
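+
+/*
+ * Illustrative read sketch (not part of the generated header), using
+ * the SDK's cvmx_read_csr() accessor with the address macro above:
+ *
+ *     cvmx_dbg_data_t dbg;
+ *
+ *     dbg.u64 = cvmx_read_csr(CVMX_DBG_DATA);
+ *     cvmx_dprintf("debug data lines: 0x%05x\n", (unsigned)dbg.s.data);
+ */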
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-dbg-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-debug-handler.S
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-debug-handler.S (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-debug-handler.S 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,279 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+#undef __ASSEMBLY__
+#define __ASSEMBLY__
+
+#ifdef __linux__
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#else
+#include <machine/asm.h>
+#include <machine/regdef.h>
+#endif
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx-asm.h>
+#include <asm/octeon/octeon-boot-info.h>
+#else
+
+#include "cvmx-asm.h"
+
+#ifndef _OCTEON_TOOLCHAIN_RUNTIME
+#include <octeon_mem_map.h>
+#else
+#include "cvmx-platform.h"
+#include "octeon-boot-info.h"
+#endif
+
+#endif
+
+/* Register saving/restoring is split in two because k0 is stored in the COP0_DESAVE register. */
+#define REGS0 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25
+#define REGS1 27,28,29,30,31
+
+#define SAVE_REGISTER(reg) \
+ sd reg, 0(k0); \
+ addi k0, 8
+
+#define RESTORE_REGISTER(reg) \
+ ld reg, -8(k0); \
+ addi k0, -8
+
+#define SAVE_COP0(reg) \
+ dmfc0 k1,reg; \
+ sd k1, 0(k0); \
+ addi k0, 8
+
+#define RESTORE_COP0(reg) \
+ ld k1, -8(k0); \
+ addi k0, -8; \
+ dmtc0 k1,reg
+
+#define SAVE_ADDRESS(addr) \
+ dli k1, addr; \
+ ld k1, 0(k1); \
+ sd k1, 0(k0); \
+ addi k0, 8
+
+#define RESTORE_ADDRESS(addr) \
+ dli t0, addr; \
+ ld k1, -8(k0); \
+ sd k1, 0(t0); \
+ addi k0, -8
+
+#define REG_SAVE_BASE_DIV_8 (BOOTLOADER_DEBUG_REG_SAVE_BASE >> 3)
+
+
+#define HW_INSTRUCTION_BREAKPOINT_STATUS (0xFFFFFFFFFF301000)
+#define HW_INSTRUCTION_BREAKPOINT_ADDRESS(num) (0xFFFFFFFFFF301100 + 0x100 * (num))
+#define HW_INSTRUCTION_BREAKPOINT_ADDRESS_MASK(num) (0xFFFFFFFFFF301108 + 0x100 * (num))
+#define HW_INSTRUCTION_BREAKPOINT_ASID(num) (0xFFFFFFFFFF301110 + 0x100 * (num))
+#define HW_INSTRUCTION_BREAKPOINT_CONTROL(num) (0xFFFFFFFFFF301118 + 0x100 * (num))
+
+#define HW_DATA_BREAKPOINT_STATUS (0xFFFFFFFFFF302000)
+#define HW_DATA_BREAKPOINT_ADDRESS(num) (0xFFFFFFFFFF302100 + 0x100 * (num))
+#define HW_DATA_BREAKPOINT_ADDRESS_MASK(num) (0xFFFFFFFFFF302108 + 0x100 * (num))
+#define HW_DATA_BREAKPOINT_ASID(num) (0xFFFFFFFFFF302110 + 0x100 * (num))
+#define HW_DATA_BREAKPOINT_CONTROL(num) (0xFFFFFFFFFF302118 + 0x100 * (num))
+
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#define loadaddr(reg, addr, shift) \
+ dla reg, addr##_all; \
+ mfc0 $1, $15, 1; \
+ andi $1, 0xff; \
+ sll $1, shift; \
+ add reg, reg, $1
+#else
+#define loadaddr(reg, addr, shift) \
+ dla reg, addr
+#endif
+
+
+ .set noreorder
+ .set noat
+
+ .text
+
+// Detect debug-mode exception, save all registers, create a stack and then
+// call the stage3 C function.
+
+ .ent __cvmx_debug_handler_stage2
+ .globl __cvmx_debug_handler_stage2
+__cvmx_debug_handler_stage2:
+ // Save off k0 in COP0_DESAVE
+ dmtc0 k0, COP0_DESAVE
+
+ // Use reserved space in kseg0 to save off some temp regs
+ mfc0 k0, $15, 1 // read exception base reg.
+ andi k0, 0xff // mask off core ID
+    sll  k0, 12     // multiply by 4096 bytes (512 dwords, DEBUG_NUMREGS)
+
+ addiu k0, REG_SAVE_BASE_DIV_8
+ addiu k0, REG_SAVE_BASE_DIV_8
+ addiu k0, REG_SAVE_BASE_DIV_8
+ addiu k0, REG_SAVE_BASE_DIV_8
+ addiu k0, REG_SAVE_BASE_DIV_8
+ addiu k0, REG_SAVE_BASE_DIV_8
+ addiu k0, REG_SAVE_BASE_DIV_8
+ addiu k0, REG_SAVE_BASE_DIV_8
+    // add base offset - after the exception vectors for all cores
+    // (added in eight steps so each fits in addiu's 16-bit immediate)
+
+ rotr k0, k0, 31 // set bit 31 for kseg0 access
+ addi k0, 1
+ rotr k0, k0, 1
+
+ // save off k1 and at ($1) off to the bootloader reg save area
+ // at is used by dla
+ sd $1, 8(k0) // save at for temp usage
+ sd k1, 216(k0) // save k1 for temp usage
+
+
+ // Detect debug-mode exception.
+ // If COP0_MULTICOREDEBUG[DExecC] is set,
+ dmfc0 k1, COP0_MULTICOREDEBUG
+ bbit0 k1, 16, noexc
+ nop
+
+ // COP0_DEBUG[DINT,DIB,DDBS,DBp,DSS] are not set and
+ dmfc0 k1, COP0_DEBUG
+ andi k1, 0x3f
+ bnez k1, noexc
+ nop
+
+ // COP0_DEBUG[DExecC] is set.
+ dmfc0 k1, COP0_DEBUG
+ dext k1,k1,10,5
+ beqz k1,noexc
+ nop
+
+    // We don't handle debug-mode exceptions in delay slots, so DEBUG[DBD]
+    // should not be set. If it is, spin forever.
+ dmfc0 k1, COP0_DEBUG
+1:
+ bbit1 k1, 31, 1b
+ nop
+
+    // It's a debug-mode exception. Flag the occurrence. If it is
+    // expected, just ignore it and return to the instruction following
+    // the faulting one.
+
+ loadaddr (k1, __cvmx_debug_mode_exception_occured, 3)
+ sd k1, 0(k1)
+
+ loadaddr (k1, __cvmx_debug_mode_exception_ignore, 3)
+ ld k1, 0(k1)
+ beqz k1, noexc
+ nop
+
+ // Restore k1 and at from the bootloader reg save area
+    ld $1, 8(k0)   // restore at from temp save
+    ld k1, 216(k0) // restore k1 from temp save
+
+ dmfc0 k0, COP0_DEPC
+ // Skip the faulting instruction.
+ daddiu k0, 4
+ jr k0
+ dmfc0 k0, COP0_DESAVE
+
+noexc:
+
+ loadaddr (k1, __cvmx_debug_save_regs_area, 8)
+
+ // Restore at
+ ld $1, 8(k0) // restore at for temp usage
+
+ .irp n, REGS0
+ sd $\n, 0(k1)
+ addiu k1, 8
+ .endr
+
+ move $25, k1
+ ld k1, 216(k0) // restore k1 for temp usage
+ move k0, $25
+
+ // Store out k0, we can use $25 here because we just saved it
+ dmfc0 $25, COP0_DESAVE
+ sd $25, 0(k0)
+ addiu k0, 8
+
+ .irp n, REGS1
+ sd $\n, 0(k0)
+ addiu k0, 8
+ .endr
+
+ loadaddr(sp, __cvmx_debug_stack_top, 3)
+ // Load the stack pointer as a pointer size.
+#ifdef _ABIN32
+ lw sp,0(sp)
+#else
+ ld sp,0(sp)
+#endif
+ mflo $4
+ mfhi $5
+ jal __cvmx_debug_handler_stage3
+ nop
+
+ loadaddr(k0, __cvmx_debug_save_regs_area, 8)
+
+ .irp n, REGS0
+ ld $\n, 0(k0)
+ addiu k0, 8
+ .endr
+
+ // Restore k0 to COP0_DESAVE via k1
+ ld k1, 0(k0)
+ addiu k0, 8
+ dmtc0 k1, COP0_DESAVE
+
+ .irp n, REGS1
+ ld $\n, 0(k0)
+ addiu k0, 8
+ .endr
+
+ dmfc0 k0, COP0_DESAVE
+ // Flush the icache; by adding and removing SW breakpoints we change
+ // the instruction stream.
+ synci 0($0)
+ deret
+ nop
+
+ .end __cvmx_debug_handler_stage2
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-debug-handler.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-debug-remote.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-debug-remote.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-debug-remote.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,95 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-debug.h>
+
+#define cvmx_interrupt_in_isr 0
+
+#else
+#include "cvmx.h"
+#include "cvmx-debug.h"
+
+#ifndef CVMX_BUILD_FOR_TOOLCHAIN
+extern int cvmx_interrupt_in_isr;
+#else
+#define cvmx_interrupt_in_isr 0
+#endif
+
+#endif
+
+
+static void cvmx_debug_remote_mem_wait_for_resume(volatile cvmx_debug_core_context_t *context, cvmx_debug_state_t state)
+{
+ //
+ // If we are stepping and not stepping into an interrupt and the debug
+ // exception happened in an interrupt, continue the execution.
+ //
+ if(!state.step_isr &&
+ (context->cop0.debug & 0x1) && /* Single stepping */
+ !(context->cop0.debug & 0x1e) && /* Did not hit a breakpoint */
+ ((context->cop0.status & 0x2) || cvmx_interrupt_in_isr))
+ return;
+
+ context->remote_controlled = 1;
+ CVMX_SYNCW;
+ while (context->remote_controlled)
+ ;
+ CVMX_SYNCW;
+}
+
+static void cvmx_debug_memory_change_core(int oldcore, int newcore)
+{
+ /* FIXME, this should change the core on the host side too. */
+}
+
+cvmx_debug_comm_t cvmx_debug_remote_comm =
+{
+ .init = NULL,
+ .install_break_handler = NULL,
+ .needs_proxy = 0,
+ .getpacket = NULL,
+ .putpacket = NULL,
+ .wait_for_resume = cvmx_debug_remote_mem_wait_for_resume,
+ .change_core = cvmx_debug_memory_change_core,
+};
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-debug-remote.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-debug-uart.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-debug-uart.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-debug-uart.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,268 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-debug.h>
+#include <asm/octeon/cvmx-uart.h>
+#include <asm/octeon/octeon-boot-info.h>
+#include <asm/octeon/cvmx-spinlock.h>
+
+int cvmx_debug_uart = 1;
+
+#else
+#include <limits.h>
+#include "executive-config.h"
+#include "cvmx.h"
+#include "cvmx-debug.h"
+#include "cvmx-uart.h"
+#include "cvmx-spinlock.h"
+#include "octeon-boot-info.h"
+#endif
+
+/*
+ * NOTE: CARE SHOULD BE TAKEN USING STD C LIBRARY FUNCTIONS IN
+ * THIS FILE; IF SOMEONE PUTS A BREAKPOINT ON THOSE FUNCTIONS,
+ * DEBUGGING WILL FAIL.
+ */
+
+
+#ifdef CVMX_BUILD_FOR_TOOLCHAIN
+#pragma weak cvmx_uart_enable_intr
+int cvmx_debug_uart = 1;
+#endif
+
+
+/* Default to the second UART port for backward compatibility. The default (if
+ -debug does not set the uart number) can now be overridden with
+ CVMX_DEBUG_COMM_UART_NUM. */
+#ifndef CVMX_DEBUG_COMM_UART_NUM
+# define CVMX_DEBUG_COMM_UART_NUM 1
+#endif
+
+static CVMX_SHARED cvmx_spinlock_t cvmx_debug_uart_lock;
+
+/**
+ * Interrupt handler for debugger Control-C interrupts.
+ *
+ * @param irq_number IRQ interrupt number
+ * @param registers CPU registers at the time of the interrupt
+ * @param user_arg Unused user argument
+ */
+void cvmx_debug_uart_process_debug_interrupt(int irq_number, uint64_t registers[32], void *user_arg)
+{
+ cvmx_uart_lsr_t lsrval;
+
+ /* Check for a Control-C interrupt from the debugger. This loop will eat
+       all input received on the UART */
+ lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(cvmx_debug_uart));
+ while (lsrval.s.dr)
+ {
+ int c = cvmx_read_csr(CVMX_MIO_UARTX_RBR(cvmx_debug_uart));
+ if (c == '\003')
+ {
+ register uint64_t tmp;
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+ fflush(stderr);
+ fflush(stdout);
+#endif
+ /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also
+ set the MCD0 to be not masked by this core so we know
+ the signal is received by someone */
+ asm volatile (
+ "dmfc0 %0, $22\n"
+ "ori %0, %0, 0x1110\n"
+ "dmtc0 %0, $22\n"
+ : "=r" (tmp));
+ }
+ lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(cvmx_debug_uart));
+ }
+}
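
The inline asm above pokes CP0 register 22 (Octeon's MulticoreDebug register) to pulse MCD0 and stop all cores. A minimal standalone sketch of the same sequence, assuming only what the comment states about the 0x1110 bit pattern (the exact register-22 bit layout is not spelled out in this file):

    static inline void pulse_mcd0(void)
    {
        uint64_t tmp;
        asm volatile (
            "dmfc0 %0, $22\n"        /* read MulticoreDebug (CP0 reg 22) */
            "ori   %0, %0, 0x1110\n" /* set the MCD0 pulse/unmask bits */
            "dmtc0 %0, $22\n"        /* write it back */
            : "=r" (tmp));
    }
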
+
+
+static void cvmx_debug_uart_init(void)
+{
+ if (cvmx_debug_uart == -1)
+ cvmx_debug_uart = CVMX_DEBUG_COMM_UART_NUM;
+}
+
+static void cvmx_debug_uart_install_break_handler(void)
+{
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+#ifdef CVMX_BUILD_FOR_TOOLCHAIN
+ if (cvmx_uart_enable_intr)
+#endif
+ cvmx_uart_enable_intr(cvmx_debug_uart, cvmx_debug_uart_process_debug_interrupt);
+#endif
+}
+
+/**
+ * Convert a hex character to its value.
+ *
+ * @param ch Hex character ('0'-'9', 'a'-'f', 'A'-'F')
+ * @return   Value 0-15, or -1 if ch is not a hex digit
+ */
+static inline int cvmx_debug_uart_hex(char ch)
+{
+ if ((ch >= 'a') && (ch <= 'f'))
+ return(ch - 'a' + 10);
+ if ((ch >= '0') && (ch <= '9'))
+ return(ch - '0');
+ if ((ch >= 'A') && (ch <= 'F'))
+ return(ch - 'A' + 10);
+ return(-1);
+}
+
+/* Get a packet from the UART, return 0 on failure and 1 on success. */
+
+static int cvmx_debug_uart_getpacket(char *buffer, size_t size)
+{
+ while (1)
+ {
+ unsigned char checksum;
+ int timedout = 0;
+ size_t count;
+ char ch;
+
+ ch = cvmx_uart_read_byte_with_timeout(cvmx_debug_uart, &timedout, __SHRT_MAX__);
+
+ if (timedout)
+ return 0;
+
+ /* if this is not the start character, ignore it. */
+ if (ch != '$')
+ continue;
+
+ retry:
+ checksum = 0;
+ count = 0;
+
+ /* now, read until a # or end of buffer is found */
+ while (count < size)
+ {
+ ch = cvmx_uart_read_byte(cvmx_debug_uart);
+ if (ch == '$')
+ goto retry;
+ if (ch == '#')
+ break;
+ checksum = checksum + ch;
+ buffer[count] = ch;
+ count = count + 1;
+ }
+ buffer[count] = 0;
+
+ if (ch == '#')
+ {
+ char csumchars0, csumchars1;
+ unsigned xmitcsum;
+ int n0, n1;
+
+ csumchars0 = cvmx_uart_read_byte(cvmx_debug_uart);
+ csumchars1 = cvmx_uart_read_byte(cvmx_debug_uart);
+ n0 = cvmx_debug_uart_hex(csumchars0);
+ n1 = cvmx_debug_uart_hex(csumchars1);
+ if (n0 == -1 || n1 == -1)
+ return 0;
+
+ xmitcsum = (n0 << 4) | n1;
+ return checksum == xmitcsum;
+ }
+ }
+ return 0;
+}
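
The frame handled above is GDB-style remote serial framing: `$<payload>#<two hex digits>`, where the trailing digits are the low 8 bits of the byte sum of the payload. A small host-side sketch (hypothetical helper, not part of the SDK) that produces frames this receiver accepts:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Frame a payload as "$<payload>#<cksum>"; cksum is the low 8 bits of
       the byte sum, matching the check in cvmx_debug_uart_getpacket. */
    static int frame_packet(char *out, size_t outsize, const char *payload)
    {
        size_t len = strlen(payload);
        unsigned char csum = 0;
        size_t i;

        if (len + 5 > outsize)   /* '$' + payload + '#' + 2 digits + NUL */
            return 0;
        for (i = 0; i < len; i++)
            csum += (unsigned char)payload[i];
        snprintf(out, outsize, "$%s#%02X", payload, csum);
        return 1;
    }

    int main(void)
    {
        char buf[64];
        if (frame_packet(buf, sizeof(buf), "m80000000,4"))
            printf("%s\n", buf);   /* prints $m80000000,4#55 */
        return 0;
    }
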
+
+/* Put the hex value of t into str. */
+static void cvmx_debug_uart_strhex(char *str, unsigned char t)
+{
+ char hexchar[] = "0123456789ABCDEF";
+ str[0] = hexchar[(t>>4)];
+ str[1] = hexchar[t&0xF];
+ str[2] = 0;
+}
+
+static int cvmx_debug_uart_putpacket(char *packet)
+{
+ size_t i;
+ unsigned char csum;
+ unsigned char *ptr = (unsigned char *) packet;
+ char csumstr[3];
+
+ for (csum = 0, i = 0; ptr[i]; i++)
+ csum += ptr[i];
+ cvmx_debug_uart_strhex(csumstr, csum);
+
+ cvmx_spinlock_lock(&cvmx_debug_uart_lock);
+ cvmx_uart_write_byte(cvmx_debug_uart, '$');
+ cvmx_uart_write_string(cvmx_debug_uart, packet);
+ cvmx_uart_write_byte(cvmx_debug_uart, '#');
+ cvmx_uart_write_string(cvmx_debug_uart, csumstr);
+ cvmx_spinlock_unlock(&cvmx_debug_uart_lock);
+
+ return 0;
+}
+
+static void cvmx_debug_uart_change_core(int oldcore, int newcore)
+{
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+ cvmx_ciu_intx0_t irq_control;
+
+ irq_control.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(newcore * 2));
+ irq_control.s.uart |= (1u<<cvmx_debug_uart);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(newcore * 2), irq_control.u64);
+
+ /* Disable interrupts to this core since it is about to die */
+ irq_control.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(oldcore * 2));
+ irq_control.s.uart &= ~(1u<<cvmx_debug_uart);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(oldcore * 2), irq_control.u64);
+#endif
+}
+
+cvmx_debug_comm_t cvmx_debug_uart_comm =
+{
+ .init = cvmx_debug_uart_init,
+ .install_break_handler = cvmx_debug_uart_install_break_handler,
+ .needs_proxy = 1,
+ .getpacket = cvmx_debug_uart_getpacket,
+ .putpacket = cvmx_debug_uart_putpacket,
+ .wait_for_resume = NULL,
+ .change_core = cvmx_debug_uart_change_core,
+};
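
cvmx_debug_uart_comm fills in the transport vtable that cvmx-debug.c (below) drives. A hypothetical skeleton for wiring up another transport, assuming only the fields visible in this file (the real cvmx_debug_comm_t may carry more members):

    /* Hypothetical alternative transport; signatures copied from the
       static functions above. */
    static void my_comm_init(void) { /* bring up the channel */ }
    static int my_comm_getpacket(char *buffer, size_t size)
    {
        (void)buffer; (void)size;
        return 0;   /* 0 = no packet, 1 = packet received, as above */
    }
    static int my_comm_putpacket(char *packet) { (void)packet; return 0; }
    static void my_comm_change_core(int oldcore, int newcore)
    {
        (void)oldcore; (void)newcore;  /* reroute input to the new focus core */
    }

    cvmx_debug_comm_t my_debug_comm =
    {
        .init = my_comm_init,
        .install_break_handler = NULL,  /* optional, see the uart version */
        .needs_proxy = 1,               /* focus core proxies for the others */
        .getpacket = my_comm_getpacket,
        .putpacket = my_comm_putpacket,
        .wait_for_resume = NULL,        /* only used when needs_proxy == 0 */
        .change_core = my_comm_change_core,
    };
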
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-debug-uart.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-debug.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-debug.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-debug.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1601 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/*
+ * @file
+ *
+ * Interface to debug exception handler
+ * NOTE: CARE SHOULD BE TAKEN WHEN USING STD C LIBRARY FUNCTIONS IN
+ * THIS FILE; IF SOMEONE PUTS A BREAKPOINT ON THOSE FUNCTIONS,
+ * DEBUGGING WILL FAIL.
+ *
+ * <hr>$Revision: 50060 $<hr>
+ */
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-debug.h>
+#include <asm/octeon/cvmx-core.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#include <asm/octeon/octeon-boot-info.h>
+#else
+#include <stdint.h>
+#include "cvmx.h"
+#include "cvmx-debug.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-core.h"
+#include "cvmx-coremask.h"
+#include "octeon-boot-info.h"
+#endif
+
+#ifdef CVMX_DEBUG_LOGGING
+# undef CVMX_DEBUG_LOGGING
+# define CVMX_DEBUG_LOGGING 1
+#else
+# define CVMX_DEBUG_LOGGING 0
+#endif
+
+#ifndef CVMX_DEBUG_ATTACH
+# define CVMX_DEBUG_ATTACH 1
+#endif
+
+#define CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_STATUS (0xFFFFFFFFFF301000ull)
+#define CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_ADDRESS(num) (0xFFFFFFFFFF301100ull + 0x100 * (num))
+#define CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_ADDRESS_MASK(num) (0xFFFFFFFFFF301108ull + 0x100 * (num))
+#define CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_ASID(num) (0xFFFFFFFFFF301110ull + 0x100 * (num))
+#define CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_CONTROL(num) (0xFFFFFFFFFF301118ull + 0x100 * (num))
+
+#define CVMX_DEBUG_HW_DATA_BREAKPOINT_STATUS (0xFFFFFFFFFF302000ull)
+#define CVMX_DEBUG_HW_DATA_BREAKPOINT_ADDRESS(num) (0xFFFFFFFFFF302100ull + 0x100 * (num))
+#define CVMX_DEBUG_HW_DATA_BREAKPOINT_ADDRESS_MASK(num) (0xFFFFFFFFFF302108ull + 0x100 * (num))
+#define CVMX_DEBUG_HW_DATA_BREAKPOINT_ASID(num) (0xFFFFFFFFFF302110ull + 0x100 * (num))
+#define CVMX_DEBUG_HW_DATA_BREAKPOINT_CONTROL(num) (0xFFFFFFFFFF302118ull + 0x100 * (num))
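
These register blocks sit at a fixed 0x100-byte stride, so each macro is just base-plus-stride arithmetic. A worked expansion under that assumption:

    /* Worked expansion of the stride arithmetic above. */
    #include <assert.h>
    static void check_bp_layout(void)
    {
        assert(CVMX_DEBUG_HW_DATA_BREAKPOINT_ADDRESS(0) == 0xFFFFFFFFFF302100ull);
        assert(CVMX_DEBUG_HW_DATA_BREAKPOINT_ADDRESS(3) == 0xFFFFFFFFFF302400ull);
        assert(CVMX_DEBUG_HW_DATA_BREAKPOINT_CONTROL(1) == 0xFFFFFFFFFF302218ull);
    }
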
+
+#define ERET_INSN 0x42000018U /* Hexcode for eret */
+#define ISR_DELAY_COUNTER 120000000 /* Could be tuned down */
+
+extern cvmx_debug_comm_t cvmx_debug_uart_comm;
+extern cvmx_debug_comm_t cvmx_debug_remote_comm;
+static const cvmx_debug_comm_t *cvmx_debug_comms[COMM_SIZE] = {&cvmx_debug_uart_comm, &cvmx_debug_remote_comm};
+
+
+
+static cvmx_debug_globals_t *cvmx_debug_globals;
+
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+uint64_t __cvmx_debug_save_regs_area[32];
+
+volatile uint64_t __cvmx_debug_mode_exception_ignore;
+volatile uint64_t __cvmx_debug_mode_exception_occured;
+
+static char cvmx_debug_stack[8*1024] __attribute__ ((aligned (16)));
+char *__cvmx_debug_stack_top = &cvmx_debug_stack[8*1024];
+
+#ifndef CVMX_BUILD_FOR_TOOLCHAIN
+extern int cvmx_interrupt_in_isr;
+#else
+#define cvmx_interrupt_in_isr 0
+#endif
+
+#else
+uint64_t __cvmx_debug_save_regs_area_all[CVMX_MAX_CORES][32];
+#define __cvmx_debug_save_regs_area __cvmx_debug_save_regs_area_all[cvmx_get_core_num()]
+
+volatile uint64_t __cvmx_debug_mode_exception_ignore_all[CVMX_MAX_CORES];
+#define __cvmx_debug_mode_exception_ignore __cvmx_debug_mode_exception_ignore_all[cvmx_get_core_num()]
+volatile uint64_t __cvmx_debug_mode_exception_occured_all[CVMX_MAX_CORES];
+#define __cvmx_debug_mode_exception_occured __cvmx_debug_mode_exception_occured_all[cvmx_get_core_num()]
+
+static char cvmx_debug_stack_all[CVMX_MAX_CORES][8*1024] __attribute__ ((aligned (16)));
+char *__cvmx_debug_stack_top_all[CVMX_MAX_CORES];
+
+#define cvmx_interrupt_in_isr 0
+
+#endif
+
+
+static size_t cvmx_debug_strlen (const char *str)
+{
+ size_t size = 0;
+ while (*str)
+ {
+ size++;
+ str++;
+ }
+ return size;
+}
+static void cvmx_debug_strcpy (char *dest, const char *src)
+{
+ while (*src)
+ {
+ *dest = *src;
+ src++;
+ dest++;
+ }
+ *dest = 0;
+}
+
+static void cvmx_debug_memcpy_align (void *dest, const void *src, int size) __attribute__ ((__noinline__));
+static void cvmx_debug_memcpy_align (void *dest, const void *src, int size)
+{
+ long long *dest1 = (long long*)dest;
+ const long long *src1 = (const long long*)src;
+ int i;
+ if (size == 40)
+ {
+ long long a0, a1, a2, a3, a4;
+ a0 = src1[0];
+ a1 = src1[1];
+ a2 = src1[2];
+ a3 = src1[3];
+ a4 = src1[4];
+ dest1[0] = a0;
+ dest1[1] = a1;
+ dest1[2] = a2;
+ dest1[3] = a3;
+ dest1[4] = a4;
+ return;
+ }
+ for(i = 0;i < size;i+=8)
+ {
+ *dest1 = *src1;
+ dest1++;
+ src1++;
+ }
+}
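
The copy runs in whole 64-bit words, with the 40-byte case (the size of cvmx_debug_state_t, judging by its use below) fully unrolled, so concurrent readers of the shared state never observe a torn sub-word store. A usage sketch under that assumption:

    /* Usage sketch: both pointers must be 8-byte aligned and the size a
       multiple of 8, since every transfer above is a 64-bit load/store. */
    static void example_state_copy(void)
    {
        uint64_t src[5] = {1, 2, 3, 4, 5}; /* 40 bytes: takes the unrolled path */
        uint64_t dst[5];
        cvmx_debug_memcpy_align(dst, src, sizeof(src));
    }
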
+
+
+static inline uint32_t cvmx_debug_core_mask(void)
+{
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+#ifdef CVMX_BUILD_FOR_TOOLCHAIN
+ extern int __octeon_core_mask;
+ return __octeon_core_mask;
+#endif
+ return cvmx_sysinfo_get()->core_mask;
+#else
+ return octeon_get_boot_coremask ();
+#endif
+}
+
+static inline void cvmx_debug_update_state(cvmx_debug_state_t state)
+{
+ cvmx_debug_memcpy_align(cvmx_debug_globals->state, &state, sizeof(cvmx_debug_state_t));
+}
+
+static inline cvmx_debug_state_t cvmx_debug_get_state(void)
+{
+ cvmx_debug_state_t state;
+ cvmx_debug_memcpy_align(&state, cvmx_debug_globals->state, sizeof(cvmx_debug_state_t));
+ return state;
+}
+
+static void cvmx_debug_printf(char *format, ...) __attribute__((format(__printf__, 1, 2)));
+static void cvmx_debug_printf(char *format, ...)
+{
+ va_list ap;
+
+ if (!CVMX_DEBUG_LOGGING)
+ return;
+
+ va_start(ap, format);
+ cvmx_dvprintf(format, ap);
+ va_end(ap);
+}
+
+static inline int __cvmx_debug_in_focus(cvmx_debug_state_t state, unsigned core)
+{
+ return state.focus_core == core;
+}
+
+static void cvmx_debug_install_handler(unsigned core)
+{
+ extern void __cvmx_debug_handler_stage2(void);
+ int32_t *trampoline = CASTPTR(int32_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, BOOTLOADER_DEBUG_TRAMPOLINE_CORE));
+ trampoline += core;
+
+ *trampoline = (int32_t)(long)&__cvmx_debug_handler_stage2;
+
+ cvmx_debug_printf("Debug handled installed on core %d at %p\n", core, trampoline);
+}
+
+static int cvmx_debug_enabled(void)
+{
+ return cvmx_debug_booted() || CVMX_DEBUG_ATTACH;
+}
+
+static void cvmx_debug_init_global_ptr (void *ptr)
+{
+ uint64_t phys = cvmx_ptr_to_phys (ptr);
+ cvmx_debug_globals_t *p;
+ /* Since at this point, TLBs are not mapped 1 to 1, we should just use KSEG0 accesses. */
+ p = CASTPTR(cvmx_debug_globals_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, phys));
+ memset (p, 0, sizeof(cvmx_debug_globals_t));
+ p->version = CVMX_DEBUG_GLOBALS_VERSION;
+ p->tlb_entries = cvmx_core_get_tlb_entries();
+}
+
+static void cvmx_debug_init_globals(void)
+{
+ uint64_t phys;
+ void *ptr;
+
+ if (cvmx_debug_globals)
+ return;
+ ptr = cvmx_bootmem_alloc_named_range_once(sizeof(cvmx_debug_globals_t), 0, /* KSEG0 max, 512MB=*/0/*1024*1024*512*/, 8,
+ CVMX_DEBUG_GLOBALS_BLOCK_NAME, cvmx_debug_init_global_ptr);
+ phys = cvmx_ptr_to_phys (ptr);
+
+ /* Since TLBs are not always mapped 1 to 1, we should just use access via KSEG0. */
+ cvmx_debug_globals = CASTPTR(cvmx_debug_globals_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, phys));
+ cvmx_debug_printf("Debug named block at %p\n", cvmx_debug_globals);
+}
+
+
+static void cvmx_debug_globals_check_version(void)
+{
+ if (cvmx_debug_globals->version != CVMX_DEBUG_GLOBALS_VERSION)
+ {
+ cvmx_dprintf("Wrong version on the globals struct spinining; expected %d, got: %d.\n", (int)CVMX_DEBUG_GLOBALS_VERSION, (int)(cvmx_debug_globals->version));
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ panic("Wrong version.\n");
+#endif
+ while (1)
+ ;
+ }
+}
+
+static inline volatile cvmx_debug_core_context_t *cvmx_debug_core_context(void);
+static inline void cvmx_debug_save_core_context(volatile cvmx_debug_core_context_t *context, uint64_t hi, uint64_t lo);
+
+void cvmx_debug_init(void)
+{
+ cvmx_debug_state_t state;
+ int core;
+ const cvmx_debug_comm_t *comm;
+ cvmx_spinlock_t *lock;
+ unsigned int coremask = cvmx_debug_core_mask();
+
+ if (!cvmx_debug_enabled())
+ return;
+
+ cvmx_debug_init_globals();
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+ // Put a barrier until all cores have got to this point.
+ cvmx_coremask_barrier_sync(coremask);
+#endif
+ cvmx_debug_globals_check_version();
+
+
+ comm = cvmx_debug_comms[cvmx_debug_globals->comm_type];
+ lock = &cvmx_debug_globals->lock;
+
+ core = cvmx_get_core_num();
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ /* Install the debugger handler on the cores. */
+ {
+ int core1 = 0;
+ for (core1 = 0; core1 < CVMX_MAX_CORES; core1++)
+ {
+ if ((1u<<core1) & coremask)
+ cvmx_debug_install_handler(core1);
+ }
+ }
+#else
+ cvmx_debug_install_handler(core);
+#endif
+
+ if (comm->init)
+ comm->init();
+
+ {
+ cvmx_spinlock_lock(lock);
+ state = cvmx_debug_get_state();
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ state.known_cores |= coremask;
+ state.core_finished &= ~coremask;
+#else
+ state.known_cores |= (1u << core);
+ state.core_finished &= ~(1u << core);
+#endif
+ cvmx_debug_update_state(state);
+ cvmx_spinlock_unlock(lock);
+ }
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+ // Put a barrier until all cores have got to this point.
+ cvmx_coremask_barrier_sync(coremask);
+
+ if (cvmx_coremask_first_core(coremask))
+#endif
+ {
+ cvmx_debug_printf("cvmx_debug_init core: %d\n", core);
+ state = cvmx_debug_get_state();
+ state.focus_core = core;
+ state.active_cores = state.known_cores;
+ state.focus_switch = 1;
+ state.step_isr = 1;
+ cvmx_debug_printf("Known cores at init: 0x%x\n", (int)state.known_cores);
+ cvmx_debug_update_state(state);
+
+ /* Initialize __cvmx_debug_stack_top_all. */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ {
+ int i;
+ for (i = 0; i < CVMX_MAX_CORES; i++)
+ __cvmx_debug_stack_top_all[i] = &cvmx_debug_stack_all[i][8*1024];
+ }
+#endif
+ cvmx_debug_globals->init_complete = 1;
+ CVMX_SYNCW;
+ }
+ while (!cvmx_debug_globals->init_complete)
+ {
+ /* Spin waiting for init to complete */
+ }
+
+ if (cvmx_debug_booted())
+ cvmx_debug_trigger_exception();
+
+ /* Install the break handler after we may have triggered the debugger exception. */
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+ if (cvmx_coremask_first_core(coremask))
+#endif
+ {
+ if (comm->install_break_handler)
+ comm->install_break_handler();
+ }
+}
+
+static const char cvmx_debug_hexchar[] = "0123456789ABCDEF";
+/* Put the hex value of t into str. */
+static void cvmx_debug_int8_to_strhex(char *str, unsigned char t)
+{
+ str[0] = cvmx_debug_hexchar[(t>>4)&0xf];
+ str[1] = cvmx_debug_hexchar[t&0xF];
+ str[2] = 0;
+}
+
+static void cvmx_debug_int64_to_strhex(char *str, uint64_t t)
+{
+ str[0] = cvmx_debug_hexchar[(t>>60)&0xF];
+ str[1] = cvmx_debug_hexchar[(t>>56)&0xF];
+ str[2] = cvmx_debug_hexchar[(t>>52)&0xF];
+ str[3] = cvmx_debug_hexchar[(t>>48)&0xF];
+ str[4] = cvmx_debug_hexchar[(t>>44)&0xF];
+ str[5] = cvmx_debug_hexchar[(t>>40)&0xF];
+ str[6] = cvmx_debug_hexchar[(t>>36)&0xF];
+ str[7] = cvmx_debug_hexchar[(t>>32)&0xF];
+ str[8] = cvmx_debug_hexchar[(t>>28)&0xF];
+ str[9] = cvmx_debug_hexchar[(t>>24)&0xF];
+ str[10] = cvmx_debug_hexchar[(t>>20)&0xF];
+ str[11] = cvmx_debug_hexchar[(t>>16)&0xF];
+ str[12] = cvmx_debug_hexchar[(t>>12)&0xF];
+ str[13] = cvmx_debug_hexchar[(t>>8)&0xF];
+ str[14] = cvmx_debug_hexchar[(t>>4)&0xF];
+ str[15] = cvmx_debug_hexchar[(t>>0)&0xF];
+ str[16] = 0;
+}
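
The converter above is deliberately straight-line code; a loop-based equivalent, shown only for clarity:

    /* Loop form of the unrolled converter above; same output. */
    static void int64_to_strhex_loop(char *str, uint64_t t)
    {
        int i;
        for (i = 0; i < 16; i++)
            str[i] = cvmx_debug_hexchar[(t >> (60 - 4 * i)) & 0xF];
        str[16] = 0;
    }
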
+
+static int cvmx_debug_putpacket_noformat(char *packet)
+{
+ if (cvmx_debug_comms[cvmx_debug_globals->comm_type]->putpacket == NULL)
+ return 0;
+ cvmx_debug_printf("Reply: %s\n", packet);
+ return cvmx_debug_comms[cvmx_debug_globals->comm_type]->putpacket(packet);
+}
+
+static int cvmx_debug_putcorepacket(char *buf, int core)
+{
+ char *tmp = "!Core XX ";
+ int tmpsize = cvmx_debug_strlen(tmp);
+ int bufsize = cvmx_debug_strlen(buf);
+ char *packet = __builtin_alloca(tmpsize + bufsize + 1);
+ cvmx_debug_strcpy(packet, tmp);
+ cvmx_debug_strcpy(&packet[tmpsize], buf);
+ if (core < 10)
+ {
+ packet[6] = ' ';
+ packet[7] = core + '0';
+ }
+ else if (core < 20)
+ {
+ packet[6] = '1';
+ packet[7] = core - 10 + '0';
+ }
+ else if (core < 30)
+ {
+ packet[6] = '2';
+ packet[7] = core - 20 + '0';
+ }
+ else
+ {
+ packet[6] = '3';
+ packet[7] = core - 30 + '0';
+ }
+ return cvmx_debug_putpacket_noformat(packet);
+}
+
+/* Put a buffer followed by an integer formatted as hex. */
+static int cvmx_debug_putpacket_hexint(char *buf, uint64_t value)
+{
+ size_t size = cvmx_debug_strlen(buf);
+ char *packet = __builtin_alloca(size + 16 + 1);
+ cvmx_debug_strcpy(packet, buf);
+ cvmx_debug_int64_to_strhex(&packet[size], value);
+ return cvmx_debug_putpacket_noformat(packet);
+}
+
+static int cvmx_debug_active_core(cvmx_debug_state_t state, unsigned core)
+{
+ return state.active_cores & (1u << core);
+}
+
+static volatile cvmx_debug_core_context_t *cvmx_debug_core_context(void)
+{
+ return &cvmx_debug_globals->contextes[cvmx_get_core_num()];
+}
+
+static volatile uint64_t *cvmx_debug_regnum_to_context_ref(int regnum, volatile cvmx_debug_core_context_t *context)
+{
+ /* Must be kept in sync with mips_octeon_reg_names in gdb/mips-tdep.c. */
+ if (regnum < 32)
+ return &context->regs[regnum];
+ switch (regnum)
+ {
+ case 32: return &context->cop0.status;
+ case 33: return &context->lo;
+ case 34: return &context->hi;
+ case 35: return &context->cop0.badvaddr;
+ case 36: return &context->cop0.cause;
+ case 37: return &context->cop0.depc;
+ default: return NULL;
+ }
+}
+
+static int cvmx_debug_probe_load(unsigned char *ptr, unsigned char *result)
+{
+ volatile unsigned char *p = ptr;
+ int ok;
+ unsigned char tem;
+
+ {
+ __cvmx_debug_mode_exception_ignore = 1;
+ __cvmx_debug_mode_exception_occured = 0;
+ /* We don't handle debug-mode exceptions in delay slots. Avoid them. */
+ asm volatile (".set push \n\t"
+ ".set noreorder \n\t"
+ "nop \n\t"
+ "lbu %0, %1 \n\t"
+ "nop \n\t"
+ ".set pop" : "=r"(tem) : "m"(*p));
+ ok = __cvmx_debug_mode_exception_occured == 0;
+ __cvmx_debug_mode_exception_ignore = 0;
+ __cvmx_debug_mode_exception_occured = 0;
+ *result = tem;
+ }
+ return ok;
+}
+
+static int cvmx_debug_probe_store(unsigned char *ptr)
+{
+ volatile unsigned char *p = ptr;
+ int ok;
+
+ __cvmx_debug_mode_exception_ignore = 1;
+ __cvmx_debug_mode_exception_occured = 0;
+ /* We don't handle debug-mode exceptions in delay slots. Avoid them. */
+ asm volatile (".set push \n\t"
+ ".set noreorder \n\t"
+ "nop \n\t"
+ "sb $0, %0 \n\t"
+ "nop \n\t"
+ ".set pop" : "=m"(*p));
+ ok = __cvmx_debug_mode_exception_occured == 0;
+
+ __cvmx_debug_mode_exception_ignore = 0;
+ __cvmx_debug_mode_exception_occured = 0;
+ return ok;
+}
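
Both probes arm __cvmx_debug_mode_exception_ignore so that a faulting access inside debug mode is swallowed rather than treated as a fatal nested exception. A sketch of reading a 32-bit word safely with the load probe:

    /* Sketch: read a word byte-by-byte via the probe so an unmapped address
       fails cleanly instead of raising a nested debug-mode exception. */
    static int safe_read32(unsigned char *ptr, uint32_t *out)
    {
        uint32_t v = 0;
        int i;
        unsigned char b;

        for (i = 0; i < 4; i++)
        {
            if (!cvmx_debug_probe_load(ptr + i, &b))
                return 0;          /* address not accessible */
            v = (v << 8) | b;
        }
        *out = v;
        return 1;
    }
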
+
+
+/**
+ * Convert a hex character to its value.
+ *
+ * @param ch Hex character ('0'-'9', 'a'-'f', 'A'-'F')
+ * @return   Value 0-15, or -1 if ch is not a hex digit
+ */
+static inline int cvmx_debug_hex(char ch)
+{
+ if ((ch >= 'a') && (ch <= 'f'))
+ return(ch - 'a' + 10);
+ if ((ch >= '0') && (ch <= '9'))
+ return(ch - '0');
+ if ((ch >= 'A') && (ch <= 'F'))
+ return(ch - 'A' + 10);
+ return(-1);
+}
+
+/**
+ * While we find nice hex chars, build an int.
+ *
+ * @param ptr      Cursor into the packet text; advanced past the digits consumed
+ * @param intValue Receives the accumulated value
+ * @return         Number of hex characters processed
+ */
+static int cvmx_debug_hexToLong(const char **ptr, uint64_t *intValue)
+{
+ int numChars = 0;
+ long hexValue;
+
+ *intValue = 0;
+ while (**ptr)
+ {
+ hexValue = cvmx_debug_hex(**ptr);
+ if (hexValue < 0)
+ break;
+
+ *intValue = (*intValue << 4) | hexValue;
+ numChars ++;
+
+ (*ptr)++;
+ }
+
+ return(numChars);
+}
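
Because hexToLong advances the cursor it is given, the packet parser below can chain fields with single-character separators. A worked sketch of parsing an 'm' (memory read) payload, "AA..AA,LLLL":

    /* Worked sketch mirroring the 'm' case in cvmx_debug_process_packet. */
    static int parse_mem_request(const char *buf, uint64_t *addr, uint64_t *len)
    {
        if (!cvmx_debug_hexToLong(&buf, addr))
            return 0;              /* no address digits */
        if (*buf++ != ',')
            return 0;              /* missing separator */
        if (!cvmx_debug_hexToLong(&buf, len))
            return 0;              /* no length digits */
        return 1;                  /* "80000000,4" -> addr 0x80000000, len 4 */
    }
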
+
+/**
+ * Initialize the performance counter control registers.
+ *
+ */
+static void cvmx_debug_set_perf_control_reg (volatile cvmx_debug_core_context_t *context, int perf_event, int perf_counter)
+{
+ cvmx_core_perf_control_t control;
+
+ control.u32 = 0;
+ control.s.u = 1;
+ control.s.s = 1;
+ control.s.k = 1;
+ control.s.ex = 1;
+ control.s.w = 1;
+ control.s.m = 1 - perf_counter;
+ control.s.event = perf_event;
+
+ context->cop0.perfctrl[perf_counter] = control.u32;
+}
+
+static cvmx_debug_command_t cvmx_debug_process_packet(const char *packet)
+{
+ const char *buf = packet;
+ cvmx_debug_command_t result = COMMAND_NOP;
+ cvmx_debug_state_t state = cvmx_debug_get_state();
+
+ /* A one letter command code represents what to do. */
+ switch (*buf++)
+ {
+ case '?': /* What protocol do I speak? */
+ cvmx_debug_putpacket_noformat("S0A");
+ break;
+
+ case '\003': /* Control-C */
+ cvmx_debug_putpacket_noformat("T9");
+ break;
+
+ case 'F': /* Change the focus core */
+ {
+ uint64_t core;
+ if (!cvmx_debug_hexToLong(&buf, &core))
+ {
+ cvmx_debug_putpacket_noformat("!Uknown core. Focus not changed.");
+ }
+ /* Only cores in the exception handler may become the focus.
+ If a core not in the exception handler got focus the
+ debugger would hang since nobody would talk to it. */
+ else if (state.handler_cores & (1u << core))
+ {
+ /* Focus change reply must be sent before the focus
+ changes. Otherwise the new focus core will eat our ACK
+ from the debugger. */
+ cvmx_debug_putpacket_hexint("F", core);
+ cvmx_debug_comms[cvmx_debug_globals->comm_type]->change_core(state.focus_core, core);
+ state.focus_core = core;
+ cvmx_debug_update_state(state);
+ break;
+ }
+ else
+ cvmx_debug_putpacket_noformat("!Core is not in the exception handler. Focus not changed.");
+ /* Nothing changed, so we send back the old value */
+ }
+ /* fall through */
+ case 'f': /* Get the focus core */
+ cvmx_debug_putpacket_hexint("F", state.focus_core);
+ break;
+
+ case 'J': /* Set the flag for skip-over-isr in Single-Stepping mode */
+ {
+ if (*buf == '1')
+ state.step_isr = 1; /* Step in ISR */
+ else
+ state.step_isr = 0; /* Step over ISR */
+ cvmx_debug_update_state(state);
+ }
+ /* Fall through. The reply to the set step-isr command is the
+ same as the get step-isr command */
+
+ case 'j': /* Reply with step_isr status */
+ cvmx_debug_putpacket_hexint("J", (unsigned)state.step_isr);
+ break;
+
+
+ case 'I': /* Set the active cores */
+ {
+ uint64_t active_cores;
+ if (!cvmx_debug_hexToLong(&buf, &active_cores))
+ active_cores = 0;
+ /* Limit the active mask to the cores that are known to exist */
+ state.active_cores = active_cores & state.known_cores;
+
+ /* Lazy user hack to have 0 be all cores */
+ if (state.active_cores == 0)
+ state.active_cores = state.known_cores;
+
+ /* The focus core must be in the active_cores mask */
+ if ((state.active_cores & (1u << state.focus_core)) == 0)
+ {
+ cvmx_debug_putpacket_noformat("!Focus core was added to the masked.");
+ state.active_cores |= 1u << state.focus_core;
+ }
+
+ cvmx_debug_update_state(state);
+ }
+ /* Fall through. The reply to the set active cores command is the
+ same as the get active cores command */
+
+ case 'i': /* Get the active cores */
+ cvmx_debug_putpacket_hexint("I", state.active_cores);
+ break;
+
+ case 'A': /* Setting the step mode all or one */
+ {
+ if (*buf == '1')
+ state.step_all = 1; /* A step or continue will start all cores */
+ else
+ state.step_all = 0; /* A step or continue only affects the focus core */
+ cvmx_debug_update_state(state);
+ }
+ /* Fall through. The reply to the set step-all command is the
+ same as the get step-all command */
+
+ case 'a': /* Getting the current step mode */
+ cvmx_debug_putpacket_hexint("A", state.step_all);
+ break;
+
+ case 'g': /* read a register from global place. */
+ {
+ volatile cvmx_debug_core_context_t *context = cvmx_debug_core_context();
+ uint64_t regno;
+ volatile uint64_t *reg;
+
+ /* Get the register number to read */
+ if (!cvmx_debug_hexToLong(&buf, ®no))
+ {
+ cvmx_debug_printf("Register number cannot be read.\n");
+ cvmx_debug_putpacket_hexint("", 0xDEADBEEF);
+ break;
+ }
+
+ reg = cvmx_debug_regnum_to_context_ref(regno, context);
+ if (!reg)
+ {
+ cvmx_debug_printf("Register #%d is not valid\n", (int)regno);
+ cvmx_debug_putpacket_hexint("", 0xDEADBEEF);
+ break;
+ }
+ cvmx_debug_putpacket_hexint("", *reg);
+ }
+ break;
+
+ case 'G': /* set the value of a register. */
+ {
+ volatile cvmx_debug_core_context_t *context = cvmx_debug_core_context();
+ uint64_t regno;
+ volatile uint64_t *reg;
+ uint64_t value;
+
+ /* Get the register number to write. It should be followed by
+ a comma */
+ if (!cvmx_debug_hexToLong(&buf, ®no)
+ || (*buf++ != ',')
+ || !cvmx_debug_hexToLong(&buf, &value))
+ {
+ cvmx_debug_printf("G packet corrupt: %s\n", buf);
+ goto error_packet;
+ }
+
+ reg = cvmx_debug_regnum_to_context_ref(regno, context);
+ if (!reg)
+ {
+ cvmx_debug_printf("Register #%d is not valid\n", (int)regno);
+ goto error_packet;
+ }
+ *reg = value;
+ }
+ break;
+
+ case 'm': /* Memory read. mAA..AA,LLLL Read LLLL bytes at address AA..AA */
+ {
+ uint64_t addr, i, length;
+ unsigned char *ptr;
+ char *reply;
+
+ /* Get the memory address, a comma, and the length */
+ if (!cvmx_debug_hexToLong(&buf, &addr)
+ || (*buf++ != ',')
+ || !cvmx_debug_hexToLong(&buf, &length))
+ {
+ cvmx_debug_printf("m packet corrupt: %s\n", buf);
+ goto error_packet;
+ }
+ if (length >= 1024)
+ {
+ cvmx_debug_printf("m packet length out of range: %lld\n", (long long)length);
+ goto error_packet;
+ }
+
+ reply = __builtin_alloca(length * 2 + 1);
+ ptr = (unsigned char *)(long)addr;
+ for (i = 0; i < length; i++)
+ {
+ /* Probe memory. If not accessible fail. */
+ unsigned char t;
+ if (!cvmx_debug_probe_load(&ptr[i], &t))
+ goto error_packet;
+ cvmx_debug_int8_to_strhex(&reply[i * 2], t);
+ }
+ cvmx_debug_putpacket_noformat(reply);
+ }
+ break;
+
+ case 'M': /* Memory write. MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */
+ {
+ uint64_t addr, i, length;
+ unsigned char *ptr;
+
+ if (!cvmx_debug_hexToLong(&buf, &addr)
+ || *buf++ != ','
+ || !cvmx_debug_hexToLong(&buf, &length)
+ || *buf++ != ':')
+ {
+ cvmx_debug_printf("M packet corrupt: %s\n", buf);
+ goto error_packet;
+ }
+
+ ptr = (unsigned char *)(long)addr;
+ for (i = 0; i < length; i++)
+ {
+ int n, n1;
+ unsigned char c;
+
+ n = cvmx_debug_hex(buf[i * 2]);
+ n1 = cvmx_debug_hex(buf[i * 2 + 1]);
+ c = (n << 4) | n1;
+
+ if (n == -1 || n1 == -1)
+ {
+ cvmx_debug_printf("M packet corrupt: %s\n", &buf[i * 2]);
+ goto error_packet;
+ }
+ /* Probe memory. If not accessible fail. */
+ if (!cvmx_debug_probe_store(&ptr[i]))
+ {
+ cvmx_debug_printf("M cannot write: %p\n", &ptr[i]);
+ goto error_packet;
+ }
+ ptr[i] = c;
+ }
+ cvmx_debug_putpacket_noformat("+");
+ }
+ break;
+
+ case 'e': /* Set/get performance counter events. e[1234]XX..X: [12]
+ selects the performance counter to set and XX..X is the
+ performance event. [34] reads the same counter back. */
+ {
+ uint64_t perf_event = 0;
+ char encoded_counter = *buf++;
+ uint64_t counter;
+ volatile cvmx_debug_core_context_t *context = cvmx_debug_core_context();
+
+ /* Ignore errors from the packet. */
+ cvmx_debug_hexToLong(&buf, &perf_event);
+
+ switch (encoded_counter)
+ {
+ case '1': /* Set performance counter0 event. */
+ case '2': /* Set performance counter1 event. */
+
+ counter = encoded_counter - '1';
+ context->cop0.perfval[counter] = 0;
+ cvmx_debug_set_perf_control_reg(context, perf_event, counter);
+ break;
+
+ case '3': /* Get performance counter0 event. */
+ case '4': /* Get performance counter1 event. */
+ {
+ cvmx_core_perf_control_t c;
+ char outpacket[16*2 +2];
+ counter = encoded_counter - '3';
+ /* Pass performance counter0 event and counter to
+ the debugger. */
+ c.u32 = context->cop0.perfctrl[counter];
+ cvmx_debug_int64_to_strhex(outpacket, context->cop0.perfval[counter]);
+ outpacket[16] = ',';
+ cvmx_debug_int64_to_strhex(&outpacket[17], c.s.event);
+ outpacket[33] = 0;
+ cvmx_debug_putpacket_noformat(outpacket);
+ }
+ break;
+ }
+ }
+ break;
+
+#if 0
+ case 't': /* Return the trace buffer read data register contents. */
+ {
+ uint64_t tra_data;
+ uint64_t tra_ctl;
+ char tmp[64];
+
+ /* If trace buffer is disabled no trace data information is available. */
+ if ((tra_ctl & 0x1) == 0)
+ {
+ cvmx_debug_putpacket_noformat("!Trace buffer not enabled\n");
+ cvmx_debug_putpacket_noformat("t");
+ }
+ else
+ {
+ cvmx_debug_putpacket_noformat("!Trace buffer is enabled\n");
+ tra_data = cvmx_read_csr(OCTEON_TRA_READ_DATA);
+ mem2hex (&tra_data, tmp, 8);
+ strcpy (debug_output_buffer, "t");
+ strcat (debug_output_buffer, tmp);
+ cvmx_debug_putpacket_noformat(debug_output_buffer);
+ }
+ }
+ break;
+#endif
+
+ case 'Z': /* Insert hardware breakpoint: Z[di]NN..N,AA.A, [di] data or
+ instruction, NN..Nth at address AA..A */
+ {
+ enum type
+ {
+ WP_LOAD = 1,
+ WP_STORE = 2,
+ WP_ACCESS = 3
+ };
+
+ uint64_t num, size;
+ uint64_t addr;
+ uint64_t type;
+ char bp_type = *buf++;
+ const int BE = 1, TE = 4;
+ volatile cvmx_debug_core_context_t *context = cvmx_debug_core_context();
+
+ if (!cvmx_debug_hexToLong(&buf, &num)
+ || *buf++ != ','
+ || !cvmx_debug_hexToLong(&buf, &addr))
+ {
+ cvmx_debug_printf("Z packet corrupt: %s\n", &packet[1]);
+ goto error_packet;
+ }
+
+ switch (bp_type)
+ {
+ case 'i': // Instruction hardware breakpoint
+ if (num > 4)
+ {
+ cvmx_debug_printf("Z packet corrupt: %s\n", &packet[1]);
+ goto error_packet;
+ }
+
+ context->hw_ibp.address[num] = addr;
+ context->hw_ibp.address_mask[num] = 0;
+ context->hw_ibp.asid[num] = 0;
+ context->hw_ibp.control[num] = BE | TE;
+ break;
+
+ case 'd': // Data hardware breakpoint
+ {
+ uint64_t dbc = 0xff0 | BE | TE;
+ uint64_t dbm;
+ if (num > 4
+ || *buf++ != ','
+ || !cvmx_debug_hexToLong(&buf, &size)
+ || *buf++ != ','
+ || !cvmx_debug_hexToLong(&buf, &type)
+ || type > WP_ACCESS
+ || type < WP_LOAD)
+ {
+ cvmx_debug_printf("Z packet corrupt: %s\n", &packet[1]);
+ goto error_packet;
+ }
+
+ /* Set DBC[BE,TE,BLM]. */
+ context->hw_dbp.address[num] = addr;
+ context->hw_dbp.asid[num] = 0;
+
+ dbc |= type == WP_STORE ? 0x1000 : type == WP_LOAD ? 0x2000 : 0;
+ /* Mask the bits depending on the size for
+ debugger to stop while accessing parts of the
+ memory location. */
+ dbm = (size == 8) ? 0x7 : ((size == 4) ? 3
+ : (size == 2) ? 1 : 0);
+ context->hw_dbp.address_mask[num] = dbm;
+ context->hw_dbp.control[num] = dbc;
+ break;
+ }
+ default:
+ cvmx_debug_printf("Z packet corrupt: %s\n", &packet[1]);
+ goto error_packet;
+ }
+ }
+ break;
+
+ case 'z': /* Remove hardware breakpoint: z[di]NN..N remove NN..Nth
+ breakpoint. */
+ {
+ uint64_t num;
+ char bp_type = *buf++;
+ volatile cvmx_debug_core_context_t *context = cvmx_debug_core_context();
+
+ if (!cvmx_debug_hexToLong(&buf, &num) || num > 4)
+ {
+ cvmx_debug_printf("z packet corrupt: %s\n", buf);
+ goto error_packet;
+ }
+
+ switch (bp_type)
+ {
+ case 'i': // Instruction hardware breakpoint
+ context->hw_ibp.address[num] = 0;
+ context->hw_ibp.address_mask[num] = 0;
+ context->hw_ibp.asid[num] = 0;
+ context->hw_ibp.control[num] = 0;
+ break;
+ case 'd': // Data hardware breakpoint
+ context->hw_dbp.address[num] = 0;
+ context->hw_dbp.address_mask[num] = 0;
+ context->hw_dbp.asid[num] = 0;
+ context->hw_dbp.control[num] = 0;
+ break;
+ default:
+ cvmx_debug_printf("z packet corrupt: %s\n", buf);
+ goto error_packet;
+ }
+ }
+ break;
+
+ case 's': /* Single step. sAA..AA Step one instruction from AA..AA (optional) */
+ result = COMMAND_STEP;
+ break;
+
+ case 'c': /* Continue. cAA..AA Continue at address AA..AA (optional) */
+ result = COMMAND_CONTINUE;
+ break;
+
+ case '+': /* Don't know. I think it is a communications sync */
+ /* Ignoring this command */
+ break;
+
+ default:
+ cvmx_debug_printf("Unknown debug command: %s\n", buf - 1);
+error_packet:
+ cvmx_debug_putpacket_noformat("-");
+ break;
+ }
+
+ return result;
+}
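
Put together, the parser implements a compact GDB-like command set over the framing from cvmx-debug-uart.c. A representative exchange (checksums elided as XX; addresses and data hypothetical):

    debugger -> target   $m80000000,4#XX    read 4 bytes at 0x80000000
    target -> debugger   $0A0B0C0D#XX       hex-encoded memory contents
    debugger -> target   $Zi0,80000400#XX   instruction breakpoint 0 at 0x80000400
    debugger -> target   $c#XX              continue
    target -> debugger   $T9#XX             stopped again (trap)
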
+
+static cvmx_debug_command_t cvmx_debug_process_next_packet(void)
+{
+ char packet[CVMX_DEBUG_MAX_REQUEST_SIZE];
+ if (cvmx_debug_comms[cvmx_debug_globals->comm_type]->getpacket(packet, CVMX_DEBUG_MAX_REQUEST_SIZE))
+ {
+ cvmx_debug_printf("Request: %s\n", packet);
+ return cvmx_debug_process_packet(packet);
+ }
+ return COMMAND_NOP;
+}
+
+/* If a core isn't in the active core mask we need to start it up again. We
+   can only do this if the core didn't hit a breakpoint or single step. If
+   the core took the CVMX_CIU_DINT interrupt (which generally happens while
+   executing _exit() at the end of the program), remove the core from the
+   known cores so that when the cores in the active core mask are done
+   executing the program, the focus will not be transferred to this core. */
+
+static int cvmx_debug_stop_core(cvmx_debug_state_t state, unsigned core, cvmx_debug_register_t *debug_reg, int proxy)
+{
+ if (!cvmx_debug_active_core(state, core) && !debug_reg->s.dbp && !debug_reg->s.dss && (debug_reg->s.dint != 1))
+ {
+ debug_reg->s.sst = 0;
+ cvmx_debug_printf("Core #%d not in active cores, continuing.\n", core);
+ return 0;
+ }
+ if ((state.core_finished & (1u<<core)) && proxy)
+ return 0;
+ return 1;
+}
+
+/* check to see if current exc is single-stepped and that no other exc
+ was also simultaneously noticed. */
+static int cvmx_debug_single_step_exc(cvmx_debug_register_t *debug_reg)
+{
+ if (debug_reg->s.dss && !debug_reg->s.dib && !debug_reg->s.dbp && !debug_reg->s.ddbs && !debug_reg->s.ddbl)
+ return 1;
+ return 0;
+}
+
+static void cvmx_debug_set_focus_core(cvmx_debug_state_t *state, int core)
+{
+ if (state->ever_been_in_debug)
+ cvmx_debug_putcorepacket("taking focus.", core);
+ cvmx_debug_comms[cvmx_debug_globals->comm_type]->change_core (state->focus_core, core);
+ state->focus_core = core;
+}
+
+static void cvmx_debug_may_elect_as_focus_core(cvmx_debug_state_t *state, int core, cvmx_debug_register_t *debug_reg)
+{
+ /* If another core has already elected itself as the focus core, we're late. */
+ if (state->handler_cores & (1u << state->focus_core))
+ return;
+
+ /* If we hit a breakpoint, elect ourselves. */
+ if (debug_reg->s.dib || debug_reg->s.dbp || debug_reg->s.ddbs || debug_reg->s.ddbl)
+ cvmx_debug_set_focus_core(state, core);
+
+ /* It is possible the focus core has completed processing and exited the
+ program. When this happens the focus core will not be in
+ known_cores. If this is the case we need to elect a new focus. */
+ if ((state->known_cores & (1u << state->focus_core)) == 0)
+ cvmx_debug_set_focus_core(state, core);
+}
+
+static void cvmx_debug_send_stop_reason(cvmx_debug_register_t *debug_reg, volatile cvmx_debug_core_context_t *context)
+{
+ /* Handle Debug Data Breakpoint Store/Load Exception. */
+ if (debug_reg->s.ddbs || debug_reg->s.ddbl)
+ cvmx_debug_putpacket_hexint("T8:", (int) context->hw_dbp.status);
+ else
+ cvmx_debug_putpacket_noformat("T9");
+}
+
+
+static void cvmx_debug_clear_status(volatile cvmx_debug_core_context_t *context)
+{
+ /* SW needs to clear the BreakStatus bits after a watchpoint is hit or on
+ reset. */
+ context->hw_dbp.status &= ~0x3fff;
+
+ /* Clear MCD0, which is write-1-to-clear. */
+ context->cop0.multicoredebug |= 1;
+}
+
+static void cvmx_debug_sync_up_cores(void)
+{
+ /* NOTE this reads directly from the state array for speed reasons
+ and we don't change the array. */
+ do {
+ asm("": : : "memory");
+ } while (cvmx_debug_globals->state[offsetof(cvmx_debug_state_t, step_all)/sizeof(uint32_t)]
+ && cvmx_debug_globals->state[offsetof(cvmx_debug_state_t, handler_cores)/sizeof(uint32_t)] != 0);
+}
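
The poll above indexes the serialized state directly as 32-bit words, using offsetof to locate step_all and handler_cores; this works because the state struct is mirrored into a uint32_t array (see cvmx_debug_update_state) and each polled field is 4-byte aligned. A sketch of the indexing, assuming that layout:

    /* Sketch of the word indexing relied on above; assumes the polled field
       starts on a 4-byte boundary within the serialized state. */
    static uint32_t read_state_word(volatile uint32_t *state_words, size_t byte_offset)
    {
        return state_words[byte_offset / sizeof(uint32_t)];
    }
    /* e.g. read_state_word(cvmx_debug_globals->state,
                            offsetof(cvmx_debug_state_t, step_all)) */
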
+
+/* Delay the focus core a little if it is likely another core needs to steal
+ focus. Once we enter the main loop focus can't be stolen */
+static void cvmx_debug_delay_focus_core(cvmx_debug_state_t state, unsigned core, cvmx_debug_register_t *debug_reg)
+{
+ volatile int i;
+ if (debug_reg->s.dss || debug_reg->s.dbp || core != state.focus_core)
+ return;
+ for (i = 0; i < 2400; i++)
+ {
+ asm volatile (".set push \n\t"
+ ".set noreorder \n\t"
+ "nop \n\t"
+ "nop \n\t"
+ "nop \n\t"
+ "nop \n\t"
+ ".set pop");
+ /* Spin giving the breakpoint core time to steal focus */
+ }
+
+}
+
+/* If this core was single-stepping in a group,
+ && it was not the last focus-core,
+ && last focus-core happens to be inside an ISR, blocking focus-switch
+ then burn some cycles, to avoid unnecessary focus toggles. */
+static void cvmx_debug_delay_isr_core(unsigned core, uint32_t depc, int single_stepped_exc_only,
+ cvmx_debug_state_t state)
+{
+ volatile uint64_t i;
+ if(!single_stepped_exc_only || state.step_isr || core == state.focus_core || state.focus_switch)
+ return;
+
+ cvmx_debug_printf ("Core #%u spinning for focus at 0x%x\n", core, (unsigned int)depc);
+
+ for(i = ISR_DELAY_COUNTER; i > 0 ; i--)
+ {
+ state = cvmx_debug_get_state();
+ /* Spin giving the focus core time to service ISR */
+ /* But cut short the loop, if we can. Shrink down i, only once. */
+ if (i > 600000 && state.focus_switch)
+ i = 500000;
+ }
+
+}
+
+static int cvmx_debug_perform_proxy(cvmx_debug_register_t *debug_reg, volatile cvmx_debug_core_context_t *context)
+{
+ unsigned core = cvmx_get_core_num();
+ cvmx_debug_state_t state = cvmx_debug_get_state();
+ cvmx_debug_command_t command = COMMAND_NOP;
+ int single_stepped_exc_only = cvmx_debug_single_step_exc (debug_reg);
+
+ /* All cores should respect the focus core if it has to
+ stop focus switching while servicing an interrupt.
+ If the system is single-stepping, then the following
+ code path is valid. If the current core tripped on a
+ break-point or some other error while going through
+ an ISR, then we shouldn't be returning unconditionally.
+ In that case (non-single-step case) we must enter
+ the debugger exception stub fully. */
+ if (!state.step_isr && (cvmx_interrupt_in_isr || (context->cop0.status & 0x2ULL)) && single_stepped_exc_only)
+ {
+ cvmx_spinlock_lock(&cvmx_debug_globals->lock);
+ state = cvmx_debug_get_state();
+ /* If this is the focus core, switch off focus switching
+ till ISR_DELAY_COUNTER. This will let focus core
+ keep the focus until the ISR is completed. */
+ if(state.focus_switch && core == state.focus_core)
+ {
+ cvmx_debug_printf ("Core #%u stopped focus stealing at 0x%llx\n", core, (unsigned long long)context->cop0.depc);
+ state.focus_switch = 0;
+ }
+ /* Allow other cores to steal focus;
+ the focus core has completed the ISR. */
+ if (*(uint32_t*)((__SIZE_TYPE__)context->cop0.depc) == ERET_INSN && core == state.focus_core)
+ {
+ cvmx_debug_printf ("Core #%u resumed focus stealing at 0x%llx\n", core, (unsigned long long)context->cop0.depc);
+ state.focus_switch = 1;
+ }
+ cvmx_debug_update_state(state);
+ cvmx_spinlock_unlock(&cvmx_debug_globals->lock);
+ cvmx_debug_printf ("Core #%u resumed skipping isr.\n", core);
+ return 0;
+ }
+
+ /* Delay the focus core a little if it is likely another core needs to
+ steal focus. Once we enter the main loop focus can't be stolen */
+ cvmx_debug_delay_focus_core(state, core, debug_reg);
+
+ cvmx_debug_delay_isr_core (core, context->cop0.depc, single_stepped_exc_only, state);
+
+ /* The following section of code does two critical things. First, it
+ populates the handler_cores bitmask of all cores in the exception
+ handler. Only one core at a time can update this field. Second it
+ changes the focus core if needed. */
+ {
+ cvmx_debug_printf("Core #%d stopped\n", core);
+ cvmx_spinlock_lock(&cvmx_debug_globals->lock);
+ state = cvmx_debug_get_state();
+
+ state.handler_cores |= (1u << core);
+ cvmx_debug_may_elect_as_focus_core(&state, core, debug_reg);
+
+ /* Push all updates before exiting the critical section */
+ state.focus_switch = 1;
+ cvmx_debug_update_state(state);
+ cvmx_spinlock_unlock(&cvmx_debug_globals->lock);
+ }
+ if (__cvmx_debug_in_focus(state, core))
+ cvmx_debug_send_stop_reason(debug_reg, context);
+
+ do {
+ unsigned oldfocus = state.focus_core;
+ state = cvmx_debug_get_state();
+ /* Note the focus core can change in this loop. */
+ if (__cvmx_debug_in_focus(state, core))
+ {
+ /* If the focus has changed and the old focus has exited, then send a signal
+ that we should stop if step_all is off. */
+ if (oldfocus != state.focus_core && ((1u << oldfocus) & state.core_finished)
+ && !state.step_all)
+ cvmx_debug_send_stop_reason(debug_reg, context);
+
+ command = cvmx_debug_process_next_packet();
+ state = cvmx_debug_get_state();
+ /* When resuming let the other cores resume as well with
+ step-all. */
+ if (command != COMMAND_NOP && state.step_all)
+ {
+ state.command = command;
+ cvmx_debug_update_state(state);
+ }
+ }
+ /* When stepping all cores, update the non-focus cores' command too. */
+ else if (state.step_all)
+ command = state.command;
+
+ /* If we did not get a command and the communication changed return,
+ we are changing the communications. */
+ if (command == COMMAND_NOP && cvmx_debug_globals->comm_changed)
+ {
+ /* FIXME: this should be a sync not based on cvmx_coremask_barrier_sync. */
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+ /* Sync up. */
+ cvmx_coremask_barrier_sync(state.handler_cores);
+#endif
+ return 1;
+ }
+ } while (command == COMMAND_NOP);
+
+ debug_reg->s.sst = command == COMMAND_STEP;
+ cvmx_debug_printf("Core #%d running\n", core);
+
+ {
+ cvmx_spinlock_lock(&cvmx_debug_globals->lock);
+ state = cvmx_debug_get_state();
+ state.handler_cores ^= (1u << core);
+ cvmx_debug_update_state(state);
+ cvmx_spinlock_unlock(&cvmx_debug_globals->lock);
+ }
+
+ cvmx_debug_sync_up_cores();
+ /* Now that all cores are out, reset the command. */
+ if (__cvmx_debug_in_focus(state, core))
+ {
+ cvmx_spinlock_lock(&cvmx_debug_globals->lock);
+ state = cvmx_debug_get_state();
+ state.command = COMMAND_NOP;
+ cvmx_debug_update_state(state);
+ cvmx_spinlock_unlock(&cvmx_debug_globals->lock);
+ }
+ return 0;
+}
+
+static void cvmx_debug_save_core_context(volatile cvmx_debug_core_context_t *context, uint64_t hi, uint64_t lo)
+{
+ unsigned i;
+ cvmx_debug_memcpy_align ((char *) context->regs, __cvmx_debug_save_regs_area, sizeof(context->regs));
+ context->lo = lo;
+ context->hi = hi;
+ CVMX_MF_COP0(context->cop0.index, COP0_INDEX);
+ CVMX_MF_COP0(context->cop0.entrylo[0], COP0_ENTRYLO0);
+ CVMX_MF_COP0(context->cop0.entrylo[1], COP0_ENTRYLO1);
+ CVMX_MF_COP0(context->cop0.entryhi, COP0_ENTRYHI);
+ CVMX_MF_COP0(context->cop0.pagemask, COP0_PAGEMASK);
+ CVMX_MF_COP0(context->cop0.status, COP0_STATUS);
+ CVMX_MF_COP0(context->cop0.cause, COP0_CAUSE);
+ CVMX_MF_COP0(context->cop0.debug, COP0_DEBUG);
+ CVMX_MF_COP0(context->cop0.multicoredebug, COP0_MULTICOREDEBUG);
+ CVMX_MF_COP0(context->cop0.perfval[0], COP0_PERFVALUE0);
+ CVMX_MF_COP0(context->cop0.perfval[1], COP0_PERFVALUE1);
+ CVMX_MF_COP0(context->cop0.perfctrl[0], COP0_PERFCONTROL0);
+ CVMX_MF_COP0(context->cop0.perfctrl[1], COP0_PERFCONTROL1);
+ /* Save DEPC and DESAVE since debug-mode exceptions (see
+ debug_probe_{load,store}) can clobber these. */
+ CVMX_MF_COP0(context->cop0.depc, COP0_DEPC);
+ CVMX_MF_COP0(context->cop0.desave, COP0_DESAVE);
+
+ context->hw_ibp.status = cvmx_read_csr(CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_STATUS);
+ for (i = 0; i < 4; i++)
+ {
+ context->hw_ibp.address[i] = cvmx_read_csr(CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_ADDRESS(i));
+ context->hw_ibp.address_mask[i] = cvmx_read_csr(CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_ADDRESS_MASK(i));
+ context->hw_ibp.asid[i] = cvmx_read_csr(CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_ASID(i));
+ context->hw_ibp.control[i] = cvmx_read_csr(CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_CONTROL(i));
+ }
+
+ context->hw_dbp.status = cvmx_read_csr(CVMX_DEBUG_HW_DATA_BREAKPOINT_STATUS);
+ for (i = 0; i < 4; i++)
+ {
+ context->hw_dbp.address[i] = cvmx_read_csr(CVMX_DEBUG_HW_DATA_BREAKPOINT_ADDRESS(i));
+ context->hw_dbp.address_mask[i] = cvmx_read_csr(CVMX_DEBUG_HW_DATA_BREAKPOINT_ADDRESS_MASK(i));
+ context->hw_dbp.asid[i] = cvmx_read_csr(CVMX_DEBUG_HW_DATA_BREAKPOINT_ASID(i));
+ context->hw_dbp.control[i] = cvmx_read_csr(CVMX_DEBUG_HW_DATA_BREAKPOINT_CONTROL(i));
+ }
+
+ for (i = 0; i < cvmx_debug_globals->tlb_entries; i++)
+ {
+ CVMX_MT_COP0(i, COP0_INDEX);
+ asm volatile ("tlbr");
+ CVMX_MF_COP0(context->tlbs[i].entrylo[0], COP0_ENTRYLO0);
+ CVMX_MF_COP0(context->tlbs[i].entrylo[1], COP0_ENTRYLO1);
+ CVMX_MF_COP0(context->tlbs[i].entryhi, COP0_ENTRYHI);
+ CVMX_MF_COP0(context->tlbs[i].pagemask, COP0_PAGEMASK);
+ }
+ CVMX_SYNCW;
+}
+
+static void cvmx_debug_restore_core_context(volatile cvmx_debug_core_context_t *context)
+{
+ uint64_t hi, lo;
+ int i;
+ cvmx_debug_memcpy_align (__cvmx_debug_save_regs_area, (char *) context->regs, sizeof(context->regs));
+ /* We don't change the TLB so no need to restore it. */
+ cvmx_write_csr(CVMX_DEBUG_HW_DATA_BREAKPOINT_STATUS, context->hw_dbp.status);
+ for (i = 0; i < 4; i++)
+ {
+ cvmx_write_csr(CVMX_DEBUG_HW_DATA_BREAKPOINT_ADDRESS(i), context->hw_dbp.address[i]);
+ cvmx_write_csr(CVMX_DEBUG_HW_DATA_BREAKPOINT_ADDRESS_MASK(i), context->hw_dbp.address_mask[i]);
+ cvmx_write_csr(CVMX_DEBUG_HW_DATA_BREAKPOINT_ASID(i), context->hw_dbp.asid[i]);
+ cvmx_write_csr(CVMX_DEBUG_HW_DATA_BREAKPOINT_CONTROL(i), context->hw_dbp.control[i]);
+ }
+ cvmx_write_csr(CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_STATUS, context->hw_ibp.status);
+ for (i = 0; i < 4; i++)
+ {
+ cvmx_write_csr(CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_ADDRESS(i), context->hw_ibp.address[i]);
+ cvmx_write_csr(CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_ADDRESS_MASK(i), context->hw_ibp.address_mask[i]);
+ cvmx_write_csr(CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_ASID(i), context->hw_ibp.asid[i]);
+ cvmx_write_csr(CVMX_DEBUG_HW_INSTRUCTION_BREAKPOINT_CONTROL(i), context->hw_ibp.control[i]);
+ }
+ CVMX_MT_COP0(context->cop0.index, COP0_INDEX);
+ CVMX_MT_COP0(context->cop0.entrylo[0], COP0_ENTRYLO0);
+ CVMX_MT_COP0(context->cop0.entrylo[1], COP0_ENTRYLO1);
+ CVMX_MT_COP0(context->cop0.entryhi, COP0_ENTRYHI);
+ CVMX_MT_COP0(context->cop0.pagemask, COP0_PAGEMASK);
+ CVMX_MT_COP0(context->cop0.status, COP0_STATUS);
+ CVMX_MT_COP0(context->cop0.cause, COP0_CAUSE);
+ CVMX_MT_COP0(context->cop0.debug, COP0_DEBUG);
+ CVMX_MT_COP0(context->cop0.multicoredebug, COP0_MULTICOREDEBUG);
+ CVMX_MT_COP0(context->cop0.perfval[0], COP0_PERFVALUE0);
+ CVMX_MT_COP0(context->cop0.perfval[1], COP0_PERFVALUE1);
+ CVMX_MT_COP0(context->cop0.perfctrl[0], COP0_PERFCONTROL0);
+ CVMX_MT_COP0(context->cop0.perfctrl[1], COP0_PERFCONTROL1);
+ CVMX_MT_COP0(context->cop0.depc, COP0_DEPC);
+ CVMX_MT_COP0(context->cop0.desave, COP0_DESAVE);
+ lo = context->lo;
+ hi = context->hi;
+ asm("mtlo %0" :: "r"(lo));
+ asm("mthi %0" :: "r"(hi));
+}
+
+static inline void cvmx_debug_print_cause(volatile cvmx_debug_core_context_t *context)
+{
+ if (!CVMX_DEBUG_LOGGING)
+ return;
+ if (context->cop0.multicoredebug & 1)
+ cvmx_dprintf("MCD0 was pulsed\n");
+ if (context->cop0.multicoredebug & (1 << 16))
+ cvmx_dprintf("Exception %lld in Debug Mode\n", (long long)((context->cop0.debug >> 10) & 0x1f));
+ if (context->cop0.debug & (1 << 19))
+ cvmx_dprintf("DDBSImpr\n");
+ if (context->cop0.debug & (1 << 18))
+ cvmx_dprintf("DDBLImpr\n");
+ if (context->cop0.debug & (1 << 5))
+ cvmx_dprintf("DINT\n");
+ if (context->cop0.debug & (1 << 4))
+ cvmx_dprintf("Debug Instruction Breakpoint (DIB) exception\n");
+ if (context->cop0.debug & (1 << 3))
+ cvmx_dprintf("Debug Date Break Store (DDBS) exception\n");
+ if (context->cop0.debug & (1 << 2))
+ cvmx_dprintf("Debug Date Break Load (DDBL) exception\n");
+ if (context->cop0.debug & (1 << 1))
+ cvmx_dprintf("Debug Breakpoint (DBp) exception\n");
+ if (context->cop0.debug & (1 << 0))
+ cvmx_dprintf("Debug Single Step (DSS) exception\n");
+}
+
+void __cvmx_debug_handler_stage3 (uint64_t lo, uint64_t hi)
+{
+ volatile cvmx_debug_core_context_t *context;
+ int comms_changed = 0;
+
+ cvmx_debug_printf("Entering debug exception handler\n");
+ cvmx_debug_printf("Debug named block at %p\n", cvmx_debug_globals);
+ if (__cvmx_debug_mode_exception_occured)
+ {
+ uint64_t depc;
+ CVMX_MF_COP0(depc, COP0_DEPC);
+ cvmx_dprintf("Unexpected debug-mode exception occured at 0x%llx, 0x%llx spinning\n", (long long) depc, (long long)(__cvmx_debug_mode_exception_occured));
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ panic("Unexpected debug-mode exception occured at 0x%llx, 0x%llx\n", (long long) depc, (long long)(__cvmx_debug_mode_exception_occured));
+#endif
+ while (1)
+ ;
+ }
+
+ context = cvmx_debug_core_context();
+ cvmx_debug_save_core_context(context, hi, lo);
+
+ {
+ cvmx_debug_state_t state;
+ cvmx_spinlock_lock(&cvmx_debug_globals->lock);
+ state = cvmx_debug_get_state();
+ state.ever_been_in_debug = 1;
+ cvmx_debug_update_state (state);
+ cvmx_spinlock_unlock(&cvmx_debug_globals->lock);
+ }
+ cvmx_debug_print_cause(context);
+
+ do
+ {
+ int needs_proxy;
+ comms_changed = 0;
+ /* If the communication changes, change it. */
+ cvmx_spinlock_lock(&cvmx_debug_globals->lock);
+ if (cvmx_debug_globals->comm_changed)
+ {
+ cvmx_debug_printf("Communication changed: %d\n", (int)cvmx_debug_globals->comm_changed);
+ if (cvmx_debug_globals->comm_changed > COMM_SIZE)
+ {
+ cvmx_dprintf("Unknown communication spinning: %lld > %d.\n", (long long)cvmx_debug_globals->comm_changed, (int)(COMM_SIZE));
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ panic("Unknown communication.\n");
+#endif
+ while (1)
+ ;
+ }
+ cvmx_debug_globals->comm_type = cvmx_debug_globals->comm_changed - 1;
+ cvmx_debug_globals->comm_changed = 0;
+ }
+ cvmx_spinlock_unlock(&cvmx_debug_globals->lock);
+ needs_proxy = cvmx_debug_comms[cvmx_debug_globals->comm_type]->needs_proxy;
+
+ {
+ cvmx_debug_register_t debug_reg;
+ cvmx_debug_state_t state;
+ unsigned core = cvmx_get_core_num();
+
+ state = cvmx_debug_get_state();
+ debug_reg.u64 = context->cop0.debug;
+ /* All cores stop on any exception. See if we want nothing from this
+ core and it should resume. This needs to be done for non-proxy-based
+ debugging so that some non-active cores can control the other cores. */
+ if (!cvmx_debug_stop_core(state, core, &debug_reg, needs_proxy))
+ {
+ context->cop0.debug = debug_reg.u64;
+ break;
+ }
+ }
+
+ if (needs_proxy)
+ {
+ cvmx_debug_register_t debug_reg;
+ debug_reg.u64 = context->cop0.debug;
+ cvmx_debug_printf("Starting to proxy\n");
+ comms_changed = cvmx_debug_perform_proxy(&debug_reg, context);
+ context->cop0.debug = debug_reg.u64;
+ }
+ else
+ {
+ cvmx_debug_printf("Starting to wait for remote host\n");
+ cvmx_debug_comms[cvmx_debug_globals->comm_type]->wait_for_resume(context, cvmx_debug_get_state());
+ }
+ } while (comms_changed);
+
+ cvmx_debug_clear_status(context);
+
+ cvmx_debug_restore_core_context(context);
+ cvmx_debug_printf("Exiting debug exception handler\n");
+}
+
+void cvmx_debug_trigger_exception(void)
+{
+ /* Set CVMX_CIU_DINT to enter debug exception handler. */
+ cvmx_write_csr (CVMX_CIU_DINT, 1u << cvmx_get_core_num ());
+ /* Perform an immediate read after every write to an RSL register to force
+ the write to complete. It doesn't matter what RSL read we do, so we
+ choose CVMX_MIO_BOOT_BIST_STAT because it is fast and harmless */
+ cvmx_read_csr (CVMX_MIO_BOOT_BIST_STAT);
+}
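
The read-back is the standard Octeon idiom for flushing a CSR write: any RSL read forces the preceding RSL write to complete. A generic sketch of the pattern, reusing the same harmless status register:

    /* Generic write-then-read-back flush, as used above. */
    static void csr_write_flushed(uint64_t csr_addr, uint64_t value)
    {
        cvmx_write_csr(csr_addr, value);
        (void)cvmx_read_csr(CVMX_MIO_BOOT_BIST_STAT); /* any RSL read will do */
    }
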
+
+/**
+ * Inform debugger about the end of the program. This is
+ * called from crt0 after all the C cleanup code finishes.
+ * Our current stack is the C one, not the debug exception
+ * stack. */
+void cvmx_debug_finish(void)
+{
+ unsigned coreid = cvmx_get_core_num();
+ cvmx_debug_state_t state;
+
+ if (!cvmx_debug_globals) return;
+ cvmx_debug_printf ("Debug _exit reached!, core %d, cvmx_debug_globals = %p\n", coreid, cvmx_debug_globals);
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+ fflush (stdout);
+ fflush (stderr);
+#endif
+
+ cvmx_spinlock_lock(&cvmx_debug_globals->lock);
+ state = cvmx_debug_get_state();
+ state.known_cores ^= (1u << coreid);
+ state.core_finished |= (1u << coreid);
+ cvmx_debug_update_state(state);
+
+ /* Tell the user the core has finished. */
+ if (state.ever_been_in_debug)
+ cvmx_debug_putcorepacket("finished.", coreid);
+
+ /* Notify the debugger if all cores have completed the program */
+ if ((cvmx_debug_core_mask () & state.core_finished) == cvmx_debug_core_mask ())
+ {
+ cvmx_debug_printf("All cores done!\n");
+ if (state.ever_been_in_debug)
+ cvmx_debug_putpacket_noformat("D0");
+ }
+ if (state.focus_core == coreid && state.known_cores != 0)
+ {
+ /* Loop through cores looking for someone to handle interrupts.
+ Since we already check that known_cores is non zero, this
+ should always find a core */
+ unsigned newcore;
+ for (newcore = 0; newcore < CVMX_MAX_CORES; newcore++)
+ {
+ if (state.known_cores & (1u<<newcore))
+ {
+ cvmx_debug_printf("Routing uart interrupts to Core #%u.\n", newcore);
+ cvmx_debug_set_focus_core(&state, newcore);
+ cvmx_debug_update_state(state);
+ break;
+ }
+ }
+ }
+ cvmx_spinlock_unlock(&cvmx_debug_globals->lock);
+
+ /* If we have ever been in the debugger, report that this core has exited. */
+ if (state.ever_been_in_debug)
+ cvmx_debug_trigger_exception();
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-debug.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-debug.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-debug.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-debug.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,455 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Interface to debug exception handler
+ *
+ * <hr>$Revision: $<hr>
+ */
+
+#ifndef __CVMX_DEBUG_H__
+#define __CVMX_DEBUG_H__
+
+#include "cvmx-core.h"
+#include "cvmx-spinlock.h"
+
+
+#define CVMX_DEBUG_MAX_REQUEST_SIZE (1024 + 34) /* Enough room for setting 512 bytes of memory. */
+#define CVMX_DEBUG_MAX_RESPONSE_SIZE (1024 + 5)
+
+#define CVMX_DEBUG_GLOBALS_BLOCK_NAME "cvmx-debug-globals"
+#define CVMX_DEBUG_GLOBALS_VERSION 3
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void cvmx_debug_init(void);
+void cvmx_debug_finish(void);
+void cvmx_debug_trigger_exception(void);
+
+#ifdef CVMX_BUILD_FOR_TOOLCHAIN
+extern int __octeon_debug_booted;
+
+static inline int cvmx_debug_booted(void)
+{
+ return __octeon_debug_booted;
+}
+
+#else
+
+static inline int cvmx_debug_booted(void)
+{
+ return cvmx_sysinfo_get()->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_DEBUG;
+}
+#endif
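+
+/*
+ * Editor's note: an illustrative sketch (not part of the original SDK
+ * header) of how an application might use the entry points above: install
+ * the debug stub only when the bootloader flagged a debugger boot.
+ * Compiled out with #if 0.
+ */
+#if 0
+static void app_maybe_start_debug(void)
+{
+    if (cvmx_debug_booted())
+        cvmx_debug_init();  /* installs the debug exception handler */
+}
+#endif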
+
+/* There are 32 TLB entries in CN3XXX, 64 in CN5XXX, and 128 in
+ CN6XXX. */
+#define CVMX_DEBUG_N_TLB_ENTRIES 128
+
+/* Maximum number of hardware breakpoints/watchpoints allowed */
+#define CVMX_DEBUG_MAX_OCTEON_HW_BREAKPOINTS 4
+
+typedef struct
+{
+ volatile uint64_t remote_controlled;
+ uint64_t regs[32];
+ uint64_t lo;
+ uint64_t hi;
+
+#define CVMX_DEBUG_BASIC_CONTEXT \
+ F(remote_controlled); \
+ { int i; \
+ for (i = 0; i < 32; i++) \
+ F(regs[i]); \
+ } \
+ F(lo); \
+ F(hi);
+
+ struct {
+ uint64_t index;
+ uint64_t entrylo[2];
+ uint64_t entryhi;
+ uint64_t pagemask;
+ uint64_t status;
+ uint64_t badvaddr;
+ uint64_t cause;
+ uint64_t depc;
+ uint64_t desave;
+ uint64_t debug;
+ uint64_t multicoredebug;
+ uint64_t perfval[2];
+ uint64_t perfctrl[2];
+ } cop0;
+
+#define CVMX_DEBUG_COP0_CONTEXT \
+ F(cop0.index); \
+ F(cop0.entrylo[0]); \
+ F(cop0.entrylo[1]); \
+ F(cop0.entryhi); \
+ F(cop0.pagemask); \
+ F(cop0.status); \
+ F(cop0.badvaddr); \
+ F(cop0.cause); \
+ F(cop0.depc); \
+ F(cop0.desave); \
+ F(cop0.debug); \
+ F(cop0.multicoredebug); \
+ F(cop0.perfval[0]); \
+ F(cop0.perfval[1]); \
+ F(cop0.perfctrl[0]); \
+ F(cop0.perfctrl[1]);
+
+ struct
+ {
+ uint64_t status;
+ uint64_t address[4];
+ uint64_t address_mask[4];
+ uint64_t asid[4];
+ uint64_t control[4];
+ } hw_ibp, hw_dbp;
+
+/* Hardware Instruction Break Point */
+
+#define CVMX_DEBUG_HW_IBP_CONTEXT \
+ F(hw_ibp.status); \
+ F(hw_ibp.address[0]); \
+ F(hw_ibp.address[1]); \
+ F(hw_ibp.address[2]); \
+ F(hw_ibp.address[3]); \
+ F(hw_ibp.address_mask[0]); \
+ F(hw_ibp.address_mask[1]); \
+ F(hw_ibp.address_mask[2]); \
+ F(hw_ibp.address_mask[3]); \
+ F(hw_ibp.asid[0]); \
+ F(hw_ibp.asid[1]); \
+ F(hw_ibp.asid[2]); \
+ F(hw_ibp.asid[3]); \
+ F(hw_ibp.control[0]); \
+ F(hw_ibp.control[1]); \
+ F(hw_ibp.control[2]); \
+ F(hw_ibp.control[3]);
+
+/* Hardware Data Break Point */
+#define CVMX_DEBUG_HW_DBP_CONTEXT \
+ F(hw_dbp.status); \
+ F(hw_dbp.address[0]); \
+ F(hw_dbp.address[1]); \
+ F(hw_dbp.address[2]); \
+ F(hw_dbp.address[3]); \
+ F(hw_dbp.address_mask[0]); \
+ F(hw_dbp.address_mask[1]); \
+ F(hw_dbp.address_mask[2]); \
+ F(hw_dbp.address_mask[3]); \
+ F(hw_dbp.asid[0]); \
+ F(hw_dbp.asid[1]); \
+ F(hw_dbp.asid[2]); \
+ F(hw_dbp.asid[3]); \
+ F(hw_dbp.control[0]); \
+ F(hw_dbp.control[1]); \
+ F(hw_dbp.control[2]); \
+ F(hw_dbp.control[3]);
+
+
+ struct cvmx_debug_tlb_t
+ {
+ uint64_t entryhi;
+ uint64_t pagemask;
+ uint64_t entrylo[2];
+ uint64_t reserved;
+ } tlbs[CVMX_DEBUG_N_TLB_ENTRIES];
+
+#define CVMX_DEBUG_TLB_CONTEXT \
+ { int i; \
+ for (i = 0; i < CVMX_DEBUG_N_TLB_ENTRIES; i++) \
+ { \
+ F(tlbs[i].entryhi); \
+ F(tlbs[i].pagemask); \
+ F(tlbs[i].entrylo[0]); \
+ F(tlbs[i].entrylo[1]); \
+ } \
+ }
+
+} cvmx_debug_core_context_t;
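+
+/*
+ * Editor's note: an illustrative sketch (not part of the original header)
+ * of the F() iteration pattern behind the *_CONTEXT macros above. A caller
+ * defines F() for a single field, expands a context macro, and every field
+ * in that group is visited in turn. Compiled out with #if 0; assumes
+ * <stdio.h> for the printf used here.
+ */
+#if 0
+#include <stdio.h>
+static void example_dump_basic_context(volatile cvmx_debug_core_context_t *c)
+{
+#define F(field) printf(#field " = 0x%016llx\n", (unsigned long long)c->field)
+    CVMX_DEBUG_BASIC_CONTEXT
+#undef F
+}
+#endif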
+
+typedef struct cvmx_debug_tlb_t cvmx_debug_tlb_t;
+
+
+
+typedef enum cvmx_debug_comm_type_e
+{
+ COMM_UART,
+ COMM_REMOTE,
+ COMM_SIZE
+} cvmx_debug_comm_type_t;
+
+typedef enum
+{
+ COMMAND_NOP = 0, /**< Core doesn't need to do anything; just stay in the exception handler */
+ COMMAND_STEP, /**< Core needs to perform a single instruction step */
+ COMMAND_CONTINUE /**< Core needs to start running; doesn't return until some debug event occurs */
+} cvmx_debug_command_t;
+
+/* Every field in this struct has to be uint32_t. */
+typedef struct
+{
+ uint32_t known_cores;
+ uint32_t step_isr; /**< True if we are going to step into ISRs. */
+ uint32_t focus_switch; /**< Focus can be switched. */
+ uint32_t core_finished; /**< True if a core has finished and not been processed yet. */
+ uint32_t command; /**< Command for all cores (cvmx_debug_command_t) */
+ uint32_t step_all; /**< True if step and continue should affect all cores; if false, only the focus core is affected */
+ uint32_t focus_core; /**< Core currently under control of the debugger */
+ uint32_t active_cores; /**< Bitmask of cores that should stop on a breakpoint */
+ uint32_t handler_cores; /**< Bitmask of cores currently running the exception handler */
+ uint32_t ever_been_in_debug; /**< True if we have ever been in the debugger stub at all. */
+} __attribute__ ((aligned(sizeof(uint64_t)))) cvmx_debug_state_t;
+
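+/* Editor's note (not in the original header): the typedef below is a
+   compile-time assertion. If the state struct plus the spinlock and a few
+   64-bit header words outgrow a 128-byte cache block, the array size
+   evaluates to -1 and compilation fails. */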
+typedef int cvmx_debug_state_t_should_fit_inside_a_cache_block[sizeof(cvmx_debug_state_t)+sizeof(cvmx_spinlock_t)+4*sizeof(uint64_t) > 128 ? -1 : 1];
+
+typedef struct cvmx_debug_globals_s
+{
+ uint64_t version; /* This is always the first element of this struct */
+ uint64_t comm_type; /* cvmx_debug_comm_type_t */
+ volatile uint64_t comm_changed; /* cvmx_debug_comm_type_t+1 when someone wants to change it. */
+ volatile uint64_t init_complete;
+ uint32_t tlb_entries;
+ uint32_t state[sizeof(cvmx_debug_state_t)/sizeof(uint32_t)];
+ cvmx_spinlock_t lock;
+
+ volatile cvmx_debug_core_context_t contextes[CVMX_MAX_CORES];
+} cvmx_debug_globals_t;
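+
+/*
+ * Editor's note: an illustrative consumer-side sanity check, not part of
+ * the original header. The globals live in a named bootmem block
+ * (CVMX_DEBUG_GLOBALS_BLOCK_NAME); obtaining the pointer is left out here
+ * since it goes through the cvmx-bootmem named-block API. Compiled out
+ * with #if 0.
+ */
+#if 0
+static int example_check_debug_globals(cvmx_debug_globals_t *g)
+{
+    /* Reject a block laid out by an incompatible stub version. */
+    if (g->version != CVMX_DEBUG_GLOBALS_VERSION)
+        return -1;
+    /* Spin until the owning core reports initialization complete. */
+    while (!g->init_complete)
+        ;
+    return 0;
+}
+#endif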
+
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t rsrvd:32; /**< Unused */
+ uint64_t dbd:1; /**< Indicates whether the last debug exception or
+ exception in Debug Mode occurred in a branch or
+ jump delay slot */
+ uint64_t dm:1; /**< Indicates that the processor is operating in Debug
+ Mode: */
+ uint64_t nodcr:1; /**< Indicates whether the dseg segment is present */
+ uint64_t lsnm:1; /**< Controls access of loads/stores between the dseg
+ segment and remaining memory when the dseg
+ segment is present */
+ uint64_t doze:1; /**< Indicates that the processor was in a low-power mode
+ when a debug exception occurred */
+ uint64_t halt:1; /**< Indicates that the internal processor system bus clock
+ was stopped when the debug exception occurred */
+ uint64_t countdm:1; /**< Controls or indicates the Count register behavior in
+ Debug Mode. Implementations can have fixed
+ behavior, in which case this bit is read-only (R), or
+ the implementation can allow this bit to control the
+ behavior, in which case this bit is read/write (R/W).
+ The reset value of this bit indicates the behavior after
+ reset, and depends on the implementation.
+ Encoding of the bit is:
+ - 0 Count register stopped in Debug Mode
+ - 1 Count register is running in Debug Mode
+ This bit is read-only (R) and reads as zero if not implemented. */
+ uint64_t ibusep:1; /**< Indicates if a Bus Error exception is pending from an
+ instruction fetch. Set when an instruction fetch bus
+ error event occurs or a 1 is written to the bit by
+ software. Cleared when a Bus Error exception on an
+ instruction fetch is taken by the processor. If IBusEP
+ is set when IEXI is cleared, a Bus Error exception on
+ an instruction fetch is taken by the processor, and
+ IBusEP is cleared.
+ In Debug Mode, a Bus Error exception applies to a
+ Debug Mode Bus Error exception.
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ uint64_t mcheckp:1; /**< Indicates if a Machine Check exception is pending.
+ Set when a machine check event occurs or a 1 is
+ written to the bit by software. Cleared when a
+ Machine Check exception is taken by the processor.
+ If MCheckP is set when IEXI is cleared, a Machine
+ Check exception is taken by the processor, and
+ MCheckP is cleared.
+ In Debug Mode, a Machine Check exception applies
+ to a Debug Mode Machine Check exception.
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ uint64_t cacheep:1; /**< Indicates if a Cache Error is pending. Set when a
+ cache error event occurs or a 1 is written to the bit by
+ software. Cleared when a Cache Error exception is
+ taken by the processor. If CacheEP is set when IEXI
+ is cleared, a Cache Error exception is taken by the
+ processor, and CacheEP is cleared.
+ In Debug Mode, a Cache Error exception applies to a
+ Debug Mode Cache Error exception.
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ uint64_t dbusep:1; /**< Indicates if a Data Access Bus Error exception is
+ pending. Set when a data access bus error event
+ occurs or a 1 is written to the bit by software. Cleared
+ when a Bus Error exception on data access is taken by
+ the processor. If DBusEP is set when IEXI is cleared,
+ a Bus Error exception on data access is taken by the
+ processor, and DBusEP is cleared.
+ In Debug Mode, a Bus Error exception applies to a
+ Debug Mode Bus Error exception.
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ uint64_t iexi:1; /**< An Imprecise Error eXception Inhibit (IEXI) controls
+ exceptions taken due to imprecise error indications.
+ Set when the processor takes a debug exception or an
+ exception in Debug Mode occurs. Cleared by
+ execution of the DERET instruction. Otherwise
+ modifiable by Debug Mode software.
+ When IEXI is set, then the imprecise error exceptions
+ from bus errors on instruction fetches or data
+ accesses, cache errors, or machine checks are
+ inhibited and deferred until the bit is cleared.
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ uint64_t ddbsimpr:1; /**< Indicates that a Debug Data Break Store Imprecise
+ exception due to a store was the cause of the debug
+ exception, or that an imprecise data hardware break
+ due to a store was indicated after another debug
+ exception occurred. Cleared on exception in Debug
+ Mode.
+ - 0 No match of an imprecise data hardware breakpoint on store
+ - 1 Match of imprecise data hardware breakpoint on store
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ uint64_t ddblimpr:1; /**< Indicates that a Debug Data Break Load Imprecise
+ exception due to a load was the cause of the debug
+ exception, or that an imprecise data hardware break
+ due to a load was indicated after another debug
+ exception occurred. Cleared on exception in Debug
+ Mode.
+ - 0 No match of an imprecise data hardware breakpoint on load
+ - 1 Match of imprecise data hardware breakpoint on load
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ uint64_t ejtagver:3; /**< Provides the EJTAG version.
+ - 0 Version 1 and 2.0
+ - 1 Version 2.5
+ - 2 Version 2.6
+ - 3-7 Reserved */
+ uint64_t dexccode:5; /**< Indicates the cause of the latest exception in Debug
+ Mode.
+ The field is encoded as the ExcCode field in the
+ Cause register for those exceptions that can occur in
+ Debug Mode (the encoding is shown in MIPS32 and
+ MIPS64 specifications), with addition of code 30
+ with the mnemonic CacheErr for cache errors and the
+ use of code 9 with mnemonic Bp for the SDBBP
+ instruction.
+ This value is undefined after a debug exception. */
+ uint64_t nosst:1; /**< Indicates whether the single-step feature controllable
+ by the SSt bit is available in this implementation:
+ - 0 Single-step feature available
+ - 1 No single-step feature available
+ A minimum number of hardware instruction
+ breakpoints must be available if no single-step
+ feature is implemented in hardware. Refer to Section
+ 4.8.1 on page 69 for more information. */
+ uint64_t sst:1; /**< Controls whether single-step feature is enabled:
+ - 0 No enable of single-step feature
+ - 1 Single-step feature enabled
+ This bit is read-only (R) and reads as zero if not
+ implemented due to no single-step feature (NoSSt is
+ 1). */
+ uint64_t rsrvd2:2; /**< Must be zero */
+ uint64_t dint:1; /**< Indicates that a Debug Interrupt exception occurred.
+ Cleared on exception in Debug Mode.
+ - 0 No Debug Interrupt exception
+ - 1 Debug Interrupt exception
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ uint64_t dib:1; /**< Indicates that a Debug Instruction Break exception
+ occurred. Cleared on exception in Debug Mode.
+ - 0 No Debug Instruction Break exception
+ - 1 Debug Instruction Break exception
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ uint64_t ddbs:1; /**< Indicates that a Debug Data Break Store exception
+ occurred on a store due to a precise data hardware
+ break. Cleared on exception in Debug Mode.
+ - 0 No Debug Data Break Store Exception
+ - 1 Debug Data Break Store Exception
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ uint64_t ddbl:1; /**< Indicates that a Debug Data Break Load exception
+ occurred on a load due to a precise data hardware
+ break. Cleared on exception in Debug Mode.
+ - 0 No Debug Data Break Load Exception
+ - 1 Debug Data Break Load Exception
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ uint64_t dbp:1; /**< Indicates that a Debug Breakpoint exception
+ occurred. Cleared on exception in Debug Mode.
+ - 0 No Debug Breakpoint exception
+ - 1 Debug Breakpoint exception */
+ uint64_t dss:1; /**< Indicates that a Debug Single Step exception
+ occurred. Cleared on exception in Debug Mode.
+ - 0 No debug single-step exception
+ - 1 Debug single-step exception
+ This bit is read-only (R) and reads as zero if not
+ implemented. */
+ } s;
+} cvmx_debug_register_t;
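+
+/*
+ * Editor's note: an illustrative decode of the COP0 Debug register using
+ * the union above (not part of the original header). Given the raw value
+ * read in the exception handler, the cause bits identify the debug event.
+ * Compiled out with #if 0.
+ */
+#if 0
+static const char *example_debug_cause(uint64_t raw)
+{
+    cvmx_debug_register_t dbg;
+    dbg.u64 = raw;
+    if (dbg.s.dss)  return "single step";
+    if (dbg.s.dbp)  return "software breakpoint (SDBBP)";
+    if (dbg.s.ddbl) return "data break on load";
+    if (dbg.s.ddbs) return "data break on store";
+    if (dbg.s.dib)  return "instruction break";
+    if (dbg.s.dint) return "debug interrupt";
+    return "unknown";
+}
+#endif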
+
+
+typedef struct
+{
+ void (*init)(void);
+ void (*install_break_handler)(void);
+ int needs_proxy;
+ int (*getpacket)(char *, size_t);
+ int (*putpacket)(char *);
+ void (*wait_for_resume)(volatile cvmx_debug_core_context_t *, cvmx_debug_state_t);
+ void (*change_core)(int, int);
+} cvmx_debug_comm_t;
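+
+/*
+ * Editor's note: an illustrative shape of a transport backend (not part
+ * of the original header). Each communication channel (UART, remote
+ * proxy) fills in one of these tables; every function name below is a
+ * hypothetical placeholder. Compiled out with #if 0.
+ */
+#if 0
+static const cvmx_debug_comm_t example_uart_comm = {
+    .init                  = example_uart_init,
+    .install_break_handler = example_uart_install_break_handler,
+    .needs_proxy           = 0,
+    .getpacket             = example_uart_getpacket, /* receive a request */
+    .putpacket             = example_uart_putpacket, /* send a response */
+    .wait_for_resume       = example_uart_wait_for_resume,
+    .change_core           = example_uart_change_core,
+};
+#endif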
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_DEBUG_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-debug.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-dfa-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-dfa-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-dfa-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,5695 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-dfa-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon dfa.
+ *
+ * This file is auto-generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_DFA_DEFS_H__
+#define __CVMX_DFA_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_BIST0 CVMX_DFA_BIST0_FUNC()
+static inline uint64_t CVMX_DFA_BIST0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_BIST0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800370007F0ull);
+}
+#else
+#define CVMX_DFA_BIST0 (CVMX_ADD_IO_SEG(0x00011800370007F0ull))
+#endif
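+
+/*
+ * Editor's note (not part of the generated file): this pattern repeats
+ * for every CSR below. With CVMX_ENABLE_CSR_ADDRESS_CHECKING set, the
+ * macro routes through a function that warns on unsupported models;
+ * otherwise it is the bare I/O address. Either way, a caller reads it
+ * identically, e.g. (compiled out with #if 0):
+ */
+#if 0
+uint64_t bist0 = cvmx_read_csr(CVMX_DFA_BIST0);
+#endif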
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_BIST1 CVMX_DFA_BIST1_FUNC()
+static inline uint64_t CVMX_DFA_BIST1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_BIST1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800370007F8ull);
+}
+#else
+#define CVMX_DFA_BIST1 (CVMX_ADD_IO_SEG(0x00011800370007F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_BST0 CVMX_DFA_BST0_FUNC()
+static inline uint64_t CVMX_DFA_BST0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_BST0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800300007F0ull);
+}
+#else
+#define CVMX_DFA_BST0 (CVMX_ADD_IO_SEG(0x00011800300007F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_BST1 CVMX_DFA_BST1_FUNC()
+static inline uint64_t CVMX_DFA_BST1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_BST1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800300007F8ull);
+}
+#else
+#define CVMX_DFA_BST1 (CVMX_ADD_IO_SEG(0x00011800300007F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_CFG CVMX_DFA_CFG_FUNC()
+static inline uint64_t CVMX_DFA_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000000ull);
+}
+#else
+#define CVMX_DFA_CFG (CVMX_ADD_IO_SEG(0x0001180030000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_CONFIG CVMX_DFA_CONFIG_FUNC()
+static inline uint64_t CVMX_DFA_CONFIG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_CONFIG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000000ull);
+}
+#else
+#define CVMX_DFA_CONFIG (CVMX_ADD_IO_SEG(0x0001180037000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_CONTROL CVMX_DFA_CONTROL_FUNC()
+static inline uint64_t CVMX_DFA_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_CONTROL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000020ull);
+}
+#else
+#define CVMX_DFA_CONTROL (CVMX_ADD_IO_SEG(0x0001180037000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DBELL CVMX_DFA_DBELL_FUNC()
+static inline uint64_t CVMX_DFA_DBELL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_DBELL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001370000000000ull);
+}
+#else
+#define CVMX_DFA_DBELL (CVMX_ADD_IO_SEG(0x0001370000000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DDR2_ADDR CVMX_DFA_DDR2_ADDR_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_ADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_ADDR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000210ull);
+}
+#else
+#define CVMX_DFA_DDR2_ADDR (CVMX_ADD_IO_SEG(0x0001180030000210ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DDR2_BUS CVMX_DFA_DDR2_BUS_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_BUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_BUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000080ull);
+}
+#else
+#define CVMX_DFA_DDR2_BUS (CVMX_ADD_IO_SEG(0x0001180030000080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DDR2_CFG CVMX_DFA_DDR2_CFG_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000208ull);
+}
+#else
+#define CVMX_DFA_DDR2_CFG (CVMX_ADD_IO_SEG(0x0001180030000208ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DDR2_COMP CVMX_DFA_DDR2_COMP_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_COMP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_COMP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000090ull);
+}
+#else
+#define CVMX_DFA_DDR2_COMP (CVMX_ADD_IO_SEG(0x0001180030000090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DDR2_EMRS CVMX_DFA_DDR2_EMRS_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_EMRS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_EMRS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000268ull);
+}
+#else
+#define CVMX_DFA_DDR2_EMRS (CVMX_ADD_IO_SEG(0x0001180030000268ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DDR2_FCNT CVMX_DFA_DDR2_FCNT_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_FCNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_FCNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000078ull);
+}
+#else
+#define CVMX_DFA_DDR2_FCNT (CVMX_ADD_IO_SEG(0x0001180030000078ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DDR2_MRS CVMX_DFA_DDR2_MRS_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_MRS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_MRS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000260ull);
+}
+#else
+#define CVMX_DFA_DDR2_MRS (CVMX_ADD_IO_SEG(0x0001180030000260ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DDR2_OPT CVMX_DFA_DDR2_OPT_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_OPT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_OPT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000070ull);
+}
+#else
+#define CVMX_DFA_DDR2_OPT (CVMX_ADD_IO_SEG(0x0001180030000070ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DDR2_PLL CVMX_DFA_DDR2_PLL_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_PLL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_PLL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000088ull);
+}
+#else
+#define CVMX_DFA_DDR2_PLL (CVMX_ADD_IO_SEG(0x0001180030000088ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DDR2_TMG CVMX_DFA_DDR2_TMG_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_TMG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_TMG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000218ull);
+}
+#else
+#define CVMX_DFA_DDR2_TMG (CVMX_ADD_IO_SEG(0x0001180030000218ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DEBUG0 CVMX_DFA_DEBUG0_FUNC()
+static inline uint64_t CVMX_DFA_DEBUG0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_DEBUG0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000040ull);
+}
+#else
+#define CVMX_DFA_DEBUG0 (CVMX_ADD_IO_SEG(0x0001180037000040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DEBUG1 CVMX_DFA_DEBUG1_FUNC()
+static inline uint64_t CVMX_DFA_DEBUG1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_DEBUG1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000048ull);
+}
+#else
+#define CVMX_DFA_DEBUG1 (CVMX_ADD_IO_SEG(0x0001180037000048ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DEBUG2 CVMX_DFA_DEBUG2_FUNC()
+static inline uint64_t CVMX_DFA_DEBUG2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_DEBUG2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000050ull);
+}
+#else
+#define CVMX_DFA_DEBUG2 (CVMX_ADD_IO_SEG(0x0001180037000050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DEBUG3 CVMX_DFA_DEBUG3_FUNC()
+static inline uint64_t CVMX_DFA_DEBUG3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_DEBUG3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000058ull);
+}
+#else
+#define CVMX_DFA_DEBUG3 (CVMX_ADD_IO_SEG(0x0001180037000058ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DIFCTL CVMX_DFA_DIFCTL_FUNC()
+static inline uint64_t CVMX_DFA_DIFCTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_DIFCTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001370600000000ull);
+}
+#else
+#define CVMX_DFA_DIFCTL (CVMX_ADD_IO_SEG(0x0001370600000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DIFRDPTR CVMX_DFA_DIFRDPTR_FUNC()
+static inline uint64_t CVMX_DFA_DIFRDPTR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_DIFRDPTR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001370200000000ull);
+}
+#else
+#define CVMX_DFA_DIFRDPTR (CVMX_ADD_IO_SEG(0x0001370200000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_DTCFADR CVMX_DFA_DTCFADR_FUNC()
+static inline uint64_t CVMX_DFA_DTCFADR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_DTCFADR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000060ull);
+}
+#else
+#define CVMX_DFA_DTCFADR (CVMX_ADD_IO_SEG(0x0001180037000060ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_ECLKCFG CVMX_DFA_ECLKCFG_FUNC()
+static inline uint64_t CVMX_DFA_ECLKCFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_ECLKCFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000200ull);
+}
+#else
+#define CVMX_DFA_ECLKCFG (CVMX_ADD_IO_SEG(0x0001180030000200ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_ERR CVMX_DFA_ERR_FUNC()
+static inline uint64_t CVMX_DFA_ERR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_ERR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000028ull);
+}
+#else
+#define CVMX_DFA_ERR (CVMX_ADD_IO_SEG(0x0001180030000028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_ERROR CVMX_DFA_ERROR_FUNC()
+static inline uint64_t CVMX_DFA_ERROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_ERROR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000028ull);
+}
+#else
+#define CVMX_DFA_ERROR (CVMX_ADD_IO_SEG(0x0001180037000028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_INTMSK CVMX_DFA_INTMSK_FUNC()
+static inline uint64_t CVMX_DFA_INTMSK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_INTMSK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000030ull);
+}
+#else
+#define CVMX_DFA_INTMSK (CVMX_ADD_IO_SEG(0x0001180037000030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_MEMCFG0 CVMX_DFA_MEMCFG0_FUNC()
+static inline uint64_t CVMX_DFA_MEMCFG0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMCFG0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000008ull);
+}
+#else
+#define CVMX_DFA_MEMCFG0 (CVMX_ADD_IO_SEG(0x0001180030000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_MEMCFG1 CVMX_DFA_MEMCFG1_FUNC()
+static inline uint64_t CVMX_DFA_MEMCFG1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMCFG1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000010ull);
+}
+#else
+#define CVMX_DFA_MEMCFG1 (CVMX_ADD_IO_SEG(0x0001180030000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_MEMCFG2 CVMX_DFA_MEMCFG2_FUNC()
+static inline uint64_t CVMX_DFA_MEMCFG2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMCFG2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000060ull);
+}
+#else
+#define CVMX_DFA_MEMCFG2 (CVMX_ADD_IO_SEG(0x0001180030000060ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_MEMFADR CVMX_DFA_MEMFADR_FUNC()
+static inline uint64_t CVMX_DFA_MEMFADR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMFADR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000030ull);
+}
+#else
+#define CVMX_DFA_MEMFADR (CVMX_ADD_IO_SEG(0x0001180030000030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_MEMFCR CVMX_DFA_MEMFCR_FUNC()
+static inline uint64_t CVMX_DFA_MEMFCR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMFCR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000038ull);
+}
+#else
+#define CVMX_DFA_MEMFCR (CVMX_ADD_IO_SEG(0x0001180030000038ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_MEMHIDAT CVMX_DFA_MEMHIDAT_FUNC()
+static inline uint64_t CVMX_DFA_MEMHIDAT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_MEMHIDAT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001370700000000ull);
+}
+#else
+#define CVMX_DFA_MEMHIDAT (CVMX_ADD_IO_SEG(0x0001370700000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_MEMRLD CVMX_DFA_MEMRLD_FUNC()
+static inline uint64_t CVMX_DFA_MEMRLD_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMRLD not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000018ull);
+}
+#else
+#define CVMX_DFA_MEMRLD (CVMX_ADD_IO_SEG(0x0001180030000018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_NCBCTL CVMX_DFA_NCBCTL_FUNC()
+static inline uint64_t CVMX_DFA_NCBCTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_NCBCTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000020ull);
+}
+#else
+#define CVMX_DFA_NCBCTL (CVMX_ADD_IO_SEG(0x0001180030000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_PFC0_CNT CVMX_DFA_PFC0_CNT_FUNC()
+static inline uint64_t CVMX_DFA_PFC0_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_PFC0_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000090ull);
+}
+#else
+#define CVMX_DFA_PFC0_CNT (CVMX_ADD_IO_SEG(0x0001180037000090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_PFC0_CTL CVMX_DFA_PFC0_CTL_FUNC()
+static inline uint64_t CVMX_DFA_PFC0_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_PFC0_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000088ull);
+}
+#else
+#define CVMX_DFA_PFC0_CTL (CVMX_ADD_IO_SEG(0x0001180037000088ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_PFC1_CNT CVMX_DFA_PFC1_CNT_FUNC()
+static inline uint64_t CVMX_DFA_PFC1_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_PFC1_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800370000A0ull);
+}
+#else
+#define CVMX_DFA_PFC1_CNT (CVMX_ADD_IO_SEG(0x00011800370000A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_PFC1_CTL CVMX_DFA_PFC1_CTL_FUNC()
+static inline uint64_t CVMX_DFA_PFC1_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_PFC1_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000098ull);
+}
+#else
+#define CVMX_DFA_PFC1_CTL (CVMX_ADD_IO_SEG(0x0001180037000098ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_PFC2_CNT CVMX_DFA_PFC2_CNT_FUNC()
+static inline uint64_t CVMX_DFA_PFC2_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_PFC2_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800370000B0ull);
+}
+#else
+#define CVMX_DFA_PFC2_CNT (CVMX_ADD_IO_SEG(0x00011800370000B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_PFC2_CTL CVMX_DFA_PFC2_CTL_FUNC()
+static inline uint64_t CVMX_DFA_PFC2_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_PFC2_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800370000A8ull);
+}
+#else
+#define CVMX_DFA_PFC2_CTL (CVMX_ADD_IO_SEG(0x00011800370000A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_PFC3_CNT CVMX_DFA_PFC3_CNT_FUNC()
+static inline uint64_t CVMX_DFA_PFC3_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_PFC3_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800370000C0ull);
+}
+#else
+#define CVMX_DFA_PFC3_CNT (CVMX_ADD_IO_SEG(0x00011800370000C0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_PFC3_CTL CVMX_DFA_PFC3_CTL_FUNC()
+static inline uint64_t CVMX_DFA_PFC3_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_PFC3_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800370000B8ull);
+}
+#else
+#define CVMX_DFA_PFC3_CTL (CVMX_ADD_IO_SEG(0x00011800370000B8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_PFC_GCTL CVMX_DFA_PFC_GCTL_FUNC()
+static inline uint64_t CVMX_DFA_PFC_GCTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_DFA_PFC_GCTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180037000080ull);
+}
+#else
+#define CVMX_DFA_PFC_GCTL (CVMX_ADD_IO_SEG(0x0001180037000080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_RODT_COMP_CTL CVMX_DFA_RODT_COMP_CTL_FUNC()
+static inline uint64_t CVMX_DFA_RODT_COMP_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_RODT_COMP_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000068ull);
+}
+#else
+#define CVMX_DFA_RODT_COMP_CTL (CVMX_ADD_IO_SEG(0x0001180030000068ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_SBD_DBG0 CVMX_DFA_SBD_DBG0_FUNC()
+static inline uint64_t CVMX_DFA_SBD_DBG0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_SBD_DBG0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000040ull);
+}
+#else
+#define CVMX_DFA_SBD_DBG0 (CVMX_ADD_IO_SEG(0x0001180030000040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_SBD_DBG1 CVMX_DFA_SBD_DBG1_FUNC()
+static inline uint64_t CVMX_DFA_SBD_DBG1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_SBD_DBG1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000048ull);
+}
+#else
+#define CVMX_DFA_SBD_DBG1 (CVMX_ADD_IO_SEG(0x0001180030000048ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_SBD_DBG2 CVMX_DFA_SBD_DBG2_FUNC()
+static inline uint64_t CVMX_DFA_SBD_DBG2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_SBD_DBG2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000050ull);
+}
+#else
+#define CVMX_DFA_SBD_DBG2 (CVMX_ADD_IO_SEG(0x0001180030000050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFA_SBD_DBG3 CVMX_DFA_SBD_DBG3_FUNC()
+static inline uint64_t CVMX_DFA_SBD_DBG3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_SBD_DBG3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180030000058ull);
+}
+#else
+#define CVMX_DFA_SBD_DBG3 (CVMX_ADD_IO_SEG(0x0001180030000058ull))
+#endif
+
+/**
+ * cvmx_dfa_bist0
+ *
+ * DFA_BIST0 = DFA Bist Status (per-DTC)
+ *
+ * Description:
+ */
+union cvmx_dfa_bist0 {
+ uint64_t u64;
+ struct cvmx_dfa_bist0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t gfb : 3; /**< Bist Results for GFB RAM(s) (per-cluster)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_22_23 : 2;
+ uint64_t stx2 : 2; /**< Bist Results for STX2 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t stx1 : 2; /**< Bist Results for STX1 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t stx : 2; /**< Bist Results for STX0 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_14_15 : 2;
+ uint64_t dtx2 : 2; /**< Bist Results for DTX2 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dtx1 : 2; /**< Bist Results for DTX1 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dtx : 2; /**< Bist Results for DTX0 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_7_7 : 1;
+ uint64_t rdf : 3; /**< Bist Results for RWB RAM(s) (per-cluster)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_3_3 : 1;
+ uint64_t pdb : 3; /**< Bist Results for PDB RAM(s) (per-cluster)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t pdb : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t rdf : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t dtx : 2;
+ uint64_t dtx1 : 2;
+ uint64_t dtx2 : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t stx : 2;
+ uint64_t stx1 : 2;
+ uint64_t stx2 : 2;
+ uint64_t reserved_22_23 : 2;
+ uint64_t gfb : 3;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } s;
+ struct cvmx_dfa_bist0_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t gfb : 1; /**< Bist Results for GFB RAM(s) (per-cluster)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_18_23 : 6;
+ uint64_t stx : 2; /**< Bist Results for STX0 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_10_15 : 6;
+ uint64_t dtx : 2; /**< Bist Results for DTX0 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_5_7 : 3;
+ uint64_t rdf : 1; /**< Bist Results for RWB RAM(s) (per-cluster)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_1_3 : 3;
+ uint64_t pdb : 1; /**< Bist Results for PDB RAM(s) (per-cluster)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t pdb : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t rdf : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t dtx : 2;
+ uint64_t reserved_10_15 : 6;
+ uint64_t stx : 2;
+ uint64_t reserved_18_23 : 6;
+ uint64_t gfb : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } cn61xx;
+ struct cvmx_dfa_bist0_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t mwb : 1; /**< Bist Results for MWB RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_25_27 : 3;
+ uint64_t gfb : 1; /**< Bist Results for GFB RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_18_23 : 6;
+ uint64_t stx : 2; /**< Bist Results for STX RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_10_15 : 6;
+ uint64_t dtx : 2; /**< Bist Results for DTX RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_5_7 : 3;
+ uint64_t rdf : 1; /**< Bist Results for RWB[3:0] RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_1_3 : 3;
+ uint64_t pdb : 1; /**< Bist Results for PDB RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t pdb : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t rdf : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t dtx : 2;
+ uint64_t reserved_10_15 : 6;
+ uint64_t stx : 2;
+ uint64_t reserved_18_23 : 6;
+ uint64_t gfb : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t mwb : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn63xx;
+ struct cvmx_dfa_bist0_cn63xx cn63xxp1;
+ struct cvmx_dfa_bist0_cn63xx cn66xx;
+ struct cvmx_dfa_bist0_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_30_63 : 34;
+ uint64_t mrp : 2; /**< Bist Results for MRP RAM(s) (per-DLC)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_27_27 : 1;
+ uint64_t gfb : 3; /**< Bist Results for GFB RAM(s) (per-cluster)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_22_23 : 2;
+ uint64_t stx2 : 2; /**< Bist Results for STX2 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t stx1 : 2; /**< Bist Results for STX1 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t stx : 2; /**< Bist Results for STX0 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_14_15 : 2;
+ uint64_t dtx2 : 2; /**< Bist Results for DTX2 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dtx1 : 2; /**< Bist Results for DTX1 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dtx : 2; /**< Bist Results for DTX0 RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_7_7 : 1;
+ uint64_t rdf : 3; /**< Bist Results for RWB RAM(s) (per-cluster)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_3_3 : 1;
+ uint64_t pdb : 3; /**< Bist Results for PDB RAM(s) (per-cluster)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t pdb : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t rdf : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t dtx : 2;
+ uint64_t dtx1 : 2;
+ uint64_t dtx2 : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t stx : 2;
+ uint64_t stx1 : 2;
+ uint64_t stx2 : 2;
+ uint64_t reserved_22_23 : 2;
+ uint64_t gfb : 3;
+ uint64_t reserved_27_27 : 1;
+ uint64_t mrp : 2;
+ uint64_t reserved_30_63 : 34;
+#endif
+ } cn68xx;
+ struct cvmx_dfa_bist0_cn68xx cn68xxp1;
+};
+typedef union cvmx_dfa_bist0 cvmx_dfa_bist0_t;
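+
+/*
+ * Editor's note: an illustrative decode using the union above (not part
+ * of the generated file). Any non-zero field reports a failed built-in
+ * self test for the corresponding RAM. Compiled out with #if 0.
+ */
+#if 0
+static int example_dfa_bist0_failed(void)
+{
+    cvmx_dfa_bist0_t bist0;
+    bist0.u64 = cvmx_read_csr(CVMX_DFA_BIST0);
+    return bist0.s.pdb || bist0.s.rdf || bist0.s.dtx || bist0.s.stx ||
+           bist0.s.gfb;
+}
+#endif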
+
+/**
+ * cvmx_dfa_bist1
+ *
+ * DFA_BIST1 = DFA Bist Status (Globals)
+ *
+ * Description:
+ */
+union cvmx_dfa_bist1 {
+ uint64_t u64;
+ struct cvmx_dfa_bist1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t dlc1ram : 1; /**< DLC1 Bist Results
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dlc0ram : 1; /**< DLC0 Bist Results
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dc2ram3 : 1; /**< Cluster#2 Bist Results for RAM3 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dc2ram2 : 1; /**< Cluster#2 Bist Results for RAM2 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dc2ram1 : 1; /**< Cluster#2 Bist Results for RAM1 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dc1ram3 : 1; /**< Cluster#1 Bist Results for RAM3 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dc1ram2 : 1; /**< Cluster#1 Bist Results for RAM2 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dc1ram1 : 1; /**< Cluster#1 Bist Results for RAM1 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ram3 : 1; /**< Cluster#0 Bist Results for RAM3 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ram2 : 1; /**< Cluster#0 Bist Results for RAM2 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ram1 : 1; /**< Cluster#0 Bist Results for RAM1 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t crq : 1; /**< Bist Results for CRQ RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gutv : 1; /**< Bist Results for GUTV RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_7_7 : 1;
+ uint64_t gutp : 3; /**< Bist Results for GUTP RAMs (per-cluster)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ncd : 1; /**< Bist Results for NCD RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gif : 1; /**< Bist Results for GIF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gib : 1; /**< Bist Results for GIB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gfu : 1; /**< Bist Results for GFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t gfu : 1;
+ uint64_t gib : 1;
+ uint64_t gif : 1;
+ uint64_t ncd : 1;
+ uint64_t gutp : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t gutv : 1;
+ uint64_t crq : 1;
+ uint64_t ram1 : 1;
+ uint64_t ram2 : 1;
+ uint64_t ram3 : 1;
+ uint64_t dc1ram1 : 1;
+ uint64_t dc1ram2 : 1;
+ uint64_t dc1ram3 : 1;
+ uint64_t dc2ram1 : 1;
+ uint64_t dc2ram2 : 1;
+ uint64_t dc2ram3 : 1;
+ uint64_t dlc0ram : 1;
+ uint64_t dlc1ram : 1;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } s;
+ struct cvmx_dfa_bist1_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dlc0ram : 1; /**< DLC0 Bist Results
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_13_18 : 6;
+ uint64_t ram3 : 1; /**< Cluster#0 Bist Results for RAM3 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ram2 : 1; /**< Cluster#0 Bist Results for RAM2 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ram1 : 1; /**< Cluster#0 Bist Results for RAM1 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t crq : 1; /**< Bist Results for CRQ RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gutv : 1; /**< Bist Results for GUTV RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_5_7 : 3;
+ uint64_t gutp : 1; /**< Bist Results for GUTP RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ncd : 1; /**< Bist Results for NCD RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gif : 1; /**< Bist Results for GIF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gib : 1; /**< Bist Results for GIB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gfu : 1; /**< Bist Results for GFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t gfu : 1;
+ uint64_t gib : 1;
+ uint64_t gif : 1;
+ uint64_t ncd : 1;
+ uint64_t gutp : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gutv : 1;
+ uint64_t crq : 1;
+ uint64_t ram1 : 1;
+ uint64_t ram2 : 1;
+ uint64_t ram3 : 1;
+ uint64_t reserved_13_18 : 6;
+ uint64_t dlc0ram : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn61xx;
+ struct cvmx_dfa_bist1_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t ram3 : 1; /**< Bist Results for RAM3 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ram2 : 1; /**< Bist Results for RAM2 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ram1 : 1; /**< Bist Results for RAM1 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t crq : 1; /**< Bist Results for CRQ RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gutv : 1; /**< Bist Results for GUTV RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_5_7 : 3;
+ uint64_t gutp : 1; /**< Bist Results for GUTP RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ncd : 1; /**< Bist Results for NCD RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gif : 1; /**< Bist Results for GIF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gib : 1; /**< Bist Results for GIB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gfu : 1; /**< Bist Results for GFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t gfu : 1;
+ uint64_t gib : 1;
+ uint64_t gif : 1;
+ uint64_t ncd : 1;
+ uint64_t gutp : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t gutv : 1;
+ uint64_t crq : 1;
+ uint64_t ram1 : 1;
+ uint64_t ram2 : 1;
+ uint64_t ram3 : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } cn63xx;
+ struct cvmx_dfa_bist1_cn63xx cn63xxp1;
+ struct cvmx_dfa_bist1_cn63xx cn66xx;
+ struct cvmx_dfa_bist1_s cn68xx;
+ struct cvmx_dfa_bist1_s cn68xxp1;
+};
+typedef union cvmx_dfa_bist1 cvmx_dfa_bist1_t;
+
+/**
+ * cvmx_dfa_bst0
+ *
+ * DFA_BST0 = DFA Bist Status
+ *
+ * Description:
+ */
+union cvmx_dfa_bst0 {
+ uint64_t u64;
+ struct cvmx_dfa_bst0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rdf : 16; /**< Bist Results for RDF[3:0] RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t pdf : 16; /**< Bist Results for PDF[3:0] RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t pdf : 16;
+ uint64_t rdf : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_dfa_bst0_s cn31xx;
+ struct cvmx_dfa_bst0_s cn38xx;
+ struct cvmx_dfa_bst0_s cn38xxp2;
+ struct cvmx_dfa_bst0_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t rdf : 4; /**< Bist Results for RDF[3:0] RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_4_15 : 12;
+ uint64_t pdf : 4; /**< Bist Results for PDF[3:0] RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t pdf : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t rdf : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn58xx;
+ struct cvmx_dfa_bst0_cn58xx cn58xxp1;
+};
+typedef union cvmx_dfa_bst0 cvmx_dfa_bst0_t;
+
+/**
+ * cvmx_dfa_bst1
+ *
+ * DFA_BST1 = DFA Bist Status
+ *
+ * Description:
+ */
+union cvmx_dfa_bst1 {
+ uint64_t u64;
+ struct cvmx_dfa_bst1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t crq : 1; /**< Bist Results for CRQ RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ifu : 1; /**< Bist Results for IFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gfu : 1; /**< Bist Results for GFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t drf : 1; /**< Bist Results for DRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t crf : 1; /**< Bist Results for CRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p0_bwb : 1; /**< Bist Results for P0_BWB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p1_bwb : 1; /**< Bist Results for P1_BWB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p0_brf : 8; /**< Bist Results for P0_BRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p1_brf : 8; /**< Bist Results for P1_BRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t p1_brf : 8;
+ uint64_t p0_brf : 8;
+ uint64_t p1_bwb : 1;
+ uint64_t p0_bwb : 1;
+ uint64_t crf : 1;
+ uint64_t drf : 1;
+ uint64_t gfu : 1;
+ uint64_t ifu : 1;
+ uint64_t crq : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_dfa_bst1_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t crq : 1; /**< Bist Results for CRQ RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ifu : 1; /**< Bist Results for IFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gfu : 1; /**< Bist Results for GFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t drf : 1; /**< Bist Results for DRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t crf : 1; /**< Bist Results for CRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_0_17 : 18;
+#else
+ uint64_t reserved_0_17 : 18;
+ uint64_t crf : 1;
+ uint64_t drf : 1;
+ uint64_t gfu : 1;
+ uint64_t ifu : 1;
+ uint64_t crq : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } cn31xx;
+ struct cvmx_dfa_bst1_s cn38xx;
+ struct cvmx_dfa_bst1_s cn38xxp2;
+ struct cvmx_dfa_bst1_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t crq : 1; /**< Bist Results for CRQ RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ifu : 1; /**< Bist Results for IFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gfu : 1; /**< Bist Results for GFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_19_19 : 1;
+ uint64_t crf : 1; /**< Bist Results for CRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p0_bwb : 1; /**< Bist Results for P0_BWB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p1_bwb : 1; /**< Bist Results for P1_BWB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p0_brf : 8; /**< Bist Results for P0_BRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p1_brf : 8; /**< Bist Results for P1_BRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t p1_brf : 8;
+ uint64_t p0_brf : 8;
+ uint64_t p1_bwb : 1;
+ uint64_t p0_bwb : 1;
+ uint64_t crf : 1;
+ uint64_t reserved_19_19 : 1;
+ uint64_t gfu : 1;
+ uint64_t ifu : 1;
+ uint64_t crq : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } cn58xx;
+ struct cvmx_dfa_bst1_cn58xx cn58xxp1;
+};
+typedef union cvmx_dfa_bst1 cvmx_dfa_bst1_t;
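+
+/*
+ * Minimal usage sketch for DFA_BST1, assuming the CVMX_DFA_BST1 address
+ * macro defined elsewhere in this header and cvmx_read_csr() from cvmx.h.
+ * Per the field descriptions above, any bit that reads '1' marks the
+ * corresponding RAM as BAD; '0' means GOOD (or BiST still in progress).
+ */
+static inline int cvmx_dfa_bst1_crq_failed(void)
+{
+        cvmx_dfa_bst1_t bst1;
+
+        bst1.u64 = cvmx_read_csr(CVMX_DFA_BST1);
+        return bst1.s.crq;      /* 1: CRQ RAM failed BiST */
+}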
+
+/**
+ * cvmx_dfa_cfg
+ *
+ * Specify the RSL base addresses for the block
+ *
+ * DFA_CFG = DFA Configuration
+ *
+ * Description:
+ */
+union cvmx_dfa_cfg {
+ uint64_t u64;
+ struct cvmx_dfa_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t nrpl_ena : 1; /**< When set, allows the per-node replication feature to be
+ enabled.
+ In 36-bit mode: The IWORD0[31:30]=SNREPL field AND
+ bits [21:20] of the Next Node ptr are used in generating
+ the next node address (see OCTEON HRM - DFA Chapter for
+ pseudo-code of DTE next node address generation).
+ NOTE: When NRPL_ENA=1 and IWORD0[TY]=1(36b mode),
+ (regardless of IWORD0[NRPLEN]), the Resultant Word1+
+ [[47:44],[23:20]] = Next Node's [27:20] bits. This allows
+ SW to use the RESERVED bits of the final node for SW
+ caching. Also, if required, SW will use [22:21]=Node
+ Replication to re-start the same graph walk (if the
+ graph walk prematurely terminated, ie: DATA_GONE).
+ In 18-bit mode: The IWORD0[31:30]=SNREPL field AND
+ bits [16:14] of the Next Node ptr are used in generating
+ the next node address (see OCTEON HRM - DFA Chapter for
+ pseudo-code of DTE next node address generation).
+ If (IWORD0[NREPLEN]=1 and DFA_CFG[NRPL_ENA]=1) [
+ If next node ptr[16] is set [
+ next node ptr[15:14] indicates the next node repl
+ next node ptr[13:0] indicates the position of the
+ node relative to the first normal node (i.e.
+ IWORD3[Msize] must be added to get the final node)
+ ]
+ else If next node ptr[16] is not set [
+ next node ptr[15:0] indicates the next node id
+ next node repl = 0
+ ]
+ ]
+ NOTE: For 18b node replication, MAX node space=64KB(2^16)
+ is used in detecting terminal node space(see HRM for full
+ description).
+ NOTE: The DFA graphs MUST BE built/written to DFA LLM memory
+ aware of the "per-node" replication. */
+ uint64_t nxor_ena : 1; /**< When set, allows the DTE Instruction IWORD0[NXOREN]
+ to be used to enable/disable the per-node address 'scramble'
+ of the LLM address to lessen the effects of bank conflicts.
+ If IWORD0[NXOREN] is also set, then:
+ In 36-bit mode: The node_Id[7:0] 8-bit value is XORed
+ against the LLM address addr[9:2].
+ In 18-bit mode: The node_id[6:0] 7-bit value is XORed
+ against the LLM address addr[8:2]. (note: we don't address
+ scramble outside the mode's node space).
+ NOTE: The DFA graphs MUST BE built/written to DFA LLM memory
+ aware of the "per-node" address scramble.
+ NOTE: The address 'scramble' occurs for BOTH DFA LLM graph
+ read/write operations. */
+ uint64_t gxor_ena : 1; /**< When set, the DTE Instruction IWORD0[GXOR]
+ field is used to 'scramble' the LLM address
+ to lessen the effects of bank conflicts.
+ In 36-bit mode: The GXOR[7:0] 8-bit value is XORed
+ against the LLM address addr[9:2].
+ In 18-bit mode: GXOR[6:0] 7-bit value is XORed against
+ the LLM address addr[8:2]. (note: we don't address
+ scramble outside the mode's node space)
+ NOTE: The DFA graphs MUST BE built/written to DFA LLM memory
+ aware of the "per-graph" address scramble.
+ NOTE: The address 'scramble' occurs for BOTH DFA LLM graph
+ read/write operations. */
+ uint64_t sarb : 1; /**< DFA Source Arbiter Mode
+ Selects the arbitration mode used to select DFA
+ requests issued from either CP2 or the DTE (NCB-CSR
+ or DFA HW engine).
+ - 0: Fixed Priority [Highest=CP2, Lowest=DTE]
+ - 1: Round-Robin
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t sarb : 1;
+ uint64_t gxor_ena : 1;
+ uint64_t nxor_ena : 1;
+ uint64_t nrpl_ena : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_dfa_cfg_s cn38xx;
+ struct cvmx_dfa_cfg_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t sarb : 1; /**< DFA Source Arbiter Mode
+ Selects the arbitration mode used to select DFA
+ requests issued from either CP2 or the DTE (NCB-CSR
+ or DFA HW engine).
+ - 0: Fixed Priority [Highest=CP2, Lowest=DTE]
+ - 1: Round-Robin
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t sarb : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn38xxp2;
+ struct cvmx_dfa_cfg_s cn58xx;
+ struct cvmx_dfa_cfg_s cn58xxp1;
+};
+typedef union cvmx_dfa_cfg cvmx_dfa_cfg_t;
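+
+/*
+ * Minimal usage sketch, assuming the CVMX_DFA_CFG address macro and the
+ * cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h.  Per the SARB
+ * note above, this read-modify-write belongs in power-on SW initialization
+ * only.
+ */
+static inline void cvmx_dfa_cfg_set_round_robin(void)
+{
+        cvmx_dfa_cfg_t cfg;
+
+        cfg.u64 = cvmx_read_csr(CVMX_DFA_CFG);
+        cfg.s.sarb = 1;         /* 1: Round-Robin between CP2 and DTE */
+        cvmx_write_csr(CVMX_DFA_CFG, cfg.u64);
+}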
+
+/**
+ * cvmx_dfa_config
+ *
+ * Specify the RSL base addresses for the block
+ *
+ * DFA_CONFIG = DFA Configuration Register
+ *
+ * Description:
+ */
+union cvmx_dfa_config {
+ uint64_t u64;
+ struct cvmx_dfa_config_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t dlcclear_bist : 1; /**< When DLCSTART_BIST is written 0->1, if DLCCLEAR_BIST=1, all
+ previous DLC BiST state is cleared.
+ NOTES:
+ 1) DLCCLEAR_BIST must be written to 1 before DLCSTART_BIST
+ is written to 1 using a separate CSR write.
+ 2) DLCCLEAR_BIST must not be changed after writing DLCSTART_BIST
+ 0->1 until the BIST operation completes. */
+ uint64_t dlcstart_bist : 1; /**< When software writes DLCSTART_BIST=0->1, a BiST is executed
+ for the DLC sub-block RAMs which contains DCLK domain
+ asynchronous RAMs.
+ NOTES:
+ 1) This bit should only be written after DCLK has been enabled
+ by software and is stable.
+ (see LMC initialization routine for details on how to enable
+ the DDR3 memory (DCLK) - which requires LMC PLL init, clock
+ divider and proper DLL initialization sequence). */
+ uint64_t repl_ena : 1; /**< Replication Mode Enable
+ *** o63-P2 NEW ***
+ When set, enables replication mode performance enhancement
+ feature. This enables the DFA to communicate address
+ replication information during memory references to the
+ memory controller.
+ For o63-P2: This is used by the memory controller
+ to support graph data in multiple banks (or bank sets), so that
+ the least full bank can be selected to minimize the effects of
+ DDR3 bank conflicts (ie: tRC=row cycle time).
+ For o68: This is used by the memory controller to support graph
+ data in multiple ports (or port sets), so that the least full
+ port can be selected to minimize latency effects.
+ SWNOTE: Using this mode requires the DFA SW compiler and DFA
+ driver to be aware of the address replication changes.
+ This involves changes to the MLOAD/GWALK DFA instruction format
+ (see: IWORD2.SREPL), as well as changes to node arc and metadata
+ definitions which now support an additional REPL field.
+ When clear, replication mode is disabled, and DFA will interpret
+ DFA instructions and node-arc formats which DO NOT have
+ address replication information. */
+ uint64_t clmskcrip : 4; /**< Cluster Cripple Mask
+ A one in each bit of the mask represents which DTE cluster to
+ cripple.
+ NOTE: o63 has only a single Cluster (therefore CLMSKCRIP[0]
+ is the only bit used).
+ o2 has 4 clusters, where all CLMSKCRIP mask bits are used.
+ SWNOTE: The MIO_FUS___DFA_CLMASK_CRIPPLE[3:0] fuse bits will
+ be forced into this register at reset. Any fuse bits that
+ contain '1' will be disallowed during a write and will always
+ be read as '1'. */
+ uint64_t cldtecrip : 3; /**< Encoding which represents \#of DTEs to cripple for each
+ cluster. Typically DTE_CLCRIP=0 which enables all DTEs
+ within each cluster. However, when the DFA performance
+ counters are used, SW may want to limit the \#of DTEs
+ per cluster available, as there are only 4 parallel
+ performance counters.
+ DTE_CLCRIP | \#DTEs crippled(per cluster)
+ ------------+-----------------------------
+ 0 | 0 DTE[15:0]:ON
+ 1 | 1/2 DTE[15:8]:OFF /DTE[7:0]:ON
+ 2 | 1/4 DTE[15:12]:OFF /DTE[11:0]:ON
+ 3 | 3/4 DTE[15:4]:OFF /DTE[3:0]:ON
+ 4 | 1/8 DTE[15:14]:OFF /DTE[13:0]:ON
+ 5 | 5/8 DTE[15:6]:OFF /DTE[5:0]:ON
+ 6 | 3/8 DTE[15:10]:OFF /DTE[9:0]:ON
+ 7 | 7/8 DTE[15:2]:OFF /DTE[1:0]:ON
+ NOTE: Higher numbered DTEs are crippled first. For instance,
+ on o63 (with 16 DTEs/cluster), if DTE_CLCRIP=1(1/2), then
+ DTE#s [15:8] within the cluster are crippled and only
+ DTE#s [7:0] are available.
+ IMPNOTE: The encodings are done in such a way as to later
+ be used with fuses (for future o2 revisions which will disable
+ some \#of DTEs). Blowing a fuse has the effect that there will
+ always be fewer DTEs available. [ie: we never want a customer
+ to blow additional fuses to get more DTEs].
+ SWNOTE: The MIO_FUS___DFA_NUMDTE_CRIPPLE[2:0] fuse bits will
+ be forced into this register at reset. Any fuse bits that
+ contain '1' will be disallowed during a write and will always
+ be read as '1'. */
+ uint64_t dteclkdis : 1; /**< DFA Clock Disable Source
+ When SET, the DFA clocks for DTE(thread engine)
+ operation are disabled (to conserve overall chip clocking
+ power when the DFA function is not used).
+ NOTE: When SET, SW MUST NEVER issue NCB-Direct CSR
+ operations to the DFA (will result in NCB Bus Timeout
+ errors).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ SWNOTE: The MIO_FUS___DFA_DTE_DISABLE fuse bit will
+ be forced into this register at reset. If the fuse bit
+ contains '1', writes to DTECLKDIS are disallowed and
+ will always be read as '1'. */
+#else
+ uint64_t dteclkdis : 1;
+ uint64_t cldtecrip : 3;
+ uint64_t clmskcrip : 4;
+ uint64_t repl_ena : 1;
+ uint64_t dlcstart_bist : 1;
+ uint64_t dlcclear_bist : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_dfa_config_s cn61xx;
+ struct cvmx_dfa_config_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t repl_ena : 1; /**< Replication Mode Enable
+ *** o63-P2 NEW ***
+ When set, enables replication mode performance enhancement
+ feature. This enables the DFA to communicate address
+ replication information during memory references to the DFM
+ (memory controller). This in turn is used by the DFM to support
+ graph data in multiple banks (or bank sets), so that the least
+ full bank can be selected to minimize the effects of DDR3 bank
+ conflicts (ie: tRC=row cycle time).
+ SWNOTE: Using this mode requires the DFA SW compiler and DFA
+ driver to be aware of the o63-P2 address replication changes.
+ This involves changes to the MLOAD/GWALK DFA instruction format
+ (see: IWORD2.SREPL), as well as changes to node arc and metadata
+ definitions which now support an additional REPL field.
+ When clear, replication mode is disabled, and DFA will interpret
+ o63-P1 DFA instructions and node-arc formats which DO NOT have
+ address replication information. */
+ uint64_t clmskcrip : 4; /**< Cluster Cripple Mask
+ A one in each bit of the mask represents which DTE cluster to
+ cripple.
+ NOTE: o63 has only a single Cluster (therefore CLMSKCRIP[0]
+ is the only bit used).
+ o2 has 4 clusters, where all CLMSKCRIP mask bits are used.
+ SWNOTE: The MIO_FUS___DFA_CLMASK_CRIPPLE[3:0] fuse bits will
+ be forced into this register at reset. Any fuse bits that
+ contain '1' will be disallowed during a write and will always
+ be read as '1'. */
+ uint64_t cldtecrip : 3; /**< Encoding which represents \#of DTEs to cripple for each
+ cluster. Typically DTE_CLCRIP=0 which enables all DTEs
+ within each cluster. However, when the DFA performance
+ counters are used, SW may want to limit the \#of DTEs
+ per cluster available, as there are only 4 parallel
+ performance counters.
+ DTE_CLCRIP | \#DTEs crippled(per cluster)
+ ------------+-----------------------------
+ 0 | 0 DTE[15:0]:ON
+ 1 | 1/2 DTE[15:8]:OFF /DTE[7:0]:ON
+ 2 | 1/4 DTE[15:12]:OFF /DTE[11:0]:ON
+ 3 | 3/4 DTE[15:4]:OFF /DTE[3:0]:ON
+ 4 | 1/8 DTE[15:14]:OFF /DTE[13:0]:ON
+ 5 | 5/8 DTE[15:6]:OFF /DTE[5:0]:ON
+ 6 | 3/8 DTE[15:10]:OFF /DTE[9:0]:ON
+ 7 | 7/8 DTE[15:2]:OFF /DTE[1:0]:ON
+ NOTE: Higher numbered DTEs are crippled first. For instance,
+ on o63 (with 16 DTEs/cluster), if DTE_CLCRIP=1(1/2), then
+ DTE#s [15:8] within the cluster are crippled and only
+ DTE#s [7:0] are available.
+ IMPNOTE: The encodings are done in such a way as to later
+ be used with fuses (for future o2 revisions which will disable
+ some \#of DTEs). Blowing a fuse has the effect that there will
+ always be fewer DTEs available. [ie: we never want a customer
+ to blow additional fuses to get more DTEs].
+ SWNOTE: The MIO_FUS___DFA_NUMDTE_CRIPPLE[2:0] fuse bits will
+ be forced into this register at reset. Any fuse bits that
+ contain '1' will be disallowed during a write and will always
+ be read as '1'. */
+ uint64_t dteclkdis : 1; /**< DFA Clock Disable Source
+ When SET, the DFA clocks for DTE(thread engine)
+ operation are disabled (to conserve overall chip clocking
+ power when the DFA function is not used).
+ NOTE: When SET, SW MUST NEVER issue NCB-Direct CSR
+ operations to the DFA (will result in NCB Bus Timeout
+ errors).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ SWNOTE: The MIO_FUS___DFA_DTE_DISABLE fuse bit will
+ be forced into this register at reset. If the fuse bit
+ contains '1', writes to DTECLKDIS are disallowed and
+ will always be read as '1'. */
+#else
+ uint64_t dteclkdis : 1;
+ uint64_t cldtecrip : 3;
+ uint64_t clmskcrip : 4;
+ uint64_t repl_ena : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn63xx;
+ struct cvmx_dfa_config_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t clmskcrip : 4; /**< Cluster Cripple Mask
+ A one in each bit of the mask represents which DTE cluster to
+ cripple.
+ NOTE: o63 has only a single Cluster (therefore CLMSKCRIP[0]
+ is the only bit used).
+ o2 has 4 clusters, where all CLMSKCRIP mask bits are used.
+ SWNOTE: The MIO_FUS___DFA_CLMASK_CRIPPLE[3:0] fuse bits will
+ be forced into this register at reset. Any fuse bits that
+ contain '1' will be disallowed during a write and will always
+ be read as '1'. */
+ uint64_t cldtecrip : 3; /**< Encoding which represents \#of DTEs to cripple for each
+ cluster. Typically DTE_CLCRIP=0 which enables all DTEs
+ within each cluster. However, when the DFA performance
+ counters are used, SW may want to limit the \#of DTEs
+ per cluster available, as there are only 4 parallel
+ performance counters.
+ DTE_CLCRIP | \#DTEs crippled(per cluster)
+ ------------+-----------------------------
+ 0 | 0 DTE[15:0]:ON
+ 1 | 1/2 DTE[15:8]:OFF /DTE[7:0]:ON
+ 2 | 1/4 DTE[15:12]:OFF /DTE[11:0]:ON
+ 3 | 3/4 DTE[15:4]:OFF /DTE[3:0]:ON
+ 4 | 1/8 DTE[15:14]:OFF /DTE[13:0]:ON
+ 5 | 5/8 DTE[15:6]:OFF /DTE[5:0]:ON
+ 6 | 3/8 DTE[15:10]:OFF /DTE[9:0]:ON
+ 7 | 7/8 DTE[15:2]:OFF /DTE[1:0]:ON
+ NOTE: Higher numbered DTEs are crippled first. For instance,
+ on o63 (with 16 DTEs/cluster), if DTE_CLCRIP=1(1/2), then
+ DTE#s [15:8] within the cluster are crippled and only
+ DTE#s [7:0] are available.
+ IMPNOTE: The encodings are done in such a way as to later
+ be used with fuses (for future o2 revisions which will disable
+ some \#of DTEs). Blowing a fuse has the effect that there will
+ always be fewer DTEs available. [ie: we never want a customer
+ to blow additional fuses to get more DTEs].
+ SWNOTE: The MIO_FUS___DFA_NUMDTE_CRIPPLE[2:0] fuse bits will
+ be forced into this register at reset. Any fuse bits that
+ contain '1' will be disallowed during a write and will always
+ be read as '1'. */
+ uint64_t dteclkdis : 1; /**< DFA Clock Disable Source
+ When SET, the DFA clocks for DTE(thread engine)
+ operation are disabled (to conserve overall chip clocking
+ power when the DFA function is not used).
+ NOTE: When SET, SW MUST NEVER issue NCB-Direct CSR
+ operations to the DFA (will result in NCB Bus Timeout
+ errors).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ SWNOTE: The MIO_FUS___DFA_DTE_DISABLE fuse bit will
+ be forced into this register at reset. If the fuse bit
+ contains '1', writes to DTECLKDIS are disallowed and
+ will always be read as '1'. */
+#else
+ uint64_t dteclkdis : 1;
+ uint64_t cldtecrip : 3;
+ uint64_t clmskcrip : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn63xxp1;
+ struct cvmx_dfa_config_cn63xx cn66xx;
+ struct cvmx_dfa_config_s cn68xx;
+ struct cvmx_dfa_config_s cn68xxp1;
+};
+typedef union cvmx_dfa_config cvmx_dfa_config_t;
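+
+/*
+ * Minimal sketch decoding DFA_CONFIG[CLDTECRIP] into the number of DTEs
+ * still enabled per cluster, following the encoding table above (16
+ * DTEs/cluster, as on o63).  Assumes the CVMX_DFA_CONFIG address macro and
+ * cvmx_read_csr() from cvmx.h.
+ */
+static inline int cvmx_dfa_config_dtes_per_cluster(void)
+{
+        /* encoding 0..7 -> #DTEs ON: 16, 8, 12, 4, 14, 6, 10, 2 */
+        static const int dtes_on[8] = { 16, 8, 12, 4, 14, 6, 10, 2 };
+        cvmx_dfa_config_t cfg;
+
+        cfg.u64 = cvmx_read_csr(CVMX_DFA_CONFIG);
+        return dtes_on[cfg.s.cldtecrip];
+}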
+
+/**
+ * cvmx_dfa_control
+ *
+ * DFA_CONTROL = DFA Control Register
+ *
+ * Description:
+ */
+union cvmx_dfa_control {
+ uint64_t u64;
+ struct cvmx_dfa_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t sbdnum : 6; /**< SBD Debug Entry#
+ *FOR INTERNAL USE ONLY*
+ DFA Scoreboard debug control
+ Selects which one of 48 DFA Scoreboard entries is
+ latched into the DFA_SBD_DBG[0-3] registers. */
+ uint64_t sbdlck : 1; /**< DFA Scoreboard LOCK Strobe
+ *FOR INTERNAL USE ONLY*
+ DFA Scoreboard debug control
+ When written with a '1', the DFA Scoreboard Debug
+ registers (DFA_SBD_DBG[0-3]) are all locked down.
+ This allows SW to lock down the contents of the entire
+ SBD for a single instant in time. All subsequent reads
+ of the DFA scoreboard registers will return the data
+ from that instant in time. */
+ uint64_t reserved_3_4 : 2;
+ uint64_t pmode : 1; /**< NCB-NRP Arbiter Mode
+ (0=Fixed Priority [LP=WQF,DFF,HP=RGF] / 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t qmode : 1; /**< NCB-NRQ Arbiter Mode
+ (0=Fixed Priority [LP=IRF,RWF,PRF,HP=GRF] / 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t imode : 1; /**< NCB-Inbound Arbiter
+ (0=FP [LP=NRQ,HP=NRP], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t imode : 1;
+ uint64_t qmode : 1;
+ uint64_t pmode : 1;
+ uint64_t reserved_3_4 : 2;
+ uint64_t sbdlck : 1;
+ uint64_t sbdnum : 6;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_dfa_control_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t sbdnum : 4; /**< SBD Debug Entry#
+ *FOR INTERNAL USE ONLY*
+ DFA Scoreboard debug control
+ Selects which one of 16 DFA Scoreboard entries is
+ latched into the DFA_SBD_DBG[0-3] registers. */
+ uint64_t sbdlck : 1; /**< DFA Scoreboard LOCK Strobe
+ *FOR INTERNAL USE ONLY*
+ DFA Scoreboard debug control
+ When written with a '1', the DFA Scoreboard Debug
+ registers (DFA_SBD_DBG[0-3]) are all locked down.
+ This allows SW to lock down the contents of the entire
+ SBD for a single instant in time. All subsequent reads
+ of the DFA scoreboard registers will return the data
+ from that instant in time. */
+ uint64_t reserved_3_4 : 2;
+ uint64_t pmode : 1; /**< NCB-NRP Arbiter Mode
+ (0=Fixed Priority [LP=WQF,DFF,HP=RGF] / 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t qmode : 1; /**< NCB-NRQ Arbiter Mode
+ (0=Fixed Priority [LP=IRF,RWF,PRF,HP=GRF] / 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t imode : 1; /**< NCB-Inbound Arbiter
+ (0=FP [LP=NRQ,HP=NRP], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t imode : 1;
+ uint64_t qmode : 1;
+ uint64_t pmode : 1;
+ uint64_t reserved_3_4 : 2;
+ uint64_t sbdlck : 1;
+ uint64_t sbdnum : 4;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_dfa_control_cn61xx cn63xx;
+ struct cvmx_dfa_control_cn61xx cn63xxp1;
+ struct cvmx_dfa_control_cn61xx cn66xx;
+ struct cvmx_dfa_control_s cn68xx;
+ struct cvmx_dfa_control_s cn68xxp1;
+};
+typedef union cvmx_dfa_control cvmx_dfa_control_t;
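+
+/*
+ * Minimal sketch of the scoreboard-debug flow described above: select an
+ * entry with SBDNUM, then strobe SBDLCK so the DFA_SBD_DBG/DFA_DEBUG*
+ * registers capture one instant in time.  Assumes the CVMX_DFA_CONTROL
+ * address macro and the cvmx_read_csr()/cvmx_write_csr() accessors.
+ */
+static inline void cvmx_dfa_control_lock_scoreboard(unsigned entry)
+{
+        cvmx_dfa_control_t ctl;
+
+        ctl.u64 = cvmx_read_csr(CVMX_DFA_CONTROL);
+        ctl.s.sbdnum = entry;   /* which scoreboard entry to latch */
+        ctl.s.sbdlck = 1;       /* lock DFA_SBD_DBG[0-3] contents */
+        cvmx_write_csr(CVMX_DFA_CONTROL, ctl.u64);
+}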
+
+/**
+ * cvmx_dfa_dbell
+ *
+ * DFA_DBELL = DFA Doorbell Register
+ *
+ * Description:
+ * NOTE: To write to the DFA_DBELL register, a device would issue an IOBST directed at the DFA with addr[34:33]=2'b00.
+ * To read the DFA_DBELL register, a device would issue an IOBLD64 directed at the DFA with addr[34:33]=2'b00.
+ *
+ * NOTE: If DFA_CONFIG[DTECLKDIS]=1 (DFA-DTE clocks disabled), reads/writes to the DFA_DBELL register do not take effect.
+ * NOTE: If FUSE[TBD]="DFA DTE disable" is blown, reads/writes to the DFA_DBELL register do not take effect.
+ */
+union cvmx_dfa_dbell {
+ uint64_t u64;
+ struct cvmx_dfa_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dbell : 20; /**< Represents the cumulative total of pending
+ DFA instructions which SW has previously written
+ into the DFA Instruction FIFO (DIF) in main memory.
+ Each DFA instruction contains a fixed size 32B
+ instruction word which is executed by the DFA HW.
+ The DBL register can hold up to 1M-1 (2^20-1)
+ pending DFA instruction requests.
+ During a read (by SW), the 'most recent' contents
+ of the DFA_DBELL register are returned at the time
+ the NCB-INB bus is driven.
+ NOTE: Since DFA HW updates this register, its
+ contents are unpredictable in SW. */
+#else
+ uint64_t dbell : 20;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_dfa_dbell_s cn31xx;
+ struct cvmx_dfa_dbell_s cn38xx;
+ struct cvmx_dfa_dbell_s cn38xxp2;
+ struct cvmx_dfa_dbell_s cn58xx;
+ struct cvmx_dfa_dbell_s cn58xxp1;
+ struct cvmx_dfa_dbell_s cn61xx;
+ struct cvmx_dfa_dbell_s cn63xx;
+ struct cvmx_dfa_dbell_s cn63xxp1;
+ struct cvmx_dfa_dbell_s cn66xx;
+ struct cvmx_dfa_dbell_s cn68xx;
+ struct cvmx_dfa_dbell_s cn68xxp1;
+};
+typedef union cvmx_dfa_dbell cvmx_dfa_dbell_t;
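+
+/*
+ * Minimal doorbell sketch, assuming the CVMX_DFA_DBELL address macro and
+ * cvmx_write_csr() from cvmx.h, and assuming (as is conventional for
+ * OCTEON doorbell CSRs) that a write adds to the pending-instruction
+ * count rather than overwriting it.
+ */
+static inline void cvmx_dfa_dbell_ring(unsigned num_instructions)
+{
+        cvmx_dfa_dbell_t dbell;
+
+        dbell.u64 = 0;
+        dbell.s.dbell = num_instructions;  /* #32B instructions just queued */
+        cvmx_write_csr(CVMX_DFA_DBELL, dbell.u64);
+}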
+
+/**
+ * cvmx_dfa_ddr2_addr
+ *
+ * DFA_DDR2_ADDR = DFA DDR2 fclk-domain Memory Address Config Register
+ *
+ *
+ * Description: The following registers are used to compose the DFA's DDR2 address into ROW/COL/BNK
+ * etc.
+ */
+union cvmx_dfa_ddr2_addr {
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t rdimm_ena : 1; /**< If there is a need to insert a register chip on the
+ system (the equivalent of a registered DIMM) to
+ provide better setup for the command and control bits,
+ turn this mode on.
+ RDIMM_ENA
+ 0 Registered Mode OFF
+ 1 Registered Mode ON */
+ uint64_t num_rnks : 2; /**< NUM_RNKS is programmed based on how many ranks there
+ are in the system. This needs to be programmed correctly
+ regardless of whether we are in RNK_LO mode or not.
+ NUM_RNKS \# of Ranks
+ 0 1
+ 1 2
+ 2 4
+ 3 RESERVED */
+ uint64_t rnk_lo : 1; /**< When this mode is turned on, consecutive addresses
+ outside the bank boundary
+ are programmed to go to different ranks in order to
+ minimize bank conflicts. It is useful in 4-bank DDR2
+ parts based memory to extend out the \#physical banks
+ available and minimize bank conflicts.
+ On 8-bank DDR2 parts this mode is less useful,
+ because it comes with a penalty: every successive
+ read that crosses a rank boundary needs a 1-cycle
+ bubble inserted to prevent bus turnaround conflicts.
+ RNK_LO
+ 0 - OFF
+ 1 - ON */
+ uint64_t num_colrows : 3; /**< NUM_COLROWS is used to set the MSB of the ROW_ADDR
+ and the LSB of RANK address when not in RNK_LO mode.
+ Calculate the sum of \#COL and \#ROW and program the
+ controller appropriately
+ RANK_LSB \#COLs + \#ROWs
+ ------------------------------
+ - 000: 22
+ - 001: 23
+ - 010: 24
+ - 011: 25
+ - 100-111: RESERVED */
+ uint64_t num_cols : 2; /**< The Long word address that the controller receives
+ needs to be converted to Row, Col, Rank and Bank
+ addresses depending on the memory part's micro arch.
+ NUM_COL tells the controller how many column bits
+ there are and the controller uses this info to map
+ the LSB of the row address
+ - 00: num_cols = 9
+ - 01: num_cols = 10
+ - 10: num_cols = 11
+ - 11: RESERVED */
+#else
+ uint64_t num_cols : 2;
+ uint64_t num_colrows : 3;
+ uint64_t rnk_lo : 1;
+ uint64_t num_rnks : 2;
+ uint64_t rdimm_ena : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_addr_s cn31xx;
+};
+typedef union cvmx_dfa_ddr2_addr cvmx_dfa_ddr2_addr_t;
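+
+/*
+ * Minimal helper sketch for the NUM_COLROWS encoding table above
+ * (000: 22 ... 011: 25; 100-111 RESERVED).  Pure arithmetic; the
+ * function name is illustrative only.
+ */
+static inline int cvmx_dfa_ddr2_num_colrows_encode(int num_cols, int num_rows)
+{
+        int sum = num_cols + num_rows;
+
+        if (sum < 22 || sum > 25)
+                return -1;      /* not expressible: 100-111 are RESERVED */
+        return sum - 22;        /* 22 -> 000, 23 -> 001, 24 -> 010, 25 -> 011 */
+}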
+
+/**
+ * cvmx_dfa_ddr2_bus
+ *
+ * DFA_DDR2_BUS = DFA DDR Bus Activity Counter
+ *
+ *
+ * Description: This counter counts \# cycles that the memory bus is doing a read/write/command.
+ * Useful to benchmark the bus utilization as a ratio of
+ * \#Cycles of Data Transfer/\#Cycles since init or
+ * \#Cycles of Data Transfer/\#Cycles that memory controller is active
+ */
+union cvmx_dfa_ddr2_bus {
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_bus_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t bus_cnt : 47; /**< Counter counts the \# cycles of Data transfer */
+#else
+ uint64_t bus_cnt : 47;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_bus_s cn31xx;
+};
+typedef union cvmx_dfa_ddr2_bus cvmx_dfa_ddr2_bus_t;
+
+/**
+ * cvmx_dfa_ddr2_cfg
+ *
+ * DFA_DDR2_CFG = DFA DDR2 fclk-domain Memory Configuration \#0 Register
+ *
+ * Description:
+ */
+union cvmx_dfa_ddr2_cfg {
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_41_63 : 23;
+ uint64_t trfc : 5; /**< Establishes tRFC(from DDR2 data sheets) in \# of
+ 4 fclk intervals.
+ General Equation:
+ TRFC(csr) = ROUNDUP[tRFC(data-sheet-ns)/(4 * fclk(ns))]
+ Example:
+ tRFC(data-sheet-ns) = 127.5ns
+ Operational Frequency: 533MHz DDR rate
+ [fclk=266MHz(3.75ns)]
+ Then:
+ TRFC(csr) = ROUNDUP[127.5ns/(4 * 3.75ns)]
+ = 9 */
+ uint64_t mrs_pgm : 1; /**< When clear, the HW initialization sequence fixes
+ some of the *MRS register bit definitions.
+ EMRS:
+ A[14:13] = 0 RESERVED
+ A[12] = 0 Output Buffers Enabled (FIXED)
+ A[11] = 0 RDQS Disabled (FIXED)
+ A[10] = 0 DQSn Enabled (FIXED)
+ A[9:7] = 0 OCD Not supported (FIXED)
+ A[6] = 0 RTT Disabled (FIXED)
+ A[5:3]=DFA_DDR2_TMG[ADDLAT] (if DFA_DDR2_TMG[POCAS]=1)
+ Additive LATENCY (Programmable)
+ A[2]=0 RTT Disabled (FIXED)
+ A[1]=DFA_DDR2_TMG[DIC] (Programmable)
+ A[0] = 0 DLL Enabled (FIXED)
+ MRS:
+ A[14:13] = 0 RESERVED
+ A[12] = 0 Fast Active Power Down Mode (FIXED)
+ A[11:9] = DFA_DDR2_TMG[TWR](Programmable)
+ A[8] = 1 DLL Reset (FIXED)
+ A[7] = 0 Test Mode (FIXED)
+ A[6:4]=DFA_DDR2_TMG[CASLAT] CAS LATENCY (Programmable)
+ A[3] = 0 Burst Type(must be 0:Sequential) (FIXED)
+ A[2:0] = 2 Burst Length=4 (FIXED)
+ When set, the HW initialization sequence sources
+ the DFA_DDR2_MRS, DFA_DDR2_EMRS registers which are
+ driven onto the DFA_A[] pins. (this allows the MRS/EMRS
+ fields to be completely programmable - however care
+ must be taken by software).
+ This mode is useful for customers who wish to:
+ 1) override the FIXED definitions(above), or
+ 2) Use a "clamshell mode" of operation where the
+ address bits(per rank) are swizzled on the
+ board to reduce stub lengths for optimal
+ frequency operation.
+ Use this in combination with DFA_DDR2_CFG[RNK_MSK]
+ to specify the INIT sequence for each of the 4
+ supported ranks. */
+ uint64_t fpip : 3; /**< Early Fill Programmable Pipe [\#fclks]
+ This field dictates the \#fclks prior to the arrival
+ of fill data(in fclk domain), to start the 'early' fill
+ command pipe (in the eclk domain) so as to minimize the
+ overall fill latency.
+ The programmable early fill command signal is synchronized
+ into the eclk domain, where it is used to pull data out of
+ asynchronous RAM as fast as possible.
+ NOTE: A value of FPIP=0 is the 'safest' setting and will
+ result in the early fill command pipe starting in the
+ same cycle as the fill data.
+ General Equation: (for FPIP)
+ FPIP <= MIN[6, (ROUND_DOWN[6/EF_RATIO] + 1)]
+ where:
+ EF_RATIO = ECLK/FCLK Ratio [eclk(MHz)/fclk(MHz)]
+ Example: FCLK=200MHz/ECLK=600MHz
+ FPIP = MIN[6, (ROUND_DOWN[6/(600/200))] + 1)]
+ FPIP <= 3 */
+ uint64_t reserved_29_31 : 3;
+ uint64_t ref_int : 13; /**< Refresh Interval (represented in \#of fclk
+ increments).
+ Each refresh interval will generate a single
+ auto-refresh command sequence which implicitly targets
+ all banks within the device:
+ Example: For fclk=200MHz(5ns)/400MHz(DDR):
+ trefint(ns) = tREFI(max) = 3.9us = 3900ns [datasheet]
+ REF_INT = ROUND_DOWN[(trefint/fclk)]
+ = ROUND_DOWN[(3900ns/5ns)]
+ = 780 fclks (0x30c)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t tskw : 2; /**< Board Skew (represented in \#fclks)
+ Represents additional board skew of DQ/DQS.
+ - 00: board-skew = 0 fclk
+ - 01: board-skew = 1 fclk
+ - 10: board-skew = 2 fclk
+ - 11: board-skew = 3 fclk
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t rnk_msk : 4; /**< Controls the CS_N[3:0] during a) a HW Initialization
+ sequence (triggered by DFA_DDR2_CFG[INIT]) or
+ b) during a normal refresh sequence. If
+ the RNK_MSK[x]=1, the corresponding CS_N[x] is driven.
+ NOTE: This is required for DRAM used in a
+ clamshell configuration, since the address lines
+ carry Mode Register write data that is unique
+ per rank(or clam). In a clamshell configuration,
+ the N3K DFA_A[x] pin may be tied into Clam#0's A[x]
+ and also into Clam#1's 'mirrored' address bit A[y]
+ (eg: Clam0 sees A[5] and Clam1 sees A[15]).
+ To support clamshell designs, SW must initiate
+ separate HW init sequences for each unique rank address
+ mapping. Before each HW init sequence is triggered,
+ SW must preload the DFA_DDR2_MRS/EMRS registers with
+ the data that will be driven onto the A[14:0] wires
+ during the EMRS/MRS mode register write(s).
+ NOTE: After the final HW initialization sequence has
+ been triggered, SW must wait 64K eclks before writing
+ the RNK_MSK[3:0] field = 3'b1111 (so that CS_N[3:0]
+ is driven during refresh sequences in normal operation).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t silo_qc : 1; /**< Enables Quarter Cycle move of the Rd sampling window */
+ uint64_t silo_hc : 1; /**< A combination of SILO_HC, SILO_QC and TSKW
+ specifies the positioning of the sampling strobe
+ when receiving read data back from DDR2. This is
+ done to offset any board trace induced delay on
+ the DQ and DQS which inherently makes these
+ asynchronous with respect to the internal clk of
+ controller. TSKW moves this sampling window by
+ integer cycles. SILO_QC and HC move this quarter
+ and half a cycle respectively. */
+ uint64_t sil_lat : 2; /**< Silo Latency (\#fclks): On reads, determines how many
+ additional fclks to wait (on top of CASLAT+1) before
+ pulling data out of the padring silos used for time
+ domain boundary crossing.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t bprch : 1; /**< Tristate Enable (back porch) (\#fclks)
+ On reads, allows user to control the shape of the
+ tristate disable back porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t fprch : 1; /**< Tristate Enable (front porch) (\#fclks)
+ On reads, allows user to control the shape of the
+ tristate disable front porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t init : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for the LLM Memory Port is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Enable memory port
+ a) PRTENA=1
+ 2) Wait 200us (to ensure a stable clock
+ to the DDR2) - as per DDR2 spec.
+ 3) Write a '1' to the INIT which
+ will initiate a hardware initialization
+ sequence.
+ NOTE: After writing a '1', SW must wait 64K eclk
+ cycles to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_DDR2* registers.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t prtena : 1; /**< Enable DFA Memory
+ When enabled, this bit lets N3K be the default
+ driver for DFA-LLM memory port. */
+#else
+ uint64_t prtena : 1;
+ uint64_t init : 1;
+ uint64_t fprch : 1;
+ uint64_t bprch : 1;
+ uint64_t sil_lat : 2;
+ uint64_t silo_hc : 1;
+ uint64_t silo_qc : 1;
+ uint64_t rnk_msk : 4;
+ uint64_t tskw : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t ref_int : 13;
+ uint64_t reserved_29_31 : 3;
+ uint64_t fpip : 3;
+ uint64_t mrs_pgm : 1;
+ uint64_t trfc : 5;
+ uint64_t reserved_41_63 : 23;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_cfg_s cn31xx;
+};
+typedef union cvmx_dfa_ddr2_cfg cvmx_dfa_ddr2_cfg_t;
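+
+/*
+ * Sketches of the TRFC and REF_INT derivations worked in the field
+ * descriptions above.  Times are taken in picoseconds so the arithmetic
+ * stays integral; names and signatures are illustrative only.
+ */
+static inline uint64_t cvmx_dfa_ddr2_trfc_csr(uint64_t trfc_ps, uint64_t fclk_ps)
+{
+        /* TRFC(csr) = ROUNDUP[tRFC / (4 * fclk)]; 127.5ns @ fclk=3.75ns -> 9 */
+        return (trfc_ps + 4 * fclk_ps - 1) / (4 * fclk_ps);
+}
+
+static inline uint64_t cvmx_dfa_ddr2_ref_int_csr(uint64_t trefi_ps, uint64_t fclk_ps)
+{
+        /* REF_INT = ROUND_DOWN[tREFI / fclk]; 3900ns @ fclk=5ns -> 780 (0x30c) */
+        return trefi_ps / fclk_ps;
+}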
+
+/**
+ * cvmx_dfa_ddr2_comp
+ *
+ * DFA_DDR2_COMP = DFA DDR2 I/O PVT Compensation Configuration
+ *
+ *
+ * Description: The following are registers to program the DDR2 PLL and DLL
+ */
+union cvmx_dfa_ddr2_comp {
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_comp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dfa__pctl : 4; /**< DFA DDR pctl from compensation circuit
+ Internal DBG only */
+ uint64_t dfa__nctl : 4; /**< DFA DDR nctl from compensation circuit
+ Internal DBG only */
+ uint64_t reserved_9_55 : 47;
+ uint64_t pctl_csr : 4; /**< Compensation control bits */
+ uint64_t nctl_csr : 4; /**< Compensation control bits */
+ uint64_t comp_bypass : 1; /**< Compensation Bypass */
+#else
+ uint64_t comp_bypass : 1;
+ uint64_t nctl_csr : 4;
+ uint64_t pctl_csr : 4;
+ uint64_t reserved_9_55 : 47;
+ uint64_t dfa__nctl : 4;
+ uint64_t dfa__pctl : 4;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_comp_s cn31xx;
+};
+typedef union cvmx_dfa_ddr2_comp cvmx_dfa_ddr2_comp_t;
+
+/**
+ * cvmx_dfa_ddr2_emrs
+ *
+ * DFA_DDR2_EMRS = DDR2 EMRS Register(s) EMRS1[14:0], EMRS1_OCD[14:0]
+ * Description: This register contains the data driven onto the Address[14:0] lines during DDR INIT
+ * To support Clamshelling (where N3K DFA_A[] pins are not 1:1 mapped to each clam(or rank), a HW init
+ * sequence is allowed on a "per-rank" basis. Care must be taken in the values programmed into these
+ * registers during the HW initialization sequence (see N3K specific restrictions in notes below).
+ * DFA_DDR2_CFG[MRS_PGM] must be 1 to support this feature.
+ *
+ * Notes:
+ * For DDR-II please consult your device's data sheet for further details:
+ *
+ */
+union cvmx_dfa_ddr2_emrs {
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_emrs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t emrs1_ocd : 15; /**< Memory Address[14:0] during "EMRS1 (OCD Calibration)"
+ step \#12a "EMRS OCD Default Command" A[9:7]=111
+ of DDR2 HW initialization sequence.
+ (See JEDEC DDR2 specification (JESD79-2):
+ Power Up and initialization sequence).
+ A[14:13] = 0, RESERVED
+ A[12] = 0, Output Buffers Enabled
+ A[11] = 0, RDQS Disabled (we do not support RDQS)
+ A[10] = 0, DQSn Enabled
+ A[9:7] = 7, OCD Calibration Mode Default
+ A[6] = 0, ODT Disabled
+ A[5:3]=DFA_DDR2_TMG[ADDLAT] Additive LATENCY (Default 0)
+ A[2]=0 Termination Res RTT (ODT off Default)
+ [A6,A2] = 0 -> ODT Disabled
+ 1 -> 75 ohm; 2 -> 150 ohm; 3 - Reserved
+ A[1]=0 Normal Output Driver Imp mode
+ (1 - weak ie., 60% of normal drive strength)
+ A[0] = 0 DLL Enabled */
+ uint64_t reserved_15_15 : 1;
+ uint64_t emrs1 : 15; /**< Memory Address[14:0] during:
+ a) Step \#7 "EMRS1 to enable DLL (A[0]=0)"
+ b) Step \#12b "EMRS OCD Calibration Mode Exit"
+ steps of DDR2 HW initialization sequence.
+ (See JEDEC DDR2 specification (JESD79-2): Power Up and
+ initialization sequence).
+ A[14:13] = 0, RESERVED
+ A[12] = 0, Output Buffers Enabled
+ A[11] = 0, RDQS Disabled (we do not support RDQS)
+ A[10] = 0, DQSn Enabled
+ A[9:7] = 0, OCD Calibration Mode exit/maintain
+ A[6] = 0, ODT Disabled
+ A[5:3]=DFA_DDR2_TMG[ADDLAT] Additive LATENCY (Default 0)
+ A[2]=0 Termination Res RTT (ODT off Default)
+ [A6,A2] = 0 -> ODT Disabled
+ 1 -> 75 ohm; 2 -> 150 ohm; 3 - Reserved
+ A[1]=0 Normal Output Driver Imp mode
+ (1 - weak ie., 60% of normal drive strength)
+ A[0] = 0 DLL Enabled */
+#else
+ uint64_t emrs1 : 15;
+ uint64_t reserved_15_15 : 1;
+ uint64_t emrs1_ocd : 15;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_emrs_s cn31xx;
+};
+typedef union cvmx_dfa_ddr2_emrs cvmx_dfa_ddr2_emrs_t;
+
+/**
+ * cvmx_dfa_ddr2_fcnt
+ *
+ * DFA_DDR2_FCNT = DFA FCLK Counter
+ *
+ *
+ * Description: This FCLK cycle counter gets going after memory has been initialized
+ */
+union cvmx_dfa_ddr2_fcnt {
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_fcnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t fcyc_cnt : 47; /**< Counter counts FCLK cycles or \# cycles that the memory
+ controller has requests queued up depending on FCNT_MODE
+ If FCNT_MODE = 0, this counter counts the \# FCLK cycles
+ If FCNT_MODE = 1, this counter counts the \# cycles the
+ controller is active with memory requests. */
+#else
+ uint64_t fcyc_cnt : 47;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_fcnt_s cn31xx;
+};
+typedef union cvmx_dfa_ddr2_fcnt cvmx_dfa_ddr2_fcnt_t;
+
+/**
+ * cvmx_dfa_ddr2_mrs
+ *
+ * DFA_DDR2_MRS = DDR2 MRS Register(s) MRS_DLL[14:0], MRS[14:0]
+ * Description: This register contains the data driven onto the Address[14:0] lines during DDR INIT
+ * To support Clamshelling (where N3K DFA_A[] pins are not 1:1 mapped to each clam(or rank), a HW init
+ * sequence is allowed on a "per-rank" basis. Care must be taken in the values programmed into these
+ * registers during the HW initialization sequence (see N3K specific restrictions in notes below).
+ * DFA_DDR2_CFG[MRS_PGM] must be 1 to support this feature.
+ *
+ * Notes:
+ * For DDR-II please consult your device's data sheet for further details:
+ *
+ */
+union cvmx_dfa_ddr2_mrs {
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_mrs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t mrs : 15; /**< Memory Address[14:0] during "MRS without resetting
+ DLL A[8]=0" step of HW initialization sequence.
+ (See JEDEC DDR2 specification (JESD79-2): Power Up
+ and initialization sequence - Step \#11).
+ A[14:13] = 0, RESERVED
+ A[12] = 0, Fast Active Power Down Mode
+ A[11:9] = DFA_DDR2_TMG[TWR]
+ A[8] = 0, for DLL Reset
+ A[7] =0 Test Mode (must be 0 for normal operation)
+ A[6:4]=DFA_DDR2_TMG[CASLAT] CAS LATENCY (default 4)
+ A[3]=0 Burst Type(must be 0:Sequential)
+ A[2:0]=2 Burst Length=4(default) */
+ uint64_t reserved_15_15 : 1;
+ uint64_t mrs_dll : 15; /**< Memory Address[14:0] during "MRS for DLL_RESET A[8]=1"
+ step of HW initialization sequence.
+ (See JEDEC DDR2 specification (JESD79-2): Power Up
+ and initialization sequence - Step \#8).
+ A[14:13] = 0, RESERVED
+ A[12] = 0, Fast Active Power Down Mode
+ A[11:9] = DFA_DDR2_TMG[TWR]
+ A[8] = 1, for DLL Reset
+ A[7] = 0 Test Mode (must be 0 for normal operation)
+ A[6:4]=DFA_DDR2_TMG[CASLAT] CAS LATENCY (default 4)
+ A[3] = 0 Burst Type(must be 0:Sequential)
+ A[2:0] = 2 Burst Length=4(default) */
+#else
+ uint64_t mrs_dll : 15;
+ uint64_t reserved_15_15 : 1;
+ uint64_t mrs : 15;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_mrs_s cn31xx;
+};
+typedef union cvmx_dfa_ddr2_mrs cvmx_dfa_ddr2_mrs_t;
+
+/**
+ * cvmx_dfa_ddr2_opt
+ *
+ * DFA_DDR2_OPT = DFA DDR2 Optimization Registers
+ *
+ *
+ * Description: The following are registers to tweak certain parameters to boost performance
+ */
+union cvmx_dfa_ddr2_opt {
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_opt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t max_read_batch : 5; /**< Maximum number of consecutive read to service before
+ allowing write to interrupt. */
+ uint64_t max_write_batch : 5; /**< Maximum number of consecutive writes to service before
+ allowing reads to interrupt. */
+#else
+ uint64_t max_write_batch : 5;
+ uint64_t max_read_batch : 5;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_opt_s cn31xx;
+};
+typedef union cvmx_dfa_ddr2_opt cvmx_dfa_ddr2_opt_t;
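+
+/*
+ * Minimal sketch for the batching knobs above, assuming the
+ * CVMX_DFA_DDR2_OPT address macro and cvmx_write_csr().  The batch sizes
+ * passed in are examples, not recommended values.
+ */
+static inline void cvmx_dfa_ddr2_opt_set_batches(unsigned rd, unsigned wr)
+{
+        cvmx_dfa_ddr2_opt_t opt;
+
+        opt.u64 = 0;
+        opt.s.max_read_batch = rd;   /* reads serviced before a write may preempt */
+        opt.s.max_write_batch = wr;  /* writes serviced before reads may preempt */
+        cvmx_write_csr(CVMX_DFA_DDR2_OPT, opt.u64);
+}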
+
+/**
+ * cvmx_dfa_ddr2_pll
+ *
+ * DFA_DDR2_PLL = DFA DDR2 PLL and DLL Configuration
+ *
+ *
+ * Description: The following are registers to program the DDR2 PLL and DLL
+ */
+union cvmx_dfa_ddr2_pll {
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_pll_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pll_setting : 17; /**< Internal Debug Use Only */
+ uint64_t reserved_32_46 : 15;
+ uint64_t setting90 : 5; /**< Contains the setting of DDR DLL; Internal DBG only */
+ uint64_t reserved_21_26 : 6;
+ uint64_t dll_setting : 5; /**< Contains the open loop setting value for the DDR90 delay
+ line. */
+ uint64_t dll_byp : 1; /**< DLL Bypass. When set, the DDR90 DLL is bypassed and
+ the DLL behaves in Open Loop giving a fixed delay
+ set by DLL_SETTING */
+ uint64_t qdll_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after
+ erst deassertion will reset the DDR 90 DLL. Allow
+ 200 micro seconds for Lock before DDR Init. */
+ uint64_t bw_ctl : 4; /**< Internal Use Only - for Debug */
+ uint64_t bw_upd : 1; /**< Internal Use Only - for Debug */
+ uint64_t pll_div2 : 1; /**< PLL Output is further divided by 2. Useful for slow
+ fclk frequencies where the PLL may be out of range. */
+ uint64_t reserved_7_7 : 1;
+ uint64_t pll_ratio : 5; /**< Bits <6:2> set the clk multiplication ratio
+ If the fclk frequency desired is less than 260MHz
+ (lower end saturation point of the pll), write 2x
+ the ratio desired in this register and set PLL_DIV2 */
+ uint64_t pll_bypass : 1; /**< PLL Bypass. Uses the ref_clk without multiplication. */
+ uint64_t pll_init : 1; /**< Need a 0 to 1 pulse on this CSR to get the DFA
+ Clk Generator Started. Write this register before
+ starting anything. Allow 200 uS for PLL Lock before
+ doing anything. */
+#else
+ uint64_t pll_init : 1;
+ uint64_t pll_bypass : 1;
+ uint64_t pll_ratio : 5;
+ uint64_t reserved_7_7 : 1;
+ uint64_t pll_div2 : 1;
+ uint64_t bw_upd : 1;
+ uint64_t bw_ctl : 4;
+ uint64_t qdll_ena : 1;
+ uint64_t dll_byp : 1;
+ uint64_t dll_setting : 5;
+ uint64_t reserved_21_26 : 6;
+ uint64_t setting90 : 5;
+ uint64_t reserved_32_46 : 15;
+ uint64_t pll_setting : 17;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_pll_s cn31xx;
+};
+typedef union cvmx_dfa_ddr2_pll cvmx_dfa_ddr2_pll_t;
+
+/**
+ * cvmx_dfa_ddr2_tmg
+ *
+ * DFA_DDR2_TMG = DFA DDR2 Memory Timing Config Register
+ *
+ *
+ * Description: The following are registers to program the DDR2 memory timing parameters.
+ */
+union cvmx_dfa_ddr2_tmg {
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_tmg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t fcnt_mode : 1; /**< If FCNT_MODE = 0, this counter counts the \# FCLK cycles
+ If FCNT_MODE = 1, this counter counts the \# cycles the
+ controller is active with memory requests. */
+ uint64_t cnt_clr : 1; /**< Clears the FCLK Cyc & Bus Util counter */
+ uint64_t cavmipo : 1; /**< RESERVED */
+ uint64_t ctr_rst : 1; /**< Reset oneshot pulse for refresh counter & Perf counters
+ SW should first write this field to a one to clear
+ & then write to a zero for normal operation */
+ uint64_t odt_rtt : 2; /**< DDR2 Termination Resistor Setting
+ These two bits are loaded into the RTT
+ portion of the EMRS register bits A6 & A2. If DDR2's
+ termination (for the memory's DQ/DQS/DM pads) is not
+ desired, set it to 00. If it is, choose between
+ 01 for 75 ohm and 10 for 150 ohm termination.
+ 00 = ODT Disabled
+ 01 = 75 ohm Termination
+ 10 = 150 ohm Termination
+ 11 = 50 ohm Termination */
+ uint64_t dqsn_ena : 1; /**< For DDR-II Mode, DIC[1] is used to load into EMRS
+ bit 10 - DQSN Enable/Disable field. By default, we
+ program the DDR's to drive the DQSN also. Set it to
+ 1 if DQSN should be Hi-Z.
+ 0 - DQSN Enable
+ 1 - DQSN Disable */
+ uint64_t dic : 1; /**< Drive Strength Control:
+ For DDR-I/II Mode, DIC[0] is
+ loaded into the Extended Mode Register (EMRS) A1 bit
+ during initialization. (see DDR-I data sheet EMRS
+ description)
+ 0 = Normal
+ 1 = Reduced */
+ uint64_t r2r_slot : 1; /**< A 1 on this register will force the controller to
+ slot a bubble between successive reads
+ uint64_t tfaw : 5; /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1
+ Four Access Window time. Relevant only in
+ 8-bank parts.
+ TFAW = 5'b0 for DDR2-4bank
+ TFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1 in DDR2-8bank */
+ uint64_t twtr : 4; /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)]
+ Last Wr Data to Rd Command time.
+ (Represented in fclk cycles)
+ TYP=15ns
+ - 0000: RESERVED
+ - 0001: 1
+ - ...
+ - 0111: 7
+ - 1000-1111: RESERVED */
+ uint64_t twr : 3; /**< DDR Write Recovery time (tWR). Last Wr Brst to Prech
+ This is not a direct encoding of the value. Its
+ programmed as below per DDR2 spec. The decimal number
+ on the right is RNDUP(tWR(ns) / clkFreq)
+ TYP=15ns
+ - 000: RESERVED
+ - 001: 2
+ - 010: 3
+ - 011: 4
+ - 100: 5
+ - 101: 6
+ - 110-111: RESERVED */
+ uint64_t trp : 4; /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)]
+ (Represented in fclk cycles)
+ TYP=15ns
+ - 0000: RESERVED
+ - 0001: 1
+ - ...
+ - 0111: 7
+ - 1000-1111: RESERVED
+ When using parts with 8 banks (DFA_CFG->MAX_BNK
+ is 1), load tRP cycles + 1 into this register. */
+ uint64_t tras : 5; /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)]
+ (Represented in fclk cycles)
+ TYP=45ns
+ - 00000-0001: RESERVED
+ - 00010: 2
+ - ...
+ - 10100: 20
+ - 10101-11111: RESERVED */
+ uint64_t trrd : 3; /**< tRRD cycles: ACT-ACT timing parameter for different
+ banks. (Represented in fclk cycles)
+ For DDR2, TYP=7.5ns
+ - 000: RESERVED
+ - 001: 1 tCYC
+ - 010: 2 tCYC
+ - 011: 3 tCYC
+ - 100: 4 tCYC
+ - 101: 5 tCYC
+ - 110-111: RESERVED */
+ uint64_t trcd : 4; /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)]
+ (Represented in fclk cycles)
+ TYP=15ns
+ - 0000: RESERVED
+ - 0001: 2 (2 is the smallest value allowed)
+ - 0010: 2
+ - ...
+ - 0111: 7
+ - 1000-1111: RESERVED
+ uint64_t addlat : 3; /**< When in Posted CAS mode ADDLAT needs to be programmed
+ to tRCD-1
+ ADDLAT \#additional latency cycles
+ 000 0
+ 001 1 (tRCD = 2 fclk's)
+ 010 2 (tRCD = 3 fclk's)
+ 011 3 (tRCD = 4 fclk's)
+ 100 4 (tRCD = 5 fclk's)
+ 101 5 (tRCD = 6 fclk's)
+ 110 6 (tRCD = 7 fclk's)
+ 111 7 (tRCD = 8 fclk's) */
+ uint64_t pocas : 1; /**< Posted CAS mode. When 1, we use DDR2's Posted CAS
+ feature. When using this mode, ADDLAT needs to be
+ programmed as well */
+ uint64_t caslat : 3; /**< CAS Latency in \# fclk Cycles
+ CASLAT \# CAS latency cycles
+ 000 - 010 RESERVED
+ 011 3
+ 100 4
+ 101 5
+ 110 6
+ 111 7 */
+ uint64_t tmrd : 2; /**< tMRD Cycles
+ (Represented in fclk tCYC)
+ For DDR2, it is typically 2*tCYC.
+ - 00: RESERVED
+ - 01: 1
+ - 10: 2
+ - 11: 3
+ uint64_t ddr2t : 1; /**< When 2T mode is turned on, command signals are
+ setup a cycle ahead of when the CS is enabled
+ and kept for a total of 2 cycles. This mode is
+ enabled in higher speeds when there is difficulty
+ meeting setup. Performance could
+ be negatively affected in 2T mode */
+#else
+ uint64_t ddr2t : 1;
+ uint64_t tmrd : 2;
+ uint64_t caslat : 3;
+ uint64_t pocas : 1;
+ uint64_t addlat : 3;
+ uint64_t trcd : 4;
+ uint64_t trrd : 3;
+ uint64_t tras : 5;
+ uint64_t trp : 4;
+ uint64_t twr : 3;
+ uint64_t twtr : 4;
+ uint64_t tfaw : 5;
+ uint64_t r2r_slot : 1;
+ uint64_t dic : 1;
+ uint64_t dqsn_ena : 1;
+ uint64_t odt_rtt : 2;
+ uint64_t ctr_rst : 1;
+ uint64_t cavmipo : 1;
+ uint64_t cnt_clr : 1;
+ uint64_t fcnt_mode : 1;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_tmg_s cn31xx;
+};
+typedef union cvmx_dfa_ddr2_tmg cvmx_dfa_ddr2_tmg_t;
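+
+/*
+ * Sketch of the RNDUP[t(ns)/tcyc(ns)] conversion used by most DFA_DDR2_TMG
+ * fields above (tWTR/tRP/tRAS/tRCD/...), plus tFAW's extra "- 1".
+ * Picosecond inputs; names are illustrative only.
+ */
+static inline uint64_t cvmx_dfa_ddr2_tmg_cycles(uint64_t t_ps, uint64_t tcyc_ps)
+{
+        return (t_ps + tcyc_ps - 1) / tcyc_ps;  /* RNDUP[t / tcyc] */
+}
+
+static inline uint64_t cvmx_dfa_ddr2_tmg_tfaw(uint64_t tfaw_ps, uint64_t tcyc_ps)
+{
+        /* TFAW = RNDUP[tFAW / tcyc] - 1 (8-bank DDR2 parts; 0 for 4-bank) */
+        return cvmx_dfa_ddr2_tmg_cycles(tfaw_ps, tcyc_ps) - 1;
+}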
+
+/**
+ * cvmx_dfa_debug0
+ *
+ * DFA_DEBUG0 = DFA Scoreboard Debug \#0 Register
+ * *FOR INTERNAL USE ONLY*
+ * Description: When the DFA_CONTROL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_CONTROL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+union cvmx_dfa_debug0 {
+ uint64_t u64;
+ struct cvmx_dfa_debug0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t sbd0 : 64; /**< DFA ScoreBoard \#0 Data
+ (DFA Scoreboard Debug)
+ [63:38] (26) rptr[28:3]: Result Base Pointer (QW-aligned)
+ [37:22] (16) Cumulative Result Write Counter (for HDR write)
+ [21] (1) Waiting for GRdRsp EOT
+ [20] (1) Waiting for GRdReq Issue (to NRQ)
+ [19] (1) GLPTR/GLCNT Valid
+ [18] (1) Completion Mark Detected
+ [17:15] (3) Completion Code [0=PDGONE/1=PERR/2=RFULL/3=TERM]
+ [14] (1) Completion Detected
+ [13] (1) Waiting for HDR RWrCmtRsp
+ [12] (1) Waiting for LAST RESULT RWrCmtRsp
+ [11] (1) Waiting for HDR RWrReq
+ [10] (1) Waiting for RWrReq
+ [9] (1) Waiting for WQWrReq issue
+ [8] (1) Waiting for PRdRsp EOT
+ [7] (1) Waiting for PRdReq Issue (to NRQ)
+ [6] (1) Packet Data Valid
+ [5] (1) WQVLD
+ [4] (1) WQ Done Point (either WQWrReq issued (for WQPTR<>0) OR HDR RWrCmtRsp)
+ [3] (1) Resultant write STF/P Mode
+ [2] (1) Packet Data LDT mode
+ [1] (1) Gather Mode
+ [0] (1) Valid */
+#else
+ uint64_t sbd0 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_debug0_s cn61xx;
+ struct cvmx_dfa_debug0_s cn63xx;
+ struct cvmx_dfa_debug0_s cn63xxp1;
+ struct cvmx_dfa_debug0_s cn66xx;
+ struct cvmx_dfa_debug0_s cn68xx;
+ struct cvmx_dfa_debug0_s cn68xxp1;
+};
+typedef union cvmx_dfa_debug0 cvmx_dfa_debug0_t;
+
+/**
+ * cvmx_dfa_debug1
+ *
+ * DFA_DEBUG1 = DFA Scoreboard Debug \#1 Register
+ * *FOR INTERNAL USE ONLY*
+ * Description: When the DFA_CONTROL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_CONTROL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+union cvmx_dfa_debug1 {
+ uint64_t u64;
+ struct cvmx_dfa_debug1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t sbd1 : 64; /**< DFA ScoreBoard \#1 Data
+ DFA Scoreboard Debug Data
+ [63:56] (8) UNUSED
+ [55:16] (40) Packet Data Pointer
+ [15:0] (16) Packet Data Counter */
+#else
+ uint64_t sbd1 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_debug1_s cn61xx;
+ struct cvmx_dfa_debug1_s cn63xx;
+ struct cvmx_dfa_debug1_s cn63xxp1;
+ struct cvmx_dfa_debug1_s cn66xx;
+ struct cvmx_dfa_debug1_s cn68xx;
+ struct cvmx_dfa_debug1_s cn68xxp1;
+};
+typedef union cvmx_dfa_debug1 cvmx_dfa_debug1_t;
+
+/**
+ * cvmx_dfa_debug2
+ *
+ * DFA_DEBUG2 = DFA Scoreboard Debug \#2 Register
+ *
+ * Description: When the DFA_CONTROL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_CONTROL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+union cvmx_dfa_debug2 {
+ uint64_t u64;
+ struct cvmx_dfa_debug2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t sbd2 : 64; /**< DFA ScoreBoard \#2 Data
+ [63:45] (19) UNUSED
+ [44:42] (3) Instruction Type
+ [41:5] (37) rwptr[39:3]: Result Write Pointer
+ [4:0] (5) prwcnt[4:0]: Pending Result Write Counter */
+#else
+ uint64_t sbd2 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_debug2_s cn61xx;
+ struct cvmx_dfa_debug2_s cn63xx;
+ struct cvmx_dfa_debug2_s cn63xxp1;
+ struct cvmx_dfa_debug2_s cn66xx;
+ struct cvmx_dfa_debug2_s cn68xx;
+ struct cvmx_dfa_debug2_s cn68xxp1;
+};
+typedef union cvmx_dfa_debug2 cvmx_dfa_debug2_t;
+
+/**
+ * cvmx_dfa_debug3
+ *
+ * DFA_DEBUG3 = DFA Scoreboard Debug \#3 Register
+ *
+ * Description: When the DFA_CONTROL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_CONTROL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+union cvmx_dfa_debug3 {
+ uint64_t u64;
+ struct cvmx_dfa_debug3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t sbd3 : 64; /**< DFA ScoreBoard \#3 Data
+ [63:53] (11) rptr[39:29]: Result Base Pointer (QW-aligned)
+ [52:16] (37) glptr[39:3]: Gather List Pointer
+ [15:0] (16) glcnt Gather List Counter */
+#else
+ uint64_t sbd3 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_debug3_s cn61xx;
+ struct cvmx_dfa_debug3_s cn63xx;
+ struct cvmx_dfa_debug3_s cn63xxp1;
+ struct cvmx_dfa_debug3_s cn66xx;
+ struct cvmx_dfa_debug3_s cn68xx;
+ struct cvmx_dfa_debug3_s cn68xxp1;
+};
+typedef union cvmx_dfa_debug3 cvmx_dfa_debug3_t;
+
+/**
+ * cvmx_dfa_difctl
+ *
+ * DFA_DIFCTL = DFA Instruction FIFO (DIF) Control Register
+ *
+ * Description:
+ * NOTE: To write to the DFA_DIFCTL register, a device would issue an IOBST directed at the DFA with addr[34:32]=3'b110.
+ * To read the DFA_DIFCTL register, a device would issue an IOBLD64 directed at the DFA with addr[34:32]=3'b110.
+ *
+ * NOTE: This register is intended to ONLY be written once (at power-up). Any future writes could
+ * cause the DFA and FPA HW to become unpredictable.
+ *
+ * NOTE: If DFA_CONFIG[DTECLKDIS]=1 (DFA-DTE clocks disabled), reads/writes to the DFA_DIFCTL register do not take effect.
+ * NOTE: If FUSE[TBD]="DFA DTE disable" is blown, reads/writes to the DFA_DIFCTL register do not take effect.
+ */
+union cvmx_dfa_difctl {
+ uint64_t u64;
+ struct cvmx_dfa_difctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63 : 38;
+ uint64_t msegbase : 6; /**< Memory Segmentation Base Address
+ For debug purposes, backdoor accesses to the DFA
+ memory are supported via NCB-Direct CSR accesses to
+ the DFA Memory REGION (if addr[34:32]=5). However due
+ to the existing NCB address decoding scheme, the
+ address only offers a 4GB extent into the DFA memory
+ REGION. Therefore, the MSEGBASE CSR field provides
+ the additional upper memory address bits to allow access
+ to the full extent of memory (128GB MAX).
+ For DFA Memory REGION read NCB-Direct CSR accesses, the
+ 38bit L2/DRAM memory byte address is generated as follows:
+ memaddr[37:0] = [DFA_DIFCTL[MSEGBASE],ncb_addr[31:3],3'b0]
+ NOTE: The upper 6 bits of the memory address are sourced
+ from the DFA_DIFCTL[MSEGBASE] CSR field. The lower 4GB address
+ offset is directly referenced using the NCB address bits during
+ the reference itself.
+ NOTE: The DFA_DIFCTL[MSEGBASE] is shared amongst all references.
+ As such, if multiple PPs are accessing different segments in memory,
+ there must be a SW mutual-exclusion lock during each DFA Memory
+ REGION access to avoid collisions between PPs using the same MSEGBASE
+ CSR field.
+ NOTE: See also DFA_ERROR[DFANXM] programmable interrupt which is
+ flagged if SW tries to access non-existent memory space (address hole
+ or upper unused region of 38bit address space). */
+ uint64_t dwbcnt : 8; /**< Represents the \# of cache lines in the instruction
+ buffer that may be dirty and should not be
+ written-back to memory when the instruction
+ chunk is returned to the Free Page list.
+ NOTE: Typically SW will want to mark all DFA
+ Instruction memory returned to the Free Page list
+ as DWB (Don't WriteBack), therefore SW should
+ seed this register as:
+ DFA_DIFCTL[DWBCNT] = (DFA_DIFCTL[SIZE] + 4)/4 */
+ uint64_t pool : 3; /**< Represents the 3bit buffer pool-id used by DFA HW
+ when the DFA instruction chunk is recycled back
+ to the Free Page List maintained by the FPA HW
+ (once the DFA instruction has been issued). */
+ uint64_t size : 9; /**< Represents the \# of 32B instructions contained
+ within each DFA instruction chunk. At Power-on,
+ SW will seed the SIZE register with a fixed
+ chunk-size. (Must be at least 3)
+ DFA HW uses this field to determine the size
+ of each DFA instruction chunk, in order to:
+ a) determine when to read the next DFA
+ instruction chunk pointer which is
+ written by SW at the end of the current
+ DFA instruction chunk (see DFA description
+ of next chunk buffer Ptr for format).
+ b) determine when a DFA instruction chunk
+ can be returned to the Free Page List
+ maintained by the FPA HW. */
+#else
+ uint64_t size : 9;
+ uint64_t pool : 3;
+ uint64_t dwbcnt : 8;
+ uint64_t msegbase : 6;
+ uint64_t reserved_26_63 : 38;
+#endif
+ } s;
+ struct cvmx_dfa_difctl_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwbcnt : 8; /**< Represents the \# of cache lines in the instruction
+ buffer that may be dirty and should not be
+ written-back to memory when the instruction
+ chunk is returned to the Free Page list.
+ NOTE: Typically SW will want to mark all DFA
+ Instruction memory returned to the Free Page list
+ as DWB (Don't WriteBack), therefore SW should
+ seed this register as:
+ DFA_DIFCTL[DWBCNT] = (DFA_DIFCTL[SIZE] + 4)/4 */
+ uint64_t pool : 3; /**< Represents the 3bit buffer pool-id used by DFA HW
+ when the DFA instruction chunk is recycled back
+ to the Free Page List maintained by the FPA HW
+ (once the DFA instruction has been issued). */
+ uint64_t size : 9; /**< Represents the \# of 32B instructions contained
+ within each DFA instruction chunk. At Power-on,
+ SW will seed the SIZE register with a fixed
+ chunk-size. (Must be at least 3)
+ DFA HW uses this field to determine the size
+ of each DFA instruction chunk, in order to:
+ a) determine when to read the next DFA
+ instruction chunk pointer which is
+ written by SW at the end of the current
+ DFA instruction chunk (see DFA description
+ of next chunk buffer Ptr for format).
+ b) determine when a DFA instruction chunk
+ can be returned to the Free Page List
+ maintained by the FPA HW. */
+#else
+ uint64_t size : 9;
+ uint64_t pool : 3;
+ uint64_t dwbcnt : 8;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn31xx;
+ struct cvmx_dfa_difctl_cn31xx cn38xx;
+ struct cvmx_dfa_difctl_cn31xx cn38xxp2;
+ struct cvmx_dfa_difctl_cn31xx cn58xx;
+ struct cvmx_dfa_difctl_cn31xx cn58xxp1;
+ struct cvmx_dfa_difctl_s cn61xx;
+ struct cvmx_dfa_difctl_cn31xx cn63xx;
+ struct cvmx_dfa_difctl_cn31xx cn63xxp1;
+ struct cvmx_dfa_difctl_cn31xx cn66xx;
+ struct cvmx_dfa_difctl_s cn68xx;
+ struct cvmx_dfa_difctl_s cn68xxp1;
+};
+typedef union cvmx_dfa_difctl cvmx_dfa_difctl_t;
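+
+/* Illustrative usage (not part of the SDK sources): a minimal sketch of the
+ * one-time power-up seeding of DFA_DIFCTL described above. It assumes the
+ * cvmx_write_csr() accessor and a CVMX_DFA_DIFCTL address macro defined
+ * elsewhere in the SDK; the chunk size and FPA pool-id are hypothetical.
+ *
+ *   cvmx_dfa_difctl_t difctl;
+ *   difctl.u64 = 0;
+ *   difctl.s.size = 32;              // 32 x 32B instructions per chunk (>= 3)
+ *   difctl.s.pool = 0;               // hypothetical FPA pool for chunk recycling
+ *   difctl.s.dwbcnt = (32 + 4) / 4;  // per the DWBCNT seeding formula above
+ *   cvmx_write_csr(CVMX_DFA_DIFCTL, difctl.u64);  // write ONCE at power-up
+ */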
+
+/**
+ * cvmx_dfa_difrdptr
+ *
+ * DFA_DIFRDPTR = DFA Instruction FIFO (DIF) RDPTR Register
+ *
+ * Description:
+ * NOTE: To write to the DFA_DIFRDPTR register, a device would issue an IOBST directed at the DFA with addr[34:33]=2'b01.
+ * To read the DFA_DIFRDPTR register, a device would issue an IOBLD64 directed at the DFA with addr[34:33]=2'b01.
+ *
+ * NOTE: If DFA_CONFIG[DTECLKDIS]=1 (DFA-DTE clocks disabled), reads/writes to the DFA_DIFRDPTR register do not take effect.
+ * NOTE: If FUSE[TBD]="DFA DTE disable" is blown, reads/writes to the DFA_DIFRDPTR register do not take effect.
+ */
+union cvmx_dfa_difrdptr {
+ uint64_t u64;
+ struct cvmx_dfa_difrdptr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t rdptr : 35; /**< Represents the 32B-aligned address of the current
+ instruction in the DFA Instruction FIFO in main
+ memory. The RDPTR must be seeded by software at
+ boot time, and is then maintained thereafter
+ by DFA HW.
+ During the seed write (by SW), RDPTR[6:5]=0,
+ since DFA instruction chunks must be 128B aligned.
+ During a read (by SW), the 'most recent' contents
+ of the RDPTR register are returned at the time
+ the NCB-INB bus is driven.
+ NOTE: Since DFA HW updates this register, its
+ contents are unpredictable in SW (unless
+ it's guaranteed that no new DoorBell register
+ writes have occurred and the DoorBell register is
+ read as zero). */
+ uint64_t reserved_0_4 : 5;
+#else
+ uint64_t reserved_0_4 : 5;
+ uint64_t rdptr : 35;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_dfa_difrdptr_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t rdptr : 31; /**< Represents the 32B-aligned address of the current
+ instruction in the DFA Instruction FIFO in main
+ memory. The RDPTR must be seeded by software at
+ boot time, and is then maintained thereafter
+ by DFA HW.
+ During the seed write (by SW), RDPTR[6:5]=0,
+ since DFA instruction chunks must be 128B aligned.
+ During a read (by SW), the 'most recent' contents
+ of the RDPTR register are returned at the time
+ the NCB-INB bus is driven.
+ NOTE: Since DFA HW updates this register, its
+ contents are unpredictable in SW (unless
+ it's guaranteed that no new DoorBell register
+ writes have occurred and the DoorBell register is
+ read as zero). */
+ uint64_t reserved_0_4 : 5;
+#else
+ uint64_t reserved_0_4 : 5;
+ uint64_t rdptr : 31;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn31xx;
+ struct cvmx_dfa_difrdptr_cn31xx cn38xx;
+ struct cvmx_dfa_difrdptr_cn31xx cn38xxp2;
+ struct cvmx_dfa_difrdptr_cn31xx cn58xx;
+ struct cvmx_dfa_difrdptr_cn31xx cn58xxp1;
+ struct cvmx_dfa_difrdptr_s cn61xx;
+ struct cvmx_dfa_difrdptr_s cn63xx;
+ struct cvmx_dfa_difrdptr_s cn63xxp1;
+ struct cvmx_dfa_difrdptr_s cn66xx;
+ struct cvmx_dfa_difrdptr_s cn68xx;
+ struct cvmx_dfa_difrdptr_s cn68xxp1;
+};
+typedef union cvmx_dfa_difrdptr cvmx_dfa_difrdptr_t;
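+
+/* Illustrative usage (not part of the SDK sources): seeding DFA_DIFRDPTR at
+ * boot, per the description above. Assumes cvmx_write_csr() and a
+ * CVMX_DFA_DIFRDPTR address macro; 'chunk_paddr' and first_chunk_address()
+ * are hypothetical, and the address must be 128B-aligned (RDPTR[6:5]=0).
+ *
+ *   uint64_t chunk_paddr = first_chunk_address();  // hypothetical helper
+ *   cvmx_dfa_difrdptr_t rdptr;
+ *   rdptr.u64 = 0;
+ *   rdptr.s.rdptr = chunk_paddr >> 5;  // field holds the 32B-aligned address
+ *   cvmx_write_csr(CVMX_DFA_DIFRDPTR, rdptr.u64);
+ */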
+
+/**
+ * cvmx_dfa_dtcfadr
+ *
+ * DFA_DTCFADR = DFA DTC Failing Address Register
+ *
+ * Description: DFA Node Cache Failing Address/Control Error Capture information
+ * This register contains useful information to help in isolating a Node Cache RAM failure.
+ * NOTE: The first detected PERR failure is captured in DFA_DTCFADR (locked down), until the
+ * corresponding PERR Interrupt is cleared by writing one (W1C). (see: DFA_ERR[DC0PERR[2:0]]).
+ * NOTE: In the rare event that multiple parity errors are detected in the same cycle from multiple
+ * clusters, the FADR register will be locked down for the least significant cluster \# (0->3).
+ */
+union cvmx_dfa_dtcfadr {
+ uint64_t u64;
+ struct cvmx_dfa_dtcfadr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t ram3fadr : 12; /**< DFA RAM3 Failing Address
+ If DFA_ERR[DC0PERR<2>]=1, this field indicates the
+ failing RAM3 Address. The failing address is locked
+ down until the DC0PERR<2> W1C occurs.
+ NOTE: If multiple clusters detect errors in the same
+ cycle, the least significant cluster's error information is captured. */
+ uint64_t reserved_25_31 : 7;
+ uint64_t ram2fadr : 9; /**< DFA RAM2 Failing Address
+ If DFA_ERR[DC0PERR<1>]=1, this field indicates the
+ failing RAM2 Address. The failing address is locked
+ down until the DC0PERR<1> W1C occurs.
+ NOTE: If multiple clusters detect errors in the same
+ cycle, the least significant cluster's error information is captured. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t ram1fadr : 14; /**< DFA RAM1 Failing Address
+ If DFA_ERR[DC0PERR<0>]=1, this field indicates the
+ failing RAM1 Address. The failing address is locked
+ down until the DC0PERR<0> W1C occurs.
+ NOTE: If multiple clusters detect errors in the same
+ cycle, the least significant cluster's error information is captured. */
+#else
+ uint64_t ram1fadr : 14;
+ uint64_t reserved_14_15 : 2;
+ uint64_t ram2fadr : 9;
+ uint64_t reserved_25_31 : 7;
+ uint64_t ram3fadr : 12;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_dfa_dtcfadr_s cn61xx;
+ struct cvmx_dfa_dtcfadr_s cn63xx;
+ struct cvmx_dfa_dtcfadr_s cn63xxp1;
+ struct cvmx_dfa_dtcfadr_s cn66xx;
+ struct cvmx_dfa_dtcfadr_s cn68xx;
+ struct cvmx_dfa_dtcfadr_s cn68xxp1;
+};
+typedef union cvmx_dfa_dtcfadr cvmx_dfa_dtcfadr_t;
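+
+/* Illustrative usage (not part of the SDK sources): isolating a node-cache
+ * RAM parity failure using the lock-down behavior described above. Assumes
+ * cvmx_read_csr()/cvmx_write_csr() and CVMX_DFA_ERROR/CVMX_DFA_DTCFADR
+ * address macros; a sketch only.
+ *
+ *   cvmx_dfa_error_t err;
+ *   err.u64 = cvmx_read_csr(CVMX_DFA_ERROR);
+ *   if (err.s.dc0perr & 1) {                     // RAM1 parity error, cluster #0
+ *       cvmx_dfa_dtcfadr_t fadr;
+ *       fadr.u64 = cvmx_read_csr(CVMX_DFA_DTCFADR);
+ *       // fadr.s.ram1fadr holds the failing RAM1 address (locked down)
+ *       err.u64 = 0;
+ *       err.s.dc0perr = 1;                       // W1C: clear PERR, unlock DTCFADR
+ *       cvmx_write_csr(CVMX_DFA_ERROR, err.u64);
+ *   }
+ */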
+
+/**
+ * cvmx_dfa_eclkcfg
+ *
+ * Specify the RSL base addresses for the block
+ *
+ * DFA_ECLKCFG = DFA eclk-domain Configuration Registers
+ *
+ * Description:
+ */
+union cvmx_dfa_eclkcfg {
+ uint64_t u64;
+ struct cvmx_dfa_eclkcfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t sbdnum : 3; /**< SBD Debug Entry#
+ For internal use only. (DFA Scoreboard debug)
+ Selects which one of 8 DFA Scoreboard entries is
+ latched into the DFA_SBD_DBG[0-3] registers. */
+ uint64_t reserved_15_15 : 1;
+ uint64_t sbdlck : 1; /**< DFA Scoreboard LOCK Strobe
+ For internal use only. (DFA Scoreboard debug)
+ When written with a '1', the DFA Scoreboard Debug
+ registers (DFA_SBD_DBG[0-3]) are all locked down.
+ This allows SW to lock down the contents of the entire
+ SBD for a single instant in time. All subsequent reads
+ of the DFA scoreboard registers will return the data
+ from that instant in time. */
+ uint64_t dcmode : 1; /**< DRF-CRQ/DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=CRQ/HP=DTE],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t dtmode : 1; /**< DRF-DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=DTE[15],...,HP=DTE[0]],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t pmode : 1; /**< NCB-NRP Arbiter Mode
+ (0=Fixed Priority [LP=WQF,DFF, HP=RGF], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t qmode : 1; /**< NCB-NRQ Arbiter Mode
+ (0=Fixed Priority [LP=IRF,RWF,PRF, HP=GRF], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t imode : 1; /**< NCB-Inbound Arbiter
+ (0=FP [LP=NRQ,HP=NRP], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t sarb : 1; /**< DFA Source Arbiter Mode
+ Selects the arbitration mode used to select DFA requests
+ issued from either CP2 or the DTE (NCB-CSR or DFA HW engine).
+ - 0: Fixed Priority [Highest=CP2, Lowest=DTE]
+ - 1: Round-Robin
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t dteclkdis : 1; /**< DFA DTE Clock Disable
+ When SET, the DFA clocks for DTE(thread engine)
+ operation are disabled.
+ NOTE: When SET, SW MUST NEVER issue ANY operations to
+ the DFA via the NCB Bus. All DFA Operations must be
+ issued solely through the CP2 interface. */
+ uint64_t maxbnk : 1; /**< Maximum Banks per-device (used by the address mapper
+ when extracting address bits for the memory bank#.
+ - 0: 4 banks/device
+ - 1: 8 banks/device */
+ uint64_t dfa_frstn : 1; /**< Hold this 0 until the DFA DDR PLL and DLL lock
+ and then write a 1. A 1 on this register deasserts
+ the internal frst_n. Refer to DFA_DDR2_PLL registers for more
+ startup information.
+ Startup sequence if DFA interface needs to be ON:
+ After valid power up,
+ Write DFA_DDR2_PLL-> PLL_RATIO & PLL_DIV2 & PLL_BYPASS
+ to the appropriate values
+ Wait a few cycles
+ Write a 1 DFA_DDR2_PLL -> PLL_INIT
+ Wait 100 microseconds
+ Write a 1 to DFA_DDR2_PLL -> QDLL_ENA
+ Wait 10 microseconds
+ Write a 1 to this register DFA_FRSTN to pull DFA out of
+ reset
+ Now the DFA block is ready to be initialized (follow the
+ DDR init sequence). */
+#else
+ uint64_t dfa_frstn : 1;
+ uint64_t maxbnk : 1;
+ uint64_t dteclkdis : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t sarb : 1;
+ uint64_t imode : 1;
+ uint64_t qmode : 1;
+ uint64_t pmode : 1;
+ uint64_t dtmode : 1;
+ uint64_t dcmode : 1;
+ uint64_t sbdlck : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t sbdnum : 3;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_dfa_eclkcfg_s cn31xx;
+};
+typedef union cvmx_dfa_eclkcfg cvmx_dfa_eclkcfg_t;
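+
+/* Illustrative usage (not part of the SDK sources): a sketch of the DFA_FRSTN
+ * startup sequence listed above. Assumes cvmx_read_csr()/cvmx_write_csr(),
+ * a cvmx_wait_usec()-style delay helper, and a CVMX_DFA_ECLKCFG address
+ * macro; the DFA_DDR2_PLL programming itself is summarized as comments.
+ *
+ *   // program DFA_DDR2_PLL PLL_RATIO/PLL_DIV2/PLL_BYPASS, then PLL_INIT=1,
+ *   // wait 100us, set QDLL_ENA=1, wait 10us (per the sequence above)
+ *   cvmx_dfa_eclkcfg_t eclk;
+ *   eclk.u64 = cvmx_read_csr(CVMX_DFA_ECLKCFG);
+ *   eclk.s.dfa_frstn = 1;                        // deassert internal frst_n
+ *   cvmx_write_csr(CVMX_DFA_ECLKCFG, eclk.u64);
+ *   // the DFA block is now out of reset; follow the DDR init sequence
+ */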
+
+/**
+ * cvmx_dfa_err
+ *
+ * DFA_ERR = DFA ERROR Register
+ *
+ * Description:
+ */
+union cvmx_dfa_err {
+ uint64_t u64;
+ struct cvmx_dfa_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t dblina : 1; /**< Doorbell Overflow Interrupt Enable bit.
+ When set, doorbell overflow conditions are reported. */
+ uint64_t dblovf : 1; /**< Doorbell Overflow detected - Status bit
+ When set, the 20b accumulated doorbell register
+ had overflowed (SW wrote too many doorbell requests).
+ If the DBLINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ NOTE: Detection of a Doorbell Register overflow
+ is a catastrophic error which may leave the DFA
+ HW in an unrecoverable state. */
+ uint64_t cp2pina : 1; /**< CP2 LW Mode Parity Error Interrupt Enable bit.
+ When set, all PP-generated LW Mode read
+ transactions which encounter a parity error (across
+ the 36b of data) are reported. */
+ uint64_t cp2perr : 1; /**< PP-CP2 Parity Error Detected - Status bit
+ When set, a parity error had been detected for a
+ PP-generated LW Mode read transaction.
+ If the CP2PINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ See also: DFA_MEMFADR CSR which contains more data
+ about the memory address/control to help isolate
+ the failure. */
+ uint64_t cp2parena : 1; /**< CP2 LW Mode Parity Error Enable
+ When set, all PP-generated LW Mode read
+ transactions which encounter a parity error (across
+ the 36b of data) are reported.
+ NOTE: This signal must only be written to a different
+ value when there are no PP-CP2 transactions
+ (preferably during power-on software initialization). */
+ uint64_t dtepina : 1; /**< DTE Parity Error Interrupt Enable bit
+ (for 18b SIMPLE mode ONLY).
+ When set, all DTE-generated 18b SIMPLE Mode read
+ transactions which encounter a parity error (across
+ the 17b of data) are reported. */
+ uint64_t dteperr : 1; /**< DTE Parity Error Detected (for 18b SIMPLE mode ONLY)
+ When set, all DTE-generated 18b SIMPLE Mode read
+ transactions which encounter a parity error (across
+ the 17b of data) are reported. */
+ uint64_t dteparena : 1; /**< DTE Parity Error Enable (for 18b SIMPLE mode ONLY)
+ When set, all DTE-generated 18b SIMPLE Mode read
+ transactions which encounter a parity error (across
+ the 17b of data) are reported.
+ NOTE: This signal must only be written to a different
+ value when there are no DFA thread engines active
+ (preferably during power-on). */
+ uint64_t dtesyn : 7; /**< DTE 29b ECC Failing 7bit Syndrome
+ When DTESBE or DTEDBE are set, this field contains
+ the failing 7b ECC syndrome. */
+ uint64_t dtedbina : 1; /**< DTE 29b Double Bit Error Interrupt Enable bit
+ When set, an interrupt is posted for any DTE-generated
+ 36b SIMPLE Mode read which encounters a double bit
+ error. */
+ uint64_t dtesbina : 1; /**< DTE 29b Single Bit Error Interrupt Enable bit
+ When set, an interrupt is posted for any DTE-generated
+ 36b SIMPLE Mode read which encounters a single bit
+ error (which is also corrected). */
+ uint64_t dtedbe : 1; /**< DTE 29b Double Bit Error Detected - Status bit
+ When set, a double bit error had been detected
+ for a DTE-generated 36b SIMPLE Mode read transaction.
+ The DTESYN contains the failing syndrome.
+ If the DTEDBINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ See also: DFA_MEMFADR CSR which contains more data
+ about the memory address/control to help isolate
+ the failure.
+ NOTE: DTE-generated 18b SIMPLE Mode Read transactions
+ do not participate in ECC check/correct. */
+ uint64_t dtesbe : 1; /**< DTE 29b Single Bit Error Corrected - Status bit
+ When set, a single bit error had been detected and
+ corrected for a DTE-generated 36b SIMPLE Mode read
+ transaction.
+ If the DTEDBE=0, then the DTESYN contains the
+ failing syndrome (used during correction).
+ NOTE: DTE-generated 18b SIMPLE Mode Read
+ transactions do not participate in ECC check/correct.
+ If the DTESBINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ See also: DFA_MEMFADR CSR which contains more data
+ about the memory address/control to help isolate
+ the failure. */
+ uint64_t dteeccena : 1; /**< DTE 29b ECC Enable (for 36b SIMPLE mode ONLY)
+ When set, 29b ECC is enabled on all DTE-generated
+ 36b SIMPLE Mode read transactions.
+ NOTE: This signal must only be written to a different
+ value when there are no DFA thread engines active
+ (preferably during power-on software initialization). */
+ uint64_t cp2syn : 8; /**< PP-CP2 QW ECC Failing 8bit Syndrome
+ When CP2SBE or CP2DBE are set, this field contains
+ the failing ECC 8b syndrome.
+ Refer to CP2ECCENA. */
+ uint64_t cp2dbina : 1; /**< PP-CP2 Double Bit Error Interrupt Enable bit
+ When set, an interrupt is posted for any PP-generated
+ QW Mode read which encounters a double bit error.
+ Refer to CP2DBE. */
+ uint64_t cp2sbina : 1; /**< PP-CP2 Single Bit Error Interrupt Enable bit
+ When set, an interrupt is posted for any PP-generated
+ QW Mode read which encounters a single bit error
+ (which is also corrected).
+ Refer to CP2SBE. */
+ uint64_t cp2dbe : 1; /**< PP-CP2 Double Bit Error Detected - Status bit
+ When set, a double bit error had been detected
+ for a PP-generated QW Mode read transaction.
+ The CP2SYN contains the failing syndrome.
+ NOTE: PP-generated LW Mode Read transactions
+ do not participate in ECC check/correct.
+ Refer to CP2ECCENA.
+ If the CP2DBINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ See also: DFA_MEMFADR CSR which contains more data
+ about the memory address/control to help isolate
+ the failure. */
+ uint64_t cp2sbe : 1; /**< PP-CP2 Single Bit Error Corrected - Status bit
+ When set, a single bit error had been detected and
+ corrected for a PP-generated QW Mode read
+ transaction.
+ If the CP2DBE=0, then the CP2SYN contains the
+ failing syndrome (used during correction).
+ Refer to CP2ECCENA.
+ If the CP2SBINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ See also: DFA_MEMFADR CSR which contains more data
+ about the memory address/control to help isolate
+ the failure.
+ NOTE: PP-generated LW Mode Read transactions
+ do not participate in ECC check/correct. */
+ uint64_t cp2eccena : 1; /**< PP-CP2 QW ECC Enable (for QW Mode transactions)
+ When set, 8bit QW ECC is enabled on all PP-generated
+ QW Mode read transactions, CP2SBE and
+ CP2DBE may be set, and CP2SYN may be filled.
+ NOTE: This signal must only be written to a different
+ value when there are no PP-CP2 transactions
+ (preferably during power-on software initialization).
+ NOTE: QW refers to a 64-bit LLM Load/Store (initiated
+ by a processor core). LW refers to a 36-bit load/store. */
+#else
+ uint64_t cp2eccena : 1;
+ uint64_t cp2sbe : 1;
+ uint64_t cp2dbe : 1;
+ uint64_t cp2sbina : 1;
+ uint64_t cp2dbina : 1;
+ uint64_t cp2syn : 8;
+ uint64_t dteeccena : 1;
+ uint64_t dtesbe : 1;
+ uint64_t dtedbe : 1;
+ uint64_t dtesbina : 1;
+ uint64_t dtedbina : 1;
+ uint64_t dtesyn : 7;
+ uint64_t dteparena : 1;
+ uint64_t dteperr : 1;
+ uint64_t dtepina : 1;
+ uint64_t cp2parena : 1;
+ uint64_t cp2perr : 1;
+ uint64_t cp2pina : 1;
+ uint64_t dblovf : 1;
+ uint64_t dblina : 1;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_dfa_err_s cn31xx;
+ struct cvmx_dfa_err_s cn38xx;
+ struct cvmx_dfa_err_s cn38xxp2;
+ struct cvmx_dfa_err_s cn58xx;
+ struct cvmx_dfa_err_s cn58xxp1;
+};
+typedef union cvmx_dfa_err cvmx_dfa_err_t;
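+
+/* Illustrative usage (not part of the SDK sources): enabling CP2 QW ECC at
+ * power-on and later servicing a corrected single-bit error, per the
+ * CP2ECCENA/CP2SBE descriptions above. Assumes cvmx_read_csr()/
+ * cvmx_write_csr() and a CVMX_DFA_ERR address macro; a sketch only.
+ *
+ *   cvmx_dfa_err_t ena;
+ *   ena.u64 = 0;
+ *   ena.s.cp2eccena = 1;           // enable QW ECC (no PP-CP2 traffic yet)
+ *   ena.s.cp2sbina = 1;            // interrupt on corrected single-bit errors
+ *   cvmx_write_csr(CVMX_DFA_ERR, ena.u64);
+ *   ...
+ *   cvmx_dfa_err_t sts;
+ *   sts.u64 = cvmx_read_csr(CVMX_DFA_ERR);
+ *   if (sts.s.cp2sbe) {            // sts.s.cp2syn holds the failing syndrome
+ *       cvmx_dfa_err_t clr;
+ *       clr.u64 = 0;
+ *       clr.s.cp2sbe = 1;          // W1C the status bit
+ *       cvmx_write_csr(CVMX_DFA_ERR, clr.u64);
+ *   }
+ */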
+
+/**
+ * cvmx_dfa_error
+ *
+ * DFA_ERROR = DFA ERROR Register
+ *
+ * Description:
+ */
+union cvmx_dfa_error {
+ uint64_t u64;
+ struct cvmx_dfa_error_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t replerr : 1; /**< DFA Illegal Replication Factor Error
+ For o68: DFA only supports 1x, 2x, and 4x port replication.
+ Legal memory configurations are 2-port or 4-port.
+ The REPLERR interrupt will be set in the following illegal
+ configuration cases:
+ 1) An 8x replication factor is detected for any memory reference.
+ 2) A 4x replication factor is detected for any memory reference
+ when only 2 memory ports are enabled.
+ NOTE: If REPLERR is set during a DFA Graph Walk operation,
+ then the walk will prematurely terminate with RWORD0[REA]=ERR.
+ If REPLERR is set during a NCB-Direct CSR read access to DFA
+ Memory REGION, then the CSR read response data is UNPREDICTABLE. */
+ uint64_t dfanxm : 1; /**< DFA Non-existent Memory Access
+ For o68: DTEs (and backdoor CSR DFA Memory REGION reads)
+ have access to the following 38bit L2/DRAM address space
+ which maps to a 37bit physical DDR3 SDRAM address space.
+ see:
+ DR0: 0x0 0000 0000 0000 to 0x0 0000 0FFF FFFF
+ maps to lower 256MB of physical DDR3 SDRAM
+ DR1: 0x0 0000 2000 0000 to 0x0 0020 0FFF FFFF
+ maps to upper 127.75GB of DDR3 SDRAM
+                      L2/DRAM address space              Physical DDR3 SDRAM Address space
+                      (38bit address)                    (37bit address)
+                     +-----------+ 0x0020.0FFF.FFFF
+                     |           |
+        === DR1 ===  |           |                      +-----------+ 0x001F.FFFF.FFFF
+       (128GB-256MB) |           |          =>          |    DR1    | (128GB-256MB)
+                     +-----------+ 0x0000.1FFF.FFFF     |           |
+               256MB |   HOLE    | (DO NOT USE)         |           |
+                     +-----------+ 0x0000.0FFF.FFFF     +-----------+ 0x0000.0FFF.FFFF
+               256MB |    DR0    |                      |    DR0    | (256MB)
+                     +-----------+ 0x0000.0000.0000     +-----------+ 0x0000.0000.0000
+ In the event the DFA generates a reference to the L2/DRAM
+ address hole (0x0000.0FFF.FFFF - 0x0000.1FFF.FFFF) or to
+ an address above 0x0020.0FFF.FFFF, the DFANXM programmable
+ interrupt bit will be set.
+ SWNOTE: Both the 1) SW DFA Graph compiler and the 2) SW NCB-Direct CSR
+ accesses to DFA Memory REGION MUST avoid making references
+ to these non-existent memory regions.
+ NOTE: If DFANXM is set during a DFA Graph Walk operation,
+ then the walk will prematurely terminate with RWORD0[REA]=ERR.
+ If DFANXM is set during a NCB-Direct CSR read access to DFA
+ Memory REGION, then the CSR read response data is forced to
+ 128'hBADE_FEED_DEAD_BEEF_FACE_CAFE_BEAD_C0DE. (NOTE: depending on the QW
+ being accessed, either the upper or lower QW is returned). */
+ uint64_t cndrd : 1; /**< If any of the clusters detected a parity error on RAM1,
+ this additional bit further specifies that the
+ RAM1 parity error was detected during a CND-RD
+ (Cache Node Metadata Read).
+
+ For CNDRD Parity Error, the previous CNA arc fetch
+ information is written to RWORD1+ as follows:
+ RWORD1+[NTYPE]=MNODE
+ RWORD1+[NDNID]=cna.ndnid
+ RWORD1+[NHMSK]=cna.hmsk
+ RWORD1+[NNPTR]=cna.nnptr[13:0]
+ NOTE: This bit is set if ANY node cluster's RAM1 accesses
+ detect a CNDRD error. */
+ uint64_t reserved_15_15 : 1;
+ uint64_t dlc1_ovferr : 1; /**< DLC1 Fifo Overflow Error Detected
+ This condition should NEVER architecturally occur, and
+ is here in case HW credit/debit scheme is not working. */
+ uint64_t dlc0_ovferr : 1; /**< DLC0 Fifo Overflow Error Detected
+ This condition should NEVER architecturally occur, and
+ is here in case HW credit/debit scheme is not working. */
+ uint64_t reserved_10_12 : 3;
+ uint64_t dc2perr : 3; /**< Cluster#2 RAM[3:1] Parity Error Detected
+ See also DFA_DTCFADR register which contains the
+ failing addresses for the internal node cache RAMs. */
+ uint64_t dc1perr : 3; /**< Cluster#1 RAM[3:1] Parity Error Detected
+ See also DFA_DTCFADR register which contains the
+ failing addresses for the internal node cache RAMs. */
+ uint64_t dc0perr : 3; /**< Cluster#0 RAM[3:1] Parity Error Detected
+ See also DFA_DTCFADR register which contains the
+ failing addresses for the internal node cache RAMs. */
+ uint64_t dblovf : 1; /**< Doorbell Overflow detected - Status bit
+ When set, the 20b accumulated doorbell register
+ had overflowed (SW wrote too many doorbell requests).
+ If the DBLINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ NOTE: Detection of a Doorbell Register overflow
+ is a catastrophic error which may leave the DFA
+ HW in an unrecoverable state. */
+#else
+ uint64_t dblovf : 1;
+ uint64_t dc0perr : 3;
+ uint64_t dc1perr : 3;
+ uint64_t dc2perr : 3;
+ uint64_t reserved_10_12 : 3;
+ uint64_t dlc0_ovferr : 1;
+ uint64_t dlc1_ovferr : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t cndrd : 1;
+ uint64_t dfanxm : 1;
+ uint64_t replerr : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_dfa_error_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t replerr : 1; /**< DFA Illegal Replication Factor Error
+ For o68: DFA only supports 1x, 2x, and 4x port replication.
+ Legal memory configurations are 2-port or 4-port.
+ The REPLERR interrupt will be set in the following illegal
+ configuration cases:
+ 1) An 8x replication factor is detected for any memory reference.
+ 2) A 4x replication factor is detected for any memory reference
+ when only 2 memory ports are enabled.
+ NOTE: If REPLERR is set during a DFA Graph Walk operation,
+ then the walk will prematurely terminate with RWORD0[REA]=ERR.
+ If REPLERR is set during a NCB-Direct CSR read access to DFA
+ Memory REGION, then the CSR read response data is UNPREDICTABLE. */
+ uint64_t dfanxm : 1; /**< DFA Non-existent Memory Access
+ For o68/o61: DTEs (and backdoor CSR DFA Memory REGION reads)
+ have access to the following 38bit L2/DRAM address space
+ which maps to a 37bit physical DDR3 SDRAM address space.
+ see:
+ DR0: 0x0 0000 0000 0000 to 0x0 0000 0FFF FFFF
+ maps to lower 256MB of physical DDR3 SDRAM
+ DR1: 0x0 0000 2000 0000 to 0x0 0020 0FFF FFFF
+ maps to upper 127.75GB of DDR3 SDRAM
+                      L2/DRAM address space              Physical DDR3 SDRAM Address space
+                      (38bit address)                    (37bit address)
+                     +-----------+ 0x0020.0FFF.FFFF
+                     |           |
+        === DR1 ===  |           |                      +-----------+ 0x001F.FFFF.FFFF
+       (128GB-256MB) |           |          =>          |    DR1    | (128GB-256MB)
+                     +-----------+ 0x0000.1FFF.FFFF     |           |
+               256MB |   HOLE    | (DO NOT USE)         |           |
+                     +-----------+ 0x0000.0FFF.FFFF     +-----------+ 0x0000.0FFF.FFFF
+               256MB |    DR0    |                      |    DR0    | (256MB)
+                     +-----------+ 0x0000.0000.0000     +-----------+ 0x0000.0000.0000
+ In the event the DFA generates a reference to the L2/DRAM
+ address hole (0x0000.0FFF.FFFF - 0x0000.1FFF.FFFF) or to
+ an address above 0x0020.0FFF.FFFF, the DFANXM programmable
+ interrupt bit will be set.
+ SWNOTE: Both the 1) SW DFA Graph compiler and the 2) SW NCB-Direct CSR
+ accesses to DFA Memory REGION MUST avoid making references
+ to these non-existent memory regions.
+ NOTE: If DFANXM is set during a DFA Graph Walk operation,
+ then the walk will prematurely terminate with RWORD0[REA]=ERR.
+ If DFANXM is set during a NCB-Direct CSR read access to DFA
+ Memory REGION, then the CSR read response data is forced to
+ 128'hBADE_FEED_DEAD_BEEF_FACE_CAFE_BEAD_C0DE. (NOTE: depending on the QW
+ being accessed, either the upper or lower QW is returned). */
+ uint64_t cndrd : 1; /**< If any of the clusters detected a parity error on RAM1,
+ this additional bit further specifies that the
+ RAM1 parity error was detected during a CND-RD
+ (Cache Node Metadata Read).
+
+ For CNDRD Parity Error, the previous CNA arc fetch
+ information is written to RWORD1+ as follows:
+ RWORD1+[NTYPE]=MNODE
+ RWORD1+[NDNID]=cna.ndnid
+ RWORD1+[NHMSK]=cna.hmsk
+ RWORD1+[NNPTR]=cna.nnptr[13:0]
+ NOTE: This bit is set if ANY node cluster's RAM1 accesses
+ detect a CNDRD error. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t dlc0_ovferr : 1; /**< DLC0 Fifo Overflow Error Detected
+ This condition should NEVER architecturally occur, and
+ is here in case HW credit/debit scheme is not working. */
+ uint64_t reserved_4_12 : 9;
+ uint64_t dc0perr : 3; /**< Cluster#0 RAM[3:1] Parity Error Detected
+ See also DFA_DTCFADR register which contains the
+ failing addresses for the internal node cache RAMs. */
+ uint64_t dblovf : 1; /**< Doorbell Overflow detected - Status bit
+ When set, the 20b accumulated doorbell register
+ had overflowed (SW wrote too many doorbell requests).
+ If the DBLINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ NOTE: Detection of a Doorbell Register overflow
+ is a catastrophic error which may leave the DFA
+ HW in an unrecoverable state. */
+#else
+ uint64_t dblovf : 1;
+ uint64_t dc0perr : 3;
+ uint64_t reserved_4_12 : 9;
+ uint64_t dlc0_ovferr : 1;
+ uint64_t reserved_14_15 : 2;
+ uint64_t cndrd : 1;
+ uint64_t dfanxm : 1;
+ uint64_t replerr : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn61xx;
+ struct cvmx_dfa_error_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t cndrd : 1; /**< If DC0PERR[0]=1 indicating a RAM1 Parity error,
+ this additional bit further specifies that the
+ RAM1 parity error was detected during a CND-RD
+ (Cache Node Metadata Read).
+
+ For CNDRD Parity Error, the previous CNA arc fetch
+ information is written to RWORD1+ as follows:
+ RWORD1+[NTYPE]=MNODE
+ RWORD1+[NDNID]=cna.ndnid
+ RWORD1+[NHMSK]=cna.hmsk
+ RWORD1+[NNPTR]=cna.nnptr[13:0] */
+ uint64_t reserved_4_15 : 12;
+ uint64_t dc0perr : 3; /**< RAM[3:1] Parity Error Detected from Node Cluster \#0
+ See also DFA_DTCFADR register which contains the
+ failing addresses for the internal node cache RAMs. */
+ uint64_t dblovf : 1; /**< Doorbell Overflow detected - Status bit
+ When set, the 20b accumulated doorbell register
+ had overflowed (SW wrote too many doorbell requests).
+ If the DBLINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ NOTE: Detection of a Doorbell Register overflow
+ is a catastrophic error which may leave the DFA
+ HW in an unrecoverable state. */
+#else
+ uint64_t dblovf : 1;
+ uint64_t dc0perr : 3;
+ uint64_t reserved_4_15 : 12;
+ uint64_t cndrd : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn63xx;
+ struct cvmx_dfa_error_cn63xx cn63xxp1;
+ struct cvmx_dfa_error_cn63xx cn66xx;
+ struct cvmx_dfa_error_s cn68xx;
+ struct cvmx_dfa_error_s cn68xxp1;
+};
+typedef union cvmx_dfa_error cvmx_dfa_error_t;
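+
+/* Illustrative usage (not part of the SDK sources): the DFANXM description
+ * above obliges SW (the graph compiler and NCB-Direct CSR accesses alike) to
+ * avoid the L2/DRAM address hole and the space above 0x0020.0FFF.FFFF. A
+ * hypothetical validity check over the 38-bit L2/DRAM byte address:
+ *
+ *   static inline int dfa_addr_is_valid(uint64_t addr)
+ *   {
+ *       if (addr <= 0x0FFFFFFFull)               // DR0: lower 256MB
+ *           return 1;
+ *       if (addr >= 0x20000000ull && addr <= 0x200FFFFFFFull)
+ *           return 1;                            // DR1: upper 127.75GB
+ *       return 0;                                // hole or non-existent memory
+ *   }
+ */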
+
+/**
+ * cvmx_dfa_intmsk
+ *
+ * DFA_INTMSK = DFA ERROR Interrupt Mask Register
+ *
+ * Description:
+ */
+union cvmx_dfa_intmsk {
+ uint64_t u64;
+ struct cvmx_dfa_intmsk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t replerrena : 1; /**< DFA Illegal Replication Factor Interrupt Enable */
+ uint64_t dfanxmena : 1; /**< DFA Non-existent Memory Access Interrupt Enable */
+ uint64_t reserved_15_16 : 2;
+ uint64_t dlc1_ovfena : 1; /**< DLC1 Fifo Overflow Error Interrupt Enable */
+ uint64_t dlc0_ovfena : 1; /**< DLC0 Fifo Overflow Error Interrupt Enable */
+ uint64_t reserved_10_12 : 3;
+ uint64_t dc2pena : 3; /**< RAM[3:1] Parity Error Enabled Node Cluster \#2 */
+ uint64_t dc1pena : 3; /**< RAM[3:1] Parity Error Enabled Node Cluster \#1 */
+ uint64_t dc0pena : 3; /**< RAM[3:1] Parity Error Enabled Node Cluster \#0 */
+ uint64_t dblina : 1; /**< Doorbell Overflow Interrupt Enable bit.
+ When set, doorbell overflow conditions are reported. */
+#else
+ uint64_t dblina : 1;
+ uint64_t dc0pena : 3;
+ uint64_t dc1pena : 3;
+ uint64_t dc2pena : 3;
+ uint64_t reserved_10_12 : 3;
+ uint64_t dlc0_ovfena : 1;
+ uint64_t dlc1_ovfena : 1;
+ uint64_t reserved_15_16 : 2;
+ uint64_t dfanxmena : 1;
+ uint64_t replerrena : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_dfa_intmsk_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t replerrena : 1; /**< DFA Illegal Replication Factor Interrupt Enable */
+ uint64_t dfanxmena : 1; /**< DFA Non-existent Memory Access Interrupt Enable */
+ uint64_t reserved_14_16 : 3;
+ uint64_t dlc0_ovfena : 1; /**< DLC0 Fifo Overflow Error Interrupt Enable */
+ uint64_t reserved_4_12 : 9;
+ uint64_t dc0pena : 3; /**< RAM[3:1] Parity Error Enabled Node Cluster \#0 */
+ uint64_t dblina : 1; /**< Doorbell Overflow Interrupt Enable bit.
+ When set, doorbell overflow conditions are reported. */
+#else
+ uint64_t dblina : 1;
+ uint64_t dc0pena : 3;
+ uint64_t reserved_4_12 : 9;
+ uint64_t dlc0_ovfena : 1;
+ uint64_t reserved_14_16 : 3;
+ uint64_t dfanxmena : 1;
+ uint64_t replerrena : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn61xx;
+ struct cvmx_dfa_intmsk_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t dc0pena : 3; /**< RAM[3:1] Parity Error Enabled Node Cluster \#0 */
+ uint64_t dblina : 1; /**< Doorbell Overflow Interrupt Enable bit.
+ When set, doorbell overflow conditions are reported. */
+#else
+ uint64_t dblina : 1;
+ uint64_t dc0pena : 3;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn63xx;
+ struct cvmx_dfa_intmsk_cn63xx cn63xxp1;
+ struct cvmx_dfa_intmsk_cn63xx cn66xx;
+ struct cvmx_dfa_intmsk_s cn68xx;
+ struct cvmx_dfa_intmsk_s cn68xxp1;
+};
+typedef union cvmx_dfa_intmsk cvmx_dfa_intmsk_t;
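+
+/* Illustrative usage (not part of the SDK sources): unmasking the DFA error
+ * interrupts described above on an o68-style part. Assumes cvmx_write_csr()
+ * and a CVMX_DFA_INTMSK address macro; which sources to enable is a policy
+ * choice, not an SDK requirement.
+ *
+ *   cvmx_dfa_intmsk_t msk;
+ *   msk.u64 = 0;
+ *   msk.s.dblina = 1;              // doorbell overflow
+ *   msk.s.dfanxmena = 1;           // non-existent memory access
+ *   msk.s.replerrena = 1;          // illegal replication factor
+ *   msk.s.dc0pena = 7;             // cluster #0 RAM[3:1] parity errors
+ *   cvmx_write_csr(CVMX_DFA_INTMSK, msk.u64);
+ */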
+
+/**
+ * cvmx_dfa_memcfg0
+ *
+ * DFA_MEMCFG0 = DFA Memory Configuration
+ *
+ * Description:
+ */
+union cvmx_dfa_memcfg0 {
+ uint64_t u64;
+ struct cvmx_dfa_memcfg0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rldqck90_rst : 1; /**< RLDCK90 and RLDQK90 DLL SW Reset
+ When written with a '1' the RLDCK90 and RLDQK90 DLL are
+ in soft-reset. */
+ uint64_t rldck_rst : 1; /**< RLDCK Zero Delay DLL(Clock Generator) SW Reset
+ When written with a '1' the RLDCK zero delay DLL is in
+ soft-reset. */
+ uint64_t clkdiv : 2; /**< RLDCLK Divisor Select
+ - 0: RLDx_CK_H/L = Core Clock /2
+ - 1: RESERVED (must not be used)
+ - 2: RLDx_CK_H/L = Core Clock /3
+ - 3: RLDx_CK_H/L = Core Clock /4
+ The DFA LLM interface(s) are tied to the core clock
+ frequency through this programmable clock divisor.
+ Examples:
+ Core Clock(MHz) | DFA-LLM Clock(MHz) | CLKDIV
+ -----------------+--------------------+--------
+ 800 | 400/(800-DDR) | /2
+ 1000 | 333/(666-DDR) | /3
+ 800 | 200/(400-DDR) | /4
+ NOTE: This value MUST BE programmed BEFORE doing a
+ Hardware init sequence (see: DFA_MEMCFG0[INIT_Px] bits). */
+ uint64_t lpp_ena : 1; /**< PP Linear Port Addressing Mode Enable
+ When enabled, PP-core LLM accesses to the lower-512MB
+ LLM address space are sent to the single DFA port
+ which is enabled. NOTE: If LPP_ENA=1, only
+ one DFA RLDRAM port may be enabled for RLDRAM accesses
+ (ie: ENA_P0 and ENA_P1 CAN NEVER BOTH be set).
+ PP-core LLM accesses to the upper-512MB LLM address
+ space are sent to the other 'disabled' DFA port.
+ SW RESTRICTION: If LPP_ENA=1, then only one DFA port
+ may be enabled for RLDRAM accesses (ie: ENA_P0 and
+ ENA_P1 CAN NEVER BOTH be set).
+ NOTE: This bit is used to allow PP-Core LLM accesses to a
+ disabled port, such that each port can be sequentially
+ addressed (ie: disable LW address interleaving).
+ Enabling this bit allows BOTH PORTs to be active and
+ sequentially addressable. The single port that is
+ enabled(ENA_Px) will respond to the low-512MB LLM address
+ space, and the other 'disabled' port will respond to the
+ high-512MB LLM address space.
+ Example usage:
+ - DFA RLD0 pins used for TCAM-FPGA(CP2 accesses)
+ - DFA RLD1 pins used for RLDRAM (DTE/CP2 accesses).
+ USAGE NOTE:
+ If LPP_ENA=1 and SW DOES NOT initialize the disabled port
+ (ie: INIT_Px=0->1), then refreshes and the HW init
+ sequence WILL NOT occur for the disabled port.
+ If LPP_ENA=1 and SW does initialize the disabled port
+ (INIT_Px=0->1 with ENA_Px=0), then refreshes and
+ the HW init sequence WILL occur to the disabled port. */
+ uint64_t bunk_init : 2; /**< Controls the CS_N[1:0] during a) a HW Initialization
+ sequence (triggered by DFA_MEMCFG0[INIT_Px]) or
+ b) during a normal refresh sequence. If
+ the BNK_INIT[x]=1, the corresponding CS_N[x] is driven.
+ NOTE: This is required for DRAM used in a
+ clamshell configuration, since the address lines
+ carry Mode Register write data that is unique
+ per bunk(or clam). In a clamshell configuration,
+ the N3K A[x] pin may be tied into Clam#0's A[x]
+ and also into Clam#1's 'mirrored' address bit A[y]
+ (eg: Clam0 sees A[5] and Clam1 sees A[15]).
+ To support clamshell designs, SW must initiate
+ two separate HW init sequences for the two bunks
+ (or clams). Before each HW init sequence is triggered,
+ SW must preload the DFA_MEMRLD[22:0] with the data
+ that will be driven onto the A[22:0] wires during
+ an MRS mode register write.
+ NOTE: After the final HW initialization sequence has
+ been triggered, SW must wait 64K eclks before writing
+ the BUNK_INIT[1:0] field = 2'b11 (so that CS_N[1:0] is
+ driven during refresh sequences in normal operation).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t init_p0 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#0 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Set up the DFA_MEMCFG0[CLKDIV] ratio for intended
+ RLDRAM operation.
+ [legal values 0: DIV2 2: DIV3 3: DIV4]
+ 2) Write a '1' into BOTH the DFA_MEM_CFG0[RLDCK_RST]
+ and DFA_MEM_CFG0[RLDQCK90_RST] field at
+ the SAME TIME. This step puts all three DLLs in
+ SW reset (RLDCK, RLDCK90, RLDQK90 DLLs).
+ 3) Write a '0' into the DFA_MEM_CFG0[RLDCK_RST] field.
+ This step takes the RLDCK DLL out of soft-reset so
+ that the DLL can generate the RLDx_CK_H/L clock pins.
+ 4) Wait 1ms (for RLDCK DLL to achieve lock)
+ 5) Write a '0' into DFA_MEM_CFG0[RLDQCK90_RST] field.
+ This step takes the RLDCK90 DLL AND RLDQK90 DLL out
+ of soft-reset.
+ 6) Wait 1ms (for RLDCK90/RLDQK90 DLLs to achieve lock)
+ 7) Enable memory port(s): ENA_P0=1/ENA_P1=1
+ 8) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ - - - - - Hardware Initialization Sequence - - - - -
+ 9) Setup the DFA_MEMCFG0[BUNK_INIT] for the bunk(s)
+ intended to be initialized.
+ 10) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+ sequence to that 'specific' port.
+ 11) Wait (DFA_MEMCFG0[CLKDIV] * 32K) eclk cycles.
+ [to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers]
+ - - - - - Hardware Initialization Sequence - - - - -
+ 12) Write the DFA_MEMCFG0[BUNK_INIT]=3 to enable
+ refreshes to BOTH bunks.
+ NOTE: In some cases (where the address wires are routed
+ differently between the front and back 'bunks'),
+ SW will need to use DFA_MEMCFG0[BUNK_INIT] bits to
+ control the Hardware initialization sequence for a
+ 'specific bunk'. In these cases, SW would setup the
+ BUNK_INIT and repeat Steps \#9-11 for each bunk/port.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t init_p1 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#1 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Set up the DFA_MEMCFG0[CLKDIV] ratio for intended
+ RLDRAM operation.
+ [legal values 0: DIV2 2: DIV3 3: DIV4]
+ 2) Write a '1' into BOTH the DFA_MEM_CFG0[RLDCK_RST]
+ and DFA_MEM_CFG0[RLDQCK90_RST] field at
+ the SAME TIME. This step puts all three DLLs in
+ SW reset (RLDCK, RLDCK90, RLDQK90 DLLs).
+ 3) Write a '0' into the DFA_MEM_CFG0[RLDCK_RST] field.
+ This step takes the RLDCK DLL out of soft-reset so
+ that the DLL can generate the RLDx_CK_H/L clock pins.
+ 4) Wait 1ms (for RLDCK DLL to achieve lock)
+ 5) Write a '0' into DFA_MEM_CFG0[RLDQCK90_RST] field.
+ This step takes the RLDCK90 DLL AND RLDQK90 DLL out
+ of soft-reset.
+ 6) Wait 1ms (for RLDCK90/RLDQK90 DLLs to achieve lock)
+ 7) Enable memory port(s) ENA_P0=1/ENA_P1=1
+ 8) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ - - - - - Hardware Initialization Sequence - - - - -
+ 9) Setup the DFA_MEMCFG0[BUNK_INIT] for the bunk(s)
+ intended to be initialized.
+ 10) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+ sequence to that 'specific' port.
+ 11) Wait (DFA_MEMCFG0[CLKDIV] * 32K) eclk cycles.
+ [to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers]
+ - - - - - Hardware Initialization Sequence - - - - -
+ 12) Write the DFA_MEMCFG0[BUNK_INIT]=3 to enable
+ refreshes to BOTH bunks.
+ NOTE: In some cases (where the address wires are routed
+ differently between the front and back 'bunks'),
+ SW will need to use DFA_MEMCFG0[BUNK_INIT] bits to
+ control the Hardware initialization sequence for a
+ 'specific bunk'. In these cases, SW would setup the
+ BUNK_INIT and repeat Steps \#9-11 for each bunk/port.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
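+ /* Illustrative usage (not part of the SDK sources): a sketch of steps 1-12
+ * of the INIT_P0/INIT_P1 hardware init sequence above, for port #0 with
+ * both bunks initialized together (non-clamshell). Assumes cvmx_read_csr(),
+ * cvmx_write_csr(), a cvmx_wait_usec()-style delay helper, and a
+ * CVMX_DFA_MEMCFG0 address macro; all values are hypothetical.
+ *
+ *   cvmx_dfa_memcfg0_t cfg;
+ *   cfg.u64 = cvmx_read_csr(CVMX_DFA_MEMCFG0);
+ *   cfg.s.clkdiv = 0;                             // 1) core clock /2
+ *   cfg.s.rldck_rst = 1; cfg.s.rldqck90_rst = 1;  // 2) all DLLs in SW reset
+ *   cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
+ *   cfg.s.rldck_rst = 0;                          // 3) release RLDCK DLL
+ *   cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
+ *   cvmx_wait_usec(1000);                         // 4) wait for DLL lock
+ *   cfg.s.rldqck90_rst = 0;                       // 5) release 90-degree DLLs
+ *   cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
+ *   cvmx_wait_usec(1000);                         // 6) wait for DLL lock
+ *   cfg.s.ena_p0 = 1;                             // 7) enable port #0
+ *   cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
+ *   cvmx_wait_usec(100);                          // 8) stable clock to RLDRAMs
+ *   cfg.s.bunk_init = 3;                          // 9) drive CS_N[1:0]
+ *   cfg.s.init_p0 = 1;                            // 10) trigger the HW init
+ *   cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
+ *   // 11) wait CLKDIV*32K eclks; 12) leave BUNK_INIT=3 for normal refreshes
+ */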
+ uint64_t r2r_pbunk : 1; /**< When enabled, an additional command bubble is inserted
+ if back to back reads are issued to different physical
+ bunks. This is to avoid DQ data bus collisions when
+ references cross between physical bunks.
+ [NOTE: the physical bunk address boundary is determined
+ by the PBUNK bit].
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t pbunk : 3; /**< Physical Bunk address bit pointer.
+ Specifies which address bit within the Longword
+ Memory address MA[23:0] is used to determine the
+ chip selects.
+ [RLD_CS0_N corresponds to physical bunk \#0, and
+ RLD_CS1_N corresponds to physical bunk \#1].
+ - 000: CS0_N = MA[19]/CS1_N = !MA[19]
+ - 001: CS0_N = MA[20]/CS1_N = !MA[20]
+ - 010: CS0_N = MA[21]/CS1_N = !MA[21]
+ - 011: CS0_N = MA[22]/CS1_N = !MA[22]
+ - 100: CS0_N = MA[23]/CS1_N = !MA[23]
+ - 101-111: CS0_N = 0 /CS1_N = 1
+ Example(s):
+ To build out a 128MB DFA memory, 4x 32Mx9
+ parts could be used to fill out TWO physical
+ bunks (clamshell configuration). Each (of the
+ two) physical bunks contains 2x 32Mx9 = 16Mx36.
+ Each RLDRAM device also contains 8 internal banks,
+ therefore the memory Address is 16M/8banks = 2M
+ addresses/bunk (2^21). In this case, MA[21] would
+ select the physical bunk.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t blen : 1; /**< Device Burst Length (0=2-burst/1=4-burst)
+ NOTE: RLDRAM-II MUST USE BLEN=0(2-burst) */
+ uint64_t bprch : 2; /**< Tristate Enable (back porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable back porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t fprch : 2; /**< Tristate Enable (front porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable front porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t wr_dly : 4; /**< Write->Read CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from write to read. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II(BL2): (TBL=1)
+ WR_DLY = ROUND_UP[((TWL+TBL)*2 - TSKW + FPRCH) / 2] - TRL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the WR_DLY 'may' be tuned down(-1) if bus fight
+ on W->R transitions is not pronounced. */
+ uint64_t rw_dly : 4; /**< Read->Write CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from read to write. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II(BL2): (TBL=1)
+ RW_DLY = ROUND_UP[((TRL+TBL)*2 + TSKW + BPRCH+2)/2] - TWL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the RW_DLY 'may' be tuned down(-1) if bus fight
+ on R->W transitions is not pronounced. */
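+ /* Worked example (illustrative only): for hypothetical RLDRAM-II timing
+ * parameters TRL=5, TWL=6, TBL=1, TSKW=1, FPRCH=1 and BPRCH=1, the two
+ * formulas above give
+ *   WR_DLY = ROUND_UP[((6+1)*2 - 1 + 1)/2] - 5 + 1 = 7 - 4 = 3
+ *   RW_DLY = ROUND_UP[((5+1)*2 + 1 + 1 + 2)/2] - 6 + 1 = 8 - 5 = 3
+ * Actual parameters must come from the device data sheet and board skew. */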
+ uint64_t sil_lat : 2; /**< Silo Latency (\#dclks): On reads, determines how many
+ additional dclks to wait (on top of tRL+1) before
+ pulling data out of the padring silos used for time
+ domain boundary crossing.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t mtype : 1; /**< FCRAM-II Memory Type
+ *** CN58XX UNSUPPORTED *** */
+ uint64_t reserved_2_2 : 1;
+ uint64_t ena_p0 : 1; /**< Enable DFA RLDRAM Port#0
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#0.
+ NOTE: a customer is at
+ liberty to enable either Port#0 or Port#1 or both.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t ena_p1 : 1; /**< Enable DFA RLDRAM Port#1
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#1.
+ NOTE: a customer is at
+ liberty to enable either Port#0 or Port#1 or both.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
+#else
+ uint64_t ena_p1 : 1;
+ uint64_t ena_p0 : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t mtype : 1;
+ uint64_t sil_lat : 2;
+ uint64_t rw_dly : 4;
+ uint64_t wr_dly : 4;
+ uint64_t fprch : 2;
+ uint64_t bprch : 2;
+ uint64_t blen : 1;
+ uint64_t pbunk : 3;
+ uint64_t r2r_pbunk : 1;
+ uint64_t init_p1 : 1;
+ uint64_t init_p0 : 1;
+ uint64_t bunk_init : 2;
+ uint64_t lpp_ena : 1;
+ uint64_t clkdiv : 2;
+ uint64_t rldck_rst : 1;
+ uint64_t rldqck90_rst : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_dfa_memcfg0_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t lpp_ena : 1; /**< PP Linear Port Addressing Mode Enable
+ When enabled, PP-core LLM accesses to the lower-512MB
+ LLM address space are sent to the single DFA port
+ which is enabled. NOTE: If LPP_ENA=1, only
+ one DFA RLDRAM port may be enabled for RLDRAM accesses
+ (ie: ENA_P0 and ENA_P1 CAN NEVER BOTH be set).
+ PP-core LLM accesses to the upper-512MB LLM address
+ space are sent to the other 'disabled' DFA port.
+ SW RESTRICTION: If LPP_ENA=1, then only one DFA port
+ may be enabled for RLDRAM accesses (ie: ENA_P0 and
+ ENA_P1 CAN NEVER BOTH be set).
+ NOTE: This bit is used to allow PP-Core LLM accesses to a
+ disabled port, such that each port can be sequentially
+ addressed (ie: disable LW address interleaving).
+ Enabling this bit allows BOTH PORTs to be active and
+ sequentially addressable. The single port that is
+ enabled(ENA_Px) will respond to the low-512MB LLM address
+ space, and the other 'disabled' port will respond to the
+ high-512MB LLM address space.
+ Example usage:
+ - DFA RLD0 pins used for TCAM-FPGA(CP2 accesses)
+ - DFA RLD1 pins used for RLDRAM (DTE/CP2 accesses).
+ USAGE NOTE:
+ If LPP_ENA=1 and SW DOES NOT initialize the disabled port
+ (ie: INIT_Px=0->1), then refreshes and the HW init
+ sequence WILL NOT occur for the disabled port.
+ If LPP_ENA=1 and SW does initialize the disabled port
+ (INIT_Px=0->1 with ENA_Px=0), then refreshes and
+ the HW init sequence WILL occur to the disabled port. */
+ uint64_t bunk_init : 2; /**< Controls the CS_N[1:0] during a) a HW Initialization
+ sequence (triggered by DFA_MEMCFG0[INIT_Px]) or
+ b) during a normal refresh sequence. If
+ the BNK_INIT[x]=1, the corresponding CS_N[x] is driven.
+ NOTE: This is required for DRAM used in a
+ clamshell configuration, since the address lines
+ carry Mode Register write data that is unique
+ per bunk(or clam). In a clamshell configuration,
+ the N3K A[x] pin may be tied into Clam#0's A[x]
+ and also into Clam#1's 'mirrored' address bit A[y]
+ (eg: Clam0 sees A[5] and Clam1 sees A[15]).
+ To support clamshell designs, SW must initiate
+ two separate HW init sequences for the two bunks
+ (or clams). Before each HW init sequence is triggered,
+ SW must preload the DFA_MEMRLD[22:0] with the data
+ that will be driven onto the A[22:0] wires during
+ an MRS mode register write.
+ NOTE: After the final HW initialization sequence has
+ been triggered, SW must wait 64K eclks before writing
+ the BUNK_INIT[1:0] field = 2'b11 (so that CS_N[1:0] is
+ driven during refresh sequences in normal operation).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For MTYPE=1(FCRAM) Mode, each bunk MUST BE
+ initialized independently. In other words, a HW init
+ must be done for Bunk#0, and then another HW init
+ must be done for Bunk#1 at power-on. */
+ uint64_t init_p0 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#0 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Enable memory port(s):
+ a) ENA_P1=1 (single port in pass 1) OR
+ b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1)
+ 2) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ 3) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+ sequence.
+ NOTE: After writing a '1', SW must wait 64K eclk
+ cycles to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t init_p1 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#1 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Enable memory port(s):
+ a) ENA_P1=1 (single port in pass 1) OR
+ b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1)
+ 2) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ 3) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+ sequence.
+ NOTE: After writing a '1', SW must wait 64K eclk
+ cycles to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
+ uint64_t r2r_pbunk : 1; /**< When enabled, an additional command bubble is inserted
+ if back to back reads are issued to different physical
+ bunks. This is to avoid DQ data bus collisions when
+ references cross between physical bunks.
+ [NOTE: the physical bunk address boundary is determined
+ by the PBUNK bit].
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ When MTYPE=1(FCRAM)/BLEN=0(2-burst), R2R_PBUNK SHOULD BE
+ ZERO(for optimal performance). However, if electrically,
+ DQ-sharing becomes a power/heat issue, then R2R_PBUNK
+ should be set (but at a cost to performance (1/2 BW). */
+ uint64_t pbunk : 3; /**< Physical Bunk address bit pointer.
+ Specifies which address bit within the Longword
+ Memory address MA[23:0] is used to determine the
+ chip selects.
+ [RLD_CS0_N corresponds to physical bunk \#0, and
+ RLD_CS1_N corresponds to physical bunk \#1].
+ - 000: CS0_N = MA[19]/CS1_N = !MA[19]
+ - 001: CS0_N = MA[20]/CS1_N = !MA[20]
+ - 010: CS0_N = MA[21]/CS1_N = !MA[21]
+ - 011: CS0_N = MA[22]/CS1_N = !MA[22]
+ - 100: CS0_N = MA[23]/CS1_N = !MA[23]
+ - 101-111: CS0_N = 0 /CS1_N = 1
+ Example(s):
+ To build out a 128MB DFA memory, 4x 32Mx9
+ parts could be used to fill out TWO physical
+ bunks (clamshell configuration). Each (of the
+ two) physical bunks contains 2x 32Mx9 = 16Mx36.
+ Each RLDRAM device also contains 8 internal banks,
+ therefore the memory Address is 16M/8banks = 2M
+ addresses/bunk (2^21). In this case, MA[21] would
+ select the physical bunk.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), a
+ "Redundant Bunk" scheme is employed to provide the
+ highest overall performance (1 Req/ MCLK cycle).
+ In this mode, it's imperative that SW set the PBUNK
+ field +1 'above' the highest address bit. (such that
+ the PBUNK extracted from the address will always be
+ zero). In this mode, the CS_N[1:0] pins are driven
+ to each redundant bunk based on a TDM scheme:
+ [MCLK-EVEN=Bunk#0/MCLK-ODD=Bunk#1]. */
+ uint64_t blen : 1; /**< Device Burst Length (0=2-burst/1=4-burst)
+ When BLEN=0(BL2), all QW reads/writes from CP2 are
+ decomposed into 2 separate BL2(LW) requests to the
+ Low-Latency memory.
+ When BLEN=1(BL4), a LW request (from CP2 or NCB) is
+ treated as 1 BL4(QW) request to the low latency memory.
+ NOTE: QW refers to a 64-bit LLM Load/Store (initiated
+ by a processor core). LW refers to a 36-bit load/store.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization before the DFA LLM
+ (low latency memory) is used.
+ NOTE: MTYPE=0(RLDRAM-II) MUST USE BLEN=0(2-burst)
+ NOTE: MTYPE=1(FCRAM)/BLEN=0(BL2) requires a
+ multi-bunk(clam) board design.
+ NOTE: If MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=1(BL4),
+ SW SHOULD use CP2 QW read/write requests (for
+ optimal low-latency bus performance).
+ [LW length read/write requests(in BL4 mode) use 50%
+ of the available bus bandwidth]
+ NOTE: MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=0(BL2) can only
+ be used with FCRAM-II devices which support BL2 mode
+ (see: Toshiba FCRAM-II, where DQ tristate after 2 data
+ transfers).
+ NOTE: MTYPE=1(FCRAM)/FCRAM2P=1(II+) does not support LW
+ write requests (FCRAM-II+ device specification has removed
+ the variable write mask function from the devices).
+ As such, if this mode is used, SW must be careful to
+ issue only PP-CP2 QW write requests. */
+ uint64_t bprch : 2; /**< Tristate Enable (back porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable back porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t fprch : 2; /**< Tristate Enable (front porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable front porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t wr_dly : 4; /**< Write->Read CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from write to read. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II(BL2): (TBL=1)
+ For FCRAM-II (BL4): (TBL=2)
+ For FCRAM-II (BL2 grepl=1x ONLY): (TBL=1)
+ For FCRAM-II (BL2 grepl>=2x): (TBL=3)
+ NOTE: When MTYPE=1(FCRAM-II) BLEN=0(BL2 Mode),
+ grepl>=2x, writes require redundant bunk writes
+ which require an additional 2 cycles before slotting
+ the next read.
+ WR_DLY = ROUND_UP[((TWL+TBL)*2 - TSKW + FPRCH) / 2] - TRL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the WR_DLY 'may' be tuned down(-1) if bus fight
+ on W->R transitions is not pronounced. */
+ uint64_t rw_dly : 4; /**< Read->Write CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from read to write. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II/FCRAM-II (BL2): (TBL=1)
+ For FCRAM-II (BL4): (TBL=2)
+ RW_DLY = ROUND_UP[((TRL+TBL)*2 + TSKW + BPRCH+2)/2] - TWL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the RW_DLY 'may' be tuned down(-1) if bus fight
+ on R->W transitions is not pronounced. */
+ uint64_t sil_lat : 2; /**< Silo Latency (\#dclks): On reads, determines how many
+ additional dclks to wait (on top of tRL+1) before
+ pulling data out of the padring silos used for time
+ domain boundary crossing.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t mtype : 1; /**< Memory Type (0=RLDRAM-II/1=Network DRAM-II/FCRAM)
+ NOTE: N3K-P1 only supports RLDRAM-II
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), only the
+ "unidirectional DS/QS" mode is supported. (see FCRAM
+ data sheet EMRS[A6:A5]=SS(Strobe Select) register
+ definition.) [In FCRAM 2-burst mode, we use FCRAM
+ in a clamshell configuration such that clam0 is
+ addressed independently of clam1, and DQ is shared
+ for optimal performance. As such it's imperative that
+ the QS are conditionally received (and are NOT
+ free-running), as the N3K receive data capture silos
+ OR the clam0/1 QS strobes.]
+ NOTE: If this bit is SET, the ASX0/1
+ ASX_RLD_FCRAM_MODE[MODE] bit(s) should also be SET
+ in order for the RLD0/1-PHY(s) to support FCRAM devices. */
+ uint64_t reserved_2_2 : 1;
+ uint64_t ena_p0 : 1; /**< Enable DFA RLDRAM Port#0
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#0.
+ NOTE: For N3K-P1, to enable Port#0(2nd port),
+ Port#1 MUST ALSO be enabled.
+ NOTE: For N3K-P2, in single port mode, a customer is at
+ liberty to enable either Port#0 or Port#1.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t ena_p1 : 1; /**< Enable DFA RLDRAM Port#1
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#1.
+ NOTE: For N3K-P1, if the customer wishes to use a
+ single port, s/he must enable Port#1 (and not Port#0).
+ NOTE: For N3K-P2, in single port mode, a customer is at
+ liberty to enable either Port#0 or Port#1.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
+#else
+ uint64_t ena_p1 : 1;
+ uint64_t ena_p0 : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t mtype : 1;
+ uint64_t sil_lat : 2;
+ uint64_t rw_dly : 4;
+ uint64_t wr_dly : 4;
+ uint64_t fprch : 2;
+ uint64_t bprch : 2;
+ uint64_t blen : 1;
+ uint64_t pbunk : 3;
+ uint64_t r2r_pbunk : 1;
+ uint64_t init_p1 : 1;
+ uint64_t init_p0 : 1;
+ uint64_t bunk_init : 2;
+ uint64_t lpp_ena : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn38xx;
+ struct cvmx_dfa_memcfg0_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t bunk_init : 2; /**< Controls the CS_N[1:0] during a) a HW Initialization
+ sequence (triggered by DFA_MEMCFG0[INIT_Px]) or
+ b) during a normal refresh sequence. If
+ BUNK_INIT[x]=1, the corresponding CS_N[x] is driven.
+ NOTE: This is required for DRAM used in a
+ clamshell configuration, since the address lines
+ carry Mode Register write data that is unique
+ per bunk (or clam). In a clamshell configuration,
+ the N3K A[x] pin may be tied into Clam#0's A[x]
+ and also into Clam#1's 'mirrored' address bit A[y]
+ (eg: Clam0 sees A[5] and Clam1 sees A[15]).
+ To support clamshell designs, SW must initiate
+ two separate HW init sequences for the two bunks
+ (or clams). Before each HW init sequence is triggered,
+ SW must preload the DFA_MEMRLD[22:0] with the data
+ that will be driven onto the A[22:0] wires during
+ an MRS mode register write.
+ NOTE: After the final HW initialization sequence has
+ been triggered, SW must wait 64K eclks before writing
+ the BUNK_INIT[1:0] field = 3'b11 (so that CS_N[1:0] is
+ driven during refresh sequences in normal operation).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For MTYPE=1(FCRAM) Mode, each bunk MUST BE
+ initialized independently. In other words, a HW init
+ must be done for Bunk#0, and then another HW init
+ must be done for Bunk#1 at power-on. */
+ uint64_t init_p0 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#0 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Enable memory port(s):
+ a) ENA_P1=1 (single port in pass 1) OR
+ b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1)
+ 2) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ 3) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+ sequence.
+ NOTE: After writing a '1', SW must wait 64K eclk
+ cycles to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t init_p1 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#1 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Enable memory port(s):
+ a) ENA_P1=1 (single port in pass 1) OR
+ b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1)
+ 2) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ 3) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+ sequence.
+ NOTE: After writing a '1', SW must wait 64K eclk
+ cycles to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
+ uint64_t r2r_pbunk : 1; /**< When enabled, an additional command bubble is inserted
+ if back to back reads are issued to different physical
+ bunks. This is to avoid DQ data bus collisions when
+ references cross between physical bunks.
+ [NOTE: the physical bunk address boundary is determined
+ by the PBUNK bit].
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ When MTYPE=1(FCRAM)/BLEN=0(2-burst), R2R_PBUNK SHOULD BE
+ ZERO (for optimal performance). However, if, electrically,
+ DQ-sharing becomes a power/heat issue, then R2R_PBUNK
+ should be set (but at a cost to performance (1/2 BW)). */
+ uint64_t pbunk : 3; /**< Physical Bunk address bit pointer.
+ Specifies which address bit within the Longword
+ Memory address MA[23:0] is used to determine the
+ chip selects.
+ [RLD_CS0_N corresponds to physical bunk \#0, and
+ RLD_CS1_N corresponds to physical bunk \#1].
+ - 000: CS0_N = MA[19]/CS1_N = !MA[19]
+ - 001: CS0_N = MA[20]/CS1_N = !MA[20]
+ - 010: CS0_N = MA[21]/CS1_N = !MA[21]
+ - 011: CS0_N = MA[22]/CS1_N = !MA[22]
+ - 100: CS0_N = MA[23]/CS1_N = !MA[23]
+ - 101-111: CS0_N = 0 /CS1_N = 1
+ Example(s):
+ To build out a 128MB DFA memory, 4x 32Mx9
+ parts could be used to fill out TWO physical
+ bunks (clamshell configuration). Each (of the
+ two) physical bunks contains 2x 32Mx9 = 16Mx36.
+ Each RLDRAM device also contains 8 internal banks,
+ therefore the memory Address is 16M/8banks = 2M
+ addresses/bunk (2^21). In this case, MA[21] would
+ select the physical bunk.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), a
+ "Redundant Bunk" scheme is employed to provide the
+ highest overall performance (1 Req/ MCLK cycle).
+ In this mode, it's imperative that SW set the PBUNK
+ field +1 'above' the highest address bit (such that
+ the PBUNK extracted from the address will always be
+ zero). In this mode, the CS_N[1:0] pins are driven
+ to each redundant bunk based on a TDM scheme:
+ [MCLK-EVEN=Bunk#0/MCLK-ODD=Bunk#1]. */
+ uint64_t blen : 1; /**< Device Burst Length (0=2-burst/1=4-burst)
+ When BLEN=0(BL2), all QW reads/writes from CP2 are
+ decomposed into 2 separate BL2(LW) requests to the
+ Low-Latency memory.
+ When BLEN=1(BL4), a LW request (from CP2 or NCB) is
+ treated as 1 BL4(QW) request to the low latency memory.
+ NOTE: QW refers to a 64-bit LLM Load/Store (initiated
+ by a processor core). LW refers to a 36-bit load/store.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization before the DFA LLM
+ (low latency memory) is used.
+ NOTE: MTYPE=0(RLDRAM-II) MUST USE BLEN=0(2-burst)
+ NOTE: MTYPE=1(FCRAM)/BLEN=0(BL2) requires a
+ multi-bunk(clam) board design.
+ NOTE: If MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=1(BL4),
+ SW SHOULD use CP2 QW read/write requests (for
+ optimal low-latency bus performance).
+ [LW length read/write requests(in BL4 mode) use 50%
+ of the available bus bandwidth]
+ NOTE: MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=0(BL2) can only
+ be used with FCRAM-II devices which support BL2 mode
+ (see: Toshiba FCRAM-II, where DQ tristate after 2 data
+ transfers).
+ NOTE: MTYPE=1(FCRAM)/FCRAM2P=1(II+) does not support LW
+ write requests (FCRAM-II+ device specification has removed
+ the variable write mask function from the devices).
+ As such, if this mode is used, SW must be careful to
+ issue only PP-CP2 QW write requests. */
+ uint64_t bprch : 2; /**< Tristate Enable (back porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable back porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t fprch : 2; /**< Tristate Enable (front porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable front porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t wr_dly : 4; /**< Write->Read CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from write to read. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II(BL2): (TBL=1)
+ For FCRAM-II (BL4): (TBL=2)
+ For FCRAM-II (BL2 grepl=1x ONLY): (TBL=1)
+ For FCRAM-II (BL2 grepl>=2x): (TBL=3)
+ NOTE: When MTYPE=1(FCRAM-II) BLEN=0(BL2 Mode),
+ grepl>=2x, writes require redundant bunk writes
+ which require an additional 2 cycles before slotting
+ the next read.
+ WR_DLY = ROUND_UP[((TWL+TBL)*2 - TSKW + FPRCH) / 2] - TRL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the WR_DLY 'may' be tuned down(-1) if bus fight
+ on W->R transitions is not pronounced. */
+ uint64_t rw_dly : 4; /**< Read->Write CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from read to write. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II/FCRAM-II (BL2): (TBL=1)
+ For FCRAM-II (BL4): (TBL=2)
+ RW_DLY = ROUND_UP[((TRL+TBL)*2 + TSKW + BPRCH+2)/2] - TWL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the RW_DLY 'may' be tuned down(-1) if bus fight
+ on R->W transitions is not pronounced. */
+ uint64_t sil_lat : 2; /**< Silo Latency (\#dclks): On reads, determines how many
+ additional dclks to wait (on top of tRL+1) before
+ pulling data out of the padring silos used for time
+ domain boundary crossing.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t mtype : 1; /**< Memory Type (0=RLDRAM-II/1=Network DRAM-II/FCRAM)
+ NOTE: N3K-P1 only supports RLDRAM-II
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), only the
+ "unidirectional DS/QS" mode is supported (see the FCRAM
+ data sheet EMRS[A6:A5]=SS(Strobe Select) register
+ definition). [In FCRAM 2-burst mode, we use FCRAM
+ in a clamshell configuration such that clam0 is
+ addressed independently of clam1, and DQ is shared
+ for optimal performance. As such, it's imperative that
+ the QS are conditionally received (and are NOT
+ free-running), as the N3K receive data capture silos
+ OR the clam0/1 QS strobes.]
+ NOTE: If this bit is SET, the ASX0/1
+ ASX_RLD_FCRAM_MODE[MODE] bit(s) should also be SET
+ in order for the RLD0/1-PHY(s) to support FCRAM devices. */
+ uint64_t reserved_2_2 : 1;
+ uint64_t ena_p0 : 1; /**< Enable DFA RLDRAM Port#0
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#0.
+ NOTE: For N3K-P1, to enable Port#0(2nd port),
+ Port#1 MUST ALSO be enabled.
+ NOTE: For N3K-P2, in single port mode, a customer is at
+ liberty to enable either Port#0 or Port#1.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t ena_p1 : 1; /**< Enable DFA RLDRAM Port#1
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#1.
+ NOTE: For N3K-P1, if the customer wishes to use a
+ single port, s/he must enable Port#1 (and not Port#0).
+ NOTE: For N3K-P2, in single port mode, a customer is at
+ liberty to enable either Port#0 or Port#1.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
+#else
+ uint64_t ena_p1 : 1;
+ uint64_t ena_p0 : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t mtype : 1;
+ uint64_t sil_lat : 2;
+ uint64_t rw_dly : 4;
+ uint64_t wr_dly : 4;
+ uint64_t fprch : 2;
+ uint64_t bprch : 2;
+ uint64_t blen : 1;
+ uint64_t pbunk : 3;
+ uint64_t r2r_pbunk : 1;
+ uint64_t init_p1 : 1;
+ uint64_t init_p0 : 1;
+ uint64_t bunk_init : 2;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn38xxp2;
+ struct cvmx_dfa_memcfg0_s cn58xx;
+ struct cvmx_dfa_memcfg0_s cn58xxp1;
+};
+typedef union cvmx_dfa_memcfg0 cvmx_dfa_memcfg0_t;
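+
+/* A minimal sketch (not part of the original SDK sources) of the WR_DLY and
+ * RW_DLY formulas documented in the DFA_MEMCFG0 field comments above. The
+ * helper names are illustrative; the timing parameters (TWL, TBL, TSKW,
+ * FPRCH, BPRCH, TRL) are the ones named in the comment text. */
+static inline int dfa_round_up_div2(int v)
+{
+	return (v + 1) / 2;	/* ROUND_UP[v/2] for non-negative v */
+}
+
+static inline int dfa_memcfg0_wr_dly(int twl, int tbl, int tskw, int fprch,
+				     int trl)
+{
+	/* WR_DLY = ROUND_UP[((TWL+TBL)*2 - TSKW + FPRCH) / 2] - TRL + 1 */
+	return dfa_round_up_div2((twl + tbl) * 2 - tskw + fprch) - trl + 1;
+}
+
+static inline int dfa_memcfg0_rw_dly(int trl, int tbl, int tskw, int bprch,
+				     int twl)
+{
+	/* RW_DLY = ROUND_UP[((TRL+TBL)*2 + TSKW + BPRCH+2) / 2] - TWL + 1 */
+	return dfa_round_up_div2((trl + tbl) * 2 + tskw + bprch + 2) - twl + 1;
+}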
+
+/**
+ * cvmx_dfa_memcfg1
+ *
+ * DFA_MEMCFG1 = RLDRAM Memory Timing Configuration
+ *
+ * Description:
+ */
+union cvmx_dfa_memcfg1 {
+ uint64_t u64;
+ struct cvmx_dfa_memcfg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ref_intlo : 9; /**< Burst Refresh Interval[8:0] (\#dclks)
+ For finer refresh interval granularity control.
+ This field provides an additional level of granularity
+ for the refresh interval. It specifies the additional
+ \#dclks [0...511] to be added to the REF_INT[3:0] field.
+ For RLDRAM-II: For dclk(400MHz=2.5ns):
+ Example: 64K AREF cycles required within tREF=32ms
+ trefint = tREF(ms)/(64K cycles/8banks)
+ = 32ms/8K = 3.9us = 3900ns
+ REF_INT[3:0] = ROUND_DOWN[(trefint/dclk)/512]
+ = ROUND_DOWN[(3900/2.5)/512]
+ = 3
+ REF_INTLO[8:0] = MOD[(trefint/dclk)/512]
+ = MOD[(3900/2.5)/512]
+ = 24
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t aref_ena : 1; /**< Auto Refresh Cycle Enable
+ INTERNAL USE ONLY:
+ NOTE: This mode bit is ONLY intended to be used by
+ low-level power-on initialization routines in the
+ event that the hardware initialization routine
+ does not work. It allows SW to create AREF
+ commands on the RLDRAM bus directly.
+ When this bit is set, ALL RLDRAM writes (issued by
+ a PP through the NCB or CP2) are converted to AREF
+ commands on the RLDRAM bus. The write-address is
+ presented on the A[20:0]/BA[2:0] pins (for which
+ the RLDRAM only interprets BA[2:0]).
+ When this bit is set, only writes are allowed
+ and MUST use grepl=0 (1x).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: MRS_ENA and AREF_ENA are mutually exclusive
+ (SW can set one or the other, but never both!)
+ NOTE: AREF commands generated using this method target
+ the 'addressed' bunk. */
+ uint64_t mrs_ena : 1; /**< Mode Register Set Cycle Enable
+ INTERNAL USE ONLY:
+ NOTE: This mode bit is ONLY intended to be used by
+ low-level power-on initialization routines in the
+ event that the hardware initialization routine
+ does not work. It allows SW to create MRS
+ commands on the RLDRAM bus directly.
+ When this bit is set, ALL RLDRAM writes (issued by
+ a PP through the NCB or CP2) are converted to MRS
+ commands on the RLDRAM bus. The write-address is
+ presented on the A[20:0]/BA[2:0] pins (for which
+ the RLDRAM only interprets A[17:0]).
+ When this bit is set, only writes are allowed
+ and MUST use grepl=0 (1x).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: MRS_ENA and AREF_ENA are mutually exclusive
+ (SW can set one or the other, but never both!)
+ NOTE: MRS commands generated using this method target
+ the 'addressed' bunk. */
+ uint64_t tmrsc : 3; /**< Mode Register Set Cycle Time (represented in \#mclks)
+ - 000-001: RESERVED
+ - 010: tMRSC = 2 mclks
+ - 011: tMRSC = 3 mclks
+ - ...
+ - 111: tMRSC = 7 mclks
+ NOTE: The device tMRSC parameter is a function of CL
+ (which is not known during HW initialization). It's
+ recommended to load the tMRSC(MAX) value to avoid
+ timing violations.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t trc : 4; /**< Row Cycle Time (represented in \#mclks)
+ see also: DFA_MEMRLD[RLCFG] field which must
+ correspond with tRL/tWL parameter(s).
+ - 0000-0010: RESERVED
+ - 0011: tRC = 3 mclks
+ - 0100: tRC = 4 mclks
+ - 0101: tRC = 5 mclks
+ - 0110: tRC = 6 mclks
+ - 0111: tRC = 7 mclks
+ - 1000: tRC = 8 mclks
+ - 1001: tRC = 9 mclks
+ - 1010-1111: RESERVED
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t twl : 4; /**< Write Latency (represented in \#mclks)
+ see also: DFA_MEMRLD[RLCFG] field which must
+ correspond with tRL/tWL parameter(s).
+ - 0000-0001: RESERVED
+ - 0010: Write Latency (WL=2.0 mclk)
+ - 0011: Write Latency (WL=3.0 mclks)
+ - 0100: Write Latency (WL=4.0 mclks)
+ - 0101: Write Latency (WL=5.0 mclks)
+ - 0110: Write Latency (WL=6.0 mclks)
+ - 0111: Write Latency (WL=7.0 mclks)
+ - 1000: Write Latency (WL=8.0 mclks)
+ - 1001: Write Latency (WL=9.0 mclks)
+ - 1010: Write Latency (WL=10.0 mclks)
+ - 1011-1111: RESERVED
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t trl : 4; /**< Read Latency (represented in \#mclks)
+ see also: DFA_MEMRLD[RLCFG] field which must
+ correspond with tRL/tWL parameter(s).
+ - 0000-0010: RESERVED
+ - 0011: Read Latency = 3 mclks
+ - 0100: Read Latency = 4 mclks
+ - 0101: Read Latency = 5 mclks
+ - 0110: Read Latency = 6 mclks
+ - 0111: Read Latency = 7 mclks
+ - 1000: Read Latency = 8 mclks
+ - 1001: Read Latency = 9 mclks
+ - 1010: Read Latency = 10 mclks
+ - 1011-1111: RESERVED
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tskw : 2; /**< Board Skew (represented in \#dclks)
+ Represents additional board skew of DQ/DQS.
+ - 00: board-skew = 0 dclk
+ - 01: board-skew = 1 dclk
+ - 10: board-skew = 2 dclk
+ - 11: board-skew = 3 dclk
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t ref_int : 4; /**< Refresh Interval (represented in \#of 512 dclk
+ increments).
+ - 0000: RESERVED
+ - 0001: 1 * 512 = 512 dclks
+ - ...
+ - 1111: 15 * 512 = 7680 dclks
+ NOTE: For finer level of granularity, refer to
+ REF_INTLO[8:0] field.
+ For RLDRAM-II, each refresh interval will
+ generate a burst of 8 AREF commands, one to each of
+ 8 explicit banks (referenced using the RLD_BA[2:0]
+ pins).
+ Example: For mclk=200MHz/dclk(400MHz=2.5ns):
+ 64K AREF cycles required within tREF=32ms
+ trefint = tREF(ms)/(64K cycles/8banks)
+ = 32ms/8K = 3.9us = 3900ns
+ REF_INT = ROUND_DOWN[(trefint/dclk)/512]
+ = ROUND_DOWN[(3900/2.5)/512]
+ = 3
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t ref_int : 4;
+ uint64_t tskw : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t trl : 4;
+ uint64_t twl : 4;
+ uint64_t trc : 4;
+ uint64_t tmrsc : 3;
+ uint64_t mrs_ena : 1;
+ uint64_t aref_ena : 1;
+ uint64_t ref_intlo : 9;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_dfa_memcfg1_s cn38xx;
+ struct cvmx_dfa_memcfg1_s cn38xxp2;
+ struct cvmx_dfa_memcfg1_s cn58xx;
+ struct cvmx_dfa_memcfg1_s cn58xxp1;
+};
+typedef union cvmx_dfa_memcfg1 cvmx_dfa_memcfg1_t;
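+
+/* Sketch (not from the SDK) of the REF_INT/REF_INTLO arithmetic worked out
+ * in the DFA_MEMCFG1 comments above. Times are in picoseconds to keep the
+ * math integral; the documented RLDRAM-II example (trefint=3900ns,
+ * dclk=2.5ns) yields REF_INT=3 and REF_INTLO=24. */
+static inline void dfa_memcfg1_ref_interval(int trefint_ps, int dclk_ps,
+					    int *ref_int, int *ref_intlo)
+{
+	int dclks = trefint_ps / dclk_ps;	/* e.g. 3900000/2500 = 1560 */
+	*ref_int = dclks / 512;			/* ROUND_DOWN[...] = 3 */
+	*ref_intlo = dclks % 512;		/* MOD[...] = 24 */
+}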
+
+/**
+ * cvmx_dfa_memcfg2
+ *
+ * DFA_MEMCFG2 = DFA Memory Config Register \#2
+ * *** NOTE: Pass2 Addition
+ *
+ * Description: Additional Memory Configuration CSRs to support FCRAM-II/II+ and Network DRAM-II
+ */
+union cvmx_dfa_memcfg2 {
+ uint64_t u64;
+ struct cvmx_dfa_memcfg2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t dteclkdis : 1; /**< DFA DTE Clock Disable
+ When SET, the DFA clocks for DTE(thread engine)
+ operation are disabled.
+ NOTE: When SET, SW MUST NEVER issue ANY operations to
+ the DFA via the NCB Bus. All DFA Operations must be
+ issued solely through the CP2 interface.
+
+ NOTE: When DTECLKDIS=1, if CP2 Errors are encountered
+ (ie: CP2SBE, CP2DBE, CP2PERR), the DFA_MEMFADR CSR
+ does not reflect the failing address/ctl information. */
+ uint64_t silrst : 1; /**< LLM-PHY Silo Reset
+ Writing a '1' (when the previous value was a '0')
+ causes the LLM-PHY Silo read/write
+ pointers to be reset.
+ NOTE: SW MUST WAIT 400 dclks after the LAST HW Init
+ sequence was launched (ie: INIT_START 0->1 CSR write),
+ before the SILRST can be triggered (0->1). */
+ uint64_t trfc : 5; /**< FCRAM-II Refresh Interval
+ *** CN58XX UNSUPPORTED *** */
+ uint64_t refshort : 1; /**< FCRAM Short Refresh Mode
+ *** CN58XX UNSUPPORTED *** */
+ uint64_t ua_start : 2; /**< FCRAM-II Upper Address Start
+ *** CN58XX UNSUPPORTED *** */
+ uint64_t maxbnk : 1; /**< Maximum Banks per-device (used by the address mapper
+ when extracting address bits for the memory bank#.
+ - 0: 4 banks/device
+ - 1: 8 banks/device */
+ uint64_t fcram2p : 1; /**< FCRAM-II+ Mode Enable
+ *** CN58XX UNSUPPORTED *** */
+#else
+ uint64_t fcram2p : 1;
+ uint64_t maxbnk : 1;
+ uint64_t ua_start : 2;
+ uint64_t refshort : 1;
+ uint64_t trfc : 5;
+ uint64_t silrst : 1;
+ uint64_t dteclkdis : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_dfa_memcfg2_s cn38xx;
+ struct cvmx_dfa_memcfg2_s cn38xxp2;
+ struct cvmx_dfa_memcfg2_s cn58xx;
+ struct cvmx_dfa_memcfg2_s cn58xxp1;
+};
+typedef union cvmx_dfa_memcfg2 cvmx_dfa_memcfg2_t;
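+
+/* Minimal sketch (not part of the SDK) of programming the MAXBNK field per
+ * the comment above: 0 selects 4 banks/device, 1 selects 8 banks/device. */
+static inline uint64_t dfa_memcfg2_maxbnk(int banks_per_device)
+{
+	cvmx_dfa_memcfg2_t cfg;
+
+	cfg.u64 = 0;
+	cfg.s.maxbnk = (banks_per_device == 8) ? 1 : 0;
+	return cfg.u64;
+}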
+
+/**
+ * cvmx_dfa_memfadr
+ *
+ * DFA_MEMFADR = RLDRAM Failing Address/Control Register
+ *
+ * Description: DFA Memory Failing Address/Control Error Capture information
+ * This register contains useful information to help in isolating an RLDRAM memory failure.
+ * NOTE: The first detected SEC/DED/PERR failure is captured in DFA_MEMFADR, however, a DED or PERR (which is
+ * more severe) will always overwrite a SEC error. The user can 'infer' the source of the interrupt
+ * via the FSRC field.
+ * NOTE: If DFA_MEMCFG2[DTECLKDIS]=1, the contents of this register are UNDEFINED.
+ */
+union cvmx_dfa_memfadr {
+ uint64_t u64;
+ struct cvmx_dfa_memfadr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t maddr : 24; /**< Memory Address */
+#else
+ uint64_t maddr : 24;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_dfa_memfadr_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t fdst : 9; /**< Fill-Destination
+ FSRC[1:0] | FDST[8:0]
+ -------------+-------------------------------------
+ 0(NCB-DTE) | [fillstart,2'b0,WIDX(1),DMODE(1),DTE(4)]
+ 1(NCB-CSR) | [ncbSRC[8:0]]
+ 3(CP2-PP) | [2'b0,SIZE(1),INDEX(1),PP(4),FID(1)]
+ where:
+ DTE: DFA Thread Engine ID#
+ PP: Packet Processor ID#
+ FID: Fill-ID# (unique per PP)
+ WIDX: 16b SIMPLE Mode (index)
+ DMODE: (0=16b SIMPLE/1=32b SIMPLE)
+ SIZE: (0=LW Mode access/1=QW Mode Access)
+ INDEX: (0=Low LW/1=High LW)
+ NOTE: QW refers to a 56/64-bit LLM Load/Store (initiated
+ by a processor core). LW refers to a 32-bit load/store. */
+ uint64_t fsrc : 2; /**< Fill-Source (0=NCB-DTE/1=NCB-CSR/2=RESERVED/3=PP-CP2) */
+ uint64_t pnum : 1; /**< Memory Port
+ NOTE: For O2P, this bit will always return zero. */
+ uint64_t bnum : 3; /**< Memory Bank
+ When DFA_DDR2_ADDR[RNK_LO]=1, BNUM[2]=RANK[0].
+ (RANK[1] can be inferred from MADDR[24:0]) */
+ uint64_t maddr : 25; /**< Memory Address */
+#else
+ uint64_t maddr : 25;
+ uint64_t bnum : 3;
+ uint64_t pnum : 1;
+ uint64_t fsrc : 2;
+ uint64_t fdst : 9;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } cn31xx;
+ struct cvmx_dfa_memfadr_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t fdst : 9; /**< Fill-Destination
+ FSRC[1:0] | FDST[8:0]
+ -------------+-------------------------------------
+ 0(NCB-DTE) | [fillstart,2'b0,WIDX(1),DMODE(1),DTE(4)]
+ 1(NCB-CSR) | [ncbSRC[8:0]]
+ 3(CP2-PP) | [2'b0,SIZE(1),INDEX(1),PP(4),FID(1)]
+ where:
+ DTE: DFA Thread Engine ID#
+ PP: Packet Processor ID#
+ FID: Fill-ID# (unique per PP)
+ WIDX: 18b SIMPLE Mode (index)
+ DMODE: (0=18b SIMPLE/1=36b SIMPLE)
+ SIZE: (0=LW Mode access/1=QW Mode Access)
+ INDEX: (0=Low LW/1=High LW)
+ NOTE: QW refers to a 64-bit LLM Load/Store (initiated
+ by a processor core). LW refers to a 36-bit load/store. */
+ uint64_t fsrc : 2; /**< Fill-Source (0=NCB-DTE/1=NCB-CSR/2=RESERVED/3=PP-CP2) */
+ uint64_t pnum : 1; /**< Memory Port
+ NOTE: the port id's are reversed
+ PNUM==0 => port#1
+ PNUM==1 => port#0 */
+ uint64_t bnum : 3; /**< Memory Bank */
+ uint64_t maddr : 24; /**< Memory Address */
+#else
+ uint64_t maddr : 24;
+ uint64_t bnum : 3;
+ uint64_t pnum : 1;
+ uint64_t fsrc : 2;
+ uint64_t fdst : 9;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } cn38xx;
+ struct cvmx_dfa_memfadr_cn38xx cn38xxp2;
+ struct cvmx_dfa_memfadr_cn38xx cn58xx;
+ struct cvmx_dfa_memfadr_cn38xx cn58xxp1;
+};
+typedef union cvmx_dfa_memfadr cvmx_dfa_memfadr_t;
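+
+/* Illustrative decode (not part of the SDK) of the cn38xx failing-address
+ * capture described above. The FDST layout for FSRC=0 (NCB-DTE) follows the
+ * comment: [fillstart, 2'b0, WIDX(1), DMODE(1), DTE(4)]. Note the documented
+ * cn38xx port-id reversal (PNUM==0 => port#1). */
+static inline void dfa_memfadr_decode_cn38xx(uint64_t csr_val)
+{
+	cvmx_dfa_memfadr_t fadr;
+	int port, fillstart = 0, widx = 0, dmode = 0, dte = 0;
+
+	fadr.u64 = csr_val;
+	port = fadr.cn38xx.pnum ? 0 : 1;		/* port ids are reversed */
+	if (fadr.cn38xx.fsrc == 0) {			/* NCB-DTE fill source */
+		fillstart = (fadr.cn38xx.fdst >> 8) & 0x1;
+		widx = (fadr.cn38xx.fdst >> 5) & 0x1;	/* 18b SIMPLE index */
+		dmode = (fadr.cn38xx.fdst >> 4) & 0x1;	/* 0=18b/1=36b SIMPLE */
+		dte = fadr.cn38xx.fdst & 0xf;		/* DFA thread engine # */
+	}
+	(void)port; (void)fillstart; (void)widx; (void)dmode; (void)dte;
+}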
+
+/**
+ * cvmx_dfa_memfcr
+ *
+ * DFA_MEMFCR = FCRAM MRS Register(s) EMRS2[14:0], EMRS1[14:0], MRS[14:0]
+ * *** CN58XX UNSUPPORTED ***
+ *
+ * Notes:
+ * For FCRAM-II please consult your device's data sheet for further details:
+ * MRS Definition:
+ * A[13:8]=0 RESERVED
+ * A[7]=0 TEST MODE (N3K requires test mode 0:"disabled")
+ * A[6:4] CAS LATENCY (fully programmable - SW must ensure that the value programmed
+ * into DFA_MEM_CFG0[TRL] corresponds with this value).
+ * A[3]=0 BURST TYPE (N3K requires 0:"Sequential" Burst Type)
+ * A[2:0] BURST LENGTH Burst Length [1:BL2/2:BL4] (N3K only supports BL=2,4)
+ *
+ * In BL2 mode (for highest performance), only 1/2 the physical
+ * memory is unique (ie: each bunk stores the same information).
+ * In BL4 mode (highest capacity), all of the physical memory
+ * is unique (ie: each bunk is uniquely addressable).
+ * EMRS Definition:
+ * A[13:12] REFRESH MODE (N3K Supports only 0:"Conventional" and 1:"Short" auto-refresh modes)
+ *
+ * (SW must ensure that the value programmed into DFA_MEMCFG2[REFSHORT]
+ * is also reflected in the Refresh Mode encoding).
+ * A[11:7]=0 RESERVED
+ * A[6:5]=2 STROBE SELECT (N3K supports only 2:"Unidirectional DS/QS" mode - the read capture
+ * silos rely on a conditional QS strobe)
+ * A[4:3] DIC(QS) QS Drive Strength: fully programmable (consult your FCRAM-II data sheet)
+ * [0: Normal Output Drive/1: Strong Output Drive/2: Weak output Drive]
+ * A[2:1] DIC(DQ) DQ Drive Strength: fully programmable (consult your FCRAM-II data sheet)
+ * [0: Normal Output Drive/1: Strong Output Drive/2: Weak output Drive]
+ * A[0] DLL DLL Enable: Programmable [0:DLL Enable/1: DLL Disable]
+ *
+ * EMRS2 Definition: (for FCRAM-II+)
+ * A[13:11]=0 RESERVED
+ * A[10:8] ODTDS On Die Termination (DS+/-)
+ * [0: ODT Disable /1: 15ohm termination /(2-7): RESERVED]
+ * A[7:6]=0 MBW Multi-Bank Write: (N3K requires use of 0:"single bank" mode only)
+ * A[5:3] ODTin On Die Termination (input pin)
+ * [0: ODT Disable /1: 15ohm termination /(2-7): RESERVED]
+ * A[2:0] ODTDQ On Die Termination (DQ)
+ * [0: ODT Disable /1: 15ohm termination /(2-7): RESERVED]
+ */
+union cvmx_dfa_memfcr {
+ uint64_t u64;
+ struct cvmx_dfa_memfcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t emrs2 : 15; /**< Memory Address[14:0] during EMRS2(for FCRAM-II+)
+ *** CN58XX UNSUPPORTED *** */
+ uint64_t reserved_31_31 : 1;
+ uint64_t emrs : 15; /**< Memory Address[14:0] during EMRS
+ *** CN58XX UNSUPPORTED ***
+ (A[0]=1: DLL Enabled) */
+ uint64_t reserved_15_15 : 1;
+ uint64_t mrs : 15; /**< FCRAM Memory Address[14:0] during MRS
+ *** CN58XX UNSUPPORTED ***
+ A[6:4]=4 CAS LATENCY=4(default)
+ A[3]=0 Burst Type(must be 0:Sequential)
+ A[2:0]=2 Burst Length=4(default) */
+#else
+ uint64_t mrs : 15;
+ uint64_t reserved_15_15 : 1;
+ uint64_t emrs : 15;
+ uint64_t reserved_31_31 : 1;
+ uint64_t emrs2 : 15;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dfa_memfcr_s cn38xx;
+ struct cvmx_dfa_memfcr_s cn38xxp2;
+ struct cvmx_dfa_memfcr_s cn58xx;
+ struct cvmx_dfa_memfcr_s cn58xxp1;
+};
+typedef union cvmx_dfa_memfcr cvmx_dfa_memfcr_t;
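+
+/* Sketch (not in the SDK) assembling the default FCRAM MRS value from the
+ * bit definitions in the notes above: CAS latency 4 in A[6:4], sequential
+ * burst type (A[3]=0), and burst-length code 2 (BL4) in A[2:0]. */
+static inline uint64_t dfa_memfcr_default_mrs(void)
+{
+	cvmx_dfa_memfcr_t fcr;
+
+	fcr.u64 = 0;
+	fcr.s.mrs = (4 << 4) | (0 << 3) | 2;	/* A[6:4]=4, A[3]=0, A[2:0]=2 */
+	return fcr.u64;
+}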
+
+/**
+ * cvmx_dfa_memhidat
+ *
+ * DFA_MEMHIDAT = DFA NCB-Direct CSR access to DFM Memory Space (High QW)
+ *
+ * Description:
+ * DFA supports NCB-Direct CSR accesses to DFM Memory space for debug purposes. Unfortunately, NCB-Direct accesses
+ * are limited to QW-size(64bits), whereas the minimum access granularity for DFM Memory space is OW(128bits). To
+ * support writes to DFM Memory space, the Hi-QW of data is sourced from the DFA_MEMHIDAT register. Recall, the
+ * OW(128b) in DDR3 memory space is fixed format:
+ * OWDATA[127:118]: OWECC[9:0] 10bits of in-band OWECC SEC/DED codeword
+ * This can be precomputed/written by SW OR
+ * if DFM_FNTCTL[ECC_WENA]=1, DFM hardware will auto-compute the 10b OWECC and place in the
+ * OWDATA[127:118] before being written to memory.
+ * OWDATA[117:0]: Memory Data (contains fixed MNODE/MONODE arc formats for use by DTEs (thread engines)).
+ * Or, a user may choose to treat DFM Memory Space as 'scratch pad' in which case the
+ * OWDATA[117:0] may contain user-specified information accessible via NCB-Direct CSR mode
+ * accesses to DFA Memory Space.
+ * NOTE: To write to the DFA_MEMHIDAT register, a device would issue an IOBST directed at the DFA with addr[34:32]=3'b111.
+ * To read the DFA_MEMHIDAT register, a device would issue an IOBLD64 directed at the DFA with addr[34:32]=3'b111.
+ *
+ * NOTE: If DFA_CONFIG[DTECLKDIS]=1 (DFA-DTE clocks disabled), reads/writes to the DFA_MEMHIDAT register do not take effect.
+ * NOTE: If FUSE[TBD]="DFA DTE disable" is blown, reads/writes to the DFA_MEMHIDAT register do not take effect.
+ *
+ * NOTE: PLEASE REMOVE DEFINITION FROM o68 HRM
+ */
+union cvmx_dfa_memhidat {
+ uint64_t u64;
+ struct cvmx_dfa_memhidat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hidat : 64; /**< DFA Hi-QW of Write data during NCB-Direct DFM DDR3
+ Memory accesses.
+ All DFM DDR3 memory accesses are OW(128b) references,
+ and since NCB-Direct Mode writes only support QW(64b),
+ the Hi QW of data must be sourced from a CSR register.
+ NOTE: This single register is 'shared' for ALL DFM
+ DDR3 Memory writes.
+ For o68: This register is UNUSED. Treat as spare bits.
+ NOTE: PLEASE REMOVE DEFINITION FROM o68 HRM */
+#else
+ uint64_t hidat : 64;
+#endif
+ } s;
+ struct cvmx_dfa_memhidat_s cn61xx;
+ struct cvmx_dfa_memhidat_s cn63xx;
+ struct cvmx_dfa_memhidat_s cn63xxp1;
+ struct cvmx_dfa_memhidat_s cn66xx;
+ struct cvmx_dfa_memhidat_s cn68xx;
+ struct cvmx_dfa_memhidat_s cn68xxp1;
+};
+typedef union cvmx_dfa_memhidat cvmx_dfa_memhidat_t;
+
+/**
+ * cvmx_dfa_memrld
+ *
+ * DFA_MEMRLD = DFA RLDRAM MRS Register Values
+ *
+ * Description:
+ */
+union cvmx_dfa_memrld {
+ uint64_t u64;
+ struct cvmx_dfa_memrld_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t mrsdat : 23; /**< This field represents the data driven onto the
+ A[22:0] address lines during MRS(Mode Register Set)
+ commands (during a HW init sequence). This field
+ corresponds with the Mode Register Bit Map from
+ your RLDRAM-II device specific data sheet.
+ A[17:10]: RESERVED
+ A[9]: ODT (on die termination)
+ A[8]: Impedance Matching
+ A[7]: DLL Reset
+ A[6]: UNUSED
+ A[5]: Address Mux (for N3K: MUST BE ZERO)
+ A[4:3]: Burst Length (for N3K: MUST BE ZERO)
+ A[2:0]: Configuration (see data sheet for
+ specific RLDRAM-II device).
+ - 000-001: CFG=1 [tRC=4/tRL=4/tWL=5]
+ - 010: CFG=2 [tRC=6/tRL=6/tWL=7]
+ - 011: CFG=3 [tRC=8/tRL=8/tWL=9]
+ - 100-111: RESERVED
+ NOTE: For additional density, the RLDRAM-II parts
+ can be 'clamshelled' (ie: two devices mounted on
+ different sides of the PCB), since the BGA
+ pinout supports 'mirroring'.
+ To support a clamshell design, SW must preload
+ the MRSDAT[22:0] with the proper A[22:0] pin mapping
+ which is dependent on the 'selected' bunk/clam
+ (see also: DFA_MEMCFG0[BUNK_INIT] field).
+ NOTE: Care MUST BE TAKEN NOT to write to this register
+ within 64K eclk cycles of a HW INIT (see: INIT_P0/INIT_P1).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t mrsdat : 23;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_dfa_memrld_s cn38xx;
+ struct cvmx_dfa_memrld_s cn38xxp2;
+ struct cvmx_dfa_memrld_s cn58xx;
+ struct cvmx_dfa_memrld_s cn58xxp1;
+};
+typedef union cvmx_dfa_memrld cvmx_dfa_memrld_t;
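+
+/* Illustrative (non-SDK) helper building an MRSDAT value per the A[22:0] map
+ * above: configuration code 2 in A[2:0] selects CFG=2 (tRC=6/tRL=6/tWL=7),
+ * with A[5] and A[4:3] left zero as N3K requires. A clamshell design would
+ * instead preload the mirrored A[22:0] mapping for the selected bunk. */
+static inline uint64_t dfa_memrld_cfg2(void)
+{
+	cvmx_dfa_memrld_t rld;
+
+	rld.u64 = 0;
+	rld.s.mrsdat = 2;	/* A[2:0]=2: CFG=2; all other MRS bits zero */
+	return rld.u64;
+}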
+
+/**
+ * cvmx_dfa_ncbctl
+ *
+ * DFA_NCBCTL = DFA NCB CTL Register
+ *
+ * Description:
+ */
+union cvmx_dfa_ncbctl {
+ uint64_t u64;
+ struct cvmx_dfa_ncbctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t sbdnum : 5; /**< SBD Debug Entry#
+ For internal use only. (DFA Scoreboard debug)
+ Selects which one of 32 DFA Scoreboard entries is
+ latched into the DFA_SBD_DBG[0-3] registers. */
+ uint64_t sbdlck : 1; /**< DFA Scoreboard LOCK Strobe
+ For internal use only. (DFA Scoreboard debug)
+ When written with a '1', the DFA Scoreboard Debug
+ registers (DFA_SBD_DBG[0-3]) are all locked down.
+ This allows SW to lock down the contents of the entire
+ SBD for a single instant in time. All subsequent reads
+ of the DFA scoreboard registers will return the data
+ from that instant in time. */
+ uint64_t dcmode : 1; /**< DRF-CRQ/DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=CRQ/HP=DTE],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t dtmode : 1; /**< DRF-DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=DTE[15],...,HP=DTE[0]],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t pmode : 1; /**< NCB-NRP Arbiter Mode
+ (0=Fixed Priority [LP=WQF,DFF,HP=RGF]/1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t qmode : 1; /**< NCB-NRQ Arbiter Mode
+ (0=Fixed Priority [LP=IRF,RWF,PRF,HP=GRF]/1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t imode : 1; /**< NCB-Inbound Arbiter
+ (0=FP [LP=NRQ,HP=NRP], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t imode : 1;
+ uint64_t qmode : 1;
+ uint64_t pmode : 1;
+ uint64_t dtmode : 1;
+ uint64_t dcmode : 1;
+ uint64_t sbdlck : 1;
+ uint64_t sbdnum : 5;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_dfa_ncbctl_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t sbdnum : 4; /**< SBD Debug Entry#
+ For internal use only. (DFA Scoreboard debug)
+ Selects which one of 16 DFA Scoreboard entries is
+ latched into the DFA_SBD_DBG[0-3] registers. */
+ uint64_t sbdlck : 1; /**< DFA Scoreboard LOCK Strobe
+ For internal use only. (DFA Scoreboard debug)
+ When written with a '1', the DFA Scoreboard Debug
+ registers (DFA_SBD_DBG[0-3]) are all locked down.
+ This allows SW to lock down the contents of the entire
+ SBD for a single instant in time. All subsequent reads
+ of the DFA scoreboard registers will return the data
+ from that instant in time. */
+ uint64_t dcmode : 1; /**< DRF-CRQ/DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=CRQ/HP=DTE],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t dtmode : 1; /**< DRF-DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=DTE[15],...,HP=DTE[0]],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t pmode : 1; /**< NCB-NRP Arbiter Mode
+ (0=Fixed Priority [LP=WQF,DFF,HP=RGF]/1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t qmode : 1; /**< NCB-NRQ Arbiter Mode
+ (0=Fixed Priority [LP=IRF,RWF,PRF,HP=GRF]/1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t imode : 1; /**< NCB-Inbound Arbiter
+ (0=FP [LP=NRQ,HP=NRP], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t imode : 1;
+ uint64_t qmode : 1;
+ uint64_t pmode : 1;
+ uint64_t dtmode : 1;
+ uint64_t dcmode : 1;
+ uint64_t sbdlck : 1;
+ uint64_t sbdnum : 4;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn38xx;
+ struct cvmx_dfa_ncbctl_cn38xx cn38xxp2;
+ struct cvmx_dfa_ncbctl_s cn58xx;
+ struct cvmx_dfa_ncbctl_s cn58xxp1;
+};
+typedef union cvmx_dfa_ncbctl cvmx_dfa_ncbctl_t;
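+
+/* Sketch of the scoreboard-debug flow described above: select an entry with
+ * SBDNUM, strobe SBDLCK to lock DFA_SBD_DBG[0-3], then read the snapshot.
+ * Assumes the CVMX_DFA_NCBCTL/CVMX_DFA_SBD_DBG0 address macros and the
+ * cvmx_read_csr()/cvmx_write_csr() accessors declared elsewhere in the SDK. */
+static inline uint64_t dfa_sbd_snapshot(int entry)
+{
+	cvmx_dfa_ncbctl_t ctl;
+
+	ctl.u64 = cvmx_read_csr(CVMX_DFA_NCBCTL);
+	ctl.s.sbdnum = entry;	/* which scoreboard entry to latch */
+	ctl.s.sbdlck = 1;	/* lock down DFA_SBD_DBG[0-3] */
+	cvmx_write_csr(CVMX_DFA_NCBCTL, ctl.u64);
+	return cvmx_read_csr(CVMX_DFA_SBD_DBG0);
+}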
+
+/**
+ * cvmx_dfa_pfc0_cnt
+ *
+ * DFA_PFC0_CNT = DFA Performance Counter \#0
+ * *FOR INTERNAL USE ONLY*
+ * Description:
+ */
+union cvmx_dfa_pfc0_cnt {
+ uint64_t u64;
+ struct cvmx_dfa_pfc0_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pfcnt0 : 64; /**< Performance Counter \#0
+ When DFA_PFC_GCTL[CNT0ENA]=1, the event selected
+ by DFA_PFC0_CTL[EVSEL] is counted.
+ See also DFA_PFC_GCTL[CNT0WCLR] and DFA_PFC_GCTL
+ [CNT0RCLR] for special clear count cases available
+ for SW data collection. */
+#else
+ uint64_t pfcnt0 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_pfc0_cnt_s cn61xx;
+ struct cvmx_dfa_pfc0_cnt_s cn63xx;
+ struct cvmx_dfa_pfc0_cnt_s cn63xxp1;
+ struct cvmx_dfa_pfc0_cnt_s cn66xx;
+ struct cvmx_dfa_pfc0_cnt_s cn68xx;
+ struct cvmx_dfa_pfc0_cnt_s cn68xxp1;
+};
+typedef union cvmx_dfa_pfc0_cnt cvmx_dfa_pfc0_cnt_t;
+
+/**
+ * cvmx_dfa_pfc0_ctl
+ *
+ * DFA_PFC0_CTL = DFA Performance Counter#0 Control
+ * *FOR INTERNAL USE ONLY*
+ * Description:
+ */
+union cvmx_dfa_pfc0_ctl {
+ uint64_t u64;
+ struct cvmx_dfa_pfc0_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t evsel : 6; /**< Performance Counter#0 Event Selector
+ // Events [0-31] are based on PMODE(0:per cluster-DTE 1:per graph)
+ - 0: \#Total Cycles
+ - 1: \#LDNODE visits
+ - 2: \#SDNODE visits
+ - 3: \#DNODE visits (LD/SD)
+ - 4: \#LCNODE visits
+ - 5: \#SCNODE visits
+ - 6: \#CNODE visits (LC/SC)
+ - 7: \#LMNODE visits
+ - 8: \#SMNODE visits
+ - 9: \#MNODE visits (LM/SM)
+ - 10: \#MONODE visits
+ - 11: \#CACHE visits (DNODE,CNODE) exc: CNDRD,MPHIDX
+ - 12: \#CACHE visits (DNODE,CNODE)+(CNDRD,MPHIDX)
+ - 13: \#MEMORY visits (MNODE+MONODE)
+ - 14: \#CNDRDs detected (occur for SCNODE->*MNODE transitions)
+ - 15: \#MPHIDX detected (occur for ->LMNODE transitions)
+ - 16: \#RESCANs detected (occur when HASH collision is detected)
+ - 17: \#GWALK iterations STALLED - Packet data/Result Buffer
+ - 18: \#GWALK iterations NON-STALLED
+ - 19: \#CLOAD iterations
+ - 20: \#MLOAD iterations
+ [NOTE: If PMODE=1(per-graph) the MLOAD IWORD0.VGID will be used to discern graph#].
+ - 21: \#RWORD1+ writes
+ - 22: \#cycles Cluster is busy
+ - 23: \#GWALK Instructions
+ - 24: \#CLOAD Instructions
+ - 25: \#MLOAD Instructions
+ [NOTE: If PMODE=1(per-graph) the MLOAD IWORD0.VGID will be used to discern graph#].
+ - 26: \#GFREE Instructions
+ - 27-30: RESERVED
+ - 31: \# Node Transitions detected (see DFA_PFC_GCTL[SNODE,ENODE,EDNODE] registers)
+ //=============================================================
+ // Events [32-63] are used ONLY FOR PMODE=0(per-cluster DTE mode):
+ - 32: \#cycles a specific cluster-DTE remains active(valid state)
+ - 33: \#cycles a specific cluster-DTE waits for Memory Response Data
+ - 34: \#cycles a specific cluster-DTE waits in resource stall state
+ (waiting for packet data or result buffer space)
+ - 35: \#cycles a specific cluster-DTE waits in resource pending state
+ - 36-63: RESERVED
+ //============================================================= */
+ uint64_t reserved_6_7 : 2;
+ uint64_t cldte : 4; /**< Performance Counter#0 Cluster DTE Selector
+ When DFA_PFC_GCTL[PMODE]=0 (per-cluster DTE), this field
+ is used to select/monitor the cluster's DTE# for all events
+ associated with Performance Counter#0. */
+ uint64_t clnum : 2; /**< Performance Counter#0 Cluster Selector
+ When DFA_PFC_GCTL[PMODE]=0 (per-cluster DTE), this field
+ is used to select/monitor the cluster# for all events
+ associated with Performance Counter#0. */
+#else
+ uint64_t clnum : 2;
+ uint64_t cldte : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t evsel : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_dfa_pfc0_ctl_s cn61xx;
+ struct cvmx_dfa_pfc0_ctl_s cn63xx;
+ struct cvmx_dfa_pfc0_ctl_s cn63xxp1;
+ struct cvmx_dfa_pfc0_ctl_s cn66xx;
+ struct cvmx_dfa_pfc0_ctl_s cn68xx;
+ struct cvmx_dfa_pfc0_ctl_s cn68xxp1;
+};
+typedef union cvmx_dfa_pfc0_ctl cvmx_dfa_pfc0_ctl_t;
+
+/**
+ * cvmx_dfa_pfc1_cnt
+ *
+ * DFA_PFC1_CNT = DFA Performance Counter \#1
+ * *FOR INTERNAL USE ONLY*
+ * Description:
+ */
+union cvmx_dfa_pfc1_cnt {
+ uint64_t u64;
+ struct cvmx_dfa_pfc1_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pfcnt1 : 64; /**< Performance Counter \#1
+ When DFA_PFC_GCTL[CNT1ENA]=1, the event selected
+ by DFA_PFC1_CTL[EVSEL] is counted.
+ See also DFA_PFC_GCTL[CNT1WCLR] and DFA_PFC_GCTL
+ [CNT1RCLR] for special clear count cases available
+ for SW data collection. */
+#else
+ uint64_t pfcnt1 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_pfc1_cnt_s cn61xx;
+ struct cvmx_dfa_pfc1_cnt_s cn63xx;
+ struct cvmx_dfa_pfc1_cnt_s cn63xxp1;
+ struct cvmx_dfa_pfc1_cnt_s cn66xx;
+ struct cvmx_dfa_pfc1_cnt_s cn68xx;
+ struct cvmx_dfa_pfc1_cnt_s cn68xxp1;
+};
+typedef union cvmx_dfa_pfc1_cnt cvmx_dfa_pfc1_cnt_t;
+
+/**
+ * cvmx_dfa_pfc1_ctl
+ *
+ * DFA_PFC1_CTL = DFA Performance Counter#1 Control
+ * *FOR INTERNAL USE ONLY*
+ * Description:
+ */
+union cvmx_dfa_pfc1_ctl {
+ uint64_t u64;
+ struct cvmx_dfa_pfc1_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t evsel : 6; /**< Performance Counter#1 Event Selector
+ - 0: \#Cycles
+ - 1: \#LDNODE visits
+ - 2: \#SDNODE visits
+ - 3: \#DNODE visits (LD/SD)
+ - 4: \#LCNODE visits
+ - 5: \#SCNODE visits
+ - 6: \#CNODE visits (LC/SC)
+ - 7: \#LMNODE visits
+ - 8: \#SMNODE visits
+ - 9: \#MNODE visits (LM/SM)
+ - 10: \#MONODE visits
+ - 11: \#CACHE visits (DNODE,CNODE) exc: CNDRD,MPHIDX
+ - 12: \#CACHE visits (DNODE,CNODE)+(CNDRD,MPHIDX)
+ - 13: \#MEMORY visits (MNODE+MONODE)
+ - 14: \#CNDRDs detected (occur for SCNODE->*MNODE transitions)
+ - 15: \#MPHIDX detected (occur for ->LMNODE transitions)
+ - 16: \#RESCANs detected (occur when HASH collision is detected)
+ - 17: \#GWALK STALLs detected - Packet data/Result Buffer
+ - 18: \#GWALK DTE cycles (all DTE-GNT[3a])
+ - 19: \#CLOAD DTE cycles
+ - 20: \#MLOAD DTE cycles
+ - 21: \#cycles waiting for Memory Response Data
+ - 22: \#cycles waiting in resource stall state (waiting for packet data or result buffer space)
+ - 23: \#cycles waiting in resource pending state
+ - 24: \#RWORD1+ writes
+ - 25: \#DTE-VLD cycles
+ - 26: \#DTE Transitions detected (see DFA_PFC_GCTL[SNODE,ENODE] registers)
+ - 27: \#GWALK Instructions
+ - 28: \#CLOAD Instructions
+ - 29: \#MLOAD Instructions
+ - 30: \#GFREE Instructions (== \#GFREE DTE cycles)
+ - 31: RESERVED
+ - 32: \#DTE-Busy cycles (ALL DTE-GNT strobes) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t cldte : 4; /**< Performance Counter#1 Cluster DTE Selector
+ When DFA_PFC_GCTL[PMODE]=0 (per-cluster DTE), this field
+ is used to select/monitor the cluster's DTE# for all events
+ associated with Performance Counter#1. */
+ uint64_t clnum : 2; /**< Performance Counter#1 Cluster Selector
+ When DFA_PFC_GCTL[PMODE]=0 (per-cluster DTE), this field
+ is used to select/monitor the cluster# for all events
+ associated with Performance Counter#1. */
+#else
+ uint64_t clnum : 2;
+ uint64_t cldte : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t evsel : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_dfa_pfc1_ctl_s cn61xx;
+ struct cvmx_dfa_pfc1_ctl_s cn63xx;
+ struct cvmx_dfa_pfc1_ctl_s cn63xxp1;
+ struct cvmx_dfa_pfc1_ctl_s cn66xx;
+ struct cvmx_dfa_pfc1_ctl_s cn68xx;
+ struct cvmx_dfa_pfc1_ctl_s cn68xxp1;
+};
+typedef union cvmx_dfa_pfc1_ctl cvmx_dfa_pfc1_ctl_t;
+
+/**
+ * cvmx_dfa_pfc2_cnt
+ *
+ * DFA_PFC2_CNT = DFA Performance Counter \#2
+ * *FOR INTERNAL USE ONLY*
+ * Description:
+ */
+union cvmx_dfa_pfc2_cnt {
+ uint64_t u64;
+ struct cvmx_dfa_pfc2_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pfcnt2 : 64; /**< Performance Counter \#2
+ When DFA_PFC_GCTL[CNT2ENA]=1, the event selected
+ by DFA_PFC2_CTL[EVSEL] is counted.
+ See also DFA_PFC_GCTL[CNT2WCLR] and DFA_PFC_GCTL
+ [CNT2RCLR] for special clear count cases available
+ for SW data collection. */
+#else
+ uint64_t pfcnt2 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_pfc2_cnt_s cn61xx;
+ struct cvmx_dfa_pfc2_cnt_s cn63xx;
+ struct cvmx_dfa_pfc2_cnt_s cn63xxp1;
+ struct cvmx_dfa_pfc2_cnt_s cn66xx;
+ struct cvmx_dfa_pfc2_cnt_s cn68xx;
+ struct cvmx_dfa_pfc2_cnt_s cn68xxp1;
+};
+typedef union cvmx_dfa_pfc2_cnt cvmx_dfa_pfc2_cnt_t;
+
+/**
+ * cvmx_dfa_pfc2_ctl
+ *
+ * DFA_PFC2_CTL = DFA Performance Counter#2 Control
+ * *FOR INTERNAL USE ONLY*
+ * Description:
+ */
+union cvmx_dfa_pfc2_ctl {
+ uint64_t u64;
+ struct cvmx_dfa_pfc2_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t evsel : 6; /**< Performance Counter#2 Event Selector
+ - 0: \#Cycles
+ - 1: \#LDNODE visits
+ - 2: \#SDNODE visits
+ - 3: \#DNODE visits (LD/SD)
+ - 4: \#LCNODE visits
+ - 5: \#SCNODE visits
+ - 6: \#CNODE visits (LC/SC)
+ - 7: \#LMNODE visits
+ - 8: \#SMNODE visits
+ - 9: \#MNODE visits (LM/SM)
+ - 10: \#MONODE visits
+ - 11: \#CACHE visits (DNODE,CNODE) exc: CNDRD,MPHIDX
+ - 12: \#CACHE visits (DNODE,CNODE)+(CNDRD,MPHIDX)
+ - 13: \#MEMORY visits (MNODE+MONODE)
+ - 14: \#CNDRDs detected (occur for SCNODE->*MNODE transitions)
+ - 15: \#MPHIDX detected (occur for ->LMNODE transitions)
+ - 16: \#RESCANs detected (occur when HASH collision is detected)
+ - 17: \#GWALK STALLs detected - Packet data/Result Buffer
+ - 18: \#GWALK DTE cycles (all DTE-GNT[3a])
+ - 19: \#CLOAD DTE cycles
+ - 20: \#MLOAD DTE cycles
+ - 21: \#cycles waiting for Memory Response Data
+ - 22: \#cycles waiting in resource stall state (waiting for packet data or result buffer space)
+ - 23: \#cycles waiting in resource pending state
+ - 24: \#RWORD1+ writes
+ - 25: \#DTE-VLD cycles
+ - 26: \#DTE Transitions detected (see DFA_PFC_GCTL[SNODE,ENODE] registers)
+ - 27: \#GWALK Instructions
+ - 28: \#CLOAD Instructions
+ - 29: \#MLOAD Instructions
+ - 30: \#GFREE Instructions (== \#GFREE DTE cycles)
+ - 31: RESERVED
+ - 32: \#DTE-Busy cycles (ALL DTE-GNT strobes) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t cldte : 4; /**< Performance Counter#2 Cluster DTE Selector
+ When DFA_PFC_GCTL[PMODE]=0 (per-cluster DTE), this field
+ is used to select/monitor the cluster's DTE# for all events
+ associated with Performance Counter#2. */
+ uint64_t clnum : 2; /**< Performance Counter#2 Cluster Selector
+ When DFA_PFC_GCTL[PMODE]=0 (per-cluster DTE), this field
+ is used to select/monitor the cluster# for all events
+ associated with Performance Counter#2. */
+#else
+ uint64_t clnum : 2;
+ uint64_t cldte : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t evsel : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_dfa_pfc2_ctl_s cn61xx;
+ struct cvmx_dfa_pfc2_ctl_s cn63xx;
+ struct cvmx_dfa_pfc2_ctl_s cn63xxp1;
+ struct cvmx_dfa_pfc2_ctl_s cn66xx;
+ struct cvmx_dfa_pfc2_ctl_s cn68xx;
+ struct cvmx_dfa_pfc2_ctl_s cn68xxp1;
+};
+typedef union cvmx_dfa_pfc2_ctl cvmx_dfa_pfc2_ctl_t;
+
+/**
+ * cvmx_dfa_pfc3_cnt
+ *
+ * DFA_PFC3_CNT = DFA Performance Counter \#3
+ * *FOR INTERNAL USE ONLY*
+ * Description:
+ */
+union cvmx_dfa_pfc3_cnt {
+ uint64_t u64;
+ struct cvmx_dfa_pfc3_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pfcnt3 : 64; /**< Performance Counter \#3
+ When DFA_PFC_GCTL[CNT3ENA]=1, the event selected
+ by DFA_PFC3_CTL[EVSEL] is counted.
+ See also DFA_PFC_GCTL[CNT3WCLR] and DFA_PFC_GCTL
+ [CNT3RCLR] for special clear count cases available
+ for SW data collection. */
+#else
+ uint64_t pfcnt3 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_pfc3_cnt_s cn61xx;
+ struct cvmx_dfa_pfc3_cnt_s cn63xx;
+ struct cvmx_dfa_pfc3_cnt_s cn63xxp1;
+ struct cvmx_dfa_pfc3_cnt_s cn66xx;
+ struct cvmx_dfa_pfc3_cnt_s cn68xx;
+ struct cvmx_dfa_pfc3_cnt_s cn68xxp1;
+};
+typedef union cvmx_dfa_pfc3_cnt cvmx_dfa_pfc3_cnt_t;
+
+/**
+ * cvmx_dfa_pfc3_ctl
+ *
+ * DFA_PFC3_CTL = DFA Performance Counter#3 Control
+ * *FOR INTERNAL USE ONLY*
+ * Description:
+ */
+union cvmx_dfa_pfc3_ctl {
+ uint64_t u64;
+ struct cvmx_dfa_pfc3_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t evsel : 6; /**< Performance Counter#3 Event Selector
+ - 0: \#Cycles
+ - 1: \#LDNODE visits
+ - 2: \#SDNODE visits
+ - 3: \#DNODE visits (LD/SD)
+ - 4: \#LCNODE visits
+ - 5: \#SCNODE visits
+ - 6: \#CNODE visits (LC/SC)
+ - 7: \#LMNODE visits
+ - 8: \#SMNODE visits
+ - 9: \#MNODE visits (LM/SM)
+ - 10: \#MONODE visits
+ - 11: \#CACHE visits (DNODE,CNODE) exc: CNDRD,MPHIDX
+ - 12: \#CACHE visits (DNODE,CNODE)+(CNDRD,MPHIDX)
+ - 13: \#MEMORY visits (MNODE+MONODE)
+ - 14: \#CNDRDs detected (occur for SCNODE->*MNODE transitions)
+ - 15: \#MPHIDX detected (occur for ->LMNODE transitions)
+ - 16: \#RESCANs detected (occur when HASH collision is detected)
+ - 17: \#GWALK STALLs detected - Packet data/Result Buffer
+ - 18: \#GWALK DTE cycles (all DTE-GNT[3a])
+ - 19: \#CLOAD DTE cycles
+ - 20: \#MLOAD DTE cycles
+ - 21: \#cycles waiting for Memory Response Data
+ - 22: \#cycles waiting in resource stall state (waiting for packet data or result buffer space)
+ - 23: \#cycles waiting in resource pending state
+ - 24: \#RWORD1+ writes
+ - 25: \#DTE-VLD cycles
+ - 26: \#DTE Transitions detected (see DFA_PFC_GCTL[SNODE,ENODE] registers)
+ - 27: \#GWALK Instructions
+ - 28: \#CLOAD Instructions
+ - 29: \#MLOAD Instructions
+ - 30: \#GFREE Instructions (== \#GFREE DTE cycles)
+ - 31: RESERVED
+ - 32: \#DTE-Busy cycles (ALL DTE-GNT strobes) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t cldte : 4; /**< Performance Counter#3 Cluster DTE Selector
+ When DFA_PFC_GCTL[PMODE]=0 (per-cluster DTE), this field
+ is used to select/monitor the cluster's DTE# for all events
+ associated with Performance Counter#3. */
+ uint64_t clnum : 2; /**< Performance Counter#3 Cluster Selector
+ When DFA_PFC_GCTL[PMODE]=0 (per-cluster DTE), this field
+ is used to select/monitor the cluster# for all events
+ associated with Performance Counter#3. */
+#else
+ uint64_t clnum : 2;
+ uint64_t cldte : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t evsel : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_dfa_pfc3_ctl_s cn61xx;
+ struct cvmx_dfa_pfc3_ctl_s cn63xx;
+ struct cvmx_dfa_pfc3_ctl_s cn63xxp1;
+ struct cvmx_dfa_pfc3_ctl_s cn66xx;
+ struct cvmx_dfa_pfc3_ctl_s cn68xx;
+ struct cvmx_dfa_pfc3_ctl_s cn68xxp1;
+};
+typedef union cvmx_dfa_pfc3_ctl cvmx_dfa_pfc3_ctl_t;
+
+/**
+ * cvmx_dfa_pfc_gctl
+ *
+ * DFA_PFC_GCTL = DFA Performance Counter Global Control
+ * *FOR INTERNAL USE ONLY*
+ * Description:
+ */
+union cvmx_dfa_pfc_gctl {
+ uint64_t u64;
+ struct cvmx_dfa_pfc_gctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t vgid : 8; /**< Virtual Graph Id#
+ When PMODE=1(per-graph selector), this field is used
+ to select/monitor only those events which are
+ associated with this selected VGID(virtual graph ID).
+ This field is used globally across all four performance
+ counters.
+ IMPNOTE: I implemented a global VGID across all 4 performance
+ counters to save wires/area. */
+ uint64_t pmode : 1; /**< Select Mode
+ - 0: Events are selected on a per-cluster DTE# (CLNUM/CLDTE)
+ DFA_PFCx_CTL[CLNUM,CLDTE] specifies the cluster-DTE for
+ each 1(of 4) performance counters.
+ - 1: Events are selected on a per-graph basis (VGID=virtual Graph ID).
+ NOTE: Only EVSEL=[0...31] can be used in conjunction with PMODE=1.
+ DFA_PFC_GCTL[VGID] specifies the Virtual graph ID used across
+ all four performance counters. */
+ uint64_t ednode : 2; /**< Ending DNODE Selector
+ When ENODE=0/1(*DNODE), this field is used to further
+ specify the Ending DNODE transition sub-type:
+ - 0: ALL DNODE sub-types
+ - 1: ->D2e (explicit DNODE transition: node-arc alone transitions to DNODE)
+ - 2: ->D2i (implicit DNODE transition: arc-present triggers transition)
+ - 3: ->D1r (rescan DNODE transition) */
+ uint64_t enode : 3; /**< Ending Node Selector
+ When DFA_PFCx_CTL[EVSEL]=Node Transition(31), the ENODE
+ field is used to select Ending Node, and the SNODE
+ field is used to select the Starting Node.
+ - 0: LDNODE
+ - 1: SDNODE
+ - 2: LCNODE
+ - 3: SCNODE
+ - 4: LMNODE
+ - 5: SMNODE
+ - 6: MONODE
+ - 7: RESERVED */
+ uint64_t snode : 3; /**< Starting Node Selector
+ When DFA_PFCx_CTL[EVSEL]=Node Transition(31), the SNODE
+ field is used to select Starting Node, and the ENODE
+ field is used to select the Ending Node.
+ - 0: LDNODE
+ - 1: SDNODE
+ - 2: LCNODE
+ - 3: SCNODE
+ - 4: LMNODE
+ - 5: SMNODE
+ - 6: MONODE
+ - 7: RESERVED */
+ uint64_t cnt3rclr : 1; /**< Performance Counter \#3 Read Clear
+ If this bit is set, CSR reads to the DFA_PFC3_CNT
+ will clear the count value. This allows SW to maintain
+ 'cumulative' counters to avoid HW wraparound. */
+ uint64_t cnt2rclr : 1; /**< Performance Counter \#2 Read Clear
+ If this bit is set, CSR reads to the DFA_PFC2_CNT
+ will clear the count value. This allows SW to maintain
+ 'cumulative' counters to avoid HW wraparound. */
+ uint64_t cnt1rclr : 1; /**< Performance Counter \#1 Read Clear
+ If this bit is set, CSR reads to the DFA_PFC1_CNT
+ will clear the count value. This allows SW to maintain
+ 'cumulative' counters to avoid HW wraparound. */
+ uint64_t cnt0rclr : 1; /**< Performance Counter \#0 Read Clear
+ If this bit is set, CSR reads to the DFA_PFC0_CNT
+ will clear the count value. This allows SW to maintain
+ 'cumulative' counters to avoid HW wraparound. */
+ uint64_t cnt3wclr : 1; /**< Performance Counter \#3 Write Clear
+ If this bit is set, CSR writes to the DFA_PFC3_CNT
+ will clear the count value.
+ If this bit is clear, CSR writes to the DFA_PFC3_CNT
+ will continue the count from the written value. */
+ uint64_t cnt2wclr : 1; /**< Performance Counter \#2 Write Clear
+ If this bit is set, CSR writes to the DFA_PFC2_CNT
+ will clear the count value.
+ If this bit is clear, CSR writes to the DFA_PFC2_CNT
+ will continue the count from the written value. */
+ uint64_t cnt1wclr : 1; /**< Performance Counter \#1 Write Clear
+ If this bit is set, CSR writes to the DFA_PFC1_CNT
+ will clear the count value.
+ If this bit is clear, CSR writes to the DFA_PFC1_CNT
+ will continue the count from the written value. */
+ uint64_t cnt0wclr : 1; /**< Performance Counter \#0 Write Clear
+ If this bit is set, CSR writes to the DFA_PFC0_CNT
+ will clear the count value.
+ If this bit is clear, CSR writes to the DFA_PFC0_CNT
+ will continue the count from the written value. */
+ uint64_t cnt3ena : 1; /**< Performance Counter 3 Enable
+ When this bit is set, the performance counter \#3
+ is enabled. */
+ uint64_t cnt2ena : 1; /**< Performance Counter 2 Enable
+ When this bit is set, the performance counter \#2
+ is enabled. */
+ uint64_t cnt1ena : 1; /**< Performance Counter 1 Enable
+ When this bit is set, the performance counter \#1
+ is enabled. */
+ uint64_t cnt0ena : 1; /**< Performance Counter 0 Enable
+ When this bit is set, the performance counter \#0
+ is enabled. */
+#else
+ uint64_t cnt0ena : 1;
+ uint64_t cnt1ena : 1;
+ uint64_t cnt2ena : 1;
+ uint64_t cnt3ena : 1;
+ uint64_t cnt0wclr : 1;
+ uint64_t cnt1wclr : 1;
+ uint64_t cnt2wclr : 1;
+ uint64_t cnt3wclr : 1;
+ uint64_t cnt0rclr : 1;
+ uint64_t cnt1rclr : 1;
+ uint64_t cnt2rclr : 1;
+ uint64_t cnt3rclr : 1;
+ uint64_t snode : 3;
+ uint64_t enode : 3;
+ uint64_t ednode : 2;
+ uint64_t pmode : 1;
+ uint64_t vgid : 8;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_dfa_pfc_gctl_s cn61xx;
+ struct cvmx_dfa_pfc_gctl_s cn63xx;
+ struct cvmx_dfa_pfc_gctl_s cn63xxp1;
+ struct cvmx_dfa_pfc_gctl_s cn66xx;
+ struct cvmx_dfa_pfc_gctl_s cn68xx;
+ struct cvmx_dfa_pfc_gctl_s cn68xxp1;
+};
+typedef union cvmx_dfa_pfc_gctl cvmx_dfa_pfc_gctl_t;
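+
+/* Sketch of the counter programming model documented above: point counter #0
+ * at an event for a given cluster/DTE, then enable it with read-clear
+ * semantics so SW can keep cumulative counts. Assumes the CVMX_DFA_PFC0_CTL/
+ * CVMX_DFA_PFC_GCTL address macros and the cvmx_read_csr()/cvmx_write_csr()
+ * accessors declared elsewhere in the SDK. */
+static inline void dfa_pfc0_setup(int evsel, int clnum, int cldte)
+{
+	cvmx_dfa_pfc0_ctl_t ctl;
+	cvmx_dfa_pfc_gctl_t gctl;
+
+	ctl.u64 = 0;
+	ctl.s.evsel = evsel;	/* event selector (see EVSEL list above) */
+	ctl.s.clnum = clnum;	/* cluster#, used when PMODE=0 */
+	ctl.s.cldte = cldte;	/* cluster DTE#, used when PMODE=0 */
+	cvmx_write_csr(CVMX_DFA_PFC0_CTL, ctl.u64);
+
+	gctl.u64 = cvmx_read_csr(CVMX_DFA_PFC_GCTL);
+	gctl.s.pmode = 0;	/* per-cluster DTE selection */
+	gctl.s.cnt0rclr = 1;	/* reads of DFA_PFC0_CNT clear the count */
+	gctl.s.cnt0ena = 1;	/* start counting */
+	cvmx_write_csr(CVMX_DFA_PFC_GCTL, gctl.u64);
+}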
+
+/**
+ * cvmx_dfa_rodt_comp_ctl
+ *
+ * DFA_RODT_COMP_CTL = DFA RLD Compensation control (For read "on die termination")
+ *
+ */
+union cvmx_dfa_rodt_comp_ctl {
+ uint64_t u64;
+ struct cvmx_dfa_rodt_comp_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t enable : 1; /**< Read On Die Termination Enable
+ (0=disable, 1=enable) */
+ uint64_t reserved_12_15 : 4;
+ uint64_t nctl : 4; /**< Compensation control bits */
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5; /**< Compensation control bits */
+#else
+ uint64_t pctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t enable : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_dfa_rodt_comp_ctl_s cn58xx;
+ struct cvmx_dfa_rodt_comp_ctl_s cn58xxp1;
+};
+typedef union cvmx_dfa_rodt_comp_ctl cvmx_dfa_rodt_comp_ctl_t;
+
+/**
+ * cvmx_dfa_sbd_dbg0
+ *
+ * DFA_SBD_DBG0 = DFA Scoreboard Debug \#0 Register
+ *
+ * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's (undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+union cvmx_dfa_sbd_dbg0 {
+ uint64_t u64;
+ struct cvmx_dfa_sbd_dbg0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t sbd0 : 64; /**< DFA ScoreBoard \#0 Data
+ For internal use only! (DFA Scoreboard Debug)
+ [63:40] rptr[26:3]: Result Base Pointer
+ [39:24] rwcnt[15:0]: Cumulative Result Write Counter
+ [23] lastgrdrsp: Last Gather-Rd Response
+ [22] wtgrdrsp: Waiting Gather-Rd Response
+ [21] wtgrdreq: Waiting for Gather-Rd Issue
+ [20] glvld: GLPTR/GLCNT Valid
+ [19] cmpmark: Completion Marked Node Detected
+ [18:17] cmpcode[1:0]: Completion Code
+ [0=PDGONE/1=PERR/2=RFULL/3=TERM]
+ [16] cmpdet: Completion Detected
+ [15] wthdrwrcmtrsp: Waiting for HDR RWrCmtRsp
+ [14] wtlastwrcmtrsp: Waiting for LAST RESULT
+ RWrCmtRsp
+ [13] hdrwrreq: Waiting for HDR RWrReq
+ [12] wtrwrreq: Waiting for RWrReq
+ [11] wtwqwrreq: Waiting for WQWrReq issue
+ [10] lastprdrspeot: Last Packet-Rd Response EOT
+ [9] lastprdrsp: Last Packet-Rd Response
+ [8] wtprdrsp: Waiting for PRdRsp
+ [7] wtprdreq: Waiting for PRdReq Issue
+ [6] lastpdvld: PDPTR/PDLEN Valid
+ [5] pdvld: Packet Data Valid
+ [4] wqvld: WQVLD
+ [3] wqdone: WorkQueue Done condition
+ a) WQWrReq issued (for WQPTR<>0) OR
+ b) HDR RWrCmtRsp completed
+ [2] rwstf: Resultant write STF/P Mode
+ [1] pdldt: Packet-Data LDT mode
+ [0] gmode: Gather-Mode */
+#else
+ uint64_t sbd0 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_sbd_dbg0_s cn31xx;
+ struct cvmx_dfa_sbd_dbg0_s cn38xx;
+ struct cvmx_dfa_sbd_dbg0_s cn38xxp2;
+ struct cvmx_dfa_sbd_dbg0_s cn58xx;
+ struct cvmx_dfa_sbd_dbg0_s cn58xxp1;
+};
+typedef union cvmx_dfa_sbd_dbg0 cvmx_dfa_sbd_dbg0_t;
+
+/**
+ * cvmx_dfa_sbd_dbg1
+ *
+ * DFA_SBD_DBG1 = DFA Scoreboard Debug \#1 Register
+ *
+ * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's (undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+union cvmx_dfa_sbd_dbg1 {
+ uint64_t u64;
+ struct cvmx_dfa_sbd_dbg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t sbd1 : 64; /**< DFA ScoreBoard \#1 Data
+ For internal use only! (DFA Scoreboard Debug)
+ [63:61] wqptr[35:33]: Work Queue Pointer
+ [60:52] rptr[35:27]: Result Base Pointer
+ [51:16] pdptr[35:0]: Packet Data Pointer
+ [15:0] pdcnt[15:0]: Packet Data Counter */
+#else
+ uint64_t sbd1 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_sbd_dbg1_s cn31xx;
+ struct cvmx_dfa_sbd_dbg1_s cn38xx;
+ struct cvmx_dfa_sbd_dbg1_s cn38xxp2;
+ struct cvmx_dfa_sbd_dbg1_s cn58xx;
+ struct cvmx_dfa_sbd_dbg1_s cn58xxp1;
+};
+typedef union cvmx_dfa_sbd_dbg1 cvmx_dfa_sbd_dbg1_t;
+
+/**
+ * cvmx_dfa_sbd_dbg2
+ *
+ * DFA_SBD_DBG2 = DFA Scoreboard Debug \#2 Register
+ *
+ * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's (undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+union cvmx_dfa_sbd_dbg2 {
+ uint64_t u64;
+ struct cvmx_dfa_sbd_dbg2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t sbd2 : 64; /**< DFA ScoreBoard \#2 Data
+ [63:49] wqptr[17:3]: Work Queue Pointer
+ [48:16] rwptr[35:3]: Result Write Pointer
+ [15:0] prwcnt[15:0]: Pending Result Write Counter */
+#else
+ uint64_t sbd2 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_sbd_dbg2_s cn31xx;
+ struct cvmx_dfa_sbd_dbg2_s cn38xx;
+ struct cvmx_dfa_sbd_dbg2_s cn38xxp2;
+ struct cvmx_dfa_sbd_dbg2_s cn58xx;
+ struct cvmx_dfa_sbd_dbg2_s cn58xxp1;
+};
+typedef union cvmx_dfa_sbd_dbg2 cvmx_dfa_sbd_dbg2_t;
+
+/**
+ * cvmx_dfa_sbd_dbg3
+ *
+ * DFA_SBD_DBG3 = DFA Scoreboard Debug \#3 Register
+ *
+ * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's (undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+union cvmx_dfa_sbd_dbg3 {
+ uint64_t u64;
+ struct cvmx_dfa_sbd_dbg3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t sbd3 : 64; /**< DFA ScoreBoard \#3 Data
+ [63:49] wqptr[32:18]: Work Queue Pointer
+ [48:16] glptr[35:3]: Gather List Pointer
+ [15:0] glcnt[15:0]: Gather List Counter */
+#else
+ uint64_t sbd3 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_sbd_dbg3_s cn31xx;
+ struct cvmx_dfa_sbd_dbg3_s cn38xx;
+ struct cvmx_dfa_sbd_dbg3_s cn38xxp2;
+ struct cvmx_dfa_sbd_dbg3_s cn58xx;
+ struct cvmx_dfa_sbd_dbg3_s cn58xxp1;
+};
+typedef union cvmx_dfa_sbd_dbg3 cvmx_dfa_sbd_dbg3_t;
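+
+/**
+ * Given the lock-down protocol described above, a consistent multi-word
+ * snapshot looks roughly like the sketch below. The sbdlck/sbdnum field
+ * names on cvmx_dfa_ncbctl_t are assumptions inferred from the
+ * DFA_NCBCTL[SBDLCK]/[SBDNUM] references in the comments, following this
+ * file's lower-case field naming convention.
+ *
+ * @code
+ * cvmx_dfa_ncbctl_t ncbctl;
+ * ncbctl.u64 = cvmx_read_csr(CVMX_DFA_NCBCTL);
+ * ncbctl.s.sbdnum = 0;  // DTE engine whose scoreboard to observe (assumed field)
+ * ncbctl.s.sbdlck = 1;  // lock down DFA_SBD_DBG0..3 contents (assumed field)
+ * cvmx_write_csr(CVMX_DFA_NCBCTL, ncbctl.u64);
+ *
+ * uint64_t sbd0 = cvmx_read_csr(CVMX_DFA_SBD_DBG0);
+ * uint64_t sbd1 = cvmx_read_csr(CVMX_DFA_SBD_DBG1);
+ * uint64_t sbd2 = cvmx_read_csr(CVMX_DFA_SBD_DBG2);
+ * uint64_t sbd3 = cvmx_read_csr(CVMX_DFA_SBD_DBG3);
+ * @endcode
+ */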
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-dfa-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-dfa.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-dfa.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-dfa.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,123 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support library for the CN31XX, CN38XX, and CN58XX hardware DFA engine.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#include "executive-config.h"
+#ifdef CVMX_ENABLE_DFA_FUNCTIONS
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-fau.h"
+#include "cvmx-dfa.h"
+
+
+
+/**
+ * Initialize the DFA hardware before use
+ */
+int cvmx_dfa_initialize(void)
+{
+ cvmx_dfa_difctl_t control;
+ void *initial_base_address;
+ cvmx_dfa_state_t initial_state;
+ if (!octeon_has_feature(OCTEON_FEATURE_DFA))
+ {
+ cvmx_dprintf("ERROR: attempting to initialize DFA when no DFA hardware present\n.");
+ return -1;
+ }
+
+ control.u64 = 0;
+ control.s.dwbcnt = CVMX_FPA_DFA_POOL_SIZE / 128;
+ control.s.pool = CVMX_FPA_DFA_POOL;
+ control.s.size = (CVMX_FPA_DFA_POOL_SIZE - 8) / sizeof(cvmx_dfa_command_t);
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_DFA_DIFCTL, control.u64);
+
+ initial_base_address = cvmx_fpa_alloc(CVMX_FPA_DFA_POOL);
+
+ initial_state.u64 = 0;
+ initial_state.s.base_address_div16 = (CAST64(initial_base_address))/16;
+ cvmx_fau_atomic_write64(CVMX_FAU_DFA_STATE, initial_state.u64);
+
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_DFA_DIFRDPTR, cvmx_ptr_to_phys(initial_base_address));
+
+ return 0;
+}
+
+
+/**
+ * Shutdown and cleanup resources used by the DFA
+ */
+void cvmx_dfa_shutdown(void)
+{
+ void *final_base_address;
+ cvmx_dfa_state_t final_state;
+
+ CVMX_SYNCWS;
+
+ final_state.u64 = cvmx_fau_fetch_and_add64(CVMX_FAU_DFA_STATE, 0);
+
+ // make sure the carry is clear
+ final_base_address = CASTPTR(void, (final_state.s2.base_address_div32 * 32ull));
+
+ if (final_base_address)
+ {
+ cvmx_fpa_free(final_base_address, CVMX_FPA_DFA_POOL, 0);
+ }
+
+ CVMX_SYNCWS;
+ final_state.u64 = 0;
+ cvmx_fau_atomic_write64(CVMX_FAU_DFA_STATE, final_state.u64);
+}
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-dfa.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-dfa.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-dfa.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-dfa.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,803 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the CN31XX, CN38XX, and CN58XX hardware DFA engine.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_DFA_H__
+#define __CVMX_DFA_H__
+#include "cvmx-llm.h"
+#include "cvmx-wqe.h"
+#include "cvmx-fpa.h"
+
+#include "executive-config.h"
+#ifdef CVMX_ENABLE_DFA_FUNCTIONS
+#include "cvmx-config.h"
+#endif
+
+#define ENABLE_DEPRECATED /* Set to enable the old 18/36 bit names */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Maximum nodes available in a small encoding */
+#define CVMX_DFA_NODESM_MAX_NODES ((OCTEON_IS_MODEL(OCTEON_CN31XX)) ? 0x8000 : 0x20000)
+#define CVMX_DFA_NODESM_SIZE 512 /* Size of each node for small encoding */
+#define CVMX_DFA_NODELG_SIZE 1024 /* Size of each node for large encoding */
+#define CVMX_DFA_NODESM_LAST_TERMINAL (CVMX_DFA_NODESM_MAX_NODES-1)
+
+#ifdef ENABLE_DEPRECATED
+/* These defines are for compatibility with old code. They are deprecated. */
+#define CVMX_DFA_NODE18_SIZE CVMX_DFA_NODESM_SIZE
+#define CVMX_DFA_NODE36_SIZE CVMX_DFA_NODELG_SIZE
+#define CVMX_DFA_NODE18_MAX_NODES CVMX_DFA_NODESM_MAX_NODES
+#define CVMX_DFA_NODE18_LAST_TERMINAL CVMX_DFA_NODESM_LAST_TERMINAL
+#endif
+
+/**
+ * Which type of memory encoding is this graph using. Make sure you setup
+ * the LLM to match.
+ */
+typedef enum
+{
+ CVMX_DFA_GRAPH_TYPE_SM = 0,
+ CVMX_DFA_GRAPH_TYPE_LG = 1,
+#ifdef ENABLE_DEPRECATED
+ CVMX_DFA_GRAPH_TYPE_18b = 0, /* Deprecated */
+ CVMX_DFA_GRAPH_TYPE_36b = 1 /* Deprecated */
+#endif
+} cvmx_dfa_graph_type_t;
+
+/**
+ * The possible node types.
+ */
+typedef enum
+{
+ CVMX_DFA_NODE_TYPE_NORMAL = 0, /**< Node is a branch */
+ CVMX_DFA_NODE_TYPE_MARKED = 1, /**< Node is marked special */
+ CVMX_DFA_NODE_TYPE_TERMINAL = 2 /**< Node is a terminal leaf */
+} cvmx_dfa_node_type_t;
+
+/**
+ * The possible reasons the DFA stopped processing.
+ */
+typedef enum
+{
+ CVMX_DFA_STOP_REASON_DATA_GONE = 0, /**< DFA ran out of data */
+ CVMX_DFA_STOP_REASON_PARITY_ERROR = 1, /**< DFA encountered a memory error */
+ CVMX_DFA_STOP_REASON_FULL = 2, /**< DFA is full */
+ CVMX_DFA_STOP_REASON_TERMINAL = 3 /**< DFA hit a terminal */
+} cvmx_dfa_stop_reason_t;
+
+/**
+ * This format describes the DFA pointers in small mode
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t mbz :32;/**< Must be zero */
+ uint64_t p1 : 1;/**< Set if next_node1 is odd parity */
+ uint64_t next_node1 :15;/**< Next node if an odd character match */
+ uint64_t p0 : 1;/**< Set if next_node0 is odd parity */
+ uint64_t next_node0 :15;/**< Next node if an even character match */
+ } w32;
+ struct
+ {
+ uint64_t mbz :28;/**< Must be zero */
+ uint64_t p1 : 1;/**< Set if next_node1 is odd parity */
+ uint64_t next_node1 :17;/**< Next node if an odd character match */
+ uint64_t p0 : 1;/**< Set if next_node0 is odd parity */
+ uint64_t next_node0 :17;/**< Next node if an even character match */
+ } w36;
+ struct /**< This structure only applies starting in CN58XX and if DFA_CFG[NRPL_ENA] == 1 and IWORD0[NREPLEN] == 1. */
+ {
+ uint64_t mbz :28;/**< Must be zero */
+ uint64_t p1 : 1;/**< Set if next_node1 is odd parity */
+ uint64_t per_node_repl1 : 1;/**< enable for extra replication for next node (CN58XX) */
+ uint64_t next_node_repl1 : 2;/**< extra replication for next node (CN58XX) (if per_node_repl1 is set) */
+ uint64_t next_node1 :14;/**< Next node if an odd character match; must be < IWORD3[Msize] if per_node_repl1==1. */
+ uint64_t p0 : 1;/**< Set if next_node0 is odd parity */
+ uint64_t per_node_repl0 : 1;/**< enable for extra replication for next node (CN58XX) */
+ uint64_t next_node_repl0 : 2;/**< extra replication for next node (CN58XX) (if per_node_repl0 is set) */
+ uint64_t next_node0 :14;/**< Next node if an even character match; must be < IWORD3[Msize] if per_node_repl0==1. */
+ } w36nrepl_en; /**< use when next_node_repl[01] is 1. */
+ struct /**< this structure only applies starting in CN58XX and if DFA_CFG[NRPL_ENA] == 1 and IWORD0[NREPLEN] == 1. */
+ {
+ uint64_t mbz :28;/**< Must be zero */
+ uint64_t p1 : 1;/**< Set if next_node1 is odd parity */
+ uint64_t per_node_repl1 : 1;/**< enable for extra replication for next node (CN58XX) */
+ uint64_t next_node1 :16;/**< Next node if an odd character match, if per_node_repl1==0. */
+ uint64_t p0 : 1;/**< Set if next_node0 is odd parity */
+ uint64_t per_node_repl0 : 1;/**< enable for extra replication for next node (CN58XX) */
+ uint64_t next_node0 :16;/**< Next node if an even character match, if per_node_repl0==0. */
+ } w36nrepl_dis; /**< use when next_node_repl[01] is 0. */
+#if defined(ENABLE_DEPRECATED) && !OCTEON_IS_COMMON_BINARY()
+#if CVMX_COMPILED_FOR(OCTEON_CN31XX)
+ struct /**< @deprecated unnamed reference to members */
+ {
+ uint64_t mbz :32;/**< Must be zero */
+ uint64_t p1 : 1;/**< Set if next_node1 is odd parity */
+ uint64_t next_node1 :15;/**< Next node if an odd character match */
+ uint64_t p0 : 1;/**< Set if next_node0 is odd parity */
+ uint64_t next_node0 :15;/**< Next node if an even character match */
+ };
+#elif CVMX_COMPILED_FOR(OCTEON_CN38XX)
+ struct /**< @deprecated unnamed reference to members */
+ {
+ uint64_t mbz :28;/**< Must be zero */
+ uint64_t p1 : 1;/**< Set if next_node1 is odd parity */
+ uint64_t next_node1 :17;/**< Next node if an odd character match */
+ uint64_t p0 : 1;/**< Set if next_node0 is odd parity */
+ uint64_t next_node0 :17;/**< Next node if an even character match */
+ };
+#else
+ /* Other chips don't support the deprecated unnamed unions */
+#endif
+#endif
+} cvmx_dfa_node_next_sm_t;
+
+/**
+ * This format describes the DFA pointers in large mode
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t mbz :32;/**< Must be zero */
+ uint64_t ecc : 7;/**< ECC checksum on the rest of the bits */
+ cvmx_dfa_node_type_t type : 2;/**< Node type */
+ uint64_t mbz2 : 3;/**< Must be zero */
+ uint64_t next_node :20;/**< Next node */
+ } w32;
+ struct
+ {
+ uint64_t mbz :28;/**< Must be zero */
+ uint64_t ecc : 7;/**< ECC checksum on the rest of the bits */
+ cvmx_dfa_node_type_t type : 2;/**< Node type */
+ uint64_t extra_bits : 5;/**< bits copied to report (PASS3/CN58XX), Must be zero previously */
+ uint64_t next_node_repl : 2;/**< extra replication for next node (PASS3/CN58XX), Must be zero previously */
+ uint64_t next_node :20;/**< Next node ID, Note, combine with next_node_repl to use as start_node
+ for continuation, as in cvmx_dfa_node_next_lgb_t. */
+ } w36;
+#if defined(ENABLE_DEPRECATED) && !OCTEON_IS_COMMON_BINARY()
+#if CVMX_COMPILED_FOR(OCTEON_CN31XX)
+ struct /**< @deprecated unnamed reference to members */
+ {
+ uint64_t mbz :32;/**< Must be zero */
+ uint64_t ecc : 7;/**< ECC checksum on the rest of the bits */
+ cvmx_dfa_node_type_t type : 2;/**< Node type */
+ uint64_t mbz2 : 3;/**< Must be zero */
+ uint64_t next_node :20;/**< Next node */
+ };
+#elif CVMX_COMPILED_FOR(OCTEON_CN38XX)
+ struct /**< @deprecated unnamed reference to members */
+ {
+ uint64_t mbz :28;/**< Must be zero */
+ uint64_t ecc : 7;/**< ECC checksum on the rest of the bits */
+ cvmx_dfa_node_type_t type : 2;/**< Node type */
+ uint64_t extra_bits : 5;/**< bits copied to report (PASS3/CN58XX), Must be zero previously */
+ uint64_t next_node_repl : 2;/**< extra replication for next node (PASS3/CN58XX), Must be zero previously */
+ uint64_t next_node :20;/**< Next node ID, Note, combine with next_node_repl to use as start_node
+ for continuation, as in cvmx_dfa_node_next_lgb_t. */
+ };
+#else
+ /* Other chips don't support the deprecated unnamed unions */
+#endif
+#endif
+} cvmx_dfa_node_next_lg_t;
+
+/**
+ * This format describes the DFA pointers in large mode, another way
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t mbz :32;/**< Must be zero */
+ uint64_t ecc : 7;/**< ECC checksum on the rest of the bits */
+ uint64_t type_terminal : 1;/**< Node type */
+ uint64_t type_marked : 1;/**< Node type */
+ uint64_t mbz2 : 3;/**< Must be zero */
+ uint64_t next_node :20;/**< Next node */
+ } w32;
+ struct
+ {
+ uint64_t mbz :28;/**< Must be zero */
+ uint64_t ecc : 7;/**< ECC checksum on the rest of the bits */
+ uint64_t type_terminal : 1;/**< Node type */
+ uint64_t type_marked : 1;/**< Node type */
+ uint64_t extra_bits : 5;/**< bits copied to report (PASS3/CN58XX), Must be zero previously */
+ uint64_t next_node_id_and_repl :22;/**< Next node ID (and repl for PASS3/CN58XX or repl=0 if not),
+ use this as start node for continuation. */
+ } w36;
+#if defined(ENABLE_DEPRECATED) && !OCTEON_IS_COMMON_BINARY()
+#if CVMX_COMPILED_FOR(OCTEON_CN31XX)
+ struct /**< @deprecated unnamed reference to members */
+ {
+ uint64_t mbz :32;/**< Must be zero */
+ uint64_t ecc : 7;/**< ECC checksum on the rest of the bits */
+ uint64_t type_terminal : 1;/**< Node type */
+ uint64_t type_marked : 1;/**< Node type */
+ uint64_t mbz2 : 3;/**< Must be zero */
+ uint64_t next_node :20;/**< Next node */
+ };
+#elif CVMX_COMPILED_FOR(OCTEON_CN38XX)
+ struct /**< @deprecated unnamed reference to members */
+ {
+ uint64_t mbz :28;/**< Must be zero */
+ uint64_t ecc : 7;/**< ECC checksum on the rest of the bits */
+ uint64_t type_terminal : 1;/**< Node type */
+ uint64_t type_marked : 1;/**< Node type */
+ uint64_t extra_bits : 5;/**< bits copied to report (PASS3/CN58XX), Must be zero previously */
+ uint64_t next_node_id_and_repl :22;/**< Next node ID (and repl for PASS3/CN58XX or repl=0 if not),
+ use this as start node for continuation. */
+ };
+#else
+ /* Other chips don't support the deprecated unnamed unions */
+#endif
+#endif
+} cvmx_dfa_node_next_lgb_t;
+
+/**
+ * This format describes the DFA pointers in large mode
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t mbz :27;/**< Must be zero */
+ uint64_t x0 : 1;/**< XOR of the rest of the bits */
+ uint64_t reserved : 4;/**< Must be zero */
+ uint64_t data :32;/**< LLM Data */
+ } w32;
+ struct
+ {
+ uint64_t mbz :27;/**< Must be zero */
+ uint64_t x0 : 1;/**< XOR of the rest of the bits */
+ uint64_t data :36;/**< LLM Data */
+ } w36;
+#if defined(ENABLE_DEPRECATED) && !OCTEON_IS_COMMON_BINARY()
+#if CVMX_COMPILED_FOR(OCTEON_CN31XX)
+ struct /**< @deprecated unnamed reference to members */
+ {
+ uint64_t mbz :27;/**< Must be zero */
+ uint64_t x0 : 1;/**< XOR of the rest of the bits */
+ uint64_t reserved : 4;/**< Must be zero */
+ uint64_t data :32;/**< LLM Data */
+ };
+#elif CVMX_COMPILED_FOR(OCTEON_CN38XX)
+ struct /**< @deprecated unnamed reference to members */
+ {
+ uint64_t mbz :27;/**< Must be zero */
+ uint64_t x0 : 1;/**< XOR of the rest of the bits */
+ uint64_t data :36;/**< LLM Data */
+ };
+#else
+ /* Other chips don't support the deprecated unnamed unions */
+#endif
+#endif
+} cvmx_dfa_node_next_read_t;
+
+/**
+ * This structure defines the data format in the low-latency memory
+ */
+typedef union
+{
+ uint64_t u64;
+ cvmx_dfa_node_next_sm_t sm; /**< This format describes the DFA pointers in small mode */
+ cvmx_dfa_node_next_lg_t lg; /**< This format describes the DFA pointers in large mode */
+ cvmx_dfa_node_next_lgb_t lgb; /**< This format describes the DFA pointers in large mode, another way */
+ cvmx_dfa_node_next_read_t read; /**< This format describes the DFA pointers in large mode */
+#ifdef ENABLE_DEPRECATED
+ cvmx_dfa_node_next_sm_t s18; /**< Deprecated */
+ cvmx_dfa_node_next_lg_t s36; /**< Deprecated */
+ cvmx_dfa_node_next_lgb_t s36b; /**< Deprecated */
+#endif
+} cvmx_dfa_node_next_t;
+
+/**
+ * These structures define a DFA instruction
+ */
+typedef union
+{
+ uint64_t u64[4];
+ uint32_t u32;
+ struct
+ {
+ // WORD 0
+ uint64_t gxor : 8; /**< Graph XOR value (PASS3/CN58XX), Must be zero for other chips
+ or if DFA_CFG[GXOR_ENA] == 0. */
+ uint64_t nxoren : 1; /**< Node XOR enable (PASS3/CN58XX), Must be zero for other chips
+ or if DFA_CFG[NXOR_ENA] == 0. */
+ uint64_t nreplen : 1; /**< Node Replication mode enable (PASS3/CN58XX), Must be zero for other chips
+ or if DFA_CFG[NRPL_ENA] == 0 or IWORD0[Ty] == 0. */
+#if 0
+ uint64_t snrepl : 2; /**< Start_Node Replication (PASS3/CN58XX), Must be zero for other chips
+ or if DFA_CFG[NRPL_ENA] == 0 or IWORD0[Ty] == 0 or IWORD0[NREPLEN] == 0. */
+ uint64_t start_node_id : 20; /**< Node to start the walk from */
+#else
+ uint64_t start_node : 22; /**< Node to start the walk from, includes ID and snrepl, see notes above. */
+#endif
+
+ uint64_t unused02 : 2; /**< Must be zero */
+ cvmx_llm_replication_t replication : 2; /**< Type of memory replication to use */
+ uint64_t unused03 : 3; /**< Must be zero */
+ cvmx_dfa_graph_type_t type : 1; /**< Type of graph */
+ uint64_t unused04 : 4; /**< Must be zero */
+ uint64_t base : 20; /**< All tables start on 1KB boundary */
+
+ // WORD 1
+ uint64_t input_length : 16; /**< In bytes, # pointers in gather case */
+ uint64_t use_gather : 1; /**< Set to use gather */
+ uint64_t no_L2_alloc : 1; /**< Set to disable loading of the L2 cache by the DFA */
+ uint64_t full_block_write : 1; /**< If set, HW can write entire cache blocks @ result_ptr */
+ uint64_t little_endian : 1; /**< Affects only packet data, not instruction, gather list, or result */
+ uint64_t unused1 : 8; /**< Must be zero */
+ uint64_t data_ptr : 36; /**< Either directly points to data or the gather list. If gather list,
+ data_ptr<2:0> must be zero (i.e. 8B aligned) */
+ // WORD 2
+ uint64_t max_results : 16; /**< in 64-bit quantities, mbz for store */
+ uint64_t unused2 : 12; /**< Must be zero */
+ uint64_t result_ptr : 36; /**< must be 128 byte aligned */
+
+ // WORD 3
+ uint64_t tsize : 8; /**< tsize*256 is the number of terminal nodes for GRAPH_TYPE_SM */
+ uint64_t msize : 16; /**< msize is the number of marked nodes for GRAPH_TYPE_SM */
+ uint64_t unused3 : 4; /**< Must be zero */
+ uint64_t wq_ptr : 36; /**< 0 for no work queue entry creation */
+ } s;
+} cvmx_dfa_command_t;
+
+/**
+ * Format of the first result word written by the hardware.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ cvmx_dfa_stop_reason_t reas : 2;/**< Reason the DFA stopped */
+ uint64_t mbz :44;/**< Zero */
+ uint64_t last_marked : 1;/**< Set if the last entry written is marked */
+ uint64_t done : 1;/**< Set to 1 when the DFA completes */
+ uint64_t num_entries :16;/**< Number of result words written */
+ } s;
+} cvmx_dfa_result0_t;
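+
+/**
+ * The reason code is only meaningful once the hardware has set 'done'. A
+ * small classification helper built purely from the types above (the
+ * function name is hypothetical):
+ *
+ * @code
+ * static const char *cvmx_dfa_stop_str(cvmx_dfa_result0_t r0)
+ * {
+ *     switch ((cvmx_dfa_stop_reason_t)r0.s.reas)
+ *     {
+ *         case CVMX_DFA_STOP_REASON_DATA_GONE:    return "input exhausted";
+ *         case CVMX_DFA_STOP_REASON_PARITY_ERROR: return "memory parity error";
+ *         case CVMX_DFA_STOP_REASON_FULL:         return "result area full";
+ *         case CVMX_DFA_STOP_REASON_TERMINAL:     return "terminal node reached";
+ *     }
+ *     return "unknown";
+ * }
+ * @endcode
+ */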
+
+/**
+ * Format of the second result word and subsequent result words written by the hardware.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t byte_offset : 16; /**< Number of bytes consumed */
+ uint64_t extra_bits_high: 4; /**< If PASS3 or CN58XX and DFA_CFG[NRPL_ENA] == 1 and IWORD0[Ty] == 1,
+ then set to <27:24> of the last next-node pointer. Else set to 0x0. */
+ uint64_t prev_node : 20; /**< Index of the previous node */
+ uint64_t extra_bits_low : 2; /**< If PASS3 or CN58XX and DFA_CFG[NRPL_ENA] == 1 and IWORD0[Ty] == 1,
+ then set to <23:22> of the last next-node pointer. Else set to 0x0. */
+ uint64_t next_node_repl : 2; /**< If PASS3 or CN58XX and DFA_CFG[NRPL_ENA] == 1 and IWORD0[Ty] == 1, then set
+ to next_node_repl (<21:20>) of the last next-node pointer. Else set to 0x0. */
+ uint64_t current_node : 20; /**< Index of the current node */
+ } s;
+ struct
+ {
+ uint64_t byte_offset : 16; /**< Number of bytes consumed */
+ uint64_t extra_bits_high: 4; /**< If PASS3 or CN58XX and DFA_CFG[NRPL_ENA] == 1 and IWORD0[Ty] == 1,
+ then set to <27:24> of the last next-node pointer. Else set to 0x0. */
+ uint64_t prev_node : 20; /**< Index of the previous node */
+ uint64_t extra_bits_low : 2; /**< If PASS3 or CN58XX and DFA_CFG[NRPL_ENA] == 1 and IWORD0[Ty] == 1,
+ then set to <23:22> of the last next-node pointer. Else set to 0x0. */
+ uint64_t curr_id_and_repl:22; /**< Use this as start_node for continuation. */
+ } s2;
+} cvmx_dfa_result1_t;
+
+/**
+ * Abstract DFA graph
+ */
+typedef struct
+{
+ cvmx_llm_replication_t replication; /**< Level of memory replication to use. Must match the LLM setup */
+ cvmx_dfa_graph_type_t type; /**< Type of graph */
+ uint64_t base_address; /**< LLM start address of the graph */
+ union {
+ struct {
+ uint64_t gxor : 8; /**< Graph XOR value (PASS3/CN58XX), Must be zero for other chips
+ or if DFA_CFG[GXOR_ENA] == 0. */
+ uint64_t nxoren : 1; /**< Node XOR enable (PASS3/CN58XX), Must be zero for other chips
+ or if DFA_CFG[NXOR_ENA] == 0. */
+ uint64_t nreplen : 1; /**< Node Replication mode enable (PASS3/CN58XX), Must be zero for other chips
+ or if DFA_CFG[NRPL_ENA] == 0 or IWORD0[Ty] == 0. */
+ uint64_t snrepl : 2; /**< Start_Node Replication (PASS3/CN58XX), Must be zero for other chips
+ or if DFA_CFG[NRPL_ENA] == 0 or IWORD0[Ty] == 0 or IWORD0[NREPLEN] == 0.*/
+ uint64_t start_node_id : 20; /**< Start node index for the root of the graph */
+ };
+ uint32_t start_node; /**< Start node index for the root of the graph, incl. snrepl (PASS3/CN58XX)
+ NOTE: for backwards compatibility this name includes the
+ gxor, nxoren, nreplen, and snrepl fields, which will all be
+ zero in applications existing before the introduction of these
+ fields, so that existing applications do not need to change. */
+ };
+ int num_terminal_nodes; /**< Number of terminal nodes in the graph. Only needed for small graphs. */
+ int num_marked_nodes; /**< Number of marked nodes in the graph. Only needed for small graphs. */
+} cvmx_dfa_graph_t;
+
+/**
+ * DFA internal global state -- stored in 8 bytes of FAU
+ */
+typedef union
+{
+ uint64_t u64;
+ struct {
+#define CVMX_DFA_STATE_TICKET_BIT_POS 16
+#ifdef __BIG_ENDIAN_BITFIELD
+ // NOTE: must clear LSB of base_address_div16 due to ticket overflow
+ uint32_t base_address_div16; /**< Current DFA instruction queue chunk base address/16 (clear LSB). */
+ uint8_t ticket_loops; /**< bits [15:8] of total number of tickets requested. */
+ uint8_t ticket; /**< bits [7:0] of total number of tickets requested (current ticket held). */
+ // NOTE: index and now_serving are written together
+ uint8_t now_serving; /**< current ticket being served (or ready to be served). */
+ uint8_t index; /**< index into current chunk: (base_address_div16*16)[index] = next entry. */
+#else // NOTE: little endian mode probably won't work
+ uint8_t index;
+ uint8_t now_serving;
+ uint8_t ticket;
+ uint8_t ticket_loops;
+ uint32_t base_address_div16;
+#endif
+ } s;
+ struct { // a bitfield version of the same thing to extract base address while clearing carry.
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t base_address_div32 : 31; /**< Current DFA instruction queue chunk base address/32. */
+ uint64_t carry : 1; /**< Carry out from total_tickets. */
+ uint64_t total_tickets : 16; /**< Total tickets. */
+ uint64_t now_serving : 8 ; /**< current ticket being served (or ready to be served). */
+ uint64_t index : 8 ; /**< index into current chunk. */
+#else // NOTE: little endian mode probably won't work
+ uint64_t index : 8 ;
+ uint64_t now_serving : 8 ;
+ uint64_t total_tickets : 16;
+ uint64_t carry : 1;
+ uint64_t base_address_div32 : 31;
+#endif
+ } s2;
+} cvmx_dfa_state_t;
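+
+/**
+ * The s/s2 layouts above implement a ticket lock over the DFA instruction
+ * queue: each writer atomically takes a ticket via fetch-and-add on this
+ * packed word, then waits until now_serving reaches its ticket (see
+ * __cvmx_dfa_write_command() below). A stand-alone sketch of the same idea,
+ * using GCC __sync builtins instead of the FAU:
+ *
+ * @code
+ * typedef struct {
+ *     volatile uint8_t ticket;       // next ticket to hand out
+ *     volatile uint8_t now_serving;  // ticket currently allowed in
+ * } ticket_lock_t;
+ *
+ * static inline void ticket_lock(ticket_lock_t *l)
+ * {
+ *     uint8_t my = __sync_fetch_and_add(&l->ticket, 1);  // take a ticket
+ *     while (l->now_serving != my)
+ *         ;  // spin; the real code backs off with cvmx_wait()
+ * }
+ *
+ * static inline void ticket_unlock(ticket_lock_t *l)
+ * {
+ *     l->now_serving++;  // admit the next waiting ticket
+ * }
+ * @endcode
+ */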
+
+/* CSR typedefs have been moved to cvmx-dfa-defs.h */
+
+/**
+ * Write a small node edge to LLM.
+ *
+ * @param graph Graph to modify
+ * @param source_node
+ * Source node for this edge
+ * @param match_index
+ * Index into the node edge table. This is the match character/2.
+ * @param destination_node0
+ * Destination if the character matches (match_index*2).
+ * @param destination_node1
+ * Destination if the character matches (match_index*2+1).
+ */
+static inline void cvmx_dfa_write_edge_sm(const cvmx_dfa_graph_t *graph,
+ uint64_t source_node, uint64_t match_index,
+ uint64_t destination_node0, uint64_t destination_node1)
+{
+ cvmx_llm_address_t address;
+ cvmx_dfa_node_next_t next_ptr;
+
+ address.u64 = graph->base_address + source_node * CVMX_DFA_NODESM_SIZE + match_index * 4;
+
+ next_ptr.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ {
+ next_ptr.sm.w32.next_node0 = destination_node0;
+ next_ptr.sm.w32.p0 = cvmx_llm_parity(destination_node0);
+
+ next_ptr.sm.w32.next_node1 = destination_node1;
+ next_ptr.sm.w32.p1 = cvmx_llm_parity(destination_node1);
+ }
+ else
+ {
+ next_ptr.sm.w36.next_node0 = destination_node0;
+ next_ptr.sm.w36.p0 = cvmx_llm_parity(destination_node0);
+
+ next_ptr.sm.w36.next_node1 = destination_node1;
+ next_ptr.sm.w36.p1 = cvmx_llm_parity(destination_node1);
+ }
+
+ cvmx_llm_write36(address, next_ptr.u64, 0);
+}
+#ifdef ENABLE_DEPRECATED
+#define cvmx_dfa_write_edge18 cvmx_dfa_write_edge_sm
+#endif
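+
+/**
+ * Usage sketch: because each small-mode edge word holds two next-node
+ * pointers, 128 writes cover all 256 match characters. The loop below makes
+ * node 5 loop back to itself on every input byte ('graph' is a hypothetical,
+ * already initialized cvmx_dfa_graph_t):
+ *
+ * @code
+ * uint64_t match_index;
+ * for (match_index = 0; match_index < 128; match_index++)
+ *     cvmx_dfa_write_edge_sm(&graph, 5, match_index, 5, 5);
+ * @endcode
+ */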
+
+
+/**
+ * Write a large node edge to LLM.
+ *
+ * @param graph Graph to modify
+ * @param source_node
+ * Source node for this edge
+ * @param match Character to match before taking this edge.
+ * @param destination_node
+ * Destination node of the edge.
+ * @param destination_type
+ * Node type at the end of this edge.
+ */
+static inline void cvmx_dfa_write_node_lg(const cvmx_dfa_graph_t *graph,
+ uint64_t source_node, unsigned char match,
+ uint64_t destination_node, cvmx_dfa_node_type_t destination_type)
+{
+ cvmx_llm_address_t address;
+ cvmx_dfa_node_next_t next_ptr;
+
+ address.u64 = graph->base_address + source_node * CVMX_DFA_NODELG_SIZE + (uint64_t)match * 4;
+
+ next_ptr.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ {
+ next_ptr.lg.w32.type = destination_type;
+ next_ptr.lg.w32.next_node = destination_node;
+ next_ptr.lg.w32.ecc = cvmx_llm_ecc(next_ptr.u64);
+ }
+ else
+ {
+ next_ptr.lg.w36.type = destination_type;
+ next_ptr.lg.w36.next_node = destination_node;
+ next_ptr.lg.w36.ecc = cvmx_llm_ecc(next_ptr.u64);
+ }
+
+ cvmx_llm_write36(address, next_ptr.u64, 0);
+}
+#ifdef ENABLE_DEPRECATED
+#define cvmx_dfa_write_node36 cvmx_dfa_write_node_lg
+#endif
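+
+/**
+ * The large-mode writer takes one edge per match character. A hedged sketch
+ * that sends every byte from root node 0 to terminal node 1:
+ *
+ * @code
+ * unsigned ch;
+ * for (ch = 0; ch < 256; ch++)
+ *     cvmx_dfa_write_node_lg(&graph, 0, (unsigned char)ch, 1,
+ *                            CVMX_DFA_NODE_TYPE_TERMINAL);
+ * @endcode
+ */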
+
+/**
+ * Ring the DFA doorbell telling it that new commands are
+ * available.
+ *
+ * @param num_commands
+ * Number of new commands
+ */
+static inline void cvmx_dfa_write_doorbell(uint64_t num_commands)
+{
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_DFA_DBELL, num_commands);
+}
+
+/**
+ * @INTERNAL
+ * Write a new command to the DFA. Calls to this function
+ * are internally synchronized across all processors, and
+ * the doorbell is rung during this function.
+ *
+ * @param command Command to write
+ */
+
+#ifdef CVMX_ENABLE_DFA_FUNCTIONS
+static inline void __cvmx_dfa_write_command(cvmx_dfa_command_t *command)
+{
+ cvmx_dfa_state_t cvmx_dfa_state;
+ uint64_t my_ticket; // needs to wrap to 8 bits
+ uint64_t index;
+ cvmx_dfa_command_t *head;
+
+ CVMX_PREFETCH0(command);
+ // take a ticket.
+ cvmx_dfa_state.u64 = cvmx_fau_fetch_and_add64(CVMX_FAU_DFA_STATE, 1ull<<CVMX_DFA_STATE_TICKET_BIT_POS);
+ my_ticket = cvmx_dfa_state.s.ticket;
+
+ // see if it is our turn
+ while (my_ticket != cvmx_dfa_state.s.now_serving) {
+ int delta = my_ticket - cvmx_dfa_state.s.now_serving;
+ if (delta < 0) delta += 256;
+ cvmx_wait(10*delta); // reduce polling load on system
+ cvmx_dfa_state.u64 = cvmx_fau_fetch_and_add64(CVMX_FAU_DFA_STATE, 0); // poll for my_ticket==now_serving
+ }
+
+ // compute index and instruction queue head pointer
+ index = cvmx_dfa_state.s.index;
+
+ // NOTE: the DFA only supports 36-bit addressing
+ head = &((CASTPTR(cvmx_dfa_command_t, (cvmx_dfa_state.s2.base_address_div32 * 32ull))[index]));
+ head = (cvmx_dfa_command_t*)cvmx_phys_to_ptr(CAST64(head)); // NOTE: since we are not storing bit 63 of address, we must set it now
+
+ // copy the command to the instruction queue
+ *head++ = *command;
+
+ // check if a new chunk is needed
+ if (cvmx_unlikely((++index >= ((CVMX_FPA_DFA_POOL_SIZE-8)/sizeof(cvmx_dfa_command_t))))) {
+ uint64_t *new_base = (uint64_t*)cvmx_fpa_alloc(CVMX_FPA_DFA_POOL); // could make this async
+ if (new_base) {
+ // put the link into the instruction queue's "Next Chunk Buffer Ptr"
+ *(uint64_t *)head = cvmx_ptr_to_phys(new_base);
+ // update our state (note 32-bit write to not disturb other fields)
+ cvmx_fau_atomic_write32((cvmx_fau_reg_32_t)(CVMX_FAU_DFA_STATE + (CAST64(&cvmx_dfa_state.s.base_address_div16)-CAST64(&cvmx_dfa_state))),
+ (CAST64(new_base))/16);
+ }
+ else {
+ cvmx_dprintf("__cvmx_dfa_write_command: Out of memory. Expect crashes.\n");
+ }
+ index=0;
+ }
+
+ cvmx_dfa_write_doorbell(1);
+
+ // update index and now_serving in the DFA state FAU location (NOTE: this write16 updates two 8-bit values.)
+ // NOTE: my_ticket+1 carry out is lost due to write16 and index has already been wrapped to fit in uint8.
+ cvmx_fau_atomic_write16((cvmx_fau_reg_16_t)(CVMX_FAU_DFA_STATE+(CAST64(&cvmx_dfa_state.s.now_serving) - CAST64(&cvmx_dfa_state))),
+ ((my_ticket+1)<<8) | index);
+}
+
+
+/**
+ * Submit work to the DFA units for processing
+ *
+ * @param graph Graph to process
+ * @param start_node
+ * The node to start (or continue) walking from
+ * includes. start_node_id and snrepl (PASS3/CN58XX), but gxor,
+ * nxoren, and nreplen are taken from the graph structure
+ * @param input The input to match against
+ * @param input_length
+ * The length of the input in bytes
+ * @param use_gather
+ * The input and input_length are of a gather list
+ * @param is_little_endian
+ * Set to 1 if the input is in little endian format and must
+ * be swapped before compare.
+ * @param result Location the DFA should put the results in. This must be
+ * an area sized in multiples of a cache line.
+ * @param max_results
+ * The maximum number of 64-bit result1 words after result0.
+ * That is, "size of the result area in 64-bit words" - 1.
+ * max_results must be at least 1.
+ * @param work Work queue entry to submit when DFA completes. Can be NULL.
+ */
+static inline void cvmx_dfa_submit(const cvmx_dfa_graph_t *graph, int start_node,
+ void *input, int input_length, int use_gather, int is_little_endian,
+ cvmx_dfa_result0_t *result, int max_results, cvmx_wqe_t *work)
+{
+ cvmx_dfa_command_t command;
+
+ /* Make sure the result's first 64bit word is zero so we can tell when the
+ DFA is done. */
+ result->u64 = 0;
+
+ // WORD 0
+ command.u64[0] = 0;
+ command.s.gxor = graph->gxor; // (PASS3/CN58XX)
+ command.s.nxoren = graph->nxoren; // (PASS3/CN58XX)
+ command.s.nreplen = graph->nreplen; // (PASS3/CN58XX)
+ command.s.start_node = start_node; // includes snrepl (PASS3/CN58XX)
+ command.s.replication = graph->replication;
+ command.s.type = graph->type;
+ command.s.base = graph->base_address>>10;
+
+ // WORD 1
+ command.u64[1] = 0;
+ command.s.input_length = input_length;
+ command.s.use_gather = use_gather;
+ command.s.no_L2_alloc = 0;
+ command.s.full_block_write = 1;
+ command.s.little_endian = is_little_endian;
+ command.s.data_ptr = cvmx_ptr_to_phys(input);
+
+ // WORD 2
+ command.u64[2] = 0;
+ command.s.max_results = max_results;
+ command.s.result_ptr = cvmx_ptr_to_phys(result);
+
+ // WORD 3
+ command.u64[3] = 0;
+ if (graph->type == CVMX_DFA_GRAPH_TYPE_SM)
+ {
+ command.s.tsize = (graph->num_terminal_nodes + 255) / 256;
+ command.s.msize = graph->num_marked_nodes;
+ }
+ command.s.wq_ptr = cvmx_ptr_to_phys(work);
+
+ __cvmx_dfa_write_command(&command); // NOTE: this does synchronization and rings doorbell
+}
+#endif
+
+/**
+ * DFA gather list element
+ */
+typedef struct {
+ uint64_t length : 16; /**< length of piece of data at addr */
+ uint64_t reserved : 12; /**< reserved, set to 0 */
+ uint64_t addr : 36; /**< pointer to piece of data */
+} cvmx_dfa_gather_entry_t;
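+
+/**
+ * When IWORD1[use_gather] is set, data_ptr must point at an 8-byte-aligned
+ * array of these entries and input_length counts entries rather than bytes.
+ * A sketch describing two non-contiguous pieces (piece0/piece1 and their
+ * lengths are hypothetical):
+ *
+ * @code
+ * cvmx_dfa_gather_entry_t gather[2];
+ * gather[0].length = piece0_len;              // bytes at piece0
+ * gather[0].reserved = 0;
+ * gather[0].addr = cvmx_ptr_to_phys(piece0);  // physical address
+ * gather[1].length = piece1_len;
+ * gather[1].reserved = 0;
+ * gather[1].addr = cvmx_ptr_to_phys(piece1);
+ * // then: cvmx_dfa_submit(..., gather, 2, 1, ...) with use_gather=1
+ * @endcode
+ */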
+
+
+/**
+ * Check if a DFA has completed processing
+ *
+ * @param result_ptr Result area the DFA is using
+ * @return Non zero if the DFA is done
+ */
+static inline uint64_t cvmx_dfa_is_done(cvmx_dfa_result0_t *result_ptr)
+{
+ /* DFA sets the first result 64bit word to non zero when it's done */
+ return ((volatile cvmx_dfa_result0_t *)result_ptr)->s.done;
+}
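+
+/**
+ * Tying the pieces together, a minimal submit-and-poll sequence (assumes
+ * CVMX_ENABLE_DFA_FUNCTIONS, a successful cvmx_dfa_initialize(), and that
+ * 'graph', 'packet_data', and 'packet_len' already exist):
+ *
+ * @code
+ * // 128-byte (cache line) aligned result area: result0 plus up to 15
+ * // result1 words, matching max_results below.
+ * static cvmx_dfa_result0_t results[16] __attribute__((aligned(128)));
+ *
+ * cvmx_dfa_submit(&graph, graph.start_node, packet_data, packet_len,
+ *                 0, 0,         // use_gather=0, is_little_endian=0
+ *                 results, 15,  // max_results
+ *                 NULL);        // no work queue entry
+ *
+ * while (!cvmx_dfa_is_done(&results[0]))
+ *     ;  // spin; a real application would do other work or take the WQE
+ * @endcode
+ */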
+
+
+#ifdef CVMX_ENABLE_DFA_FUNCTIONS
+/**
+ * Initialize the DFA hardware before use
+ * Returns 0 on success, -1 on failure
+ */
+int cvmx_dfa_initialize(void);
+
+
+/**
+ * Shutdown and cleanup resources used by the DFA
+ */
+void cvmx_dfa_shutdown(void);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_DFA_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-dfa.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-dfm-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-dfm-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-dfm-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,3219 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-dfm-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon dfm.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_DFM_DEFS_H__
+#define __CVMX_DFM_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_CHAR_CTL CVMX_DFM_CHAR_CTL_FUNC()
+static inline uint64_t CVMX_DFM_CHAR_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_CHAR_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000220ull);
+}
+#else
+#define CVMX_DFM_CHAR_CTL (CVMX_ADD_IO_SEG(0x00011800D4000220ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_CHAR_MASK0 CVMX_DFM_CHAR_MASK0_FUNC()
+static inline uint64_t CVMX_DFM_CHAR_MASK0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_CHAR_MASK0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000228ull);
+}
+#else
+#define CVMX_DFM_CHAR_MASK0 (CVMX_ADD_IO_SEG(0x00011800D4000228ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_CHAR_MASK2 CVMX_DFM_CHAR_MASK2_FUNC()
+static inline uint64_t CVMX_DFM_CHAR_MASK2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_CHAR_MASK2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000238ull);
+}
+#else
+#define CVMX_DFM_CHAR_MASK2 (CVMX_ADD_IO_SEG(0x00011800D4000238ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_CHAR_MASK4 CVMX_DFM_CHAR_MASK4_FUNC()
+static inline uint64_t CVMX_DFM_CHAR_MASK4_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_CHAR_MASK4 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000318ull);
+}
+#else
+#define CVMX_DFM_CHAR_MASK4 (CVMX_ADD_IO_SEG(0x00011800D4000318ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_COMP_CTL2 CVMX_DFM_COMP_CTL2_FUNC()
+static inline uint64_t CVMX_DFM_COMP_CTL2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_COMP_CTL2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40001B8ull);
+}
+#else
+#define CVMX_DFM_COMP_CTL2 (CVMX_ADD_IO_SEG(0x00011800D40001B8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_CONFIG CVMX_DFM_CONFIG_FUNC()
+static inline uint64_t CVMX_DFM_CONFIG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_CONFIG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000188ull);
+}
+#else
+#define CVMX_DFM_CONFIG (CVMX_ADD_IO_SEG(0x00011800D4000188ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_CONTROL CVMX_DFM_CONTROL_FUNC()
+static inline uint64_t CVMX_DFM_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_CONTROL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000190ull);
+}
+#else
+#define CVMX_DFM_CONTROL (CVMX_ADD_IO_SEG(0x00011800D4000190ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_DLL_CTL2 CVMX_DFM_DLL_CTL2_FUNC()
+static inline uint64_t CVMX_DFM_DLL_CTL2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_DLL_CTL2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40001C8ull);
+}
+#else
+#define CVMX_DFM_DLL_CTL2 (CVMX_ADD_IO_SEG(0x00011800D40001C8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_DLL_CTL3 CVMX_DFM_DLL_CTL3_FUNC()
+static inline uint64_t CVMX_DFM_DLL_CTL3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_DLL_CTL3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000218ull);
+}
+#else
+#define CVMX_DFM_DLL_CTL3 (CVMX_ADD_IO_SEG(0x00011800D4000218ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_FCLK_CNT CVMX_DFM_FCLK_CNT_FUNC()
+static inline uint64_t CVMX_DFM_FCLK_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_FCLK_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40001E0ull);
+}
+#else
+#define CVMX_DFM_FCLK_CNT (CVMX_ADD_IO_SEG(0x00011800D40001E0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_FNT_BIST CVMX_DFM_FNT_BIST_FUNC()
+static inline uint64_t CVMX_DFM_FNT_BIST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_FNT_BIST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40007F8ull);
+}
+#else
+#define CVMX_DFM_FNT_BIST (CVMX_ADD_IO_SEG(0x00011800D40007F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_FNT_CTL CVMX_DFM_FNT_CTL_FUNC()
+static inline uint64_t CVMX_DFM_FNT_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_FNT_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000400ull);
+}
+#else
+#define CVMX_DFM_FNT_CTL (CVMX_ADD_IO_SEG(0x00011800D4000400ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_FNT_IENA CVMX_DFM_FNT_IENA_FUNC()
+static inline uint64_t CVMX_DFM_FNT_IENA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_FNT_IENA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000410ull);
+}
+#else
+#define CVMX_DFM_FNT_IENA (CVMX_ADD_IO_SEG(0x00011800D4000410ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_FNT_SCLK CVMX_DFM_FNT_SCLK_FUNC()
+static inline uint64_t CVMX_DFM_FNT_SCLK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_FNT_SCLK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000418ull);
+}
+#else
+#define CVMX_DFM_FNT_SCLK (CVMX_ADD_IO_SEG(0x00011800D4000418ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_FNT_STAT CVMX_DFM_FNT_STAT_FUNC()
+static inline uint64_t CVMX_DFM_FNT_STAT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_FNT_STAT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000408ull);
+}
+#else
+#define CVMX_DFM_FNT_STAT (CVMX_ADD_IO_SEG(0x00011800D4000408ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_IFB_CNT CVMX_DFM_IFB_CNT_FUNC()
+static inline uint64_t CVMX_DFM_IFB_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_IFB_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40001D0ull);
+}
+#else
+#define CVMX_DFM_IFB_CNT (CVMX_ADD_IO_SEG(0x00011800D40001D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_MODEREG_PARAMS0 CVMX_DFM_MODEREG_PARAMS0_FUNC()
+static inline uint64_t CVMX_DFM_MODEREG_PARAMS0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_MODEREG_PARAMS0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40001A8ull);
+}
+#else
+#define CVMX_DFM_MODEREG_PARAMS0 (CVMX_ADD_IO_SEG(0x00011800D40001A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_MODEREG_PARAMS1 CVMX_DFM_MODEREG_PARAMS1_FUNC()
+static inline uint64_t CVMX_DFM_MODEREG_PARAMS1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_MODEREG_PARAMS1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000260ull);
+}
+#else
+#define CVMX_DFM_MODEREG_PARAMS1 (CVMX_ADD_IO_SEG(0x00011800D4000260ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_OPS_CNT CVMX_DFM_OPS_CNT_FUNC()
+static inline uint64_t CVMX_DFM_OPS_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_OPS_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40001D8ull);
+}
+#else
+#define CVMX_DFM_OPS_CNT (CVMX_ADD_IO_SEG(0x00011800D40001D8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_PHY_CTL CVMX_DFM_PHY_CTL_FUNC()
+static inline uint64_t CVMX_DFM_PHY_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_PHY_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000210ull);
+}
+#else
+#define CVMX_DFM_PHY_CTL (CVMX_ADD_IO_SEG(0x00011800D4000210ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_RESET_CTL CVMX_DFM_RESET_CTL_FUNC()
+static inline uint64_t CVMX_DFM_RESET_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_RESET_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000180ull);
+}
+#else
+#define CVMX_DFM_RESET_CTL (CVMX_ADD_IO_SEG(0x00011800D4000180ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_RLEVEL_CTL CVMX_DFM_RLEVEL_CTL_FUNC()
+static inline uint64_t CVMX_DFM_RLEVEL_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_RLEVEL_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40002A0ull);
+}
+#else
+#define CVMX_DFM_RLEVEL_CTL (CVMX_ADD_IO_SEG(0x00011800D40002A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_RLEVEL_DBG CVMX_DFM_RLEVEL_DBG_FUNC()
+static inline uint64_t CVMX_DFM_RLEVEL_DBG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_RLEVEL_DBG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40002A8ull);
+}
+#else
+#define CVMX_DFM_RLEVEL_DBG (CVMX_ADD_IO_SEG(0x00011800D40002A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DFM_RLEVEL_RANKX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_DFM_RLEVEL_RANKX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800D4000280ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_DFM_RLEVEL_RANKX(offset) (CVMX_ADD_IO_SEG(0x00011800D4000280ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_RODT_MASK CVMX_DFM_RODT_MASK_FUNC()
+static inline uint64_t CVMX_DFM_RODT_MASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_RODT_MASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000268ull);
+}
+#else
+#define CVMX_DFM_RODT_MASK (CVMX_ADD_IO_SEG(0x00011800D4000268ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_SLOT_CTL0 CVMX_DFM_SLOT_CTL0_FUNC()
+static inline uint64_t CVMX_DFM_SLOT_CTL0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_SLOT_CTL0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40001F8ull);
+}
+#else
+#define CVMX_DFM_SLOT_CTL0 (CVMX_ADD_IO_SEG(0x00011800D40001F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_SLOT_CTL1 CVMX_DFM_SLOT_CTL1_FUNC()
+static inline uint64_t CVMX_DFM_SLOT_CTL1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_SLOT_CTL1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000200ull);
+}
+#else
+#define CVMX_DFM_SLOT_CTL1 (CVMX_ADD_IO_SEG(0x00011800D4000200ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_TIMING_PARAMS0 CVMX_DFM_TIMING_PARAMS0_FUNC()
+static inline uint64_t CVMX_DFM_TIMING_PARAMS0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_TIMING_PARAMS0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000198ull);
+}
+#else
+#define CVMX_DFM_TIMING_PARAMS0 (CVMX_ADD_IO_SEG(0x00011800D4000198ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_TIMING_PARAMS1 CVMX_DFM_TIMING_PARAMS1_FUNC()
+static inline uint64_t CVMX_DFM_TIMING_PARAMS1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_TIMING_PARAMS1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40001A0ull);
+}
+#else
+#define CVMX_DFM_TIMING_PARAMS1 (CVMX_ADD_IO_SEG(0x00011800D40001A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_WLEVEL_CTL CVMX_DFM_WLEVEL_CTL_FUNC()
+static inline uint64_t CVMX_DFM_WLEVEL_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_WLEVEL_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000300ull);
+}
+#else
+#define CVMX_DFM_WLEVEL_CTL (CVMX_ADD_IO_SEG(0x00011800D4000300ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_WLEVEL_DBG CVMX_DFM_WLEVEL_DBG_FUNC()
+static inline uint64_t CVMX_DFM_WLEVEL_DBG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_WLEVEL_DBG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D4000308ull);
+}
+#else
+#define CVMX_DFM_WLEVEL_DBG (CVMX_ADD_IO_SEG(0x00011800D4000308ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DFM_WLEVEL_RANKX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_DFM_WLEVEL_RANKX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800D40002B0ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_DFM_WLEVEL_RANKX(offset) (CVMX_ADD_IO_SEG(0x00011800D40002B0ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DFM_WODT_MASK CVMX_DFM_WODT_MASK_FUNC()
+static inline uint64_t CVMX_DFM_WODT_MASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_DFM_WODT_MASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800D40001B0ull);
+}
+#else
+#define CVMX_DFM_WODT_MASK (CVMX_ADD_IO_SEG(0x00011800D40001B0ull))
+#endif
+
+/**
+ * cvmx_dfm_char_ctl
+ *
+ * DFM_CHAR_CTL = DFM Characterization Control
+ * This register is an assortment of various control fields needed to characterize the DDR3 interface
+ *
+ * Notes:
+ * The DR bit applies to the DQ port
+ *
+ */
+union cvmx_dfm_char_ctl {
+ uint64_t u64;
+ struct cvmx_dfm_char_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t dr : 1; /**< Pattern at Data Rate (not Clock Rate) */
+ uint64_t skew_on : 1; /**< Skew adjacent bits */
+ uint64_t en : 1; /**< Enable characterization */
+ uint64_t sel : 1; /**< Pattern select
+ 0 = PRBS
+ 1 = Programmable pattern */
+ uint64_t prog : 8; /**< Programmable pattern */
+ uint64_t prbs : 32; /**< PRBS Polynomial */
+#else
+ uint64_t prbs : 32;
+ uint64_t prog : 8;
+ uint64_t sel : 1;
+ uint64_t en : 1;
+ uint64_t skew_on : 1;
+ uint64_t dr : 1;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_dfm_char_ctl_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63 : 22;
+ uint64_t en : 1; /**< Enable characterization */
+ uint64_t sel : 1; /**< Pattern select
+ 0 = PRBS
+ 1 = Programmable pattern */
+ uint64_t prog : 8; /**< Programmable pattern */
+ uint64_t prbs : 32; /**< PRBS Polynomial */
+#else
+ uint64_t prbs : 32;
+ uint64_t prog : 8;
+ uint64_t sel : 1;
+ uint64_t en : 1;
+ uint64_t reserved_42_63 : 22;
+#endif
+ } cn63xx;
+ struct cvmx_dfm_char_ctl_cn63xx cn63xxp1;
+ struct cvmx_dfm_char_ctl_s cn66xx;
+};
+typedef union cvmx_dfm_char_ctl cvmx_dfm_char_ctl_t;
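+
+/* Illustrative sketch (not part of the generated register definitions): one
+ * way the characterization controls above might be driven.  Assumes the
+ * SDK's generic cvmx_read_csr()/cvmx_write_csr() accessors are visible; the
+ * PRBS polynomial argument is a placeholder, not a recommended value. */
+static inline void example_dfm_char_prbs_enable(uint32_t prbs_poly)
+{
+	cvmx_dfm_char_ctl_t char_ctl;
+	char_ctl.u64 = cvmx_read_csr(CVMX_DFM_CHAR_CTL);
+	char_ctl.s.sel  = 0;		/* 0 = PRBS pattern source */
+	char_ctl.s.prbs = prbs_poly;	/* PRBS polynomial */
+	char_ctl.s.en   = 1;		/* enable characterization */
+	cvmx_write_csr(CVMX_DFM_CHAR_CTL, char_ctl.u64);
+}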
+
+/**
+ * cvmx_dfm_char_mask0
+ *
+ * DFM_CHAR_MASK0 = DFM Characterization Control Mask0
+ * This register is an assortment of various control fields needed to characterize the DDR3 interface
+ */
+union cvmx_dfm_char_mask0 {
+ uint64_t u64;
+ struct cvmx_dfm_char_mask0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mask : 16; /**< Mask for DQ0[15:0] */
+#else
+ uint64_t mask : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_dfm_char_mask0_s cn63xx;
+ struct cvmx_dfm_char_mask0_s cn63xxp1;
+ struct cvmx_dfm_char_mask0_s cn66xx;
+};
+typedef union cvmx_dfm_char_mask0 cvmx_dfm_char_mask0_t;
+
+/**
+ * cvmx_dfm_char_mask2
+ *
+ * DFM_CHAR_MASK2 = DFM Characterization Control Mask2
+ * This register is an assortment of various control fields needed to characterize the DDR3 interface
+ */
+union cvmx_dfm_char_mask2 {
+ uint64_t u64;
+ struct cvmx_dfm_char_mask2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mask : 16; /**< Mask for DQ1[15:0] */
+#else
+ uint64_t mask : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_dfm_char_mask2_s cn63xx;
+ struct cvmx_dfm_char_mask2_s cn63xxp1;
+ struct cvmx_dfm_char_mask2_s cn66xx;
+};
+typedef union cvmx_dfm_char_mask2 cvmx_dfm_char_mask2_t;
+
+/**
+ * cvmx_dfm_char_mask4
+ *
+ * DFM_CHAR_MASK4 = DFM Characterization Mask4
+ * This register is an assortment of various control fields needed to characterize the DDR3 interface
+ */
+union cvmx_dfm_char_mask4 {
+ uint64_t u64;
+ struct cvmx_dfm_char_mask4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t reset_n_mask : 1; /**< Mask for RESET_N */
+ uint64_t a_mask : 16; /**< Mask for A[15:0] */
+ uint64_t ba_mask : 3; /**< Mask for BA[2:0] */
+ uint64_t we_n_mask : 1; /**< Mask for WE_N */
+ uint64_t cas_n_mask : 1; /**< Mask for CAS_N */
+ uint64_t ras_n_mask : 1; /**< Mask for RAS_N */
+ uint64_t odt1_mask : 2; /**< Mask for ODT1
+ For DFM, ODT1 is reserved. */
+ uint64_t odt0_mask : 2; /**< Mask for ODT0 */
+ uint64_t cs1_n_mask : 2; /**< Mask for CS1_N
+ For DFM, CS1_N is reserved. */
+ uint64_t cs0_n_mask : 2; /**< Mask for CS0_N */
+ uint64_t cke_mask : 2; /**< Mask for CKE
+ For DFM, CKE_MASK[1] is reserved. */
+#else
+ uint64_t cke_mask : 2;
+ uint64_t cs0_n_mask : 2;
+ uint64_t cs1_n_mask : 2;
+ uint64_t odt0_mask : 2;
+ uint64_t odt1_mask : 2;
+ uint64_t ras_n_mask : 1;
+ uint64_t cas_n_mask : 1;
+ uint64_t we_n_mask : 1;
+ uint64_t ba_mask : 3;
+ uint64_t a_mask : 16;
+ uint64_t reset_n_mask : 1;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_dfm_char_mask4_s cn63xx;
+ struct cvmx_dfm_char_mask4_s cn66xx;
+};
+typedef union cvmx_dfm_char_mask4 cvmx_dfm_char_mask4_t;
+
+/**
+ * cvmx_dfm_comp_ctl2
+ *
+ * DFM_COMP_CTL2 = DFM Compensation control2
+ *
+ */
+union cvmx_dfm_comp_ctl2 {
+ uint64_t u64;
+ struct cvmx_dfm_comp_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ddr__ptune : 4; /**< DDR pctl from compensation circuit
+ The encoded value provides debug information for the
+ compensation impedance on P-pullup */
+ uint64_t ddr__ntune : 4; /**< DDR nctl from compensation circuit
+ The encoded value provides debug information for the
+ compensation impedance on N-pulldown */
+ uint64_t m180 : 1; /**< Cap impedance at 180 ohm (instead of 240 ohm) */
+ uint64_t byp : 1; /**< Bypass mode
+ Use compensation setting from PTUNE,NTUNE */
+ uint64_t ptune : 4; /**< PCTL impedance control in bypass mode */
+ uint64_t ntune : 4; /**< NCTL impedance control in bypass mode */
+ uint64_t rodt_ctl : 4; /**< NCTL RODT impedance control bits
+ 0000 = No ODT
+ 0001 = 20 ohm
+ 0010 = 30 ohm
+ 0011 = 40 ohm
+ 0100 = 60 ohm
+ 0101 = 120 ohm
+ 0110-1111 = Reserved */
+ uint64_t cmd_ctl : 4; /**< Drive strength control for CMD/A/RESET_N/CKE drivers
+ 0001 = 24 ohm
+ 0010 = 26.67 ohm
+ 0011 = 30 ohm
+ 0100 = 34.3 ohm
+ 0101 = 40 ohm
+ 0110 = 48 ohm
+ 0111 = 60 ohm
+ 0000,1000-1111 = Reserved */
+ uint64_t ck_ctl : 4; /**< Drive strength control for CK/CS_N/ODT drivers
+ 0001 = 24 ohm
+ 0010 = 26.67 ohm
+ 0011 = 30 ohm
+ 0100 = 34.3 ohm
+ 0101 = 40 ohm
+ 0110 = 48 ohm
+ 0111 = 60 ohm
+ 0000,1000-1111 = Reserved */
+ uint64_t dqx_ctl : 4; /**< Drive strength control for DQ/DQS drivers
+ 0001 = 24 ohm
+ 0010 = 26.67 ohm
+ 0011 = 30 ohm
+ 0100 = 34.3 ohm
+ 0101 = 40 ohm
+ 0110 = 48 ohm
+ 0111 = 60 ohm
+ 0000,1000-1111 = Reserved */
+#else
+ uint64_t dqx_ctl : 4;
+ uint64_t ck_ctl : 4;
+ uint64_t cmd_ctl : 4;
+ uint64_t rodt_ctl : 4;
+ uint64_t ntune : 4;
+ uint64_t ptune : 4;
+ uint64_t byp : 1;
+ uint64_t m180 : 1;
+ uint64_t ddr__ntune : 4;
+ uint64_t ddr__ptune : 4;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_dfm_comp_ctl2_s cn63xx;
+ struct cvmx_dfm_comp_ctl2_s cn63xxp1;
+ struct cvmx_dfm_comp_ctl2_s cn66xx;
+};
+typedef union cvmx_dfm_comp_ctl2 cvmx_dfm_comp_ctl2_t;
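+
+/* Sketch: reading back the compensation debug fields described above.  The
+ * DDR__PTUNE/DDR__NTUNE read-outs are informational; BYP=1 with PTUNE/NTUNE
+ * would override the compensation circuit.  cvmx_dprintf() and the CSR
+ * accessors are assumed from the SDK's cvmx headers. */
+static inline void example_dfm_comp_readout(void)
+{
+	cvmx_dfm_comp_ctl2_t ctl;
+	ctl.u64 = cvmx_read_csr(CVMX_DFM_COMP_CTL2);
+	cvmx_dprintf("DFM comp: ddr__ptune=%d ddr__ntune=%d\n",
+		     (int)ctl.s.ddr__ptune, (int)ctl.s.ddr__ntune);
+}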
+
+/**
+ * cvmx_dfm_config
+ *
+ * DFM_CONFIG = DFM Memory Configuration Register
+ *
+ * This register controls certain parameters of Memory Configuration
+ *
+ * Notes:
+ * a. The self refresh entry sequence(s) power the DLL up/down (depending on DFM_MODEREG_PARAMS[DLL])
+ * when DFM_CONFIG[SREF_WITH_DLL] is set
+ * b. Prior to the self-refresh exit sequence, DFM_MODEREG_PARAMS should be re-programmed (if needed) to the
+ * appropriate values
+ *
+ * DFM Bringup Sequence:
+ * 1. SW must ensure there are no pending DRAM transactions and that the DDR PLL and the DLL have been initialized.
+ * 2. Write DFM_COMP_CTL2, DFM_CONTROL, DFM_WODT_MASK, DFM_RODT_MASK, DFM_DUAL_MEMCFG, DFM_TIMING_PARAMS0, DFM_TIMING_PARAMS1,
+ * DFM_MODEREG_PARAMS0, DFM_MODEREG_PARAMS1, DFM_RESET_CTL (with DDR3RST=0), DFM_CONFIG (with INIT_START=0)
+ * with appropriate values, if necessary.
+ * 3. Wait 200us, then write DFM_RESET_CTL[DDR3RST] = 1.
+ * 4. Initialize all ranks at once by writing DFM_CONFIG[RANKMASK][n] = 1, DFM_CONFIG[INIT_STATUS][n] = 1, and DFM_CONFIG[INIT_START] = 1
+ * where n is a valid rank index for the specific board configuration.
+ * 5. for each rank n to be write-leveled [
+ * if auto write-leveling is desired [
+ * write DFM_CONFIG[RANKMASK][n] = 1, DFM_WLEVEL_CTL appropriately and DFM_CONFIG[INIT_START] = 1
+ * wait until DFM_WLEVEL_RANKn[STATUS] = 3
+ * ] else [
+ * write DFM_WLEVEL_RANKn with appropriate values
+ * ]
+ * ]
+ * 6. for each rank n to be read-leveled [
+ * if auto read-leveling is desired [
+ * write DFM_CONFIG[RANKMASK][n] = 1, DFM_RLEVEL_CTL appropriately and DFM_CONFIG[INIT_START] = 1
+ * wait until DFM_RLEVEL_RANKn[STATUS] = 3
+ * ] else [
+ * write DFM_RLEVEL_RANKn with appropriate values
+ * ]
+ * ]
+ */
+union cvmx_dfm_config {
+ uint64_t u64;
+ struct cvmx_dfm_config_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t early_unload_d1_r1 : 1; /**< Reserved */
+ uint64_t early_unload_d1_r0 : 1; /**< Reserved */
+ uint64_t early_unload_d0_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 1
+ reads.
+ The recommended EARLY_UNLOAD_D0_R1 value can be calculated
+ after the final DFM_RLEVEL_RANK1[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 1 (i.e. calculate maxset=MAX(DFM_RLEVEL_RANK1[BYTEi])
+ across all i), then set EARLY_UNLOAD_D0_R1
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */
+ uint64_t early_unload_d0_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 0
+ reads.
+ The recommended EARLY_UNLOAD_D0_R0 value can be calculated
+ after the final DFM_RLEVEL_RANK0[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 0 (i.e. calculate maxset=MAX(DFM_RLEVEL_RANK0[BYTEi])
+ across all i), then set EARLY_UNLOAD_D0_R0
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */
+ uint64_t init_status : 4; /**< Indicates status of initialization
+ INIT_STATUS[n] = 1 implies rank n has been initialized
+ SW must set necessary INIT_STATUS bits with the
+ same DFM_CONFIG write that initiates
+ power-up/init and self-refresh exit sequences
+ (if the required INIT_STATUS bits are not already
+ set before DFM initiates the sequence).
+ INIT_STATUS determines the chip-selects that assert
+ during refresh, ZQCS, and precharge power-down and
+ self-refresh entry/exit SEQUENCE's.
+ INIT_STATUS<3:2> must be zero. */
+ uint64_t mirrmask : 4; /**< Mask determining which ranks are address-mirrored.
+ MIRRMASK<n> = 1 means Rank n addresses are mirrored
+ for 0 <= n <= 1
+ A mirrored read/write has these differences:
+ - DDR_BA<1> is swapped with DDR_BA<0>
+ - DDR_A<8> is swapped with DDR_A<7>
+ - DDR_A<6> is swapped with DDR_A<5>
+ - DDR_A<4> is swapped with DDR_A<3>
+ MIRRMASK<3:2> must be zero.
+ When RANK_ENA=0, MIRRMASK<1> MBZ */
+ uint64_t rankmask : 4; /**< Mask to select rank to be leveled/initialized.
+ To write-level/read-level/initialize rank i, set RANKMASK<i>
+ RANK_ENA=1 RANK_ENA=0
+ RANKMASK<0> = CS0 CS0 and CS1
+ RANKMASK<1> = CS1 MBZ
+ For read/write leveling, each rank has to be leveled separately,
+ so RANKMASK should only have one bit set.
+ RANKMASK is not used during self-refresh entry/exit and
+ precharge power-down entry/exit instruction sequences.
+ RANKMASK<3:2> must be zero.
+ When RANK_ENA=0, RANKMASK<1> MBZ */
+ uint64_t rank_ena : 1; /**< RANK enable (for use with multiple ranks)
+ The RANK_ENA bit enables
+ the drive of the CS_N[1:0] and ODT_<1:0> pins differently based on the
+ (PBANK_LSB-1) address bit. */
+ uint64_t sref_with_dll : 1; /**< Self-refresh entry/exit write MR1 and MR2
+ When set, self-refresh entry and exit instruction sequences
+ write MR1 and MR2 (in all ranks). (The writes occur before
+ self-refresh entry, and after self-refresh exit.)
+ When clear, self-refresh entry and exit instruction sequences
+ do not write any registers in the DDR3 parts. */
+ uint64_t early_dqx : 1; /**< Send DQx signals one CK cycle earlier for the case when
+ the shortest DQx lines have a larger delay than the CK line */
+ uint64_t sequence : 3; /**< Instruction sequence that is run after a 0->1
+ transition on DFM_CONFIG[INIT_START]. Self-refresh entry and
+ precharge power-down entry and exit SEQUENCE's can also
+ be initiated automatically by hardware.
+ 0=power-up/init (RANKMASK used, MR0, MR1, MR2, and MR3 written)
+ 1=read-leveling (RANKMASK used, MR3 written)
+ 2=self-refresh entry (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
+ 3=self-refresh exit (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
+ 4=precharge power-down entry (all ranks participate)
+ 5=precharge power-down exit (all ranks participate)
+ 6=write-leveling (RANKMASK used, MR1 written)
+ 7=illegal
+ Precharge power-down entry and exit SEQUENCE's may
+ be automatically generated by the HW when IDLEPOWER!=0.
+ Self-refresh entry SEQUENCE's may be automatically
+ generated by hardware upon a chip warm or soft reset
+ sequence when DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
+ DFM writes the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 CSR field values
+ to the Mode registers in the DRAM parts (MR0, MR1, MR2, and MR3) as part of some of these sequences.
+ Refer to the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 descriptions for more details.
+ The DFR_CKE pin gets activated as part of power-up/init,
+ self-refresh exit, and precharge power-down exit sequences.
+ The DFR_CKE pin gets de-activated as part of self-refresh entry,
+ precharge power-down entry, or DRESET assertion.
+ If there are two consecutive power-up/init's without
+ a DRESET assertion between them, DFM asserts DFR_CKE as part of
+ the first power-up/init, and continues to assert DFR_CKE
+ through the remainder of the first and the second power-up/init.
+ If DFR_CKE deactivation and reactivation is needed for
+ a second power-up/init, a DRESET assertion is required
+ between the first and the second. */
+ uint64_t ref_zqcs_int : 19; /**< Refresh & ZQCS interval represented in \#of 512 fclk
+ increments. A Refresh sequence is triggered when bits
+ [24:18] are equal to 0, and a ZQCS sequence is triggered
+ when [36:18] are equal to 0.
+ Program [24:18] to RND-DN(tREFI/clkPeriod/512)
+ Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
+ that this value should always be greater than 32, to account for
+ resistor calibration delays.
+ 000_00000000_00000000: RESERVED
+ Max Refresh interval = 127 * 512 = 65024 fclks
+ Max ZQCS interval = (8*256*256-1) * 512 = 268434944 fclks ~ 335ms for a 1.25 ns clock
+ DFM_CONFIG[INIT_STATUS] determines which ranks receive
+ the REF / ZQCS. DFM does not send any refreshes / ZQCS's
+ when DFM_CONFIG[INIT_STATUS]=0. */
+ uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter,
+ and DFM_OPS_CNT, DFM_IFB_CNT, and DFM_FCLK_CNT
+ CSR's. SW should write this to a one, then re-write
+ it to a zero to cause the reset. */
+ uint64_t ecc_adr : 1; /**< Must be zero. */
+ uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after
+ having waited for 2^FORCEWRITE cycles. 0=disabled. */
+ uint64_t idlepower : 3; /**< Enter precharge power-down mode after the memory
+ controller has been idle for 2^(2+IDLEPOWER) cycles.
+ 0=disabled.
+ This field should only be programmed after initialization.
+ DFM_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
+ is disabled during the precharge power-down. */
+ uint64_t pbank_lsb : 4; /**< Physical bank address bit select
+ Encoding used to determine which memory address
+ bit position represents the rank(or bunk) bit used to enable 1(of 2)
+ ranks(via chip enables) supported by the DFM DDR3 interface.
+ Referring to the explanation for ROW_LSB, PBANK_LSB would be the ROW_LSB bit +
+ \#rowbits + \#rankbits.
+ PBANK_LSB
+ - 0: rank = mem_adr[24]
+ - 1: rank = mem_adr[25]
+ - 2: rank = mem_adr[26]
+ - 3: rank = mem_adr[27]
+ - 4: rank = mem_adr[28]
+ - 5: rank = mem_adr[29]
+ - 6: rank = mem_adr[30]
+ - 7: rank = mem_adr[31]
+ - 8-15: RESERVED
+ DESIGN NOTE: The DFM DDR3 memory bus is 16b wide, therefore DOES NOT
+ support standard 64b/72b DDR3 DIMM modules. The board designer should
+ populate the DFM DDR3 interface using either TWO x8bit DDR3 devices
+ (or a single x16bit device if available) to fully populate the 16b
+ DFM DDR3 data bus.
+ The DFM DDR3 memory controller supports either 1(or 2) rank(s) based
+ on how much total memory is desired for the DFA application. See
+ RANK_ENA CSR bit when enabling for dual-ranks.
+ SW NOTE:
+ 1) When RANK_ENA=0, SW must properly configure the PBANK_LSB to
+ reference upper unused memory address bits.
+ 2) When RANK_ENA=1 (dual ranks), SW must configure PBANK_LSB to
+ reference the uppermost address bit based on the total size
+ of the rank.
+ For example, for a DFM DDR3 memory populated using Samsung's k4b1g0846c-f7
+ 1Gb(256MB) (16M x 8 bit x 8 bank) DDR3 parts, the column address width = 10 and
+ the device row address width = 14b. The single x8bit device contains 128MB, and
+ requires TWO such parts to populate the DFM 16b DDR3 interface. This then yields
+ a total rank size = 256MB = 2^28.
+ For a single-rank configuration (RANK_ENA=0), SW would program PBANK_LSB>=3 to
+ select mem_adr[x] bits above the legal DFM address range for mem_adr[27:0]=256MB.
+ For a dual-rank configuration (RANK_ENA=1), SW would program PBANK_LSB=4 to select
+ rank=mem_adr[28] as the bit used to determine which 256MB rank (of 512MB total) to
+ access (via rank chip enables - see: DFM DDR3 CS0[1:0] pins for connection to
+ upper and lower rank). */
+ uint64_t row_lsb : 3; /**< Row Address bit select
+ Encoding used to determine which memory address
+ bit position represents the low order DDR ROW address.
+ The DFM memory address [31:4] which references octawords
+ needs to be translated to DRAM addresses (bnk,row,col,bunk)
+ mem_adr[31:4]:
+ 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | ROW[m:n] | COL[13:3] | BA
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ See:
+ BA[2:0]: mem_adr[6:4]
+ COL[13:0]: [mem_adr[17:7],3'd0]
+ NOTE: The extracted COL address is always 14b fixed size width,
+ and upper unused bits are ignored by the DRAM device.
+ ROW[15:0]: Extraction of ROW starting address bit is programmable,
+ and is dependent on the \#column bits supported by the DRAM device.
+ The actual starting bit of the ROW can actually span into the
+ high order bits of the COL[13:3] field described above.
+ ROW_LSB ROW[15:0]
+ --------------------------
+ - 0: mem_adr[26:11]
+ - 1: mem_adr[27:12]
+ - 2: mem_adr[28:13]
+ - 3: mem_adr[29:14]
+ - 4: mem_adr[30:15]
+ - 5: mem_adr[31:16]
+ 6,7: [1'b0, mem_adr[31:17]] For current DDR3 Jedec spec - UNSUPPORTED
+ For example, for Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
+ DDR3 parts, the column address width = 10. Therefore,
+ BA[2:0] = mem_adr[6:4] / COL[9:0] = [mem_adr[13:7],3'd0], and
+ we would want the row starting address to be extracted from mem_adr[14].
+ Therefore, a ROW_LSB=3, will extract the row from mem_adr[29:14]. */
+ uint64_t ecc_ena : 1; /**< Must be zero. */
+ uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory sequence that is
+ selected by DFM_CONFIG[SEQUENCE]. This register is a
+ oneshot and clears itself each time it is set. */
+#else
+ uint64_t init_start : 1;
+ uint64_t ecc_ena : 1;
+ uint64_t row_lsb : 3;
+ uint64_t pbank_lsb : 4;
+ uint64_t idlepower : 3;
+ uint64_t forcewrite : 4;
+ uint64_t ecc_adr : 1;
+ uint64_t reset : 1;
+ uint64_t ref_zqcs_int : 19;
+ uint64_t sequence : 3;
+ uint64_t early_dqx : 1;
+ uint64_t sref_with_dll : 1;
+ uint64_t rank_ena : 1;
+ uint64_t rankmask : 4;
+ uint64_t mirrmask : 4;
+ uint64_t init_status : 4;
+ uint64_t early_unload_d0_r0 : 1;
+ uint64_t early_unload_d0_r1 : 1;
+ uint64_t early_unload_d1_r0 : 1;
+ uint64_t early_unload_d1_r1 : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } s;
+ struct cvmx_dfm_config_s cn63xx;
+ struct cvmx_dfm_config_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63 : 9;
+ uint64_t init_status : 4; /**< Indicates status of initialization
+ INIT_STATUS[n] = 1 implies rank n has been initialized
+ SW must set necessary INIT_STATUS bits with the
+ same DFM_CONFIG write that initiates
+ power-up/init and self-refresh exit sequences
+ (if the required INIT_STATUS bits are not already
+ set before DFM initiates the sequence).
+ INIT_STATUS determines the chip-selects that assert
+ during refresh, ZQCS, and precharge power-down and
+ self-refresh entry/exit SEQUENCE's.
+ INIT_STATUS<3:2> must be zero. */
+ uint64_t mirrmask : 4; /**< Mask determining which ranks are address-mirrored.
+ MIRRMASK<n> = 1 means Rank n addresses are mirrored
+ for 0 <= n <= 1
+ A mirrored read/write has these differences:
+ - DDR_BA<1> is swapped with DDR_BA<0>
+ - DDR_A<8> is swapped with DDR_A<7>
+ - DDR_A<6> is swapped with DDR_A<5>
+ - DDR_A<4> is swapped with DDR_A<3>
+ MIRRMASK<3:2> must be zero.
+ When RANK_ENA=0, MIRRMASK<1> MBZ */
+ uint64_t rankmask : 4; /**< Mask to select rank to be leveled/initialized.
+ To write-level/read-level/initialize rank i, set RANKMASK<i>
+ RANK_ENA=1 RANK_ENA=0
+ RANKMASK<0> = CS0 CS0 and CS1
+ RANKMASK<1> = CS1 MBZ
+ For read/write leveling, each rank has to be leveled separately,
+ so RANKMASK should only have one bit set.
+ RANKMASK is not used during self-refresh entry/exit and
+ precharge power-down entry/exit instruction sequences.
+ RANKMASK<3:2> must be zero.
+ When RANK_ENA=0, RANKMASK<1> MBZ */
+ uint64_t rank_ena : 1; /**< RANK enable (for use with multiple ranks)
+ The RANK_ENA bit enables
+ the drive of the CS_N[1:0] and ODT_<1:0> pins differently based on the
+ (PBANK_LSB-1) address bit. */
+ uint64_t sref_with_dll : 1; /**< Self-refresh entry/exit write MR1 and MR2
+ When set, self-refresh entry and exit instruction sequences
+ write MR1 and MR2 (in all ranks). (The writes occur before
+ self-refresh entry, and after self-refresh exit.)
+ When clear, self-refresh entry and exit instruction sequences
+ do not write any registers in the DDR3 parts. */
+ uint64_t early_dqx : 1; /**< Send DQx signals one CK cycle earlier for the case when
+ the shortest DQx lines have a larger delay than the CK line */
+ uint64_t sequence : 3; /**< Instruction sequence that is run after a 0->1
+ transition on DFM_CONFIG[INIT_START]. Self-refresh entry and
+ precharge power-down entry and exit SEQUENCE's can also
+ be initiated automatically by hardware.
+ 0=power-up/init (RANKMASK used, MR0, MR1, MR2, and MR3 written)
+ 1=read-leveling (RANKMASK used, MR3 written)
+ 2=self-refresh entry (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
+ 3=self-refresh exit (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
+ 4=precharge power-down entry (all ranks participate)
+ 5=precharge power-down exit (all ranks participate)
+ 6=write-leveling (RANKMASK used, MR1 written)
+ 7=illegal
+ Precharge power-down entry and exit SEQUENCE's may
+ be automatically generated by the HW when IDLEPOWER!=0.
+ Self-refresh entry SEQUENCE's may be automatically
+ generated by hardware upon a chip warm or soft reset
+ sequence when DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
+ DFM writes the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 CSR field values
+ to the Mode registers in the DRAM parts (MR0, MR1, MR2, and MR3) as part of some of these sequences.
+ Refer to the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 descriptions for more details.
+ The DFR_CKE pin gets activated as part of power-up/init,
+ self-refresh exit, and precharge power-down exit sequences.
+ The DFR_CKE pin gets de-activated as part of self-refresh entry,
+ precharge power-down entry, or DRESET assertion.
+ If there are two consecutive power-up/init's without
+ a DRESET assertion between them, DFM asserts DFR_CKE as part of
+ the first power-up/init, and continues to assert DFR_CKE
+ through the remainder of the first and the second power-up/init.
+ If DFR_CKE deactivation and reactivation is needed for
+ a second power-up/init, a DRESET assertion is required
+ between the first and the second. */
+ uint64_t ref_zqcs_int : 19; /**< Refresh & ZQCS interval represented in \#of 512 fclk
+ increments. A Refresh sequence is triggered when bits
+ [24:18] are equal to 0, and a ZQCS sequence is triggered
+ when [36:18] are equal to 0.
+ Program [24:18] to RND-DN(tREFI/clkPeriod/512)
+ Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
+ that this value should always be greater than 32, to account for
+ resistor calibration delays.
+ 000_00000000_00000000: RESERVED
+ Max Refresh interval = 127 * 512 = 65024 fclks
+ Max ZQCS interval = (8*256*256-1) * 512 = 268434944 fclks ~ 335ms for a 1.25 ns clock
+ DFM_CONFIG[INIT_STATUS] determines which ranks receive
+ the REF / ZQCS. DFM does not send any refreshes / ZQCS's
+ when DFM_CONFIG[INIT_STATUS]=0. */
+ uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter,
+ and DFM_OPS_CNT, DFM_IFB_CNT, and DFM_FCLK_CNT
+ CSR's. SW should write this to a one, then re-write
+ it to a zero to cause the reset. */
+ uint64_t ecc_adr : 1; /**< Must be zero. */
+ uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after
+ having waited for 2^FORCEWRITE cycles. 0=disabled. */
+ uint64_t idlepower : 3; /**< Enter precharge power-down mode after the memory
+ controller has been idle for 2^(2+IDLEPOWER) cycles.
+ 0=disabled.
+ This field should only be programmed after initialization.
+ DFM_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
+ is disabled during the precharge power-down. */
+ uint64_t pbank_lsb : 4; /**< Physical bank address bit select
+ Encoding used to determine which memory address
+ bit position represents the rank(or bunk) bit used to enable 1(of 2)
+ ranks(via chip enables) supported by the DFM DDR3 interface.
+ Referring to the explanation for ROW_LSB, PBANK_LSB would be the ROW_LSB bit +
+ \#rowbits + \#rankbits.
+ PBANK_LSB
+ - 0: rank = mem_adr[24]
+ - 1: rank = mem_adr[25]
+ - 2: rank = mem_adr[26]
+ - 3: rank = mem_adr[27]
+ - 4: rank = mem_adr[28]
+ - 5: rank = mem_adr[29]
+ - 6: rank = mem_adr[30]
+ - 7: rank = mem_adr[31]
+ - 8-15: RESERVED
+ DESIGN NOTE: The DFM DDR3 memory bus is 16b wide, therefore DOES NOT
+ support standard 64b/72b DDR3 DIMM modules. The board designer should
+ populate the DFM DDR3 interface using either TWO x8bit DDR3 devices
+ (or a single x16bit device if available) to fully populate the 16b
+ DFM DDR3 data bus.
+ The DFM DDR3 memory controller supports either 1(or 2) rank(s) based
+ on how much total memory is desired for the DFA application. See
+ RANK_ENA CSR bit when enabling for dual-ranks.
+ SW NOTE:
+ 1) When RANK_ENA=0, SW must properly configure the PBANK_LSB to
+ reference upper unused memory address bits.
+ 2) When RANK_ENA=1 (dual ranks), SW must configure PBANK_LSB to
+ reference the uppermost address bit based on the total size
+ of the rank.
+ For example, for a DFM DDR3 memory populated using Samsung's k4b1g0846c-f7
+ 1Gb(256MB) (16M x 8 bit x 8 bank) DDR3 parts, the column address width = 10 and
+ the device row address width = 14b. The single x8bit device contains 128MB, and
+ requires TWO such parts to populate the DFM 16b DDR3 interface. This then yields
+ a total rank size = 256MB = 2^28.
+ For a single-rank configuration (RANK_ENA=0), SW would program PBANK_LSB>=3 to
+ select mem_adr[x] bits above the legal DFM address range for mem_adr[27:0]=256MB.
+ For a dual-rank configuration (RANK_ENA=1), SW would program PBANK_LSB=4 to select
+ rank=mem_adr[28] as the bit used to determine which 256MB rank (of 512MB total) to
+ access (via rank chip enables - see: DFM DDR3 CS0[1:0] pins for connection to
+ upper and lower rank). */
+ uint64_t row_lsb : 3; /**< Row Address bit select
+ Encoding used to determine which memory address
+ bit position represents the low order DDR ROW address.
+ The DFM memory address [31:4] which references octawords
+ needs to be translated to DRAM addresses (bnk,row,col,bunk)
+ mem_adr[31:4]:
+ 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | ROW[m:n] | COL[13:3] | BA
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ See:
+ BA[2:0]: mem_adr[6:4]
+ COL[13:0]: [mem_adr[17:7],3'd0]
+ NOTE: The extracted COL address is always 14b fixed size width,
+ and upper unused bits are ignored by the DRAM device.
+ ROW[15:0]: Extraction of ROW starting address bit is programmable,
+ and is dependent on the \#column bits supported by the DRAM device.
+ The actual starting bit of the ROW can actually span into the
+ high order bits of the COL[13:3] field described above.
+ ROW_LSB ROW[15:0]
+ --------------------------
+ - 0: mem_adr[26:11]
+ - 1: mem_adr[27:12]
+ - 2: mem_adr[28:13]
+ - 3: mem_adr[29:14]
+ - 4: mem_adr[30:15]
+ - 5: mem_adr[31:16]
+ 6,7: [1'b0, mem_adr[31:17]] For current DDR3 Jedec spec - UNSUPPORTED
+ For example, for Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
+ DDR3 parts, the column address width = 10. Therefore,
+ BA[2:0] = mem_adr[6:4] / COL[9:0] = [mem_adr[13:7],3'd0], and
+ we would want the row starting address to be extracted from mem_adr[14].
+ Therefore, a ROW_LSB=3, will extract the row from mem_adr[29:14]. */
+ uint64_t ecc_ena : 1; /**< Must be zero. */
+ uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory sequence that is
+ selected by DFM_CONFIG[SEQUENCE]. This register is a
+ oneshot and clears itself each time it is set. */
+#else
+ uint64_t init_start : 1;
+ uint64_t ecc_ena : 1;
+ uint64_t row_lsb : 3;
+ uint64_t pbank_lsb : 4;
+ uint64_t idlepower : 3;
+ uint64_t forcewrite : 4;
+ uint64_t ecc_adr : 1;
+ uint64_t reset : 1;
+ uint64_t ref_zqcs_int : 19;
+ uint64_t sequence : 3;
+ uint64_t early_dqx : 1;
+ uint64_t sref_with_dll : 1;
+ uint64_t rank_ena : 1;
+ uint64_t rankmask : 4;
+ uint64_t mirrmask : 4;
+ uint64_t init_status : 4;
+ uint64_t reserved_55_63 : 9;
+#endif
+ } cn63xxp1;
+ struct cvmx_dfm_config_s cn66xx;
+};
+typedef union cvmx_dfm_config cvmx_dfm_config_t;
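+
+/* Sketch of step 4 of the bringup sequence from the DFM_CONFIG notes above:
+ * kick off the power-up/init sequence on rank 0 of a single-rank board.
+ * Assumes steps 1-3 (CSR programming, 200us wait, DDR3RST release) already
+ * ran, and that the SDK's CSR accessors are visible.  Field values are
+ * illustrative, not board-validated settings. */
+static inline void example_dfm_init_rank0(void)
+{
+	cvmx_dfm_config_t cfg;
+	cfg.u64 = cvmx_read_csr(CVMX_DFM_CONFIG);
+	cfg.s.sequence    = 0;	/* 0 = power-up/init */
+	cfg.s.rankmask    = 1;	/* initialize rank 0 */
+	cfg.s.init_status = 1;	/* mark rank 0 initialized in the same write */
+	cfg.s.init_start  = 1;	/* 0->1 transition starts the sequence */
+	cvmx_write_csr(CVMX_DFM_CONFIG, cfg.u64);
+}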
+
+/**
+ * cvmx_dfm_control
+ *
+ * DFM_CONTROL = DFM Control
+ * This register is an assortment of various control fields needed by the memory controller
+ */
+union cvmx_dfm_control {
+ uint64_t u64;
+ struct cvmx_dfm_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t rodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a
+ RD cmd is delayed an additional DCLK cycle. */
+ uint64_t wodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a
+ WR cmd is delayed an additional DCLK cycle. */
+ uint64_t bprch : 2; /**< Back Porch Enable: When set, the turn-on time for
+ the default DDR_DQ/DQS drivers is delayed an additional BPRCH FCLK
+ cycles.
+ 00 = 0 fclks
+ 01 = 1 fclks
+ 10 = 2 fclks
+ 11 = 3 fclks */
+ uint64_t ext_zqcs_dis : 1; /**< Disable (external) auto-zqcs calibration
+ When clear, DFM runs external ZQ calibration */
+ uint64_t int_zqcs_dis : 1; /**< Disable (internal) auto-zqcs calibration
+ When counter is re-enabled, ZQCS is run immediately,
+ and then every DFM_CONFIG[REF_ZQCS_INT] fclk cycles. */
+ uint64_t auto_fclkdis : 1; /**< When 1, DFM will automatically shut off its internal
+ clock to conserve power when there is no traffic. Note
+ that this has no effect on the DDR3 PHY and pad clocks. */
+ uint64_t xor_bank : 1; /**< Must be zero. */
+ uint64_t max_write_batch : 4; /**< Must be set to value 8 */
+ uint64_t nxm_write_en : 1; /**< Must be zero. */
+ uint64_t elev_prio_dis : 1; /**< Must be zero. */
+ uint64_t inorder_wr : 1; /**< Must be zero. */
+ uint64_t inorder_rd : 1; /**< Must be zero. */
+ uint64_t throttle_wr : 1; /**< When set, use at most one IFB for writes
+ THROTTLE_RD and THROTTLE_WR must be the same value. */
+ uint64_t throttle_rd : 1; /**< When set, use at most one IFB for reads
+ THROTTLE_RD and THROTTLE_WR must be the same value. */
+ uint64_t fprch2 : 2; /**< Front Porch Enable: When set, the turn-off
+ time for the default DDR_DQ/DQS drivers is FPRCH2 fclks earlier.
+ 00 = 0 fclks
+ 01 = 1 fclks
+ 10 = 2 fclks
+ 11 = RESERVED */
+ uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR3.
+ This bit should be set in conjunction with DFM_MODEREG_PARAMS[AL] */
+ uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 cycle window for CMD and
+ address. This mode helps relieve setup time pressure
+ on the Address and command bus which nominally have
+ a very large fanout. Please refer to Micron's tech
+ note tn_47_01 titled "DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems" for physical details. */
+ uint64_t bwcnt : 1; /**< Bus utilization counter Clear.
+ Clears the DFM_OPS_CNT, DFM_IFB_CNT, and
+ DFM_FCLK_CNT registers. SW should first write this
+ field to a one, then write this field to a zero to
+ clear the CSR's. */
+ uint64_t rdimm_ena : 1; /**< Must be zero. */
+#else
+ uint64_t rdimm_ena : 1;
+ uint64_t bwcnt : 1;
+ uint64_t ddr2t : 1;
+ uint64_t pocas : 1;
+ uint64_t fprch2 : 2;
+ uint64_t throttle_rd : 1;
+ uint64_t throttle_wr : 1;
+ uint64_t inorder_rd : 1;
+ uint64_t inorder_wr : 1;
+ uint64_t elev_prio_dis : 1;
+ uint64_t nxm_write_en : 1;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t auto_fclkdis : 1;
+ uint64_t int_zqcs_dis : 1;
+ uint64_t ext_zqcs_dis : 1;
+ uint64_t bprch : 2;
+ uint64_t wodt_bprch : 1;
+ uint64_t rodt_bprch : 1;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_dfm_control_s cn63xx;
+ struct cvmx_dfm_control_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t bprch : 2; /**< Back Porch Enable: When set, the turn-on time for
+ the default DDR_DQ/DQS drivers is delayed an additional BPRCH FCLK
+ cycles.
+ 00 = 0 fclks
+ 01 = 1 fclks
+ 10 = 2 fclks
+ 11 = 3 fclks */
+ uint64_t ext_zqcs_dis : 1; /**< Disable (external) auto-zqcs calibration
+ When clear, DFM runs external ZQ calibration */
+ uint64_t int_zqcs_dis : 1; /**< Disable (internal) auto-zqcs calibration
+ When counter is re-enabled, ZQCS is run immediately,
+ and then every DFM_CONFIG[REF_ZQCS_INT] fclk cycles. */
+ uint64_t auto_fclkdis : 1; /**< When 1, DFM will automatically shut off its internal
+ clock to conserve power when there is no traffic. Note
+ that this has no effect on the DDR3 PHY and pad clocks. */
+ uint64_t xor_bank : 1; /**< Must be zero. */
+ uint64_t max_write_batch : 4; /**< Must be set to value 8 */
+ uint64_t nxm_write_en : 1; /**< Must be zero. */
+ uint64_t elev_prio_dis : 1; /**< Must be zero. */
+ uint64_t inorder_wr : 1; /**< Must be zero. */
+ uint64_t inorder_rd : 1; /**< Must be zero. */
+ uint64_t throttle_wr : 1; /**< When set, use at most one IFB for writes
+ THROTTLE_RD and THROTTLE_WR must be the same value. */
+ uint64_t throttle_rd : 1; /**< When set, use at most one IFB for reads
+ THROTTLE_RD and THROTTLE_WR must be the same value. */
+ uint64_t fprch2 : 2; /**< Front Porch Enable: When set, the turn-off
+ time for the default DDR_DQ/DQS drivers is FPRCH2 fclks earlier.
+ 00 = 0 fclks
+ 01 = 1 fclks
+ 10 = 2 fclks
+ 11 = RESERVED */
+ uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR3.
+ This bit should be set in conjunction with DFM_MODEREG_PARAMS[AL] */
+ uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 cycle window for CMD and
+ address. This mode helps relieve setup time pressure
+ on the Address and command bus which nominally have
+ a very large fanout. Please refer to Micron's tech
+ note tn_47_01 titled "DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems" for physical details. */
+ uint64_t bwcnt : 1; /**< Bus utilization counter Clear.
+ Clears the DFM_OPS_CNT, DFM_IFB_CNT, and
+ DFM_FCLK_CNT registers. SW should first write this
+ field to a one, then write this field to a zero to
+ clear the CSR's. */
+ uint64_t rdimm_ena : 1; /**< Must be zero. */
+#else
+ uint64_t rdimm_ena : 1;
+ uint64_t bwcnt : 1;
+ uint64_t ddr2t : 1;
+ uint64_t pocas : 1;
+ uint64_t fprch2 : 2;
+ uint64_t throttle_rd : 1;
+ uint64_t throttle_wr : 1;
+ uint64_t inorder_rd : 1;
+ uint64_t inorder_wr : 1;
+ uint64_t elev_prio_dis : 1;
+ uint64_t nxm_write_en : 1;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t auto_fclkdis : 1;
+ uint64_t int_zqcs_dis : 1;
+ uint64_t ext_zqcs_dis : 1;
+ uint64_t bprch : 2;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } cn63xxp1;
+ struct cvmx_dfm_control_s cn66xx;
+};
+typedef union cvmx_dfm_control cvmx_dfm_control_t;
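+
+/* Sketch: the BWCNT write-one-then-zero pulse described above, used to clear
+ * DFM_OPS_CNT/DFM_IFB_CNT/DFM_FCLK_CNT before sampling them.  CSR accessor
+ * names are assumed from the SDK headers. */
+static inline void example_dfm_clear_perf_counters(void)
+{
+	cvmx_dfm_control_t ctl;
+	ctl.u64 = cvmx_read_csr(CVMX_DFM_CONTROL);
+	ctl.s.bwcnt = 1;	/* first write: request the clear */
+	cvmx_write_csr(CVMX_DFM_CONTROL, ctl.u64);
+	ctl.s.bwcnt = 0;	/* second write: release the clear */
+	cvmx_write_csr(CVMX_DFM_CONTROL, ctl.u64);
+}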
+
+/**
+ * cvmx_dfm_dll_ctl2
+ *
+ * DFM_DLL_CTL2 = DFM (Octeon) DLL control and FCLK reset
+ *
+ *
+ * Notes:
+ * DLL Bringup sequence:
+ * 1. If not done already, set DFM_DLL_CTL2 = 0, leaving DFM_DLL_CTL2[DRESET] = 1.
+ * 2. Write 1 to DFM_DLL_CTL2[DLL_BRINGUP]
+ * 3. Wait for 10 FCLK cycles, then write 1 to DFM_DLL_CTL2[QUAD_DLL_ENA]. It may not be feasible to count 10 FCLK cycles, but the
+ * idea is to configure the delay line into DLL mode by asserting DLL_BRINGUP earlier than QUAD_DLL_ENA, even if it is one
+ * cycle early. DFM_DLL_CTL2[QUAD_DLL_ENA] must not change after this point without restarting the DFM and/or DRESET initialization
+ * sequence.
+ * 4. Read L2D_BST0 and wait for the result. (L2D_BST0 is subject to change depending on how it is called in o63. It is still OK to
+ * skip step 4, since step 5 allows enough time.)
+ * 5. Wait 10 us.
+ * 6. Write 0 to DFM_DLL_CTL2[DLL_BRINGUP]. DFM_DLL_CTL2[DLL_BRINGUP] must not change after this point without restarting the DFM
+ * and/or DRESET initialization sequence.
+ * 7. Read L2D_BST0 and wait for the result. (Same as step 4, but the idea here is to wait some time before going to step 8; even
+ * one cycle is fine.)
+ * 8. Write 0 to DFM_DLL_CTL2[DRESET]. DFM_DLL_CTL2[DRESET] must not change after this point without restarting the DFM and/or
+ * DRESET initialization sequence.
+ */
+union cvmx_dfm_dll_ctl2 {
+ uint64_t u64;
+ struct cvmx_dfm_dll_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t dll_bringup : 1; /**< DLL Bringup */
+ uint64_t dreset : 1; /**< Fclk domain reset. The reset signal that is used by the
+ Fclk domain is (DRESET || ECLK_RESET). */
+ uint64_t quad_dll_ena : 1; /**< DLL Enable */
+ uint64_t byp_sel : 4; /**< Bypass select
+ 0000 : no byte
+ 0001 : byte 0
+ - ...
+ 1001 : byte 8
+ 1010 : all bytes
+ 1011-1111 : Reserved */
+ uint64_t byp_setting : 8; /**< Bypass setting
+ DDR3-1600: 00100010
+ DDR3-1333: 00110010
+ DDR3-1066: 01001011
+ DDR3-800 : 01110101
+ DDR3-667 : 10010110
+ DDR3-600 : 10101100 */
+#else
+ uint64_t byp_setting : 8;
+ uint64_t byp_sel : 4;
+ uint64_t quad_dll_ena : 1;
+ uint64_t dreset : 1;
+ uint64_t dll_bringup : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_dfm_dll_ctl2_s cn63xx;
+ struct cvmx_dfm_dll_ctl2_s cn63xxp1;
+ struct cvmx_dfm_dll_ctl2_s cn66xx;
+};
+typedef union cvmx_dfm_dll_ctl2 cvmx_dfm_dll_ctl2_t;
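+
+/* Sketch of the DLL bringup sequence from the notes above, with the two
+ * L2D_BST0 read-backs (steps 4 and 7) replaced by short delays, which the
+ * notes permit.  Assumes step 1 already left DRESET=1 and everything else 0,
+ * and that cvmx_wait_usec() and the CSR accessors are visible. */
+static inline void example_dfm_dll_bringup(void)
+{
+	cvmx_dfm_dll_ctl2_t ctl;
+	ctl.u64 = cvmx_read_csr(CVMX_DFM_DLL_CTL2);
+	ctl.s.dll_bringup = 1;		/* step 2 */
+	cvmx_write_csr(CVMX_DFM_DLL_CTL2, ctl.u64);
+	ctl.s.quad_dll_ena = 1;		/* step 3 */
+	cvmx_write_csr(CVMX_DFM_DLL_CTL2, ctl.u64);
+	cvmx_wait_usec(10);		/* steps 4-5: wait 10 us */
+	ctl.s.dll_bringup = 0;		/* step 6 */
+	cvmx_write_csr(CVMX_DFM_DLL_CTL2, ctl.u64);
+	cvmx_wait_usec(1);		/* step 7: brief settle time */
+	ctl.s.dreset = 0;		/* step 8: release the Fclk domain reset */
+	cvmx_write_csr(CVMX_DFM_DLL_CTL2, ctl.u64);
+}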
+
+/**
+ * cvmx_dfm_dll_ctl3
+ *
+ * DFM_DLL_CTL3 = DFM DLL control and FCLK reset
+ *
+ */
+union cvmx_dfm_dll_ctl3 {
+ uint64_t u64;
+ struct cvmx_dfm_dll_ctl3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t dll_fast : 1; /**< DLL lock
+ 0 = DLL locked */
+ uint64_t dll90_setting : 8; /**< Encoded DLL settings. Works in conjunction with
+ DLL90_BYTE_SEL */
+ uint64_t fine_tune_mode : 1; /**< Fine Tune Mode */
+ uint64_t dll_mode : 1; /**< DLL Mode */
+ uint64_t dll90_byte_sel : 4; /**< Observe DLL settings for selected byte
+ 0001 : byte 0
+ - ...
+ 1001 : byte 8
+ 0000,1010-1111 : Reserved */
+ uint64_t offset_ena : 1; /**< Offset enable
+ 0 = disable
+ 1 = enable */
+ uint64_t load_offset : 1; /**< Load offset
+ 0 : disable
+ 1 : load (generates a 1 cycle pulse to the PHY)
+ This register is oneshot and clears itself each time
+ it is set */
+ uint64_t mode_sel : 2; /**< Mode select
+ 00 : reset
+ 01 : write
+ 10 : read
+ 11 : write & read */
+ uint64_t byte_sel : 4; /**< Byte select
+ 0000 : no byte
+ 0001 : byte 0
+ - ...
+ 1001 : byte 8
+ 1010 : all bytes
+ 1011-1111 : Reserved */
+ uint64_t offset : 6; /**< Write/read offset setting
+ [4:0] : offset
+ [5] : 0 = increment, 1 = decrement
+ Not a 2's complement value */
+#else
+ uint64_t offset : 6;
+ uint64_t byte_sel : 4;
+ uint64_t mode_sel : 2;
+ uint64_t load_offset : 1;
+ uint64_t offset_ena : 1;
+ uint64_t dll90_byte_sel : 4;
+ uint64_t dll_mode : 1;
+ uint64_t fine_tune_mode : 1;
+ uint64_t dll90_setting : 8;
+ uint64_t dll_fast : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_dfm_dll_ctl3_s cn63xx;
+ struct cvmx_dfm_dll_ctl3_s cn63xxp1;
+ struct cvmx_dfm_dll_ctl3_s cn66xx;
+};
+typedef union cvmx_dfm_dll_ctl3 cvmx_dfm_dll_ctl3_t;
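+
+/* Sketch: applying a manual read offset to one byte lane with the fields
+ * above.  OFFSET is sign-magnitude (bit [5] = decrement), and LOAD_OFFSET is
+ * a self-clearing oneshot that pulses the setting into the PHY.  The
+ * byte_sel encoding (0001 = byte 0) is taken from the field notes; argument
+ * names and accessors are illustrative/assumed. */
+static inline void example_dfm_load_read_offset(unsigned byte, unsigned magnitude, int decrement)
+{
+	cvmx_dfm_dll_ctl3_t ctl;
+	ctl.u64 = cvmx_read_csr(CVMX_DFM_DLL_CTL3);
+	ctl.s.offset_ena = 1;
+	ctl.s.mode_sel   = 2;			/* 10 = read */
+	ctl.s.byte_sel   = byte + 1;		/* 0001 = byte 0, ... */
+	ctl.s.offset     = (magnitude & 0x1f) | (decrement ? 0x20 : 0);
+	cvmx_write_csr(CVMX_DFM_DLL_CTL3, ctl.u64);
+	ctl.s.load_offset = 1;			/* oneshot pulse to the PHY */
+	cvmx_write_csr(CVMX_DFM_DLL_CTL3, ctl.u64);
+}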
+
+/**
+ * cvmx_dfm_fclk_cnt
+ *
+ * DFM_FCLK_CNT = Performance Counters
+ *
+ */
+union cvmx_dfm_fclk_cnt {
+ uint64_t u64;
+ struct cvmx_dfm_fclk_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fclkcnt : 64; /**< Performance Counter that counts fclks
+ 64-bit counter. */
+#else
+ uint64_t fclkcnt : 64;
+#endif
+ } s;
+ struct cvmx_dfm_fclk_cnt_s cn63xx;
+ struct cvmx_dfm_fclk_cnt_s cn63xxp1;
+ struct cvmx_dfm_fclk_cnt_s cn66xx;
+};
+typedef union cvmx_dfm_fclk_cnt cvmx_dfm_fclk_cnt_t;
+
+/**
+ * cvmx_dfm_fnt_bist
+ *
+ * DFM_FNT_BIST = DFM Front BIST Status
+ *
+ * This register contains Bist Status for DFM Front
+ */
+union cvmx_dfm_fnt_bist {
+ uint64_t u64;
+ struct cvmx_dfm_fnt_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t cab : 1; /**< Bist Results for CAB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t mrq : 1; /**< Bist Results for MRQ RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t mff : 1; /**< Bist Results for MFF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t rpb : 1; /**< Bist Results for RPB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t mwb : 1; /**< Bist Results for MWB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t mwb : 1;
+ uint64_t rpb : 1;
+ uint64_t mff : 1;
+ uint64_t mrq : 1;
+ uint64_t cab : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_dfm_fnt_bist_s cn63xx;
+ struct cvmx_dfm_fnt_bist_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mrq : 1; /**< Bist Results for MRQ RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t mff : 1; /**< Bist Results for MFF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t rpb : 1; /**< Bist Results for RPB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t mwb : 1; /**< Bist Results for MWB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t mwb : 1;
+ uint64_t rpb : 1;
+ uint64_t mff : 1;
+ uint64_t mrq : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn63xxp1;
+ struct cvmx_dfm_fnt_bist_s cn66xx;
+};
+typedef union cvmx_dfm_fnt_bist cvmx_dfm_fnt_bist_t;
+
+/**
+ * cvmx_dfm_fnt_ctl
+ *
+ * Specify the RSL base addresses for the block
+ *
+ * DFM_FNT_CTL = DFM Front Control Register
+ *
+ * This register contains control registers for the DFM Front Section of Logic.
+ */
+union cvmx_dfm_fnt_ctl {
+ uint64_t u64;
+ struct cvmx_dfm_fnt_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t sbe_ena : 1; /**< If SBE_ENA=1 & RECC_ENA=1 then all single bit errors
+ which have been detected/corrected during GWALK reads,
+ will be reported through RWORD0[REA]=ERR code in system
+ memory at the conclusion of the DFA instruction.
+ SWNOTE: The application user may wish to report single
+ bit errors that were corrected through the
+ RWORD0[REA]=ERR codeword.
+ NOTE: This DOES NOT affect the reporting of SBEs in
+ DFM_FNT_STAT[SBE] (which were corrected if RECC_ENA=1).
+ This bit is only here for applications which 'MAY' want
+ to be alerted with an ERR completion code if there were
+ SBEs that were auto-corrected during GWALK instructions.
+ Recap: If there is an SBE and SBE_ENA==1, the "err" field
+ in the data returned to DFA will be set. If SBE_ENA==0,
+ the "err" is always 0 when there is an SBE; however,
+ regardless of SBE_ENA, DBE will cause "err" to be 1. */
+ uint64_t wecc_ena : 1; /**< If WECC_ENA=1, HW will auto-generate(overwrite) the 10b
+ OWECC codeword during Memory Writes sourced by
+ 1) DFA MLOAD instructions, or by 2) NCB-Direct CSR
+ mode writes to DFA memory space. The HW will insert
+ the 10b OWECC inband into OW-DATA[127:118].
+ If WECC_ENA=0, SW is responsible for generating the
+ 10b OWECC codeword inband in the upper OW-data[127:118]
+ during Memory writes (to provide SEC/DED coverage for
+ the data during subsequent Memory reads-see RECC_ENA). */
+ uint64_t recc_ena : 1; /**< If RECC_ENA=1, all DFA memory reads sourced by 1) DFA
+ GWALK instructions or by 2) NCB-Direct CSR mode reads
+ to DFA memory space, will be protected by an inband 10b
+ OWECC SEC/DED codeword. The inband OW-DATA[127:118]
+ represents the inband OWECC codeword which offers single
+ bit error correction(SEC)/double bit error detection(DED).
+ [see also DFM_FNT_STAT[SBE,DBE,FADR,FSYN] status fields].
+ The FSYN field contains an encoded value which determines
+ which bit was corrected(for SBE) or detected(for DBE) to
+ help in bit isolation of the error.
+ SW NOTE: If RECC_ENA=1: An NCB-Direct CSR mode read of the
+ upper QW in memory will return ZEROES in the upper 10b of the
+ data word.
+ If RECC_ENA=0: An NCB-Direct CSR mode read of the upper QW in
+ memory will return the RAW 64bits from memory. During memory
+ debug, writing RECC_ENA=0 provides visibility into the raw ECC
+ stored in memory at that time. */
+ uint64_t dfr_ena : 1; /**< DFM Memory Interface Enable
+ The DFM powers up with the DDR3 interface disabled.
+ If the DFA function is required, then after poweron
+ software configures a stable DFM DDR3 memory clock
+ (see: LMCx_DDR_PLL_CTL[DFM_PS_EN, DFM_DIV_RESET]),
+ the DFM DDR3 memory interface can be enabled.
+ When disabled (DFR_ENA=0), all DFM DDR3 memory
+ output and bidirectional pins will be tristated.
+ SW NOTE: The DFR_ENA=1 write MUST occur sometime after
+ the DFM is brought out of reset (ie: after the
+ DFM_DLL_CTL2[DRESET]=0 write). */
+#else
+ uint64_t dfr_ena : 1;
+ uint64_t recc_ena : 1;
+ uint64_t wecc_ena : 1;
+ uint64_t sbe_ena : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_dfm_fnt_ctl_s cn63xx;
+ struct cvmx_dfm_fnt_ctl_s cn63xxp1;
+ struct cvmx_dfm_fnt_ctl_s cn66xx;
+};
+typedef union cvmx_dfm_fnt_ctl cvmx_dfm_fnt_ctl_t;
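+
+/* Sketch: enabling the DFM DDR3 interface with hardware OWECC generation and
+ * checking, per the field notes above.  Must come after the
+ * DFM_DLL_CTL2[DRESET]=0 write; the PLL/clock-divider setup mentioned above
+ * is out of scope here.  CSR accessor names are assumed from the SDK headers. */
+static inline void example_dfm_fnt_enable(void)
+{
+	cvmx_dfm_fnt_ctl_t fnt;
+	fnt.u64 = cvmx_read_csr(CVMX_DFM_FNT_CTL);
+	fnt.s.wecc_ena = 1;	/* HW inserts the inband 10b OWECC on writes */
+	fnt.s.recc_ena = 1;	/* check/correct OWECC on reads */
+	fnt.s.dfr_ena  = 1;	/* un-tristate the DDR3 interface pins */
+	cvmx_write_csr(CVMX_DFM_FNT_CTL, fnt.u64);
+}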
+
+/**
+ * cvmx_dfm_fnt_iena
+ *
+ * DFM_FNT_IENA = DFM Front Interrupt Enable Mask
+ *
+ * This register contains error interrupt enable information for the DFM Front Section of Logic.
+ */
+union cvmx_dfm_fnt_iena {
+ uint64_t u64;
+ struct cvmx_dfm_fnt_iena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t dbe_intena : 1; /**< OWECC Double Error Detected(DED) Interrupt Enable
+ When set, the memory controller raises a processor
+ interrupt on detecting an uncorrectable double bit
+ OWECC during a memory read. */
+ uint64_t sbe_intena : 1; /**< OWECC Single Error Corrected(SEC) Interrupt Enable
+ When set, the memory controller raises a processor
+ interrupt on detecting a correctable single bit
+ OWECC error which was corrected during a memory
+ read. */
+#else
+ uint64_t sbe_intena : 1;
+ uint64_t dbe_intena : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_dfm_fnt_iena_s cn63xx;
+ struct cvmx_dfm_fnt_iena_s cn63xxp1;
+ struct cvmx_dfm_fnt_iena_s cn66xx;
+};
+typedef union cvmx_dfm_fnt_iena cvmx_dfm_fnt_iena_t;
+
+/**
+ * cvmx_dfm_fnt_sclk
+ *
+ * DFM_FNT_SCLK = DFM Front SCLK Control Register
+ *
+ * This register contains control registers for the DFM Front Section of Logic.
+ * NOTE: This register is in the USCLK domain and is used to enable the conditional SCLK grid, as well as
+ * to start a software BiST sequence for the DFM sub-block. (Note: the DFM has conditional clocks, which
+ * prevent BiST from running automatically under reset.)
+ */
+union cvmx_dfm_fnt_sclk {
+ uint64_t u64;
+ struct cvmx_dfm_fnt_sclk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t clear_bist : 1; /**< When START_BIST is written 0->1, if CLEAR_BIST=1, all
+ previous BiST state is cleared.
+ NOTES:
+ 1) CLEAR_BIST must be written to 1 before START_BIST
+ is written to 1 using a separate CSR write.
+ 2) CLEAR_BIST must not be changed after writing START_BIST
+ 0->1 until the BIST operation completes. */
+ uint64_t bist_start : 1; /**< When software writes BIST_START=0->1, a BiST is executed
+ for the DFM sub-block.
+ NOTES:
+ 1) This bit should only be written after BOTH sclk
+ and fclk have been enabled by software and are stable
+ (see: DFM_FNT_SCLK[SCLKDIS] and instructions on how to
+ enable the DFM DDR3 memory (fclk) - which requires LMC
+ PLL init, DFM clock divider and proper DFM DLL
+ initialization sequence). */
+ uint64_t sclkdis : 1; /**< DFM sclk disable Source
+ When SET, the DFM sclk are disabled (to conserve overall
+ chip clocking power when the DFM function is not used).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t sclkdis : 1;
+ uint64_t bist_start : 1;
+ uint64_t clear_bist : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_dfm_fnt_sclk_s cn63xx;
+ struct cvmx_dfm_fnt_sclk_s cn63xxp1;
+ struct cvmx_dfm_fnt_sclk_s cn66xx;
+};
+typedef union cvmx_dfm_fnt_sclk cvmx_dfm_fnt_sclk_t;
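+
+/* Sketch of the software BiST flow implied by the notes above: enable sclk,
+ * arm CLEAR_BIST in its own CSR write, then take BIST_START 0->1.  Results
+ * would then be read from DFM_FNT_BIST.  Accessor names are assumed. */
+static inline void example_dfm_run_bist(void)
+{
+	cvmx_dfm_fnt_sclk_t sclk;
+	sclk.u64 = cvmx_read_csr(CVMX_DFM_FNT_SCLK);
+	sclk.s.sclkdis = 0;		/* make sure the sclk grid is running */
+	sclk.s.bist_start = 0;		/* ensure a clean 0->1 transition later */
+	cvmx_write_csr(CVMX_DFM_FNT_SCLK, sclk.u64);
+	sclk.s.clear_bist = 1;		/* note 1: separate write before START */
+	cvmx_write_csr(CVMX_DFM_FNT_SCLK, sclk.u64);
+	sclk.s.bist_start = 1;		/* 0->1 starts BiST for the DFM sub-block */
+	cvmx_write_csr(CVMX_DFM_FNT_SCLK, sclk.u64);
+}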
+
+/**
+ * cvmx_dfm_fnt_stat
+ *
+ * DFM_FNT_STAT = DFM Front Status Register
+ *
+ * This register contains error status information for the DFM Front Section of Logic.
+ */
+union cvmx_dfm_fnt_stat {
+ uint64_t u64;
+ struct cvmx_dfm_fnt_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63 : 22;
+ uint64_t fsyn : 10; /**< Failing Syndrome
+ If SBE_ERR=1, the FSYN code determines which bit was
+ corrected during the OWECC check/correct.
+ NOTE: If both DBE_ERR/SBE_ERR are set, the DBE_ERR has
+ higher priority and FSYN captured will always be for the
+ DBE_ERR detected.
+ The FSYN is "locked down" when either DBE_ERR/SBE_ERR
+ are detected (until these bits are cleared (W1C)).
+ However, if an SBE_ERR occurs first, followed by a
+ DBE_ERR, the higher priority DBE_ERR will re-capture
+ the FSYN for the higher priority error case. */
+ uint64_t fadr : 28; /**< Failing Memory octaword address
+ If either SBE_ERR or DBE_ERR are set, the FADR
+ represents the failing octaword address.
+ NOTE: If both DBE_ERR/SBE_ERR are set, the DBE_ERR has
+ higher priority and the FADR captured will always be
+ with the DBE_ERR detected.
+ The FADR is "locked down" when either DBE_ERR/SBE_ERR
+ are detected (until these bits are cleared (W1C)).
+ However, if an SBE_ERR occurs first, followed by a
+ DBE_ERR, the higher priority DBE_ERR will re-capture
+ the FADR for the higher priority error case. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t dbe_err : 1; /**< Double bit error detected(uncorrectable) during
+ Memory Read.
+ Write of 1 will clear the corresponding error bit */
+ uint64_t sbe_err : 1; /**< Single bit error detected(corrected) during
+ Memory Read.
+ Write of 1 will clear the corresponding error bit */
+#else
+ uint64_t sbe_err : 1;
+ uint64_t dbe_err : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t fadr : 28;
+ uint64_t fsyn : 10;
+ uint64_t reserved_42_63 : 22;
+#endif
+ } s;
+ struct cvmx_dfm_fnt_stat_s cn63xx;
+ struct cvmx_dfm_fnt_stat_s cn63xxp1;
+ struct cvmx_dfm_fnt_stat_s cn66xx;
+};
+typedef union cvmx_dfm_fnt_stat cvmx_dfm_fnt_stat_t;
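+
+/* Sketch: decoding and clearing the W1C error status above.  On error, FADR
+ * holds the failing octaword address and FSYN the syndrome; writing the read
+ * value back writes 1 to whichever of SBE_ERR/DBE_ERR was set, clearing it.
+ * cvmx_dprintf() and the CSR accessors are assumed from the SDK headers. */
+static inline void example_dfm_check_ecc_status(void)
+{
+	cvmx_dfm_fnt_stat_t stat;
+	stat.u64 = cvmx_read_csr(CVMX_DFM_FNT_STAT);
+	if (stat.s.sbe_err || stat.s.dbe_err)
+	{
+		cvmx_dprintf("DFM OWECC %s: fadr=0x%x fsyn=0x%x\n",
+			     stat.s.dbe_err ? "DBE" : "SBE",
+			     (unsigned)stat.s.fadr, (unsigned)stat.s.fsyn);
+		cvmx_write_csr(CVMX_DFM_FNT_STAT, stat.u64);	/* W1C clear */
+	}
+}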
+
+/**
+ * cvmx_dfm_ifb_cnt
+ *
+ * DFM_IFB_CNT = Performance Counters
+ *
+ */
+union cvmx_dfm_ifb_cnt {
+ uint64_t u64;
+ struct cvmx_dfm_ifb_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ifbcnt : 64; /**< Performance Counter
+ 64-bit counter that increments every
+ cycle there is something in the in-flight buffer.
+ Before using, clear counter via DFM_CONTROL.BWCNT. */
+#else
+ uint64_t ifbcnt : 64;
+#endif
+ } s;
+ struct cvmx_dfm_ifb_cnt_s cn63xx;
+ struct cvmx_dfm_ifb_cnt_s cn63xxp1;
+ struct cvmx_dfm_ifb_cnt_s cn66xx;
+};
+typedef union cvmx_dfm_ifb_cnt cvmx_dfm_ifb_cnt_t;
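+
+/* Sketch: deriving an in-flight-buffer occupancy percentage from the two
+ * performance counters, assuming both were cleared together beforehand via
+ * DFM_CONTROL[BWCNT] (see example_dfm_clear_perf_counters above). */
+static inline int example_dfm_ifb_utilization_pct(void)
+{
+	uint64_t ifb  = cvmx_read_csr(CVMX_DFM_IFB_CNT);
+	uint64_t fclk = cvmx_read_csr(CVMX_DFM_FCLK_CNT);
+	if (fclk == 0)
+		return 0;
+	return (int)(ifb * 100 / fclk);	/* % of fclks with requests in flight */
+}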
+
+/**
+ * cvmx_dfm_modereg_params0
+ *
+ * Notes:
+ * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
+ *
+ */
+union cvmx_dfm_modereg_params0 {
+ uint64_t u64;
+ struct cvmx_dfm_modereg_params0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t ppd : 1; /**< DLL Control for precharge powerdown
+ 0 = Slow exit (DLL off)
+ 1 = Fast exit (DLL on)
+ DFM writes this value to MR0[PPD] in the selected DDR3 parts
+ during power-up/init instruction sequencing.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ This value must equal the MR0[PPD] value in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t wrp : 3; /**< Write recovery for auto precharge
+ Should be programmed to be equal to or greater than
+ RNDUP[tWR(ns)/tCYC(ns)]
+ 000 = 5
+ 001 = 5
+ 010 = 6
+ 011 = 7
+ 100 = 8
+ 101 = 10
+ 110 = 12
+ 111 = 14
+ DFM writes this value to MR0[WR] in the selected DDR3 parts
+ during power-up/init instruction sequencing.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ This value must equal the MR0[WR] value in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t dllr : 1; /**< DLL Reset
+ DFM writes this value to MR0[DLL] in the selected DDR3 parts
+ during power-up/init instruction sequencing.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR0[DLL] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t tm : 1; /**< Test Mode
+ DFM writes this value to MR0[TM] in the selected DDR3 parts
+ during power-up/init instruction sequencing.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR0[TM] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t rbt : 1; /**< Read Burst Type
+ 1 = interleaved (fixed)
+ DFM writes this value to MR0[RBT] in the selected DDR3 parts
+ during power-up/init instruction sequencing.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR0[RBT] value must be 1 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t cl : 4; /**< CAS Latency
+ 0010 = 5
+ 0100 = 6
+ 0110 = 7
+ 1000 = 8
+ 1010 = 9
+ 1100 = 10
+ 1110 = 11
+ 0001 = 12
+ 0011 = 13
+ 0101 = 14
+ 0111 = 15
+ 1001 = 16
+ 0000, 1011, 1101, 1111 = Reserved
+ DFM writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts
+ during power-up/init instruction sequencing.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ This value must equal the MR0[CAS Latency / CL] value in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t bl : 2; /**< Burst Length
+ 0 = 8 (fixed)
+ DFM writes this value to MR0[BL] in the selected DDR3 parts
+ during power-up/init instruction sequencing.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR0[BL] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t qoff : 1; /**< Qoff Enable
+ 0 = enable
+ DFM writes this value to MR1[Qoff] in the selected DDR3 parts
+ during power-up/init and write-leveling instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR1[Qoff] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
+ entry and exit instruction sequences.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ The MR1[Qoff] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t tdqs : 1; /**< TDQS Enable
+ 0 = disable
+ DFM writes this value to MR1[TDQS] in the selected DDR3 parts
+ during power-up/init and write-leveling instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR1[TDQS] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
+ entry and exit instruction sequences.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t wlev : 1; /**< Write Leveling Enable
+ 0 = disable
+ DFM writes MR1[Level]=0 in the selected DDR3 parts
+ during power-up/init and write-leveling instruction sequencing.
+ (DFM also writes MR1[Level]=1 at the beginning of a
+ write-leveling instruction sequence. Write-leveling can only be initiated via the
+ write-leveling instruction sequence.)
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ MR1[Level]=0 in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
+ entry and exit instruction sequences.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t al : 2; /**< Additive Latency
+ 00 = 0
+ 01 = CL-1
+ 10 = CL-2
+ 11 = Reserved
+ DFM writes this value to MR1[AL] in the selected DDR3 parts
+ during power-up/init and write-leveling instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR1[AL] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
+ entry and exit instruction sequences.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ This value must equal the MR1[AL] value in all the DDR3
+ parts attached to all ranks during normal operation.
+ See also DFM_CONTROL[POCAS]. */
+ uint64_t dll : 1; /**< DLL Enable
+ 0 = enable
+ 1 = disable
+ DFM writes this value to MR1[DLL] in the selected DDR3 parts
+ during power-up/init and write-leveling instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR1[DLL] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
+ entry and exit instruction sequences.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ This value must equal the MR1[DLL] value in all the DDR3
+ parts attached to all ranks during normal operation.
+ In dll-off mode, CL/CWL must be programmed
+ equal to 6/6, respectively, as per the DDR3 specifications. */
+ uint64_t mpr : 1; /**< MPR
+ DFM writes this value to MR3[MPR] in the selected DDR3 parts
+ during power-up/init and read-leveling instruction sequencing.
+ (DFM also writes MR3[MPR]=1 at the beginning of a
+ read-leveling instruction sequence. Read-leveling can only be initiated via the
+ read-leveling instruction sequence.)
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR3[MPR] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t mprloc : 2; /**< MPR Location
+ DFM writes this value to MR3[MPRLoc] in the selected DDR3 parts
+ during power-up/init and read-leveling instruction sequencing.
+ (DFM also writes MR3[MPRLoc]=0 at the beginning of the
+ read-leveling instruction sequence.)
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR3[MPRLoc] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t cwl : 3; /**< CAS Write Latency
+ - 000: 5
+ - 001: 6
+ - 010: 7
+ - 011: 8
+ - 100: 9
+ - 101: 10
+ - 110: 11
+ - 111: 12
+ DFM writes this value to MR2[CWL] in the selected DDR3 parts
+ during power-up/init instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR2[CWL] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
+ entry and exit instruction sequences.
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ This value must equal the MR2[CWL] value in all the DDR3
+ parts attached to all ranks during normal operation. */
+#else
+ uint64_t cwl : 3;
+ uint64_t mprloc : 2;
+ uint64_t mpr : 1;
+ uint64_t dll : 1;
+ uint64_t al : 2;
+ uint64_t wlev : 1;
+ uint64_t tdqs : 1;
+ uint64_t qoff : 1;
+ uint64_t bl : 2;
+ uint64_t cl : 4;
+ uint64_t rbt : 1;
+ uint64_t tm : 1;
+ uint64_t dllr : 1;
+ uint64_t wrp : 3;
+ uint64_t ppd : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_dfm_modereg_params0_s cn63xx;
+ struct cvmx_dfm_modereg_params0_s cn63xxp1;
+ struct cvmx_dfm_modereg_params0_s cn66xx;
+};
+typedef union cvmx_dfm_modereg_params0 cvmx_dfm_modereg_params0_t;
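For illustration, a minimal sketch of programming the MR0 fields documented above, using the cvmx_read_csr()/cvmx_write_csr() accessors this SDK uses elsewhere; the CVMX_DFM_MODEREG_PARAMS0 address macro is assumed to be defined earlier in this header, and the timing values are hypothetical:

    static void dfm_set_mr0_example(void)
    {
        cvmx_dfm_modereg_params0_t params0;
        params0.u64 = cvmx_read_csr(CVMX_DFM_MODEREG_PARAMS0);   /* assumed macro */
        params0.s.cl  = 0x4;   /* 0100 = CAS latency 6, per the CL table above */
        params0.s.wrp = 0x2;   /* 010 = write recovery 6 = RNDUP[tWR(ns)/tCYC(ns)] */
        cvmx_write_csr(CVMX_DFM_MODEREG_PARAMS0, params0.u64);
    }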
+
+/**
+ * cvmx_dfm_modereg_params1
+ *
+ * Notes:
+ * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
+ *
+ */
+union cvmx_dfm_modereg_params1 {
+ uint64_t u64;
+ struct cvmx_dfm_modereg_params1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t rtt_nom_11 : 3; /**< Must be zero */
+ uint64_t dic_11 : 2; /**< Must be zero */
+ uint64_t rtt_wr_11 : 2; /**< Must be zero */
+ uint64_t srt_11 : 1; /**< Must be zero */
+ uint64_t asr_11 : 1; /**< Must be zero */
+ uint64_t pasr_11 : 3; /**< Must be zero */
+ uint64_t rtt_nom_10 : 3; /**< Must be zero */
+ uint64_t dic_10 : 2; /**< Must be zero */
+ uint64_t rtt_wr_10 : 2; /**< Must be zero */
+ uint64_t srt_10 : 1; /**< Must be zero */
+ uint64_t asr_10 : 1; /**< Must be zero */
+ uint64_t pasr_10 : 3; /**< Must be zero */
+ uint64_t rtt_nom_01 : 3; /**< RTT_NOM Rank 1
+ DFM writes this value to MR1[Rtt_Nom] in the rank 1 (i.e. CS1) DDR3 parts
+ when selected during power-up/init instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR1[Rtt_Nom] in all DRAM parts in rank 1 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t dic_01 : 2; /**< Output Driver Impedance Control Rank 1
+ DFM writes this value to MR1[D.I.C.] in the rank 1 (i.e. CS1) DDR3 parts
+ when selected during power-up/init and write-leveling instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR1[D.I.C.] in all DRAM parts in rank 1 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_01 : 2; /**< RTT_WR Rank 1
+ DFM writes this value to MR2[Rtt_WR] in the rank 1 (i.e. CS1) DDR3 parts
+ when selected during power-up/init instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR2[Rtt_WR] in all DRAM parts in rank 1 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_01 : 1; /**< Self-refresh temperature range Rank 1
+ DFM writes this value to MR2[SRT] in the rank 1 (i.e. CS1) DDR3 parts
+ when selected during power-up/init instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR2[SRT] in all DRAM parts in rank 1 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_01 : 1; /**< Auto self-refresh Rank 1
+ DFM writes this value to MR2[ASR] in the rank 1 (i.e. CS1) DDR3 parts
+ when selected during power-up/init instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR2[ASR] in all DRAM parts in rank 1 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_01 : 3; /**< Partial array self-refresh Rank 1
+ DFM writes this value to MR2[PASR] in the rank 1 (i.e. CS1) DDR3 parts
+ when selected during power-up/init instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR2[PASR] in all DRAM parts in rank 1 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_00 : 3; /**< RTT_NOM Rank 0
+ DFM writes this value to MR1[Rtt_Nom] in the rank 0 (i.e. CS0) DDR3 parts
+ when selected during power-up/init instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR1[Rtt_Nom] in all DRAM parts in rank 0 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t dic_00 : 2; /**< Output Driver Impedance Control Rank 0
+ DFM writes this value to MR1[D.I.C.] in the rank 0 (i.e. CS0) DDR3 parts
+ when selected during power-up/init and write-leveling instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR1[D.I.C.] in all DRAM parts in rank 0 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_00 : 2; /**< RTT_WR Rank 0
+ DFM writes this value to MR2[Rtt_WR] in the rank 0 (i.e. CS0) DDR3 parts
+ when selected during power-up/init instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR2[Rtt_WR] in all DRAM parts in rank 0 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_00 : 1; /**< Self-refresh temperature range Rank 0
+ DFM writes this value to MR2[SRT] in the rank 0 (i.e. CS0) DDR3 parts
+ when selected during power-up/init instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR2[SRT] in all DRAM parts in rank 0 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_00 : 1; /**< Auto self-refresh Rank 0
+ DFM writes this value to MR2[ASR] in the rank 0 (i.e. CS0) DDR3 parts
+ when selected during power-up/init instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR2[ASR] in all DRAM parts in rank 0 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_00 : 3; /**< Partial array self-refresh Rank 0
+ DFM writes this value to MR2[PASR] in the rank 0 (i.e. CS0) DDR3 parts
+ when selected during power-up/init instruction sequencing.
+ If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
+ this value to MR2[PASR] in all DRAM parts in rank 0 during self-refresh
+ entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
+ See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+#else
+ uint64_t pasr_00 : 3;
+ uint64_t asr_00 : 1;
+ uint64_t srt_00 : 1;
+ uint64_t rtt_wr_00 : 2;
+ uint64_t dic_00 : 2;
+ uint64_t rtt_nom_00 : 3;
+ uint64_t pasr_01 : 3;
+ uint64_t asr_01 : 1;
+ uint64_t srt_01 : 1;
+ uint64_t rtt_wr_01 : 2;
+ uint64_t dic_01 : 2;
+ uint64_t rtt_nom_01 : 3;
+ uint64_t pasr_10 : 3;
+ uint64_t asr_10 : 1;
+ uint64_t srt_10 : 1;
+ uint64_t rtt_wr_10 : 2;
+ uint64_t dic_10 : 2;
+ uint64_t rtt_nom_10 : 3;
+ uint64_t pasr_11 : 3;
+ uint64_t asr_11 : 1;
+ uint64_t srt_11 : 1;
+ uint64_t rtt_wr_11 : 2;
+ uint64_t dic_11 : 2;
+ uint64_t rtt_nom_11 : 3;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_dfm_modereg_params1_s cn63xx;
+ struct cvmx_dfm_modereg_params1_s cn63xxp1;
+ struct cvmx_dfm_modereg_params1_s cn66xx;
+};
+typedef union cvmx_dfm_modereg_params1 cvmx_dfm_modereg_params1_t;
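Similarly, a hedged sketch of the per-rank MR1/MR2 termination fields above for a two-rank system; the Rtt_Nom encodings come from the DDR3 specification, and the CVMX_DFM_MODEREG_PARAMS1 macro is assumed:

    static void dfm_set_rank_termination_example(void)
    {
        cvmx_dfm_modereg_params1_t params1;
        params1.u64 = cvmx_read_csr(CVMX_DFM_MODEREG_PARAMS1);   /* assumed macro */
        params1.s.rtt_nom_00 = 1;   /* MR1[Rtt_Nom] = 001 (RZQ/4) for rank 0 */
        params1.s.rtt_nom_01 = 1;   /* MR1[Rtt_Nom] = 001 (RZQ/4) for rank 1 */
        params1.s.rtt_wr_00  = 0;   /* MR2[Rtt_WR] dynamic write ODT off */
        params1.s.rtt_wr_01  = 0;
        cvmx_write_csr(CVMX_DFM_MODEREG_PARAMS1, params1.u64);
    }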
+
+/**
+ * cvmx_dfm_ops_cnt
+ *
+ * DFM_OPS_CNT = Performance Counters
+ *
+ */
+union cvmx_dfm_ops_cnt {
+ uint64_t u64;
+ struct cvmx_dfm_ops_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t opscnt : 64; /**< Performance Counter
+ 64-bit counter that increments when the DDR3 data bus
+ is being used. Before using, clear counter via
+ DFM_CONTROL.BWCNT
+ DRAM bus utilization = DFM_OPS_CNT/DFM_FCLK_CNT */
+#else
+ uint64_t opscnt : 64;
+#endif
+ } s;
+ struct cvmx_dfm_ops_cnt_s cn63xx;
+ struct cvmx_dfm_ops_cnt_s cn63xxp1;
+ struct cvmx_dfm_ops_cnt_s cn66xx;
+};
+typedef union cvmx_dfm_ops_cnt cvmx_dfm_ops_cnt_t;
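The utilization formula in the OPSCNT description can be applied directly. A sketch, assuming the CVMX_DFM_OPS_CNT macro and a companion CVMX_DFM_FCLK_CNT counter as referenced above, with both counters cleared beforehand via DFM_CONTROL[BWCNT]:

    static uint64_t dfm_bus_utilization_pct(void)
    {
        uint64_t ops  = cvmx_read_csr(CVMX_DFM_OPS_CNT);    /* assumed macro */
        uint64_t fclk = cvmx_read_csr(CVMX_DFM_FCLK_CNT);   /* assumed companion counter */
        /* DRAM bus utilization = DFM_OPS_CNT / DFM_FCLK_CNT, scaled to percent */
        return fclk ? (ops * 100 / fclk) : 0;
    }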
+
+/**
+ * cvmx_dfm_phy_ctl
+ *
+ * DFM_PHY_CTL = DFM PHY Control
+ *
+ */
+union cvmx_dfm_phy_ctl {
+ uint64_t u64;
+ struct cvmx_dfm_phy_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t rx_always_on : 1; /**< Disable dynamic DDR3 IO Rx power gating */
+ uint64_t lv_mode : 1; /**< Low Voltage Mode (1.35V) */
+ uint64_t ck_tune1 : 1; /**< Clock Tune
+
+ NOTE: DFM UNUSED */
+ uint64_t ck_dlyout1 : 4; /**< Clock delay out setting
+
+ NOTE: DFM UNUSED */
+ uint64_t ck_tune0 : 1; /**< Clock Tune */
+ uint64_t ck_dlyout0 : 4; /**< Clock delay out setting */
+ uint64_t loopback : 1; /**< Loopback enable */
+ uint64_t loopback_pos : 1; /**< Loopback pos mode */
+ uint64_t ts_stagger : 1; /**< TS Stagger mode
+ This mode configures output drivers with 2-stage drive
+ strength to avoid undershoot issues on the bus when strong
+ drivers are suddenly turned on. When this mode is asserted,
+ Octeon will configure output drivers to be weak drivers
+ (60 ohm output impedance) at the first FCLK cycle, and
+ change drivers to the designated drive strengths specified
+ in DFM_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
+ at the following cycle */
+#else
+ uint64_t ts_stagger : 1;
+ uint64_t loopback_pos : 1;
+ uint64_t loopback : 1;
+ uint64_t ck_dlyout0 : 4;
+ uint64_t ck_tune0 : 1;
+ uint64_t ck_dlyout1 : 4;
+ uint64_t ck_tune1 : 1;
+ uint64_t lv_mode : 1;
+ uint64_t rx_always_on : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_dfm_phy_ctl_s cn63xx;
+ struct cvmx_dfm_phy_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t lv_mode : 1; /**< Low Voltage Mode (1.35V) */
+ uint64_t ck_tune1 : 1; /**< Clock Tune
+
+ NOTE: DFM UNUSED */
+ uint64_t ck_dlyout1 : 4; /**< Clock delay out setting
+
+ NOTE: DFM UNUSED */
+ uint64_t ck_tune0 : 1; /**< Clock Tune */
+ uint64_t ck_dlyout0 : 4; /**< Clock delay out setting */
+ uint64_t loopback : 1; /**< Loopback enable */
+ uint64_t loopback_pos : 1; /**< Loopback pos mode */
+ uint64_t ts_stagger : 1; /**< TS Stagger mode
+ This mode configures output drivers with 2-stage drive
+ strength to avoid undershoot issues on the bus when strong
+ drivers are suddenly turned on. When this mode is asserted,
+ Octeon will configure output drivers to be weak drivers
+ (60 ohm output impedance) at the first FCLK cycle, and
+ change drivers to the designated drive strengths specified
+ in DFM_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
+ at the following cycle */
+#else
+ uint64_t ts_stagger : 1;
+ uint64_t loopback_pos : 1;
+ uint64_t loopback : 1;
+ uint64_t ck_dlyout0 : 4;
+ uint64_t ck_tune0 : 1;
+ uint64_t ck_dlyout1 : 4;
+ uint64_t ck_tune1 : 1;
+ uint64_t lv_mode : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn63xxp1;
+ struct cvmx_dfm_phy_ctl_s cn66xx;
+};
+typedef union cvmx_dfm_phy_ctl cvmx_dfm_phy_ctl_t;
+
+/**
+ * cvmx_dfm_reset_ctl
+ *
+ * Specify the RSL base addresses for the block
+ *
+ *
+ * Notes:
+ * DDR3RST - DDR3 DRAM parts have a new RESET#
+ * pin that wasn't present in DDR2 parts. The
+ * DDR3RST CSR field controls the assertion of
+ * the new 6xxx pin that attaches to RESET#.
+ * When DDR3RST is set, 6xxx asserts RESET#.
+ * When DDR3RST is clear, 6xxx de-asserts
+ * RESET#.
+ *
+ * DDR3RST is set on a cold reset. Warm and
+ * soft chip resets do not affect the DDR3RST
+ * value. Outside of cold reset, only software
+ * CSR writes change the DDR3RST value.
+ */
+union cvmx_dfm_reset_ctl {
+ uint64_t u64;
+ struct cvmx_dfm_reset_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t ddr3psv : 1; /**< Must be zero */
+ uint64_t ddr3psoft : 1; /**< Must be zero */
+ uint64_t ddr3pwarm : 1; /**< Must be zero */
+ uint64_t ddr3rst : 1; /**< Memory Reset
+ 0 = Reset asserted
+ 1 = Reset de-asserted */
+#else
+ uint64_t ddr3rst : 1;
+ uint64_t ddr3pwarm : 1;
+ uint64_t ddr3psoft : 1;
+ uint64_t ddr3psv : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_dfm_reset_ctl_s cn63xx;
+ struct cvmx_dfm_reset_ctl_s cn63xxp1;
+ struct cvmx_dfm_reset_ctl_s cn66xx;
+};
+typedef union cvmx_dfm_reset_ctl cvmx_dfm_reset_ctl_t;
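A sketch of releasing the DRAM parts from reset according to the DDR3RST field encoding above (1 = RESET# de-asserted); the CVMX_DFM_RESET_CTL macro is assumed:

    static void dfm_release_dram_reset(void)
    {
        cvmx_dfm_reset_ctl_t rst;
        rst.u64 = cvmx_read_csr(CVMX_DFM_RESET_CTL);   /* assumed macro */
        rst.s.ddr3rst = 1;   /* 1 = Reset de-asserted, per the field description */
        cvmx_write_csr(CVMX_DFM_RESET_CTL, rst.u64);
    }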
+
+/**
+ * cvmx_dfm_rlevel_ctl
+ */
+union cvmx_dfm_rlevel_ctl {
+ uint64_t u64;
+ struct cvmx_dfm_rlevel_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t delay_unload_3 : 1; /**< When set, unload the PHY silo one cycle later
+ during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 3
+ DELAY_UNLOAD_3 should normally be set, particularly at higher speeds. */
+ uint64_t delay_unload_2 : 1; /**< When set, unload the PHY silo one cycle later
+ during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 2
+ DELAY_UNLOAD_2 should normally not be set. */
+ uint64_t delay_unload_1 : 1; /**< When set, unload the PHY silo one cycle later
+ during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 1
+ DELAY_UNLOAD_1 should normally not be set. */
+ uint64_t delay_unload_0 : 1; /**< When set, unload the PHY silo one cycle later
+ during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 0
+ DELAY_UNLOAD_0 should normally not be set. */
+ uint64_t bitmask : 8; /**< Mask to select bit lanes on which read-leveling
+ feedback is returned when OR_DIS is set to 1 */
+ uint64_t or_dis : 1; /**< Disable or'ing of bits in a byte lane when computing
+ the read-leveling bitmask
+ OR_DIS should normally not be set. */
+ uint64_t offset_en : 1; /**< Use DFM_RLEVEL_CTL[OFFSET] to calibrate read
+ level deskew settings */
+ uint64_t offset : 4; /**< Pick final_setting-offset (if set) for the read level
+ deskew setting instead of the middle of the largest
+ contiguous sequence of 1's in the bitmask */
+ uint64_t byte : 4; /**< 0 <= BYTE <= 1
+ Byte index for which bitmask results are saved
+ in DFM_RLEVEL_DBG */
+#else
+ uint64_t byte : 4;
+ uint64_t offset : 4;
+ uint64_t offset_en : 1;
+ uint64_t or_dis : 1;
+ uint64_t bitmask : 8;
+ uint64_t delay_unload_0 : 1;
+ uint64_t delay_unload_1 : 1;
+ uint64_t delay_unload_2 : 1;
+ uint64_t delay_unload_3 : 1;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_dfm_rlevel_ctl_s cn63xx;
+ struct cvmx_dfm_rlevel_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t offset_en : 1; /**< Use DFM_RLEVEL_CTL[OFFSET] to calibrate read
+ level deskew settings */
+ uint64_t offset : 4; /**< Pick final_setting-offset (if set) for the read level
+ deskew setting instead of the middle of the largest
+ contiguous sequence of 1's in the bitmask */
+ uint64_t byte : 4; /**< 0 <= BYTE <= 1
+ Byte index for which bitmask results are saved
+ in DFM_RLEVEL_DBG */
+#else
+ uint64_t byte : 4;
+ uint64_t offset : 4;
+ uint64_t offset_en : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn63xxp1;
+ struct cvmx_dfm_rlevel_ctl_s cn66xx;
+};
+typedef union cvmx_dfm_rlevel_ctl cvmx_dfm_rlevel_ctl_t;
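A sketch of the recommended knob settings called out in the field descriptions above (DELAY_UNLOAD_3 set, the other DELAY_UNLOAD_* and OR_DIS clear); CVMX_DFM_RLEVEL_CTL is an assumed macro:

    static void dfm_rlevel_ctl_setup(void)
    {
        cvmx_dfm_rlevel_ctl_t rctl;
        rctl.u64 = cvmx_read_csr(CVMX_DFM_RLEVEL_CTL);   /* assumed macro */
        rctl.s.delay_unload_3 = 1;   /* normally set, particularly at higher speeds */
        rctl.s.delay_unload_2 = 0;   /* normally not set */
        rctl.s.delay_unload_1 = 0;
        rctl.s.delay_unload_0 = 0;
        rctl.s.or_dis         = 0;   /* normally not set */
        cvmx_write_csr(CVMX_DFM_RLEVEL_CTL, rctl.u64);
    }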
+
+/**
+ * cvmx_dfm_rlevel_dbg
+ *
+ * Notes:
+ * A given read of DFM_RLEVEL_DBG returns the read-leveling pass/fail results for all possible
+ * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled.
+ * DFM_RLEVEL_CTL[BYTE] selects the particular byte.
+ * To get these pass/fail results for another different rank, you must run the hardware read-leveling
+ * again. For example, it is possible to get the BITMASK results for every byte of every rank
+ * if you run read-leveling separately for each rank, probing DFM_RLEVEL_DBG between each
+ * read-leveling.
+ */
+union cvmx_dfm_rlevel_dbg {
+ uint64_t u64;
+ struct cvmx_dfm_rlevel_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bitmask : 64; /**< Bitmask generated during deskew settings sweep
+ BITMASK[n]=0 means deskew setting n failed
+ BITMASK[n]=1 means deskew setting n passed
+ for 0 <= n <= 63 */
+#else
+ uint64_t bitmask : 64;
+#endif
+ } s;
+ struct cvmx_dfm_rlevel_dbg_s cn63xx;
+ struct cvmx_dfm_rlevel_dbg_s cn63xxp1;
+ struct cvmx_dfm_rlevel_dbg_s cn66xx;
+};
+typedef union cvmx_dfm_rlevel_dbg cvmx_dfm_rlevel_dbg_t;
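Following the notes above, a sketch that dumps the BITMASK for each byte of the last rank that was read-leveled; collecting results for other ranks requires re-running read-leveling. The address macros are assumed, and cvmx_dprintf() is the SDK's debug printf:

    static void dfm_dump_rlevel_bitmasks(void)
    {
        int byte;
        for (byte = 0; byte <= 1; byte++)   /* DFM has byte lanes 0 and 1 */
        {
            cvmx_dfm_rlevel_ctl_t rctl;
            rctl.u64 = cvmx_read_csr(CVMX_DFM_RLEVEL_CTL);   /* assumed macro */
            rctl.s.byte = byte;   /* select which byte's results DBG returns */
            cvmx_write_csr(CVMX_DFM_RLEVEL_CTL, rctl.u64);
            cvmx_dprintf("byte %d bitmask 0x%016llx\n", byte,
                         (unsigned long long)cvmx_read_csr(CVMX_DFM_RLEVEL_DBG));
        }
    }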
+
+/**
+ * cvmx_dfm_rlevel_rank#
+ *
+ * Notes:
+ * This is TWO CSRs per DFM, one per rank.
+ *
+ * Deskew setting is measured in units of 1/4 FCLK, so the above BYTE* values can range over 16 FCLKs.
+ *
+ * Each CSR is written by HW during a read-leveling sequence for the rank. (HW sets STATUS==3 after HW read-leveling completes for the rank.)
+ * If HW is unable to find a match per DFM_RLEVEL_CTL[OFFSET_EN] and DFM_RLEVEL_CTL[OFFSET], then HW will set DFM_RLEVEL_RANKn[BYTE*<5:0>]
+ * to 0.
+ *
+ * Each CSR may also be written by SW, but not while a read-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
+ *
+ * SW initiates a HW read-leveling sequence by programming DFM_RLEVEL_CTL and writing INIT_START=1 with SEQUENCE=1 in DFM_CONFIG.
+ * See DFM_RLEVEL_CTL.
+ */
+union cvmx_dfm_rlevel_rankx {
+ uint64_t u64;
+ struct cvmx_dfm_rlevel_rankx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t status : 2; /**< Indicates status of the read-leveling and where
+ the BYTE* programmings in <35:0> came from:
+ 0 = BYTE* values are their reset value
+ 1 = BYTE* values were set via a CSR write to this register
+ 2 = read-leveling sequence currently in progress (BYTE* values are unpredictable)
+ 3 = BYTE* values came from a complete read-leveling sequence */
+ uint64_t reserved_12_53 : 42;
+ uint64_t byte1 : 6; /**< Deskew setting */
+ uint64_t byte0 : 6; /**< Deskew setting */
+#else
+ uint64_t byte0 : 6;
+ uint64_t byte1 : 6;
+ uint64_t reserved_12_53 : 42;
+ uint64_t status : 2;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_dfm_rlevel_rankx_s cn63xx;
+ struct cvmx_dfm_rlevel_rankx_s cn63xxp1;
+ struct cvmx_dfm_rlevel_rankx_s cn66xx;
+};
+typedef union cvmx_dfm_rlevel_rankx cvmx_dfm_rlevel_rankx_t;
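Per the notes above, STATUS==3 indicates a completed hardware read-leveling sequence; a sketch of polling it, with an assumed indexed address macro:

    static int dfm_rlevel_rank_done(int rank)
    {
        cvmx_dfm_rlevel_rankx_t r;
        r.u64 = cvmx_read_csr(CVMX_DFM_RLEVEL_RANKX(rank));   /* assumed macro */
        return r.s.status == 3;   /* BYTE* deskew (1/4-FCLK units) is then valid */
    }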
+
+/**
+ * cvmx_dfm_rodt_mask
+ *
+ * DFM_RODT_MASK = DFM Read OnDieTermination mask
+ * System designers may desire to terminate DQ/DQS/DM lines for higher-frequency DDR operation,
+ * especially on a multi-rank system. DDR3 DQ/DM/DQS I/Os have a built-in
+ * termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
+ * timing requirements. Each rank has its own ODT pin that fans out to all the memory parts
+ * in that rank. System designers may prefer different combinations of ODT ONs for reads
+ * into different ranks. Octeon supports full programmability by way of the mask register below.
+ * Each rank position has its own 8-bit programmable field.
+ * When the controller does a read to that rank, it sets the 4 ODT pins to the MASK bits below.
+ * For example, when doing a read into Rank0, a system designer may desire to terminate the lines
+ * with the resistor on Dimm0/Rank1. The mask RODT_D0_R0 would then be [00000010].
+ * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
+ * required, write 0 in this register. Note that, as per the DDR3 specifications, the ODT pin
+ * for the rank that is being read should always be 0.
+ *
+ * Notes:
+ * - Notice that when there is only one rank, all valid fields must be zero. This is because there is no
+ * "other" rank to terminate lines for. Read ODT is meant for multirank systems.
+ * - For a two rank system and a read op to rank0: use RODT_D0_R0<1> to terminate lines on rank1.
+ * - For a two rank system and a read op to rank1: use RODT_D0_R1<0> to terminate lines on rank0.
+ * - Therefore, when a given RANK is selected, the RODT mask for that RANK is used.
+ *
+ * DFM always reads 128-bit words independently via one read CAS operation per word.
+ * When a RODT mask bit is set, DFM asserts the OCTEON ODT output
+ * pin(s) starting (CL - CWL) CK's after the read CAS operation. Then, OCTEON
+ * normally continues to assert the ODT output pin(s) for 5+DFM_CONTROL[RODT_BPRCH] more CK's
+ * - for a total of 6+DFM_CONTROL[RODT_BPRCH] CK's for the entire 128-bit read -
+ * satisfying the 6 CK DDR3 ODTH8 requirements.
+ *
+ * But it is possible for OCTEON to issue two 128-bit reads separated by as few as
+ * RtR = 4 or 5 (6 if DFM_CONTROL[RODT_BPRCH]=1) CK's. In that case, OCTEON asserts the ODT output pin(s)
+ * for the RODT mask of the first 128-bit read for RtR CK's, then asserts
+ * the ODT output pin(s) for the RODT mask of the second 128-bit read for 6+DFM_CONTROL[RODT_BPRCH] CK's
+ * (or less if a third 128-bit read follows within 4 or 5 (or 6) CK's of this second 128-bit read).
+ * Note that it may be necessary to force DFM to space back-to-back 128-bit reads
+ * to different ranks apart by at least 6+DFM_CONTROL[RODT_BPRCH] CK's to prevent DDR3 ODTH8 violations.
+ */
+union cvmx_dfm_rodt_mask {
+ uint64_t u64;
+ struct cvmx_dfm_rodt_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rodt_d3_r1 : 8; /**< Must be zero. */
+ uint64_t rodt_d3_r0 : 8; /**< Must be zero. */
+ uint64_t rodt_d2_r1 : 8; /**< Must be zero. */
+ uint64_t rodt_d2_r0 : 8; /**< Must be zero. */
+ uint64_t rodt_d1_r1 : 8; /**< Must be zero. */
+ uint64_t rodt_d1_r0 : 8; /**< Must be zero. */
+ uint64_t rodt_d0_r1 : 8; /**< Read ODT mask RANK1
+ RODT_D0_R1<7:1> must be zero in all cases.
+ RODT_D0_R1<0> must also be zero if RANK_ENA is not set. */
+ uint64_t rodt_d0_r0 : 8; /**< Read ODT mask RANK0
+ RODT_D0_R0<7:2,0> must be zero in all cases.
+ RODT_D0_R0<1> must also be zero if RANK_ENA is not set. */
+#else
+ uint64_t rodt_d0_r0 : 8;
+ uint64_t rodt_d0_r1 : 8;
+ uint64_t rodt_d1_r0 : 8;
+ uint64_t rodt_d1_r1 : 8;
+ uint64_t rodt_d2_r0 : 8;
+ uint64_t rodt_d2_r1 : 8;
+ uint64_t rodt_d3_r0 : 8;
+ uint64_t rodt_d3_r1 : 8;
+#endif
+ } s;
+ struct cvmx_dfm_rodt_mask_s cn63xx;
+ struct cvmx_dfm_rodt_mask_s cn63xxp1;
+ struct cvmx_dfm_rodt_mask_s cn66xx;
+};
+typedef union cvmx_dfm_rodt_mask cvmx_dfm_rodt_mask_t;
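The two-rank read example from the description above translates directly into mask values; a sketch, with CVMX_DFM_RODT_MASK assumed:

    static void dfm_set_rodt_two_rank(void)
    {
        cvmx_dfm_rodt_mask_t rodt;
        rodt.u64 = 0;                  /* all "must be zero" fields stay zero */
        rodt.s.rodt_d0_r0 = 1 << 1;    /* read to rank0: terminate lines on rank1 */
        rodt.s.rodt_d0_r1 = 1 << 0;    /* read to rank1: terminate lines on rank0 */
        cvmx_write_csr(CVMX_DFM_RODT_MASK, rodt.u64);
    }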
+
+/**
+ * cvmx_dfm_slot_ctl0
+ *
+ * DFM_SLOT_CTL0 = DFM Slot Control0
+ * This register is an assortment of various control fields needed by the memory controller
+ *
+ * Notes:
+ * HW will update this register if SW has not previously written to it and when any of DFM_RLEVEL_RANKn, DFM_WLEVEL_RANKn, DFM_CONTROL and
+ * DFM_MODEREG_PARAMS0 change. Ideally, this register should only be read after DFM has been initialized and DFM_RLEVEL_RANKn, DFM_WLEVEL_RANKn
+ * have valid data.
+ * R2W_INIT has 1 extra CK cycle built in for ODT settling/channel turnaround time.
+ */
+union cvmx_dfm_slot_ctl0 {
+ uint64_t u64;
+ struct cvmx_dfm_slot_ctl0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t w2w_init : 6; /**< Write-to-write spacing control
+ for back to back accesses to the same rank and dimm */
+ uint64_t w2r_init : 6; /**< Write-to-read spacing control
+ for back to back accesses to the same rank and dimm */
+ uint64_t r2w_init : 6; /**< Read-to-write spacing control
+ for back to back accesses to the same rank and dimm */
+ uint64_t r2r_init : 6; /**< Read-to-read spacing control
+ for back to back accesses to the same rank and dimm */
+#else
+ uint64_t r2r_init : 6;
+ uint64_t r2w_init : 6;
+ uint64_t w2r_init : 6;
+ uint64_t w2w_init : 6;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_dfm_slot_ctl0_s cn63xx;
+ struct cvmx_dfm_slot_ctl0_s cn63xxp1;
+ struct cvmx_dfm_slot_ctl0_s cn66xx;
+};
+typedef union cvmx_dfm_slot_ctl0 cvmx_dfm_slot_ctl0_t;
+
+/**
+ * cvmx_dfm_slot_ctl1
+ *
+ * DFM_SLOT_CTL1 = DFM Slot Control1
+ * This register is an assortment of various control fields needed by the memory controller
+ *
+ * Notes:
+ * HW will update this register if SW has not previously written to it and when any of DFM_RLEVEL_RANKn, DFM_WLEVEL_RANKn, DFM_CONTROL and
+ * DFM_MODEREG_PARAMS0 change. Ideally, this register should only be read after DFM has been initialized and DFM_RLEVEL_RANKn, DFM_WLEVEL_RANKn
+ * have valid data.
+ * R2W_XRANK_INIT, W2R_XRANK_INIT have 1 extra CK cycle built in for ODT settling/channel turnaround time.
+ */
+union cvmx_dfm_slot_ctl1 {
+ uint64_t u64;
+ struct cvmx_dfm_slot_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t w2w_xrank_init : 6; /**< Write-to-write spacing control
+ for back to back accesses across ranks of the same dimm */
+ uint64_t w2r_xrank_init : 6; /**< Write-to-read spacing control
+ for back to back accesses across ranks of the same dimm */
+ uint64_t r2w_xrank_init : 6; /**< Read-to-write spacing control
+ for back to back accesses across ranks of the same dimm */
+ uint64_t r2r_xrank_init : 6; /**< Read-to-read spacing control
+ for back to back accesses across ranks of the same dimm */
+#else
+ uint64_t r2r_xrank_init : 6;
+ uint64_t r2w_xrank_init : 6;
+ uint64_t w2r_xrank_init : 6;
+ uint64_t w2w_xrank_init : 6;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_dfm_slot_ctl1_s cn63xx;
+ struct cvmx_dfm_slot_ctl1_s cn63xxp1;
+ struct cvmx_dfm_slot_ctl1_s cn66xx;
+};
+typedef union cvmx_dfm_slot_ctl1 cvmx_dfm_slot_ctl1_t;
+
+/**
+ * cvmx_dfm_timing_params0
+ */
+union cvmx_dfm_timing_params0 {
+ uint64_t u64;
+ struct cvmx_dfm_timing_params0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t trp_ext : 1; /**< Indicates tRP constraints.
+ Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
+ + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
+ where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP tRP=10-15ns
+ TYP tRTP=max(4nCK, 7.5ns) */
+ uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
+ Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
+ where tCKSRE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, 10ns) */
+ uint64_t trp : 4; /**< Indicates tRP constraints.
+ Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
+ + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
+ where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP tRP=10-15ns
+ TYP tRTP=max(4nCK, 7.5ns) */
+ uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
+ Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
+ where tZQINIT is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512) */
+ uint64_t tdllk : 4; /**< Indicates tDLLk constraints.
+ Set TDLLK (CSR field) = RNDUP[tDLLk(ns)/(256*tCYC(ns))],
+ where tDLLk is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512)
+ This parameter is used in self-refresh exit
+ and assumed to be greater than tRFC */
+ uint64_t tmod : 4; /**< Indicates tMOD constraints.
+ Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
+ where tMOD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(12nCK, 15ns) */
+ uint64_t tmrd : 4; /**< Indicates tMRD constraints.
+ Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
+ where tMRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4nCK */
+ uint64_t txpr : 4; /**< Indicates tXPR constraints.
+ Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
+ where tXPR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, tRFC+10ns) */
+ uint64_t tcke : 4; /**< Indicates tCKE constraints.
+ Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
+ where tCKE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
+ uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
+ Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
+ where tZQCS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4 (equivalent to 64) */
+ uint64_t tckeon : 10; /**< Reserved. Should be written to zero. */
+#else
+ uint64_t tckeon : 10;
+ uint64_t tzqcs : 4;
+ uint64_t tcke : 4;
+ uint64_t txpr : 4;
+ uint64_t tmrd : 4;
+ uint64_t tmod : 4;
+ uint64_t tdllk : 4;
+ uint64_t tzqinit : 4;
+ uint64_t trp : 4;
+ uint64_t tcksre : 4;
+ uint64_t trp_ext : 1;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dfm_timing_params0_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t trp_ext : 1; /**< Indicates tRP constraints.
+ Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
+ + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
+ where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP tRP=10-15ns
+ TYP tRTP=max(4nCK, 7.5ns) */
+ uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
+ Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
+ where tCKSRE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, 10ns) */
+ uint64_t trp : 4; /**< Indicates tRP constraints.
+ Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
+ + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
+ where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP tRP=10-15ns
+ TYP tRTP=max(4nCK, 7.5ns) */
+ uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
+ Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
+ where tZQINIT is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512) */
+ uint64_t tdllk : 4; /**< Indicates tDLLk constraints.
+ Set TDLLK (CSR field) = RNDUP[tDLLk(ns)/(256*tCYC(ns))],
+ where tDLLk is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512)
+ This parameter is used in self-refresh exit
+ and assumed to be greater than tRFC */
+ uint64_t tmod : 4; /**< Indicates tMOD constraints.
+ Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
+ where tMOD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(12nCK, 15ns) */
+ uint64_t tmrd : 4; /**< Indicates tMRD constraints.
+ Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
+ where tMRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4nCK */
+ uint64_t txpr : 4; /**< Indicates tXPR constraints.
+ Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
+ where tXPR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, tRFC+10ns) */
+ uint64_t tcke : 4; /**< Indicates tCKE constraints.
+ Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
+ where tCKE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
+ uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
+ Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
+ where tZQCS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4 (equivalent to 64) */
+ uint64_t reserved_0_9 : 10;
+#else
+ uint64_t reserved_0_9 : 10;
+ uint64_t tzqcs : 4;
+ uint64_t tcke : 4;
+ uint64_t txpr : 4;
+ uint64_t tmrd : 4;
+ uint64_t tmod : 4;
+ uint64_t tdllk : 4;
+ uint64_t tzqinit : 4;
+ uint64_t trp : 4;
+ uint64_t tcksre : 4;
+ uint64_t trp_ext : 1;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } cn63xx;
+ struct cvmx_dfm_timing_params0_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
+ Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
+ where tCKSRE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, 10ns) */
+ uint64_t trp : 4; /**< Indicates tRP constraints.
+ Set TRP (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
+ + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
+ where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP tRP=10-15ns
+ TYP tRTP=max(4nCK, 7.5ns) */
+ uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
+ Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
+ where tZQINIT is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512) */
+ uint64_t tdllk : 4; /**< Indicates tDLLk constraints.
+ Set TDLLK (CSR field) = RNDUP[tDLLk(ns)/(256*tCYC(ns))],
+ where tDLLk is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512)
+ This parameter is used in self-refresh exit
+ and assumed to be greater than tRFC */
+ uint64_t tmod : 4; /**< Indicates tMOD constraints.
+ Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
+ where tMOD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(12nCK, 15ns) */
+ uint64_t tmrd : 4; /**< Indicates tMRD constraints.
+ Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
+ where tMRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4nCK */
+ uint64_t txpr : 4; /**< Indicates tXPR constraints.
+ Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
+ where tXPR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, tRFC+10ns) */
+ uint64_t tcke : 4; /**< Indicates tCKE constraints.
+ Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
+ where tCKE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
+ uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
+ Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
+ where tZQCS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4 (equivalent to 64) */
+ uint64_t tckeon : 10; /**< Reserved. Should be written to zero. */
+#else
+ uint64_t tckeon : 10;
+ uint64_t tzqcs : 4;
+ uint64_t tcke : 4;
+ uint64_t txpr : 4;
+ uint64_t tmrd : 4;
+ uint64_t tmod : 4;
+ uint64_t tdllk : 4;
+ uint64_t tzqinit : 4;
+ uint64_t trp : 4;
+ uint64_t tcksre : 4;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } cn63xxp1;
+ struct cvmx_dfm_timing_params0_cn63xx cn66xx;
+};
+typedef union cvmx_dfm_timing_params0 cvmx_dfm_timing_params0_t;
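A worked instance of the TRP formula above, with hypothetical DDR3-1066 numbers (tRP = 13.75 ns, tRTP = 7.5 ns, tCYC = 1.875 ns) and RNDUP taken as a ceiling:

    #include <math.h>

    static int dfm_trp_field_example(void)
    {
        double trp = 13.75, trtp = 7.5, tcyc = 1.875;
        /* [TRP_EXT,TRP] = RNDUP[tRP/tCYC] + (RNDUP[tRTP/tCYC]-4) - 1
                         = 8 + (4-4) - 1 = 7, so TRP=7 and TRP_EXT=0 */
        return (int)ceil(trp / tcyc) + ((int)ceil(trtp / tcyc) - 4) - 1;
    }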
+
+/**
+ * cvmx_dfm_timing_params1
+ */
+union cvmx_dfm_timing_params1 {
+ uint64_t u64;
+ struct cvmx_dfm_timing_params1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t tras_ext : 1; /**< Indicates tRAS constraints.
+ Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
+ where tRAS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=35ns-9*tREFI
+ - 000000: RESERVED
+ - 000001: 2 tCYC
+ - 000010: 3 tCYC
+ - ...
+ - 111111: 64 tCYC */
+ uint64_t txpdll : 5; /**< Indicates tXPDLL constraints.
+ Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
+ where tXPDLL is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(10nCK, 24ns) */
+ uint64_t tfaw : 5; /**< Indicates tFAW constraints.
+ Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
+ where tFAW is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=30-40ns */
+ uint64_t twldqsen : 4; /**< Indicates tWLDQSEN constraints.
+ Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
+ where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(25nCK) */
+ uint64_t twlmrd : 4; /**< Indicates tWLMRD constraints.
+ Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
+ where tWLMRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(40nCK) */
+ uint64_t txp : 3; /**< Indicates tXP constraints.
+ Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
+ where tXP is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(3nCK, 7.5ns) */
+ uint64_t trrd : 3; /**< Indicates tRRD constraints.
+ Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
+ where tRRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(4nCK, 10ns)
+ - 000: RESERVED
+ - 001: 3 tCYC
+ - ...
+ - 110: 8 tCYC
+ - 111: 9 tCYC */
+ uint64_t trfc : 5; /**< Indicates tRFC constraints.
+ Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
+ where tRFC is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=90-350ns
+ - 00000: RESERVED
+ - 00001: 8 tCYC
+ - 00010: 16 tCYC
+ - 00011: 24 tCYC
+ - 00100: 32 tCYC
+ - ...
+ - 11110: 240 tCYC
+ - 11111: 248 tCYC */
+ uint64_t twtr : 4; /**< Indicates tWTR constraints.
+ Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
+ where tWTR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(4nCK, 7.5ns)
+ - 0000: RESERVED
+ - 0001: 2
+ - ...
+ - 0111: 8
+ - 1000-1111: RESERVED */
+ uint64_t trcd : 4; /**< Indicates tRCD constraints.
+ Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
+ where tRCD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=10-15ns
+ - 0000: RESERVED
+ - 0001: 2 (2 is the smallest value allowed)
+ - 0010: 2
+ - ...
+ - 1001: 9
+ - 1010-1111: RESERVED
+ In 2T mode, make this register TRCD-1, not going
+ below 2. */
+ uint64_t tras : 5; /**< Indicates tRAS constraints.
+ Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
+ where tRAS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=35ns-9*tREFI
+ - 000000: RESERVED
+ - 000001: 2 tCYC
+ - 000010: 3 tCYC
+ - ...
+ - 111111: 64 tCYC */
+ uint64_t tmprr : 4; /**< Indicates tMPRR constraints.
+ Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
+ where tMPRR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=1nCK */
+#else
+ uint64_t tmprr : 4;
+ uint64_t tras : 5;
+ uint64_t trcd : 4;
+ uint64_t twtr : 4;
+ uint64_t trfc : 5;
+ uint64_t trrd : 3;
+ uint64_t txp : 3;
+ uint64_t twlmrd : 4;
+ uint64_t twldqsen : 4;
+ uint64_t tfaw : 5;
+ uint64_t txpdll : 5;
+ uint64_t tras_ext : 1;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dfm_timing_params1_s cn63xx;
+ struct cvmx_dfm_timing_params1_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t txpdll : 5; /**< Indicates tXPDLL constraints.
+ Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
+ where tXPDLL is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(10nCK, 24ns) */
+ uint64_t tfaw : 5; /**< Indicates tFAW constraints.
+ Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
+ where tFAW is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=30-40ns */
+ uint64_t twldqsen : 4; /**< Indicates tWLDQSEN constraints.
+ Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
+ where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(25nCK) */
+ uint64_t twlmrd : 4; /**< Indicates tWLMRD constraints.
+ Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
+ where tWLMRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(40nCK) */
+ uint64_t txp : 3; /**< Indicates tXP constraints.
+ Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
+ where tXP is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(3nCK, 7.5ns) */
+ uint64_t trrd : 3; /**< Indicates tRRD constraints.
+ Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
+ where tRRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(4nCK, 10ns)
+ - 000: RESERVED
+ - 001: 3 tCYC
+ - ...
+ - 110: 8 tCYC
+ - 111: 9 tCYC */
+ uint64_t trfc : 5; /**< Indicates tRFC constraints.
+ Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
+ where tRFC is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=90-350ns
+ - 00000: RESERVED
+ - 00001: 8 tCYC
+ - 00010: 16 tCYC
+ - 00011: 24 tCYC
+ - 00100: 32 tCYC
+ - ...
+ - 11110: 240 tCYC
+ - 11111: 248 tCYC */
+ uint64_t twtr : 4; /**< Indicates tWTR constraints.
+ Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
+ where tWTR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(4nCK, 7.5ns)
+ - 0000: RESERVED
+ - 0001: 2
+ - ...
+ - 0111: 8
+ - 1000-1111: RESERVED */
+ uint64_t trcd : 4; /**< Indicates tRCD constraints.
+ Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
+ where tRCD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=10-15ns
+ - 0000: RESERVED
+ - 0001: 2 (2 is the smallest value allowed)
+ - 0010: 2
+ - ...
+ - 1001: 9
+ - 1010-1111: RESERVED
+ In 2T mode, make this register TRCD-1, not going
+ below 2. */
+ uint64_t tras : 5; /**< Indicates tRAS constraints.
+ Set TRAS (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
+ where tRAS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=35ns-9*tREFI
+ - 00000: RESERVED
+ - 00001: 2 tCYC
+ - 00010: 3 tCYC
+ - ...
+ - 11111: 32 tCYC */
+ uint64_t tmprr : 4; /**< Indicates tMPRR constraints.
+ Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
+ where tMPRR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=1nCK */
+#else
+ uint64_t tmprr : 4;
+ uint64_t tras : 5;
+ uint64_t trcd : 4;
+ uint64_t twtr : 4;
+ uint64_t trfc : 5;
+ uint64_t trrd : 3;
+ uint64_t txp : 3;
+ uint64_t twlmrd : 4;
+ uint64_t twldqsen : 4;
+ uint64_t tfaw : 5;
+ uint64_t txpdll : 5;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } cn63xxp1;
+ struct cvmx_dfm_timing_params1_s cn66xx;
+};
+typedef union cvmx_dfm_timing_params1 cvmx_dfm_timing_params1_t;
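Likewise for two of the fields above, with the same hypothetical DDR3-1066 numbers (tFAW = 37.5 ns, tRRD = max(4nCK, 10 ns) = 10 ns, tCYC = 1.875 ns):

    #include <math.h>

    static int dfm_tfaw_field_example(void)
    {
        return (int)ceil(37.5 / (4 * 1.875));   /* RNDUP[tFAW/(4*tCYC)] = 5 */
    }

    static int dfm_trrd_field_example(void)
    {
        return (int)ceil(10.0 / 1.875) - 2;     /* RNDUP[tRRD/tCYC]-2 = 4, i.e. 6 tCYC */
    }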
+
+/**
+ * cvmx_dfm_wlevel_ctl
+ */
+union cvmx_dfm_wlevel_ctl {
+ uint64_t u64;
+ struct cvmx_dfm_wlevel_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t rtt_nom : 3; /**< RTT_NOM
+ DFM writes a decoded value to MR1[Rtt_Nom] of the rank during
+ write leveling. Per JEDEC DDR3 specifications,
+ only values MR1[Rtt_Nom] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6)
+ are allowed during write leveling with output buffer enabled.
+ 000 : DFM writes 001 (RZQ/4) to MR1[Rtt_Nom]
+ 001 : DFM writes 010 (RZQ/2) to MR1[Rtt_Nom]
+ 010 : DFM writes 011 (RZQ/6) to MR1[Rtt_Nom]
+ 011 : DFM writes 100 (RZQ/12) to MR1[Rtt_Nom]
+ 100 : DFM writes 101 (RZQ/8) to MR1[Rtt_Nom]
+ 101 : DFM writes 110 (Rsvd) to MR1[Rtt_Nom]
+ 110 : DFM writes 111 (Rsvd) to MR1[Rtt_Nom]
+ 111 : DFM writes 000 (Disabled) to MR1[Rtt_Nom] */
+ uint64_t bitmask : 8; /**< Mask to select bit lanes on which write-leveling
+ feedback is returned when OR_DIS is set to 1 */
+ uint64_t or_dis : 1; /**< Disable or'ing of bits in a byte lane when computing
+ the write-leveling bitmask */
+ uint64_t sset : 1; /**< Run write-leveling on the current setting only. */
+ uint64_t lanemask : 9; /**< One-hot mask to select byte lane to be leveled by
+ the write-leveling sequence
+ Used with x16 parts where the upper and lower byte
+ lanes need to be leveled independently
+ LANEMASK<8:2> must be zero. */
+#else
+ uint64_t lanemask : 9;
+ uint64_t sset : 1;
+ uint64_t or_dis : 1;
+ uint64_t bitmask : 8;
+ uint64_t rtt_nom : 3;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_dfm_wlevel_ctl_s cn63xx;
+ struct cvmx_dfm_wlevel_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t sset : 1; /**< Run write-leveling on the current setting only. */
+ uint64_t lanemask : 9; /**< One-hot mask to select byte lane to be leveled by
+ the write-leveling sequence
+ Used with x16 parts where the upper and lower byte
+ lanes need to be leveled independently
+ LANEMASK<8:2> must be zero. */
+#else
+ uint64_t lanemask : 9;
+ uint64_t sset : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn63xxp1;
+ struct cvmx_dfm_wlevel_ctl_s cn66xx;
+};
+typedef union cvmx_dfm_wlevel_ctl cvmx_dfm_wlevel_ctl_t;
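A sketch of selecting the leveling-time termination and a single byte lane per the field descriptions above; CVMX_DFM_WLEVEL_CTL is assumed:

    static void dfm_wlevel_ctl_setup(void)
    {
        cvmx_dfm_wlevel_ctl_t wctl;
        wctl.u64 = cvmx_read_csr(CVMX_DFM_WLEVEL_CTL);   /* assumed macro */
        wctl.s.rtt_nom  = 0x1;   /* DFM writes MR1[Rtt_Nom]=010 (RZQ/2) while leveling */
        wctl.s.lanemask = 0x1;   /* one-hot: level byte lane 0; <8:2> must stay zero */
        cvmx_write_csr(CVMX_DFM_WLEVEL_CTL, wctl.u64);
    }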
+
+/**
+ * cvmx_dfm_wlevel_dbg
+ *
+ * Notes:
+ * A given read of DFM_WLEVEL_DBG returns the write-leveling pass/fail results for all possible
+ * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW write-leveled.
+ * DFM_WLEVEL_DBG[BYTE] selects the particular byte.
+ * To get these pass/fail results for another different rank, you must run the hardware write-leveling
+ * again. For example, it is possible to get the BITMASK results for every byte of every rank
+ * if you run write-leveling separately for each rank, probing DFM_WLEVEL_DBG between each
+ * write-leveling.
+ */
+union cvmx_dfm_wlevel_dbg {
+ uint64_t u64;
+ struct cvmx_dfm_wlevel_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t bitmask : 8; /**< Bitmask generated during deskew settings sweep
+ if DFM_WLEVEL_CTL[SSET]=0
+ BITMASK[n]=0 means deskew setting n failed
+ BITMASK[n]=1 means deskew setting n passed
+ for 0 <= n <= 7
+ BITMASK contains the first 8 results of the total 16
+ collected by DFM during the write-leveling sequence
+ else if DFM_WLEVEL_CTL[SSET]=1
+ BITMASK[0]=0 means curr deskew setting failed
+ BITMASK[0]=1 means curr deskew setting passed */
+ uint64_t byte : 4; /**< 0 <= BYTE <= 8 */
+#else
+ uint64_t byte : 4;
+ uint64_t bitmask : 8;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_dfm_wlevel_dbg_s cn63xx;
+ struct cvmx_dfm_wlevel_dbg_s cn63xxp1;
+ struct cvmx_dfm_wlevel_dbg_s cn66xx;
+};
+typedef union cvmx_dfm_wlevel_dbg cvmx_dfm_wlevel_dbg_t;
+
+/**
+ * cvmx_dfm_wlevel_rank#
+ *
+ * Notes:
+ * This is TWO CSRs per DFM, one per rank (front bunk/back bunk).
+ *
+ * Deskew setting is measured in units of 1/8 FCLK, so the above BYTE* values can range over 4 FCLKs.
+ *
+ * Assuming DFM_WLEVEL_CTL[SSET]=0, the BYTE*<2:0> values are not used during write-leveling, and
+ * they are over-written by the hardware as part of the write-leveling sequence. (HW sets STATUS==3
+ * after HW write-leveling completes for the rank). SW needs to set BYTE*<4:3> bits.
+ *
+ * Each CSR may also be written by SW, but not while a write-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
+ *
+ * SW initiates a HW write-leveling sequence by programming DFM_WLEVEL_CTL and writing RANKMASK and INIT_START=1 with SEQUENCE=6 in DFM_CONFIG.
+ * DFM will then step through and accumulate write leveling results for 8 unique delay settings (twice), starting at a delay of
+ * DFM_WLEVEL_RANKn[BYTE*<4:3>]*8 CK increasing by 1/8 CK each setting. HW will then set DFM_WLEVEL_RANKn[BYTE*<2:0>] to indicate the
+ * first write leveling result of '1' that followed a result of '0' during the sequence by searching for a '1100' pattern in the generated
+ * bitmask, except that DFM will always write DFM_WLEVEL_RANKn[BYTE*<0>]=0. If HW is unable to find a match for a '1100' pattern, then HW will
+ * set DFM_WLEVEL_RANKn[BYTE*<2:0>] to 4.
+ * See DFM_WLEVEL_CTL.
+ */
+union cvmx_dfm_wlevel_rankx {
+ uint64_t u64;
+ struct cvmx_dfm_wlevel_rankx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t status : 2; /**< Indicates status of the write-leveling and where
+ the BYTE* programmings in <44:0> came from:
+ 0 = BYTE* values are their reset value
+ 1 = BYTE* values were set via a CSR write to this register
+ 2 = write-leveling sequence currently in progress (BYTE* values are unpredictable)
+ 3 = BYTE* values came from a complete write-leveling sequence, irrespective of
+ which lanes are masked via DFM_WLEVEL_CTL[LANEMASK] */
+ uint64_t reserved_10_44 : 35;
+ uint64_t byte1 : 5; /**< Deskew setting
+ Bit 0 of BYTE1 must be zero during normal operation */
+ uint64_t byte0 : 5; /**< Deskew setting
+ Bit 0 of BYTE0 must be zero during normal operation */
+#else
+ uint64_t byte0 : 5;
+ uint64_t byte1 : 5;
+ uint64_t reserved_10_44 : 35;
+ uint64_t status : 2;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dfm_wlevel_rankx_s cn63xx;
+ struct cvmx_dfm_wlevel_rankx_s cn63xxp1;
+ struct cvmx_dfm_wlevel_rankx_s cn66xx;
+};
+typedef union cvmx_dfm_wlevel_rankx cvmx_dfm_wlevel_rankx_t;
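Putting the notes above together, a sketch of seeding the coarse delay and launching a hardware write-leveling sequence on rank 0. The indexed WLEVEL_RANK macro and the DFM_CONFIG union/field names (defined earlier in this header) are assumed:

    static void dfm_start_wlevel_rank0(void)
    {
        cvmx_dfm_wlevel_rankx_t wrank;
        wrank.u64 = 0;
        wrank.s.byte0 = 1 << 3;   /* BYTE0<4:3>=1: search starts at 1*8 CK */
        wrank.s.byte1 = 1 << 3;
        cvmx_write_csr(CVMX_DFM_WLEVEL_RANKX(0), wrank.u64);   /* assumed macro */

        cvmx_dfm_config_t cfg;   /* assumed union, defined earlier in this header */
        cfg.u64 = cvmx_read_csr(CVMX_DFM_CONFIG);
        cfg.s.rankmask   = 0x1;   /* level rank 0 */
        cfg.s.sequence   = 6;     /* write-leveling, per the notes above */
        cfg.s.init_start = 1;
        cvmx_write_csr(CVMX_DFM_CONFIG, cfg.u64);
    }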
+
+/**
+ * cvmx_dfm_wodt_mask
+ *
+ * DFM_WODT_MASK = DFM Write OnDieTermination mask
+ * System designers may desire to terminate DQ/DQS/DM lines for higher-frequency DDR operation,
+ * especially on a multi-rank system. DDR3 DQ/DM/DQS I/Os have a built-in
+ * termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
+ * timing requirements. Each rank has its own ODT pin that fans out to all the memory parts
+ * in that rank. System designers may prefer different combinations of ODT ONs for writes
+ * into different ranks. Octeon supports full programmability by way of the mask register below.
+ * Each rank position has its own 8-bit programmable field.
+ * When the controller does a write to that rank, it sets the 4 ODT pins to the MASK bits below.
+ * For example, when doing a write into Rank0, a system designer may desire to terminate the lines
+ * with the resistor on Dimm0/Rank1. The mask WODT_D0_R0 would then be [00000010].
+ * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
+ * required, write 0 in this register.
+ *
+ * Notes:
+ * - DFM_WODT_MASK functions a little differently than DFM_RODT_MASK. While in DFM_RODT_MASK the other
+ * rank(s) are ODT-ed, in DFM_WODT_MASK the rank in which the write CAS is issued can be ODT-ed as well.
+ * - For a two rank system and a write op to rank0: use WODT_D0_R0<1:0> to terminate lines on rank1 and/or rank0.
+ * - For a two rank system and a write op to rank1: use WODT_D0_R1<1:0> to terminate lines on rank1 and/or rank0.
+ * - When a given RANK is selected, the WODT mask for that RANK is used.
+ *
+ * DFM always writes 128-bit words independently via one write CAS operation per word.
+ * When a WODT mask bit is set, DFM asserts the OCTEON ODT output pin(s) starting the same cycle
+ * as the write CAS operation. Then, OCTEON normally continues to assert the ODT output pin(s) for five
+ * more cycles - for a total of 6 cycles for the entire word write - satisfying the 6 cycle DDR3
+ * ODTH8 requirements. But it is possible for DFM to issue two word writes separated by as few
+ * as WtW = 4 or 5 cycles. In that case, DFM asserts the ODT output pin(s) for the WODT mask of the
+ * first word write for WtW cycles, then asserts the ODT output pin(s) for the WODT mask of the
+ * second write for 6 cycles (or less if a third word write follows within 4 or 5
+ * cycles of this second word write). Note that it may be necessary to force DFM to space back-to-back
+ * word writes to different ranks apart by at least 6 cycles to prevent DDR3 ODTH8 violations.
+ */
+union cvmx_dfm_wodt_mask {
+ uint64_t u64;
+ struct cvmx_dfm_wodt_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wodt_d3_r1 : 8; /**< Not used by DFM. */
+ uint64_t wodt_d3_r0 : 8; /**< Not used by DFM. */
+ uint64_t wodt_d2_r1 : 8; /**< Not used by DFM. */
+ uint64_t wodt_d2_r0 : 8; /**< Not used by DFM. */
+ uint64_t wodt_d1_r1 : 8; /**< Not used by DFM. */
+ uint64_t wodt_d1_r0 : 8; /**< Not used by DFM. */
+ uint64_t wodt_d0_r1 : 8; /**< Write ODT mask RANK1
+ WODT_D0_R1<7:2> not used by DFM.
+ WODT_D0_R1<1:0> is also not used by DFM when RANK_ENA is not set. */
+ uint64_t wodt_d0_r0 : 8; /**< Write ODT mask RANK0
+ WODT_D0_R0<7:2> not used by DFM. */
+#else
+ uint64_t wodt_d0_r0 : 8;
+ uint64_t wodt_d0_r1 : 8;
+ uint64_t wodt_d1_r0 : 8;
+ uint64_t wodt_d1_r1 : 8;
+ uint64_t wodt_d2_r0 : 8;
+ uint64_t wodt_d2_r1 : 8;
+ uint64_t wodt_d3_r0 : 8;
+ uint64_t wodt_d3_r1 : 8;
+#endif
+ } s;
+ struct cvmx_dfm_wodt_mask_s cn63xx;
+ struct cvmx_dfm_wodt_mask_s cn63xxp1;
+ struct cvmx_dfm_wodt_mask_s cn66xx;
+};
+typedef union cvmx_dfm_wodt_mask cvmx_dfm_wodt_mask_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-dfm-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-dma-engine.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-dma-engine.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-dma-engine.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,554 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the PCI / PCIe DMA engines. These are only available
+ * on chips with PCI / PCIe.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/octeon-model.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-cmd-queue.h>
+#include <asm/octeon/cvmx-dma-engine.h>
+#include <asm/octeon/octeon-feature.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-dpi-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-helper-cfg.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#endif
+#include "cvmx.h"
+#include "cvmx-cmd-queue.h"
+#include "cvmx-dma-engine.h"
+#include "cvmx-helper-cfg.h"
+#endif
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+/**
+ * Return the number of DMA engines supported by this chip
+ *
+ * @return Number of DMA engines
+ */
+int cvmx_dma_engine_get_num(void)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
+ return 4;
+ else
+ return 5;
+ }
+ else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ return 8;
+ else
+ return 2;
+}
+
+/**
+ * Initialize the DMA engines for use
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_dma_engine_initialize(void)
+{
+ int engine;
+
+ for (engine=0; engine < cvmx_dma_engine_get_num(); engine++)
+ {
+ cvmx_cmd_queue_result_t result;
+ result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_DMA(engine),
+ 0, CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
+ if (result != CVMX_CMD_QUEUE_SUCCESS)
+ return -1;
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_npei_dmax_ibuff_saddr_t dmax_ibuff_saddr;
+ dmax_ibuff_saddr.u64 = 0;
+ dmax_ibuff_saddr.s.saddr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine))) >> 7;
+ cvmx_write_csr(CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(engine), dmax_ibuff_saddr.u64);
+ }
+ else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ {
+ cvmx_dpi_dmax_ibuff_saddr_t dpi_dmax_ibuff_saddr;
+ dpi_dmax_ibuff_saddr.u64 = 0;
+ dpi_dmax_ibuff_saddr.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
+ dpi_dmax_ibuff_saddr.s.saddr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine))) >> 7;
+ cvmx_write_csr(CVMX_DPI_DMAX_IBUFF_SADDR(engine), dpi_dmax_ibuff_saddr.u64);
+ }
+ else
+ {
+ uint64_t address = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine)));
+ if (engine)
+ cvmx_write_csr(CVMX_NPI_HIGHP_IBUFF_SADDR, address);
+ else
+ cvmx_write_csr(CVMX_NPI_LOWP_IBUFF_SADDR, address);
+ }
+ }
+
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_npei_dma_control_t dma_control;
+ dma_control.u64 = 0;
+ if (cvmx_dma_engine_get_num() >= 5)
+ dma_control.s.dma4_enb = 1;
+ dma_control.s.dma3_enb = 1;
+ dma_control.s.dma2_enb = 1;
+ dma_control.s.dma1_enb = 1;
+ dma_control.s.dma0_enb = 1;
+ dma_control.s.o_mode = 1; /* Pull NS and RO from this register, not the pointers */
+ //dma_control.s.dwb_denb = 1;
+ //dma_control.s.dwb_ichk = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
+ dma_control.s.fpa_que = CVMX_FPA_OUTPUT_BUFFER_POOL;
+ dma_control.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
+ cvmx_write_csr(CVMX_PEXP_NPEI_DMA_CONTROL, dma_control.u64);
+ /* As a workaround for errata PCIE-811 we only allow a single
+ outstanding DMA read over PCIe at a time. This limits performance,
+ but works in all cases. If you need higher performance, remove
+ this code and implement the more complicated workaround documented
+ in the errata. This only affects CN56XX pass 2.0 chips */
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_0))
+ {
+ cvmx_npei_dma_pcie_req_num_t pcie_req_num;
+ pcie_req_num.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM);
+ pcie_req_num.s.dma_cnt = 1;
+ cvmx_write_csr(CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM, pcie_req_num.u64);
+ }
+ }
+ else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ {
+ cvmx_dpi_engx_buf_t dpi_engx_buf;
+ cvmx_dpi_dma_engx_en_t dpi_dma_engx_en;
+ cvmx_dpi_dma_control_t dma_control;
+ cvmx_dpi_ctl_t dpi_ctl;
+
+ /* Give engines 0-4 1KB each, and engine 5 3KB. This gives the packet
+ engines better performance. The total must not exceed 8KB */
+ dpi_engx_buf.u64 = 0;
+ dpi_engx_buf.s.blks = 2;
+ cvmx_write_csr(CVMX_DPI_ENGX_BUF(0), dpi_engx_buf.u64);
+ cvmx_write_csr(CVMX_DPI_ENGX_BUF(1), dpi_engx_buf.u64);
+ cvmx_write_csr(CVMX_DPI_ENGX_BUF(2), dpi_engx_buf.u64);
+ cvmx_write_csr(CVMX_DPI_ENGX_BUF(3), dpi_engx_buf.u64);
+ cvmx_write_csr(CVMX_DPI_ENGX_BUF(4), dpi_engx_buf.u64);
+ dpi_engx_buf.s.blks = 6;
+ cvmx_write_csr(CVMX_DPI_ENGX_BUF(5), dpi_engx_buf.u64);
+
+ dma_control.u64 = cvmx_read_csr(CVMX_DPI_DMA_CONTROL);
+ dma_control.s.pkt_hp = 1;
+ dma_control.s.pkt_en = 1;
+ dma_control.s.dma_enb = 0x1f;
+ dma_control.s.dwb_denb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);
+ dma_control.s.dwb_ichk = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
+ dma_control.s.fpa_que = CVMX_FPA_OUTPUT_BUFFER_POOL;
+ dma_control.s.o_mode = 1;
+ cvmx_write_csr(CVMX_DPI_DMA_CONTROL, dma_control.u64);
+ /* When dma_control[pkt_en] = 1, engine 5 is used for packets and is not
+ available for DMA. */
+ dpi_dma_engx_en.u64 = cvmx_read_csr(CVMX_DPI_DMA_ENGX_EN(5));
+ dpi_dma_engx_en.s.qen = 0;
+ cvmx_write_csr(CVMX_DPI_DMA_ENGX_EN(5), dpi_dma_engx_en.u64);
+ dpi_ctl.u64 = cvmx_read_csr(CVMX_DPI_CTL);
+ dpi_ctl.s.en = 1;
+ cvmx_write_csr(CVMX_DPI_CTL, dpi_ctl.u64);
+ }
+ else
+ {
+ cvmx_npi_dma_control_t dma_control;
+ dma_control.u64 = 0;
+ //dma_control.s.dwb_denb = 1;
+ //dma_control.s.dwb_ichk = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
+ dma_control.s.o_add1 = 1;
+ dma_control.s.fpa_que = CVMX_FPA_OUTPUT_BUFFER_POOL;
+ dma_control.s.hp_enb = 1;
+ dma_control.s.lp_enb = 1;
+ dma_control.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
+ cvmx_write_csr(CVMX_NPI_DMA_CONTROL, dma_control.u64);
+ }
+
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_dma_engine_initialize);
+#endif
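[Editor's note: a usage illustration only, not from the SDK source. The typical lifecycle around the initialize/shutdown entry points in this file, with error handling elided:]

    /* Sketch: bring the engines up, do some work, tear them down. */
    if (cvmx_dma_engine_initialize() != 0)
        return;                 /* command queues could not be set up */
    /* ... build and submit transfers, wait for completion ... */
    if (cvmx_dma_engine_shutdown() != 0)
        cvmx_dprintf("Engines still busy; shutdown refused\n");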
+
+/**
+ * Shutdown all DMA engines. The engines must be idle when this
+ * function is called.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_dma_engine_shutdown(void)
+{
+ int engine;
+
+ for (engine=0; engine < cvmx_dma_engine_get_num(); engine++)
+ {
+ if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_DMA(engine)))
+ {
+ cvmx_dprintf("ERROR: cvmx_dma_engine_shutdown: Engine not idle.\n");
+ return -1;
+ }
+ }
+
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_npei_dma_control_t dma_control;
+ dma_control.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMA_CONTROL);
+ if (cvmx_dma_engine_get_num() >= 5)
+ dma_control.s.dma4_enb = 0;
+ dma_control.s.dma3_enb = 0;
+ dma_control.s.dma2_enb = 0;
+ dma_control.s.dma1_enb = 0;
+ dma_control.s.dma0_enb = 0;
+ cvmx_write_csr(CVMX_PEXP_NPEI_DMA_CONTROL, dma_control.u64);
+ /* Make sure the disable completes */
+ cvmx_read_csr(CVMX_PEXP_NPEI_DMA_CONTROL);
+ }
+ else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ {
+ cvmx_dpi_dma_control_t dma_control;
+ dma_control.u64 = cvmx_read_csr(CVMX_DPI_DMA_CONTROL);
+ dma_control.s.dma_enb = 0;
+ cvmx_write_csr(CVMX_DPI_DMA_CONTROL, dma_control.u64);
+ /* Make sure the disable completes */
+ cvmx_read_csr(CVMX_DPI_DMA_CONTROL);
+ }
+ else
+ {
+ cvmx_npi_dma_control_t dma_control;
+ dma_control.u64 = cvmx_read_csr(CVMX_NPI_DMA_CONTROL);
+ dma_control.s.hp_enb = 0;
+ dma_control.s.lp_enb = 0;
+ cvmx_write_csr(CVMX_NPI_DMA_CONTROL, dma_control.u64);
+ /* Make sure the disable completes */
+ cvmx_read_csr(CVMX_NPI_DMA_CONTROL);
+ }
+
+ for (engine=0; engine < cvmx_dma_engine_get_num(); engine++)
+ {
+ cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_DMA(engine));
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ cvmx_write_csr(CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(engine), 0);
+ else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ cvmx_write_csr(CVMX_DPI_DMAX_IBUFF_SADDR(engine), 0);
+ else
+ {
+ if (engine)
+ cvmx_write_csr(CVMX_NPI_HIGHP_IBUFF_SADDR, 0);
+ else
+ cvmx_write_csr(CVMX_NPI_LOWP_IBUFF_SADDR, 0);
+ }
+ }
+
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_dma_engine_shutdown);
+#endif
+
+/**
+ * Submit a series of DMA commands to the DMA engines.
+ *
+ * @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
+ * @param header Command header
+ * @param num_buffers
+ * The number of data pointers
+ * @param buffers Command data pointers
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_dma_engine_submit(int engine, cvmx_dma_engine_header_t header, int num_buffers, cvmx_dma_engine_buffer_t buffers[])
+{
+ cvmx_cmd_queue_result_t result;
+ int cmd_count = 1;
+ uint64_t cmds[num_buffers + 1];
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+ {
+ /* Check for Errata PCIe-604 */
+ if ((header.s.nfst > 11) || (header.s.nlst > 11) || (header.s.nfst + header.s.nlst > 15))
+ {
+ cvmx_dprintf("DMA engine submit too large\n");
+ return -1;
+ }
+ }
+
+ cmds[0] = header.u64;
+ while (num_buffers--)
+ {
+ cmds[cmd_count++] = buffers->u64;
+ buffers++;
+ }
+
+ /* Due to errata PCIE-13315, it is necessary to have the queue lock while we
+ ring the doorbell for the DMA engines. This prevents doorbells from
+ possibly arriving out of order with respect to the command queue
+ entries */
+ __cvmx_cmd_queue_lock(CVMX_CMD_QUEUE_DMA(engine), __cvmx_cmd_queue_get_state(CVMX_CMD_QUEUE_DMA(engine)));
+ result = cvmx_cmd_queue_write(CVMX_CMD_QUEUE_DMA(engine), 0, cmd_count, cmds);
+ /* This SYNCWS is needed since the command queue didn't do locking, which
+ normally implies the SYNCWS. This one makes sure the command queue
+ updates make it to L2 before we ring the doorbell */
+ CVMX_SYNCWS;
+ /* A syncw isn't needed here since the command queue did one as part of the queue unlock */
+ if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS))
+ {
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ /* DMA doorbells are 32-bit writes in little-endian space. This means we need to xor the address with 4 */
+ cvmx_write64_uint32(CVMX_PEXP_NPEI_DMAX_DBELL(engine)^4, cmd_count);
+ }
+ else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ cvmx_write_csr(CVMX_DPI_DMAX_DBELL(engine), cmd_count);
+ else
+ {
+ if (engine)
+ cvmx_write_csr(CVMX_NPI_HIGHP_DBELL, cmd_count);
+ else
+ cvmx_write_csr(CVMX_NPI_LOWP_DBELL, cmd_count);
+ }
+ }
+ /* Here is the unlock for the above errata workaround */
+ __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_get_state(CVMX_CMD_QUEUE_DMA(engine)));
+ return result;
+}
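[Editor's note: for illustration, a sketch of a single-fragment OUTBOUND command built by hand using only the types and functions in this diff; the two addresses are placeholders.]

    /* Sketch: one local source pointer (nfst=1) and one PCIe fragment
       (nlst=1). The PCIe side needs one length dword plus one address,
       so three buffer words follow the header. */
    uint64_t local_phys_addr = 0;   /* placeholder: cvmx_ptr_to_phys(buf) */
    uint64_t pcie_dest_addr = 0;    /* placeholder: host bus address */
    cvmx_dma_engine_header_t header;
    cvmx_dma_engine_buffer_t buffers[3];

    header.u64 = 0;
    header.s.type = CVMX_DMA_ENGINE_TRANSFER_OUTBOUND;
    header.s.nfst = 1;
    header.s.nlst = 1;

    buffers[0].u64 = 0;                     /* local source pointer */
    buffers[0].internal.size = 1024;
    buffers[0].internal.addr = local_phys_addr;
    buffers[1].u64 = 0;                     /* PCIe length dword */
    buffers[1].pcie_length.len0 = 1024;     /* must equal the local size */
    buffers[2].u64 = pcie_dest_addr;        /* PCIe address */

    if (cvmx_dma_engine_submit(0, header, 3, buffers) != 0)
        cvmx_dprintf("DMA submit failed\n");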
+
+
+/**
+ * @INTERNAL
+ * Function used by cvmx_dma_engine_transfer() to build the
+ * internal address list.
+ *
+ * @param buffers Location to store the list
+ * @param address Address to build list for
+ * @param size Length of the memory pointed to by address
+ *
+ * @return Number of internal pointer chunks created
+ */
+static inline int __cvmx_dma_engine_build_internal_pointers(cvmx_dma_engine_buffer_t *buffers, uint64_t address, int size)
+{
+ int segments = 0;
+ while (size)
+ {
+ /* Each internal chunk can contain a maximum of 8191 bytes */
+ int chunk = size;
+ if (chunk > 8191)
+ chunk = 8191;
+ buffers[segments].u64 = 0;
+ buffers[segments].internal.size = chunk;
+ buffers[segments].internal.addr = address;
+ address += chunk;
+ size -= chunk;
+ segments++;
+ }
+ return segments;
+}
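[Editor's note: as a worked example of the 8191-byte chunking above, a 20000-byte region produces three internal chunks (8191 + 8191 + 3618 bytes), so the corresponding nfst or nlst value would be 3.]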
+
+
+/**
+ * @INTERNAL
+ * Function used by cvmx_dma_engine_transfer() to build the PCI / PCIe address
+ * list.
+ * @param buffers Location to store the list
+ * @param address Address to build list for
+ * @param size Length of the memory pointed to by address
+ *
+ * @return Number of PCI / PCIe address chunks created. The number of words used
+ * will be segments + (segments-1)/4 + 1.
+ */
+static inline int __cvmx_dma_engine_build_external_pointers(cvmx_dma_engine_buffer_t *buffers, uint64_t address, int size)
+{
+ const int MAX_SIZE = 65535;
+ int segments = 0;
+ while (size)
+ {
+ /* Each block of 4 PCI / PCIe pointers uses one dword for lengths followed by
+ up to 4 addresses. This then repeats if more data is needed */
+ buffers[0].u64 = 0;
+ if (size <= MAX_SIZE)
+ {
+ /* Only one more segment needed */
+ buffers[0].pcie_length.len0 = size;
+ buffers[1].u64 = address;
+ segments++;
+ break;
+ }
+ else if (size <= MAX_SIZE * 2)
+ {
+ /* Two more segments needed */
+ buffers[0].pcie_length.len0 = MAX_SIZE;
+ buffers[0].pcie_length.len1 = size - MAX_SIZE;
+ buffers[1].u64 = address;
+ address += MAX_SIZE;
+ buffers[2].u64 = address;
+ segments+=2;
+ break;
+ }
+ else if (size <= MAX_SIZE * 3)
+ {
+ /* Three more segments needed */
+ buffers[0].pcie_length.len0 = MAX_SIZE;
+ buffers[0].pcie_length.len1 = MAX_SIZE;
+ buffers[0].pcie_length.len2 = size - MAX_SIZE * 2;
+ buffers[1].u64 = address;
+ address += MAX_SIZE;
+ buffers[2].u64 = address;
+ address += MAX_SIZE;
+ buffers[3].u64 = address;
+ segments+=3;
+ break;
+ }
+ else if (size <= MAX_SIZE * 4)
+ {
+ /* Four more segments needed */
+ buffers[0].pcie_length.len0 = MAX_SIZE;
+ buffers[0].pcie_length.len1 = MAX_SIZE;
+ buffers[0].pcie_length.len2 = MAX_SIZE;
+ buffers[0].pcie_length.len3 = size - MAX_SIZE * 3;
+ buffers[1].u64 = address;
+ address += MAX_SIZE;
+ buffers[2].u64 = address;
+ address += MAX_SIZE;
+ buffers[3].u64 = address;
+ address += MAX_SIZE;
+ buffers[4].u64 = address;
+ segments+=4;
+ break;
+ }
+ else
+ {
+ /* Five or more segments are needed */
+ buffers[0].pcie_length.len0 = MAX_SIZE;
+ buffers[0].pcie_length.len1 = MAX_SIZE;
+ buffers[0].pcie_length.len2 = MAX_SIZE;
+ buffers[0].pcie_length.len3 = MAX_SIZE;
+ buffers[1].u64 = address;
+ address += MAX_SIZE;
+ buffers[2].u64 = address;
+ address += MAX_SIZE;
+ buffers[3].u64 = address;
+ address += MAX_SIZE;
+ buffers[4].u64 = address;
+ address += MAX_SIZE;
+ size -= MAX_SIZE*4;
+ buffers += 5;
+ segments+=4;
+ }
+ }
+ return segments;
+}
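[Editor's note: to make the word count above concrete, a 150000-byte transfer splits into three segments (65535 + 65535 + 18930 bytes), which occupy one length dword plus three address words, matching segments + (segments-1)/4 + 1 = 3 + 0 + 1 = 4.]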
+
+
+/**
+ * Build the first and last pointers based on a DMA engine header
+ * and submit them to the engine. The purpose of this function is
+ * to simplify the building of DMA engine commands by automatically
+ * converting a simple address and size into the appropriate internal
+ * or PCI / PCIe address list. This function does not support gather lists,
+ * so you will need to build your own lists in that case.
+ *
+ * @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
+ * @param header DMA Command header. Note that the nfst and nlst fields do not
+ * need to be filled in. All other fields must be set properly.
+ * @param first_address
+ * Address to use for the first pointers. In the case of INTERNAL,
+ * INBOUND, and OUTBOUND this is an Octeon memory address. In the
+ * case of EXTERNAL, this is the source PCI / PCIe address.
+ * @param last_address
+ * Address to use for the last pointers. In the case of EXTERNAL,
+ * INBOUND, and OUTBOUND this is a PCI / PCIe address. In the
+ * case of INTERNAL, this is the Octeon memory destination address.
+ * @param size Size of the transfer to perform.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_dma_engine_transfer(int engine, cvmx_dma_engine_header_t header,
+ uint64_t first_address, uint64_t last_address,
+ int size)
+{
+ cvmx_dma_engine_buffer_t buffers[32];
+ int words = 0;
+
+ switch (header.s.type)
+ {
+ case CVMX_DMA_ENGINE_TRANSFER_INTERNAL:
+ header.s.nfst = __cvmx_dma_engine_build_internal_pointers(buffers, first_address, size);
+ words += header.s.nfst;
+ header.s.nlst = __cvmx_dma_engine_build_internal_pointers(buffers + words, last_address, size);
+ words += header.s.nlst;
+ break;
+ case CVMX_DMA_ENGINE_TRANSFER_INBOUND:
+ case CVMX_DMA_ENGINE_TRANSFER_OUTBOUND:
+ header.s.nfst = __cvmx_dma_engine_build_internal_pointers(buffers, first_address, size);
+ words += header.s.nfst;
+ header.s.nlst = __cvmx_dma_engine_build_external_pointers(buffers + words, last_address, size);
+ words += header.s.nlst + ((header.s.nlst-1) >> 2) + 1;
+ break;
+ case CVMX_DMA_ENGINE_TRANSFER_EXTERNAL:
+ header.s.nfst = __cvmx_dma_engine_build_external_pointers(buffers, first_address, size);
+ words += header.s.nfst + ((header.s.nfst-1) >> 2) + 1;
+ header.s.nlst = __cvmx_dma_engine_build_external_pointers(buffers + words, last_address, size);
+ words += header.s.nlst + ((header.s.nlst-1) >> 2) + 1;
+ break;
+ }
+ return cvmx_dma_engine_submit(engine, header, words, buffers);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_dma_engine_transfer);
+#endif
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-dma-engine.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-dma-engine.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-dma-engine.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-dma-engine.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,379 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the PCI / PCIe DMA engines. These are only available
+ * on chips with PCI / PCIe.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_DMA_ENGINES_H__
+#define __CVMX_DMA_ENGINES_H__
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx-dpi-defs.h>
+#else
+#include "cvmx-dpi-defs.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum
+{
+ CVMX_DMA_ENGINE_TRANSFER_OUTBOUND = 0, /**< OUTBOUND (read from L2/DRAM, write into PCI / PCIe memory space) */
+ CVMX_DMA_ENGINE_TRANSFER_INBOUND = 1, /**< INBOUND (read from PCI / PCIe memory space, write into L2/DRAM) */
+ CVMX_DMA_ENGINE_TRANSFER_INTERNAL = 2, /**< INTERNAL-ONLY (read from L2/DRAM, write into L2/DRAM). Only available on chips with PCIe */
+ CVMX_DMA_ENGINE_TRANSFER_EXTERNAL = 3, /**< EXTERNAL-ONLY (read from PCIe memory space, write into PCIe memory space). Only available on chips with PCIe */
+} cvmx_dma_engine_transfer_t;
+
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t reserved_60_63 : 4; /**< Must be zero */
+ uint64_t fport : 2; /**< First port. FPort indicates the physical PCIe port used for the
+ PCIe memory space pointers in the FIRST POINTERS block in the
+ EXTERNAL-ONLY case. Must be zero in the OUTBOUND, INBOUND and
+ INTERNAL-ONLY cases. Must be zero on chips with PCI */
+ uint64_t lport : 2; /**< Last port. LPort indicates the physical PCIe port used for the
+ PCIe memory space pointers in the LAST POINTERS block in the
+ OUTBOUND, INBOUND, and EXTERNAL-ONLY cases. Must be zero in the
+ INTERNAL-ONLY case. Must be zero on chips with PCI */
+ cvmx_dma_engine_transfer_t type : 2; /**< Type - A given PCI DMA transfer is either OUTBOUND (read from L2/DRAM,
+ write into PCI / PCIe memory space), INBOUND (read from PCI / PCIe memory space, write
+ into L2/DRAM), INTERNAL-ONLY (read from L2/DRAM, write into L2/DRAM), or
+ EXTERNAL-ONLY (read from PCIe memory space, write into PCIe memory space). */
+ uint64_t wqp : 1; /**< Work-queue pointer. When WQP = 1, PTR (if non-zero) is a pointer to a
+ work-queue entry that is submitted by the hardware after completing the DMA;
+ when WQP = 0, PTR (if non-zero) is a pointer to a byte in local memory that
+ is written to 0 by the hardware after completing the DMA. */
+ uint64_t c : 1; /**< C - Counter. 1 = use counter 1, 0 = use counter 0.
+ The C bit selects between the two counters (NPEI_DMA_CNTS[DMA0,DMA1])
+ that can optionally be updated after an OUTBOUND or EXTERNAL-ONLY
+ transfer, and also selects between the two forced-interrupt bits
+ (NPEI_INT_SUMn[DMA0_FI, DMA1_FI]) that can optionally be set after an
+ OUTBOUND or EXTERNAL-ONLY transfer. C must be zero for INBOUND or
+ INTERNAL-ONLY transfers. */
+ uint64_t ca : 1; /**< CA - Counter add.
+ When CA = 1, the hardware updates the selected counter after it completes the
+ PCI DMA OUTBOUND or EXTERNAL-ONLY Instruction.
+ - If C = 0, PCIE_DMA_CNT0 is updated
+ - If C = 1, PCIE_DMA_CNT1 is updated.
+ Note that this update may indirectly cause
+ NPEI_INT_SUM[DCNT0,DCNT1,DTIME0,DTIME1] to become set (depending
+ on the NPEI_DMA*_INT_LEVEL settings), so may cause interrupts to occur on a
+ remote PCI host.
+ - If NPEI_DMA_CONTROL[O_ADD1] = 1, the counter is updated by 1.
+ - If NPEI_DMA_CONTROL[O_ADD1] = 0, the counter is updated by the total
+ bytes in the transfer.
+ When CA = 0, the hardware does not update any counters.
+ For an INBOUND or INTERNAL-ONLY PCI DMA transfer, CA must never be
+ set, and the hardware never adds to the counters. */
+ uint64_t fi : 1; /**< FI - Force interrupt.
+ When FI is set for an OUTBOUND or EXTERNAL-ONLY transfer, the hardware
+ sets a forced interrupt bit after it completes the PCI DMA Instruction. If C = 0,
+ NPEI_INT_SUMn[DMA0_FI] is set, else NPEI_INT_SUMn[DMA1_FI] is set. For
+ an INBOUND or INTERNAL-ONLY PCI DMA operation, FI must never be set,
+ and the hardware never generates interrupts. */
+ uint64_t ii : 1; /**< II - Ignore the I bit (i.e. the I bit of the PCI DMA instruction local pointer).
+ For OUTBOUND transfers when II = 1, the I bit is ignored and the FL bit in the
+ DMA HDR alone determines whether the hardware frees any/all of the local
+ buffers in the FIRST POINTERS area:
+ - when FL = 1, the hardware frees the local buffer when II=1.
+ - when FL = 0, the hardware does not free the local buffer when II=1.
+ For OUTBOUND transfers when II = 0, the I bit in the local pointer selects
+ whether local buffers are freed on a pointer-by-pointer basis:
+ - when (FL XOR I) is true, the hardware frees the local buffer when II=0.
+ For INBOUND, INTERNAL-ONLY, and EXTERNAL-ONLY PCI DMA transfers,
+ II must never be set, and local buffers are never freed. */
+ uint64_t fl : 1; /**< FL - Free local buffer.
+ When FL = 1, for an OUTBOUND operation, it indicates that the local buffers in
+ the FIRST BUFFERS area should be freed.
+ If II = 1, the FL bit alone indicates whether the local buffer should be freed:
+ - when FL = 1, the hardware frees the local buffer when II=1.
+ - when FL = 0, the hardware does not free the local buffer when II=1.
+ If II = 0, the I bit in the local pointer (refer to Section 9.5.2) determines whether
+ the local buffer is freed:
+ - when (FL XOR I) is true, the hardware frees the local buffer when II=0.
+ For an INBOUND, INTERNAL-ONLY, or EXTERNAL-ONLY PCI DMA transfer,
+ FL must never be set, and local buffers are never freed. */
+ uint64_t nlst : 4; /**< NLST - Number Last pointers.
+ The number of pointers in the LAST POINTERS area.
+ In the INBOUND, OUTBOUND, and EXTERNAL-ONLY cases, the LAST
+ POINTERS area contains PCI components, and the number of 64-bit words
+ required in the LAST POINTERS area is:
+ - HDR.NLST + ((HDR.NLST + 3)/4) where the division removes the fraction.
+ In the INTERNAL-ONLY case, the LAST POINTERS area contains local
+ pointers, and the number of 64-bit words required in the LAST POINTERS area is:
+ - HDR.NLST
+ Note that the sum of the number of 64-bit words in the LAST POINTERS and
+ FIRST POINTERS area must never exceed 31. */
+ uint64_t nfst : 4; /**< NFST - Number First pointers.
+ The number of pointers in the FIRST POINTERS area.
+ In the INBOUND, OUTBOUND, and INTERNAL-ONLY cases, the FIRST
+ POINTERS area contains local pointers, and the number of 64-bit words required
+ in the FIRST POINTERS area is:
+ - HDR.NFST
+ In the EXTERNAL-ONLY case, the FIRST POINTERS area contains PCI
+ components, and the number of 64-bit words required in the FIRST POINTERS
+ area is:
+ - HDR.NFST + ((HDR.NFST + 3)/4) where the division removes the fraction. */
+ uint64_t addr : 40; /**< PTR - Pointer, either a work-queue-entry pointer (when WQP = 1) or a local
+ memory pointer (WQP = 0).
+ When WQP = 1 and PTR != 0x0, the hardware inserts the work-queue entry
+ indicated by PTR into a POW input queue after the PCI DMA operation is
+ complete. (Section 5.4 describes the work queue entry requirements in this
+ case.) When WQP = 1, PTR<2:0> must be 0x0.
+ When WQP = 0 and PTR != 0x0, the hardware writes the single byte in local
+ memory indicated by PTR to 0x0 after the PCI DMA operation is complete.
+ NPEI_DMA_CONTROL[B0_LEND] selects the endian-ness of PTR in this
+ case.
+ When PTR = 0x0, the hardware performs no operation after the PCI DMA
+ operation is complete. */
+ } s;
+} cvmx_dma_engine_header_t;
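[Editor's note: as an illustration only, not part of the header, filling in this structure for an INBOUND transfer that posts a work-queue entry on completion; wqe_phys is a placeholder.]

    /* Sketch: INBOUND header whose PTR is a work-queue-entry pointer.
       When WQP = 1, PTR<2:0> must be zero. nfst/nlst are left for
       cvmx_dma_engine_transfer() to fill in. */
    uint64_t wqe_phys = 0;   /* placeholder WQE physical address, 8-byte aligned */
    cvmx_dma_engine_header_t header;
    header.u64 = 0;
    header.s.type = CVMX_DMA_ENGINE_TRANSFER_INBOUND;
    header.s.lport = 0;      /* PCIe port for the LAST POINTERS block */
    header.s.wqp = 1;        /* submit a work-queue entry when done */
    header.s.addr = wqe_phys;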
+
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t i : 1; /**< I - Invert free.
+ This bit gives the software the ability to free buffers independently for an
+ OUTBOUND PCI DMA transfer. I is not used by the hardware when II is set. I
+ must not be set, and buffers are never freed, for INBOUND, INTERNAL-ONLY,
+ and EXTERNAL-ONLY PCI DMA transfers. */
+ uint64_t back : 4; /**< Back - Backup amount.
+ Allows the start of a buffer that is to be freed during an OUTBOUND transfer to
+ be different from the ptr value. Back specifies the amount to subtract from the
+ pointer to reach the start when freeing a buffer.
+ The address that is the start of the buffer being freed is:
+ - Buffer start address = ((ptr >> 7) - Back) << 7.
+ Back is only used by the hardware when the buffer corresponding to ptr is freed.
+ Back must be 0x0, and buffers are never freed, for INBOUND, INTERNAL-ONLY,
+ and EXTERNAL-ONLY PCI DMA transfers. */
+ uint64_t pool : 3; /**< Pool - Free pool.
+ Specifies which pool (of the eight hardware-managed FPA free pools) receives the
+ buffer associated with ptr when freed during an OUTBOUND transfer.
+ Pool is only used when the buffer corresponding to ptr is freed. Pool must be 0x0,
+ and buffers are never freed, for INBOUND, INTERNAL-ONLY, and EXTERNAL-ONLY
+ PCI DMA transfers. */
+ uint64_t f : 1; /**< F - Full-block writes are allowed.
+ When set, the hardware is permitted to write all the bytes in the cache blocks
+ covered by ptr through ptr + Size - 1. This can improve memory system performance
+ when the write misses in the L2 cache.
+ F can only be set for local pointers that can be written to:
+ - The local pointers in the FIRST POINTERS area that are write pointers for
+ INBOUND transfers.
+ - The local pointers in the LAST POINTERS area that are always write
+ pointers (when present for INTERNAL-ONLY transfers).
+ F must not be set for local pointers that are not written to:
+ - The local pointers in the FIRST POINTERS area for OUTBOUND and
+ INTERNAL-ONLY transfers. */
+ uint64_t a : 1; /**< A - Allocate L2.
+ This is a hint to the hardware that the cache blocks should be allocated in the L2
+ cache (if they were not already). */
+ uint64_t l : 1; /**< L - Little-endian.
+ When L is set, the data at ptr is in little-endian format rather than big-endian. */
+ uint64_t size : 13; /**< Size - Size in bytes of the contiguous space specified by ptr. A Size value of 0 is
+ illegal. Note that the sum of the sizes in the FIRST POINTERS area must always
+ exactly equal the sum of the sizes/lengths in the LAST POINTERS area:
+ - In the OUTBOUND and INBOUND cases, the HDR.NFST size fields in the
+ local pointers in the FIRST POINTERS area must exactly equal the lengths
+ of the HDR.NLST fragments in the PCI components in the LAST POINTERS
+ area.
+ - In the INTERNAL-ONLY case, the HDR.NFST size fields in the local
+ pointers in the FIRST POINTERS area must equal the HDR.NLST size
+ fields in the local pointers in the LAST POINTERS area. */
+ uint64_t reserved_36_39 : 4; /**< Must be zero */
+ uint64_t addr : 36; /**< L2/DRAM byte pointer. Points to where the packet data starts.
+ Ptr can be any byte alignment. Note that ptr is interpreted as a big-endian byte
+ pointer when L is clear, a little-endian byte pointer when L is set. */
+ } internal;
+ struct
+ {
+ uint64_t len0 : 16; /**< Length of PCI / PCIe memory for address 0 */
+ uint64_t len1 : 16; /**< Length of PCI / PCIe memory for address 1 */
+ uint64_t len2 : 16; /**< Length of PCI / PCIe memory for address 2 */
+ uint64_t len3 : 16; /**< Length of PCI / PCIe memory for address 3 */
+ } pcie_length;
+} cvmx_dma_engine_buffer_t;
+
+/**
+ * Initialize the DMA engines for use
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_dma_engine_initialize(void);
+
+/**
+ * Shutdown all DMA engines. The engines must be idle when this
+ * function is called.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_dma_engine_shutdown(void);
+
+/**
+ * Return the number of DMA engines supported by this chip
+ *
+ * @return Number of DMA engines
+ */
+int cvmx_dma_engine_get_num(void);
+
+/**
+ * Submit a series of DMA commands to the DMA engines.
+ *
+ * @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
+ * @param header Command header
+ * @param num_buffers
+ * The number of data pointers
+ * @param buffers Command data pointers
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_dma_engine_submit(int engine, cvmx_dma_engine_header_t header, int num_buffers, cvmx_dma_engine_buffer_t buffers[]);
+
+/**
+ * Build the first and last pointers based on a DMA engine header
+ * and submit them to the engine. The purpose of this function is
+ * to simplify the building of DMA engine commands by automatically
+ * converting a simple address and size into the appropriate internal
+ * or PCI / PCIe address list. This function does not support gather lists,
+ * so you will need to build your own lists in that case.
+ *
+ * @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
+ * @param header DMA Command header. Note that the nfst and nlst fields do not
+ * need to be filled in. All other fields must be set properly.
+ * @param first_address
+ * Address to use for the first pointers. In the case of INTERNAL,
+ * INBOUND, and OUTBOUND this is an Octeon memory address. In the
+ * case of EXTERNAL, this is the source PCI / PCIe address.
+ * @param last_address
+ * Address to use for the last pointers. In the case of EXTERNAL,
+ * INBOUND, and OUTBOUND this is a PCI / PCIe address. In the
+ * case of INTERNAL, this is the Octeon memory destination address.
+ * @param size Size of the transfer to perform.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_dma_engine_transfer(int engine, cvmx_dma_engine_header_t header,
+ uint64_t first_address, uint64_t last_address,
+ int size);
+
+/**
+ * Simplified interface to the DMA engines to emulate memcpy()
+ *
+ * @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
+ * @param dest Pointer to the destination memory. cvmx_ptr_to_phys() will be
+ * used to turn this into a physical address. It cannot be a local
+ * or CVMX_SHARED block.
+ * @param source Pointer to the source memory.
+ * cvmx_ptr_to_phys() will be used to turn this
+ * into a physical address. It cannot be a local
+ * or CVMX_SHARED block.
+ * @param length Number of bytes to copy
+ *
+ * @return Zero on success, negative on failure
+ */
+static inline int cvmx_dma_engine_memcpy(int engine, void *dest, void *source, int length)
+{
+ cvmx_dma_engine_header_t header;
+ header.u64 = 0;
+ header.s.type = CVMX_DMA_ENGINE_TRANSFER_INTERNAL;
+ return cvmx_dma_engine_transfer(engine, header, cvmx_ptr_to_phys(source),
+ cvmx_ptr_to_phys(dest), length);
+}
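[Editor's note: a minimal usage sketch, illustration only; dst and src are placeholders and, per the comment above, must not point at stack-local or CVMX_SHARED memory.]

    /* Sketch: queue a 4 KB copy between two DRAM buffers on engine 0. */
    void *dst = NULL;   /* placeholder destination buffer */
    void *src = NULL;   /* placeholder source buffer */
    if (cvmx_dma_engine_memcpy(0, dst, src, 4096) != 0)
        cvmx_dprintf("DMA memcpy submit failed\n");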
+
+/**
+ * Simplified interface to the DMA engines to emulate memcpy().
+ * When dici_mode is enabled, the completion zero-byte write is replaced
+ * by an increment of the selected core's DPI_DMA_PPn_CNT counter.
+ *
+ * @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
+ * @param dest Pointer to the destination memory. cvmx_ptr_to_phys() will be
+ * used to turn this into a physical address. It cannot be a local
+ * or CVMX_SHARED block.
+ * @param source Pointer to the source memory.
+ * cvmx_ptr_to_phys() will be used to turn this
+ * into a physical address. It cannot be a local
+ * or CVMX_SHARED block.
+ * @param length Number of bytes to copy
+ * @param core core number for zero byte write
+ *
+ * @return Zero on success, negative on failure
+ */
+static inline int cvmx_dma_engine_memcpy_zero_byte(int engine, void *dest, void *source, int length, int core)
+{
+ cvmx_dma_engine_header_t header;
+ header.u64 = 0;
+ header.s.type = CVMX_DMA_ENGINE_TRANSFER_INTERNAL;
+ /* If dici_mode is set, DPI increments the DPI_DMA_PPn_CNT[CNT], where the
+ value of core n is PTR<5:0>-1 when WQP=0 and PTR != 0 && PTR < 64. */
+ if (octeon_has_feature(OCTEON_FEATURE_DICI_MODE))
+ {
+ cvmx_dpi_dma_control_t dma_control;
+ dma_control.u64 = cvmx_read_csr(CVMX_DPI_DMA_CONTROL);
+ if (dma_control.s.dici_mode)
+ {
+ header.s.wqp = 0; // local memory pointer
+ header.s.addr = core + 1;
+ }
+ }
+ return cvmx_dma_engine_transfer(engine, header, cvmx_ptr_to_phys(source),
+ cvmx_ptr_to_phys(dest), length);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // __CVMX_DMA_ENGINES_H__
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-dma-engine.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-dpi-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-dpi-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-dpi-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,2072 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-dpi-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon dpi.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_DPI_DEFS_H__
+#define __CVMX_DPI_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_BIST_STATUS CVMX_DPI_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_DPI_BIST_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_BIST_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000000ull);
+}
+#else
+#define CVMX_DPI_BIST_STATUS (CVMX_ADD_IO_SEG(0x0001DF0000000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_CTL CVMX_DPI_CTL_FUNC()
+static inline uint64_t CVMX_DPI_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000040ull);
+}
+#else
+#define CVMX_DPI_CTL (CVMX_ADD_IO_SEG(0x0001DF0000000040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_DMAX_COUNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_DPI_DMAX_COUNTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000300ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_DPI_DMAX_COUNTS(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000300ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_DMAX_DBELL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_DPI_DMAX_DBELL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000200ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_DPI_DMAX_DBELL(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000200ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_DMAX_ERR_RSP_STATUS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_DPI_DMAX_ERR_RSP_STATUS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000A80ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_DPI_DMAX_ERR_RSP_STATUS(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000A80ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_DMAX_IBUFF_SADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_DPI_DMAX_IBUFF_SADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000280ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_DPI_DMAX_IBUFF_SADDR(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000280ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_DMAX_IFLIGHT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_DPI_DMAX_IFLIGHT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000A00ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_DPI_DMAX_IFLIGHT(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000A00ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_DMAX_NADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_DPI_DMAX_NADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000380ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_DPI_DMAX_NADDR(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000380ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_DMAX_REQBNK0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_DPI_DMAX_REQBNK0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000400ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_DPI_DMAX_REQBNK0(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000400ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_DMAX_REQBNK1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_DPI_DMAX_REQBNK1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000480ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_DPI_DMAX_REQBNK1(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000480ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_DMA_CONTROL CVMX_DPI_DMA_CONTROL_FUNC()
+static inline uint64_t CVMX_DPI_DMA_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_DMA_CONTROL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000048ull);
+}
+#else
+#define CVMX_DPI_DMA_CONTROL (CVMX_ADD_IO_SEG(0x0001DF0000000048ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_DMA_ENGX_EN(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_DPI_DMA_ENGX_EN(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000080ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_DPI_DMA_ENGX_EN(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000080ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_DMA_PPX_CNT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_DPI_DMA_PPX_CNT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000B00ull) + ((offset) & 31) * 8;
+}
+#else
+#define CVMX_DPI_DMA_PPX_CNT(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000B00ull) + ((offset) & 31) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_ENGX_BUF(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_DPI_ENGX_BUF(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000880ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_DPI_ENGX_BUF(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000880ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_INFO_REG CVMX_DPI_INFO_REG_FUNC()
+static inline uint64_t CVMX_DPI_INFO_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_INFO_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000980ull);
+}
+#else
+#define CVMX_DPI_INFO_REG (CVMX_ADD_IO_SEG(0x0001DF0000000980ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_INT_EN CVMX_DPI_INT_EN_FUNC()
+static inline uint64_t CVMX_DPI_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000010ull);
+}
+#else
+#define CVMX_DPI_INT_EN (CVMX_ADD_IO_SEG(0x0001DF0000000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_INT_REG CVMX_DPI_INT_REG_FUNC()
+static inline uint64_t CVMX_DPI_INT_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_INT_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000008ull);
+}
+#else
+#define CVMX_DPI_INT_REG (CVMX_ADD_IO_SEG(0x0001DF0000000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_NCBX_CFG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_DPI_NCBX_CFG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000800ull);
+}
+#else
+#define CVMX_DPI_NCBX_CFG(block_id) (CVMX_ADD_IO_SEG(0x0001DF0000000800ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_PINT_INFO CVMX_DPI_PINT_INFO_FUNC()
+static inline uint64_t CVMX_DPI_PINT_INFO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_PINT_INFO not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000830ull);
+}
+#else
+#define CVMX_DPI_PINT_INFO (CVMX_ADD_IO_SEG(0x0001DF0000000830ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_PKT_ERR_RSP CVMX_DPI_PKT_ERR_RSP_FUNC()
+static inline uint64_t CVMX_DPI_PKT_ERR_RSP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_PKT_ERR_RSP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000078ull);
+}
+#else
+#define CVMX_DPI_PKT_ERR_RSP (CVMX_ADD_IO_SEG(0x0001DF0000000078ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_REQ_ERR_RSP CVMX_DPI_REQ_ERR_RSP_FUNC()
+static inline uint64_t CVMX_DPI_REQ_ERR_RSP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_REQ_ERR_RSP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000058ull);
+}
+#else
+#define CVMX_DPI_REQ_ERR_RSP (CVMX_ADD_IO_SEG(0x0001DF0000000058ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_REQ_ERR_RSP_EN CVMX_DPI_REQ_ERR_RSP_EN_FUNC()
+static inline uint64_t CVMX_DPI_REQ_ERR_RSP_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_REQ_ERR_RSP_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000068ull);
+}
+#else
+#define CVMX_DPI_REQ_ERR_RSP_EN (CVMX_ADD_IO_SEG(0x0001DF0000000068ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_REQ_ERR_RST CVMX_DPI_REQ_ERR_RST_FUNC()
+static inline uint64_t CVMX_DPI_REQ_ERR_RST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_REQ_ERR_RST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000060ull);
+}
+#else
+#define CVMX_DPI_REQ_ERR_RST (CVMX_ADD_IO_SEG(0x0001DF0000000060ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_REQ_ERR_RST_EN CVMX_DPI_REQ_ERR_RST_EN_FUNC()
+static inline uint64_t CVMX_DPI_REQ_ERR_RST_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_REQ_ERR_RST_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000070ull);
+}
+#else
+#define CVMX_DPI_REQ_ERR_RST_EN (CVMX_ADD_IO_SEG(0x0001DF0000000070ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_REQ_ERR_SKIP_COMP CVMX_DPI_REQ_ERR_SKIP_COMP_FUNC()
+static inline uint64_t CVMX_DPI_REQ_ERR_SKIP_COMP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_REQ_ERR_SKIP_COMP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000838ull);
+}
+#else
+#define CVMX_DPI_REQ_ERR_SKIP_COMP (CVMX_ADD_IO_SEG(0x0001DF0000000838ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_DPI_REQ_GBL_EN CVMX_DPI_REQ_GBL_EN_FUNC()
+static inline uint64_t CVMX_DPI_REQ_GBL_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_DPI_REQ_GBL_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001DF0000000050ull);
+}
+#else
+#define CVMX_DPI_REQ_GBL_EN (CVMX_ADD_IO_SEG(0x0001DF0000000050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_SLI_PRTX_CFG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_DPI_SLI_PRTX_CFG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000900ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_DPI_SLI_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000900ull) + ((offset) & 3) * 8)
+#endif
+static inline uint64_t CVMX_DPI_SLI_PRTX_ERR(unsigned long offset)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 3))
+ return CVMX_ADD_IO_SEG(0x0001DF0000000920ull) + ((offset) & 3) * 8;
+ break;
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1))
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001DF0000000928ull) + ((offset) & 1) * 8;
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2))
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001DF0000000920ull) + ((offset) & 1) * 8;
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001DF0000000920ull) + ((offset) & 1) * 8;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001DF0000000928ull) + ((offset) & 1) * 8;
+ break;
+ }
+ cvmx_warn("CVMX_DPI_SLI_PRTX_ERR (offset = %lu) not supported on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000920ull) + ((offset) & 1) * 8;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_DPI_SLI_PRTX_ERR_INFO(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_DPI_SLI_PRTX_ERR_INFO(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001DF0000000940ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_DPI_SLI_PRTX_ERR_INFO(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000940ull) + ((offset) & 3) * 8)
+#endif
+
+/**
+ * cvmx_dpi_bist_status
+ */
+union cvmx_dpi_bist_status {
+ uint64_t u64;
+ struct cvmx_dpi_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t bist : 47; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 47;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dpi_bist_status_s cn61xx;
+ struct cvmx_dpi_bist_status_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63 : 19;
+ uint64_t bist : 45; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 45;
+ uint64_t reserved_45_63 : 19;
+#endif
+ } cn63xx;
+ struct cvmx_dpi_bist_status_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t bist : 37; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 37;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } cn63xxp1;
+ struct cvmx_dpi_bist_status_s cn66xx;
+ struct cvmx_dpi_bist_status_cn63xx cn68xx;
+ struct cvmx_dpi_bist_status_cn63xx cn68xxp1;
+ struct cvmx_dpi_bist_status_s cnf71xx;
+};
+typedef union cvmx_dpi_bist_status cvmx_dpi_bist_status_t;
+
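+/* Editor's note: illustrative sketch, not part of the SDK. A health check
+ * decodes DPI_BIST_STATUS through the union above; any bit set in BIST marks
+ * a memory that failed built-in self test. Assumes the CVMX_DPI_BIST_STATUS
+ * address macro defined earlier in this file. */
+#if 0 /* illustration only */
+static inline int dpi_bist_failed(void)
+{
+ cvmx_dpi_bist_status_t bist;
+ bist.u64 = cvmx_read_csr(CVMX_DPI_BIST_STATUS);
+ return bist.s.bist != 0; /* non-zero: at least one memory failed BIST */
+}
+#endif
+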
+/**
+ * cvmx_dpi_ctl
+ */
+union cvmx_dpi_ctl {
+ uint64_t u64;
+ struct cvmx_dpi_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t clk : 1; /**< Status bit that indicates that the clks are running */
+ uint64_t en : 1; /**< Turns on the DMA and Packet state machines */
+#else
+ uint64_t en : 1;
+ uint64_t clk : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_dpi_ctl_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t en : 1; /**< Turns on the DMA and Packet state machines */
+#else
+ uint64_t en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn61xx;
+ struct cvmx_dpi_ctl_s cn63xx;
+ struct cvmx_dpi_ctl_s cn63xxp1;
+ struct cvmx_dpi_ctl_s cn66xx;
+ struct cvmx_dpi_ctl_s cn68xx;
+ struct cvmx_dpi_ctl_s cn68xxp1;
+ struct cvmx_dpi_ctl_cn61xx cnf71xx;
+};
+typedef union cvmx_dpi_ctl cvmx_dpi_ctl_t;
+
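+/* Editor's note: illustrative sketch, not part of the SDK. Software brings up
+ * the DPI block by setting DPI_CTL[EN]; on models with the CLK status bit the
+ * layout above also lets software confirm the clocks are running first.
+ * Assumes the CVMX_DPI_CTL address macro defined earlier in this file. */
+#if 0 /* illustration only */
+static inline void dpi_enable(void)
+{
+ cvmx_dpi_ctl_t ctl;
+ ctl.u64 = cvmx_read_csr(CVMX_DPI_CTL);
+ ctl.s.en = 1; /* turn on the DMA and packet state machines */
+ cvmx_write_csr(CVMX_DPI_CTL, ctl.u64);
+}
+#endif
+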
+/**
+ * cvmx_dpi_dma#_counts
+ *
+ * DPI_DMA[0..7]_COUNTS = DMA Instruction Counts
+ *
+ * Values for determining the number of instructions for DMA[0..7] in the DPI.
+ */
+union cvmx_dpi_dmax_counts {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t fcnt : 7; /**< Number of words in the Instruction FIFO locally
+ cached within DPI. */
+ uint64_t dbell : 32; /**< Number of available words of Instructions to read. */
+#else
+ uint64_t dbell : 32;
+ uint64_t fcnt : 7;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } s;
+ struct cvmx_dpi_dmax_counts_s cn61xx;
+ struct cvmx_dpi_dmax_counts_s cn63xx;
+ struct cvmx_dpi_dmax_counts_s cn63xxp1;
+ struct cvmx_dpi_dmax_counts_s cn66xx;
+ struct cvmx_dpi_dmax_counts_s cn68xx;
+ struct cvmx_dpi_dmax_counts_s cn68xxp1;
+ struct cvmx_dpi_dmax_counts_s cnf71xx;
+};
+typedef union cvmx_dpi_dmax_counts cvmx_dpi_dmax_counts_t;
+
+/**
+ * cvmx_dpi_dma#_dbell
+ *
+ * DPI_DMA_DBELL[0..7] = DMA Door Bell
+ *
+ * The door bell register for DMA[0..7] queue.
+ */
+union cvmx_dpi_dmax_dbell {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t dbell : 16; /**< The value written to this register is added to the
+ number of 8-byte words to be read and processed for
+ the low priority dma queue. */
+#else
+ uint64_t dbell : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_dpi_dmax_dbell_s cn61xx;
+ struct cvmx_dpi_dmax_dbell_s cn63xx;
+ struct cvmx_dpi_dmax_dbell_s cn63xxp1;
+ struct cvmx_dpi_dmax_dbell_s cn66xx;
+ struct cvmx_dpi_dmax_dbell_s cn68xx;
+ struct cvmx_dpi_dmax_dbell_s cn68xxp1;
+ struct cvmx_dpi_dmax_dbell_s cnf71xx;
+};
+typedef union cvmx_dpi_dmax_dbell cvmx_dpi_dmax_dbell_t;
+
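+/* Editor's note: illustrative sketch, not part of the SDK. After appending
+ * instruction words to a queue's current chunk, software rings that queue's
+ * doorbell with the number of 8-byte words added; hardware accumulates the
+ * writes into DPI_DMAx_COUNTS[DBELL]. Assumes the CVMX_DPI_DMAX_DBELL macro
+ * defined earlier in this file. */
+#if 0 /* illustration only */
+static inline void dpi_ring_doorbell(int queue, unsigned words)
+{
+ cvmx_dpi_dmax_dbell_t dbell;
+ dbell.u64 = 0;
+ dbell.s.dbell = words; /* 8-byte instruction words just queued */
+ cvmx_write_csr(CVMX_DPI_DMAX_DBELL(queue), dbell.u64);
+}
+#endif
+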
+/**
+ * cvmx_dpi_dma#_err_rsp_status
+ */
+union cvmx_dpi_dmax_err_rsp_status {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_err_rsp_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t status : 6; /**< QUE captures the ErrorResponse status of the last
+ 6 instructions for each instruction queue.
+ STATUS<5> represents the status for first
+ instruction in instruction order while STATUS<0>
+ represents the last or most recent instruction.
+ If STATUS<n> is set, then the nth instruction in
+ the given queue experienced an ErrorResponse.
+ Otherwise, it completed normally. */
+#else
+ uint64_t status : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_dpi_dmax_err_rsp_status_s cn61xx;
+ struct cvmx_dpi_dmax_err_rsp_status_s cn66xx;
+ struct cvmx_dpi_dmax_err_rsp_status_s cn68xx;
+ struct cvmx_dpi_dmax_err_rsp_status_s cn68xxp1;
+ struct cvmx_dpi_dmax_err_rsp_status_s cnf71xx;
+};
+typedef union cvmx_dpi_dmax_err_rsp_status cvmx_dpi_dmax_err_rsp_status_t;
+
+/**
+ * cvmx_dpi_dma#_ibuff_saddr
+ *
+ * DPI_DMA[0..7]_IBUFF_SADDR = DMA Instruction Buffer Starting Address
+ *
+ * The address to start reading Instructions from for DMA[0..7].
+ */
+union cvmx_dpi_dmax_ibuff_saddr {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_ibuff_saddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t csize : 14; /**< The size in 8B words of the DMA Instruction Chunk.
+ This value should only be written at known times
+ in order to prevent corruption of the instruction
+ queue. The minimum CSIZE is 16 (one cacheblock). */
+ uint64_t reserved_41_47 : 7;
+ uint64_t idle : 1; /**< DMA Request Queue is IDLE */
+ uint64_t saddr : 33; /**< The 128 byte aligned starting or chunk address.
+ SADDR is address bit 35:7 of the starting
+ instructions address. When new chunks are fetched
+ by the HW, SADDR will be updated to reflect the
+ address of the current chunk.
+ A write to SADDR resets both the queue's doorbell
+ (DPI_DMAx_COUNTS[DBELL]) and its tail pointer
+ (DPI_DMAx_NADDR[ADDR]). */
+ uint64_t reserved_0_6 : 7;
+#else
+ uint64_t reserved_0_6 : 7;
+ uint64_t saddr : 33;
+ uint64_t idle : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t csize : 14;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_dpi_dmax_ibuff_saddr_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t csize : 14; /**< The size in 8B words of the DMA Instruction Chunk.
+ This value should only be written at known times
+ in order to prevent corruption of the instruction
+ queue. The minimum CSIZE is 16 (one cacheblock). */
+ uint64_t reserved_41_47 : 7;
+ uint64_t idle : 1; /**< DMA Request Queue is IDLE */
+ uint64_t reserved_36_39 : 4;
+ uint64_t saddr : 29; /**< The 128 byte aligned starting or chunk address.
+ SADDR is address bit 35:7 of the starting
+ instructions address. When new chunks are fetched
+ by the HW, SADDR will be updated to reflect the
+ address of the current chunk.
+ A write to SADDR resets both the queue's doorbell
+ (DPI_DMAx_COUNTS[DBELL]) and its tail pointer
+ (DPI_DMAx_NADDR[ADDR]). */
+ uint64_t reserved_0_6 : 7;
+#else
+ uint64_t reserved_0_6 : 7;
+ uint64_t saddr : 29;
+ uint64_t reserved_36_39 : 4;
+ uint64_t idle : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t csize : 14;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } cn61xx;
+ struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cn63xx;
+ struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cn63xxp1;
+ struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cn66xx;
+ struct cvmx_dpi_dmax_ibuff_saddr_s cn68xx;
+ struct cvmx_dpi_dmax_ibuff_saddr_s cn68xxp1;
+ struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cnf71xx;
+};
+typedef union cvmx_dpi_dmax_ibuff_saddr cvmx_dpi_dmax_ibuff_saddr_t;
+
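+/* Editor's note: illustrative sketch, not part of the SDK. Queue bring-up
+ * writes the 128-byte aligned first-chunk address and the chunk size in one
+ * shot; per the SADDR description above, this write also resets the queue's
+ * doorbell and tail pointer. SADDR holds bits 35:7 of the address, hence the
+ * shift. Uses the generic 's' view; the SADDR width differs per model. */
+#if 0 /* illustration only */
+static inline void dpi_queue_init(int queue, uint64_t chunk_phys, unsigned csize_words)
+{
+ cvmx_dpi_dmax_ibuff_saddr_t saddr;
+ saddr.u64 = 0;
+ saddr.s.csize = csize_words; /* chunk size in 8-byte words, minimum 16 */
+ saddr.s.saddr = chunk_phys >> 7; /* 128B-aligned starting chunk address */
+ cvmx_write_csr(CVMX_DPI_DMAX_IBUFF_SADDR(queue), saddr.u64);
+}
+#endif
+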
+/**
+ * cvmx_dpi_dma#_iflight
+ */
+union cvmx_dpi_dmax_iflight {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_iflight_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t cnt : 3; /**< The number of instructions from a given queue that
+ can be inflight to the DMA engines at a time.
+ Reset value matches the number of DMA engines. */
+#else
+ uint64_t cnt : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_dpi_dmax_iflight_s cn61xx;
+ struct cvmx_dpi_dmax_iflight_s cn66xx;
+ struct cvmx_dpi_dmax_iflight_s cn68xx;
+ struct cvmx_dpi_dmax_iflight_s cn68xxp1;
+ struct cvmx_dpi_dmax_iflight_s cnf71xx;
+};
+typedef union cvmx_dpi_dmax_iflight cvmx_dpi_dmax_iflight_t;
+
+/**
+ * cvmx_dpi_dma#_naddr
+ *
+ * DPI_DMA[0..7]_NADDR = DMA Next Ichunk Address
+ *
+ * The address from which DPI will read the next Ichunk data.
+ */
+union cvmx_dpi_dmax_naddr {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_naddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t addr : 40; /**< The next L2C address to read DMA# instructions
+ from. */
+#else
+ uint64_t addr : 40;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_dpi_dmax_naddr_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< The next L2C address to read DMA# instructions
+ from. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn61xx;
+ struct cvmx_dpi_dmax_naddr_cn61xx cn63xx;
+ struct cvmx_dpi_dmax_naddr_cn61xx cn63xxp1;
+ struct cvmx_dpi_dmax_naddr_cn61xx cn66xx;
+ struct cvmx_dpi_dmax_naddr_s cn68xx;
+ struct cvmx_dpi_dmax_naddr_s cn68xxp1;
+ struct cvmx_dpi_dmax_naddr_cn61xx cnf71xx;
+};
+typedef union cvmx_dpi_dmax_naddr cvmx_dpi_dmax_naddr_t;
+
+/**
+ * cvmx_dpi_dma#_reqbnk0
+ *
+ * DPI_DMA[0..7]_REQBNK0 = DMA Request State Bank0
+ *
+ * Current contents of the request state machine - bank0
+ */
+union cvmx_dpi_dmax_reqbnk0 {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_reqbnk0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t state : 64; /**< State */
+#else
+ uint64_t state : 64;
+#endif
+ } s;
+ struct cvmx_dpi_dmax_reqbnk0_s cn61xx;
+ struct cvmx_dpi_dmax_reqbnk0_s cn63xx;
+ struct cvmx_dpi_dmax_reqbnk0_s cn63xxp1;
+ struct cvmx_dpi_dmax_reqbnk0_s cn66xx;
+ struct cvmx_dpi_dmax_reqbnk0_s cn68xx;
+ struct cvmx_dpi_dmax_reqbnk0_s cn68xxp1;
+ struct cvmx_dpi_dmax_reqbnk0_s cnf71xx;
+};
+typedef union cvmx_dpi_dmax_reqbnk0 cvmx_dpi_dmax_reqbnk0_t;
+
+/**
+ * cvmx_dpi_dma#_reqbnk1
+ *
+ * DPI_DMA[0..7]_REQBNK1 = DMA Request State Bank1
+ *
+ * Current contents of the request state machine - bank1
+ */
+union cvmx_dpi_dmax_reqbnk1 {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_reqbnk1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t state : 64; /**< State */
+#else
+ uint64_t state : 64;
+#endif
+ } s;
+ struct cvmx_dpi_dmax_reqbnk1_s cn61xx;
+ struct cvmx_dpi_dmax_reqbnk1_s cn63xx;
+ struct cvmx_dpi_dmax_reqbnk1_s cn63xxp1;
+ struct cvmx_dpi_dmax_reqbnk1_s cn66xx;
+ struct cvmx_dpi_dmax_reqbnk1_s cn68xx;
+ struct cvmx_dpi_dmax_reqbnk1_s cn68xxp1;
+ struct cvmx_dpi_dmax_reqbnk1_s cnf71xx;
+};
+typedef union cvmx_dpi_dmax_reqbnk1 cvmx_dpi_dmax_reqbnk1_t;
+
+/**
+ * cvmx_dpi_dma_control
+ *
+ * DPI_DMA_CONTROL = DMA Control Register
+ *
+ * Controls operation of the DMA IN/OUT.
+ */
+union cvmx_dpi_dma_control {
+ uint64_t u64;
+ struct cvmx_dpi_dma_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t dici_mode : 1; /**< DMA Instruction Completion Interrupt Mode
+ turns on mode to increment DPI_DMA_PPx_CNT
+ counters. */
+ uint64_t pkt_en1 : 1; /**< Enables the 2nd packet interface.
+ When the packet interface is enabled, engine 4
+ is used for packets and is not available for DMA.
+ The packet interfaces must be enabled in order.
+ When PKT_EN1=1, then PKT_EN=1.
+ When PKT_EN1=1, then DMA_ENB<4>=0. */
+ uint64_t ffp_dis : 1; /**< Force forward progress disable
+ The DMA engines will compete for shared resources.
+ If the HW detects that particular engines are not
+ able to make requests to an interface, the HW
+ will periodically trade-off throughput for
+ fairness. */
+ uint64_t commit_mode : 1; /**< DMA Engine Commit Mode
+
+ When COMMIT_MODE=0, DPI considers an instruction
+ complete when the HW internally generates the
+ final write for the current instruction.
+
+ When COMMIT_MODE=1, DPI additionally waits for
+ the final write to reach the interface coherency
+ point to declare the instructions complete.
+
+ Please note: when COMMIT_MODE == 0, DPI may not
+ follow the HRM ordering rules.
+
+ DPI hardware performance may be better with
+ COMMIT_MODE == 0 than with COMMIT_MODE == 1 due
+ to the relaxed ordering rules.
+
+ If the HRM ordering rules are required, set
+ COMMIT_MODE == 1. */
+ uint64_t pkt_hp : 1; /**< High-Priority Mode for Packet Interface.
+ This mode has been deprecated. */
+ uint64_t pkt_en : 1; /**< Enables the 1st packet interface.
+ When the packet interface is enabled, engine 5
+ is used for packets and is not available for DMA.
+ When PKT_EN=1, then DMA_ENB<5>=0.
+ When PKT_EN1=1, then PKT_EN=1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t dma_enb : 6; /**< DMA engine enable. Enables the operation of the
+ DMA engine. After being enabled an engine should
+ not be disabled while processing instructions.
+ When PKT_EN=1, then DMA_ENB<5>=0.
+ When PKT_EN1=1, then DMA_ENB<4>=0. */
+ uint64_t reserved_34_47 : 14;
+ uint64_t b0_lend : 1; /**< When set '1' and the DPI is in the mode to write
+ 0 to L2C memory when a DMA is done, the address
+ to be written to will be treated as a Little
+ Endian address. */
+ uint64_t dwb_denb : 1; /**< When set '1', DPI will send a value in the DWB
+ field for a free page operation for the memory
+ that contained the data. */
+ uint64_t dwb_ichk : 9; /**< When Instruction Chunks for DMA operations are
+ freed this value is used for the DWB field of the
+ operation. */
+ uint64_t fpa_que : 3; /**< The FPA queue that the instruction-chunk page will
+ be returned to when used. */
+ uint64_t o_add1 : 1; /**< When set to '1', 1 will be added to the
+ SLI_DMAX_CNT DMA counters; if '0', the number of
+ bytes in the dma transfer will be added to the
+ SLI_DMAX_CNT count register. */
+ uint64_t o_ro : 1; /**< Relaxed Ordering Mode for DMA. */
+ uint64_t o_ns : 1; /**< Nosnoop For DMA. */
+ uint64_t o_es : 2; /**< Endian Swap Mode for DMA. */
+ uint64_t o_mode : 1; /**< Select PCI_POINTER MODE to be used.
+ 0=DPTR format 1 is used
+ use register values for address and pointer
+ values for ES, NS, RO
+ 1=DPTR format 0 is used
+ use pointer values for address and register
+ values for ES, NS, RO */
+ uint64_t reserved_0_13 : 14;
+#else
+ uint64_t reserved_0_13 : 14;
+ uint64_t o_mode : 1;
+ uint64_t o_es : 2;
+ uint64_t o_ns : 1;
+ uint64_t o_ro : 1;
+ uint64_t o_add1 : 1;
+ uint64_t fpa_que : 3;
+ uint64_t dwb_ichk : 9;
+ uint64_t dwb_denb : 1;
+ uint64_t b0_lend : 1;
+ uint64_t reserved_34_47 : 14;
+ uint64_t dma_enb : 6;
+ uint64_t reserved_54_55 : 2;
+ uint64_t pkt_en : 1;
+ uint64_t pkt_hp : 1;
+ uint64_t commit_mode : 1;
+ uint64_t ffp_dis : 1;
+ uint64_t pkt_en1 : 1;
+ uint64_t dici_mode : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_dpi_dma_control_s cn61xx;
+ struct cvmx_dpi_dma_control_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t pkt_en1 : 1; /**< Enables the 2nd packet interface.
+ When the packet interface is enabled, engine 4
+ is used for packets and is not available for DMA.
+ The packet interfaces must be enabled in order.
+ When PKT_EN1=1, then PKT_EN=1.
+ When PKT_EN1=1, then DMA_ENB<4>=0. */
+ uint64_t ffp_dis : 1; /**< Force forward progress disable
+ The DMA engines will compete for shared resources.
+ If the HW detects that particular engines are not
+ able to make requests to an interface, the HW
+ will periodically trade-off throughput for
+ fairness. */
+ uint64_t commit_mode : 1; /**< DMA Engine Commit Mode
+
+ When COMMIT_MODE=0, DPI considers an instruction
+ complete when the HW internally generates the
+ final write for the current instruction.
+
+ When COMMIT_MODE=1, DPI additionally waits for
+ the final write to reach the interface coherency
+ point to declare the instructions complete.
+
+ Please note: when COMMIT_MODE == 0, DPI may not
+ follow the HRM ordering rules.
+
+ DPI hardware performance may be better with
+ COMMIT_MODE == 0 than with COMMIT_MODE == 1 due
+ to the relaxed ordering rules.
+
+ If the HRM ordering rules are required, set
+ COMMIT_MODE == 1. */
+ uint64_t pkt_hp : 1; /**< High-Priority Mode for Packet Interface.
+ This mode has been deprecated. */
+ uint64_t pkt_en : 1; /**< Enables the 1st packet interface.
+ When the packet interface is enabled, engine 5
+ is used for packets and is not available for DMA.
+ When PKT_EN=1, then DMA_ENB<5>=0.
+ When PKT_EN1=1, then PKT_EN=1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t dma_enb : 6; /**< DMA engine enable. Enables the operation of the
+ DMA engine. After being enabled an engine should
+ not be disabled while processing instructions.
+ When PKT_EN=1, then DMA_ENB<5>=0.
+ When PKT_EN1=1, then DMA_ENB<4>=0. */
+ uint64_t reserved_34_47 : 14;
+ uint64_t b0_lend : 1; /**< When set '1' and the DPI is in the mode to write
+ 0 to L2C memory when a DMA is done, the address
+ to be written to will be treated as a Little
+ Endian address. */
+ uint64_t dwb_denb : 1; /**< When set '1', DPI will send a value in the DWB
+ field for a free page operation for the memory
+ that contained the data. */
+ uint64_t dwb_ichk : 9; /**< When Instruction Chunks for DMA operations are
+ freed this value is used for the DWB field of the
+ operation. */
+ uint64_t fpa_que : 3; /**< The FPA queue that the instruction-chunk page will
+ be returned to when used. */
+ uint64_t o_add1 : 1; /**< When set to '1', 1 will be added to the DMA
+ counters; if '0', the number of bytes in the dma
+ transfer will be added to the count register. */
+ uint64_t o_ro : 1; /**< Relaxed Ordering Mode for DMA. */
+ uint64_t o_ns : 1; /**< Nosnoop For DMA. */
+ uint64_t o_es : 2; /**< Endian Swap Mode for DMA. */
+ uint64_t o_mode : 1; /**< Select PCI_POINTER MODE to be used.
+ 0=DPTR format 1 is used
+ use register values for address and pointer
+ values for ES, NS, RO
+ 1=DPTR format 0 is used
+ use pointer values for address and register
+ values for ES, NS, RO */
+ uint64_t reserved_0_13 : 14;
+#else
+ uint64_t reserved_0_13 : 14;
+ uint64_t o_mode : 1;
+ uint64_t o_es : 2;
+ uint64_t o_ns : 1;
+ uint64_t o_ro : 1;
+ uint64_t o_add1 : 1;
+ uint64_t fpa_que : 3;
+ uint64_t dwb_ichk : 9;
+ uint64_t dwb_denb : 1;
+ uint64_t b0_lend : 1;
+ uint64_t reserved_34_47 : 14;
+ uint64_t dma_enb : 6;
+ uint64_t reserved_54_55 : 2;
+ uint64_t pkt_en : 1;
+ uint64_t pkt_hp : 1;
+ uint64_t commit_mode : 1;
+ uint64_t ffp_dis : 1;
+ uint64_t pkt_en1 : 1;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } cn63xx;
+ struct cvmx_dpi_dma_control_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t commit_mode : 1; /**< DMA Engine Commit Mode
+
+ When COMMIT_MODE=0, DPI considers an instruction
+ complete when the HW internally generates the
+ final write for the current instruction.
+
+ When COMMIT_MODE=1, DPI additionally waits for
+ the final write to reach the interface coherency
+ point to declare the instructions complete.
+
+ Please note: when COMMIT_MODE == 0, DPI may not
+ follow the HRM ordering rules.
+
+ DPI hardware performance may be better with
+ COMMIT_MODE == 0 than with COMMIT_MODE == 1 due
+ to the relaxed ordering rules.
+
+ If the HRM ordering rules are required, set
+ COMMIT_MODE == 1. */
+ uint64_t pkt_hp : 1; /**< High-Priority Mode for Packet Interface.
+ Engine 5 will be serviced more frequently to
+ deliver more bandwidth to packet interface.
+ When PKT_EN=0, then PKT_HP=0. */
+ uint64_t pkt_en : 1; /**< Enables the packet interface.
+ When the packet interface is enabled, engine 5
+ is used for packets and is not available for DMA.
+ When PKT_EN=1, then DMA_ENB<5>=0.
+ When PKT_EN=0, then PKT_HP=0. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t dma_enb : 6; /**< DMA engine enable. Enables the operation of the
+ DMA engine. After being enabled an engine should
+ not be disabled while processing instructions.
+ When PKT_EN=1, then DMA_ENB<5>=0. */
+ uint64_t reserved_34_47 : 14;
+ uint64_t b0_lend : 1; /**< When set '1' and the DPI is in the mode to write
+ 0 to L2C memory when a DMA is done, the address
+ to be written to will be treated as a Little
+ Endian address. */
+ uint64_t dwb_denb : 1; /**< When set '1', DPI will send a value in the DWB
+ field for a free page operation for the memory
+ that contained the data. */
+ uint64_t dwb_ichk : 9; /**< When Instruction Chunks for DMA operations are
+ freed this value is used for the DWB field of the
+ operation. */
+ uint64_t fpa_que : 3; /**< The FPA queue that the instruction-chunk page will
+ be returned to when used. */
+ uint64_t o_add1 : 1; /**< When set to '1', 1 will be added to the DMA
+ counters; if '0', the number of bytes in the dma
+ transfer will be added to the count register. */
+ uint64_t o_ro : 1; /**< Relaxed Ordering Mode for DMA. */
+ uint64_t o_ns : 1; /**< Nosnoop For DMA. */
+ uint64_t o_es : 2; /**< Endian Swap Mode for DMA. */
+ uint64_t o_mode : 1; /**< Select PCI_POINTER MODE to be used.
+ 0=DPTR format 1 is used
+ use register values for address and pointer
+ values for ES, NS, RO
+ 1=DPTR format 0 is used
+ use pointer values for address and register
+ values for ES, NS, RO */
+ uint64_t reserved_0_13 : 14;
+#else
+ uint64_t reserved_0_13 : 14;
+ uint64_t o_mode : 1;
+ uint64_t o_es : 2;
+ uint64_t o_ns : 1;
+ uint64_t o_ro : 1;
+ uint64_t o_add1 : 1;
+ uint64_t fpa_que : 3;
+ uint64_t dwb_ichk : 9;
+ uint64_t dwb_denb : 1;
+ uint64_t b0_lend : 1;
+ uint64_t reserved_34_47 : 14;
+ uint64_t dma_enb : 6;
+ uint64_t reserved_54_55 : 2;
+ uint64_t pkt_en : 1;
+ uint64_t pkt_hp : 1;
+ uint64_t commit_mode : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn63xxp1;
+ struct cvmx_dpi_dma_control_cn63xx cn66xx;
+ struct cvmx_dpi_dma_control_s cn68xx;
+ struct cvmx_dpi_dma_control_cn63xx cn68xxp1;
+ struct cvmx_dpi_dma_control_s cnf71xx;
+};
+typedef union cvmx_dpi_dma_control cvmx_dpi_dma_control_t;
+
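+/* Editor's note: illustrative sketch, not part of the SDK. A conservative
+ * bring-up keeps strict HRM ordering (COMMIT_MODE=1) and enables all six
+ * engines for DMA with both packet interfaces off, consistent with the
+ * PKT_EN/DMA_ENB rules described above. */
+#if 0 /* illustration only */
+static inline void dpi_dma_control_init(void)
+{
+ cvmx_dpi_dma_control_t ctl;
+ ctl.u64 = cvmx_read_csr(CVMX_DPI_DMA_CONTROL);
+ ctl.s.commit_mode = 1; /* wait for the coherency point (HRM ordering) */
+ ctl.s.pkt_en = 0; /* engines 4 and 5 stay available for DMA */
+ ctl.s.pkt_en1 = 0;
+ ctl.s.dma_enb = 0x3f; /* enable DMA engines 0-5 */
+ cvmx_write_csr(CVMX_DPI_DMA_CONTROL, ctl.u64);
+}
+#endif
+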
+/**
+ * cvmx_dpi_dma_eng#_en
+ */
+union cvmx_dpi_dma_engx_en {
+ uint64_t u64;
+ struct cvmx_dpi_dma_engx_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t qen : 8; /**< Controls which logical instruction queues can be
+ serviced by the DMA engine. Setting QEN==0
+ effectively disables the engine.
+ When DPI_DMA_CONTROL[PKT_EN] = 1, then
+ DPI_DMA_ENG5_EN[QEN] must be zero.
+ When DPI_DMA_CONTROL[PKT_EN1] = 1, then
+ DPI_DMA_ENG4_EN[QEN] must be zero. */
+#else
+ uint64_t qen : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_dpi_dma_engx_en_s cn61xx;
+ struct cvmx_dpi_dma_engx_en_s cn63xx;
+ struct cvmx_dpi_dma_engx_en_s cn63xxp1;
+ struct cvmx_dpi_dma_engx_en_s cn66xx;
+ struct cvmx_dpi_dma_engx_en_s cn68xx;
+ struct cvmx_dpi_dma_engx_en_s cn68xxp1;
+ struct cvmx_dpi_dma_engx_en_s cnf71xx;
+};
+typedef union cvmx_dpi_dma_engx_en cvmx_dpi_dma_engx_en_t;
+
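+/* Editor's note: illustrative sketch, not part of the SDK. QEN is a
+ * per-engine bitmask of the instruction queues that engine may service;
+ * QEN=0 parks the engine. Per the note above, engine 5 (and engine 4 when
+ * PKT_EN1=1) must keep QEN=0 while the packet interfaces own them. */
+#if 0 /* illustration only */
+static inline void dpi_engine_serve_all_queues(int engine)
+{
+ cvmx_dpi_dma_engx_en_t en;
+ en.u64 = 0;
+ en.s.qen = 0xff; /* this engine may service queues 0-7 */
+ cvmx_write_csr(CVMX_DPI_DMA_ENGX_EN(engine), en.u64);
+}
+#endif
+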
+/**
+ * cvmx_dpi_dma_pp#_cnt
+ *
+ * DPI_DMA_PP[0..3]_CNT = DMA per PP Instr Done Counter
+ *
+ * When DMA Instruction Completion Interrupt Mode (DPI_DMA_CONTROL.DICI_MODE) is enabled, every dma instruction
+ * that has WQP=0 and a PTR value of 1..4 will increment the DPI_DMA_PP(PTR-1)_CNT counter.
+ * Instructions with WQP=0 and PTR values higher than 0x3F will still send a zero byte write.
+ * Hardware reserves the values 5..63 for future use and will treat them as a PTR of 0 and do nothing.
+ */
+union cvmx_dpi_dma_ppx_cnt {
+ uint64_t u64;
+ struct cvmx_dpi_dma_ppx_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt : 16; /**< Counter incremented according to conditions
+ described above and decremented by values written
+ to this field. A non-zero CNT will cause
+ an interrupt in the CIU_SUM1_PPX_IPX register. */
+#else
+ uint64_t cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_dpi_dma_ppx_cnt_s cn61xx;
+ struct cvmx_dpi_dma_ppx_cnt_s cn68xx;
+ struct cvmx_dpi_dma_ppx_cnt_s cnf71xx;
+};
+typedef union cvmx_dpi_dma_ppx_cnt cvmx_dpi_dma_ppx_cnt_t;
+
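+/* Editor's note: illustrative sketch, not part of the SDK. With DICI_MODE
+ * set, completions increment the per-PP counter and a non-zero CNT raises the
+ * CIU interrupt; a handler acknowledges by writing back the number of
+ * completions it consumed, which hardware subtracts from CNT. */
+#if 0 /* illustration only */
+static inline void dpi_ack_completions(int pp, uint16_t consumed)
+{
+ cvmx_dpi_dma_ppx_cnt_t cnt;
+ cnt.u64 = 0;
+ cnt.s.cnt = consumed; /* written value is subtracted from CNT */
+ cvmx_write_csr(CVMX_DPI_DMA_PPX_CNT(pp), cnt.u64);
+}
+#endif
+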
+/**
+ * cvmx_dpi_eng#_buf
+ *
+ * Notes:
+ * The total amount of storage allocated to the 6 DPI DMA engines (via DPI_ENG*_BUF[BLKS]) must not exceed 8KB.
+ *
+ */
+union cvmx_dpi_engx_buf {
+ uint64_t u64;
+ struct cvmx_dpi_engx_buf_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t compblks : 5; /**< Computed engine block size */
+ uint64_t reserved_9_31 : 23;
+ uint64_t base : 5; /**< The base address in 512B blocks of the engine fifo */
+ uint64_t blks : 4; /**< The size of the engine fifo
+ Legal values are 0-10.
+ 0 = Engine is disabled
+ 1 = 0.5KB buffer
+ 2 = 1.0KB buffer
+ 3 = 1.5KB buffer
+ 4 = 2.0KB buffer
+ 5 = 2.5KB buffer
+ 6 = 3.0KB buffer
+ 7 = 3.5KB buffer
+ 8 = 4.0KB buffer
+ 9 = 6.0KB buffer
+ 10 = 8.0KB buffer */
+#else
+ uint64_t blks : 4;
+ uint64_t base : 5;
+ uint64_t reserved_9_31 : 23;
+ uint64_t compblks : 5;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } s;
+ struct cvmx_dpi_engx_buf_s cn61xx;
+ struct cvmx_dpi_engx_buf_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t base : 4; /**< The base address in 512B blocks of the engine fifo */
+ uint64_t blks : 4; /**< The size in 512B blocks of the engine fifo
+ Legal values are 0-8.
+ 0 = Engine is disabled
+ 1 = 0.5KB buffer
+ 2 = 1.0KB buffer
+ 3 = 1.5KB buffer
+ 4 = 2.0KB buffer
+ 5 = 2.5KB buffer
+ 6 = 3.0KB buffer
+ 7 = 3.5KB buffer
+ 8 = 4.0KB buffer */
+#else
+ uint64_t blks : 4;
+ uint64_t base : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn63xx;
+ struct cvmx_dpi_engx_buf_cn63xx cn63xxp1;
+ struct cvmx_dpi_engx_buf_s cn66xx;
+ struct cvmx_dpi_engx_buf_s cn68xx;
+ struct cvmx_dpi_engx_buf_s cn68xxp1;
+ struct cvmx_dpi_engx_buf_s cnf71xx;
+};
+typedef union cvmx_dpi_engx_buf cvmx_dpi_engx_buf_t;
+
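+/* Editor's note: illustrative sketch, not part of the SDK. Per the note
+ * above, the engine fifo allocations must not exceed 8KB in total; a bring-up
+ * check can decode each engine's BLKS encoding (0-10 maps to 0, 0.5 ... 4.0,
+ * 6.0, 8.0 KB) and sum the sizes in 0.5KB units. */
+#if 0 /* illustration only */
+static inline int dpi_engine_bufs_fit(void)
+{
+ static const unsigned half_kb[11] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16};
+ unsigned total = 0;
+ int eng;
+ for (eng = 0; eng < 6; eng++) {
+ cvmx_dpi_engx_buf_t buf;
+ buf.u64 = cvmx_read_csr(CVMX_DPI_ENGX_BUF(eng));
+ total += half_kb[buf.s.blks]; /* fifo size in 0.5KB units */
+ }
+ return total <= 16; /* 16 half-KB units == the 8KB budget */
+}
+#endif
+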
+/**
+ * cvmx_dpi_info_reg
+ */
+union cvmx_dpi_info_reg {
+ uint64_t u64;
+ struct cvmx_dpi_info_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t ffp : 4; /**< Force Forward Progress Indicator */
+ uint64_t reserved_2_3 : 2;
+ uint64_t ncb : 1; /**< NCB Register Access
+ This interrupt will fire in normal operation
+ when SW reads a DPI register through the NCB
+ interface. */
+ uint64_t rsl : 1; /**< RSL Register Access
+ This interrupt will fire in normal operation
+ when SW reads a DPI register through the RSL
+ interface. */
+#else
+ uint64_t rsl : 1;
+ uint64_t ncb : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t ffp : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_dpi_info_reg_s cn61xx;
+ struct cvmx_dpi_info_reg_s cn63xx;
+ struct cvmx_dpi_info_reg_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t ncb : 1; /**< NCB Register Access
+ This interrupt will fire in normal operation
+ when SW reads a DPI register through the NCB
+ interface. */
+ uint64_t rsl : 1; /**< RSL Register Access
+ This interrupt will fire in normal operation
+ when SW reads a DPI register through the RSL
+ interface. */
+#else
+ uint64_t rsl : 1;
+ uint64_t ncb : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn63xxp1;
+ struct cvmx_dpi_info_reg_s cn66xx;
+ struct cvmx_dpi_info_reg_s cn68xx;
+ struct cvmx_dpi_info_reg_s cn68xxp1;
+ struct cvmx_dpi_info_reg_s cnf71xx;
+};
+typedef union cvmx_dpi_info_reg cvmx_dpi_info_reg_t;
+
+/**
+ * cvmx_dpi_int_en
+ */
+union cvmx_dpi_int_en {
+ uint64_t u64;
+ struct cvmx_dpi_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t sprt3_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t sprt2_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t sprt1_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t sprt0_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t reserved_23_23 : 1;
+ uint64_t req_badfil : 1; /**< DMA instruction unexpected fill */
+ uint64_t req_inull : 1; /**< DMA instruction filled with NULL pointer */
+ uint64_t req_anull : 1; /**< DMA instruction filled with bad instruction */
+ uint64_t req_undflw : 1; /**< DMA instruction FIFO underflow */
+ uint64_t req_ovrflw : 1; /**< DMA instruction FIFO overflow */
+ uint64_t req_badlen : 1; /**< DMA instruction fetch with length of zero */
+ uint64_t req_badadr : 1; /**< DMA instruction fetch with bad pointer */
+ uint64_t dmadbo : 8; /**< DMAx doorbell overflow. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t nfovr : 1; /**< CSR Fifo Overflow */
+ uint64_t nderr : 1; /**< NCB Decode Error */
+#else
+ uint64_t nderr : 1;
+ uint64_t nfovr : 1;
+ uint64_t reserved_2_7 : 6;
+ uint64_t dmadbo : 8;
+ uint64_t req_badadr : 1;
+ uint64_t req_badlen : 1;
+ uint64_t req_ovrflw : 1;
+ uint64_t req_undflw : 1;
+ uint64_t req_anull : 1;
+ uint64_t req_inull : 1;
+ uint64_t req_badfil : 1;
+ uint64_t reserved_23_23 : 1;
+ uint64_t sprt0_rst : 1;
+ uint64_t sprt1_rst : 1;
+ uint64_t sprt2_rst : 1;
+ uint64_t sprt3_rst : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_dpi_int_en_s cn61xx;
+ struct cvmx_dpi_int_en_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63 : 38;
+ uint64_t sprt1_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t sprt0_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t reserved_23_23 : 1;
+ uint64_t req_badfil : 1; /**< DMA instruction unexpected fill */
+ uint64_t req_inull : 1; /**< DMA instruction filled with NULL pointer */
+ uint64_t req_anull : 1; /**< DMA instruction filled with bad instruction */
+ uint64_t req_undflw : 1; /**< DMA instruction FIFO underflow */
+ uint64_t req_ovrflw : 1; /**< DMA instruction FIFO overflow */
+ uint64_t req_badlen : 1; /**< DMA instruction fetch with length of zero */
+ uint64_t req_badadr : 1; /**< DMA instruction fetch with bad pointer */
+ uint64_t dmadbo : 8; /**< DMAx doorbell overflow. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t nfovr : 1; /**< CSR Fifo Overflow */
+ uint64_t nderr : 1; /**< NCB Decode Error */
+#else
+ uint64_t nderr : 1;
+ uint64_t nfovr : 1;
+ uint64_t reserved_2_7 : 6;
+ uint64_t dmadbo : 8;
+ uint64_t req_badadr : 1;
+ uint64_t req_badlen : 1;
+ uint64_t req_ovrflw : 1;
+ uint64_t req_undflw : 1;
+ uint64_t req_anull : 1;
+ uint64_t req_inull : 1;
+ uint64_t req_badfil : 1;
+ uint64_t reserved_23_23 : 1;
+ uint64_t sprt0_rst : 1;
+ uint64_t sprt1_rst : 1;
+ uint64_t reserved_26_63 : 38;
+#endif
+ } cn63xx;
+ struct cvmx_dpi_int_en_cn63xx cn63xxp1;
+ struct cvmx_dpi_int_en_s cn66xx;
+ struct cvmx_dpi_int_en_cn63xx cn68xx;
+ struct cvmx_dpi_int_en_cn63xx cn68xxp1;
+ struct cvmx_dpi_int_en_s cnf71xx;
+};
+typedef union cvmx_dpi_int_en cvmx_dpi_int_en_t;
+
+/**
+ * cvmx_dpi_int_reg
+ */
+union cvmx_dpi_int_reg {
+ uint64_t u64;
+ struct cvmx_dpi_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t sprt3_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t sprt2_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t sprt1_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t sprt0_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t reserved_23_23 : 1;
+ uint64_t req_badfil : 1; /**< DMA instruction unexpected fill
+ Instruction fill when none outstanding. */
+ uint64_t req_inull : 1; /**< DMA instruction filled with NULL pointer
+ Next pointer was NULL. */
+ uint64_t req_anull : 1; /**< DMA instruction filled with bad instruction
+ Fetched instruction word was 0. */
+ uint64_t req_undflw : 1; /**< DMA instruction FIFO underflow
+ DPI tracks outstanding instructions fetches.
+ Interrupt will fire when FIFO underflows. */
+ uint64_t req_ovrflw : 1; /**< DMA instruction FIFO overflow
+ DPI tracks outstanding instructions fetches.
+ Interrupt will fire when FIFO overflows. */
+ uint64_t req_badlen : 1; /**< DMA instruction fetch with length
+ Interrupt will fire if DPI forms an instruction
+ fetch with length of zero. */
+ uint64_t req_badadr : 1; /**< DMA instruction fetch with bad pointer
+ Interrupt will fire if DPI forms an instruction
+ fetch to the NULL pointer. */
+ uint64_t dmadbo : 8; /**< DMAx doorbell overflow.
+ DPI has a 32-bit counter for each request's queue
+ outstanding doorbell counts. Interrupt will fire
+ if the count overflows. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t nfovr : 1; /**< CSR Fifo Overflow
+ DPI can store up to 16 CSR requests. The FIFO will
+ overflow if that number is exceeded. */
+ uint64_t nderr : 1; /**< NCB Decode Error
+ DPI received a NCB transaction on the outbound
+ bus to the DPI deviceID, but the command was not
+ recognized. */
+#else
+ uint64_t nderr : 1;
+ uint64_t nfovr : 1;
+ uint64_t reserved_2_7 : 6;
+ uint64_t dmadbo : 8;
+ uint64_t req_badadr : 1;
+ uint64_t req_badlen : 1;
+ uint64_t req_ovrflw : 1;
+ uint64_t req_undflw : 1;
+ uint64_t req_anull : 1;
+ uint64_t req_inull : 1;
+ uint64_t req_badfil : 1;
+ uint64_t reserved_23_23 : 1;
+ uint64_t sprt0_rst : 1;
+ uint64_t sprt1_rst : 1;
+ uint64_t sprt2_rst : 1;
+ uint64_t sprt3_rst : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_dpi_int_reg_s cn61xx;
+ struct cvmx_dpi_int_reg_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63 : 38;
+ uint64_t sprt1_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t sprt0_rst : 1; /**< Set when a DMA instruction is dropped because
+ the source or destination port was in reset. */
+ uint64_t reserved_23_23 : 1;
+ uint64_t req_badfil : 1; /**< DMA instruction unexpected fill
+ Instruction fill when none outstanding. */
+ uint64_t req_inull : 1; /**< DMA instruction filled with NULL pointer
+ Next pointer was NULL. */
+ uint64_t req_anull : 1; /**< DMA instruction filled with bad instruction
+ Fetched instruction word was 0. */
+ uint64_t req_undflw : 1; /**< DMA instruction FIFO underflow
+ DPI tracks outstanding instructions fetches.
+ Interrupt will fire when FIFO underflows. */
+ uint64_t req_ovrflw : 1; /**< DMA instruction FIFO overflow
+ DPI tracks outstanding instructions fetches.
+ Interrupt will fire when FIFO overflows. */
+ uint64_t req_badlen : 1; /**< DMA instruction fetch with length
+ Interrupt will fire if DPI forms an instruction
+ fetch with length of zero. */
+ uint64_t req_badadr : 1; /**< DMA instruction fetch with bad pointer
+ Interrupt will fire if DPI forms an instruction
+ fetch to the NULL pointer. */
+ uint64_t dmadbo : 8; /**< DMAx doorbell overflow.
+ DPI has a 32-bit counter for each request's queue
+ outstanding doorbell counts. Interrupt will fire
+ if the count overflows. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t nfovr : 1; /**< CSR Fifo Overflow
+ DPI can store up to 16 CSR requests. The FIFO will
+ overflow if that number is exceeded. */
+ uint64_t nderr : 1; /**< NCB Decode Error
+ DPI received a NCB transaction on the outbound
+ bus to the DPI deviceID, but the command was not
+ recognized. */
+#else
+ uint64_t nderr : 1;
+ uint64_t nfovr : 1;
+ uint64_t reserved_2_7 : 6;
+ uint64_t dmadbo : 8;
+ uint64_t req_badadr : 1;
+ uint64_t req_badlen : 1;
+ uint64_t req_ovrflw : 1;
+ uint64_t req_undflw : 1;
+ uint64_t req_anull : 1;
+ uint64_t req_inull : 1;
+ uint64_t req_badfil : 1;
+ uint64_t reserved_23_23 : 1;
+ uint64_t sprt0_rst : 1;
+ uint64_t sprt1_rst : 1;
+ uint64_t reserved_26_63 : 38;
+#endif
+ } cn63xx;
+ struct cvmx_dpi_int_reg_cn63xx cn63xxp1;
+ struct cvmx_dpi_int_reg_s cn66xx;
+ struct cvmx_dpi_int_reg_cn63xx cn68xx;
+ struct cvmx_dpi_int_reg_cn63xx cn68xxp1;
+ struct cvmx_dpi_int_reg_s cnf71xx;
+};
+typedef union cvmx_dpi_int_reg cvmx_dpi_int_reg_t;
+
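+/* Editor's note: illustrative sketch, not part of the SDK. A service routine
+ * reads DPI_INT_REG, inspects the causes it cares about, and writes the same
+ * value back to acknowledge them (assuming the write-one-to-clear semantics
+ * used by summary registers elsewhere in this SDK). */
+#if 0 /* illustration only */
+static inline void dpi_handle_errors(void)
+{
+ cvmx_dpi_int_reg_t isr;
+ isr.u64 = cvmx_read_csr(CVMX_DPI_INT_REG);
+ if (isr.s.dmadbo)
+ cvmx_warn("DPI: doorbell overflow, queue mask 0x%x\n", (int)isr.s.dmadbo);
+ cvmx_write_csr(CVMX_DPI_INT_REG, isr.u64); /* assumed W1C acknowledge */
+}
+#endif
+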
+/**
+ * cvmx_dpi_ncb#_cfg
+ */
+union cvmx_dpi_ncbx_cfg {
+ uint64_t u64;
+ struct cvmx_dpi_ncbx_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t molr : 6; /**< Max Outstanding Load Requests
+ Limits the number of outstanding load requests on
+ the NCB interface. This value can range from 1
+ to 32. Setting a value of 0 will halt all read
+ traffic to the NCB interface. There are no
+ restrictions on when this value can be changed. */
+#else
+ uint64_t molr : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_dpi_ncbx_cfg_s cn61xx;
+ struct cvmx_dpi_ncbx_cfg_s cn66xx;
+ struct cvmx_dpi_ncbx_cfg_s cn68xx;
+ struct cvmx_dpi_ncbx_cfg_s cnf71xx;
+};
+typedef union cvmx_dpi_ncbx_cfg cvmx_dpi_ncbx_cfg_t;
+
+/**
+ * cvmx_dpi_pint_info
+ *
+ * DPI_PINT_INFO = DPI Packet Interrupt Info
+ *
+ * DPI Packet Interrupt Info.
+ */
+union cvmx_dpi_pint_info {
+ uint64_t u64;
+ struct cvmx_dpi_pint_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t iinfo : 6; /**< Packet Instruction Doorbell count overflow info */
+ uint64_t reserved_6_7 : 2;
+ uint64_t sinfo : 6; /**< Packet Scatterlist Doorbell count overflow info */
+#else
+ uint64_t sinfo : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t iinfo : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_dpi_pint_info_s cn61xx;
+ struct cvmx_dpi_pint_info_s cn63xx;
+ struct cvmx_dpi_pint_info_s cn63xxp1;
+ struct cvmx_dpi_pint_info_s cn66xx;
+ struct cvmx_dpi_pint_info_s cn68xx;
+ struct cvmx_dpi_pint_info_s cn68xxp1;
+ struct cvmx_dpi_pint_info_s cnf71xx;
+};
+typedef union cvmx_dpi_pint_info cvmx_dpi_pint_info_t;
+
+/**
+ * cvmx_dpi_pkt_err_rsp
+ */
+union cvmx_dpi_pkt_err_rsp {
+ uint64_t u64;
+ struct cvmx_dpi_pkt_err_rsp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t pkterr : 1; /**< Indicates that an ErrorResponse was received from
+ the I/O subsystem. */
+#else
+ uint64_t pkterr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_dpi_pkt_err_rsp_s cn61xx;
+ struct cvmx_dpi_pkt_err_rsp_s cn63xx;
+ struct cvmx_dpi_pkt_err_rsp_s cn63xxp1;
+ struct cvmx_dpi_pkt_err_rsp_s cn66xx;
+ struct cvmx_dpi_pkt_err_rsp_s cn68xx;
+ struct cvmx_dpi_pkt_err_rsp_s cn68xxp1;
+ struct cvmx_dpi_pkt_err_rsp_s cnf71xx;
+};
+typedef union cvmx_dpi_pkt_err_rsp cvmx_dpi_pkt_err_rsp_t;
+
+/**
+ * cvmx_dpi_req_err_rsp
+ */
+union cvmx_dpi_req_err_rsp {
+ uint64_t u64;
+ struct cvmx_dpi_req_err_rsp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t qerr : 8; /**< Indicates which instruction queue received an
+ ErrorResponse from the I/O subsystem.
+ SW must clear the bit before the corresponding
+ instruction queue will continue processing
+ instructions if DPI_REQ_ERR_RSP_EN[EN] is set. */
+#else
+ uint64_t qerr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_dpi_req_err_rsp_s cn61xx;
+ struct cvmx_dpi_req_err_rsp_s cn63xx;
+ struct cvmx_dpi_req_err_rsp_s cn63xxp1;
+ struct cvmx_dpi_req_err_rsp_s cn66xx;
+ struct cvmx_dpi_req_err_rsp_s cn68xx;
+ struct cvmx_dpi_req_err_rsp_s cn68xxp1;
+ struct cvmx_dpi_req_err_rsp_s cnf71xx;
+};
+typedef union cvmx_dpi_req_err_rsp cvmx_dpi_req_err_rsp_t;
+
+/**
+ * cvmx_dpi_req_err_rsp_en
+ */
+union cvmx_dpi_req_err_rsp_en {
+ uint64_t u64;
+ struct cvmx_dpi_req_err_rsp_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t en : 8; /**< Indicates which instruction queues should stop
+ dispatching instructions when an ErrorResponse
+ is received from the I/O subsystem. */
+#else
+ uint64_t en : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_dpi_req_err_rsp_en_s cn61xx;
+ struct cvmx_dpi_req_err_rsp_en_s cn63xx;
+ struct cvmx_dpi_req_err_rsp_en_s cn63xxp1;
+ struct cvmx_dpi_req_err_rsp_en_s cn66xx;
+ struct cvmx_dpi_req_err_rsp_en_s cn68xx;
+ struct cvmx_dpi_req_err_rsp_en_s cn68xxp1;
+ struct cvmx_dpi_req_err_rsp_en_s cnf71xx;
+};
+typedef union cvmx_dpi_req_err_rsp_en cvmx_dpi_req_err_rsp_en_t;
+
+/**
+ * cvmx_dpi_req_err_rst
+ */
+union cvmx_dpi_req_err_rst {
+ uint64_t u64;
+ struct cvmx_dpi_req_err_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t qerr : 8; /**< Indicates which instruction queue dropped an
+ instruction because the source or destination
+ was in reset.
+ SW must clear the bit before the corresponding
+ instruction queue will continue processing
+ instructions if DPI_REQ_ERR_RST_EN[EN] is set. */
+#else
+ uint64_t qerr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_dpi_req_err_rst_s cn61xx;
+ struct cvmx_dpi_req_err_rst_s cn63xx;
+ struct cvmx_dpi_req_err_rst_s cn63xxp1;
+ struct cvmx_dpi_req_err_rst_s cn66xx;
+ struct cvmx_dpi_req_err_rst_s cn68xx;
+ struct cvmx_dpi_req_err_rst_s cn68xxp1;
+ struct cvmx_dpi_req_err_rst_s cnf71xx;
+};
+typedef union cvmx_dpi_req_err_rst cvmx_dpi_req_err_rst_t;
+
+/**
+ * cvmx_dpi_req_err_rst_en
+ */
+union cvmx_dpi_req_err_rst_en {
+ uint64_t u64;
+ struct cvmx_dpi_req_err_rst_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t en : 8; /**< Indicates which instruction queues should stop
+ dispatching instructions when an instruction
+ is dropped because the source or destination port
+ is in reset. */
+#else
+ uint64_t en : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_dpi_req_err_rst_en_s cn61xx;
+ struct cvmx_dpi_req_err_rst_en_s cn63xx;
+ struct cvmx_dpi_req_err_rst_en_s cn63xxp1;
+ struct cvmx_dpi_req_err_rst_en_s cn66xx;
+ struct cvmx_dpi_req_err_rst_en_s cn68xx;
+ struct cvmx_dpi_req_err_rst_en_s cn68xxp1;
+ struct cvmx_dpi_req_err_rst_en_s cnf71xx;
+};
+typedef union cvmx_dpi_req_err_rst_en cvmx_dpi_req_err_rst_en_t;
+
+/**
+ * cvmx_dpi_req_err_skip_comp
+ */
+union cvmx_dpi_req_err_skip_comp {
+ uint64_t u64;
+ struct cvmx_dpi_req_err_skip_comp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t en_rst : 8; /**< Indicates which instruction queue should skip the
+ completion phase once a port reset is
+ detected as indicated by DPI_REQ_ERR_RST. All
+ completions to the affected instruction queue
+ will be skipped as long as
+ DPI_REQ_ERR_RSP[QERR<ique>] & EN_RSP<ique> or
+ DPI_REQ_ERR_RST[QERR<ique>] & EN_RST<ique> are
+ set. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t en_rsp : 8; /**< Indicates which instruction queue should skip the
+ completion phase once an ErrorResponse is
+ detected as indicated by DPI_REQ_ERR_RSP. All
+ completions to the affected instruction queue
+ will be skipped as long as
+ DPI_REQ_ERR_RSP[QERR<ique>] & EN_RSP<ique> or
+ DPI_REQ_ERR_RST[QERR<ique>] & EN_RST<ique> are
+ set. */
+#else
+ uint64_t en_rsp : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t en_rst : 8;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_dpi_req_err_skip_comp_s cn61xx;
+ struct cvmx_dpi_req_err_skip_comp_s cn66xx;
+ struct cvmx_dpi_req_err_skip_comp_s cn68xx;
+ struct cvmx_dpi_req_err_skip_comp_s cn68xxp1;
+ struct cvmx_dpi_req_err_skip_comp_s cnf71xx;
+};
+typedef union cvmx_dpi_req_err_skip_comp cvmx_dpi_req_err_skip_comp_t;
+
+/**
+ * cvmx_dpi_req_gbl_en
+ */
+union cvmx_dpi_req_gbl_en {
+ uint64_t u64;
+ struct cvmx_dpi_req_gbl_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t qen : 8; /**< Indicates which instruction queues are enabled and
+ can dispatch instructions to a requesting engine. */
+#else
+ uint64_t qen : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_dpi_req_gbl_en_s cn61xx;
+ struct cvmx_dpi_req_gbl_en_s cn63xx;
+ struct cvmx_dpi_req_gbl_en_s cn63xxp1;
+ struct cvmx_dpi_req_gbl_en_s cn66xx;
+ struct cvmx_dpi_req_gbl_en_s cn68xx;
+ struct cvmx_dpi_req_gbl_en_s cn68xxp1;
+ struct cvmx_dpi_req_gbl_en_s cnf71xx;
+};
+typedef union cvmx_dpi_req_gbl_en cvmx_dpi_req_gbl_en_t;
+
+/**
+ * cvmx_dpi_sli_prt#_cfg
+ *
+ * DPI_SLI_PRTx_CFG = DPI SLI Port Configuration
+ *
+ * Configures the Max Read Request Size, Max Payload Size, and Max Number of SLI Tags in use
+ */
+union cvmx_dpi_sli_prtx_cfg {
+ uint64_t u64;
+ struct cvmx_dpi_sli_prtx_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t halt : 1; /**< When set, HALT indicates that the MAC has detected
+ a reset condition. No further instructions that
+ reference the MAC from any instruction Q will be
+ issued until the MAC comes out of reset and HALT
+ is cleared in SLI_CTL_PORTx[DIS_PORT]. */
+ uint64_t qlm_cfg : 4; /**< QLM_CFG is a function of MIO_QLMx_CFG[QLM_CFG]
+ QLM_CFG may contain values that are not normally
+ used for DMA and/or packet operations.
+ QLM_CFG does not indicate if a port is disabled.
+ MIO_QLMx_CFG can be used for more complete QLM
+ configuration information.
+ 0000 = MAC is PCIe 1x4 (QLM) or 1x2 (DLM)
+ 0001 = MAC is PCIe 2x1 (DLM only)
+ 0010 = MAC is SGMII
+ 0011 = MAC is XAUI
+ all other encodings are RESERVED */
+ uint64_t reserved_17_19 : 3;
+ uint64_t rd_mode : 1; /**< Read Mode
+ 0=Exact Read Mode
+ If the port is a PCIe port, the HW reads on a
+ 4B granularity. In this mode, the HW may break
+ a given read into 3 operations to satisfy
+ PCIe rules.
+ If the port is a SRIO port, the HW follows the
+ SRIO read rules from the SRIO specification and
+ only issues 32*n, 16, and 8 byte operations
+ on the SRIO bus.
+ 1=Block Mode
+ The HW will read more data than requested in
+ order to minimize the number of operations
+ necessary to complete the operation.
+ The memory region must be memory like. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t molr : 6; /**< Max Outstanding Load Requests
+ Limits the number of outstanding load requests on
+ the port by restricting the number of tags
+ used by the SLI to track load responses. This
+ value can range from 1 to 32 depending on the MAC
+ type and number of lanes.
+ MAC == PCIe: Max is 32
+ MAC == sRio / 4 lanes: Max is 32
+ MAC == sRio / 2 lanes: Max is 16
+ MAC == sRio / 1 lane: Max is 8
+ Reset value is computed based on the MAC config.
+ Setting MOLR to a value of 0 will halt all read
+ traffic to the port. There are no restrictions
+ on when this value can be changed. */
+ uint64_t mps_lim : 1; /**< MAC memory space write requests cannot cross the
+ (naturally-aligned) MPS boundary.
+ When clear, DPI is allowed to issue a MAC memory
+ space write that crosses the naturally-aligned
+ boundary of size defined by MPS. (DPI will still
+ only cross the boundary when it would eliminate a
+ write by doing so.)
+ When set, DPI will never issue a MAC memory space
+ write that crosses the naturally-aligned boundary
+ of size defined by MPS. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t mps : 1; /**< Max Payload Size
+ 0 = 128B
+ 1 = 256B
+ For PCIe MACs, this MPS size must not exceed
+ the size selected by PCIE*_CFG030[MPS].
+ For sRIO MACs, all MPS values are allowed. */
+ uint64_t mrrs_lim : 1; /**< MAC memory space read requests cannot cross the
+ (naturally-aligned) MRRS boundary.
+ When clear, DPI is allowed to issue a MAC memory
+ space read that crosses the naturally-aligned
+ boundary of size defined by MRRS. (DPI will still
+ only cross the boundary when it would eliminate a
+ read by doing so.)
+ When set, DPI will never issue a MAC memory space
+ read that crosses the naturally-aligned boundary
+ of size defined by MRRS. */
+ uint64_t reserved_2_2 : 1;
+ uint64_t mrrs : 2; /**< Max Read Request Size
+ 0 = 128B
+ 1 = 256B
+ 2 = 512B
+ 3 = 1024B
+ For PCIe MACs, this MRRS size must not exceed
+ the size selected by PCIE*_CFG030[MRRS].
+ For sRIO MACs, this MRRS size must be <= 256B. */
+#else
+ uint64_t mrrs : 2;
+ uint64_t reserved_2_2 : 1;
+ uint64_t mrrs_lim : 1;
+ uint64_t mps : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t mps_lim : 1;
+ uint64_t molr : 6;
+ uint64_t reserved_14_15 : 2;
+ uint64_t rd_mode : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t qlm_cfg : 4;
+ uint64_t halt : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_dpi_sli_prtx_cfg_s cn61xx;
+ struct cvmx_dpi_sli_prtx_cfg_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t halt : 1; /**< When set, HALT indicates that the MAC has detected
+ a reset condition. No further instructions that
+ reference the MAC from any instruction Q will be
+ issued until the MAC comes out of reset and HALT
+ is cleared in SLI_CTL_PORTx[DIS_PORT]. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t qlm_cfg : 1; /**< Read only copy of the QLM CFG pin
+ Since QLM_CFG is simply a copy of the QLM CFG
+ pins, it may reflect values that are not normal
+ for DMA or packet operations. QLM_CFG does not
+ indicate if a port is disabled.
+ 0= MAC is PCIe
+ 1= MAC is SRIO */
+ uint64_t reserved_17_19 : 3;
+ uint64_t rd_mode : 1; /**< Read Mode
+ 0=Exact Read Mode
+ If the port is a PCIe port, the HW reads on a
+ 4B granularity. In this mode, the HW may break
+ a given read into 3 operations to satisfy
+ PCIe rules.
+ If the port is a SRIO port, the HW follows the
+ SRIO read rules from the SRIO specification and
+ only issues 32*n, 16, and 8 byte operations
+ on the SRIO bus.
+ 1=Block Mode
+ The HW will read more data than requested in
+ order to minimize the number of operations
+ necessary to complete the operation.
+ The memory region must be memory like. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t molr : 6; /**< Max Outstanding Load Requests
+ Limits the number of outstanding load requests on
+ the port by restricting the number of tags
+ used by the SLI to track load responses. This
+ value can range from 1 to 32. Setting a value of
+ 0 will halt all read traffic to the port. There
+ are no restrictions on when this value
+ can be changed. */
+ uint64_t mps_lim : 1; /**< MAC memory space write requests cannot cross the
+ (naturally-aligned) MPS boundary.
+ When clear, DPI is allowed to issue a MAC memory
+ space write that crosses the naturally-aligned
+ boundary of size defined by MPS. (DPI will still
+ only cross the boundary when it would eliminate a
+ write by doing so.)
+ When set, DPI will never issue a MAC memory space
+ write that crosses the naturally-aligned boundary
+ of size defined by MPS. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t mps : 1; /**< Max Payload Size
+ 0 = 128B
+ 1 = 256B
+ For PCIe MACs, this MPS size must not exceed
+ the size selected by PCIE*_CFG030[MPS].
+ For sRIO MACs, all MPS values are allowed. */
+ uint64_t mrrs_lim : 1; /**< MAC memory space read requests cannot cross the
+ (naturally-aligned) MRRS boundary.
+ When clear, DPI is allowed to issue a MAC memory
+ space read that crosses the naturally-aligned
+ boundary of size defined by MRRS. (DPI will still
+ only cross the boundary when it would eliminate a
+ read by doing so.)
+ When set, DPI will never issue a MAC memory space
+ read that crosses the naturally-aligned boundary
+ of size defined by MRRS. */
+ uint64_t reserved_2_2 : 1;
+ uint64_t mrrs : 2; /**< Max Read Request Size
+ 0 = 128B
+ 1 = 256B
+ 2 = 512B
+ 3 = 1024B
+ For PCIe MACs, this MRRS size must not exceed
+ the size selected by PCIE*_CFG030[MRRS].
+ For sRIO MACs, this MRRS size must be <= 256B. */
+#else
+ uint64_t mrrs : 2;
+ uint64_t reserved_2_2 : 1;
+ uint64_t mrrs_lim : 1;
+ uint64_t mps : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t mps_lim : 1;
+ uint64_t molr : 6;
+ uint64_t reserved_14_15 : 2;
+ uint64_t rd_mode : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t qlm_cfg : 1;
+ uint64_t reserved_21_23 : 3;
+ uint64_t halt : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } cn63xx;
+ struct cvmx_dpi_sli_prtx_cfg_cn63xx cn63xxp1;
+ struct cvmx_dpi_sli_prtx_cfg_s cn66xx;
+ struct cvmx_dpi_sli_prtx_cfg_cn63xx cn68xx;
+ struct cvmx_dpi_sli_prtx_cfg_cn63xx cn68xxp1;
+ struct cvmx_dpi_sli_prtx_cfg_s cnf71xx;
+};
+typedef union cvmx_dpi_sli_prtx_cfg cvmx_dpi_sli_prtx_cfg_t;
+
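+/* Editor's note: illustrative sketch, not part of the SDK. The MRRS/MPS
+ * encodings above are powers of two starting at 128B, and for PCIe MACs they
+ * must not exceed the sizes negotiated in PCIE*_CFG030, so a port setup
+ * routine takes the already-negotiated encodings rather than inventing them. */
+#if 0 /* illustration only */
+static inline void dpi_port_set_sizes(int port, int mrrs_enc, int mps_enc)
+{
+ cvmx_dpi_sli_prtx_cfg_t cfg;
+ cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(port));
+ cfg.s.mrrs = mrrs_enc; /* 0:128B 1:256B 2:512B 3:1024B */
+ cfg.s.mps = mps_enc; /* 0:128B 1:256B */
+ cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(port), cfg.u64);
+}
+#endif
+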
+/**
+ * cvmx_dpi_sli_prt#_err
+ *
+ * DPI_SLI_PRTx_ERR = DPI SLI Port Error Info
+ *
+ * Logs the Address and Request Queue associated with the reported SLI error response
+ */
+union cvmx_dpi_sli_prtx_err {
+ uint64_t u64;
+ struct cvmx_dpi_sli_prtx_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 61; /**< Address of the failed load request.
+ Address is locked along with the
+ DPI_SLI_PRTx_ERR_INFO register.
+ See the DPI_SLI_PRTx_ERR_INFO[LOCK] description
+ for further information. */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t addr : 61;
+#endif
+ } s;
+ struct cvmx_dpi_sli_prtx_err_s cn61xx;
+ struct cvmx_dpi_sli_prtx_err_s cn63xx;
+ struct cvmx_dpi_sli_prtx_err_s cn63xxp1;
+ struct cvmx_dpi_sli_prtx_err_s cn66xx;
+ struct cvmx_dpi_sli_prtx_err_s cn68xx;
+ struct cvmx_dpi_sli_prtx_err_s cn68xxp1;
+ struct cvmx_dpi_sli_prtx_err_s cnf71xx;
+};
+typedef union cvmx_dpi_sli_prtx_err cvmx_dpi_sli_prtx_err_t;
+
+/**
+ * cvmx_dpi_sli_prt#_err_info
+ *
+ * DPI_SLI_PRTx_ERR_INFO = DPI SLI Port Error Info
+ *
+ * Logs the Address and Request Queue associated with the reported SLI error response
+ */
+union cvmx_dpi_sli_prtx_err_info {
+ uint64_t u64;
+ struct cvmx_dpi_sli_prtx_err_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t lock : 1; /**< DPI_SLI_PRTx_ERR and DPI_SLI_PRTx_ERR_INFO have
+ captured and locked contents.
+ When Octeon first detects an ErrorResponse, the
+ TYPE, REQQ, and ADDR of the error is saved and an
+ internal lock state is set so the data associated
+ with the initial error is preserved.
+ Subsequent ErrorResponses will optionally raise
+ an interrupt, but will not modify the TYPE, REQQ,
+ or ADDR fields until the internal lock state is
+ cleared.
+ SW can clear the internal lock state by writing
+ a '1' to the appropriate bit in either
+ DPI_REQ_ERR_RSP or DPI_PKT_ERR_RSP depending on
+ the TYPE field.
+ Once the internal lock state is cleared,
+ the next ErrorResponse will set the TYPE, REQQ,
+ and ADDR for the new transaction. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t type : 1; /**< Type of transaction that caused the ErrorResponse.
+ 0=DMA Instruction
+ 1=PKT Instruction */
+ uint64_t reserved_3_3 : 1;
+ uint64_t reqq : 3; /**< Request queue that made the failed load request. */
+#else
+ uint64_t reqq : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t type : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t lock : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_dpi_sli_prtx_err_info_s cn61xx;
+ struct cvmx_dpi_sli_prtx_err_info_s cn63xx;
+ struct cvmx_dpi_sli_prtx_err_info_s cn63xxp1;
+ struct cvmx_dpi_sli_prtx_err_info_s cn66xx;
+ struct cvmx_dpi_sli_prtx_err_info_s cn68xx;
+ struct cvmx_dpi_sli_prtx_err_info_s cn68xxp1;
+ struct cvmx_dpi_sli_prtx_err_info_s cnf71xx;
+};
+typedef union cvmx_dpi_sli_prtx_err_info cvmx_dpi_sli_prtx_err_info_t;
+
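+/* Editor's note: illustrative sketch, not part of the SDK. Per the LOCK
+ * description above, the first ErrorResponse freezes TYPE/REQQ/ADDR; after
+ * logging them, software re-arms capture by clearing the QERR or PKTERR bit
+ * selected by TYPE (DPI_REQ_ERR_RSP or DPI_PKT_ERR_RSP). */
+#if 0 /* illustration only */
+static inline void dpi_log_port_error(int port)
+{
+ cvmx_dpi_sli_prtx_err_info_t info;
+ cvmx_dpi_sli_prtx_err_t err;
+ info.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_ERR_INFO(port));
+ if (!info.s.lock)
+ return; /* nothing captured since the last clear */
+ err.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_ERR(port));
+ cvmx_warn("DPI port %d: %s error, queue %d, addr 0x%llx\n", port,
+ info.s.type ? "PKT" : "DMA", (int)info.s.reqq,
+ (unsigned long long)(err.s.addr << 3));
+}
+#endif
+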
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-dpi-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ebt3000.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ebt3000.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ebt3000.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,118 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the EBT3000 specific devices
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "cvmx-config.h"
+#endif
+#include "cvmx.h"
+#include "cvmx-ebt3000.h"
+#include "cvmx-sysinfo.h"
+
+
+void ebt3000_char_write(int char_position, char val)
+{
+ /* Note: phys_to_ptr won't work here, as we are most likely going to access the boot bus. */
+ char *led_base;
+ if (!cvmx_sysinfo_get()->led_display_base_addr) /* check the raw address; the KSEG0-mapped pointer below is never NULL */
+ return;
+ led_base = CASTPTR(char, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, cvmx_sysinfo_get()->led_display_base_addr));
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBT3000 && cvmx_sysinfo_get()->board_rev_major == 1)
+ {
+ /* Rev 1 board */
+ char *ptr = (char *)(led_base + 4);
+ char_position &= 0x3; /* only 4 chars */
+ ptr[3 - char_position] = val;
+ }
+ else
+ {
+ /* rev 2 or later board */
+ char *ptr = (char *)(led_base);
+ char_position &= 0x7; /* only 8 chars */
+ ptr[char_position] = val;
+ }
+}
+
+void ebt3000_str_write(const char *str)
+{
+ /* Note: phys_to_ptr won't work here, as we are most likely going to access the boot bus. */
+ char *led_base;
+ if (!cvmx_sysinfo_get()->led_display_base_addr)
+ return;
+ led_base = CASTPTR(char, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, cvmx_sysinfo_get()->led_display_base_addr));
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBT3000 && cvmx_sysinfo_get()->board_rev_major == 1)
+ {
+ char *ptr = (char *)(led_base + 4);
+ int i;
+ for (i=0; i<4; i++)
+ {
+ if (*str)
+ ptr[3 - i] = *str++;
+ else
+ ptr[3 - i] = ' ';
+ }
+ }
+ else
+ {
+ /* rev 2 or later board */
+ char *ptr = (char *)(led_base);
+ int i;
+ for (i=0; i<8; i++)
+ {
+ if (*str)
+ ptr[i] = *str++;
+ else
+ ptr[i] = ' ';
+ }
+ }
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ebt3000.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
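Editor's note: the two entry points above are normally driven from board bring-up code,
e.g. to show progress on the LED display. A minimal sketch (the banner text and the
helper name are illustrative only):

    #include "cvmx.h"
    #include "cvmx-ebt3000.h"

    /* Put a boot banner on the LED display; ebt3000_str_write() pads short
     * strings with spaces, so the previous contents are fully overwritten. */
    static void show_boot_banner(void)
    {
        ebt3000_str_write("BOOT OK");
        ebt3000_char_write(7, '*');   /* rightmost position on rev 2+ boards */
    }
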
Added: trunk/sys/contrib/octeon-sdk/cvmx-ebt3000.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ebt3000.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ebt3000.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,70 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+#ifndef __CVMX_EBT3000_H__
+#define __CVMX_EBT3000_H__
+
+/**
+ * @file
+ *
+ * Interface to the EBT3000 specific devices
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void ebt3000_str_write(const char *str);
+void ebt3000_char_write(int char_position, char val);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_EBT3000_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ebt3000.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-endor-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-endor-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-endor-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,7827 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-endor-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon endor.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision: 69515 $<hr>
+ *
+ */
+#ifndef __CVMX_ENDOR_DEFS_H__
+#define __CVMX_ENDOR_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_ADMA_AUTO_CLK_GATE CVMX_ENDOR_ADMA_AUTO_CLK_GATE_FUNC()
+static inline uint64_t CVMX_ENDOR_ADMA_AUTO_CLK_GATE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_ADMA_AUTO_CLK_GATE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844004ull);
+}
+#else
+#define CVMX_ENDOR_ADMA_AUTO_CLK_GATE (CVMX_ADD_IO_SEG(0x00010F0000844004ull))
+#endif
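Editor's note: every register in this file repeats the pattern above. With
CVMX_ENABLE_CSR_ADDRESS_CHECKING set, the name expands to an inline function that calls
cvmx_warn() when the register does not exist on the running model; otherwise it collapses
to a bare address constant. Client code uses the name identically in both builds. A
sketch, assuming these ENDOR CSRs are 32 bits wide (their 4-byte-aligned addresses
suggest so) and using the cvmx_read64_uint32() helper from cvmx-access.h:

    #include "cvmx.h"
    #include "cvmx-endor-defs.h"

    /* Reads identically whether the macro is a checked function call or a
     * plain constant; only the diagnostics differ between the two builds. */
    static uint32_t endor_adma_clk_gate(void)
    {
        return cvmx_read64_uint32(CVMX_ENDOR_ADMA_AUTO_CLK_GATE);
    }
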
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_ADMA_AXIERR_INTR CVMX_ENDOR_ADMA_AXIERR_INTR_FUNC()
+static inline uint64_t CVMX_ENDOR_ADMA_AXIERR_INTR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_ADMA_AXIERR_INTR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844044ull);
+}
+#else
+#define CVMX_ENDOR_ADMA_AXIERR_INTR (CVMX_ADD_IO_SEG(0x00010F0000844044ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_ADMA_AXI_RSPCODE CVMX_ENDOR_ADMA_AXI_RSPCODE_FUNC()
+static inline uint64_t CVMX_ENDOR_ADMA_AXI_RSPCODE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_ADMA_AXI_RSPCODE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844050ull);
+}
+#else
+#define CVMX_ENDOR_ADMA_AXI_RSPCODE (CVMX_ADD_IO_SEG(0x00010F0000844050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_ADMA_AXI_SIGNAL CVMX_ENDOR_ADMA_AXI_SIGNAL_FUNC()
+static inline uint64_t CVMX_ENDOR_ADMA_AXI_SIGNAL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_ADMA_AXI_SIGNAL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844084ull);
+}
+#else
+#define CVMX_ENDOR_ADMA_AXI_SIGNAL (CVMX_ADD_IO_SEG(0x00010F0000844084ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_ADMA_DMADONE_INTR CVMX_ENDOR_ADMA_DMADONE_INTR_FUNC()
+static inline uint64_t CVMX_ENDOR_ADMA_DMADONE_INTR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_ADMA_DMADONE_INTR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844040ull);
+}
+#else
+#define CVMX_ENDOR_ADMA_DMADONE_INTR (CVMX_ADD_IO_SEG(0x00010F0000844040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_ADMA_DMAX_ADDR_HI(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ENDOR_ADMA_DMAX_ADDR_HI(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F000084410Cull) + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_ENDOR_ADMA_DMAX_ADDR_HI(offset) (CVMX_ADD_IO_SEG(0x00010F000084410Cull) + ((offset) & 7) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_ADMA_DMAX_ADDR_LO(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ENDOR_ADMA_DMAX_ADDR_LO(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000844108ull) + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_ENDOR_ADMA_DMAX_ADDR_LO(offset) (CVMX_ADD_IO_SEG(0x00010F0000844108ull) + ((offset) & 7) * 16)
+#endif
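Editor's note: indexed registers such as the eight ADMA channel address pairs above take
the channel number as an argument; the checking build rejects offset > 7 with
cvmx_warn(), while the fast build silently masks it. A sketch that loads a 64-bit bus
address into one channel, under the same 32-bit-CSR assumption as above (any ordering or
kick-off requirements are not documented in this file, so treat this purely as an
addressing example):

    #include "cvmx.h"
    #include "cvmx-endor-defs.h"

    /* Split a 64-bit address across the per-channel LO/HI registers. */
    static void endor_adma_set_addr(unsigned long chan, uint64_t addr)
    {
        cvmx_write64_uint32(CVMX_ENDOR_ADMA_DMAX_ADDR_LO(chan), (uint32_t)addr);
        cvmx_write64_uint32(CVMX_ENDOR_ADMA_DMAX_ADDR_HI(chan), (uint32_t)(addr >> 32));
    }
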
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_ADMA_DMAX_CFG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ENDOR_ADMA_DMAX_CFG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000844100ull) + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_ENDOR_ADMA_DMAX_CFG(offset) (CVMX_ADD_IO_SEG(0x00010F0000844100ull) + ((offset) & 7) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_ADMA_DMAX_SIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ENDOR_ADMA_DMAX_SIZE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000844104ull) + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_ENDOR_ADMA_DMAX_SIZE(offset) (CVMX_ADD_IO_SEG(0x00010F0000844104ull) + ((offset) & 7) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_ADMA_DMA_PRIORITY CVMX_ENDOR_ADMA_DMA_PRIORITY_FUNC()
+static inline uint64_t CVMX_ENDOR_ADMA_DMA_PRIORITY_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_ADMA_DMA_PRIORITY not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844080ull);
+}
+#else
+#define CVMX_ENDOR_ADMA_DMA_PRIORITY (CVMX_ADD_IO_SEG(0x00010F0000844080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_ADMA_DMA_RESET CVMX_ENDOR_ADMA_DMA_RESET_FUNC()
+static inline uint64_t CVMX_ENDOR_ADMA_DMA_RESET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_ADMA_DMA_RESET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844008ull);
+}
+#else
+#define CVMX_ENDOR_ADMA_DMA_RESET (CVMX_ADD_IO_SEG(0x00010F0000844008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_ADMA_INTR_DIS CVMX_ENDOR_ADMA_INTR_DIS_FUNC()
+static inline uint64_t CVMX_ENDOR_ADMA_INTR_DIS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_ADMA_INTR_DIS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000084404Cull);
+}
+#else
+#define CVMX_ENDOR_ADMA_INTR_DIS (CVMX_ADD_IO_SEG(0x00010F000084404Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_ADMA_INTR_ENB CVMX_ENDOR_ADMA_INTR_ENB_FUNC()
+static inline uint64_t CVMX_ENDOR_ADMA_INTR_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_ADMA_INTR_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844048ull);
+}
+#else
+#define CVMX_ENDOR_ADMA_INTR_ENB (CVMX_ADD_IO_SEG(0x00010F0000844048ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_ADMA_MODULE_STATUS CVMX_ENDOR_ADMA_MODULE_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_ADMA_MODULE_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_ADMA_MODULE_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844000ull);
+}
+#else
+#define CVMX_ENDOR_ADMA_MODULE_STATUS (CVMX_ADD_IO_SEG(0x00010F0000844000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_CNTL_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_CNTL_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008201E4ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ENDOR_INTC_CNTL_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F00008201E4ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_CNTL_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_CNTL_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008201E0ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ENDOR_INTC_CNTL_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F00008201E0ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_INDEX_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_INDEX_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008201A4ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ENDOR_INTC_INDEX_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F00008201A4ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_INDEX_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_INDEX_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008201A0ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ENDOR_INTC_INDEX_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F00008201A0ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_MISC_IDX_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_MISC_IDX_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820134ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_MISC_IDX_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820134ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_MISC_IDX_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_MISC_IDX_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820114ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_MISC_IDX_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820114ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_MISC_MASK_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_MISC_MASK_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820034ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_MISC_MASK_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820034ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_MISC_MASK_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_MISC_MASK_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820014ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_MISC_MASK_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820014ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_INTC_MISC_RINT CVMX_ENDOR_INTC_MISC_RINT_FUNC()
+static inline uint64_t CVMX_ENDOR_INTC_MISC_RINT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_INTC_MISC_RINT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000820194ull);
+}
+#else
+#define CVMX_ENDOR_INTC_MISC_RINT (CVMX_ADD_IO_SEG(0x00010F0000820194ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_MISC_STATUS_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_MISC_STATUS_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008200B4ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_MISC_STATUS_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F00008200B4ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_MISC_STATUS_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_MISC_STATUS_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820094ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_MISC_STATUS_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820094ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RDQ_IDX_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RDQ_IDX_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F000082012Cull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RDQ_IDX_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F000082012Cull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RDQ_IDX_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RDQ_IDX_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F000082010Cull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RDQ_IDX_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F000082010Cull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RDQ_MASK_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RDQ_MASK_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F000082002Cull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RDQ_MASK_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F000082002Cull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RDQ_MASK_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RDQ_MASK_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F000082000Cull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RDQ_MASK_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F000082000Cull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_INTC_RDQ_RINT CVMX_ENDOR_INTC_RDQ_RINT_FUNC()
+static inline uint64_t CVMX_ENDOR_INTC_RDQ_RINT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_INTC_RDQ_RINT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000082018Cull);
+}
+#else
+#define CVMX_ENDOR_INTC_RDQ_RINT (CVMX_ADD_IO_SEG(0x00010F000082018Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RDQ_STATUS_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RDQ_STATUS_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008200ACull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RDQ_STATUS_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F00008200ACull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RDQ_STATUS_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RDQ_STATUS_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F000082008Cull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RDQ_STATUS_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F000082008Cull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RD_IDX_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RD_IDX_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820124ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RD_IDX_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820124ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RD_IDX_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RD_IDX_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820104ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RD_IDX_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820104ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RD_MASK_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RD_MASK_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820024ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RD_MASK_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820024ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RD_MASK_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RD_MASK_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820004ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RD_MASK_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820004ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_INTC_RD_RINT CVMX_ENDOR_INTC_RD_RINT_FUNC()
+static inline uint64_t CVMX_ENDOR_INTC_RD_RINT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_INTC_RD_RINT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000820184ull);
+}
+#else
+#define CVMX_ENDOR_INTC_RD_RINT (CVMX_ADD_IO_SEG(0x00010F0000820184ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RD_STATUS_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RD_STATUS_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008200A4ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RD_STATUS_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F00008200A4ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_RD_STATUS_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_RD_STATUS_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820084ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_RD_STATUS_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820084ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_STAT_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_STAT_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008201C4ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ENDOR_INTC_STAT_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F00008201C4ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_STAT_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_STAT_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008201C0ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ENDOR_INTC_STAT_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F00008201C0ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_INTC_SWCLR CVMX_ENDOR_INTC_SWCLR_FUNC()
+static inline uint64_t CVMX_ENDOR_INTC_SWCLR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_INTC_SWCLR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000820204ull);
+}
+#else
+#define CVMX_ENDOR_INTC_SWCLR (CVMX_ADD_IO_SEG(0x00010F0000820204ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_INTC_SWSET CVMX_ENDOR_INTC_SWSET_FUNC()
+static inline uint64_t CVMX_ENDOR_INTC_SWSET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_INTC_SWSET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000820200ull);
+}
+#else
+#define CVMX_ENDOR_INTC_SWSET (CVMX_ADD_IO_SEG(0x00010F0000820200ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_SW_IDX_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_SW_IDX_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820130ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_SW_IDX_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820130ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_SW_IDX_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_SW_IDX_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820110ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_SW_IDX_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820110ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_SW_MASK_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_SW_MASK_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820030ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_SW_MASK_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820030ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_SW_MASK_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_SW_MASK_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820010ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_SW_MASK_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820010ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_INTC_SW_RINT CVMX_ENDOR_INTC_SW_RINT_FUNC()
+static inline uint64_t CVMX_ENDOR_INTC_SW_RINT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_INTC_SW_RINT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000820190ull);
+}
+#else
+#define CVMX_ENDOR_INTC_SW_RINT (CVMX_ADD_IO_SEG(0x00010F0000820190ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_SW_STATUS_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_SW_STATUS_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008200B0ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_SW_STATUS_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F00008200B0ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_SW_STATUS_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_SW_STATUS_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820090ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_SW_STATUS_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820090ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WRQ_IDX_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WRQ_IDX_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820128ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WRQ_IDX_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820128ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WRQ_IDX_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WRQ_IDX_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820108ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WRQ_IDX_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820108ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WRQ_MASK_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WRQ_MASK_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820028ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WRQ_MASK_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820028ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WRQ_MASK_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WRQ_MASK_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820008ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WRQ_MASK_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820008ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_INTC_WRQ_RINT CVMX_ENDOR_INTC_WRQ_RINT_FUNC()
+static inline uint64_t CVMX_ENDOR_INTC_WRQ_RINT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_INTC_WRQ_RINT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000820188ull);
+}
+#else
+#define CVMX_ENDOR_INTC_WRQ_RINT (CVMX_ADD_IO_SEG(0x00010F0000820188ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WRQ_STATUS_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WRQ_STATUS_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008200A8ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WRQ_STATUS_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F00008200A8ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WRQ_STATUS_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WRQ_STATUS_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820088ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WRQ_STATUS_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820088ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WR_IDX_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WR_IDX_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820120ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WR_IDX_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820120ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WR_IDX_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WR_IDX_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820100ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WR_IDX_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820100ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WR_MASK_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WR_MASK_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820020ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WR_MASK_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820020ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WR_MASK_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WR_MASK_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820000ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WR_MASK_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820000ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_INTC_WR_RINT CVMX_ENDOR_INTC_WR_RINT_FUNC()
+static inline uint64_t CVMX_ENDOR_INTC_WR_RINT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_INTC_WR_RINT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000820180ull);
+}
+#else
+#define CVMX_ENDOR_INTC_WR_RINT (CVMX_ADD_IO_SEG(0x00010F0000820180ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WR_STATUS_HIX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WR_STATUS_HIX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F00008200A0ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WR_STATUS_HIX(offset) (CVMX_ADD_IO_SEG(0x00010F00008200A0ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_INTC_WR_STATUS_LOX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ENDOR_INTC_WR_STATUS_LOX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000820080ull) + ((offset) & 1) * 64;
+}
+#else
+#define CVMX_ENDOR_INTC_WR_STATUS_LOX(offset) (CVMX_ADD_IO_SEG(0x00010F0000820080ull) + ((offset) & 1) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR0 CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR0_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832054ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR0 (CVMX_ADD_IO_SEG(0x00010F0000832054ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR1 CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR1_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000083205Cull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR1 (CVMX_ADD_IO_SEG(0x00010F000083205Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR2 CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR2_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832064ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR2 (CVMX_ADD_IO_SEG(0x00010F0000832064ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR3 CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR3_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000083206Cull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_CBUF_END_ADDR3 (CVMX_ADD_IO_SEG(0x00010F000083206Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR0 CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR0_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832050ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR0 (CVMX_ADD_IO_SEG(0x00010F0000832050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR1 CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR1_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832058ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR1 (CVMX_ADD_IO_SEG(0x00010F0000832058ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR2 CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR2_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832060ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR2 (CVMX_ADD_IO_SEG(0x00010F0000832060ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR3 CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR3_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832068ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_CBUF_START_ADDR3 (CVMX_ADD_IO_SEG(0x00010F0000832068ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_INTR_CLEAR CVMX_ENDOR_OFS_HMM_INTR_CLEAR_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_INTR_CLEAR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_INTR_CLEAR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832018ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_INTR_CLEAR (CVMX_ADD_IO_SEG(0x00010F0000832018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_INTR_ENB CVMX_ENDOR_OFS_HMM_INTR_ENB_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_INTR_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_INTR_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000083201Cull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_INTR_ENB (CVMX_ADD_IO_SEG(0x00010F000083201Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_INTR_RSTATUS CVMX_ENDOR_OFS_HMM_INTR_RSTATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_INTR_RSTATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_INTR_RSTATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832014ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_INTR_RSTATUS (CVMX_ADD_IO_SEG(0x00010F0000832014ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_INTR_STATUS CVMX_ENDOR_OFS_HMM_INTR_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_INTR_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_INTR_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832010ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_INTR_STATUS (CVMX_ADD_IO_SEG(0x00010F0000832010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_INTR_TEST CVMX_ENDOR_OFS_HMM_INTR_TEST_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_INTR_TEST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_INTR_TEST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832020ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_INTR_TEST (CVMX_ADD_IO_SEG(0x00010F0000832020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_MODE CVMX_ENDOR_OFS_HMM_MODE_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_MODE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_MODE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832004ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_MODE (CVMX_ADD_IO_SEG(0x00010F0000832004ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_START_ADDR0 CVMX_ENDOR_OFS_HMM_START_ADDR0_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_START_ADDR0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_START_ADDR0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832030ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_START_ADDR0 (CVMX_ADD_IO_SEG(0x00010F0000832030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_START_ADDR1 CVMX_ENDOR_OFS_HMM_START_ADDR1_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_START_ADDR1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_START_ADDR1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832034ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_START_ADDR1 (CVMX_ADD_IO_SEG(0x00010F0000832034ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_START_ADDR2 CVMX_ENDOR_OFS_HMM_START_ADDR2_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_START_ADDR2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_START_ADDR2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832038ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_START_ADDR2 (CVMX_ADD_IO_SEG(0x00010F0000832038ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_START_ADDR3 CVMX_ENDOR_OFS_HMM_START_ADDR3_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_START_ADDR3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_START_ADDR3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000083203Cull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_START_ADDR3 (CVMX_ADD_IO_SEG(0x00010F000083203Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_STATUS CVMX_ENDOR_OFS_HMM_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832000ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_STATUS (CVMX_ADD_IO_SEG(0x00010F0000832000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_XFER_CNT CVMX_ENDOR_OFS_HMM_XFER_CNT_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_XFER_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_XFER_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000083202Cull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_XFER_CNT (CVMX_ADD_IO_SEG(0x00010F000083202Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_XFER_Q_STATUS CVMX_ENDOR_OFS_HMM_XFER_Q_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_XFER_Q_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_XFER_Q_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000083200Cull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_XFER_Q_STATUS (CVMX_ADD_IO_SEG(0x00010F000083200Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_OFS_HMM_XFER_START CVMX_ENDOR_OFS_HMM_XFER_START_FUNC()
+static inline uint64_t CVMX_ENDOR_OFS_HMM_XFER_START_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_OFS_HMM_XFER_START not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000832028ull);
+}
+#else
+#define CVMX_ENDOR_OFS_HMM_XFER_START (CVMX_ADD_IO_SEG(0x00010F0000832028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_1PPS_GEN_CFG CVMX_ENDOR_RFIF_1PPS_GEN_CFG_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_1PPS_GEN_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_1PPS_GEN_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008680CCull);
+}
+#else
+#define CVMX_ENDOR_RFIF_1PPS_GEN_CFG (CVMX_ADD_IO_SEG(0x00010F00008680CCull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_1PPS_SAMPLE_CNT_OFFSET CVMX_ENDOR_RFIF_1PPS_SAMPLE_CNT_OFFSET_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_1PPS_SAMPLE_CNT_OFFSET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_1PPS_SAMPLE_CNT_OFFSET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868104ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_1PPS_SAMPLE_CNT_OFFSET (CVMX_ADD_IO_SEG(0x00010F0000868104ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_1PPS_VERIF_GEN_EN CVMX_ENDOR_RFIF_1PPS_VERIF_GEN_EN_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_1PPS_VERIF_GEN_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_1PPS_VERIF_GEN_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868110ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_1PPS_VERIF_GEN_EN (CVMX_ADD_IO_SEG(0x00010F0000868110ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_1PPS_VERIF_SCNT CVMX_ENDOR_RFIF_1PPS_VERIF_SCNT_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_1PPS_VERIF_SCNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_1PPS_VERIF_SCNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868114ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_1PPS_VERIF_SCNT (CVMX_ADD_IO_SEG(0x00010F0000868114ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_CONF CVMX_ENDOR_RFIF_CONF_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_CONF_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_CONF not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868010ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_CONF (CVMX_ADD_IO_SEG(0x00010F0000868010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_CONF2 CVMX_ENDOR_RFIF_CONF2_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_CONF2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_CONF2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000086801Cull);
+}
+#else
+#define CVMX_ENDOR_RFIF_CONF2 (CVMX_ADD_IO_SEG(0x00010F000086801Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_DSP1_GPIO CVMX_ENDOR_RFIF_DSP1_GPIO_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_DSP1_GPIO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_DSP1_GPIO not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008684C0ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_DSP1_GPIO (CVMX_ADD_IO_SEG(0x00010F00008684C0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_DSP_RX_HIS CVMX_ENDOR_RFIF_DSP_RX_HIS_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_DSP_RX_HIS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_DSP_RX_HIS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000086840Cull);
+}
+#else
+#define CVMX_ENDOR_RFIF_DSP_RX_HIS (CVMX_ADD_IO_SEG(0x00010F000086840Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_DSP_RX_ISM CVMX_ENDOR_RFIF_DSP_RX_ISM_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_DSP_RX_ISM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_DSP_RX_ISM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868400ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_DSP_RX_ISM (CVMX_ADD_IO_SEG(0x00010F0000868400ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_FIRS_ENABLE CVMX_ENDOR_RFIF_FIRS_ENABLE_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_FIRS_ENABLE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_FIRS_ENABLE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008684C4ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_FIRS_ENABLE (CVMX_ADD_IO_SEG(0x00010F00008684C4ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_FRAME_CNT CVMX_ENDOR_RFIF_FRAME_CNT_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_FRAME_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_FRAME_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868030ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_FRAME_CNT (CVMX_ADD_IO_SEG(0x00010F0000868030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_FRAME_L CVMX_ENDOR_RFIF_FRAME_L_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_FRAME_L_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_FRAME_L not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868014ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_FRAME_L (CVMX_ADD_IO_SEG(0x00010F0000868014ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_RFIF_GPIO_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_ENDOR_RFIF_GPIO_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000868418ull) + ((offset) & 3) * 4;
+}
+#else
+#define CVMX_ENDOR_RFIF_GPIO_X(offset) (CVMX_ADD_IO_SEG(0x00010F0000868418ull) + ((offset) & 3) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_MAX_SAMPLE_ADJ CVMX_ENDOR_RFIF_MAX_SAMPLE_ADJ_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_MAX_SAMPLE_ADJ_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_MAX_SAMPLE_ADJ not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008680DCull);
+}
+#else
+#define CVMX_ENDOR_RFIF_MAX_SAMPLE_ADJ (CVMX_ADD_IO_SEG(0x00010F00008680DCull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_MIN_SAMPLE_ADJ CVMX_ENDOR_RFIF_MIN_SAMPLE_ADJ_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_MIN_SAMPLE_ADJ_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_MIN_SAMPLE_ADJ not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008680E0ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_MIN_SAMPLE_ADJ (CVMX_ADD_IO_SEG(0x00010F00008680E0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_NUM_RX_WIN CVMX_ENDOR_RFIF_NUM_RX_WIN_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_NUM_RX_WIN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_NUM_RX_WIN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868018ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_NUM_RX_WIN (CVMX_ADD_IO_SEG(0x00010F0000868018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_PWM_ENABLE CVMX_ENDOR_RFIF_PWM_ENABLE_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_PWM_ENABLE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_PWM_ENABLE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868180ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_PWM_ENABLE (CVMX_ADD_IO_SEG(0x00010F0000868180ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_PWM_HIGH_TIME CVMX_ENDOR_RFIF_PWM_HIGH_TIME_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_PWM_HIGH_TIME_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_PWM_HIGH_TIME not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868184ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_PWM_HIGH_TIME (CVMX_ADD_IO_SEG(0x00010F0000868184ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_PWM_LOW_TIME CVMX_ENDOR_RFIF_PWM_LOW_TIME_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_PWM_LOW_TIME_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_PWM_LOW_TIME not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868188ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_PWM_LOW_TIME (CVMX_ADD_IO_SEG(0x00010F0000868188ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RD_TIMER64_LSB CVMX_ENDOR_RFIF_RD_TIMER64_LSB_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RD_TIMER64_LSB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RD_TIMER64_LSB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008681ACull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RD_TIMER64_LSB (CVMX_ADD_IO_SEG(0x00010F00008681ACull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RD_TIMER64_MSB CVMX_ENDOR_RFIF_RD_TIMER64_MSB_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RD_TIMER64_MSB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RD_TIMER64_MSB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008681B0ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RD_TIMER64_MSB (CVMX_ADD_IO_SEG(0x00010F00008681B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_REAL_TIME_TIMER CVMX_ENDOR_RFIF_REAL_TIME_TIMER_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_REAL_TIME_TIMER_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_REAL_TIME_TIMER not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008680C8ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_REAL_TIME_TIMER (CVMX_ADD_IO_SEG(0x00010F00008680C8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RF_CLK_TIMER CVMX_ENDOR_RFIF_RF_CLK_TIMER_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RF_CLK_TIMER_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RF_CLK_TIMER not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868194ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RF_CLK_TIMER (CVMX_ADD_IO_SEG(0x00010F0000868194ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RF_CLK_TIMER_EN CVMX_ENDOR_RFIF_RF_CLK_TIMER_EN_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RF_CLK_TIMER_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RF_CLK_TIMER_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868198ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RF_CLK_TIMER_EN (CVMX_ADD_IO_SEG(0x00010F0000868198ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_CORRECT_ADJ CVMX_ENDOR_RFIF_RX_CORRECT_ADJ_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_CORRECT_ADJ_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_CORRECT_ADJ not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008680E8ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_CORRECT_ADJ (CVMX_ADD_IO_SEG(0x00010F00008680E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_DIV_STATUS CVMX_ENDOR_RFIF_RX_DIV_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_DIV_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_DIV_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868004ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_DIV_STATUS (CVMX_ADD_IO_SEG(0x00010F0000868004ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_FIFO_CNT CVMX_ENDOR_RFIF_RX_FIFO_CNT_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_FIFO_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_FIFO_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868500ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_FIFO_CNT (CVMX_ADD_IO_SEG(0x00010F0000868500ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_IF_CFG CVMX_ENDOR_RFIF_RX_IF_CFG_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_IF_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_IF_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868038ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_IF_CFG (CVMX_ADD_IO_SEG(0x00010F0000868038ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_LEAD_LAG CVMX_ENDOR_RFIF_RX_LEAD_LAG_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_LEAD_LAG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_LEAD_LAG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868020ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_LEAD_LAG (CVMX_ADD_IO_SEG(0x00010F0000868020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_LOAD_CFG CVMX_ENDOR_RFIF_RX_LOAD_CFG_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_LOAD_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_LOAD_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868508ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_LOAD_CFG (CVMX_ADD_IO_SEG(0x00010F0000868508ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_OFFSET CVMX_ENDOR_RFIF_RX_OFFSET_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_OFFSET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_OFFSET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008680D4ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_OFFSET (CVMX_ADD_IO_SEG(0x00010F00008680D4ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_OFFSET_ADJ_SCNT CVMX_ENDOR_RFIF_RX_OFFSET_ADJ_SCNT_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_OFFSET_ADJ_SCNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_OFFSET_ADJ_SCNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868108ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_OFFSET_ADJ_SCNT (CVMX_ADD_IO_SEG(0x00010F0000868108ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_STATUS CVMX_ENDOR_RFIF_RX_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868000ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_STATUS (CVMX_ADD_IO_SEG(0x00010F0000868000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_SYNC_SCNT CVMX_ENDOR_RFIF_RX_SYNC_SCNT_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_SYNC_SCNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_SYNC_SCNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008680C4ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_SYNC_SCNT (CVMX_ADD_IO_SEG(0x00010F00008680C4ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_SYNC_VALUE CVMX_ENDOR_RFIF_RX_SYNC_VALUE_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_SYNC_VALUE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_SYNC_VALUE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008680C0ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_SYNC_VALUE (CVMX_ADD_IO_SEG(0x00010F00008680C0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_TH CVMX_ENDOR_RFIF_RX_TH_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_TH_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_TH not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868410ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_TH (CVMX_ADD_IO_SEG(0x00010F0000868410ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_RX_TRANSFER_SIZE CVMX_ENDOR_RFIF_RX_TRANSFER_SIZE_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_RX_TRANSFER_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_TRANSFER_SIZE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000086850Cull);
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_TRANSFER_SIZE (CVMX_ADD_IO_SEG(0x00010F000086850Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_RFIF_RX_W_EX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_W_EX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000868084ull) + ((offset) & 3) * 4;
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_W_EX(offset) (CVMX_ADD_IO_SEG(0x00010F0000868084ull) + ((offset) & 3) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_RFIF_RX_W_SX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_ENDOR_RFIF_RX_W_SX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000868044ull) + ((offset) & 3) * 4;
+}
+#else
+#define CVMX_ENDOR_RFIF_RX_W_SX(offset) (CVMX_ADD_IO_SEG(0x00010F0000868044ull) + ((offset) & 3) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_SAMPLE_ADJ_CFG CVMX_ENDOR_RFIF_SAMPLE_ADJ_CFG_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_SAMPLE_ADJ_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_SAMPLE_ADJ_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008680E4ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_SAMPLE_ADJ_CFG (CVMX_ADD_IO_SEG(0x00010F00008680E4ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_SAMPLE_ADJ_ERROR CVMX_ENDOR_RFIF_SAMPLE_ADJ_ERROR_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_SAMPLE_ADJ_ERROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_SAMPLE_ADJ_ERROR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868100ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_SAMPLE_ADJ_ERROR (CVMX_ADD_IO_SEG(0x00010F0000868100ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_SAMPLE_CNT CVMX_ENDOR_RFIF_SAMPLE_CNT_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_SAMPLE_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_SAMPLE_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868028ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_SAMPLE_CNT (CVMX_ADD_IO_SEG(0x00010F0000868028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_SKIP_FRM_CNT_BITS CVMX_ENDOR_RFIF_SKIP_FRM_CNT_BITS_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_SKIP_FRM_CNT_BITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_SKIP_FRM_CNT_BITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868444ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_SKIP_FRM_CNT_BITS (CVMX_ADD_IO_SEG(0x00010F0000868444ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_RFIF_SPI_CMDSX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_ENDOR_RFIF_SPI_CMDSX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000868800ull) + ((offset) & 63) * 4;
+}
+#else
+#define CVMX_ENDOR_RFIF_SPI_CMDSX(offset) (CVMX_ADD_IO_SEG(0x00010F0000868800ull) + ((offset) & 63) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_RFIF_SPI_CMD_ATTRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_ENDOR_RFIF_SPI_CMD_ATTRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000868A00ull) + ((offset) & 63) * 4;
+}
+#else
+#define CVMX_ENDOR_RFIF_SPI_CMD_ATTRX(offset) (CVMX_ADD_IO_SEG(0x00010F0000868A00ull) + ((offset) & 63) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_SPI_CONF0 CVMX_ENDOR_RFIF_SPI_CONF0_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_SPI_CONF0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_SPI_CONF0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868428ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_SPI_CONF0 (CVMX_ADD_IO_SEG(0x00010F0000868428ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_SPI_CONF1 CVMX_ENDOR_RFIF_SPI_CONF1_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_SPI_CONF1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_SPI_CONF1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000086842Cull);
+}
+#else
+#define CVMX_ENDOR_RFIF_SPI_CONF1 (CVMX_ADD_IO_SEG(0x00010F000086842Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_SPI_CTRL CVMX_ENDOR_RFIF_SPI_CTRL_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_SPI_CTRL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_SPI_CTRL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000866008ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_SPI_CTRL (CVMX_ADD_IO_SEG(0x00010F0000866008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_RFIF_SPI_DINX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_ENDOR_RFIF_SPI_DINX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000868900ull) + ((offset) & 63) * 4;
+}
+#else
+#define CVMX_ENDOR_RFIF_SPI_DINX(offset) (CVMX_ADD_IO_SEG(0x00010F0000868900ull) + ((offset) & 63) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_SPI_RX_DATA CVMX_ENDOR_RFIF_SPI_RX_DATA_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_SPI_RX_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_SPI_RX_DATA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000866000ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_SPI_RX_DATA (CVMX_ADD_IO_SEG(0x00010F0000866000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_SPI_STATUS CVMX_ENDOR_RFIF_SPI_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_SPI_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_SPI_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000866010ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_SPI_STATUS (CVMX_ADD_IO_SEG(0x00010F0000866010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_SPI_TX_DATA CVMX_ENDOR_RFIF_SPI_TX_DATA_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_SPI_TX_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_SPI_TX_DATA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000866004ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_SPI_TX_DATA (CVMX_ADD_IO_SEG(0x00010F0000866004ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_RFIF_SPI_X_LL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_ENDOR_RFIF_SPI_X_LL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000868430ull) + ((offset) & 3) * 4;
+}
+#else
+#define CVMX_ENDOR_RFIF_SPI_X_LL(offset) (CVMX_ADD_IO_SEG(0x00010F0000868430ull) + ((offset) & 3) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TIMER64_CFG CVMX_ENDOR_RFIF_TIMER64_CFG_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TIMER64_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TIMER64_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008681A0ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TIMER64_CFG (CVMX_ADD_IO_SEG(0x00010F00008681A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TIMER64_EN CVMX_ENDOR_RFIF_TIMER64_EN_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TIMER64_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TIMER64_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000086819Cull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TIMER64_EN (CVMX_ADD_IO_SEG(0x00010F000086819Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_RFIF_TTI_SCNT_INTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ENDOR_RFIF_TTI_SCNT_INTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000868140ull) + ((offset) & 7) * 4;
+}
+#else
+#define CVMX_ENDOR_RFIF_TTI_SCNT_INTX(offset) (CVMX_ADD_IO_SEG(0x00010F0000868140ull) + ((offset) & 7) * 4)
+#endif
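+/*
+ * Illustrative sketch (not part of the generated header): indexed macros
+ * such as CVMX_ENDOR_RFIF_TTI_SCNT_INTX() fold the register index into the
+ * address (the index is masked, then scaled by the 4-byte stride), so the
+ * eight TTI sample-count interrupt registers can be walked in a loop.  The
+ * 32-bit accessor cvmx_read64_uint32() from cvmx-access.h is assumed, since
+ * ENDOR CSRs are 32 bits wide.
+ */
+static inline uint32_t cvmx_endor_example_read_tti_scnt(unsigned long idx)
+{
+ /* idx is masked to 0..7 by the macro itself */
+ return cvmx_read64_uint32(CVMX_ENDOR_RFIF_TTI_SCNT_INTX(idx));
+}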
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TTI_SCNT_INT_CLR CVMX_ENDOR_RFIF_TTI_SCNT_INT_CLR_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TTI_SCNT_INT_CLR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TTI_SCNT_INT_CLR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868118ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TTI_SCNT_INT_CLR (CVMX_ADD_IO_SEG(0x00010F0000868118ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TTI_SCNT_INT_EN CVMX_ENDOR_RFIF_TTI_SCNT_INT_EN_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TTI_SCNT_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TTI_SCNT_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868124ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TTI_SCNT_INT_EN (CVMX_ADD_IO_SEG(0x00010F0000868124ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TTI_SCNT_INT_MAP CVMX_ENDOR_RFIF_TTI_SCNT_INT_MAP_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TTI_SCNT_INT_MAP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TTI_SCNT_INT_MAP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868120ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TTI_SCNT_INT_MAP (CVMX_ADD_IO_SEG(0x00010F0000868120ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TTI_SCNT_INT_STAT CVMX_ENDOR_RFIF_TTI_SCNT_INT_STAT_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TTI_SCNT_INT_STAT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TTI_SCNT_INT_STAT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000086811Cull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TTI_SCNT_INT_STAT (CVMX_ADD_IO_SEG(0x00010F000086811Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TX_DIV_STATUS CVMX_ENDOR_RFIF_TX_DIV_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TX_DIV_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TX_DIV_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000086800Cull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TX_DIV_STATUS (CVMX_ADD_IO_SEG(0x00010F000086800Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TX_IF_CFG CVMX_ENDOR_RFIF_TX_IF_CFG_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TX_IF_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TX_IF_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868034ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TX_IF_CFG (CVMX_ADD_IO_SEG(0x00010F0000868034ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TX_LEAD_LAG CVMX_ENDOR_RFIF_TX_LEAD_LAG_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TX_LEAD_LAG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TX_LEAD_LAG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868024ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TX_LEAD_LAG (CVMX_ADD_IO_SEG(0x00010F0000868024ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TX_OFFSET CVMX_ENDOR_RFIF_TX_OFFSET_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TX_OFFSET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TX_OFFSET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008680D8ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TX_OFFSET (CVMX_ADD_IO_SEG(0x00010F00008680D8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TX_OFFSET_ADJ_SCNT CVMX_ENDOR_RFIF_TX_OFFSET_ADJ_SCNT_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TX_OFFSET_ADJ_SCNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TX_OFFSET_ADJ_SCNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000086810Cull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TX_OFFSET_ADJ_SCNT (CVMX_ADD_IO_SEG(0x00010F000086810Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TX_STATUS CVMX_ENDOR_RFIF_TX_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TX_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TX_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868008ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TX_STATUS (CVMX_ADD_IO_SEG(0x00010F0000868008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_TX_TH CVMX_ENDOR_RFIF_TX_TH_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_TX_TH_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_TX_TH not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868414ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_TX_TH (CVMX_ADD_IO_SEG(0x00010F0000868414ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_WIN_EN CVMX_ENDOR_RFIF_WIN_EN_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_WIN_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_WIN_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000868040ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_WIN_EN (CVMX_ADD_IO_SEG(0x00010F0000868040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_WIN_UPD_SCNT CVMX_ENDOR_RFIF_WIN_UPD_SCNT_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_WIN_UPD_SCNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_WIN_UPD_SCNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000086803Cull);
+}
+#else
+#define CVMX_ENDOR_RFIF_WIN_UPD_SCNT (CVMX_ADD_IO_SEG(0x00010F000086803Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_WR_TIMER64_LSB CVMX_ENDOR_RFIF_WR_TIMER64_LSB_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_WR_TIMER64_LSB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_WR_TIMER64_LSB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008681A4ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_WR_TIMER64_LSB (CVMX_ADD_IO_SEG(0x00010F00008681A4ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RFIF_WR_TIMER64_MSB CVMX_ENDOR_RFIF_WR_TIMER64_MSB_FUNC()
+static inline uint64_t CVMX_ENDOR_RFIF_WR_TIMER64_MSB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RFIF_WR_TIMER64_MSB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008681A8ull);
+}
+#else
+#define CVMX_ENDOR_RFIF_WR_TIMER64_MSB (CVMX_ADD_IO_SEG(0x00010F00008681A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_CLKENB0_CLR CVMX_ENDOR_RSTCLK_CLKENB0_CLR_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_CLKENB0_CLR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_CLKENB0_CLR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844428ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_CLKENB0_CLR (CVMX_ADD_IO_SEG(0x00010F0000844428ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_CLKENB0_SET CVMX_ENDOR_RSTCLK_CLKENB0_SET_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_CLKENB0_SET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_CLKENB0_SET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844424ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_CLKENB0_SET (CVMX_ADD_IO_SEG(0x00010F0000844424ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_CLKENB0_STATE CVMX_ENDOR_RSTCLK_CLKENB0_STATE_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_CLKENB0_STATE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_CLKENB0_STATE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844420ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_CLKENB0_STATE (CVMX_ADD_IO_SEG(0x00010F0000844420ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_CLKENB1_CLR CVMX_ENDOR_RSTCLK_CLKENB1_CLR_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_CLKENB1_CLR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_CLKENB1_CLR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844438ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_CLKENB1_CLR (CVMX_ADD_IO_SEG(0x00010F0000844438ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_CLKENB1_SET CVMX_ENDOR_RSTCLK_CLKENB1_SET_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_CLKENB1_SET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_CLKENB1_SET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844434ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_CLKENB1_SET (CVMX_ADD_IO_SEG(0x00010F0000844434ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_CLKENB1_STATE CVMX_ENDOR_RSTCLK_CLKENB1_STATE_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_CLKENB1_STATE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_CLKENB1_STATE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844430ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_CLKENB1_STATE (CVMX_ADD_IO_SEG(0x00010F0000844430ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_DSPSTALL_CLR CVMX_ENDOR_RSTCLK_DSPSTALL_CLR_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_DSPSTALL_CLR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_DSPSTALL_CLR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844448ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_DSPSTALL_CLR (CVMX_ADD_IO_SEG(0x00010F0000844448ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_DSPSTALL_SET CVMX_ENDOR_RSTCLK_DSPSTALL_SET_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_DSPSTALL_SET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_DSPSTALL_SET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844444ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_DSPSTALL_SET (CVMX_ADD_IO_SEG(0x00010F0000844444ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_DSPSTALL_STATE CVMX_ENDOR_RSTCLK_DSPSTALL_STATE_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_DSPSTALL_STATE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_DSPSTALL_STATE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844440ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_DSPSTALL_STATE (CVMX_ADD_IO_SEG(0x00010F0000844440ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_INTR0_CLRMASK CVMX_ENDOR_RSTCLK_INTR0_CLRMASK_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_INTR0_CLRMASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_INTR0_CLRMASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844598ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_INTR0_CLRMASK (CVMX_ADD_IO_SEG(0x00010F0000844598ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_INTR0_MASK CVMX_ENDOR_RSTCLK_INTR0_MASK_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_INTR0_MASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_INTR0_MASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844590ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_INTR0_MASK (CVMX_ADD_IO_SEG(0x00010F0000844590ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_INTR0_SETMASK CVMX_ENDOR_RSTCLK_INTR0_SETMASK_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_INTR0_SETMASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_INTR0_SETMASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844594ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_INTR0_SETMASK (CVMX_ADD_IO_SEG(0x00010F0000844594ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_INTR0_STATUS CVMX_ENDOR_RSTCLK_INTR0_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_INTR0_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_INTR0_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F000084459Cull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_INTR0_STATUS (CVMX_ADD_IO_SEG(0x00010F000084459Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_INTR1_CLRMASK CVMX_ENDOR_RSTCLK_INTR1_CLRMASK_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_INTR1_CLRMASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_INTR1_CLRMASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008445A8ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_INTR1_CLRMASK (CVMX_ADD_IO_SEG(0x00010F00008445A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_INTR1_MASK CVMX_ENDOR_RSTCLK_INTR1_MASK_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_INTR1_MASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_INTR1_MASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008445A0ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_INTR1_MASK (CVMX_ADD_IO_SEG(0x00010F00008445A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_INTR1_SETMASK CVMX_ENDOR_RSTCLK_INTR1_SETMASK_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_INTR1_SETMASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_INTR1_SETMASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008445A4ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_INTR1_SETMASK (CVMX_ADD_IO_SEG(0x00010F00008445A4ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_INTR1_STATUS CVMX_ENDOR_RSTCLK_INTR1_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_INTR1_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_INTR1_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008445ACull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_INTR1_STATUS (CVMX_ADD_IO_SEG(0x00010F00008445ACull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_PHY_CONFIG CVMX_ENDOR_RSTCLK_PHY_CONFIG_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_PHY_CONFIG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_PHY_CONFIG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844450ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_PHY_CONFIG (CVMX_ADD_IO_SEG(0x00010F0000844450ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_PROC_MON CVMX_ENDOR_RSTCLK_PROC_MON_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_PROC_MON_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_PROC_MON not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008445B0ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_PROC_MON (CVMX_ADD_IO_SEG(0x00010F00008445B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_PROC_MON_COUNT CVMX_ENDOR_RSTCLK_PROC_MON_COUNT_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_PROC_MON_COUNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_PROC_MON_COUNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F00008445B4ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_PROC_MON_COUNT (CVMX_ADD_IO_SEG(0x00010F00008445B4ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_RESET0_CLR CVMX_ENDOR_RSTCLK_RESET0_CLR_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_RESET0_CLR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_RESET0_CLR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844408ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_RESET0_CLR (CVMX_ADD_IO_SEG(0x00010F0000844408ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_RESET0_SET CVMX_ENDOR_RSTCLK_RESET0_SET_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_RESET0_SET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_RESET0_SET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844404ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_RESET0_SET (CVMX_ADD_IO_SEG(0x00010F0000844404ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_RESET0_STATE CVMX_ENDOR_RSTCLK_RESET0_STATE_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_RESET0_STATE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_RESET0_STATE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844400ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_RESET0_STATE (CVMX_ADD_IO_SEG(0x00010F0000844400ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_RESET1_CLR CVMX_ENDOR_RSTCLK_RESET1_CLR_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_RESET1_CLR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_RESET1_CLR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844418ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_RESET1_CLR (CVMX_ADD_IO_SEG(0x00010F0000844418ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_RESET1_SET CVMX_ENDOR_RSTCLK_RESET1_SET_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_RESET1_SET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_RESET1_SET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844414ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_RESET1_SET (CVMX_ADD_IO_SEG(0x00010F0000844414ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_RESET1_STATE CVMX_ENDOR_RSTCLK_RESET1_STATE_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_RESET1_STATE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_RESET1_STATE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844410ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_RESET1_STATE (CVMX_ADD_IO_SEG(0x00010F0000844410ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_SW_INTR_CLR CVMX_ENDOR_RSTCLK_SW_INTR_CLR_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_SW_INTR_CLR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_SW_INTR_CLR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844588ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_SW_INTR_CLR (CVMX_ADD_IO_SEG(0x00010F0000844588ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_SW_INTR_SET CVMX_ENDOR_RSTCLK_SW_INTR_SET_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_SW_INTR_SET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_SW_INTR_SET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844584ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_SW_INTR_SET (CVMX_ADD_IO_SEG(0x00010F0000844584ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_SW_INTR_STATUS CVMX_ENDOR_RSTCLK_SW_INTR_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_SW_INTR_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_SW_INTR_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844580ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_SW_INTR_STATUS (CVMX_ADD_IO_SEG(0x00010F0000844580ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_TIMER_CTL CVMX_ENDOR_RSTCLK_TIMER_CTL_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_TIMER_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_TIMER_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844500ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_TIMER_CTL (CVMX_ADD_IO_SEG(0x00010F0000844500ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_TIMER_INTR_CLR CVMX_ENDOR_RSTCLK_TIMER_INTR_CLR_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_TIMER_INTR_CLR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_TIMER_INTR_CLR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844534ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_TIMER_INTR_CLR (CVMX_ADD_IO_SEG(0x00010F0000844534ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_TIMER_INTR_STATUS CVMX_ENDOR_RSTCLK_TIMER_INTR_STATUS_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_TIMER_INTR_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_TIMER_INTR_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844530ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_TIMER_INTR_STATUS (CVMX_ADD_IO_SEG(0x00010F0000844530ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_TIMER_MAX CVMX_ENDOR_RSTCLK_TIMER_MAX_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_TIMER_MAX_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_TIMER_MAX not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844508ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_TIMER_MAX (CVMX_ADD_IO_SEG(0x00010F0000844508ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_TIMER_VALUE CVMX_ENDOR_RSTCLK_TIMER_VALUE_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_TIMER_VALUE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_TIMER_VALUE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844504ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_TIMER_VALUE (CVMX_ADD_IO_SEG(0x00010F0000844504ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ENDOR_RSTCLK_TIMEX_THRD(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_TIMEX_THRD(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010F0000844510ull) + ((offset) & 7) * 4;
+}
+#else
+#define CVMX_ENDOR_RSTCLK_TIMEX_THRD(offset) (CVMX_ADD_IO_SEG(0x00010F0000844510ull) + ((offset) & 7) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ENDOR_RSTCLK_VERSION CVMX_ENDOR_RSTCLK_VERSION_FUNC()
+static inline uint64_t CVMX_ENDOR_RSTCLK_VERSION_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_ENDOR_RSTCLK_VERSION not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010F0000844570ull);
+}
+#else
+#define CVMX_ENDOR_RSTCLK_VERSION (CVMX_ADD_IO_SEG(0x00010F0000844570ull))
+#endif
+
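+/*
+ * Illustrative usage sketch (not part of the generated header): with
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING set, each macro above resolves to an
+ * inline function that warns when the CSR is absent on the running chip
+ * model before returning its address; otherwise it is a plain address
+ * constant.  Either way the result can be handed straight to an accessor.
+ * The 32-bit accessor cvmx_read64_uint32() from cvmx-access.h is assumed,
+ * since ENDOR CSRs are 32 bits wide.
+ */
+static inline uint32_t cvmx_endor_example_read_version(void)
+{
+ return cvmx_read64_uint32(CVMX_ENDOR_RSTCLK_VERSION);
+}
+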
+/**
+ * cvmx_endor_adma_auto_clk_gate
+ */
+union cvmx_endor_adma_auto_clk_gate {
+ uint32_t u32;
+ struct cvmx_endor_adma_auto_clk_gate_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t auto_gate : 1; /**< 1==enable auto-clock-gating */
+#else
+ uint32_t auto_gate : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_endor_adma_auto_clk_gate_s cnf71xx;
+};
+typedef union cvmx_endor_adma_auto_clk_gate cvmx_endor_adma_auto_clk_gate_t;
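+
+/*
+ * Illustrative sketch (not part of the generated header): every union in
+ * this file follows the same read-modify-write pattern; .u32 aliases the
+ * raw register value and .s names the individual fields.
+ * CVMX_ENDOR_ADMA_AUTO_CLK_GATE is assumed to be the matching address
+ * macro defined earlier in this header, with the 32-bit accessors from
+ * cvmx-access.h.
+ */
+static inline void cvmx_endor_example_enable_clk_gate(void)
+{
+ cvmx_endor_adma_auto_clk_gate_t gate;
+
+ gate.u32 = cvmx_read64_uint32(CVMX_ENDOR_ADMA_AUTO_CLK_GATE);
+ gate.s.auto_gate = 1; /* 1==enable auto-clock-gating */
+ cvmx_write64_uint32(CVMX_ENDOR_ADMA_AUTO_CLK_GATE, gate.u32);
+}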
+
+/**
+ * cvmx_endor_adma_axi_rspcode
+ */
+union cvmx_endor_adma_axi_rspcode {
+ uint32_t u32;
+ struct cvmx_endor_adma_axi_rspcode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t ch7_axi_rspcode : 2; /**< dma \#7 AXI response code */
+ uint32_t ch6_axi_rspcode : 2; /**< dma \#6 AXI response code */
+ uint32_t ch5_axi_rspcode : 2; /**< dma \#5 AXI response code */
+ uint32_t ch4_axi_rspcode : 2; /**< dma \#4 AXI response code */
+ uint32_t ch3_axi_rspcode : 2; /**< dma \#3 AXI response code */
+ uint32_t ch2_axi_rspcode : 2; /**< dma \#2 AXI response code */
+ uint32_t ch1_axi_rspcode : 2; /**< dma \#1 AXI response code */
+ uint32_t ch0_axi_rspcode : 2; /**< dma \#0 AXI response code */
+#else
+ uint32_t ch0_axi_rspcode : 2;
+ uint32_t ch1_axi_rspcode : 2;
+ uint32_t ch2_axi_rspcode : 2;
+ uint32_t ch3_axi_rspcode : 2;
+ uint32_t ch4_axi_rspcode : 2;
+ uint32_t ch5_axi_rspcode : 2;
+ uint32_t ch6_axi_rspcode : 2;
+ uint32_t ch7_axi_rspcode : 2;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_endor_adma_axi_rspcode_s cnf71xx;
+};
+typedef union cvmx_endor_adma_axi_rspcode cvmx_endor_adma_axi_rspcode_t;
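+
+/*
+ * Illustrative sketch (not part of the generated header): reading one
+ * channel's 2-bit AXI response code through the union.  The CSR address is
+ * passed in rather than assuming a particular address macro;
+ * cvmx_read64_uint32() from cvmx-access.h is assumed.
+ */
+static inline unsigned int cvmx_endor_example_ch0_rspcode(uint64_t csr_addr)
+{
+ cvmx_endor_adma_axi_rspcode_t rsp;
+
+ rsp.u32 = cvmx_read64_uint32(csr_addr);
+ return rsp.s.ch0_axi_rspcode; /* AXI response code for dma channel #0 */
+}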
+
+/**
+ * cvmx_endor_adma_axi_signal
+ */
+union cvmx_endor_adma_axi_signal {
+ uint32_t u32;
+ struct cvmx_endor_adma_axi_signal_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t awcobuf : 1; /**< ADMA_COBUF */
+ uint32_t reserved_10_23 : 14;
+ uint32_t awlock : 2; /**< ADMA_AWLOCK */
+ uint32_t reserved_2_7 : 6;
+ uint32_t arlock : 2; /**< ADMA_ARLOCK */
+#else
+ uint32_t arlock : 2;
+ uint32_t reserved_2_7 : 6;
+ uint32_t awlock : 2;
+ uint32_t reserved_10_23 : 14;
+ uint32_t awcobuf : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_endor_adma_axi_signal_s cnf71xx;
+};
+typedef union cvmx_endor_adma_axi_signal cvmx_endor_adma_axi_signal_t;
+
+/**
+ * cvmx_endor_adma_axierr_intr
+ */
+union cvmx_endor_adma_axierr_intr {
+ uint32_t u32;
+ struct cvmx_endor_adma_axierr_intr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t axi_err_int : 1; /**< AXI Error interrupt */
+#else
+ uint32_t axi_err_int : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_endor_adma_axierr_intr_s cnf71xx;
+};
+typedef union cvmx_endor_adma_axierr_intr cvmx_endor_adma_axierr_intr_t;
+
+/**
+ * cvmx_endor_adma_dma#_addr_hi
+ */
+union cvmx_endor_adma_dmax_addr_hi {
+ uint32_t u32;
+ struct cvmx_endor_adma_dmax_addr_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t hi_addr : 8; /**< dma high address bits [39:32] */
+#else
+ uint32_t hi_addr : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_endor_adma_dmax_addr_hi_s cnf71xx;
+};
+typedef union cvmx_endor_adma_dmax_addr_hi cvmx_endor_adma_dmax_addr_hi_t;
+
+/**
+ * cvmx_endor_adma_dma#_addr_lo
+ */
+union cvmx_endor_adma_dmax_addr_lo {
+ uint32_t u32;
+ struct cvmx_endor_adma_dmax_addr_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lo_addr : 32; /**< dma low address[31:0] */
+#else
+ uint32_t lo_addr : 32;
+#endif
+ } s;
+ struct cvmx_endor_adma_dmax_addr_lo_s cnf71xx;
+};
+typedef union cvmx_endor_adma_dmax_addr_lo cvmx_endor_adma_dmax_addr_lo_t;
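+
+/*
+ * Illustrative sketch: a channel's DMA target address is split across the
+ * ADDR_HI and ADDR_LO registers; HI_ADDR carries address bits [39:32] (the
+ * field is 8 bits wide) and LO_ADDR bits [31:0], so the full physical
+ * address is recovered by shifting and OR-ing:
+ */
+static inline uint64_t cvmx_endor_example_dma_addr(cvmx_endor_adma_dmax_addr_hi_t hi,
+ cvmx_endor_adma_dmax_addr_lo_t lo)
+{
+ return ((uint64_t)hi.s.hi_addr << 32) | lo.s.lo_addr;
+}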
+
+/**
+ * cvmx_endor_adma_dma#_cfg
+ */
+union cvmx_endor_adma_dmax_cfg {
+ uint32_t u32;
+ struct cvmx_endor_adma_dmax_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t endian : 1; /**< 0==byte-swap, 1==word */
+ uint32_t reserved_18_23 : 6;
+ uint32_t hmm_ofs : 2; /**< HMM memory byte offset */
+ uint32_t reserved_13_15 : 3;
+ uint32_t awcache_lbm : 1; /**< AWCACHE last burst mode, 1==force 0 on the last write data */
+ uint32_t awcache : 4; /**< ADMA_AWCACHE */
+ uint32_t reserved_6_7 : 2;
+ uint32_t bst_bound : 1; /**< burst boundary (0==4KB, 1==128 bytes) */
+ uint32_t max_bstlen : 1; /**< maximum burst length (0==8 dwords) */
+ uint32_t reserved_1_3 : 3;
+ uint32_t enable : 1; /**< 1 == dma enable */
+#else
+ uint32_t enable : 1;
+ uint32_t reserved_1_3 : 3;
+ uint32_t max_bstlen : 1;
+ uint32_t bst_bound : 1;
+ uint32_t reserved_6_7 : 2;
+ uint32_t awcache : 4;
+ uint32_t awcache_lbm : 1;
+ uint32_t reserved_13_15 : 3;
+ uint32_t hmm_ofs : 2;
+ uint32_t reserved_18_23 : 6;
+ uint32_t endian : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_endor_adma_dmax_cfg_s cnf71xx;
+};
+typedef union cvmx_endor_adma_dmax_cfg cvmx_endor_adma_dmax_cfg_t;
+
+/**
+ * cvmx_endor_adma_dma#_size
+ */
+union cvmx_endor_adma_dmax_size {
+ uint32_t u32;
+ struct cvmx_endor_adma_dmax_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_18_31 : 14;
+ uint32_t dma_size : 18; /**< dma transfer byte size */
+#else
+ uint32_t dma_size : 18;
+ uint32_t reserved_18_31 : 14;
+#endif
+ } s;
+ struct cvmx_endor_adma_dmax_size_s cnf71xx;
+};
+typedef union cvmx_endor_adma_dmax_size cvmx_endor_adma_dmax_size_t;
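+
+/*
+ * Illustrative sketch (not part of the generated header): programming a
+ * channel writes ADDR_HI/ADDR_LO and SIZE, then sets ENABLE in CFG.  The
+ * per-channel address macros (CVMX_ENDOR_ADMA_DMAX_ADDR_HI() and friends)
+ * are assumed to be defined earlier in this header; the 18-bit DMA_SIZE
+ * field bounds the transfer length.
+ */
+static inline void cvmx_endor_example_start_dma(unsigned long ch,
+ uint64_t phys, uint32_t bytes)
+{
+ cvmx_endor_adma_dmax_cfg_t cfg;
+
+ cvmx_write64_uint32(CVMX_ENDOR_ADMA_DMAX_ADDR_HI(ch), (uint32_t)(phys >> 32) & 0xff);
+ cvmx_write64_uint32(CVMX_ENDOR_ADMA_DMAX_ADDR_LO(ch), (uint32_t)phys);
+ cvmx_write64_uint32(CVMX_ENDOR_ADMA_DMAX_SIZE(ch), bytes & 0x3ffff);
+
+ cfg.u32 = cvmx_read64_uint32(CVMX_ENDOR_ADMA_DMAX_CFG(ch));
+ cfg.s.enable = 1; /* 1 == dma enable */
+ cvmx_write64_uint32(CVMX_ENDOR_ADMA_DMAX_CFG(ch), cfg.u32);
+}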
+
+/**
+ * cvmx_endor_adma_dma_priority
+ */
+union cvmx_endor_adma_dma_priority {
+ uint32_t u32;
+ struct cvmx_endor_adma_dma_priority_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t rdma_rr_prty : 1; /**< 1 == round-robin for DMA read channel */
+ uint32_t wdma_rr_prty : 1; /**< 1 == round-robin for DMA write channel */
+ uint32_t wdma_fix_prty : 4; /**< dma fixed priority */
+#else
+ uint32_t wdma_fix_prty : 4;
+ uint32_t wdma_rr_prty : 1;
+ uint32_t rdma_rr_prty : 1;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_adma_dma_priority_s cnf71xx;
+};
+typedef union cvmx_endor_adma_dma_priority cvmx_endor_adma_dma_priority_t;
+
+/**
+ * cvmx_endor_adma_dma_reset
+ */
+union cvmx_endor_adma_dma_reset {
+ uint32_t u32;
+ struct cvmx_endor_adma_dma_reset_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t dma_ch_reset : 8; /**< dma channel reset */
+#else
+ uint32_t dma_ch_reset : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_endor_adma_dma_reset_s cnf71xx;
+};
+typedef union cvmx_endor_adma_dma_reset cvmx_endor_adma_dma_reset_t;
+
+/**
+ * cvmx_endor_adma_dmadone_intr
+ */
+union cvmx_endor_adma_dmadone_intr {
+ uint32_t u32;
+ struct cvmx_endor_adma_dmadone_intr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t dma_ch_done : 8; /**< done-interrupt status of the DMA channel */
+#else
+ uint32_t dma_ch_done : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_endor_adma_dmadone_intr_s cnf71xx;
+};
+typedef union cvmx_endor_adma_dmadone_intr cvmx_endor_adma_dmadone_intr_t;
+
+/**
+ * cvmx_endor_adma_intr_dis
+ */
+union cvmx_endor_adma_intr_dis {
+ uint32_t u32;
+ struct cvmx_endor_adma_intr_dis_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_17_31 : 15;
+ uint32_t axierr_intr_dis : 1; /**< AXI Error interrupt disable (1==disable) */
+ uint32_t dmadone_intr_dis : 16; /**< dma done interrupt disable (1==disable) */
+#else
+ uint32_t dmadone_intr_dis : 16;
+ uint32_t axierr_intr_dis : 1;
+ uint32_t reserved_17_31 : 15;
+#endif
+ } s;
+ struct cvmx_endor_adma_intr_dis_s cnf71xx;
+};
+typedef union cvmx_endor_adma_intr_dis cvmx_endor_adma_intr_dis_t;
+
+/**
+ * cvmx_endor_adma_intr_enb
+ */
+union cvmx_endor_adma_intr_enb {
+ uint32_t u32;
+ struct cvmx_endor_adma_intr_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_17_31 : 15;
+ uint32_t axierr_intr_enb : 1; /**< AXI Error interrupt enable (1==enable) */
+ uint32_t dmadone_intr_enb : 16; /**< dma done interrupt enable (1==enable) */
+#else
+ uint32_t dmadone_intr_enb : 16;
+ uint32_t axierr_intr_enb : 1;
+ uint32_t reserved_17_31 : 15;
+#endif
+ } s;
+ struct cvmx_endor_adma_intr_enb_s cnf71xx;
+};
+typedef union cvmx_endor_adma_intr_enb cvmx_endor_adma_intr_enb_t;
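+
+/*
+ * Illustrative sketch (not part of the generated header): unmasking the
+ * dma-done interrupts for channels 0-7 plus the AXI error interrupt.
+ * CVMX_ENDOR_ADMA_INTR_ENB is assumed to be the matching address macro
+ * defined earlier in this header.
+ */
+static inline void cvmx_endor_example_enable_irqs(void)
+{
+ cvmx_endor_adma_intr_enb_t enb;
+
+ enb.u32 = 0;
+ enb.s.dmadone_intr_enb = 0xff; /* dma done, channels 0-7 */
+ enb.s.axierr_intr_enb = 1; /* AXI error */
+ cvmx_write64_uint32(CVMX_ENDOR_ADMA_INTR_ENB, enb.u32);
+}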
+
+/**
+ * cvmx_endor_adma_module_status
+ */
+union cvmx_endor_adma_module_status {
+ uint32_t u32;
+ struct cvmx_endor_adma_module_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t non_dmardch_stt : 1; /**< non-DMA read channel status */
+ uint32_t non_dmawrch_stt : 1; /**< non-DMA write channel status (1==transfer in progress) */
+ uint32_t dma_ch_stt : 14; /**< dma channel status (1==transfer in progress) */
+#else
+ uint32_t dma_ch_stt : 14;
+ uint32_t non_dmawrch_stt : 1;
+ uint32_t non_dmardch_stt : 1;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_endor_adma_module_status_s cnf71xx;
+};
+typedef union cvmx_endor_adma_module_status cvmx_endor_adma_module_status_t;
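+
+/*
+ * Illustrative sketch (not part of the generated header): busy-waiting for
+ * one channel to go idle by polling its DMA_CH_STT bit.
+ * CVMX_ENDOR_ADMA_MODULE_STATUS is assumed to be the matching address
+ * macro defined earlier in this header.
+ */
+static inline void cvmx_endor_example_wait_idle(unsigned int ch)
+{
+ cvmx_endor_adma_module_status_t stt;
+
+ do {
+ stt.u32 = cvmx_read64_uint32(CVMX_ENDOR_ADMA_MODULE_STATUS);
+ } while (stt.s.dma_ch_stt & (1u << ch)); /* 1==transfer in progress */
+}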
+
+/**
+ * cvmx_endor_intc_cntl_hi#
+ *
+ * ENDOR_INTC_CNTL_HI - Interrupt Enable HI
+ *
+ */
+union cvmx_endor_intc_cntl_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_cntl_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t enab : 1; /**< Interrupt Enable */
+#else
+ uint32_t enab : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_endor_intc_cntl_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_cntl_hix cvmx_endor_intc_cntl_hix_t;
+
+/**
+ * cvmx_endor_intc_cntl_lo#
+ *
+ * ENDOR_INTC_CNTL_LO - Interrupt Enable LO
+ *
+ */
+union cvmx_endor_intc_cntl_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_cntl_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t enab : 1; /**< Interrupt Enable */
+#else
+ uint32_t enab : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_endor_intc_cntl_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_cntl_lox cvmx_endor_intc_cntl_lox_t;
+
+/**
+ * cvmx_endor_intc_index_hi#
+ *
+ * ENDOR_INTC_INDEX_HI - Overall Index HI
+ *
+ */
+union cvmx_endor_intc_index_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_index_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31 : 23;
+ uint32_t index : 9; /**< Overall Interrupt Index */
+#else
+ uint32_t index : 9;
+ uint32_t reserved_9_31 : 23;
+#endif
+ } s;
+ struct cvmx_endor_intc_index_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_index_hix cvmx_endor_intc_index_hix_t;
+
+/**
+ * cvmx_endor_intc_index_lo#
+ *
+ * ENDOR_INTC_INDEX_LO - Overall Index LO
+ *
+ */
+union cvmx_endor_intc_index_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_index_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31 : 23;
+ uint32_t index : 9; /**< Overall Interrupt Index */
+#else
+ uint32_t index : 9;
+ uint32_t reserved_9_31 : 23;
+#endif
+ } s;
+ struct cvmx_endor_intc_index_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_index_lox cvmx_endor_intc_index_lox_t;
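+
+/*
+ * Illustrative sketch: the INDEX register reports a 9-bit overall interrupt
+ * index (0-511) that a handler might read to dispatch; the dispatch
+ * semantics are assumed from the field description above.
+ * CVMX_ENDOR_INTC_INDEX_LOX() is assumed to be the matching indexed address
+ * macro defined earlier in this header.
+ */
+static inline unsigned int cvmx_endor_example_pending_index(unsigned long offset)
+{
+ cvmx_endor_intc_index_lox_t idx;
+
+ idx.u32 = cvmx_read64_uint32(CVMX_ENDOR_INTC_INDEX_LOX(offset));
+ return idx.s.index;
+}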
+
+/**
+ * cvmx_endor_intc_misc_idx_hi#
+ *
+ * ENDOR_INTC_MISC_IDX_HI - Misc Group Index HI
+ *
+ */
+union cvmx_endor_intc_misc_idx_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_misc_idx_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< Misc Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_misc_idx_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_misc_idx_hix cvmx_endor_intc_misc_idx_hix_t;
+
+/**
+ * cvmx_endor_intc_misc_idx_lo#
+ *
+ * ENDOR_INTC_MISC_IDX_LO - Misc Group Index LO
+ *
+ */
+union cvmx_endor_intc_misc_idx_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_misc_idx_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< Misc Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_misc_idx_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_misc_idx_lox cvmx_endor_intc_misc_idx_lox_t;
+
+/**
+ * cvmx_endor_intc_misc_mask_hi#
+ *
+ * ENDOR_INTC_MISC_MASK_HI - Interrupt MISC Group Mask HI
+ *
+ */
+union cvmx_endor_intc_misc_mask_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_misc_mask_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rf_rx_ppssync : 1; /**< RX PPS Sync Done */
+ uint32_t rf_rx_spiskip : 1; /**< RX SPI Event Skipped */
+ uint32_t rf_spi3 : 1; /**< SPI Transfer Done Event 3 */
+ uint32_t rf_spi2 : 1; /**< SPI Transfer Done Event 2 */
+ uint32_t rf_spi1 : 1; /**< SPI Transfer Done Event 1 */
+ uint32_t rf_spi0 : 1; /**< SPI Transfer Done Event 0 */
+ uint32_t rf_rx_strx : 1; /**< RX Start RX */
+ uint32_t rf_rx_stframe : 1; /**< RX Start Frame */
+ uint32_t rf_rxd_ffflag : 1; /**< RX DIV FIFO flags asserted */
+ uint32_t rf_rxd_ffthresh : 1; /**< RX DIV FIFO Threshold reached */
+ uint32_t rf_rx_ffflag : 1; /**< RX FIFO flags asserted */
+ uint32_t rf_rx_ffthresh : 1; /**< RX FIFO Threshold reached */
+ uint32_t tti_timer : 8; /**< TTI Timer Interrupt */
+ uint32_t axi_berr : 1; /**< AXI Bus Error */
+ uint32_t rfspi : 1; /**< RFSPI Interrupt */
+ uint32_t ifftpapr : 1; /**< IFFTPAPR HAB Interrupt */
+ uint32_t h3genc : 1; /**< 3G Encoder HAB Interrupt */
+ uint32_t lteenc : 1; /**< LTE Encoder HAB Interrupt */
+ uint32_t vdec : 1; /**< Viterbi Decoder HAB Interrupt */
+ uint32_t turbo_rddone : 1; /**< TURBO Decoder HAB Read Done */
+ uint32_t turbo_done : 1; /**< TURBO Decoder HAB Done */
+ uint32_t turbo : 1; /**< TURBO Decoder HAB Interrupt */
+ uint32_t dftdmp : 1; /**< DFTDMP HAB Interrupt */
+ uint32_t rach : 1; /**< RACH HAB Interrupt */
+ uint32_t ulfe : 1; /**< ULFE HAB Interrupt */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rach : 1;
+ uint32_t dftdmp : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_done : 1;
+ uint32_t turbo_rddone : 1;
+ uint32_t vdec : 1;
+ uint32_t lteenc : 1;
+ uint32_t h3genc : 1;
+ uint32_t ifftpapr : 1;
+ uint32_t rfspi : 1;
+ uint32_t axi_berr : 1;
+ uint32_t tti_timer : 8;
+ uint32_t rf_rx_ffthresh : 1;
+ uint32_t rf_rx_ffflag : 1;
+ uint32_t rf_rxd_ffthresh : 1;
+ uint32_t rf_rxd_ffflag : 1;
+ uint32_t rf_rx_stframe : 1;
+ uint32_t rf_rx_strx : 1;
+ uint32_t rf_spi0 : 1;
+ uint32_t rf_spi1 : 1;
+ uint32_t rf_spi2 : 1;
+ uint32_t rf_spi3 : 1;
+ uint32_t rf_rx_spiskip : 1;
+ uint32_t rf_rx_ppssync : 1;
+#endif
+ } s;
+ struct cvmx_endor_intc_misc_mask_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_misc_mask_hix cvmx_endor_intc_misc_mask_hix_t;
+
+/**
+ * cvmx_endor_intc_misc_mask_lo#
+ *
+ * ENDOR_INTC_MISC_MASK_LO - Interrupt MISC Group Mask LO
+ *
+ */
+union cvmx_endor_intc_misc_mask_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_misc_mask_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rf_rx_ppssync : 1; /**< RX PPS Sync Done */
+ uint32_t rf_rx_spiskip : 1; /**< RX SPI Event Skipped */
+ uint32_t rf_spi3 : 1; /**< SPI Transfer Done Event 3 */
+ uint32_t rf_spi2 : 1; /**< SPI Transfer Done Event 2 */
+ uint32_t rf_spi1 : 1; /**< SPI Transfer Done Event 1 */
+ uint32_t rf_spi0 : 1; /**< SPI Transfer Done Event 0 */
+ uint32_t rf_rx_strx : 1; /**< RX Start RX */
+ uint32_t rf_rx_stframe : 1; /**< RX Start Frame */
+ uint32_t rf_rxd_ffflag : 1; /**< RX DIV FIFO flags asserted */
+ uint32_t rf_rxd_ffthresh : 1; /**< RX DIV FIFO Threshold reached */
+ uint32_t rf_rx_ffflag : 1; /**< RX FIFO flags asserted */
+ uint32_t rf_rx_ffthresh : 1; /**< RX FIFO Threshold reached */
+ uint32_t tti_timer : 8; /**< TTI Timer Interrupt */
+ uint32_t axi_berr : 1; /**< AXI Bus Error */
+ uint32_t rfspi : 1; /**< RFSPI Interrupt */
+ uint32_t ifftpapr : 1; /**< IFFTPAPR HAB Interrupt */
+ uint32_t h3genc : 1; /**< 3G Encoder HAB Interrupt */
+ uint32_t lteenc : 1; /**< LTE Encoder HAB Interrupt */
+ uint32_t vdec : 1; /**< Viterbi Decoder HAB Interrupt */
+ uint32_t turbo_rddone : 1; /**< TURBO Decoder HAB Read Done */
+ uint32_t turbo_done : 1; /**< TURBO Decoder HAB Done */
+ uint32_t turbo : 1; /**< TURBO Decoder HAB Interrupt */
+ uint32_t dftdmp : 1; /**< DFTDMP HAB Interrupt */
+ uint32_t rach : 1; /**< RACH HAB Interrupt */
+ uint32_t ulfe : 1; /**< ULFE HAB Interrupt */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rach : 1;
+ uint32_t dftdmp : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_done : 1;
+ uint32_t turbo_rddone : 1;
+ uint32_t vdec : 1;
+ uint32_t lteenc : 1;
+ uint32_t h3genc : 1;
+ uint32_t ifftpapr : 1;
+ uint32_t rfspi : 1;
+ uint32_t axi_berr : 1;
+ uint32_t tti_timer : 8;
+ uint32_t rf_rx_ffthresh : 1;
+ uint32_t rf_rx_ffflag : 1;
+ uint32_t rf_rxd_ffthresh : 1;
+ uint32_t rf_rxd_ffflag : 1;
+ uint32_t rf_rx_stframe : 1;
+ uint32_t rf_rx_strx : 1;
+ uint32_t rf_spi0 : 1;
+ uint32_t rf_spi1 : 1;
+ uint32_t rf_spi2 : 1;
+ uint32_t rf_spi3 : 1;
+ uint32_t rf_rx_spiskip : 1;
+ uint32_t rf_rx_ppssync : 1;
+#endif
+ } s;
+ struct cvmx_endor_intc_misc_mask_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_misc_mask_lox cvmx_endor_intc_misc_mask_lox_t;
+
+/**
+ * cvmx_endor_intc_misc_rint
+ *
+ * ENDOR_INTC_MISC_RINT - MISC Raw Interrupt Status
+ *
+ */
+union cvmx_endor_intc_misc_rint {
+ uint32_t u32;
+ struct cvmx_endor_intc_misc_rint_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rf_rx_ppssync : 1; /**< RX PPS Sync Done */
+ uint32_t rf_rx_spiskip : 1; /**< RX SPI Event Skipped */
+ uint32_t rf_spi3 : 1; /**< SPI Transfer Done Event 3 */
+ uint32_t rf_spi2 : 1; /**< SPI Transfer Done Event 2 */
+ uint32_t rf_spi1 : 1; /**< SPI Transfer Done Event 1 */
+ uint32_t rf_spi0 : 1; /**< SPI Transfer Done Event 0 */
+ uint32_t rf_rx_strx : 1; /**< RX Start RX */
+ uint32_t rf_rx_stframe : 1; /**< RX Start Frame */
+ uint32_t rf_rxd_ffflag : 1; /**< RX DIV FIFO flags asserted */
+ uint32_t rf_rxd_ffthresh : 1; /**< RX DIV FIFO Threshold reached */
+ uint32_t rf_rx_ffflag : 1; /**< RX FIFO flags asserted */
+ uint32_t rf_rx_ffthresh : 1; /**< RX FIFO Threshold reached */
+ uint32_t tti_timer : 8; /**< TTI Timer Interrupt */
+ uint32_t axi_berr : 1; /**< AXI Bus Error */
+ uint32_t rfspi : 1; /**< RFSPI Interrupt */
+ uint32_t ifftpapr : 1; /**< IFFTPAPR HAB Interrupt */
+ uint32_t h3genc : 1; /**< 3G Encoder HAB Interrupt */
+ uint32_t lteenc : 1; /**< LTE Encoder HAB Interrupt */
+ uint32_t vdec : 1; /**< Viterbi Decoder HAB Interrupt */
+ uint32_t turbo_rddone : 1; /**< TURBO Decoder HAB Read Done */
+ uint32_t turbo_done : 1; /**< TURBO Decoder HAB Done */
+ uint32_t turbo : 1; /**< TURBO Decoder HAB Interrupt */
+ uint32_t dftdmp : 1; /**< DFTDMP HAB Interrupt */
+ uint32_t rach : 1; /**< RACH HAB Interrupt */
+ uint32_t ulfe : 1; /**< ULFE HAB Interrupt */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rach : 1;
+ uint32_t dftdmp : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_done : 1;
+ uint32_t turbo_rddone : 1;
+ uint32_t vdec : 1;
+ uint32_t lteenc : 1;
+ uint32_t h3genc : 1;
+ uint32_t ifftpapr : 1;
+ uint32_t rfspi : 1;
+ uint32_t axi_berr : 1;
+ uint32_t tti_timer : 8;
+ uint32_t rf_rx_ffthresh : 1;
+ uint32_t rf_rx_ffflag : 1;
+ uint32_t rf_rxd_ffthresh : 1;
+ uint32_t rf_rxd_ffflag : 1;
+ uint32_t rf_rx_stframe : 1;
+ uint32_t rf_rx_strx : 1;
+ uint32_t rf_spi0 : 1;
+ uint32_t rf_spi1 : 1;
+ uint32_t rf_spi2 : 1;
+ uint32_t rf_spi3 : 1;
+ uint32_t rf_rx_spiskip : 1;
+ uint32_t rf_rx_ppssync : 1;
+#endif
+ } s;
+ struct cvmx_endor_intc_misc_rint_s cnf71xx;
+};
+typedef union cvmx_endor_intc_misc_rint cvmx_endor_intc_misc_rint_t;
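+
+/*
+ * Illustrative sketch (not part of the generated header): RINT exposes the
+ * raw (pre-mask) status of the MISC group sources, so testing a single
+ * source is just a bitfield read.  CVMX_ENDOR_INTC_MISC_RINT is assumed to
+ * be the matching address macro defined earlier in this header.
+ */
+static inline int cvmx_endor_example_turbo_done(void)
+{
+ cvmx_endor_intc_misc_rint_t rint;
+
+ rint.u32 = cvmx_read64_uint32(CVMX_ENDOR_INTC_MISC_RINT);
+ return rint.s.turbo_done; /* TURBO decoder HAB done */
+}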
+
+/**
+ * cvmx_endor_intc_misc_status_hi#
+ *
+ * ENDOR_INTC_MISC_STATUS_HI - MISC Group Interrupt Status HI
+ *
+ */
+union cvmx_endor_intc_misc_status_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_misc_status_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rf_rx_ppssync : 1; /**< RX PPS Sync Done */
+ uint32_t rf_rx_spiskip : 1; /**< RX SPI Event Skipped */
+ uint32_t rf_spi3 : 1; /**< SPI Transfer Done Event 3 */
+ uint32_t rf_spi2 : 1; /**< SPI Transfer Done Event 2 */
+ uint32_t rf_spi1 : 1; /**< SPI Transfer Done Event 1 */
+ uint32_t rf_spi0 : 1; /**< SPI Transfer Done Event 0 */
+ uint32_t rf_rx_strx : 1; /**< RX Start RX */
+ uint32_t rf_rx_stframe : 1; /**< RX Start Frame */
+ uint32_t rf_rxd_ffflag : 1; /**< RX DIV FIFO flags asserted */
+ uint32_t rf_rxd_ffthresh : 1; /**< RX DIV FIFO Threshold reached */
+ uint32_t rf_rx_ffflag : 1; /**< RX FIFO flags asserted */
+ uint32_t rf_rx_ffthresh : 1; /**< RX FIFO Threshold reached */
+ uint32_t tti_timer : 8; /**< TTI Timer Interrupt */
+ uint32_t axi_berr : 1; /**< AXI Bus Error */
+ uint32_t rfspi : 1; /**< RFSPI Interrupt */
+ uint32_t ifftpapr : 1; /**< IFFTPAPR HAB Interrupt */
+ uint32_t h3genc : 1; /**< 3G Encoder HAB Interrupt */
+ uint32_t lteenc : 1; /**< LTE Encoder HAB Interrupt */
+ uint32_t vdec : 1; /**< Viterbi Decoder HAB Interrupt */
+ uint32_t turbo_rddone : 1; /**< TURBO Decoder HAB Read Done */
+ uint32_t turbo_done : 1; /**< TURBO Decoder HAB Done */
+ uint32_t turbo : 1; /**< TURBO Decoder HAB Interrupt */
+ uint32_t dftdmp : 1; /**< DFTDMP HAB Interrupt */
+ uint32_t rach : 1; /**< RACH HAB Interrupt */
+ uint32_t ulfe : 1; /**< ULFE HAB Interrupt */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rach : 1;
+ uint32_t dftdmp : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_done : 1;
+ uint32_t turbo_rddone : 1;
+ uint32_t vdec : 1;
+ uint32_t lteenc : 1;
+ uint32_t h3genc : 1;
+ uint32_t ifftpapr : 1;
+ uint32_t rfspi : 1;
+ uint32_t axi_berr : 1;
+ uint32_t tti_timer : 8;
+ uint32_t rf_rx_ffthresh : 1;
+ uint32_t rf_rx_ffflag : 1;
+ uint32_t rf_rxd_ffthresh : 1;
+ uint32_t rf_rxd_ffflag : 1;
+ uint32_t rf_rx_stframe : 1;
+ uint32_t rf_rx_strx : 1;
+ uint32_t rf_spi0 : 1;
+ uint32_t rf_spi1 : 1;
+ uint32_t rf_spi2 : 1;
+ uint32_t rf_spi3 : 1;
+ uint32_t rf_rx_spiskip : 1;
+ uint32_t rf_rx_ppssync : 1;
+#endif
+ } s;
+ struct cvmx_endor_intc_misc_status_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_misc_status_hix cvmx_endor_intc_misc_status_hix_t;
+
+/**
+ * cvmx_endor_intc_misc_status_lo#
+ *
+ * ENDOR_INTC_MISC_STATUS_LO - MISC Group Interrupt Status
+ *
+ */
+union cvmx_endor_intc_misc_status_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_misc_status_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rf_rx_ppssync : 1; /**< RX PPS Sync Done */
+ uint32_t rf_rx_spiskip : 1; /**< RX SPI Event Skipped */
+ uint32_t rf_spi3 : 1; /**< SPI Transfer Done Event 3 */
+ uint32_t rf_spi2 : 1; /**< SPI Transfer Done Event 2 */
+ uint32_t rf_spi1 : 1; /**< SPI Transfer Done Event 1 */
+ uint32_t rf_spi0 : 1; /**< SPI Transfer Done Event 0 */
+ uint32_t rf_rx_strx : 1; /**< RX Start RX */
+ uint32_t rf_rx_stframe : 1; /**< RX Start Frame */
+ uint32_t rf_rxd_ffflag : 1; /**< RX DIV FIFO flags asserted */
+ uint32_t rf_rxd_ffthresh : 1; /**< RX DIV FIFO Threshold reached */
+ uint32_t rf_rx_ffflag : 1; /**< RX FIFO flags asserted */
+ uint32_t rf_rx_ffthresh : 1; /**< RX FIFO Threshold reached */
+ uint32_t tti_timer : 8; /**< TTI Timer Interrupt */
+ uint32_t axi_berr : 1; /**< AXI Bus Error */
+ uint32_t rfspi : 1; /**< RFSPI Interrupt */
+ uint32_t ifftpapr : 1; /**< IFFTPAPR HAB Interrupt */
+ uint32_t h3genc : 1; /**< 3G Encoder HAB Interrupt */
+ uint32_t lteenc : 1; /**< LTE Encoder HAB Interrupt */
+ uint32_t vdec : 1; /**< Viterbi Decoder HAB Interrupt */
+ uint32_t turbo_rddone : 1; /**< TURBO Decoder HAB Read Done */
+ uint32_t turbo_done : 1; /**< TURBO Decoder HAB Done */
+ uint32_t turbo : 1; /**< TURBO Decoder HAB Interrupt */
+ uint32_t dftdmp : 1; /**< DFTDMP HAB Interrupt */
+ uint32_t rach : 1; /**< RACH HAB Interrupt */
+ uint32_t ulfe : 1; /**< ULFE HAB Interrupt */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rach : 1;
+ uint32_t dftdmp : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_done : 1;
+ uint32_t turbo_rddone : 1;
+ uint32_t vdec : 1;
+ uint32_t lteenc : 1;
+ uint32_t h3genc : 1;
+ uint32_t ifftpapr : 1;
+ uint32_t rfspi : 1;
+ uint32_t axi_berr : 1;
+ uint32_t tti_timer : 8;
+ uint32_t rf_rx_ffthresh : 1;
+ uint32_t rf_rx_ffflag : 1;
+ uint32_t rf_rxd_ffthresh : 1;
+ uint32_t rf_rxd_ffflag : 1;
+ uint32_t rf_rx_stframe : 1;
+ uint32_t rf_rx_strx : 1;
+ uint32_t rf_spi0 : 1;
+ uint32_t rf_spi1 : 1;
+ uint32_t rf_spi2 : 1;
+ uint32_t rf_spi3 : 1;
+ uint32_t rf_rx_spiskip : 1;
+ uint32_t rf_rx_ppssync : 1;
+#endif
+ } s;
+ struct cvmx_endor_intc_misc_status_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_misc_status_lox cvmx_endor_intc_misc_status_lox_t;
+
+/**
+ * cvmx_endor_intc_rd_idx_hi#
+ *
+ * ENDOR_INTC_RD_IDX_HI - Read Done Group Index HI
+ *
+ */
+union cvmx_endor_intc_rd_idx_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_rd_idx_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< Read Done Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_rd_idx_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rd_idx_hix cvmx_endor_intc_rd_idx_hix_t;
+
+/**
+ * cvmx_endor_intc_rd_idx_lo#
+ *
+ * ENDOR_INTC_RD_IDX_LO - Read Done Group Index LO
+ *
+ */
+union cvmx_endor_intc_rd_idx_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_rd_idx_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< Read Done Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_rd_idx_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rd_idx_lox cvmx_endor_intc_rd_idx_lox_t;
+
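The *_RD_IDX registers expose only a 6-bit grpidx. A plausible helper (inferred from the field width, not stated in this header) maps that index back to a bit position in the 24-bit Read Done status word:

/* Sketch: turn a Read Done group index into a status-word bit.
 * grpidx is 6 bits wide, but only indices 0..23 correspond to
 * populated bits in this group on cnf71xx (assumed). */
static uint32_t endor_rd_idx_to_bit(cvmx_endor_intc_rd_idx_lox_t idx)
{
    unsigned i = idx.s.grpidx & 0x1f;   /* guard the shift */
    return 1u << i;
}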
+/**
+ * cvmx_endor_intc_rd_mask_hi#
+ *
+ * ENDOR_INTC_RD_MASK_HI = Interrupt Read Done Group Mask
+ *
+ */
+union cvmx_endor_intc_rd_mask_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_rd_mask_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t t3_rfif_1 : 1; /**< RFIF_1 Read Done */
+ uint32_t t3_rfif_0 : 1; /**< RFIF_0 Read Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Read Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Read Done */
+ uint32_t axi_tx : 1; /**< TX to Host Read Done */
+ uint32_t t3_int : 1; /**< TX to PHY Read Done */
+ uint32_t t3_ext : 1; /**< TX to Host Read Done */
+ uint32_t t2_int : 1; /**< RX1 to PHY Read Done */
+ uint32_t t2_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t t2_ext : 1; /**< RX1 to Host Read Done */
+ uint32_t t1_int : 1; /**< RX0 to PHY Read Done */
+ uint32_t t1_ext : 1; /**< RX0 to Host Read Done */
+ uint32_t ifftpapr_rm : 1; /**< IFFTPAPR_RM Read Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Read Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Read Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Read Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Read Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Read Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Read Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Read Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Read Done */
+ uint32_t rachsnif : 1; /**< RACH Read Done */
+ uint32_t ulfe : 1; /**< ULFE Read Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t ifftpapr_rm : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_int : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t3_rfif_0 : 1;
+ uint32_t t3_rfif_1 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_intc_rd_mask_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rd_mask_hix cvmx_endor_intc_rd_mask_hix_t;
+
+/**
+ * cvmx_endor_intc_rd_mask_lo#
+ *
+ * ENDOR_INTC_RD_MASK_LO = Interrupt Read Done Group Mask
+ *
+ */
+union cvmx_endor_intc_rd_mask_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_rd_mask_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t t3_rfif_1 : 1; /**< RFIF_1 Read Done */
+ uint32_t t3_rfif_0 : 1; /**< RFIF_0 Read Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Read Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Read Done */
+ uint32_t axi_tx : 1; /**< TX to Host Read Done */
+ uint32_t t3_int : 1; /**< TX to PHY Read Done */
+ uint32_t t3_ext : 1; /**< TX to Host Read Done */
+ uint32_t t2_int : 1; /**< RX1 to PHY Read Done */
+ uint32_t t2_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t t2_ext : 1; /**< RX1 to Host Read Done */
+ uint32_t t1_int : 1; /**< RX0 to PHY Read Done */
+ uint32_t t1_ext : 1; /**< RX0 to Host Read Done */
+ uint32_t ifftpapr_rm : 1; /**< IFFTPAPR_RM Read Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Read Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Read Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Read Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Read Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Read Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Read Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Read Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Read Done */
+ uint32_t rachsnif : 1; /**< RACH Read Done */
+ uint32_t ulfe : 1; /**< ULFE Read Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t ifftpapr_rm : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_int : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t3_rfif_0 : 1;
+ uint32_t t3_rfif_1 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_intc_rd_mask_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rd_mask_lox cvmx_endor_intc_rd_mask_lox_t;
+
+/**
+ * cvmx_endor_intc_rd_rint
+ *
+ * ENDOR_INTC_RD_RINT - Read Done Group Raw Interrupt Status
+ *
+ */
+union cvmx_endor_intc_rd_rint {
+ uint32_t u32;
+ struct cvmx_endor_intc_rd_rint_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t t3_rfif_1 : 1; /**< RFIF_1 Read Done */
+ uint32_t t3_rfif_0 : 1; /**< RFIF_0 Read Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Read Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Read Done */
+ uint32_t axi_tx : 1; /**< TX to Host Read Done */
+ uint32_t t3_int : 1; /**< TX to PHY Read Done */
+ uint32_t t3_ext : 1; /**< TX to Host Read Done */
+ uint32_t t2_int : 1; /**< RX1 to PHY Read Done */
+ uint32_t t2_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t t2_ext : 1; /**< RX1 to Host Read Done */
+ uint32_t t1_int : 1; /**< RX0 to PHY Read Done */
+ uint32_t t1_ext : 1; /**< RX0 to Host Read Done */
+ uint32_t ifftpapr_rm : 1; /**< IFFTPAPR_RM Read Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Read Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Read Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Read Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Read Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Read Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Read Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Read Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Read Done */
+ uint32_t rachsnif : 1; /**< RACH Read Done */
+ uint32_t ulfe : 1; /**< ULFE Read Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t ifftpapr_rm : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_int : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t3_rfif_0 : 1;
+ uint32_t t3_rfif_1 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_intc_rd_rint_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rd_rint cvmx_endor_intc_rd_rint_t;
+
+/**
+ * cvmx_endor_intc_rd_status_hi#
+ *
+ * ENDOR_INTC_RD_STATUS_HI - Read Done Group Interrupt Status
+ *
+ */
+union cvmx_endor_intc_rd_status_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_rd_status_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t t3_rfif_1 : 1; /**< RFIF_1 Read Done */
+ uint32_t t3_rfif_0 : 1; /**< RFIF_0 Read Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Read Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Read Done */
+ uint32_t axi_tx : 1; /**< TX to Host Read Done */
+ uint32_t t3_int : 1; /**< TX to PHY Read Done */
+ uint32_t t3_ext : 1; /**< TX to Host Read Done */
+ uint32_t t2_int : 1; /**< RX1 to PHY Read Done */
+ uint32_t t2_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t t2_ext : 1; /**< RX1 to Host Read Done */
+ uint32_t t1_int : 1; /**< RX0 to PHY Read Done */
+ uint32_t t1_ext : 1; /**< RX0 to Host Read Done */
+ uint32_t ifftpapr_rm : 1; /**< IFFTPAPR_RM Read Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Read Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Read Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Read Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Read Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Read Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Read Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Read Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Read Done */
+ uint32_t rachsnif : 1; /**< RACH Read Done */
+ uint32_t ulfe : 1; /**< ULFE Read Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t ifftpapr_rm : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_int : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t3_rfif_0 : 1;
+ uint32_t t3_rfif_1 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_intc_rd_status_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rd_status_hix cvmx_endor_intc_rd_status_hix_t;
+
+/**
+ * cvmx_endor_intc_rd_status_lo#
+ *
+ * ENDOR_INTC_RD_STATUS_LO - Read Done Group Interrupt Status
+ *
+ */
+union cvmx_endor_intc_rd_status_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_rd_status_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t t3_rfif_1 : 1; /**< RFIF_1 Read Done */
+ uint32_t t3_rfif_0 : 1; /**< RFIF_0 Read Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Read Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Read Done */
+ uint32_t axi_tx : 1; /**< TX to Host Read Done */
+ uint32_t t3_int : 1; /**< TX to PHY Read Done */
+ uint32_t t3_ext : 1; /**< TX to Host Read Done */
+ uint32_t t2_int : 1; /**< RX1 to PHY Read Done */
+ uint32_t t2_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t t2_ext : 1; /**< RX1 to Host Read Done */
+ uint32_t t1_int : 1; /**< RX0 to PHY Read Done */
+ uint32_t t1_ext : 1; /**< RX0 to Host Read Done */
+ uint32_t ifftpapr_rm : 1; /**< IFFTPAPR_RM Read Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Read Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Read Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Read Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Read Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Read Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Read Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Read Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Read Done */
+ uint32_t rachsnif : 1; /**< RACH Read Done */
+ uint32_t ulfe : 1; /**< ULFE Read Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t ifftpapr_rm : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_int : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t3_rfif_0 : 1;
+ uint32_t t3_rfif_1 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_intc_rd_status_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rd_status_lox cvmx_endor_intc_rd_status_lox_t;
+
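The register names suggest, though this header does not state it, that each STATUS word is the raw RINT value gated by the corresponding MASK. A one-line model of that assumed relationship:

/* Assumed model only: masked status = raw interrupts AND mask. */
static uint32_t endor_rd_status_model(cvmx_endor_intc_rd_rint_t raw,
                                      cvmx_endor_intc_rd_mask_lox_t mask)
{
    return raw.u32 & mask.u32;
}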
+/**
+ * cvmx_endor_intc_rdq_idx_hi#
+ *
+ * ENDOR_INTC_RDQ_IDX_HI - Read Queue Done Group Index HI
+ *
+ */
+union cvmx_endor_intc_rdq_idx_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_rdq_idx_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< Read Queue Done Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_rdq_idx_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rdq_idx_hix cvmx_endor_intc_rdq_idx_hix_t;
+
+/**
+ * cvmx_endor_intc_rdq_idx_lo#
+ *
+ * ENDOR_INTC_RDQ_IDX_LO - Read Queue Done Group Index LO
+ *
+ */
+union cvmx_endor_intc_rdq_idx_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_rdq_idx_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< Read Queue Done Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_rdq_idx_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rdq_idx_lox cvmx_endor_intc_rdq_idx_lox_t;
+
+/**
+ * cvmx_endor_intc_rdq_mask_hi#
+ *
+ * ENDOR_INTC_RDQ_MASK_HI = Interrupt Read Queue Done Group Mask
+ *
+ */
+union cvmx_endor_intc_rdq_mask_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_rdq_mask_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t t3_rfif_1 : 1; /**< RFIF_1 Read Done */
+ uint32_t t3_rfif_0 : 1; /**< RFIF_0 Read Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Read Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Read Done */
+ uint32_t axi_tx : 1; /**< TX to Host Read Done */
+ uint32_t t3_int : 1; /**< TX to PHY Read Done */
+ uint32_t t3_ext : 1; /**< TX to Host Read Done */
+ uint32_t t2_int : 1; /**< RX1 to PHY Read Done */
+ uint32_t t2_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t t2_ext : 1; /**< RX1 to Host Read Done */
+ uint32_t t1_int : 1; /**< RX0 to PHY Read Done */
+ uint32_t t1_ext : 1; /**< RX0 to Host Read Done */
+ uint32_t ifftpapr_rm : 1; /**< IFFTPAPR_RM Read Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Read Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Read Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Read Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Read Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Read Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Read Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Read Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Read Done */
+ uint32_t rachsnif : 1; /**< RACH Read Done */
+ uint32_t ulfe : 1; /**< ULFE Read Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t ifftpapr_rm : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_int : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t3_rfif_0 : 1;
+ uint32_t t3_rfif_1 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_intc_rdq_mask_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rdq_mask_hix cvmx_endor_intc_rdq_mask_hix_t;
+
+/**
+ * cvmx_endor_intc_rdq_mask_lo#
+ *
+ * ENDOR_INTC_RDQ_MASK_LO = Interrupt Read Queue Done Group Mask
+ *
+ */
+union cvmx_endor_intc_rdq_mask_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_rdq_mask_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t t3_rfif_1 : 1; /**< RFIF_1 Read Done */
+ uint32_t t3_rfif_0 : 1; /**< RFIF_0 Read Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Read Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Read Done */
+ uint32_t axi_tx : 1; /**< TX to Host Read Done */
+ uint32_t t3_int : 1; /**< TX to PHY Read Done */
+ uint32_t t3_ext : 1; /**< TX to Host Read Done */
+ uint32_t t2_int : 1; /**< RX1 to PHY Read Done */
+ uint32_t t2_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t t2_ext : 1; /**< RX1 to Host Read Done */
+ uint32_t t1_int : 1; /**< RX0 to PHY Read Done */
+ uint32_t t1_ext : 1; /**< RX0 to Host Read Done */
+ uint32_t ifftpapr_rm : 1; /**< IFFTPAPR_RM Read Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Read Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Read Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Read Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Read Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Read Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Read Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Read Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Read Done */
+ uint32_t rachsnif : 1; /**< RACH Read Done */
+ uint32_t ulfe : 1; /**< ULFE Read Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t ifftpapr_rm : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_int : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t3_rfif_0 : 1;
+ uint32_t t3_rfif_1 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_intc_rdq_mask_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rdq_mask_lox cvmx_endor_intc_rdq_mask_lox_t;
+
+/**
+ * cvmx_endor_intc_rdq_rint
+ *
+ * ENDOR_INTC_RDQ_RINT - Read Queue Done Group Raw Interrupt Status
+ *
+ */
+union cvmx_endor_intc_rdq_rint {
+ uint32_t u32;
+ struct cvmx_endor_intc_rdq_rint_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t t3_rfif_1 : 1; /**< RFIF_1 Read Done */
+ uint32_t t3_rfif_0 : 1; /**< RFIF_0 Read Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Read Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Read Done */
+ uint32_t axi_tx : 1; /**< TX to Host Read Done */
+ uint32_t t3_int : 1; /**< TX to PHY Read Done */
+ uint32_t t3_ext : 1; /**< TX to Host Read Done */
+ uint32_t t2_int : 1; /**< RX1 to PHY Read Done */
+ uint32_t t2_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t t2_ext : 1; /**< RX1 to Host Read Done */
+ uint32_t t1_int : 1; /**< RX0 to PHY Read Done */
+ uint32_t t1_ext : 1; /**< RX0 to Host Read Done */
+ uint32_t ifftpapr_rm : 1; /**< IFFTPAPR_RM Read Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Read Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Read Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Read Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Read Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Read Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Read Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Read Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Read Done */
+ uint32_t rachsnif : 1; /**< RACH Read Done */
+ uint32_t ulfe : 1; /**< ULFE Read Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t ifftpapr_rm : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_int : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t3_rfif_0 : 1;
+ uint32_t t3_rfif_1 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_intc_rdq_rint_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rdq_rint cvmx_endor_intc_rdq_rint_t;
+
+/**
+ * cvmx_endor_intc_rdq_status_hi#
+ *
+ * ENDOR_INTC_RDQ_STATUS_HI - Read Queue Done Group Interrupt Status
+ *
+ */
+union cvmx_endor_intc_rdq_status_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_rdq_status_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t t3_rfif_1 : 1; /**< RFIF_1 Read Done */
+ uint32_t t3_rfif_0 : 1; /**< RFIF_0 Read Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Read Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Read Done */
+ uint32_t axi_tx : 1; /**< TX to Host Read Done */
+ uint32_t t3_int : 1; /**< TX to PHY Read Done */
+ uint32_t t3_ext : 1; /**< TX to Host Read Done */
+ uint32_t t2_int : 1; /**< RX1 to PHY Read Done */
+ uint32_t t2_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t t2_ext : 1; /**< RX1 to Host Read Done */
+ uint32_t t1_int : 1; /**< RX0 to PHY Read Done */
+ uint32_t t1_ext : 1; /**< RX0 to Host Read Done */
+ uint32_t ifftpapr_rm : 1; /**< IFFTPAPR_RM Read Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Read Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Read Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Read Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Read Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Read Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Read Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Read Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Read Done */
+ uint32_t rachsnif : 1; /**< RACH Read Done */
+ uint32_t ulfe : 1; /**< ULFE Read Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t ifftpapr_rm : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_int : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t3_rfif_0 : 1;
+ uint32_t t3_rfif_1 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_intc_rdq_status_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rdq_status_hix cvmx_endor_intc_rdq_status_hix_t;
+
+/**
+ * cvmx_endor_intc_rdq_status_lo#
+ *
+ * ENDOR_INTC_RDQ_STATUS_LO - Read Queue Done Group Interrupt Status
+ *
+ */
+union cvmx_endor_intc_rdq_status_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_rdq_status_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t t3_rfif_1 : 1; /**< RFIF_1 Read Done */
+ uint32_t t3_rfif_0 : 1; /**< RFIF_0 Read Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Read Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Read Done */
+ uint32_t axi_tx : 1; /**< TX to Host Read Done */
+ uint32_t t3_int : 1; /**< TX to PHY Read Done */
+ uint32_t t3_ext : 1; /**< TX to Host Read Done */
+ uint32_t t2_int : 1; /**< RX1 to PHY Read Done */
+ uint32_t t2_harq : 1; /**< HARQ to Host Read Done */
+ uint32_t t2_ext : 1; /**< RX1 to Host Read Done */
+ uint32_t t1_int : 1; /**< RX0 to PHY Read Done */
+ uint32_t t1_ext : 1; /**< RX0 to Host Read Done */
+ uint32_t ifftpapr_rm : 1; /**< IFFTPAPR_RM Read Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Read Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Read Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Read Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Read Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Read Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Read Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Read Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Read Done */
+ uint32_t rachsnif : 1; /**< RACH Read Done */
+ uint32_t ulfe : 1; /**< ULFE Read Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t ifftpapr_rm : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_int : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t3_rfif_0 : 1;
+ uint32_t t3_rfif_1 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_intc_rdq_status_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_rdq_status_lox cvmx_endor_intc_rdq_status_lox_t;
+
+/**
+ * cvmx_endor_intc_stat_hi#
+ *
+ * ENDOR_INTC_STAT_HI - Grouped Interrupt Status HI
+ *
+ */
+union cvmx_endor_intc_stat_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_stat_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t misc : 1; /**< Misc Group Interrupt */
+ uint32_t sw : 1; /**< SW Group Interrupt */
+ uint32_t wrqdone : 1; /**< Write Queue Done Group Interrupt */
+ uint32_t rdqdone : 1; /**< Read Queue Done Group Interrupt */
+ uint32_t rddone : 1; /**< Read Done Group Interrupt */
+ uint32_t wrdone : 1; /**< Write Done Group Interrupt */
+#else
+ uint32_t wrdone : 1;
+ uint32_t rddone : 1;
+ uint32_t rdqdone : 1;
+ uint32_t wrqdone : 1;
+ uint32_t sw : 1;
+ uint32_t misc : 1;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_stat_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_stat_hix cvmx_endor_intc_stat_hix_t;
+
+/**
+ * cvmx_endor_intc_stat_lo#
+ *
+ * ENDOR_INTC_STAT_LO - Grouped Interrupt Status LO
+ *
+ */
+union cvmx_endor_intc_stat_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_stat_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t misc : 1; /**< Misc Group Interrupt */
+ uint32_t sw : 1; /**< SW Group Interrupt */
+ uint32_t wrqdone : 1; /**< Write Queue Done Group Interrupt */
+ uint32_t rdqdone : 1; /**< Read Queue Done Group Interrupt */
+ uint32_t rddone : 1; /**< Read Done Group Interrupt */
+ uint32_t wrdone : 1; /**< Write Done Group Interrupt */
+#else
+ uint32_t wrdone : 1;
+ uint32_t rddone : 1;
+ uint32_t rdqdone : 1;
+ uint32_t wrqdone : 1;
+ uint32_t sw : 1;
+ uint32_t misc : 1;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_stat_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_stat_lox cvmx_endor_intc_stat_lox_t;
+
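ENDOR_INTC_STAT_HI/LO collapse the six interrupt groups into one word each, so a top-level handler can fan out on the group bits before touching the per-group registers. A sketch (the group-service actions are left as comments because their names are not defined here):

static void endor_intc_dispatch(cvmx_endor_intc_stat_lox_t stat)
{
    if (stat.s.wrdone)  { /* service the Write Done group */ }
    if (stat.s.rddone)  { /* service the Read Done group */ }
    if (stat.s.rdqdone) { /* service the Read Queue Done group */ }
    if (stat.s.wrqdone) { /* service the Write Queue Done group */ }
    if (stat.s.sw)      { /* service the SW group */ }
    if (stat.s.misc)    { /* service the MISC group */ }
}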
+/**
+ * cvmx_endor_intc_sw_idx_hi#
+ *
+ * ENDOR_INTC_SW_IDX_HI - SW Group Index HI
+ *
+ */
+union cvmx_endor_intc_sw_idx_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_sw_idx_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< SW Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_sw_idx_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_sw_idx_hix cvmx_endor_intc_sw_idx_hix_t;
+
+/**
+ * cvmx_endor_intc_sw_idx_lo#
+ *
+ * ENDOR_INTC_SW_IDX_LO - SW Group Index LO
+ *
+ */
+union cvmx_endor_intc_sw_idx_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_sw_idx_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< SW Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_sw_idx_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_sw_idx_lox cvmx_endor_intc_sw_idx_lox_t;
+
+/**
+ * cvmx_endor_intc_sw_mask_hi#
+ *
+ * ENDOR_INTC_SW_MASK_HI = Interrupt SW Mask
+ *
+ */
+union cvmx_endor_intc_sw_mask_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_sw_mask_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t swint : 32; /**< SW Interrupt Mask */
+#else
+ uint32_t swint : 32;
+#endif
+ } s;
+ struct cvmx_endor_intc_sw_mask_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_sw_mask_hix cvmx_endor_intc_sw_mask_hix_t;
+
+/**
+ * cvmx_endor_intc_sw_mask_lo#
+ *
+ * ENDOR_INTC_SW_MASK_LO = Interrupt SW Mask
+ *
+ */
+union cvmx_endor_intc_sw_mask_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_sw_mask_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t swint : 32; /**< SW Interrupt Mask */
+#else
+ uint32_t swint : 32;
+#endif
+ } s;
+ struct cvmx_endor_intc_sw_mask_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_sw_mask_lox cvmx_endor_intc_sw_mask_lox_t;
+
+/**
+ * cvmx_endor_intc_sw_rint
+ *
+ * ENDOR_INTC_SW_RINT - SW Raw Interrupt Status
+ *
+ */
+union cvmx_endor_intc_sw_rint {
+ uint32_t u32;
+ struct cvmx_endor_intc_sw_rint_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t swint : 32; /**< SW Raw Interrupt Status */
+#else
+ uint32_t swint : 32;
+#endif
+ } s;
+ struct cvmx_endor_intc_sw_rint_s cnf71xx;
+};
+typedef union cvmx_endor_intc_sw_rint cvmx_endor_intc_sw_rint_t;
+
+/**
+ * cvmx_endor_intc_sw_status_hi#
+ *
+ * ENDOR_INTC_SW_STATUS_HI - SW Interrupt Status
+ *
+ */
+union cvmx_endor_intc_sw_status_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_sw_status_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t swint : 32; /**< SW Interrupt Status */
+#else
+ uint32_t swint : 32;
+#endif
+ } s;
+ struct cvmx_endor_intc_sw_status_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_sw_status_hix cvmx_endor_intc_sw_status_hix_t;
+
+/**
+ * cvmx_endor_intc_sw_status_lo#
+ *
+ * ENDOR_INTC_SW_STATUS_LO - SW Interrupt Status
+ *
+ */
+union cvmx_endor_intc_sw_status_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_sw_status_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t swint : 32; /**< SW Interrupt Status */
+#else
+ uint32_t swint : 32;
+#endif
+ } s;
+ struct cvmx_endor_intc_sw_status_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_sw_status_lox cvmx_endor_intc_sw_status_lox_t;
+
+/**
+ * cvmx_endor_intc_swclr
+ *
+ * ENDOR_INTC_SWCLR - SW Interrupt Clear
+ *
+ */
+union cvmx_endor_intc_swclr {
+ uint32_t u32;
+ struct cvmx_endor_intc_swclr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t clr : 32; /**< Clear SW Interrupt bit */
+#else
+ uint32_t clr : 32;
+#endif
+ } s;
+ struct cvmx_endor_intc_swclr_s cnf71xx;
+};
+typedef union cvmx_endor_intc_swclr cvmx_endor_intc_swclr_t;
+
+/**
+ * cvmx_endor_intc_swset
+ *
+ * ENDOR_INTC_SWSET - SW Interrupt Set
+ *
+ */
+union cvmx_endor_intc_swset {
+ uint32_t u32;
+ struct cvmx_endor_intc_swset_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t set : 32; /**< Set SW Interrupt bit */
+#else
+ uint32_t set : 32;
+#endif
+ } s;
+ struct cvmx_endor_intc_swset_s cnf71xx;
+};
+typedef union cvmx_endor_intc_swset cvmx_endor_intc_swset_t;
+
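SWSET and SWCLR appear to follow the usual write-1-to-set / write-1-to-clear idiom (an inference from the names and the set/clr field comments). Raising and then acknowledging SW interrupt n might look like the sketch below; the register-write accessor is deliberately left as a comment because its name is not defined in this header:

/* Sketch: pulse SW interrupt n (write-1-to-set, write-1-to-clear;
 * both behaviors are assumed, not documented in this header). */
static void endor_sw_int_pulse(unsigned n)
{
    cvmx_endor_intc_swset_t set = { .u32 = 0 };
    cvmx_endor_intc_swclr_t clr = { .u32 = 0 };

    set.s.set = 1u << (n & 0x1f);
    /* write set.u32 to ENDOR_INTC_SWSET via the SDK accessor */
    clr.s.clr = 1u << (n & 0x1f);
    /* write clr.u32 to ENDOR_INTC_SWCLR via the SDK accessor */
}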
+/**
+ * cvmx_endor_intc_wr_idx_hi#
+ *
+ * ENDOR_INTC_WR_IDX_HI - Write Done Group Index HI
+ *
+ */
+union cvmx_endor_intc_wr_idx_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_wr_idx_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< Write Done Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_wr_idx_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wr_idx_hix cvmx_endor_intc_wr_idx_hix_t;
+
+/**
+ * cvmx_endor_intc_wr_idx_lo#
+ *
+ * ENDOR_INTC_WR_IDX_LO - Write Done Group Index LO
+ *
+ */
+union cvmx_endor_intc_wr_idx_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_wr_idx_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< Write Done Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_wr_idx_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wr_idx_lox cvmx_endor_intc_wr_idx_lox_t;
+
+/**
+ * cvmx_endor_intc_wr_mask_hi#
+ *
+ * ENDOR_INTC_WR_MASK_HI = Interrupt Write Done Group Mask
+ *
+ */
+union cvmx_endor_intc_wr_mask_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_wr_mask_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_29_31 : 3;
+ uint32_t t1_rfif_1 : 1; /**< RFIF_1 Write Done */
+ uint32_t t1_rfif_0 : 1; /**< RFIF_0 Write Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Write Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Write Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Write Done */
+ uint32_t axi_tx : 1; /**< TX to Host Write Done */
+ uint32_t t3_instr : 1; /**< TX Instr Write Done */
+ uint32_t t3_int : 1; /**< PHY to TX Write Done */
+ uint32_t t3_ext : 1; /**< Host to TX Write Done */
+ uint32_t t2_instr : 1; /**< RX1 Instr Write Done */
+ uint32_t t2_harq : 1; /**< Host to HARQ Write Done */
+ uint32_t t2_int : 1; /**< PHY to RX1 Write Done */
+ uint32_t t2_ext : 1; /**< Host to RX1 Write Done */
+ uint32_t t1_instr : 1; /**< RX0 Instr Write Done */
+ uint32_t t1_int : 1; /**< PHY to RX0 Write Done */
+ uint32_t t1_ext : 1; /**< Host to RX0 Write Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Write Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Write Done */
+ uint32_t lteenc_cch : 1; /**< LTE Encoder CCH Write Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Write Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Write Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Write Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Write Done */
+ uint32_t turbo_sb : 1; /**< Turbo Decoder Soft Bits Write Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Write Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Write Done */
+ uint32_t rachsnif_1 : 1; /**< RACH_1 Write Done */
+ uint32_t rachsnif_0 : 1; /**< RACH_0 Write Done */
+ uint32_t ulfe : 1; /**< ULFE Write Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif_0 : 1;
+ uint32_t rachsnif_1 : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_sb : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t lteenc_cch : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t1_instr : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_int : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_instr : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t t3_instr : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t1_rfif_0 : 1;
+ uint32_t t1_rfif_1 : 1;
+ uint32_t reserved_29_31 : 3;
+#endif
+ } s;
+ struct cvmx_endor_intc_wr_mask_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wr_mask_hix cvmx_endor_intc_wr_mask_hix_t;
+
+/**
+ * cvmx_endor_intc_wr_mask_lo#
+ *
+ * ENDOR_INTC_WR_MASK_LO = Interrupt Write Done Group Mask
+ *
+ */
+union cvmx_endor_intc_wr_mask_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_wr_mask_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_29_31 : 3;
+ uint32_t t1_rfif_1 : 1; /**< RFIF_1 Write Done */
+ uint32_t t1_rfif_0 : 1; /**< RFIF_0 Write Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Write Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Write Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Write Done */
+ uint32_t axi_tx : 1; /**< TX to Host Write Done */
+ uint32_t t3_instr : 1; /**< TX Instr Write Done */
+ uint32_t t3_int : 1; /**< PHY to TX Write Done */
+ uint32_t t3_ext : 1; /**< Host to TX Write Done */
+ uint32_t t2_instr : 1; /**< RX1 Instr Write Done */
+ uint32_t t2_harq : 1; /**< Host to HARQ Write Done */
+ uint32_t t2_int : 1; /**< PHY to RX1 Write Done */
+ uint32_t t2_ext : 1; /**< Host to RX1 Write Done */
+ uint32_t t1_instr : 1; /**< RX0 Instr Write Done */
+ uint32_t t1_int : 1; /**< PHY to RX0 Write Done */
+ uint32_t t1_ext : 1; /**< Host to RX0 Write Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Write Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Write Done */
+ uint32_t lteenc_cch : 1; /**< LTE Encoder CCH Write Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Write Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Write Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Write Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Write Done */
+ uint32_t turbo_sb : 1; /**< Turbo Decoder Soft Bits Write Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Write Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Write Done */
+ uint32_t rachsnif_1 : 1; /**< RACH_1 Write Done */
+ uint32_t rachsnif_0 : 1; /**< RACH_0 Write Done */
+ uint32_t ulfe : 1; /**< ULFE Write Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif_0 : 1;
+ uint32_t rachsnif_1 : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_sb : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t lteenc_cch : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t1_instr : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_int : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_instr : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t t3_instr : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t1_rfif_0 : 1;
+ uint32_t t1_rfif_1 : 1;
+ uint32_t reserved_29_31 : 3;
+#endif
+ } s;
+ struct cvmx_endor_intc_wr_mask_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wr_mask_lox cvmx_endor_intc_wr_mask_lox_t;
+
+/**
+ * cvmx_endor_intc_wr_rint
+ *
+ * ENDOR_INTC_WR_RINT - Write Done Group Raw Interrupt Status
+ *
+ */
+union cvmx_endor_intc_wr_rint {
+ uint32_t u32;
+ struct cvmx_endor_intc_wr_rint_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_29_31 : 3;
+ uint32_t t1_rfif_1 : 1; /**< RFIF_1 Write Done */
+ uint32_t t1_rfif_0 : 1; /**< RFIF_0 Write Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Write Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Write Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Write Done */
+ uint32_t axi_tx : 1; /**< TX to Host Write Done */
+ uint32_t t3_instr : 1; /**< TX Instr Write Done */
+ uint32_t t3_int : 1; /**< PHY to TX Write Done */
+ uint32_t t3_ext : 1; /**< Host to TX Write Done */
+ uint32_t t2_instr : 1; /**< RX1 Instr Write Done */
+ uint32_t t2_harq : 1; /**< Host to HARQ Write Done */
+ uint32_t t2_int : 1; /**< PHY to RX1 Write Done */
+ uint32_t t2_ext : 1; /**< Host to RX1 Write Done */
+ uint32_t t1_instr : 1; /**< RX0 Instr Write Done */
+ uint32_t t1_int : 1; /**< PHY to RX0 Write Done */
+ uint32_t t1_ext : 1; /**< Host to RX0 Write Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Write Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Write Done */
+ uint32_t lteenc_cch : 1; /**< LTE Encoder CCH Write Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Write Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Write Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Write Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Write Done */
+ uint32_t turbo_sb : 1; /**< Turbo Decoder Soft Bits Write Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Write Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Write Done */
+ uint32_t rachsnif_1 : 1; /**< RACH_1 Write Done */
+ uint32_t rachsnif_0 : 1; /**< RACH_0 Write Done */
+ uint32_t ulfe : 1; /**< ULFE Write Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif_0 : 1;
+ uint32_t rachsnif_1 : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_sb : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t lteenc_cch : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t1_instr : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_int : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_instr : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t t3_instr : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t1_rfif_0 : 1;
+ uint32_t t1_rfif_1 : 1;
+ uint32_t reserved_29_31 : 3;
+#endif
+ } s;
+ struct cvmx_endor_intc_wr_rint_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wr_rint cvmx_endor_intc_wr_rint_t;
+
+/**
+ * cvmx_endor_intc_wr_status_hi#
+ *
+ * ENDOR_INTC_WR_STATUS_HI - Write Done Group Interrupt Status
+ *
+ */
+union cvmx_endor_intc_wr_status_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_wr_status_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_29_31 : 3;
+ uint32_t t1_rfif_1 : 1; /**< RFIF_1 Write Done */
+ uint32_t t1_rfif_0 : 1; /**< RFIF_0 Write Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Write Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Write Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Write Done */
+ uint32_t axi_tx : 1; /**< TX to Host Write Done */
+ uint32_t t3_instr : 1; /**< TX Instr Write Done */
+ uint32_t t3_int : 1; /**< PHY to TX Write Done */
+ uint32_t t3_ext : 1; /**< Host to TX Write Done */
+ uint32_t t2_instr : 1; /**< RX1 Instr Write Done */
+ uint32_t t2_harq : 1; /**< Host to HARQ Write Done */
+ uint32_t t2_int : 1; /**< PHY to RX1 Write Done */
+ uint32_t t2_ext : 1; /**< Host to RX1 Write Done */
+ uint32_t t1_instr : 1; /**< RX0 Instr Write Done */
+ uint32_t t1_int : 1; /**< PHY to RX0 Write Done */
+ uint32_t t1_ext : 1; /**< Host to RX0 Write Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Write Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Write Done */
+ uint32_t lteenc_cch : 1; /**< LTE Encoder CCH Write Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Write Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Write Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Write Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Write Done */
+ uint32_t turbo_sb : 1; /**< Turbo Decoder Soft Bits Write Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Write Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Write Done */
+ uint32_t rachsnif_1 : 1; /**< RACH_1 Write Done */
+ uint32_t rachsnif_0 : 1; /**< RACH_0 Write Done */
+ uint32_t ulfe : 1; /**< ULFE Write Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif_0 : 1;
+ uint32_t rachsnif_1 : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_sb : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t lteenc_cch : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t1_instr : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_int : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_instr : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t t3_instr : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t1_rfif_0 : 1;
+ uint32_t t1_rfif_1 : 1;
+ uint32_t reserved_29_31 : 3;
+#endif
+ } s;
+ struct cvmx_endor_intc_wr_status_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wr_status_hix cvmx_endor_intc_wr_status_hix_t;
+
+/**
+ * cvmx_endor_intc_wr_status_lo#
+ *
+ * ENDOR_INTC_WR_STATUS_LO - Write Done Group Interrupt Status
+ *
+ */
+union cvmx_endor_intc_wr_status_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_wr_status_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_29_31 : 3;
+ uint32_t t1_rfif_1 : 1; /**< RFIF_1 Write Done */
+ uint32_t t1_rfif_0 : 1; /**< RFIF_0 Write Done */
+ uint32_t axi_rx1_harq : 1; /**< HARQ to Host Write Done */
+ uint32_t axi_rx1 : 1; /**< RX1 to Host Write Done */
+ uint32_t axi_rx0 : 1; /**< RX0 to Host Write Done */
+ uint32_t axi_tx : 1; /**< TX to Host Write Done */
+ uint32_t t3_instr : 1; /**< TX Instr Write Done */
+ uint32_t t3_int : 1; /**< PHY to TX Write Done */
+ uint32_t t3_ext : 1; /**< Host to TX Write Done */
+ uint32_t t2_instr : 1; /**< RX1 Instr Write Done */
+ uint32_t t2_harq : 1; /**< Host to HARQ Write Done */
+ uint32_t t2_int : 1; /**< PHY to RX1 Write Done */
+ uint32_t t2_ext : 1; /**< Host to RX1 Write Done */
+ uint32_t t1_instr : 1; /**< RX0 Instr Write Done */
+ uint32_t t1_int : 1; /**< PHY to RX0 Write Done */
+ uint32_t t1_ext : 1; /**< Host to RX0 Write Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Write Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Write Done */
+ uint32_t lteenc_cch : 1; /**< LTE Encoder CCH Write Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Write Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Write Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Write Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Write Done */
+ uint32_t turbo_sb : 1; /**< Turbo Decoder Soft Bits Write Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Write Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Write Done */
+ uint32_t rachsnif_1 : 1; /**< RACH_1 Write Done */
+ uint32_t rachsnif_0 : 1; /**< RACH_0 Write Done */
+ uint32_t ulfe : 1; /**< ULFE Write Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif_0 : 1;
+ uint32_t rachsnif_1 : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_sb : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t lteenc_cch : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t1_instr : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_int : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_instr : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t t3_instr : 1;
+ uint32_t axi_tx : 1;
+ uint32_t axi_rx0 : 1;
+ uint32_t axi_rx1 : 1;
+ uint32_t axi_rx1_harq : 1;
+ uint32_t t1_rfif_0 : 1;
+ uint32_t t1_rfif_1 : 1;
+ uint32_t reserved_29_31 : 3;
+#endif
+ } s;
+ struct cvmx_endor_intc_wr_status_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wr_status_lox cvmx_endor_intc_wr_status_lox_t;
+
+/**
+ * cvmx_endor_intc_wrq_idx_hi#
+ *
+ * ENDOR_INTC_WRQ_IDX_HI - Write Queue Done Group Index HI
+ *
+ */
+union cvmx_endor_intc_wrq_idx_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_wrq_idx_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< Write Queue Done Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_wrq_idx_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wrq_idx_hix cvmx_endor_intc_wrq_idx_hix_t;
+
+/**
+ * cvmx_endor_intc_wrq_idx_lo#
+ *
+ * ENDOR_INTC_WRQ_IDX_LO - Write Queue Done Group Index LO
+ *
+ */
+union cvmx_endor_intc_wrq_idx_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_wrq_idx_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t grpidx : 6; /**< Write Queue Done Group Interrupt Index */
+#else
+ uint32_t grpidx : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_intc_wrq_idx_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wrq_idx_lox cvmx_endor_intc_wrq_idx_lox_t;
+
+/**
+ * cvmx_endor_intc_wrq_mask_hi#
+ *
+ * ENDOR_INTC_WRQ_MASK_HI = Interrupt Write Queue Done Group Mask
+ *
+ */
+union cvmx_endor_intc_wrq_mask_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_wrq_mask_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t t3_instr : 1; /**< TX Instr Write Done */
+ uint32_t t3_int : 1; /**< PHY to TX Write Done */
+ uint32_t t3_ext : 1; /**< Host to TX Write Done */
+ uint32_t t2_instr : 1; /**< RX1 Instr Write Done */
+ uint32_t t2_harq : 1; /**< Host to HARQ Write Done */
+ uint32_t t2_int : 1; /**< PHY to RX1 Write Done */
+ uint32_t t2_ext : 1; /**< Host to RX1 Write Done */
+ uint32_t t1_instr : 1; /**< RX0 Instr Write Done */
+ uint32_t t1_int : 1; /**< PHY to RX0 Write Done */
+ uint32_t t1_ext : 1; /**< Host to RX0 Write Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Write Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Write Done */
+ uint32_t lteenc_cch : 1; /**< LTE Encoder CCH Write Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Write Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Write Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Write Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Write Done */
+ uint32_t turbo_sb : 1; /**< Turbo Decoder Soft Bits Write Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Write Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Write Done */
+ uint32_t rachsnif_1 : 1; /**< RACH_1 Write Done */
+ uint32_t rachsnif_0 : 1; /**< RACH_0 Write Done */
+ uint32_t ulfe : 1; /**< ULFE Write Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif_0 : 1;
+ uint32_t rachsnif_1 : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_sb : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t lteenc_cch : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t1_instr : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_int : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_instr : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t t3_instr : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_endor_intc_wrq_mask_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wrq_mask_hix cvmx_endor_intc_wrq_mask_hix_t;
+
+/**
+ * cvmx_endor_intc_wrq_mask_lo#
+ *
+ * ENDOR_INTC_WRQ_MASK_LO = Interrupt Write Queue Done Group Mask
+ *
+ */
+union cvmx_endor_intc_wrq_mask_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_wrq_mask_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t t3_instr : 1; /**< TX Instr Write Done */
+ uint32_t t3_int : 1; /**< PHY to TX Write Done */
+ uint32_t t3_ext : 1; /**< Host to TX Write Done */
+ uint32_t t2_instr : 1; /**< RX1 Instr Write Done */
+ uint32_t t2_harq : 1; /**< Host to HARQ Write Done */
+ uint32_t t2_int : 1; /**< PHY to RX1 Write Done */
+ uint32_t t2_ext : 1; /**< Host to RX1 Write Done */
+ uint32_t t1_instr : 1; /**< RX0 Instr Write Done */
+ uint32_t t1_int : 1; /**< PHY to RX0 Write Done */
+ uint32_t t1_ext : 1; /**< Host to RX0 Write Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Write Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Write Done */
+ uint32_t lteenc_cch : 1; /**< LTE Encoder CCH Write Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Write Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Write Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Write Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Write Done */
+ uint32_t turbo_sb : 1; /**< Turbo Decoder Soft Bits Write Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Write Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Write Done */
+ uint32_t rachsnif_1 : 1; /**< RACH_1 Write Done */
+ uint32_t rachsnif_0 : 1; /**< RACH_0 Write Done */
+ uint32_t ulfe : 1; /**< ULFE Write Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif_0 : 1;
+ uint32_t rachsnif_1 : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_sb : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t lteenc_cch : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t1_instr : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_int : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_instr : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t t3_instr : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_endor_intc_wrq_mask_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wrq_mask_lox cvmx_endor_intc_wrq_mask_lox_t;
+
+/**
+ * cvmx_endor_intc_wrq_rint
+ *
+ * ENDOR_INTC_WRQ_RINT - Write Queue Done Group Raw Interrupt Status
+ *
+ */
+union cvmx_endor_intc_wrq_rint {
+ uint32_t u32;
+ struct cvmx_endor_intc_wrq_rint_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t t3_instr : 1; /**< TX Instr Write Done */
+ uint32_t t3_int : 1; /**< PHY to TX Write Done */
+ uint32_t t3_ext : 1; /**< Host to TX Write Done */
+ uint32_t t2_instr : 1; /**< RX1 Instr Write Done */
+ uint32_t t2_harq : 1; /**< Host to HARQ Write Done */
+ uint32_t t2_int : 1; /**< PHY to RX1 Write Done */
+ uint32_t t2_ext : 1; /**< Host to RX1 Write Done */
+ uint32_t t1_instr : 1; /**< RX0 Instr Write Done */
+ uint32_t t1_int : 1; /**< PHY to RX0 Write Done */
+ uint32_t t1_ext : 1; /**< Host to RX0 Write Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Write Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Write Done */
+ uint32_t lteenc_cch : 1; /**< LTE Encoder CCH Write Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Write Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Write Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Write Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Write Done */
+ uint32_t turbo_sb : 1; /**< Turbo Decoder Soft Bits Write Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Write Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Write Done */
+ uint32_t rachsnif_1 : 1; /**< RACH_1 Write Done */
+ uint32_t rachsnif_0 : 1; /**< RACH_0 Write Done */
+ uint32_t ulfe : 1; /**< ULFE Write Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif_0 : 1;
+ uint32_t rachsnif_1 : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_sb : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t lteenc_cch : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t1_instr : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_int : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_instr : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t t3_instr : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_endor_intc_wrq_rint_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wrq_rint cvmx_endor_intc_wrq_rint_t;
+
+/**
+ * cvmx_endor_intc_wrq_status_hi#
+ *
+ * ENDOR_INTC_WRQ_STATUS_HI = Interrupt Write Queue Done Group Status
+ *
+ */
+union cvmx_endor_intc_wrq_status_hix {
+ uint32_t u32;
+ struct cvmx_endor_intc_wrq_status_hix_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t t3_instr : 1; /**< TX Instr Write Done */
+ uint32_t t3_int : 1; /**< PHY to TX Write Done */
+ uint32_t t3_ext : 1; /**< Host to TX Write Done */
+ uint32_t t2_instr : 1; /**< RX1 Instr Write Done */
+ uint32_t t2_harq : 1; /**< Host to HARQ Write Done */
+ uint32_t t2_int : 1; /**< PHY to RX1 Write Done */
+ uint32_t t2_ext : 1; /**< Host to RX1 Write Done */
+ uint32_t t1_instr : 1; /**< RX0 Instr Write Done */
+ uint32_t t1_int : 1; /**< PHY to RX0 Write Done */
+ uint32_t t1_ext : 1; /**< Host to RX0 Write Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Write Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Write Done */
+ uint32_t lteenc_cch : 1; /**< LTE Encoder CCH Write Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Write Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Write Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Write Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Write Done */
+ uint32_t turbo_sb : 1; /**< Turbo Decoder Soft Bits Write Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Write Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Write Done */
+ uint32_t rachsnif_1 : 1; /**< RACH_1 Write Done */
+ uint32_t rachsnif_0 : 1; /**< RACH_0 Write Done */
+ uint32_t ulfe : 1; /**< ULFE Write Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif_0 : 1;
+ uint32_t rachsnif_1 : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_sb : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t lteenc_cch : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t1_instr : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_int : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_instr : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t t3_instr : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_endor_intc_wrq_status_hix_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wrq_status_hix cvmx_endor_intc_wrq_status_hix_t;
+
+/**
+ * cvmx_endor_intc_wrq_status_lo#
+ *
+ * ENDOR_INTC_WRQ_STATUS_LO = Interrupt Write Queue Done Group Status
+ *
+ */
+union cvmx_endor_intc_wrq_status_lox {
+ uint32_t u32;
+ struct cvmx_endor_intc_wrq_status_lox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t t3_instr : 1; /**< TX Instr Write Done */
+ uint32_t t3_int : 1; /**< PHY to TX Write Done */
+ uint32_t t3_ext : 1; /**< Host to TX Write Done */
+ uint32_t t2_instr : 1; /**< RX1 Instr Write Done */
+ uint32_t t2_harq : 1; /**< Host to HARQ Write Done */
+ uint32_t t2_int : 1; /**< PHY to RX1 Write Done */
+ uint32_t t2_ext : 1; /**< Host to RX1 Write Done */
+ uint32_t t1_instr : 1; /**< RX0 Instr Write Done */
+ uint32_t t1_int : 1; /**< PHY to RX0 Write Done */
+ uint32_t t1_ext : 1; /**< Host to RX0 Write Done */
+ uint32_t ifftpapr_1 : 1; /**< IFFTPAPR_1 Write Done */
+ uint32_t ifftpapr_0 : 1; /**< IFFTPAPR_0 Write Done */
+ uint32_t lteenc_cch : 1; /**< LTE Encoder CCH Write Done */
+ uint32_t lteenc_tb1 : 1; /**< LTE Encoder TB1 Write Done */
+ uint32_t lteenc_tb0 : 1; /**< LTE Encoder TB0 Write Done */
+ uint32_t vitbdec : 1; /**< Viterbi Decoder Write Done */
+ uint32_t turbo_hq : 1; /**< Turbo Decoder HARQ Write Done */
+ uint32_t turbo_sb : 1; /**< Turbo Decoder Soft Bits Write Done */
+ uint32_t turbo : 1; /**< Turbo Decoder Write Done */
+ uint32_t dftdm : 1; /**< DFT/Demapper Write Done */
+ uint32_t rachsnif_1 : 1; /**< RACH_1 Write Done */
+ uint32_t rachsnif_0 : 1; /**< RACH_0 Write Done */
+ uint32_t ulfe : 1; /**< ULFE Write Done */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachsnif_0 : 1;
+ uint32_t rachsnif_1 : 1;
+ uint32_t dftdm : 1;
+ uint32_t turbo : 1;
+ uint32_t turbo_sb : 1;
+ uint32_t turbo_hq : 1;
+ uint32_t vitbdec : 1;
+ uint32_t lteenc_tb0 : 1;
+ uint32_t lteenc_tb1 : 1;
+ uint32_t lteenc_cch : 1;
+ uint32_t ifftpapr_0 : 1;
+ uint32_t ifftpapr_1 : 1;
+ uint32_t t1_ext : 1;
+ uint32_t t1_int : 1;
+ uint32_t t1_instr : 1;
+ uint32_t t2_ext : 1;
+ uint32_t t2_int : 1;
+ uint32_t t2_harq : 1;
+ uint32_t t2_instr : 1;
+ uint32_t t3_ext : 1;
+ uint32_t t3_int : 1;
+ uint32_t t3_instr : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_endor_intc_wrq_status_lox_s cnf71xx;
+};
+typedef union cvmx_endor_intc_wrq_status_lox cvmx_endor_intc_wrq_status_lox_t;
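+
+/* Illustrative only (not part of the vendor header): reading one done flag
+ * out of a raw ENDOR_INTC_WRQ_STATUS_LO value through the union above. The
+ * #ifdef'd bitfield layouts let the same member name work on both
+ * endiannesses; the helper name is ours. */
+static inline int cvmx_endor_wrq_turbo_done(uint32_t raw)
+{
+ cvmx_endor_intc_wrq_status_lox_t st;
+ st.u32 = raw;
+ return st.s.turbo; /* 1 when the Turbo Decoder write has completed */
+}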
+
+/**
+ * cvmx_endor_ofs_hmm_cbuf_end_addr0
+ */
+union cvmx_endor_ofs_hmm_cbuf_end_addr0 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_cbuf_end_addr0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Circular buffer end address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_cbuf_end_addr0_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_cbuf_end_addr0 cvmx_endor_ofs_hmm_cbuf_end_addr0_t;
+
+/**
+ * cvmx_endor_ofs_hmm_cbuf_end_addr1
+ */
+union cvmx_endor_ofs_hmm_cbuf_end_addr1 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_cbuf_end_addr1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Circular buffer end address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_cbuf_end_addr1_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_cbuf_end_addr1 cvmx_endor_ofs_hmm_cbuf_end_addr1_t;
+
+/**
+ * cvmx_endor_ofs_hmm_cbuf_end_addr2
+ */
+union cvmx_endor_ofs_hmm_cbuf_end_addr2 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_cbuf_end_addr2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Circular buffer end address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_cbuf_end_addr2_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_cbuf_end_addr2 cvmx_endor_ofs_hmm_cbuf_end_addr2_t;
+
+/**
+ * cvmx_endor_ofs_hmm_cbuf_end_addr3
+ */
+union cvmx_endor_ofs_hmm_cbuf_end_addr3 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_cbuf_end_addr3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Circular buffer end address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_cbuf_end_addr3_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_cbuf_end_addr3 cvmx_endor_ofs_hmm_cbuf_end_addr3_t;
+
+/**
+ * cvmx_endor_ofs_hmm_cbuf_start_addr0
+ */
+union cvmx_endor_ofs_hmm_cbuf_start_addr0 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_cbuf_start_addr0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Circular buffer start address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_cbuf_start_addr0_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_cbuf_start_addr0 cvmx_endor_ofs_hmm_cbuf_start_addr0_t;
+
+/**
+ * cvmx_endor_ofs_hmm_cbuf_start_addr1
+ */
+union cvmx_endor_ofs_hmm_cbuf_start_addr1 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_cbuf_start_addr1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Circular buffer start address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_cbuf_start_addr1_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_cbuf_start_addr1 cvmx_endor_ofs_hmm_cbuf_start_addr1_t;
+
+/**
+ * cvmx_endor_ofs_hmm_cbuf_start_addr2
+ */
+union cvmx_endor_ofs_hmm_cbuf_start_addr2 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_cbuf_start_addr2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Circular buffer start address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_cbuf_start_addr2_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_cbuf_start_addr2 cvmx_endor_ofs_hmm_cbuf_start_addr2_t;
+
+/**
+ * cvmx_endor_ofs_hmm_cbuf_start_addr3
+ */
+union cvmx_endor_ofs_hmm_cbuf_start_addr3 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_cbuf_start_addr3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Circular buffer start address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_cbuf_start_addr3_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_cbuf_start_addr3 cvmx_endor_ofs_hmm_cbuf_start_addr3_t;
+
+/**
+ * cvmx_endor_ofs_hmm_intr_clear
+ */
+union cvmx_endor_ofs_hmm_intr_clear {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_intr_clear_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_2_31 : 30;
+ uint32_t xfer_q_empty : 1; /**< Clear the transfer queue empty interrupt. */
+ uint32_t xfer_complete : 1; /**< Clear the transfer complete interrupt. */
+#else
+ uint32_t xfer_complete : 1;
+ uint32_t xfer_q_empty : 1;
+ uint32_t reserved_2_31 : 30;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_intr_clear_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_intr_clear cvmx_endor_ofs_hmm_intr_clear_t;
+
+/**
+ * cvmx_endor_ofs_hmm_intr_enb
+ */
+union cvmx_endor_ofs_hmm_intr_enb {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_intr_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_2_31 : 30;
+ uint32_t xfer_q_empty : 1; /**< Enable the transfer queue empty interrupt. */
+ uint32_t xfer_complete : 1; /**< Enable the transfer complete interrupt. */
+#else
+ uint32_t xfer_complete : 1;
+ uint32_t xfer_q_empty : 1;
+ uint32_t reserved_2_31 : 30;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_intr_enb_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_intr_enb cvmx_endor_ofs_hmm_intr_enb_t;
+
+/**
+ * cvmx_endor_ofs_hmm_intr_rstatus
+ */
+union cvmx_endor_ofs_hmm_intr_rstatus {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_intr_rstatus_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_2_31 : 30;
+ uint32_t xfer_q_empty : 1; /**< Raw transfer queue empty interrupt status. */
+ uint32_t xfer_complete : 1; /**< Raw transfer complete interrupt status. */
+#else
+ uint32_t xfer_complete : 1;
+ uint32_t xfer_q_empty : 1;
+ uint32_t reserved_2_31 : 30;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_intr_rstatus_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_intr_rstatus cvmx_endor_ofs_hmm_intr_rstatus_t;
+
+/**
+ * cvmx_endor_ofs_hmm_intr_status
+ */
+union cvmx_endor_ofs_hmm_intr_status {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_intr_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_2_31 : 30;
+ uint32_t xfer_q_empty : 1; /**< Transfer queue empty interrupt status. */
+ uint32_t xfer_complete : 1; /**< Transfer complete interrupt status. */
+#else
+ uint32_t xfer_complete : 1;
+ uint32_t xfer_q_empty : 1;
+ uint32_t reserved_2_31 : 30;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_intr_status_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_intr_status cvmx_endor_ofs_hmm_intr_status_t;
+
+/**
+ * cvmx_endor_ofs_hmm_intr_test
+ */
+union cvmx_endor_ofs_hmm_intr_test {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_intr_test_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_2_31 : 30;
+ uint32_t xfer_q_empty : 1; /**< Test (set) the transfer queue empty interrupt. */
+ uint32_t xfer_complete : 1; /**< Test (set) the transfer complete interrupt. */
+#else
+ uint32_t xfer_complete : 1;
+ uint32_t xfer_q_empty : 1;
+ uint32_t reserved_2_31 : 30;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_intr_test_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_intr_test cvmx_endor_ofs_hmm_intr_test_t;
+
+/**
+ * cvmx_endor_ofs_hmm_mode
+ */
+union cvmx_endor_ofs_hmm_mode {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_mode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t itlv_bufmode : 2; /**< interleave buffer : 0==1:1, 1==2:1, 2==4:1 */
+ uint32_t reserved_2_3 : 2;
+ uint32_t mem_clr_enb : 1; /**< Memory clear enable. */
+ uint32_t auto_clk_enb : 1; /**< Automatic clock enable. */
+#else
+ uint32_t auto_clk_enb : 1;
+ uint32_t mem_clr_enb : 1;
+ uint32_t reserved_2_3 : 2;
+ uint32_t itlv_bufmode : 2;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_mode_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_mode cvmx_endor_ofs_hmm_mode_t;
+
+/**
+ * cvmx_endor_ofs_hmm_start_addr0
+ */
+union cvmx_endor_ofs_hmm_start_addr0 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_start_addr0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Start address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_start_addr0_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_start_addr0 cvmx_endor_ofs_hmm_start_addr0_t;
+
+/**
+ * cvmx_endor_ofs_hmm_start_addr1
+ */
+union cvmx_endor_ofs_hmm_start_addr1 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_start_addr1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Start address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_start_addr1_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_start_addr1 cvmx_endor_ofs_hmm_start_addr1_t;
+
+/**
+ * cvmx_endor_ofs_hmm_start_addr2
+ */
+union cvmx_endor_ofs_hmm_start_addr2 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_start_addr2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Start address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_start_addr2_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_start_addr2 cvmx_endor_ofs_hmm_start_addr2_t;
+
+/**
+ * cvmx_endor_ofs_hmm_start_addr3
+ */
+union cvmx_endor_ofs_hmm_start_addr3 {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_start_addr3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t addr : 24; /**< Start address. */
+#else
+ uint32_t addr : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_start_addr3_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_start_addr3 cvmx_endor_ofs_hmm_start_addr3_t;
+
+/**
+ * cvmx_endor_ofs_hmm_status
+ */
+union cvmx_endor_ofs_hmm_status {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_status_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_status cvmx_endor_ofs_hmm_status_t;
+
+/**
+ * cvmx_endor_ofs_hmm_xfer_cnt
+ */
+union cvmx_endor_ofs_hmm_xfer_cnt {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_xfer_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t xfer_comp_intr : 1; /**< transfer complete interrupt. */
+ uint32_t slice_mode : 1; /**< Slice mode. */
+ uint32_t cbuf_mode : 1; /**< Circular buffer mode. */
+ uint32_t reserved_16_28 : 13;
+ uint32_t wordcnt : 16; /**< word count. */
+#else
+ uint32_t wordcnt : 16;
+ uint32_t reserved_16_28 : 13;
+ uint32_t cbuf_mode : 1;
+ uint32_t slice_mode : 1;
+ uint32_t xfer_comp_intr : 1;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_xfer_cnt_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_xfer_cnt cvmx_endor_ofs_hmm_xfer_cnt_t;
+
+/**
+ * cvmx_endor_ofs_hmm_xfer_q_status
+ */
+union cvmx_endor_ofs_hmm_xfer_q_status {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_xfer_q_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t status : 32; /**< Number of slots available to queue buffer transactions. */
+#else
+ uint32_t status : 32;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_xfer_q_status_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_xfer_q_status cvmx_endor_ofs_hmm_xfer_q_status_t;
+
+/**
+ * cvmx_endor_ofs_hmm_xfer_start
+ */
+union cvmx_endor_ofs_hmm_xfer_start {
+ uint32_t u32;
+ struct cvmx_endor_ofs_hmm_xfer_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t start : 1; /**< Start the transfer. */
+#else
+ uint32_t start : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_endor_ofs_hmm_xfer_start_s cnf71xx;
+};
+typedef union cvmx_endor_ofs_hmm_xfer_start cvmx_endor_ofs_hmm_xfer_start_t;
+
+/**
+ * cvmx_endor_rfif_1pps_gen_cfg
+ */
+union cvmx_endor_rfif_1pps_gen_cfg {
+ uint32_t u32;
+ struct cvmx_endor_rfif_1pps_gen_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t ena : 1; /**< Enable 1PPS Generation and Tracking
+ - 0: 1PPS signal not tracked or generated
+ - 1: 1PPS signal generated and tracked */
+#else
+ uint32_t ena : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_endor_rfif_1pps_gen_cfg_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_1pps_gen_cfg cvmx_endor_rfif_1pps_gen_cfg_t;
+
+/**
+ * cvmx_endor_rfif_1pps_sample_cnt_offset
+ */
+union cvmx_endor_rfif_1pps_sample_cnt_offset {
+ uint32_t u32;
+ struct cvmx_endor_rfif_1pps_sample_cnt_offset_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t offset : 20; /**< This register holds the sample count at which the 1PPS
+ was received.
+ Upon reset, the sample counter starts at 0 when the
+ first 1PPS is received and then increments to wrap
+ around at FRAME_L-1. At each subsequent 1PPS, a
+ snapshot of the sample counter is taken and the count
+ is made available via this register. This enables
+ software to monitor the RF clock drift relative to
+ the 1PPS. */
+#else
+ uint32_t offset : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_1pps_sample_cnt_offset_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_1pps_sample_cnt_offset cvmx_endor_rfif_1pps_sample_cnt_offset_t;
+
+/**
+ * cvmx_endor_rfif_1pps_verif_gen_en
+ */
+union cvmx_endor_rfif_1pps_verif_gen_en {
+ uint32_t u32;
+ struct cvmx_endor_rfif_1pps_verif_gen_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t ena : 1; /**< 1PPS generation for verification purposes
+ - 0: Disabled (default)
+ - 1: Enabled
+ Note the external 1PPS is not considered when this bit
+ is set to 1. */
+#else
+ uint32_t ena : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_endor_rfif_1pps_verif_gen_en_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_1pps_verif_gen_en cvmx_endor_rfif_1pps_verif_gen_en_t;
+
+/**
+ * cvmx_endor_rfif_1pps_verif_scnt
+ */
+union cvmx_endor_rfif_1pps_verif_scnt {
+ uint32_t u32;
+ struct cvmx_endor_rfif_1pps_verif_scnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t cnt : 20; /**< Sample count at which the 1PPS is generated for
+ verification purposes. */
+#else
+ uint32_t cnt : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_1pps_verif_scnt_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_1pps_verif_scnt cvmx_endor_rfif_1pps_verif_scnt_t;
+
+/**
+ * cvmx_endor_rfif_conf
+ */
+union cvmx_endor_rfif_conf {
+ uint32_t u32;
+ struct cvmx_endor_rfif_conf_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_18_31 : 14;
+ uint32_t loopback : 1; /**< FDD loopback mode
+ - 0: Not in loopback mode (default)
+ - 1: Loops back the tx output to the rx input inside the
+ rf_if */
+ uint32_t mol : 1; /**< Manual Override Lock */
+ uint32_t upd_style : 1; /**< TX and RX Windows parameters update style (default: 0)
+ - 0: updated as written to the register (on the fly)
+ (not fully verified, but kept in case limitations are
+ found with the other update scheme)
+ - 1: updated at the specified time, given by registers 00F
+ and 90F.
+ Note the frame length is updated after the last TX
+ window. */
+ uint32_t diversity : 1; /**< RX diversity disable (Used to support FDD SISO with CLK
+ 4X)
+ - 0: Data gets written to the diversity FIFO in MIMO mode
+ (default).
+ - 1: No data written to the diversity FIFO in MIMO mode. */
+ uint32_t duplex : 1; /**< Division Duplex Mode
+ - 0: TDD (default)
+ - 1: FDD */
+ uint32_t prod_type : 1; /**< Product Type
+ - 0: UE (default), enables using the sync and timing
+ advance synchronization schemes.
+ - 1: eNB, enables using the 1PPS synchronization scheme. */
+ uint32_t txnrx_ctrl : 1; /**< RFIC IF TXnRX signal pulse control. Changing the value
+ of this bit generates a pulse on the TXNRX signal of
+ the RFIC interface. This feature is enabled when bit
+ 9 has already been asserted. */
+ uint32_t ena_ctrl : 1; /**< RFIC IF ENABLE signal pulse control. Changing the value
+ of this bit generates a pulse on the ENABLE signal of
+ the RFIC interface. This feature is enabled when bit 9
+ has already been asserted. */
+ uint32_t man_ctrl : 1; /**< RF IC Manual Control Enable. Setting this bit to 1
+ enables manual control of the TXNRX and ENABLE signals.
+ When set to 0 (default), the TXNRX and ENABLE signals
+ are automatically controlled when opening and closing
+ RX/TX windows. The manual mode is used to initialize
+ the RFIC in alert mode. */
+ uint32_t dsp_rx_int_en : 1; /**< DSP RX interrupt mask enable
+ - 0: DSP RX receives interrupts
+ - 1: DSP RX doesn't receive interrupts, needs to poll
+ ISRs */
+ uint32_t adi_en : 1; /**< ADI enable signal pulsed or leveled behavior
+ - 0: pulsed
+ - 1: leveled */
+ uint32_t clr_fifo_of : 1; /**< Clear RX FIFO overflow flag. */
+ uint32_t clr_fifo_ur : 1; /**< Clear RX FIFO under run flag. */
+ uint32_t wavesat_mode : 1; /**< AD9361 wavesat mode, where enable becomes rx_control
+ and txnrx becomes tx_control. The wavesat mode permits
+ independent control of the rx and tx data flows.
+ - 0: wavesat mode
+ - 1: regular mode */
+ uint32_t flush : 1; /**< Flush RX FIFO auto clear register. */
+ uint32_t inv : 1; /**< Data inversion (bit 0 becomes bit 11, bit 1 becomes 10) */
+ uint32_t mode : 1; /**< 0: SISO 1: MIMO */
+ uint32_t enable : 1; /**< 1=enable, 0=disabled */
+#else
+ uint32_t enable : 1;
+ uint32_t mode : 1;
+ uint32_t inv : 1;
+ uint32_t flush : 1;
+ uint32_t wavesat_mode : 1;
+ uint32_t clr_fifo_ur : 1;
+ uint32_t clr_fifo_of : 1;
+ uint32_t adi_en : 1;
+ uint32_t dsp_rx_int_en : 1;
+ uint32_t man_ctrl : 1;
+ uint32_t ena_ctrl : 1;
+ uint32_t txnrx_ctrl : 1;
+ uint32_t prod_type : 1;
+ uint32_t duplex : 1;
+ uint32_t diversity : 1;
+ uint32_t upd_style : 1;
+ uint32_t mol : 1;
+ uint32_t loopback : 1;
+ uint32_t reserved_18_31 : 14;
+#endif
+ } s;
+ struct cvmx_endor_rfif_conf_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_conf cvmx_endor_rfif_conf_t;
+
+/**
+ * cvmx_endor_rfif_conf2
+ */
+union cvmx_endor_rfif_conf2 {
+ uint32_t u32;
+ struct cvmx_endor_rfif_conf2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_3_31 : 29;
+ uint32_t latency : 1; /**< RF DATA variable latency
+ - 0: fixed latency (prior to the AD9361)
+ - 1: variable latency (starting with the AD9361) */
+ uint32_t iq_cfg : 1; /**< IQ port configuration
+ - 0: Single port (10MHz BW and less)
+ - 1: Dual ports (more than 10MHz BW) */
+ uint32_t behavior : 1; /**< RX and TX FRAME signals behavior:
+ - 0: Pulsed every frame
+ - 1: Leveled during the whole RX and TX periods */
+#else
+ uint32_t behavior : 1;
+ uint32_t iq_cfg : 1;
+ uint32_t latency : 1;
+ uint32_t reserved_3_31 : 29;
+#endif
+ } s;
+ struct cvmx_endor_rfif_conf2_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_conf2 cvmx_endor_rfif_conf2_t;
+
+/**
+ * cvmx_endor_rfif_dsp1_gpio
+ */
+union cvmx_endor_rfif_dsp1_gpio {
+ uint32_t u32;
+ struct cvmx_endor_rfif_dsp1_gpio_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_4_31 : 28;
+ uint32_t val : 4; /**< Values to output to the DSP1_GPIO ports */
+#else
+ uint32_t val : 4;
+ uint32_t reserved_4_31 : 28;
+#endif
+ } s;
+ struct cvmx_endor_rfif_dsp1_gpio_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_dsp1_gpio cvmx_endor_rfif_dsp1_gpio_t;
+
+/**
+ * cvmx_endor_rfif_dsp_rx_his
+ */
+union cvmx_endor_rfif_dsp_rx_his {
+ uint32_t u32;
+ struct cvmx_endor_rfif_dsp_rx_his_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } s;
+ struct cvmx_endor_rfif_dsp_rx_his_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_dsp_rx_his cvmx_endor_rfif_dsp_rx_his_t;
+
+/**
+ * cvmx_endor_rfif_dsp_rx_ism
+ */
+union cvmx_endor_rfif_dsp_rx_ism {
+ uint32_t u32;
+ struct cvmx_endor_rfif_dsp_rx_ism_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t ena : 8; /**< Enable interrupt bits. Set each bit to 1 to enable
+ the interrupts listed in the table below. The default
+ value is 0x0. */
+ uint32_t reserved_0_15 : 16;
+#else
+ uint32_t reserved_0_15 : 16;
+ uint32_t ena : 8;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_rfif_dsp_rx_ism_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_dsp_rx_ism cvmx_endor_rfif_dsp_rx_ism_t;
+
+/**
+ * cvmx_endor_rfif_firs_enable
+ */
+union cvmx_endor_rfif_firs_enable {
+ uint32_t u32;
+ struct cvmx_endor_rfif_firs_enable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_4_31 : 28;
+ uint32_t tx_div_fil : 1; /**< TX DIV filtering control bit
+ - 0: TX DIV filtering disabled
+ - 1: TX DIV filtering enabled */
+ uint32_t tx_fil : 1; /**< TX filtering control bit
+ - 0: TX filtering disabled
+ - 1: TX filtering enabled */
+ uint32_t rx_dif_fil : 1; /**< RX DIV filtering control bit
+ - 0: RX DIV filtering disabled
+ - 1: RX DIV filtering enabled */
+ uint32_t rx_fil : 1; /**< RX filtering control bit
+ - 0: RX filtering disabled
+ - 1: RX filtering enabled */
+#else
+ uint32_t rx_fil : 1;
+ uint32_t rx_dif_fil : 1;
+ uint32_t tx_fil : 1;
+ uint32_t tx_div_fil : 1;
+ uint32_t reserved_4_31 : 28;
+#endif
+ } s;
+ struct cvmx_endor_rfif_firs_enable_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_firs_enable cvmx_endor_rfif_firs_enable_t;
+
+/**
+ * cvmx_endor_rfif_frame_cnt
+ */
+union cvmx_endor_rfif_frame_cnt {
+ uint32_t u32;
+ struct cvmx_endor_rfif_frame_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t cnt : 20; /**< Frame count (value wraps around 2**16) */
+#else
+ uint32_t cnt : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_frame_cnt_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_frame_cnt cvmx_endor_rfif_frame_cnt_t;
+
+/**
+ * cvmx_endor_rfif_frame_l
+ */
+union cvmx_endor_rfif_frame_l {
+ uint32_t u32;
+ struct cvmx_endor_rfif_frame_l_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t length : 20; /**< Frame length in terms of RF clock cycles:
+ RFIC in single port modes
+ TDD SISO: FRAME_L = num_samples
+ TDD MIMO: FRAME_L = num_samples * 2
+ FDD SISO: FRAME_L = num_samples * 2
+ FDD MIMO: FRAME_L = num_samples * 4
+ RFIC in dual port modes
+ TDD SISO: FRAME_L = num_samples * 0.5
+ TDD MIMO: FRAME_L = num_samples
+ FDD SISO: FRAME_L = num_samples
+ FDD MIMO: FRAME_L = num_samples * 2 */
+#else
+ uint32_t length : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_frame_l_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_frame_l cvmx_endor_rfif_frame_l_t;
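+
+/* Illustrative only (not part of the vendor header): the FRAME_L table above
+ * reduces to one expression. num_samples, mimo, fdd and dual_port are
+ * caller-supplied flags; the helper name is ours. */
+static inline uint32_t cvmx_endor_frame_l_cycles(uint32_t num_samples,
+ int mimo, int fdd, int dual_port)
+{
+ uint32_t cycles = num_samples;
+ if (mimo)
+  cycles *= 2; /* MIMO doubles the samples per frame */
+ if (fdd)
+  cycles *= 2; /* FDD carries RX and TX on the same interface */
+ if (dual_port)
+  cycles /= 2; /* dual-port modes move two samples per cycle */
+ return cycles;
+}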
+
+/**
+ * cvmx_endor_rfif_gpio_#
+ */
+union cvmx_endor_rfif_gpio_x {
+ uint32_t u32;
+ struct cvmx_endor_rfif_gpio_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t fall_val : 11; /**< Signed value (lead/lag) on falling edge of level signal */
+ uint32_t rise_val : 11; /**< Signed value (lead/lag) on rising edge of level signal */
+ uint32_t src : 2; /**< Signal active high source:
+ - 00: idle
+ - 01: RX
+ - 10: TX
+ - 11: idle */
+#else
+ uint32_t src : 2;
+ uint32_t rise_val : 11;
+ uint32_t fall_val : 11;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_rfif_gpio_x_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_gpio_x cvmx_endor_rfif_gpio_x_t;
+
+/**
+ * cvmx_endor_rfif_max_sample_adj
+ */
+union cvmx_endor_rfif_max_sample_adj {
+ uint32_t u32;
+ struct cvmx_endor_rfif_max_sample_adj_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_10_31 : 22;
+ uint32_t num : 10; /**< Indicates the maximum number of samples that can be
+ adjusted per frame. Note the value to be programmed
+ varies with the mode of operation as follows:
+ MAX_SAMPLE_ADJ = num_samples*MIMO*FDD*DP
+ Where:
+ MIMO = 2 in MIMO mode and 1 otherwise.
+ FDD = 2 in FDD mode and 1 otherwise.
+ DP = 0.5 in RF IF Dual Port mode, 1 otherwise. */
+#else
+ uint32_t num : 10;
+ uint32_t reserved_10_31 : 22;
+#endif
+ } s;
+ struct cvmx_endor_rfif_max_sample_adj_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_max_sample_adj cvmx_endor_rfif_max_sample_adj_t;
+
+/**
+ * cvmx_endor_rfif_min_sample_adj
+ */
+union cvmx_endor_rfif_min_sample_adj {
+ uint32_t u32;
+ struct cvmx_endor_rfif_min_sample_adj_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_10_31 : 22;
+ uint32_t num : 10; /**< Indicates the minimum number of samples that can be
+ adjusted per frame. Note the value to be programmed
+ varies with the mode of operation as follows:
+ MIN_SAMPLE_ADJ = num_samples*MIMO*FDD*DP
+ Where:
+ MIMO = 2 in MIMO mode and 1 otherwise.
+ FDD = 2 in FDD mode and 1 otherwise.
+ DP = 0.5 in RF IF Dual Port mode, 1 otherwise. */
+#else
+ uint32_t num : 10;
+ uint32_t reserved_10_31 : 22;
+#endif
+ } s;
+ struct cvmx_endor_rfif_min_sample_adj_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_min_sample_adj cvmx_endor_rfif_min_sample_adj_t;
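+
+/* Illustrative only (not part of the vendor header): both MAX_SAMPLE_ADJ and
+ * MIN_SAMPLE_ADJ use the num_samples*MIMO*FDD*DP scaling described above;
+ * this sketch computes the value to program. */
+static inline uint32_t cvmx_endor_sample_adj_value(uint32_t num_samples,
+ int mimo, int fdd, int dual_port)
+{
+ uint32_t v = num_samples * (mimo ? 2 : 1) * (fdd ? 2 : 1);
+ return dual_port ? v / 2 : v; /* DP = 0.5 in RF IF Dual Port mode */
+}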
+
+/**
+ * cvmx_endor_rfif_num_rx_win
+ */
+union cvmx_endor_rfif_num_rx_win {
+ uint32_t u32;
+ struct cvmx_endor_rfif_num_rx_win_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_3_31 : 29;
+ uint32_t num : 3; /**< Number of RX windows
+ - 0: No RX window
+ - 1: One RX window
+ - ...
+ - 4: Four RX windows
+ Other: Not defined */
+#else
+ uint32_t num : 3;
+ uint32_t reserved_3_31 : 29;
+#endif
+ } s;
+ struct cvmx_endor_rfif_num_rx_win_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_num_rx_win cvmx_endor_rfif_num_rx_win_t;
+
+/**
+ * cvmx_endor_rfif_pwm_enable
+ */
+union cvmx_endor_rfif_pwm_enable {
+ uint32_t u32;
+ struct cvmx_endor_rfif_pwm_enable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t ena : 1; /**< PWM signal generation enable:
+ - 1: PWM enabled
+ - 0: PWM disabled (default) */
+#else
+ uint32_t ena : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_endor_rfif_pwm_enable_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_pwm_enable cvmx_endor_rfif_pwm_enable_t;
+
+/**
+ * cvmx_endor_rfif_pwm_high_time
+ */
+union cvmx_endor_rfif_pwm_high_time {
+ uint32_t u32;
+ struct cvmx_endor_rfif_pwm_high_time_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t hi_time : 24; /**< PWM high time. The default is 0x00FFFF cycles. Program
+ to n for n+1 high cycles. */
+#else
+ uint32_t hi_time : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_rfif_pwm_high_time_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_pwm_high_time cvmx_endor_rfif_pwm_high_time_t;
+
+/**
+ * cvmx_endor_rfif_pwm_low_time
+ */
+union cvmx_endor_rfif_pwm_low_time {
+ uint32_t u32;
+ struct cvmx_endor_rfif_pwm_low_time_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t lo_time : 24; /**< PWM low time. The default is 0x00FFFF cycles. Program
+ to n for n+1 low cycles. */
+#else
+ uint32_t lo_time : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_rfif_pwm_low_time_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_pwm_low_time cvmx_endor_rfif_pwm_low_time_t;
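+
+/* Illustrative only (not part of the vendor header): both PWM time fields
+ * count n for n+1 cycles, so program one less than the desired cycle count. */
+static inline void cvmx_endor_pwm_times_fill(cvmx_endor_rfif_pwm_high_time_t *hi,
+ cvmx_endor_rfif_pwm_low_time_t *lo,
+ uint32_t high_cycles, uint32_t low_cycles)
+{
+ hi->u32 = 0;
+ lo->u32 = 0;
+ hi->s.hi_time = high_cycles - 1; /* n gives n+1 high cycles */
+ lo->s.lo_time = low_cycles - 1;  /* n gives n+1 low cycles */
+}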
+
+/**
+ * cvmx_endor_rfif_rd_timer64_lsb
+ */
+union cvmx_endor_rfif_rd_timer64_lsb {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rd_timer64_lsb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t val : 32; /**< 64-bit timer initial value of the 32 LSB.
+ Note the value written in WR_TIMER64_LSB does not
+ propagate until the timer64 is enabled. */
+#else
+ uint32_t val : 32;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rd_timer64_lsb_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rd_timer64_lsb cvmx_endor_rfif_rd_timer64_lsb_t;
+
+/**
+ * cvmx_endor_rfif_rd_timer64_msb
+ */
+union cvmx_endor_rfif_rd_timer64_msb {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rd_timer64_msb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t val : 32; /**< 64-bit timer initial value of the 32 MSB.
+ Note the value written in WR_TIMER64_MSB does not
+ propagate until the timer64 is enabled. */
+#else
+ uint32_t val : 32;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rd_timer64_msb_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rd_timer64_msb cvmx_endor_rfif_rd_timer64_msb_t;
+
+/**
+ * cvmx_endor_rfif_real_time_timer
+ */
+union cvmx_endor_rfif_real_time_timer {
+ uint32_t u32;
+ struct cvmx_endor_rfif_real_time_timer_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t timer : 32; /**< The full 32 bits of the real time timer fed from a core
+ clock based counter. */
+#else
+ uint32_t timer : 32;
+#endif
+ } s;
+ struct cvmx_endor_rfif_real_time_timer_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_real_time_timer cvmx_endor_rfif_real_time_timer_t;
+
+/**
+ * cvmx_endor_rfif_rf_clk_timer
+ */
+union cvmx_endor_rfif_rf_clk_timer {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rf_clk_timer_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t timer : 32; /**< Timer running off the RF CLK.
+ 1- The counter is disabled by default;
+ 2- The counter is enabled by writing 1 to register 066;
+ 3- The counter waits for the 1PPS to start incrementing
+ 4- The 1PPS is received and the counter starts
+ incrementing;
+ 5- The counter is reset after receiving the 30th 1PPS
+ (after 30 seconds);
+ 6- The counter keeps incrementing and is reset as in 5,
+ unless it is disabled. */
+#else
+ uint32_t timer : 32;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rf_clk_timer_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rf_clk_timer cvmx_endor_rfif_rf_clk_timer_t;
+
+/**
+ * cvmx_endor_rfif_rf_clk_timer_en
+ */
+union cvmx_endor_rfif_rf_clk_timer_en {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rf_clk_timer_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t ena : 1; /**< RF CLK based timer enable
+ - 0: Disabled
+ - 1: Enabled */
+#else
+ uint32_t ena : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rf_clk_timer_en_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rf_clk_timer_en cvmx_endor_rfif_rf_clk_timer_en_t;
+
+/**
+ * cvmx_endor_rfif_rx_correct_adj
+ */
+union cvmx_endor_rfif_rx_correct_adj {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_correct_adj_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_4_31 : 28;
+ uint32_t offset : 4; /**< Indicates the sample counter offset for the last sample
+ flag insertion, which determines when the rx samples
+ are dropped or added. This register can take values
+ from 0 to 15 and should be configured as follows:
+ 4, when MIN_SAMPLE_ADJ = 1
+ 5, when MIN_SAMPLE_ADJ = 2
+ 6, when MIN_SAMPLE_ADJ = 4 */
+#else
+ uint32_t offset : 4;
+ uint32_t reserved_4_31 : 28;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_correct_adj_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_correct_adj cvmx_endor_rfif_rx_correct_adj_t;
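+
+/* Illustrative only (not part of the vendor header): the OFFSET values in
+ * the table above follow 4 + log2(MIN_SAMPLE_ADJ) for the documented
+ * settings. */
+static inline uint32_t cvmx_endor_rx_correct_adj_offset(uint32_t min_sample_adj)
+{
+ switch (min_sample_adj) {
+ case 1: return 4;
+ case 2: return 5;
+ case 4: return 6;
+ default: return 4; /* other MIN_SAMPLE_ADJ values are not documented here */
+ }
+}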
+
+/**
+ * cvmx_endor_rfif_rx_div_status
+ *
+ * Notes:
+ * In TDD Mode, bits 15:12 are DDR state machine status.
+ *
+ */
+union cvmx_endor_rfif_rx_div_status {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_div_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t rfic_ena : 1; /**< RFIC enabled (in alert state) */
+ uint32_t sync_late : 1; /**< Sync late (Used for UE products). */
+ uint32_t reserved_19_20 : 2;
+ uint32_t thresh_rch : 1; /**< Threshold Reached (RX/RX_div/TX) */
+ uint32_t fifo_of : 1; /**< FIFO overflow */
+ uint32_t fifo_ur : 1; /**< FIFO underrun */
+ uint32_t tx_sm : 2; /**< TX state machine status */
+ uint32_t rx_sm : 2; /**< RX state machine status */
+ uint32_t hab_req_sm : 4; /**< HAB request manager SM
+ - 0: idle
+ - 1: wait_cs
+ - 2: Term
+ - 3: rd_fifo(RX)/ write fifo(TX)
+ - 4: wait_th
+ Others: not used */
+ uint32_t reserved_0_7 : 8;
+#else
+ uint32_t reserved_0_7 : 8;
+ uint32_t hab_req_sm : 4;
+ uint32_t rx_sm : 2;
+ uint32_t tx_sm : 2;
+ uint32_t fifo_ur : 1;
+ uint32_t fifo_of : 1;
+ uint32_t thresh_rch : 1;
+ uint32_t reserved_19_20 : 2;
+ uint32_t sync_late : 1;
+ uint32_t rfic_ena : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_div_status_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_div_status cvmx_endor_rfif_rx_div_status_t;
+
+/**
+ * cvmx_endor_rfif_rx_fifo_cnt
+ */
+union cvmx_endor_rfif_rx_fifo_cnt {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_fifo_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_13_31 : 19;
+ uint32_t cnt : 13; /**< RX FIFO fill level. This register can take values
+ between 0 and 5136. */
+#else
+ uint32_t cnt : 13;
+ uint32_t reserved_13_31 : 19;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_fifo_cnt_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_fifo_cnt cvmx_endor_rfif_rx_fifo_cnt_t;
+
+/**
+ * cvmx_endor_rfif_rx_if_cfg
+ */
+union cvmx_endor_rfif_rx_if_cfg {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_if_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t eorl : 1; /**< Early or Late TX_FRAME
+ - 0: The TX_FRAME asserts after the tx_lead and deasserts
+ before the tx_lag
+ - 1: The TX_FRAME asserts (3:0) cycles after the
+ TX_ON/ENABLE and deasserts (3:0) cycles after the
+ TX_ON/ENABLE signal. */
+ uint32_t half_lat : 1; /**< Half cycle latency
+ - 0: Captures I and Q on the falling and rising edge of
+ the clock respectively.
+ - 1: Captures I and Q on the rising and falling edge of
+ the clock respectively. */
+ uint32_t cap_lat : 4; /**< Enable-to-capture latency.
+ The data from the RF IC starts and stops being captured
+ a number of cycles after the enable pulse.
+ - 0: Invalid
+ - 1: One cycle of latency
+ - 2: Two cycles of latency
+ - 3: Three cycles of latency
+ - ...
+ - 15: Fifteen cycles of latency */
+#else
+ uint32_t cap_lat : 4;
+ uint32_t half_lat : 1;
+ uint32_t eorl : 1;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_if_cfg_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_if_cfg cvmx_endor_rfif_rx_if_cfg_t;
+
+/**
+ * cvmx_endor_rfif_rx_lead_lag
+ */
+union cvmx_endor_rfif_rx_lead_lag {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_lead_lag_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t lag : 12; /**< unsigned value (lag) on end of window */
+ uint32_t lead : 12; /**< unsigned value (lead) on beginning of window */
+#else
+ uint32_t lead : 12;
+ uint32_t lag : 12;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_lead_lag_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_lead_lag cvmx_endor_rfif_rx_lead_lag_t;
+
+/**
+ * cvmx_endor_rfif_rx_load_cfg
+ */
+union cvmx_endor_rfif_rx_load_cfg {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_load_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_13_31 : 19;
+ uint32_t hidden : 1; /**< Hidden bit set to 1 during synthesis
+ (set_case_analysis) if only one destination can be
+ programmed at a time. In this case there is no need to
+ gate the VLD with the RDYs, to ease timing closure. */
+ uint32_t reserved_9_11 : 3;
+ uint32_t alt_ant : 1; /**< Send data alternating antenna 0 (first) and antenna 1
+ (second) data on the RX HMI interface when set to 1.
+ By default, only the data from antenna 0 is sent on
+ this interface. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t exe3 : 1; /**< Setting this bit to 1 instructs the RF_IF to load
+ and execute the programmed DMA transfer size (register
+ RX_TRANSFER_SIZE) from the FIFO to destination 3. */
+ uint32_t exe2 : 1; /**< Setting this bit to 1 instructs the RF_IF to load
+ and execute the programmed DMA transfer size (register
+ RX_TRANSFER_SIZE) from the FIFO to destination 2. */
+ uint32_t exe1 : 1; /**< Setting this bit to 1 instructs the RF_IF to load
+ and execute the programmed DMA transfer size (register
+ RX_TRANSFER_SIZE) from the FIFO to destination 1. */
+#else
+ uint32_t exe1 : 1;
+ uint32_t exe2 : 1;
+ uint32_t exe3 : 1;
+ uint32_t reserved_3_7 : 5;
+ uint32_t alt_ant : 1;
+ uint32_t reserved_9_11 : 3;
+ uint32_t hidden : 1;
+ uint32_t reserved_13_31 : 19;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_load_cfg_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_load_cfg cvmx_endor_rfif_rx_load_cfg_t;
+
+/**
+ * cvmx_endor_rfif_rx_offset
+ */
+union cvmx_endor_rfif_rx_offset {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_offset_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t offset : 20; /**< Indicates the number of RF clock cycles after the
+ GPS/ETH 1PPS is received before the start of the RX
+ frame. See the description in Figure 44. */
+#else
+ uint32_t offset : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_offset_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_offset cvmx_endor_rfif_rx_offset_t;
+
+/**
+ * cvmx_endor_rfif_rx_offset_adj_scnt
+ */
+union cvmx_endor_rfif_rx_offset_adj_scnt {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_offset_adj_scnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t cnt : 20; /**< Indicates the RX sample count at which the 1PPS
+ incremental adjustments will be applied. */
+#else
+ uint32_t cnt : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_offset_adj_scnt_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_offset_adj_scnt cvmx_endor_rfif_rx_offset_adj_scnt_t;
+
+/**
+ * cvmx_endor_rfif_rx_status
+ *
+ * Notes:
+ * In TDD Mode, bits 15:12 are DDR state machine status.
+ *
+ */
+union cvmx_endor_rfif_rx_status {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t rfic_ena : 1; /**< RFIC enabled (in alert state) */
+ uint32_t sync_late : 1; /**< Sync late (Used for UE products). */
+ uint32_t reserved_19_20 : 2;
+ uint32_t thresh_rch : 1; /**< Threshold Reached (RX/RX_div/TX) */
+ uint32_t fifo_of : 1; /**< FIFO overflow */
+ uint32_t fifo_ur : 1; /**< FIFO underrun */
+ uint32_t tx_sm : 2; /**< TX state machine status */
+ uint32_t rx_sm : 2; /**< RX state machine status */
+ uint32_t hab_req_sm : 4; /**< HAB request manager SM
+ - 0: idle
+ - 1: wait_cs
+ - 2: Term
+ - 3: rd_fifo(RX)/ write fifo(TX)
+ - 4: wait_th
+ Others: not used */
+ uint32_t reserved_0_7 : 8;
+#else
+ uint32_t reserved_0_7 : 8;
+ uint32_t hab_req_sm : 4;
+ uint32_t rx_sm : 2;
+ uint32_t tx_sm : 2;
+ uint32_t fifo_ur : 1;
+ uint32_t fifo_of : 1;
+ uint32_t thresh_rch : 1;
+ uint32_t reserved_19_20 : 2;
+ uint32_t sync_late : 1;
+ uint32_t rfic_ena : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_status_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_status cvmx_endor_rfif_rx_status_t;
+
+/**
+ * cvmx_endor_rfif_rx_sync_scnt
+ */
+union cvmx_endor_rfif_rx_sync_scnt {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_sync_scnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t cnt : 20; /**< Sample count at which the start of frame reference will
+ be modified as described with register 0x30. */
+#else
+ uint32_t cnt : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_sync_scnt_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_sync_scnt cvmx_endor_rfif_rx_sync_scnt_t;
+
+/**
+ * cvmx_endor_rfif_rx_sync_value
+ */
+union cvmx_endor_rfif_rx_sync_value {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_sync_value_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t val : 20; /**< RX Synchronization offset value. This register
+ indicates the sample number to which the start of frame
+ must be moved. This value must be smaller than
+ FRAME_L, but it cannot be negative. See below how the
+ sample count gets updated, based on registers 0x30 and
+ 0x31, at sample count RX_SYNC_VALUE.
+ If RX_SYNC_SCNT >= RX_SYNC_VALUE
+ sample_count = RX_SYNC_SCNT - RX_SYNC_VALUE + 1
+ Else
+ sample_count = RX_SYNC_SCNT + FRAME_L -
+ RX_SYNC_VALUE + 1
+ Note this is not used for eNB products, only for UE
+ products.
+ Note this register is cleared after the correction is
+ applied. */
+#else
+ uint32_t val : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_sync_value_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_sync_value cvmx_endor_rfif_rx_sync_value_t;
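+
+/* Illustrative only (not part of the vendor header): the sample count update
+ * applied at RX_SYNC_SCNT, straight from the two cases above. */
+static inline uint32_t cvmx_endor_rx_sync_next_count(uint32_t rx_sync_scnt,
+ uint32_t rx_sync_value,
+ uint32_t frame_l)
+{
+ if (rx_sync_scnt >= rx_sync_value)
+  return rx_sync_scnt - rx_sync_value + 1;
+ return rx_sync_scnt + frame_l - rx_sync_value + 1; /* wrap within the frame */
+}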
+
+/**
+ * cvmx_endor_rfif_rx_th
+ */
+union cvmx_endor_rfif_rx_th {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_th_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_12_31 : 20;
+ uint32_t thr : 12; /**< FIFO level reached before granting an RX DMA request.
+ This RX FIFO fill level threshold can be used
+ in two ways:
+ 1- When the FIFO fill level reaches the threshold,
+ there is enough data in the FIFO to start the data
+ transfer, so it grants a DMA transfer from the RX FIFO
+ to the HAB's memory.
+ 2- It can also be used to generate an interrupt to
+ the DSP when the FIFO threshold is reached. */
+#else
+ uint32_t thr : 12;
+ uint32_t reserved_12_31 : 20;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_th_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_th cvmx_endor_rfif_rx_th_t;
+
+/**
+ * cvmx_endor_rfif_rx_transfer_size
+ */
+union cvmx_endor_rfif_rx_transfer_size {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_transfer_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_13_31 : 19;
+ uint32_t size : 13; /**< Indicates the size of the DMA data transfer from the
+ rf_if RX FIFO out via the HMI IF.
+ The DMA transfers go to HAB1 and HAB2. */
+#else
+ uint32_t size : 13;
+ uint32_t reserved_13_31 : 19;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_transfer_size_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_transfer_size cvmx_endor_rfif_rx_transfer_size_t;
+
+/**
+ * cvmx_endor_rfif_rx_w_e#
+ */
+union cvmx_endor_rfif_rx_w_ex {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_w_ex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t end_cnt : 20; /**< End count for each of the 4 RX windows. The maximum
+ value should be FRAME_L, unless the window must stay
+ open forever. */
+#else
+ uint32_t end_cnt : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_w_ex_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_w_ex cvmx_endor_rfif_rx_w_ex_t;
+
+/**
+ * cvmx_endor_rfif_rx_w_s#
+ */
+union cvmx_endor_rfif_rx_w_sx {
+ uint32_t u32;
+ struct cvmx_endor_rfif_rx_w_sx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t start_pnt : 20; /**< Start points for each of the 4 RX windows.
+ Some restrictions apply to the start and end values:
+ 1- The first RX window must always start at the sample
+ count 0.
+ 2- The other start points must be greater than rx_lead,
+ refer to 0x008.
+ 3- All start point values must be smaller than the
+ endpoints in TDD mode.
+ 4- RX windows have priority over TX windows in TDD
+ mode.
+ 5- There must be a minimum of 7 samples between
+ closing a window and opening a new one. However, it is
+ recommended to leave a 10-sample gap. Note that this
+ number could increase with different RF ICs used. */
+#else
+ uint32_t start_pnt : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_rx_w_sx_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_rx_w_sx cvmx_endor_rfif_rx_w_sx_t;
+
+/**
+ * cvmx_endor_rfif_sample_adj_cfg
+ */
+union cvmx_endor_rfif_sample_adj_cfg {
+ uint32_t u32;
+ struct cvmx_endor_rfif_sample_adj_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t adj : 1; /**< Indicates whether samples must be removed from the
+ beginning or the end of the frame.
+ - 1: add/remove samples from the beginning of the frame
+ - 0: add/remove samples from the end of the frame
+ (default) */
+#else
+ uint32_t adj : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_endor_rfif_sample_adj_cfg_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_sample_adj_cfg cvmx_endor_rfif_sample_adj_cfg_t;
+
+/**
+ * cvmx_endor_rfif_sample_adj_error
+ */
+union cvmx_endor_rfif_sample_adj_error {
+ uint32_t u32;
+ struct cvmx_endor_rfif_sample_adj_error_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t offset : 32; /**< Count of the number of times the TX FIFO did not have
+ enough IQ samples to be dropped for a TX timing
+ adjustment.
+ Bits 7:0 = TX FIFO sample adjustment error
+ Bits 23:16 = TX DIV sample adjustment error */
+#else
+ uint32_t offset : 32;
+#endif
+ } s;
+ struct cvmx_endor_rfif_sample_adj_error_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_sample_adj_error cvmx_endor_rfif_sample_adj_error_t;
+
+/**
+ * cvmx_endor_rfif_sample_cnt
+ */
+union cvmx_endor_rfif_sample_cnt {
+ uint32_t u32;
+ struct cvmx_endor_rfif_sample_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t cnt : 20; /**< Sample count modulo FRAME_L. The start of frame is
+ aligned with count 0. */
+#else
+ uint32_t cnt : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_sample_cnt_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_sample_cnt cvmx_endor_rfif_sample_cnt_t;
+
+/**
+ * cvmx_endor_rfif_skip_frm_cnt_bits
+ */
+union cvmx_endor_rfif_skip_frm_cnt_bits {
+ uint32_t u32;
+ struct cvmx_endor_rfif_skip_frm_cnt_bits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_2_31 : 30;
+ uint32_t bits : 2; /**< Indicates the number of sample count bits to skip, in
+ order to reduce the sample count update frequency and
+ permit a reliable clock crossing from the RF to the
+ HAB clock domain.
+ - 0: No bits are skipped
+ - ...
+ - 3: 3 bits are skipped */
+#else
+ uint32_t bits : 2;
+ uint32_t reserved_2_31 : 30;
+#endif
+ } s;
+ struct cvmx_endor_rfif_skip_frm_cnt_bits_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_skip_frm_cnt_bits cvmx_endor_rfif_skip_frm_cnt_bits_t;
+
+/**
+ * cvmx_endor_rfif_spi_#_ll
+ */
+union cvmx_endor_rfif_spi_x_ll {
+ uint32_t u32;
+ struct cvmx_endor_rfif_spi_x_ll_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t num : 20; /**< SPI event X start sample count */
+#else
+ uint32_t num : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_spi_x_ll_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_spi_x_ll cvmx_endor_rfif_spi_x_ll_t;
+
+/**
+ * cvmx_endor_rfif_spi_cmd_attr#
+ */
+union cvmx_endor_rfif_spi_cmd_attrx {
+ uint32_t u32;
+ struct cvmx_endor_rfif_spi_cmd_attrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_4_31 : 28;
+ uint32_t slave : 1; /**< Slave select (in case there are 2 ADI chips)
+ - 0: slave 1
+ - 1: slave 2 */
+ uint32_t bytes : 1; /**< Number of data bytes to transfer
+ - 0: 1-byte transfer mode
+ - 1: 2-byte transfer mode */
+ uint32_t gen_int : 1; /**< Generate an interrupt upon the SPI event completion:
+ - 0: no interrupt generated
+ - 1: interrupt generated */
+ uint32_t rw : 1; /**< r/w: r:0 ; w:1. */
+#else
+ uint32_t rw : 1;
+ uint32_t gen_int : 1;
+ uint32_t bytes : 1;
+ uint32_t slave : 1;
+ uint32_t reserved_4_31 : 28;
+#endif
+ } s;
+ struct cvmx_endor_rfif_spi_cmd_attrx_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_spi_cmd_attrx cvmx_endor_rfif_spi_cmd_attrx_t;
+
+/**
+ * cvmx_endor_rfif_spi_cmds#
+ */
+union cvmx_endor_rfif_spi_cmdsx {
+ uint32_t u32;
+ struct cvmx_endor_rfif_spi_cmdsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t word : 24; /**< SPI command word. */
+#else
+ uint32_t word : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_rfif_spi_cmdsx_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_spi_cmdsx cvmx_endor_rfif_spi_cmdsx_t;
+
+/**
+ * cvmx_endor_rfif_spi_conf0
+ */
+union cvmx_endor_rfif_spi_conf0 {
+ uint32_t u32;
+ struct cvmx_endor_rfif_spi_conf0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t num_cmds3 : 6; /**< Number of SPI cmds to transfer for event 3 */
+ uint32_t num_cmds2 : 6; /**< Number of SPI cmds to transfer for event 2 */
+ uint32_t num_cmds1 : 6; /**< Number of SPI cmds to transfer for event 1 */
+ uint32_t num_cmds0 : 6; /**< Number of SPI cmds to transfer for event 0 */
+#else
+ uint32_t num_cmds0 : 6;
+ uint32_t num_cmds1 : 6;
+ uint32_t num_cmds2 : 6;
+ uint32_t num_cmds3 : 6;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_rfif_spi_conf0_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_spi_conf0 cvmx_endor_rfif_spi_conf0_t;
+
+/**
+ * cvmx_endor_rfif_spi_conf1
+ */
+union cvmx_endor_rfif_spi_conf1 {
+ uint32_t u32;
+ struct cvmx_endor_rfif_spi_conf1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t start3 : 6; /**< SPI commands start address for event 3 */
+ uint32_t start2 : 6; /**< SPI commands start address for event 2 */
+ uint32_t start1 : 6; /**< SPI commands start address for event 1 */
+ uint32_t start0 : 6; /**< SPI commands start address for event 0 */
+#else
+ uint32_t start0 : 6;
+ uint32_t start1 : 6;
+ uint32_t start2 : 6;
+ uint32_t start3 : 6;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_rfif_spi_conf1_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_spi_conf1 cvmx_endor_rfif_spi_conf1_t;
+
+/**
+ * cvmx_endor_rfif_spi_ctrl
+ */
+union cvmx_endor_rfif_spi_ctrl {
+ uint32_t u32;
+ struct cvmx_endor_rfif_spi_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ctrl : 32; /**< Control */
+#else
+ uint32_t ctrl : 32;
+#endif
+ } s;
+ struct cvmx_endor_rfif_spi_ctrl_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_spi_ctrl cvmx_endor_rfif_spi_ctrl_t;
+
+/**
+ * cvmx_endor_rfif_spi_din#
+ */
+union cvmx_endor_rfif_spi_dinx {
+ uint32_t u32;
+ struct cvmx_endor_rfif_spi_dinx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t data : 16; /**< Data read back from SPI commands. */
+#else
+ uint32_t data : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_endor_rfif_spi_dinx_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_spi_dinx cvmx_endor_rfif_spi_dinx_t;
+
+/**
+ * cvmx_endor_rfif_spi_rx_data
+ */
+union cvmx_endor_rfif_spi_rx_data {
+ uint32_t u32;
+ struct cvmx_endor_rfif_spi_rx_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rd_data : 32; /**< SPI Read Data */
+#else
+ uint32_t rd_data : 32;
+#endif
+ } s;
+ struct cvmx_endor_rfif_spi_rx_data_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_spi_rx_data cvmx_endor_rfif_spi_rx_data_t;
+
+/**
+ * cvmx_endor_rfif_spi_status
+ */
+union cvmx_endor_rfif_spi_status {
+ uint32_t u32;
+ struct cvmx_endor_rfif_spi_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_12_31 : 20;
+ uint32_t sr_state : 4; /**< SPI State Machine
+ 1 : INIT
+ 2 : IDLE
+ 3 : WAIT_FIFO
+ 4 : READ_FIFO
+ 5 : LOAD_SR
+ 6 : SHIFT_SR
+ 7 : WAIT_CLK
+ 8 : WAIT_FOR_SS */
+ uint32_t rx_fifo_lvl : 4; /**< Level of RX FIFO */
+ uint32_t tx_fifo_lvl : 4; /**< Level of TX FIFO */
+#else
+ uint32_t tx_fifo_lvl : 4;
+ uint32_t rx_fifo_lvl : 4;
+ uint32_t sr_state : 4;
+ uint32_t reserved_12_31 : 20;
+#endif
+ } s;
+ struct cvmx_endor_rfif_spi_status_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_spi_status cvmx_endor_rfif_spi_status_t;
+
+/**
+ * cvmx_endor_rfif_spi_tx_data
+ */
+union cvmx_endor_rfif_spi_tx_data {
+ uint32_t u32;
+ struct cvmx_endor_rfif_spi_tx_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t write : 1; /**< When set, execute write. Otherwise, read. */
+ uint32_t reserved_25_30 : 6;
+ uint32_t addr : 9; /**< SPI Address */
+ uint32_t data : 8; /**< SPI Data */
+ uint32_t reserved_0_7 : 8;
+#else
+ uint32_t reserved_0_7 : 8;
+ uint32_t data : 8;
+ uint32_t addr : 9;
+ uint32_t reserved_25_30 : 6;
+ uint32_t write : 1;
+#endif
+ } s;
+ struct cvmx_endor_rfif_spi_tx_data_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_spi_tx_data cvmx_endor_rfif_spi_tx_data_t;
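+
+/* Illustrative only (not part of the vendor header): packing a SPI write
+ * command with the union above. addr and data are truncated to their 9- and
+ * 8-bit fields by the bitfield assignments. */
+static inline uint32_t cvmx_endor_spi_tx_write_word(uint32_t addr, uint32_t data)
+{
+ cvmx_endor_rfif_spi_tx_data_t w;
+ w.u32 = 0;
+ w.s.write = 1;   /* 1 = write, 0 = read */
+ w.s.addr = addr; /* 9-bit SPI address */
+ w.s.data = data; /* 8-bit SPI data */
+ return w.u32;
+}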
+
+/**
+ * cvmx_endor_rfif_timer64_cfg
+ */
+union cvmx_endor_rfif_timer64_cfg {
+ uint32_t u32;
+ struct cvmx_endor_rfif_timer64_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t clks : 8; /**< 7-0: Number of rf clock cycles per 64-bit timer
+ increment. Set to n for n+1 cycles (default=0x7F for
+ 128 cycles). The valid range for the register is 3 to
+ 255. */
+#else
+ uint32_t clks : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_endor_rfif_timer64_cfg_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_timer64_cfg cvmx_endor_rfif_timer64_cfg_t;
+
+/**
+ * cvmx_endor_rfif_timer64_en
+ *
+ * Notes:
+ * This is how the 64-bit timer works:
+ * 1- Configuration
+ * - Write counter LSB (reg:0x69)
+ * - Write counter MSB (reg:0x6A)
+ * - Write config (reg:0x68)
+ * 2- Enable the counter
+ * 3- Wait for the 1PPS
+ * 4- Start incrementing the counter every n+1 rf clock cycles
+ * 5- Read the MSB and LSB registers (reg:0x6B and 0x6C)
+ *
+ * 6- There is no 64-bit snapshot mechanism. Software must allow for the
+ * 32 LSB rolling over and incrementing the 32 MSB between the LSB and
+ * MSB reads. You may want to use the following concatenation recipe:
+ *
+ * a) Read the 32 MSB (MSB1)
+ * b) Read the 32 LSB
+ * c) Read the 32 MSB again (MSB2)
+ * d) Concatenate the 32 MSB and the 32 LSB:
+ * -If both 32 MSB are equal or LSB(31)=1, concatenate MSB1 and the LSB
+ * -Else concatenate MSB2 and the LSB
+ */
+union cvmx_endor_rfif_timer64_en {
+ uint32_t u32;
+ struct cvmx_endor_rfif_timer64_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t ena : 1; /**< Enable for the 64-bit rf clock based timer.
+ - 0: Disabled
+ - 1: Enabled */
+#else
+ uint32_t ena : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_endor_rfif_timer64_en_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_timer64_en cvmx_endor_rfif_timer64_en_t;
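+
+/* A minimal sketch of the rollover-safe 64-bit timer read recipe from the
+ * notes above. The endor_rfif_read32() accessor is an assumption standing
+ * in for the platform's real CSR-read primitive; the register indices
+ * 0x6B/0x6C are the MSB/LSB read registers named in the notes.
+ */
+#define ENDOR_RFIF_RD_TIMER64_MSB 0x6B
+#define ENDOR_RFIF_RD_TIMER64_LSB 0x6C
+extern uint32_t endor_rfif_read32(unsigned reg);
+
+static inline uint64_t endor_rfif_timer64_read(void)
+{
+    uint32_t msb1 = endor_rfif_read32(ENDOR_RFIF_RD_TIMER64_MSB); /* a) */
+    uint32_t lsb  = endor_rfif_read32(ENDOR_RFIF_RD_TIMER64_LSB); /* b) */
+    uint32_t msb2 = endor_rfif_read32(ENDOR_RFIF_RD_TIMER64_MSB); /* c) */
+
+    /* d) If the MSB did not change between reads, or the LSB is in its
+     * upper half (so it cannot have just rolled over), pair MSB1 with the
+     * LSB; otherwise the rollover preceded the LSB read, so use MSB2.
+     */
+    if (msb1 == msb2 || (lsb & 0x80000000u))
+        return ((uint64_t)msb1 << 32) | lsb;
+    return ((uint64_t)msb2 << 32) | lsb;
+}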
+
+/**
+ * cvmx_endor_rfif_tti_scnt_int#
+ */
+union cvmx_endor_rfif_tti_scnt_intx {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tti_scnt_intx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t intr : 20; /**< TTI Sample Count Interrupt:
+ Indicates the sample count of the selected reference
+ counter at which to generate an interrupt. */
+#else
+ uint32_t intr : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tti_scnt_intx_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tti_scnt_intx cvmx_endor_rfif_tti_scnt_intx_t;
+
+/**
+ * cvmx_endor_rfif_tti_scnt_int_clr
+ */
+union cvmx_endor_rfif_tti_scnt_int_clr {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tti_scnt_int_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t cnt : 8; /**< TTI Sample Count Interrupt Clear register:
+ Write 0x1 to clear TTI_SCNT_INT_STAT(0), 0x2 to
+ clear TTI_SCNT_INT_STAT(1), and so on. */
+#else
+ uint32_t cnt : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tti_scnt_int_clr_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tti_scnt_int_clr cvmx_endor_rfif_tti_scnt_int_clr_t;
+
+/**
+ * cvmx_endor_rfif_tti_scnt_int_en
+ */
+union cvmx_endor_rfif_tti_scnt_int_en {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tti_scnt_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t ena : 8; /**< TTI Sample Counter Interrupt Enable:
+ Bit 0: 1 Enables TTI_SCNT_INT_0
+ Bit 1: 1 Enables TTI_SCNT_INT_1
+ - ...
+ Bit 7: 1 Enables TTI_SCNT_INT_7
+ Note these interrupts are disabled by default (=0x00). */
+#else
+ uint32_t ena : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tti_scnt_int_en_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tti_scnt_int_en cvmx_endor_rfif_tti_scnt_int_en_t;
+
+/**
+ * cvmx_endor_rfif_tti_scnt_int_map
+ */
+union cvmx_endor_rfif_tti_scnt_int_map {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tti_scnt_int_map_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t map : 8; /**< TTI Sample Count Interrupt Mapping to a Reference
+ Counter:
+ Indicates the reference counter the TTI Sample Count
+ Interrupts must be generated from. A value of 0
+ indicates the RX reference counter (default) and a
+ value of 1 indicates the TX reference counter. Bit 0
+ is associated with TTI_SCNT_INT_0, bit 1 with
+ TTI_SCNT_INT_1, and so on.
+ Note that this register has no effect in TDD mode;
+ it applies only in FDD mode. */
+#else
+ uint32_t map : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tti_scnt_int_map_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tti_scnt_int_map cvmx_endor_rfif_tti_scnt_int_map_t;
+
+/**
+ * cvmx_endor_rfif_tti_scnt_int_stat
+ */
+union cvmx_endor_rfif_tti_scnt_int_stat {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tti_scnt_int_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t cnt : 8; /**< TTI Sample Count Interrupt Status register:
+ Indicates if a TTI_SCNT_INT_X occurred (1) or not (0).
+ Bit 0 is associated with TTI_SCNT_INT_0 and so on
+ incrementally. Writing a 1 clears the interrupt
+ bit. */
+#else
+ uint32_t cnt : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tti_scnt_int_stat_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tti_scnt_int_stat cvmx_endor_rfif_tti_scnt_int_stat_t;
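+
+/* A minimal sketch of enabling and then acknowledging TTI sample-count
+ * interrupt 0 with the registers above. endor_rfif_write32() and the two
+ * register-address parameters are assumptions; this header does not
+ * define the RFIF register offsets.
+ */
+extern void endor_rfif_write32(uint64_t reg, uint32_t val);
+
+static inline void
+endor_rfif_tti_scnt_int0_setup(uint64_t en_reg, uint64_t stat_reg)
+{
+    cvmx_endor_rfif_tti_scnt_int_en_t en;
+    cvmx_endor_rfif_tti_scnt_int_stat_t stat;
+
+    en.u32 = 0;
+    en.s.ena = 1 << 0;    /* enable TTI_SCNT_INT_0 (all disabled by default) */
+    endor_rfif_write32(en_reg, en.u32);
+
+    stat.u32 = 0;
+    stat.s.cnt = 1 << 0;  /* writing a 1 clears the pending bit */
+    endor_rfif_write32(stat_reg, stat.u32);
+}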
+
+/**
+ * cvmx_endor_rfif_tx_div_status
+ *
+ * Notes:
+ * In TDD Mode, bits 15:12 are DDR state machine status.
+ *
+ */
+union cvmx_endor_rfif_tx_div_status {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tx_div_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t rfic_ena : 1; /**< RFIC enabled (in alert state) */
+ uint32_t sync_late : 1; /**< Sync late (Used for UE products). */
+ uint32_t reserved_19_20 : 2;
+ uint32_t thresh_rch : 1; /**< Threshold Reached (RX/RX_div/TX) */
+ uint32_t fifo_of : 1; /**< FIFO overflow */
+ uint32_t fifo_ur : 1; /**< FIFO underrun */
+ uint32_t tx_sm : 2; /**< TX state machine status */
+ uint32_t rx_sm : 2; /**< RX state machine status */
+ uint32_t hab_req_sm : 4; /**< HAB request manager SM
+ - 0: idle
+ - 1: wait_cs
+ - 2: Term
+ - 3: rd_fifo(RX)/ write fifo(TX)
+ - 4: wait_th
+ Others: not used */
+ uint32_t reserved_0_7 : 8;
+#else
+ uint32_t reserved_0_7 : 8;
+ uint32_t hab_req_sm : 4;
+ uint32_t rx_sm : 2;
+ uint32_t tx_sm : 2;
+ uint32_t fifo_ur : 1;
+ uint32_t fifo_of : 1;
+ uint32_t thresh_rch : 1;
+ uint32_t reserved_19_20 : 2;
+ uint32_t sync_late : 1;
+ uint32_t rfic_ena : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tx_div_status_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tx_div_status cvmx_endor_rfif_tx_div_status_t;
+
+/**
+ * cvmx_endor_rfif_tx_if_cfg
+ */
+union cvmx_endor_rfif_tx_if_cfg {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tx_if_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_4_31 : 28;
+ uint32_t mode : 1; /**< TX communication mode
+ - 0: TX SISO (default)
+ - 1: TX MIMO */
+ uint32_t dis_sch : 1; /**< Disabled antenna driving scheme (TX SISO/RX MIMO
+ feature only)
+ - 0: Constant 0 for debugging (default)
+ - 1: Same as previous cycle to minimize IO switching */
+ uint32_t antenna : 2; /**< Transmit on antenna A and/or B (TX SISO/RX MIMO
+ feature only)
+ - 0: Transmit on antenna A (default)
+ - 1: Transmit on antenna B
+ - 2: Transmit on A and B
+ - 3: Reserved */
+#else
+ uint32_t antenna : 2;
+ uint32_t dis_sch : 1;
+ uint32_t mode : 1;
+ uint32_t reserved_4_31 : 28;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tx_if_cfg_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tx_if_cfg cvmx_endor_rfif_tx_if_cfg_t;
+
+/**
+ * cvmx_endor_rfif_tx_lead_lag
+ */
+union cvmx_endor_rfif_tx_lead_lag {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tx_lead_lag_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t lag : 12; /**< unsigned value (lag) on end of window */
+ uint32_t lead : 12; /**< unsigned value (lead) on beginning of window */
+#else
+ uint32_t lead : 12;
+ uint32_t lag : 12;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tx_lead_lag_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tx_lead_lag cvmx_endor_rfif_tx_lead_lag_t;
+
+/**
+ * cvmx_endor_rfif_tx_offset
+ */
+union cvmx_endor_rfif_tx_offset {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tx_offset_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t offset : 20; /**< Indicates the number of RF clock cycles after the
+ GPS/ETH 1PPS is received before the start of the RX
+ frame. See Figure 44 for a description. */
+#else
+ uint32_t offset : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tx_offset_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tx_offset cvmx_endor_rfif_tx_offset_t;
+
+/**
+ * cvmx_endor_rfif_tx_offset_adj_scnt
+ */
+union cvmx_endor_rfif_tx_offset_adj_scnt {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tx_offset_adj_scnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t cnt : 20; /**< Indicates the TX sample count at which the 1PPS
+ incremental adjustments will be applied. */
+#else
+ uint32_t cnt : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tx_offset_adj_scnt_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tx_offset_adj_scnt cvmx_endor_rfif_tx_offset_adj_scnt_t;
+
+/**
+ * cvmx_endor_rfif_tx_status
+ *
+ * Notes:
+ * In TDD Mode, bits 15:12 are DDR state machine status.
+ *
+ */
+union cvmx_endor_rfif_tx_status {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tx_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t rfic_ena : 1; /**< RFIC enabled (in alert state) */
+ uint32_t sync_late : 1; /**< Sync late (Used for UE products). */
+ uint32_t reserved_19_20 : 2;
+ uint32_t thresh_rch : 1; /**< Threshold Reached (RX/RX_div/TX) */
+ uint32_t fifo_of : 1; /**< FIFO overflow */
+ uint32_t fifo_ur : 1; /**< FIFO underrun */
+ uint32_t tx_sm : 2; /**< TX state machine status */
+ uint32_t rx_sm : 2; /**< RX state machine status */
+ uint32_t hab_req_sm : 4; /**< HAB request manager SM
+ - 0: idle
+ - 1: wait_cs
+ - 2: Term
+ - 3: rd_fifo(RX)/ write fifo(TX)
+ - 4: wait_th
+ Others: not used */
+ uint32_t reserved_0_7 : 8;
+#else
+ uint32_t reserved_0_7 : 8;
+ uint32_t hab_req_sm : 4;
+ uint32_t rx_sm : 2;
+ uint32_t tx_sm : 2;
+ uint32_t fifo_ur : 1;
+ uint32_t fifo_of : 1;
+ uint32_t thresh_rch : 1;
+ uint32_t reserved_19_20 : 2;
+ uint32_t sync_late : 1;
+ uint32_t rfic_ena : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tx_status_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tx_status cvmx_endor_rfif_tx_status_t;
+
+/**
+ * cvmx_endor_rfif_tx_th
+ */
+union cvmx_endor_rfif_tx_th {
+ uint32_t u32;
+ struct cvmx_endor_rfif_tx_th_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_12_31 : 20;
+ uint32_t thr : 12; /**< FIFO level reached before granting a TX DMA request.
+ This TX FIFO fill level threshold can be used
+ in two ways:
+ 1- When the FIFO fill level reaches the threshold,
+ there is enough data in the FIFO to start the data
+ transfer, so it grants a DMA transfer from the TX FIFO
+ to the HAB's memory.
+ 2- It can also be used to generate an interrupt to
+ the DSP when the FIFO threshold is reached. */
+#else
+ uint32_t thr : 12;
+ uint32_t reserved_12_31 : 20;
+#endif
+ } s;
+ struct cvmx_endor_rfif_tx_th_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_tx_th cvmx_endor_rfif_tx_th_t;
+
+/**
+ * cvmx_endor_rfif_win_en
+ */
+union cvmx_endor_rfif_win_en {
+ uint32_t u32;
+ struct cvmx_endor_rfif_win_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_4_31 : 28;
+ uint32_t enable : 4; /**< Receive windows enable (all enabled by default)
+ Bit 0: 1 = window 0 enabled, 0 = window 0 disabled
+ - ...
+ Bit 3: 1 = window 3 enabled, 0 = window 3 disabled.
+ Bits 31-4: not used */
+#else
+ uint32_t enable : 4;
+ uint32_t reserved_4_31 : 28;
+#endif
+ } s;
+ struct cvmx_endor_rfif_win_en_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_win_en cvmx_endor_rfif_win_en_t;
+
+/**
+ * cvmx_endor_rfif_win_upd_scnt
+ */
+union cvmx_endor_rfif_win_upd_scnt {
+ uint32_t u32;
+ struct cvmx_endor_rfif_win_upd_scnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t scnt : 20; /**< Receive window update sample count. This is the count
+ at which the following registers' newly programmed values
+ will take effect: RX_WIN_EN(3-0), RX_W_S (19-0),
+ RX_W_E(19-0), NUM_RX_WIN(3-0), FRAME_L(19-0),
+ RX_LEAD_LAG(23-0) */
+#else
+ uint32_t scnt : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_endor_rfif_win_upd_scnt_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_win_upd_scnt cvmx_endor_rfif_win_upd_scnt_t;
+
+/**
+ * cvmx_endor_rfif_wr_timer64_lsb
+ */
+union cvmx_endor_rfif_wr_timer64_lsb {
+ uint32_t u32;
+ struct cvmx_endor_rfif_wr_timer64_lsb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t val : 32; /**< 64-bit timer initial value of the 32 LSB. */
+#else
+ uint32_t val : 32;
+#endif
+ } s;
+ struct cvmx_endor_rfif_wr_timer64_lsb_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_wr_timer64_lsb cvmx_endor_rfif_wr_timer64_lsb_t;
+
+/**
+ * cvmx_endor_rfif_wr_timer64_msb
+ */
+union cvmx_endor_rfif_wr_timer64_msb {
+ uint32_t u32;
+ struct cvmx_endor_rfif_wr_timer64_msb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t val : 32; /**< 64-bit timer initial value of the 32 MSB. */
+#else
+ uint32_t val : 32;
+#endif
+ } s;
+ struct cvmx_endor_rfif_wr_timer64_msb_s cnf71xx;
+};
+typedef union cvmx_endor_rfif_wr_timer64_msb cvmx_endor_rfif_wr_timer64_msb_t;
+
+/**
+ * cvmx_endor_rstclk_clkenb0_clr
+ */
+union cvmx_endor_rstclk_clkenb0_clr {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_clkenb0_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_13_31 : 19;
+ uint32_t axidma : 1; /**< Clears the axidma clock enable */
+ uint32_t txseq : 1; /**< Clears the txseq clock enable */
+ uint32_t v3genc : 1; /**< Clears the v3genc clock enable */
+ uint32_t ifftpapr : 1; /**< Clears the ifftpapr clock enable */
+ uint32_t lteenc : 1; /**< Clears the lteenc clock enable */
+ uint32_t vdec : 1; /**< Clears the vdec clock enable */
+ uint32_t turbodsp : 1; /**< Clears the turbodsp clock enable */
+ uint32_t turbophy : 1; /**< Clears the turbophy clock enable */
+ uint32_t rx1seq : 1; /**< Clears the rx1seq clock enable */
+ uint32_t dftdmap : 1; /**< Clears the dftdmap clock enable */
+ uint32_t rx0seq : 1; /**< Clears the rx0seq clock enable */
+ uint32_t rachfe : 1; /**< Clears the rachfe clock enable */
+ uint32_t ulfe : 1; /**< Clears the ulfe clock enable */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachfe : 1;
+ uint32_t rx0seq : 1;
+ uint32_t dftdmap : 1;
+ uint32_t rx1seq : 1;
+ uint32_t turbophy : 1;
+ uint32_t turbodsp : 1;
+ uint32_t vdec : 1;
+ uint32_t lteenc : 1;
+ uint32_t ifftpapr : 1;
+ uint32_t v3genc : 1;
+ uint32_t txseq : 1;
+ uint32_t axidma : 1;
+ uint32_t reserved_13_31 : 19;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_clkenb0_clr_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_clkenb0_clr cvmx_endor_rstclk_clkenb0_clr_t;
+
+/**
+ * cvmx_endor_rstclk_clkenb0_set
+ */
+union cvmx_endor_rstclk_clkenb0_set {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_clkenb0_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_13_31 : 19;
+ uint32_t axidma : 1; /**< Sets the axidma clock enable */
+ uint32_t txseq : 1; /**< Sets the txseq clock enable */
+ uint32_t v3genc : 1; /**< Sets the v3genc clock enable */
+ uint32_t ifftpapr : 1; /**< Sets the ifftpapr clock enable */
+ uint32_t lteenc : 1; /**< Sets the lteenc clock enable */
+ uint32_t vdec : 1; /**< Sets the vdec clock enable */
+ uint32_t turbodsp : 1; /**< Sets the turbodsp clock enable */
+ uint32_t turbophy : 1; /**< Sets the turbophy clock enable */
+ uint32_t rx1seq : 1; /**< Sets the rx1seq clock enable */
+ uint32_t dftdmap : 1; /**< Sets the dftdmap clock enable */
+ uint32_t rx0seq : 1; /**< Sets the rx0seq clock enable */
+ uint32_t rachfe : 1; /**< Sets the rachfe clock enable */
+ uint32_t ulfe : 1; /**< Sets the ulfe clock enable */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachfe : 1;
+ uint32_t rx0seq : 1;
+ uint32_t dftdmap : 1;
+ uint32_t rx1seq : 1;
+ uint32_t turbophy : 1;
+ uint32_t turbodsp : 1;
+ uint32_t vdec : 1;
+ uint32_t lteenc : 1;
+ uint32_t ifftpapr : 1;
+ uint32_t v3genc : 1;
+ uint32_t txseq : 1;
+ uint32_t axidma : 1;
+ uint32_t reserved_13_31 : 19;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_clkenb0_set_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_clkenb0_set cvmx_endor_rstclk_clkenb0_set_t;
+
+/**
+ * cvmx_endor_rstclk_clkenb0_state
+ */
+union cvmx_endor_rstclk_clkenb0_state {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_clkenb0_state_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_13_31 : 19;
+ uint32_t axidma : 1; /**< axidma clock enable state */
+ uint32_t txseq : 1; /**< txseq clock enable state */
+ uint32_t v3genc : 1; /**< v3genc clock enable state */
+ uint32_t ifftpapr : 1; /**< ifftpapr clock enable state */
+ uint32_t lteenc : 1; /**< lteenc clock enable state */
+ uint32_t vdec : 1; /**< vdec clock enable state */
+ uint32_t turbodsp : 1; /**< turbodsp clock enable state */
+ uint32_t turbophy : 1; /**< turbophy clock enable state */
+ uint32_t rx1seq : 1; /**< rx1seq clock enable state */
+ uint32_t dftdmap : 1; /**< dftdmap clock enable state */
+ uint32_t rx0seq : 1; /**< rx0seq clock enable state */
+ uint32_t rachfe : 1; /**< rachfe clock enable state */
+ uint32_t ulfe : 1; /**< ulfe clock enable state */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachfe : 1;
+ uint32_t rx0seq : 1;
+ uint32_t dftdmap : 1;
+ uint32_t rx1seq : 1;
+ uint32_t turbophy : 1;
+ uint32_t turbodsp : 1;
+ uint32_t vdec : 1;
+ uint32_t lteenc : 1;
+ uint32_t ifftpapr : 1;
+ uint32_t v3genc : 1;
+ uint32_t txseq : 1;
+ uint32_t axidma : 1;
+ uint32_t reserved_13_31 : 19;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_clkenb0_state_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_clkenb0_state cvmx_endor_rstclk_clkenb0_state_t;
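+
+/* A minimal sketch of the SET/CLR/STATE idiom the RSTCLK registers above
+ * follow: write a 1 to a bit of the _SET (or _CLR) register, then read
+ * _STATE to observe the result. The endor_rstclk_read32()/write32()
+ * accessors and the register-address parameters are assumptions; this
+ * header does not define the RSTCLK register offsets.
+ */
+extern uint32_t endor_rstclk_read32(uint64_t reg);
+extern void endor_rstclk_write32(uint64_t reg, uint32_t val);
+
+static inline int
+endor_rstclk_axidma_clk_enable(uint64_t set_reg, uint64_t state_reg)
+{
+    cvmx_endor_rstclk_clkenb0_set_t set;
+    cvmx_endor_rstclk_clkenb0_state_t state;
+
+    set.u32 = 0;
+    set.s.axidma = 1;        /* request: set the axidma clock enable */
+    endor_rstclk_write32(set_reg, set.u32);
+
+    state.u32 = endor_rstclk_read32(state_reg);
+    return state.s.axidma;   /* 1 if the enable is now set */
+}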
+
+/**
+ * cvmx_endor_rstclk_clkenb1_clr
+ */
+union cvmx_endor_rstclk_clkenb1_clr {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_clkenb1_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_7_31 : 25;
+ uint32_t token : 1; /**< Clears the token clock enable */
+ uint32_t tile3dsp : 1; /**< Clears the tile3dsp clock enable */
+ uint32_t tile2dsp : 1; /**< Clears the tile2dsp clock enable */
+ uint32_t tile1dsp : 1; /**< Clears the tile1dsp clock enable */
+ uint32_t rfspi : 1; /**< Clears the rfspi clock enable */
+ uint32_t rfif_hab : 1; /**< Clears the rfif_hab clock enable */
+ uint32_t rfif_rf : 1; /**< Clears the rfif_rf clock enable */
+#else
+ uint32_t rfif_rf : 1;
+ uint32_t rfif_hab : 1;
+ uint32_t rfspi : 1;
+ uint32_t tile1dsp : 1;
+ uint32_t tile2dsp : 1;
+ uint32_t tile3dsp : 1;
+ uint32_t token : 1;
+ uint32_t reserved_7_31 : 25;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_clkenb1_clr_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_clkenb1_clr cvmx_endor_rstclk_clkenb1_clr_t;
+
+/**
+ * cvmx_endor_rstclk_clkenb1_set
+ */
+union cvmx_endor_rstclk_clkenb1_set {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_clkenb1_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_7_31 : 25;
+ uint32_t token : 1; /**< Sets the token clock enable */
+ uint32_t tile3dsp : 1; /**< Sets the tile3dsp clock enable */
+ uint32_t tile2dsp : 1; /**< Sets the tile2dsp clock enable */
+ uint32_t tile1dsp : 1; /**< Sets the tile1dsp clock enable */
+ uint32_t rfspi : 1; /**< Sets the rfspi clock enable */
+ uint32_t rfif_hab : 1; /**< Sets the rfif_hab clock enable */
+ uint32_t rfif_rf : 1; /**< Sets the rfif_rf clock enable */
+#else
+ uint32_t rfif_rf : 1;
+ uint32_t rfif_hab : 1;
+ uint32_t rfspi : 1;
+ uint32_t tile1dsp : 1;
+ uint32_t tile2dsp : 1;
+ uint32_t tile3dsp : 1;
+ uint32_t token : 1;
+ uint32_t reserved_7_31 : 25;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_clkenb1_set_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_clkenb1_set cvmx_endor_rstclk_clkenb1_set_t;
+
+/**
+ * cvmx_endor_rstclk_clkenb1_state
+ */
+union cvmx_endor_rstclk_clkenb1_state {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_clkenb1_state_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_7_31 : 25;
+ uint32_t token : 1; /**< token clock enable state */
+ uint32_t tile3dsp : 1; /**< tile3dsp clock enable state */
+ uint32_t tile2dsp : 1; /**< tile2dsp clock enable state */
+ uint32_t tile1dsp : 1; /**< tile1dsp clock enable state */
+ uint32_t rfspi : 1; /**< rfspi clock enable state */
+ uint32_t rfif_hab : 1; /**< rfif_hab clock enable state */
+ uint32_t rfif_rf : 1; /**< rfif_rf clock enable state */
+#else
+ uint32_t rfif_rf : 1;
+ uint32_t rfif_hab : 1;
+ uint32_t rfspi : 1;
+ uint32_t tile1dsp : 1;
+ uint32_t tile2dsp : 1;
+ uint32_t tile3dsp : 1;
+ uint32_t token : 1;
+ uint32_t reserved_7_31 : 25;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_clkenb1_state_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_clkenb1_state cvmx_endor_rstclk_clkenb1_state_t;
+
+/**
+ * cvmx_endor_rstclk_dspstall_clr
+ */
+union cvmx_endor_rstclk_dspstall_clr {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_dspstall_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t txdsp1 : 1; /**< Clears the txdsp1 stall */
+ uint32_t txdsp0 : 1; /**< Clears the txdsp0 stall */
+ uint32_t rx1dsp1 : 1; /**< Clears the rx1dsp1 stall */
+ uint32_t rx1dsp0 : 1; /**< Clears the rx1dsp0 stall */
+ uint32_t rx0dsp1 : 1; /**< Clears the rx0dsp1 stall */
+ uint32_t rx0dsp0 : 1; /**< Clears the rx0dsp0 stall */
+#else
+ uint32_t rx0dsp0 : 1;
+ uint32_t rx0dsp1 : 1;
+ uint32_t rx1dsp0 : 1;
+ uint32_t rx1dsp1 : 1;
+ uint32_t txdsp0 : 1;
+ uint32_t txdsp1 : 1;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_dspstall_clr_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_dspstall_clr cvmx_endor_rstclk_dspstall_clr_t;
+
+/**
+ * cvmx_endor_rstclk_dspstall_set
+ */
+union cvmx_endor_rstclk_dspstall_set {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_dspstall_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t txdsp1 : 1; /**< Sets the txdsp1 stall */
+ uint32_t txdsp0 : 1; /**< Sets the txdsp0 stall */
+ uint32_t rx1dsp1 : 1; /**< Sets the rx1dsp1 stall */
+ uint32_t rx1dsp0 : 1; /**< Sets the rx1dsp0 stall */
+ uint32_t rx0dsp1 : 1; /**< Sets the rx0dsp1 stall */
+ uint32_t rx0dsp0 : 1; /**< Sets the rx0dsp0 stall */
+#else
+ uint32_t rx0dsp0 : 1;
+ uint32_t rx0dsp1 : 1;
+ uint32_t rx1dsp0 : 1;
+ uint32_t rx1dsp1 : 1;
+ uint32_t txdsp0 : 1;
+ uint32_t txdsp1 : 1;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_dspstall_set_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_dspstall_set cvmx_endor_rstclk_dspstall_set_t;
+
+/**
+ * cvmx_endor_rstclk_dspstall_state
+ */
+union cvmx_endor_rstclk_dspstall_state {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_dspstall_state_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t txdsp1 : 1; /**< txdsp1 stall state */
+ uint32_t txdsp0 : 1; /**< txdsp0 stall state */
+ uint32_t rx1dsp1 : 1; /**< rx1dsp1 stall state */
+ uint32_t rx1dsp0 : 1; /**< rx1dsp0 stall state */
+ uint32_t rx0dsp1 : 1; /**< rx0dsp1 stall state */
+ uint32_t rx0dsp0 : 1; /**< rx0dsp0 stall state */
+#else
+ uint32_t rx0dsp0 : 1;
+ uint32_t rx0dsp1 : 1;
+ uint32_t rx1dsp0 : 1;
+ uint32_t rx1dsp1 : 1;
+ uint32_t txdsp0 : 1;
+ uint32_t txdsp1 : 1;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_dspstall_state_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_dspstall_state cvmx_endor_rstclk_dspstall_state_t;
+
+/**
+ * cvmx_endor_rstclk_intr0_clrmask
+ */
+union cvmx_endor_rstclk_intr0_clrmask {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_intr0_clrmask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t timer_intr : 8; /**< reserved. */
+ uint32_t sw_intr : 24; /**< reserved. */
+#else
+ uint32_t sw_intr : 24;
+ uint32_t timer_intr : 8;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_intr0_clrmask_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_intr0_clrmask cvmx_endor_rstclk_intr0_clrmask_t;
+
+/**
+ * cvmx_endor_rstclk_intr0_mask
+ */
+union cvmx_endor_rstclk_intr0_mask {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_intr0_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t timer_intr : 8; /**< reserved. */
+ uint32_t sw_intr : 24; /**< reserved. */
+#else
+ uint32_t sw_intr : 24;
+ uint32_t timer_intr : 8;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_intr0_mask_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_intr0_mask cvmx_endor_rstclk_intr0_mask_t;
+
+/**
+ * cvmx_endor_rstclk_intr0_setmask
+ */
+union cvmx_endor_rstclk_intr0_setmask {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_intr0_setmask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t timer_intr : 8; /**< reserved. */
+ uint32_t sw_intr : 24; /**< reserved. */
+#else
+ uint32_t sw_intr : 24;
+ uint32_t timer_intr : 8;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_intr0_setmask_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_intr0_setmask cvmx_endor_rstclk_intr0_setmask_t;
+
+/**
+ * cvmx_endor_rstclk_intr0_status
+ */
+union cvmx_endor_rstclk_intr0_status {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_intr0_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t value : 32; /**< reserved. */
+#else
+ uint32_t value : 32;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_intr0_status_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_intr0_status cvmx_endor_rstclk_intr0_status_t;
+
+/**
+ * cvmx_endor_rstclk_intr1_clrmask
+ */
+union cvmx_endor_rstclk_intr1_clrmask {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_intr1_clrmask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t value : 32; /**< reserved. */
+#else
+ uint32_t value : 32;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_intr1_clrmask_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_intr1_clrmask cvmx_endor_rstclk_intr1_clrmask_t;
+
+/**
+ * cvmx_endor_rstclk_intr1_mask
+ */
+union cvmx_endor_rstclk_intr1_mask {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_intr1_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t value : 32; /**< reserved. */
+#else
+ uint32_t value : 32;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_intr1_mask_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_intr1_mask cvmx_endor_rstclk_intr1_mask_t;
+
+/**
+ * cvmx_endor_rstclk_intr1_setmask
+ */
+union cvmx_endor_rstclk_intr1_setmask {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_intr1_setmask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t value : 32; /**< reserved. */
+#else
+ uint32_t value : 32;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_intr1_setmask_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_intr1_setmask cvmx_endor_rstclk_intr1_setmask_t;
+
+/**
+ * cvmx_endor_rstclk_intr1_status
+ */
+union cvmx_endor_rstclk_intr1_status {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_intr1_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t value : 32; /**< reserved. */
+#else
+ uint32_t value : 32;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_intr1_status_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_intr1_status cvmx_endor_rstclk_intr1_status_t;
+
+/**
+ * cvmx_endor_rstclk_phy_config
+ */
+union cvmx_endor_rstclk_phy_config {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_phy_config_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t t3smem_initenb : 1; /**< Tile 3 SMEM init enable */
+ uint32_t t3imem_initenb : 1; /**< Tile 3 IMEM init enable */
+ uint32_t t2smem_initenb : 1; /**< Tile 2 SMEM init enable */
+ uint32_t t2imem_initenb : 1; /**< Tile 2 IMEM init enable */
+ uint32_t t1smem_initenb : 1; /**< Tile 1 SMEM init enable */
+ uint32_t t1imem_initenb : 1; /**< Tile 1 IMEM init enable */
+#else
+ uint32_t t1imem_initenb : 1;
+ uint32_t t1smem_initenb : 1;
+ uint32_t t2imem_initenb : 1;
+ uint32_t t2smem_initenb : 1;
+ uint32_t t3imem_initenb : 1;
+ uint32_t t3smem_initenb : 1;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_phy_config_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_phy_config cvmx_endor_rstclk_phy_config_t;
+
+/**
+ * cvmx_endor_rstclk_proc_mon
+ */
+union cvmx_endor_rstclk_proc_mon {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_proc_mon_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_18_31 : 14;
+ uint32_t transistor_sel : 2; /**< 01==RVT, 10==HVT. */
+ uint32_t ringosc_count : 16; /**< reserved. */
+#else
+ uint32_t ringosc_count : 16;
+ uint32_t transistor_sel : 2;
+ uint32_t reserved_18_31 : 14;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_proc_mon_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_proc_mon cvmx_endor_rstclk_proc_mon_t;
+
+/**
+ * cvmx_endor_rstclk_proc_mon_count
+ */
+union cvmx_endor_rstclk_proc_mon_count {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_proc_mon_count_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t count : 24; /**< reserved. */
+#else
+ uint32_t count : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_proc_mon_count_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_proc_mon_count cvmx_endor_rstclk_proc_mon_count_t;
+
+/**
+ * cvmx_endor_rstclk_reset0_clr
+ */
+union cvmx_endor_rstclk_reset0_clr {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_reset0_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_13_31 : 19;
+ uint32_t axidma : 1; /**< Clears the axidma reset */
+ uint32_t txseq : 1; /**< Clears the txseq reset */
+ uint32_t v3genc : 1; /**< Clears the v3genc reset */
+ uint32_t ifftpapr : 1; /**< Clears the ifftpapr reset */
+ uint32_t lteenc : 1; /**< Clears the lteenc reset */
+ uint32_t vdec : 1; /**< Clears the vdec reset */
+ uint32_t turbodsp : 1; /**< Clears the turbodsp reset */
+ uint32_t turbophy : 1; /**< Clears the turbophy reset */
+ uint32_t rx1seq : 1; /**< Clears the rx1seq reset */
+ uint32_t dftdmap : 1; /**< Clears the dftdmap reset */
+ uint32_t rx0seq : 1; /**< Clears the rx0seq reset */
+ uint32_t rachfe : 1; /**< Clears the rachfe reset */
+ uint32_t ulfe : 1; /**< Clears the ulfe reset */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachfe : 1;
+ uint32_t rx0seq : 1;
+ uint32_t dftdmap : 1;
+ uint32_t rx1seq : 1;
+ uint32_t turbophy : 1;
+ uint32_t turbodsp : 1;
+ uint32_t vdec : 1;
+ uint32_t lteenc : 1;
+ uint32_t ifftpapr : 1;
+ uint32_t v3genc : 1;
+ uint32_t txseq : 1;
+ uint32_t axidma : 1;
+ uint32_t reserved_13_31 : 19;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_reset0_clr_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_reset0_clr cvmx_endor_rstclk_reset0_clr_t;
+
+/**
+ * cvmx_endor_rstclk_reset0_set
+ */
+union cvmx_endor_rstclk_reset0_set {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_reset0_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_13_31 : 19;
+ uint32_t axidma : 1; /**< Sets the axidma reset */
+ uint32_t txseq : 1; /**< Sets the txseq reset */
+ uint32_t v3genc : 1; /**< Sets the v3genc reset */
+ uint32_t ifftpapr : 1; /**< Sets the ifftpapr reset */
+ uint32_t lteenc : 1; /**< Sets the lteenc reset */
+ uint32_t vdec : 1; /**< Sets the vdec reset */
+ uint32_t turbodsp : 1; /**< Sets the turbodsp reset */
+ uint32_t turbophy : 1; /**< Sets the turbophy reset */
+ uint32_t rx1seq : 1; /**< Sets the rx1seq reset */
+ uint32_t dftdmap : 1; /**< Sets the dftdmap reset */
+ uint32_t rx0seq : 1; /**< Sets the rx0seq reset */
+ uint32_t rachfe : 1; /**< Sets the rachfe reset */
+ uint32_t ulfe : 1; /**< Sets the ulfe reset */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachfe : 1;
+ uint32_t rx0seq : 1;
+ uint32_t dftdmap : 1;
+ uint32_t rx1seq : 1;
+ uint32_t turbophy : 1;
+ uint32_t turbodsp : 1;
+ uint32_t vdec : 1;
+ uint32_t lteenc : 1;
+ uint32_t ifftpapr : 1;
+ uint32_t v3genc : 1;
+ uint32_t txseq : 1;
+ uint32_t axidma : 1;
+ uint32_t reserved_13_31 : 19;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_reset0_set_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_reset0_set cvmx_endor_rstclk_reset0_set_t;
+
+/**
+ * cvmx_endor_rstclk_reset0_state
+ */
+union cvmx_endor_rstclk_reset0_state {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_reset0_state_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_13_31 : 19;
+ uint32_t axidma : 1; /**< axidma reset state */
+ uint32_t txseq : 1; /**< txseq reset state */
+ uint32_t v3genc : 1; /**< v3genc reset state */
+ uint32_t ifftpapr : 1; /**< ifftpapr reset state */
+ uint32_t lteenc : 1; /**< lteenc reset state */
+ uint32_t vdec : 1; /**< vdec reset state */
+ uint32_t turbodsp : 1; /**< turbodsp reset state */
+ uint32_t turbophy : 1; /**< turbophy reset state */
+ uint32_t rx1seq : 1; /**< rx1seq reset state */
+ uint32_t dftdmap : 1; /**< dftdmap reset state */
+ uint32_t rx0seq : 1; /**< rx0seq reset state */
+ uint32_t rachfe : 1; /**< rachfe reset state */
+ uint32_t ulfe : 1; /**< ulfe reset state */
+#else
+ uint32_t ulfe : 1;
+ uint32_t rachfe : 1;
+ uint32_t rx0seq : 1;
+ uint32_t dftdmap : 1;
+ uint32_t rx1seq : 1;
+ uint32_t turbophy : 1;
+ uint32_t turbodsp : 1;
+ uint32_t vdec : 1;
+ uint32_t lteenc : 1;
+ uint32_t ifftpapr : 1;
+ uint32_t v3genc : 1;
+ uint32_t txseq : 1;
+ uint32_t axidma : 1;
+ uint32_t reserved_13_31 : 19;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_reset0_state_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_reset0_state cvmx_endor_rstclk_reset0_state_t;
+
+/**
+ * cvmx_endor_rstclk_reset1_clr
+ */
+union cvmx_endor_rstclk_reset1_clr {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_reset1_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_7_31 : 25;
+ uint32_t token : 1; /**< Clears the token reset */
+ uint32_t tile3dsp : 1; /**< Clears the tile3dsp reset */
+ uint32_t tile2dsp : 1; /**< Clears the tile2dsp reset */
+ uint32_t tile1dsp : 1; /**< Clears the tile1dsp reset */
+ uint32_t rfspi : 1; /**< Clears the rfspi reset */
+ uint32_t rfif_hab : 1; /**< Clears the rfif_hab reset */
+ uint32_t rfif_rf : 1; /**< Clears the rfif_rf reset */
+#else
+ uint32_t rfif_rf : 1;
+ uint32_t rfif_hab : 1;
+ uint32_t rfspi : 1;
+ uint32_t tile1dsp : 1;
+ uint32_t tile2dsp : 1;
+ uint32_t tile3dsp : 1;
+ uint32_t token : 1;
+ uint32_t reserved_7_31 : 25;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_reset1_clr_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_reset1_clr cvmx_endor_rstclk_reset1_clr_t;
+
+/**
+ * cvmx_endor_rstclk_reset1_set
+ */
+union cvmx_endor_rstclk_reset1_set {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_reset1_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_7_31 : 25;
+ uint32_t token : 1; /**< Sets the token reset */
+ uint32_t tile3dsp : 1; /**< Sets the tile3dsp reset */
+ uint32_t tile2dsp : 1; /**< Sets the tile2dsp reset */
+ uint32_t tile1dsp : 1; /**< Sets the tile1dsp reset */
+ uint32_t rfspi : 1; /**< Sets the rfspi reset */
+ uint32_t rfif_hab : 1; /**< Sets the rfif_hab reset */
+ uint32_t rfif_rf : 1; /**< Sets the rfif_rf reset */
+#else
+ uint32_t rfif_rf : 1;
+ uint32_t rfif_hab : 1;
+ uint32_t rfspi : 1;
+ uint32_t tile1dsp : 1;
+ uint32_t tile2dsp : 1;
+ uint32_t tile3dsp : 1;
+ uint32_t token : 1;
+ uint32_t reserved_7_31 : 25;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_reset1_set_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_reset1_set cvmx_endor_rstclk_reset1_set_t;
+
+/**
+ * cvmx_endor_rstclk_reset1_state
+ */
+union cvmx_endor_rstclk_reset1_state {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_reset1_state_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_7_31 : 25;
+ uint32_t token : 1; /**< token reset state */
+ uint32_t tile3dsp : 1; /**< tile3dsp reset state */
+ uint32_t tile2dsp : 1; /**< tile2dsp reset state */
+ uint32_t tile1dsp : 1; /**< tile1dsp reset state */
+ uint32_t rfspi : 1; /**< rfspi reset state */
+ uint32_t rfif_hab : 1; /**< rfif_hab reset state */
+ uint32_t rfif_rf : 1; /**< rfif_rf reset state */
+#else
+ uint32_t rfif_rf : 1;
+ uint32_t rfif_hab : 1;
+ uint32_t rfspi : 1;
+ uint32_t tile1dsp : 1;
+ uint32_t tile2dsp : 1;
+ uint32_t tile3dsp : 1;
+ uint32_t token : 1;
+ uint32_t reserved_7_31 : 25;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_reset1_state_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_reset1_state cvmx_endor_rstclk_reset1_state_t;
+
+/**
+ * cvmx_endor_rstclk_sw_intr_clr
+ */
+union cvmx_endor_rstclk_sw_intr_clr {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_sw_intr_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t timer_intr : 8; /**< reserved. */
+ uint32_t sw_intr : 24; /**< reserved. */
+#else
+ uint32_t sw_intr : 24;
+ uint32_t timer_intr : 8;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_sw_intr_clr_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_sw_intr_clr cvmx_endor_rstclk_sw_intr_clr_t;
+
+/**
+ * cvmx_endor_rstclk_sw_intr_set
+ */
+union cvmx_endor_rstclk_sw_intr_set {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_sw_intr_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t timer_intr : 8; /**< reserved. */
+ uint32_t sw_intr : 24; /**< reserved. */
+#else
+ uint32_t sw_intr : 24;
+ uint32_t timer_intr : 8;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_sw_intr_set_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_sw_intr_set cvmx_endor_rstclk_sw_intr_set_t;
+
+/**
+ * cvmx_endor_rstclk_sw_intr_status
+ */
+union cvmx_endor_rstclk_sw_intr_status {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_sw_intr_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t timer_intr : 8; /**< reserved. */
+ uint32_t sw_intr : 24; /**< reserved. */
+#else
+ uint32_t sw_intr : 24;
+ uint32_t timer_intr : 8;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_sw_intr_status_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_sw_intr_status cvmx_endor_rstclk_sw_intr_status_t;
+
+/**
+ * cvmx_endor_rstclk_time#_thrd
+ */
+union cvmx_endor_rstclk_timex_thrd {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_timex_thrd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t value : 24; /**< Timer threshold value */
+#else
+ uint32_t value : 24;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_timex_thrd_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_timex_thrd cvmx_endor_rstclk_timex_thrd_t;
+
+/**
+ * cvmx_endor_rstclk_timer_ctl
+ */
+union cvmx_endor_rstclk_timer_ctl {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_timer_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t intr_enb : 8; /**< Timer interrupt enables */
+ uint32_t reserved_3_7 : 5;
+ uint32_t enb : 1; /**< Timer enable */
+ uint32_t cont : 1; /**< Continuous (auto-restart) mode */
+ uint32_t clr : 1; /**< Clears the timer */
+#else
+ uint32_t clr : 1;
+ uint32_t cont : 1;
+ uint32_t enb : 1;
+ uint32_t reserved_3_7 : 5;
+ uint32_t intr_enb : 8;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_timer_ctl_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_timer_ctl cvmx_endor_rstclk_timer_ctl_t;
+
+/**
+ * cvmx_endor_rstclk_timer_intr_clr
+ */
+union cvmx_endor_rstclk_timer_intr_clr {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_timer_intr_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t clr : 8; /**< reserved. */
+#else
+ uint32_t clr : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_timer_intr_clr_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_timer_intr_clr cvmx_endor_rstclk_timer_intr_clr_t;
+
+/**
+ * cvmx_endor_rstclk_timer_intr_status
+ */
+union cvmx_endor_rstclk_timer_intr_status {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_timer_intr_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t status : 8; /**< reserved. */
+#else
+ uint32_t status : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_timer_intr_status_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_timer_intr_status cvmx_endor_rstclk_timer_intr_status_t;
+
+/**
+ * cvmx_endor_rstclk_timer_max
+ */
+union cvmx_endor_rstclk_timer_max {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_timer_max_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t value : 32; /**< reserved. */
+#else
+ uint32_t value : 32;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_timer_max_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_timer_max cvmx_endor_rstclk_timer_max_t;
+
+/**
+ * cvmx_endor_rstclk_timer_value
+ */
+union cvmx_endor_rstclk_timer_value {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_timer_value_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t value : 32; /**< reserved. */
+#else
+ uint32_t value : 32;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_timer_value_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_timer_value cvmx_endor_rstclk_timer_value_t;
+
+/**
+ * cvmx_endor_rstclk_version
+ */
+union cvmx_endor_rstclk_version {
+ uint32_t u32;
+ struct cvmx_endor_rstclk_version_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t major : 8; /**< Major version */
+ uint32_t minor : 8; /**< Minor version */
+#else
+ uint32_t minor : 8;
+ uint32_t major : 8;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_endor_rstclk_version_s cnf71xx;
+};
+typedef union cvmx_endor_rstclk_version cvmx_endor_rstclk_version_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-endor-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-eoi-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-eoi-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-eoi-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,690 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-eoi-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon eoi.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision: 69515 $<hr>
+ *
+ */
+#ifndef __CVMX_EOI_DEFS_H__
+#define __CVMX_EOI_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_BIST_CTL_STA CVMX_EOI_BIST_CTL_STA_FUNC()
+static inline uint64_t CVMX_EOI_BIST_CTL_STA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_BIST_CTL_STA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000118ull);
+}
+#else
+#define CVMX_EOI_BIST_CTL_STA (CVMX_ADD_IO_SEG(0x0001180013000118ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_CTL_STA CVMX_EOI_CTL_STA_FUNC()
+static inline uint64_t CVMX_EOI_CTL_STA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_CTL_STA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000000ull);
+}
+#else
+#define CVMX_EOI_CTL_STA (CVMX_ADD_IO_SEG(0x0001180013000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_DEF_STA0 CVMX_EOI_DEF_STA0_FUNC()
+static inline uint64_t CVMX_EOI_DEF_STA0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_DEF_STA0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000020ull);
+}
+#else
+#define CVMX_EOI_DEF_STA0 (CVMX_ADD_IO_SEG(0x0001180013000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_DEF_STA1 CVMX_EOI_DEF_STA1_FUNC()
+static inline uint64_t CVMX_EOI_DEF_STA1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_DEF_STA1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000028ull);
+}
+#else
+#define CVMX_EOI_DEF_STA1 (CVMX_ADD_IO_SEG(0x0001180013000028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_DEF_STA2 CVMX_EOI_DEF_STA2_FUNC()
+static inline uint64_t CVMX_EOI_DEF_STA2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_DEF_STA2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000030ull);
+}
+#else
+#define CVMX_EOI_DEF_STA2 (CVMX_ADD_IO_SEG(0x0001180013000030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_ECC_CTL CVMX_EOI_ECC_CTL_FUNC()
+static inline uint64_t CVMX_EOI_ECC_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_ECC_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000110ull);
+}
+#else
+#define CVMX_EOI_ECC_CTL (CVMX_ADD_IO_SEG(0x0001180013000110ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_ENDOR_BISTR_CTL_STA CVMX_EOI_ENDOR_BISTR_CTL_STA_FUNC()
+static inline uint64_t CVMX_EOI_ENDOR_BISTR_CTL_STA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_ENDOR_BISTR_CTL_STA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000120ull);
+}
+#else
+#define CVMX_EOI_ENDOR_BISTR_CTL_STA (CVMX_ADD_IO_SEG(0x0001180013000120ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_ENDOR_CLK_CTL CVMX_EOI_ENDOR_CLK_CTL_FUNC()
+static inline uint64_t CVMX_EOI_ENDOR_CLK_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_ENDOR_CLK_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000038ull);
+}
+#else
+#define CVMX_EOI_ENDOR_CLK_CTL (CVMX_ADD_IO_SEG(0x0001180013000038ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_ENDOR_CTL CVMX_EOI_ENDOR_CTL_FUNC()
+static inline uint64_t CVMX_EOI_ENDOR_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_ENDOR_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000100ull);
+}
+#else
+#define CVMX_EOI_ENDOR_CTL (CVMX_ADD_IO_SEG(0x0001180013000100ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_INT_ENA CVMX_EOI_INT_ENA_FUNC()
+static inline uint64_t CVMX_EOI_INT_ENA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_INT_ENA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000010ull);
+}
+#else
+#define CVMX_EOI_INT_ENA (CVMX_ADD_IO_SEG(0x0001180013000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_INT_STA CVMX_EOI_INT_STA_FUNC()
+static inline uint64_t CVMX_EOI_INT_STA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_INT_STA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000008ull);
+}
+#else
+#define CVMX_EOI_INT_STA (CVMX_ADD_IO_SEG(0x0001180013000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_IO_DRV CVMX_EOI_IO_DRV_FUNC()
+static inline uint64_t CVMX_EOI_IO_DRV_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_IO_DRV not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000018ull);
+}
+#else
+#define CVMX_EOI_IO_DRV (CVMX_ADD_IO_SEG(0x0001180013000018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_EOI_THROTTLE_CTL CVMX_EOI_THROTTLE_CTL_FUNC()
+static inline uint64_t CVMX_EOI_THROTTLE_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_EOI_THROTTLE_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180013000108ull);
+}
+#else
+#define CVMX_EOI_THROTTLE_CTL (CVMX_ADD_IO_SEG(0x0001180013000108ull))
+#endif
+
+/**
+ * cvmx_eoi_bist_ctl_sta
+ *
+ * EOI_BIST_CTL_STA = EOI BIST Status Register
+ *
+ * Description:
+ * This register controls EOI memory BIST and contains the BIST results of the EOI memories.
+ */
+union cvmx_eoi_bist_ctl_sta {
+ uint64_t u64;
+ struct cvmx_eoi_bist_ctl_sta_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t clear_bist : 1; /**< Clear BIST on the HCLK memories */
+ uint64_t start_bist : 1; /**< Starts BIST on the HCLK memories during 0-to-1
+ transition. */
+ uint64_t reserved_3_15 : 13;
+ uint64_t stdf : 1; /**< STDF Bist Status. */
+ uint64_t ppaf : 1; /**< PPAF Bist Status. */
+ uint64_t lddf : 1; /**< LDDF Bist Status. */
+#else
+ uint64_t lddf : 1;
+ uint64_t ppaf : 1;
+ uint64_t stdf : 1;
+ uint64_t reserved_3_15 : 13;
+ uint64_t start_bist : 1;
+ uint64_t clear_bist : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_eoi_bist_ctl_sta_s cnf71xx;
+};
+typedef union cvmx_eoi_bist_ctl_sta cvmx_eoi_bist_ctl_sta_t;
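+
+/* A minimal sketch of kicking off the HCLK memory BIST per the
+ * description above: START_BIST acts on a 0-to-1 transition, after which
+ * LDDF/PPAF/STDF report per-memory status. Completion polling and
+ * timeouts are omitted; this assumes the usual cvmx_read_csr()/
+ * cvmx_write_csr() accessors are in scope.
+ */
+static inline cvmx_eoi_bist_ctl_sta_t cvmx_eoi_run_bist(void)
+{
+    cvmx_eoi_bist_ctl_sta_t bist;
+
+    bist.u64 = cvmx_read_csr(CVMX_EOI_BIST_CTL_STA);
+    bist.s.start_bist = 0;   /* ensure a clean 0-to-1 transition */
+    cvmx_write_csr(CVMX_EOI_BIST_CTL_STA, bist.u64);
+    bist.s.start_bist = 1;   /* rising edge starts the BIST */
+    cvmx_write_csr(CVMX_EOI_BIST_CTL_STA, bist.u64);
+
+    bist.u64 = cvmx_read_csr(CVMX_EOI_BIST_CTL_STA); /* sample status bits */
+    return bist;
+}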
+
+/**
+ * cvmx_eoi_ctl_sta
+ *
+ * EOI_CTL_STA = EOI Configuration Control Register
+ * This register configures EOI.
+ */
+union cvmx_eoi_ctl_sta {
+ uint64_t u64;
+ struct cvmx_eoi_ctl_sta_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t ppaf_wm : 5; /**< Number of entries at which the PP Access FIFO
+ asserts full (back pressure) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t busy : 1; /**< 1: EOI is busy; 0: EOI is idle */
+ uint64_t rwam : 2; /**< Read/Write Arbitration Mode:
+ - 10: Reads have higher priority
+ - 01: Writes have higher priority
+ 00,11: Round-Robin between Reads and Writes */
+ uint64_t ena : 1; /**< When clear, all inbound DMA accesses are
+ dropped, as are all outbound read responses and
+ write commits. It must be set to 1'b1 for
+ normal access. */
+ uint64_t reset : 1; /**< EOI block Software Reset. */
+#else
+ uint64_t reset : 1;
+ uint64_t ena : 1;
+ uint64_t rwam : 2;
+ uint64_t busy : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t ppaf_wm : 5;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_eoi_ctl_sta_s cnf71xx;
+};
+typedef union cvmx_eoi_ctl_sta cvmx_eoi_ctl_sta_t;
+
+/**
+ * cvmx_eoi_def_sta0
+ *
+ * Note: Working settings tabulated for each corner.
+ * ================================
+ * Corner pctl nctl
+ * ===============================
+ * 1 26 22
+ * 2 30 28
+ * 3 32 31
+ * 4 23 19
+ * 5 27 24
+ * 6 29 27
+ * 7 21 17
+ * 8 25 22
+ * 9 27 24
+ * 10 29 24
+ * 11 34 31
+ * 12 36 35
+ * 13 26 21
+ * 14 31 27
+ * 15 33 30
+ * 16 23 18
+ * 17 28 24
+ * 18 30 27
+ * 19 21 17
+ * 20 27 25
+ * 21 29 28
+ * 22 21 17
+ * 23 25 22
+ * 24 27 25
+ * 25 19 15
+ * 26 23 20
+ * 27 25 22
+ * 28 24 24
+ * 29 28 31
+ * 30 30 35
+ * 31 21 21
+ * 32 25 27
+ * 33 27 30
+ * 34 19 18
+ * 35 23 24
+ * 36 25 27
+ * 37 29 19
+ * 38 33 25
+ * 39 36 28
+ * 40 25 17
+ * 41 30 22
+ * 42 32 25
+ * 43 23 15
+ * 44 27 20
+ * 45 29 22
+ * ===============================
+ *
+ * EOI_DEF_STA0 = EOI Defect Status Register 0
+ *
+ * Register to hold repairout 0/1/2
+ */
+union cvmx_eoi_def_sta0 {
+ uint64_t u64;
+ struct cvmx_eoi_def_sta0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t rout2 : 18; /**< Repairout2 */
+ uint64_t rout1 : 18; /**< Repairout1 */
+ uint64_t rout0 : 18; /**< Repairout0 */
+#else
+ uint64_t rout0 : 18;
+ uint64_t rout1 : 18;
+ uint64_t rout2 : 18;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_eoi_def_sta0_s cnf71xx;
+};
+typedef union cvmx_eoi_def_sta0 cvmx_eoi_def_sta0_t;
+
+/**
+ * cvmx_eoi_def_sta1
+ *
+ * EOI_DEF_STA1 = EOI Defect Status Register 1
+ *
+ * Register to hold repairout 3/4/5
+ */
+union cvmx_eoi_def_sta1 {
+ uint64_t u64;
+ struct cvmx_eoi_def_sta1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t rout5 : 18; /**< Repairout5 */
+ uint64_t rout4 : 18; /**< Repairout4 */
+ uint64_t rout3 : 18; /**< Repairout3 */
+#else
+ uint64_t rout3 : 18;
+ uint64_t rout4 : 18;
+ uint64_t rout5 : 18;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_eoi_def_sta1_s cnf71xx;
+};
+typedef union cvmx_eoi_def_sta1 cvmx_eoi_def_sta1_t;
+
+/**
+ * cvmx_eoi_def_sta2
+ *
+ * EOI_DEF_STA2 = EOI Defect Status Register 2
+ *
+ * Register to hold repairout 6 and toomanydefects.
+ */
+union cvmx_eoi_def_sta2 {
+ uint64_t u64;
+ struct cvmx_eoi_def_sta2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t toomany : 1; /**< Toomanydefects */
+ uint64_t reserved_18_23 : 6;
+ uint64_t rout6 : 18; /**< Repairout6 */
+#else
+ uint64_t rout6 : 18;
+ uint64_t reserved_18_23 : 6;
+ uint64_t toomany : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_eoi_def_sta2_s cnf71xx;
+};
+typedef union cvmx_eoi_def_sta2 cvmx_eoi_def_sta2_t;
+
+/**
+ * cvmx_eoi_ecc_ctl
+ *
+ * EOI_ECC_CTL = EOI ECC Control Register
+ *
+ * Description:
+ * This register enables ECC for each individual internal memory that requires ECC. For debug purposes, it can also
+ * cause 1 or 2 bits to be flipped in the ECC data.
+ */
+union cvmx_eoi_ecc_ctl {
+ uint64_t u64;
+ struct cvmx_eoi_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t rben : 1; /**< 1: ECC Enable for read buffer
+ - 0: ECC Enable for instruction buffer */
+ uint64_t rbsf : 2; /**< read buffer ecc syndrome flip
+ 2'b00 : No Error Generation
+ 2'b10, 2'b01: Flip 1 bit
+ 2'b11 : Flip 2 bits */
+#else
+ uint64_t rbsf : 2;
+ uint64_t rben : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_eoi_ecc_ctl_s cnf71xx;
+};
+typedef union cvmx_eoi_ecc_ctl cvmx_eoi_ecc_ctl_t;
+
+/**
+ * cvmx_eoi_endor_bistr_ctl_sta
+ *
+ * EOI_ENDOR_BISTR_CTL_STA = EOI BIST/BISR Control Status Register
+ *
+ * Description:
+ * This register controls BIST/BISR and holds the BIST result of the EOI memories.
+ */
+union cvmx_eoi_endor_bistr_ctl_sta {
+ uint64_t u64;
+ struct cvmx_eoi_endor_bistr_ctl_sta_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t bisr_done : 1; /**< Endor DSP Memory BISR Done Status: 1 - done;
+ 0 - Not done. */
+ uint64_t failed : 1; /**< Bist/Bisr Status: 1 - failed; 0 - Not failed. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t bisr_hr : 1; /**< BISR Hardrepair */
+ uint64_t bisr_dir : 1; /**< BISR Direction: 0 = input repair packets;
+ 1 = output defect packets. */
+ uint64_t start_bist : 1; /**< Start Bist */
+#else
+ uint64_t start_bist : 1;
+ uint64_t bisr_dir : 1;
+ uint64_t bisr_hr : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t failed : 1;
+ uint64_t bisr_done : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_eoi_endor_bistr_ctl_sta_s cnf71xx;
+};
+typedef union cvmx_eoi_endor_bistr_ctl_sta cvmx_eoi_endor_bistr_ctl_sta_t;
+
+/**
+ * cvmx_eoi_endor_clk_ctl
+ *
+ * EOI_ENDOR_CLK_CTL = EOI Endor Clock Control
+ *
+ * This register controls the generation of the Endor DSP and HAB clocks.
+ */
+union cvmx_eoi_endor_clk_ctl {
+ uint64_t u64;
+ struct cvmx_eoi_endor_clk_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t habclk_sel : 1; /**< HAB CLK select
+ 0x0: HAB CLK select from PHY_PLL output from HAB PS
+ 0x1: HAB CLK select from DDR_PLL output from HAB PS */
+ uint64_t reserved_26_26 : 1;
+ uint64_t dsp_div_reset : 1; /**< DSP postscalar divider reset */
+ uint64_t dsp_ps_en : 3; /**< DSP postscalar divide ratio
+ Determines the DSP CK speed.
+ 0x0 : Divide DSP PLL output by 1
+ 0x1 : Divide DSP PLL output by 2
+ 0x2 : Divide DSP PLL output by 3
+ 0x3 : Divide DSP PLL output by 4
+ 0x4 : Divide DSP PLL output by 6
+ 0x5 : Divide DSP PLL output by 8
+ 0x6 : Divide DSP PLL output by 12
+ 0x7 : Divide DSP PLL output by 12
+ DSP_PS_EN is not used when DSP_DIV_RESET = 1 */
+ uint64_t hab_div_reset : 1; /**< HAB postscalar divider reset */
+ uint64_t hab_ps_en : 3; /**< HAB postscalar divide ratio
+ Determines the HAB CK speed.
+ 0x0 : Divide HAB PLL output by 1
+ 0x1 : Divide HAB PLL output by 2
+ 0x2 : Divide HAB PLL output by 3
+ 0x3 : Divide HAB PLL output by 4
+ 0x4 : Divide HAB PLL output by 6
+ 0x5 : Divide HAB PLL output by 8
+ 0x6 : Divide HAB PLL output by 12
+ 0x7 : Divide HAB PLL output by 12
+ HAB_PS_EN is not used when HAB_DIV_RESET = 1 */
+ uint64_t diffamp : 4; /**< PLL diffamp input transconductance */
+ uint64_t cps : 3; /**< PLL charge-pump current */
+ uint64_t cpb : 3; /**< PLL charge-pump current */
+ uint64_t reset_n : 1; /**< PLL reset */
+ uint64_t clkf : 7; /**< Multiply reference by CLKF
+ 32 <= CLKF <= 64
+ PHY PLL frequency = 50 * CLKF
+ min = 1.6 GHz, max = 3.2 GHz */
+#else
+ uint64_t clkf : 7;
+ uint64_t reset_n : 1;
+ uint64_t cpb : 3;
+ uint64_t cps : 3;
+ uint64_t diffamp : 4;
+ uint64_t hab_ps_en : 3;
+ uint64_t hab_div_reset : 1;
+ uint64_t dsp_ps_en : 3;
+ uint64_t dsp_div_reset : 1;
+ uint64_t reserved_26_26 : 1;
+ uint64_t habclk_sel : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_eoi_endor_clk_ctl_s cnf71xx;
+};
+typedef union cvmx_eoi_endor_clk_ctl cvmx_eoi_endor_clk_ctl_t;
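+
+/* A minimal sketch of the DSP/HAB postscalar encoding tabulated above
+ * (0..7 -> divide by 1, 2, 3, 4, 6, 8, 12, 12). The helper name is
+ * illustrative only.
+ */
+static inline unsigned cvmx_eoi_endor_ps_divisor(unsigned ps_en)
+{
+    static const unsigned char div[8] = { 1, 2, 3, 4, 6, 8, 12, 12 };
+    return div[ps_en & 7];
+}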
+
+/**
+ * cvmx_eoi_endor_ctl
+ *
+ * EOI_ENDOR_CTL_STA = Endor Control Register
+ * This register controls Endor phy reset and access.
+ */
+union cvmx_eoi_endor_ctl {
+ uint64_t u64;
+ struct cvmx_eoi_endor_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t r_emod : 2; /**< Endian format for data read from the L2C.
+ IN: A-B-C-D-E-F-G-H
+ OUT0: A-B-C-D-E-F-G-H
+ OUT1: H-G-F-E-D-C-B-A
+ OUT2: D-C-B-A-H-G-F-E
+ OUT3: E-F-G-H-A-B-C-D */
+ uint64_t w_emod : 2; /**< Endian format for data written to the L2C.
+ IN: A-B-C-D-E-F-G-H
+ OUT0: A-B-C-D-E-F-G-H
+ OUT1: H-G-F-E-D-C-B-A
+ OUT2: D-C-B-A-H-G-F-E
+ OUT3: E-F-G-H-A-B-C-D */
+ uint64_t inv_rsl_ra2 : 1; /**< Invert RSL CSR read address bit 2. */
+ uint64_t inv_rsl_wa2 : 1; /**< Invert RSL CSR write address bit 2. */
+ uint64_t inv_pp_ra2 : 1; /**< Invert PP CSR read address bit 2. */
+ uint64_t inv_pp_wa2 : 1; /**< Invert PP CSR write address bit 2. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t reset : 1; /**< Endor block software reset. After hardware reset,
+ this bit is set to 1'b1, which puts Endor into the reset
+ state. Software must clear this bit to use Endor. */
+#else
+ uint64_t reset : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t inv_pp_wa2 : 1;
+ uint64_t inv_pp_ra2 : 1;
+ uint64_t inv_rsl_wa2 : 1;
+ uint64_t inv_rsl_ra2 : 1;
+ uint64_t w_emod : 2;
+ uint64_t r_emod : 2;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_eoi_endor_ctl_s cnf71xx;
+};
+typedef union cvmx_eoi_endor_ctl cvmx_eoi_endor_ctl_t;
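+
+/* A minimal sketch of releasing Endor from its power-on reset state per
+ * the RESET bit description above, using the CVMX_EOI_ENDOR_CTL address
+ * defined earlier in this header; assumes the usual cvmx_read_csr()/
+ * cvmx_write_csr() accessors are in scope.
+ */
+static inline void cvmx_eoi_endor_release_reset(void)
+{
+    cvmx_eoi_endor_ctl_t ctl;
+
+    ctl.u64 = cvmx_read_csr(CVMX_EOI_ENDOR_CTL);
+    ctl.s.reset = 0;   /* clear software reset so Endor can be used */
+    cvmx_write_csr(CVMX_EOI_ENDOR_CTL, ctl.u64);
+}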
+
+/**
+ * cvmx_eoi_int_ena
+ *
+ * EOI_INT_ENA = EOI Interrupt Enable Register
+ *
+ * Register to enable the individual interrupt sources corresponding to EOI_INT_STA
+ */
+union cvmx_eoi_int_ena {
+ uint64_t u64;
+ struct cvmx_eoi_int_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t rb_dbe : 1; /**< Read Buffer ECC DBE */
+ uint64_t rb_sbe : 1; /**< Read Buffer ECC SBE */
+#else
+ uint64_t rb_sbe : 1;
+ uint64_t rb_dbe : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_eoi_int_ena_s cnf71xx;
+};
+typedef union cvmx_eoi_int_ena cvmx_eoi_int_ena_t;
+
+/**
+ * cvmx_eoi_int_sta
+ *
+ * EOI_INT_STA = EOI Interrupt Status Register
+ *
+ * Summary of different bits of RSL interrupt status.
+ */
+union cvmx_eoi_int_sta {
+ uint64_t u64;
+ struct cvmx_eoi_int_sta_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t rb_dbe : 1; /**< Read Buffer ECC DBE */
+ uint64_t rb_sbe : 1; /**< Read Buffer ECC SBE */
+#else
+ uint64_t rb_sbe : 1;
+ uint64_t rb_dbe : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_eoi_int_sta_s cnf71xx;
+};
+typedef union cvmx_eoi_int_sta cvmx_eoi_int_sta_t;
+
+/**
+ * cvmx_eoi_io_drv
+ *
+ * EOI_IO_DRV = EOI Endor IO Drive Control
+ *
+ * Register to control Endor Phy IOs
+ */
+union cvmx_eoi_io_drv {
+ uint64_t u64;
+ struct cvmx_eoi_io_drv_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t rfif_p : 6; /**< RFIF output driver P-Mos control */
+ uint64_t rfif_n : 6; /**< RFIF output driver N-Mos control */
+ uint64_t gpo_p : 6; /**< GPO output driver P-Mos control */
+ uint64_t gpo_n : 6; /**< GPO output driver N-Mos control */
+#else
+ uint64_t gpo_n : 6;
+ uint64_t gpo_p : 6;
+ uint64_t rfif_n : 6;
+ uint64_t rfif_p : 6;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_eoi_io_drv_s cnf71xx;
+};
+typedef union cvmx_eoi_io_drv cvmx_eoi_io_drv_t;
+
+/**
+ * cvmx_eoi_throttle_ctl
+ *
+ * EOI_THROTTLE_CTL = EOI Throttle Control Register
+ * This register controls the number of outstanding EOI loads to the L2C. It is in the phy_clock domain.
+ */
+union cvmx_eoi_throttle_ctl {
+ uint64_t u64;
+ struct cvmx_eoi_throttle_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t std : 5; /**< Number of outstanding store data accepted by EOI on
+ AXI before backpressuring ADMA. The value must be
+ from 16 to 31 inclusive. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t stc : 2; /**< Number of outstanding L2C store commands accepted by
+ EOI on AXI before backpressuring ADMA. The value must
+ be from 1 to 3 inclusive. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t ldc : 4; /**< Number of outstanding L2C loads. The value must be
+ from 1 to 8 inclusive. */
+#else
+ uint64_t ldc : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t stc : 2;
+ uint64_t reserved_10_15 : 6;
+ uint64_t std : 5;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } s;
+ struct cvmx_eoi_throttle_ctl_s cnf71xx;
+};
+typedef union cvmx_eoi_throttle_ctl cvmx_eoi_throttle_ctl_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-eoi-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
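As a usage sketch for the register definitions above: the read-modify-write below clears the Endor soft reset through the cvmx_eoi_endor_ctl_t union. It assumes the CVMX_EOI_ENDOR_CTL address macro (defined earlier in cvmx-eoi-defs.h) and the SDK's generic cvmx_read_csr()/cvmx_write_csr() accessors; illustrative only, not part of the diff.

    #include "cvmx.h"
    #include "cvmx-eoi-defs.h"

    /* Sketch: bring the Endor PHY out of soft reset. The reset bit is set
       to 1 by hardware reset; software must clear it before using Endor. */
    static void eoi_endor_release_reset(void)
    {
        cvmx_eoi_endor_ctl_t ctl;
        ctl.u64 = cvmx_read_csr(CVMX_EOI_ENDOR_CTL); /* assumed address macro */
        ctl.s.reset = 0;                             /* clear soft reset */
        cvmx_write_csr(CVMX_EOI_ENDOR_CTL, ctl.u64);
    }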
Added: trunk/sys/contrib/octeon-sdk/cvmx-fau.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-fau.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-fau.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,605 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Interface to the hardware Fetch and Add Unit.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_FAU_H__
+#define __CVMX_FAU_H__
+
+#ifndef CVMX_DONT_INCLUDE_CONFIG
+#include "cvmx-config.h"
+#else
+typedef int cvmx_fau_reg_64_t;
+typedef int cvmx_fau_reg_32_t;
+typedef int cvmx_fau_reg_16_t;
+typedef int cvmx_fau_reg_8_t;
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Octeon Fetch and Add Unit (FAU)
+ */
+
+#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
+#define CVMX_FAU_BITS_SCRADDR 63,56
+#define CVMX_FAU_BITS_LEN 55,48
+#define CVMX_FAU_BITS_INEVAL 35,14
+#define CVMX_FAU_BITS_TAGWAIT 13,13
+#define CVMX_FAU_BITS_NOADD 13,13
+#define CVMX_FAU_BITS_SIZE 12,11
+#define CVMX_FAU_BITS_REGISTER 10,0
+
+
+typedef enum {
+ CVMX_FAU_OP_SIZE_8 = 0,
+ CVMX_FAU_OP_SIZE_16 = 1,
+ CVMX_FAU_OP_SIZE_32 = 2,
+ CVMX_FAU_OP_SIZE_64 = 3
+} cvmx_fau_op_size_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct
+{
+ uint64_t error : 1;
+ int64_t value : 63;
+} cvmx_fau_tagwait64_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct
+{
+ uint64_t error : 1;
+ int32_t value : 31;
+} cvmx_fau_tagwait32_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct
+{
+ uint64_t error : 1;
+ int16_t value : 15;
+} cvmx_fau_tagwait16_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct
+{
+ uint64_t error : 1;
+ int8_t value : 7;
+} cvmx_fau_tagwait8_t;
+
+/**
+ * Asynchronous tagwait return definition. If a timeout occurs,
+ * the error bit will be set. Otherwise the value of the
+ * register before the update will be returned.
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+ uint64_t invalid: 1;
+ uint64_t data :63; /* unpredictable if invalid is set */
+ } s;
+} cvmx_fau_async_tagwait_result_t;
+
+/**
+ * @INTERNAL
+ * Builds a store I/O address for writing to the FAU
+ *
+ * @param noadd 0 = Store value is atomically added to the current value
+ * 1 = Store value is atomically written over the current value
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
+ * @return Address to store for atomic update
+ */
+static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
+{
+ return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
+ cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
+ cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
+}
+
+/**
+ * @INTERNAL
+ * Builds an I/O address for accessing the FAU
+ *
+ * @param tagwait Should the atomic add wait for the current tag switch
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to add.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
+ * @return Address to read from for atomic update
+ */
+static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg, int64_t value)
+{
+ return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
+ cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
+ cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
+ cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
+}
+
+/**
+ * Perform an atomic 64 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Value of the register before the update
+ */
+static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
+{
+ return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 32 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Value of the register before the update
+ */
+static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
+{
+ return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 16 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to add.
+ * @return Value of the register before the update
+ */
+static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
+{
+ return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 8 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to add.
+ * @return Value of the register before the update
+ */
+static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 64 bit add after the current tag switch
+ * completes
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait64_t cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
+{
+ union
+ {
+ uint64_t i64;
+ cvmx_fau_tagwait64_t t;
+ } result;
+ result.i64 = cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
+
+/**
+ * Perform an atomic 32 bit add after the current tag switch
+ * completes
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait32_t cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
+{
+ union
+ {
+ uint64_t i32;
+ cvmx_fau_tagwait32_t t;
+ } result;
+ result.i32 = cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
+
+/**
+ * Perform an atomic 16 bit add after the current tag switch
+ * completes
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to add.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait16_t cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
+{
+ union
+ {
+ uint64_t i16;
+ cvmx_fau_tagwait16_t t;
+ } result;
+ result.i16 = cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
+
+/**
+ * Perform an atomic 8 bit add after the current tag switch
+ * completes
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to add.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait8_t cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ union
+ {
+ uint64_t i8;
+ cvmx_fau_tagwait8_t t;
+ } result;
+ result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
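+
+/* Usage sketch for the tagwait variants above (illustrative only; a real
+   register number comes from cvmx-config.h):
+
+       cvmx_fau_tagwait64_t r = cvmx_fau_tagwait_fetch_and_add64(reg, 1);
+       if (r.error)
+           ;  // tag switch timed out; handle the error
+       else
+           ;  // r.value holds the register contents before the add
+*/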
+
+/**
+ * @INTERNAL
+ * Builds I/O data for async operations
+ *
+ * @param scraddr Scratch pad byte address to write to. Must be 8 byte aligned
+ * @param value Signed value to add.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
+ * @param tagwait Should the atomic add wait for the current tag switch
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
+ * @param size The size of the operation:
+ * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
+ * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
+ * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
+ * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
+ * @return Data to write using cvmx_send_single
+ */
+static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value, uint64_t tagwait,
+ cvmx_fau_op_size_t size, uint64_t reg)
+{
+ return (CVMX_FAU_LOAD_IO_ADDRESS |
+ cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr>>3) |
+ cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
+ cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
+ cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
+ cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
+ cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
+}
+
+/**
+ * Perform an async atomic 64 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
+}
+
+/**
+ * Perform an async atomic 32 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
+}
+
+/**
+ * Perform an async atomic 16 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
+}
+
+/**
+ * Perform an async atomic 8 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
+}
+
+/**
+ * Perform an async atomic 64 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * If a timeout occurs, the error bit (63) will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
+}
+
+/**
+ * Perform an async atomic 32 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * If a timeout occurs, the error bit (63) will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
+}
+
+/**
+ * Perform an async atomic 16 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * If a timeout occurs, the error bit (63) will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
+}
+
+/**
+ * Perform an async atomic 8 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * If a timeout occurs, the error bit (63) will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
+}
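+
+/* Usage sketch for the async variants above. CVMX_SYNCIOBDMA (cvmx-asm.h)
+   waits for outstanding IOBDMA responses and cvmx_scratch_read64() (from
+   the SDK's cvmx-scratch.h) reads the local scratchpad; scraddr and reg
+   are placeholders:
+
+       cvmx_fau_async_fetch_and_add64(scraddr, reg, 1);
+       ...                  // other work overlaps with the FAU access
+       CVMX_SYNCIOBDMA;     // wait for the response to land in scratch
+       int64_t old = cvmx_scratch_read64(scraddr);
+*/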
+
+/**
+ * Perform an atomic 64 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
+{
+ cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 32 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
+{
+ cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 16 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
+{
+ cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 8 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 64 bit write
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
+{
+ cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
+}
+
+/**
+ * Perform an atomic 32 bit write
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
+{
+ cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
+}
+
+/**
+ * Perform an atomic 16 bit write
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
+{
+ cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
+}
+
+/**
+ * Perform an atomic 8 bit write
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_FAU_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-fau.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
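A minimal sketch of the synchronous calls declared above, assuming a 64-bit FAU register at byte offset 0 (real register layouts come from cvmx-config.h); illustrative only, not part of the diff:

    #include "cvmx.h"
    #include "cvmx-fau.h"

    /* Hypothetical 64-bit FAU register at byte offset 0 of the FAU space. */
    #define PACKET_COUNTER ((cvmx_fau_reg_64_t)0)

    static void counter_demo(void)
    {
        cvmx_fau_atomic_write64(PACKET_COUNTER, 0);  /* reset the counter */
        cvmx_fau_atomic_add64(PACKET_COUNTER, 1);    /* add without a result */
        /* add and get the pre-update value back */
        int64_t before = cvmx_fau_fetch_and_add64(PACKET_COUNTER, 1);
        (void)before;
    }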
Added: trunk/sys/contrib/octeon-sdk/cvmx-flash.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-flash.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-flash.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,675 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file provides bootbus flash operations
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ *
+ */
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-flash.h"
+
+#define MAX_NUM_FLASH_CHIPS 8 /* Maximum number of flash chips */
+#define MAX_NUM_REGIONS 8 /* Maximum number of block regions per chip */
+#define DEBUG 1
+
+#define CFI_CMDSET_NONE 0
+#define CFI_CMDSET_INTEL_EXTENDED 1
+#define CFI_CMDSET_AMD_STANDARD 2
+#define CFI_CMDSET_INTEL_STANDARD 3
+#define CFI_CMDSET_AMD_EXTENDED 4
+#define CFI_CMDSET_MITSU_STANDARD 256
+#define CFI_CMDSET_MITSU_EXTENDED 257
+#define CFI_CMDSET_SST 258
+
+typedef struct
+{
+ void * base_ptr; /**< Memory pointer to start of flash */
+ int is_16bit; /**< Chip is 16bits wide in 8bit mode */
+ uint16_t vendor; /**< Vendor ID of Chip */
+ int size; /**< Size of the chip in bytes */
+ uint64_t erase_timeout; /**< Erase timeout in cycles */
+ uint64_t write_timeout; /**< Write timeout in cycles */
+ int num_regions; /**< Number of block regions */
+ cvmx_flash_region_t region[MAX_NUM_REGIONS];
+} cvmx_flash_t;
+
+static CVMX_SHARED cvmx_flash_t flash_info[MAX_NUM_FLASH_CHIPS];
+static CVMX_SHARED cvmx_spinlock_t flash_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
+
+
+/**
+ * @INTERNAL
+ * Read a byte from flash
+ *
+ * @param chip_id Chip to read from
+ * @param offset Offset into the chip
+ * @return Value read
+ */
+static uint8_t __cvmx_flash_read8(int chip_id, int offset)
+{
+ return *(volatile uint8_t *)(flash_info[chip_id].base_ptr + offset);
+}
+
+
+/**
+ * @INTERNAL
+ * Read a byte from flash (for commands)
+ *
+ * @param chip_id Chip to read from
+ * @param offset Offset into the chip
+ * @return Value read
+ */
+static uint8_t __cvmx_flash_read_cmd(int chip_id, int offset)
+{
+ if (flash_info[chip_id].is_16bit)
+ offset<<=1;
+ return __cvmx_flash_read8(chip_id, offset);
+}
+
+
+/**
+ * @INTERNAL
+ * Read 16bits from flash (for commands)
+ *
+ * @param chip_id Chip to read from
+ * @param offset Offset into the chip
+ * @return Value read
+ */
+static uint16_t __cvmx_flash_read_cmd16(int chip_id, int offset)
+{
+ uint16_t v = __cvmx_flash_read_cmd(chip_id, offset);
+ v |= __cvmx_flash_read_cmd(chip_id, offset + 1)<<8;
+ return v;
+}
+
+
+/**
+ * @INTERNAL
+ * Write a byte to flash
+ *
+ * @param chip_id Chip to write to
+ * @param offset Offset into the chip
+ * @param data Value to write
+ */
+static void __cvmx_flash_write8(int chip_id, int offset, uint8_t data)
+{
+ volatile uint8_t *flash_ptr = (volatile uint8_t *)flash_info[chip_id].base_ptr;
+ flash_ptr[offset] = data;
+}
+
+
+/**
+ * @INTERNAL
+ * Write a byte to flash (for commands)
+ *
+ * @param chip_id Chip to write to
+ * @param offset Offset into the chip
+ * @param data Value to write
+ */
+static void __cvmx_flash_write_cmd(int chip_id, int offset, uint8_t data)
+{
+ volatile uint8_t *flash_ptr = (volatile uint8_t *)flash_info[chip_id].base_ptr;
+ flash_ptr[offset<<flash_info[chip_id].is_16bit] = data;
+}
+
+
+/**
+ * @INTERNAL
+ * Query an address to see if a CFI flash chip is present.
+ *
+ * @param chip_id Chip ID data to fill in if the chip is there
+ * @param base_ptr Memory pointer to the start address to query
+ * @return Zero on success, Negative on failure
+ */
+static int __cvmx_flash_query_cfi(int chip_id, void *base_ptr)
+{
+ int region;
+ cvmx_flash_t *flash = flash_info + chip_id;
+
+ /* Set the minimum needed for the read and write primitives to work */
+ flash->base_ptr = base_ptr;
+ flash->is_16bit = 1; /* FIXME: Currently assumes the chip is 16bits */
+
+ /* Put flash in CFI query mode */
+ __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0); /* Reset the flash chip */
+ __cvmx_flash_write_cmd(chip_id, 0x55, 0x98);
+
+ /* Make sure we get the QRY response we should */
+ if ((__cvmx_flash_read_cmd(chip_id, 0x10) != 'Q') ||
+ (__cvmx_flash_read_cmd(chip_id, 0x11) != 'R') ||
+ (__cvmx_flash_read_cmd(chip_id, 0x12) != 'Y'))
+ {
+ flash->base_ptr = NULL;
+ return -1;
+ }
+
+ /* Read the 16bit vendor ID */
+ flash->vendor = __cvmx_flash_read_cmd16(chip_id, 0x13);
+
+ /* Read the write timeout. The typical timeout in microseconds (us) is
+ 2^(the value at 0x1f). The worst case is that times 2^(the value at 0x23) */
+ flash->write_timeout = 1ull << (__cvmx_flash_read_cmd(chip_id, 0x1f) +
+ __cvmx_flash_read_cmd(chip_id, 0x23));
+
+ /* Read the erase timeout. The typical timeout in milliseconds (ms) is
+ 2^(the value at 0x21). The worst case is that times 2^(the value at 0x25) */
+ flash->erase_timeout = 1ull << (__cvmx_flash_read_cmd(chip_id, 0x21) +
+ __cvmx_flash_read_cmd(chip_id, 0x25));
+
+ /* Get the flash size. This is 2^0x27 */
+ flash->size = 1<<__cvmx_flash_read_cmd(chip_id, 0x27);
+
+ /* Get the number of different sized block regions from 0x2c */
+ flash->num_regions = __cvmx_flash_read_cmd(chip_id, 0x2c);
+
+ int start_offset = 0;
+ /* Loop through all regions get information about each */
+ for (region=0; region<flash->num_regions; region++)
+ {
+ cvmx_flash_region_t *rgn_ptr = flash->region + region;
+ rgn_ptr->start_offset = start_offset;
+
+ /* The number of blocks in each region is a 16 bit little endian
+ field. It is encoded at 0x2d + region*4 as (blocks-1) */
+ uint16_t blocks = __cvmx_flash_read_cmd16(chip_id, 0x2d + region*4);
+ rgn_ptr->num_blocks = 1u + blocks;
+
+ /* The size of each block is a 16 bit little endian field. It
+ is encoded at 0x2d + region*4 + 2 as (size/256). Zero is a special
+ case representing 128 */
+ uint16_t size = __cvmx_flash_read_cmd16(chip_id, 0x2d + region*4 + 2);
+ if (size == 0)
+ rgn_ptr->block_size = 128;
+ else
+ rgn_ptr->block_size = 256u * size;
+
+ start_offset += rgn_ptr->block_size * rgn_ptr->num_blocks;
+ }
+
+ /* Take the chip out of CFI query mode */
+ switch (flash_info[chip_id].vendor)
+ {
+ case CFI_CMDSET_AMD_STANDARD:
+ __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0);
+ /* fall through */
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ __cvmx_flash_write_cmd(chip_id, 0x00, 0xff);
+ break;
+ }
+
+ /* Convert the timeouts to cycles */
+ flash->write_timeout *= cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000;
+ flash->erase_timeout *= cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000;
+
+#if DEBUG
+ /* Print the information about the chip */
+ cvmx_dprintf("cvmx-flash: Base pointer: %p\n"
+ " Vendor: 0x%04x\n"
+ " Size: %d bytes\n"
+ " Num regions: %d\n"
+ " Erase timeout: %llu cycles\n"
+ " Write timeout: %llu cycles\n",
+ flash->base_ptr,
+ (unsigned int)flash->vendor,
+ flash->size,
+ flash->num_regions,
+ (unsigned long long)flash->erase_timeout,
+ (unsigned long long)flash->write_timeout);
+
+ for (region=0; region<flash->num_regions; region++)
+ {
+ cvmx_dprintf(" Region %d: offset 0x%x, %d blocks, %d bytes/block\n",
+ region,
+ flash->region[region].start_offset,
+ flash->region[region].num_blocks,
+ flash->region[region].block_size);
+ }
+#endif
+
+ return 0;
+}
+
+
+/**
+ * Initialize the flash access library
+ */
+void cvmx_flash_initialize(void)
+{
+ int boot_region;
+ int chip_id = 0;
+
+ memset(flash_info, 0, sizeof(flash_info));
+
+ /* Loop through each boot bus chip select region */
+ for (boot_region=0; boot_region<MAX_NUM_FLASH_CHIPS; boot_region++)
+ {
+ cvmx_mio_boot_reg_cfgx_t region_cfg;
+ region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFG0 + boot_region*8);
+ /* Only try chip select regions that are enabled. This assumes the
+ bootloader already set up the flash */
+ if (region_cfg.s.en)
+ {
+ /* Convert the hardware address to a pointer. Note that the bootbus,
+ unlike memory, isn't 1:1 mapped in the simple exec */
+ void *base_ptr = cvmx_phys_to_ptr((region_cfg.s.base<<16) | 0xffffffff80000000ull);
+ if (__cvmx_flash_query_cfi(chip_id, base_ptr) == 0)
+ {
+ /* Valid CFI flash chip found */
+ chip_id++;
+ }
+ }
+ }
+
+ if (chip_id == 0)
+ cvmx_dprintf("cvmx-flash: No CFI chips found\n");
+}
+
+
+/**
+ * Return a pointer to the flash chip
+ *
+ * @param chip_id Chip ID to return
+ * @return NULL if the chip doesn't exist
+ */
+void *cvmx_flash_get_base(int chip_id)
+{
+ return flash_info[chip_id].base_ptr;
+}
+
+
+/**
+ * Return the number of erasable regions on the chip
+ *
+ * @param chip_id Chip to return info for
+ * @return Number of regions
+ */
+int cvmx_flash_get_num_regions(int chip_id)
+{
+ return flash_info[chip_id].num_regions;
+}
+
+
+/**
+ * Return information about a flash chip's region
+ *
+ * @param chip_id Chip to get info for
+ * @param region Region to get info for
+ * @return Region information
+ */
+const cvmx_flash_region_t *cvmx_flash_get_region_info(int chip_id, int region)
+{
+ return flash_info[chip_id].region + region;
+}
+
+
+/**
+ * Erase a block on the flash chip
+ *
+ * @param chip_id Chip to erase a block on
+ * @param region Region to erase a block in
+ * @param block Block number to erase
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_flash_erase_block(int chip_id, int region, int block)
+{
+ cvmx_spinlock_lock(&flash_lock);
+#if DEBUG
+ cvmx_dprintf("cvmx-flash: Erasing chip %d, region %d, block %d\n",
+ chip_id, region, block);
+#endif
+
+ int offset = flash_info[chip_id].region[region].start_offset +
+ block * flash_info[chip_id].region[region].block_size;
+
+ switch (flash_info[chip_id].vendor)
+ {
+ case CFI_CMDSET_AMD_STANDARD:
+ {
+ /* Send the erase sector command sequence */
+ __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0); /* Reset the flash chip */
+ __cvmx_flash_write_cmd(chip_id, 0x555, 0xaa);
+ __cvmx_flash_write_cmd(chip_id, 0x2aa, 0x55);
+ __cvmx_flash_write_cmd(chip_id, 0x555, 0x80);
+ __cvmx_flash_write_cmd(chip_id, 0x555, 0xaa);
+ __cvmx_flash_write_cmd(chip_id, 0x2aa, 0x55);
+ __cvmx_flash_write8(chip_id, offset, 0x30);
+
+ /* Loop checking status */
+ uint8_t status = __cvmx_flash_read8(chip_id, offset);
+ uint64_t start_cycle = cvmx_get_cycle();
+ while (1)
+ {
+ /* Read the status and xor it with the old status so we can
+ find toggling bits */
+ uint8_t old_status = status;
+ status = __cvmx_flash_read8(chip_id, offset);
+ uint8_t toggle = status ^ old_status;
+
+ /* Check if the erase in progress bit is toggling */
+ if (toggle & (1<<6))
+ {
+ /* Check hardware timeout */
+ if (status & (1<<5))
+ {
+ /* Chip has signalled a timeout. Reread the status */
+ old_status = __cvmx_flash_read8(chip_id, offset);
+ status = __cvmx_flash_read8(chip_id, offset);
+ toggle = status ^ old_status;
+
+ /* Check if the erase in progress bit is toggling */
+ if (toggle & (1<<6))
+ {
+ cvmx_dprintf("cvmx-flash: Hardware timeout erasing block\n");
+ cvmx_spinlock_unlock(&flash_lock);
+ return -1;
+ }
+ else
+ break; /* Not toggling, erase complete */
+ }
+ }
+ else
+ break; /* Not toggling, erase complete */
+
+ if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].erase_timeout)
+ {
+ cvmx_dprintf("cvmx-flash: Timeout erasing block\n");
+ cvmx_spinlock_unlock(&flash_lock);
+ return -1;
+ }
+ }
+
+ __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0); /* Reset the flash chip */
+ cvmx_spinlock_unlock(&flash_lock);
+ return 0;
+ }
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ {
+ /* Send the erase sector command sequence */
+ __cvmx_flash_write_cmd(chip_id, 0x00, 0xff); /* Reset the flash chip */
+ __cvmx_flash_write8(chip_id, offset, 0x20);
+ __cvmx_flash_write8(chip_id, offset, 0xd0);
+
+ /* Loop checking status */
+ uint8_t status = __cvmx_flash_read8(chip_id, offset);
+ uint64_t start_cycle = cvmx_get_cycle();
+ while ((status & 0x80) == 0)
+ {
+ if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].erase_timeout)
+ {
+ cvmx_dprintf("cvmx-flash: Timeout erasing block\n");
+ cvmx_spinlock_unlock(&flash_lock);
+ return -1;
+ }
+ status = __cvmx_flash_read8(chip_id, offset);
+ }
+
+ /* Check the final status */
+ if (status & 0x7f)
+ {
+ cvmx_dprintf("cvmx-flash: Hardware failure erasing block\n");
+ cvmx_spinlock_unlock(&flash_lock);
+ return -1;
+ }
+
+ __cvmx_flash_write_cmd(chip_id, 0x00, 0xff); /* Reset the flash chip */
+ cvmx_spinlock_unlock(&flash_lock);
+ return 0;
+ }
+ }
+
+ cvmx_dprintf("cvmx-flash: Unsupported flash vendor\n");
+ cvmx_spinlock_unlock(&flash_lock);
+ return -1;
+}
+
+
+/**
+ * Write a block on the flash chip
+ *
+ * @param chip_id Chip to write a block on
+ * @param region Region to write a block in
+ * @param block Block number to write
+ * @param data Data to write
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_flash_write_block(int chip_id, int region, int block, const void *data)
+{
+ cvmx_spinlock_lock(&flash_lock);
+#if DEBUG
+ cvmx_dprintf("cvmx-flash: Writing chip %d, region %d, block %d\n",
+ chip_id, region, block);
+#endif
+ int offset = flash_info[chip_id].region[region].start_offset +
+ block * flash_info[chip_id].region[region].block_size;
+ int len = flash_info[chip_id].region[region].block_size;
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ switch (flash_info[chip_id].vendor)
+ {
+ case CFI_CMDSET_AMD_STANDARD:
+ {
+ /* Loop through one byte at a time */
+ while (len--)
+ {
+ /* Send the program sequence */
+ __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0); /* Reset the flash chip */
+ __cvmx_flash_write_cmd(chip_id, 0x555, 0xaa);
+ __cvmx_flash_write_cmd(chip_id, 0x2aa, 0x55);
+ __cvmx_flash_write_cmd(chip_id, 0x555, 0xa0);
+ __cvmx_flash_write8(chip_id, offset, *ptr);
+
+ /* Loop polling for status */
+ uint64_t start_cycle = cvmx_get_cycle();
+ while (1)
+ {
+ uint8_t status = __cvmx_flash_read8(chip_id, offset);
+ if (((status ^ *ptr) & (1<<7)) == 0)
+ break; /* Data matches, this byte is done */
+ else if (status & (1<<5))
+ {
+ /* Hardware timeout, recheck status */
+ status = __cvmx_flash_read8(chip_id, offset);
+ if (((status ^ *ptr) & (1<<7)) == 0)
+ break; /* Data matches, this byte is done */
+ else
+ {
+ cvmx_dprintf("cvmx-flash: Hardware write timeout\n");
+ cvmx_spinlock_unlock(&flash_lock);
+ return -1;
+ }
+ }
+
+ if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].write_timeout)
+ {
+ cvmx_dprintf("cvmx-flash: Timeout writing block\n");
+ cvmx_spinlock_unlock(&flash_lock);
+ return -1;
+ }
+ }
+
+ /* Increment to the next byte */
+ ptr++;
+ offset++;
+ }
+
+ __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0); /* Reset the flash chip */
+ cvmx_spinlock_unlock(&flash_lock);
+ return 0;
+ }
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ {
+ cvmx_dprintf("%s:%d len=%d\n", __FUNCTION__, __LINE__, len);
+ /* Loop through one byte at a time */
+ while (len--)
+ {
+ /* Send the program sequence */
+ __cvmx_flash_write_cmd(chip_id, 0x00, 0xff); /* Reset the flash chip */
+ __cvmx_flash_write8(chip_id, offset, 0x40);
+ __cvmx_flash_write8(chip_id, offset, *ptr);
+
+ /* Loop polling for status */
+ uint8_t status = __cvmx_flash_read8(chip_id, offset);
+ uint64_t start_cycle = cvmx_get_cycle();
+ while ((status & 0x80) == 0)
+ {
+ if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].write_timeout)
+ {
+ cvmx_dprintf("cvmx-flash: Timeout writing block\n");
+ cvmx_spinlock_unlock(&flash_lock);
+ return -1;
+ }
+ status = __cvmx_flash_read8(chip_id, offset);
+ }
+
+ /* Check the final status */
+ if (status & 0x7f)
+ {
+ cvmx_dprintf("cvmx-flash: Hardware failure erasing block\n");
+ cvmx_spinlock_unlock(&flash_lock);
+ return -1;
+ }
+
+ /* Increment to the next byte */
+ ptr++;
+ offset++;
+ }
+ cvmx_dprintf("%s:%d\n", __FUNCTION__, __LINE__);
+
+ __cvmx_flash_write_cmd(chip_id, 0x00, 0xff); /* Reset the flash chip */
+ cvmx_spinlock_unlock(&flash_lock);
+ return 0;
+ }
+ }
+
+ cvmx_dprintf("cvmx-flash: Unsupported flash vendor\n");
+ cvmx_spinlock_unlock(&flash_lock);
+ return -1;
+}
+
+
+/**
+ * Erase and write data to a flash
+ *
+ * @param address Memory address to write to
+ * @param data Data to write
+ * @param len Length of the data
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_flash_write(void *address, const void *data, int len)
+{
+ int chip_id;
+
+ /* Find which chip controls this address. Don't allow the write to span
+ multiple chips */
+ for (chip_id=0; chip_id<MAX_NUM_FLASH_CHIPS; chip_id++)
+ {
+ if ((flash_info[chip_id].base_ptr <= address) &&
+ (flash_info[chip_id].base_ptr + flash_info[chip_id].size >= address + len))
+ break;
+ }
+
+ if (chip_id == MAX_NUM_FLASH_CHIPS)
+ {
+ cvmx_dprintf("cvmx-flash: Unable to find chip that contains address %p\n", address);
+ return -1;
+ }
+
+ cvmx_flash_t *flash = flash_info + chip_id;
+
+ /* Determine which block region we need to start writing to */
+ void *region_base = flash->base_ptr;
+ int region = 0;
+ while (region_base + flash->region[region].num_blocks * flash->region[region].block_size <= address)
+ {
+ region++;
+ region_base = flash->base_ptr + flash->region[region].start_offset;
+ }
+
+ /* Determine which block in the region to start at */
+ int block = (address - region_base) / flash->region[region].block_size;
+
+ /* Require all writes to start on block boundaries */
+ if (address != region_base + block*flash->region[region].block_size)
+ {
+ cvmx_dprintf("cvmx-flash: Write address not aligned on a block boundry\n");
+ return -1;
+ }
+
+ /* Loop until we're out of data */
+ while (len > 0)
+ {
+ /* Erase the current block */
+ if (cvmx_flash_erase_block(chip_id, region, block))
+ return -1;
+ /* Write the new data */
+ if (cvmx_flash_write_block(chip_id, region, block, data))
+ return -1;
+
+ /* Increment to the next block */
+ data += flash->region[region].block_size;
+ len -= flash->region[region].block_size;
+ block++;
+ if (block >= flash->region[region].num_blocks)
+ {
+ block = 0;
+ region++;
+ }
+ }
+
+ return 0;
+}
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-flash.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-flash.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-flash.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-flash.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,137 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file provides bootbus flash operations
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ *
+ */
+
+
+#ifndef __CVMX_FLASH_H__
+#define __CVMX_FLASH_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct
+{
+ int start_offset;
+ int block_size;
+ int num_blocks;
+} cvmx_flash_region_t;
+
+/**
+ * Initialize the flash access library
+ */
+void cvmx_flash_initialize(void);
+
+/**
+ * Return a pointer to the flash chip
+ *
+ * @param chip_id Chip ID to return
+ * @return NULL if the chip doesn't exist
+ */
+void *cvmx_flash_get_base(int chip_id);
+
+/**
+ * Return the number of erasable regions on the chip
+ *
+ * @param chip_id Chip to return info for
+ * @return Number of regions
+ */
+int cvmx_flash_get_num_regions(int chip_id);
+
+/**
+ * Return information about a flash chip's region
+ *
+ * @param chip_id Chip to get info for
+ * @param region Region to get info for
+ * @return Region information
+ */
+const cvmx_flash_region_t *cvmx_flash_get_region_info(int chip_id, int region);
+
+/**
+ * Erase a block on the flash chip
+ *
+ * @param chip_id Chip to erase a block on
+ * @param region Region to erase a block in
+ * @param block Block number to erase
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_flash_erase_block(int chip_id, int region, int block);
+
+/**
+ * Write a block on the flash chip
+ *
+ * @param chip_id Chip to write a block on
+ * @param region Region to write a block in
+ * @param block Block number to write
+ * @param data Data to write
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_flash_write_block(int chip_id, int region, int block, const void *data);
+
+/**
+ * Erase and write data to a flash
+ *
+ * @param address Memory address to write to
+ * @param data Data to write
+ * @param len Length of the data
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_flash_write(void *address, const void *data, int len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_FLASH_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-flash.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
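A sketch of the public API declared in cvmx-flash.h above: erase-and-write one block at the base of chip 0. Region geometry comes from cvmx_flash_get_region_info(), and cvmx_flash_write() requires the start address to be block aligned; illustrative only, not part of the diff:

    #include "cvmx.h"
    #include "cvmx-flash.h"

    static int flash_demo(const void *data)
    {
        cvmx_flash_initialize();                 /* probe CFI chips */
        void *base = cvmx_flash_get_base(0);
        if (base == NULL)
            return -1;                           /* no chip found */
        const cvmx_flash_region_t *rgn = cvmx_flash_get_region_info(0, 0);
        /* erases then programs exactly one block starting at the chip base */
        return cvmx_flash_write(base, data, rgn->block_size);
    }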
Added: trunk/sys/contrib/octeon-sdk/cvmx-fpa-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-fpa-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-fpa-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,2648 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-fpa-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon fpa.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_FPA_DEFS_H__
+#define __CVMX_FPA_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_FPA_ADDR_RANGE_ERROR CVMX_FPA_ADDR_RANGE_ERROR_FUNC()
+static inline uint64_t CVMX_FPA_ADDR_RANGE_ERROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_FPA_ADDR_RANGE_ERROR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180028000458ull);
+}
+#else
+#define CVMX_FPA_ADDR_RANGE_ERROR (CVMX_ADD_IO_SEG(0x0001180028000458ull))
+#endif
+#define CVMX_FPA_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011800280000E8ull))
+#define CVMX_FPA_CTL_STATUS (CVMX_ADD_IO_SEG(0x0001180028000050ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_FPA_FPF0_MARKS CVMX_FPA_FPF0_MARKS_FUNC()
+static inline uint64_t CVMX_FPA_FPF0_MARKS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_FPA_FPF0_MARKS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180028000000ull);
+}
+#else
+#define CVMX_FPA_FPF0_MARKS (CVMX_ADD_IO_SEG(0x0001180028000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_FPA_FPF0_SIZE CVMX_FPA_FPF0_SIZE_FUNC()
+static inline uint64_t CVMX_FPA_FPF0_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_FPA_FPF0_SIZE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180028000058ull);
+}
+#else
+#define CVMX_FPA_FPF0_SIZE (CVMX_ADD_IO_SEG(0x0001180028000058ull))
+#endif
+#define CVMX_FPA_FPF1_MARKS CVMX_FPA_FPFX_MARKS(1)
+#define CVMX_FPA_FPF2_MARKS CVMX_FPA_FPFX_MARKS(2)
+#define CVMX_FPA_FPF3_MARKS CVMX_FPA_FPFX_MARKS(3)
+#define CVMX_FPA_FPF4_MARKS CVMX_FPA_FPFX_MARKS(4)
+#define CVMX_FPA_FPF5_MARKS CVMX_FPA_FPFX_MARKS(5)
+#define CVMX_FPA_FPF6_MARKS CVMX_FPA_FPFX_MARKS(6)
+#define CVMX_FPA_FPF7_MARKS CVMX_FPA_FPFX_MARKS(7)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_FPA_FPF8_MARKS CVMX_FPA_FPF8_MARKS_FUNC()
+static inline uint64_t CVMX_FPA_FPF8_MARKS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_FPA_FPF8_MARKS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180028000240ull);
+}
+#else
+#define CVMX_FPA_FPF8_MARKS (CVMX_ADD_IO_SEG(0x0001180028000240ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_FPA_FPF8_SIZE CVMX_FPA_FPF8_SIZE_FUNC()
+static inline uint64_t CVMX_FPA_FPF8_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_FPA_FPF8_SIZE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180028000248ull);
+}
+#else
+#define CVMX_FPA_FPF8_SIZE (CVMX_ADD_IO_SEG(0x0001180028000248ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_FPA_FPFX_MARKS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset >= 1) && (offset <= 7))))))
+ cvmx_warn("CVMX_FPA_FPFX_MARKS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180028000008ull) + ((offset) & 7) * 8 - 8*1;
+}
+#else
+#define CVMX_FPA_FPFX_MARKS(offset) (CVMX_ADD_IO_SEG(0x0001180028000008ull) + ((offset) & 7) * 8 - 8*1)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_FPA_FPFX_SIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset >= 1) && (offset <= 7))))))
+ cvmx_warn("CVMX_FPA_FPFX_SIZE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180028000060ull) + ((offset) & 7) * 8 - 8*1;
+}
+#else
+#define CVMX_FPA_FPFX_SIZE(offset) (CVMX_ADD_IO_SEG(0x0001180028000060ull) + ((offset) & 7) * 8 - 8*1)
+#endif
+#define CVMX_FPA_INT_ENB (CVMX_ADD_IO_SEG(0x0001180028000048ull))
+#define CVMX_FPA_INT_SUM (CVMX_ADD_IO_SEG(0x0001180028000040ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_FPA_PACKET_THRESHOLD CVMX_FPA_PACKET_THRESHOLD_FUNC()
+static inline uint64_t CVMX_FPA_PACKET_THRESHOLD_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_FPA_PACKET_THRESHOLD not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180028000460ull);
+}
+#else
+#define CVMX_FPA_PACKET_THRESHOLD (CVMX_ADD_IO_SEG(0x0001180028000460ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_FPA_POOLX_END_ADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 8))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_FPA_POOLX_END_ADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180028000358ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_FPA_POOLX_END_ADDR(offset) (CVMX_ADD_IO_SEG(0x0001180028000358ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_FPA_POOLX_START_ADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 8))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_FPA_POOLX_START_ADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180028000258ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_FPA_POOLX_START_ADDR(offset) (CVMX_ADD_IO_SEG(0x0001180028000258ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_FPA_POOLX_THRESHOLD(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 8))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_FPA_POOLX_THRESHOLD(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180028000140ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_FPA_POOLX_THRESHOLD(offset) (CVMX_ADD_IO_SEG(0x0001180028000140ull) + ((offset) & 15) * 8)
+#endif
+#define CVMX_FPA_QUE0_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(0)
+#define CVMX_FPA_QUE1_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(1)
+#define CVMX_FPA_QUE2_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(2)
+#define CVMX_FPA_QUE3_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(3)
+#define CVMX_FPA_QUE4_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(4)
+#define CVMX_FPA_QUE5_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(5)
+#define CVMX_FPA_QUE6_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(6)
+#define CVMX_FPA_QUE7_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(7)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_FPA_QUE8_PAGE_INDEX CVMX_FPA_QUE8_PAGE_INDEX_FUNC()
+static inline uint64_t CVMX_FPA_QUE8_PAGE_INDEX_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_FPA_QUE8_PAGE_INDEX not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180028000250ull);
+}
+#else
+#define CVMX_FPA_QUE8_PAGE_INDEX (CVMX_ADD_IO_SEG(0x0001180028000250ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_FPA_QUEX_AVAILABLE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 8))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_FPA_QUEX_AVAILABLE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180028000098ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_FPA_QUEX_AVAILABLE(offset) (CVMX_ADD_IO_SEG(0x0001180028000098ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_FPA_QUEX_PAGE_INDEX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_FPA_QUEX_PAGE_INDEX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800280000F0ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_FPA_QUEX_PAGE_INDEX(offset) (CVMX_ADD_IO_SEG(0x00011800280000F0ull) + ((offset) & 7) * 8)
+#endif
+#define CVMX_FPA_QUE_ACT (CVMX_ADD_IO_SEG(0x0001180028000138ull))
+#define CVMX_FPA_QUE_EXP (CVMX_ADD_IO_SEG(0x0001180028000130ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_FPA_WART_CTL CVMX_FPA_WART_CTL_FUNC()
+static inline uint64_t CVMX_FPA_WART_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_FPA_WART_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800280000D8ull);
+}
+#else
+#define CVMX_FPA_WART_CTL (CVMX_ADD_IO_SEG(0x00011800280000D8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_FPA_WART_STATUS CVMX_FPA_WART_STATUS_FUNC()
+static inline uint64_t CVMX_FPA_WART_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_FPA_WART_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800280000E0ull);
+}
+#else
+#define CVMX_FPA_WART_STATUS (CVMX_ADD_IO_SEG(0x00011800280000E0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_FPA_WQE_THRESHOLD CVMX_FPA_WQE_THRESHOLD_FUNC()
+static inline uint64_t CVMX_FPA_WQE_THRESHOLD_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_FPA_WQE_THRESHOLD not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180028000468ull);
+}
+#else
+#define CVMX_FPA_WQE_THRESHOLD (CVMX_ADD_IO_SEG(0x0001180028000468ull))
+#endif
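+
+/* Editor's note -- usage sketch, not part of the generated register list:
+ * when CVMX_ENABLE_CSR_ADDRESS_CHECKING is defined, the accessors above
+ * validate the queue/pool offset against the running chip model before
+ * forming the CSR address, e.g.
+ *
+ *   uint64_t avail = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(2));
+ *
+ * An out-of-range offset (say, queue 8 on anything but a CN68XX) only
+ * triggers cvmx_warn() and still returns a masked address, so the warning
+ * should be treated as a programming error rather than a safe fallback.
+ */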
+
+/**
+ * cvmx_fpa_addr_range_error
+ *
+ * Space here reserved
+ *
+ * FPA_ADDR_RANGE_ERROR = FPA's Pool Address Range Error Information
+ *
+ * When an address is sent to a pool that does not fall between the start and end addresses specified by
+ * FPA_POOLX_START_ADDR and FPA_POOLX_END_ADDR, the information related to the failure is captured here.
+ * In addition FPA_INT_SUM[PADDR_E] will be set and this register will not be updated again until
+ * FPA_INT_SUM[PADDR_E] is cleared.
+ */
+union cvmx_fpa_addr_range_error {
+ uint64_t u64;
+ struct cvmx_fpa_addr_range_error_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t pool : 5; /**< Pool address sent to. */
+ uint64_t addr : 33; /**< Failing address. */
+#else
+ uint64_t addr : 33;
+ uint64_t pool : 5;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_fpa_addr_range_error_s cn61xx;
+ struct cvmx_fpa_addr_range_error_s cn66xx;
+ struct cvmx_fpa_addr_range_error_s cn68xx;
+ struct cvmx_fpa_addr_range_error_s cn68xxp1;
+ struct cvmx_fpa_addr_range_error_s cnf71xx;
+};
+typedef union cvmx_fpa_addr_range_error cvmx_fpa_addr_range_error_t;
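+
+/* Editor's note -- illustrative sketch of decoding a captured range error,
+ * assuming the CVMX_FPA_ADDR_RANGE_ERROR address macro defined earlier in
+ * this header and the SDK's cvmx_read_csr():
+ *
+ *   cvmx_fpa_addr_range_error_t err;
+ *   err.u64 = cvmx_read_csr(CVMX_FPA_ADDR_RANGE_ERROR);
+ *   cvmx_warn("FPA range error: pool %u, addr 0x%llx\n",
+ *             (unsigned)err.s.pool, (unsigned long long)err.s.addr);
+ *
+ * FPA_INT_SUM[PADDR_E] must then be cleared before the register will
+ * capture another failure.
+ */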
+
+/**
+ * cvmx_fpa_bist_status
+ *
+ * FPA_BIST_STATUS = BIST Status of FPA Memories
+ *
+ * The result of the BIST run on the FPA memories.
+ */
+union cvmx_fpa_bist_status {
+ uint64_t u64;
+ struct cvmx_fpa_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t frd : 1; /**< fpa_frd memory bist status. */
+ uint64_t fpf0 : 1; /**< fpa_fpf0 memory bist status. */
+ uint64_t fpf1 : 1; /**< fpa_fpf1 memory bist status. */
+ uint64_t ffr : 1; /**< fpa_ffr memory bist status. */
+ uint64_t fdr : 1; /**< fpa_fdr memory bist status. */
+#else
+ uint64_t fdr : 1;
+ uint64_t ffr : 1;
+ uint64_t fpf1 : 1;
+ uint64_t fpf0 : 1;
+ uint64_t frd : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_fpa_bist_status_s cn30xx;
+ struct cvmx_fpa_bist_status_s cn31xx;
+ struct cvmx_fpa_bist_status_s cn38xx;
+ struct cvmx_fpa_bist_status_s cn38xxp2;
+ struct cvmx_fpa_bist_status_s cn50xx;
+ struct cvmx_fpa_bist_status_s cn52xx;
+ struct cvmx_fpa_bist_status_s cn52xxp1;
+ struct cvmx_fpa_bist_status_s cn56xx;
+ struct cvmx_fpa_bist_status_s cn56xxp1;
+ struct cvmx_fpa_bist_status_s cn58xx;
+ struct cvmx_fpa_bist_status_s cn58xxp1;
+ struct cvmx_fpa_bist_status_s cn61xx;
+ struct cvmx_fpa_bist_status_s cn63xx;
+ struct cvmx_fpa_bist_status_s cn63xxp1;
+ struct cvmx_fpa_bist_status_s cn66xx;
+ struct cvmx_fpa_bist_status_s cn68xx;
+ struct cvmx_fpa_bist_status_s cn68xxp1;
+ struct cvmx_fpa_bist_status_s cnf71xx;
+};
+typedef union cvmx_fpa_bist_status cvmx_fpa_bist_status_t;
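+
+/* Editor's note -- boot-time check sketch; the SDK convention is that a set
+ * BIST bit reports a failing memory. This assumes the CVMX_FPA_BIST_STATUS
+ * address macro defined earlier in this header:
+ *
+ *   cvmx_fpa_bist_status_t bist;
+ *   bist.u64 = cvmx_read_csr(CVMX_FPA_BIST_STATUS);
+ *   if (bist.u64 & 0x1full)
+ *       cvmx_warn("FPA BIST failure: 0x%llx\n",
+ *                 (unsigned long long)bist.u64);
+ */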
+
+/**
+ * cvmx_fpa_ctl_status
+ *
+ * FPA_CTL_STATUS = FPA's Control/Status Register
+ *
+ * The FPA's control and status register.
+ */
+union cvmx_fpa_ctl_status {
+ uint64_t u64;
+ struct cvmx_fpa_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t free_en : 1; /**< Enables the setting of the INT_SUM_[FREE*] bits. */
+ uint64_t ret_off : 1; /**< When set NCB devices returning pointers will be
+ stalled. */
+ uint64_t req_off : 1; /**< When set NCB devices requesting pointers will be
+ stalled. */
+ uint64_t reset : 1; /**< When set causes a reset of the FPA with the
+ exception of the RSL. */
+ uint64_t use_ldt : 1; /**< When clear '0' the FPA will use LDT to load
+ pointers from the L2C. This is a PASS-2 field. */
+ uint64_t use_stt : 1; /**< When clear '0' the FPA will use STT to store
+ pointers to the L2C. This is a PASS-2 field. */
+ uint64_t enb : 1; /**< Must be set to 1 AFTER writing all config registers
+ and 10 cycles have passed. If any of the config
+ registers are written after writing this bit the
+ FPA may begin to operate incorrectly. */
+ uint64_t mem1_err : 7; /**< Causes a flip of the ECC bit associated with bits
+ 38:32, corresponding to bits 6:0 of this field, for
+ FPF FIFO 1. */
+ uint64_t mem0_err : 7; /**< Causes a flip of the ECC bit associated with bits
+ 38:32, corresponding to bits 6:0 of this field, for
+ FPF FIFO 0. */
+#else
+ uint64_t mem0_err : 7;
+ uint64_t mem1_err : 7;
+ uint64_t enb : 1;
+ uint64_t use_stt : 1;
+ uint64_t use_ldt : 1;
+ uint64_t reset : 1;
+ uint64_t req_off : 1;
+ uint64_t ret_off : 1;
+ uint64_t free_en : 1;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } s;
+ struct cvmx_fpa_ctl_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t reset : 1; /**< When set causes a reset of the FPA with the
+ exception of the RSL. */
+ uint64_t use_ldt : 1; /**< When clear '0' the FPA will use LDT to load
+ pointers from the L2C. */
+ uint64_t use_stt : 1; /**< When clear '0' the FPA will use STT to store
+ pointers to the L2C. */
+ uint64_t enb : 1; /**< Must be set to 1 AFTER writing all config registers
+ and 10 cycles have passed. If any of the config
+ registers are written after writing this bit the
+ FPA may begin to operate incorrectly. */
+ uint64_t mem1_err : 7; /**< Causes a flip of the ECC bit associated with bits
+ 38:32, corresponding to bits 6:0 of this field, for
+ FPF FIFO 1. */
+ uint64_t mem0_err : 7; /**< Causes a flip of the ECC bit associated with bits
+ 38:32, corresponding to bits 6:0 of this field, for
+ FPF FIFO 0. */
+#else
+ uint64_t mem0_err : 7;
+ uint64_t mem1_err : 7;
+ uint64_t enb : 1;
+ uint64_t use_stt : 1;
+ uint64_t use_ldt : 1;
+ uint64_t reset : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn30xx;
+ struct cvmx_fpa_ctl_status_cn30xx cn31xx;
+ struct cvmx_fpa_ctl_status_cn30xx cn38xx;
+ struct cvmx_fpa_ctl_status_cn30xx cn38xxp2;
+ struct cvmx_fpa_ctl_status_cn30xx cn50xx;
+ struct cvmx_fpa_ctl_status_cn30xx cn52xx;
+ struct cvmx_fpa_ctl_status_cn30xx cn52xxp1;
+ struct cvmx_fpa_ctl_status_cn30xx cn56xx;
+ struct cvmx_fpa_ctl_status_cn30xx cn56xxp1;
+ struct cvmx_fpa_ctl_status_cn30xx cn58xx;
+ struct cvmx_fpa_ctl_status_cn30xx cn58xxp1;
+ struct cvmx_fpa_ctl_status_s cn61xx;
+ struct cvmx_fpa_ctl_status_s cn63xx;
+ struct cvmx_fpa_ctl_status_cn30xx cn63xxp1;
+ struct cvmx_fpa_ctl_status_s cn66xx;
+ struct cvmx_fpa_ctl_status_s cn68xx;
+ struct cvmx_fpa_ctl_status_s cn68xxp1;
+ struct cvmx_fpa_ctl_status_s cnf71xx;
+};
+typedef union cvmx_fpa_ctl_status cvmx_fpa_ctl_status_t;
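+
+/* Editor's note -- sketch of the ENB ordering rule documented above:
+ * write every FPA configuration register first, allow at least ten core
+ * clocks, then set ENB as the final step (CVMX_FPA_CTL_STATUS is the
+ * address macro defined earlier in this header):
+ *
+ *   cvmx_fpa_ctl_status_t ctl;
+ *   ctl.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+ *   ctl.s.enb = 1;
+ *   cvmx_write_csr(CVMX_FPA_CTL_STATUS, ctl.u64);
+ *
+ * No configuration register may be written after ENB is set.
+ */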
+
+/**
+ * cvmx_fpa_fpf#_marks
+ *
+ * FPA_FPF1_MARKS = FPA's Queue 1 Free Page FIFO Read Write Marks
+ *
+ * The high and low watermarks that determine when free pages are written to and read from the L2C
+ * for Queue 1. The values of FPF_RD and FPF_WR should differ by at least 33. Recommended values
+ * are FPF_RD == (FPA_FPF#_SIZE[FPF_SIZ] * .25) and FPF_WR == (FPA_FPF#_SIZE[FPF_SIZ] * .75)
+ */
+union cvmx_fpa_fpfx_marks {
+ uint64_t u64;
+ struct cvmx_fpa_fpfx_marks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t fpf_wr : 11; /**< When the number of free-page-pointers in a
+ queue exceeds this value the FPA will write
+ 32-page-pointers of that queue to DRAM.
+ The MAX value for this field should be
+ FPA_FPF1_SIZE[FPF_SIZ]-2. */
+ uint64_t fpf_rd : 11; /**< When the number of free-page-pointers in a
+ queue drops below this value and there are
+ free-page-pointers in DRAM, the FPA will
+ read one page (32 pointers) from DRAM.
+ The maximum value for this field should be
+ FPA_FPF1_SIZE[FPF_SIZ]-34. The minimum value
+ for this field would be 16. */
+#else
+ uint64_t fpf_rd : 11;
+ uint64_t fpf_wr : 11;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_fpa_fpfx_marks_s cn38xx;
+ struct cvmx_fpa_fpfx_marks_s cn38xxp2;
+ struct cvmx_fpa_fpfx_marks_s cn56xx;
+ struct cvmx_fpa_fpfx_marks_s cn56xxp1;
+ struct cvmx_fpa_fpfx_marks_s cn58xx;
+ struct cvmx_fpa_fpfx_marks_s cn58xxp1;
+ struct cvmx_fpa_fpfx_marks_s cn61xx;
+ struct cvmx_fpa_fpfx_marks_s cn63xx;
+ struct cvmx_fpa_fpfx_marks_s cn63xxp1;
+ struct cvmx_fpa_fpfx_marks_s cn66xx;
+ struct cvmx_fpa_fpfx_marks_s cn68xx;
+ struct cvmx_fpa_fpfx_marks_s cn68xxp1;
+ struct cvmx_fpa_fpfx_marks_s cnf71xx;
+};
+typedef union cvmx_fpa_fpfx_marks cvmx_fpa_fpfx_marks_t;
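+
+/* Editor's note -- sketch of the recommended watermarks above, assuming
+ * fpf_siz holds the FPA_FPF#_SIZE[FPF_SIZ] value already programmed for
+ * the same queue (1-7); fpf_siz and queue are hypothetical variables:
+ *
+ *   cvmx_fpa_fpfx_marks_t marks;
+ *   marks.u64 = 0;
+ *   marks.s.fpf_rd = fpf_siz / 4;        (~25% of the FIFO)
+ *   marks.s.fpf_wr = (fpf_siz * 3) / 4;  (~75%, keeping the >= 33 gap)
+ *   cvmx_write_csr(CVMX_FPA_FPFX_MARKS(queue), marks.u64);
+ */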
+
+/**
+ * cvmx_fpa_fpf#_size
+ *
+ * FPA_FPFX_SIZE = FPA's Queue 1-7 Free Page FIFO Size
+ *
+ * The number of page pointers that will be kept local to the FPA for this Queue. FPA Queues are
+ * assigned in order from Queue 0 to Queue 7, though only Queue 0 through Queue x can be used.
+ * The sum of the 8 (0-7) FPA_FPF#_SIZE registers must be limited to 2048.
+ */
+union cvmx_fpa_fpfx_size {
+ uint64_t u64;
+ struct cvmx_fpa_fpfx_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t fpf_siz : 11; /**< The number of entries assigned in the FPA FIFO
+ (used to hold page-pointers) for this Queue.
+ The value of this register must be divisible by 2,
+ and the FPA will ignore bit [0] of this register.
+ The total of the FPF_SIZ field of the 8 (0-7)
+ FPA_FPF#_SIZE registers must not exceed 2048.
+ After writing this field the FPA will need 10
+ core clock cycles to be ready for operation. The
+ assignment of location in the FPA FIFO must
+ start with Queue 0, then 1, 2, etc.
+ The number of usable entries will be FPF_SIZ-2. */
+#else
+ uint64_t fpf_siz : 11;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_fpa_fpfx_size_s cn38xx;
+ struct cvmx_fpa_fpfx_size_s cn38xxp2;
+ struct cvmx_fpa_fpfx_size_s cn56xx;
+ struct cvmx_fpa_fpfx_size_s cn56xxp1;
+ struct cvmx_fpa_fpfx_size_s cn58xx;
+ struct cvmx_fpa_fpfx_size_s cn58xxp1;
+ struct cvmx_fpa_fpfx_size_s cn61xx;
+ struct cvmx_fpa_fpfx_size_s cn63xx;
+ struct cvmx_fpa_fpfx_size_s cn63xxp1;
+ struct cvmx_fpa_fpfx_size_s cn66xx;
+ struct cvmx_fpa_fpfx_size_s cn68xx;
+ struct cvmx_fpa_fpfx_size_s cn68xxp1;
+ struct cvmx_fpa_fpfx_size_s cnf71xx;
+};
+typedef union cvmx_fpa_fpfx_size cvmx_fpa_fpfx_size_t;
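+
+/* Editor's note -- sizing sketch for the 2048-entry budget above. FIFO
+ * space must be assigned starting at Queue 0 and each size must be even;
+ * Queue 0 uses the separate FPA_FPF0_SIZE register (its address macro is
+ * defined earlier in this header). With n_queues as a hypothetical count
+ * of queues in use, an even split might be:
+ *
+ *   uint64_t per_queue = (2048 / n_queues) & ~1ull;
+ *   cvmx_write_csr(CVMX_FPA_FPF0_SIZE, per_queue);
+ *   for (unsigned q = 1; q < n_queues; q++)
+ *       cvmx_write_csr(CVMX_FPA_FPFX_SIZE(q), per_queue);
+ */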
+
+/**
+ * cvmx_fpa_fpf0_marks
+ *
+ * FPA_FPF0_MARKS = FPA's Queue 0 Free Page FIFO Read Write Marks
+ *
+ * The high and low watermarks that determine when free pages are written to and read from the L2C
+ * for Queue 0. The values of FPF_RD and FPF_WR should differ by at least 33. Recommended values
+ * are FPF_RD == (FPA_FPF#_SIZE[FPF_SIZ] * .25) and FPF_WR == (FPA_FPF#_SIZE[FPF_SIZ] * .75)
+ */
+union cvmx_fpa_fpf0_marks {
+ uint64_t u64;
+ struct cvmx_fpa_fpf0_marks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t fpf_wr : 12; /**< When the number of free-page-pointers in a
+ queue exceeds this value the FPA will write
+ 32-page-pointers of that queue to DRAM.
+ The MAX value for this field should be
+ FPA_FPF0_SIZE[FPF_SIZ]-2. */
+ uint64_t fpf_rd : 12; /**< When the number of free-page-pointers in a
+ queue drops below this value and there are
+ free-page-pointers in DRAM, the FPA will
+ read one page (32 pointers) from DRAM.
+ The maximum value for this field should be
+ FPA_FPF0_SIZE[FPF_SIZ]-34. The minimum value
+ for this field would be 16. */
+#else
+ uint64_t fpf_rd : 12;
+ uint64_t fpf_wr : 12;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_fpa_fpf0_marks_s cn38xx;
+ struct cvmx_fpa_fpf0_marks_s cn38xxp2;
+ struct cvmx_fpa_fpf0_marks_s cn56xx;
+ struct cvmx_fpa_fpf0_marks_s cn56xxp1;
+ struct cvmx_fpa_fpf0_marks_s cn58xx;
+ struct cvmx_fpa_fpf0_marks_s cn58xxp1;
+ struct cvmx_fpa_fpf0_marks_s cn61xx;
+ struct cvmx_fpa_fpf0_marks_s cn63xx;
+ struct cvmx_fpa_fpf0_marks_s cn63xxp1;
+ struct cvmx_fpa_fpf0_marks_s cn66xx;
+ struct cvmx_fpa_fpf0_marks_s cn68xx;
+ struct cvmx_fpa_fpf0_marks_s cn68xxp1;
+ struct cvmx_fpa_fpf0_marks_s cnf71xx;
+};
+typedef union cvmx_fpa_fpf0_marks cvmx_fpa_fpf0_marks_t;
+
+/**
+ * cvmx_fpa_fpf0_size
+ *
+ * FPA_FPF0_SIZE = FPA's Queue 0 Free Page FIFO Size
+ *
+ * The number of page pointers that will be kept local to the FPA for this Queue. FPA Queues are
+ * assigned in order from Queue 0 to Queue 7, though only Queue 0 through Queue x can be used.
+ * The sum of the 8 (0-7) FPA_FPF#_SIZE registers must be limited to 2048.
+ */
+union cvmx_fpa_fpf0_size {
+ uint64_t u64;
+ struct cvmx_fpa_fpf0_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t fpf_siz : 12; /**< The number of entries assigned in the FPA FIFO
+ (used to hold page-pointers) for this Queue.
+ The value of this register must be divisible by 2,
+ and the FPA will ignore bit [0] of this register.
+ The total of the FPF_SIZ field of the 8 (0-7)
+ FPA_FPF#_SIZE registers must not exceed 2048.
+ After writing this field the FPA will need 10
+ core clock cycles to be ready for operation. The
+ assignment of location in the FPA FIFO must
+ start with Queue 0, then 1, 2, etc.
+ The number of usable entries will be FPF_SIZ-2. */
+#else
+ uint64_t fpf_siz : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_fpa_fpf0_size_s cn38xx;
+ struct cvmx_fpa_fpf0_size_s cn38xxp2;
+ struct cvmx_fpa_fpf0_size_s cn56xx;
+ struct cvmx_fpa_fpf0_size_s cn56xxp1;
+ struct cvmx_fpa_fpf0_size_s cn58xx;
+ struct cvmx_fpa_fpf0_size_s cn58xxp1;
+ struct cvmx_fpa_fpf0_size_s cn61xx;
+ struct cvmx_fpa_fpf0_size_s cn63xx;
+ struct cvmx_fpa_fpf0_size_s cn63xxp1;
+ struct cvmx_fpa_fpf0_size_s cn66xx;
+ struct cvmx_fpa_fpf0_size_s cn68xx;
+ struct cvmx_fpa_fpf0_size_s cn68xxp1;
+ struct cvmx_fpa_fpf0_size_s cnf71xx;
+};
+typedef union cvmx_fpa_fpf0_size cvmx_fpa_fpf0_size_t;
+
+/**
+ * cvmx_fpa_fpf8_marks
+ *
+ * Reserved through 0x238 for additional thresholds
+ *
+ * FPA_FPF8_MARKS = FPA's Queue 8 Free Page FIFO Read Write Marks
+ *
+ * The high and low watermarks that determine when free pages are written to and read from the L2C
+ * for Queue 8. The values of FPF_RD and FPF_WR should differ by at least 33. Recommended values
+ * are FPF_RD == (FPA_FPF#_SIZE[FPF_SIZ] * .25) and FPF_WR == (FPA_FPF#_SIZE[FPF_SIZ] * .75)
+ */
+union cvmx_fpa_fpf8_marks {
+ uint64_t u64;
+ struct cvmx_fpa_fpf8_marks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t fpf_wr : 11; /**< When the number of free-page-pointers in a
+ queue exceeds this value the FPA will write
+ 32-page-pointers of that queue to DRAM.
+ The MAX value for this field should be
+ FPA_FPF8_SIZE[FPF_SIZ]-2. */
+ uint64_t fpf_rd : 11; /**< When the number of free-page-pointers in a
+ queue drops below this value and there are
+ free-page-pointers in DRAM, the FPA will
+ read one page (32 pointers) from DRAM.
+ The maximum value for this field should be
+ FPA_FPF8_SIZE[FPF_SIZ]-34. The minimum value
+ for this field would be 16. */
+#else
+ uint64_t fpf_rd : 11;
+ uint64_t fpf_wr : 11;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_fpa_fpf8_marks_s cn68xx;
+ struct cvmx_fpa_fpf8_marks_s cn68xxp1;
+};
+typedef union cvmx_fpa_fpf8_marks cvmx_fpa_fpf8_marks_t;
+
+/**
+ * cvmx_fpa_fpf8_size
+ *
+ * FPA_FPF8_SIZE = FPA's Queue 8 Free Page FIFO Size
+ *
+ * The number of page pointers that will be kept local to the FPA for this Queue. FPA Queues are
+ * assigned in order from Queue 0 to Queue 8, though only Queue 0 through Queue x can be used.
+ * The sum of the 9 (0-8) FPA_FPF#_SIZE registers must be limited to 2048.
+ */
+union cvmx_fpa_fpf8_size {
+ uint64_t u64;
+ struct cvmx_fpa_fpf8_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t fpf_siz : 12; /**< The number of entries assigned in the FPA FIFO
+ (used to hold page-pointers) for this Queue.
+ The value of this register must be divisible by 2,
+ and the FPA will ignore bit [0] of this register.
+ The total of the FPF_SIZ field of the 9 (0-8)
+ FPA_FPF#_SIZE registers must not exceed 2048.
+ After writing this field the FPA will need 10
+ core clock cycles to be ready for operation. The
+ assignment of location in the FPA FIFO must
+ start with Queue 0, then 1, 2, etc.
+ The number of usable entries will be FPF_SIZ-2. */
+#else
+ uint64_t fpf_siz : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_fpa_fpf8_size_s cn68xx;
+ struct cvmx_fpa_fpf8_size_s cn68xxp1;
+};
+typedef union cvmx_fpa_fpf8_size cvmx_fpa_fpf8_size_t;
+
+/**
+ * cvmx_fpa_int_enb
+ *
+ * FPA_INT_ENB = FPA's Interrupt Enable
+ *
+ * The FPA's interrupt enable register.
+ */
+union cvmx_fpa_int_enb {
+ uint64_t u64;
+ struct cvmx_fpa_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63 : 14;
+ uint64_t paddr_e : 1; /**< When set (1) and bit 49 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t reserved_44_48 : 5;
+ uint64_t free7 : 1; /**< When set (1) and bit 43 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free6 : 1; /**< When set (1) and bit 42 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free5 : 1; /**< When set (1) and bit 41 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free4 : 1; /**< When set (1) and bit 40 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free3 : 1; /**< When set (1) and bit 39 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free2 : 1; /**< When set (1) and bit 38 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free1 : 1; /**< When set (1) and bit 37 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free0 : 1; /**< When set (1) and bit 36 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool7th : 1; /**< When set (1) and bit 35 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool6th : 1; /**< When set (1) and bit 34 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool5th : 1; /**< When set (1) and bit 33 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool4th : 1; /**< When set (1) and bit 32 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool3th : 1; /**< When set (1) and bit 31 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool2th : 1; /**< When set (1) and bit 30 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool1th : 1; /**< When set (1) and bit 29 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool0th : 1; /**< When set (1) and bit 28 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_perr : 1; /**< When set (1) and bit 27 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_coff : 1; /**< When set (1) and bit 26 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_und : 1; /**< When set (1) and bit 25 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_perr : 1; /**< When set (1) and bit 24 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_coff : 1; /**< When set (1) and bit 23 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_und : 1; /**< When set (1) and bit 22 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_perr : 1; /**< When set (1) and bit 21 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_coff : 1; /**< When set (1) and bit 20 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_und : 1; /**< When set (1) and bit 19 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_perr : 1; /**< When set (1) and bit 18 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_coff : 1; /**< When set (1) and bit 17 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_und : 1; /**< When set (1) and bit 16 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_perr : 1; /**< When set (1) and bit 15 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_coff : 1; /**< When set (1) and bit 14 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_und : 1; /**< When set (1) and bit 13 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_perr : 1; /**< When set (1) and bit 12 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_coff : 1; /**< When set (1) and bit 11 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_und : 1; /**< When set (1) and bit 10 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_perr : 1; /**< When set (1) and bit 9 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_coff : 1; /**< When set (1) and bit 8 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_und : 1; /**< When set (1) and bit 7 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_perr : 1; /**< When set (1) and bit 6 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_coff : 1; /**< When set (1) and bit 5 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_und : 1; /**< When set (1) and bit 4 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_dbe : 1; /**< When set (1) and bit 3 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_sbe : 1; /**< When set (1) and bit 2 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_dbe : 1; /**< When set (1) and bit 1 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_sbe : 1; /**< When set (1) and bit 0 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+#else
+ uint64_t fed0_sbe : 1;
+ uint64_t fed0_dbe : 1;
+ uint64_t fed1_sbe : 1;
+ uint64_t fed1_dbe : 1;
+ uint64_t q0_und : 1;
+ uint64_t q0_coff : 1;
+ uint64_t q0_perr : 1;
+ uint64_t q1_und : 1;
+ uint64_t q1_coff : 1;
+ uint64_t q1_perr : 1;
+ uint64_t q2_und : 1;
+ uint64_t q2_coff : 1;
+ uint64_t q2_perr : 1;
+ uint64_t q3_und : 1;
+ uint64_t q3_coff : 1;
+ uint64_t q3_perr : 1;
+ uint64_t q4_und : 1;
+ uint64_t q4_coff : 1;
+ uint64_t q4_perr : 1;
+ uint64_t q5_und : 1;
+ uint64_t q5_coff : 1;
+ uint64_t q5_perr : 1;
+ uint64_t q6_und : 1;
+ uint64_t q6_coff : 1;
+ uint64_t q6_perr : 1;
+ uint64_t q7_und : 1;
+ uint64_t q7_coff : 1;
+ uint64_t q7_perr : 1;
+ uint64_t pool0th : 1;
+ uint64_t pool1th : 1;
+ uint64_t pool2th : 1;
+ uint64_t pool3th : 1;
+ uint64_t pool4th : 1;
+ uint64_t pool5th : 1;
+ uint64_t pool6th : 1;
+ uint64_t pool7th : 1;
+ uint64_t free0 : 1;
+ uint64_t free1 : 1;
+ uint64_t free2 : 1;
+ uint64_t free3 : 1;
+ uint64_t free4 : 1;
+ uint64_t free5 : 1;
+ uint64_t free6 : 1;
+ uint64_t free7 : 1;
+ uint64_t reserved_44_48 : 5;
+ uint64_t paddr_e : 1;
+ uint64_t reserved_50_63 : 14;
+#endif
+ } s;
+ struct cvmx_fpa_int_enb_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t q7_perr : 1; /**< When set (1) and bit 27 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_coff : 1; /**< When set (1) and bit 26 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_und : 1; /**< When set (1) and bit 25 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_perr : 1; /**< When set (1) and bit 24 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_coff : 1; /**< When set (1) and bit 23 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_und : 1; /**< When set (1) and bit 22 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_perr : 1; /**< When set (1) and bit 21 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_coff : 1; /**< When set (1) and bit 20 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_und : 1; /**< When set (1) and bit 19 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_perr : 1; /**< When set (1) and bit 18 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_coff : 1; /**< When set (1) and bit 17 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_und : 1; /**< When set (1) and bit 16 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_perr : 1; /**< When set (1) and bit 15 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_coff : 1; /**< When set (1) and bit 14 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_und : 1; /**< When set (1) and bit 13 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_perr : 1; /**< When set (1) and bit 12 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_coff : 1; /**< When set (1) and bit 11 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_und : 1; /**< When set (1) and bit 10 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_perr : 1; /**< When set (1) and bit 9 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_coff : 1; /**< When set (1) and bit 8 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_und : 1; /**< When set (1) and bit 7 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_perr : 1; /**< When set (1) and bit 6 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_coff : 1; /**< When set (1) and bit 5 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_und : 1; /**< When set (1) and bit 4 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_dbe : 1; /**< When set (1) and bit 3 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_sbe : 1; /**< When set (1) and bit 2 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_dbe : 1; /**< When set (1) and bit 1 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_sbe : 1; /**< When set (1) and bit 0 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+#else
+ uint64_t fed0_sbe : 1;
+ uint64_t fed0_dbe : 1;
+ uint64_t fed1_sbe : 1;
+ uint64_t fed1_dbe : 1;
+ uint64_t q0_und : 1;
+ uint64_t q0_coff : 1;
+ uint64_t q0_perr : 1;
+ uint64_t q1_und : 1;
+ uint64_t q1_coff : 1;
+ uint64_t q1_perr : 1;
+ uint64_t q2_und : 1;
+ uint64_t q2_coff : 1;
+ uint64_t q2_perr : 1;
+ uint64_t q3_und : 1;
+ uint64_t q3_coff : 1;
+ uint64_t q3_perr : 1;
+ uint64_t q4_und : 1;
+ uint64_t q4_coff : 1;
+ uint64_t q4_perr : 1;
+ uint64_t q5_und : 1;
+ uint64_t q5_coff : 1;
+ uint64_t q5_perr : 1;
+ uint64_t q6_und : 1;
+ uint64_t q6_coff : 1;
+ uint64_t q6_perr : 1;
+ uint64_t q7_und : 1;
+ uint64_t q7_coff : 1;
+ uint64_t q7_perr : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn30xx;
+ struct cvmx_fpa_int_enb_cn30xx cn31xx;
+ struct cvmx_fpa_int_enb_cn30xx cn38xx;
+ struct cvmx_fpa_int_enb_cn30xx cn38xxp2;
+ struct cvmx_fpa_int_enb_cn30xx cn50xx;
+ struct cvmx_fpa_int_enb_cn30xx cn52xx;
+ struct cvmx_fpa_int_enb_cn30xx cn52xxp1;
+ struct cvmx_fpa_int_enb_cn30xx cn56xx;
+ struct cvmx_fpa_int_enb_cn30xx cn56xxp1;
+ struct cvmx_fpa_int_enb_cn30xx cn58xx;
+ struct cvmx_fpa_int_enb_cn30xx cn58xxp1;
+ struct cvmx_fpa_int_enb_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63 : 14;
+ uint64_t paddr_e : 1; /**< When set (1) and bit 49 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t res_44 : 5; /**< Reserved */
+ uint64_t free7 : 1; /**< When set (1) and bit 43 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free6 : 1; /**< When set (1) and bit 42 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free5 : 1; /**< When set (1) and bit 41 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free4 : 1; /**< When set (1) and bit 40 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free3 : 1; /**< When set (1) and bit 39 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free2 : 1; /**< When set (1) and bit 38 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free1 : 1; /**< When set (1) and bit 37 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free0 : 1; /**< When set (1) and bit 36 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool7th : 1; /**< When set (1) and bit 35 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool6th : 1; /**< When set (1) and bit 34 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool5th : 1; /**< When set (1) and bit 33 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool4th : 1; /**< When set (1) and bit 32 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool3th : 1; /**< When set (1) and bit 31 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool2th : 1; /**< When set (1) and bit 30 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool1th : 1; /**< When set (1) and bit 29 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool0th : 1; /**< When set (1) and bit 28 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_perr : 1; /**< When set (1) and bit 27 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_coff : 1; /**< When set (1) and bit 26 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_und : 1; /**< When set (1) and bit 25 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_perr : 1; /**< When set (1) and bit 24 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_coff : 1; /**< When set (1) and bit 23 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_und : 1; /**< When set (1) and bit 22 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_perr : 1; /**< When set (1) and bit 21 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_coff : 1; /**< When set (1) and bit 20 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_und : 1; /**< When set (1) and bit 19 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_perr : 1; /**< When set (1) and bit 18 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_coff : 1; /**< When set (1) and bit 17 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_und : 1; /**< When set (1) and bit 16 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_perr : 1; /**< When set (1) and bit 15 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_coff : 1; /**< When set (1) and bit 14 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_und : 1; /**< When set (1) and bit 13 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_perr : 1; /**< When set (1) and bit 12 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_coff : 1; /**< When set (1) and bit 11 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_und : 1; /**< When set (1) and bit 10 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_perr : 1; /**< When set (1) and bit 9 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_coff : 1; /**< When set (1) and bit 8 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_und : 1; /**< When set (1) and bit 7 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_perr : 1; /**< When set (1) and bit 6 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_coff : 1; /**< When set (1) and bit 5 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_und : 1; /**< When set (1) and bit 4 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_dbe : 1; /**< When set (1) and bit 3 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_sbe : 1; /**< When set (1) and bit 2 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_dbe : 1; /**< When set (1) and bit 1 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_sbe : 1; /**< When set (1) and bit 0 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+#else
+ uint64_t fed0_sbe : 1;
+ uint64_t fed0_dbe : 1;
+ uint64_t fed1_sbe : 1;
+ uint64_t fed1_dbe : 1;
+ uint64_t q0_und : 1;
+ uint64_t q0_coff : 1;
+ uint64_t q0_perr : 1;
+ uint64_t q1_und : 1;
+ uint64_t q1_coff : 1;
+ uint64_t q1_perr : 1;
+ uint64_t q2_und : 1;
+ uint64_t q2_coff : 1;
+ uint64_t q2_perr : 1;
+ uint64_t q3_und : 1;
+ uint64_t q3_coff : 1;
+ uint64_t q3_perr : 1;
+ uint64_t q4_und : 1;
+ uint64_t q4_coff : 1;
+ uint64_t q4_perr : 1;
+ uint64_t q5_und : 1;
+ uint64_t q5_coff : 1;
+ uint64_t q5_perr : 1;
+ uint64_t q6_und : 1;
+ uint64_t q6_coff : 1;
+ uint64_t q6_perr : 1;
+ uint64_t q7_und : 1;
+ uint64_t q7_coff : 1;
+ uint64_t q7_perr : 1;
+ uint64_t pool0th : 1;
+ uint64_t pool1th : 1;
+ uint64_t pool2th : 1;
+ uint64_t pool3th : 1;
+ uint64_t pool4th : 1;
+ uint64_t pool5th : 1;
+ uint64_t pool6th : 1;
+ uint64_t pool7th : 1;
+ uint64_t free0 : 1;
+ uint64_t free1 : 1;
+ uint64_t free2 : 1;
+ uint64_t free3 : 1;
+ uint64_t free4 : 1;
+ uint64_t free5 : 1;
+ uint64_t free6 : 1;
+ uint64_t free7 : 1;
+ uint64_t res_44 : 5;
+ uint64_t paddr_e : 1;
+ uint64_t reserved_50_63 : 14;
+#endif
+ } cn61xx;
+ struct cvmx_fpa_int_enb_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t free7 : 1; /**< When set (1) and bit 43 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free6 : 1; /**< When set (1) and bit 42 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free5 : 1; /**< When set (1) and bit 41 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free4 : 1; /**< When set (1) and bit 40 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free3 : 1; /**< When set (1) and bit 39 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free2 : 1; /**< When set (1) and bit 38 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free1 : 1; /**< When set (1) and bit 37 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free0 : 1; /**< When set (1) and bit 36 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool7th : 1; /**< When set (1) and bit 35 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool6th : 1; /**< When set (1) and bit 34 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool5th : 1; /**< When set (1) and bit 33 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool4th : 1; /**< When set (1) and bit 32 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool3th : 1; /**< When set (1) and bit 31 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool2th : 1; /**< When set (1) and bit 30 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool1th : 1; /**< When set (1) and bit 29 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool0th : 1; /**< When set (1) and bit 28 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_perr : 1; /**< When set (1) and bit 27 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_coff : 1; /**< When set (1) and bit 26 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_und : 1; /**< When set (1) and bit 25 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_perr : 1; /**< When set (1) and bit 24 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_coff : 1; /**< When set (1) and bit 23 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_und : 1; /**< When set (1) and bit 22 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_perr : 1; /**< When set (1) and bit 21 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_coff : 1; /**< When set (1) and bit 20 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_und : 1; /**< When set (1) and bit 19 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_perr : 1; /**< When set (1) and bit 18 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_coff : 1; /**< When set (1) and bit 17 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_und : 1; /**< When set (1) and bit 16 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_perr : 1; /**< When set (1) and bit 15 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_coff : 1; /**< When set (1) and bit 14 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_und : 1; /**< When set (1) and bit 13 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_perr : 1; /**< When set (1) and bit 12 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_coff : 1; /**< When set (1) and bit 11 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_und : 1; /**< When set (1) and bit 10 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_perr : 1; /**< When set (1) and bit 9 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_coff : 1; /**< When set (1) and bit 8 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_und : 1; /**< When set (1) and bit 7 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_perr : 1; /**< When set (1) and bit 6 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_coff : 1; /**< When set (1) and bit 5 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_und : 1; /**< When set (1) and bit 4 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_dbe : 1; /**< When set (1) and bit 3 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_sbe : 1; /**< When set (1) and bit 2 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_dbe : 1; /**< When set (1) and bit 1 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_sbe : 1; /**< When set (1) and bit 0 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+#else
+ uint64_t fed0_sbe : 1;
+ uint64_t fed0_dbe : 1;
+ uint64_t fed1_sbe : 1;
+ uint64_t fed1_dbe : 1;
+ uint64_t q0_und : 1;
+ uint64_t q0_coff : 1;
+ uint64_t q0_perr : 1;
+ uint64_t q1_und : 1;
+ uint64_t q1_coff : 1;
+ uint64_t q1_perr : 1;
+ uint64_t q2_und : 1;
+ uint64_t q2_coff : 1;
+ uint64_t q2_perr : 1;
+ uint64_t q3_und : 1;
+ uint64_t q3_coff : 1;
+ uint64_t q3_perr : 1;
+ uint64_t q4_und : 1;
+ uint64_t q4_coff : 1;
+ uint64_t q4_perr : 1;
+ uint64_t q5_und : 1;
+ uint64_t q5_coff : 1;
+ uint64_t q5_perr : 1;
+ uint64_t q6_und : 1;
+ uint64_t q6_coff : 1;
+ uint64_t q6_perr : 1;
+ uint64_t q7_und : 1;
+ uint64_t q7_coff : 1;
+ uint64_t q7_perr : 1;
+ uint64_t pool0th : 1;
+ uint64_t pool1th : 1;
+ uint64_t pool2th : 1;
+ uint64_t pool3th : 1;
+ uint64_t pool4th : 1;
+ uint64_t pool5th : 1;
+ uint64_t pool6th : 1;
+ uint64_t pool7th : 1;
+ uint64_t free0 : 1;
+ uint64_t free1 : 1;
+ uint64_t free2 : 1;
+ uint64_t free3 : 1;
+ uint64_t free4 : 1;
+ uint64_t free5 : 1;
+ uint64_t free6 : 1;
+ uint64_t free7 : 1;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } cn63xx;
+ struct cvmx_fpa_int_enb_cn30xx cn63xxp1;
+ struct cvmx_fpa_int_enb_cn61xx cn66xx;
+ struct cvmx_fpa_int_enb_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63 : 14;
+ uint64_t paddr_e : 1; /**< When set (1) and bit 49 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool8th : 1; /**< When set (1) and bit 48 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q8_perr : 1; /**< When set (1) and bit 47 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q8_coff : 1; /**< When set (1) and bit 46 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q8_und : 1; /**< When set (1) and bit 45 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free8 : 1; /**< When set (1) and bit 44 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free7 : 1; /**< When set (1) and bit 43 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free6 : 1; /**< When set (1) and bit 42 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free5 : 1; /**< When set (1) and bit 41 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free4 : 1; /**< When set (1) and bit 40 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free3 : 1; /**< When set (1) and bit 39 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free2 : 1; /**< When set (1) and bit 38 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free1 : 1; /**< When set (1) and bit 37 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t free0 : 1; /**< When set (1) and bit 36 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool7th : 1; /**< When set (1) and bit 35 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool6th : 1; /**< When set (1) and bit 34 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool5th : 1; /**< When set (1) and bit 33 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool4th : 1; /**< When set (1) and bit 32 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool3th : 1; /**< When set (1) and bit 31 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool2th : 1; /**< When set (1) and bit 30 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool1th : 1; /**< When set (1) and bit 29 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t pool0th : 1; /**< When set (1) and bit 28 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_perr : 1; /**< When set (1) and bit 27 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_coff : 1; /**< When set (1) and bit 26 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_und : 1; /**< When set (1) and bit 25 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_perr : 1; /**< When set (1) and bit 24 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_coff : 1; /**< When set (1) and bit 23 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_und : 1; /**< When set (1) and bit 22 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_perr : 1; /**< When set (1) and bit 21 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_coff : 1; /**< When set (1) and bit 20 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_und : 1; /**< When set (1) and bit 19 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_perr : 1; /**< When set (1) and bit 18 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_coff : 1; /**< When set (1) and bit 17 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_und : 1; /**< When set (1) and bit 16 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_perr : 1; /**< When set (1) and bit 15 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_coff : 1; /**< When set (1) and bit 14 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_und : 1; /**< When set (1) and bit 13 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_perr : 1; /**< When set (1) and bit 12 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_coff : 1; /**< When set (1) and bit 11 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_und : 1; /**< When set (1) and bit 10 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_perr : 1; /**< When set (1) and bit 9 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_coff : 1; /**< When set (1) and bit 8 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_und : 1; /**< When set (1) and bit 7 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_perr : 1; /**< When set (1) and bit 6 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_coff : 1; /**< When set (1) and bit 5 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_und : 1; /**< When set (1) and bit 4 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_dbe : 1; /**< When set (1) and bit 3 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_sbe : 1; /**< When set (1) and bit 2 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_dbe : 1; /**< When set (1) and bit 1 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_sbe : 1; /**< When set (1) and bit 0 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+#else
+ uint64_t fed0_sbe : 1;
+ uint64_t fed0_dbe : 1;
+ uint64_t fed1_sbe : 1;
+ uint64_t fed1_dbe : 1;
+ uint64_t q0_und : 1;
+ uint64_t q0_coff : 1;
+ uint64_t q0_perr : 1;
+ uint64_t q1_und : 1;
+ uint64_t q1_coff : 1;
+ uint64_t q1_perr : 1;
+ uint64_t q2_und : 1;
+ uint64_t q2_coff : 1;
+ uint64_t q2_perr : 1;
+ uint64_t q3_und : 1;
+ uint64_t q3_coff : 1;
+ uint64_t q3_perr : 1;
+ uint64_t q4_und : 1;
+ uint64_t q4_coff : 1;
+ uint64_t q4_perr : 1;
+ uint64_t q5_und : 1;
+ uint64_t q5_coff : 1;
+ uint64_t q5_perr : 1;
+ uint64_t q6_und : 1;
+ uint64_t q6_coff : 1;
+ uint64_t q6_perr : 1;
+ uint64_t q7_und : 1;
+ uint64_t q7_coff : 1;
+ uint64_t q7_perr : 1;
+ uint64_t pool0th : 1;
+ uint64_t pool1th : 1;
+ uint64_t pool2th : 1;
+ uint64_t pool3th : 1;
+ uint64_t pool4th : 1;
+ uint64_t pool5th : 1;
+ uint64_t pool6th : 1;
+ uint64_t pool7th : 1;
+ uint64_t free0 : 1;
+ uint64_t free1 : 1;
+ uint64_t free2 : 1;
+ uint64_t free3 : 1;
+ uint64_t free4 : 1;
+ uint64_t free5 : 1;
+ uint64_t free6 : 1;
+ uint64_t free7 : 1;
+ uint64_t free8 : 1;
+ uint64_t q8_und : 1;
+ uint64_t q8_coff : 1;
+ uint64_t q8_perr : 1;
+ uint64_t pool8th : 1;
+ uint64_t paddr_e : 1;
+ uint64_t reserved_50_63 : 14;
+#endif
+ } cn68xx;
+ struct cvmx_fpa_int_enb_cn68xx cn68xxp1;
+ struct cvmx_fpa_int_enb_cn61xx cnf71xx;
+};
+typedef union cvmx_fpa_int_enb cvmx_fpa_int_enb_t;
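+
+/* Editor's note -- enable sketch: each FPA_INT_ENB bit gates the matching
+ * FPA_INT_SUM bit, so enabling only the FPF FIFO ECC error interrupts
+ * might read (CVMX_FPA_INT_ENB is defined above):
+ *
+ *   cvmx_fpa_int_enb_t enb;
+ *   enb.u64 = 0;
+ *   enb.s.fed0_sbe = enb.s.fed0_dbe = 1;
+ *   enb.s.fed1_sbe = enb.s.fed1_dbe = 1;
+ *   cvmx_write_csr(CVMX_FPA_INT_ENB, enb.u64);
+ */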
+
+/**
+ * cvmx_fpa_int_sum
+ *
+ * FPA_INT_SUM = FPA's Interrupt Summary Register
+ *
+ * Contains the different interrupt summary bits of the FPA.
+ */
+union cvmx_fpa_int_sum {
+ uint64_t u64;
+ struct cvmx_fpa_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63 : 14;
+ uint64_t paddr_e : 1; /**< Set when a pointer address does not fall in the
+ address range for a pool specified by
+ FPA_POOLX_START_ADDR and FPA_POOLX_END_ADDR. */
+ uint64_t pool8th : 1; /**< Set when FPA_QUE8_AVAILABLE is equal to
+ FPA_POOL8_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t q8_perr : 1; /**< Set when a Queue8 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q8_coff : 1; /**< Set when a Queue8 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q8_und : 1; /**< Set when a Queue8 page count available goes
+ negative. */
+ uint64_t free8 : 1; /**< When a pointer for POOL8 is freed this bit is set. */
+ uint64_t free7 : 1; /**< When a pointer for POOL7 is freed this bit is set. */
+ uint64_t free6 : 1; /**< When a pointer for POOL6 is freed this bit is set. */
+ uint64_t free5 : 1; /**< When a pointer for POOL5 is freed this bit is set. */
+ uint64_t free4 : 1; /**< When a pointer for POOL4 is freed this bit is set. */
+ uint64_t free3 : 1; /**< When a pointer for POOL3 is freed this bit is set. */
+ uint64_t free2 : 1; /**< When a pointer for POOL2 is freed this bit is set. */
+ uint64_t free1 : 1; /**< When a pointer for POOL1 is freed this bit is set. */
+ uint64_t free0 : 1; /**< When a pointer for POOL0 is freed this bit is set. */
+ uint64_t pool7th : 1; /**< Set when FPA_QUE7_AVAILABLE is equal to
+ FPA_POOL7_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool6th : 1; /**< Set when FPA_QUE6_AVAILABLE is equal to
+ FPA_POOL6_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool5th : 1; /**< Set when FPA_QUE5_AVAILABLE is equal to
+ FPA_POOL5_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool4th : 1; /**< Set when FPA_QUE4_AVAILABLE is equal to
+ FPA_POOL4_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool3th : 1; /**< Set when FPA_QUE3_AVAILABLE is equal to
+ FPA_POOL3_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool2th : 1; /**< Set when FPA_QUE2_AVAILABLE is equal to
+ FPA_POOL2_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool1th : 1; /**< Set when FPA_QUE1_AVAILABLE is equal to
+ FPA_POOL1_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool0th : 1; /**< Set when FPA_QUE0_AVAILABLE is equal to
+ FPA_POOL0_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t q7_perr : 1; /**< Set when a Queue7 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q7_coff : 1; /**< Set when a Queue7 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q7_und : 1; /**< Set when a Queue7 page count available goes
+ negative. */
+ uint64_t q6_perr : 1; /**< Set when a Queue6 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q6_coff : 1; /**< Set when a Queue6 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q6_und : 1; /**< Set when a Queue6 page count available goes
+ negative. */
+ uint64_t q5_perr : 1; /**< Set when a Queue5 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q5_coff : 1; /**< Set when a Queue5 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q5_und : 1; /**< Set when a Queue5 page count available goes
+ negative. */
+ uint64_t q4_perr : 1; /**< Set when a Queue4 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q4_coff : 1; /**< Set when a Queue4 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q4_und : 1; /**< Set when a Queue4 page count available goes
+ negative. */
+ uint64_t q3_perr : 1; /**< Set when a Queue3 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q3_coff : 1; /**< Set when a Queue3 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q3_und : 1; /**< Set when a Queue3 page count available goes
+ negative. */
+ uint64_t q2_perr : 1; /**< Set when a Queue2 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q2_coff : 1; /**< Set when a Queue2 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q2_und : 1; /**< Set when a Queue2 page count available goes
+ negative. */
+ uint64_t q1_perr : 1; /**< Set when a Queue1 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q1_coff : 1; /**< Set when a Queue1 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q1_und : 1; /**< Set when a Queue1 page count available goes
+ negative. */
+ uint64_t q0_perr : 1; /**< Set when a Queue0 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q0_coff : 1; /**< Set when a Queue0 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q0_und : 1; /**< Set when a Queue0 page count available goes
+ negative. */
+ uint64_t fed1_dbe : 1; /**< Set when a Double Bit Error is detected in FPF1. */
+ uint64_t fed1_sbe : 1; /**< Set when a Single Bit Error is detected in FPF1. */
+ uint64_t fed0_dbe : 1; /**< Set when a Double Bit Error is detected in FPF0. */
+ uint64_t fed0_sbe : 1; /**< Set when a Single Bit Error is detected in FPF0. */
+#else
+ uint64_t fed0_sbe : 1;
+ uint64_t fed0_dbe : 1;
+ uint64_t fed1_sbe : 1;
+ uint64_t fed1_dbe : 1;
+ uint64_t q0_und : 1;
+ uint64_t q0_coff : 1;
+ uint64_t q0_perr : 1;
+ uint64_t q1_und : 1;
+ uint64_t q1_coff : 1;
+ uint64_t q1_perr : 1;
+ uint64_t q2_und : 1;
+ uint64_t q2_coff : 1;
+ uint64_t q2_perr : 1;
+ uint64_t q3_und : 1;
+ uint64_t q3_coff : 1;
+ uint64_t q3_perr : 1;
+ uint64_t q4_und : 1;
+ uint64_t q4_coff : 1;
+ uint64_t q4_perr : 1;
+ uint64_t q5_und : 1;
+ uint64_t q5_coff : 1;
+ uint64_t q5_perr : 1;
+ uint64_t q6_und : 1;
+ uint64_t q6_coff : 1;
+ uint64_t q6_perr : 1;
+ uint64_t q7_und : 1;
+ uint64_t q7_coff : 1;
+ uint64_t q7_perr : 1;
+ uint64_t pool0th : 1;
+ uint64_t pool1th : 1;
+ uint64_t pool2th : 1;
+ uint64_t pool3th : 1;
+ uint64_t pool4th : 1;
+ uint64_t pool5th : 1;
+ uint64_t pool6th : 1;
+ uint64_t pool7th : 1;
+ uint64_t free0 : 1;
+ uint64_t free1 : 1;
+ uint64_t free2 : 1;
+ uint64_t free3 : 1;
+ uint64_t free4 : 1;
+ uint64_t free5 : 1;
+ uint64_t free6 : 1;
+ uint64_t free7 : 1;
+ uint64_t free8 : 1;
+ uint64_t q8_und : 1;
+ uint64_t q8_coff : 1;
+ uint64_t q8_perr : 1;
+ uint64_t pool8th : 1;
+ uint64_t paddr_e : 1;
+ uint64_t reserved_50_63 : 14;
+#endif
+ } s;
+ struct cvmx_fpa_int_sum_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t q7_perr : 1; /**< Set when a Queue7 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q7_coff : 1; /**< Set when a Queue7 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q7_und : 1; /**< Set when a Queue7 page count available goes
+ negative. */
+ uint64_t q6_perr : 1; /**< Set when a Queue6 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q6_coff : 1; /**< Set when a Queue6 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q6_und : 1; /**< Set when a Queue6 page count available goes
+ negative. */
+ uint64_t q5_perr : 1; /**< Set when a Queue5 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q5_coff : 1; /**< Set when a Queue5 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q5_und : 1; /**< Set when a Queue5 page count available goes
+ negative. */
+ uint64_t q4_perr : 1; /**< Set when a Queue4 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q4_coff : 1; /**< Set when a Queue4 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q4_und : 1; /**< Set when a Queue4 page count available goes
+ negative. */
+ uint64_t q3_perr : 1; /**< Set when a Queue3 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q3_coff : 1; /**< Set when a Queue3 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q3_und : 1; /**< Set when a Queue3 page count available goes
+ negative. */
+ uint64_t q2_perr : 1; /**< Set when a Queue2 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q2_coff : 1; /**< Set when a Queue2 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q2_und : 1; /**< Set when a Queue2 page count available goes
+ negative. */
+ uint64_t q1_perr : 1; /**< Set when a Queue1 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q1_coff : 1; /**< Set when a Queue1 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q1_und : 1; /**< Set when a Queue1 page count available goes
+ negative. */
+ uint64_t q0_perr : 1; /**< Set when a Queue0 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q0_coff : 1; /**< Set when a Queue0 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q0_und : 1; /**< Set when a Queue0 page count available goes
+ negative. */
+ uint64_t fed1_dbe : 1; /**< Set when a Double Bit Error is detected in FPF1. */
+ uint64_t fed1_sbe : 1; /**< Set when a Single Bit Error is detected in FPF1. */
+ uint64_t fed0_dbe : 1; /**< Set when a Double Bit Error is detected in FPF0. */
+ uint64_t fed0_sbe : 1; /**< Set when a Single Bit Error is detected in FPF0. */
+#else
+ uint64_t fed0_sbe : 1;
+ uint64_t fed0_dbe : 1;
+ uint64_t fed1_sbe : 1;
+ uint64_t fed1_dbe : 1;
+ uint64_t q0_und : 1;
+ uint64_t q0_coff : 1;
+ uint64_t q0_perr : 1;
+ uint64_t q1_und : 1;
+ uint64_t q1_coff : 1;
+ uint64_t q1_perr : 1;
+ uint64_t q2_und : 1;
+ uint64_t q2_coff : 1;
+ uint64_t q2_perr : 1;
+ uint64_t q3_und : 1;
+ uint64_t q3_coff : 1;
+ uint64_t q3_perr : 1;
+ uint64_t q4_und : 1;
+ uint64_t q4_coff : 1;
+ uint64_t q4_perr : 1;
+ uint64_t q5_und : 1;
+ uint64_t q5_coff : 1;
+ uint64_t q5_perr : 1;
+ uint64_t q6_und : 1;
+ uint64_t q6_coff : 1;
+ uint64_t q6_perr : 1;
+ uint64_t q7_und : 1;
+ uint64_t q7_coff : 1;
+ uint64_t q7_perr : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn30xx;
+ struct cvmx_fpa_int_sum_cn30xx cn31xx;
+ struct cvmx_fpa_int_sum_cn30xx cn38xx;
+ struct cvmx_fpa_int_sum_cn30xx cn38xxp2;
+ struct cvmx_fpa_int_sum_cn30xx cn50xx;
+ struct cvmx_fpa_int_sum_cn30xx cn52xx;
+ struct cvmx_fpa_int_sum_cn30xx cn52xxp1;
+ struct cvmx_fpa_int_sum_cn30xx cn56xx;
+ struct cvmx_fpa_int_sum_cn30xx cn56xxp1;
+ struct cvmx_fpa_int_sum_cn30xx cn58xx;
+ struct cvmx_fpa_int_sum_cn30xx cn58xxp1;
+ struct cvmx_fpa_int_sum_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63 : 14;
+ uint64_t paddr_e : 1; /**< Set when a pointer address does not fall in the
+ address range for a pool specified by
+ FPA_POOLX_START_ADDR and FPA_POOLX_END_ADDR. */
+ uint64_t reserved_44_48 : 5;
+ uint64_t free7 : 1; /**< When a pointer for POOL7 is freed bit is set. */
+ uint64_t free6 : 1; /**< When a pointer for POOL6 is freed bit is set. */
+ uint64_t free5 : 1; /**< When a pointer for POOL5 is freed bit is set. */
+ uint64_t free4 : 1; /**< When a pointer for POOL4 is freed bit is set. */
+ uint64_t free3 : 1; /**< When a pointer for POOL3 is freed bit is set. */
+ uint64_t free2 : 1; /**< When a pointer for POOL2 is freed bit is set. */
+ uint64_t free1 : 1; /**< When a pointer for POOL1 is freed bit is set. */
+ uint64_t free0 : 1; /**< When a pointer for POOL0 is freed bit is set. */
+ uint64_t pool7th : 1; /**< Set when FPA_QUE7_AVAILABLE is equal to
+ FPA_POOL7_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool6th : 1; /**< Set when FPA_QUE6_AVAILABLE is equal to
+ FPA_POOL6_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool5th : 1; /**< Set when FPA_QUE5_AVAILABLE is equal to
+ FPA_POOL5_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool4th : 1; /**< Set when FPA_QUE4_AVAILABLE is equal to
+ FPA_POOL4_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool3th : 1; /**< Set when FPA_QUE3_AVAILABLE is equal to
+ FPA_POOL3_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool2th : 1; /**< Set when FPA_QUE2_AVAILABLE is equal to
+ FPA_POOL2_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool1th : 1; /**< Set when FPA_QUE1_AVAILABLE is equal to
+ FPA_POOL1_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool0th : 1; /**< Set when FPA_QUE0_AVAILABLE is equal to
+ FPA_POOL0_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t q7_perr : 1; /**< Set when a Queue7 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q7_coff : 1; /**< Set when a Queue7 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q7_und : 1; /**< Set when a Queue7 page count available goes
+ negative. */
+ uint64_t q6_perr : 1; /**< Set when a Queue6 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q6_coff : 1; /**< Set when a Queue6 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q6_und : 1; /**< Set when a Queue6 page count available goes
+ negative. */
+ uint64_t q5_perr : 1; /**< Set when a Queue5 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q5_coff : 1; /**< Set when a Queue5 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q5_und : 1; /**< Set when a Queue5 page count available goes
+ negative. */
+ uint64_t q4_perr : 1; /**< Set when a Queue4 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q4_coff : 1; /**< Set when a Queue4 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q4_und : 1; /**< Set when a Queue4 page count available goes
+ negative. */
+ uint64_t q3_perr : 1; /**< Set when a Queue3 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q3_coff : 1; /**< Set when a Queue3 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q3_und : 1; /**< Set when a Queue3 page count available goes
+ negative. */
+ uint64_t q2_perr : 1; /**< Set when a Queue2 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q2_coff : 1; /**< Set when a Queue2 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q2_und : 1; /**< Set when a Queue2 page count available goes
+ negative. */
+ uint64_t q1_perr : 1; /**< Set when a Queue1 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q1_coff : 1; /**< Set when a Queue1 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q1_und : 1; /**< Set when a Queue1 page count available goes
+ negative. */
+ uint64_t q0_perr : 1; /**< Set when a Queue0 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q0_coff : 1; /**< Set when a Queue0 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q0_und : 1; /**< Set when a Queue0 page count available goes
+ negative. */
+ uint64_t fed1_dbe : 1; /**< Set when a Double Bit Error is detected in FPF1. */
+ uint64_t fed1_sbe : 1; /**< Set when a Single Bit Error is detected in FPF1. */
+ uint64_t fed0_dbe : 1; /**< Set when a Double Bit Error is detected in FPF0. */
+ uint64_t fed0_sbe : 1; /**< Set when a Single Bit Error is detected in FPF0. */
+#else
+ uint64_t fed0_sbe : 1;
+ uint64_t fed0_dbe : 1;
+ uint64_t fed1_sbe : 1;
+ uint64_t fed1_dbe : 1;
+ uint64_t q0_und : 1;
+ uint64_t q0_coff : 1;
+ uint64_t q0_perr : 1;
+ uint64_t q1_und : 1;
+ uint64_t q1_coff : 1;
+ uint64_t q1_perr : 1;
+ uint64_t q2_und : 1;
+ uint64_t q2_coff : 1;
+ uint64_t q2_perr : 1;
+ uint64_t q3_und : 1;
+ uint64_t q3_coff : 1;
+ uint64_t q3_perr : 1;
+ uint64_t q4_und : 1;
+ uint64_t q4_coff : 1;
+ uint64_t q4_perr : 1;
+ uint64_t q5_und : 1;
+ uint64_t q5_coff : 1;
+ uint64_t q5_perr : 1;
+ uint64_t q6_und : 1;
+ uint64_t q6_coff : 1;
+ uint64_t q6_perr : 1;
+ uint64_t q7_und : 1;
+ uint64_t q7_coff : 1;
+ uint64_t q7_perr : 1;
+ uint64_t pool0th : 1;
+ uint64_t pool1th : 1;
+ uint64_t pool2th : 1;
+ uint64_t pool3th : 1;
+ uint64_t pool4th : 1;
+ uint64_t pool5th : 1;
+ uint64_t pool6th : 1;
+ uint64_t pool7th : 1;
+ uint64_t free0 : 1;
+ uint64_t free1 : 1;
+ uint64_t free2 : 1;
+ uint64_t free3 : 1;
+ uint64_t free4 : 1;
+ uint64_t free5 : 1;
+ uint64_t free6 : 1;
+ uint64_t free7 : 1;
+ uint64_t reserved_44_48 : 5;
+ uint64_t paddr_e : 1;
+ uint64_t reserved_50_63 : 14;
+#endif
+ } cn61xx;
+ struct cvmx_fpa_int_sum_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t free7 : 1; /**< When a pointer for POOL7 is freed bit is set. */
+ uint64_t free6 : 1; /**< When a pointer for POOL6 is freed bit is set. */
+ uint64_t free5 : 1; /**< When a pointer for POOL5 is freed bit is set. */
+ uint64_t free4 : 1; /**< When a pointer for POOL4 is freed bit is set. */
+ uint64_t free3 : 1; /**< When a pointer for POOL3 is freed bit is set. */
+ uint64_t free2 : 1; /**< When a pointer for POOL2 is freed bit is set. */
+ uint64_t free1 : 1; /**< When a pointer for POOL1 is freed bit is set. */
+ uint64_t free0 : 1; /**< When a pointer for POOL0 is freed bit is set. */
+ uint64_t pool7th : 1; /**< Set when FPA_QUE7_AVAILABLE is equal to
+ FPA_POOL7_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool6th : 1; /**< Set when FPA_QUE6_AVAILABLE is equal to
+ FPA_POOL6_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool5th : 1; /**< Set when FPA_QUE5_AVAILABLE is equal to
+ FPA_POOL5_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool4th : 1; /**< Set when FPA_QUE4_AVAILABLE is equal to
+ FPA_POOL4_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool3th : 1; /**< Set when FPA_QUE3_AVAILABLE is equal to
+ FPA_POOL3_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool2th : 1; /**< Set when FPA_QUE2_AVAILABLE is equal to
+ FPA_POOL2_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool1th : 1; /**< Set when FPA_QUE1_AVAILABLE is equal to
+ FPA_POOL1_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t pool0th : 1; /**< Set when FPA_QUE0_AVAILABLE is equal to
+ FPA_POOL0_THRESHOLD[THRESH] and a pointer is
+ allocated or de-allocated. */
+ uint64_t q7_perr : 1; /**< Set when a Queue7 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q7_coff : 1; /**< Set when a Queue7 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q7_und : 1; /**< Set when a Queue7 page count available goes
+ negative. */
+ uint64_t q6_perr : 1; /**< Set when a Queue6 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q6_coff : 1; /**< Set when a Queue6 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q6_und : 1; /**< Set when a Queue6 page count available goes
+ negative. */
+ uint64_t q5_perr : 1; /**< Set when a Queue5 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q5_coff : 1; /**< Set when a Queue5 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q5_und : 1; /**< Set when a Queue5 page count available goes
+ negative. */
+ uint64_t q4_perr : 1; /**< Set when a Queue4 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q4_coff : 1; /**< Set when a Queue4 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q4_und : 1; /**< Set when a Queue4 page count available goes
+ negative. */
+ uint64_t q3_perr : 1; /**< Set when a Queue3 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q3_coff : 1; /**< Set when a Queue3 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q3_und : 1; /**< Set when a Queue3 page count available goes
+ negative. */
+ uint64_t q2_perr : 1; /**< Set when a Queue2 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q2_coff : 1; /**< Set when a Queue2 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q2_und : 1; /**< Set when a Queue2 page count available goes
+ negative. */
+ uint64_t q1_perr : 1; /**< Set when a Queue1 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q1_coff : 1; /**< Set when a Queue1 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q1_und : 1; /**< Set when a Queue1 page count available goes
+ negative. */
+ uint64_t q0_perr : 1; /**< Set when a Queue0 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q0_coff : 1; /**< Set when a Queue0 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q0_und : 1; /**< Set when a Queue0 page count available goes
+ negative. */
+ uint64_t fed1_dbe : 1; /**< Set when a Double Bit Error is detected in FPF1. */
+ uint64_t fed1_sbe : 1; /**< Set when a Single Bit Error is detected in FPF1. */
+ uint64_t fed0_dbe : 1; /**< Set when a Double Bit Error is detected in FPF0. */
+ uint64_t fed0_sbe : 1; /**< Set when a Single Bit Error is detected in FPF0. */
+#else
+ uint64_t fed0_sbe : 1;
+ uint64_t fed0_dbe : 1;
+ uint64_t fed1_sbe : 1;
+ uint64_t fed1_dbe : 1;
+ uint64_t q0_und : 1;
+ uint64_t q0_coff : 1;
+ uint64_t q0_perr : 1;
+ uint64_t q1_und : 1;
+ uint64_t q1_coff : 1;
+ uint64_t q1_perr : 1;
+ uint64_t q2_und : 1;
+ uint64_t q2_coff : 1;
+ uint64_t q2_perr : 1;
+ uint64_t q3_und : 1;
+ uint64_t q3_coff : 1;
+ uint64_t q3_perr : 1;
+ uint64_t q4_und : 1;
+ uint64_t q4_coff : 1;
+ uint64_t q4_perr : 1;
+ uint64_t q5_und : 1;
+ uint64_t q5_coff : 1;
+ uint64_t q5_perr : 1;
+ uint64_t q6_und : 1;
+ uint64_t q6_coff : 1;
+ uint64_t q6_perr : 1;
+ uint64_t q7_und : 1;
+ uint64_t q7_coff : 1;
+ uint64_t q7_perr : 1;
+ uint64_t pool0th : 1;
+ uint64_t pool1th : 1;
+ uint64_t pool2th : 1;
+ uint64_t pool3th : 1;
+ uint64_t pool4th : 1;
+ uint64_t pool5th : 1;
+ uint64_t pool6th : 1;
+ uint64_t pool7th : 1;
+ uint64_t free0 : 1;
+ uint64_t free1 : 1;
+ uint64_t free2 : 1;
+ uint64_t free3 : 1;
+ uint64_t free4 : 1;
+ uint64_t free5 : 1;
+ uint64_t free6 : 1;
+ uint64_t free7 : 1;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } cn63xx;
+ struct cvmx_fpa_int_sum_cn30xx cn63xxp1;
+ struct cvmx_fpa_int_sum_cn61xx cn66xx;
+ struct cvmx_fpa_int_sum_s cn68xx;
+ struct cvmx_fpa_int_sum_s cn68xxp1;
+ struct cvmx_fpa_int_sum_cn61xx cnf71xx;
+};
+typedef union cvmx_fpa_int_sum cvmx_fpa_int_sum_t;
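+
+/* Usage sketch (editor's illustration, not part of the SDK): FPA_INT_SUM is
+ * normally read through this union, and on Octeon the summary bits are
+ * typically write-1-to-clear, so writing back what was read acknowledges the
+ * events. The CVMX_FPA_INT_SUM address macro is assumed to be defined earlier
+ * in this file.
+ *
+ *   cvmx_fpa_int_sum_t isum;
+ *   isum.u64 = cvmx_read_csr(CVMX_FPA_INT_SUM);
+ *   if (isum.s.fed0_dbe)
+ *       cvmx_dprintf("FPA: double bit error detected in FPF0\n");
+ *   cvmx_write_csr(CVMX_FPA_INT_SUM, isum.u64);  // clear what we observed
+ */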
+
+/**
+ * cvmx_fpa_packet_threshold
+ *
+ * FPA_PACKET_THRESHOLD = FPA's Packet Threshold
+ *
+ * When the value of FPA_QUE0_AVAILABLE[QUE_SIZ] is less than the value of this register, a low pool count signal is sent to the
+ * PCIe packet instruction engine (to make it stop reading instructions) and to the Packet-Arbiter, informing it not to give grants
+ * to packet MACs, with the exception of the PCIe MAC.
+ */
+union cvmx_fpa_packet_threshold {
+ uint64_t u64;
+ struct cvmx_fpa_packet_threshold_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t thresh : 32; /**< Packet Threshold. */
+#else
+ uint64_t thresh : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_fpa_packet_threshold_s cn61xx;
+ struct cvmx_fpa_packet_threshold_s cn63xx;
+ struct cvmx_fpa_packet_threshold_s cn66xx;
+ struct cvmx_fpa_packet_threshold_s cn68xx;
+ struct cvmx_fpa_packet_threshold_s cn68xxp1;
+ struct cvmx_fpa_packet_threshold_s cnf71xx;
+};
+typedef union cvmx_fpa_packet_threshold cvmx_fpa_packet_threshold_t;
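+
+/* Example (editor's sketch): programming the threshold through the union keeps
+ * the reserved bits zero. The CVMX_FPA_PACKET_THRESHOLD address macro is
+ * assumed from earlier in this file and the value 96 is arbitrary.
+ *
+ *   cvmx_fpa_packet_threshold_t pt;
+ *   pt.u64 = 0;
+ *   pt.s.thresh = 96;  // signal low pool count below 96 free pages
+ *   cvmx_write_csr(CVMX_FPA_PACKET_THRESHOLD, pt.u64);
+ */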
+
+/**
+ * cvmx_fpa_pool#_end_addr
+ *
+ * FPA_POOLX_END_ADDR = FPA's Pool-X Ending Address
+ *
+ * Pointers sent to this pool must be equal to or less than this address.
+ */
+union cvmx_fpa_poolx_end_addr {
+ uint64_t u64;
+ struct cvmx_fpa_poolx_end_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t addr : 33; /**< Address. */
+#else
+ uint64_t addr : 33;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_fpa_poolx_end_addr_s cn61xx;
+ struct cvmx_fpa_poolx_end_addr_s cn66xx;
+ struct cvmx_fpa_poolx_end_addr_s cn68xx;
+ struct cvmx_fpa_poolx_end_addr_s cn68xxp1;
+ struct cvmx_fpa_poolx_end_addr_s cnf71xx;
+};
+typedef union cvmx_fpa_poolx_end_addr cvmx_fpa_poolx_end_addr_t;
+
+/**
+ * cvmx_fpa_pool#_start_addr
+ *
+ * FPA_POOLX_START_ADDR = FPA's Pool-X Starting Address
+ *
+ * Pointers sent to this pool must be equal to or greater than this address.
+ */
+union cvmx_fpa_poolx_start_addr {
+ uint64_t u64;
+ struct cvmx_fpa_poolx_start_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t addr : 33; /**< Address. */
+#else
+ uint64_t addr : 33;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_fpa_poolx_start_addr_s cn61xx;
+ struct cvmx_fpa_poolx_start_addr_s cn66xx;
+ struct cvmx_fpa_poolx_start_addr_s cn68xx;
+ struct cvmx_fpa_poolx_start_addr_s cn68xxp1;
+ struct cvmx_fpa_poolx_start_addr_s cnf71xx;
+};
+typedef union cvmx_fpa_poolx_start_addr cvmx_fpa_poolx_start_addr_t;
+
+/**
+ * cvmx_fpa_pool#_threshold
+ *
+ * FPA_POOLX_THRESHOLD = FPA's Pool 0-7 Threshold
+ *
+ * When the value of FPA_QUEX_AVAILABLE is equal to FPA_POOLX_THRESHOLD[THRESH] as a pointer is allocated
+ * or deallocated, the FPA_INT_SUM[POOLXTH] interrupt is set.
+ */
+union cvmx_fpa_poolx_threshold {
+ uint64_t u64;
+ struct cvmx_fpa_poolx_threshold_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t thresh : 32; /**< The Threshold. */
+#else
+ uint64_t thresh : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_fpa_poolx_threshold_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t thresh : 29; /**< The Threshold. */
+#else
+ uint64_t thresh : 29;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn61xx;
+ struct cvmx_fpa_poolx_threshold_cn61xx cn63xx;
+ struct cvmx_fpa_poolx_threshold_cn61xx cn66xx;
+ struct cvmx_fpa_poolx_threshold_s cn68xx;
+ struct cvmx_fpa_poolx_threshold_s cn68xxp1;
+ struct cvmx_fpa_poolx_threshold_cn61xx cnf71xx;
+};
+typedef union cvmx_fpa_poolx_threshold cvmx_fpa_poolx_threshold_t;
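+
+/* Example (editor's sketch): arming the threshold interrupt for one pool.
+ * CVMX_FPA_POOLX_THRESHOLD(pool) is assumed to be the per-pool address macro
+ * defined earlier in this file; the threshold value 16 is arbitrary.
+ *
+ *   cvmx_fpa_poolx_threshold_t th;
+ *   th.u64 = 0;
+ *   th.s.thresh = 16;  // raise FPA_INT_SUM[POOL0TH] when 16 pages remain
+ *   cvmx_write_csr(CVMX_FPA_POOLX_THRESHOLD(0), th.u64);
+ */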
+
+/**
+ * cvmx_fpa_que#_available
+ *
+ * FPA_QUEX_PAGES_AVAILABLE = FPA's Queue 0-7 Free Page Available Register
+ *
+ * The number of page pointers that are available in the FPA and local DRAM.
+ */
+union cvmx_fpa_quex_available {
+ uint64_t u64;
+ struct cvmx_fpa_quex_available_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t que_siz : 32; /**< The number of free pages available in this Queue.
+ In PASS-1 this field was [25:0]. */
+#else
+ uint64_t que_siz : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_fpa_quex_available_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t que_siz : 29; /**< The number of free pages available in this Queue. */
+#else
+ uint64_t que_siz : 29;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn30xx;
+ struct cvmx_fpa_quex_available_cn30xx cn31xx;
+ struct cvmx_fpa_quex_available_cn30xx cn38xx;
+ struct cvmx_fpa_quex_available_cn30xx cn38xxp2;
+ struct cvmx_fpa_quex_available_cn30xx cn50xx;
+ struct cvmx_fpa_quex_available_cn30xx cn52xx;
+ struct cvmx_fpa_quex_available_cn30xx cn52xxp1;
+ struct cvmx_fpa_quex_available_cn30xx cn56xx;
+ struct cvmx_fpa_quex_available_cn30xx cn56xxp1;
+ struct cvmx_fpa_quex_available_cn30xx cn58xx;
+ struct cvmx_fpa_quex_available_cn30xx cn58xxp1;
+ struct cvmx_fpa_quex_available_cn30xx cn61xx;
+ struct cvmx_fpa_quex_available_cn30xx cn63xx;
+ struct cvmx_fpa_quex_available_cn30xx cn63xxp1;
+ struct cvmx_fpa_quex_available_cn30xx cn66xx;
+ struct cvmx_fpa_quex_available_s cn68xx;
+ struct cvmx_fpa_quex_available_s cn68xxp1;
+ struct cvmx_fpa_quex_available_cn30xx cnf71xx;
+};
+typedef union cvmx_fpa_quex_available cvmx_fpa_quex_available_t;
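+
+/* Example (editor's sketch): polling the free-page count for a pool, the same
+ * check that cvmx_fpa_alloc() in cvmx-fpa.h performs before deciding whether
+ * to retry an allocation.
+ *
+ *   cvmx_fpa_quex_available_t avail;
+ *   avail.u64 = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(0));
+ *   if (avail.s.que_siz == 0)
+ *       cvmx_dprintf("FPA pool 0 is out of free pages\n");
+ */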
+
+/**
+ * cvmx_fpa_que#_page_index
+ *
+ * FPA_QUE0_PAGE_INDEX = FPA's Queue0 Page Index
+ *
+ * The present index page for queue 0 of the FPA; this is a PASS-2 register.
+ * This number reflects the number of pages of pointers that have been written to memory
+ * for this queue.
+ */
+union cvmx_fpa_quex_page_index {
+ uint64_t u64;
+ struct cvmx_fpa_quex_page_index_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t pg_num : 25; /**< Page number. */
+#else
+ uint64_t pg_num : 25;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_fpa_quex_page_index_s cn30xx;
+ struct cvmx_fpa_quex_page_index_s cn31xx;
+ struct cvmx_fpa_quex_page_index_s cn38xx;
+ struct cvmx_fpa_quex_page_index_s cn38xxp2;
+ struct cvmx_fpa_quex_page_index_s cn50xx;
+ struct cvmx_fpa_quex_page_index_s cn52xx;
+ struct cvmx_fpa_quex_page_index_s cn52xxp1;
+ struct cvmx_fpa_quex_page_index_s cn56xx;
+ struct cvmx_fpa_quex_page_index_s cn56xxp1;
+ struct cvmx_fpa_quex_page_index_s cn58xx;
+ struct cvmx_fpa_quex_page_index_s cn58xxp1;
+ struct cvmx_fpa_quex_page_index_s cn61xx;
+ struct cvmx_fpa_quex_page_index_s cn63xx;
+ struct cvmx_fpa_quex_page_index_s cn63xxp1;
+ struct cvmx_fpa_quex_page_index_s cn66xx;
+ struct cvmx_fpa_quex_page_index_s cn68xx;
+ struct cvmx_fpa_quex_page_index_s cn68xxp1;
+ struct cvmx_fpa_quex_page_index_s cnf71xx;
+};
+typedef union cvmx_fpa_quex_page_index cvmx_fpa_quex_page_index_t;
+
+/**
+ * cvmx_fpa_que8_page_index
+ *
+ * FPA_QUE8_PAGE_INDEX = FPA's Queue8 Page Index
+ *
+ * The present index page for queue 8 of the FPA.
+ * This number reflects the number of pages of pointers that have been written to memory
+ * for this queue.
+ * Because the address space is 38 bits, the number of 128-byte pages could cause this register value to wrap.
+ */
+union cvmx_fpa_que8_page_index {
+ uint64_t u64;
+ struct cvmx_fpa_que8_page_index_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t pg_num : 25; /**< Page number. */
+#else
+ uint64_t pg_num : 25;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_fpa_que8_page_index_s cn68xx;
+ struct cvmx_fpa_que8_page_index_s cn68xxp1;
+};
+typedef union cvmx_fpa_que8_page_index cvmx_fpa_que8_page_index_t;
+
+/**
+ * cvmx_fpa_que_act
+ *
+ * FPA_QUE_ACT = FPA's Queue# Actual Page Index
+ *
+ * When an INT_SUM[PERR#] occurs, this will be latched with the value read from the L2C. PASS-2 register.
+ * This is latched on the first error and will not latch again until all errors are cleared.
+ */
+union cvmx_fpa_que_act {
+ uint64_t u64;
+ struct cvmx_fpa_que_act_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t act_que : 3; /**< FPA-queue-number read from memory. */
+ uint64_t act_indx : 26; /**< Page number read from memory. */
+#else
+ uint64_t act_indx : 26;
+ uint64_t act_que : 3;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_fpa_que_act_s cn30xx;
+ struct cvmx_fpa_que_act_s cn31xx;
+ struct cvmx_fpa_que_act_s cn38xx;
+ struct cvmx_fpa_que_act_s cn38xxp2;
+ struct cvmx_fpa_que_act_s cn50xx;
+ struct cvmx_fpa_que_act_s cn52xx;
+ struct cvmx_fpa_que_act_s cn52xxp1;
+ struct cvmx_fpa_que_act_s cn56xx;
+ struct cvmx_fpa_que_act_s cn56xxp1;
+ struct cvmx_fpa_que_act_s cn58xx;
+ struct cvmx_fpa_que_act_s cn58xxp1;
+ struct cvmx_fpa_que_act_s cn61xx;
+ struct cvmx_fpa_que_act_s cn63xx;
+ struct cvmx_fpa_que_act_s cn63xxp1;
+ struct cvmx_fpa_que_act_s cn66xx;
+ struct cvmx_fpa_que_act_s cn68xx;
+ struct cvmx_fpa_que_act_s cn68xxp1;
+ struct cvmx_fpa_que_act_s cnf71xx;
+};
+typedef union cvmx_fpa_que_act cvmx_fpa_que_act_t;
+
+/**
+ * cvmx_fpa_que_exp
+ *
+ * FPA_QUE_EXP = FPA's Queue# Expected Page Index
+ *
+ * When an INT_SUM[PERR#] occurs, this will be latched with the expected value. PASS-2 register.
+ * This is latched on the first error and will not latch again until all errors are cleared.
+ */
+union cvmx_fpa_que_exp {
+ uint64_t u64;
+ struct cvmx_fpa_que_exp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t exp_que : 3; /**< Expected fpa-queue-number read from memory. */
+ uint64_t exp_indx : 26; /**< Expected page number read from memory. */
+#else
+ uint64_t exp_indx : 26;
+ uint64_t exp_que : 3;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_fpa_que_exp_s cn30xx;
+ struct cvmx_fpa_que_exp_s cn31xx;
+ struct cvmx_fpa_que_exp_s cn38xx;
+ struct cvmx_fpa_que_exp_s cn38xxp2;
+ struct cvmx_fpa_que_exp_s cn50xx;
+ struct cvmx_fpa_que_exp_s cn52xx;
+ struct cvmx_fpa_que_exp_s cn52xxp1;
+ struct cvmx_fpa_que_exp_s cn56xx;
+ struct cvmx_fpa_que_exp_s cn56xxp1;
+ struct cvmx_fpa_que_exp_s cn58xx;
+ struct cvmx_fpa_que_exp_s cn58xxp1;
+ struct cvmx_fpa_que_exp_s cn61xx;
+ struct cvmx_fpa_que_exp_s cn63xx;
+ struct cvmx_fpa_que_exp_s cn63xxp1;
+ struct cvmx_fpa_que_exp_s cn66xx;
+ struct cvmx_fpa_que_exp_s cn68xx;
+ struct cvmx_fpa_que_exp_s cn68xxp1;
+ struct cvmx_fpa_que_exp_s cnf71xx;
+};
+typedef union cvmx_fpa_que_exp cvmx_fpa_que_exp_t;
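+
+/* Example (editor's sketch): on an FPA_INT_SUM[QX_PERR] event the expected and
+ * actual page indexes can be compared to localize the corrupted stack page.
+ * The CVMX_FPA_QUE_EXP and CVMX_FPA_QUE_ACT address macros are assumed from
+ * earlier in this file.
+ *
+ *   cvmx_fpa_que_exp_t exp;
+ *   cvmx_fpa_que_act_t act;
+ *   exp.u64 = cvmx_read_csr(CVMX_FPA_QUE_EXP);
+ *   act.u64 = cvmx_read_csr(CVMX_FPA_QUE_ACT);
+ *   cvmx_dprintf("FPA PERR: expected que %d idx %d, got que %d idx %d\n",
+ *                (int)exp.s.exp_que, (int)exp.s.exp_indx,
+ *                (int)act.s.act_que, (int)act.s.act_indx);
+ */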
+
+/**
+ * cvmx_fpa_wart_ctl
+ *
+ * FPA_WART_CTL = FPA's WART Control
+ *
+ * Control and status for the WART block.
+ */
+union cvmx_fpa_wart_ctl {
+ uint64_t u64;
+ struct cvmx_fpa_wart_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t ctl : 16; /**< Control information. */
+#else
+ uint64_t ctl : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_fpa_wart_ctl_s cn30xx;
+ struct cvmx_fpa_wart_ctl_s cn31xx;
+ struct cvmx_fpa_wart_ctl_s cn38xx;
+ struct cvmx_fpa_wart_ctl_s cn38xxp2;
+ struct cvmx_fpa_wart_ctl_s cn50xx;
+ struct cvmx_fpa_wart_ctl_s cn52xx;
+ struct cvmx_fpa_wart_ctl_s cn52xxp1;
+ struct cvmx_fpa_wart_ctl_s cn56xx;
+ struct cvmx_fpa_wart_ctl_s cn56xxp1;
+ struct cvmx_fpa_wart_ctl_s cn58xx;
+ struct cvmx_fpa_wart_ctl_s cn58xxp1;
+};
+typedef union cvmx_fpa_wart_ctl cvmx_fpa_wart_ctl_t;
+
+/**
+ * cvmx_fpa_wart_status
+ *
+ * FPA_WART_STATUS = FPA's WART Status
+ *
+ * Control and status for the WART block.
+ */
+union cvmx_fpa_wart_status {
+ uint64_t u64;
+ struct cvmx_fpa_wart_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t status : 32; /**< Status information. */
+#else
+ uint64_t status : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_fpa_wart_status_s cn30xx;
+ struct cvmx_fpa_wart_status_s cn31xx;
+ struct cvmx_fpa_wart_status_s cn38xx;
+ struct cvmx_fpa_wart_status_s cn38xxp2;
+ struct cvmx_fpa_wart_status_s cn50xx;
+ struct cvmx_fpa_wart_status_s cn52xx;
+ struct cvmx_fpa_wart_status_s cn52xxp1;
+ struct cvmx_fpa_wart_status_s cn56xx;
+ struct cvmx_fpa_wart_status_s cn56xxp1;
+ struct cvmx_fpa_wart_status_s cn58xx;
+ struct cvmx_fpa_wart_status_s cn58xxp1;
+};
+typedef union cvmx_fpa_wart_status cvmx_fpa_wart_status_t;
+
+/**
+ * cvmx_fpa_wqe_threshold
+ *
+ * FPA_WQE_THRESHOLD = FPA's WQE Threshold
+ *
+ * When the value of FPA_QUE#_AVAILABLE[QUE_SIZ] (\# is determined by the value of IPD_WQE_FPA_QUEUE) is less than the value of this
+ * register, a low pool count signal is sent to the PCIe packet instruction engine (to make it stop reading instructions) and to the
+ * Packet-Arbiter, informing it not to give grants to packet MACs, with the exception of the PCIe MAC.
+ */
+union cvmx_fpa_wqe_threshold {
+ uint64_t u64;
+ struct cvmx_fpa_wqe_threshold_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t thresh : 32; /**< WQE Threshold. */
+#else
+ uint64_t thresh : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_fpa_wqe_threshold_s cn61xx;
+ struct cvmx_fpa_wqe_threshold_s cn63xx;
+ struct cvmx_fpa_wqe_threshold_s cn66xx;
+ struct cvmx_fpa_wqe_threshold_s cn68xx;
+ struct cvmx_fpa_wqe_threshold_s cn68xxp1;
+ struct cvmx_fpa_wqe_threshold_s cnf71xx;
+};
+typedef union cvmx_fpa_wqe_threshold cvmx_fpa_wqe_threshold_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-fpa-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-fpa.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-fpa.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-fpa.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,208 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Support library for the hardware Free Pool Allocator.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#include "cvmx.h"
+#include "cvmx-fpa.h"
+#include "cvmx-ipd.h"
+
+/**
+ * Current state of all the pools. Use access functions
+ * instead of using it directly.
+ */
+CVMX_SHARED cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
+
+
+/**
+ * Setup a FPA pool to control a new block of memory. The
+ * buffer pointer must be a physical address.
+ *
+ * @param pool Pool to initialize
+ * 0 <= pool < 8
+ * @param name Constant character string to name this pool.
+ * String is not copied.
+ * @param buffer Pointer to the block of memory to use. This must be
+ * accessible by all processors and external hardware.
+ * @param block_size Size for each block controlled by the FPA
+ * @param num_blocks Number of blocks
+ *
+ * @return 0 on Success,
+ * -1 on failure
+ */
+int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
+ uint64_t block_size, uint64_t num_blocks)
+{
+ char *ptr;
+ if (!buffer)
+ {
+ cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: NULL buffer pointer!\n");
+ return -1;
+ }
+ if (pool >= CVMX_FPA_NUM_POOLS)
+ {
+ cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Illegal pool!\n");
+ return -1;
+ }
+
+ if (block_size < CVMX_FPA_MIN_BLOCK_SIZE)
+ {
+ cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Block size too small.\n");
+ return -1;
+ }
+
+ if (((unsigned long)buffer & (CVMX_FPA_ALIGNMENT-1)) != 0)
+ {
+ cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Buffer not aligned properly.\n");
+ return -1;
+ }
+
+ cvmx_fpa_pool_info[pool].name = name;
+ cvmx_fpa_pool_info[pool].size = block_size;
+ cvmx_fpa_pool_info[pool].starting_element_count = num_blocks;
+ cvmx_fpa_pool_info[pool].base = buffer;
+
+ ptr = (char*)buffer;
+ while (num_blocks--)
+ {
+ cvmx_fpa_free(ptr, pool, 0);
+ ptr += block_size;
+ }
+ return 0;
+}
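+
+/* Typical call sequence (editor's sketch; the use of cvmx_bootmem_alloc() and
+ * the block count of 1024 are assumptions for illustration): carve a buffer
+ * out of boot memory, hand it to the pool, then enable the FPA (see
+ * cvmx_fpa_enable() in cvmx-fpa.h) before the first allocation.
+ *
+ *   void *mem = cvmx_bootmem_alloc(1024 * CVMX_FPA_POOL_0_SIZE,
+ *                                  CVMX_FPA_ALIGNMENT);
+ *   if (mem && cvmx_fpa_setup_pool(0, "packet buffers", mem,
+ *                                  CVMX_FPA_POOL_0_SIZE, 1024) == 0)
+ *       cvmx_fpa_enable();
+ */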
+
+/**
+ * Shutdown a Memory pool and validate that it had all of
+ * the buffers originally placed in it. This should only be
+ * called by one processor after all hardware has finished
+ * using the pool. Most likely you will want to have called
+ * cvmx_helper_shutdown_packet_io_global() before this
+ * function to make sure all FPA buffers are out of the packet
+ * IO hardware.
+ *
+ * @param pool Pool to shutdown
+ *
+ * @return Zero on success
+ * - Positive is count of missing buffers
+ * - Negative is too many buffers or corrupted pointers
+ */
+uint64_t cvmx_fpa_shutdown_pool(uint64_t pool)
+{
+ int errors = 0;
+ int count = 0;
+ int expected_count = cvmx_fpa_pool_info[pool].starting_element_count;
+ uint64_t base = cvmx_ptr_to_phys(cvmx_fpa_pool_info[pool].base);
+ uint64_t finish = base + cvmx_fpa_pool_info[pool].size * expected_count;
+
+ count = 0;
+ while (1)
+ {
+ uint64_t address;
+ void *ptr = cvmx_fpa_alloc(pool);
+ if (!ptr)
+ break;
+
+ address = cvmx_ptr_to_phys(ptr);
+ if ((address >= base) && (address < finish) &&
+ (((address - base) % cvmx_fpa_pool_info[pool].size) == 0))
+ {
+ count++;
+ }
+ else
+ {
+ cvmx_dprintf("ERROR: cvmx_fpa_shutdown_pool: Illegal address 0x%llx in pool %s(%d)\n",
+ (unsigned long long)address, cvmx_fpa_pool_info[pool].name, (int)pool);
+ errors++;
+ }
+ }
+
+ if (count < expected_count)
+ {
+ cvmx_dprintf("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) missing %d buffers\n",
+ cvmx_fpa_pool_info[pool].name, (int)pool, expected_count - count);
+ }
+ else if (count > expected_count)
+ {
+ cvmx_dprintf("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) had %d duplicate buffers\n",
+ cvmx_fpa_pool_info[pool].name, (int)pool, count - expected_count);
+ }
+
+ if (errors)
+ {
+ cvmx_dprintf("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) started at 0x%llx, ended at 0x%llx, with a step of 0x%x\n",
+ cvmx_fpa_pool_info[pool].name, (int)pool, (unsigned long long)base, (unsigned long long)finish, (int)cvmx_fpa_pool_info[pool].size);
+ return -errors;
+ }
+ else
+ return expected_count - count;
+}
+
+uint64_t cvmx_fpa_get_block_size(uint64_t pool)
+{
+ switch (pool)
+ {
+ case 0:
+ return CVMX_FPA_POOL_0_SIZE;
+ case 1:
+ return CVMX_FPA_POOL_1_SIZE;
+ case 2:
+ return CVMX_FPA_POOL_2_SIZE;
+ case 3:
+ return CVMX_FPA_POOL_3_SIZE;
+ case 4:
+ return CVMX_FPA_POOL_4_SIZE;
+ case 5:
+ return CVMX_FPA_POOL_5_SIZE;
+ case 6:
+ return CVMX_FPA_POOL_6_SIZE;
+ case 7:
+ return CVMX_FPA_POOL_7_SIZE;
+ default:
+ return 0;
+ }
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-fpa.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-fpa.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-fpa.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-fpa.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,338 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Interface to the hardware Free Pool Allocator.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifndef __CVMX_FPA_H__
+#define __CVMX_FPA_H__
+
+#include "cvmx-scratch.h"
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include "cvmx-fpa-defs.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_FPA_NUM_POOLS 8
+#define CVMX_FPA_MIN_BLOCK_SIZE 128
+#define CVMX_FPA_ALIGNMENT 128
+
+/**
+ * Structure describing the data format used for stores to the FPA.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct {
+ uint64_t scraddr : 8; /**< the (64-bit word) location in scratchpad to write to (if len != 0) */
+ uint64_t len : 8; /**< the number of words in the response (0 => no response) */
+ uint64_t did : 8; /**< the ID of the device on the non-coherent bus */
+ uint64_t addr :40; /**< the address that will appear in the first tick on the NCB bus */
+ } s;
+} cvmx_fpa_iobdma_data_t;
+
+/**
+ * Structure describing the current state of a FPA pool.
+ */
+typedef struct
+{
+ const char *name; /**< Name it was created under */
+ uint64_t size; /**< Size of each block */
+ void * base; /**< The base memory address of whole block */
+ uint64_t starting_element_count; /**< The number of elements in the pool at creation */
+} cvmx_fpa_pool_info_t;
+
+/**
+ * Current state of all the pools. Use access functions
+ * instead of using it directly.
+ */
+extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
+
+/* CSR typedefs have been moved to cvmx-fpa-defs.h */
+
+/**
+ * Return the name of the pool
+ *
+ * @param pool Pool to get the name of
+ * @return The name
+ */
+static inline const char *cvmx_fpa_get_name(uint64_t pool)
+{
+ return cvmx_fpa_pool_info[pool].name;
+}
+
+/**
+ * Return the base of the pool
+ *
+ * @param pool Pool to get the base of
+ * @return The base
+ */
+static inline void *cvmx_fpa_get_base(uint64_t pool)
+{
+ return cvmx_fpa_pool_info[pool].base;
+}
+
+/**
+ * Check if a pointer belongs to an FPA pool. Return non-zero
+ * if the supplied pointer is inside the memory controlled by
+ * an FPA pool.
+ *
+ * @param pool Pool to check
+ * @param ptr Pointer to check
+ * @return Non-zero if pointer is in the pool. Zero if not
+ */
+static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
+{
+ return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
+ ((char*)ptr < ((char*)(cvmx_fpa_pool_info[pool].base)) + cvmx_fpa_pool_info[pool].size * cvmx_fpa_pool_info[pool].starting_element_count));
+}
+
+/**
+ * Enable the FPA for use. Must be performed after any CSR
+ * configuration but before any other FPA functions.
+ */
+static inline void cvmx_fpa_enable(void)
+{
+ cvmx_fpa_ctl_status_t status;
+
+ status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+ if (status.s.enb)
+ {
+ /*
+ * CN68XXP1 should not reset the FPA (doing so may break the
+ * SSO), so we may end up enabling it more than once. Just
+ * return and don't spew messages.
+ */
+ return;
+ }
+
+ status.u64 = 0;
+ status.s.enb = 1;
+ cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
+}
+
+/**
+ * Reset FPA to disable. Make sure buffers from all FPA pools are freed
+ * before disabling FPA.
+ */
+static inline void cvmx_fpa_disable(void)
+{
+ cvmx_fpa_ctl_status_t status;
+
+ status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+ status.s.reset = 1;
+ cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
+}
+
+/**
+ * Get a new block from the FPA
+ *
+ * @param pool Pool to get the block from
+ * @return Pointer to the block or NULL on failure
+ */
+static inline void *cvmx_fpa_alloc(uint64_t pool)
+{
+ uint64_t address;
+
+ for (;;) {
+ address = cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool)));
+ if (cvmx_likely(address)) {
+ return cvmx_phys_to_ptr(address);
+ } else {
+ /* If pointers are available, continuously retry. */
+ if (cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool)) > 0)
+ cvmx_wait(50);
+ else
+ return NULL;
+ }
+ }
+}
+
+/**
+ * Asynchronously get a new block from the FPA
+ *
+ * The result of cvmx_fpa_async_alloc() may be retrieved using
+ * cvmx_fpa_async_alloc_finish().
+ *
+ * @param scr_addr Local scratch address to put response in. This is a byte address,
+ * but must be 8 byte aligned.
+ * @param pool Pool to get the block from
+ */
+static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
+{
+ cvmx_fpa_iobdma_data_t data;
+
+ /* Hardware only uses 64 bit aligned locations, so convert from byte address
+ ** to 64-bit index
+ */
+ data.s.scraddr = scr_addr >> 3;
+ data.s.len = 1;
+ data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool);
+ data.s.addr = 0;
+ cvmx_send_single(data.u64);
+}
+
+/**
+ * Retrieve the result of cvmx_fpa_async_alloc
+ *
+ * @param scr_addr The Local scratch address. Must be the same value
+ * passed to cvmx_fpa_async_alloc().
+ *
+ * @param pool Pool the block came from. Must be the same value
+ * passed to cvmx_fpa_async_alloc.
+ *
+ * @return Pointer to the block or NULL on failure
+ */
+static inline void *cvmx_fpa_async_alloc_finish(uint64_t scr_addr, uint64_t pool)
+{
+ uint64_t address;
+
+ CVMX_SYNCIOBDMA;
+
+ address = cvmx_scratch_read64(scr_addr);
+ if (cvmx_likely(address))
+ return cvmx_phys_to_ptr(address);
+ else
+ return cvmx_fpa_alloc(pool);
+}
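+
+/* Usage pattern (editor's sketch; the scratchpad offset 0 and pool number 0
+ * are arbitrary assumptions): issue the IOBDMA early, overlap it with other
+ * work, then collect the pointer.
+ *
+ *   cvmx_fpa_async_alloc(0, 0);
+ *   // ... do unrelated work while the allocation is in flight ...
+ *   void *buf = cvmx_fpa_async_alloc_finish(0, 0);
+ */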
+
+/**
+ * Free a block allocated with a FPA pool.
+ * Does NOT provide memory ordering in cases where the memory block was modified by the core.
+ *
+ * @param ptr Block to free
+ * @param pool Pool to put it in
+ * @param num_cache_lines
+ * Cache lines to invalidate
+ */
+static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool, uint64_t num_cache_lines)
+{
+ cvmx_addr_t newptr;
+ newptr.u64 = cvmx_ptr_to_phys(ptr);
+ newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool));
+ asm volatile ("" : : : "memory"); /* Prevent GCC from reordering around free */
+ /* value written is number of cache lines not written back */
+ cvmx_write_io(newptr.u64, num_cache_lines);
+}
+
+/**
+ * Free a block allocated with a FPA pool. Provides required memory
+ * ordering in cases where memory block was modified by core.
+ *
+ * @param ptr Block to free
+ * @param pool Pool to put it in
+ * @param num_cache_lines
+ * Cache lines to invalidate
+ */
+static inline void cvmx_fpa_free(void *ptr, uint64_t pool, uint64_t num_cache_lines)
+{
+ cvmx_addr_t newptr;
+ newptr.u64 = cvmx_ptr_to_phys(ptr);
+ newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool));
+ /* Make sure that any previous writes to memory go out before we free this buffer.
+ ** This also serves as a barrier to prevent GCC from reordering operations to after
+ ** the free. */
+ CVMX_SYNCWS;
+ /* value written is number of cache lines not written back */
+ cvmx_write_io(newptr.u64, num_cache_lines);
+}
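+
+/* Round-trip sketch (editor's illustration): a buffer obtained with
+ * cvmx_fpa_alloc() must be returned to the same pool; pass 0 cache lines
+ * unless the caller knows how many lines need not be written back.
+ *
+ *   void *buf = cvmx_fpa_alloc(0);
+ *   if (buf) {
+ *       // ... use the buffer ...
+ *       cvmx_fpa_free(buf, 0, 0);
+ *   }
+ */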
+
+/**
+ * Setup a FPA pool to control a new block of memory.
+ * This can only be called once per pool. Make sure proper
+ * locking enforces this.
+ *
+ * @param pool Pool to initialize
+ * 0 <= pool < 8
+ * @param name Constant character string to name this pool.
+ * String is not copied.
+ * @param buffer Pointer to the block of memory to use. This must be
+ * accessible by all processors and external hardware.
+ * @param block_size Size for each block controlled by the FPA
+ * @param num_blocks Number of blocks
+ *
+ * @return 0 on Success,
+ * -1 on failure
+ */
+extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
+ uint64_t block_size, uint64_t num_blocks);
+
+/**
+ * Shutdown a Memory pool and validate that it had all of
+ * the buffers originally placed in it. This should only be
+ * called by one processor after all hardware has finished
+ * using the pool. Most likely you will want to have called
+ * cvmx_helper_shutdown_packet_io_global() before this
+ * function to make sure all FPA buffers are out of the packet
+ * IO hardware.
+ *
+ * @param pool Pool to shutdown
+ *
+ * @return Zero on success
+ * - Positive is count of missing buffers
+ * - Negative is too many buffers or corrupted pointers
+ */
+extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
+
+/**
+ * Get the size of blocks controlled by the pool
+ * This is resolved to a constant at compile time.
+ *
+ * @param pool Pool to access
+ * @return Size of the block in bytes
+ */
+uint64_t cvmx_fpa_get_block_size(uint64_t pool);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_FPA_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-fpa.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-gmx.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-gmx.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-gmx.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,97 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the GMX hardware.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_GMX_H__
+#define __CVMX_GMX_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* CSR typedefs have been moved to cvmx-gmx-defs.h */
+
+/**
+ * Disables the sending of flow control (pause) frames on the specified
+ * RGMII port(s).
+ *
+ * @param interface Which interface (0 or 1)
+ * @param port_mask Mask (4 bits) of which ports on the interface to disable
+ * backpressure on.
+ * 1 => disable backpressure
+ * 0 => enable backpressure
+ *
+ * @return 0 on success
+ * -1 on error
+ */
+static inline int cvmx_gmx_set_backpressure_override(uint32_t interface, uint32_t port_mask)
+{
+    cvmx_gmxx_tx_ovr_bp_t gmxx_tx_ovr_bp;
+
+    /* Check for valid arguments */
+    if (port_mask & ~0xf || interface & ~0x1)
+        return -1;
+
+    gmxx_tx_ovr_bp.u64 = 0;
+    gmxx_tx_ovr_bp.s.en = port_mask;       /* Per-port enable of the backpressure override */
+    gmxx_tx_ovr_bp.s.ign_full = port_mask; /* Ignore RX FIFO full when computing backpressure */
+    cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmxx_tx_ovr_bp.u64);
+    return 0;
+}
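+
+/* A minimal usage sketch (illustrative only): disable pause-frame
+ * generation on ports 0 and 1 of interface 0. Note the write replaces the
+ * whole 4-bit mask, so ports 2 and 3 revert to normal backpressure:
+ *
+ *   if (cvmx_gmx_set_backpressure_override(0, 0x3) != 0)
+ *       cvmx_dprintf("bad interface or port mask\n");
+ */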
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_GMX_H__ */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-gmx.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-gmxx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-gmxx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-gmxx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,10604 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-gmxx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon gmxx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_GMXX_DEFS_H__
+#define __CVMX_GMXX_DEFS_H__
+
+static inline uint64_t CVMX_GMXX_BAD_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_BAD_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 0) * 0x8000000ull;
+}
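+
+/* A minimal usage sketch (illustrative only): these accessors only
+ * compute a CSR address for the running chip; pair them with
+ * cvmx_read_csr()/cvmx_write_csr() from cvmx-access.h and the
+ * cvmx_gmxx_bad_reg_t union defined by the typedef section later in
+ * this header:
+ *
+ *   cvmx_gmxx_bad_reg_t bad_reg;
+ *   bad_reg.u64 = cvmx_read_csr(CVMX_GMXX_BAD_REG(0));
+ *   if (bad_reg.s.statovr)
+ *       cvmx_dprintf("GMX0: TX statistics counter overflow\n");
+ */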
+static inline uint64_t CVMX_GMXX_BIST(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_BIST (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 0) * 0x8000000ull;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_BPID_MAPX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 15)) && ((block_id <= 4))))))
+ cvmx_warn("CVMX_GMXX_BPID_MAPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 15) + ((block_id) & 7) * 0x200000ull) * 8;
+}
+#else
+#define CVMX_GMXX_BPID_MAPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 15) + ((block_id) & 7) * 0x200000ull) * 8)
+#endif
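+
+/* The same pattern repeats throughout this header: when
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING is nonzero (typically set by the SDK
+ * configuration headers), each accessor is an inline function that
+ * validates its arguments and calls cvmx_warn() on out-of-range values;
+ * otherwise it collapses to a bare address macro with no checking, e.g.
+ *
+ *   uint64_t addr = CVMX_GMXX_BPID_MAPX(3, 0);   (no validation)
+ */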
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_BPID_MSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 4)))))
+ cvmx_warn("CVMX_GMXX_BPID_MSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000700ull) + ((block_id) & 7) * 0x1000000ull;
+}
+#else
+#define CVMX_GMXX_BPID_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180008000700ull) + ((block_id) & 7) * 0x1000000ull)
+#endif
+static inline uint64_t CVMX_GMXX_CLK_EN(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_CLK_EN (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 0) * 0x8000000ull;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_EBP_DIS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 4)))))
+ cvmx_warn("CVMX_GMXX_EBP_DIS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000608ull) + ((block_id) & 7) * 0x1000000ull;
+}
+#else
+#define CVMX_GMXX_EBP_DIS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000608ull) + ((block_id) & 7) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_EBP_MSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 4)))))
+ cvmx_warn("CVMX_GMXX_EBP_MSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000600ull) + ((block_id) & 7) * 0x1000000ull;
+}
+#else
+#define CVMX_GMXX_EBP_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180008000600ull) + ((block_id) & 7) * 0x1000000ull)
+#endif
+static inline uint64_t CVMX_GMXX_HG2_CONTROL(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_HG2_CONTROL (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_INF_MODE(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_INF_MODE (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_NXA_ADR(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_NXA_ADR (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 0) * 0x8000000ull;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_PIPE_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 4)))))
+ cvmx_warn("CVMX_GMXX_PIPE_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000760ull) + ((block_id) & 7) * 0x1000000ull;
+}
+#else
+#define CVMX_GMXX_PIPE_STATUS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000760ull) + ((block_id) & 7) * 0x1000000ull)
+#endif
+static inline uint64_t CVMX_GMXX_PRTX_CBFC_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if (((offset == 0)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset == 0)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset == 0)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_PRTX_CBFC_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_PRTX_CFG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_PRTX_CFG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
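+
+/* A minimal usage sketch (illustrative only): a typical read-modify-write
+ * of the per-port configuration register, using the cvmx_gmxx_prtx_cfg_t
+ * union defined later in this header to disable port 1 of interface 0:
+ *
+ *   cvmx_gmxx_prtx_cfg_t prt_cfg;
+ *   prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(1, 0));
+ *   prt_cfg.s.en = 0;
+ *   cvmx_write_csr(CVMX_GMXX_PRTX_CFG(1, 0), prt_cfg.u64);
+ */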
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_RXAUI_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 4)))))
+ cvmx_warn("CVMX_GMXX_RXAUI_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000740ull) + ((block_id) & 7) * 0x1000000ull;
+}
+#else
+#define CVMX_GMXX_RXAUI_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000740ull) + ((block_id) & 7) * 0x1000000ull)
+#endif
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM0 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM1 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM2 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM3 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM4 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM5 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_ALL_EN(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000110ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000110ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000110ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM_ALL_EN (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000110ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM_EN (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_ADR_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
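+
+/* A minimal usage sketch (illustrative only): accept broadcast frames and
+ * use the DMAC CAM as an accept filter on port 0 of interface 0, via the
+ * cvmx_gmxx_rxx_adr_ctl_t union defined later in this header:
+ *
+ *   cvmx_gmxx_rxx_adr_ctl_t adr_ctl;
+ *   adr_ctl.u64 = 0;
+ *   adr_ctl.s.bcst = 1;       (accept broadcast packets)
+ *   adr_ctl.s.cam_mode = 1;   (CAM matches are accepted)
+ *   cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(0, 0), adr_ctl.u64);
+ */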
+static inline uint64_t CVMX_GMXX_RXX_DECISION(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_DECISION (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_FRM_CHK(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_FRM_CHK (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_FRM_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_FRM_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_RXX_FRM_MAX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_GMXX_RXX_FRM_MAX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+}
+#else
+#define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_RXX_FRM_MIN(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_GMXX_RXX_FRM_MIN(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+}
+#else
+#define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
+#endif
+static inline uint64_t CVMX_GMXX_RXX_IFG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_IFG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_INT_EN(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_INT_EN (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_INT_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_INT_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
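+
+/* A minimal usage sketch (illustrative only), assuming the usual
+ * write-one-to-clear behavior of the RX interrupt bits: reading the
+ * register and writing the value back acknowledges everything pending:
+ *
+ *   uint64_t pending = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(0, 0));
+ *   cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(0, 0), pending);
+ */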
+static inline uint64_t CVMX_GMXX_RXX_JABBER(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_JABBER (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_PAUSE_DROP_TIME(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_PAUSE_DROP_TIME (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_RXX_RX_INBND(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_GMXX_RXX_RX_INBND(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+}
+#else
+#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
+#endif
+static inline uint64_t CVMX_GMXX_RXX_STATS_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_STATS_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DMAC(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DMAC (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DRP(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DRP (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
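
Taken together, the STATS_* helpers cover the RX statistics bank. A short sketch that samples the good-packet and octet counters for port 0 of interface 0, assuming the companion CVMX_GMXX_RXX_STATS_OCTS accessor defined elsewhere in this header; note that the GMX RX stats control register can configure these counters as read-to-clear, so a read may be destructive:

/* Sketch: sample RX counters for port 0, interface 0. */
static void sample_rx_counters(void)
{
    uint64_t pkts = cvmx_read_csr(CVMX_GMXX_RXX_STATS_PKTS(0, 0));
    uint64_t octs = cvmx_read_csr(CVMX_GMXX_RXX_STATS_OCTS(0, 0)); /* assumed companion helper */
    cvmx_dprintf("GMX0 port0 RX: %llu packets, %llu octets\n",
                 (unsigned long long)pkts, (unsigned long long)octs);
}
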
+static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_BAD(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_BAD (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DMAC(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DMAC (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DRP(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DRP (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RXX_UDD_SKP(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RXX_UDD_SKP (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_RX_BP_DROPX(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 8;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 7) * 0x200000ull) * 8;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RX_BP_DROPX (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
+}
+static inline uint64_t CVMX_GMXX_RX_BP_OFFX(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 8;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 7) * 0x200000ull) * 8;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RX_BP_OFFX (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
+}
+static inline uint64_t CVMX_GMXX_RX_BP_ONX(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 8;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 7) * 0x200000ull) * 8;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RX_BP_ONX (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
+}
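
The three RX_BP_* helpers address the per-port backpressure thresholds (drop, off, on). A sketch that dumps all three raw values for port 0 on interface 0; the units are hardware-defined (the HRM expresses them in FIFO granules), so only raw values are printed here:

/* Sketch: dump the RX FIFO backpressure thresholds, raw. */
static void dump_bp_thresholds(void)
{
    cvmx_dprintf("GMX0 port0 BP: drop=0x%llx off=0x%llx on=0x%llx\n",
        (unsigned long long)cvmx_read_csr(CVMX_GMXX_RX_BP_DROPX(0, 0)),
        (unsigned long long)cvmx_read_csr(CVMX_GMXX_RX_BP_OFFX(0, 0)),
        (unsigned long long)cvmx_read_csr(CVMX_GMXX_RX_BP_ONX(0, 0)));
}
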
+static inline uint64_t CVMX_GMXX_RX_HG2_STATUS(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RX_HG2_STATUS (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 0) * 0x8000000ull;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_RX_PASS_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_GMXX_RX_PASS_EN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_GMXX_RX_PASS_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_RX_PASS_MAPX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 15)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_GMXX_RX_PASS_MAPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8;
+}
+#else
+#define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8)
+#endif
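
GMX_RX_PASS_EN and GMX_RX_PASS_MAP only ever had one address layout (CN38XX/CN58XX), so they are emitted in the two-flavor form: with CVMX_ENABLE_CSR_ADDRESS_CHECKING the inline function validates its arguments against the implementing models, while the #else macro computes the address unconditionally. A sketch of the practical difference, with placeholder argument values, intended for CN38XX/CN58XX:

/* With checking on, an out-of-range argument warns via cvmx_warn()
 * and is masked; with checking off the same call silently aliases
 * into the valid range because of the "& 1" / "& 15" in the macro. */
static void pass_regs_example(void)
{
    uint64_t pass_en  = cvmx_read_csr(CVMX_GMXX_RX_PASS_EN(1));       /* block 1 */
    uint64_t pass_map = cvmx_read_csr(CVMX_GMXX_RX_PASS_MAPX(15, 0)); /* last map entry */
    (void)pass_en; (void)pass_map;
}
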
+static inline uint64_t CVMX_GMXX_RX_PRTS(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RX_PRTS (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_RX_PRT_INFO(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RX_PRT_INFO (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 0) * 0x8000000ull;
+}
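
GMX_RX_PRTS and GMX_RX_PRT_INFO are per-interface rather than per-port, hence the single block_id argument. A sketch that snapshots the port-info register through its generated union type (defined elsewhere in this header); only the raw .u64 member is used so the sketch stays independent of the exact bitfield layout:

/* Sketch: snapshot per-interface port info for interface 0. */
static void show_prt_info(void)
{
    cvmx_gmxx_rx_prt_info_t info;
    info.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRT_INFO(0));
    cvmx_dprintf("GMX0 RX_PRT_INFO = 0x%016llx\n",
                 (unsigned long long)info.u64);
}
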
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_RX_TX_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_RX_TX_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080007E8ull);
+}
+#else
+#define CVMX_GMXX_RX_TX_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800080007E8ull))
+#endif
+static inline uint64_t CVMX_GMXX_RX_XAUI_BAD_COL(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RX_XAUI_BAD_COL (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_RX_XAUI_CTL (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_SMACX(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_SMACX (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
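
GMX_SMAC supplies the source MAC address that the hardware inserts into self-generated pause frames. A sketch that programs port 0 on interface 0; the 48-bit address is assumed to occupy the low bits of the register per the SMAC description, and the value written is illustrative only:

/* Sketch: set the pause-frame source MAC (placeholder value). */
static void set_pause_smac(void)
{
    cvmx_write_csr(CVMX_GMXX_SMACX(0, 0), 0x000102030405ull);
}
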
+static inline uint64_t CVMX_GMXX_SOFT_BIST(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_SOFT_BIST (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_GMXX_STAT_BP(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_STAT_BP (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TB_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TB_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TXX_APPEND(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_APPEND (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_BURST(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_BURST (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_CBFC_XOFF(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if (((offset == 0)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset == 0)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset == 0)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_CBFC_XOFF (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TXX_CBFC_XON(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if (((offset == 0)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset == 0)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset == 0)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_CBFC_XON (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 0) * 0x8000000ull;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_TXX_CLK(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_GMXX_TXX_CLK(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+}
+#else
+#define CVMX_GMXX_TXX_CLK(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
+#endif
+static inline uint64_t CVMX_GMXX_TXX_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_MIN_PKT(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_MIN_PKT (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_TIME (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
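
PAUSE_PKT_TIME and PAUSE_PKT_INTERVAL together control 802.3 pause generation: TIME is the pause quantum advertised in outgoing pause frames, INTERVAL is how often the frame is refreshed. A sketch with placeholder values; the usual convention is INTERVAL < TIME so a refresh reaches the link partner before its pause timer expires:

/* Sketch: program pause timing for port 0, interface 0.
 * Units are hardware-defined (commonly 512 bit-times per count). */
static void setup_pause_timing(void)
{
    cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(0, 0), 0x100);
    cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(0, 0), 0x80);
}
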
+static inline uint64_t CVMX_GMXX_TXX_PAUSE_TOGO(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_PAUSE_TOGO (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_PAUSE_ZERO(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_PAUSE_ZERO (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_TXX_PIPE(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 3)) && ((block_id <= 4))))))
+ cvmx_warn("CVMX_GMXX_TXX_PIPE(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000310ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+}
+#else
+#define CVMX_GMXX_TXX_PIPE(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000310ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048)
+#endif
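
GMX_TX_PIPE exists only on CN68XX, where it binds a GMX port to its PKO pipe; accordingly the checked variant accepts nothing but that model. A read-modify-write sketch using the generated union type; the raw value is left untouched since the exact field layout is not reproduced here:

/* Sketch (CN68XX only): RMW the TX pipe binding for port 0, interface 0. */
static void touch_tx_pipe(void)
{
    if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
        cvmx_gmxx_txx_pipe_t pipe;
        pipe.u64 = cvmx_read_csr(CVMX_GMXX_TXX_PIPE(0, 0));
        /* adjust pipe fields per the CN68XX HRM before writing back */
        cvmx_write_csr(CVMX_GMXX_TXX_PIPE(0, 0), pipe.u64);
    }
}
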
+static inline uint64_t CVMX_GMXX_TXX_SGMII_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_SGMII_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_SLOT(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_SLOT (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_SOFT_PAUSE(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_SOFT_PAUSE (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_STAT0(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_STAT0 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_STAT1(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_STAT1 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_STAT2(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_STAT2 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_STAT3(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_STAT3 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_STAT4(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_STAT4 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_STAT5(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_STAT5 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_STAT6(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_STAT6 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_STAT7(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_STAT7 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_STAT8(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_STAT8 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_STAT9(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_STAT9 (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_STATS_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_STATS_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TXX_THRESH(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
+ break;
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 2)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TXX_THRESH (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
+}
+static inline uint64_t CVMX_GMXX_TX_BP(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_BP (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 0) * 0x8000000ull;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_TX_CLK_MSKX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 1)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 1)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_TX_CLK_MSKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8;
+}
+#else
+#define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8)
+#endif
+static inline uint64_t CVMX_GMXX_TX_COL_ATTEMPT(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_COL_ATTEMPT (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_CORRUPT(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_CORRUPT (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_HG2_REG1(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_HG2_REG1 (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_HG2_REG2(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_HG2_REG2 (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_IFG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_IFG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_INT_EN(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_INT_EN (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_INT_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_INT_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_JAM(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_JAM (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_LFSR(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_LFSR (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_OVR_BP(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_OVR_BP (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_DMAC(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_DMAC (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_TYPE(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_TYPE (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_TX_PRTS(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_PRTS (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 0) * 0x8000000ull;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_TX_SPI_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_GMXX_TX_SPI_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_GMXX_TX_SPI_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_TX_SPI_DRAIN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_GMXX_TX_SPI_DRAIN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_GMXX_TX_SPI_DRAIN(block_id) (CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_TX_SPI_MAX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_GMXX_TX_SPI_MAX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_GMXX_TX_SPI_MAX(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_TX_SPI_ROUNDX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 31)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_GMXX_TX_SPI_ROUNDX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8;
+}
+#else
+#define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GMXX_TX_SPI_THRESH(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_GMXX_TX_SPI_THRESH(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_GMXX_TX_SPI_THRESH(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+static inline uint64_t CVMX_GMXX_TX_XAUI_CTL(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_TX_XAUI_CTL (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 0) * 0x8000000ull;
+}
+static inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_GMXX_XAUI_EXT_LOOPBACK (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 0) * 0x8000000ull;
+}
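+
+/* Illustrative usage sketch (assumption: cvmx_read_csr() from cvmx.h is in
+ * scope). The CVMX_GMXX_* functions above only compute CSR addresses; a
+ * read or write goes through cvmx_read_csr()/cvmx_write_csr(). Passing an
+ * out-of-range (offset, block_id) pair triggers cvmx_warn() and falls back
+ * to an address built from the default masks, so callers should validate
+ * indices first. */
+static inline uint64_t example_read_gmx_tx_stat1(unsigned long port, unsigned long interface)
+{
+    /* Read GMX(interface)_TX(port)_STAT1 through the generated address helper. */
+    return cvmx_read_csr(CVMX_GMXX_TXX_STAT1(port, interface));
+}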
+
+/**
+ * cvmx_gmx#_bad_reg
+ *
+ * GMX_BAD_REG = A collection of things that have gone very, very wrong
+ *
+ *
+ * Notes:
+ * In XAUI mode, only the lsb (corresponding to port0) of INB_NXA, LOSTSTAT, and OUT_OVR is used.
+ *
+ */
+union cvmx_gmxx_bad_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_bad_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */
+ uint64_t statovr : 1; /**< TX Statistics overflow
+ The common FIFO to SGMII and XAUI had an overflow
+ TX Stats are corrupted */
+ uint64_t loststat : 4; /**< TX Statistics data was over-written
+ In SGMII, one bit per port
+ In XAUI, only port0 is used
+ TX Stats are corrupted */
+ uint64_t reserved_18_21 : 4;
+ uint64_t out_ovr : 16; /**< Outbound data FIFO overflow (per port) */
+ uint64_t ncb_ovr : 1; /**< Outbound NCB FIFO Overflow */
+ uint64_t out_col : 1; /**< Outbound collision occurred between PKO and NCB */
+#else
+ uint64_t out_col : 1;
+ uint64_t ncb_ovr : 1;
+ uint64_t out_ovr : 16;
+ uint64_t reserved_18_21 : 4;
+ uint64_t loststat : 4;
+ uint64_t statovr : 1;
+ uint64_t inb_nxa : 4;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_gmxx_bad_reg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */
+ uint64_t statovr : 1; /**< TX Statistics overflow */
+ uint64_t reserved_25_25 : 1;
+ uint64_t loststat : 3; /**< TX Statistics data was over-written (per RGM port)
+ TX Stats are corrupted */
+ uint64_t reserved_5_21 : 17;
+ uint64_t out_ovr : 3; /**< Outbound data FIFO overflow (per port) */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 3;
+ uint64_t reserved_5_21 : 17;
+ uint64_t loststat : 3;
+ uint64_t reserved_25_25 : 1;
+ uint64_t statovr : 1;
+ uint64_t inb_nxa : 4;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_bad_reg_cn30xx cn31xx;
+ struct cvmx_gmxx_bad_reg_s cn38xx;
+ struct cvmx_gmxx_bad_reg_s cn38xxp2;
+ struct cvmx_gmxx_bad_reg_cn30xx cn50xx;
+ struct cvmx_gmxx_bad_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */
+ uint64_t statovr : 1; /**< TX Statistics overflow
+ The common FIFO to SGMII and XAUI had an overflow
+ TX Stats are corrupted */
+ uint64_t loststat : 4; /**< TX Statistics data was over-written
+ In SGMII, one bit per port
+ In XAUI, only port0 is used
+ TX Stats are corrupted */
+ uint64_t reserved_6_21 : 16;
+ uint64_t out_ovr : 4; /**< Outbound data FIFO overflow (per port) */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 4;
+ uint64_t reserved_6_21 : 16;
+ uint64_t loststat : 4;
+ uint64_t statovr : 1;
+ uint64_t inb_nxa : 4;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1;
+ struct cvmx_gmxx_bad_reg_cn52xx cn56xx;
+ struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1;
+ struct cvmx_gmxx_bad_reg_s cn58xx;
+ struct cvmx_gmxx_bad_reg_s cn58xxp1;
+ struct cvmx_gmxx_bad_reg_cn52xx cn61xx;
+ struct cvmx_gmxx_bad_reg_cn52xx cn63xx;
+ struct cvmx_gmxx_bad_reg_cn52xx cn63xxp1;
+ struct cvmx_gmxx_bad_reg_cn52xx cn66xx;
+ struct cvmx_gmxx_bad_reg_cn52xx cn68xx;
+ struct cvmx_gmxx_bad_reg_cn52xx cn68xxp1;
+ struct cvmx_gmxx_bad_reg_cn52xx cnf71xx;
+};
+typedef union cvmx_gmxx_bad_reg cvmx_gmxx_bad_reg_t;
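+
+/* Hedged usage sketch: decode GMX(interface)_BAD_REG through the union above
+ * and report whether the TX statistics can still be trusted. Assumes a
+ * CVMX_GMXX_BAD_REG() address helper (defined elsewhere in this file) and
+ * cvmx_read_csr() are in scope. */
+static inline int example_gmx_tx_stats_corrupted(unsigned long interface)
+{
+    cvmx_gmxx_bad_reg_t bad;
+    bad.u64 = cvmx_read_csr(CVMX_GMXX_BAD_REG(interface));
+    /* Either a stats FIFO overflow (STATOVR) or an over-written stats entry
+     * (LOSTSTAT) means the TX statistics are corrupted. */
+    return (bad.s.statovr != 0) || (bad.s.loststat != 0);
+}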
+
+/**
+ * cvmx_gmx#_bist
+ *
+ * GMX_BIST = GMX BIST Results
+ *
+ */
+union cvmx_gmxx_bist {
+ uint64_t u64;
+ struct cvmx_gmxx_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t status : 25; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.fif_bnk0
+ - 1: gmx#.inb.fif_bnk1
+ - 2: gmx#.inb.fif_bnk2
+ - 3: gmx#.inb.fif_bnk3
+ - 4: gmx#.inb.fif_bnk_ext0
+ - 5: gmx#.inb.fif_bnk_ext1
+ - 6: gmx#.inb.fif_bnk_ext2
+ - 7: gmx#.inb.fif_bnk_ext3
+ - 8: gmx#.outb.fif.fif_bnk0
+ - 9: gmx#.outb.fif.fif_bnk1
+ - 10: gmx#.outb.fif.fif_bnk2
+ - 11: gmx#.outb.fif.fif_bnk3
+ - 12: gmx#.outb.fif.fif_bnk_ext0
+ - 13: gmx#.outb.fif.fif_bnk_ext1
+ - 14: gmx#.outb.fif.fif_bnk_ext2
+ - 15: gmx#.outb.fif.fif_bnk_ext3
+ - 16: gmx#.csr.gmi0.srf8x64m1_bist
+ - 17: gmx#.csr.gmi1.srf8x64m1_bist
+ - 18: gmx#.csr.gmi2.srf8x64m1_bist
+ - 19: gmx#.csr.gmi3.srf8x64m1_bist
+ - 20: gmx#.csr.drf20x32m2_bist
+ - 21: gmx#.csr.drf20x48m2_bist
+ - 22: gmx#.outb.stat.drf16x27m1_bist
+ - 23: gmx#.outb.stat.drf40x64m1_bist
+ - 24: xgmii.tx.drf16x38m1_async_bist */
+#else
+ uint64_t status : 25;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_gmxx_bist_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t status : 10; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.dpr512x78m4_bist
+ - 1: gmx#.outb.fif.dpr512x71m4_bist
+ - 2: gmx#.csr.gmi0.srf8x64m1_bist
+ - 3: gmx#.csr.gmi1.srf8x64m1_bist
+ - 4: gmx#.csr.gmi2.srf8x64m1_bist
+ - 5: 0
+ - 6: gmx#.csr.drf20x80m1_bist
+ - 7: gmx#.outb.stat.drf16x27m1_bist
+ - 8: gmx#.outb.stat.drf40x64m1_bist
+ - 9: 0 */
+#else
+ uint64_t status : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_bist_cn30xx cn31xx;
+ struct cvmx_gmxx_bist_cn30xx cn38xx;
+ struct cvmx_gmxx_bist_cn30xx cn38xxp2;
+ struct cvmx_gmxx_bist_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t status : 12; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails */
+#else
+ uint64_t status : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_bist_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t status : 16; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.fif_bnk0
+ - 1: gmx#.inb.fif_bnk1
+ - 2: gmx#.inb.fif_bnk2
+ - 3: gmx#.inb.fif_bnk3
+ - 4: gmx#.outb.fif.fif_bnk0
+ - 5: gmx#.outb.fif.fif_bnk1
+ - 6: gmx#.outb.fif.fif_bnk2
+ - 7: gmx#.outb.fif.fif_bnk3
+ - 8: gmx#.csr.gmi0.srf8x64m1_bist
+ - 9: gmx#.csr.gmi1.srf8x64m1_bist
+ - 10: gmx#.csr.gmi2.srf8x64m1_bist
+ - 11: gmx#.csr.gmi3.srf8x64m1_bist
+ - 12: gmx#.csr.drf20x80m1_bist
+ - 13: gmx#.outb.stat.drf16x27m1_bist
+ - 14: gmx#.outb.stat.drf40x64m1_bist
+ - 15: xgmii.tx.drf16x38m1_async_bist */
+#else
+ uint64_t status : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_bist_cn52xx cn52xxp1;
+ struct cvmx_gmxx_bist_cn52xx cn56xx;
+ struct cvmx_gmxx_bist_cn52xx cn56xxp1;
+ struct cvmx_gmxx_bist_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t status : 17; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.fif_bnk0
+ - 1: gmx#.inb.fif_bnk1
+ - 2: gmx#.inb.fif_bnk2
+ - 3: gmx#.inb.fif_bnk3
+ - 4: gmx#.outb.fif.fif_bnk0
+ - 5: gmx#.outb.fif.fif_bnk1
+ - 6: gmx#.outb.fif.fif_bnk2
+ - 7: gmx#.outb.fif.fif_bnk3
+ - 8: gmx#.csr.gmi0.srf8x64m1_bist
+ - 9: gmx#.csr.gmi1.srf8x64m1_bist
+ - 10: gmx#.csr.gmi2.srf8x64m1_bist
+ - 11: gmx#.csr.gmi3.srf8x64m1_bist
+ - 12: gmx#.csr.drf20x80m1_bist
+ - 13: gmx#.outb.stat.drf16x27m1_bist
+ - 14: gmx#.outb.stat.drf40x64m1_bist
+ - 15: gmx#.outb.ncb.drf16x76m1_bist
+ - 16: gmx#.outb.fif.srf32x16m2_bist */
+#else
+ uint64_t status : 17;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn58xx;
+ struct cvmx_gmxx_bist_cn58xx cn58xxp1;
+ struct cvmx_gmxx_bist_s cn61xx;
+ struct cvmx_gmxx_bist_s cn63xx;
+ struct cvmx_gmxx_bist_s cn63xxp1;
+ struct cvmx_gmxx_bist_s cn66xx;
+ struct cvmx_gmxx_bist_s cn68xx;
+ struct cvmx_gmxx_bist_s cn68xxp1;
+ struct cvmx_gmxx_bist_s cnf71xx;
+};
+typedef union cvmx_gmxx_bist cvmx_gmxx_bist_t;
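+
+/* Hedged usage sketch: after reset, software can confirm that no GMX memory
+ * failed BIST by checking that STATUS is all zero; each set bit identifies
+ * one failing memory per the tables above. Assumes a CVMX_GMXX_BIST()
+ * address helper (defined elsewhere in this file) and cvmx_read_csr(). */
+static inline int example_gmx_bist_passed(unsigned long interface)
+{
+    cvmx_gmxx_bist_t bist;
+    bist.u64 = cvmx_read_csr(CVMX_GMXX_BIST(interface));
+    return bist.s.status == 0;
+}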
+
+/**
+ * cvmx_gmx#_bpid_map#
+ *
+ * Notes:
+ * GMX will build BPID_VECTOR<15:0> using the 16 GMX_BPID_MAP entries and the BPID
+ * state from IPD. In XAUI/RXAUI mode when PFC/CBFC/HiGig2 is used, the
+ * BPID_VECTOR becomes the logical backpressure. In XAUI/RXAUI mode when
+ * PFC/CBFC/HiGig2 is not used or when in 4xSGMII mode, the BPID_VECTOR can be used
+ * with the GMX_BPID_MSK register to determine the physical backpressure.
+ *
+ * In XAUI/RXAUI mode, the entire BPID_VECTOR<15:0> is available for determining physical
+ * backpressure for the single XAUI/RXAUI interface.
+ *
+ * In SGMII mode, BPID_VECTOR is broken up as follows:
+ * SGMII interface0 uses BPID_VECTOR<3:0>
+ * SGMII interface1 uses BPID_VECTOR<7:4>
+ * SGMII interface2 uses BPID_VECTOR<11:8>
+ * SGMII interface3 uses BPID_VECTOR<15:12>
+ *
+ * In all SGMII configurations, and in some XAUI/RXAUI configurations, the
+ * interface protocols only support physical backpressure. In these cases, a single
+ * BPID will commonly drive the physical backpressure for the physical
+ * interface. We provide example programming for these simple cases; a C rendering of the
+ * SGMII case appears after the union definition below.
+ *
+ * In XAUI/RXAUI mode where PFC/CBFC/HiGig2 is not used, an example programming
+ * would be as follows:
+ *
+ * @verbatim
+ * GMX_BPID_MAP0[VAL] = 1;
+ * GMX_BPID_MAP0[BPID] = xaui_bpid;
+ * GMX_BPID_MSK[MSK_OR] = 1;
+ * GMX_BPID_MSK[MSK_AND] = 0;
+ * @endverbatim
+ *
+ * In SGMII mode, an example programming would be as follows:
+ *
+ * @verbatim
+ * for (i=0; i<4; i++) {
+ * if (GMX_PRTi_CFG[EN]) {
+ * GMX_BPID_MAP(i*4)[VAL] = 1;
+ * GMX_BPID_MAP(i*4)[BPID] = sgmii_bpid(i);
+ * GMX_BPID_MSK[MSK_OR] = (1 << (i*4)) | GMX_BPID_MSK[MSK_OR];
+ * }
+ * }
+ * GMX_BPID_MSK[MSK_AND] = 0;
+ * @endverbatim
+ */
+union cvmx_gmxx_bpid_mapx {
+ uint64_t u64;
+ struct cvmx_gmxx_bpid_mapx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t status : 1; /**< Current received BP from IPD */
+ uint64_t reserved_9_15 : 7;
+ uint64_t val : 1; /**< Table entry is valid */
+ uint64_t reserved_6_7 : 2;
+ uint64_t bpid : 6; /**< Backpressure ID the entry maps to */
+#else
+ uint64_t bpid : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t val : 1;
+ uint64_t reserved_9_15 : 7;
+ uint64_t status : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_gmxx_bpid_mapx_s cn68xx;
+ struct cvmx_gmxx_bpid_mapx_s cn68xxp1;
+};
+typedef union cvmx_gmxx_bpid_mapx cvmx_gmxx_bpid_mapx_t;
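+
+/* Hedged usage sketch: a C rendering of the SGMII example programming from
+ * the notes above. Assumes the CVMX_GMXX_PRTX_CFG()/CVMX_GMXX_BPID_MAPX()/
+ * CVMX_GMXX_BPID_MSK() address helpers and their unions (defined elsewhere
+ * in this file) plus cvmx_read_csr()/cvmx_write_csr() are in scope. */
+extern int sgmii_bpid(int port); /* hypothetical per-port BPID lookup */
+static inline void example_gmx_bpid_sgmii_setup(unsigned long interface)
+{
+    int i;
+    cvmx_gmxx_bpid_msk_t msk;
+    msk.u64 = cvmx_read_csr(CVMX_GMXX_BPID_MSK(interface));
+    for (i = 0; i < 4; i++) {
+        cvmx_gmxx_prtx_cfg_t cfg;
+        cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(i, interface));
+        if (cfg.s.en) {
+            cvmx_gmxx_bpid_mapx_t map;
+            map.u64 = cvmx_read_csr(CVMX_GMXX_BPID_MAPX(i * 4, interface));
+            map.s.val = 1;
+            map.s.bpid = sgmii_bpid(i);
+            cvmx_write_csr(CVMX_GMXX_BPID_MAPX(i * 4, interface), map.u64);
+            /* SGMII interface i owns BPID_VECTOR<4i+3:4i>. */
+            msk.s.msk_or |= 1 << (i * 4);
+        }
+    }
+    msk.s.msk_and = 0;
+    cvmx_write_csr(CVMX_GMXX_BPID_MSK(interface), msk.u64);
+}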
+
+/**
+ * cvmx_gmx#_bpid_msk
+ */
+union cvmx_gmxx_bpid_msk {
+ uint64_t u64;
+ struct cvmx_gmxx_bpid_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t msk_or : 16; /**< Assert physical BP when the backpressure ID vector
+ combined with MSK_OR indicates BP as follows.
+ phys_bp_msk_or =
+ (BPID_VECTOR<x:y> & MSK_OR<x:y>) != 0
+ phys_bp = phys_bp_msk_or || phys_bp_msk_and
+ In XAUI/RXAUI mode, x=15, y=0
+ In SGMII mode, x/y are set depending on the SGMII
+ interface.
+ SGMII interface0, x=3, y=0
+ SGMII interface1, x=7, y=4
+ SGMII interface2, x=11, y=8
+ SGMII interface3, x=15, y=12 */
+ uint64_t reserved_16_31 : 16;
+ uint64_t msk_and : 16; /**< Assert physical BP when the backpressure ID vector
+ combined with MSK_AND indicates BP as follows.
+ phys_bp_msk_and =
+ (BPID_VECTOR<x:y> & MSK_AND<x:y>) == MSK_AND<x:y>
+ phys_bp = phys_bp_msk_or || phys_bp_msk_and
+ In XAUI/RXAUI mode, x=15, y=0
+ In SGMII mode, x/y are set depending on the SGMII
+ interface.
+ SGMII interface0, x=3, y=0
+ SGMII interface1, x=7, y=4
+ SGMII interface2, x=11, y=8
+ SGMII interface3, x=15, y=12 */
+#else
+ uint64_t msk_and : 16;
+ uint64_t reserved_16_31 : 16;
+ uint64_t msk_or : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_bpid_msk_s cn68xx;
+ struct cvmx_gmxx_bpid_msk_s cn68xxp1;
+};
+typedef union cvmx_gmxx_bpid_msk cvmx_gmxx_bpid_msk_t;
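+
+/* Hedged sketch: the physical backpressure decision described above, computed
+ * in software for a 16-bit BPID_VECTOR slice and a GMX_BPID_MSK image. The
+ * MSK_AND term is guarded against MSK_AND == 0 here, matching the example
+ * programming that clears MSK_AND to disable the AND term (an editorial
+ * reading; the HRM is authoritative for the hardware behavior). */
+static inline int example_gmx_phys_bp(uint16_t bpid_vector, cvmx_gmxx_bpid_msk_t msk)
+{
+    int bp_or  = (bpid_vector & msk.s.msk_or) != 0;
+    int bp_and = msk.s.msk_and != 0 &&
+                 (bpid_vector & msk.s.msk_and) == msk.s.msk_and;
+    return bp_or || bp_and;
+}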
+
+/**
+ * cvmx_gmx#_clk_en
+ *
+ * DON'T PUT IN HRM*
+ *
+ */
+union cvmx_gmxx_clk_en {
+ uint64_t u64;
+ struct cvmx_gmxx_clk_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t clk_en : 1; /**< Force the clock enables on */
+#else
+ uint64_t clk_en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_gmxx_clk_en_s cn52xx;
+ struct cvmx_gmxx_clk_en_s cn52xxp1;
+ struct cvmx_gmxx_clk_en_s cn56xx;
+ struct cvmx_gmxx_clk_en_s cn56xxp1;
+ struct cvmx_gmxx_clk_en_s cn61xx;
+ struct cvmx_gmxx_clk_en_s cn63xx;
+ struct cvmx_gmxx_clk_en_s cn63xxp1;
+ struct cvmx_gmxx_clk_en_s cn66xx;
+ struct cvmx_gmxx_clk_en_s cn68xx;
+ struct cvmx_gmxx_clk_en_s cn68xxp1;
+ struct cvmx_gmxx_clk_en_s cnf71xx;
+};
+typedef union cvmx_gmxx_clk_en cvmx_gmxx_clk_en_t;
+
+/**
+ * cvmx_gmx#_ebp_dis
+ */
+union cvmx_gmxx_ebp_dis {
+ uint64_t u64;
+ struct cvmx_gmxx_ebp_dis_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t dis : 16; /**< BP channel disable
+ GMX has the ability to remap unused channels
+ in order to get down to GMX_TX_PIPE[NUMP]
+ channels. */
+#else
+ uint64_t dis : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_ebp_dis_s cn68xx;
+ struct cvmx_gmxx_ebp_dis_s cn68xxp1;
+};
+typedef union cvmx_gmxx_ebp_dis cvmx_gmxx_ebp_dis_t;
+
+/**
+ * cvmx_gmx#_ebp_msk
+ */
+union cvmx_gmxx_ebp_msk {
+ uint64_t u64;
+ struct cvmx_gmxx_ebp_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t msk : 16; /**< BP channel mask
+ GMX can completely ignore the channel BP for
+ channels specified by the MSK field. Any channel
+ whose MSK bit is 1 will never send BP information
+ to PKO. */
+#else
+ uint64_t msk : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_ebp_msk_s cn68xx;
+ struct cvmx_gmxx_ebp_msk_s cn68xxp1;
+};
+typedef union cvmx_gmxx_ebp_msk cvmx_gmxx_ebp_msk_t;
+
+/**
+ * cvmx_gmx#_hg2_control
+ *
+ * Notes:
+ * The HiGig2 TX and RX enables would normally both be set for HiGig2 messaging; setting just
+ * the TX or RX bit results in only the HG2 message transmit or receive capability,
+ * respectively.
+ * When 1, the PHYS_EN and LOGL_EN bits allow link pause or backpressure to PKO as directed
+ * by received HiGig2 messages. When 0, link pause and backpressure to PKO in response to
+ * received messages are disabled.
+ *
+ * GMX*_TX_XAUI_CTL[HG_EN] must be set to one(to enable HiGig) whenever either HG2TX_EN or HG2RX_EN
+ * are set.
+ *
+ * GMX*_RX0_UDD_SKP[LEN] must be set to 16 (to select HiGig2) whenever either HG2TX_EN or HG2RX_EN
+ * are set.
+ *
+ * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero
+ * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol when
+ * GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by GMX*_TX_XAUI_CTL[HG_EN]=1
+ * and GMX*_RX0_UDD_SKP[LEN]=16.) The HW can only auto-generate backpressure via HiGig2 messages
+ * (optionally, when HG2TX_EN=1) with the HiGig2 protocol.
+ */
+union cvmx_gmxx_hg2_control {
+ uint64_t u64;
+ struct cvmx_gmxx_hg2_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t hg2tx_en : 1; /**< Enable Transmission of HG2 phys and logl messages
+ When set, also disables HW auto-generated (802.3
+ and CBFC) pause frames. (OCTEON cannot generate
+ proper 802.3 or CBFC pause frames in HiGig2 mode.) */
+ uint64_t hg2rx_en : 1; /**< Enable extraction and processing of HG2 message
+ packets from the RX flow. Physical/logical pause
+ info is used to pause the physical link and backpressure PKO.
+ HG2RX_EN must be set when HiGig2 messages are
+ present in the receive stream. */
+ uint64_t phys_en : 1; /**< 1-bit physical link pause enable for received
+ HiGig2 physical pause message */
+ uint64_t logl_en : 16; /**< 16-bit XOFF enables for received HiGig2 messages
+ or CBFC packets */
+#else
+ uint64_t logl_en : 16;
+ uint64_t phys_en : 1;
+ uint64_t hg2rx_en : 1;
+ uint64_t hg2tx_en : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_gmxx_hg2_control_s cn52xx;
+ struct cvmx_gmxx_hg2_control_s cn52xxp1;
+ struct cvmx_gmxx_hg2_control_s cn56xx;
+ struct cvmx_gmxx_hg2_control_s cn61xx;
+ struct cvmx_gmxx_hg2_control_s cn63xx;
+ struct cvmx_gmxx_hg2_control_s cn63xxp1;
+ struct cvmx_gmxx_hg2_control_s cn66xx;
+ struct cvmx_gmxx_hg2_control_s cn68xx;
+ struct cvmx_gmxx_hg2_control_s cn68xxp1;
+ struct cvmx_gmxx_hg2_control_s cnf71xx;
+};
+typedef union cvmx_gmxx_hg2_control cvmx_gmxx_hg2_control_t;
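+
+/* Hedged usage sketch: an enable sequence following the constraints in the
+ * notes above (HG_EN=1, RX0_UDD_SKP[LEN]=16, then HG2TX_EN/HG2RX_EN). The
+ * cvmx_gmxx_tx_xaui_ctl_t and cvmx_gmxx_rxx_udd_skp_t unions, their field
+ * names, and the CVMX_GMXX_RXX_UDD_SKP()/CVMX_GMXX_HG2_CONTROL() helpers are
+ * assumed from elsewhere in this file; cvmx_read_csr()/cvmx_write_csr() come
+ * from cvmx.h. */
+static inline void example_gmx_hg2_enable(unsigned long interface)
+{
+    cvmx_gmxx_tx_xaui_ctl_t xaui_ctl;
+    cvmx_gmxx_rxx_udd_skp_t udd_skp;
+    cvmx_gmxx_hg2_control_t hg2;
+    /* Select the HiGig protocol on the XAUI interface. */
+    xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+    xaui_ctl.s.hg_en = 1;
+    cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), xaui_ctl.u64);
+    /* LEN=16 selects HiGig2 header handling on port 0. */
+    udd_skp.u64 = cvmx_read_csr(CVMX_GMXX_RXX_UDD_SKP(0, interface));
+    udd_skp.s.len = 16;
+    cvmx_write_csr(CVMX_GMXX_RXX_UDD_SKP(0, interface), udd_skp.u64);
+    /* Enable HG2 message TX/RX and honor received pause/backpressure. */
+    hg2.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
+    hg2.s.hg2tx_en = 1;
+    hg2.s.hg2rx_en = 1;
+    hg2.s.phys_en = 1;
+    hg2.s.logl_en = 0xffff;
+    cvmx_write_csr(CVMX_GMXX_HG2_CONTROL(interface), hg2.u64);
+}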
+
+/**
+ * cvmx_gmx#_inf_mode
+ *
+ * GMX_INF_MODE = Interface Mode
+ *
+ */
+union cvmx_gmxx_inf_mode {
+ uint64_t u64;
+ struct cvmx_gmxx_inf_mode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t rate : 4; /**< SERDES speed rate
+ reset value is based on the QLM speed select
+ 0 = 1.25 Gbaud
+ 1 = 3.125 Gbaud
+ (only valid for GMX0 instance)
+ Software must not change RATE from its reset value */
+ uint64_t reserved_12_15 : 4;
+ uint64_t speed : 4; /**< Interface Speed
+ QLM speed pins which select reference clock
+ period and interface data rate. If the QLM PLL
+ inputs are correct, the speed setting corresponds
+ to the following data rates (in Gbaud).
+ 0 = 5
+ 1 = 2.5
+ 2 = 2.5
+ 3 = 1.25
+ 4 = 1.25
+ 5 = 6.25
+ 6 = 5
+ 7 = 2.5
+ 8 = 3.125
+ 9 = 2.5
+ 10 = 1.25
+ 11 = 5
+ 12 = 6.25
+ 13 = 3.75
+ 14 = 3.125
+ 15 = QLM disabled */
+ uint64_t reserved_7_7 : 1;
+ uint64_t mode : 3; /**< Interface Electrical Operating Mode
+ - 0: SGMII (v1.8)
+ - 1: XAUI (IEEE 802.3-2005) */
+ uint64_t reserved_3_3 : 1;
+ uint64_t p0mii : 1; /**< Port 0 Interface Mode
+ - 0: Port 0 is RGMII
+ - 1: Port 0 is MII */
+ uint64_t en : 1; /**< Interface Enable
+ Must be set to enable the packet interface.
+ Should be enabled before any other requests to
+ GMX including enabling port back pressure with
+ IPD_CTL_STATUS[PBP_EN] */
+ uint64_t type : 1; /**< Interface Protocol Type
+ - 0: SGMII/1000Base-X
+ - 1: XAUI */
+#else
+ uint64_t type : 1;
+ uint64_t en : 1;
+ uint64_t p0mii : 1;
+ uint64_t reserved_3_3 : 1;
+ uint64_t mode : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t speed : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t rate : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_gmxx_inf_mode_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t p0mii : 1; /**< Port 0 Interface Mode
+ - 0: Port 0 is RGMII
+ - 1: Port 0 is MII */
+ uint64_t en : 1; /**< Interface Enable
+ Must be set to enable the packet interface.
+ Should be enabled before any other requests to
+ GMX including enabling port back pressure with
+ IPD_CTL_STATUS[PBP_EN] */
+ uint64_t type : 1; /**< Port 1/2 Interface Mode
+ - 0: Ports 1 and 2 are RGMII
+ - 1: Port 1 is GMII/MII, Port 2 is unused
+ GMII/MII is selected by GMX_PRT1_CFG[SPEED] */
+#else
+ uint64_t type : 1;
+ uint64_t en : 1;
+ uint64_t p0mii : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_inf_mode_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t en : 1; /**< Interface Enable
+ Must be set to enable the packet interface.
+ Should be enabled before any other requests to
+ GMX including enabling port back pressure with
+ IPD_CTL_STATUS[PBP_EN] */
+ uint64_t type : 1; /**< Interface Mode
+ - 0: All three ports are RGMII ports
+ - 1: prt0 is RGMII, prt1 is GMII, and prt2 is unused */
+#else
+ uint64_t type : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_gmxx_inf_mode_cn31xx cn38xx;
+ struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2;
+ struct cvmx_gmxx_inf_mode_cn30xx cn50xx;
+ struct cvmx_gmxx_inf_mode_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t speed : 2; /**< Interface Speed
+ - 0: 1.250GHz
+ - 1: 2.500GHz
+ - 2: 3.125GHz
+ - 3: 3.750GHz */
+ uint64_t reserved_6_7 : 2;
+ uint64_t mode : 2; /**< Interface Electrical Operating Mode
+ - 0: Disabled (PCIe)
+ - 1: XAUI (IEEE 802.3-2005)
+ - 2: SGMII (v1.8)
+ - 3: PICMG3.1 */
+ uint64_t reserved_2_3 : 2;
+ uint64_t en : 1; /**< Interface Enable
+ Must be set to enable the packet interface.
+ Should be enabled before any other requests to
+ GMX including enabling port back pressure with
+ IPD_CTL_STATUS[PBP_EN] */
+ uint64_t type : 1; /**< Interface Protocol Type
+ - 0: SGMII/1000Base-X
+ - 1: XAUI */
+#else
+ uint64_t type : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t mode : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t speed : 2;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1;
+ struct cvmx_gmxx_inf_mode_cn52xx cn56xx;
+ struct cvmx_gmxx_inf_mode_cn52xx cn56xxp1;
+ struct cvmx_gmxx_inf_mode_cn31xx cn58xx;
+ struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1;
+ struct cvmx_gmxx_inf_mode_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t speed : 4; /**< Interface Speed
+ QLM speed pins which select reference clock
+ period and interface data rate. If the QLM PLL
+ inputs are correct, the speed setting corresponds
+ to the following data rates (in Gbaud).
+ 0 = 5
+ 1 = 2.5
+ 2 = 2.5
+ 3 = 1.25
+ 4 = 1.25
+ 5 = 6.25
+ 6 = 5
+ 7 = 2.5
+ 8 = 3.125
+ 9 = 2.5
+ 10 = 1.25
+ 11 = 5
+ 12 = 6.25
+ 13 = 3.75
+ 14 = 3.125
+ 15 = QLM disabled */
+ uint64_t reserved_5_7 : 3;
+ uint64_t mode : 1; /**< Interface Electrical Operating Mode
+ - 0: SGMII (v1.8)
+ - 1: XAUI (IEEE 802.3-2005) */
+ uint64_t reserved_2_3 : 2;
+ uint64_t en : 1; /**< Interface Enable
+ Must be set to enable the packet interface.
+ Should be enabled before any other requests to
+ GMX including enabling port back pressure with
+ IPD_CTL_STATUS[PBP_EN] */
+ uint64_t type : 1; /**< Interface Protocol Type
+ - 0: SGMII/1000Base-X
+ - 1: XAUI */
+#else
+ uint64_t type : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t mode : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t speed : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn61xx;
+ struct cvmx_gmxx_inf_mode_cn61xx cn63xx;
+ struct cvmx_gmxx_inf_mode_cn61xx cn63xxp1;
+ struct cvmx_gmxx_inf_mode_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t rate : 4; /**< SERDES speed rate
+ reset value is based on the QLM speed select
+ 0 = 1.25 Gbaud
+ 1 = 3.125 Gbaud
+ (only valid for GMX0 instance)
+ Software must not change RATE from its reset value */
+ uint64_t reserved_12_15 : 4;
+ uint64_t speed : 4; /**< Interface Speed
+ QLM speed pins which select reference clock
+ period and interface data rate. If the QLM PLL
+ inputs are correct, the speed setting corresponds
+ to the following data rates (in Gbaud).
+ 0 = 5
+ 1 = 2.5
+ 2 = 2.5
+ 3 = 1.25
+ 4 = 1.25
+ 5 = 6.25
+ 6 = 5
+ 7 = 2.5
+ 8 = 3.125
+ 9 = 2.5
+ 10 = 1.25
+ 11 = 5
+ 12 = 6.25
+ 13 = 3.75
+ 14 = 3.125
+ 15 = QLM disabled */
+ uint64_t reserved_5_7 : 3;
+ uint64_t mode : 1; /**< Interface Electrical Operating Mode
+ - 0: SGMII (v1.8)
+ - 1: XAUI (IEEE 802.3-2005) */
+ uint64_t reserved_2_3 : 2;
+ uint64_t en : 1; /**< Interface Enable
+ Must be set to enable the packet interface.
+ Should be enabled before any other requests to
+ GMX including enabling port back pressure with
+ IPD_CTL_STATUS[PBP_EN] */
+ uint64_t type : 1; /**< Interface Protocol Type
+ - 0: SGMII/1000Base-X
+ - 1: XAUI */
+#else
+ uint64_t type : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t mode : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t speed : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t rate : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn66xx;
+ struct cvmx_gmxx_inf_mode_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t speed : 4; /**< Interface Speed
+ QLM speed pins which select reference clock
+ period and interface data rate. If the QLM PLL
+ inputs are correct, the speed setting corresponds
+ to the following data rates (in Gbaud).
+ 0 = 5
+ 1 = 2.5
+ 2 = 2.5
+ 3 = 1.25
+ 4 = 1.25
+ 5 = 6.25
+ 6 = 5
+ 7 = 2.5
+ 8 = 3.125
+ 9 = 2.5
+ 10 = 1.25
+ 11 = 5
+ 12 = 6.25
+ 13 = 3.75
+ 14 = 3.125
+ 15 = QLM disabled */
+ uint64_t reserved_7_7 : 1;
+ uint64_t mode : 3; /**< Interface Electrical Operating Mode
+ - 0: Reserved
+ - 1: Reserved
+ - 2: SGMII (v1.8)
+ - 3: XAUI (IEEE 802.3-2005)
+ - 4: Reserved
+ - 5: Reserved
+ - 6: Reserved
+ - 7: RXAUI */
+ uint64_t reserved_2_3 : 2;
+ uint64_t en : 1; /**< Interface Enable
+ Must be set to enable the packet interface.
+ Should be enabled before any other requests to
+ GMX including enabling port back pressure with
+ IPD_CTL_STATUS[PBP_EN] */
+ uint64_t type : 1; /**< Interface Protocol Type
+ - 0: SGMII/1000Base-X
+ - 1: XAUI/RXAUI */
+#else
+ uint64_t type : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t mode : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t speed : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn68xx;
+ struct cvmx_gmxx_inf_mode_cn68xx cn68xxp1;
+ struct cvmx_gmxx_inf_mode_cn61xx cnf71xx;
+};
+typedef union cvmx_gmxx_inf_mode cvmx_gmxx_inf_mode_t;
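For reference, a minimal sketch of decoding this register, assuming the
SDK's usual cvmx_read_csr() accessor and the CVMX_GMXX_INF_MODE() address
macro defined elsewhere in this header:

    /* Sketch only: report how a GMX interface is configured. */
    static const char *
    gmx_interface_mode(int interface)
    {
        cvmx_gmxx_inf_mode_t inf_mode;

        inf_mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
        if (!inf_mode.s.en)
            return "disabled";
        /* TYPE selects between the two serial protocols. */
        return inf_mode.s.type ? "XAUI" : "SGMII/1000Base-X";
    }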
+
+/**
+ * cvmx_gmx#_nxa_adr
+ *
+ * GMX_NXA_ADR = NXA Port Address
+ *
+ */
+union cvmx_gmxx_nxa_adr {
+ uint64_t u64;
+ struct cvmx_gmxx_nxa_adr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t pipe : 7; /**< Logged pipe for NXP exceptions */
+ uint64_t reserved_6_15 : 10;
+ uint64_t prt : 6; /**< Logged address for NXA exceptions
+ The logged address will be from the first
+ exception that caused the problem. NCB has
+ higher priority than PKO and will win.
+ (only PRT[3:0]) */
+#else
+ uint64_t prt : 6;
+ uint64_t reserved_6_15 : 10;
+ uint64_t pipe : 7;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_gmxx_nxa_adr_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t prt : 6; /**< Logged address for NXA exceptions
+ The logged address will be from the first
+ exception that caused the problem. NCB has
+ higher priority than PKO and will win. */
+#else
+ uint64_t prt : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn31xx;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn38xx;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn38xxp2;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn50xx;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn52xx;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn52xxp1;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn56xx;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn56xxp1;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn58xx;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn58xxp1;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn61xx;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn63xx;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn63xxp1;
+ struct cvmx_gmxx_nxa_adr_cn30xx cn66xx;
+ struct cvmx_gmxx_nxa_adr_s cn68xx;
+ struct cvmx_gmxx_nxa_adr_s cn68xxp1;
+ struct cvmx_gmxx_nxa_adr_cn30xx cnf71xx;
+};
+typedef union cvmx_gmxx_nxa_adr cvmx_gmxx_nxa_adr_t;
+
+/**
+ * cvmx_gmx#_pipe_status
+ *
+ * DON'T PUT IN HRM*
+ *
+ */
+union cvmx_gmxx_pipe_status {
+ uint64_t u64;
+ struct cvmx_gmxx_pipe_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t ovr : 4; /**< Pipe credit return FIFO has overflowed. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t bp : 4; /**< Pipe credit return FIFO has filled up and asserted
+ backpressure to the datapath. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t stop : 4; /**< PKO has asserted backpressure on the pipe credit
+ return interface. */
+#else
+ uint64_t stop : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t bp : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t ovr : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_gmxx_pipe_status_s cn68xx;
+ struct cvmx_gmxx_pipe_status_s cn68xxp1;
+};
+typedef union cvmx_gmxx_pipe_status cvmx_gmxx_pipe_status_t;
+
+/**
+ * cvmx_gmx#_prt#_cbfc_ctl
+ *
+ * ** HG2 message CSRs end
+ *
+ *
+ * Notes:
+ * XOFF for a specific port is XOFF<prt> = (PHYS_EN<prt> & PHYS_BP) | (LOGL_EN<prt> & LOGL_BP<prt>)
+ *
+ */
+union cvmx_gmxx_prtx_cbfc_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t phys_en : 16; /**< Determines which ports will have physical
+ backpressure pause packets.
+ The value placed in the Class Enable Vector
+ field of the CBFC pause packet will be
+ PHYS_EN | LOGL_EN */
+ uint64_t logl_en : 16; /**< Determines which ports will have logical
+ backpressure pause packets.
+ The value placed in the Class Enable Vector
+ field of the CBFC pause packet will be
+ PHYS_EN | LOGL_EN */
+ uint64_t phys_bp : 16; /**< When RX_EN is set and the HW is backpressuring any
+ ports (from either CBFC pause packets or the
+ GMX_TX_OVR_BP[TX_PRT_BP] register) and all ports
+ indicated by PHYS_BP are backpressured, simulate
+ physical backpressure by deferring all packets on
+ the transmitter. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t bck_en : 1; /**< Forward CBFC Pause information to BP block */
+ uint64_t drp_en : 1; /**< Drop Control CBFC Pause Frames */
+ uint64_t tx_en : 1; /**< When set, allow for CBFC Pause Packets
+ Must be clear in HiGig2 mode i.e. when
+ GMX_TX_XAUI_CTL[HG_EN]=1 and
+ GMX_RX_UDD_SKP[SKIP]=16. */
+ uint64_t rx_en : 1; /**< When set, allow for CBFC Pause Packets
+ Must be clear in HiGig2 mode i.e. when
+ GMX_TX_XAUI_CTL[HG_EN]=1 and
+ GMX_RX_UDD_SKP[SKIP]=16. */
+#else
+ uint64_t rx_en : 1;
+ uint64_t tx_en : 1;
+ uint64_t drp_en : 1;
+ uint64_t bck_en : 1;
+ uint64_t reserved_4_15 : 12;
+ uint64_t phys_bp : 16;
+ uint64_t logl_en : 16;
+ uint64_t phys_en : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn61xx;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn63xx;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn63xxp1;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn66xx;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn68xx;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn68xxp1;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cnf71xx;
+};
+typedef union cvmx_gmxx_prtx_cbfc_ctl cvmx_gmxx_prtx_cbfc_ctl_t;
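A bitwise reading of the XOFF note above, as a hedged sketch; phys_bp_state
and logl_bp_state are hypothetical one-bit-per-port snapshots of the
physical and logical backpressure state:

    /* XOFF<prt> = (PHYS_EN<prt> & PHYS_BP) | (LOGL_EN<prt> & LOGL_BP<prt>),
     * evaluated for all 16 ports at once. Sketch only. */
    static inline uint16_t
    gmx_cbfc_xoff_mask(cvmx_gmxx_prtx_cbfc_ctl_t ctl,
                       uint16_t phys_bp_state, uint16_t logl_bp_state)
    {
        return (ctl.s.phys_en & phys_bp_state) |
               (ctl.s.logl_en & logl_bp_state);
    }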
+
+/**
+ * cvmx_gmx#_prt#_cfg
+ *
+ * GMX_PRT_CFG = Port description
+ *
+ */
+union cvmx_gmxx_prtx_cfg {
+ uint64_t u64;
+ struct cvmx_gmxx_prtx_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t pknd : 6; /**< Port Kind used for processing the packet by PKI */
+ uint64_t reserved_14_15 : 2;
+ uint64_t tx_idle : 1; /**< TX Machine is idle */
+ uint64_t rx_idle : 1; /**< RX Machine is idle */
+ uint64_t reserved_9_11 : 3;
+ uint64_t speed_msb : 1; /**< Link Speed MSB [SPEED_MSB:SPEED]
+ 10 = 10Mbs operation
+ 00 = 100Mbs operation
+ 01 = 1000Mbs operation
+ 11 = Reserved
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
+ 0 = 512 bit times (10/100Mbs operation)
+ 1 = 4096 bit times (1000Mbs operation)
+ (SGMII/1000Base-X only) */
+ uint64_t duplex : 1; /**< Duplex
+ 0 = Half Duplex (collisions/extensions/bursts)
+ 1 = Full Duplex
+ (SGMII/1000Base-X only) */
+ uint64_t speed : 1; /**< Link Speed LSB [SPEED_MSB:SPEED]
+ 10 = 10Mbs operation
+ 00 = 100Mbs operation
+ 01 = 1000Mbs operation
+ 11 = Reserved
+ (SGMII/1000Base-X only) */
+ uint64_t en : 1; /**< Link Enable
+ When EN is clear, packets will not be received
+ or transmitted (including PAUSE and JAM packets).
+ If EN is cleared while a packet is currently
+ being received or transmitted, the packet will
+ be allowed to complete before the bus is idled.
+ On the RX side, subsequent packets in a burst
+ will be ignored. */
+#else
+ uint64_t en : 1;
+ uint64_t speed : 1;
+ uint64_t duplex : 1;
+ uint64_t slottime : 1;
+ uint64_t reserved_4_7 : 4;
+ uint64_t speed_msb : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t rx_idle : 1;
+ uint64_t tx_idle : 1;
+ uint64_t reserved_14_15 : 2;
+ uint64_t pknd : 6;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_gmxx_prtx_cfg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
+ 0 = 512 bit times (10/100Mbs operation)
+ 1 = 4096 bit times (1000Mbs operation) */
+ uint64_t duplex : 1; /**< Duplex
+ 0 = Half Duplex (collisions/extensions/bursts)
+ 1 = Full Duplex */
+ uint64_t speed : 1; /**< Link Speed
+ 0 = 10/100Mbs operation
+ (in RGMII mode, GMX_TX_CLK[CLK_CNT] > 1)
+ (in MII mode, GMX_TX_CLK[CLK_CNT] == 1)
+ 1 = 1000Mbs operation */
+ uint64_t en : 1; /**< Link Enable
+ When EN is clear, packets will not be received
+ or transmitted (including PAUSE and JAM packets).
+ If EN is cleared while a packet is currently
+ being received or transmitted, the packet will
+ be allowed to complete before the bus is idled.
+ On the RX side, subsequent packets in a burst
+ will be ignored. */
+#else
+ uint64_t en : 1;
+ uint64_t speed : 1;
+ uint64_t duplex : 1;
+ uint64_t slottime : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx;
+ struct cvmx_gmxx_prtx_cfg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t tx_idle : 1; /**< TX Machine is idle */
+ uint64_t rx_idle : 1; /**< RX Machine is idle */
+ uint64_t reserved_9_11 : 3;
+ uint64_t speed_msb : 1; /**< Link Speed MSB [SPEED_MSB:SPEED]
+ 10 = 10Mbs operation
+ 00 = 100Mbs operation
+ 01 = 1000Mbs operation
+ 11 = Reserved
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
+ 0 = 512 bit times (10/100Mbs operation)
+ 1 = 4096 bit times (1000Mbs operation)
+ (SGMII/1000Base-X only) */
+ uint64_t duplex : 1; /**< Duplex
+ 0 = Half Duplex (collisions/extensions/bursts)
+ 1 = Full Duplex
+ (SGMII/1000Base-X only) */
+ uint64_t speed : 1; /**< Link Speed LSB [SPEED_MSB:SPEED]
+ 10 = 10Mbs operation
+ 00 = 100Mbs operation
+ 01 = 1000Mbs operation
+ 11 = Reserved
+ (SGMII/1000Base-X only) */
+ uint64_t en : 1; /**< Link Enable
+ When EN is clear, packets will not be received
+ or transmitted (including PAUSE and JAM packets).
+ If EN is cleared while a packet is currently
+ being received or transmitted, the packet will
+ be allowed to complete before the bus is idled.
+ On the RX side, subsequent packets in a burst
+ will be ignored. */
+#else
+ uint64_t en : 1;
+ uint64_t speed : 1;
+ uint64_t duplex : 1;
+ uint64_t slottime : 1;
+ uint64_t reserved_4_7 : 4;
+ uint64_t speed_msb : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t rx_idle : 1;
+ uint64_t tx_idle : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_prtx_cfg_cn52xx cn52xxp1;
+ struct cvmx_gmxx_prtx_cfg_cn52xx cn56xx;
+ struct cvmx_gmxx_prtx_cfg_cn52xx cn56xxp1;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1;
+ struct cvmx_gmxx_prtx_cfg_cn52xx cn61xx;
+ struct cvmx_gmxx_prtx_cfg_cn52xx cn63xx;
+ struct cvmx_gmxx_prtx_cfg_cn52xx cn63xxp1;
+ struct cvmx_gmxx_prtx_cfg_cn52xx cn66xx;
+ struct cvmx_gmxx_prtx_cfg_s cn68xx;
+ struct cvmx_gmxx_prtx_cfg_s cn68xxp1;
+ struct cvmx_gmxx_prtx_cfg_cn52xx cnf71xx;
+};
+typedef union cvmx_gmxx_prtx_cfg cvmx_gmxx_prtx_cfg_t;
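Because the link speed is split across the non-adjacent SPEED_MSB and SPEED
bits, decoding deserves care; a sketch of the [SPEED_MSB:SPEED] encoding
documented above:

    /* Decode [SPEED_MSB:SPEED] into Mbps; -1 for the reserved encoding.
     * Sketch only (SGMII/1000Base-X field layout). */
    static int
    gmx_prt_speed_mbps(cvmx_gmxx_prtx_cfg_t cfg)
    {
        switch ((cfg.s.speed_msb << 1) | cfg.s.speed) {
        case 0x2: return 10;   /* 10 = 10Mbs operation */
        case 0x0: return 100;  /* 00 = 100Mbs operation */
        case 0x1: return 1000; /* 01 = 1000Mbs operation */
        default:  return -1;   /* 11 = Reserved */
        }
    }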
+
+/**
+ * cvmx_gmx#_rx#_adr_cam0
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+union cvmx_gmxx_rxx_adr_cam0 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses.
+
+ ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
+ in either SGMII or XAUI mode such that any GMX
+ MAC can use any of the 32 common DMAC entries.
+
+ GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
+ registers used in XAUI mode. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn58xxp1;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn61xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn63xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn63xxp1;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn66xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn68xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn68xxp1;
+ struct cvmx_gmxx_rxx_adr_cam0_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_adr_cam0 cvmx_gmxx_rxx_adr_cam0_t;
+
+/**
+ * cvmx_gmx#_rx#_adr_cam1
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+union cvmx_gmxx_rxx_adr_cam1 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses.
+
+ ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
+ in either SGMII or XAUI mode such that any GMX
+ MAC can use any of the 32 common DMAC entries.
+
+ GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
+ registers used in XAUI mode. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn58xxp1;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn61xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn63xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn63xxp1;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn66xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn68xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn68xxp1;
+ struct cvmx_gmxx_rxx_adr_cam1_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_adr_cam1 cvmx_gmxx_rxx_adr_cam1_t;
+
+/**
+ * cvmx_gmx#_rx#_adr_cam2
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+union cvmx_gmxx_rxx_adr_cam2 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses.
+
+ ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
+ in either SGMII or XAUI mode such that any GMX
+ MAC can use any of the 32 common DMAC entries.
+
+ GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
+ registers used in XAUI mode. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn58xxp1;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn61xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn63xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn63xxp1;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn66xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn68xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn68xxp1;
+ struct cvmx_gmxx_rxx_adr_cam2_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_adr_cam2 cvmx_gmxx_rxx_adr_cam2_t;
+
+/**
+ * cvmx_gmx#_rx#_adr_cam3
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+union cvmx_gmxx_rxx_adr_cam3 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses.
+
+ ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
+ in either SGMII or XAUI mode such that any GMX
+ MAC can use any of the 32 common DMAC entries.
+
+ GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
+ registers used in XAUI mode. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn58xxp1;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn61xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn63xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn63xxp1;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn66xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn68xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn68xxp1;
+ struct cvmx_gmxx_rxx_adr_cam3_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_adr_cam3 cvmx_gmxx_rxx_adr_cam3_t;
+
+/**
+ * cvmx_gmx#_rx#_adr_cam4
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+union cvmx_gmxx_rxx_adr_cam4 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses.
+
+ ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
+ in either SGMII or XAUI mode such that any GMX
+ MAC can use any of the 32 common DMAC entries.
+
+ GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
+ registers used in XAUI mode. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn58xxp1;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn61xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn63xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn63xxp1;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn66xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn68xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn68xxp1;
+ struct cvmx_gmxx_rxx_adr_cam4_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_adr_cam4 cvmx_gmxx_rxx_adr_cam4_t;
+
+/**
+ * cvmx_gmx#_rx#_adr_cam5
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+union cvmx_gmxx_rxx_adr_cam5 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr : 64; /**< The DMAC address to match on
+
+ Each entry contributes 8 bits to one of 8 matchers.
+ The CAM matches against unicast or multicast DMAC
+ addresses.
+
+ ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
+ in either SGMII or XAUI mode such that any GMX
+ MAC can use any of the 32 common DMAC entries.
+
+ GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
+ registers used in XAUI mode. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn58xxp1;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn61xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn63xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn63xxp1;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn66xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn68xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn68xxp1;
+ struct cvmx_gmxx_rxx_adr_cam5_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_adr_cam5 cvmx_gmxx_rxx_adr_cam5_t;
+
+/**
+ * cvmx_gmx#_rx#_adr_cam_all_en
+ *
+ * GMX_RX_ADR_CAM_ALL_EN = Address Filtering Control Enable
+ *
+ */
+union cvmx_gmxx_rxx_adr_cam_all_en {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam_all_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t en : 32; /**< CAM Entry Enables
+
+ GMX has 32 DMAC entries that can be accessed with
+ the GMX_RX[0..3]_ADR_CAM[0..5] CSRs.
+ These 32 DMAC entries can be used by any of the
+ four SGMII MACs or the XAUI MAC.
+
+ Each port interface has independent control of
+ which of the 32 DMAC entries to include in the
+ CAM lookup.
+
+ GMX_RXx_ADR_CAM_ALL_EN was not present in legacy
+ GMX implementations which had only eight DMAC CAM
+ entries. New applications may choose to ignore
+ GMX_RXx_ADR_CAM_EN using GMX_RX_ADR_CAM_ALL_EN
+ instead.
+
+ EN represents the full 32 independent per-MAC
+ enables.
+
+ Writes to EN will be reflected in
+ GMX_RXx_ADR_CAM_EN[EN] and writes to
+ GMX_RXx_ADR_CAM_EN[EN] will be reflected in EN.
+ Refer to GMX_RXx_ADR_CAM_EN for the CSR mapping.
+
+ In XAUI mode, only GMX_RX0_ADR_CAM_ALL_EN is used
+ and GMX_RX[1,2,3]_ADR_CAM_ALL_EN should not be
+ used. */
+#else
+ uint64_t en : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam_all_en_s cn61xx;
+ struct cvmx_gmxx_rxx_adr_cam_all_en_s cn66xx;
+ struct cvmx_gmxx_rxx_adr_cam_all_en_s cn68xx;
+ struct cvmx_gmxx_rxx_adr_cam_all_en_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_adr_cam_all_en cvmx_gmxx_rxx_adr_cam_all_en_t;
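A minimal read-modify-write sketch for turning on one of the 32 shared DMAC
CAM entries, assuming the CVMX_GMXX_RXX_ADR_CAM_ALL_EN() address macro
defined elsewhere in this header:

    static void
    gmx_rx_cam_entry_enable(int port, int interface, int entry)
    {
        cvmx_gmxx_rxx_adr_cam_all_en_t all_en;

        all_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_ADR_CAM_ALL_EN(port, interface));
        all_en.s.en |= 1u << entry; /* entry is 0..31 */
        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_ALL_EN(port, interface), all_en.u64);
    }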
+
+/**
+ * cvmx_gmx#_rx#_adr_cam_en
+ *
+ * GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
+ *
+ */
+union cvmx_gmxx_rxx_adr_cam_en {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t en : 8; /**< CAM Entry Enables
+
+ GMX has 32 DMAC entries that can be accessed with
+ the GMX_RX[0..3]_ADR_CAM[0..5] CSRs.
+ These 32 DMAC entries can be used by any of the
+ four SGMII MACs or the XAUI MAC.
+
+ Each port interface has independent control of
+ which of the 32 DMAC entries to include in the
+ CAM lookup.
+
+ Legacy GMX implementations were able to CAM
+ against eight DMAC entries while current
+ implementations use 32 common entries.
+ This register is intended for legacy applications
+ that only require eight DMAC CAM entries per MAC.
+ New applications may choose to ignore
+ GMX_RXx_ADR_CAM_EN using GMX_RXx_ADR_CAM_ALL_EN
+ instead.
+
+ EN controls the enables for the eight legacy CAM
+ entries as follows:
+ port0, EN = GMX_RX0_ADR_CAM_ALL_EN[EN<7:0>]
+ port1, EN = GMX_RX1_ADR_CAM_ALL_EN[EN<15:8>]
+ port2, EN = GMX_RX2_ADR_CAM_ALL_EN[EN<23:16>]
+ port3, EN = GMX_RX3_ADR_CAM_ALL_EN[EN<31:24>]
+
+ The full 32 independent per-MAC enables are in
+ GMX_RX_ADR_CAM_ALL_EN.
+
+ Therefore, writes to GMX_RXX_ADR_CAM_ALL_EN[EN]
+ will be reflected in EN and writes to EN will be
+ reflected in GMX_RXX_ADR_CAM_ALL_EN[EN].
+
+ In XAUI mode, only GMX_RX0_ADR_CAM_EN is used and
+ GMX_RX[1,2,3]_ADR_CAM_EN should not be used. */
+#else
+ uint64_t en : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn58xxp1;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn61xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn63xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn63xxp1;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn66xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn68xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn68xxp1;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_adr_cam_en cvmx_gmxx_rxx_adr_cam_en_t;
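The EN/ALL_EN aliasing spelled out above makes the legacy register an 8-bit
window into the 32-bit vector; a sketch of the documented mapping:

    /* Legacy window for port p (0..3):
     * GMX_RXp_ADR_CAM_EN[EN] == GMX_RXp_ADR_CAM_ALL_EN[EN<8p+7:8p>]. */
    static inline uint8_t
    gmx_legacy_cam_en(uint32_t all_en, int p)
    {
        return (all_en >> (8 * p)) & 0xff;
    }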
+
+/**
+ * cvmx_gmx#_rx#_adr_ctl
+ *
+ * GMX_RX_ADR_CTL = Address Filtering Control
+ *
+ *
+ * Notes:
+ * * ALGORITHM
+ * Here is some pseudo code that represents the address filter behavior.
+ *
+ * @verbatim
+ * bool dmac_addr_filter(uint8 prt, uint48 dmac) {
+ *     ASSERT(prt >= 0 && prt <= 3);
+ *     if (is_bcst(dmac))                              // broadcast accept
+ *         return (GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
+ *     if (is_mcst(dmac) && GMX_RX[prt]_ADR_CTL[MCST] == 1) // multicast reject
+ *         return REJECT;
+ *     if (is_mcst(dmac) && GMX_RX[prt]_ADR_CTL[MCST] == 2) // multicast accept
+ *         return ACCEPT;
+ *
+ *     cam_hit = 0;
+ *
+ *     for (i = 0; i < 32; i++) {
+ *         if (GMX_RX[prt]_ADR_CAM_ALL_EN[EN<i>] == 0)
+ *             continue;
+ *         uint48 unswizzled_mac_adr = 0x0;
+ *         for (j = 5; j >= 0; j--) {
+ *             unswizzled_mac_adr = (unswizzled_mac_adr << 8) | GMX_RX[i>>3]_ADR_CAM[j][ADR<(i&7)*8+7:(i&7)*8>];
+ *         }
+ *         if (unswizzled_mac_adr == dmac) {
+ *             cam_hit = 1;
+ *             break;
+ *         }
+ *     }
+ *
+ *     if (cam_hit)
+ *         return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
+ *     else
+ *         return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
+ * }
+ * @endverbatim
+ *
+ * * XAUI Mode
+ *
+ * In XAUI mode, only GMX_RX0_ADR_CTL is used. GMX_RX[1,2,3]_ADR_CTL should not be used.
+ */
+union cvmx_gmxx_rxx_adr_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t cam_mode : 1; /**< Allow or deny DMAC address filter
+ 0 = reject the packet on DMAC address match
+ 1 = accept the packet on DMAC address match */
+ uint64_t mcst : 2; /**< Multicast Mode
+ 0 = Use the Address Filter CAM
+ 1 = Force reject all multicast packets
+ 2 = Force accept all multicast packets
+ 3 = Reserved */
+ uint64_t bcst : 1; /**< Accept All Broadcast Packets */
+#else
+ uint64_t bcst : 1;
+ uint64_t mcst : 2;
+ uint64_t cam_mode : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn58xxp1;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn61xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn63xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn63xxp1;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn66xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn68xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn68xxp1;
+ struct cvmx_gmxx_rxx_adr_ctl_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_adr_ctl cvmx_gmxx_rxx_adr_ctl_t;
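As a usage sketch of the filter algorithm above, this programs the common
"accept on CAM hit" policy (accept broadcast, run multicast through the
CAM), assuming the CVMX_GMXX_RXX_ADR_CTL() address macro defined elsewhere
in this header:

    static void
    gmx_rx_filter_accept_on_match(int port, int interface)
    {
        cvmx_gmxx_rxx_adr_ctl_t ctl;

        ctl.u64 = 0;
        ctl.s.bcst = 1;     /* accept all broadcast packets */
        ctl.s.mcst = 0;     /* multicast goes through the CAM */
        ctl.s.cam_mode = 1; /* accept, rather than reject, on DMAC match */
        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(port, interface), ctl.u64);
    }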
+
+/**
+ * cvmx_gmx#_rx#_decision
+ *
+ * GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
+ *
+ *
+ * Notes:
+ * As each byte in a packet is received by GMX, the L2 byte count is compared
+ * against the GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes
+ * from the beginning of the L2 header (DMAC). In normal operation, the L2
+ * header begins after the PREAMBLE+SFD (GMX_RX_FRM_CTL[PRE_CHK]=1) and any
+ * optional UDD skip data (GMX_RX_UDD_SKP[LEN]).
+ *
+ * When GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
+ * packet and would require UDD skip length to account for them.
+ *
+ *                            L2 Size
+ *   Port Mode    < GMX_RX_DECISION bytes       >= GMX_RX_DECISION bytes
+ *                (default=24)                  (default=24)
+ *
+ *   Full Duplex  accept packet;                apply filters;
+ *                no filtering is applied       accept packet based on DMAC
+ *                                              and PAUSE packet filters
+ *
+ *   Half Duplex  drop packet;                  apply filters;
+ *                packet is unconditionally     accept packet based on DMAC
+ *                dropped
+ *
+ * where l2_size = MAX(0, total_packet_size - GMX_RX_UDD_SKP[LEN] - ((GMX_RX_FRM_CTL[PRE_CHK]==1)*8))
+ */
+union cvmx_gmxx_rxx_decision {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_decision_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t cnt : 5; /**< The byte count to decide when to accept or filter
+ a packet. */
+#else
+ uint64_t cnt : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_decision_s cn30xx;
+ struct cvmx_gmxx_rxx_decision_s cn31xx;
+ struct cvmx_gmxx_rxx_decision_s cn38xx;
+ struct cvmx_gmxx_rxx_decision_s cn38xxp2;
+ struct cvmx_gmxx_rxx_decision_s cn50xx;
+ struct cvmx_gmxx_rxx_decision_s cn52xx;
+ struct cvmx_gmxx_rxx_decision_s cn52xxp1;
+ struct cvmx_gmxx_rxx_decision_s cn56xx;
+ struct cvmx_gmxx_rxx_decision_s cn56xxp1;
+ struct cvmx_gmxx_rxx_decision_s cn58xx;
+ struct cvmx_gmxx_rxx_decision_s cn58xxp1;
+ struct cvmx_gmxx_rxx_decision_s cn61xx;
+ struct cvmx_gmxx_rxx_decision_s cn63xx;
+ struct cvmx_gmxx_rxx_decision_s cn63xxp1;
+ struct cvmx_gmxx_rxx_decision_s cn66xx;
+ struct cvmx_gmxx_rxx_decision_s cn68xx;
+ struct cvmx_gmxx_rxx_decision_s cn68xxp1;
+ struct cvmx_gmxx_rxx_decision_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_decision cvmx_gmxx_rxx_decision_t;
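The l2_size expression in the notes reduces to a one-liner; a sketch, with
udd_skip and pre_chk standing in for GMX_RX_UDD_SKP[LEN] and
GMX_RX_FRM_CTL[PRE_CHK]:

    /* L2 byte count compared against GMX_RX_DECISION[CNT]. Sketch only. */
    static inline int
    gmx_l2_size(int total_packet_size, int udd_skip, int pre_chk)
    {
        int l2 = total_packet_size - udd_skip - (pre_chk ? 8 : 0);

        return l2 > 0 ? l2 : 0; /* MAX(0, ...) */
    }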
+
+/**
+ * cvmx_gmx#_rx#_frm_chk
+ *
+ * GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
+ *
+ *
+ * Notes:
+ * If GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
+ *
+ * In XAUI mode prt0 is used for checking.
+ */
+union cvmx_gmxx_rxx_frm_chk {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_chk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_frm_chk_s cn30xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn31xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn38xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_chk_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t reserved_6_6 : 1;
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t reserved_6_6 : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx cn52xxp1;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xxp1;
+ struct cvmx_gmxx_rxx_frm_chk_s cn58xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn58xxp1;
+ struct cvmx_gmxx_rxx_frm_chk_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn61xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn61xx cn63xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn61xx cn63xxp1;
+ struct cvmx_gmxx_rxx_frm_chk_cn61xx cn66xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn61xx cn68xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn61xx cn68xxp1;
+ struct cvmx_gmxx_rxx_frm_chk_cn61xx cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_frm_chk cvmx_gmxx_rxx_frm_chk_t;
+
+/**
+ * cvmx_gmx#_rx#_frm_ctl
+ *
+ * GMX_RX_FRM_CTL = Frame Control
+ *
+ *
+ * Notes:
+ * * PRE_STRP
+ * When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
+ * determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octeon
+ * core as part of the packet.
+ *
+ * In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
+ * size when checking against the MIN and MAX bounds. Furthermore, the bytes
+ * are skipped when locating the start of the L2 header for DMAC and Control
+ * frame recognition.
+ *
+ * * CTL_BCK/CTL_DRP
+ * These bits control how the HW handles incoming PAUSE packets. Here are
+ * the most common modes of operation:
+ * CTL_BCK=1,CTL_DRP=1 - HW does it all
+ * CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames
+ * CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored
+ *
+ * These control bits should be set to CTL_BCK=0,CTL_DRP=0 in half-duplex mode.
+ * Since PAUSE packets only apply to full-duplex operation, any PAUSE packet
+ * would constitute an exception which should be handled by the processing
+ * cores. PAUSE packets should not be forwarded.
+ */
+union cvmx_gmxx_rxx_frm_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t ptp_mode : 1; /**< Timestamp mode
+ When PTP_MODE is set, a 64-bit timestamp will be
+ prepended to every incoming packet. The timestamp
+ bytes are added to the packet in such a way as to
+ not modify the packet's receive byte count. This
+ implies that the GMX_RX_JABBER, MINERR,
+ GMX_RX_DECISION, GMX_RX_UDD_SKP, and the
+ GMX_RX_STATS_* do not require any adjustment as
+ they operate on the received packet size.
+ When the packet reaches PKI, its size will
+ reflect the additional bytes and is subject to
+ the restrictions below.
+ If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1.
+ If PTP_MODE=1,
+ PIP_PRT_CFGx[SKIP] should be increased by 8.
+ PIP_PRT_CFGx[HIGIG_EN] should be 0.
+ PIP_FRM_CHKx[MAXLEN] should be increased by 8.
+ PIP_FRM_CHKx[MINLEN] should be increased by 8.
+ PIP_TAG_INCx[EN] should be adjusted.
+ PIP_PRT_CFGBx[ALT_SKP_EN] should be 0. */
+ uint64_t reserved_11_11 : 1;
+ uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
+ due to PARTIAL packets */
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns to the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PRE_STRP should be set to
+ account for the variable nature of the PREAMBLE.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features.
+ (SGMII at 10/100Mbs only) */
+ uint64_t pad_len : 1; /**< When set, disables the length check for non-min
+ sized pkts with padding in the client data
+ (PASS3 Only) */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
+ GMX will begin the frame at the first SFD.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features.
+ (SGMII/1000Base-X only) */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features.
+ If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
+ uint64_t pre_chk : 1; /**< This port is configured to send a valid 802.3
+ PREAMBLE to begin every frame. GMX checks that a
+ valid PREAMBLE is received (based on PRE_FREE).
+ When a problem does occur within the PREAMBLE
+ sequence, the frame is marked as bad and not sent
+ into the core. The GMX_GMX_RX_INT_REG[PCTERR]
+ interrupt is also raised.
+ When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
+ must be zero.
+ If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t pre_align : 1;
+ uint64_t null_dis : 1;
+ uint64_t reserved_11_11 : 1;
+ uint64_t ptp_mode : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t pad_len : 1; /**< When set, disables the length check for non-min
+ sized pkts with padding in the client data */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking.
+ 0-7 cycles of PREAMBLE followed by SFD (pass 1.0)
+ 0-254 cycles of PREAMBLE followed by SFD (else) */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking.
+ 0 - 7 cycles of PREAMBLE followed by SFD (pass1.0)
+ 0 - 254 cycles of PREAMBLE followed by SFD (else) */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn31xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
+ due to PARTIAL packets */
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns to the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PREAMBLE can be consumed
+ by the HW so when PRE_ALIGN is set, PRE_FREE,
+ PRE_STRP must be set for correct operation.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking.
+ 0-254 cycles of PREAMBLE followed by SFD */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_align : 1;
+ uint64_t null_dis : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns to the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PRE_STRP should be set to
+ account for the variable nature of the PREAMBLE.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features.
+ (SGMII at 10/100Mbs only) */
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
+ 0 - 254 cycles of PREAMBLE followed by SFD
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features.
+ (SGMII/1000Base-X only) */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly.
+ When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
+ must be zero. */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_align : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
+ due to PARTIAL packets
+ In spi4 mode, all ports use prt0 for checking. */
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns to the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PREAMBLE can be consumed
+ by the HW so when PRE_ALIGN is set, PRE_FREE,
+ PRE_STRP must be set for correct operation.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pad_len : 1; /**< When set, disables the length check for non-min
+ sized pkts with padding in the client data
+ (PASS3 Only) */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
+ 0 - 254 cycles of PREAMBLE followed by SFD */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t pre_align : 1;
+ uint64_t null_dis : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn58xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t ptp_mode : 1; /**< Timestamp mode
+ When PTP_MODE is set, a 64-bit timestamp will be
+ prepended to every incoming packet. The timestamp
+ bytes are added to the packet in such a way as to
+ not modify the packet's receive byte count. This
+ implies that the GMX_RX_JABBER, MINERR,
+ GMX_RX_DECISION, GMX_RX_UDD_SKP, and the
+ GMX_RX_STATS_* do not require any adjustment as
+ they operate on the received packet size.
+ When the packet reaches PKI, its size will
+ reflect the additional bytes and is subject to
+ the restrictions below.
+ If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1.
+ If PTP_MODE=1,
+ PIP_PRT_CFGx[SKIP] should be increased by 8.
+ PIP_PRT_CFGx[HIGIG_EN] should be 0.
+ PIP_FRM_CHKx[MAXLEN] should be increased by 8.
+ PIP_FRM_CHKx[MINLEN] should be increased by 8.
+ PIP_TAG_INCx[EN] should be adjusted.
+ PIP_PRT_CFGBx[ALT_SKP_EN] should be 0. */
+ uint64_t reserved_11_11 : 1;
+ uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
+ due to PARTIAL packets */
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns to the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PRE_STRP should be set to
+ account for the variable nature of the PREAMBLE.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features.
+ (SGMII at 10/100Mbs only) */
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
+ GMX will begin the frame at the first SFD.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features.
+ (SGMII/1000Base-X only) */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features.
+ If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
+ uint64_t pre_chk : 1; /**< This port is configured to send a valid 802.3
+ PREAMBLE to begin every frame. GMX checks that a
+ valid PREAMBLE is received (based on PRE_FREE).
+ When a problem does occur within the PREAMBLE
+ sequence, the frame is marked as bad and not sent
+ into the core. The GMX_GMX_RX_INT_REG[PCTERR]
+ interrupt is also raised.
+ When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
+ must be zero.
+ If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_align : 1;
+ uint64_t null_dis : 1;
+ uint64_t reserved_11_11 : 1;
+ uint64_t ptp_mode : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } cn61xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn63xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn63xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn66xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn68xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn68xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_cn61xx cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_frm_ctl cvmx_gmxx_rxx_frm_ctl_t;
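A hedged sketch of switching on the PTP timestamp mode while honoring the
PRE_STRP rule called out in the field description (the companion PIP
adjustments listed there still have to be made separately); the
CVMX_GMXX_RXX_FRM_CTL() address macro is assumed from elsewhere in this
header:

    static void
    gmx_rx_enable_ptp(int port, int interface)
    {
        cvmx_gmxx_rxx_frm_ctl_t frm_ctl;

        frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface));
        frm_ctl.s.ptp_mode = 1;
        if (frm_ctl.s.pre_chk)
            frm_ctl.s.pre_strp = 1; /* PTP_MODE=1 and PRE_CHK=1 require PRE_STRP=1 */
        cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface), frm_ctl.u64);
    }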
+
+/**
+ * cvmx_gmx#_rx#_frm_max
+ *
+ * GMX_RX_FRM_MAX = Frame Max length
+ *
+ *
+ * Notes:
+ * In spi4 mode, all spi4 ports use prt0 for checking.
+ *
+ * When changing the LEN field, be sure that LEN does not exceed
+ * GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that
+ * are within the maximum length parameter to be rejected because they exceed
+ * the GMX_RX_JABBER[CNT] limit.
+ */
+union cvmx_gmxx_rxx_frm_max {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_max_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t len : 16; /**< Byte count for Max-sized frame check
+ GMX_RXn_FRM_CHK[MAXERR] enables the check for
+ port n.
+ If enabled, failing packets set the MAXERR
+ interrupt and work-queue entry WORD2[opcode] is
+ set to OVER_FCS (0x3, if packet has bad FCS) or
+ OVER_ERR (0x4, if packet has good FCS).
+ LEN <= GMX_RX_JABBER[CNT] */
+#else
+ uint64_t len : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_frm_max_s cn30xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn31xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn38xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_max_s cn58xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn58xxp1;
+};
+typedef union cvmx_gmxx_rxx_frm_max cvmx_gmxx_rxx_frm_max_t;
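Given the LEN <= GMX_RX_JABBER[CNT] constraint in the notes, a defensive
writer can clamp against the jabber limit first; a sketch assuming the
CVMX_GMXX_RXX_FRM_MAX()/CVMX_GMXX_RXX_JABBER() address macros and the
cvmx_gmxx_rxx_jabber_t type declared elsewhere in this header:

    static void
    gmx_rx_set_frm_max(int port, int interface, uint64_t len)
    {
        cvmx_gmxx_rxx_jabber_t jabber;
        cvmx_gmxx_rxx_frm_max_t frm_max;

        jabber.u64 = cvmx_read_csr(CVMX_GMXX_RXX_JABBER(port, interface));
        frm_max.u64 = 0;
        /* Keep LEN <= GMX_RX_JABBER[CNT] so maximum-length packets are not
         * rejected by the jabber check instead. */
        frm_max.s.len = len < jabber.s.cnt ? len : jabber.s.cnt;
        cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(port, interface), frm_max.u64);
    }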
+
+/**
+ * cvmx_gmx#_rx#_frm_min
+ *
+ * GMX_RX_FRM_MIN = Frame Min length
+ *
+ *
+ * Notes:
+ * In spi4 mode, all spi4 ports use prt0 for checking.
+ *
+ */
+union cvmx_gmxx_rxx_frm_min {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_min_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t len : 16; /**< Byte count for Min-sized frame check
+ GMX_RXn_FRM_CHK[MINERR] enables the check for
+ port n.
+ If enabled, failing packets set the MINERR
+ interrupt and work-queue entry WORD2[opcode] is
+ set to UNDER_FCS (0x6, if packet has bad FCS) or
+ UNDER_ERR (0x8, if packet has good FCS). */
+#else
+ uint64_t len : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_frm_min_s cn30xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn31xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn38xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_min_s cn58xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn58xxp1;
+};
+typedef union cvmx_gmxx_rxx_frm_min cvmx_gmxx_rxx_frm_min_t;
+
+/**
+ * cvmx_gmx#_rx#_ifg
+ *
+ * GMX_RX_IFG = RX Min IFG
+ *
+ */
+union cvmx_gmxx_rxx_ifg {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_ifg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t ifg : 4; /**< Min IFG (in IFG*8 bits) between packets used to
+ determine IFGERR. Normally IFG is 96 bits.
+ Note in some operating modes, IFG cycles can be
+ inserted or removed in order to achieve clock rate
+ adaptation. For these reasons, the default value
+ is slightly conservative and does not check up to
+ the full 96 bits of IFG.
+ (SGMII/1000Base-X only) */
+#else
+ uint64_t ifg : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_ifg_s cn30xx;
+ struct cvmx_gmxx_rxx_ifg_s cn31xx;
+ struct cvmx_gmxx_rxx_ifg_s cn38xx;
+ struct cvmx_gmxx_rxx_ifg_s cn38xxp2;
+ struct cvmx_gmxx_rxx_ifg_s cn50xx;
+ struct cvmx_gmxx_rxx_ifg_s cn52xx;
+ struct cvmx_gmxx_rxx_ifg_s cn52xxp1;
+ struct cvmx_gmxx_rxx_ifg_s cn56xx;
+ struct cvmx_gmxx_rxx_ifg_s cn56xxp1;
+ struct cvmx_gmxx_rxx_ifg_s cn58xx;
+ struct cvmx_gmxx_rxx_ifg_s cn58xxp1;
+ struct cvmx_gmxx_rxx_ifg_s cn61xx;
+ struct cvmx_gmxx_rxx_ifg_s cn63xx;
+ struct cvmx_gmxx_rxx_ifg_s cn63xxp1;
+ struct cvmx_gmxx_rxx_ifg_s cn66xx;
+ struct cvmx_gmxx_rxx_ifg_s cn68xx;
+ struct cvmx_gmxx_rxx_ifg_s cn68xxp1;
+ struct cvmx_gmxx_rxx_ifg_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_ifg cvmx_gmxx_rxx_ifg_t;
+
+/**
+ * cvmx_gmx#_rx#_int_en
+ *
+ * GMX_RX_INT_EN = Interrupt Enable
+ *
+ *
+ * Notes:
+ * In XAUI mode prt0 is used for checking.
+ *
+ */
+union cvmx_gmxx_rxx_int_en {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t hg2cc : 1; /**< HiGig2 CRC8 or Control char error interrupt enable */
+ uint64_t hg2fld : 1; /**< HiGig2 Bad field error interrupt enable */
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ (SGMII/1000Base-X only) */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t hg2fld : 1;
+ uint64_t hg2cc : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_int_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx;
+ struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx;
+ struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2;
+ struct cvmx_gmxx_rxx_int_en_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t reserved_6_6 : 1;
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t reserved_6_6 : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_rxx_int_en_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t hg2cc : 1; /**< HiGig2 CRC8 or Control char error interrupt enable */
+ uint64_t hg2fld : 1; /**< HiGig2 Bad field error interrupt enable */
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t hg2fld : 1;
+ uint64_t hg2cc : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1;
+ struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx;
+ struct cvmx_gmxx_rxx_int_en_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_int_en_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn58xx;
+ struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1;
+ struct cvmx_gmxx_rxx_int_en_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t hg2cc : 1; /**< HiGig2 CRC8 or Control char error interrupt enable */
+ uint64_t hg2fld : 1; /**< HiGig2 Bad field error interrupt enable */
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t hg2fld : 1;
+ uint64_t hg2cc : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn61xx;
+ struct cvmx_gmxx_rxx_int_en_cn61xx cn63xx;
+ struct cvmx_gmxx_rxx_int_en_cn61xx cn63xxp1;
+ struct cvmx_gmxx_rxx_int_en_cn61xx cn66xx;
+ struct cvmx_gmxx_rxx_int_en_cn61xx cn68xx;
+ struct cvmx_gmxx_rxx_int_en_cn61xx cn68xxp1;
+ struct cvmx_gmxx_rxx_int_en_cn61xx cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_int_en cvmx_gmxx_rxx_int_en_t;
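+
+/* Example (editor's sketch, not part of the original SDK header): enabling a
+ * subset of the RX exception interrupts with a read-modify-write so that bits
+ * enabled elsewhere are preserved. Assumes the CVMX_GMXX_RXX_INT_EN(port,
+ * interface) address macro and the cvmx_read_csr()/cvmx_write_csr()
+ * accessors. */
+static inline void example_gmx_enable_rx_ints(int interface, int port)
+{
+	cvmx_gmxx_rxx_int_en_t en;
+
+	en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(port, interface));
+	en.s.fcserr = 1;	/* FCS/CRC errors */
+	en.s.jabber = 1;	/* frames longer than GMX_RX_JABBER[CNT] */
+	en.s.rcverr = 1;	/* data reception errors */
+	cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(port, interface), en.u64);
+}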
+
+/**
+ * cvmx_gmx#_rx#_int_reg
+ *
+ * GMX_RX_INT_REG = Interrupt Register
+ *
+ *
+ * Notes:
+ * (1) exceptions will only be raised to the control processor if the
+ * corresponding bit in the GMX_RX_INT_EN register is set.
+ *
+ * (2) exception conditions 10:0 can also set the rcv/opcode in the received
+ * packet's workQ entry. The GMX_RX_FRM_CHK register provides a bit mask
+ * for configuring which conditions set the error.
+ *
+ * (3) in half duplex operation, the expectation is that collisions will appear
+ * as either MINERR or CAREXT errors.
+ *
+ * (4) JABBER - An RX Jabber error indicates that a packet was received which
+ * is longer than the maximum allowed packet as defined by the
+ * system. GMX will truncate the packet at the JABBER count.
+ * Failure to do so could lead to system instability.
+ *
+ * (5) NIBERR - This error is illegal at 1000Mbs speeds
+ * (GMX_RX_PRT_CFG[SPEED]==0) and will never assert.
+ *
+ * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
+ * GMX_RX_FRM_MAX. For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
+ * > GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
+ *
+ * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < 64
+ *
+ * (8) ALNERR - Indicates that the packet received was not an integer number of
+ * bytes. If FCS checking is enabled, ALNERR will only assert if
+ * the FCS is bad. If FCS checking is disabled, ALNERR will
+ * assert in all non-integer frame cases.
+ *
+ * (9) Collisions - Collisions can only occur in half-duplex mode. A collision
+ * is assumed by the receiver when the slottime
+ * (GMX_PRT_CFG[SLOTTIME]) is not satisfied. In 10/100 mode,
+ * this will result in a frame < SLOTTIME. In 1000 mode, it
+ * could result either in frame < SLOTTIME or a carrier extend
+ * error with the SLOTTIME. These conditions are visible by...
+ *
+ * . transfer ended before slottime - COLDET
+ * . carrier extend error - CAREXT
+ *
+ * (A) LENERR - Length errors occur when the received packet does not match the
+ * length field. LENERR is only checked for packets between 64
+ * and 1500 bytes. For untagged frames, the length must match
+ * exactly. For tagged frames, the length or length+4 must match.
+ *
+ * (B) PCTERR - checks that the frame begins with a valid PREAMBLE sequence.
+ * Does not check the number of PREAMBLE cycles.
+ *
+ * (C) OVRERR -
+ *
+ * OVRERR is an architectural assertion check internal to GMX to
+ * make sure no assumption was violated. In a correctly operating
+ * system, this interrupt can never fire.
+ *
+ * GMX has an internal arbiter which selects which of 4 ports to
+ * buffer in the main RX FIFO. If we normally buffer 8 bytes,
+ * then each port will typically push a tick every 8 cycles - if
+ * the packet interface is going as fast as possible. If there
+ * are four ports, they push every two cycles. The assumption is
+ * that the inbound module will always be able to consume one
+ * tick before another is produced; OVRERR asserts when that
+ * assumption is violated.
+ *
+ * (D) In XAUI mode prt0 is used for interrupt logging.
+ */
+union cvmx_gmxx_rxx_int_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t hg2cc : 1; /**< HiGig2 received message CRC or Control char error
+ Set when either CRC8 error detected or when
+ a Control Character is found in the message
+ bytes after the K.SOM
+ NOTE: HG2CC has higher priority than HG2FLD
+ i.e. a HiGig2 message that results in HG2CC
+ getting set, will never set HG2FLD. */
+ uint64_t hg2fld : 1; /**< HiGig2 received message field error, as below
+ 1) MSG_TYPE field not 6'b00_0000
+ i.e. it is not a FLOW CONTROL message, which
+ is the only defined type for HiGig2
+ 2) FWD_TYPE field not 2'b00 i.e. Link Level msg
+ which is the only defined type for HiGig2
+ 3) FC_OBJECT field is neither 4'b0000 for
+ Physical Link nor 4'b0010 for Logical Link.
+ Those are the only two defined types in HiGig2 */
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol
+ In XAUI mode, the column of data that was bad
+ will be logged in GMX_RX_XAUI_BAD_COL */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert
+ (SGMII/1000Base-X only) */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize
+ Frame length checks are typically handled in PIP
+ (PIP_INT_REG[MINERR]), but pause frames are
+ normally discarded before being inspected by PIP. */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t hg2fld : 1;
+ uint64_t hg2cc : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2;
+ struct cvmx_gmxx_rxx_int_reg_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t reserved_6_6 : 1;
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t reserved_6_6 : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t hg2cc : 1; /**< HiGig2 received message CRC or Control char error
+ Set when either CRC8 error detected or when
+ a Control Character is found in the message
+ bytes after the K.SOM
+ NOTE: HG2CC has higher priority than HG2FLD
+ i.e. a HiGig2 message that results in HG2CC
+ getting set, will never set HG2FLD. */
+ uint64_t hg2fld : 1; /**< HiGig2 received message field error, as below
+ 1) MSG_TYPE field not 6'b00_0000
+ i.e. it is not a FLOW CONTROL message, which
+ is the only defined type for HiGig2
+ 2) FWD_TYPE field not 2'b00 i.e. Link Level msg
+ which is the only defined type for HiGig2
+ 3) FC_OBJECT field is neither 4'b0000 for
+ Physical Link nor 4'b0010 for Logical Link.
+ Those are the only two defined types in HiGig2 */
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol
+ In XAUI mode, the column of data that was bad
+ will be logged in GMX_RX_XAUI_BAD_COL */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t hg2fld : 1;
+ uint64_t hg2cc : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx;
+ struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol
+ In XAUI mode, the column of data that was bad
+ will be logged in GMX_RX_XAUI_BAD_COL */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn58xx;
+ struct cvmx_gmxx_rxx_int_reg_cn58xx cn58xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t hg2cc : 1; /**< HiGig2 received message CRC or Control char error
+ Set when either CRC8 error detected or when
+ a Control Character is found in the message
+ bytes after the K.SOM
+ NOTE: HG2CC has higher priority than HG2FLD
+ i.e. a HiGig2 message that results in HG2CC
+ getting set, will never set HG2FLD. */
+ uint64_t hg2fld : 1; /**< HiGig2 received message field error, as below
+ 1) MSG_TYPE field not 6'b00_0000
+ i.e. it is not a FLOW CONTROL message, which
+ is the only defined type for HiGig2
+ 2) FWD_TYPE field not 2'b00 i.e. Link Level msg
+ which is the only defined type for HiGig2
+ 3) FC_OBJECT field is neither 4'b0000 for
+ Physical Link nor 4'b0010 for Logical Link.
+ Those are the only two defined types in HiGig2 */
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol
+ In XAUI mode, the column of data that was bad
+ will be logged in GMX_RX_XAUI_BAD_COL */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize
+ Frame length checks are typically handled in PIP
+ (PIP_INT_REG[MINERR]), but pause frames are
+ normally discarded before being inspected by PIP. */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t hg2fld : 1;
+ uint64_t hg2cc : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn61xx;
+ struct cvmx_gmxx_rxx_int_reg_cn61xx cn63xx;
+ struct cvmx_gmxx_rxx_int_reg_cn61xx cn63xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn61xx cn66xx;
+ struct cvmx_gmxx_rxx_int_reg_cn61xx cn68xx;
+ struct cvmx_gmxx_rxx_int_reg_cn61xx cn68xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn61xx cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_int_reg cvmx_gmxx_rxx_int_reg_t;
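+
+/* Example (editor's sketch, not part of the original SDK header): polling the
+ * RX interrupt register and acknowledging what was seen. Writing the read
+ * value back clears the serviced bits in this SDK's own interrupt helpers;
+ * that write-one-to-clear behavior is an assumption here, not stated in the
+ * notes above. Uses cvmx_dprintf() from cvmx.h for the diagnostics. */
+static inline void example_gmx_poll_rx_ints(int interface, int port)
+{
+	cvmx_gmxx_rxx_int_reg_t isr;
+
+	isr.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(port, interface));
+	if (isr.s.ovrerr)	/* note (C): should never assert */
+		cvmx_dprintf("GMX%d port %d: OVRERR\n", interface, port);
+	if (isr.s.jabber)	/* note (4): packet truncated at JABBER count */
+		cvmx_dprintf("GMX%d port %d: jabber\n", interface, port);
+	/* acknowledge everything we just observed */
+	cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(port, interface), isr.u64);
+}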
+
+/**
+ * cvmx_gmx#_rx#_jabber
+ *
+ * GMX_RX_JABBER = The max size packet after which GMX will truncate
+ *
+ *
+ * Notes:
+ * CNT must be 8-byte aligned such that CNT[2:0] == 0
+ *
+ * The packet that will be sent to the packet input logic will have an
+ * additional 8 bytes if GMX_RX_FRM_CTL[PRE_CHK] is set and
+ * GMX_RX_FRM_CTL[PRE_STRP] is clear. The max packet that will be sent is
+ * defined as...
+ *
+ * max_sized_packet = GMX_RX_JABBER[CNT]+((GMX_RX_FRM_CTL[PRE_CHK] & !GMX_RX_FRM_CTL[PRE_STRP])*8)
+ *
+ * In XAUI mode prt0 is used for checking.
+ */
+union cvmx_gmxx_rxx_jabber {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_jabber_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt : 16; /**< Byte count for jabber check
+ Failing packets set the JABBER interrupt and are
+ optionally sent with opcode==JABBER
+ GMX will truncate the packet to CNT bytes */
+#else
+ uint64_t cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_jabber_s cn30xx;
+ struct cvmx_gmxx_rxx_jabber_s cn31xx;
+ struct cvmx_gmxx_rxx_jabber_s cn38xx;
+ struct cvmx_gmxx_rxx_jabber_s cn38xxp2;
+ struct cvmx_gmxx_rxx_jabber_s cn50xx;
+ struct cvmx_gmxx_rxx_jabber_s cn52xx;
+ struct cvmx_gmxx_rxx_jabber_s cn52xxp1;
+ struct cvmx_gmxx_rxx_jabber_s cn56xx;
+ struct cvmx_gmxx_rxx_jabber_s cn56xxp1;
+ struct cvmx_gmxx_rxx_jabber_s cn58xx;
+ struct cvmx_gmxx_rxx_jabber_s cn58xxp1;
+ struct cvmx_gmxx_rxx_jabber_s cn61xx;
+ struct cvmx_gmxx_rxx_jabber_s cn63xx;
+ struct cvmx_gmxx_rxx_jabber_s cn63xxp1;
+ struct cvmx_gmxx_rxx_jabber_s cn66xx;
+ struct cvmx_gmxx_rxx_jabber_s cn68xx;
+ struct cvmx_gmxx_rxx_jabber_s cn68xxp1;
+ struct cvmx_gmxx_rxx_jabber_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_jabber cvmx_gmxx_rxx_jabber_t;
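+
+/* Example (editor's sketch, not part of the original SDK header): programming
+ * an 8-byte-aligned jabber count and evaluating the max_sized_packet formula
+ * from the note above. Assumes the CVMX_GMXX_RXX_JABBER(port, interface) and
+ * CVMX_GMXX_RXX_FRM_CTL(port, interface) address macros from this file. */
+static inline uint64_t example_gmx_set_jabber(int interface, int port,
+                                              uint16_t cnt)
+{
+	cvmx_gmxx_rxx_jabber_t jabber;
+	cvmx_gmxx_rxx_frm_ctl_t frm_ctl;
+
+	jabber.u64 = 0;
+	jabber.s.cnt = cnt & ~0x7;	/* CNT[2:0] must be zero */
+	cvmx_write_csr(CVMX_GMXX_RXX_JABBER(port, interface), jabber.u64);
+
+	/* max_sized_packet = CNT + ((PRE_CHK & !PRE_STRP) * 8) */
+	frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface));
+	return jabber.s.cnt +
+	       ((frm_ctl.s.pre_chk && !frm_ctl.s.pre_strp) ? 8 : 0);
+}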
+
+/**
+ * cvmx_gmx#_rx#_pause_drop_time
+ *
+ * GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
+ *
+ */
+union cvmx_gmxx_rxx_pause_drop_time {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_pause_drop_time_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t status : 16; /**< Time extracted from the dropped PAUSE packet */
+#else
+ uint64_t status : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn61xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn63xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn63xxp1;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn66xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn68xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn68xxp1;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_pause_drop_time cvmx_gmxx_rxx_pause_drop_time_t;
+
+/**
+ * cvmx_gmx#_rx#_rx_inbnd
+ *
+ * GMX_RX_INBND = RGMII InBand Link Status
+ *
+ *
+ * Notes:
+ * These fields are only valid if the attached PHY is operating in RGMII mode
+ * and supports the optional in-band status (see section 3.4.1 of the RGMII
+ * specification, version 1.3 for more information).
+ */
+union cvmx_gmxx_rxx_rx_inbnd {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_rx_inbnd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t duplex : 1; /**< RGMII Inbound LinkDuplex
+ 0=half-duplex
+ 1=full-duplex */
+ uint64_t speed : 2; /**< RGMII Inbound LinkSpeed
+ 00=2.5MHz
+ 01=25MHz
+ 10=125MHz
+ 11=Reserved */
+ uint64_t status : 1; /**< RGMII Inbound LinkStatus
+ 0=down
+ 1=up */
+#else
+ uint64_t status : 1;
+ uint64_t speed : 2;
+ uint64_t duplex : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn30xx;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn31xx;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn38xx;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn38xxp2;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn50xx;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn58xx;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn58xxp1;
+};
+typedef union cvmx_gmxx_rxx_rx_inbnd cvmx_gmxx_rxx_rx_inbnd_t;
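+
+/* Example (editor's sketch, not part of the original SDK header): decoding
+ * the RGMII in-band status into a link speed in Mbps. The clock-to-speed
+ * mapping (2.5/25/125 MHz -> 10/100/1000 Mbps) follows the RGMII
+ * specification cited above; a return of 0 means the link is down. */
+static inline int example_gmx_inbnd_speed_mbps(int interface, int port)
+{
+	cvmx_gmxx_rxx_rx_inbnd_t inbnd;
+
+	inbnd.u64 = cvmx_read_csr(CVMX_GMXX_RXX_RX_INBND(port, interface));
+	if (!inbnd.s.status)
+		return 0;	/* link down */
+	switch (inbnd.s.speed) {
+	case 0:  return 10;	/* 2.5 MHz */
+	case 1:  return 100;	/* 25 MHz */
+	case 2:  return 1000;	/* 125 MHz */
+	default: return -1;	/* reserved encoding */
+	}
+}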
+
+/**
+ * cvmx_gmx#_rx#_stats_ctl
+ *
+ * GMX_RX_STATS_CTL = RX Stats Control register
+ *
+ */
+union cvmx_gmxx_rxx_stats_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t rd_clr : 1; /**< RX Stats registers will clear on reads */
+#else
+ uint64_t rd_clr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn58xxp1;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn61xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn63xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn63xxp1;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn66xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn68xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn68xxp1;
+ struct cvmx_gmxx_rxx_stats_ctl_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_stats_ctl cvmx_gmxx_rxx_stats_ctl_t;
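+
+/* Example (editor's sketch, not part of the original SDK header): enabling
+ * clear-on-read and snapshotting one counter. With RD_CLR set, every read of
+ * the GMX_RX_STATS_* registers below returns the count accumulated since the
+ * previous read. Assumes the CVMX_GMXX_RXX_STATS_CTL(port, interface) and
+ * CVMX_GMXX_RXX_STATS_OCTS(port, interface) address macros. */
+static inline uint64_t example_gmx_read_rx_octets(int interface, int port)
+{
+	cvmx_gmxx_rxx_stats_ctl_t ctl;
+
+	ctl.u64 = 0;
+	ctl.s.rd_clr = 1;	/* stats registers clear on read */
+	cvmx_write_csr(CVMX_GMXX_RXX_STATS_CTL(port, interface), ctl.u64);
+
+	/* 48-bit octet count of good received packets */
+	return cvmx_read_csr(CVMX_GMXX_RXX_STATS_OCTS(port, interface));
+}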
+
+/**
+ * cvmx_gmx#_rx#_stats_octs
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_rxx_stats_octs {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_octs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of received good packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_stats_octs_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_octs_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn58xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_s cn61xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn63xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn63xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_s cn66xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn68xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn68xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_stats_octs cvmx_gmxx_rxx_stats_octs_t;
+
+/**
+ * cvmx_gmx#_rx#_stats_octs_ctl
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_rxx_stats_octs_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of received pause packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn61xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn66xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn68xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn68xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_stats_octs_ctl cvmx_gmxx_rxx_stats_octs_ctl_t;
+
+/**
+ * cvmx_gmx#_rx#_stats_octs_dmac
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_rxx_stats_octs_dmac {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of filtered dmac packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn61xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn66xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn68xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn68xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_stats_octs_dmac cvmx_gmxx_rxx_stats_octs_dmac_t;
+
+/**
+ * cvmx_gmx#_rx#_stats_octs_drp
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_rxx_stats_octs_drp {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of dropped packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn61xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn66xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn68xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn68xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_stats_octs_drp cvmx_gmxx_rxx_stats_octs_drp_t;
+
+/**
+ * cvmx_gmx#_rx#_stats_pkts
+ *
+ * GMX_RX_STATS_PKTS
+ *
+ * Count of good received packets - packets that are not recognized as PAUSE
+ * packets, are not dropped due to the DMAC filter or a full FIFO, and do not
+ * have any other OPCODE (FCS, Length, etc.).
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_rxx_stats_pkts {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_pkts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of received good packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn58xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn61xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn63xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn63xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn66xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn68xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn68xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_stats_pkts cvmx_gmxx_rxx_stats_pkts_t;
+
+/**
+ * cvmx_gmx#_rx#_stats_pkts_bad
+ *
+ * GMX_RX_STATS_PKTS_BAD
+ *
+ * Count of all packets received with some error that were not dropped
+ * either due to the dmac filter or lack of room in the receive FIFO.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_rxx_stats_pkts_bad {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of bad packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn61xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn66xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn68xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn68xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_stats_pkts_bad cvmx_gmxx_rxx_stats_pkts_bad_t;
+
+/**
+ * cvmx_gmx#_rx#_stats_pkts_ctl
+ *
+ * GMX_RX_STATS_PKTS_CTL
+ *
+ * Count of all packets received that were recognized as Flow Control or
+ * PAUSE packets. PAUSE packets with any kind of error are counted in
+ * GMX_RX_STATS_PKTS_BAD. Pause packets can be optionally dropped or
+ * forwarded based on the GMX_RX_FRM_CTL[CTL_DRP] bit. This count
+ * increments regardless of whether the packet is dropped. Pause packets
+ * will never be counted in GMX_RX_STATS_PKTS. Packets dropped due to the dmac
+ * filter will be counted in GMX_RX_STATS_PKTS_DMAC and not here.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_rxx_stats_pkts_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of received pause packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn61xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn66xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn68xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn68xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_stats_pkts_ctl cvmx_gmxx_rxx_stats_pkts_ctl_t;
+
+/**
+ * cvmx_gmx#_rx#_stats_pkts_dmac
+ *
+ * GMX_RX_STATS_PKTS_DMAC
+ *
+ * Count of all packets received that were dropped by the dmac filter.
+ * Packets that match the DMAC will be dropped and counted here regardless
+ * of whether they were bad packets. These packets will never be counted in
+ * GMX_RX_STATS_PKTS.
+ *
+ * Some packets that were not able to satisfy the DECISION_CNT may not
+ * actually be dropped by Octeon, but they will be counted here as if they
+ * were dropped.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_rxx_stats_pkts_dmac {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of filtered dmac packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn61xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn66xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn68xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn68xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_stats_pkts_dmac cvmx_gmxx_rxx_stats_pkts_dmac_t;
+
+/**
+ * cvmx_gmx#_rx#_stats_pkts_drp
+ *
+ * GMX_RX_STATS_PKTS_DRP
+ *
+ * Count of all packets received that were dropped due to a full receive FIFO.
+ * This counts both partial packets, in which there was enough space in the RX
+ * FIFO to begin buffering the packet, and total drops, in which no packet was
+ * sent to PKI. This counts good and bad packets received - all packets dropped
+ * by the FIFO. It does not count packets dropped by the dmac or pause packet
+ * filters.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_rxx_stats_pkts_drp {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of dropped packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn61xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn66xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn68xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn68xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_stats_pkts_drp cvmx_gmxx_rxx_stats_pkts_drp_t;
+
+/**
+ * cvmx_gmx#_rx#_udd_skp
+ *
+ * GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
+ *
+ *
+ * Notes:
+ * (1) The skip bytes are part of the packet and will be sent down the NCB
+ * packet interface and will be handled by PKI.
+ *
+ * (2) The system can determine if the UDD bytes are included in the FCS check
+ * by using the FCSSEL field - if the FCS check is enabled.
+ *
+ * (3) Assume that the preamble/sfd is always at the start of the frame - even
+ * before UDD bytes. In most cases there will be no preamble, since this
+ * will be a packet interface in direct communication with another packet
+ * interface (MAC to MAC) without a PHY involved.
+ *
+ * (4) We can still do address filtering and control packet filtering if the
+ * user desires.
+ *
+ * (5) UDD_SKP must be 0 in half-duplex operation unless
+ * GMX_RX_FRM_CTL[PRE_CHK] is clear. If GMX_RX_FRM_CTL[PRE_CHK] is clear,
+ * then UDD_SKP will normally be 8.
+ *
+ * (6) In all cases, the UDD bytes will be sent down the packet interface as
+ * part of the packet. The UDD bytes are never stripped from the actual
+ * packet.
+ *
+ * (7) If LEN != 0, then GMX_RX_FRM_CHK[LENERR] will be disabled and GMX_RX_INT_REG[LENERR] will be zero
+ */
+union cvmx_gmxx_rxx_udd_skp {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_udd_skp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t fcssel : 1; /**< Include the skip bytes in the FCS calculation
+ 0 = all skip bytes are included in FCS
+ 1 = the skip bytes are not included in FCS
+ When GMX_TX_XAUI_CTL[HG_EN] is set, FCSSEL must
+ be zero. */
+ uint64_t reserved_7_7 : 1;
+ uint64_t len : 7; /**< Amount of User-defined data before the start of
+ the L2 data. Zero means L2 comes first.
+ Max value is 64.
+ When GMX_TX_XAUI_CTL[HG_EN] is set, LEN must be
+ set to 12 or 16 (depending on HiGig header size)
+ to account for the HiGig header. LEN=12 selects
+ HiGig/HiGig+, and LEN=16 selects HiGig2. */
+#else
+ uint64_t len : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t fcssel : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_udd_skp_s cn30xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn31xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn38xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn38xxp2;
+ struct cvmx_gmxx_rxx_udd_skp_s cn50xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn52xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn52xxp1;
+ struct cvmx_gmxx_rxx_udd_skp_s cn56xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn56xxp1;
+ struct cvmx_gmxx_rxx_udd_skp_s cn58xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn58xxp1;
+ struct cvmx_gmxx_rxx_udd_skp_s cn61xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn63xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn63xxp1;
+ struct cvmx_gmxx_rxx_udd_skp_s cn66xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn68xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn68xxp1;
+ struct cvmx_gmxx_rxx_udd_skp_s cnf71xx;
+};
+typedef union cvmx_gmxx_rxx_udd_skp cvmx_gmxx_rxx_udd_skp_t;
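+
+/* Illustrative sketch (not in the imported SDK sources): programming UDD_SKP
+ * for HiGig2 per the notes above - LEN=16 selects HiGig2, and FCSSEL must be
+ * zero when GMX_TX_XAUI_CTL[HG_EN] is set. Assumes cvmx_read_csr(),
+ * cvmx_write_csr() and the CVMX_GMXX_RXX_UDD_SKP(offset, block_id) macro. */
+static inline void example_configure_higig2_skip(int port, int interface)
+{
+    cvmx_gmxx_rxx_udd_skp_t udd;
+
+    udd.u64 = cvmx_read_csr(CVMX_GMXX_RXX_UDD_SKP(port, interface));
+    udd.s.len = 16;   /* 16B HiGig2 header precedes the L2 data */
+    udd.s.fcssel = 0; /* HG_EN requires FCSSEL == 0 */
+    cvmx_write_csr(CVMX_GMXX_RXX_UDD_SKP(port, interface), udd.u64);
+}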
+
+/**
+ * cvmx_gmx#_rx_bp_drop#
+ *
+ * GMX_RX_BP_DROP = FIFO mark for packet drop
+ *
+ *
+ * Notes:
+ * The actual watermark is dynamic with respect to the GMX_RX_PRTS
+ * register. The GMX_RX_PRTS controls the depth of the port's
+ * FIFO, so as ports are added or removed, the drop point may change.
+ *
+ * In XAUI mode prt0 is used for checking.
+ */
+union cvmx_gmxx_rx_bp_dropx {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_bp_dropx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t mark : 6; /**< Number of 8B ticks to reserve in the RX FIFO.
+ When the FIFO exceeds this count, packets will
+ be dropped and not buffered.
+ MARK should typically be programmed to ports+1.
+ Failure to program correctly can lead to system
+ instability. */
+#else
+ uint64_t mark : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_gmxx_rx_bp_dropx_s cn30xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn31xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn38xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn38xxp2;
+ struct cvmx_gmxx_rx_bp_dropx_s cn50xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn52xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn52xxp1;
+ struct cvmx_gmxx_rx_bp_dropx_s cn56xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn56xxp1;
+ struct cvmx_gmxx_rx_bp_dropx_s cn58xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn58xxp1;
+ struct cvmx_gmxx_rx_bp_dropx_s cn61xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn63xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn63xxp1;
+ struct cvmx_gmxx_rx_bp_dropx_s cn66xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn68xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn68xxp1;
+ struct cvmx_gmxx_rx_bp_dropx_s cnf71xx;
+};
+typedef union cvmx_gmxx_rx_bp_dropx cvmx_gmxx_rx_bp_dropx_t;
+
+/**
+ * cvmx_gmx#_rx_bp_off#
+ *
+ * GMX_RX_BP_OFF = Lowater mark to deassert backpressure
+ *
+ *
+ * Notes:
+ * In XAUI mode, prt0 is used for checking.
+ *
+ */
+union cvmx_gmxx_rx_bp_offx {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_bp_offx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t mark : 6; /**< Water mark (8B ticks) to deassert backpressure */
+#else
+ uint64_t mark : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_gmxx_rx_bp_offx_s cn30xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn31xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn38xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn38xxp2;
+ struct cvmx_gmxx_rx_bp_offx_s cn50xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn52xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn52xxp1;
+ struct cvmx_gmxx_rx_bp_offx_s cn56xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn56xxp1;
+ struct cvmx_gmxx_rx_bp_offx_s cn58xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn58xxp1;
+ struct cvmx_gmxx_rx_bp_offx_s cn61xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn63xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn63xxp1;
+ struct cvmx_gmxx_rx_bp_offx_s cn66xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn68xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn68xxp1;
+ struct cvmx_gmxx_rx_bp_offx_s cnf71xx;
+};
+typedef union cvmx_gmxx_rx_bp_offx cvmx_gmxx_rx_bp_offx_t;
+
+/**
+ * cvmx_gmx#_rx_bp_on#
+ *
+ * GMX_RX_BP_ON = Hiwater mark for port/interface backpressure
+ *
+ *
+ * Notes:
+ * In XAUI mode, prt0 is used for checking.
+ *
+ */
+union cvmx_gmxx_rx_bp_onx {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_bp_onx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t mark : 11; /**< Hiwater mark (8B ticks) for backpressure.
+ Each register is for an individual port. In XAUI
+ mode, prt0 is used for the unified RX FIFO
+ GMX_RX_BP_ON must satisfy
+ BP_OFF <= BP_ON < (FIFO_SIZE - BP_DROP)
+ A value of zero will immediately assert back
+ pressure. */
+#else
+ uint64_t mark : 11;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t mark : 9; /**< Hiwater mark (8B ticks) for backpressure.
+ In RGMII mode, the backpressure is given per
+ port. In Spi4 mode, the backpressure is for the
+ entire interface. GMX_RX_BP_ON must satisfy
+ BP_OFF <= BP_ON < (FIFO_SIZE - BP_DROP)
+ The reset value is half the FIFO.
+ Reset value RGMII mode = 0x40 (512bytes)
+ Reset value Spi4 mode = 0x100 (2048bytes)
+ A value of zero will immediately assert back
+ pressure. */
+#else
+ uint64_t mark : 9;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn31xx;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn38xx;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn38xxp2;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn50xx;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn52xx;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn52xxp1;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn56xx;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn56xxp1;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn58xx;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn58xxp1;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn61xx;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn63xx;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn63xxp1;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cn66xx;
+ struct cvmx_gmxx_rx_bp_onx_s cn68xx;
+ struct cvmx_gmxx_rx_bp_onx_s cn68xxp1;
+ struct cvmx_gmxx_rx_bp_onx_cn30xx cnf71xx;
+};
+typedef union cvmx_gmxx_rx_bp_onx cvmx_gmxx_rx_bp_onx_t;
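+
+/* Illustrative sketch (not in the imported SDK sources): checking the
+ * watermark rule quoted above, BP_OFF <= BP_ON < (FIFO_SIZE - BP_DROP),
+ * with every argument in 8-byte ticks. The FIFO size is supplied by the
+ * caller (e.g. 512 ticks for a single SGMII port, see GMX_RX_PRTS). */
+static inline int example_bp_marks_valid(uint64_t bp_off, uint64_t bp_on,
+                                         uint64_t bp_drop, uint64_t fifo_ticks)
+{
+    return (bp_off <= bp_on) && (bp_on < fifo_ticks - bp_drop);
+}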
+
+/**
+ * cvmx_gmx#_rx_hg2_status
+ *
+ * ** HG2 message CSRs
+ *
+ */
+union cvmx_gmxx_rx_hg2_status {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_hg2_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t phtim2go : 16; /**< Physical time to go for removal of physical link
+ pause. Initial value from received HiGig2 msg pkt
+ Non-zero only when physical back pressure active */
+ uint64_t xof : 16; /**< 16 bit xof back pressure vector from HiGig2 msg pkt
+ or from CBFC packets.
+ Non-zero only when logical back pressure is active
+ All bits will be 0 when LGTIM2GO=0 */
+ uint64_t lgtim2go : 16; /**< Logical packet flow back pressure time remaining
+ Initial value set from xof time field of HiGig2
+ message packet received or a function of the
+ enabled and current timers for CBFC packets.
+ Non-zero only when logical back pressure is active */
+#else
+ uint64_t lgtim2go : 16;
+ uint64_t xof : 16;
+ uint64_t phtim2go : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_rx_hg2_status_s cn52xx;
+ struct cvmx_gmxx_rx_hg2_status_s cn52xxp1;
+ struct cvmx_gmxx_rx_hg2_status_s cn56xx;
+ struct cvmx_gmxx_rx_hg2_status_s cn61xx;
+ struct cvmx_gmxx_rx_hg2_status_s cn63xx;
+ struct cvmx_gmxx_rx_hg2_status_s cn63xxp1;
+ struct cvmx_gmxx_rx_hg2_status_s cn66xx;
+ struct cvmx_gmxx_rx_hg2_status_s cn68xx;
+ struct cvmx_gmxx_rx_hg2_status_s cn68xxp1;
+ struct cvmx_gmxx_rx_hg2_status_s cnf71xx;
+};
+typedef union cvmx_gmxx_rx_hg2_status cvmx_gmxx_rx_hg2_status_t;
+
+/**
+ * cvmx_gmx#_rx_pass_en
+ *
+ * GMX_RX_PASS_EN = Packet pass through mode enable
+ *
+ * When both Octane ports are running in Spi4 mode, packets can be directly
+ * passed from one SPX interface to the other without being processed by the
+ * core or PP's. The register has one bit for each port to enable the pass
+ * through feature.
+ *
+ * Notes:
+ * (1) Can only be used in dual Spi4 configs
+ *
+ * (2) The mapped pass through output port cannot be the destination port for
+ * any Octane core traffic.
+ */
+union cvmx_gmxx_rx_pass_en {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_pass_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t en : 16; /**< Which ports to configure in pass through mode */
+#else
+ uint64_t en : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_rx_pass_en_s cn38xx;
+ struct cvmx_gmxx_rx_pass_en_s cn38xxp2;
+ struct cvmx_gmxx_rx_pass_en_s cn58xx;
+ struct cvmx_gmxx_rx_pass_en_s cn58xxp1;
+};
+typedef union cvmx_gmxx_rx_pass_en cvmx_gmxx_rx_pass_en_t;
+
+/**
+ * cvmx_gmx#_rx_pass_map#
+ *
+ * GMX_RX_PASS_MAP = Packet pass through port map
+ *
+ */
+union cvmx_gmxx_rx_pass_mapx {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_pass_mapx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t dprt : 4; /**< Destination port to map Spi pass through traffic */
+#else
+ uint64_t dprt : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_gmxx_rx_pass_mapx_s cn38xx;
+ struct cvmx_gmxx_rx_pass_mapx_s cn38xxp2;
+ struct cvmx_gmxx_rx_pass_mapx_s cn58xx;
+ struct cvmx_gmxx_rx_pass_mapx_s cn58xxp1;
+};
+typedef union cvmx_gmxx_rx_pass_mapx cvmx_gmxx_rx_pass_mapx_t;
+
+/**
+ * cvmx_gmx#_rx_prt_info
+ *
+ * GMX_RX_PRT_INFO = Report the RX status for port
+ *
+ *
+ * Notes:
+ * In XAUI mode, only the lsb (corresponding to port0) of DROP and COMMIT are used.
+ *
+ */
+union cvmx_gmxx_rx_prt_info {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_prt_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t drop : 16; /**< Per port indication that data was dropped */
+ uint64_t commit : 16; /**< Per port indication that SOP was accepted */
+#else
+ uint64_t commit : 16;
+ uint64_t drop : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_rx_prt_info_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t drop : 3; /**< Per port indication that data was dropped */
+ uint64_t reserved_3_15 : 13;
+ uint64_t commit : 3; /**< Per port indication that SOP was accepted */
+#else
+ uint64_t commit : 3;
+ uint64_t reserved_3_15 : 13;
+ uint64_t drop : 3;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_rx_prt_info_cn30xx cn31xx;
+ struct cvmx_gmxx_rx_prt_info_s cn38xx;
+ struct cvmx_gmxx_rx_prt_info_cn30xx cn50xx;
+ struct cvmx_gmxx_rx_prt_info_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t drop : 4; /**< Per port indication that data was dropped */
+ uint64_t reserved_4_15 : 12;
+ uint64_t commit : 4; /**< Per port indication that SOP was accepted */
+#else
+ uint64_t commit : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t drop : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn52xxp1;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn56xx;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn56xxp1;
+ struct cvmx_gmxx_rx_prt_info_s cn58xx;
+ struct cvmx_gmxx_rx_prt_info_s cn58xxp1;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn61xx;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn63xx;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn63xxp1;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn66xx;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn68xx;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn68xxp1;
+ struct cvmx_gmxx_rx_prt_info_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t drop : 2; /**< Per port indication that data was dropped */
+ uint64_t reserved_2_15 : 14;
+ uint64_t commit : 2; /**< Per port indication that SOP was accepted */
+#else
+ uint64_t commit : 2;
+ uint64_t reserved_2_15 : 14;
+ uint64_t drop : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_gmxx_rx_prt_info cvmx_gmxx_rx_prt_info_t;
+
+/**
+ * cvmx_gmx#_rx_prts
+ *
+ * GMX_RX_PRTS = Number of FIFOs to carve the RX buffer into
+ *
+ *
+ * Notes:
+ * GMX_RX_PRTS[PRTS] must be set to '1' in XAUI mode.
+ *
+ */
+union cvmx_gmxx_rx_prts {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_prts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t prts : 3; /**< In SGMII/1000Base-X mode, the RX buffer can be
+ carved into several logical buffers depending on
+ the number of implemented ports.
+ 0 or 1 port = 512ticks / 4096bytes
+ 2 ports = 256ticks / 2048bytes
+ 3 or 4 ports = 128ticks / 1024bytes */
+#else
+ uint64_t prts : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_gmxx_rx_prts_s cn30xx;
+ struct cvmx_gmxx_rx_prts_s cn31xx;
+ struct cvmx_gmxx_rx_prts_s cn38xx;
+ struct cvmx_gmxx_rx_prts_s cn38xxp2;
+ struct cvmx_gmxx_rx_prts_s cn50xx;
+ struct cvmx_gmxx_rx_prts_s cn52xx;
+ struct cvmx_gmxx_rx_prts_s cn52xxp1;
+ struct cvmx_gmxx_rx_prts_s cn56xx;
+ struct cvmx_gmxx_rx_prts_s cn56xxp1;
+ struct cvmx_gmxx_rx_prts_s cn58xx;
+ struct cvmx_gmxx_rx_prts_s cn58xxp1;
+ struct cvmx_gmxx_rx_prts_s cn61xx;
+ struct cvmx_gmxx_rx_prts_s cn63xx;
+ struct cvmx_gmxx_rx_prts_s cn63xxp1;
+ struct cvmx_gmxx_rx_prts_s cn66xx;
+ struct cvmx_gmxx_rx_prts_s cn68xx;
+ struct cvmx_gmxx_rx_prts_s cn68xxp1;
+ struct cvmx_gmxx_rx_prts_s cnf71xx;
+};
+typedef union cvmx_gmxx_rx_prts cvmx_gmxx_rx_prts_t;
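+
+/* Illustrative sketch (not in the imported SDK sources): the per-port RX
+ * buffer carving implied by the PRTS description above, in 8-byte ticks. */
+static inline unsigned example_rx_fifo_ticks_per_port(unsigned prts)
+{
+    if (prts <= 1)
+        return 512; /* 4096 bytes */
+    if (prts == 2)
+        return 256; /* 2048 bytes */
+    return 128;     /* 1024 bytes for 3 or 4 ports */
+}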
+
+/**
+ * cvmx_gmx#_rx_tx_status
+ *
+ * GMX_RX_TX_STATUS = GMX RX/TX Status
+ *
+ */
+union cvmx_gmxx_rx_tx_status {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_tx_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t tx : 3; /**< Transmit data since last read */
+ uint64_t reserved_3_3 : 1;
+ uint64_t rx : 3; /**< Receive data since last read */
+#else
+ uint64_t rx : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t tx : 3;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_gmxx_rx_tx_status_s cn30xx;
+ struct cvmx_gmxx_rx_tx_status_s cn31xx;
+ struct cvmx_gmxx_rx_tx_status_s cn50xx;
+};
+typedef union cvmx_gmxx_rx_tx_status cvmx_gmxx_rx_tx_status_t;
+
+/**
+ * cvmx_gmx#_rx_xaui_bad_col
+ */
+union cvmx_gmxx_rx_xaui_bad_col {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_xaui_bad_col_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t val : 1; /**< Set when GMX_RX_INT_REG[PCTERR] is set.
+ (XAUI mode only) */
+ uint64_t state : 3; /**< When GMX_RX_INT_REG[PCTERR] is set, STATE will
+ contain the receive state at the time of the
+ error.
+ (XAUI mode only) */
+ uint64_t lane_rxc : 4; /**< When GMX_RX_INT_REG[PCTERR] is set, LANE_RXC will
+ contain the XAUI column at the time of the error.
+ (XAUI mode only) */
+ uint64_t lane_rxd : 32; /**< When GMX_RX_INT_REG[PCTERR] is set, LANE_RXD will
+ contain the XAUI column at the time of the error.
+ (XAUI mode only) */
+#else
+ uint64_t lane_rxd : 32;
+ uint64_t lane_rxc : 4;
+ uint64_t state : 3;
+ uint64_t val : 1;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn52xx;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn52xxp1;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn56xx;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn56xxp1;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn61xx;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn63xx;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn63xxp1;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn66xx;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn68xx;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn68xxp1;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cnf71xx;
+};
+typedef union cvmx_gmxx_rx_xaui_bad_col cvmx_gmxx_rx_xaui_bad_col_t;
+
+/**
+ * cvmx_gmx#_rx_xaui_ctl
+ */
+union cvmx_gmxx_rx_xaui_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_xaui_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t status : 2; /**< Link Status
+ 0=Link OK
+ 1=Local Fault
+ 2=Remote Fault
+ 3=Reserved
+ (XAUI mode only) */
+#else
+ uint64_t status : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn52xx;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn52xxp1;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn56xx;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn56xxp1;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn61xx;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn63xx;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn63xxp1;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn66xx;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn68xx;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn68xxp1;
+ struct cvmx_gmxx_rx_xaui_ctl_s cnf71xx;
+};
+typedef union cvmx_gmxx_rx_xaui_ctl cvmx_gmxx_rx_xaui_ctl_t;
+
+/**
+ * cvmx_gmx#_rxaui_ctl
+ */
+union cvmx_gmxx_rxaui_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxaui_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t disparity : 1; /**< Selects which disparity calculation to use when
+ combining or splitting the RXAUI lanes.
+ 0=Interleave lanes before PCS layer
+ As described in the Dune Networks/Broadcom
+ RXAUI v2.1 specification.
+ (obeys 6.25GHz SERDES disparity)
+ 1=Interleave lanes after PCS layer
+ As described in the Marvell RXAUI Interface
+ specification.
+ (does not obey 6.25GHz SERDES disparity)
+ (RXAUI mode only) */
+#else
+ uint64_t disparity : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_gmxx_rxaui_ctl_s cn68xx;
+ struct cvmx_gmxx_rxaui_ctl_s cn68xxp1;
+};
+typedef union cvmx_gmxx_rxaui_ctl cvmx_gmxx_rxaui_ctl_t;
+
+/**
+ * cvmx_gmx#_smac#
+ *
+ * GMX_SMAC = Packet SMAC
+ *
+ */
+union cvmx_gmxx_smacx {
+ uint64_t u64;
+ struct cvmx_gmxx_smacx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t smac : 48; /**< The SMAC field is used for generating and
+ accepting Control Pause packets */
+#else
+ uint64_t smac : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_smacx_s cn30xx;
+ struct cvmx_gmxx_smacx_s cn31xx;
+ struct cvmx_gmxx_smacx_s cn38xx;
+ struct cvmx_gmxx_smacx_s cn38xxp2;
+ struct cvmx_gmxx_smacx_s cn50xx;
+ struct cvmx_gmxx_smacx_s cn52xx;
+ struct cvmx_gmxx_smacx_s cn52xxp1;
+ struct cvmx_gmxx_smacx_s cn56xx;
+ struct cvmx_gmxx_smacx_s cn56xxp1;
+ struct cvmx_gmxx_smacx_s cn58xx;
+ struct cvmx_gmxx_smacx_s cn58xxp1;
+ struct cvmx_gmxx_smacx_s cn61xx;
+ struct cvmx_gmxx_smacx_s cn63xx;
+ struct cvmx_gmxx_smacx_s cn63xxp1;
+ struct cvmx_gmxx_smacx_s cn66xx;
+ struct cvmx_gmxx_smacx_s cn68xx;
+ struct cvmx_gmxx_smacx_s cn68xxp1;
+ struct cvmx_gmxx_smacx_s cnf71xx;
+};
+typedef union cvmx_gmxx_smacx cvmx_gmxx_smacx_t;
+
+/**
+ * cvmx_gmx#_soft_bist
+ *
+ * GMX_SOFT_BIST = Software BIST Control
+ *
+ */
+union cvmx_gmxx_soft_bist {
+ uint64_t u64;
+ struct cvmx_gmxx_soft_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t start_bist : 1; /**< Run BIST on all memories in the XAUI/RXAUI
+ CLK domain */
+ uint64_t clear_bist : 1; /**< Choose between full BIST and CLEAR bist
+ 0=Run full BIST
+ 1=Only run clear BIST */
+#else
+ uint64_t clear_bist : 1;
+ uint64_t start_bist : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_gmxx_soft_bist_s cn63xx;
+ struct cvmx_gmxx_soft_bist_s cn63xxp1;
+ struct cvmx_gmxx_soft_bist_s cn66xx;
+ struct cvmx_gmxx_soft_bist_s cn68xx;
+ struct cvmx_gmxx_soft_bist_s cn68xxp1;
+};
+typedef union cvmx_gmxx_soft_bist cvmx_gmxx_soft_bist_t;
+
+/**
+ * cvmx_gmx#_stat_bp
+ *
+ * GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation
+ *
+ *
+ * Notes:
+ * It has no relationship with the TX FIFO per se. The TX engine sends packets
+ * from PKO and upon completion, sends a command to the TX stats block for an
+ * update based on the packet size. The stats operation can take a few cycles -
+ * normally not enough to be visible considering the 64B min packet size that
+ * is the Ethernet convention.
+ *
+ * In the rare case in which SW attempts to schedule very small packets, or the
+ * sclk (6xxx) is running very slowly, the stats updates may not happen in
+ * real time and can back up the TX engine.
+ *
+ * This counter is the number of cycles in which the TX engine was stalled. In
+ * normal operation, it should always be zeros.
+ */
+union cvmx_gmxx_stat_bp {
+ uint64_t u64;
+ struct cvmx_gmxx_stat_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t bp : 1; /**< Current TX stats BP state
+ When the TX stats machine cannot update the stats
+ registers quickly enough, the machine has the
+ ability to BP TX datapath. This is a rare event
+ and will not occur in normal operation.
+ 0 = no backpressure is applied
+ 1 = backpressure is applied to TX datapath to
+ allow stat update operations to complete */
+ uint64_t cnt : 16; /**< Number of cycles that BP has been asserted
+ Saturating counter */
+#else
+ uint64_t cnt : 16;
+ uint64_t bp : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_gmxx_stat_bp_s cn30xx;
+ struct cvmx_gmxx_stat_bp_s cn31xx;
+ struct cvmx_gmxx_stat_bp_s cn38xx;
+ struct cvmx_gmxx_stat_bp_s cn38xxp2;
+ struct cvmx_gmxx_stat_bp_s cn50xx;
+ struct cvmx_gmxx_stat_bp_s cn52xx;
+ struct cvmx_gmxx_stat_bp_s cn52xxp1;
+ struct cvmx_gmxx_stat_bp_s cn56xx;
+ struct cvmx_gmxx_stat_bp_s cn56xxp1;
+ struct cvmx_gmxx_stat_bp_s cn58xx;
+ struct cvmx_gmxx_stat_bp_s cn58xxp1;
+ struct cvmx_gmxx_stat_bp_s cn61xx;
+ struct cvmx_gmxx_stat_bp_s cn63xx;
+ struct cvmx_gmxx_stat_bp_s cn63xxp1;
+ struct cvmx_gmxx_stat_bp_s cn66xx;
+ struct cvmx_gmxx_stat_bp_s cn68xx;
+ struct cvmx_gmxx_stat_bp_s cn68xxp1;
+ struct cvmx_gmxx_stat_bp_s cnf71xx;
+};
+typedef union cvmx_gmxx_stat_bp cvmx_gmxx_stat_bp_t;
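+
+/* Illustrative sketch (not in the imported SDK sources): sampling GMX_STAT_BP.
+ * A non-zero CNT means stats updates stalled the TX engine at some point; the
+ * counter saturates rather than wrapping. Assumes cvmx_read_csr() and the
+ * CVMX_GMXX_STAT_BP(block_id) address macro defined earlier in this header. */
+static inline uint16_t example_sample_tx_stat_bp(int interface)
+{
+    cvmx_gmxx_stat_bp_t bp;
+
+    bp.u64 = cvmx_read_csr(CVMX_GMXX_STAT_BP(interface));
+    return bp.s.cnt; /* expected to be zero in normal operation */
+}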
+
+/**
+ * cvmx_gmx#_tb_reg
+ *
+ * DON'T PUT IN HRM*
+ *
+ */
+union cvmx_gmxx_tb_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_tb_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t wr_magic : 1; /**< Enter stats model magic mode */
+#else
+ uint64_t wr_magic : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_gmxx_tb_reg_s cn61xx;
+ struct cvmx_gmxx_tb_reg_s cn66xx;
+ struct cvmx_gmxx_tb_reg_s cn68xx;
+ struct cvmx_gmxx_tb_reg_s cnf71xx;
+};
+typedef union cvmx_gmxx_tb_reg cvmx_gmxx_tb_reg_t;
+
+/**
+ * cvmx_gmx#_tx#_append
+ *
+ * GMX_TX_APPEND = Packet TX Append Control
+ *
+ */
+union cvmx_gmxx_txx_append {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_append_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t force_fcs : 1; /**< Append the Ethernet FCS on each pause packet
+ when FCS is clear. Pause packets are normally
+ padded to 60 bytes. If GMX_TX_MIN_PKT[MIN_SIZE]
+ exceeds 59, then FORCE_FCS will not be used. */
+ uint64_t fcs : 1; /**< Append the Ethernet FCS on each packet */
+ uint64_t pad : 1; /**< Append PAD bytes so the frame meets the min size */
+ uint64_t preamble : 1; /**< Prepend the Ethernet preamble on each transfer
+ When GMX_TX_XAUI_CTL[HG_EN] is set, PREAMBLE
+ must be zero. */
+#else
+ uint64_t preamble : 1;
+ uint64_t pad : 1;
+ uint64_t fcs : 1;
+ uint64_t force_fcs : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_append_s cn30xx;
+ struct cvmx_gmxx_txx_append_s cn31xx;
+ struct cvmx_gmxx_txx_append_s cn38xx;
+ struct cvmx_gmxx_txx_append_s cn38xxp2;
+ struct cvmx_gmxx_txx_append_s cn50xx;
+ struct cvmx_gmxx_txx_append_s cn52xx;
+ struct cvmx_gmxx_txx_append_s cn52xxp1;
+ struct cvmx_gmxx_txx_append_s cn56xx;
+ struct cvmx_gmxx_txx_append_s cn56xxp1;
+ struct cvmx_gmxx_txx_append_s cn58xx;
+ struct cvmx_gmxx_txx_append_s cn58xxp1;
+ struct cvmx_gmxx_txx_append_s cn61xx;
+ struct cvmx_gmxx_txx_append_s cn63xx;
+ struct cvmx_gmxx_txx_append_s cn63xxp1;
+ struct cvmx_gmxx_txx_append_s cn66xx;
+ struct cvmx_gmxx_txx_append_s cn68xx;
+ struct cvmx_gmxx_txx_append_s cn68xxp1;
+ struct cvmx_gmxx_txx_append_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_append cvmx_gmxx_txx_append_t;
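+
+/* Illustrative sketch (not in the imported SDK sources): a typical Ethernet
+ * TX append setup - pad to the minimum size, append the FCS, and prepend the
+ * preamble. PREAMBLE must instead be zero when GMX_TX_XAUI_CTL[HG_EN] is set.
+ * Assumes the CVMX_GMXX_TXX_APPEND(offset, block_id) address macro. */
+static inline void example_enable_tx_append(int port, int interface)
+{
+    cvmx_gmxx_txx_append_t append;
+
+    append.u64 = cvmx_read_csr(CVMX_GMXX_TXX_APPEND(port, interface));
+    append.s.pad = 1;      /* pad up to GMX_TX_MIN_PKT[MIN_SIZE] */
+    append.s.fcs = 1;      /* append the Ethernet FCS */
+    append.s.preamble = 1; /* prepend the preamble (non-HiGig modes) */
+    cvmx_write_csr(CVMX_GMXX_TXX_APPEND(port, interface), append.u64);
+}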
+
+/**
+ * cvmx_gmx#_tx#_burst
+ *
+ * GMX_TX_BURST = Packet TX Burst Counter
+ *
+ */
+union cvmx_gmxx_txx_burst {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_burst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t burst : 16; /**< Burst (refer to 802.3 to set correctly)
+ Only valid for 1000Mbs half-duplex operation
+ halfdup / 1000Mbs: 0x2000
+ all other modes: 0x0
+ (SGMII/1000Base-X only) */
+#else
+ uint64_t burst : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_burst_s cn30xx;
+ struct cvmx_gmxx_txx_burst_s cn31xx;
+ struct cvmx_gmxx_txx_burst_s cn38xx;
+ struct cvmx_gmxx_txx_burst_s cn38xxp2;
+ struct cvmx_gmxx_txx_burst_s cn50xx;
+ struct cvmx_gmxx_txx_burst_s cn52xx;
+ struct cvmx_gmxx_txx_burst_s cn52xxp1;
+ struct cvmx_gmxx_txx_burst_s cn56xx;
+ struct cvmx_gmxx_txx_burst_s cn56xxp1;
+ struct cvmx_gmxx_txx_burst_s cn58xx;
+ struct cvmx_gmxx_txx_burst_s cn58xxp1;
+ struct cvmx_gmxx_txx_burst_s cn61xx;
+ struct cvmx_gmxx_txx_burst_s cn63xx;
+ struct cvmx_gmxx_txx_burst_s cn63xxp1;
+ struct cvmx_gmxx_txx_burst_s cn66xx;
+ struct cvmx_gmxx_txx_burst_s cn68xx;
+ struct cvmx_gmxx_txx_burst_s cn68xxp1;
+ struct cvmx_gmxx_txx_burst_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_burst cvmx_gmxx_txx_burst_t;
+
+/**
+ * cvmx_gmx#_tx#_cbfc_xoff
+ */
+union cvmx_gmxx_txx_cbfc_xoff {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_cbfc_xoff_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t xoff : 16; /**< Which ports to backpressure
+ Do not write in HiGig2 mode i.e. when
+ GMX_TX_XAUI_CTL[HG_EN]=1 and
+ GMX_RX_UDD_SKP[LEN]=16. */
+#else
+ uint64_t xoff : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_cbfc_xoff_s cn52xx;
+ struct cvmx_gmxx_txx_cbfc_xoff_s cn56xx;
+ struct cvmx_gmxx_txx_cbfc_xoff_s cn61xx;
+ struct cvmx_gmxx_txx_cbfc_xoff_s cn63xx;
+ struct cvmx_gmxx_txx_cbfc_xoff_s cn63xxp1;
+ struct cvmx_gmxx_txx_cbfc_xoff_s cn66xx;
+ struct cvmx_gmxx_txx_cbfc_xoff_s cn68xx;
+ struct cvmx_gmxx_txx_cbfc_xoff_s cn68xxp1;
+ struct cvmx_gmxx_txx_cbfc_xoff_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_cbfc_xoff cvmx_gmxx_txx_cbfc_xoff_t;
+
+/**
+ * cvmx_gmx#_tx#_cbfc_xon
+ */
+union cvmx_gmxx_txx_cbfc_xon {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_cbfc_xon_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t xon : 16; /**< Which ports to stop backpressure
+ Do not write in HiGig2 mode i.e. when
+ GMX_TX_XAUI_CTL[HG_EN]=1 and
+ GMX_RX_UDD_SKP[LEN]=16. */
+#else
+ uint64_t xon : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_cbfc_xon_s cn52xx;
+ struct cvmx_gmxx_txx_cbfc_xon_s cn56xx;
+ struct cvmx_gmxx_txx_cbfc_xon_s cn61xx;
+ struct cvmx_gmxx_txx_cbfc_xon_s cn63xx;
+ struct cvmx_gmxx_txx_cbfc_xon_s cn63xxp1;
+ struct cvmx_gmxx_txx_cbfc_xon_s cn66xx;
+ struct cvmx_gmxx_txx_cbfc_xon_s cn68xx;
+ struct cvmx_gmxx_txx_cbfc_xon_s cn68xxp1;
+ struct cvmx_gmxx_txx_cbfc_xon_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_cbfc_xon cvmx_gmxx_txx_cbfc_xon_t;
+
+/**
+ * cvmx_gmx#_tx#_clk
+ *
+ * Per Port
+ *
+ *
+ * GMX_TX_CLK = RGMII TX Clock Generation Register
+ *
+ * Notes:
+ * Programming Restrictions:
+ * (1) In RGMII mode, if GMX_PRT_CFG[SPEED]==0, then CLK_CNT must be > 1.
+ * (2) In MII mode, CLK_CNT == 1
+ * (3) In RGMII or GMII mode, if CLK_CNT==0, Octeon will not generate a tx clock.
+ *
+ * RGMII Example:
+ * Given a 125MHz PLL reference clock...
+ * CLK_CNT == 1 ==> 125.0MHz TXC clock period (8ns* 1)
+ * CLK_CNT == 5 ==> 25.0MHz TXC clock period (8ns* 5)
+ * CLK_CNT == 50 ==> 2.5MHz TXC clock period (8ns*50)
+ */
+union cvmx_gmxx_txx_clk {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_clk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t clk_cnt : 6; /**< Controls the RGMII TXC frequency
+ When PLL is used, TXC(phase) =
+ spi4_tx_pll_ref_clk(period)/2*CLK_CNT
+ When PLL bypass is used, TXC(phase) =
+ spi4_tx_pll_ref_clk(period)*2*CLK_CNT
+ NOTE: CLK_CNT==0 will not generate any clock;
+ CLK_CNT must be > 1 if GMX_PRT_CFG[SPEED]==0 */
+#else
+ uint64_t clk_cnt : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_clk_s cn30xx;
+ struct cvmx_gmxx_txx_clk_s cn31xx;
+ struct cvmx_gmxx_txx_clk_s cn38xx;
+ struct cvmx_gmxx_txx_clk_s cn38xxp2;
+ struct cvmx_gmxx_txx_clk_s cn50xx;
+ struct cvmx_gmxx_txx_clk_s cn58xx;
+ struct cvmx_gmxx_txx_clk_s cn58xxp1;
+};
+typedef union cvmx_gmxx_txx_clk cvmx_gmxx_txx_clk_t;
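+
+/* Illustrative sketch (not in the imported SDK sources): deriving CLK_CNT for
+ * a target RGMII TXC frequency from the 125MHz PLL reference used in the
+ * example above (TXC period = 8ns * CLK_CNT), e.g. 25000kHz -> 5 and
+ * 2500kHz -> 50. Frequencies are in kHz so the arithmetic stays integral;
+ * txc_khz must be non-zero. */
+static inline uint64_t example_rgmii_clk_cnt(uint64_t txc_khz)
+{
+    const uint64_t ref_khz = 125000; /* 125MHz PLL reference clock */
+
+    return ref_khz / txc_khz; /* must be > 1 when GMX_PRT_CFG[SPEED]==0 */
+}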
+
+/**
+ * cvmx_gmx#_tx#_ctl
+ *
+ * GMX_TX_CTL = TX Control register
+ *
+ */
+union cvmx_gmxx_txx_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t xsdef_en : 1; /**< Enables the excessive deferral check for stats
+ and interrupts
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xscol_en : 1; /**< Enables the excessive collision check for stats
+ and interrupts
+ (SGMII/1000Base-X half-duplex only) */
+#else
+ uint64_t xscol_en : 1;
+ uint64_t xsdef_en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_ctl_s cn30xx;
+ struct cvmx_gmxx_txx_ctl_s cn31xx;
+ struct cvmx_gmxx_txx_ctl_s cn38xx;
+ struct cvmx_gmxx_txx_ctl_s cn38xxp2;
+ struct cvmx_gmxx_txx_ctl_s cn50xx;
+ struct cvmx_gmxx_txx_ctl_s cn52xx;
+ struct cvmx_gmxx_txx_ctl_s cn52xxp1;
+ struct cvmx_gmxx_txx_ctl_s cn56xx;
+ struct cvmx_gmxx_txx_ctl_s cn56xxp1;
+ struct cvmx_gmxx_txx_ctl_s cn58xx;
+ struct cvmx_gmxx_txx_ctl_s cn58xxp1;
+ struct cvmx_gmxx_txx_ctl_s cn61xx;
+ struct cvmx_gmxx_txx_ctl_s cn63xx;
+ struct cvmx_gmxx_txx_ctl_s cn63xxp1;
+ struct cvmx_gmxx_txx_ctl_s cn66xx;
+ struct cvmx_gmxx_txx_ctl_s cn68xx;
+ struct cvmx_gmxx_txx_ctl_s cn68xxp1;
+ struct cvmx_gmxx_txx_ctl_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_ctl cvmx_gmxx_txx_ctl_t;
+
+/**
+ * cvmx_gmx#_tx#_min_pkt
+ *
+ * GMX_TX_MIN_PKT = Packet TX Min Size Packet (PAD up to min size)
+ *
+ */
+union cvmx_gmxx_txx_min_pkt {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_min_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t min_size : 8; /**< Min frame in bytes before the FCS is applied
+ Padding is only appended when GMX_TX_APPEND[PAD]
+ for the corresponding port is set.
+ In SGMII mode, packets will be padded to
+ MIN_SIZE+1. The reset value will pad to 60 bytes.
+ In XAUI mode, packets will be padded to
+ MIN(252,(MIN_SIZE+1 & ~0x3))
+ When GMX_TX_XAUI_CTL[HG_EN] is set, the HiGig
+ header (12B or 16B) is normally added to the
+ packet, so MIN_SIZE should be 59+12=71B for
+ HiGig or 59+16=75B for HiGig2. */
+#else
+ uint64_t min_size : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_min_pkt_s cn30xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn31xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn38xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn38xxp2;
+ struct cvmx_gmxx_txx_min_pkt_s cn50xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn52xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn52xxp1;
+ struct cvmx_gmxx_txx_min_pkt_s cn56xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn56xxp1;
+ struct cvmx_gmxx_txx_min_pkt_s cn58xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn58xxp1;
+ struct cvmx_gmxx_txx_min_pkt_s cn61xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn63xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn63xxp1;
+ struct cvmx_gmxx_txx_min_pkt_s cn66xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn68xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn68xxp1;
+ struct cvmx_gmxx_txx_min_pkt_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_min_pkt cvmx_gmxx_txx_min_pkt_t;
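+
+/* Illustrative sketch (not in the imported SDK sources): the XAUI pad target
+ * from the MIN_SIZE description above, MIN(252, (MIN_SIZE+1) & ~0x3); in
+ * SGMII mode the target is simply MIN_SIZE+1 bytes. */
+static inline unsigned example_xaui_pad_target(unsigned min_size)
+{
+    unsigned target = (min_size + 1) & ~0x3u; /* round down to a 4B multiple */
+
+    return (target < 252) ? target : 252;
+}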
+
+/**
+ * cvmx_gmx#_tx#_pause_pkt_interval
+ *
+ * GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets will be sent
+ *
+ *
+ * Notes:
+ * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and
+ * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
+ * designer. It is suggested that TIME be much greater than INTERVAL and
+ * GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
+ * count and then when the backpressure condition is lifted, a PAUSE packet
+ * with TIME==0 will be sent indicating that Octane is ready for additional
+ * data.
+ *
+ * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is
+ * suggested that TIME and INTERVAL are programmed such that they satisfy the
+ * following rule...
+ *
+ * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
+ *
+ * where largest_pkt_size is the largest packet that the system can send
+ * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
+ * of the PAUSE packet (normally 64B).
+ */
+union cvmx_gmxx_txx_pause_pkt_interval {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t interval : 16; /**< Arbitrate for a 802.3 pause packet, HiGig2 message,
+ or CBFC pause packet every (INTERVAL*512)
+ bit-times.
+ Normally, 0 < INTERVAL < GMX_TX_PAUSE_PKT_TIME
+ INTERVAL=0, will only send a single PAUSE packet
+ for each backpressure event */
+#else
+ uint64_t interval : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn61xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn66xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn68xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn68xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_pause_pkt_interval cvmx_gmxx_txx_pause_pkt_interval_t;
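+
+/* Illustrative sketch (not in the imported SDK sources): the largest INTERVAL
+ * permitted by the rule above when GMX_TX_PAUSE_ZERO[SEND] is not used. All
+ * arguments are assumed to be in the same 512 bit-time units as TIME, so byte
+ * counts must first be scaled by the caller (bytes * 8 / 512); the note's
+ * typical values are a 1518B max packet, the standard 12B IFG, and a 64B
+ * pause packet. */
+static inline uint64_t example_max_pause_interval(uint64_t time,
+                                                  uint64_t largest_pkt,
+                                                  uint64_t ifg,
+                                                  uint64_t pause_pkt)
+{
+    return time - (largest_pkt + ifg + pause_pkt);
+}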
+
+/**
+ * cvmx_gmx#_tx#_pause_pkt_time
+ *
+ * GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field
+ *
+ *
+ * Notes:
+ * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and
+ * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
+ * designer. It is suggested that TIME be much greater than INTERVAL and
+ * GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
+ * count and then when the backpressure condition is lifted, a PAUSE packet
+ * with TIME==0 will be sent indicating that Octane is ready for additional
+ * data.
+ *
+ * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is
+ * suggested that TIME and INTERVAL are programmed such that they satisfy the
+ * following rule...
+ *
+ * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
+ *
+ * where largest_pkt_size is the largest packet that the system can send
+ * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
+ * of the PAUSE packet (normally 64B).
+ */
+union cvmx_gmxx_txx_pause_pkt_time {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_pause_pkt_time_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< The pause_time field placed in outbound 802.3 pause
+ packets, HiGig2 messages, or CBFC pause packets.
+ pause_time is in 512 bit-times
+ Normally, TIME > GMX_TX_PAUSE_PKT_INTERVAL */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn61xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn63xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn63xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn66xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn68xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn68xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_pause_pkt_time cvmx_gmxx_txx_pause_pkt_time_t;
+
+/**
+ * cvmx_gmx#_tx#_pause_togo
+ *
+ * GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure
+ *
+ */
+union cvmx_gmxx_txx_pause_togo {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_pause_togo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t msg_time : 16; /**< Amount of time remaining to backpressure
+ From the higig2 physical message pause timer
+ (only valid on port0) */
+ uint64_t time : 16; /**< Amount of time remaining to backpressure
+ From the standard 802.3 pause timer */
+#else
+ uint64_t time : 16;
+ uint64_t msg_time : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< Amount of time remaining to backpressure */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx;
+ struct cvmx_gmxx_txx_pause_togo_s cn52xx;
+ struct cvmx_gmxx_txx_pause_togo_s cn52xxp1;
+ struct cvmx_gmxx_txx_pause_togo_s cn56xx;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1;
+ struct cvmx_gmxx_txx_pause_togo_s cn61xx;
+ struct cvmx_gmxx_txx_pause_togo_s cn63xx;
+ struct cvmx_gmxx_txx_pause_togo_s cn63xxp1;
+ struct cvmx_gmxx_txx_pause_togo_s cn66xx;
+ struct cvmx_gmxx_txx_pause_togo_s cn68xx;
+ struct cvmx_gmxx_txx_pause_togo_s cn68xxp1;
+ struct cvmx_gmxx_txx_pause_togo_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_pause_togo cvmx_gmxx_txx_pause_togo_t;
+
+/**
+ * cvmx_gmx#_tx#_pause_zero
+ *
+ * GMX_TX_PAUSE_ZERO = Packet TX Send pause-zero enable
+ *
+ */
+union cvmx_gmxx_txx_pause_zero {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_pause_zero_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t send : 1; /**< When backpressure condition clear, send PAUSE
+ packet with pause_time of zero to enable the
+ channel */
+#else
+ uint64_t send : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_pause_zero_s cn30xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn31xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn38xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn38xxp2;
+ struct cvmx_gmxx_txx_pause_zero_s cn50xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn52xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn52xxp1;
+ struct cvmx_gmxx_txx_pause_zero_s cn56xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn56xxp1;
+ struct cvmx_gmxx_txx_pause_zero_s cn58xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn58xxp1;
+ struct cvmx_gmxx_txx_pause_zero_s cn61xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn63xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn63xxp1;
+ struct cvmx_gmxx_txx_pause_zero_s cn66xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn68xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn68xxp1;
+ struct cvmx_gmxx_txx_pause_zero_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_pause_zero cvmx_gmxx_txx_pause_zero_t;
+
+/**
+ * cvmx_gmx#_tx#_pipe
+ */
+union cvmx_gmxx_txx_pipe {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_pipe_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t ign_bp : 1; /**< When set, GMX will not throttle the TX machines
+ if the PIPE return FIFO fills up.
+ IGN_BP should be clear in normal operation. */
+ uint64_t reserved_21_31 : 11;
+ uint64_t nump : 5; /**< Number of pipes this port|channel supports.
+ In SGMII mode, each port binds to one pipe.
+ In XAUI/RXAUI mode, the port can bind up to 16
+ consecutive pipes.
+ SGMII mode, NUMP = 0 or 1.
+ XAUI/RXAUI mode, NUMP = 0 or 1-16.
+ 0 = Disabled */
+ uint64_t reserved_7_15 : 9;
+ uint64_t base : 7; /**< When NUMP is non-zero, indicates the base pipe
+ number this port|channel will accept.
+ This port will accept pko packets from pipes in
+ the range of:
+ BASE .. (BASE+(NUMP-1))
+ BASE and NUMP must be constrained such that
+ 1) BASE+(NUMP-1) < 127
+ 2) Each used PKO pipe must map to exactly
+ one port|channel
+ 3) The pipe ranges must be consistent with
+ the PKO configuration. */
+#else
+ uint64_t base : 7;
+ uint64_t reserved_7_15 : 9;
+ uint64_t nump : 5;
+ uint64_t reserved_21_31 : 11;
+ uint64_t ign_bp : 1;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_pipe_s cn68xx;
+ struct cvmx_gmxx_txx_pipe_s cn68xxp1;
+};
+typedef union cvmx_gmxx_txx_pipe cvmx_gmxx_txx_pipe_t;
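+
+/* Illustrative sketch (not in the imported SDK sources): the BASE/NUMP range
+ * constraint from the description above - the pipes BASE..BASE+(NUMP-1) must
+ * satisfy BASE+(NUMP-1) < 127, with NUMP limited to 1 in SGMII mode and 16
+ * in XAUI/RXAUI mode (NUMP=0 disables the port|channel). */
+static inline int example_pipe_range_valid(unsigned base, unsigned nump,
+                                           int is_xaui)
+{
+    if (nump == 0)
+        return 1; /* disabled */
+    if (nump > (is_xaui ? 16u : 1u))
+        return 0;
+    return (base + (nump - 1)) < 127;
+}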
+
+/**
+ * cvmx_gmx#_tx#_sgmii_ctl
+ */
+union cvmx_gmxx_txx_sgmii_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_sgmii_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t align : 1; /**< Align the transmission to even cycles
+
+ Recommended value is:
+ ALIGN = !GMX_TX_APPEND[PREAMBLE]
+
+ (See the Transmit Conversion to Code groups
+ section in the SGMII Interface chapter of the
+ HRM for a complete discussion)
+
+ 0 = Data can be sent on any cycle
+ In this mode, the interface will function at
+ maximum bandwidth. It is possible for the
+ TX PCS machine to drop the first byte of the TX
+ frame. When GMX_TX_APPEND[PREAMBLE] is set,
+ the first byte will be a preamble byte which
+ can be dropped to compensate for an extended
+ IPG.
+
+ 1 = Data will only be sent on even cycles.
+ In this mode, there can be bandwidth
+ implications when sending odd-byte packets as
+ the IPG can extend an extra cycle.
+ There will be no loss of data.
+
+ (SGMII/1000Base-X only) */
+#else
+ uint64_t align : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn52xx;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn52xxp1;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn56xx;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn56xxp1;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn61xx;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn63xx;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn63xxp1;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn66xx;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn68xx;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn68xxp1;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_sgmii_ctl cvmx_gmxx_txx_sgmii_ctl_t;
+
+/**
+ * cvmx_gmx#_tx#_slot
+ *
+ * GMX_TX_SLOT = Packet TX Slottime Counter
+ *
+ */
+union cvmx_gmxx_txx_slot {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_slot_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t slot : 10; /**< Slottime (refer to 802.3 to set correctly)
+ 10/100Mbs: 0x40
+ 1000Mbs: 0x200
+ (SGMII/1000Base-X only) */
+#else
+ uint64_t slot : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_slot_s cn30xx;
+ struct cvmx_gmxx_txx_slot_s cn31xx;
+ struct cvmx_gmxx_txx_slot_s cn38xx;
+ struct cvmx_gmxx_txx_slot_s cn38xxp2;
+ struct cvmx_gmxx_txx_slot_s cn50xx;
+ struct cvmx_gmxx_txx_slot_s cn52xx;
+ struct cvmx_gmxx_txx_slot_s cn52xxp1;
+ struct cvmx_gmxx_txx_slot_s cn56xx;
+ struct cvmx_gmxx_txx_slot_s cn56xxp1;
+ struct cvmx_gmxx_txx_slot_s cn58xx;
+ struct cvmx_gmxx_txx_slot_s cn58xxp1;
+ struct cvmx_gmxx_txx_slot_s cn61xx;
+ struct cvmx_gmxx_txx_slot_s cn63xx;
+ struct cvmx_gmxx_txx_slot_s cn63xxp1;
+ struct cvmx_gmxx_txx_slot_s cn66xx;
+ struct cvmx_gmxx_txx_slot_s cn68xx;
+ struct cvmx_gmxx_txx_slot_s cn68xxp1;
+ struct cvmx_gmxx_txx_slot_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_slot cvmx_gmxx_txx_slot_t;
+
+/**
+ * cvmx_gmx#_tx#_soft_pause
+ *
+ * GMX_TX_SOFT_PAUSE = Packet TX Software Pause
+ *
+ */
+union cvmx_gmxx_txx_soft_pause {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_soft_pause_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< Back off the TX bus for (TIME*512) bit-times */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_soft_pause_s cn30xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn31xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn38xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn38xxp2;
+ struct cvmx_gmxx_txx_soft_pause_s cn50xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn52xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn52xxp1;
+ struct cvmx_gmxx_txx_soft_pause_s cn56xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn56xxp1;
+ struct cvmx_gmxx_txx_soft_pause_s cn58xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn58xxp1;
+ struct cvmx_gmxx_txx_soft_pause_s cn61xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn63xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn63xxp1;
+ struct cvmx_gmxx_txx_soft_pause_s cn66xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn68xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn68xxp1;
+ struct cvmx_gmxx_txx_soft_pause_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_soft_pause cvmx_gmxx_txx_soft_pause_t;
+
+/**
+ * cvmx_gmx#_tx#_stat0
+ *
+ * GMX_TX_STAT0 = GMX_TX_STATS_XSDEF / GMX_TX_STATS_XSCOL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_txx_stat0 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t xsdef : 32; /**< Number of packets dropped (never successfully
+ sent) due to excessive deferral
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xscol : 32; /**< Number of packets dropped (never successfully
+ sent) due to excessive collision. Defined by
+ GMX_TX_COL_ATTEMPT[LIMIT].
+ (SGMII/1000Base-X half-duplex only) */
+#else
+ uint64_t xscol : 32;
+ uint64_t xsdef : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_stat0_s cn30xx;
+ struct cvmx_gmxx_txx_stat0_s cn31xx;
+ struct cvmx_gmxx_txx_stat0_s cn38xx;
+ struct cvmx_gmxx_txx_stat0_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat0_s cn50xx;
+ struct cvmx_gmxx_txx_stat0_s cn52xx;
+ struct cvmx_gmxx_txx_stat0_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat0_s cn56xx;
+ struct cvmx_gmxx_txx_stat0_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat0_s cn58xx;
+ struct cvmx_gmxx_txx_stat0_s cn58xxp1;
+ struct cvmx_gmxx_txx_stat0_s cn61xx;
+ struct cvmx_gmxx_txx_stat0_s cn63xx;
+ struct cvmx_gmxx_txx_stat0_s cn63xxp1;
+ struct cvmx_gmxx_txx_stat0_s cn66xx;
+ struct cvmx_gmxx_txx_stat0_s cn68xx;
+ struct cvmx_gmxx_txx_stat0_s cn68xxp1;
+ struct cvmx_gmxx_txx_stat0_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_stat0 cvmx_gmxx_txx_stat0_t;
+
+/**
+ * cvmx_gmx#_tx#_stat1
+ *
+ * GMX_TX_STAT1 = GMX_TX_STATS_SCOL / GMX_TX_STATS_MCOL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_txx_stat1 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t scol : 32; /**< Number of packets sent with a single collision
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t mcol : 32; /**< Number of packets sent with multiple collisions
+ but < GMX_TX_COL_ATTEMPT[LIMIT].
+ (SGMII/1000Base-X half-duplex only) */
+#else
+ uint64_t mcol : 32;
+ uint64_t scol : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_stat1_s cn30xx;
+ struct cvmx_gmxx_txx_stat1_s cn31xx;
+ struct cvmx_gmxx_txx_stat1_s cn38xx;
+ struct cvmx_gmxx_txx_stat1_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat1_s cn50xx;
+ struct cvmx_gmxx_txx_stat1_s cn52xx;
+ struct cvmx_gmxx_txx_stat1_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat1_s cn56xx;
+ struct cvmx_gmxx_txx_stat1_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat1_s cn58xx;
+ struct cvmx_gmxx_txx_stat1_s cn58xxp1;
+ struct cvmx_gmxx_txx_stat1_s cn61xx;
+ struct cvmx_gmxx_txx_stat1_s cn63xx;
+ struct cvmx_gmxx_txx_stat1_s cn63xxp1;
+ struct cvmx_gmxx_txx_stat1_s cn66xx;
+ struct cvmx_gmxx_txx_stat1_s cn68xx;
+ struct cvmx_gmxx_txx_stat1_s cn68xxp1;
+ struct cvmx_gmxx_txx_stat1_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_stat1 cvmx_gmxx_txx_stat1_t;
+
+/**
+ * cvmx_gmx#_tx#_stat2
+ *
+ * GMX_TX_STAT2 = GMX_TX_STATS_OCTS
+ *
+ *
+ * Notes:
+ * - Octet counts are the sum of all data transmitted on the wire including
+ * packet data, pad bytes, fcs bytes, pause bytes, and jam bytes. The octet
+ * counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_txx_stat2 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t octs : 48; /**< Number of total octets sent on the interface.
+ Does not count octets from frames that were
+ truncated due to collisions in halfdup mode. */
+#else
+ uint64_t octs : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_stat2_s cn30xx;
+ struct cvmx_gmxx_txx_stat2_s cn31xx;
+ struct cvmx_gmxx_txx_stat2_s cn38xx;
+ struct cvmx_gmxx_txx_stat2_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat2_s cn50xx;
+ struct cvmx_gmxx_txx_stat2_s cn52xx;
+ struct cvmx_gmxx_txx_stat2_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat2_s cn56xx;
+ struct cvmx_gmxx_txx_stat2_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat2_s cn58xx;
+ struct cvmx_gmxx_txx_stat2_s cn58xxp1;
+ struct cvmx_gmxx_txx_stat2_s cn61xx;
+ struct cvmx_gmxx_txx_stat2_s cn63xx;
+ struct cvmx_gmxx_txx_stat2_s cn63xxp1;
+ struct cvmx_gmxx_txx_stat2_s cn66xx;
+ struct cvmx_gmxx_txx_stat2_s cn68xx;
+ struct cvmx_gmxx_txx_stat2_s cn68xxp1;
+ struct cvmx_gmxx_txx_stat2_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_stat2 cvmx_gmxx_txx_stat2_t;
+
+/**
+ * cvmx_gmx#_tx#_stat3
+ *
+ * GMX_TX_STAT3 = GMX_TX_STATS_PKTS
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_txx_stat3 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pkts : 32; /**< Number of total frames sent on the interface.
+ Does not count frames that were truncated due to
+ collisions in halfdup mode. */
+#else
+ uint64_t pkts : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_stat3_s cn30xx;
+ struct cvmx_gmxx_txx_stat3_s cn31xx;
+ struct cvmx_gmxx_txx_stat3_s cn38xx;
+ struct cvmx_gmxx_txx_stat3_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat3_s cn50xx;
+ struct cvmx_gmxx_txx_stat3_s cn52xx;
+ struct cvmx_gmxx_txx_stat3_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat3_s cn56xx;
+ struct cvmx_gmxx_txx_stat3_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat3_s cn58xx;
+ struct cvmx_gmxx_txx_stat3_s cn58xxp1;
+ struct cvmx_gmxx_txx_stat3_s cn61xx;
+ struct cvmx_gmxx_txx_stat3_s cn63xx;
+ struct cvmx_gmxx_txx_stat3_s cn63xxp1;
+ struct cvmx_gmxx_txx_stat3_s cn66xx;
+ struct cvmx_gmxx_txx_stat3_s cn68xx;
+ struct cvmx_gmxx_txx_stat3_s cn68xxp1;
+ struct cvmx_gmxx_txx_stat3_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_stat3 cvmx_gmxx_txx_stat3_t;
+
+/**
+ * cvmx_gmx#_tx#_stat4
+ *
+ * GMX_TX_STAT4 = GMX_TX_STATS_HIST1 (64) / GMX_TX_STATS_HIST0 (<64)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_txx_stat4 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist1 : 32; /**< Number of packets sent with an octet count of 64. */
+ uint64_t hist0 : 32; /**< Number of packets sent with an octet count
+ of < 64. */
+#else
+ uint64_t hist0 : 32;
+ uint64_t hist1 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_stat4_s cn30xx;
+ struct cvmx_gmxx_txx_stat4_s cn31xx;
+ struct cvmx_gmxx_txx_stat4_s cn38xx;
+ struct cvmx_gmxx_txx_stat4_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat4_s cn50xx;
+ struct cvmx_gmxx_txx_stat4_s cn52xx;
+ struct cvmx_gmxx_txx_stat4_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat4_s cn56xx;
+ struct cvmx_gmxx_txx_stat4_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat4_s cn58xx;
+ struct cvmx_gmxx_txx_stat4_s cn58xxp1;
+ struct cvmx_gmxx_txx_stat4_s cn61xx;
+ struct cvmx_gmxx_txx_stat4_s cn63xx;
+ struct cvmx_gmxx_txx_stat4_s cn63xxp1;
+ struct cvmx_gmxx_txx_stat4_s cn66xx;
+ struct cvmx_gmxx_txx_stat4_s cn68xx;
+ struct cvmx_gmxx_txx_stat4_s cn68xxp1;
+ struct cvmx_gmxx_txx_stat4_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_stat4 cvmx_gmxx_txx_stat4_t;
+
+/**
+ * cvmx_gmx#_tx#_stat5
+ *
+ * GMX_TX_STAT5 = GMX_TX_STATS_HIST3 (128- 255) / GMX_TX_STATS_HIST2 (65- 127)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_txx_stat5 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist3 : 32; /**< Number of packets sent with an octet count of
+ 128 - 255. */
+ uint64_t hist2 : 32; /**< Number of packets sent with an octet count of
+ 65 - 127. */
+#else
+ uint64_t hist2 : 32;
+ uint64_t hist3 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_stat5_s cn30xx;
+ struct cvmx_gmxx_txx_stat5_s cn31xx;
+ struct cvmx_gmxx_txx_stat5_s cn38xx;
+ struct cvmx_gmxx_txx_stat5_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat5_s cn50xx;
+ struct cvmx_gmxx_txx_stat5_s cn52xx;
+ struct cvmx_gmxx_txx_stat5_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat5_s cn56xx;
+ struct cvmx_gmxx_txx_stat5_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat5_s cn58xx;
+ struct cvmx_gmxx_txx_stat5_s cn58xxp1;
+ struct cvmx_gmxx_txx_stat5_s cn61xx;
+ struct cvmx_gmxx_txx_stat5_s cn63xx;
+ struct cvmx_gmxx_txx_stat5_s cn63xxp1;
+ struct cvmx_gmxx_txx_stat5_s cn66xx;
+ struct cvmx_gmxx_txx_stat5_s cn68xx;
+ struct cvmx_gmxx_txx_stat5_s cn68xxp1;
+ struct cvmx_gmxx_txx_stat5_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_stat5 cvmx_gmxx_txx_stat5_t;
+
+/**
+ * cvmx_gmx#_tx#_stat6
+ *
+ * GMX_TX_STAT6 = GMX_TX_STATS_HIST5 (512-1023) / GMX_TX_STATS_HIST4 (256-511)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_txx_stat6 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist5 : 32; /**< Number of packets sent with an octet count of
+ 512 - 1023. */
+ uint64_t hist4 : 32; /**< Number of packets sent with an octet count of
+ 256 - 511. */
+#else
+ uint64_t hist4 : 32;
+ uint64_t hist5 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_stat6_s cn30xx;
+ struct cvmx_gmxx_txx_stat6_s cn31xx;
+ struct cvmx_gmxx_txx_stat6_s cn38xx;
+ struct cvmx_gmxx_txx_stat6_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat6_s cn50xx;
+ struct cvmx_gmxx_txx_stat6_s cn52xx;
+ struct cvmx_gmxx_txx_stat6_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat6_s cn56xx;
+ struct cvmx_gmxx_txx_stat6_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat6_s cn58xx;
+ struct cvmx_gmxx_txx_stat6_s cn58xxp1;
+ struct cvmx_gmxx_txx_stat6_s cn61xx;
+ struct cvmx_gmxx_txx_stat6_s cn63xx;
+ struct cvmx_gmxx_txx_stat6_s cn63xxp1;
+ struct cvmx_gmxx_txx_stat6_s cn66xx;
+ struct cvmx_gmxx_txx_stat6_s cn68xx;
+ struct cvmx_gmxx_txx_stat6_s cn68xxp1;
+ struct cvmx_gmxx_txx_stat6_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_stat6 cvmx_gmxx_txx_stat6_t;
+
+/**
+ * cvmx_gmx#_tx#_stat7
+ *
+ * GMX_TX_STAT7 = GMX_TX_STATS_HIST7 (1024-1518) / GMX_TX_STATS_HIST6 (>1518)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_txx_stat7 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist7 : 32; /**< Number of packets sent with an octet count
+ of > 1518. */
+ uint64_t hist6 : 32; /**< Number of packets sent with an octet count of
+ 1024 - 1518. */
+#else
+ uint64_t hist6 : 32;
+ uint64_t hist7 : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_stat7_s cn30xx;
+ struct cvmx_gmxx_txx_stat7_s cn31xx;
+ struct cvmx_gmxx_txx_stat7_s cn38xx;
+ struct cvmx_gmxx_txx_stat7_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat7_s cn50xx;
+ struct cvmx_gmxx_txx_stat7_s cn52xx;
+ struct cvmx_gmxx_txx_stat7_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat7_s cn56xx;
+ struct cvmx_gmxx_txx_stat7_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat7_s cn58xx;
+ struct cvmx_gmxx_txx_stat7_s cn58xxp1;
+ struct cvmx_gmxx_txx_stat7_s cn61xx;
+ struct cvmx_gmxx_txx_stat7_s cn63xx;
+ struct cvmx_gmxx_txx_stat7_s cn63xxp1;
+ struct cvmx_gmxx_txx_stat7_s cn66xx;
+ struct cvmx_gmxx_txx_stat7_s cn68xx;
+ struct cvmx_gmxx_txx_stat7_s cn68xxp1;
+ struct cvmx_gmxx_txx_stat7_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_stat7 cvmx_gmxx_txx_stat7_t;
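STAT4 through STAT7 pack two 32-bit histogram bins per CSR. A sketch, under the same accessor assumptions as the earlier snippet, that gathers all eight size bins (<64, 64, 65-127, 128-255, 256-511, 512-1023, 1024-1518, >1518 octets, per the field comments above) into one array:

    /* Gather the eight TX size-histogram bins from STAT4..STAT7.
       bins[0] = <64B ... bins[7] = >1518B. */
    static void gmx_tx_read_histogram(int interface, int port,
                                      uint64_t bins[8])
    {
        cvmx_gmxx_txx_stat4_t s4;
        cvmx_gmxx_txx_stat5_t s5;
        cvmx_gmxx_txx_stat6_t s6;
        cvmx_gmxx_txx_stat7_t s7;

        s4.u64 = cvmx_read_csr(CVMX_GMXX_TXX_STAT4(port, interface));
        s5.u64 = cvmx_read_csr(CVMX_GMXX_TXX_STAT5(port, interface));
        s6.u64 = cvmx_read_csr(CVMX_GMXX_TXX_STAT6(port, interface));
        s7.u64 = cvmx_read_csr(CVMX_GMXX_TXX_STAT7(port, interface));

        bins[0] = s4.s.hist0;  bins[1] = s4.s.hist1;
        bins[2] = s5.s.hist2;  bins[3] = s5.s.hist3;
        bins[4] = s6.s.hist4;  bins[5] = s6.s.hist5;
        bins[6] = s7.s.hist6;  bins[7] = s7.s.hist7;
    }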
+
+/**
+ * cvmx_gmx#_tx#_stat8
+ *
+ * GMX_TX_STAT8 = GMX_TX_STATS_MCST / GMX_TX_STATS_BCST
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the
+ * packet. GMX assumes that the DMAC lies in the first 6 bytes of the packet
+ * as per the 802.3 frame definition. If the system requires additional data
+ * before the L2 header, then the MCST and BCST counters may not reflect
+ * reality and should be ignored by software.
+ */
+union cvmx_gmxx_txx_stat8 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat8_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mcst : 32; /**< Number of packets sent to multicast DMAC.
+ Does not include BCST packets. */
+ uint64_t bcst : 32; /**< Number of packets sent to broadcast DMAC.
+ Does not include MCST packets. */
+#else
+ uint64_t bcst : 32;
+ uint64_t mcst : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_stat8_s cn30xx;
+ struct cvmx_gmxx_txx_stat8_s cn31xx;
+ struct cvmx_gmxx_txx_stat8_s cn38xx;
+ struct cvmx_gmxx_txx_stat8_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat8_s cn50xx;
+ struct cvmx_gmxx_txx_stat8_s cn52xx;
+ struct cvmx_gmxx_txx_stat8_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat8_s cn56xx;
+ struct cvmx_gmxx_txx_stat8_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat8_s cn58xx;
+ struct cvmx_gmxx_txx_stat8_s cn58xxp1;
+ struct cvmx_gmxx_txx_stat8_s cn61xx;
+ struct cvmx_gmxx_txx_stat8_s cn63xx;
+ struct cvmx_gmxx_txx_stat8_s cn63xxp1;
+ struct cvmx_gmxx_txx_stat8_s cn66xx;
+ struct cvmx_gmxx_txx_stat8_s cn68xx;
+ struct cvmx_gmxx_txx_stat8_s cn68xxp1;
+ struct cvmx_gmxx_txx_stat8_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_stat8 cvmx_gmxx_txx_stat8_t;
+
+/**
+ * cvmx_gmx#_tx#_stat9
+ *
+ * GMX_TX_STAT9 = GMX_TX_STATS_UNDFLW / GMX_TX_STATS_CTL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ */
+union cvmx_gmxx_txx_stat9 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat9_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t undflw : 32; /**< Number of underflow packets */
+ uint64_t ctl : 32; /**< Number of Control packets (PAUSE flow control)
+ generated by GMX. It does not include control
+ packets forwarded or generated by the PP's.
+ CTL will count the number of generated PFC frames.
+ CTL will not track the number of generated HG2
+ messages. */
+#else
+ uint64_t ctl : 32;
+ uint64_t undflw : 32;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_stat9_s cn30xx;
+ struct cvmx_gmxx_txx_stat9_s cn31xx;
+ struct cvmx_gmxx_txx_stat9_s cn38xx;
+ struct cvmx_gmxx_txx_stat9_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat9_s cn50xx;
+ struct cvmx_gmxx_txx_stat9_s cn52xx;
+ struct cvmx_gmxx_txx_stat9_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat9_s cn56xx;
+ struct cvmx_gmxx_txx_stat9_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat9_s cn58xx;
+ struct cvmx_gmxx_txx_stat9_s cn58xxp1;
+ struct cvmx_gmxx_txx_stat9_s cn61xx;
+ struct cvmx_gmxx_txx_stat9_s cn63xx;
+ struct cvmx_gmxx_txx_stat9_s cn63xxp1;
+ struct cvmx_gmxx_txx_stat9_s cn66xx;
+ struct cvmx_gmxx_txx_stat9_s cn68xx;
+ struct cvmx_gmxx_txx_stat9_s cn68xxp1;
+ struct cvmx_gmxx_txx_stat9_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_stat9 cvmx_gmxx_txx_stat9_t;
+
+/**
+ * cvmx_gmx#_tx#_stats_ctl
+ *
+ * GMX_TX_STATS_CTL = TX Stats Control register
+ *
+ */
+union cvmx_gmxx_txx_stats_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stats_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t rd_clr : 1; /**< Stats registers will clear on reads */
+#else
+ uint64_t rd_clr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_stats_ctl_s cn30xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn31xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn38xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn38xxp2;
+ struct cvmx_gmxx_txx_stats_ctl_s cn50xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn52xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn52xxp1;
+ struct cvmx_gmxx_txx_stats_ctl_s cn56xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn56xxp1;
+ struct cvmx_gmxx_txx_stats_ctl_s cn58xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn58xxp1;
+ struct cvmx_gmxx_txx_stats_ctl_s cn61xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn63xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn63xxp1;
+ struct cvmx_gmxx_txx_stats_ctl_s cn66xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn68xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn68xxp1;
+ struct cvmx_gmxx_txx_stats_ctl_s cnf71xx;
+};
+typedef union cvmx_gmxx_txx_stats_ctl cvmx_gmxx_txx_stats_ctl_t;
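Since every stats register above keys its clear-on-read behavior off RD_CLR, a typical init step is a read-modify-write of this control register; a sketch under the same assumed accessors:

    /* Enable read-to-clear on all TX stats registers for this port. */
    static void gmx_tx_stats_rd_clr(int interface, int port)
    {
        cvmx_gmxx_txx_stats_ctl_t ctl;

        ctl.u64 = cvmx_read_csr(CVMX_GMXX_TXX_STATS_CTL(port, interface));
        ctl.s.rd_clr = 1;
        cvmx_write_csr(CVMX_GMXX_TXX_STATS_CTL(port, interface), ctl.u64);
    }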
+
+/**
+ * cvmx_gmx#_tx#_thresh
+ *
+ * Per Port
+ *
+ *
+ * GMX_TX_THRESH = Packet TX Threshold
+ *
+ * Notes:
+ * In XAUI mode, prt0 is used for checking. Since XAUI mode uses a single TX FIFO and runs at a higher data rate, the recommended value is 0x100.
+ *
+ */
+union cvmx_gmxx_txx_thresh {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_thresh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t cnt : 10; /**< Number of 16B ticks to accumulate in the TX FIFO
+ before sending on the packet interface
+ This register should be large enough to prevent
+ underflow on the packet interface and must never
+ be set to zero. This register cannot exceed
+ the TX FIFO depth which is...
+ GMX_TX_PRTS==0,1: CNT MAX = 0x100
+ GMX_TX_PRTS==2 : CNT MAX = 0x080
+ GMX_TX_PRTS==3,4: CNT MAX = 0x040 */
+#else
+ uint64_t cnt : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_thresh_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t cnt : 7; /**< Number of 16B ticks to accumulate in the TX FIFO
+ before sending on the RGMII interface
+ This register should be large enough to prevent
+ underflow on the RGMII interface and must never
+ be set below 4. This register cannot exceed
+ the TX FIFO depth which is 64 16B entries. */
+#else
+ uint64_t cnt : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_txx_thresh_cn30xx cn31xx;
+ struct cvmx_gmxx_txx_thresh_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t cnt : 9; /**< Number of 16B ticks to accumulate in the TX FIFO
+ before sending on the RGMII interface
+ This register should be large enough to prevent
+ underflow on the RGMII interface and must never
+ be set to zero. This register cannot exceed
+ the TX FIFO depth which is...
+ GMX_TX_PRTS==0,1: CNT MAX = 0x100
+ GMX_TX_PRTS==2 : CNT MAX = 0x080
+ GMX_TX_PRTS==3,4: CNT MAX = 0x040
+ (PASS2 expands from 6 to 9 bits) */
+#else
+ uint64_t cnt : 9;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn38xx;
+ struct cvmx_gmxx_txx_thresh_cn38xx cn38xxp2;
+ struct cvmx_gmxx_txx_thresh_cn30xx cn50xx;
+ struct cvmx_gmxx_txx_thresh_cn38xx cn52xx;
+ struct cvmx_gmxx_txx_thresh_cn38xx cn52xxp1;
+ struct cvmx_gmxx_txx_thresh_cn38xx cn56xx;
+ struct cvmx_gmxx_txx_thresh_cn38xx cn56xxp1;
+ struct cvmx_gmxx_txx_thresh_cn38xx cn58xx;
+ struct cvmx_gmxx_txx_thresh_cn38xx cn58xxp1;
+ struct cvmx_gmxx_txx_thresh_cn38xx cn61xx;
+ struct cvmx_gmxx_txx_thresh_cn38xx cn63xx;
+ struct cvmx_gmxx_txx_thresh_cn38xx cn63xxp1;
+ struct cvmx_gmxx_txx_thresh_cn38xx cn66xx;
+ struct cvmx_gmxx_txx_thresh_s cn68xx;
+ struct cvmx_gmxx_txx_thresh_s cn68xxp1;
+ struct cvmx_gmxx_txx_thresh_cn38xx cnf71xx;
+};
+typedef union cvmx_gmxx_txx_thresh cvmx_gmxx_txx_thresh_t;
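A sketch of programming the threshold under the same assumed accessors; note the constraints in the comments above (CNT must be non-zero, must not exceed the per-port FIFO depth implied by GMX_TX_PRTS, and 0x100 is the recommended XAUI value):

    /* Program the TX FIFO threshold for one port.  The caller is
       responsible for respecting the CNT MAX table above. */
    static void gmx_tx_set_thresh(int interface, int port, uint64_t cnt)
    {
        cvmx_gmxx_txx_thresh_t th;

        th.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(port, interface));
        th.s.cnt = cnt;     /* 16B ticks to accumulate before sending */
        cvmx_write_csr(CVMX_GMXX_TXX_THRESH(port, interface), th.u64);
    }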
+
+/**
+ * cvmx_gmx#_tx_bp
+ *
+ * GMX_TX_BP = Packet Interface TX BackPressure Register
+ *
+ *
+ * Notes:
+ * In XAUI mode, only the lsb (corresponding to port0) of BP is used.
+ *
+ */
+union cvmx_gmxx_tx_bp {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t bp : 4; /**< Per port BackPressure status
+ 0=Port is available
+ 1=Port should be back pressured */
+#else
+ uint64_t bp : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_bp_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t bp : 3; /**< Per port BackPressure status
+ 0=Port is available
+ 1=Port should be back pressured */
+#else
+ uint64_t bp : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_tx_bp_cn30xx cn31xx;
+ struct cvmx_gmxx_tx_bp_s cn38xx;
+ struct cvmx_gmxx_tx_bp_s cn38xxp2;
+ struct cvmx_gmxx_tx_bp_cn30xx cn50xx;
+ struct cvmx_gmxx_tx_bp_s cn52xx;
+ struct cvmx_gmxx_tx_bp_s cn52xxp1;
+ struct cvmx_gmxx_tx_bp_s cn56xx;
+ struct cvmx_gmxx_tx_bp_s cn56xxp1;
+ struct cvmx_gmxx_tx_bp_s cn58xx;
+ struct cvmx_gmxx_tx_bp_s cn58xxp1;
+ struct cvmx_gmxx_tx_bp_s cn61xx;
+ struct cvmx_gmxx_tx_bp_s cn63xx;
+ struct cvmx_gmxx_tx_bp_s cn63xxp1;
+ struct cvmx_gmxx_tx_bp_s cn66xx;
+ struct cvmx_gmxx_tx_bp_s cn68xx;
+ struct cvmx_gmxx_tx_bp_s cn68xxp1;
+ struct cvmx_gmxx_tx_bp_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t bp : 2; /**< Per port BackPressure status
+ 0=Port is available
+ 1=Port should be back pressured */
+#else
+ uint64_t bp : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_gmxx_tx_bp cvmx_gmxx_tx_bp_t;
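GMX_TX_BP is one register per interface with one status bit per port, so polling a single port is a shift and mask; a sketch under the same assumed accessors (a CVMX_GMXX_TX_BP macro taking only the interface is an assumption):

    /* Return nonzero when 'port' is currently being back-pressured. */
    static int gmx_tx_port_backpressured(int interface, int port)
    {
        cvmx_gmxx_tx_bp_t bp;

        bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_BP(interface));
        return (bp.s.bp >> port) & 1;   /* 1 = back pressured */
    }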
+
+/**
+ * cvmx_gmx#_tx_clk_msk#
+ *
+ * GMX_TX_CLK_MSK = GMX Clock Select
+ *
+ */
+union cvmx_gmxx_tx_clk_mskx {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_clk_mskx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t msk : 1; /**< Write this bit to a 1 when switching clocks */
+#else
+ uint64_t msk : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_clk_mskx_s cn30xx;
+ struct cvmx_gmxx_tx_clk_mskx_s cn50xx;
+};
+typedef union cvmx_gmxx_tx_clk_mskx cvmx_gmxx_tx_clk_mskx_t;
+
+/**
+ * cvmx_gmx#_tx_col_attempt
+ *
+ * GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame
+ *
+ */
+union cvmx_gmxx_tx_col_attempt {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_col_attempt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t limit : 5; /**< Collision Attempts
+ (SGMII/1000Base-X half-duplex only) */
+#else
+ uint64_t limit : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_col_attempt_s cn30xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn31xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn38xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn38xxp2;
+ struct cvmx_gmxx_tx_col_attempt_s cn50xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn52xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn52xxp1;
+ struct cvmx_gmxx_tx_col_attempt_s cn56xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn56xxp1;
+ struct cvmx_gmxx_tx_col_attempt_s cn58xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn58xxp1;
+ struct cvmx_gmxx_tx_col_attempt_s cn61xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn63xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn63xxp1;
+ struct cvmx_gmxx_tx_col_attempt_s cn66xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn68xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn68xxp1;
+ struct cvmx_gmxx_tx_col_attempt_s cnf71xx;
+};
+typedef union cvmx_gmxx_tx_col_attempt cvmx_gmxx_tx_col_attempt_t;
+
+/**
+ * cvmx_gmx#_tx_corrupt
+ *
+ * GMX_TX_CORRUPT = TX - Corrupt TX packets with the ERR bit set
+ *
+ *
+ * Notes:
+ * Packets sent from PKO with the ERR wire asserted will be corrupted by
+ * the transmitter if CORRUPT[prt] is set (XAUI uses prt==0).
+ *
+ * Corruption means that GMX will send a bad FCS value. If GMX_TX_APPEND[FCS]
+ * is clear then no FCS is sent and the GMX cannot corrupt it. The corrupt FCS
+ * value is 0xeeeeeeee for SGMII/1000Base-X and 4 bytes of the error
+ * propagation code in XAUI mode.
+ */
+union cvmx_gmxx_tx_corrupt {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_corrupt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t corrupt : 4; /**< Per port error propagation
+ 0=Never corrupt packets
+ 1=Corrupt packets with ERR */
+#else
+ uint64_t corrupt : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_corrupt_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t corrupt : 3; /**< Per port error propagation
+ 0=Never corrupt packets
+ 1=Corrupt packets with ERR */
+#else
+ uint64_t corrupt : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_tx_corrupt_cn30xx cn31xx;
+ struct cvmx_gmxx_tx_corrupt_s cn38xx;
+ struct cvmx_gmxx_tx_corrupt_s cn38xxp2;
+ struct cvmx_gmxx_tx_corrupt_cn30xx cn50xx;
+ struct cvmx_gmxx_tx_corrupt_s cn52xx;
+ struct cvmx_gmxx_tx_corrupt_s cn52xxp1;
+ struct cvmx_gmxx_tx_corrupt_s cn56xx;
+ struct cvmx_gmxx_tx_corrupt_s cn56xxp1;
+ struct cvmx_gmxx_tx_corrupt_s cn58xx;
+ struct cvmx_gmxx_tx_corrupt_s cn58xxp1;
+ struct cvmx_gmxx_tx_corrupt_s cn61xx;
+ struct cvmx_gmxx_tx_corrupt_s cn63xx;
+ struct cvmx_gmxx_tx_corrupt_s cn63xxp1;
+ struct cvmx_gmxx_tx_corrupt_s cn66xx;
+ struct cvmx_gmxx_tx_corrupt_s cn68xx;
+ struct cvmx_gmxx_tx_corrupt_s cn68xxp1;
+ struct cvmx_gmxx_tx_corrupt_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t corrupt : 2; /**< Per port error propagation
+ 0=Never corrupt packets
+ 1=Corrupt packets with ERR */
+#else
+ uint64_t corrupt : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_gmxx_tx_corrupt cvmx_gmxx_tx_corrupt_t;
+
+/**
+ * cvmx_gmx#_tx_hg2_reg1
+ *
+ * Notes:
+ * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in
+ * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address of
+ * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of
+ * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior.
+ * For reads, either address will return the GMX(0)_TX_HG2_REG1 values.
+ */
+union cvmx_gmxx_tx_hg2_reg1 {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_hg2_reg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t tx_xof : 16; /**< TX HiGig2 message for logical link pause when any
+ bit value changes
+ Only write in HiGig2 mode i.e. when
+ GMX_TX_XAUI_CTL[HG_EN]=1 and
+ GMX_RX_UDD_SKP[SKIP]=16. */
+#else
+ uint64_t tx_xof : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn52xx;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn52xxp1;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn56xx;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn61xx;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn63xx;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn63xxp1;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn66xx;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn68xx;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn68xxp1;
+ struct cvmx_gmxx_tx_hg2_reg1_s cnf71xx;
+};
+typedef union cvmx_gmxx_tx_hg2_reg1 cvmx_gmxx_tx_hg2_reg1_t;
+
+/**
+ * cvmx_gmx#_tx_hg2_reg2
+ *
+ * Notes:
+ * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in
+ * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address of
+ * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of
+ * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior.
+ * For reads, either address will return the GMX(0)_TX_HG2_REG1 values.
+ */
+union cvmx_gmxx_tx_hg2_reg2 {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_hg2_reg2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t tx_xon : 16; /**< TX HiGig2 message for logical link pause when any
+ bit value changes
+ Only write in HiGig2 mode i.e. when
+ GMX_TX_XAUI_CTL[HG_EN]=1 and
+ GMX_RX_UDD_SKP[SKIP]=16. */
+#else
+ uint64_t tx_xon : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn52xx;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn52xxp1;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn56xx;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn61xx;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn63xx;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn63xxp1;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn66xx;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn68xx;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn68xxp1;
+ struct cvmx_gmxx_tx_hg2_reg2_s cnf71xx;
+};
+typedef union cvmx_gmxx_tx_hg2_reg2 cvmx_gmxx_tx_hg2_reg2_t;
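The write-1-to-set/write-1-to-clear aliasing described in the notes means XOF and XON drive the same 16 flops through two different addresses; a sketch under the same assumed accessors:

    /* Assert logical-link XOFF for the links in 'mask' via REG1
       (write 1 to set), then release them via REG2 (write 1 to clear). */
    static void gmx_hg2_xoff(int interface, uint16_t mask)
    {
        cvmx_gmxx_tx_hg2_reg1_t r1 = { .u64 = 0 };

        r1.s.tx_xof = mask;     /* 1 bits are set in the shared flops */
        cvmx_write_csr(CVMX_GMXX_TX_HG2_REG1(interface), r1.u64);
    }

    static void gmx_hg2_xon(int interface, uint16_t mask)
    {
        cvmx_gmxx_tx_hg2_reg2_t r2 = { .u64 = 0 };

        r2.s.tx_xon = mask;     /* 1 bits are cleared in the shared flops */
        cvmx_write_csr(CVMX_GMXX_TX_HG2_REG2(interface), r2.u64);
    }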
+
+/**
+ * cvmx_gmx#_tx_ifg
+ *
+ * GMX_TX_IFG = Packet TX Interframe Gap
+ *
+ *
+ * Notes:
+ * * Programming IFG1 and IFG2.
+ *
+ * For 10/100/1000Mbs half-duplex systems that require IEEE 802.3
+ * compatibility, IFG1 must be in the range of 1-8, IFG2 must be in the range
+ * of 4-12, and the IFG1+IFG2 sum must be 12.
+ *
+ * For 10/100/1000Mbs full-duplex systems that require IEEE 802.3
+ * compatibility, IFG1 must be in the range of 1-11, IFG2 must be in the range
+ * of 1-11, and the IFG1+IFG2 sum must be 12.
+ *
+ * For XAUI/10Gbs systems that require IEEE 802.3 compatibility, the
+ * IFG1+IFG2 sum must be 12. IFG1[1:0] and IFG2[1:0] must be zero.
+ *
+ * For all other systems, IFG1 and IFG2 can be any value in the range of
+ * 1-15, allowing for a total possible IFG sum of 2-30.
+ */
+union cvmx_gmxx_tx_ifg {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_ifg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t ifg2 : 4; /**< 1/3 of the interframe gap timing (in IFG2*8 bits)
+ If CRS is detected during IFG2, then the
+ interFrameSpacing timer is not reset and a frame
+ is transmitted once the timer expires. */
+ uint64_t ifg1 : 4; /**< 2/3 of the interframe gap timing (in IFG1*8 bits)
+ If CRS is detected during IFG1, then the
+ interFrameSpacing timer is reset and a frame is
+ not transmitted. */
+#else
+ uint64_t ifg1 : 4;
+ uint64_t ifg2 : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_ifg_s cn30xx;
+ struct cvmx_gmxx_tx_ifg_s cn31xx;
+ struct cvmx_gmxx_tx_ifg_s cn38xx;
+ struct cvmx_gmxx_tx_ifg_s cn38xxp2;
+ struct cvmx_gmxx_tx_ifg_s cn50xx;
+ struct cvmx_gmxx_tx_ifg_s cn52xx;
+ struct cvmx_gmxx_tx_ifg_s cn52xxp1;
+ struct cvmx_gmxx_tx_ifg_s cn56xx;
+ struct cvmx_gmxx_tx_ifg_s cn56xxp1;
+ struct cvmx_gmxx_tx_ifg_s cn58xx;
+ struct cvmx_gmxx_tx_ifg_s cn58xxp1;
+ struct cvmx_gmxx_tx_ifg_s cn61xx;
+ struct cvmx_gmxx_tx_ifg_s cn63xx;
+ struct cvmx_gmxx_tx_ifg_s cn63xxp1;
+ struct cvmx_gmxx_tx_ifg_s cn66xx;
+ struct cvmx_gmxx_tx_ifg_s cn68xx;
+ struct cvmx_gmxx_tx_ifg_s cn68xxp1;
+ struct cvmx_gmxx_tx_ifg_s cnf71xx;
+};
+typedef union cvmx_gmxx_tx_ifg cvmx_gmxx_tx_ifg_t;
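One IEEE-compliant full-duplex setting from the ranges above is IFG1=8 and IFG2=4, which sums to the required 12 and also keeps IFG1[1:0] and IFG2[1:0] zero as the XAUI rule demands; a sketch under the same assumed accessors:

    /* Program a standards-compliant interframe gap (IFG1+IFG2 == 12). */
    static void gmx_tx_set_ifg(int interface)
    {
        cvmx_gmxx_tx_ifg_t ifg;

        ifg.u64 = cvmx_read_csr(CVMX_GMXX_TX_IFG(interface));
        ifg.s.ifg1 = 8;     /* first 2/3 of the gap */
        ifg.s.ifg2 = 4;     /* final 1/3 of the gap */
        cvmx_write_csr(CVMX_GMXX_TX_IFG(interface), ifg.u64);
    }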
+
+/**
+ * cvmx_gmx#_tx_int_en
+ *
+ * GMX_TX_INT_EN = Interrupt Enable
+ *
+ *
+ * Notes:
+ * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used.
+ *
+ */
+union cvmx_gmxx_tx_int_en {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t xchange : 1; /**< XAUI link status changed - this denotes a change
+ to GMX_RX_XAUI_CTL[STATUS]
+ (XAUI mode only) */
+ uint64_t ptp_lost : 4; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t late_col : 4; /**< TX Late Collision
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xsdef : 4; /**< TX Excessive deferral
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t late_col : 4;
+ uint64_t ptp_lost : 4;
+ uint64_t xchange : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_int_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t late_col : 3; /**< TX Late Collision */
+ uint64_t reserved_15_15 : 1;
+ uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */
+ uint64_t reserved_11_11 : 1;
+ uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 3;
+ uint64_t reserved_5_7 : 3;
+ uint64_t xscol : 3;
+ uint64_t reserved_11_11 : 1;
+ uint64_t xsdef : 3;
+ uint64_t reserved_15_15 : 1;
+ uint64_t late_col : 3;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_tx_int_en_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */
+ uint64_t reserved_11_11 : 1;
+ uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 3;
+ uint64_t reserved_5_7 : 3;
+ uint64_t xscol : 3;
+ uint64_t reserved_11_11 : 1;
+ uint64_t xsdef : 3;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } cn31xx;
+ struct cvmx_gmxx_tx_int_en_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t late_col : 4; /**< TX Late Collision
+ (PASS3 only) */
+ uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */
+ uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t ncb_nxa : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t late_col : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn38xx;
+ struct cvmx_gmxx_tx_int_en_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */
+ uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t ncb_nxa : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xxp2;
+ struct cvmx_gmxx_tx_int_en_cn30xx cn50xx;
+ struct cvmx_gmxx_tx_int_en_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t late_col : 4; /**< TX Late Collision
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xsdef : 4; /**< TX Excessive deferral
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t late_col : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_tx_int_en_cn52xx cn52xxp1;
+ struct cvmx_gmxx_tx_int_en_cn52xx cn56xx;
+ struct cvmx_gmxx_tx_int_en_cn52xx cn56xxp1;
+ struct cvmx_gmxx_tx_int_en_cn38xx cn58xx;
+ struct cvmx_gmxx_tx_int_en_cn38xx cn58xxp1;
+ struct cvmx_gmxx_tx_int_en_s cn61xx;
+ struct cvmx_gmxx_tx_int_en_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t ptp_lost : 4; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t late_col : 4; /**< TX Late Collision
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xsdef : 4; /**< TX Excessive deferral
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t late_col : 4;
+ uint64_t ptp_lost : 4;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } cn63xx;
+ struct cvmx_gmxx_tx_int_en_cn63xx cn63xxp1;
+ struct cvmx_gmxx_tx_int_en_s cn66xx;
+ struct cvmx_gmxx_tx_int_en_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t xchange : 1; /**< XAUI/RXAUI link status changed - this denotes a
+ change to GMX_RX_XAUI_CTL[STATUS]
+ (XAUI/RXAUI mode only) */
+ uint64_t ptp_lost : 4; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t late_col : 4; /**< TX Late Collision
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xsdef : 4; /**< TX Excessive deferral
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow */
+ uint64_t pko_nxp : 1; /**< Port pipe out-of-range from PKO Interface */
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t pko_nxp : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t late_col : 4;
+ uint64_t ptp_lost : 4;
+ uint64_t xchange : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } cn68xx;
+ struct cvmx_gmxx_tx_int_en_cn68xx cn68xxp1;
+ struct cvmx_gmxx_tx_int_en_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t xchange : 1; /**< XAUI link status changed - this denotes a change
+ to GMX_RX_XAUI_CTL[STATUS]
+ (XAUI mode only) */
+ uint64_t reserved_22_23 : 2;
+ uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t reserved_18_19 : 2;
+ uint64_t late_col : 2; /**< TX Late Collision
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_19 : 2;
+ uint64_t ptp_lost : 2;
+ uint64_t reserved_22_23 : 2;
+ uint64_t xchange : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_gmxx_tx_int_en cvmx_gmxx_tx_int_en_t;
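A sketch of unmasking just the underflow and PKO address-range sources on a 4-port interface, leaving the collision-related sources masked, under the same assumed accessors:

    /* Enable TX underflow and PKO out-of-range interrupts only. */
    static void gmx_tx_int_enable(int interface)
    {
        cvmx_gmxx_tx_int_en_t en;

        en.u64 = 0;
        en.s.undflw = 0xf;      /* one enable bit per port */
        en.s.pko_nxa = 1;
        cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), en.u64);
    }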
+
+/**
+ * cvmx_gmx#_tx_int_reg
+ *
+ * GMX_TX_INT_REG = Interrupt Register
+ *
+ *
+ * Notes:
+ * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used.
+ *
+ */
+union cvmx_gmxx_tx_int_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t xchange : 1; /**< XAUI link status changed - this denotes a change
+ to GMX_RX_XAUI_CTL[STATUS]
+ (XAUI mode only) */
+ uint64_t ptp_lost : 4; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t late_col : 4; /**< TX Late Collision
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xsdef : 4; /**< TX Excessive deferral
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t late_col : 4;
+ uint64_t ptp_lost : 4;
+ uint64_t xchange : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_int_reg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t late_col : 3; /**< TX Late Collision */
+ uint64_t reserved_15_15 : 1;
+ uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */
+ uint64_t reserved_11_11 : 1;
+ uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 3;
+ uint64_t reserved_5_7 : 3;
+ uint64_t xscol : 3;
+ uint64_t reserved_11_11 : 1;
+ uint64_t xsdef : 3;
+ uint64_t reserved_15_15 : 1;
+ uint64_t late_col : 3;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_tx_int_reg_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */
+ uint64_t reserved_11_11 : 1;
+ uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 3;
+ uint64_t reserved_5_7 : 3;
+ uint64_t xscol : 3;
+ uint64_t reserved_11_11 : 1;
+ uint64_t xsdef : 3;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } cn31xx;
+ struct cvmx_gmxx_tx_int_reg_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t late_col : 4; /**< TX Late Collision
+ (PASS3 only) */
+ uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */
+ uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t ncb_nxa : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t late_col : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn38xx;
+ struct cvmx_gmxx_tx_int_reg_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */
+ uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t ncb_nxa : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xxp2;
+ struct cvmx_gmxx_tx_int_reg_cn30xx cn50xx;
+ struct cvmx_gmxx_tx_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t late_col : 4; /**< TX Late Collision
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xsdef : 4; /**< TX Excessive deferral
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t late_col : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_tx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_gmxx_tx_int_reg_cn52xx cn56xx;
+ struct cvmx_gmxx_tx_int_reg_cn52xx cn56xxp1;
+ struct cvmx_gmxx_tx_int_reg_cn38xx cn58xx;
+ struct cvmx_gmxx_tx_int_reg_cn38xx cn58xxp1;
+ struct cvmx_gmxx_tx_int_reg_s cn61xx;
+ struct cvmx_gmxx_tx_int_reg_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t ptp_lost : 4; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t late_col : 4; /**< TX Late Collision
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xsdef : 4; /**< TX Excessive deferral
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t late_col : 4;
+ uint64_t ptp_lost : 4;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } cn63xx;
+ struct cvmx_gmxx_tx_int_reg_cn63xx cn63xxp1;
+ struct cvmx_gmxx_tx_int_reg_s cn66xx;
+ struct cvmx_gmxx_tx_int_reg_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t xchange : 1; /**< XAUI/RXAUI link status changed - this denotes a
+ change to GMX_RX_XAUI_CTL[STATUS]
+ (XAUI/RXAUI mode only) */
+ uint64_t ptp_lost : 4; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t late_col : 4; /**< TX Late Collision
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xsdef : 4; /**< TX Excessive deferral
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t xscol : 4; /**< TX Excessive collisions
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t undflw : 4; /**< TX Underflow */
+ uint64_t pko_nxp : 1; /**< Port pipe out-of-range from PKO Interface */
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t pko_nxp : 1;
+ uint64_t undflw : 4;
+ uint64_t reserved_6_7 : 2;
+ uint64_t xscol : 4;
+ uint64_t xsdef : 4;
+ uint64_t late_col : 4;
+ uint64_t ptp_lost : 4;
+ uint64_t xchange : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } cn68xx;
+ struct cvmx_gmxx_tx_int_reg_cn68xx cn68xxp1;
+ struct cvmx_gmxx_tx_int_reg_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t xchange : 1; /**< XAUI link status changed - this denotes a change
+ to GMX_RX_XAUI_CTL[STATUS]
+ (XAUI mode only) */
+ uint64_t reserved_22_23 : 2;
+ uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t reserved_18_19 : 2;
+ uint64_t late_col : 2; /**< TX Late Collision
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_19 : 2;
+ uint64_t ptp_lost : 2;
+ uint64_t reserved_22_23 : 2;
+ uint64_t xchange : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_gmxx_tx_int_reg cvmx_gmxx_tx_int_reg_t;
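A sketch of servicing these interrupts under the same assumed accessors; the write-back-to-clear step assumes the usual Octeon write-1-to-clear convention for *_INT_REG CSRs, which the comments above do not state explicitly:

    /* Snapshot and acknowledge pending TX interrupts. */
    static uint64_t gmx_tx_int_ack(int interface)
    {
        cvmx_gmxx_tx_int_reg_t ir;

        ir.u64 = cvmx_read_csr(CVMX_GMXX_TX_INT_REG(interface));
        /* Writing back the bits that were read clears exactly those bits
           (assumed W1C behavior). */
        cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface), ir.u64);
        return ir.u64;
    }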
+
+/**
+ * cvmx_gmx#_tx_jam
+ *
+ * GMX_TX_JAM = Packet TX Jam Pattern
+ *
+ */
+union cvmx_gmxx_tx_jam {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_jam_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t jam : 8; /**< Jam pattern
+ (SGMII/1000Base-X half-duplex only) */
+#else
+ uint64_t jam : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_jam_s cn30xx;
+ struct cvmx_gmxx_tx_jam_s cn31xx;
+ struct cvmx_gmxx_tx_jam_s cn38xx;
+ struct cvmx_gmxx_tx_jam_s cn38xxp2;
+ struct cvmx_gmxx_tx_jam_s cn50xx;
+ struct cvmx_gmxx_tx_jam_s cn52xx;
+ struct cvmx_gmxx_tx_jam_s cn52xxp1;
+ struct cvmx_gmxx_tx_jam_s cn56xx;
+ struct cvmx_gmxx_tx_jam_s cn56xxp1;
+ struct cvmx_gmxx_tx_jam_s cn58xx;
+ struct cvmx_gmxx_tx_jam_s cn58xxp1;
+ struct cvmx_gmxx_tx_jam_s cn61xx;
+ struct cvmx_gmxx_tx_jam_s cn63xx;
+ struct cvmx_gmxx_tx_jam_s cn63xxp1;
+ struct cvmx_gmxx_tx_jam_s cn66xx;
+ struct cvmx_gmxx_tx_jam_s cn68xx;
+ struct cvmx_gmxx_tx_jam_s cn68xxp1;
+ struct cvmx_gmxx_tx_jam_s cnf71xx;
+};
+typedef union cvmx_gmxx_tx_jam cvmx_gmxx_tx_jam_t;
+
+/**
+ * cvmx_gmx#_tx_lfsr
+ *
+ * GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
+ *
+ */
+union cvmx_gmxx_tx_lfsr {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_lfsr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t lfsr : 16; /**< The current state of the LFSR used to feed random
+ numbers to compute truncated binary exponential
+ backoff.
+ (SGMII/1000Base-X half-duplex only) */
+#else
+ uint64_t lfsr : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_lfsr_s cn30xx;
+ struct cvmx_gmxx_tx_lfsr_s cn31xx;
+ struct cvmx_gmxx_tx_lfsr_s cn38xx;
+ struct cvmx_gmxx_tx_lfsr_s cn38xxp2;
+ struct cvmx_gmxx_tx_lfsr_s cn50xx;
+ struct cvmx_gmxx_tx_lfsr_s cn52xx;
+ struct cvmx_gmxx_tx_lfsr_s cn52xxp1;
+ struct cvmx_gmxx_tx_lfsr_s cn56xx;
+ struct cvmx_gmxx_tx_lfsr_s cn56xxp1;
+ struct cvmx_gmxx_tx_lfsr_s cn58xx;
+ struct cvmx_gmxx_tx_lfsr_s cn58xxp1;
+ struct cvmx_gmxx_tx_lfsr_s cn61xx;
+ struct cvmx_gmxx_tx_lfsr_s cn63xx;
+ struct cvmx_gmxx_tx_lfsr_s cn63xxp1;
+ struct cvmx_gmxx_tx_lfsr_s cn66xx;
+ struct cvmx_gmxx_tx_lfsr_s cn68xx;
+ struct cvmx_gmxx_tx_lfsr_s cn68xxp1;
+ struct cvmx_gmxx_tx_lfsr_s cnf71xx;
+};
+typedef union cvmx_gmxx_tx_lfsr cvmx_gmxx_tx_lfsr_t;
+
+/**
+ * cvmx_gmx#_tx_ovr_bp
+ *
+ * GMX_TX_OVR_BP = Packet Interface TX Override BackPressure
+ *
+ *
+ * Notes:
+ * In XAUI mode, only the lsbs (corresponding to port0) of EN, BP, and IGN_FULL are used.
+ *
+ * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero
+ * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol
+ * when GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by
+ * GMX*_TX_XAUI_CTL[HG_EN]=1 and GMX*_RX0_UDD_SKP[LEN]=16.) HW can only auto-generate backpressure
+ * through HiGig2 messages (optionally, when GMX*_HG2_CONTROL[HG2TX_EN]=1) with the HiGig2
+ * protocol.
+ */
+union cvmx_gmxx_tx_ovr_bp {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_ovr_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t tx_prt_bp : 16; /**< Per port BP sent to PKO
+ 0=Port is available
+ 1=Port should be back pressured
+ TX_PRT_BP should not be set until
+ GMX_INF_MODE[EN] has been enabled */
+ uint64_t reserved_12_31 : 20;
+ uint64_t en : 4; /**< Per port Enable back pressure override */
+ uint64_t bp : 4; /**< Per port BackPressure status to use
+ 0=Port is available
+ 1=Port should be back pressured */
+ uint64_t ign_full : 4; /**< Ignore the RX FIFO full when computing BP */
+#else
+ uint64_t ign_full : 4;
+ uint64_t bp : 4;
+ uint64_t en : 4;
+ uint64_t reserved_12_31 : 20;
+ uint64_t tx_prt_bp : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_ovr_bp_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t en : 3; /**< Per port Enable back pressure override */
+ uint64_t reserved_7_7 : 1;
+ uint64_t bp : 3; /**< Per port BackPressure status to use
+ 0=Port is available
+ 1=Port should be back pressured */
+ uint64_t reserved_3_3 : 1;
+ uint64_t ign_full : 3; /**< Ignore the RX FIFO full when computing BP */
+#else
+ uint64_t ign_full : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t bp : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t en : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_tx_ovr_bp_cn30xx cn31xx;
+ struct cvmx_gmxx_tx_ovr_bp_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t en : 4; /**< Per port Enable back pressure override */
+ uint64_t bp : 4; /**< Per port BackPressure status to use
+ 0=Port is available
+ 1=Port should be back pressured */
+ uint64_t ign_full : 4; /**< Ignore the RX FIFO full when computing BP */
+#else
+ uint64_t ign_full : 4;
+ uint64_t bp : 4;
+ uint64_t en : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn38xx;
+ struct cvmx_gmxx_tx_ovr_bp_cn38xx cn38xxp2;
+ struct cvmx_gmxx_tx_ovr_bp_cn30xx cn50xx;
+ struct cvmx_gmxx_tx_ovr_bp_s cn52xx;
+ struct cvmx_gmxx_tx_ovr_bp_s cn52xxp1;
+ struct cvmx_gmxx_tx_ovr_bp_s cn56xx;
+ struct cvmx_gmxx_tx_ovr_bp_s cn56xxp1;
+ struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xx;
+ struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xxp1;
+ struct cvmx_gmxx_tx_ovr_bp_s cn61xx;
+ struct cvmx_gmxx_tx_ovr_bp_s cn63xx;
+ struct cvmx_gmxx_tx_ovr_bp_s cn63xxp1;
+ struct cvmx_gmxx_tx_ovr_bp_s cn66xx;
+ struct cvmx_gmxx_tx_ovr_bp_s cn68xx;
+ struct cvmx_gmxx_tx_ovr_bp_s cn68xxp1;
+ struct cvmx_gmxx_tx_ovr_bp_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t tx_prt_bp : 16; /**< Per port BP sent to PKO
+ 0=Port is available
+ 1=Port should be back pressured
+ TX_PRT_BP should not be set until
+ GMX_INF_MODE[EN] has been enabled */
+ uint64_t reserved_10_31 : 22;
+ uint64_t en : 2; /**< Per port Enable back pressure override */
+ uint64_t reserved_6_7 : 2;
+ uint64_t bp : 2; /**< Per port BackPressure status to use
+ 0=Port is available
+ 1=Port should be back pressured */
+ uint64_t reserved_2_3 : 2;
+ uint64_t ign_full : 2; /**< Ignore the RX FIFO full when computing BP */
+#else
+ uint64_t ign_full : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t bp : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t en : 2;
+ uint64_t reserved_10_31 : 22;
+ uint64_t tx_prt_bp : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_gmxx_tx_ovr_bp cvmx_gmxx_tx_ovr_bp_t;
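The HiGig2 requirement in the notes (EN<0> set to one and BP<0> cleared to zero when GMX*_HG2_CONTROL[HG2TX_EN]=0) translates to a small read-modify-write; a sketch under the same assumed accessors:

    /* Forcibly disable HW-automatic 802.3 pause generation on port 0,
       per the HiGig2 note above. */
    static void gmx_tx_disable_8023_pause(int interface)
    {
        cvmx_gmxx_tx_ovr_bp_t ovr;

        ovr.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
        ovr.s.en |= 1;          /* enable the override on port 0 */
        ovr.s.bp &= ~1ull;      /* but never assert backpressure */
        cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), ovr.u64);
    }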
+
+/**
+ * cvmx_gmx#_tx_pause_pkt_dmac
+ *
+ * GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field
+ *
+ */
+union cvmx_gmxx_tx_pause_pkt_dmac {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t dmac : 48; /**< The DMAC field placed in outbound pause pkts */
+#else
+ uint64_t dmac : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn30xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn31xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xxp2;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn50xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn61xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn63xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn63xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn66xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn68xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn68xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cnf71xx;
+};
+typedef union cvmx_gmxx_tx_pause_pkt_dmac cvmx_gmxx_tx_pause_pkt_dmac_t;
+
+/**
+ * cvmx_gmx#_tx_pause_pkt_type
+ *
+ * GMX_TX_PAUSE_PKT_TYPE = Packet Interface TX Pause Packet TYPE field
+ *
+ */
+union cvmx_gmxx_tx_pause_pkt_type {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_pause_pkt_type_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t type : 16; /**< The TYPE field placed in outbound pause pkts */
+#else
+ uint64_t type : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn30xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn31xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn38xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn38xxp2;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn50xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn52xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn52xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn56xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn56xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn58xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn58xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn61xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn63xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn63xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn66xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn68xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn68xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cnf71xx;
+};
+typedef union cvmx_gmxx_tx_pause_pkt_type cvmx_gmxx_tx_pause_pkt_type_t;
+
+/**
+ * cvmx_gmx#_tx_prts
+ *
+ * Common
+ *
+ *
+ * GMX_TX_PRTS = TX Ports
+ *
+ * Notes:
+ * * The value programmed for PRTS is the highest architected port
+ * number on the interface, plus 1. For example, if port 2 is the
+ * highest architected port, then the programmed value should be 3 since
+ * there are 3 ports in the system - 0, 1, and 2.
+ */
+union cvmx_gmxx_tx_prts {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_prts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t prts : 5; /**< Number of ports allowed on the interface
+ (SGMII/1000Base-X only) */
+#else
+ uint64_t prts : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_prts_s cn30xx;
+ struct cvmx_gmxx_tx_prts_s cn31xx;
+ struct cvmx_gmxx_tx_prts_s cn38xx;
+ struct cvmx_gmxx_tx_prts_s cn38xxp2;
+ struct cvmx_gmxx_tx_prts_s cn50xx;
+ struct cvmx_gmxx_tx_prts_s cn52xx;
+ struct cvmx_gmxx_tx_prts_s cn52xxp1;
+ struct cvmx_gmxx_tx_prts_s cn56xx;
+ struct cvmx_gmxx_tx_prts_s cn56xxp1;
+ struct cvmx_gmxx_tx_prts_s cn58xx;
+ struct cvmx_gmxx_tx_prts_s cn58xxp1;
+ struct cvmx_gmxx_tx_prts_s cn61xx;
+ struct cvmx_gmxx_tx_prts_s cn63xx;
+ struct cvmx_gmxx_tx_prts_s cn63xxp1;
+ struct cvmx_gmxx_tx_prts_s cn66xx;
+ struct cvmx_gmxx_tx_prts_s cn68xx;
+ struct cvmx_gmxx_tx_prts_s cn68xxp1;
+ struct cvmx_gmxx_tx_prts_s cnf71xx;
+};
+typedef union cvmx_gmxx_tx_prts cvmx_gmxx_tx_prts_t;
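Following the note's example directly (highest architected port 2 gives PRTS=3), a sketch under the same assumed accessors:

    /* Program the port count from the highest architected port number. */
    static void gmx_tx_set_prts(int interface, int highest_port)
    {
        cvmx_gmxx_tx_prts_t prts;

        prts.u64 = 0;
        prts.s.prts = highest_port + 1;     /* ports 0..highest_port */
        cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), prts.u64);
    }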
+
+/**
+ * cvmx_gmx#_tx_spi_ctl
+ *
+ * GMX_TX_SPI_CTL = Spi4 TX Modes
+ *
+ */
+union cvmx_gmxx_tx_spi_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t tpa_clr : 1; /**< TPA Clear Mode
+ Clear credit counter when status is satisfied */
+ uint64_t cont_pkt : 1; /**< Contiguous Packet Mode
+ Finish one packet before switching to another
+ Cannot be set in Spi4 pass-through mode */
+#else
+ uint64_t cont_pkt : 1;
+ uint64_t tpa_clr : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_spi_ctl_s cn38xx;
+ struct cvmx_gmxx_tx_spi_ctl_s cn38xxp2;
+ struct cvmx_gmxx_tx_spi_ctl_s cn58xx;
+ struct cvmx_gmxx_tx_spi_ctl_s cn58xxp1;
+};
+typedef union cvmx_gmxx_tx_spi_ctl cvmx_gmxx_tx_spi_ctl_t;
+
+/**
+ * cvmx_gmx#_tx_spi_drain
+ *
+ * GMX_TX_SPI_DRAIN = Drain out Spi TX FIFO
+ *
+ */
+union cvmx_gmxx_tx_spi_drain {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_drain_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t drain : 16; /**< Per port drain control
+ 0=Normal operation
+ 1=GMX TX will be popped, but no valid data will
+ be sent to SPX. Credits are correctly returned
+ to PKO. STX_IGN_CAL should be set to ignore
+ TPA and not stall due to back-pressure.
+ (PASS3 only) */
+#else
+ uint64_t drain : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_spi_drain_s cn38xx;
+ struct cvmx_gmxx_tx_spi_drain_s cn58xx;
+ struct cvmx_gmxx_tx_spi_drain_s cn58xxp1;
+};
+typedef union cvmx_gmxx_tx_spi_drain cvmx_gmxx_tx_spi_drain_t;
+
+/**
+ * cvmx_gmx#_tx_spi_max
+ *
+ * GMX_TX_SPI_MAX = RGMII TX Spi4 MAX
+ *
+ */
+union cvmx_gmxx_tx_spi_max {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_max_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t slice : 7; /**< Number of 16B blocks to transmit in a burst before
+ switching to the next port. SLICE does not always
+ limit the burst length transmitted by OCTEON.
+ Depending on the traffic pattern and
+ GMX_TX_SPI_ROUND programming, the next port could
+ be the same as the current port. In this case,
+ OCTEON may merge multiple sub-SLICE bursts into
+ one contiguous burst that is longer than SLICE
+ (as long as the burst does not cross a packet
+ boundary).
+ SLICE must be programmed to be >=
+ GMX_TX_SPI_THRESH[THRESH]
+ If SLICE==0, then the transmitter will tend to
+ send the complete packet. The port will only
+ switch if credits are exhausted or PKO cannot
+ keep up.
+ (90nm ONLY) */
+ uint64_t max2 : 8; /**< MAX2 (per Spi4.2 spec) */
+ uint64_t max1 : 8; /**< MAX1 (per Spi4.2 spec)
+ MAX1 >= GMX_TX_SPI_THRESH[THRESH] */
+#else
+ uint64_t max1 : 8;
+ uint64_t max2 : 8;
+ uint64_t slice : 7;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_spi_max_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t max2 : 8; /**< MAX2 (per Spi4.2 spec) */
+ uint64_t max1 : 8; /**< MAX1 (per Spi4.2 spec)
+ MAX1 >= GMX_TX_SPI_THRESH[THRESH] */
+#else
+ uint64_t max1 : 8;
+ uint64_t max2 : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_gmxx_tx_spi_max_cn38xx cn38xxp2;
+ struct cvmx_gmxx_tx_spi_max_s cn58xx;
+ struct cvmx_gmxx_tx_spi_max_s cn58xxp1;
+};
+typedef union cvmx_gmxx_tx_spi_max cvmx_gmxx_tx_spi_max_t;
+
+/**
+ * cvmx_gmx#_tx_spi_round#
+ *
+ * GMX_TX_SPI_ROUND = Controls SPI4 TX Arbitration
+ *
+ */
+union cvmx_gmxx_tx_spi_roundx {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_roundx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t round : 16; /**< Which Spi ports participate in each arbitration
+ round. Each bit corresponds to a spi port
+ - 0: this port will arb in this round
+ - 1: this port will not arb in this round
+ (90nm ONLY) */
+#else
+ uint64_t round : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_spi_roundx_s cn58xx;
+ struct cvmx_gmxx_tx_spi_roundx_s cn58xxp1;
+};
+typedef union cvmx_gmxx_tx_spi_roundx cvmx_gmxx_tx_spi_roundx_t;
+
+/**
+ * cvmx_gmx#_tx_spi_thresh
+ *
+ * GMX_TX_SPI_THRESH = RGMII TX Spi4 Transmit Threshold
+ *
+ *
+ * Notes:
+ * Note: zero will map to 0x20
+ *
+ * This will normally create Spi4 traffic bursts at least THRESH in length.
+ * If dclk > eclk, then this rule may not always hold and Octeon may split
+ * transfers into smaller bursts - some of which could be as short as 16B.
+ * Octeon will never violate the Spi4.2 spec and send a non-EOP burst that is
+ * not a multiple of 16B.
+ */
+union cvmx_gmxx_tx_spi_thresh {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_thresh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t thresh : 6; /**< Transmit threshold in 16B blocks - cannot be zero
+ THRESH <= TX_FIFO size (in non-passthrough mode)
+ THRESH <= TX_FIFO size-2 (in passthrough mode)
+ THRESH <= GMX_TX_SPI_MAX[MAX1]
+ THRESH <= GMX_TX_SPI_MAX[MAX2]; if not, then it is
+ possible for Octeon to send a Spi4 data burst of
+ MAX2 <= burst <= THRESH 16B ticks
+ GMX_TX_SPI_MAX[SLICE] must be programmed to be >=
+ THRESH */
+#else
+ uint64_t thresh : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_spi_thresh_s cn38xx;
+ struct cvmx_gmxx_tx_spi_thresh_s cn38xxp2;
+ struct cvmx_gmxx_tx_spi_thresh_s cn58xx;
+ struct cvmx_gmxx_tx_spi_thresh_s cn58xxp1;
+};
+typedef union cvmx_gmxx_tx_spi_thresh cvmx_gmxx_tx_spi_thresh_t;
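
The three SPI4 TX registers above are coupled by the stated invariants (THRESH <= MAX1/MAX2, SLICE >= THRESH). As an illustrative sketch only, not part of this file, a helper that keeps them consistent might look like this, assuming the CVMX_GMXX_TX_SPI_MAX/CVMX_GMXX_TX_SPI_THRESH address macros defined earlier in this header and the usual cvmx_read_csr/cvmx_write_csr accessors:

    /* Sketch: program GMX_TX_SPI_THRESH while preserving the documented
     * invariants THRESH <= MAX1, THRESH <= MAX2 and SLICE >= THRESH. */
    static void spi4_tx_set_thresh(int interface, unsigned int thresh)
    {
        cvmx_gmxx_tx_spi_max_t spi_max;
        cvmx_gmxx_tx_spi_thresh_t spi_thresh;

        spi_max.u64 = cvmx_read_csr(CVMX_GMXX_TX_SPI_MAX(interface));
        if (spi_max.s.max1 < thresh)
            spi_max.s.max1 = thresh;    /* MAX1 >= THRESH */
        if (spi_max.s.max2 < thresh)
            spi_max.s.max2 = thresh;    /* MAX2 >= THRESH */
        if (spi_max.s.slice && spi_max.s.slice < thresh)
            spi_max.s.slice = thresh;   /* SLICE >= THRESH (SLICE=0 sends whole packets) */
        cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface), spi_max.u64);

        spi_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TX_SPI_THRESH(interface));
        spi_thresh.s.thresh = thresh;   /* cannot be zero; zero maps to 0x20 */
        cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface), spi_thresh.u64);
    }
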
+
+/**
+ * cvmx_gmx#_tx_xaui_ctl
+ */
+union cvmx_gmxx_tx_xaui_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_xaui_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t hg_pause_hgi : 2; /**< HGI Field for HW generated HiGig pause packets
+ (XAUI mode only) */
+ uint64_t hg_en : 1; /**< Enable HiGig Mode
+ When HG_EN is set and GMX_RX_UDD_SKP[SKIP]=12
+ the interface is in HiGig/HiGig+ mode and the
+ following must be set:
+ GMX_RX_FRM_CTL[PRE_CHK] == 0
+ GMX_RX_UDD_SKP[FCSSEL] == 0
+ GMX_RX_UDD_SKP[SKIP] == 12
+ GMX_TX_APPEND[PREAMBLE] == 0
+ When HG_EN is set and GMX_RX_UDD_SKP[SKIP]=16
+ the interface is in HiGig2 mode and the
+ following must be set:
+ GMX_RX_FRM_CTL[PRE_CHK] == 0
+ GMX_RX_UDD_SKP[FCSSEL] == 0
+ GMX_RX_UDD_SKP[SKIP] == 16
+ GMX_TX_APPEND[PREAMBLE] == 0
+ GMX_PRT0_CBFC_CTL[RX_EN] == 0
+ GMX_PRT0_CBFC_CTL[TX_EN] == 0
+ (XAUI mode only) */
+ uint64_t reserved_7_7 : 1;
+ uint64_t ls_byp : 1; /**< Bypass the link status as determined by the XGMII
+ receiver and set the link status of the
+ transmitter to LS.
+ (XAUI mode only) */
+ uint64_t ls : 2; /**< Link Status
+ 0 = Link Ok
+ Link runs normally. RS passes MAC data to PCS
+ 1 = Local Fault
+ RS layer sends continuous remote fault
+ sequences.
+ 2 = Remote Fault
+ RS layer sends continuous idles sequences
+ 3 = Link Drain
+ RS layer drops full packets to allow GMX and
+ PKO to drain their FIFOs
+ (XAUI mode only) */
+ uint64_t reserved_2_3 : 2;
+ uint64_t uni_en : 1; /**< Enable Unidirectional Mode (IEEE Clause 66)
+ (XAUI mode only) */
+ uint64_t dic_en : 1; /**< Enable the deficit idle counter for IFG averaging
+ (XAUI mode only) */
+#else
+ uint64_t dic_en : 1;
+ uint64_t uni_en : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t ls : 2;
+ uint64_t ls_byp : 1;
+ uint64_t reserved_7_7 : 1;
+ uint64_t hg_en : 1;
+ uint64_t hg_pause_hgi : 2;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn52xx;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn52xxp1;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn56xx;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn56xxp1;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn61xx;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn63xx;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn63xxp1;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn66xx;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn68xx;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn68xxp1;
+ struct cvmx_gmxx_tx_xaui_ctl_s cnf71xx;
+};
+typedef union cvmx_gmxx_tx_xaui_ctl cvmx_gmxx_tx_xaui_ctl_t;
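
These unions are used read-modify-write through the u64 member. As one hedged example (a sketch, assuming the CVMX_GMXX_TX_XAUI_CTL(interface) address macro defined earlier in this header), forcing the transmitter into the Link Drain state before shutting an interface down could look like:

    /* Sketch: bypass the RX-derived link status and force LS=3 (Link Drain)
     * so the RS layer drops packets while GMX and PKO drain their FIFOs. */
    static void xaui_tx_drain(int interface)
    {
        cvmx_gmxx_tx_xaui_ctl_t xaui_ctl;

        xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
        xaui_ctl.s.ls_byp = 1;    /* use LS instead of the XGMII receiver status */
        xaui_ctl.s.ls = 3;        /* 3 = Link Drain */
        cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), xaui_ctl.u64);
    }
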
+
+/**
+ * cvmx_gmx#_xaui_ext_loopback
+ */
+union cvmx_gmxx_xaui_ext_loopback {
+ uint64_t u64;
+ struct cvmx_gmxx_xaui_ext_loopback_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t en : 1; /**< Loopback enable
+ Puts the packet interface in external loopback
+ mode on the XAUI bus in which the RX lines are
+ reflected on the TX lines.
+ (XAUI mode only) */
+ uint64_t thresh : 4; /**< Threshold on the TX FIFO
+ SW must only write the typical value. Any other
+ value will cause loopback mode not to function
+ correctly.
+ (XAUI mode only) */
+#else
+ uint64_t thresh : 4;
+ uint64_t en : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn52xx;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn52xxp1;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn56xx;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn56xxp1;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn61xx;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn63xx;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn63xxp1;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn66xx;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn68xx;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn68xxp1;
+ struct cvmx_gmxx_xaui_ext_loopback_s cnf71xx;
+};
+typedef union cvmx_gmxx_xaui_ext_loopback cvmx_gmxx_xaui_ext_loopback_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-gmxx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-gpio-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-gpio-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-gpio-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,843 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-gpio-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon gpio.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_GPIO_DEFS_H__
+#define __CVMX_GPIO_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GPIO_BIT_CFGX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 15)))))
+ cvmx_warn("CVMX_GPIO_BIT_CFGX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000800ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_GPIO_BIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000800ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_GPIO_BOOT_ENA CVMX_GPIO_BOOT_ENA_FUNC()
+static inline uint64_t CVMX_GPIO_BOOT_ENA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)))
+ cvmx_warn("CVMX_GPIO_BOOT_ENA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000008A8ull);
+}
+#else
+#define CVMX_GPIO_BOOT_ENA (CVMX_ADD_IO_SEG(0x00010700000008A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GPIO_CLK_GENX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_GPIO_CLK_GENX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000008C0ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_GPIO_CLK_GENX(offset) (CVMX_ADD_IO_SEG(0x00010700000008C0ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GPIO_CLK_QLMX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_GPIO_CLK_QLMX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000008E0ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_GPIO_CLK_QLMX(offset) (CVMX_ADD_IO_SEG(0x00010700000008E0ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_GPIO_DBG_ENA CVMX_GPIO_DBG_ENA_FUNC()
+static inline uint64_t CVMX_GPIO_DBG_ENA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)))
+ cvmx_warn("CVMX_GPIO_DBG_ENA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000008A0ull);
+}
+#else
+#define CVMX_GPIO_DBG_ENA (CVMX_ADD_IO_SEG(0x00010700000008A0ull))
+#endif
+#define CVMX_GPIO_INT_CLR (CVMX_ADD_IO_SEG(0x0001070000000898ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_GPIO_MULTI_CAST CVMX_GPIO_MULTI_CAST_FUNC()
+static inline uint64_t CVMX_GPIO_MULTI_CAST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_GPIO_MULTI_CAST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000008B0ull);
+}
+#else
+#define CVMX_GPIO_MULTI_CAST (CVMX_ADD_IO_SEG(0x00010700000008B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_GPIO_PIN_ENA CVMX_GPIO_PIN_ENA_FUNC()
+static inline uint64_t CVMX_GPIO_PIN_ENA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ cvmx_warn("CVMX_GPIO_PIN_ENA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000008B8ull);
+}
+#else
+#define CVMX_GPIO_PIN_ENA (CVMX_ADD_IO_SEG(0x00010700000008B8ull))
+#endif
+#define CVMX_GPIO_RX_DAT (CVMX_ADD_IO_SEG(0x0001070000000880ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_GPIO_TIM_CTL CVMX_GPIO_TIM_CTL_FUNC()
+static inline uint64_t CVMX_GPIO_TIM_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_GPIO_TIM_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000008A0ull);
+}
+#else
+#define CVMX_GPIO_TIM_CTL (CVMX_ADD_IO_SEG(0x00010700000008A0ull))
+#endif
+#define CVMX_GPIO_TX_CLR (CVMX_ADD_IO_SEG(0x0001070000000890ull))
+#define CVMX_GPIO_TX_SET (CVMX_ADD_IO_SEG(0x0001070000000888ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_GPIO_XBIT_CFGX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset >= 16) && (offset <= 23)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset >= 16) && (offset <= 23)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset >= 16) && (offset <= 23)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset >= 16) && (offset <= 19)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 16) && (offset <= 19)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset >= 16) && (offset <= 19))))))
+ cvmx_warn("CVMX_GPIO_XBIT_CFGX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000900ull) + ((offset) & 31) * 8 - 8*16;
+}
+#else
+#define CVMX_GPIO_XBIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000900ull) + ((offset) & 31) * 8 - 8*16)
+#endif
+
+/**
+ * cvmx_gpio_bit_cfg#
+ *
+ * Notes:
+ * Only the first 16 GPIO pins can generate interrupts; GPIO_XBIT_CFG16(17,18,19)[INT_EN] and [INT_TYPE]
+ * are unused and always read as zero.
+ */
+union cvmx_gpio_bit_cfgx {
+ uint64_t u64;
+ struct cvmx_gpio_bit_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t synce_sel : 2; /**< Selects the QLM clock output
+ x0=Normal GPIO output
+ 01=GPIO QLM clock selected by CSR GPIO_CLK_QLM0
+ 11=GPIO QLM clock selected by CSR GPIO_CLK_QLM1 */
+ uint64_t clk_gen : 1; /**< When TX_OE is set, GPIO pin becomes a clock */
+ uint64_t clk_sel : 2; /**< Selects which of the 4 GPIO clock generators */
+ uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */
+ uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */
+ uint64_t int_type : 1; /**< Type of interrupt
+ 0 = level (default)
+ 1 = rising edge */
+ uint64_t int_en : 1; /**< Bit mask to indicate which bits to raise interrupt */
+ uint64_t rx_xor : 1; /**< Invert the GPIO pin */
+ uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */
+#else
+ uint64_t tx_oe : 1;
+ uint64_t rx_xor : 1;
+ uint64_t int_en : 1;
+ uint64_t int_type : 1;
+ uint64_t fil_cnt : 4;
+ uint64_t fil_sel : 4;
+ uint64_t clk_sel : 2;
+ uint64_t clk_gen : 1;
+ uint64_t synce_sel : 2;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_gpio_bit_cfgx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */
+ uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */
+ uint64_t int_type : 1; /**< Type of interrupt
+ 0 = level (default)
+ 1 = rising edge */
+ uint64_t int_en : 1; /**< Bit mask to indicate which bits to raise interrupt */
+ uint64_t rx_xor : 1; /**< Invert the GPIO pin */
+ uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */
+#else
+ uint64_t tx_oe : 1;
+ uint64_t rx_xor : 1;
+ uint64_t int_en : 1;
+ uint64_t int_type : 1;
+ uint64_t fil_cnt : 4;
+ uint64_t fil_sel : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn30xx;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn31xx;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn38xx;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn38xxp2;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn50xx;
+ struct cvmx_gpio_bit_cfgx_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t clk_gen : 1; /**< When TX_OE is set, GPIO pin becomes a clock */
+ uint64_t clk_sel : 2; /**< Selects which of the 4 GPIO clock generators */
+ uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */
+ uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */
+ uint64_t int_type : 1; /**< Type of interrupt
+ 0 = level (default)
+ 1 = rising edge */
+ uint64_t int_en : 1; /**< Bit mask to indicate which bits to raise interrupt */
+ uint64_t rx_xor : 1; /**< Invert the GPIO pin */
+ uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */
+#else
+ uint64_t tx_oe : 1;
+ uint64_t rx_xor : 1;
+ uint64_t int_en : 1;
+ uint64_t int_type : 1;
+ uint64_t fil_cnt : 4;
+ uint64_t fil_sel : 4;
+ uint64_t clk_sel : 2;
+ uint64_t clk_gen : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } cn52xx;
+ struct cvmx_gpio_bit_cfgx_cn52xx cn52xxp1;
+ struct cvmx_gpio_bit_cfgx_cn52xx cn56xx;
+ struct cvmx_gpio_bit_cfgx_cn52xx cn56xxp1;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn58xx;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn58xxp1;
+ struct cvmx_gpio_bit_cfgx_s cn61xx;
+ struct cvmx_gpio_bit_cfgx_s cn63xx;
+ struct cvmx_gpio_bit_cfgx_s cn63xxp1;
+ struct cvmx_gpio_bit_cfgx_s cn66xx;
+ struct cvmx_gpio_bit_cfgx_s cn68xx;
+ struct cvmx_gpio_bit_cfgx_s cn68xxp1;
+ struct cvmx_gpio_bit_cfgx_s cnf71xx;
+};
+typedef union cvmx_gpio_bit_cfgx cvmx_gpio_bit_cfgx_t;
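
A minimal sketch of how this union is typically used, configuring one of the first 16 pins as a rising-edge interrupt source with the glitch filter enabled (CVMX_GPIO_BIT_CFGX is defined above; the filter values here are arbitrary illustration, not part of this file):

    /* Sketch: make GPIO pin <bit> (0-15) a filtered rising-edge IRQ input. */
    static void gpio_enable_edge_irq(int bit)
    {
        cvmx_gpio_bit_cfgx_t cfg;

        cfg.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(bit));
        cfg.s.tx_oe = 0;       /* input, not output */
        cfg.s.int_en = 1;      /* raise interrupts for this pin */
        cfg.s.int_type = 1;    /* 1 = rising edge, 0 = level */
        cfg.s.fil_cnt = 7;     /* consecutive samples to change state */
        cfg.s.fil_sel = 4;     /* global counter bit-select (sample rate) */
        cvmx_write_csr(CVMX_GPIO_BIT_CFGX(bit), cfg.u64);
    }
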
+
+/**
+ * cvmx_gpio_boot_ena
+ */
+union cvmx_gpio_boot_ena {
+ uint64_t u64;
+ struct cvmx_gpio_boot_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t boot_ena : 4; /**< Drive boot bus chip enables [7:4] on gpio [11:8] */
+ uint64_t reserved_0_7 : 8;
+#else
+ uint64_t reserved_0_7 : 8;
+ uint64_t boot_ena : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_gpio_boot_ena_s cn30xx;
+ struct cvmx_gpio_boot_ena_s cn31xx;
+ struct cvmx_gpio_boot_ena_s cn50xx;
+};
+typedef union cvmx_gpio_boot_ena cvmx_gpio_boot_ena_t;
+
+/**
+ * cvmx_gpio_clk_gen#
+ */
+union cvmx_gpio_clk_genx {
+ uint64_t u64;
+ struct cvmx_gpio_clk_genx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t n : 32; /**< Determines the frequency of the GPIO clk generator
+ NOTE: Fgpio_clk = Feclk * N / 2^32
+ N = (Fgpio_clk / Feclk) * 2^32
+ NOTE: writing N == 0 stops the clock generator
+ N should be <= 2^31-1. */
+#else
+ uint64_t n : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_gpio_clk_genx_s cn52xx;
+ struct cvmx_gpio_clk_genx_s cn52xxp1;
+ struct cvmx_gpio_clk_genx_s cn56xx;
+ struct cvmx_gpio_clk_genx_s cn56xxp1;
+ struct cvmx_gpio_clk_genx_s cn61xx;
+ struct cvmx_gpio_clk_genx_s cn63xx;
+ struct cvmx_gpio_clk_genx_s cn63xxp1;
+ struct cvmx_gpio_clk_genx_s cn66xx;
+ struct cvmx_gpio_clk_genx_s cn68xx;
+ struct cvmx_gpio_clk_genx_s cn68xxp1;
+ struct cvmx_gpio_clk_genx_s cnf71xx;
+};
+typedef union cvmx_gpio_clk_genx cvmx_gpio_clk_genx_t;
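
From the field description, N = (Fgpio_clk / Feclk) * 2^32, so programming a generator reduces to one divide. A hedged sketch, assuming the SDK's cvmx_clock_get_rate(CVMX_CLOCK_CORE) from cvmx-clock.h supplies Feclk:

    /* Sketch: run GPIO clock generator <gen> (0-3) at roughly freq_hz.
     * Writing N == 0 stops the generator; keep N <= 2^31 - 1. */
    static void gpio_clk_gen_setup(int gen, uint64_t freq_hz)
    {
        cvmx_gpio_clk_genx_t clk_gen;
        uint64_t eclk = cvmx_clock_get_rate(CVMX_CLOCK_CORE);   /* Feclk */

        clk_gen.u64 = 0;
        clk_gen.s.n = (freq_hz << 32) / eclk;   /* N = (Fgpio_clk/Feclk) * 2^32 */
        cvmx_write_csr(CVMX_GPIO_CLK_GENX(gen), clk_gen.u64);
    }
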
+
+/**
+ * cvmx_gpio_clk_qlm#
+ *
+ * Notes:
+ * QLM0(A) and QLM1(B) can be configured to use either QLM0 or QLM2 as the clock source.
+ * Clock speed output for different modes ...
+ *
+ * Speed With Speed with
+ * SERDES speed (Gbaud) DIV=0 (MHz) DIV=1 (MHz)
+ * **********************************************************
+ * 1.25 62.5 31.25
+ * 2.5 125 62.5
+ * 3.125 156.25 78.125
+ * 5.0 250 125
+ * 6.25 312.5 156.25
+ */
+union cvmx_gpio_clk_qlmx {
+ uint64_t u64;
+ struct cvmx_gpio_clk_qlmx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t qlm_sel : 3; /**< Selects which DLM to select from
+ x0 = select DLM0 as clock source
+ x1 = Disabled */
+ uint64_t reserved_3_7 : 5;
+ uint64_t div : 1; /**< Internal clock divider
+ 0=DIV2
+ 1=DIV4 */
+ uint64_t lane_sel : 2; /**< Selects which RX lane clock from QLMx to use as
+ the GPIO internal QLMx clock. The GPIO block can
+ support up to two unique clocks to send out any
+ GPIO pin as configured by $GPIO_BIT_CFG[SYNCE_SEL].
+ The clock can be either a divide-by-2 or a
+ divide-by-4 of the selected RX lane clock. */
+#else
+ uint64_t lane_sel : 2;
+ uint64_t div : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t qlm_sel : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_gpio_clk_qlmx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t qlm_sel : 2; /**< Selects which QLM to select from
+ 01 = select QLM0 as clock source
+ 1x = select QLM2 as clock source
+ 0 = Disabled */
+ uint64_t reserved_3_7 : 5;
+ uint64_t div : 1; /**< Internal clock divider
+ 0=DIV2
+ 1=DIV4 */
+ uint64_t lane_sel : 2; /**< Selects which RX lane clock from QLMx to use as
+ the GPIO internal QLMx clock. The GPIO block can
+ support up to two unique clocks to send out any
+ GPIO pin as configured by $GPIO_BIT_CFG[SYNCE_SEL].
+ The clock can be either a divide-by-2 or a
+ divide-by-4 of the selected RX lane clock. */
+#else
+ uint64_t lane_sel : 2;
+ uint64_t div : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t qlm_sel : 2;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_gpio_clk_qlmx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t div : 1; /**< Internal clock divider
+ 0=DIV2
+ 1=DIV4 */
+ uint64_t lane_sel : 2; /**< Selects which RX lane clock from QLM2 to use as
+ the GPIO internal QLMx clock. The GPIO block can
+ support up to two unique clocks to send out any
+ GPIO pin as configured by $GPIO_BIT_CFG[SYNCE_SEL].
+ The clock can be either a divide-by-2 or a
+ divide-by-4 of the selected RX lane clock. */
+#else
+ uint64_t lane_sel : 2;
+ uint64_t div : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn63xx;
+ struct cvmx_gpio_clk_qlmx_cn63xx cn63xxp1;
+ struct cvmx_gpio_clk_qlmx_cn61xx cn66xx;
+ struct cvmx_gpio_clk_qlmx_s cn68xx;
+ struct cvmx_gpio_clk_qlmx_s cn68xxp1;
+ struct cvmx_gpio_clk_qlmx_cn61xx cnf71xx;
+};
+typedef union cvmx_gpio_clk_qlmx cvmx_gpio_clk_qlmx_t;
+
+/**
+ * cvmx_gpio_dbg_ena
+ */
+union cvmx_gpio_dbg_ena {
+ uint64_t u64;
+ struct cvmx_gpio_dbg_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t dbg_ena : 21; /**< Enable the debug port to be driven on the gpio */
+#else
+ uint64_t dbg_ena : 21;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } s;
+ struct cvmx_gpio_dbg_ena_s cn30xx;
+ struct cvmx_gpio_dbg_ena_s cn31xx;
+ struct cvmx_gpio_dbg_ena_s cn50xx;
+};
+typedef union cvmx_gpio_dbg_ena cvmx_gpio_dbg_ena_t;
+
+/**
+ * cvmx_gpio_int_clr
+ *
+ * Notes:
+ * Only 16 of the 20 GPIOs support interrupts; GPIO_INT_CLR only applies to GPIO0-GPIO15.
+ *
+ */
+union cvmx_gpio_int_clr {
+ uint64_t u64;
+ struct cvmx_gpio_int_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t type : 16; /**< Clear the interrupt rising edge detector */
+#else
+ uint64_t type : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gpio_int_clr_s cn30xx;
+ struct cvmx_gpio_int_clr_s cn31xx;
+ struct cvmx_gpio_int_clr_s cn38xx;
+ struct cvmx_gpio_int_clr_s cn38xxp2;
+ struct cvmx_gpio_int_clr_s cn50xx;
+ struct cvmx_gpio_int_clr_s cn52xx;
+ struct cvmx_gpio_int_clr_s cn52xxp1;
+ struct cvmx_gpio_int_clr_s cn56xx;
+ struct cvmx_gpio_int_clr_s cn56xxp1;
+ struct cvmx_gpio_int_clr_s cn58xx;
+ struct cvmx_gpio_int_clr_s cn58xxp1;
+ struct cvmx_gpio_int_clr_s cn61xx;
+ struct cvmx_gpio_int_clr_s cn63xx;
+ struct cvmx_gpio_int_clr_s cn63xxp1;
+ struct cvmx_gpio_int_clr_s cn66xx;
+ struct cvmx_gpio_int_clr_s cn68xx;
+ struct cvmx_gpio_int_clr_s cn68xxp1;
+ struct cvmx_gpio_int_clr_s cnf71xx;
+};
+typedef union cvmx_gpio_int_clr cvmx_gpio_int_clr_t;
+
+/**
+ * cvmx_gpio_multi_cast
+ *
+ * Notes:
+ * GPIO<7:4> have the option of operating in GPIO Interrupt Multicast mode. In
+ * this mode, the PP GPIO interrupt (CIU_INT<0-7>_SUM0/CIU_INT<0-3>_SUM4[GPIO<7:4>]) values are
+ * stored per cnMIPS core.
+ * For GPIO<7:4> (x=4-7):
+ * When GPIO_MULTI_CAST[EN] = 1:
+ * When GPIO_BIT_CFGx[INT_EN]==1 & GPIO_BIT_CFGx[INT_TYPE]==1 (edge detection and interrupt enabled):
+ * * Reads to CIU_INT<0-7>_SUM0/<0-3>_SUM4[GPIO<x>] will return a unique interrupt state per
+ * cnMIPS core.
+ * * Reads to CIU_INT32/33_SUM0/4[GPIO<x>] will return the common GPIO<x>
+ * interrupt state.
+ * * Write of '1' to CIU_INT<0-7>_SUM0/<0-3>_SUM4[GPIO<x>] will clear the individual
+ * interrupt associated with the cnMIPS core.
+ * * Write of '1' to CIU_INT32/33_SUM0/4[GPIO<x>] will clear the common GPIO<x>
+ * interrupt state.
+ * * Write of '1' to GPIO_INT_CLR[TYPE<x>] will clear all
+ * CIU_INT*_SUM0/4[GPIO<x>] state across all cnMIPS cores and common GPIO<x> interrupt states.
+ * When GPIO_BIT_CFGx[INT_EN]==0 or GPIO_BIT_CFGx[INT_TYPE]==0,
+ * * i.e. level-triggered interrupt or interrupt not enabled: a write of '1' to CIU_INT*_SUM0/4[GPIO<x>]
+ * will have no effect.
+ * When GPIO_MULTI_CAST[EN] = 0:
+ * * Write of '1' to CIU_INT_SUM0/4[GPIO<x>] will have no effects, as this field is RO,
+ * backward compatible with o63.
+ * When GPIO_BIT_CFGx[INT_EN]==1 & GPIO_BIT_CFGx[INT_TYPE]==1 (edge detection and interrupt enabled):
+ * * Reads to CIU_INT*_SUM0/4[GPIO<x>] will return the common GPIO<X> interrupt state.
+ * * Write of '1' to GPIO_INT_CLR[TYPE<x>] will clear all
+ * CIU_INT*_SUM0/4[GPIO<x>] state across all cnMIPS cores and common GPIO<x> interrupt states.
+ * When GPIO_BIT_CFGx[INT_EN]==0 or GPIO_BIT_CFGx[INT_TYPE]==0,
+ * * i.e. level-triggered interrupt or interrupt not enabled: a write of '1' to CIU_INT*_SUM0/4[GPIO<x>]
+ * will have no effect.
+ *
+ * GPIO<15:8> and GPIO<3:0> will never be in multicast mode as those don't have per cnMIPS capabilities.
+ * For GPIO<y> (y=0-3,8-15):
+ * When GPIO_BIT_CFGx[INT_EN]==1 & GPIO_BIT_CFGx[INT_TYPE]==1 (edge detection and interrupt enabled):
+ * * Reads to CIU_INT*_SUM0/4[GPIO<y>] will return the common GPIO<y> interrupt state.
+ * * Write of '1' to GPIO_INT_CLR[TYPE<y>] will clear all CIU_INT*_SUM0/4[GPIO<y>] common GPIO<y>
+ * interrupt states.
+ * When GPIO_MULTI_CAST[EN] = 1:
+ * * Write of '1' to CIU_INT*_SUM0/4[GPIO<y>] will clear the common GPIO<y> interrupt state.
+ * When GPIO_MULTI_CAST[EN] = 0:
+ * * Write of '1' to CIU_INT*_SUM0/4[GPIO<y>] has no effect, as this field is RO,
+ * backward compatible with o63.
+ * When GPIO_BIT_CFGx[INT_EN]==0 or GPIO_BIT_CFGx[INT_TYPE]==0,
+ * * i.e. level-triggered interrupt or interrupt not enabled: a write of '1' to CIU_INT*_SUM0/4[GPIO<y>]
+ * will have no effect.
+ *
+ * Whenever there is mode change, (GPIO_BIT_CFGx[INT_EN] or GPIO_BIT_CFGx[INT_TYPE] or GPIO_MULTI_CAST[EN])
+ * software needs to write to $GPIO_INT_CLR to clear up all pending/stale interrupts.
+ */
+union cvmx_gpio_multi_cast {
+ uint64_t u64;
+ struct cvmx_gpio_multi_cast_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t en : 1; /**< Enable GPIO Interrupt Multicast mode
+ When EN is set, GPIO<7:4> will function in
+ multicast mode allowing these four GPIOs to
+ interrupt multi-cores.
+ Multicast functionality allows the GPIO to exist
+ as per cnMIPS interrupts as opposed to a global
+ interrupt. */
+#else
+ uint64_t en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_gpio_multi_cast_s cn61xx;
+ struct cvmx_gpio_multi_cast_s cnf71xx;
+};
+typedef union cvmx_gpio_multi_cast cvmx_gpio_multi_cast_t;
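
Per the notes above, software must clear stale interrupt state after any mode change. A minimal sketch (illustration only, not part of this file) of enabling multicast delivery; both CSR addresses are defined earlier in this header:

    /* Sketch: enable per-core (multicast) delivery of GPIO<7:4> interrupts,
     * then clear all 16 edge detectors as the mode-change note requires. */
    cvmx_gpio_multi_cast_t mcast;
    mcast.u64 = cvmx_read_csr(CVMX_GPIO_MULTI_CAST);
    mcast.s.en = 1;
    cvmx_write_csr(CVMX_GPIO_MULTI_CAST, mcast.u64);
    cvmx_write_csr(CVMX_GPIO_INT_CLR, 0xffff);
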
+
+/**
+ * cvmx_gpio_pin_ena
+ *
+ * Notes:
+ * GPIO0-GPIO17 have dedicated pins.
+ * GPIO18 shares a pin with UART (UART0_CTS_L/GPIO_18); GPIO18 is enabled when $GPIO_PIN_ENA[ENA18]=1
+ * GPIO19 shares a pin with UART (UART1_CTS_L/GPIO_19); GPIO19 is enabled when $GPIO_PIN_ENA[ENA19]=1
+ */
+union cvmx_gpio_pin_ena {
+ uint64_t u64;
+ struct cvmx_gpio_pin_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t ena19 : 1; /**< If 0, UART1_CTS_L/GPIO_19 pin is UART pin
+ If 1, UART1_CTS_L/GPIO_19 pin is GPIO19 pin */
+ uint64_t ena18 : 1; /**< If 0, UART0_CTS_L/GPIO_18 pin is UART pin
+ If 1, UART0_CTS_L/GPIO_18 pin is GPIO18 pin */
+ uint64_t reserved_0_17 : 18;
+#else
+ uint64_t reserved_0_17 : 18;
+ uint64_t ena18 : 1;
+ uint64_t ena19 : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_gpio_pin_ena_s cn66xx;
+};
+typedef union cvmx_gpio_pin_ena cvmx_gpio_pin_ena_t;
+
+/**
+ * cvmx_gpio_rx_dat
+ */
+union cvmx_gpio_rx_dat {
+ uint64_t u64;
+ struct cvmx_gpio_rx_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t dat : 24; /**< GPIO Read Data */
+#else
+ uint64_t dat : 24;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_gpio_rx_dat_s cn30xx;
+ struct cvmx_gpio_rx_dat_s cn31xx;
+ struct cvmx_gpio_rx_dat_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t dat : 16; /**< GPIO Read Data */
+#else
+ uint64_t dat : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn38xxp2;
+ struct cvmx_gpio_rx_dat_s cn50xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn52xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn52xxp1;
+ struct cvmx_gpio_rx_dat_cn38xx cn56xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn56xxp1;
+ struct cvmx_gpio_rx_dat_cn38xx cn58xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn58xxp1;
+ struct cvmx_gpio_rx_dat_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dat : 20; /**< GPIO Read Data */
+#else
+ uint64_t dat : 20;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn61xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn63xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn63xxp1;
+ struct cvmx_gpio_rx_dat_cn61xx cn66xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn68xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn68xxp1;
+ struct cvmx_gpio_rx_dat_cn61xx cnf71xx;
+};
+typedef union cvmx_gpio_rx_dat cvmx_gpio_rx_dat_t;
+
+/**
+ * cvmx_gpio_tim_ctl
+ */
+union cvmx_gpio_tim_ctl {
+ uint64_t u64;
+ struct cvmx_gpio_tim_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t sel : 4; /**< Selects the GPIO pin to route to TIM */
+#else
+ uint64_t sel : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_gpio_tim_ctl_s cn68xx;
+ struct cvmx_gpio_tim_ctl_s cn68xxp1;
+};
+typedef union cvmx_gpio_tim_ctl cvmx_gpio_tim_ctl_t;
+
+/**
+ * cvmx_gpio_tx_clr
+ */
+union cvmx_gpio_tx_clr {
+ uint64_t u64;
+ struct cvmx_gpio_tx_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t clr : 24; /**< Bit mask to indicate which GPIO_TX_DAT bits to set
+ to '0'. When read, CLR returns the GPIO_TX_DAT
+ storage. */
+#else
+ uint64_t clr : 24;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_gpio_tx_clr_s cn30xx;
+ struct cvmx_gpio_tx_clr_s cn31xx;
+ struct cvmx_gpio_tx_clr_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t clr : 16; /**< Bit mask to indicate which bits to drive to '0'. */
+#else
+ uint64_t clr : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn38xxp2;
+ struct cvmx_gpio_tx_clr_s cn50xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn52xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn52xxp1;
+ struct cvmx_gpio_tx_clr_cn38xx cn56xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn56xxp1;
+ struct cvmx_gpio_tx_clr_cn38xx cn58xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn58xxp1;
+ struct cvmx_gpio_tx_clr_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t clr : 20; /**< Bit mask to indicate which GPIO_TX_DAT bits to set
+ to '0'. When read, CLR returns the GPIO_TX_DAT
+ storage. */
+#else
+ uint64_t clr : 20;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn61xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn63xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn63xxp1;
+ struct cvmx_gpio_tx_clr_cn61xx cn66xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn68xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn68xxp1;
+ struct cvmx_gpio_tx_clr_cn61xx cnf71xx;
+};
+typedef union cvmx_gpio_tx_clr cvmx_gpio_tx_clr_t;
+
+/**
+ * cvmx_gpio_tx_set
+ */
+union cvmx_gpio_tx_set {
+ uint64_t u64;
+ struct cvmx_gpio_tx_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t set : 24; /**< Bit mask to indicate which GPIO_TX_DAT bits to set
+ to '1'. When read, SET returns the GPIO_TX_DAT
+ storage. */
+#else
+ uint64_t set : 24;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_gpio_tx_set_s cn30xx;
+ struct cvmx_gpio_tx_set_s cn31xx;
+ struct cvmx_gpio_tx_set_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t set : 16; /**< Bit mask to indicate which bits to drive to '1'. */
+#else
+ uint64_t set : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_gpio_tx_set_cn38xx cn38xxp2;
+ struct cvmx_gpio_tx_set_s cn50xx;
+ struct cvmx_gpio_tx_set_cn38xx cn52xx;
+ struct cvmx_gpio_tx_set_cn38xx cn52xxp1;
+ struct cvmx_gpio_tx_set_cn38xx cn56xx;
+ struct cvmx_gpio_tx_set_cn38xx cn56xxp1;
+ struct cvmx_gpio_tx_set_cn38xx cn58xx;
+ struct cvmx_gpio_tx_set_cn38xx cn58xxp1;
+ struct cvmx_gpio_tx_set_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t set : 20; /**< Bit mask to indicate which GPIO_TX_DAT bits to set
+ to '1'. When read, SET returns the GPIO_TX_DAT
+ storage. */
+#else
+ uint64_t set : 20;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn61xx;
+ struct cvmx_gpio_tx_set_cn38xx cn63xx;
+ struct cvmx_gpio_tx_set_cn38xx cn63xxp1;
+ struct cvmx_gpio_tx_set_cn61xx cn66xx;
+ struct cvmx_gpio_tx_set_cn38xx cn68xx;
+ struct cvmx_gpio_tx_set_cn38xx cn68xxp1;
+ struct cvmx_gpio_tx_set_cn61xx cnf71xx;
+};
+typedef union cvmx_gpio_tx_set cvmx_gpio_tx_set_t;
+
+/**
+ * cvmx_gpio_xbit_cfg#
+ *
+ * Notes:
+ * Only the first 16 GPIO pins can generate interrupts; GPIO_XBIT_CFG16(17,18,19)[INT_EN] and [INT_TYPE]
+ * are unused and always read as zero.
+ */
+union cvmx_gpio_xbit_cfgx {
+ uint64_t u64;
+ struct cvmx_gpio_xbit_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t synce_sel : 2; /**< Selects the QLM clock output
+ x0=Normal GPIO output
+ 01=GPIO QLM clock selected by CSR GPIO_CLK_QLM0
+ 11=GPIO QLM clock selected by CSR GPIO_CLK_QLM1 */
+ uint64_t clk_gen : 1; /**< When TX_OE is set, GPIO pin becomes a clock */
+ uint64_t clk_sel : 2; /**< Selects which of the 4 GPIO clock generators */
+ uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */
+ uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */
+ uint64_t int_type : 1; /**< Type of interrupt
+ 0 = level (default)
+ 1 = rising edge */
+ uint64_t int_en : 1; /**< Bit mask to indicate which bits to raise interrupt */
+ uint64_t rx_xor : 1; /**< Invert the GPIO pin */
+ uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */
+#else
+ uint64_t tx_oe : 1;
+ uint64_t rx_xor : 1;
+ uint64_t int_en : 1;
+ uint64_t int_type : 1;
+ uint64_t fil_cnt : 4;
+ uint64_t fil_sel : 4;
+ uint64_t clk_sel : 2;
+ uint64_t clk_gen : 1;
+ uint64_t synce_sel : 2;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_gpio_xbit_cfgx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */
+ uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rx_xor : 1; /**< Invert the GPIO pin */
+ uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */
+#else
+ uint64_t tx_oe : 1;
+ uint64_t rx_xor : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t fil_cnt : 4;
+ uint64_t fil_sel : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn30xx;
+ struct cvmx_gpio_xbit_cfgx_cn30xx cn31xx;
+ struct cvmx_gpio_xbit_cfgx_cn30xx cn50xx;
+ struct cvmx_gpio_xbit_cfgx_s cn61xx;
+ struct cvmx_gpio_xbit_cfgx_s cn66xx;
+ struct cvmx_gpio_xbit_cfgx_s cnf71xx;
+};
+typedef union cvmx_gpio_xbit_cfgx cvmx_gpio_xbit_cfgx_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-gpio-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-gpio.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-gpio.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-gpio.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,186 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * General Purpose IO interface.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_GPIO_H__
+#define __CVMX_GPIO_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* CSR typedefs have been moved to cvmx-gpio-defs.h */
+
+/**
+ * Clear the interrupt rising edge detector for the supplied
+ * pins in the mask. On chips with more than 16 GPIO pins, the
+ * pins above 15 cannot be used for interrupts.
+ *
+ * @param clear_mask Mask of pins to clear
+ */
+static inline void cvmx_gpio_interrupt_clear(uint16_t clear_mask)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN61XX))
+ {
+ cvmx_gpio_multi_cast_t multi_cast;
+ cvmx_gpio_bit_cfgx_t gpio_bit;
+ int core = cvmx_get_core_num();
+
+ multi_cast.u64 = cvmx_read_csr(CVMX_GPIO_MULTI_CAST);
+ gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(core));
+
+ /* If Multicast mode is enabled, and GPIO interrupt is enabled for
+ edge detection, then GPIO<4..7> interrupts are per core */
+ if (multi_cast.s.en && gpio_bit.s.int_en && gpio_bit.s.int_type)
+ {
+ /* Clear GPIO<4..7> per core */
+ cvmx_ciu_intx_sum0_t ciu_sum0;
+ ciu_sum0.u64 = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core * 2));
+ ciu_sum0.s.gpio = clear_mask & 0xf0;
+ cvmx_write_csr(CVMX_CIU_INTX_SUM0(core * 2), ciu_sum0.u64);
+
+ /* Clear other GPIO pins for all cores. */
+ cvmx_write_csr(CVMX_GPIO_INT_CLR, (clear_mask & ~0xf0));
+ return;
+ }
+ }
+ /* Clear GPIO pins state across all cores and common interrupt states. */
+ cvmx_gpio_int_clr_t gpio_int_clr;
+ gpio_int_clr.u64 = 0;
+ gpio_int_clr.s.type = clear_mask;
+ cvmx_write_csr(CVMX_GPIO_INT_CLR, gpio_int_clr.u64);
+}
+
+/**
+ * GPIO Output Pin
+ *
+ * @param bit The GPIO to use
+ * @param mode Drive GPIO as output pin or not.
+ *
+ */
+static inline void cvmx_gpio_cfg(int bit, int mode)
+{
+ if (bit > 15 && bit < 20)
+ {
+ /* CN61XX/CN66XX have 20 GPIO pins; only the first 16 can generate interrupts. */
+ if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX))
+ {
+ cvmx_gpio_xbit_cfgx_t gpio_xbit;
+ gpio_xbit.u64 = cvmx_read_csr(CVMX_GPIO_XBIT_CFGX(bit));
+ if (mode)
+ gpio_xbit.s.tx_oe = 1;
+ else
+ gpio_xbit.s.tx_oe = 0;
+ cvmx_write_csr(CVMX_GPIO_XBIT_CFGX(bit), gpio_xbit.u64);
+ }
+ else
+ cvmx_dprintf("cvmx_gpio_cfg: Invalid GPIO bit(%d)\n", bit);
+ }
+ else
+ {
+ cvmx_gpio_bit_cfgx_t gpio_bit;
+ gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(bit));
+ if (mode)
+ gpio_bit.s.tx_oe = 1;
+ else
+ gpio_bit.s.tx_oe = 0;
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(bit), gpio_bit.u64);
+ }
+}
+
+/**
+ * GPIO Read Data
+ *
+ * @return Status of the GPIO pins
+ */
+static inline uint32_t cvmx_gpio_read(void)
+{
+ cvmx_gpio_rx_dat_t gpio_rx_dat;
+ gpio_rx_dat.u64 = cvmx_read_csr(CVMX_GPIO_RX_DAT);
+ return gpio_rx_dat.s.dat;
+}
+
+
+/**
+ * GPIO Clear pin
+ *
+ * @param clear_mask Bit mask to indicate which bits to drive to '0'.
+ */
+static inline void cvmx_gpio_clear(uint32_t clear_mask)
+{
+ cvmx_gpio_tx_clr_t gpio_tx_clr;
+ gpio_tx_clr.u64 = 0;
+ gpio_tx_clr.s.clr = clear_mask;
+ cvmx_write_csr(CVMX_GPIO_TX_CLR, gpio_tx_clr.u64);
+}
+
+
+/**
+ * GPIO Set pin
+ *
+ * @param set_mask Bit mask to indicate which bits to drive to '1'.
+ */
+static inline void cvmx_gpio_set(uint32_t set_mask)
+{
+ cvmx_gpio_tx_set_t gpio_tx_set;
+ gpio_tx_set.u64 = 0;
+ gpio_tx_set.s.set = set_mask;
+ cvmx_write_csr(CVMX_GPIO_TX_SET, gpio_tx_set.u64);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-gpio.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
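
The inline helpers in cvmx-gpio.h above cover the whole output path end to end. A short usage sketch (pin 3 is an arbitrary choice for illustration):

    uint32_t pins;

    cvmx_gpio_cfg(3, 1);        /* drive pin 3 as an output (tx_oe = 1) */
    cvmx_gpio_set(1u << 3);     /* drive it high via GPIO_TX_SET */
    cvmx_gpio_clear(1u << 3);   /* drive it low via GPIO_TX_CLR */
    pins = cvmx_gpio_read();    /* sample all pins from GPIO_RX_DAT */
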
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-board.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-board.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-board.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1650 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2011 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Helper functions to abstract board specific data about
+ * network ports from the rest of the cvmx-helper files.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-bootinfo.h>
+#include <asm/octeon/cvmx-smix-defs.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-asxx-defs.h>
+#include <asm/octeon/cvmx-mdio.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+#include <asm/octeon/cvmx-helper-board.h>
+#include <asm/octeon/cvmx-twsi.h>
+#else
+#include "cvmx.h"
+#include "cvmx-app-init.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-twsi.h"
+#include "cvmx-mdio.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-util.h"
+#include "cvmx-helper-board.h"
+#include "cvmx-gpio.h"
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#ifdef __U_BOOT__
+# include <libfdt.h>
+#else
+# include "libfdt/libfdt.h"
+#endif
+#endif
+#include "cvmx-swap.h"
+#endif
+
+/**
+ * cvmx_override_board_link_get(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the process of
+ * talking to a PHY to determine link speed. It is called every
+ * time a PHY must be polled for link status. Users should set
+ * this pointer to a function before calling any cvmx-helper
+ * operations.
+ */
+CVMX_SHARED cvmx_helper_link_info_t (*cvmx_override_board_link_get)(int ipd_port) = NULL;
+
+#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && (!defined(__FreeBSD__) || !defined(_KERNEL))
+
+static void cvmx_retry_i2c_write(int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes, uint64_t data)
+{
+ int tries = 3;
+ int r;
+ do {
+ r = cvmx_twsix_write_ia(twsi_id, dev_addr, internal_addr, num_bytes, ia_width_bytes, data);
+ } while (tries-- > 0 && r < 0);
+}
+
+static int __pip_eth_node(const void *fdt_addr, int aliases, int ipd_port)
+{
+ char name_buffer[20];
+ const char *pip_path;
+ int pip, iface, eth;
+ int interface_num = cvmx_helper_get_interface_num(ipd_port);
+ int interface_index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ pip_path = fdt_getprop(fdt_addr, aliases, "pip", NULL);
+ if (!pip_path)
+ {
+ cvmx_dprintf("ERROR: pip path not found in device tree\n");
+ return -1;
+ }
+ pip = fdt_path_offset(fdt_addr, pip_path);
+ if (pip < 0)
+ {
+ cvmx_dprintf("ERROR: pip not found in device tree\n");
+ return -1;
+ }
+#ifdef __U_BOOT__
+ sprintf(name_buffer, "interface@%d", interface_num);
+#else
+ snprintf(name_buffer, sizeof(name_buffer), "interface@%d", interface_num);
+#endif
+ iface = fdt_subnode_offset(fdt_addr, pip, name_buffer);
+ if (iface < 0)
+ {
+ cvmx_dprintf("ERROR : pip intf %d not found in device tree \n",
+ interface_num);
+ return -1;
+ }
+#ifdef __U_BOOT__
+ sprintf(name_buffer, "ethernet@%x", interface_index);
+#else
+ snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", interface_index);
+#endif
+ eth = fdt_subnode_offset(fdt_addr, iface, name_buffer);
+ if (eth < 0)
+ {
+ cvmx_dprintf("ERROR : pip interface@%d ethernet@%d not found in device "
+ "tree\n", interface_num, interface_index);
+ return -1;
+ }
+ return eth;
+}
+
+static int __mix_eth_node(const void *fdt_addr, int aliases, int interface_index)
+{
+ char name_buffer[20];
+ const char *mix_path;
+ int mix;
+
+#ifdef __U_BOOT__
+ sprintf(name_buffer, "mix%d", interface_index);
+#else
+ snprintf(name_buffer, sizeof(name_buffer), "mix%d", interface_index);
+#endif
+ mix_path = fdt_getprop(fdt_addr, aliases, name_buffer, NULL);
+ if (!mix_path)
+ {
+ cvmx_dprintf("ERROR: mix%d path not found in device tree\n",interface_index);
+ }
+ mix = fdt_path_offset(fdt_addr, mix_path);
+ if (mix < 0)
+ {
+ cvmx_dprintf("ERROR: %s not found in device tree\n", mix_path);
+ return -1;
+ }
+ return mix;
+}
+
+typedef struct cvmx_phy_info
+{
+ int phy_addr;
+ int direct_connect;
+ cvmx_phy_type_t phy_type;
+} cvmx_phy_info_t;
+
+
+static int __mdiobus_addr_to_unit(uint32_t addr)
+{
+ int unit = (addr >> 7) & 3;
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+ unit >>= 1;
+ return unit;
+}
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. The phy address is obtained from the device tree.
+ *
+ * @param ipd_port Octeon IPD port to get the MII address for.
+ *
+ * @return MII PHY address and bus number or -1.
+ */
+
+static cvmx_phy_info_t __get_phy_info_from_dt(int ipd_port)
+{
+ const void *fdt_addr = CASTPTR(const void *, cvmx_sysinfo_get()->fdt_addr);
+ uint32_t *phy_handle;
+ int aliases, eth, phy, phy_parent, phandle, ret;
+ cvmx_phy_info_t phy_info;
+ int mdio_unit=-1;
+ const char *phy_compatible_str;
+ uint32_t *phy_addr_ptr;
+
+ phy_info.phy_addr = -1;
+ phy_info.direct_connect = -1;
+ phy_info.phy_type = (cvmx_phy_type_t) -1;
+
+ if (!fdt_addr)
+ {
+ cvmx_dprintf("No device tree found.\n");
+ return phy_info;
+ }
+ aliases = fdt_path_offset(fdt_addr, "/aliases");
+ if (aliases < 0) {
+ cvmx_dprintf("Error: No /aliases node in device tree.\n");
+ return phy_info;
+ }
+ if (ipd_port < 0)
+ {
+ int interface_index = ipd_port - CVMX_HELPER_BOARD_MGMT_IPD_PORT;
+ eth = __mix_eth_node(fdt_addr, aliases, interface_index) ;
+ }
+ else
+ {
+ eth = __pip_eth_node(fdt_addr, aliases, ipd_port);
+ }
+ if (eth < 0)
+ {
+ cvmx_dprintf("ERROR : cannot find interface for ipd_port=%d\n", ipd_port);
+ return phy_info;
+ }
+ /* Get handle to phy */
+ phy_handle = (uint32_t *) fdt_getprop(fdt_addr, eth, "phy-handle", NULL);
+ if (!phy_handle)
+ {
+ cvmx_dprintf("ERROR : phy handle not found in device tree ipd_port=%d"
+ "\n", ipd_port);
+ return phy_info;
+ }
+ phandle = cvmx_be32_to_cpu(*phy_handle);
+ phy = fdt_node_offset_by_phandle(fdt_addr, phandle);
+ if (phy < 0)
+ {
+ cvmx_dprintf("ERROR : cannot find phy for ipd_port=%d ret=%d\n",
+ ipd_port, phy);
+ return phy_info;
+ }
+ phy_compatible_str = (const char *) fdt_getprop(fdt_addr, phy,
+ "compatible", NULL);
+ if (!phy_compatible_str)
+ {
+ cvmx_dprintf("ERROR : no compatible prop in phy\n");
+ return phy_info;
+ }
+ if (memcmp("marvell", phy_comaptible_str, strlen("marvell")) == 0)
+ {
+ phy_info.phy_type = MARVELL_GENERIC_PHY;
+ }
+ else if (memcmp("broadcom", phy_comaptible_str, strlen("broadcom")) == 0)
+ {
+ phy_info.phy_type = BROADCOM_GENERIC_PHY;
+ }
+ else
+ {
+ phy_info.phy_type = -1;
+ }
+
+ /* Check if the PHY parent is the Octeon MDIO bus. Some boards are connected
+ through a MUX, and for them direct_connect will be 0 */
+ phy_parent = fdt_parent_offset(fdt_addr, phy);
+ if (phy_parent < 0)
+ {
+ cvmx_dprintf("ERROR : cannot find phy parent for ipd_port=%d ret=%d\n",
+ ipd_port, phy_parent);
+ return phy_info;
+ }
+ ret = fdt_node_check_compatible(fdt_addr, phy_parent,
+ "cavium,octeon-3860-mdio");
+ if (ret == 0)
+ {
+ phy_info.direct_connect = 1;
+ uint32_t *mdio_reg_base = (uint32_t *) fdt_getprop(fdt_addr, phy_parent,"reg",0);
+ if (mdio_reg_base == 0)
+ {
+ cvmx_dprintf("ERROR : unable to get reg property in phy mdio\n");
+ return phy_info;
+ }
+ mdio_unit = __mdiobus_addr_to_unit(mdio_reg_base[1]);
+ //cvmx_dprintf("phy parent=%s reg_base=%08x unit=%d \n",
+ // fdt_get_name(fdt_addr,phy_parent, NULL), mdio_reg_base[1], mdio_unit);
+ }
+ else
+ {
+ phy_info.direct_connect = 0;
+ /* The PHY is not directly connected to the Octeon MDIO bus.
+ SE doesn't have abstractions for MDIO MUXes or MDIO MUX drivers,
+ so the non-direct cases need board-specific code.
+ For now the MDIO unit defaults to 1.
+ */
+ mdio_unit = 1;
+ }
+
+ phy_addr_ptr = (uint32_t *) fdt_getprop(fdt_addr, phy, "reg", NULL);
+ phy_info.phy_addr = cvmx_be32_to_cpu(*phy_addr_ptr) | mdio_unit << 8;
+ return phy_info;
+
+}
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. The phy address is obtained from the device tree.
+ *
+ * @param ipd_port Octeon IPD port to get the MII address for.
+ *
+ * @return MII PHY address and bus number or -1.
+ */
+
+int cvmx_helper_board_get_mii_address_from_dt(int ipd_port)
+{
+ cvmx_phy_info_t phy_info = __get_phy_info_from_dt(ipd_port);
+ return phy_info.phy_addr;
+}
+#endif
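
Since __get_phy_info_from_dt() folds the MDIO bus number into bits <15:8> of the returned address (mdio_unit << 8), a caller has to split the value back apart before talking to the PHY. A hedged sketch (board_phy_id_high is a hypothetical helper, not part of this file) using cvmx_mdio_read() from cvmx-mdio.h, included above:

    /* Sketch: read the PHYID1 register (reg 2) of the PHY attached to a
     * port whose address came from the device tree. */
    static int board_phy_id_high(int ipd_port)
    {
        int addr = cvmx_helper_board_get_mii_address_from_dt(ipd_port);
        if (addr < 0)
            return -1;
        return cvmx_mdio_read((addr >> 8) & 0xff,   /* MDIO bus, bits <15:8> */
                              addr & 0xff,          /* PHY address on that bus */
                              2);                   /* PHYID1 */
    }
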
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. A result of -1 means there isn't a MII capable PHY
+ * connected to this port. On chips supporting multiple MII
+ * busses the bus number is encoded in bits <15:8>.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @param ipd_port Octeon IPD port to get the MII address for.
+ *
+ * @return MII PHY address and bus number or -1.
+ */
+int cvmx_helper_board_get_mii_address(int ipd_port)
+{
+ /*
+ * Board types we have to know at compile-time.
+ */
+#ifdef OCTEON_BOARD_CAPK_0100ND
+ switch (ipd_port) {
+ case 0:
+ return 2;
+ case 1:
+ return 3;
+ case 2:
+ /* XXX Switch PHY? */
+ return -1;
+ default:
+ return -1;
+ }
+#endif
+
+ /*
+ * For board types we can determine at runtime.
+ */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ return -1;
+#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && (!defined(__FreeBSD__) || !defined(_KERNEL))
+ if (cvmx_sysinfo_get()->fdt_addr)
+ {
+ cvmx_phy_info_t phy_info = __get_phy_info_from_dt(ipd_port);
+ //cvmx_dprintf("ipd_port=%d phy_addr=%d\n", ipd_port, phy_info.phy_addr);
+ if (phy_info.phy_addr >= 0) return phy_info.phy_addr;
+ }
+#endif
+ switch (cvmx_sysinfo_get()->board_type)
+ {
+ case CVMX_BOARD_TYPE_SIM:
+ /* Simulator doesn't have MII */
+ return -1;
+#if !defined(OCTEON_VENDOR_GEFES)
+ case CVMX_BOARD_TYPE_EBT5800:
+ case CVMX_BOARD_TYPE_NICPRO2:
+#endif
+ case CVMX_BOARD_TYPE_EBT3000:
+ case CVMX_BOARD_TYPE_THUNDER:
+ /* Interface 0 is SPI4, interface 1 is RGMII */
+ if ((ipd_port >= 16) && (ipd_port < 20))
+ return ipd_port - 16;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_LANAI2_A:
+ if (ipd_port == 0)
+ return 0;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_LANAI2_U:
+ case CVMX_BOARD_TYPE_LANAI2_G:
+ if (ipd_port == 0)
+ return 0x1c;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_KODAMA:
+ case CVMX_BOARD_TYPE_EBH3100:
+ case CVMX_BOARD_TYPE_HIKARI:
+ case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+#if !defined(OCTEON_VENDOR_GEFES)
+ case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
+#endif
+ /* Port 0 is WAN connected to a PHY, Port 1 is GMII connected to a
+ switch */
+ if (ipd_port == 0)
+ return 4;
+ else if (ipd_port == 1)
+ return 9;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_EBH3000:
+ /* Board has dual SPI4 and no PHYs */
+ return -1;
+ case CVMX_BOARD_TYPE_EBT5810:
+ /* Board has 10g PHYs hooked up to the MII controller on the
+ ** IXF18201 MAC. The 10G PHYs use clause 45 MDIO which the CN58XX
+ ** does not support. All MII accesses go through the IXF part. */
+ return -1;
+ case CVMX_BOARD_TYPE_EBH5200:
+ case CVMX_BOARD_TYPE_EBH5201:
+ case CVMX_BOARD_TYPE_EBT5200:
+ /* Board has 2 management ports */
+ if ((ipd_port >= CVMX_HELPER_BOARD_MGMT_IPD_PORT) && (ipd_port < (CVMX_HELPER_BOARD_MGMT_IPD_PORT + 2)))
+ return ipd_port - CVMX_HELPER_BOARD_MGMT_IPD_PORT;
+ /* Board has 4 SGMII ports. The PHYs start right after the MII
+ ports MII0 = 0, MII1 = 1, SGMII = 2-5 */
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ return ipd_port+2;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_EBH5600:
+ case CVMX_BOARD_TYPE_EBH5601:
+ case CVMX_BOARD_TYPE_EBH5610:
+ /* Board has 1 management port */
+ if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+ return 0;
+ /* Board has 8 SGMII ports. 4 connect out, two connect to a switch,
+ and 2 loop to each other */
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ return ipd_port+1;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_EBT5600:
+ /* Board has 1 management port */
+ if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+ return 0;
+ /* Board has 1 XAUI port connected to a switch. */
+ return -1;
+ case CVMX_BOARD_TYPE_EBB5600:
+ {
+ static unsigned char qlm_switch_addr = 0;
+
+ /* Board has 1 management port */
+ if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+ return 0;
+
+ /* Board has 8 SGMII ports. 4 connected QLM1, 4 connected QLM3 */
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ {
+ if (qlm_switch_addr != 0x3)
+ {
+ qlm_switch_addr = 0x3; /* QLM1 */
+ cvmx_twsix_write_ia(0, 0x71, 0, 1, 1, qlm_switch_addr);
+ cvmx_wait_usec(11000); /* Let the write complete */
+ }
+ return ipd_port+1 + (1<<8);
+ }
+ else if ((ipd_port >= 16) && (ipd_port < 20))
+ {
+ if (qlm_switch_addr != 0xC)
+ {
+ qlm_switch_addr = 0xC; /* QLM3 */
+ cvmx_twsix_write_ia(0, 0x71, 0, 1, 1, qlm_switch_addr);
+ cvmx_wait_usec(11000); /* Let the write complete */
+ }
+ return ipd_port-16+1 + (1<<8);
+ }
+ else
+ return -1;
+ }
+ case CVMX_BOARD_TYPE_EBB6300:
+ /* Board has 2 management ports */
+ if ((ipd_port >= CVMX_HELPER_BOARD_MGMT_IPD_PORT) && (ipd_port < (CVMX_HELPER_BOARD_MGMT_IPD_PORT + 2)))
+ return ipd_port - CVMX_HELPER_BOARD_MGMT_IPD_PORT + 4;
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ return ipd_port + 1 + (1<<8);
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_EBB6800:
+ /* Board has 1 management port */
+ if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+ return 6;
+ if (ipd_port >= 0x800 && ipd_port < 0x900) /* QLM 0*/
+ return 0x101 + ((ipd_port >> 4) & 3); /* SMI 1*/
+ if (ipd_port >= 0xa00 && ipd_port < 0xb00) /* QLM 2*/
+ return 0x201 + ((ipd_port >> 4) & 3); /* SMI 2*/
+ if (ipd_port >= 0xb00 && ipd_port < 0xc00) /* QLM 3*/
+ return 0x301 + ((ipd_port >> 4) & 3); /* SMI 3*/
+ if (ipd_port >= 0xc00 && ipd_port < 0xd00) /* QLM 4*/
+ return 0x001 + ((ipd_port >> 4) & 3); /* SMI 0*/
+ return -1;
+ case CVMX_BOARD_TYPE_EP6300C:
+ if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+ return 0x01;
+ if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT+1)
+ return 0x02;
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+ {
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int mode = cvmx_helper_interface_get_mode(interface);
+ if (mode == CVMX_HELPER_INTERFACE_MODE_XAUI)
+ return ipd_port;
+ else if ((ipd_port >= 0) && (ipd_port < 4))
+ return ipd_port + 3;
+ else
+ return -1;
+ }
+#endif
+ break;
+ case CVMX_BOARD_TYPE_CUST_NB5:
+ if (ipd_port == 2)
+ return 4;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_NIC_XLE_4G:
+ /* Board has 4 SGMII ports, connected to QLM3 (interface 1) */
+ if ((ipd_port >= 16) && (ipd_port < 20))
+ return ipd_port - 16 + 1;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_NIC_XLE_10G:
+ case CVMX_BOARD_TYPE_NIC10E:
+ return -1; /* We don't use clause 45 MDIO for anything */
+ case CVMX_BOARD_TYPE_NIC4E:
+ if (ipd_port >= 0 && ipd_port <= 3)
+ return (ipd_port + 0x1f) & 0x1f;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_NIC2E:
+ if (ipd_port >= 0 && ipd_port <= 1)
+ return (ipd_port + 1);
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_REDWING:
+ return -1; /* No PHYs connected to Octeon */
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ return -1; /* No PHYs are connected to Octeon, everything is through switch */
+ case CVMX_BOARD_TYPE_CUST_WSX16:
+ if (ipd_port >= 0 && ipd_port <= 3)
+ return ipd_port;
+ else if (ipd_port >= 16 && ipd_port <= 19)
+ return ipd_port - 16 + 4;
+ else
+ return -1;
+
+ /* Private vendor-defined boards. */
+#if defined(OCTEON_VENDOR_LANNER)
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR955:
+ /* Interface 1 is 12 BCM5482S PHYs. */
+ if ((ipd_port >= 16) && (ipd_port < 28))
+ return ipd_port - 16;
+ return -1;
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
+ if ((ipd_port >= CVMX_HELPER_BOARD_MGMT_IPD_PORT) && (ipd_port < (CVMX_HELPER_BOARD_MGMT_IPD_PORT + 2)))
+ return (ipd_port - CVMX_HELPER_BOARD_MGMT_IPD_PORT) + 0x81;
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ return ipd_port;
+ return -1;
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR320:
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR321X:
+ /* Port 0 is a Marvell 88E6161 switch, ports 1 and 2 are Marvell
+ 88E1111 interfaces. */
+ switch (ipd_port) {
+ case 0:
+ return 16;
+ case 1:
+ return 1;
+ case 2:
+ return 2;
+ default:
+ return -1;
+ }
+#endif
+#if defined(OCTEON_VENDOR_UBIQUITI)
+ case CVMX_BOARD_TYPE_CUST_UBIQUITI_E100:
+ if (ipd_port > 2)
+ return -1;
+ return (7 - ipd_port);
+#endif
+#if defined(OCTEON_VENDOR_RADISYS)
+ case CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE:
+ /* No MII. */
+ return -1;
+#endif
+#if defined(OCTEON_VENDOR_GEFES)
+ case CVMX_BOARD_TYPE_AT5810:
+ return -1;
+ case CVMX_BOARD_TYPE_TNPA3804:
+ case CVMX_BOARD_TYPE_CUST_TNPA5804:
+ case CVMX_BOARD_TYPE_CUST_W5800:
+ case CVMX_BOARD_TYPE_WNPA3850:
+ case CVMX_BOARD_TYPE_W3860:
+ return -1; /* RGMII boards should use inband status */
+ case CVMX_BOARD_TYPE_CUST_W5651X:
+ case CVMX_BOARD_TYPE_CUST_W5650:
+ case CVMX_BOARD_TYPE_CUST_TNPA56X4:
+ case CVMX_BOARD_TYPE_CUST_TNPA5651X:
+ case CVMX_BOARD_TYPE_CUST_W63XX:
+ return -1; /* No PHYs are connected to Octeon; the PHYs are inside the SFPs and are accessed over TWSI */
+ case CVMX_BOARD_TYPE_CUST_W5434:
+ /* Board has 4 SGMII ports, all connected out. Return the MII
+ * address of the PHY connected to each IPD port.
+ */
+ if ((ipd_port >= 16) && (ipd_port < 20))
+ return ipd_port - 16 + 0x40;
+ else
+ return -1;
+#endif
+ }
+
+ /* Some unknown board. Somebody forgot to update this function... */
+ cvmx_dprintf("%s: Unknown board type %d\n",
+ __FUNCTION__, cvmx_sysinfo_get()->board_type);
+ return -1;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_board_get_mii_address);
+#endif
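Several of the board cases above encode an SMI/MDIO bus number in bits <15:8> of the returned value (the 0x101 + ... and (1<<8) forms). A minimal sketch of how a caller splits that encoding; the ipd_port value is hypothetical:

    int mii_addr = cvmx_helper_board_get_mii_address(0); /* hypothetical port */
    if (mii_addr >= 0)
    {
        int bus = mii_addr >> 8;     /* SMI/MDIO bus number (bits <15:8>) */
        int addr = mii_addr & 0xff;  /* PHY address on that bus */
        cvmx_mdio_read(bus, addr, 1);  /* e.g. read the basic status register */
    }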
+
+/**
+ * @INTERNAL
+ * Get link state of a Marvell PHY
+ */
+static cvmx_helper_link_info_t __get_marvell_phy_link_state(int phy_addr)
+{
+ cvmx_helper_link_info_t result;
+ int phy_status;
+
+ result.u64 = 0;
+ /* All the speed information can be read from register 17 in one go. */
+ phy_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17);
+
+ /* If the resolve bit 11 isn't set, see if autoneg is turned off
+ (bit 12, reg 0). The resolve bit doesn't get set properly when
+ autoneg is off, so force it */
+ if ((phy_status & (1<<11)) == 0)
+ {
+ int auto_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0);
+ if ((auto_status & (1<<12)) == 0)
+ phy_status |= 1<<11;
+ }
+
+ /* Only return a link if the PHY has finished auto negotiation
+ and set the resolved bit (bit 11) */
+ if (phy_status & (1<<11))
+ {
+ result.s.link_up = 1;
+ result.s.full_duplex = ((phy_status>>13)&1);
+ switch ((phy_status>>14)&3)
+ {
+ case 0: /* 10 Mbps */
+ result.s.speed = 10;
+ break;
+ case 1: /* 100 Mbps */
+ result.s.speed = 100;
+ break;
+ case 2: /* 1 Gbps */
+ result.s.speed = 1000;
+ break;
+ case 3: /* Illegal */
+ result.u64 = 0;
+ break;
+ }
+ }
+ return result;
+}
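To make the register-17 decoding concrete, here is a minimal sketch that decodes a hypothetical raw value the same way (0xac00 is illustrative, not from the source):

    /* 0xac00: bit 11 set (resolved), bit 13 set (full duplex),
       bits <15:14> == 2 (1000 Mbps) */
    int phy_status = 0xac00;
    static const int speed_map[4] = {10, 100, 1000, 0}; /* index 3 is illegal */
    if (phy_status & (1 << 11))
    {
        int full_duplex = (phy_status >> 13) & 1;       /* -> 1 */
        int speed = speed_map[(phy_status >> 14) & 3];  /* -> 1000 */
    }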
+
+/**
+ * @INTERNAL
+ * Get link state of a Broadcom PHY
+ */
+static cvmx_helper_link_info_t __get_broadcom_phy_link_state(int phy_addr)
+{
+ cvmx_helper_link_info_t result;
+ int phy_status;
+
+ result.u64 = 0;
+ /* Below we are going to read SMI/MDIO register 0x19 which works
+ on Broadcom parts */
+ phy_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0x19);
+ switch ((phy_status>>8) & 0x7)
+ {
+ case 0:
+ result.u64 = 0;
+ break;
+ case 1:
+ result.s.link_up = 1;
+ result.s.full_duplex = 0;
+ result.s.speed = 10;
+ break;
+ case 2:
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 10;
+ break;
+ case 3:
+ result.s.link_up = 1;
+ result.s.full_duplex = 0;
+ result.s.speed = 100;
+ break;
+ case 4:
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 100;
+ break;
+ case 5:
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 100;
+ break;
+ case 6:
+ result.s.link_up = 1;
+ result.s.full_duplex = 0;
+ result.s.speed = 1000;
+ break;
+ case 7:
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ break;
+ }
+ return result;
+}
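The eight-way switch above amounts to a lookup keyed on bits <10:8> of register 0x19. The same mapping written as a table, for reference (a sketch, not a replacement for the source):

    /* {link_up, full_duplex, speed} indexed by (phy_status >> 8) & 0x7 */
    static const struct { int up; int fdx; int speed; } bcm_aux[8] = {
        {0, 0, 0},    /* 0: no link    */
        {1, 0, 10},   /* 1: 10M half   */
        {1, 1, 10},   /* 2: 10M full   */
        {1, 0, 100},  /* 3: 100M half  */
        {1, 1, 100},  /* 4: 100M full  */
        {1, 1, 100},  /* 5: 100M full  */
        {1, 0, 1000}, /* 6: 1000M half */
        {1, 1, 1000}, /* 7: 1000M full */
    };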
+
+
+/**
+ * @INTERNAL
+ * Get link state using inband status
+ */
+static cvmx_helper_link_info_t __get_inband_link_state(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ cvmx_gmxx_rxx_rx_inbnd_t inband_status;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ result.u64 = 0;
+ inband_status.u64 = cvmx_read_csr(CVMX_GMXX_RXX_RX_INBND(index, interface));
+ result.s.link_up = inband_status.s.status;
+ result.s.full_duplex = inband_status.s.duplex;
+ switch (inband_status.s.speed)
+ {
+ case 0: /* 10 Mbps */
+ result.s.speed = 10;
+ break;
+ case 1: /* 100 Mbps */
+ result.s.speed = 100;
+ break;
+ case 2: /* 1 Gbps */
+ result.s.speed = 1000;
+ break;
+ case 3: /* Illegal */
+ result.u64 = 0;
+ break;
+ }
+ return result;
+}
+
+#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && (!defined(__FreeBSD__) || !defined(_KERNEL))
+/**
+ * @INTERNAL
+ * Switch MDIO mux to the specified port.
+ */
+static int __switch_mdio_mux(int ipd_port)
+{
+ /* This method is board specific and doesn't use the device tree
+ information, as the SE does not implement an MDIO MUX abstraction */
+ switch (cvmx_sysinfo_get()->board_type)
+ {
+ case CVMX_BOARD_TYPE_EBB5600:
+ {
+ static unsigned char qlm_switch_addr = 0;
+ /* Board has 1 management port */
+ if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+ return 0;
+ /* Board has 8 SGMII ports. 4 connected QLM1, 4 connected QLM3 */
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ {
+ if (qlm_switch_addr != 0x3)
+ {
+ qlm_switch_addr = 0x3; /* QLM1 */
+ cvmx_twsix_write_ia(0, 0x71, 0, 1, 1, qlm_switch_addr);
+ cvmx_wait_usec(11000); /* Let the write complete */
+ }
+ return ipd_port+1 + (1<<8);
+ }
+ else if ((ipd_port >= 16) && (ipd_port < 20))
+ {
+ if (qlm_switch_addr != 0xC)
+ {
+ qlm_switch_addr = 0xC; /* QLM3 */
+ cvmx_twsix_write_ia(0, 0x71, 0, 1, 1, qlm_switch_addr);
+ cvmx_wait_usec(11000); /* Let the write complete */
+ }
+ return ipd_port-16+1 + (1<<8);
+ }
+ else
+ return -1;
+ }
+ case CVMX_BOARD_TYPE_EBB6600:
+ {
+ static unsigned char qlm_switch_addr = 0;
+ int old_twsi_switch_reg;
+ /* Board has 2 management ports */
+ if ((ipd_port >= CVMX_HELPER_BOARD_MGMT_IPD_PORT) &&
+ (ipd_port < (CVMX_HELPER_BOARD_MGMT_IPD_PORT + 2)))
+ return ipd_port - CVMX_HELPER_BOARD_MGMT_IPD_PORT + 4;
+ if ((ipd_port >= 0) && (ipd_port < 4)) /* QLM 2 */
+ {
+ if (qlm_switch_addr != 2)
+ {
+ int tries;
+ qlm_switch_addr = 2;
+ tries = 3;
+ do {
+ old_twsi_switch_reg = cvmx_twsix_read8(0, 0x70, 0);
+ } while (tries-- > 0 && old_twsi_switch_reg < 0);
+ /* Set I2C MUX to enable port expander */
+ cvmx_retry_i2c_write(0, 0x70, 0, 1, 0, 8);
+ /* Set selector to QLM 1 */
+ cvmx_retry_i2c_write(0, 0x38, 0, 1, 0, 0xff);
+ /* Disable the port expander */
+ cvmx_retry_i2c_write(0, 0x70, 0, 1, 0, old_twsi_switch_reg);
+ }
+ return 0x101 + ipd_port;
+ }
+ else if ((ipd_port >= 16) && (ipd_port < 20)) /* QLM 1 */
+ {
+ if (qlm_switch_addr != 1)
+ {
+ int tries;
+ qlm_switch_addr = 1;
+ tries = 3;
+ do {
+ old_twsi_switch_reg = cvmx_twsix_read8(0, 0x70, 0);
+ } while (tries-- > 0 && old_twsi_switch_reg < 0);
+ /* Set I2C MUX to enable port expander */
+ cvmx_retry_i2c_write(0, 0x70, 0, 1, 0, 8);
+ /* Set selector to QLM 2 */
+ cvmx_retry_i2c_write(0, 0x38, 0, 1, 0, 0xf7);
+ /* Disable the port expander */
+ cvmx_retry_i2c_write(0, 0x70, 0, 1, 0, old_twsi_switch_reg);
+ }
+ return 0x101 + (ipd_port - 16);
+ } else
+ return -1;
+ }
+ case CVMX_BOARD_TYPE_EBB6100:
+ {
+ static char gpio_configured = 0;
+
+ if (!gpio_configured)
+ {
+ cvmx_gpio_cfg(3, 1);
+ gpio_configured = 1;
+ }
+ /* Board has 2 management ports */
+ if ((ipd_port >= CVMX_HELPER_BOARD_MGMT_IPD_PORT) &&
+ (ipd_port < (CVMX_HELPER_BOARD_MGMT_IPD_PORT + 2)))
+ return ipd_port - CVMX_HELPER_BOARD_MGMT_IPD_PORT + 4;
+ if ((ipd_port >= 0) && (ipd_port < 4)) /* QLM 2 */
+ {
+ cvmx_gpio_set(1ull << 3);
+ return 0x101 + ipd_port;
+ }
+ else if ((ipd_port >= 16) && (ipd_port < 20)) /* QLM 0 */
+ {
+ cvmx_gpio_clear(1ull << 3);
+ return 0x101 + (ipd_port - 16);
+ }
+ else
+ {
+ printf("%s: Unknown ipd port 0x%x\n", __func__, ipd_port);
+ return -1;
+ }
+ }
+ default:
+ {
+ cvmx_dprintf("ERROR : unexpected mdio switch for board=%08x\n",
+ cvmx_sysinfo_get()->board_type);
+ return -1;
+ }
+ }
+ /* should never get here */
+ return -1;
+}
+
+/**
+ * @INTERNAL
+ * This function is used to determine an ethernet port's link speed. It uses
+ * the device tree information to determine the PHY address and the type of
+ * PHY. The only supported PHYs are Marvell and Broadcom.
+ *
+ * @param ipd_port IPD input port associated with the port we want to get link
+ * status for.
+ *
+ * @return The port's link status. If the link isn't fully resolved, this must
+ * return zero.
+ */
+
+cvmx_helper_link_info_t __cvmx_helper_board_link_get_from_dt(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ cvmx_phy_info_t phy_info;
+
+ result.u64 = 0;
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ {
+ /* The simulator gives you a simulated 1Gbps full duplex link */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+ phy_info = __get_phy_info_from_dt(ipd_port);
+ //cvmx_dprintf("ipd_port=%d phy_addr=%d dc=%d type=%d \n", ipd_port,
+ // phy_info.phy_addr, phy_info.direct_connect, phy_info.phy_type);
+ if (phy_info.phy_addr < 0) return result;
+
+ if (phy_info.direct_connect == 0)
+ __switch_mdio_mux(ipd_port);
+ switch(phy_info.phy_type)
+ {
+ case BROADCOM_GENERIC_PHY:
+ result = __get_broadcom_phy_link_state(phy_info.phy_addr);
+ break;
+ case MARVELL_GENERIC_PHY:
+ result = __get_marvell_phy_link_state(phy_info.phy_addr);
+ break;
+ default:
+ result = __get_inband_link_state(ipd_port);
+ }
+ return result;
+}
+#endif
+
+/**
+ * @INTERNAL
+ * This function invokes __cvmx_helper_board_link_get_from_dt when device tree
+ * info is available. When the device tree information is not available then
+ * this function is the board specific method of determining an
+ * ethernet port's link speed. Most Octeon boards have Marvell PHYs
+ * and are handled by the fall through case. This function must be
+ * updated for boards that don't have the normal Marvell PHYs.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @param ipd_port IPD input port associated with the port we want to get link
+ * status for.
+ *
+ * @return The port's link status. If the link isn't fully resolved, this must
+ * return zero.
+ */
+cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ int phy_addr;
+ int is_broadcom_phy = 0;
+
+#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && (!defined(__FreeBSD__) || !defined(_KERNEL))
+ if (cvmx_sysinfo_get()->fdt_addr)
+ {
+ return __cvmx_helper_board_link_get_from_dt(ipd_port);
+ }
+#endif
+
+ /* Give the user a chance to override the processing of this function */
+ if (cvmx_override_board_link_get)
+ return cvmx_override_board_link_get(ipd_port);
+
+ /* Unless we fix it later, all links are defaulted to down */
+ result.u64 = 0;
+
+#if !defined(OCTEON_BOARD_CAPK_0100ND)
+ /* This switch statement should handle all ports that either don't use
+ Marvell PHYs, or don't support in-band status */
+ switch (cvmx_sysinfo_get()->board_type)
+ {
+ case CVMX_BOARD_TYPE_SIM:
+ /* The simulator gives you a simulated 1Gbps full duplex link */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ case CVMX_BOARD_TYPE_LANAI2_A:
+ case CVMX_BOARD_TYPE_LANAI2_U:
+ case CVMX_BOARD_TYPE_LANAI2_G:
+ break;
+ case CVMX_BOARD_TYPE_EBH3100:
+ case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+#if !defined(OCTEON_VENDOR_GEFES)
+ case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
+#endif
+ /* Port 1 on these boards is always Gigabit */
+ if (ipd_port == 1)
+ {
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+ /* Fall through to the generic code below */
+ break;
+ case CVMX_BOARD_TYPE_EBT5600:
+ case CVMX_BOARD_TYPE_EBH5600:
+ case CVMX_BOARD_TYPE_EBH5601:
+ case CVMX_BOARD_TYPE_EBH5610:
+ /* Board has 1 management port */
+ if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+ is_broadcom_phy = 1;
+ break;
+ case CVMX_BOARD_TYPE_EBH5200:
+ case CVMX_BOARD_TYPE_EBH5201:
+ case CVMX_BOARD_TYPE_EBT5200:
+ /* Board has 2 management ports */
+ if ((ipd_port >= CVMX_HELPER_BOARD_MGMT_IPD_PORT) && (ipd_port < (CVMX_HELPER_BOARD_MGMT_IPD_PORT + 2)))
+ is_broadcom_phy = 1;
+ break;
+ case CVMX_BOARD_TYPE_EBB6100:
+ case CVMX_BOARD_TYPE_EBB6300: /* Only for MII mode, with PHY addresses 0/1. Default is RGMII */
+ case CVMX_BOARD_TYPE_EBB6600: /* Only for MII mode, with PHY addresses 0/1. Default is RGMII */
+ if ((ipd_port >= CVMX_HELPER_BOARD_MGMT_IPD_PORT) && (ipd_port < (CVMX_HELPER_BOARD_MGMT_IPD_PORT + 2))
+ && cvmx_helper_board_get_mii_address(ipd_port) >= 0 && cvmx_helper_board_get_mii_address(ipd_port) <= 1)
+ is_broadcom_phy = 1;
+ break;
+ case CVMX_BOARD_TYPE_EP6300C:
+ is_broadcom_phy = 1;
+ break;
+ case CVMX_BOARD_TYPE_CUST_NB5:
+ /* Port 1 on these boards is always Gigabit */
+ if (ipd_port == 1)
+ {
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+ else /* The other port uses a broadcom PHY */
+ is_broadcom_phy = 1;
+ break;
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ /* Ports 0 and 1 connect to the internal switch; port 2 is unused */
+ if (ipd_port == 2)
+ {
+ /* Port 2 is not hooked up */
+ result.u64 = 0;
+ return result;
+ }
+ else
+ {
+ /* Ports 0 and 1 connect to the switch */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+ case CVMX_BOARD_TYPE_NIC4E:
+ case CVMX_BOARD_TYPE_NIC2E:
+ is_broadcom_phy = 1;
+ break;
+ /* Private vendor-defined boards. */
+#if defined(OCTEON_VENDOR_LANNER)
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
+ /* Ports are BCM5482S */
+ is_broadcom_phy = 1;
+ break;
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR320:
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR321X:
+ /* Port 0 connects to the switch */
+ if (ipd_port == 0)
+ {
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+ break;
+#endif
+#if defined(OCTEON_VENDOR_GEFES)
+ case CVMX_BOARD_TYPE_CUST_TNPA5651X:
+ /* Since we don't auto-negotiate... 1Gbps full duplex link */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ break;
+#endif
+ }
+#endif
+
+ phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
+ //cvmx_dprintf("ipd_port=%d phy_addr=%d broadcom=%d\n",
+ // ipd_port, phy_addr, is_broadcom_phy);
+ if (phy_addr != -1)
+ {
+ if (is_broadcom_phy)
+ {
+ result = __get_broadcom_phy_link_state(phy_addr);
+ }
+ else
+ {
+ /* This code assumes we are using a Marvell Gigabit PHY. */
+ result = __get_marvell_phy_link_state(phy_addr);
+ }
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ {
+ /* We don't have a PHY address, so attempt to use in-band status. It is
+ really important that boards not supporting in-band status never get
+ here. Reading broken in-band status tends to do bad things */
+ result = __get_inband_link_state(ipd_port);
+ }
+#if defined(OCTEON_VENDOR_GEFES)
+ else if( (OCTEON_IS_MODEL(OCTEON_CN56XX)) || (OCTEON_IS_MODEL(OCTEON_CN63XX)) )
+ {
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ cvmx_pcsx_miscx_ctl_reg_t mode_type;
+ cvmx_pcsx_mrx_status_reg_t mrx_status;
+ cvmx_pcsx_anx_adv_reg_t anxx_adv;
+ cvmx_pcsx_sgmx_lp_adv_reg_t sgmii_inband_status;
+
+ anxx_adv.u64 = cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
+ mrx_status.u64 = cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG(index, interface));
+
+ mode_type.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+
+ /* Read Octeon's inband status */
+ sgmii_inband_status.u64 = cvmx_read_csr(CVMX_PCSX_SGMX_LP_ADV_REG(index, interface));
+
+ result.s.link_up = sgmii_inband_status.s.link;
+ result.s.full_duplex = sgmii_inband_status.s.dup;
+ switch (sgmii_inband_status.s.speed)
+ {
+ case 0: /* 10 Mbps */
+ result.s.speed = 10;
+ break;
+ case 1: /* 100 Mbps */
+ result.s.speed = 100;
+ break;
+ case 2: /* 1 Gbps */
+ result.s.speed = 1000;
+ break;
+ case 3: /* Illegal */
+ result.s.speed = 0;
+ result.s.link_up = 0;
+ break;
+ }
+ }
+#endif
+ else
+ {
+ /* We don't have a PHY address and we don't have in-band status. There
+ is no way to determine the link speed. Return down assuming this
+ port isn't wired */
+ result.u64 = 0;
+ }
+
+ /* If link is down, return all fields as zero. */
+ if (!result.s.link_up)
+ result.u64 = 0;
+
+ return result;
+}
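As a usage sketch (the port number is hypothetical), a caller typically polls this helper and acts on the decoded fields:

    void example_poll_link(void)
    {
        int ipd_port = 0; /* hypothetical IPD port */
        cvmx_helper_link_info_t link = __cvmx_helper_board_link_get(ipd_port);
        if (link.s.link_up)
            cvmx_dprintf("port %d: %d Mbps, %s duplex\n", ipd_port,
                         (int)link.s.speed,
                         link.s.full_duplex ? "full" : "half");
        else
            cvmx_dprintf("port %d: link down\n", ipd_port);
    }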
+
+
+/**
+ * This function is the board specific method of changing the PHY
+ * speed, duplex, and autonegotiation. This programs the PHY and
+ * not Octeon. This can be used to force Octeon's links to
+ * specific settings.
+ *
+ * @param phy_addr The address of the PHY to program
+ * @param link_flags
+ * Flags to control autonegotiation. Bit 0 is autonegotiation
+ * enable/disable to maintain backward compatibility.
+ * @param link_info Link speed to program. If the speed is zero and autonegotiation
+ * is enabled, all possible negotiation speeds are advertised.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_helper_board_link_set_phy(int phy_addr, cvmx_helper_board_set_phy_link_flags_types_t link_flags,
+ cvmx_helper_link_info_t link_info)
+{
+
+ /* Set the flow control settings based on link_flags */
+ if ((link_flags & set_phy_link_flags_flow_control_mask) != set_phy_link_flags_flow_control_dont_touch)
+ {
+ cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
+ reg_autoneg_adver.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
+ reg_autoneg_adver.s.asymmetric_pause = (link_flags & set_phy_link_flags_flow_control_mask) == set_phy_link_flags_flow_control_enable;
+ reg_autoneg_adver.s.pause = (link_flags & set_phy_link_flags_flow_control_mask) == set_phy_link_flags_flow_control_enable;
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_AUTONEG_ADVER, reg_autoneg_adver.u16);
+ }
+
+ /* If speed isn't set and autoneg is on, advertise all supported modes */
+ if ((link_flags & set_phy_link_flags_autoneg) && (link_info.s.speed == 0))
+ {
+ cvmx_mdio_phy_reg_control_t reg_control;
+ cvmx_mdio_phy_reg_status_t reg_status;
+ cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
+ cvmx_mdio_phy_reg_extended_status_t reg_extended_status;
+ cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
+
+ reg_status.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_STATUS);
+ reg_autoneg_adver.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
+ reg_autoneg_adver.s.advert_100base_t4 = reg_status.s.capable_100base_t4;
+ reg_autoneg_adver.s.advert_10base_tx_full = reg_status.s.capable_10_full;
+ reg_autoneg_adver.s.advert_10base_tx_half = reg_status.s.capable_10_half;
+ reg_autoneg_adver.s.advert_100base_tx_full = reg_status.s.capable_100base_x_full;
+ reg_autoneg_adver.s.advert_100base_tx_half = reg_status.s.capable_100base_x_half;
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_AUTONEG_ADVER, reg_autoneg_adver.u16);
+ if (reg_status.s.capable_extended_status)
+ {
+ reg_extended_status.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_EXTENDED_STATUS);
+ reg_control_1000.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_CONTROL_1000);
+ reg_control_1000.s.advert_1000base_t_full = reg_extended_status.s.capable_1000base_t_full;
+ reg_control_1000.s.advert_1000base_t_half = reg_extended_status.s.capable_1000base_t_half;
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_CONTROL_1000, reg_control_1000.u16);
+ }
+ reg_control.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_CONTROL);
+ reg_control.s.autoneg_enable = 1;
+ reg_control.s.restart_autoneg = 1;
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
+ }
+ else if ((link_flags & set_phy_link_flags_autoneg))
+ {
+ cvmx_mdio_phy_reg_control_t reg_control;
+ cvmx_mdio_phy_reg_status_t reg_status;
+ cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
+ cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
+
+ reg_status.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_STATUS);
+ reg_autoneg_adver.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
+ reg_autoneg_adver.s.advert_100base_t4 = 0;
+ reg_autoneg_adver.s.advert_10base_tx_full = 0;
+ reg_autoneg_adver.s.advert_10base_tx_half = 0;
+ reg_autoneg_adver.s.advert_100base_tx_full = 0;
+ reg_autoneg_adver.s.advert_100base_tx_half = 0;
+ if (reg_status.s.capable_extended_status)
+ {
+ reg_control_1000.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_CONTROL_1000);
+ reg_control_1000.s.advert_1000base_t_full = 0;
+ reg_control_1000.s.advert_1000base_t_half = 0;
+ }
+ switch (link_info.s.speed)
+ {
+ case 10:
+ reg_autoneg_adver.s.advert_10base_tx_full = link_info.s.full_duplex;
+ reg_autoneg_adver.s.advert_10base_tx_half = !link_info.s.full_duplex;
+ break;
+ case 100:
+ reg_autoneg_adver.s.advert_100base_tx_full = link_info.s.full_duplex;
+ reg_autoneg_adver.s.advert_100base_tx_half = !link_info.s.full_duplex;
+ break;
+ case 1000:
+ reg_control_1000.s.advert_1000base_t_full = link_info.s.full_duplex;
+ reg_control_1000.s.advert_1000base_t_half = !link_info.s.full_duplex;
+ break;
+ }
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_AUTONEG_ADVER, reg_autoneg_adver.u16);
+ if (reg_status.s.capable_extended_status)
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_CONTROL_1000, reg_control_1000.u16);
+ reg_control.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_CONTROL);
+ reg_control.s.autoneg_enable = 1;
+ reg_control.s.restart_autoneg = 1;
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
+ }
+ else
+ {
+ cvmx_mdio_phy_reg_control_t reg_control;
+ reg_control.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_CONTROL);
+ reg_control.s.autoneg_enable = 0;
+ reg_control.s.restart_autoneg = 1;
+ reg_control.s.duplex = link_info.s.full_duplex;
+ if (link_info.s.speed == 1000)
+ {
+ reg_control.s.speed_msb = 1;
+ reg_control.s.speed_lsb = 0;
+ }
+ else if (link_info.s.speed == 100)
+ {
+ reg_control.s.speed_msb = 0;
+ reg_control.s.speed_lsb = 1;
+ }
+ else if (link_info.s.speed == 10)
+ {
+ reg_control.s.speed_msb = 0;
+ reg_control.s.speed_lsb = 0;
+ }
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
+ }
+ return 0;
+}
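For illustration, a hedged sketch of forcing 100 Mbps full duplex with autonegotiation disabled (the PHY address is hypothetical); this exercises the final else branch above, which writes the control register directly:

    void example_force_100_full(void)
    {
        cvmx_helper_link_info_t info;
        int phy_addr = 0x04; /* hypothetical: bus 0, PHY address 4 */

        info.u64 = 0;
        info.s.speed = 100;
        info.s.full_duplex = 1;
        /* No autoneg flag set, so the PHY is programmed to fixed settings. */
        cvmx_helper_board_link_set_phy(phy_addr,
            set_phy_link_flags_flow_control_dont_touch, info);
    }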
+
+
+/**
+ * @INTERNAL
+ * This function is called by cvmx_helper_interface_probe() after it
+ * determines the number of ports Octeon can support on a specific
+ * interface. This function is the per board location to override
+ * this value. It is called with the number of ports Octeon might
+ * support and should return the number of actual ports on the
+ * board.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @param interface Interface to probe
+ * @param supported_ports
+ * Number of ports Octeon supports.
+ *
+ * @return Number of ports the actual board supports. Many times this will
+ * simply be "supported_ports".
+ */
+int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
+{
+ switch (cvmx_sysinfo_get()->board_type)
+ {
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ case CVMX_BOARD_TYPE_LANAI2_A:
+ case CVMX_BOARD_TYPE_LANAI2_U:
+ case CVMX_BOARD_TYPE_LANAI2_G:
+ if (interface == 0)
+ return 2;
+ break;
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ if (interface == 0)
+ return 2;
+ break;
+ case CVMX_BOARD_TYPE_NIC_XLE_4G:
+ if (interface == 0)
+ return 0;
+ break;
+ /* The 2nd interface on the EBH5600 is connected to the Marvell switch,
+ which we don't support. Disable the ports connected to it */
+ case CVMX_BOARD_TYPE_EBH5600:
+ if (interface == 1)
+ return 0;
+ break;
+ case CVMX_BOARD_TYPE_EBB5600:
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+ if (cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_PICMG)
+ return 0;
+#endif
+ break;
+ case CVMX_BOARD_TYPE_EBT5600:
+ /* Disable loopback. */
+ if (interface == 3)
+ return 0;
+ break;
+ case CVMX_BOARD_TYPE_EBT5810:
+ return 1; /* Two ports on each SPI: 1 hooked to MAC, 1 loopback
+ ** Loopback disabled by default. */
+ case CVMX_BOARD_TYPE_NIC2E:
+ if (interface == 0)
+ return 2;
+ break;
+#if defined(OCTEON_VENDOR_LANNER)
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR955:
+ if (interface == 1)
+ return 12;
+ break;
+#endif
+#if defined(OCTEON_VENDOR_GEFES)
+ case CVMX_BOARD_TYPE_CUST_TNPA5651X:
+ if (interface < 2) /* interface can be either 0 or 1 */
+ return 1; /* Always return 1 for XAUI and SGMII modes */
+ break;
+ case CVMX_BOARD_TYPE_CUST_TNPA56X4:
+ if ((interface == 0) &&
+ (cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_SGMII))
+ {
+ cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
+
+ /* For this port we need to set the mode to 1000BaseX */
+ pcsx_miscx_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(0, interface));
+ pcsx_miscx_ctl_reg.cn56xx.mode = 1;
+ cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(0, interface),
+ pcsx_miscx_ctl_reg.u64);
+ pcsx_miscx_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(1, interface));
+ pcsx_miscx_ctl_reg.cn56xx.mode = 1;
+ cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(1, interface),
+ pcsx_miscx_ctl_reg.u64);
+
+ return 2;
+ }
+ break;
+#endif
+ }
+#ifdef CVMX_BUILD_FOR_UBOOT
+ if (CVMX_HELPER_INTERFACE_MODE_SPI == cvmx_helper_interface_get_mode(interface) && getenv("disable_spi"))
+ return 0;
+#endif
+ return supported_ports;
+}
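For a hypothetical new board that only wires up two of the four possible ports on interface 0, an entry would follow the same pattern as the cases above (the board enum value is illustrative, not from the source):

    /* case CVMX_BOARD_TYPE_MY_NEW_BOARD:   (hypothetical enum value)
     *     if (interface == 0)
     *         return 2;
     *     break;
     */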
+
+
+/**
+ * @INTERNAL
+ * Enable packet input/output from the hardware. This function is
+ * called by cvmx_helper_packet_hardware_enable() to
+ * perform board specific initialization. For most boards
+ * nothing is needed.
+ *
+ * @param interface Interface to enable
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_board_hardware_enable(int interface)
+{
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5)
+ {
+ if (interface == 0)
+ {
+ /* Different config for switch port */
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0);
+ /* Boards with gigabit WAN ports need a different setting that is
+ compatible with 100 Mbit settings */
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 0xc);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 0xc);
+ }
+ }
+ else if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_LANAI2_U)
+ {
+ if (interface == 0)
+ {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 16);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 16);
+ }
+ }
+ else if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3010_EVB_HS5)
+ {
+ /* Broadcom PHYs require different ASX clocks. Unfortunately
+ many customers don't define a new board ID and simply
+ mangle the CN3010_EVB_HS5 */
+ if (interface == 0)
+ {
+ /* Some customer boards use a hacked-up bootloader that identifies them as
+ ** CN3010_EVB_HS5 evaluation boards. This leads to all kinds of configuration
+ ** problems. Detect one case, and print warning, while trying to do the right thing.
+ */
+ int phy_addr = cvmx_helper_board_get_mii_address(0);
+ if (phy_addr != -1)
+ {
+ int phy_identifier = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0x2);
+ /* Is it a Broadcom PHY? */
+ if (phy_identifier == 0x0143)
+ {
+ cvmx_dprintf("\n");
+ cvmx_dprintf("ERROR:\n");
+ cvmx_dprintf("ERROR: Board type is CVMX_BOARD_TYPE_CN3010_EVB_HS5, but Broadcom PHY found.\n");
+ cvmx_dprintf("ERROR: The board type is mis-configured, and software malfunctions are likely.\n");
+ cvmx_dprintf("ERROR: All boards require a unique board type to identify them.\n");
+ cvmx_dprintf("ERROR:\n");
+ cvmx_dprintf("\n");
+ cvmx_wait(1000000000);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 5);
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 5);
+ }
+ }
+ }
+ }
+#if defined(OCTEON_VENDOR_UBIQUITI)
+ else if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_UBIQUITI_E100)
+ {
+ /* Configure ASX clocks for all ports on interface 0. */
+ if (interface == 0)
+ {
+ int port;
+
+ for (port = 0; port < 3; port++) {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 16);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 0);
+ }
+ }
+ }
+#endif
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Gets the clock type used for the USB block based on board type.
+ * Used by the USB code for auto configuration of clock type.
+ *
+ * @return USB clock type enumeration
+ */
+cvmx_helper_board_usb_clock_types_t __cvmx_helper_board_usb_get_clock_type(void)
+{
+#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && (!defined(__FreeBSD__) || !defined(_KERNEL))
+ const void *fdt_addr = CASTPTR(const void *, cvmx_sysinfo_get()->fdt_addr);
+ int nodeoffset;
+ const void *nodep;
+ int len;
+ uint32_t speed = 0;
+ const char *type = NULL;
+
+ if (fdt_addr)
+ {
+ nodeoffset = fdt_path_offset(fdt_addr, "/soc/uctl");
+ if (nodeoffset < 0)
+ nodeoffset = fdt_path_offset(fdt_addr, "/soc/usbn");
+
+ if (nodeoffset >= 0)
+ {
+ nodep = fdt_getprop(fdt_addr, nodeoffset, "refclk-type", &len);
+ if (nodep != NULL && len > 0)
+ type = (const char *)nodep;
+ else
+ type = "unknown";
+ nodep = fdt_getprop(fdt_addr, nodeoffset, "refclk-frequency", &len);
+ if (nodep != NULL && len == sizeof(uint32_t))
+ speed = fdt32_to_cpu(*(int *)nodep);
+ else
+ speed = 0;
+ if (!strcmp(type, "crystal"))
+ {
+ if (speed == 0 || speed == 12000000)
+ return USB_CLOCK_TYPE_CRYSTAL_12;
+ else
+ printf("Warning: invalid crystal speed for USB clock type in FDT\n");
+ }
+ else if (!strcmp(type, "external"))
+ {
+ switch (speed) {
+ case 12000000:
+ return USB_CLOCK_TYPE_REF_12;
+ case 24000000:
+ return USB_CLOCK_TYPE_REF_24;
+ case 0:
+ case 48000000:
+ return USB_CLOCK_TYPE_REF_48;
+ default:
+ printf("Warning: invalid USB clock speed of %u hz in FDT\n", speed);
+ }
+ }
+ else
+ printf("Warning: invalid USB reference clock type \"%s\" in FDT\n", type ? type : "NULL");
+ }
+ }
+#endif
+ switch (cvmx_sysinfo_get()->board_type)
+ {
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ case CVMX_BOARD_TYPE_LANAI2_A:
+ case CVMX_BOARD_TYPE_LANAI2_U:
+ case CVMX_BOARD_TYPE_LANAI2_G:
+#if defined(OCTEON_VENDOR_LANNER)
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR320:
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR321X:
+#endif
+#if defined(OCTEON_VENDOR_UBIQUITI)
+ case CVMX_BOARD_TYPE_CUST_UBIQUITI_E100:
+#endif
+#if defined(OCTEON_BOARD_CAPK_0100ND)
+ case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+#endif
+#if defined(OCTEON_VENDOR_GEFES) /* All GEFES boards use the same xtal type */
+ case CVMX_BOARD_TYPE_TNPA3804:
+ case CVMX_BOARD_TYPE_AT5810:
+ case CVMX_BOARD_TYPE_WNPA3850:
+ case CVMX_BOARD_TYPE_W3860:
+ case CVMX_BOARD_TYPE_CUST_TNPA5804:
+ case CVMX_BOARD_TYPE_CUST_W5434:
+ case CVMX_BOARD_TYPE_CUST_W5650:
+ case CVMX_BOARD_TYPE_CUST_W5800:
+ case CVMX_BOARD_TYPE_CUST_W5651X:
+ case CVMX_BOARD_TYPE_CUST_TNPA5651X:
+ case CVMX_BOARD_TYPE_CUST_TNPA56X4:
+ case CVMX_BOARD_TYPE_CUST_W63XX:
+#endif
+ case CVMX_BOARD_TYPE_NIC10E_66:
+ return USB_CLOCK_TYPE_CRYSTAL_12;
+ case CVMX_BOARD_TYPE_NIC10E:
+ return USB_CLOCK_TYPE_REF_12;
+ default:
+ break;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) /* Most boards except NIC10e use a 12MHz crystal */
+ || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ return USB_CLOCK_TYPE_CRYSTAL_12;
+ return USB_CLOCK_TYPE_REF_48;
+}
+
+
+/**
+ * @INTERNAL
+ * Adjusts the number of available USB ports on Octeon based on board
+ * specifics.
+ *
+ * @param supported_ports expected number of ports based on chip type
+ *
+ * @return number of available USB ports, based on board specifics.
+ * Return value is supported_ports if function does not
+ * override.
+ */
+int __cvmx_helper_board_usb_get_num_ports(int supported_ports)
+{
+ switch (cvmx_sysinfo_get()->board_type)
+ {
+ case CVMX_BOARD_TYPE_NIC_XLE_4G:
+ case CVMX_BOARD_TYPE_NIC2E:
+ return 0;
+ }
+
+ return supported_ports;
+}
+
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-board.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-board.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-board.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-board.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,223 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Helper functions to abstract board specific data about
+ * network ports from the rest of the cvmx-helper files.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __CVMX_HELPER_BOARD_H__
+#define __CVMX_HELPER_BOARD_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+typedef enum {
+ USB_CLOCK_TYPE_REF_12,
+ USB_CLOCK_TYPE_REF_24,
+ USB_CLOCK_TYPE_REF_48,
+ USB_CLOCK_TYPE_CRYSTAL_12,
+} cvmx_helper_board_usb_clock_types_t;
+
+typedef enum {
+ BROADCOM_GENERIC_PHY,
+ MARVELL_GENERIC_PHY,
+} cvmx_phy_type_t;
+
+typedef enum {
+ set_phy_link_flags_autoneg = 0x1,
+ set_phy_link_flags_flow_control_dont_touch = 0x0 << 1,
+ set_phy_link_flags_flow_control_enable = 0x1 << 1,
+ set_phy_link_flags_flow_control_disable = 0x2 << 1,
+ set_phy_link_flags_flow_control_mask = 0x3 << 1, /* Mask for 2 bit wide flow control field */
+} cvmx_helper_board_set_phy_link_flags_types_t;
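The flag values form a 1-bit autoneg field and a 2-bit flow-control field, so they combine with bitwise OR. For example (a sketch using the values defined above):

    /* Autonegotiate, and advertise pause/asymmetric-pause support: */
    cvmx_helper_board_set_phy_link_flags_types_t flags =
        (cvmx_helper_board_set_phy_link_flags_types_t)
        (set_phy_link_flags_autoneg | set_phy_link_flags_flow_control_enable);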
+
+
+/* Fake IPD port: the RGMII/MII interface may use a different PHY; use this
+ macro to return the appropriate MIX address to read the PHY. */
+#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
+
+/**
+ * cvmx_override_board_link_get(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the process of
+ * talking to a PHY to determine link speed. It is called every
+ * time a PHY must be polled for link status. Users should set
+ * this pointer to a function before calling any cvmx-helper
+ * operations.
+ */
+extern cvmx_helper_link_info_t (*cvmx_override_board_link_get)(int ipd_port);
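A minimal sketch of installing the hook, assuming a board whose links are hard-wired at gigabit (the function name and fixed values are illustrative):

    static cvmx_helper_link_info_t my_fixed_link(int ipd_port)
    {
        cvmx_helper_link_info_t result;
        result.u64 = 0;
        result.s.link_up = 1;
        result.s.full_duplex = 1;
        result.s.speed = 1000; /* hard-wired gigabit, regardless of ipd_port */
        return result;
    }
    /* Install before any other cvmx-helper calls:
       cvmx_override_board_link_get = my_fixed_link; */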
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. A result of -1 means there isn't a MII capable PHY
+ * connected to this port. On chips supporting multiple MII
+ * busses the bus number is encoded in bits <15:8>.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @param ipd_port Octeon IPD port to get the MII address for.
+ *
+ * @return MII PHY address and bus number or -1.
+ */
+extern int cvmx_helper_board_get_mii_address(int ipd_port);
+
+/**
+ * This function is the board specific method of changing the PHY
+ * speed, duplex, and autonegotiation. This programs the PHY and
+ * not Octeon. This can be used to force Octeon's links to
+ * specific settings.
+ *
+ * @param phy_addr The address of the PHY to program
+ * @param link_flags
+ * Flags to control autonegotiation. Bit 0 is autonegotiation
+ * enable/disable to maintain backward compatibility.
+ * @param link_info Link speed to program. If the speed is zero and autonegotiation
+ * is enabled, all possible negotiation speeds are advertised.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_helper_board_link_set_phy(int phy_addr, cvmx_helper_board_set_phy_link_flags_types_t link_flags,
+ cvmx_helper_link_info_t link_info);
+
+/**
+ * @INTERNAL
+ * This function is the board specific method of determining an
+ * ethernet port's link speed. Most Octeon boards have Marvell PHYs
+ * and are handled by the fall through case. This function must be
+ * updated for boards that don't have the normal Marvell PHYs.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @param ipd_port IPD input port associated with the port we want to get link
+ * status for.
+ *
+ * @return The port's link status. If the link isn't fully resolved, this must
+ * return zero.
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
+
+/**
+ * @INTERNAL
+ * This function is called by cvmx_helper_interface_probe() after it
+ * determines the number of ports Octeon can support on a specific
+ * interface. This function is the per board location to override
+ * this value. It is called with the number of ports Octeon might
+ * support and should return the number of actual ports on the
+ * board.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @param interface Interface to probe
+ * @param supported_ports
+ * Number of ports Octeon supports.
+ *
+ * @return Number of ports the actual board supports. Many times this will
+ * simply be "supported_ports".
+ */
+extern int __cvmx_helper_board_interface_probe(int interface, int supported_ports);
+
+/**
+ * @INTERNAL
+ * Enable packet input/output from the hardware. This function is
+ * called by cvmx_helper_packet_hardware_enable() to
+ * perform board specific initialization. For most boards
+ * nothing is needed.
+ *
+ * @param interface Interface to enable
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_board_hardware_enable(int interface);
+
+
+
+
+/**
+ * @INTERNAL
+ * Gets the clock type used for the USB block based on board type.
+ * Used by the USB code for auto configuration of clock type.
+ *
+ * @return USB clock type enumeration
+ */
+cvmx_helper_board_usb_clock_types_t __cvmx_helper_board_usb_get_clock_type(void);
+
+
+/**
+ * @INTERNAL
+ * Adjusts the number of available USB ports on Octeon based on board
+ * specifics.
+ *
+ * @param supported_ports expected number of ports based on chip type
+ *
+ * @return number of available USB ports, based on board specifics.
+ * Return value is supported_ports if function does not
+ * override.
+ */
+int __cvmx_helper_board_usb_get_num_ports(int supported_ports);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_HELPER_BOARD_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-board.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-cfg.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-cfg.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-cfg.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,718 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Helper Functions for the Configuration Framework
+ *
+ * <hr>$Revision: 0 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+#include <asm/octeon/cvmx-helper-cfg.h>
+#include <asm/octeon/cvmx-helper-ilk.h>
+#include <asm/octeon/cvmx-ilk.h>
+#include <asm/octeon/cvmx-config.h>
+#else
+#include "cvmx.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-util.h"
+#include "cvmx-helper-cfg.h"
+#include "cvmx-ilk.h"
+#include "cvmx-helper-ilk.h"
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "cvmx-config.h"
+#include "executive-config.h"
+#endif
+#endif
+
+#ifndef min
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+/* #define CVMX_HELPER_CFG_DEBUG */
+
+/*
+ * Per physical port
+ */
+struct cvmx_cfg_port_param {
+ int8_t ccpp_pknd;
+ int8_t ccpp_bpid;
+ int8_t ccpp_pko_port_base;
+ int8_t ccpp_pko_num_ports;
+ uint8_t ccpp_pko_nqueues; /*
+ * When the user explicitly
+ * assigns queues,
+ * cvmx_cfg_pko_nqueue_pool[
+ * ccpp_pko_nqueues ...
+ * ccpp_pko_nqueues +
+ * ccpp_pko_num_ports - 1]
+ * are the numbers of PKO queues
+ * assigned to the PKO ports for
+ * this physical port.
+ */
+};
+
+/*
+ * Per pko_port
+ */
+struct cvmx_cfg_pko_port_param {
+ int16_t ccppp_queue_base;
+ int16_t ccppp_num_queues;
+};
+
+/*
+ * A map from pko_port to
+ * interface,
+ * index, and
+ * pko engine id
+ */
+struct cvmx_cfg_pko_port_map {
+ int16_t ccppl_interface;
+ int16_t ccppl_index;
+ int16_t ccppl_eid;
+};
+
+/*
+ * This is for looking up pko_base_port and pko_nport for ipd_port
+ */
+struct cvmx_cfg_pko_port_pair {
+ int8_t ccppp_base_port;
+ int8_t ccppp_nports;
+};
+
+static CVMX_SHARED struct cvmx_cfg_port_param cvmx_cfg_port
+ [CVMX_HELPER_CFG_MAX_IFACE][CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] =
+ {[0 ... CVMX_HELPER_CFG_MAX_IFACE - 1] =
+ {[0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE - 1] =
+ {CVMX_HELPER_CFG_INVALID_VALUE,
+ CVMX_HELPER_CFG_INVALID_VALUE,
+ CVMX_HELPER_CFG_INVALID_VALUE,
+ CVMX_HELPER_CFG_INVALID_VALUE,
+ CVMX_HELPER_CFG_INVALID_VALUE}}};
+
+/*
+ * Indexed by the pko_port number
+ */
+static CVMX_SHARED struct cvmx_cfg_pko_port_param cvmx_cfg_pko_port
+ [CVMX_HELPER_CFG_MAX_PKO_PORT] =
+ {[0 ... CVMX_HELPER_CFG_MAX_PKO_PORT - 1] =
+ {CVMX_HELPER_CFG_INVALID_VALUE,
+ CVMX_HELPER_CFG_INVALID_VALUE}};
+
+static CVMX_SHARED struct cvmx_cfg_pko_port_map cvmx_cfg_pko_port_map
+ [CVMX_HELPER_CFG_MAX_PKO_PORT] =
+ {[0 ... CVMX_HELPER_CFG_MAX_PKO_PORT - 1] =
+ {CVMX_HELPER_CFG_INVALID_VALUE,
+ CVMX_HELPER_CFG_INVALID_VALUE,
+ CVMX_HELPER_CFG_INVALID_VALUE}};
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+/*
+ * This array assists translation from ipd_port to pko_port.
+ * The ``16'' is the rounded value for the 3rd 4-bit value of
+ * ipd_port, used to differentiate ``interfaces.''
+ */
+static CVMX_SHARED struct cvmx_cfg_pko_port_pair ipd2pko_port_cache[16]
+ [CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] =
+ {[0 ... 15] =
+ {[0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE - 1] =
+ {CVMX_HELPER_CFG_INVALID_VALUE,
+ CVMX_HELPER_CFG_INVALID_VALUE}}};
+
+#ifdef CVMX_USER_DEFINED_HELPER_CONFIG_INIT
+
+static CVMX_SHARED int cvmx_cfg_default_pko_nqueues = 1;
+
+/*
+ * A pool for holding the pko_nqueues for the pko_ports assigned to a
+ * physical port.
+ */
+static CVMX_SHARED uint8_t cvmx_cfg_pko_nqueue_pool
+ [CVMX_HELPER_CFG_MAX_PKO_QUEUES] =
+ {[0 ... CVMX_HELPER_CFG_MAX_PKO_QUEUES - 1] = 1};
+
+#endif
+#endif
+
+/*
+ * Options
+ *
+ * Each array-elem's initial value is also the option's default value.
+ */
+static CVMX_SHARED uint64_t cvmx_cfg_opts[CVMX_HELPER_CFG_OPT_MAX] =
+ {[0 ... CVMX_HELPER_CFG_OPT_MAX - 1] = 1};
+
+/*
+ * MISC
+ */
+static CVMX_SHARED int cvmx_cfg_max_pko_engines; /* # of PKO DMA engines
+ allocated */
+int __cvmx_helper_cfg_pknd(int interface, int index)
+{
+ return cvmx_cfg_port[interface][index].ccpp_pknd;
+}
+
+int __cvmx_helper_cfg_bpid(int interface, int index)
+{
+ return cvmx_cfg_port[interface][index].ccpp_bpid;
+}
+
+int __cvmx_helper_cfg_pko_port_base(int interface, int index)
+{
+ return cvmx_cfg_port[interface][index].ccpp_pko_port_base;
+}
+
+int __cvmx_helper_cfg_pko_port_num(int interface, int index)
+{
+ return cvmx_cfg_port[interface][index].ccpp_pko_num_ports;
+}
+
+int __cvmx_helper_cfg_pko_queue_num(int pko_port)
+{
+ return cvmx_cfg_pko_port[pko_port].ccppp_num_queues;
+}
+
+int __cvmx_helper_cfg_pko_queue_base(int pko_port)
+{
+ return cvmx_cfg_pko_port[pko_port].ccppp_queue_base;
+}
+
+int __cvmx_helper_cfg_pko_max_queue(void)
+{
+ int i;
+
+ i = CVMX_HELPER_CFG_MAX_PKO_PORT - 1;
+
+ while (i >= 0)
+ {
+ if (cvmx_cfg_pko_port[i].ccppp_queue_base !=
+ CVMX_HELPER_CFG_INVALID_VALUE)
+ {
+ cvmx_helper_cfg_assert(cvmx_cfg_pko_port[i].ccppp_num_queues > 0);
+ return (cvmx_cfg_pko_port[i].ccppp_queue_base +
+ cvmx_cfg_pko_port[i].ccppp_num_queues);
+ }
+ i --;
+ }
+
+ cvmx_helper_cfg_assert(0); /* shouldn't get here */
+
+ return 0;
+}
+
+int __cvmx_helper_cfg_pko_max_engine(void)
+{
+ return cvmx_cfg_max_pko_engines;
+}
+
+int cvmx_helper_cfg_opt_set(cvmx_helper_cfg_option_t opt, uint64_t val)
+{
+ if (opt >= CVMX_HELPER_CFG_OPT_MAX)
+ return -1;
+
+ cvmx_cfg_opts[opt] = val;
+
+ return 0;
+}
+
+uint64_t cvmx_helper_cfg_opt_get(cvmx_helper_cfg_option_t opt)
+{
+ if (opt >= CVMX_HELPER_CFG_OPT_MAX)
+ return (uint64_t)CVMX_HELPER_CFG_INVALID_VALUE;
+
+ return cvmx_cfg_opts[opt];
+}
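As a usage sketch, assuming CVMX_HELPER_CFG_OPT_USE_DWB is a member of cvmx_helper_cfg_option_t in the matching header:

    /* Disable the "Don't Write Back" option, then read it back. */
    cvmx_helper_cfg_opt_set(CVMX_HELPER_CFG_OPT_USE_DWB, 0);
    uint64_t use_dwb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);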
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(__cvmx_helper_cfg_init);
+EXPORT_SYMBOL(__cvmx_helper_cfg_pknd);
+EXPORT_SYMBOL(__cvmx_helper_cfg_bpid);
+EXPORT_SYMBOL(__cvmx_helper_cfg_pko_port_base);
+EXPORT_SYMBOL(__cvmx_helper_cfg_pko_port_num);
+EXPORT_SYMBOL(__cvmx_helper_cfg_pko_queue_base);
+EXPORT_SYMBOL(__cvmx_helper_cfg_pko_queue_num);
+EXPORT_SYMBOL(__cvmx_helper_cfg_pko_max_queue);
+EXPORT_SYMBOL(__cvmx_helper_cfg_pko_port_interface);
+EXPORT_SYMBOL(__cvmx_helper_cfg_pko_port_index);
+EXPORT_SYMBOL(__cvmx_helper_cfg_pko_port_eid);
+EXPORT_SYMBOL(__cvmx_helper_cfg_pko_max_engine);
+EXPORT_SYMBOL(cvmx_helper_cfg_opt_get);
+EXPORT_SYMBOL(cvmx_helper_cfg_opt_set);
+EXPORT_SYMBOL(cvmx_helper_cfg_ipd2pko_port_base);
+EXPORT_SYMBOL(cvmx_helper_cfg_ipd2pko_port_num);
+#endif
+
+#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
+
+#ifdef CVMX_HELPER_CFG_DEBUG
+void cvmx_helper_cfg_show_cfg(void)
+{
+ int i, j;
+
+ for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++)
+ {
+ cvmx_dprintf(
+ "cvmx_helper_cfg_show_cfg: interface%d mode %10s nports%4d\n", i,
+ cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(i)),
+ cvmx_helper_interface_enumerate(i));
+
+ for (j = 0; j < cvmx_helper_interface_enumerate(i); j++)
+ {
+ cvmx_dprintf("\tpknd[%i][%d]%d", i, j,
+ __cvmx_helper_cfg_pknd(i, j));
+ cvmx_dprintf(" pko_port_base[%i][%d]%d", i, j,
+ __cvmx_helper_cfg_pko_port_base(i, j));
+ cvmx_dprintf(" pko_port_num[%i][%d]%d\n", i, j,
+ __cvmx_helper_cfg_pko_port_num(i, j));
+ }
+ }
+
+ for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
+ {
+ if (__cvmx_helper_cfg_pko_queue_base(i) !=
+ CVMX_HELPER_CFG_INVALID_VALUE)
+ {
+ cvmx_dprintf("cvmx_helper_cfg_show_cfg: pko_port%d qbase%d nqueues%d "
+ "interface%d index%d\n", i,
+ __cvmx_helper_cfg_pko_queue_base(i),
+ __cvmx_helper_cfg_pko_queue_num(i),
+ __cvmx_helper_cfg_pko_port_interface(i),
+ __cvmx_helper_cfg_pko_port_index(i));
+ }
+ }
+}
+#endif
+
+/*
+ * initialize cvmx_cfg_pko_port_map
+ */
+static void cvmx_helper_cfg_init_pko_port_map(void)
+{
+ int i, j, k;
+ int pko_eid;
+ int pko_port_base, pko_port_max;
+ cvmx_helper_interface_mode_t mode;
+
+ /*
+ * one pko_eid is allocated to each port except for ILK, NPI, and
+ * LOOP. Each of the three has one eid.
+ */
+ pko_eid = 0;
+ for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++)
+ {
+ mode = cvmx_helper_interface_get_mode(i);
+ for (j = 0; j < cvmx_helper_interface_enumerate(i); j++)
+ {
+ pko_port_base = cvmx_cfg_port[i][j].ccpp_pko_port_base;
+ pko_port_max = pko_port_base +
+ cvmx_cfg_port[i][j].ccpp_pko_num_ports;
+ cvmx_helper_cfg_assert(pko_port_base !=
+ CVMX_HELPER_CFG_INVALID_VALUE);
+ cvmx_helper_cfg_assert(pko_port_max >= pko_port_base);
+ for (k = pko_port_base; k < pko_port_max; k++)
+ {
+ cvmx_cfg_pko_port_map[k].ccppl_interface = i;
+ cvmx_cfg_pko_port_map[k].ccppl_index = j;
+ cvmx_cfg_pko_port_map[k].ccppl_eid = pko_eid;
+ }
+
+#if 0
+ /*
+ * For a physical port that is not configured a PKO port,
+ * pko_port_base here equals to pko_port_max. In this
+ * case, the physical port does not take a DMA engine.
+ */
+ if (pko_port_base > pko_port_max)
+#endif
+ if (!(mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
+ mode == CVMX_HELPER_INTERFACE_MODE_LOOP ||
+ mode == CVMX_HELPER_INTERFACE_MODE_ILK))
+ pko_eid ++;
+ }
+
+ if (mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
+ mode == CVMX_HELPER_INTERFACE_MODE_LOOP ||
+ mode == CVMX_HELPER_INTERFACE_MODE_ILK)
+ pko_eid ++;
+ }
+
+ /*
+ * Legal pko_eids [0, 0x13] should not be exhausted.
+ */
+ cvmx_helper_cfg_assert(pko_eid <= 0x14);
+
+ cvmx_cfg_max_pko_engines = pko_eid;
+}
+#endif
+
+int __cvmx_helper_cfg_pko_port_interface(int pko_port)
+{
+ return cvmx_cfg_pko_port_map[pko_port].ccppl_interface;
+}
+
+int __cvmx_helper_cfg_pko_port_index(int pko_port)
+{
+ return cvmx_cfg_pko_port_map[pko_port].ccppl_index;
+}
+
+int __cvmx_helper_cfg_pko_port_eid(int pko_port)
+{
+ return cvmx_cfg_pko_port_map[pko_port].ccppl_eid;
+}
+
+/**
+ * Perform common init tasks for all chips.
+ * @return 1 for the caller to continue init and 0 otherwise.
+ *
+ * Note: ``common'' means this function is executed regardless of
+ * - chip, and
+ * - CVMX_ENABLE_HELPER_FUNCTIONS.
+ *
+ * This function decides based on these conditions if the
+ * configuration stage of the init process should continue.
+ *
+ * This is only meant to be called by __cvmx_helper_cfg_init().
+ */
+static int __cvmx_helper_cfg_init_common(void)
+{
+ int val;
+
+#ifndef CVMX_ENABLE_HELPER_FUNCTIONS
+ val = 0;
+#else
+ val = (octeon_has_feature(OCTEON_FEATURE_PKND));
+#endif
+
+ return val;
+}
+
+#define IPD2PKO_CACHE_Y(ipd_port) ((ipd_port) >> 8)
+#define IPD2PKO_CACHE_X(ipd_port) ((ipd_port) & 0xff)
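+
+/*
+ * Illustrative note (not in the original source): the cache below is
+ * indexed as [y][x]. For example, ipd_port 0x840 gives
+ * y = 0x840 >> 8 = 8 and x = 0x840 & 0xff = 0x40; because bit 0x800 is
+ * set, the second index becomes (0x40 >> 4) & 3 = 0, so the entry lands
+ * at ipd2pko_port_cache[8][0].
+ */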
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+/*
+ * ipd_port to pko_port translation cache
+ */
+static int __cvmx_helper_cfg_init_ipd2pko_cache(void)
+{
+ int i, j, n;
+ int ipd_y, ipd_x, ipd_port;
+
+ for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++)
+ {
+ n = cvmx_helper_interface_enumerate(i);
+
+ for (j = 0; j < n; j++)
+ {
+ ipd_port = cvmx_helper_get_ipd_port(i, j);
+ ipd_y = IPD2PKO_CACHE_Y(ipd_port);
+ ipd_x = IPD2PKO_CACHE_X(ipd_port);
+ ipd2pko_port_cache[ipd_y]
+ [(ipd_port & 0x800) ? ((ipd_x >> 4) & 3) : ipd_x] =
+ (struct cvmx_cfg_pko_port_pair)
+ {__cvmx_helper_cfg_pko_port_base(i, j),
+ __cvmx_helper_cfg_pko_port_num(i, j)};
+ }
+ }
+
+ return 0;
+}
+
+int cvmx_helper_cfg_ipd2pko_port_base(int ipd_port)
+{
+ int ipd_y, ipd_x;
+
+ ipd_y = IPD2PKO_CACHE_Y(ipd_port);
+ ipd_x = IPD2PKO_CACHE_X(ipd_port);
+
+ return ipd2pko_port_cache[ipd_y]
+ [(ipd_port & 0x800) ? ((ipd_x >> 4) & 3) : ipd_x].ccppp_base_port;
+}
+
+int cvmx_helper_cfg_ipd2pko_port_num(int ipd_port)
+{
+ int ipd_y, ipd_x;
+
+ ipd_y = IPD2PKO_CACHE_Y(ipd_port);
+ ipd_x = IPD2PKO_CACHE_X(ipd_port);
+
+ return ipd2pko_port_cache[ipd_y]
+ [(ipd_port & 0x800) ? ((ipd_x >> 4) & 3) : ipd_x].ccppp_nports;
+}
+#endif
+
+#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
+#ifdef CVMX_USER_DEFINED_HELPER_CONFIG_INIT
+/**
+ * Return the number of queues assigned to this pko_port by the user
+ *
+ * @param pko_port
+ * @return the number of queues for this pko_port
+ *
+ * Note: Called after the pko_port map is set up.
+ */
+static int __cvmx_ucfg_nqueues(int pko_port)
+{
+ int interface, index;
+ int i, k;
+
+ interface = __cvmx_helper_cfg_pko_port_interface(pko_port);
+ index = __cvmx_helper_cfg_pko_port_index(pko_port);
+
+ /*
+ * If the pko_port belongs to no physical port,
+ * don't assign a queue to it.
+ */
+ if (interface == CVMX_HELPER_CFG_INVALID_VALUE ||
+ index == CVMX_HELPER_CFG_INVALID_VALUE)
+ return 0;
+
+ /*
+ * Assign the default number of queues to those pko_ports not
+ * assigned explicitly.
+ */
+ i = cvmx_cfg_port[interface][index].ccpp_pko_nqueues;
+ if (i == (uint8_t)CVMX_HELPER_CFG_INVALID_VALUE)
+ return cvmx_cfg_default_pko_nqueues;
+
+ /*
+ * The user has assigned nqueues to this pko_port,
+ * recorded in the pool.
+ */
+ k = pko_port - cvmx_cfg_port[interface][index].ccpp_pko_port_base;
+ cvmx_helper_cfg_assert(k <
+ cvmx_cfg_port[interface][index].ccpp_pko_num_ports);
+ return cvmx_cfg_pko_nqueue_pool[i + k];
+}
+
+#else
+
+/**
+ * Return the number of queues to be assigned to this pko_port
+ *
+ * @param pko_port
+ * @return the number of queues for this pko_port
+ *
+ * Note: This function exists for backward compatibility:
+ * CVMX_PKO_QUEUES_PER_PORT_XXXX defines the number of queues per HW
+ * port, which is what a pko_port corresponds to in pre-o68 SDKs.
+ */
+static int cvmx_helper_cfg_dft_nqueues(int pko_port)
+{
+ cvmx_helper_interface_mode_t mode;
+ int interface;
+ int n;
+
+#ifndef CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE0
+#define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE0 1
+#endif
+
+#ifndef CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE1
+#define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE1 1
+#endif
+
+#ifndef CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE2
+#define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE2 1
+#endif
+
+#ifndef CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE3
+#define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE3 1
+#endif
+
+#ifndef CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE4
+#define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE4 1
+#endif
+
+ n = 1;
+ interface = __cvmx_helper_cfg_pko_port_interface(pko_port);
+ if (interface == 0)
+ {
+#ifdef CVMX_PKO_QUEUES_PER_PORT_INTERFACE0
+ n = CVMX_PKO_QUEUES_PER_PORT_INTERFACE0;
+#endif
+ }
+ if (interface == 1)
+ {
+#ifdef CVMX_PKO_QUEUES_PER_PORT_INTERFACE1
+ n = CVMX_PKO_QUEUES_PER_PORT_INTERFACE1;
+#endif
+ }
+
+ if (interface == 2)
+ {
+#ifdef CVMX_PKO_QUEUES_PER_PORT_INTERFACE2
+ n = CVMX_PKO_QUEUES_PER_PORT_INTERFACE2;
+#endif
+ }
+ if (interface == 3)
+ {
+#ifdef CVMX_PKO_QUEUES_PER_PORT_INTERFACE3
+ n = CVMX_PKO_QUEUES_PER_PORT_INTERFACE3;
+#endif
+ }
+ if (interface == 4)
+ {
+#ifdef CVMX_PKO_QUEUES_PER_PORT_INTERFACE4
+ n = CVMX_PKO_QUEUES_PER_PORT_INTERFACE4;
+#endif
+ }
+
+ mode = cvmx_helper_interface_get_mode(interface);
+ if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
+ {
+#ifdef CVMX_PKO_QUEUES_PER_PORT_LOOP
+ n = CVMX_PKO_QUEUES_PER_PORT_LOOP;
+#endif
+ }
+ if (mode == CVMX_HELPER_INTERFACE_MODE_NPI)
+ {
+#ifdef CVMX_PKO_QUEUES_PER_PORT_PCI
+ n = CVMX_PKO_QUEUES_PER_PORT_PCI;
+#endif
+ }
+
+ return n;
+}
+#endif /* CVMX_USER_DEFINED_HELPER_CONFIG_INIT */
+#endif /* CVMX_ENABLE_HELPER_FUNCTIONS */
+
+int __cvmx_helper_cfg_init(void)
+{
+#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
+ struct cvmx_cfg_port_param *pport;
+ int cvmx_cfg_default_pko_nports;
+ int pknd, bpid, pko_port_base;
+ int qbase;
+ int i, j, n;
+
+ cvmx_cfg_default_pko_nports = 1;
+#endif
+
+ if (!__cvmx_helper_cfg_init_common())
+ return 0;
+
+#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
+
+#ifdef CVMX_USER_DEFINED_HELPER_CONFIG_INIT
+{
+ int cvmx_ucfg_nq;
+ cvmx_ucfg_nq = 0;
+#include "cvmx-helper-cfg-init.c"
+}
+#endif
+
+ /*
+ * per-port parameters
+ */
+ pknd = 0;
+ bpid = 0;
+ pko_port_base = 0;
+
+ for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++)
+ {
+ n = cvmx_helper_interface_enumerate(i);
+
+ pport = cvmx_cfg_port[i];
+ for (j = 0; j < n; j++, pport++)
+ {
+ int t;
+
+ t = cvmx_cfg_default_pko_nports;
+ if (pport->ccpp_pko_num_ports != CVMX_HELPER_CFG_INVALID_VALUE)
+ t = pport->ccpp_pko_num_ports;
+
+ *pport = (struct cvmx_cfg_port_param) {
+ pknd++,
+ bpid++,
+ pko_port_base,
+ t,
+ pport->ccpp_pko_nqueues};
+ pko_port_base += t;
+ }
+ }
+
+ cvmx_helper_cfg_assert(pknd <= CVMX_HELPER_CFG_MAX_PIP_PKND);
+ cvmx_helper_cfg_assert(bpid <= CVMX_HELPER_CFG_MAX_PIP_BPID);
+ cvmx_helper_cfg_assert(pko_port_base <= CVMX_HELPER_CFG_MAX_PKO_PORT);
+
+ /*
+ * pko_port map
+ */
+ cvmx_helper_cfg_init_pko_port_map();
+
+ /*
+ * per-pko_port parameters
+ */
+ qbase = 0;
+ for (i = 0; i < pko_port_base; i++)
+ {
+#ifdef CVMX_USER_DEFINED_HELPER_CONFIG_INIT
+ n = __cvmx_ucfg_nqueues(i);
+#else
+ n = cvmx_helper_cfg_dft_nqueues(i);
+#endif
+ cvmx_cfg_pko_port[i] = (struct cvmx_cfg_pko_port_param) {qbase, n};
+ qbase += n;
+ cvmx_helper_cfg_assert(qbase <= CVMX_HELPER_CFG_MAX_PKO_QUEUES);
+ }
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+ __cvmx_helper_cfg_init_ipd2pko_cache();
+#endif
+
+#ifdef CVMX_HELPER_CFG_DEBUG
+ cvmx_helper_cfg_show_cfg();
+#endif /* CVMX_HELPER_CFG_DEBUG */
+#endif
+ return 0;
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-cfg.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-cfg.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-cfg.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-cfg.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,283 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Helper Functions for the Configuration Framework
+ *
+ * OCTEON_CN68XX introduces a flexible hw interface configuration
+ * scheme. To cope with this change and the requirements of
+ * configurability for other system resources, e.g., IPD/PIP pknd and
+ * PKO ports and queues, a configuration framework for the SDK is
+ * designed. It has two goals: first to recognize and establish the
+ * default configuration and, second, to allow the user to define key
+ * parameters in a high-level language.
+ *
+ * The helper functions query the QLM setup to help achieve the
+ * first goal.
+ *
+ * The second goal is accomplished by generating
+ * cvmx_helper_cfg_init() from a high-level language.
+ *
+ * <hr>$Revision: 0 $<hr>
+ */
+
+#ifndef __CVMX_HELPER_CFG_H__
+#define __CVMX_HELPER_CFG_H__
+
+#define CVMX_HELPER_CFG_MAX_IFACE 9
+#define CVMX_HELPER_CFG_MAX_PKO_PORT 128
+#define CVMX_HELPER_CFG_MAX_PIP_BPID 64
+#define CVMX_HELPER_CFG_MAX_PIP_PKND 64
+#define CVMX_HELPER_CFG_MAX_PKO_QUEUES 256
+#define CVMX_HELPER_CFG_MAX_PORT_PER_IFACE 256
+
+#define CVMX_HELPER_CFG_INVALID_VALUE -1 /* The default return
+ * value upon failure
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define cvmx_helper_cfg_assert(cond) \
+ do { \
+ if (!(cond)) \
+ { \
+ cvmx_dprintf("cvmx_helper_cfg_assert (%s) at %s:%d\n", \
+ #cond, __FILE__, __LINE__); \
+ } \
+ } while (0)
+
+/*
+ * Config Options
+ *
+ * These options have to be set via cvmx_helper_cfg_opt_set() before calling the
+ * routines that set up the hw. These routines process the options and set them
+ * correctly to take effect at runtime.
+ */
+enum cvmx_helper_cfg_option {
+ CVMX_HELPER_CFG_OPT_USE_DWB, /*
+ * Global option to control if
+ * the SDK configures units (DMA,
+ * SSO, and PKO) to send don't
+ * write back (DWB) requests for
+ * freed buffers. Set to 1/0 to
+ * enable/disable DWB.
+ *
+ * For programs that fit inside
+ * L2, sending DWB just causes
+ * more L2 operations without
+ * benefit.
+ */
+
+ CVMX_HELPER_CFG_OPT_MAX
+};
+typedef enum cvmx_helper_cfg_option cvmx_helper_cfg_option_t;
+
+/*
+ * @INTERNAL
+ * Return configured pknd for the port
+ *
+ * @param interface the interface number
+ * @param index the port's index number
+ * @return the pknd
+ */
+extern int __cvmx_helper_cfg_pknd(int interface, int index);
+
+/*
+ * @INTERNAL
+ * Return the configured bpid for the port
+ *
+ * @param interface the interface number
+ * @param index the port's index number
+ * @return the bpid
+ */
+extern int __cvmx_helper_cfg_bpid(int interface, int index);
+
+/*
+ * @INTERNAL
+ * Return the configured pko_port base for the port
+ *
+ * @param interface the interface number
+ * @param index the port's index number
+ * @return the pko_port base
+ */
+extern int __cvmx_helper_cfg_pko_port_base(int interface, int index);
+
+/*
+ * @INTERNAL
+ * Return the configured number of pko_ports for the port
+ *
+ * @param interface the interface number
+ * @param index the port's index number
+ * @return the number of pko_ports
+ */
+extern int __cvmx_helper_cfg_pko_port_num(int interface, int index);
+
+/*
+ * @INTERNAL
+ * Return the configured pko_queue base for the pko_port
+ *
+ * @param pko_port
+ * @return the pko_queue base
+ */
+extern int __cvmx_helper_cfg_pko_queue_base(int pko_port);
+
+/*
+ * @INTERNAL
+ * Return the configured number of pko_queues for the pko_port
+ *
+ * @param pko_port
+ * @return the number of pko_queues
+ */
+extern int __cvmx_helper_cfg_pko_queue_num(int pko_port);
+
+/*
+ * @INTERNAL
+ * Return the interface the pko_port is configured for
+ *
+ * @param pko_port
+ * @return the interface for the pko_port
+ */
+extern int __cvmx_helper_cfg_pko_port_interface(int pko_port);
+
+/*
+ * @INTERNAL
+ * Return the index of the port the pko_port is configured for
+ *
+ * @param pko_port
+ * @return the index of the port
+ */
+extern int __cvmx_helper_cfg_pko_port_index(int pko_port);
+
+/*
+ * @INTERNAL
+ * Return the pko_eid of the pko_port
+ *
+ * @param pko_port
+ * @return the pko_eid
+ */
+extern int __cvmx_helper_cfg_pko_port_eid(int pko_port);
+
+/*
+ * @INTERNAL
+ * Return the max# of pko queues allocated.
+ *
+ * @return the max# of pko queues
+ *
+ * Note: there might be holes in the queue space depending on user
+ * configuration. The function returns the highest queue's index in
+ * use.
+ */
+extern int __cvmx_helper_cfg_pko_max_queue(void);
+
+/*
+ * @INTERNAL
+ * Return the max# of PKO DMA engines allocated.
+ *
+ * @return the max# of DMA engines
+ *
+ * NOTE: the DMA engines are allocated contiguously and starting from
+ * 0.
+ */
+extern int __cvmx_helper_cfg_pko_max_engine(void);
+
+/*
+ * Get the value set for the config option ``opt''.
+ *
+ * @param opt is the config option.
+ * @return the value set for the option
+ */
+extern uint64_t cvmx_helper_cfg_opt_get(cvmx_helper_cfg_option_t opt);
+
+/*
+ * Set the value for a config option.
+ *
+ * @param opt is the config option.
+ * @param val is the value to set for the opt.
+ * @return 0 for success and -1 on error
+ *
+ * Note: an option here is a config-time parameter; it has to be set
+ * before calling the corresponding setup functions that actually
+ * apply the option in hw.
+ */
+extern int cvmx_helper_cfg_opt_set(cvmx_helper_cfg_option_t opt, uint64_t val);
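+
+/*
+ * Example usage (an illustrative sketch, not part of the original header):
+ * disable DWB before the hw setup routines run, then read the value back.
+ *
+ *   cvmx_helper_cfg_opt_set(CVMX_HELPER_CFG_OPT_USE_DWB, 0);
+ *   uint64_t use_dwb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);
+ */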
+
+/*
+ * Retrieve the pko_port base given ipd_port.
+ *
+ * @param ipd_port is the IPD eport
+ * @return the corresponding PKO port base for the physical port
+ * represented by the IPD eport or CVMX_HELPER_CFG_INVALID_VALUE.
+ */
+extern int cvmx_helper_cfg_ipd2pko_port_base(int ipd_port);
+
+/*
+ * Retrieve the number of pko_ports given ipd_port.
+ *
+ * @param ipd_port is the IPD eport
+ * @return the corresponding number of PKO ports for the physical port
+ * represented by IPD eport or CVMX_HELPER_CFG_INVALID_VALUE.
+ */
+extern int cvmx_helper_cfg_ipd2pko_port_num(int ipd_port);
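+
+/*
+ * Example usage (an illustrative sketch; ipd_port is assumed to be a
+ * valid IPD eport): recover the PKO port range backing a physical port.
+ * The port owns PKO ports [base, base + num).
+ *
+ *   int base = cvmx_helper_cfg_ipd2pko_port_base(ipd_port);
+ *   int num  = cvmx_helper_cfg_ipd2pko_port_num(ipd_port);
+ */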
+
+/*
+ * @INTERNAL
+ * The init function
+ *
+ * @param none
+ * @return 0 for success.
+ *
+ * Note: this function is meant to be called to set the ``configured
+ * parameters,'' e.g., pknd, bpid, etc., and therefore should be
+ * called before any of the corresponding cvmx_helper_cfg_xxxx()
+ * functions are called.
+ */
+
+extern int __cvmx_helper_cfg_init(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_HELPER_CFG_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-cfg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-check-defines.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-check-defines.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-check-defines.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,98 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Validate defines required by cvmx-helper. This header file
+ * validates a number of defines required for cvmx-helper to
+ * function properly. It either supplies a default or fails
+ * to compile if a define is incorrect.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __CVMX_HELPER_CHECK_DEFINES_H__
+#define __CVMX_HELPER_CHECK_DEFINES_H__
+
+/* CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve before
+ the beginning of the packet. Override in executive-config.h */
+#ifndef CVMX_HELPER_FIRST_MBUFF_SKIP
+#define CVMX_HELPER_FIRST_MBUFF_SKIP 184
+#warning WARNING: default CVMX_HELPER_FIRST_MBUFF_SKIP used. Defaults deprecated, please set in executive-config.h
+#endif
+
+/* CVMX_HELPER_NOT_FIRST_MBUFF_SKIP is the number of bytes to reserve in each
+ chained packet element. Override in executive-config.h */
+#ifndef CVMX_HELPER_NOT_FIRST_MBUFF_SKIP
+#define CVMX_HELPER_NOT_FIRST_MBUFF_SKIP 0
+#warning WARNING: default CVMX_HELPER_NOT_FIRST_MBUFF_SKIP used. Defaults deprecated, please set in executive-config.h
+#endif
+
+/* CVMX_HELPER_ENABLE_IPD controls whether the IPD is enabled in the helper
+ function. Once it is enabled the hardware starts accepting packets. You
+ might want to skip the IPD enable if configuration changes are needed
+ beyond the default helper setup. Override in executive-config.h */
+#ifndef CVMX_HELPER_ENABLE_IPD
+#define CVMX_HELPER_ENABLE_IPD 1
+#warning WARNING: default CVMX_HELPER_ENABLE_IPD used. Defaults deprecated, please set in executive-config.h
+#endif
+
+/* Set default (defaults are deprecated) input tag type */
+#ifndef CVMX_HELPER_INPUT_TAG_TYPE
+#define CVMX_HELPER_INPUT_TAG_TYPE CVMX_POW_TAG_TYPE_ORDERED
+#warning WARNING: default CVMX_HELPER_INPUT_TAG_TYPE used. Defaults deprecated, please set in executive-config.h
+#endif
+
+#ifndef CVMX_HELPER_INPUT_PORT_SKIP_MODE
+#define CVMX_HELPER_INPUT_PORT_SKIP_MODE CVMX_PIP_PORT_CFG_MODE_SKIPL2
+#warning WARNING: default CVMX_HELPER_INPUT_PORT_SKIP_MODE used. Defaults deprecated, please set in executive-config.h
+#endif
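+
+/* Illustrative executive-config.h override (a sketch; the values shown
+   are just the defaults from this file, not recommendations):
+
+   #define CVMX_HELPER_FIRST_MBUFF_SKIP     184
+   #define CVMX_HELPER_NOT_FIRST_MBUFF_SKIP 0
+   #define CVMX_HELPER_ENABLE_IPD           1
+   #define CVMX_HELPER_INPUT_TAG_TYPE       CVMX_POW_TAG_TYPE_ORDERED
+*/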
+
+#if defined(CVMX_ENABLE_HELPER_FUNCTIONS) && !defined(CVMX_HELPER_INPUT_TAG_INPUT_PORT)
+#error CVMX_HELPER_INPUT_TAG_* values for determining tag hash inputs must be defined in executive-config.h
+#endif
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-check-defines.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-errata.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-errata.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-errata.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,330 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Fixes and workarounds for Octeon chip errata. This file
+ * contains functions called by cvmx-helper to workaround known
+ * chip errata. For the most part, code doesn't need to call
+ * these functions directly.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-jtag.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-asxx-defs.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#endif
+
+#include "cvmx.h"
+
+#include "cvmx-fpa.h"
+#include "cvmx-pip.h"
+#include "cvmx-pko.h"
+#include "cvmx-ipd.h"
+#include "cvmx-gmx.h"
+#include "cvmx-spi.h"
+#include "cvmx-pow.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-jtag.h"
+#endif
+
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+
+/**
+ * @INTERNAL
+ * Function to adjust internal IPD pointer alignments
+ *
+ * @return 0 on success
+ * !0 on failure
+ */
+int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
+{
+#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP)
+#define FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_NOT_FIRST_MBUFF_SKIP)
+#define FIX_IPD_OUTPORT 0
+#define INTERFACE(port) ((port) >> 4) /* Ports 0-15 are interface 0, 16-31 are interface 1 */
+#define INDEX(port) ((port) & 0xf)
+ uint64_t *p64;
+ cvmx_pko_command_word0_t pko_command;
+ cvmx_buf_ptr_t g_buffer, pkt_buffer;
+ cvmx_wqe_t *work;
+ int size, num_segs = 0, wqe_pcnt, pkt_pcnt;
+ cvmx_gmxx_prtx_cfg_t gmx_cfg;
+ int retry_cnt;
+ int retry_loop_cnt;
+ int i;
+ cvmx_helper_link_info_t link_info;
+
+ /* Save values for restore at end */
+ uint64_t prtx_cfg = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t tx_ptr_en = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t rx_ptr_en = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t rxx_jabber = cvmx_read_csr(CVMX_GMXX_RXX_JABBER(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t frame_max = cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+
+ /* Configure port to gig FDX as required for loopback mode */
+ cvmx_helper_rgmii_internal_loopback(FIX_IPD_OUTPORT);
+
+ /* Disable reception on all ports so if traffic is present it will not interfere. */
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
+
+ cvmx_wait(100000000ull);
+
+ for (retry_loop_cnt = 0;retry_loop_cnt < 10;retry_loop_cnt++)
+ {
+ retry_cnt = 100000;
+ wqe_pcnt = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
+ pkt_pcnt = (wqe_pcnt >> 7) & 0x7f;
+ wqe_pcnt &= 0x7f;
+
+ num_segs = (2 + pkt_pcnt - wqe_pcnt) & 3;
+
+ if (num_segs == 0)
+ goto fix_ipd_exit;
+
+ num_segs += 1;
+
+ size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES + ((num_segs-1)*FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES) -
+ (FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES / 2);
+
+ cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 1 << INDEX(FIX_IPD_OUTPORT));
+ CVMX_SYNC;
+
+ g_buffer.u64 = 0;
+ g_buffer.s.addr = cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_WQE_POOL));
+ if (g_buffer.s.addr == 0) {
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT buffer allocation failure.\n");
+ goto fix_ipd_exit;
+ }
+
+ g_buffer.s.pool = CVMX_FPA_WQE_POOL;
+ g_buffer.s.size = num_segs;
+
+ pkt_buffer.u64 = 0;
+ pkt_buffer.s.addr = cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL));
+ if (pkt_buffer.s.addr == 0) {
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT buffer allocation failure.\n");
+ goto fix_ipd_exit;
+ }
+ pkt_buffer.s.i = 1;
+ pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
+ pkt_buffer.s.size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES;
+
+ p64 = (uint64_t*) cvmx_phys_to_ptr(pkt_buffer.s.addr);
+ p64[0] = 0xffffffffffff0000ull;
+ p64[1] = 0x08004510ull;
+ p64[2] = ((uint64_t)(size-14) << 48) | 0x5ae740004000ull;
+ p64[3] = 0x3a5fc0a81073c0a8ull;
+
+ for (i=0;i<num_segs;i++)
+ {
+ if (i>0)
+ pkt_buffer.s.size = FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES;
+
+ if (i==(num_segs-1))
+ pkt_buffer.s.i = 0;
+
+ *(uint64_t*)cvmx_phys_to_ptr(g_buffer.s.addr + 8*i) = pkt_buffer.u64;
+ }
+
+ /* Build the PKO command */
+ pko_command.u64 = 0;
+ pko_command.s.segs = num_segs;
+ pko_command.s.total_bytes = size;
+ pko_command.s.dontfree = 0;
+ pko_command.s.gather = 1;
+
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), gmx_cfg.u64);
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 1 << INDEX(FIX_IPD_OUTPORT));
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 1 << INDEX(FIX_IPD_OUTPORT));
+
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), 65392-14-4);
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), 65392-14-4);
+
+ cvmx_pko_send_packet_prepare(FIX_IPD_OUTPORT, cvmx_pko_get_base_queue(FIX_IPD_OUTPORT), CVMX_PKO_LOCK_CMD_QUEUE);
+ cvmx_pko_send_packet_finish(FIX_IPD_OUTPORT, cvmx_pko_get_base_queue(FIX_IPD_OUTPORT), pko_command, g_buffer, CVMX_PKO_LOCK_CMD_QUEUE);
+
+ CVMX_SYNC;
+
+ do {
+ work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
+ retry_cnt--;
+ } while ((work == NULL) && (retry_cnt > 0));
+
+ if (!retry_cnt)
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT get_work() timeout occurred.\n");
+
+
+ /* Free packet */
+ if (work)
+ cvmx_helper_free_packet_data(work);
+ }
+
+fix_ipd_exit:
+
+ /* Return CSR configs to saved values */
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), prtx_cfg);
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), tx_ptr_en);
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), rx_ptr_en);
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), rxx_jabber);
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), frame_max);
+ cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
+ link_info.u64 = 0; /* Set link to down so autonegotiation will set it up again */
+ cvmx_helper_link_set(FIX_IPD_OUTPORT, link_info);
+
+ /* Bring the link back up as autonegotiation is not done in user applications. */
+ cvmx_helper_link_autoconf(FIX_IPD_OUTPORT);
+
+ CVMX_SYNC;
+ if (num_segs)
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT failed.\n");
+
+ return(!!num_segs);
+
+}
+
+
+/**
+ * This function needs to be called on all Octeon chips with
+ * errata PKI-100.
+ *
+ * The Size field is 8 too large in WQE and next pointers
+ *
+ * The Size field generated by IPD is 8 larger than it should
+ * be. The Size field is <55:40> of both:
+ * - WORD3 in the work queue entry, and
+ * - the next buffer pointer (which precedes the packet data
+ * in each buffer).
+ *
+ * @param work Work queue entry to fix
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_helper_fix_ipd_packet_chain(cvmx_wqe_t *work)
+{
+ uint64_t number_buffers = work->word2.s.bufs;
+
+ /* We only need to do this if the work has buffers */
+ if (number_buffers)
+ {
+ cvmx_buf_ptr_t buffer_ptr = work->packet_ptr;
+ /* Check for errata PKI-100 */
+ if ( (buffer_ptr.s.pool == 0) && (((uint64_t)buffer_ptr.s.size +
+ ((uint64_t)buffer_ptr.s.back << 7) + ((uint64_t)buffer_ptr.s.addr & 0x7F))
+ != (CVMX_FPA_PACKET_POOL_SIZE+8))) {
+ /* fix is not needed */
+ return 0;
+ }
+ /* Decrement the work packet pointer */
+ buffer_ptr.s.size -= 8;
+ work->packet_ptr = buffer_ptr;
+
+ /* Now loop through decrementing the size for each additional buffer */
+ while (--number_buffers)
+ {
+ /* Chain pointers are 8 bytes before the data */
+ cvmx_buf_ptr_t *ptr = (cvmx_buf_ptr_t*)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+ buffer_ptr = *ptr;
+ buffer_ptr.s.size -= 8;
+ *ptr = buffer_ptr;
+ }
+ }
+ /* Make sure that these writes go out before other operations such as FPA frees */
+ CVMX_SYNCWS;
+ return 0;
+}
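+
+/*
+ * Example usage (an illustrative sketch; the receive snippet is
+ * hypothetical and not part of this file): on chips with errata PKI-100,
+ * apply the fixup to each work queue entry before trusting the buffer
+ * size fields.
+ *
+ *   cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
+ *   if (work)
+ *       cvmx_helper_fix_ipd_packet_chain(work);
+ */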
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
+
+
+/**
+ * Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
+ * 1 doesn't work properly. The following code disables 2nd order
+ * CDR for the specified QLM.
+ *
+ * @param qlm QLM to disable 2nd order CDR for.
+ */
+void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm)
+{
+ int lane;
+ /* Apply the workaround only once. */
+ cvmx_ciu_qlm_jtgd_t qlm_jtgd;
+ qlm_jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
+ if (qlm_jtgd.s.select != 0)
+ return;
+
+ cvmx_helper_qlm_jtag_init();
+ /* We need to load all four lanes of the QLM, a total of 1072 bits */
+ for (lane=0; lane<4; lane++)
+ {
+ /* Each lane has 268 bits. We need to set cfg_cdr_incx<67:64>=3 and
+ cfg_cdr_secord<77>=1. All other bits are zero. Bits go in LSB
+ first, so start off with the zeros for bits <63:0> */
+ cvmx_helper_qlm_jtag_shift_zeros(qlm, 63 - 0 + 1);
+ /* cfg_cdr_incx<67:64>=3 */
+ cvmx_helper_qlm_jtag_shift(qlm, 67 - 64 + 1, 3);
+ /* Zeros for bits <76:68> */
+ cvmx_helper_qlm_jtag_shift_zeros(qlm, 76 - 68 + 1);
+ /* cfg_cdr_secord<77>=1 */
+ cvmx_helper_qlm_jtag_shift(qlm, 77 - 77 + 1, 1);
+ /* Zeros for bits <267:78> */
+ cvmx_helper_qlm_jtag_shift_zeros(qlm, 267 - 78 + 1);
+ }
+ cvmx_helper_qlm_jtag_update(qlm);
+}
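+
+/*
+ * Bit accounting for the loop above (an illustrative check, not original
+ * commentary): per lane 64 + 4 + 9 + 1 + 190 = 268 bits are shifted, and
+ * 4 lanes x 268 bits = 1072 bits, matching the total cited in the function.
+ */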
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-errata.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-errata.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-errata.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-errata.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,94 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Fixes and workarounds for Octeon chip errata. This file
+ * contains functions called by cvmx-helper to workaround known
+ * chip errata. For the most part, code doesn't need to call
+ * these functions directly.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __CVMX_HELPER_ERRATA_H__
+#define __CVMX_HELPER_ERRATA_H__
+
+/**
+ * @INTERNAL
+ * Function to adjust internal IPD pointer alignments
+ *
+ * @return 0 on success
+ * !0 on failure
+ */
+extern int __cvmx_helper_errata_fix_ipd_ptr_alignment(void);
+
+/**
+ * This function needs to be called on all Octeon chips with
+ * errata PKI-100.
+ *
+ * The Size field is 8 too large in WQE and next pointers
+ *
+ * The Size field generated by IPD is 8 larger than it should
+ * be. The Size field is <55:40> of both:
+ * - WORD3 in the work queue entry, and
+ * - the next buffer pointer (which precedes the packet data
+ * in each buffer).
+ *
+ * @param work Work queue entry to fix
+ * @return Zero on success. Negative on failure
+ */
+extern int cvmx_helper_fix_ipd_packet_chain(cvmx_wqe_t *work);
+
+/**
+ * Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
+ * 1 doesn't work properly. The following code disables 2nd order
+ * CDR for the specified QLM.
+ *
+ * @param qlm QLM to disable 2nd order CDR for.
+ */
+extern void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm);
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-errata.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-fpa.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-fpa.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-fpa.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,247 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Helper functions for FPA setup.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#include "cvmx.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-fpa.h"
+#include "cvmx-helper-fpa.h"
+
+/**
+ * @INTERNAL
+ * Allocate memory for and initialize a single FPA pool.
+ *
+ * @param pool Pool to initialize
+ * @param buffer_size Size of buffers to allocate in bytes
+ * @param buffers Number of buffers to put in the pool. Zero is allowed
+ * @param name String name of the pool for debugging purposes
+ * @return Zero on success, non-zero on failure
+ */
+static int __cvmx_helper_initialize_fpa_pool(int pool, uint64_t buffer_size,
+ uint64_t buffers, const char *name)
+{
+ uint64_t current_num;
+ void *memory;
+ uint64_t align = CVMX_CACHE_LINE_SIZE;
+
+ /* Align the allocation so that power of 2 size buffers are naturally aligned */
+ while (align < buffer_size)
+ align = align << 1;
+
+ if (buffers == 0)
+ return 0;
+
+ current_num = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool));
+ if (current_num)
+ {
+ cvmx_dprintf("Fpa pool %d(%s) already has %llu buffers. Skipping setup.\n",
+ pool, name, (unsigned long long)current_num);
+ return 0;
+ }
+
+ memory = cvmx_bootmem_alloc(buffer_size * buffers, align);
+ if (memory == NULL)
+ {
+ cvmx_dprintf("Out of memory initializing fpa pool %d(%s).\n", pool, name);
+ return -1;
+ }
+ cvmx_fpa_setup_pool(pool, name, memory, buffer_size, buffers);
+ return 0;
+}
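+
+/*
+ * Illustrative note (assuming a 128-byte CVMX_CACHE_LINE_SIZE, which is
+ * not stated in this file): the alignment loop above rounds up to the
+ * first power of two >= buffer_size, e.g. a 1536-byte buffer yields a
+ * 2048-byte alignment (128 -> 256 -> 512 -> 1024 -> 2048).
+ */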
+
+
+/**
+ * @INTERNAL
+ * Allocate memory and initialize the FPA pools using memory
+ * from cvmx-bootmem. Specifying zero for the number of
+ * buffers will cause that FPA pool to not be setup. This is
+ * useful if you aren't using some of the hardware and want
+ * to save memory. Use cvmx_helper_initialize_fpa instead of
+ * this function directly.
+ *
+ * @param pip_pool Should always be CVMX_FPA_PACKET_POOL
+ * @param pip_size Should always be CVMX_FPA_PACKET_POOL_SIZE
+ * @param pip_buffers
+ * Number of packet buffers.
+ * @param wqe_pool Should always be CVMX_FPA_WQE_POOL
+ * @param wqe_size Should always be CVMX_FPA_WQE_POOL_SIZE
+ * @param wqe_entries
+ * Number of work queue entries
+ * @param pko_pool Should always be CVMX_FPA_OUTPUT_BUFFER_POOL
+ * @param pko_size Should always be CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
+ * @param pko_buffers
+ * PKO Command buffers. You should at minimum have two per
+ * PKO queue.
+ * @param tim_pool Should always be CVMX_FPA_TIMER_POOL
+ * @param tim_size Should always be CVMX_FPA_TIMER_POOL_SIZE
+ * @param tim_buffers
+ * TIM ring buffer command queues. At least two per timer bucket
+ * is recommended.
+ * @param dfa_pool Should always be CVMX_FPA_DFA_POOL
+ * @param dfa_size Should always be CVMX_FPA_DFA_POOL_SIZE
+ * @param dfa_buffers
+ * DFA command buffer. A relatively small (32 for example)
+ * number should work.
+ * @return Zero on success, non-zero if out of memory
+ */
+static int __cvmx_helper_initialize_fpa(int pip_pool, int pip_size, int pip_buffers,
+ int wqe_pool, int wqe_size, int wqe_entries,
+ int pko_pool, int pko_size, int pko_buffers,
+ int tim_pool, int tim_size, int tim_buffers,
+ int dfa_pool, int dfa_size, int dfa_buffers)
+{
+ int status;
+
+ cvmx_fpa_enable();
+
+ if ((pip_buffers > 0) && (pip_buffers <= 64))
+ cvmx_dprintf("Warning: %d packet buffers may not be enough for hardware"
+ " prefetch. 65 or more is recommended.\n", pip_buffers);
+
+ if (pip_pool >= 0)
+ {
+ status = __cvmx_helper_initialize_fpa_pool(pip_pool, pip_size, pip_buffers,
+ "Packet Buffers");
+ if (status)
+ return status;
+ }
+
+ if (wqe_pool >= 0)
+ {
+ status = __cvmx_helper_initialize_fpa_pool(wqe_pool, wqe_size, wqe_entries,
+ "Work Queue Entries");
+ if (status)
+ return status;
+ }
+
+ if (pko_pool >= 0)
+ {
+ status = __cvmx_helper_initialize_fpa_pool(pko_pool, pko_size, pko_buffers,
+ "PKO Command Buffers");
+ if (status)
+ return status;
+ }
+
+ if (tim_pool >= 0)
+ {
+ status = __cvmx_helper_initialize_fpa_pool(tim_pool, tim_size, tim_buffers,
+ "TIM Command Buffers");
+ if (status)
+ return status;
+ }
+
+ if (dfa_pool >= 0)
+ {
+ status = __cvmx_helper_initialize_fpa_pool(dfa_pool, dfa_size, dfa_buffers,
+ "DFA Command Buffers");
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+
+/**
+ * Allocate memory and initialize the FPA pools using memory
+ * from cvmx-bootmem. The sizes of the elements in the pools are
+ * controlled by the cvmx-config.h header file. Specifying
+ * zero for any parameter will cause that FPA pool to not be
+ * set up. This is useful if you aren't using some of the
+ * hardware and want to save memory.
+ *
+ * @param packet_buffers
+ * Number of packet buffers to allocate
+ * @param work_queue_entries
+ * Number of work queue entries
+ * @param pko_buffers
+ * PKO Command buffers. You should at minimum have two per
+ * PKO queue.
+ * @param tim_buffers
+ * TIM ring buffer command queues. At least two per timer bucket
+ * is recommended.
+ * @param dfa_buffers
+ * DFA command buffer. A relatively small (32 for example)
+ * number should work.
+ * @return Zero on success, non-zero if out of memory
+ */
+int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries,
+ int pko_buffers, int tim_buffers, int dfa_buffers)
+{
+#ifndef CVMX_FPA_PACKET_POOL
+#define CVMX_FPA_PACKET_POOL -1
+#define CVMX_FPA_PACKET_POOL_SIZE 0
+#endif
+#ifndef CVMX_FPA_WQE_POOL
+#define CVMX_FPA_WQE_POOL -1
+#define CVMX_FPA_WQE_POOL_SIZE 0
+#endif
+#ifndef CVMX_FPA_OUTPUT_BUFFER_POOL
+#define CVMX_FPA_OUTPUT_BUFFER_POOL -1
+#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 0
+#endif
+#ifndef CVMX_FPA_TIMER_POOL
+#define CVMX_FPA_TIMER_POOL -1
+#define CVMX_FPA_TIMER_POOL_SIZE 0
+#endif
+#ifndef CVMX_FPA_DFA_POOL
+#define CVMX_FPA_DFA_POOL -1
+#define CVMX_FPA_DFA_POOL_SIZE 0
+#endif
+ return __cvmx_helper_initialize_fpa(
+ CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, packet_buffers,
+ CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, work_queue_entries,
+ CVMX_FPA_OUTPUT_BUFFER_POOL, CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, pko_buffers,
+ CVMX_FPA_TIMER_POOL, CVMX_FPA_TIMER_POOL_SIZE, tim_buffers,
+ CVMX_FPA_DFA_POOL, CVMX_FPA_DFA_POOL_SIZE, dfa_buffers);
+}
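+
+/*
+ * Example usage (an illustrative sketch; the buffer counts are arbitrary,
+ * not recommendations): set up the packet, WQE, and PKO pools while
+ * skipping the TIM and DFA pools.
+ *
+ *   if (cvmx_helper_initialize_fpa(1024, 1024, 64, 0, 0))
+ *       cvmx_dprintf("FPA initialization failed: out of memory.\n");
+ */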
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-fpa.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-fpa.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-fpa.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-fpa.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,84 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Helper functions for FPA setup.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __CVMX_HELPER_H_FPA__
+#define __CVMX_HELPER_H_FPA__
+
+/**
+ * Allocate memory and initialize the FPA pools using memory
+ * from cvmx-bootmem. The sizes of the elements in the pools are
+ * controlled by the cvmx-config.h header file. Specifying
+ * zero for any parameter will cause that FPA pool to not be
+ * set up. This is useful if you aren't using some of the
+ * hardware and want to save memory.
+ *
+ * @param packet_buffers
+ * Number of packet buffers to allocate
+ * @param work_queue_entries
+ * Number of work queue entries
+ * @param pko_buffers
+ * PKO Command buffers. You should at minimum have two per
+ * PKO queue.
+ * @param tim_buffers
+ * TIM ring buffer command queues. At least two per timer bucket
+ * is recommended.
+ * @param dfa_buffers
+ * DFA command buffer. A relatively small (32 for example)
+ * number should work.
+ * @return Zero on success, non-zero if out of memory
+ */
+extern int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries,
+ int pko_buffers, int tim_buffers,
+ int dfa_buffers);
+
+#endif /* __CVMX_HELPER_H_FPA__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-fpa.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-ilk.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-ilk.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-ilk.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,443 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for ILK initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-cfg.h>
+#include <asm/octeon/cvmx-ilk.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-qlm.h>
+#include <asm/octeon/cvmx-ilk-defs.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#endif
+#include "cvmx.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-cfg.h"
+#include "cvmx-ilk.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-pko.h"
+#include "cvmx-qlm.h"
+#endif
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+int __cvmx_helper_ilk_enumerate(int interface)
+{
+ interface -= CVMX_ILK_GBL_BASE;
+ return cvmx_ilk_chans[interface];
+}
+
+/**
+ * @INTERNAL
+ * Probe an ILK interface and determine the number of ports
+ * connected to it. The ILK interface should still be down
+ * after this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_ilk_probe(int interface)
+{
+ int i, j, res = -1;
+ static int pipe_base = 0, pknd_base = 0;
+ static cvmx_ilk_pipe_chan_t *pch = NULL, *tmp;
+ static cvmx_ilk_chan_pknd_t *chpknd = NULL, *tmp1;
+ static cvmx_ilk_cal_entry_t *calent = NULL, *tmp2;
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return 0;
+
+ interface -= CVMX_ILK_GBL_BASE;
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return 0;
+
+ /* the configuration should be done only once */
+ if (cvmx_ilk_get_intf_ena (interface))
+ return cvmx_ilk_chans[interface];
+
+ /* configure lanes and enable the link */
+ res = cvmx_ilk_start_interface (interface, cvmx_ilk_lane_mask[interface]);
+ if (res < 0)
+ return 0;
+
+ /* set up the group of pipes available to ilk */
+ if (pipe_base == 0)
+ pipe_base = __cvmx_pko_get_pipe (interface + CVMX_ILK_GBL_BASE, 0);
+
+ if (pipe_base == -1)
+ {
+ pipe_base = 0;
+ return 0;
+ }
+
+ res = cvmx_ilk_set_pipe (interface, pipe_base, cvmx_ilk_chans[interface]);
+ if (res < 0)
+ return 0;
+
+ /* set up pipe to channel mapping */
+ i = pipe_base;
+ if (pch == NULL)
+ {
+ pch = (cvmx_ilk_pipe_chan_t *)
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ kmalloc(CVMX_MAX_ILK_CHANS * sizeof(cvmx_ilk_pipe_chan_t), GFP_KERNEL);
+#else
+ cvmx_bootmem_alloc (CVMX_MAX_ILK_CHANS * sizeof(cvmx_ilk_pipe_chan_t),
+ sizeof(cvmx_ilk_pipe_chan_t));
+#endif
+ if (pch == NULL)
+ return 0;
+ }
+
+ memset (pch, 0, CVMX_MAX_ILK_CHANS * sizeof(cvmx_ilk_pipe_chan_t));
+ tmp = pch;
+ for (j = 0; j < cvmx_ilk_chans[interface]; j++)
+ {
+ tmp->pipe = i++;
+ tmp->chan = cvmx_ilk_chan_map[interface][j];
+ tmp++;
+ }
+ res = cvmx_ilk_tx_set_channel (interface, pch, cvmx_ilk_chans[interface]);
+ if (res < 0)
+ {
+ res = 0;
+ goto err_free_pch;
+ }
+ pipe_base += cvmx_ilk_chans[interface];
+
+ /* set up channel to pkind mapping */
+ if (pknd_base == 0)
+ pknd_base = cvmx_helper_get_pknd (interface + CVMX_ILK_GBL_BASE, 0);
+
+ i = pknd_base;
+ if (chpknd == NULL)
+ {
+ chpknd = (cvmx_ilk_chan_pknd_t *)
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ kmalloc(CVMX_MAX_ILK_PKNDS * sizeof(cvmx_ilk_chan_pknd_t), GFP_KERNEL);
+#else
+ cvmx_bootmem_alloc (CVMX_MAX_ILK_PKNDS * sizeof(cvmx_ilk_chan_pknd_t),
+ sizeof(cvmx_ilk_chan_pknd_t));
+#endif
+ if (chpknd == NULL)
+ {
+ pipe_base -= cvmx_ilk_chans[interface];
+ res = 0;
+ goto err_free_pch;
+ }
+ }
+
+ memset (chpknd, 0, CVMX_MAX_ILK_PKNDS * sizeof(cvmx_ilk_chan_pknd_t));
+ tmp1 = chpknd;
+ for (j = 0; j < cvmx_ilk_chans[interface]; j++)
+ {
+ tmp1->chan = cvmx_ilk_chan_map[interface][j];
+ tmp1->pknd = i++;
+ tmp1++;
+ }
+ res = cvmx_ilk_rx_set_pknd (interface, chpknd, cvmx_ilk_chans[interface]);
+ if (res < 0)
+ {
+ pipe_base -= cvmx_ilk_chans[interface];
+ res = 0;
+ goto err_free_chpknd;
+ }
+ pknd_base += cvmx_ilk_chans[interface];
+
+ /* Set up tx calendar */
+ if (calent == NULL)
+ {
+ calent = (cvmx_ilk_cal_entry_t *)
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ kmalloc(CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t), GFP_KERNEL);
+#else
+ cvmx_bootmem_alloc (CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t),
+ sizeof(cvmx_ilk_cal_entry_t));
+#endif
+ if (calent == NULL)
+ {
+ pipe_base -= cvmx_ilk_chans[interface];
+ pknd_base -= cvmx_ilk_chans[interface];
+ res = 0;
+ goto err_free_chpknd;
+ }
+ }
+
+ memset (calent, 0, CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t));
+ tmp1 = chpknd;
+ tmp2 = calent;
+ for (j = 0; j < cvmx_ilk_chans[interface]; j++)
+ {
+ tmp2->pipe_bpid = tmp1->pknd;
+ tmp2->ent_ctrl = PIPE_BPID;
+ tmp1++;
+ tmp2++;
+ }
+ res = cvmx_ilk_cal_setup_tx (interface, cvmx_ilk_chans[interface],
+ calent, 1);
+ if (res < 0)
+ {
+ pipe_base -= cvmx_ilk_chans[interface];
+ pknd_base -= cvmx_ilk_chans[interface];
+ res = 0;
+ goto err_free_calent;
+ }
+
+ /* Set up the rx calendar. The memory allocated above can be reused
+ * because max pkind is always less than max pipe */
+ memset (calent, 0, CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t));
+ tmp = pch;
+ tmp2 = calent;
+ for (j = 0; j < cvmx_ilk_chans[interface]; j++)
+ {
+ tmp2->pipe_bpid = tmp->pipe;
+ tmp2->ent_ctrl = PIPE_BPID;
+ tmp++;
+ tmp2++;
+ }
+ res = cvmx_ilk_cal_setup_rx (interface, cvmx_ilk_chans[interface],
+ calent, CVMX_ILK_RX_FIFO_WM, 1);
+ if (res < 0)
+ {
+ pipe_base -= cvmx_ilk_chans[interface];
+ pknd_base -= cvmx_ilk_chans[interface];
+ res = 0;
+ goto err_free_calent;
+ }
+ res = __cvmx_helper_ilk_enumerate(interface + CVMX_ILK_GBL_BASE);
+
+ goto out;
+
+err_free_calent:
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ kfree (calent);
+#else
+ /* no free() for cvmx_bootmem_alloc() */
+#endif
+
+err_free_chpknd:
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ kfree (chpknd);
+#else
+ /* no free() for cvmx_bootmem_alloc() */
+#endif
+
+err_free_pch:
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ kfree (pch);
+#else
+ /* no free() for cvmx_bootmem_alloc() */
+#endif
+out:
+ return res;
+}
+
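+/*
+ * Editor's note: a minimal, hypothetical sketch of how the helper layer is
+ * expected to drive the @INTERNAL ILK entry points in this file. In the SDK
+ * the dispatch lives in cvmx-helper.c; the direct calls below are for
+ * illustration only and are not part of the original commit.
+ */
+#if 0 /* illustration */
+static void example_ilk_bringup(int interface)
+{
+    int num_ports = __cvmx_helper_ilk_probe(interface); /* ports, 0 disables */
+    if (num_ports > 0 && __cvmx_helper_ilk_enable(interface) == 0)
+    {
+        int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
+        cvmx_helper_link_info_t link = __cvmx_helper_ilk_link_get(ipd_port);
+        if (link.s.link_up)
+            cvmx_dprintf("ILK up at %d Mbps\n", (int)link.s.speed);
+    }
+}
+#endif
+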
+/**
+ * @INTERNAL
+ * Bring up and enable the ILK interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_ilk_enable(int interface)
+{
+ interface -= CVMX_ILK_GBL_BASE;
+ return cvmx_ilk_enable(interface);
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by ILK link status.
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_ilk_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int retry_count = 0;
+ cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+ cvmx_ilk_rxx_int_t ilk_rxx_int;
+ int lanes = 0;
+
+ result.u64 = 0;
+ interface -= CVMX_ILK_GBL_BASE;
+
+retry:
+ retry_count++;
+ if (retry_count > 10)
+ goto out;
+
+ ilk_rxx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG1(interface));
+ ilk_rxx_int.u64 = cvmx_read_csr (CVMX_ILK_RXX_INT(interface));
+
+ /* Clear all RX status bits */
+ if (ilk_rxx_int.u64)
+ cvmx_write_csr(CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);
+
+ if (ilk_rxx_cfg1.s.rx_bdry_lock_ena == 0)
+ {
+ /* We need to start looking for word boundary lock */
+ ilk_rxx_cfg1.s.rx_bdry_lock_ena = cvmx_ilk_get_intf_ln_msk(interface);
+ ilk_rxx_cfg1.s.rx_align_ena = 0;
+ cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
+ //cvmx_dprintf("ILK%d: Looking for word boundary lock\n", interface);
+ goto retry;
+ }
+
+ if (ilk_rxx_cfg1.s.rx_align_ena == 0)
+ {
+ if (ilk_rxx_int.s.word_sync_done)
+ {
+ ilk_rxx_cfg1.s.rx_align_ena = 1;
+ cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
+ //printf("ILK%d: Looking for lane alignment\n", interface);
+ goto retry;
+ }
+ goto out;
+ }
+
+ if (ilk_rxx_int.s.lane_align_fail)
+ {
+ ilk_rxx_cfg1.s.rx_bdry_lock_ena = 0;
+ ilk_rxx_cfg1.s.rx_align_ena = 0;
+ cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
+ cvmx_dprintf("ILK%d: Lane alignment failed\n", interface);
+ goto out;
+ }
+
+ if (ilk_rxx_int.s.lane_align_done)
+ {
+ //cvmx_dprintf("ILK%d: Lane alignment complete\n", interface);
+ }
+
+ lanes = cvmx_pop(ilk_rxx_cfg1.s.rx_bdry_lock_ena);
+
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = cvmx_qlm_get_gbaud_mhz(1+interface) * 64 / 67;
+ result.s.speed *= lanes;
+
+out:
+ /* If the link is down we will force disable the RX path. If it is up, we'll
+ set it to match the TX state set by the if_enable call */
+ if (result.s.link_up)
+ {
+ cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+ ilk_txx_cfg1.u64 = cvmx_read_csr(CVMX_ILK_TXX_CFG1(interface));
+ ilk_rxx_cfg1.s.pkt_ena = ilk_txx_cfg1.s.pkt_ena;
+ cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
+ //cvmx_dprintf("ILK%d: link up, %d Mbps, Full duplex mode, %d lanes\n", interface, result.s.speed, lanes);
+ }
+ else
+ {
+ ilk_rxx_cfg1.s.pkt_ena = 0;
+ cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
+ //cvmx_dprintf("ILK link down\n");
+ }
+ return result;
+}
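+
+/*
+ * Editor's note on the speed computation above: the 64/67 factor reflects
+ * Interlaken's 64b/67b framing overhead. A worked example, assuming a
+ * hypothetical 6250 Mbaud QLM with all 8 lanes locked:
+ *   per lane: 6250 * 64 / 67 ~= 5970 Mbps
+ *   total:    5970 * 8       ~= 47760 Mbps
+ */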
+
+/**
+ * @INTERNAL
+ * Set the link state of an IPD/PKO port.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_ilk_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+ /* nothing to do */
+
+ return 0;
+}
+
+/**
+ * Display ILK interface statistics.
+ *
+ */
+void __cvmx_helper_ilk_show_stats (void)
+{
+ int i, j;
+ unsigned char *pchans, num_chans;
+ unsigned int chan_tmp[CVMX_MAX_ILK_CHANS];
+ cvmx_ilk_stats_ctrl_t ilk_stats_ctrl;
+
+ for (i = 0; i < CVMX_NUM_ILK_INTF; i++)
+ {
+ cvmx_ilk_get_chan_info (i, &pchans, &num_chans);
+
+ memset (chan_tmp, 0, CVMX_MAX_ILK_CHANS * sizeof (int));
+ for (j = 0; j < num_chans; j++)
+ chan_tmp[j] = pchans[j];
+
+ ilk_stats_ctrl.chan_list = chan_tmp;
+ ilk_stats_ctrl.num_chans = num_chans;
+ ilk_stats_ctrl.clr_on_rd = 0;
+ cvmx_ilk_show_stats (i, &ilk_stats_ctrl);
+ }
+}
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-ilk.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-ilk.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-ilk.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-ilk.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,111 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for ILK initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+#ifndef __CVMX_HELPER_ILK_H__
+#define __CVMX_HELPER_ILK_H__
+
+extern int __cvmx_helper_ilk_enumerate(int interface);
+
+/**
+ * @INTERNAL
+ * Probe an ILK interface and determine the number of ports
+ * connected to it. The ILK interface should still be down after
+ * this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_ilk_probe(int interface);
+
+/**
+ * @INTERNAL
+ * Bring up and enable an ILK interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_ilk_enable(int interface);
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by ILK link status.
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_ilk_link_get(int ipd_port);
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_ilk_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
+
+extern void __cvmx_helper_ilk_show_stats (void);
+#endif
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-ilk.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-jtag.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-jtag.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-jtag.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,231 @@
+/* $MidnightBSD$ */
+
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Helper utilities for qlm_jtag.
+ *
+ * <hr>$Revision: 42480 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-helper-jtag.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#endif
+#include "cvmx.h"
+#if defined(__FreeBSD__) && defined(_KERNEL)
+#include "cvmx-helper-jtag.h"
+#endif
+#endif
+
+/**
+ * Initialize the internal QLM JTAG logic to allow programming
+ * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
+ * These functions should only be used at the direction of Cavium
+ * Networks. Programming incorrect values into the JTAG chain
+ * can cause chip damage.
+ */
+void cvmx_helper_qlm_jtag_init(void)
+{
+ cvmx_ciu_qlm_jtgc_t jtgc;
+ int clock_div = 0;
+ int divisor;
+
+ divisor = cvmx_clock_get_rate(CVMX_CLOCK_SCLK) / (1000000 *
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) ? 10 : 25));
+
+ divisor = (divisor-1)>>2;
+ /* Convert the divisor into a power of 2 shift */
+ while (divisor)
+ {
+ clock_div++;
+ divisor>>=1;
+ }
+
+ /* Clock divider for QLM JTAG operations. sclk is divided by 2^(CLK_DIV + 2) */
+ jtgc.u64 = 0;
+ jtgc.s.clk_div = clock_div;
+ jtgc.s.mux_sel = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+ jtgc.s.bypass = 0x3;
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX))
+ jtgc.s.bypass = 0x7;
+ else
+ jtgc.s.bypass = 0xf;
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ jtgc.s.bypass_ext = 1;
+ cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
+ cvmx_read_csr(CVMX_CIU_QLM_JTGC);
+}
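+
+/*
+ * Editor's note: a worked example of the divider math above, assuming a
+ * hypothetical 800 MHz sclk on CN68XX (target roughly 10 MHz):
+ *   divisor   = 800000000 / (1000000 * 10) = 80
+ *   (80-1)>>2 = 19
+ *   19 -> 9 -> 4 -> 2 -> 1 -> 0, so clock_div = 5
+ *   JTAG clock = sclk / 2^(5+2) = 800 MHz / 128 = 6.25 MHz
+ */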
+
+
+/**
+ * Write up to 32 bits into the QLM JTAG chain. Bits are shifted
+ * into the MSB and out the LSB, so you should shift in the low
+ * order bits followed by the high order bits. The JTAG chain for
+ * CN52XX and CN56XX is 4 * 268 bits long, or 1072. The JTAG chain
+ * for CN63XX is 4 * 300 bits long, or 1200.
+ *
+ * @param qlm QLM to shift value into
+ * @param bits Number of bits to shift in (1-32).
+ * @param data Data to shift in. Bit 0 enters the chain first, followed by
+ * bit 1, etc.
+ *
+ * @return The low order bits that were shifted out of the JTAG chain.
+ */
+uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
+{
+ cvmx_ciu_qlm_jtgc_t jtgc;
+ cvmx_ciu_qlm_jtgd_t jtgd;
+
+ jtgc.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGC);
+ jtgc.s.mux_sel = qlm;
+ if (!OCTEON_IS_MODEL(OCTEON_CN6XXX) && !OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ jtgc.s.bypass = 1<<qlm;
+ cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
+ cvmx_read_csr(CVMX_CIU_QLM_JTGC);
+
+ jtgd.u64 = 0;
+ jtgd.s.shift = 1;
+ jtgd.s.shft_cnt = bits-1;
+ jtgd.s.shft_reg = data;
+ if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+ jtgd.s.select = 1 << qlm;
+ cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
+ do
+ {
+ jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
+ } while (jtgd.s.shift);
+ return jtgd.s.shft_reg >> (32-bits);
+}
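+
+/*
+ * Editor's note: a minimal sketch of shifting a value wider than 32 bits,
+ * assuming a hypothetical 40-bit field. Per the comment above, the low
+ * order bits must be shifted in first. Illustration only.
+ */
+#if 0 /* illustration */
+static void example_shift_40bits(int qlm, uint64_t value)
+{
+    cvmx_helper_qlm_jtag_shift(qlm, 32, (uint32_t)(value & 0xffffffffull)); /* low 32 bits */
+    cvmx_helper_qlm_jtag_shift(qlm, 8, (uint32_t)(value >> 32));            /* high 8 bits */
+}
+#endif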
+
+
+/**
+ * Shift long sequences of zeros into the QLM JTAG chain. It is
+ * common to need to shift more than 32 bits of zeros into the
+ * chain. This function is a convenience wrapper around
+ * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
+ * zeros at a time.
+ *
+ * @param qlm QLM to shift zeros into
+ * @param bits Number of zero bits to shift in
+ */
+void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits)
+{
+ while (bits > 0)
+ {
+ int n = bits;
+ if (n > 32)
+ n = 32;
+ cvmx_helper_qlm_jtag_shift(qlm, n, 0);
+ bits -= n;
+ }
+}
+
+
+/**
+ * Program the QLM JTAG chain into all lanes of the QLM. You must
+ * have already shifted in the proper number of bits into the
+ * JTAG chain. Updating invalid values can cause chip damage.
+ *
+ * @param qlm QLM to program
+ */
+void cvmx_helper_qlm_jtag_update(int qlm)
+{
+ cvmx_ciu_qlm_jtgc_t jtgc;
+ cvmx_ciu_qlm_jtgd_t jtgd;
+
+ jtgc.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGC);
+ jtgc.s.mux_sel = qlm;
+ if (!OCTEON_IS_MODEL(OCTEON_CN6XXX) && !OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ jtgc.s.bypass = 1<<qlm;
+
+ cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
+ cvmx_read_csr(CVMX_CIU_QLM_JTGC);
+
+ /* Update the new data */
+ jtgd.u64 = 0;
+ jtgd.s.update = 1;
+ if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+ jtgd.s.select = 1 << qlm;
+ cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
+ do
+ {
+ jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
+ } while (jtgd.s.update);
+}
+
+
+/**
+ * Load the QLM JTAG chain with data from all lanes of the QLM.
+ *
+ * @param qlm QLM to program
+ */
+void cvmx_helper_qlm_jtag_capture(int qlm)
+{
+ cvmx_ciu_qlm_jtgc_t jtgc;
+ cvmx_ciu_qlm_jtgd_t jtgd;
+
+ jtgc.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGC);
+ jtgc.s.mux_sel = qlm;
+ if (!OCTEON_IS_MODEL(OCTEON_CN6XXX) && !OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ jtgc.s.bypass = 1<<qlm;
+
+ cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
+ cvmx_read_csr(CVMX_CIU_QLM_JTGC);
+
+ jtgd.u64 = 0;
+ jtgd.s.capture = 1;
+ if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+ jtgd.s.select = 1 << qlm;
+ cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
+ do
+ {
+ jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
+ } while (jtgd.s.capture);
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-jtag.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-jtag.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-jtag.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-jtag.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,107 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Helper utilities for qlm_jtag.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+
+#ifndef __CVMX_HELPER_JTAG_H__
+#define __CVMX_HELPER_JTAG_H__
+
+/**
+ * Initialize the internal QLM JTAG logic to allow programming
+ * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
+ * These functions should only be used at the direction of Cavium
+ * Networks. Programming incorrect values into the JTAG chain
+ * can cause chip damage.
+ */
+extern void cvmx_helper_qlm_jtag_init(void);
+
+/**
+ * Write up to 32 bits into the QLM JTAG chain. Bits are shifted
+ * into the MSB and out the LSB, so you should shift in the low
+ * order bits followed by the high order bits. The JTAG chain for
+ * CN52XX and CN56XX is 4 * 268 bits long, or 1072. The JTAG chain
+ * for CN63XX is 4 * 300 bits long, or 1200.
+ *
+ * @param qlm QLM to shift value into
+ * @param bits Number of bits to shift in (1-32).
+ * @param data Data to shift in. Bit 0 enters the chain first, followed by
+ * bit 1, etc.
+ *
+ * @return The low order bits that were shifted out of the JTAG chain.
+ */
+extern uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data);
+
+/**
+ * Shift long sequences of zeros into the QLM JTAG chain. It is
+ * common to need to shift more than 32 bits of zeros into the
+ * chain. This function is a convenience wrapper around
+ * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
+ * zeros at a time.
+ *
+ * @param qlm QLM to shift zeros into
+ * @param bits Number of zero bits to shift in
+ */
+extern void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits);
+
+/**
+ * Program the QLM JTAG chain into all lanes of the QLM. You must
+ * have already shifted in the proper number of bits into the
+ * JTAG chain. Updating invalid values can cause chip damage.
+ *
+ * @param qlm QLM to program
+ */
+extern void cvmx_helper_qlm_jtag_update(int qlm);
+
+/**
+ * Load the QLM JTAG chain with data from all lanes of the QLM.
+ *
+ * @param qlm QLM to program
+ */
+extern void cvmx_helper_qlm_jtag_capture(int qlm);
+
+#endif /* __CVMX_HELPER_JTAG_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-jtag.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-loop.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-loop.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-loop.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,147 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for LOOP initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+#include <asm/octeon/cvmx-helper.h>
+#endif
+#include <asm/octeon/cvmx-pip-defs.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+#include "cvmx.h"
+#include "cvmx-helper.h"
+#endif
+#else
+#include "cvmx.h"
+#include "cvmx-helper.h"
+#endif
+#endif
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+
+int __cvmx_helper_loop_enumerate(int interface)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN68XX) ? 8 : 4);
+}
+
+/**
+ * @INTERNAL
+ * Probe a LOOP interface and determine the number of ports
+ * connected to it. The LOOP interface should still be down
+ * after this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_loop_probe(int interface)
+{
+ return __cvmx_helper_loop_enumerate(interface);
+}
+
+
+/**
+ * @INTERNAL
+ * Bring up and enable a LOOP interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_loop_enable(int interface)
+{
+ cvmx_pip_prt_cfgx_t port_cfg;
+ int num_ports, index;
+ unsigned long offset;
+
+ num_ports = __cvmx_helper_get_num_ipd_ports(interface);
+
+ /*
+ * We need to disable length checking so packets < 64 bytes and jumbo
+ * frames don't get errors
+ */
+ for (index = 0; index < num_ports; index++) {
+ offset = ((octeon_has_feature(OCTEON_FEATURE_PKND)) ?
+ cvmx_helper_get_pknd(interface, index) :
+ cvmx_helper_get_ipd_port(interface, index));
+
+ port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(offset));
+ port_cfg.s.maxerr_en = 0;
+ port_cfg.s.minerr_en = 0;
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(offset), port_cfg.u64);
+ }
+
+ /*
+ * Disable FCS stripping for loopback ports
+ */
+ if (!octeon_has_feature(OCTEON_FEATURE_PKND)) {
+ cvmx_ipd_sub_port_fcs_t ipd_sub_port_fcs;
+ ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
+ ipd_sub_port_fcs.s.port_bit2 = 0;
+ cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
+ }
+
+ return 0;
+}
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-loop.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-loop.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-loop.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-loop.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,84 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for LOOP initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __CVMX_HELPER_LOOP_H__
+#define __CVMX_HELPER_LOOP_H__
+
+/**
+ * @INTERNAL
+ * Probe a LOOP interface and determine the number of ports
+ * connected to it. The LOOP interface should still be down after
+ * this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_loop_probe(int interface);
+extern int __cvmx_helper_loop_enumerate(int interface);
+
+/**
+ * @INTERNAL
+ * Bring up and enable a LOOP interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_loop_enable(int interface);
+
+#endif
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-loop.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-npi.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-npi.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-npi.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,180 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for NPI initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-sli-defs.h>
+#endif
+#include <asm/octeon/cvmx-pip-defs.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-pko.h"
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+#include "cvmx-helper.h"
+#endif
+#else
+#include "cvmx.h"
+#include "cvmx-pko.h"
+#include "cvmx-helper.h"
+#endif
+#endif
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+/**
+ * @INTERNAL
+ * Probe an NPI interface and determine the number of ports
+ * connected to it. The NPI interface should still be down
+ * after this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_npi_probe(int interface)
+{
+#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return 32;
+#if 0
+ /* Technically CN30XX, CN31XX, and CN50XX contain packet engines, but
+ nobody ever uses them. Since this is the case, we disable them here */
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ return 2;
+ else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ return 1;
+#endif
+ else if (!(OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)))
+ return 4; /* The packet engines didn't exist before cn56xx pass 2 */
+#endif
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Bring up and enable an NPI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_npi_enable(int interface)
+{
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+
+ /* On CN50XX, CN52XX, and CN56XX we need to disable length checking
+ so packets < 64 bytes and jumbo frames don't get errors */
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) && !OCTEON_IS_MODEL(OCTEON_CN58XX))
+ {
+ int port;
+ for (port=0; port<num_ports; port++)
+ {
+ cvmx_pip_prt_cfgx_t port_cfg;
+ int ipd_port = (OCTEON_IS_MODEL(OCTEON_CN68XX)) ?
+ cvmx_helper_get_pknd(interface, port) :
+ cvmx_helper_get_ipd_port(interface, port);
+ port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ port_cfg.s.lenerr_en = 0;
+ port_cfg.s.maxerr_en = 0;
+ port_cfg.s.minerr_en = 0;
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ /*
+ * Set up pknd and bpid
+ */
+ cvmx_sli_portx_pkind_t config;
+ config.u64 = cvmx_read_csr(CVMX_PEXP_SLI_PORTX_PKIND(port));
+ config.s.bpkind = cvmx_helper_get_bpid(interface, port);
+ config.s.pkind = cvmx_helper_get_pknd(interface, port);
+ cvmx_write_csr(CVMX_PEXP_SLI_PORTX_PKIND(port), config.u64);
+ }
+ }
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ /*
+ * Set up pko pipes.
+ */
+ cvmx_sli_tx_pipe_t config;
+ config.u64 = cvmx_read_csr(CVMX_PEXP_SLI_TX_PIPE);
+ config.s.base = __cvmx_pko_get_pipe (interface, 0);
+#ifdef CVMX_HELPER_NPI_MAX_PIPES
+ config.s.nump = CVMX_HELPER_NPI_MAX_PIPES;
+#else
+ config.s.nump = num_ports;
+#endif
+ cvmx_write_csr(CVMX_PEXP_SLI_TX_PIPE, config.u64);
+ }
+
+
+ /* Enables are controlled by the remote host, so nothing to do here */
+ return 0;
+}
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-npi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-npi.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-npi.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-npi.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,87 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for NPI initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __CVMX_HELPER_NPI_H__
+#define __CVMX_HELPER_NPI_H__
+
+/**
+ * @INTERNAL
+ * Probe an NPI interface and determine the number of ports
+ * connected to it. The NPI interface should still be down after
+ * this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_npi_probe(int interface);
+static inline int __cvmx_helper_npi_enumerate(int interface)
+{
+ return __cvmx_helper_npi_probe(interface);
+}
+
+/**
+ * @INTERNAL
+ * Bring up and enable an NPI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_npi_enable(int interface);
+
+#endif
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-npi.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-rgmii.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-rgmii.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-rgmii.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,558 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for RGMII/GMII/MII initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+#endif
+#include <asm/octeon/cvmx-asxx-defs.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-dbg-defs.h>
+
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-mdio.h"
+#include "cvmx-pko.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+#endif
+#else
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-mdio.h"
+#include "cvmx-pko.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+#endif
+#endif
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+/**
+ * @INTERNAL
+ * Probe RGMII ports and determine the number present
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of RGMII/GMII/MII ports (0-4).
+ */
+int __cvmx_helper_rgmii_probe(int interface)
+{
+ int num_ports = 0;
+ cvmx_gmxx_inf_mode_t mode;
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (mode.s.type)
+ {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ {
+ cvmx_dprintf("ERROR: RGMII initialize called in SPI interface\n");
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ {
+ /* On these chips "type" says we're in GMII/MII mode. This
+ limits us to 2 ports */
+ num_ports = 2;
+ }
+ else
+ {
+ cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n", __FUNCTION__);
+ }
+ }
+ else
+ {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ {
+ num_ports = 4;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ {
+ num_ports = 3;
+ }
+ else
+ {
+ cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n", __FUNCTION__);
+ }
+ }
+ return num_ports;
+}
+
+
+/**
+ * Put an RGMII interface in loopback mode. Internal packets sent
+ * out will be received back again on the same port. Externally
+ * received packets will echo back out.
+ *
+ * @param port IPD port number to loop.
+ */
+void cvmx_helper_rgmii_internal_loopback(int port)
+{
+ int interface = (port >> 4) & 1;
+ int index = port & 0xf;
+ uint64_t tmp;
+
+ cvmx_gmxx_prtx_cfg_t gmx_cfg;
+ gmx_cfg.u64 = 0;
+ gmx_cfg.s.duplex = 1;
+ gmx_cfg.s.slottime = 1;
+ gmx_cfg.s.speed = 1;
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
+ cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
+ tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
+ tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+}
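+
+/*
+ * Editor's note: the decode above packs the interface number into bit 4 of
+ * the IPD port and the port index into bits 3:0. For example, a hypothetical
+ * port 0x12 gives interface = (0x12 >> 4) & 1 = 1 and index = 0x12 & 0xf = 2.
+ */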
+
+
+/**
+ * @INTERNAL
+ * Configure all of the ASX, GMX, and PKO registers required
+ * to get RGMII to function on the supplied interface.
+ *
+ * @param interface PKO Interface to configure (0 or 1)
+ *
+ * @return Zero on success
+ */
+int __cvmx_helper_rgmii_enable(int interface)
+{
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int port;
+ cvmx_gmxx_inf_mode_t mode;
+ cvmx_asxx_tx_prt_en_t asx_tx;
+ cvmx_asxx_rx_prt_en_t asx_rx;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (mode.s.en == 0)
+ return -1;
+ if ((OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1) /* Ignore SPI interfaces */
+ return -1;
+
+ /* Configure the ASX registers needed to use the RGMII ports */
+ asx_tx.u64 = 0;
+ asx_tx.s.prt_en = cvmx_build_mask(num_ports);
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
+
+ asx_rx.u64 = 0;
+ asx_rx.s.prt_en = cvmx_build_mask(num_ports);
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
+
+ /* Configure the GMX registers needed to use the RGMII ports */
+ for (port=0; port<num_ports; port++)
+ {
+ /* Setting of CVMX_GMXX_TXX_THRESH has been moved to
+ __cvmx_helper_setup_gmx() */
+
+ /* Configure more flexible RGMII preamble checking. Pass 1 doesn't
+ support this feature. */
+ cvmx_gmxx_rxx_frm_ctl_t frm_ctl;
+ frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface));
+ frm_ctl.s.pre_free = 1; /* New field, so must be compile time */
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface), frm_ctl.u64);
+
+ /* Each pause frame transmitted will ask for about 10M bit times
+ before resume. If buffer space becomes available before that time
+ has expired, an XON pause frame (0 time) will be transmitted to
+ restart the flow. */
+ cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface), 20000);
+ cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(port, interface), 19000);
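+ /* Editor's note: assuming the standard 802.3x pause quantum of 512 bit
+ times, 20000 quanta * 512 = 10,240,000 bit times, which matches the
+ "about 10M bit times" mentioned above. The 19000 interval retransmits
+ pause slightly before the previous request expires. */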
+
+ /*
+ * Board types we have to know at compile-time.
+ */
+#if defined(OCTEON_BOARD_CAPK_0100ND)
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 26);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 26);
+#else
+ /*
+ * Vendor-defined board types.
+ */
+#if defined(OCTEON_VENDOR_LANNER)
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR320:
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR321X:
+ if (port == 0) {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 4);
+ } else {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 7);
+ }
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 0);
+ break;
+ }
+#else
+ /*
+ * For board types we can determine at runtime.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+ {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 16);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 16);
+ }
+ else
+ {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 24);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 24);
+ }
+#endif
+#endif
+ }
+
+ __cvmx_helper_setup_gmx(interface, num_ports);
+
+ /* enable the ports now */
+ for (port=0; port<num_ports; port++)
+ {
+ cvmx_gmxx_prtx_cfg_t gmx_cfg;
+ cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port(interface, port));
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface), gmx_cfg.u64);
+ }
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ cvmx_asxx_prt_loop_t asxx_prt_loop;
+
+ asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
+ if (asxx_prt_loop.s.int_loop & (1<<index))
+ {
+ /* Force 1Gbps full duplex on internal loopback */
+ cvmx_helper_link_info_t result;
+ result.u64 = 0;
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+ else
+ return __cvmx_helper_board_link_get(ipd_port);
+}
+
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_rgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+ int result = 0;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ cvmx_gmxx_prtx_cfg_t original_gmx_cfg;
+ cvmx_gmxx_prtx_cfg_t new_gmx_cfg;
+ cvmx_pko_mem_queue_qos_t pko_mem_queue_qos;
+ cvmx_pko_mem_queue_qos_t pko_mem_queue_qos_save[16];
+ cvmx_gmxx_tx_ovr_bp_t gmx_tx_ovr_bp;
+ cvmx_gmxx_tx_ovr_bp_t gmx_tx_ovr_bp_save;
+ int i;
+
+ /* Ignore speed sets in the simulator */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ return 0;
+
+ /* Read the current settings so we know the current enable state */
+ original_gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ new_gmx_cfg = original_gmx_cfg;
+
+ /* Disable the lowest level RX */
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
+ cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) & ~(1<<index));
+
+ memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
+ /* Disable all queues so that TX should become idle */
+ for (i=0; i<cvmx_pko_get_num_queues(ipd_port); i++)
+ {
+ int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
+ pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
+ pko_mem_queue_qos.s.pid = ipd_port;
+ pko_mem_queue_qos.s.qid = queue;
+ pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
+ pko_mem_queue_qos.s.qos_mask = 0;
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
+ }
+
+ /* Disable backpressure */
+ gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
+ gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
+ gmx_tx_ovr_bp.s.bp &= ~(1<<index);
+ gmx_tx_ovr_bp.s.en |= 1<<index;
+ cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
+ cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
+
+ /* Poll the GMX state machine waiting for it to become idle. Preferably we
+ should only change speed when it is idle. If it doesn't become idle we
+ will still do the speed change, but there is a slight chance that GMX
+ will lock up */
+ cvmx_write_csr(CVMX_NPI_DBG_SELECT, interface*0x800 + index*0x100 + 0x880);
+ CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&7, ==, 0, 10000);
+ CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&0xf, ==, 0, 10000);
+
+ /* Disable the port before we make any changes */
+ new_gmx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Set full/half duplex */
+ if (!link_info.s.link_up)
+ new_gmx_cfg.s.duplex = 1; /* Force full duplex on down links */
+ else
+ new_gmx_cfg.s.duplex = link_info.s.full_duplex;
+
+ /* Set the link speed. Anything unknown is set to 1Gbps */
+ if (link_info.s.speed == 10)
+ {
+ new_gmx_cfg.s.slottime = 0;
+ new_gmx_cfg.s.speed = 0;
+ }
+ else if (link_info.s.speed == 100)
+ {
+ new_gmx_cfg.s.slottime = 0;
+ new_gmx_cfg.s.speed = 0;
+ }
+ else
+ {
+ new_gmx_cfg.s.slottime = 1;
+ new_gmx_cfg.s.speed = 1;
+ }
+
+ /* Adjust the clocks */
+ if (link_info.s.speed == 10)
+ {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ }
+ else if (link_info.s.speed == 100)
+ {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ }
+ else
+ {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ {
+ if ((link_info.s.speed == 10) || (link_info.s.speed == 100))
+ {
+ cvmx_gmxx_inf_mode_t mode;
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ /*
+ ** Port .en .type .p0mii Configuration
+ ** ---- --- ----- ------ -----------------------------------------
+ ** X 0 X X All links are disabled.
+ ** 0 1 X 0 Port 0 is RGMII
+ ** 0 1 X 1 Port 0 is MII
+ ** 1 1 0 X Ports 1 and 2 are configured as RGMII ports.
+ ** 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or
+ ** MII port is selected by GMX_PRT1_CFG[SPEED].
+ */
+
+ /* In MII mode, CLK_CNT = 1. */
+ if (((index == 0) && (mode.s.p0mii == 1)) || ((index != 0) && (mode.s.type == 1)))
+ {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ }
+ }
+ }
+
+ /* Do a read to make sure all setup stuff is complete */
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Save the new GMX setting without enabling the port */
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+ /* Enable the lowest level RX */
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
+ cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1<<index));
+
+ /* Re-enable the TX path */
+ for (i=0; i<cvmx_pko_get_num_queues(ipd_port); i++)
+ {
+ int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos_save[i].u64);
+ }
+
+ /* Restore backpressure */
+ cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
+
+ /* Restore the GMX enable state. Port config is complete */
+ new_gmx_cfg.s.en = original_gmx_cfg.s.en;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+ return result;
+}
+
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ *                   Non-zero if you want internal loopback
+ * @param enable_external
+ *                   Non-zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal, int enable_external)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ int original_enable;
+ cvmx_gmxx_prtx_cfg_t gmx_cfg;
+ cvmx_asxx_prt_loop_t asxx_prt_loop;
+
+ /* Read the current enable state and save it */
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ original_enable = gmx_cfg.s.en;
+ /* Force port to be disabled */
+ gmx_cfg.s.en = 0;
+ if (enable_internal)
+ {
+ /* Force speed if we're doing internal loopback */
+ gmx_cfg.s.duplex = 1;
+ gmx_cfg.s.slottime = 1;
+ gmx_cfg.s.speed = 1;
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+ }
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+
+ /* Set the loopback bits */
+ asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
+ if (enable_internal)
+ asxx_prt_loop.s.int_loop |= 1<<index;
+ else
+ asxx_prt_loop.s.int_loop &= ~(1<<index);
+ if (enable_external)
+ asxx_prt_loop.s.ext_loop |= 1<<index;
+ else
+ asxx_prt_loop.s.ext_loop &= ~(1<<index);
+ cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
+
+ /* Force enables in internal loopback */
+ if (enable_internal)
+ {
+ uint64_t tmp;
+ tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
+ tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
+ original_enable = 1;
+ }
+
+ /* Restore the enable state */
+ gmx_cfg.s.en = original_enable;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ return 0;
+}
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-rgmii.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
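A quick usage sketch for the loopback helper added in cvmx-helper-rgmii.c
above. This is illustrative only: it assumes packet I/O has already been
brought up by the executive, and example_rgmii_selftest is a hypothetical
caller, not part of this commit.

    #include "cvmx.h"
    #include "cvmx-helper.h"

    /* Loop a port back internally so packets we transmit re-enter Octeon. */
    static void example_rgmii_selftest(int ipd_port)
    {
        /* internal=1, external=0 */
        if (__cvmx_helper_rgmii_configure_loopback(ipd_port, 1, 0) != 0)
            cvmx_dprintf("loopback setup failed on port %d\n", ipd_port);
    }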
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-rgmii.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-rgmii.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-rgmii.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,136 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for RGMII/GMII/MII initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __CVMX_HELPER_RGMII_H__
+#define __CVMX_HELPER_RGMII_H__
+
+/**
+ * @INTERNAL
+ * Probe RGMII ports and determine the number present
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of RGMII/GMII/MII ports (0-4).
+ */
+extern int __cvmx_helper_rgmii_probe(int interface);
+static inline int __cvmx_helper_rgmii_enumerate(int interface)
+{
+ return __cvmx_helper_rgmii_probe(interface);
+}
+
+/**
+ * Put an RGMII interface in loopback mode. Internal packets sent
+ * out will be received back again on the same port. Externally
+ * received packets will echo back out.
+ *
+ * @param port IPD port number to loop.
+ */
+extern void cvmx_helper_rgmii_internal_loopback(int port);
+
+/**
+ * @INTERNAL
+ * Configure all of the ASX, GMX, and PKO registers required
+ * to get RGMII to function on the supplied interface.
+ *
+ * @param interface PKO Interface to configure (0 or 1)
+ *
+ * @return Zero on success
+ */
+extern int __cvmx_helper_rgmii_enable(int interface);
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port);
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_rgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ *                   Non-zero if you want internal loopback
+ * @param enable_external
+ *                   Non-zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+extern int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal, int enable_external);
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-rgmii.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
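The link_get/link_set pair declared in cvmx-helper-rgmii.h is normally
driven through the public wrapper its own comments point to. A minimal
polling sketch, assuming cvmx_helper_link_autoconf() from cvmx-helper.h;
example_poll_link is a hypothetical caller:

    #include "cvmx.h"
    #include "cvmx-helper.h"

    /* Read autonegotiation results and apply them to one port. */
    static void example_poll_link(int ipd_port)
    {
        cvmx_helper_link_info_t link = cvmx_helper_link_autoconf(ipd_port);
        if (link.s.link_up)
            cvmx_dprintf("port %d: %u Mbps %s duplex\n", ipd_port,
                         (unsigned)link.s.speed,
                         link.s.full_duplex ? "full" : "half");
    }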
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-sgmii.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-sgmii.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-sgmii.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,783 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for SGMII initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-qlm.h>
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+#include <asm/octeon/cvmx-helper-cfg.h>
+#endif
+#include <asm/octeon/cvmx-pcsx-defs.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-ciu-defs.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-mdio.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+#include "cvmx-helper-cfg.h"
+#include "cvmx-qlm.h"
+#endif
+#else
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-mdio.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+#include "cvmx-qlm.h"
+#endif
+#endif
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+/**
+ * @INTERNAL
+ * Perform initialization required only once for an SGMII port.
+ *
+ * @param interface Interface to init
+ * @param index     Index of port on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
+{
+ const uint64_t clock_mhz = cvmx_clock_get_rate(CVMX_CLOCK_SCLK) / 1000000;
+ cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
+ cvmx_pcsx_linkx_timer_count_reg_t pcsx_linkx_timer_count_reg;
+ cvmx_gmxx_prtx_cfg_t gmxx_prtx_cfg;
+
+ /* Disable GMX */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmxx_prtx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /* Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the appropriate
+ value. 1000BASE-X specifies a 10ms interval. SGMII specifies a 1.6ms
+ interval. */
+ pcsx_miscx_ctl_reg.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ pcsx_linkx_timer_count_reg.u64 = cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
+ if (pcsx_miscx_ctl_reg.s.mode
+#if defined(OCTEON_VENDOR_GEFES)
+ /* GEF Fiber SFP testing on W5650 showed this to cause link issues for 1000BASE-X*/
+ && (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_CUST_W5650)
+ && (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_CUST_W63XX)
+#endif
+ )
+ {
+ /* 1000BASE-X */
+ pcsx_linkx_timer_count_reg.s.count = (10000ull * clock_mhz) >> 10;
+ }
+ else
+ {
+ /* SGMII */
+ pcsx_linkx_timer_count_reg.s.count = (1600ull * clock_mhz) >> 10;
+ }
+ cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface), pcsx_linkx_timer_count_reg.u64);
+
+ /* Write the advertisement register to be used as the
+ tx_Config_Reg<D15:D0> of the autonegotiation.
+ In 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
+ In SGMII PHY mode, tx_Config_Reg<D15:D0> is PCS*_SGM*_AN_ADV_REG.
+ In SGMII MAC mode, tx_Config_Reg<D15:D0> is the fixed value 0x4001, so
+ this step can be skipped. */
+ if (pcsx_miscx_ctl_reg.s.mode)
+ {
+ /* 1000BASE-X */
+ cvmx_pcsx_anx_adv_reg_t pcsx_anx_adv_reg;
+ pcsx_anx_adv_reg.u64 = cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
+ pcsx_anx_adv_reg.s.rem_flt = 0;
+ pcsx_anx_adv_reg.s.pause = 3;
+ pcsx_anx_adv_reg.s.hfd = 1;
+ pcsx_anx_adv_reg.s.fd = 1;
+ cvmx_write_csr(CVMX_PCSX_ANX_ADV_REG(index, interface), pcsx_anx_adv_reg.u64);
+ }
+ else
+ {
+#ifdef CVMX_HELPER_CONFIG_NO_PHY
+        /* If the interface does not have a PHY, then explicitly set PHY mode
+           so that the link will be set during auto negotiation. */
+ if (!pcsx_miscx_ctl_reg.s.mac_phy)
+ {
+ cvmx_dprintf("SGMII%d%d: Forcing PHY mode as PHY address is not set\n", interface, index);
+ pcsx_miscx_ctl_reg.s.mac_phy = 1;
+ cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface), pcsx_miscx_ctl_reg.u64);
+ }
+#endif
+ if (pcsx_miscx_ctl_reg.s.mac_phy)
+ {
+ /* PHY Mode */
+ cvmx_pcsx_sgmx_an_adv_reg_t pcsx_sgmx_an_adv_reg;
+ pcsx_sgmx_an_adv_reg.u64 = cvmx_read_csr(CVMX_PCSX_SGMX_AN_ADV_REG(index, interface));
+ pcsx_sgmx_an_adv_reg.s.dup = 1;
+            pcsx_sgmx_an_adv_reg.s.speed = 2;
+ cvmx_write_csr(CVMX_PCSX_SGMX_AN_ADV_REG(index, interface), pcsx_sgmx_an_adv_reg.u64);
+ }
+ else
+ {
+ /* MAC Mode - Nothing to do */
+ }
+ }
+ return 0;
+}
+
+static int __cvmx_helper_need_g15618(void)
+{
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM
+ || OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)
+ || OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)
+ || OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_1)
+ || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X)
+ || OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Initialize the SERDES link for the first time or after a loss
+ * of link.
+ *
+ * @param interface Interface to init
+ * @param index     Index of port on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
+{
+ cvmx_pcsx_mrx_control_reg_t control_reg;
+ uint64_t link_timeout;
+
+#if defined(OCTEON_VENDOR_GEFES)
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_TNPA5651X) {
+ return 0; /* no auto-negotiation */
+ }
+#endif
+
+
+ /* Take PCS through a reset sequence.
+ PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
+ Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the value of
+ the other PCS*_MR*_CONTROL_REG bits).
+ Read PCS*_MR*_CONTROL_REG[RESET] until it changes value to zero. */
+ control_reg.u64 = cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+
+    /* Errata G-15618 requires disabling PCS soft reset in CN63XX pass up to 2.1. */
+ if (!__cvmx_helper_need_g15618())
+ {
+ link_timeout = 200000;
+#if defined(OCTEON_VENDOR_GEFES)
+ if( (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_TNPA56X4) && (interface == 0) )
+ {
+ link_timeout = 5000000;
+ }
+#endif
+ control_reg.s.reset = 1;
+ cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_CONTROL_REG(index, interface), cvmx_pcsx_mrx_control_reg_t, reset, ==, 0, link_timeout))
+ {
+ cvmx_dprintf("SGMII%d: Timeout waiting for port %d to finish reset\n", interface, index);
+ return -1;
+ }
+ }
+
+    /* Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh SGMII negotiation starts. */
+ control_reg.s.rst_an = 1;
+ control_reg.s.an_en = 1;
+ control_reg.s.pwr_dn = 0;
+ cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
+
+ /* Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating that
+       SGMII autonegotiation is complete. In MAC mode this isn't an Ethernet
+       link, but a link between Octeon and the PHY */
+ if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
+ CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface), cvmx_pcsx_mrx_status_reg_t, an_cpt, ==, 1, 10000))
+ {
+ //cvmx_dprintf("SGMII%d: Port %d link timeout\n", interface, index);
+ return -1;
+ }
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Configure an SGMII link to the specified speed after the SERDES
+ * link is up.
+ *
+ * @param interface Interface to init
+ * @param index     Index of port on the interface
+ * @param link_info Link state to configure
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_link_speed(int interface, int index, cvmx_helper_link_info_t link_info)
+{
+ int is_enabled;
+ cvmx_gmxx_prtx_cfg_t gmxx_prtx_cfg;
+ cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
+
+#if defined(OCTEON_VENDOR_GEFES)
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_TNPA5651X)
+ return 0; /* no auto-negotiation */
+#endif
+
+ /* Disable GMX before we make any changes. Remember the enable state */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ is_enabled = gmxx_prtx_cfg.s.en;
+ gmxx_prtx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /* Wait for GMX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface), cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1, 10000) ||
+ CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface), cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1, 10000))
+ {
+ cvmx_dprintf("SGMII%d: Timeout waiting for port %d to be idle\n", interface, index);
+ return -1;
+ }
+
+ /* Read GMX CFG again to make sure the disable completed */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Get the misc control for PCS. We will need to set the duplication amount */
+ pcsx_miscx_ctl_reg.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+
+ /* Use GMXENO to force the link down if the status we get says it should be down */
+ pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;
+
+ /* Only change the duplex setting if the link is up */
+ if (link_info.s.link_up)
+ gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;
+
+ /* Do speed based setting for GMX */
+ switch (link_info.s.speed)
+ {
+ case 10:
+ gmxx_prtx_cfg.s.speed = 0;
+ gmxx_prtx_cfg.s.speed_msb = 1;
+ gmxx_prtx_cfg.s.slottime = 0;
+ pcsx_miscx_ctl_reg.s.samp_pt = 25; /* Setting from GMX-603 */
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ break;
+ case 100:
+ gmxx_prtx_cfg.s.speed = 0;
+ gmxx_prtx_cfg.s.speed_msb = 0;
+ gmxx_prtx_cfg.s.slottime = 0;
+ pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ break;
+ case 1000:
+ gmxx_prtx_cfg.s.speed = 1;
+ gmxx_prtx_cfg.s.speed_msb = 0;
+ gmxx_prtx_cfg.s.slottime = 1;
+ pcsx_miscx_ctl_reg.s.samp_pt = 1;
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
+ if (gmxx_prtx_cfg.s.duplex)
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0); // full duplex
+ else
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 8192); // half duplex
+ break;
+ default:
+ break;
+ }
+
+ /* Write the new misc control for PCS */
+ cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface), pcsx_miscx_ctl_reg.u64);
+
+ /* Write the new GMX settings with the port still disabled */
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /* Read GMX CFG again to make sure the config completed */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Restore the enabled / disabled state */
+ gmxx_prtx_cfg.s.en = is_enabled;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Bring up the SGMII interface to be ready for packet I/O but
+ * leave I/O disabled using the GMX override. This function
+ * follows the bringup documented in 10.6.3 of the manual.
+ *
+ * @param interface Interface to bringup
+ * @param num_ports Number of ports on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
+{
+ int index;
+ int do_link_set = 1;
+
+ /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
+ {
+ cvmx_ciu_qlm2_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM2);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 0xf;
+ ciu_qlm.s.txmargin = 0xd;
+ cvmx_write_csr(CVMX_CIU_QLM2, ciu_qlm.u64);
+ }
+
+ /* CN63XX Pass 2.0 and 2.1 errata G-15273 requires the QLM De-emphasis be
+       programmed when using a 156.25 MHz reference clock */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_1))
+ {
+ /* Read the QLM speed pins */
+ cvmx_mio_rst_boot_t mio_rst_boot;
+ mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+
+ if (mio_rst_boot.cn63xx.qlm2_spd == 4)
+ {
+ cvmx_ciu_qlm2_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM2);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 0x0;
+ ciu_qlm.s.txmargin = 0xf;
+ cvmx_write_csr(CVMX_CIU_QLM2, ciu_qlm.u64);
+ }
+ }
+
+ __cvmx_helper_setup_gmx(interface, num_ports);
+
+ for (index=0; index<num_ports; index++)
+ {
+ int ipd_port = cvmx_helper_get_ipd_port(interface, index);
+ __cvmx_helper_sgmii_hardware_init_one_time(interface, index);
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ /* Linux kernel driver will call ....link_set with the proper link
+ state. In the simulator there is no link state polling and
+ hence it is set from here. */
+ if (!(cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM))
+ do_link_set = 0;
+#endif
+ if (do_link_set)
+ __cvmx_helper_sgmii_link_set(ipd_port, __cvmx_helper_sgmii_link_get(ipd_port));
+ }
+
+ return 0;
+}
+
+int __cvmx_helper_sgmii_enumerate(int interface)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ return 2;
+
+ return 4;
+}
+
+/**
+ * @INTERNAL
+ * Probe an SGMII interface and determine the number of ports
+ * connected to it. The SGMII interface should still be down after
+ * this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_sgmii_probe(int interface)
+{
+ cvmx_gmxx_inf_mode_t mode;
+
+    /* Check if the QLM is configured correctly for SGMII; verify the speed
+       as well as the mode */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ int qlm = cvmx_qlm_interface(interface);
+
+ if (cvmx_qlm_get_status(qlm) != 1)
+ return 0;
+ }
+
+    /* Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the interface
+       needs to be enabled before IPD, otherwise per-port backpressure
+       may not work properly */
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+ mode.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+
+ return __cvmx_helper_sgmii_enumerate(interface);
+}
+
+
+/**
+ * @INTERNAL
+ * Bring up and enable an SGMII interface. After this call, packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_enable(int interface)
+{
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int index;
+
+ /* Setup PKND and BPID */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ for (index = 0; index < num_ports; index++)
+ {
+ cvmx_gmxx_bpid_msk_t bpid_msk;
+ cvmx_gmxx_bpid_mapx_t bpid_map;
+ cvmx_gmxx_prtx_cfg_t gmxx_prtx_cfg;
+
+ /* Setup PKIND */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmxx_prtx_cfg.s.pknd = cvmx_helper_get_pknd(interface, index);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /* Setup BPID */
+ bpid_map.u64 = cvmx_read_csr(CVMX_GMXX_BPID_MAPX(index, interface));
+ bpid_map.s.val = 1;
+ bpid_map.s.bpid = cvmx_helper_get_bpid(interface, index);
+ cvmx_write_csr(CVMX_GMXX_BPID_MAPX(index, interface), bpid_map.u64);
+
+ bpid_msk.u64 = cvmx_read_csr(CVMX_GMXX_BPID_MSK(interface));
+ bpid_msk.s.msk_or |= (1<<index);
+ bpid_msk.s.msk_and &= ~(1<<index);
+ cvmx_write_csr(CVMX_GMXX_BPID_MSK(interface), bpid_msk.u64);
+ }
+ }
+
+ __cvmx_helper_sgmii_hardware_init(interface, num_ports);
+
+ /* CN68XX adds the padding and FCS in PKO, not GMX */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ cvmx_gmxx_txx_append_t gmxx_txx_append_cfg;
+
+ for (index = 0; index < num_ports; index++)
+ {
+ gmxx_txx_append_cfg.u64 = cvmx_read_csr(
+ CVMX_GMXX_TXX_APPEND(index, interface));
+ gmxx_txx_append_cfg.s.fcs = 0;
+ gmxx_txx_append_cfg.s.pad = 0;
+ cvmx_write_csr(CVMX_GMXX_TXX_APPEND(index, interface),
+ gmxx_txx_append_cfg.u64);
+ }
+ }
+
+ for (index=0; index<num_ports; index++)
+ {
+ cvmx_gmxx_txx_append_t append_cfg;
+ cvmx_gmxx_txx_sgmii_ctl_t sgmii_ctl;
+ cvmx_gmxx_prtx_cfg_t gmxx_prtx_cfg;
+
+        /* Clear the align bit if the preamble is set, to attain the maximum TX rate. */
+ append_cfg.u64 = cvmx_read_csr(CVMX_GMXX_TXX_APPEND(index, interface));
+ sgmii_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TXX_SGMII_CTL(index, interface));
+ sgmii_ctl.s.align = append_cfg.s.preamble ? 0 : 1;
+ cvmx_write_csr(CVMX_GMXX_TXX_SGMII_CTL(index, interface), sgmii_ctl.u64);
+
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmxx_prtx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+ }
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ cvmx_pcsx_mrx_control_reg_t pcsx_mrx_control_reg;
+ int speed = 1000;
+ int qlm;
+
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ {
+        /* The simulator gives you a simulated 1 Gbps full duplex link.
+           Clear the union first so unrelated fields aren't left undefined. */
+        result.u64 = 0;
+        result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = speed;
+ return result;
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+ {
+ cvmx_gmxx_inf_mode_t inf_mode;
+ inf_mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+ if (inf_mode.s.rate & (1<<index))
+ speed = 2500;
+ else
+ speed = 1000;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ qlm = cvmx_qlm_interface(interface);
+
+ speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
+ }
+
+ result.u64 = 0;
+
+ pcsx_mrx_control_reg.u64 = cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ if (pcsx_mrx_control_reg.s.loopbck1)
+ {
+        /* Force a 1 Gbps full duplex link for internal loopback */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = speed;
+ return result;
+ }
+
+
+ pcsx_miscx_ctl_reg.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ if (pcsx_miscx_ctl_reg.s.mode)
+ {
+#if defined(OCTEON_VENDOR_GEFES)
+ /* 1000BASE-X */
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ cvmx_pcsx_miscx_ctl_reg_t mode_type;
+ cvmx_pcsx_anx_results_reg_t inband_status;
+ cvmx_pcsx_mrx_status_reg_t mrx_status;
+ cvmx_pcsx_anx_adv_reg_t anxx_adv;
+
+ anxx_adv.u64 = cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
+ mrx_status.u64 = cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG(index, interface));
+ mode_type.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+
+ /* Read Octeon's inband status */
+ inband_status.u64 = cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG(index, interface));
+
+            result.s.link_up = inband_status.s.link_ok; /* this is only accurate for 1000BASE-X */
+
+ result.s.full_duplex = inband_status.s.dup;
+ switch (inband_status.s.spd)
+ {
+ case 0: /* 10 Mbps */
+ result.s.speed = 10;
+ break;
+ case 1: /* 100 Mbps */
+ result.s.speed = 100;
+ break;
+ case 2: /* 1 Gbps */
+ result.s.speed = 1000;
+ break;
+ case 3: /* Illegal */
+ result.s.speed = 0;
+ result.s.link_up = 0;
+ break;
+ }
+#endif /* Actually not 100% sure this is GEFES-specific */
+ }
+ else
+ {
+ if (pcsx_miscx_ctl_reg.s.mac_phy)
+ {
+ /* PHY Mode */
+ cvmx_pcsx_mrx_status_reg_t pcsx_mrx_status_reg;
+ cvmx_pcsx_anx_results_reg_t pcsx_anx_results_reg;
+
+            /* Don't bother continuing if the SERDES low-level link is down */
+ pcsx_mrx_status_reg.u64 = cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG(index, interface));
+ if (pcsx_mrx_status_reg.s.lnk_st == 0)
+ {
+ if (__cvmx_helper_sgmii_hardware_init_link(interface, index) != 0)
+ return result;
+ }
+
+ /* Read the autoneg results */
+ pcsx_anx_results_reg.u64 = cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG(index, interface));
+ if (pcsx_anx_results_reg.s.an_cpt)
+ {
+ /* Auto negotiation is complete. Set status accordingly */
+ result.s.full_duplex = pcsx_anx_results_reg.s.dup;
+ result.s.link_up = pcsx_anx_results_reg.s.link_ok;
+ switch (pcsx_anx_results_reg.s.spd)
+ {
+ case 0:
+ result.s.speed = speed / 100;
+ break;
+ case 1:
+ result.s.speed = speed / 10;
+ break;
+ case 2:
+ result.s.speed = speed;
+ break;
+ default:
+ result.s.speed = 0;
+ result.s.link_up = 0;
+ break;
+ }
+ }
+ else
+ {
+ /* Auto negotiation isn't complete. Return link down */
+ result.s.speed = 0;
+ result.s.link_up = 0;
+ }
+ }
+ else /* MAC Mode */
+ {
+ result = __cvmx_helper_board_link_get(ipd_port);
+ }
+ }
+ return result;
+}
+
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ if (link_info.s.link_up || !__cvmx_helper_need_g15618()) {
+ __cvmx_helper_sgmii_hardware_init_link(interface, index);
+ } else {
+ cvmx_pcsx_mrx_control_reg_t control_reg;
+ cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
+
+ control_reg.u64 = cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ control_reg.s.an_en = 0;
+ cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
+ cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+        /* Use GMXENO to force the link down; it will get re-enabled later...
+           Read the register first so the other fields aren't clobbered. */
+        pcsx_miscx_ctl_reg.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+        pcsx_miscx_ctl_reg.s.gmxeno = 1;
+        cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface), pcsx_miscx_ctl_reg.u64);
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ return 0;
+ }
+ return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index, link_info);
+}
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ *                   Non-zero if you want internal loopback
+ * @param enable_external
+ *                   Non-zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal, int enable_external)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ cvmx_pcsx_mrx_control_reg_t pcsx_mrx_control_reg;
+ cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
+
+ pcsx_mrx_control_reg.u64 = cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ pcsx_mrx_control_reg.s.loopbck1 = enable_internal;
+ cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), pcsx_mrx_control_reg.u64);
+
+ pcsx_miscx_ctl_reg.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ pcsx_miscx_ctl_reg.s.loopbck2 = enable_external;
+ cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface), pcsx_miscx_ctl_reg.u64);
+
+ __cvmx_helper_sgmii_hardware_init_link(interface, index);
+ return 0;
+}
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-sgmii.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
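One detail in cvmx-helper-sgmii.c worth unpacking is the PCS link timer
math: clock_mhz is SCLK cycles per microsecond, the constants 10000 and
1600 are the 1000BASE-X (10 ms) and SGMII (1.6 ms) intervals in
microseconds, and the >> 10 converts cycles into the register's units of
1024 SCLK cycles. A worked instance at an assumed 600 MHz SCLK (the clock
rate here is illustrative, not taken from this commit):

    /* SGMII, 1.6 ms interval at an assumed 600 MHz SCLK:
       1600 us * 600 cycles/us = 960000 SCLK cycles
       960000 >> 10            = 937, in units of 1024 SCLK cycles */
    uint64_t count = (1600ull * 600) >> 10;   /* == 937 */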
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-sgmii.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-sgmii.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-sgmii.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,127 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for SGMII initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __CVMX_HELPER_SGMII_H__
+#define __CVMX_HELPER_SGMII_H__
+
+/**
+ * @INTERNAL
+ * Probe an SGMII interface and determine the number of ports
+ * connected to it. The SGMII interface should still be down after
+ * this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_sgmii_probe(int interface);
+extern int __cvmx_helper_sgmii_enumerate(int interface);
+
+/**
+ * @INTERNAL
+ * Bring up and enable an SGMII interface. After this call, packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_sgmii_enable(int interface);
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port);
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_sgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ *                   Non-zero if you want internal loopback
+ * @param enable_external
+ *                   Non-zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+extern int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal, int enable_external);
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-sgmii.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
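The @INTERNAL functions in cvmx-helper-sgmii.h are normally invoked by the
top-level dispatcher in cvmx-helper.c rather than called directly. A hedged
sketch of the order they assume, taken from their own doc comments (probe
while the interface is still down, enable with IPD on but PKO still off);
example_bringup_sgmii is a hypothetical caller:

    /* Probe first, then enable; IPD is on and PKO off at enable time. */
    static void example_bringup_sgmii(int interface)
    {
        int nports = __cvmx_helper_sgmii_probe(interface); /* interface down */
        if (nports > 0)
            __cvmx_helper_sgmii_enable(interface);         /* IPD on, PKO off */
    }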
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-spi.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-spi.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-spi.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,278 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for SPI initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+#include <asm/octeon/cvmx-spi.h>
+#include <asm/octeon/cvmx-helper.h>
+#endif
+#include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-pip-defs.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+#include "cvmx.h"
+#include "cvmx-spi.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-helper.h"
+#endif
+#else
+#include "cvmx.h"
+#include "cvmx-spi.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-helper.h"
+#endif
+#endif
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+/* CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI initialization
+ routines wait for SPI training. You can override the value using
+ executive-config.h if necessary */
+#ifndef CVMX_HELPER_SPI_TIMEOUT
+#define CVMX_HELPER_SPI_TIMEOUT 10
+#endif
+
+int __cvmx_helper_spi_enumerate(int interface)
+{
+#if defined(OCTEON_VENDOR_LANNER)
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_LANNER_MR955)
+ {
+ cvmx_pko_reg_crc_enable_t enable;
+
+ enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
+ enable.s.enable &= 0xffff << (16 - (interface*16));
+ cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
+
+ if (interface == 1)
+ return 12;
+ /* XXX This is not entirely true. */
+ return 0;
+ }
+#endif
+
+#if defined(OCTEON_VENDOR_RADISYS)
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
+ if (interface == 0)
+ return 13;
+ if (interface == 1)
+ return 8;
+ return 0;
+ }
+#endif
+
+ if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
+ cvmx_spi4000_is_present(interface))
+ return 10;
+ else
+ return 16;
+}
+
+/**
+ * @INTERNAL
+ * Probe an SPI interface and determine the number of ports
+ * connected to it. The SPI interface should still be down after
+ * this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_spi_probe(int interface)
+{
+ int num_ports = __cvmx_helper_spi_enumerate(interface);
+
+ if (num_ports == 16) {
+ cvmx_pko_reg_crc_enable_t enable;
+ /*
+ * Unlike the SPI4000, most SPI devices don't
+		 * automatically append the L2 CRC. For everything
+		 * except the SPI4000, have PKO append the L2 CRC
+		 * to the packet.
+ */
+ enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
+ enable.s.enable |= 0xffff << (interface*16);
+ cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
+ }
+ __cvmx_helper_setup_gmx(interface, num_ports);
+ return num_ports;
+}
+
+
+/**
+ * @INTERNAL
+ * Bring up and enable an SPI interface. After this call, packet I/O
+ * should be fully functional. This is called with IPD enabled but
+ * PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_spi_enable(int interface)
+{
+    /* Normally the Ethernet L2 CRC is checked and stripped in the GMX block.
+       When you are using SPI, this isn't the case and IPD needs to check
+       the L2 CRC */
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int ipd_port;
+ for (ipd_port=interface*16; ipd_port<interface*16+num_ports; ipd_port++)
+ {
+ cvmx_pip_prt_cfgx_t port_config;
+ port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ port_config.s.crc_en = 1;
+#ifdef OCTEON_VENDOR_RADISYS
+ /*
+ * Incoming packets on the RSYS4GBE have the FCS stripped.
+ */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE)
+ port_config.s.crc_en = 0;
+#endif
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
+ }
+
+ if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
+ {
+ cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX, CVMX_HELPER_SPI_TIMEOUT, num_ports);
+ if (cvmx_spi4000_is_present(interface))
+ cvmx_spi4000_initialize(interface);
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ result.u64 = 0;
+
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ {
+ /* The simulator gives you a simulated full duplex link */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 10000;
+ }
+ else if (cvmx_spi4000_is_present(interface))
+ {
+ cvmx_gmxx_rxx_rx_inbnd_t inband = cvmx_spi4000_check_speed(interface, index);
+ result.s.link_up = inband.s.status;
+ result.s.full_duplex = inband.s.duplex;
+ switch (inband.s.speed)
+ {
+ case 0: /* 10 Mbps */
+ result.s.speed = 10;
+ break;
+ case 1: /* 100 Mbps */
+ result.s.speed = 100;
+ break;
+ case 2: /* 1 Gbps */
+ result.s.speed = 1000;
+ break;
+ case 3: /* Illegal */
+ result.s.speed = 0;
+ result.s.link_up = 0;
+ break;
+ }
+ }
+ else
+ {
+        /* For generic SPI we can't determine the link, so just return some
+           sane results */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 10000;
+ }
+ return result;
+}
+
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+    /* Nothing to do. If we have a SPI4000, the setup was already performed
+       by cvmx_spi4000_check_speed(). If not, there isn't any link
+       info */
+ return 0;
+}
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-spi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
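As the comment in cvmx-helper-spi.c notes, the SPI training timeout is a
compile-time knob. A sketch of the override, placed in executive-config.h
before the helpers are built; the value 20 is an arbitrary illustration
(the stock value is 10, in whatever units cvmx_spi_start_interface()
expects for its timeout argument):

    /* executive-config.h: allow more time for SPI training */
    #define CVMX_HELPER_SPI_TIMEOUT 20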
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-spi.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-spi.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-spi.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,111 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for SPI initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __CVMX_HELPER_SPI_H__
+#define __CVMX_HELPER_SPI_H__
+
+/**
+ * @INTERNAL
+ * Probe an SPI interface and determine the number of ports
+ * connected to it. The SPI interface should still be down after
+ * this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_spi_probe(int interface);
+extern int __cvmx_helper_spi_enumerate(int interface);
+
+/**
+ * @INTERNAL
+ * Bring up and enable an SPI interface. After this call, packet I/O
+ * should be fully functional. This is called with IPD enabled but
+ * PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_spi_enable(int interface);
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port);
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-spi.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
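A short sketch of walking every port on an SPI interface with the query
helper declared in cvmx-helper-spi.h, using the cvmx-helper accessors that
already appear elsewhere in this commit; the loop itself is hypothetical:

    /* Print the link state of each port on one SPI interface. */
    static void example_dump_spi_links(int interface)
    {
        int i, n = cvmx_helper_ports_on_interface(interface);
        for (i = 0; i < n; i++)
        {
            int ipd_port = cvmx_helper_get_ipd_port(interface, i);
            cvmx_helper_link_info_t li = __cvmx_helper_spi_link_get(ipd_port);
            cvmx_dprintf("SPI port %d: %s\n", ipd_port,
                         li.s.link_up ? "up" : "down");
        }
    }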
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-srio.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-srio.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-srio.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,358 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for SRIO initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-qlm.h>
+#include <asm/octeon/cvmx-srio.h>
+#include <asm/octeon/cvmx-pip-defs.h>
+#include <asm/octeon/cvmx-sriox-defs.h>
+#include <asm/octeon/cvmx-sriomaintx-defs.h>
+#include <asm/octeon/cvmx-dpi-defs.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+#include "cvmx.h"
+#include "cvmx-helper.h"
+#include "cvmx-srio.h"
+#endif
+#include "cvmx-qlm.h"
+#else
+#include "cvmx.h"
+#include "cvmx-helper.h"
+#include "cvmx-qlm.h"
+#include "cvmx-srio.h"
+#endif
+#endif
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+/**
+ * @INTERNAL
+ * Probe an SRIO interface and determine the number of ports
+ * connected to it. The SRIO interface should still be down
+ * after this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_srio_probe(int interface)
+{
+ cvmx_sriox_status_reg_t srio0_status_reg;
+ cvmx_sriox_status_reg_t srio1_status_reg;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_SRIO))
+ return 0;
+
+ /* Read MIO_QLMX_CFG CSRs to find SRIO status. */
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+ {
+ int status = cvmx_qlm_get_status(0);
+ int srio_port = interface - 4;
+ switch(srio_port)
+ {
+ case 0: /* 1x4 lane */
+ if (status == 4)
+ return 2;
+ break;
+ case 2: /* 2x2 lane */
+ if (status == 5)
+ return 2;
+ break;
+ case 1: /* 4x1 long/short */
+ case 3: /* 4x1 long/short */
+ if (status == 6)
+ return 2;
+ break;
+ }
+ return 0;
+ }
+
+ srio0_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(0));
+ srio1_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(1));
+ if (srio0_status_reg.s.srio || srio1_status_reg.s.srio)
+ return 2;
+ else
+ return 0;
+}
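+
+/* Editor's illustration (not part of the SDK source): a hedged sketch of
+   how a caller might use the probe above. The SRIO interface numbers
+   4..7 are an assumption inferred from the "interface - 4" arithmetic. */
+#if 0 /* example only */
+static void example_probe_srio_interfaces(void)
+{
+    int iface;
+    for (iface = 4; iface <= 7; iface++)
+    {
+        int nports = __cvmx_helper_srio_probe(iface);
+        if (nports)
+            cvmx_dprintf("SRIO interface %d: %d port(s)\n", iface, nports);
+    }
+}
+#endif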
+
+
+/**
+ * @INTERNAL
+ * Bring up and enable an SRIO interface. After this call, packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_srio_enable(int interface)
+{
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int index;
+ cvmx_sriomaintx_core_enables_t sriomaintx_core_enables;
+ cvmx_sriox_imsg_ctrl_t sriox_imsg_ctrl;
+ cvmx_sriox_status_reg_t srio_status_reg;
+ cvmx_dpi_ctl_t dpi_ctl;
+ int srio_port = interface - 4;
+
+ /* All SRIO ports have a cvmx_srio_rx_message_header_t header
+ on them that must be skipped by IPD */
+ for (index=0; index<num_ports; index++)
+ {
+ cvmx_pip_prt_cfgx_t port_config;
+ cvmx_sriox_omsg_portx_t sriox_omsg_portx;
+ cvmx_sriox_omsg_sp_mrx_t sriox_omsg_sp_mrx;
+ cvmx_sriox_omsg_fmp_mrx_t sriox_omsg_fmp_mrx;
+ cvmx_sriox_omsg_nmp_mrx_t sriox_omsg_nmp_mrx;
+ int ipd_port = cvmx_helper_get_ipd_port(interface, index);
+ port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ /* Only change the skip if the user hasn't already set it */
+ if (!port_config.s.skip)
+ {
+ port_config.s.skip = sizeof(cvmx_srio_rx_message_header_t);
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
+ }
+
+ /* Enable TX with PKO */
+ sriox_omsg_portx.u64 = cvmx_read_csr(CVMX_SRIOX_OMSG_PORTX(index, srio_port));
+ sriox_omsg_portx.s.port = (srio_port) * 2 + index;
+ sriox_omsg_portx.s.enable = 1;
+ cvmx_write_csr(CVMX_SRIOX_OMSG_PORTX(index, srio_port), sriox_omsg_portx.u64);
+
+ /* Allow OMSG controller to send regardless of the state of any other
+ controller. Allow messages to different IDs and MBOXes to go in
+ parallel */
+ sriox_omsg_sp_mrx.u64 = 0;
+ sriox_omsg_sp_mrx.s.xmbox_sp = 1;
+ sriox_omsg_sp_mrx.s.ctlr_sp = 1;
+ sriox_omsg_sp_mrx.s.ctlr_fmp = 1;
+ sriox_omsg_sp_mrx.s.ctlr_nmp = 1;
+ sriox_omsg_sp_mrx.s.id_sp = 1;
+ sriox_omsg_sp_mrx.s.id_fmp = 1;
+ sriox_omsg_sp_mrx.s.id_nmp = 1;
+ sriox_omsg_sp_mrx.s.mbox_sp = 1;
+ sriox_omsg_sp_mrx.s.mbox_fmp = 1;
+ sriox_omsg_sp_mrx.s.mbox_nmp = 1;
+ sriox_omsg_sp_mrx.s.all_psd = 1;
+ cvmx_write_csr(CVMX_SRIOX_OMSG_SP_MRX(index, srio_port), sriox_omsg_sp_mrx.u64);
+
+ /* Allow OMSG controller to send regardless of the state of any other
+ controller. Allow messages to different IDs and MBOXes to go in
+ parallel */
+ sriox_omsg_fmp_mrx.u64 = 0;
+ sriox_omsg_fmp_mrx.s.ctlr_sp = 1;
+ sriox_omsg_fmp_mrx.s.ctlr_fmp = 1;
+ sriox_omsg_fmp_mrx.s.ctlr_nmp = 1;
+ sriox_omsg_fmp_mrx.s.id_sp = 1;
+ sriox_omsg_fmp_mrx.s.id_fmp = 1;
+ sriox_omsg_fmp_mrx.s.id_nmp = 1;
+ sriox_omsg_fmp_mrx.s.mbox_sp = 1;
+ sriox_omsg_fmp_mrx.s.mbox_fmp = 1;
+ sriox_omsg_fmp_mrx.s.mbox_nmp = 1;
+ sriox_omsg_fmp_mrx.s.all_psd = 1;
+ cvmx_write_csr(CVMX_SRIOX_OMSG_FMP_MRX(index, srio_port), sriox_omsg_fmp_mrx.u64);
+
+ /* Once the first part of a message is accepted, always accept the rest
+ of the message */
+ sriox_omsg_nmp_mrx.u64 = 0;
+ sriox_omsg_nmp_mrx.s.all_sp = 1;
+ sriox_omsg_nmp_mrx.s.all_fmp = 1;
+ sriox_omsg_nmp_mrx.s.all_nmp = 1;
+ cvmx_write_csr(CVMX_SRIOX_OMSG_NMP_MRX(index, srio_port), sriox_omsg_nmp_mrx.u64);
+
+ }
+
+ /* Choose the receive controller based on the mailbox */
+ sriox_imsg_ctrl.u64 = cvmx_read_csr(CVMX_SRIOX_IMSG_CTRL(srio_port));
+ sriox_imsg_ctrl.s.prt_sel = 0;
+ sriox_imsg_ctrl.s.mbox = 0xa;
+ cvmx_write_csr(CVMX_SRIOX_IMSG_CTRL(srio_port), sriox_imsg_ctrl.u64);
+
+ /* DPI must be enabled for us to RX messages */
+ dpi_ctl.u64 = cvmx_read_csr(CVMX_DPI_CTL);
+ dpi_ctl.s.clk = 1;
+ dpi_ctl.s.en = 1;
+ cvmx_write_csr(CVMX_DPI_CTL, dpi_ctl.u64);
+
+ /* Make sure register access is allowed */
+ srio_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(srio_port));
+ if (!srio_status_reg.s.access)
+ return 0;
+
+ /* Enable RX */
+ if (!cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
+ CVMX_SRIOMAINTX_CORE_ENABLES(srio_port), &sriomaintx_core_enables.u32))
+ {
+ sriomaintx_core_enables.s.imsg0 = 1;
+ sriomaintx_core_enables.s.imsg1 = 1;
+ cvmx_srio_config_write32(srio_port, 0, -1, 0, 0,
+ CVMX_SRIOMAINTX_CORE_ENABLES(srio_port), sriomaintx_core_enables.u32);
+ }
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by SRIO link status.
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_srio_link_get(int ipd_port)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int srio_port = interface - 4;
+ cvmx_helper_link_info_t result;
+ cvmx_sriox_status_reg_t srio_status_reg;
+ cvmx_sriomaintx_port_0_err_stat_t sriomaintx_port_0_err_stat;
+ cvmx_sriomaintx_port_0_ctl_t sriomaintx_port_0_ctl;
+ cvmx_sriomaintx_port_0_ctl2_t sriomaintx_port_0_ctl2;
+
+ result.u64 = 0;
+
+ /* Make sure register access is allowed */
+ srio_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(srio_port));
+ if (!srio_status_reg.s.access)
+ return result;
+
+ /* Read the port link status */
+ if (cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
+ CVMX_SRIOMAINTX_PORT_0_ERR_STAT(srio_port),
+ &sriomaintx_port_0_err_stat.u32))
+ return result;
+
+ /* Return if link is down */
+ if (!sriomaintx_port_0_err_stat.s.pt_ok)
+ return result;
+
+ /* Read the port link width and speed */
+ if (cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
+ CVMX_SRIOMAINTX_PORT_0_CTL(srio_port),
+ &sriomaintx_port_0_ctl.u32))
+ return result;
+ if (cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
+ CVMX_SRIOMAINTX_PORT_0_CTL2(srio_port),
+ &sriomaintx_port_0_ctl2.u32))
+ return result;
+
+ /* Link is up */
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ switch (sriomaintx_port_0_ctl2.s.sel_baud)
+ {
+ case 1:
+ result.s.speed = 1250;
+ break;
+ case 2:
+ result.s.speed = 2500;
+ break;
+ case 3:
+ result.s.speed = 3125;
+ break;
+ case 4:
+ result.s.speed = 5000;
+ break;
+ case 5:
+ result.s.speed = 6250;
+ break;
+ default:
+ result.s.speed = 0;
+ break;
+ }
+ switch (sriomaintx_port_0_ctl.s.it_width)
+ {
+ case 2: /* Four lanes */
+ result.s.speed += 40000;
+ break;
+ case 3: /* Two lanes */
+ result.s.speed += 20000;
+ break;
+ default: /* One lane */
+ result.s.speed += 10000;
+ break;
+ }
+ return result;
+}
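+
+/* Editor's illustration (not part of the SDK source): the speed value
+   built above packs the lane count into the ten-thousands digit and the
+   per-lane rate into the low digits, so both can be recovered with
+   simple arithmetic. */
+#if 0 /* example only */
+static void example_decode_srio_speed(cvmx_helper_link_info_t info)
+{
+    int lanes = info.s.speed / 10000; /* 1, 2 or 4 lanes */
+    int mbaud = info.s.speed % 10000; /* 1250..6250 MBaud per lane */
+    cvmx_dprintf("SRIO link: %d lane(s) at %d MBaud\n", lanes, mbaud);
+}
+#endif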
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_srio_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+ return 0;
+}
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-srio.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-srio.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-srio.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-srio.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,112 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for SRIO initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+#ifndef __CVMX_HELPER_SRIO_H__
+#define __CVMX_HELPER_SRIO_H__
+
+/**
+ * @INTERNAL
+ * Probe an SRIO interface and determine the number of ports
+ * connected to it. The SRIO interface should still be down after
+ * this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_srio_probe(int interface);
+static inline int __cvmx_helper_srio_enumerate(int interface)
+{
+ return __cvmx_helper_srio_probe(interface);
+}
+
+/**
+ * @INTERNAL
+ * Bring up and enable an SRIO interface. After this call, packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_srio_enable(int interface);
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by SRIO link status.
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_srio_link_get(int ipd_port);
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_srio_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
+
+#endif
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-srio.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-util.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-util.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-util.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,865 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Small helper utilities.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-sli-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-helper-cfg.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#endif
+#include "cvmx.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-fpa.h"
+#include "cvmx-pip.h"
+#include "cvmx-pko.h"
+#include "cvmx-ilk.h"
+#include "cvmx-ipd.h"
+#include "cvmx-gmx.h"
+#include "cvmx-spi.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-util.h"
+#include "cvmx-version.h"
+#include "cvmx-helper-ilk.h"
+#include "cvmx-helper-cfg.h"
+#endif
+
+#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
+
+struct cvmx_iface {
+ int cvif_ipd_nports;
+ int cvif_has_fcs; /* PKO fcs for this interface. */
+ enum cvmx_pko_padding cvif_padding;
+ cvmx_helper_link_info_t *cvif_ipd_port_link_info;
+};
+
+/*
+ * This has to be static, as u-boot expects to probe an interface and
+ * get the number of its ports.
+ */
+static CVMX_SHARED struct cvmx_iface cvmx_interfaces[CVMX_HELPER_MAX_IFACE];
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+/**
+ * Get the version of the CVMX libraries.
+ *
+ * @return Version string. Note this buffer is allocated statically
+ * and will be shared by all callers.
+ */
+const char *cvmx_helper_get_version(void)
+{
+ return OCTEON_SDK_VERSION_STRING;
+}
+#endif
+
+/**
+ * Convert an interface mode into a human-readable string
+ *
+ * @param mode Mode to convert
+ *
+ * @return String
+ */
+const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode)
+{
+ switch (mode)
+ {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED: return "DISABLED";
+ case CVMX_HELPER_INTERFACE_MODE_RGMII: return "RGMII";
+ case CVMX_HELPER_INTERFACE_MODE_GMII: return "GMII";
+ case CVMX_HELPER_INTERFACE_MODE_SPI: return "SPI";
+ case CVMX_HELPER_INTERFACE_MODE_PCIE: return "PCIE";
+ case CVMX_HELPER_INTERFACE_MODE_XAUI: return "XAUI";
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI: return "RXAUI";
+ case CVMX_HELPER_INTERFACE_MODE_SGMII: return "SGMII";
+ case CVMX_HELPER_INTERFACE_MODE_PICMG: return "PICMG";
+ case CVMX_HELPER_INTERFACE_MODE_NPI: return "NPI";
+ case CVMX_HELPER_INTERFACE_MODE_LOOP: return "LOOP";
+ case CVMX_HELPER_INTERFACE_MODE_SRIO: return "SRIO";
+ case CVMX_HELPER_INTERFACE_MODE_ILK: return "ILK";
+ }
+ return "UNKNOWN";
+}
+
+
+/**
+ * Debug routine to dump the packet structure to the console
+ *
+ * @param work Work queue entry containing the packet to dump
+ * @return Zero on success.
+ */
+int cvmx_helper_dump_packet(cvmx_wqe_t *work)
+{
+ uint64_t count;
+ uint64_t remaining_bytes;
+ cvmx_buf_ptr_t buffer_ptr;
+ uint64_t start_of_buffer;
+ uint8_t * data_address;
+ uint8_t * end_of_data;
+
+ cvmx_dprintf("Packet Length: %u\n", cvmx_wqe_get_len(work));
+ cvmx_dprintf(" Input Port: %u\n", cvmx_wqe_get_port(work));
+ cvmx_dprintf(" QoS: %u\n", cvmx_wqe_get_qos(work));
+ cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);
+
+ if (work->word2.s.bufs == 0)
+ {
+ cvmx_ipd_wqe_fpa_queue_t wqe_pool;
+ wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE);
+ buffer_ptr.u64 = 0;
+ buffer_ptr.s.pool = wqe_pool.s.wqe_pool;
+ buffer_ptr.s.size = 128;
+ buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
+ if (cvmx_likely(!work->word2.s.not_IP))
+ {
+ cvmx_pip_ip_offset_t pip_ip_offset;
+ pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET);
+ buffer_ptr.s.addr += (pip_ip_offset.s.offset<<3) - work->word2.s.ip_offset;
+ buffer_ptr.s.addr += (work->word2.s.is_v6^1)<<2;
+ }
+ else
+ {
+ /* WARNING: This code assumes that the packet is not RAW. If it was,
+ we would use PIP_GBL_CFG[RAW_SHF] instead of
+ PIP_GBL_CFG[NIP_SHF] */
+ cvmx_pip_gbl_cfg_t pip_gbl_cfg;
+ pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG);
+ buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
+ }
+ }
+ else
+ buffer_ptr = work->packet_ptr;
+ remaining_bytes = cvmx_wqe_get_len(work);
+
+ while (remaining_bytes)
+ {
+ start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
+ cvmx_dprintf(" Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
+ cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
+ cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
+ cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
+ cvmx_dprintf(" Buffer Data: %llx\n", (unsigned long long)buffer_ptr.s.addr);
+ cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);
+
+ cvmx_dprintf("\t\t");
+ data_address = (uint8_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr);
+ end_of_data = data_address + buffer_ptr.s.size;
+ count = 0;
+ while (data_address < end_of_data)
+ {
+ if (remaining_bytes == 0)
+ break;
+ else
+ remaining_bytes--;
+ cvmx_dprintf("%02x", (unsigned int)*data_address);
+ data_address++;
+ if (remaining_bytes && (count == 7))
+ {
+ cvmx_dprintf("\n\t\t");
+ count = 0;
+ }
+ else
+ count++;
+ }
+ cvmx_dprintf("\n");
+
+ if (remaining_bytes)
+ buffer_ptr = *(cvmx_buf_ptr_t*)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+ }
+ return 0;
+}
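+
+/* Editor's note (not part of the SDK source): the walk above relies on
+   the IPD convention that the pointer to the next buffer is stored in
+   the 8 bytes immediately before the current buffer's data address,
+   which is why the next cvmx_buf_ptr_t is read from addr - 8. */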
+
+
+/**
+ * Setup Random Early Drop on a specific input queue
+ *
+ * @param queue Input queue to setup RED on (0-7)
+ * @param pass_thresh
+ * Packets will begin slowly dropping when there are fewer than
+ * this many packet buffers free in FPA 0.
+ * @param drop_thresh
+ * All incoming packets will be dropped when there are fewer
+ * than this many free packet buffers in FPA 0.
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
+{
+ cvmx_ipd_qosx_red_marks_t red_marks;
+ cvmx_ipd_red_quex_param_t red_param;
+
+ /* Set RED to begin dropping packets when there are pass_thresh buffers
+ left. It will linearly drop more packets until reaching drop_thresh
+ buffers */
+ red_marks.u64 = 0;
+ red_marks.s.drop = drop_thresh;
+ red_marks.s.pass = pass_thresh;
+ cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
+
+ /* Use the actual queue 0 counter, not the average */
+ red_param.u64 = 0;
+ red_param.s.prb_con = (255ul<<24) / (red_marks.s.pass - red_marks.s.drop);
+ red_param.s.avg_con = 1;
+ red_param.s.new_con = 255;
+ red_param.s.use_pcnt = 1;
+ cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
+ return 0;
+}
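+
+/* Editor's illustration (not part of the SDK source): PRB_CON above
+   appears to encode the slope of the drop-probability ramp in fixed
+   point. A software model of the resulting behavior: */
+#if 0 /* example only */
+static unsigned example_red_drop_probability(int free_bufs, int pass_thresh,
+                                             int drop_thresh)
+{
+    if (free_bufs >= pass_thresh)
+        return 0;   /* no drops while free buffers exceed the pass mark */
+    if (free_bufs <= drop_thresh)
+        return 255; /* everything dropped below the drop mark */
+    /* Linear ramp between the marks, mirroring the PRB_CON slope */
+    return (255u * (pass_thresh - free_bufs)) / (pass_thresh - drop_thresh);
+}
+#endif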
+
+
+/**
+ * Setup Random Early Drop to automatically begin dropping packets.
+ *
+ * @param pass_thresh
+ * Packets will begin slowly dropping when there are fewer than
+ * this many packet buffers free in FPA 0.
+ * @param drop_thresh
+ * All incoming packets will be dropped when there are fewer
+ * than this many free packet buffers in FPA 0.
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
+{
+ int queue;
+ int interface;
+ int port;
+
+ /*
+ * Disable backpressure based on queued buffers. It needs SW support
+ */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ int bpid;
+ for (interface = 0; interface < CVMX_HELPER_MAX_GMX; interface++)
+ {
+ int num_ports;
+
+ num_ports = cvmx_helper_ports_on_interface(interface);
+ for (port = 0; port < num_ports; port++) {
+ bpid = cvmx_helper_get_bpid(interface, port);
+ if (bpid == CVMX_INVALID_BPID)
+ cvmx_dprintf(
+ "setup_red: cvmx_helper_get_bpid(%d, %d) = %d\n",
+ interface, port, cvmx_helper_get_bpid(interface, port));
+ else
+ cvmx_write_csr(CVMX_IPD_BPIDX_MBUF_TH(bpid), 0);
+ }
+ }
+ }
+ else
+ {
+ cvmx_ipd_portx_bp_page_cnt_t page_cnt;
+
+ page_cnt.u64 = 0;
+ page_cnt.s.bp_enb = 0;
+ page_cnt.s.page_cnt = 100;
+ for (interface = 0; interface < CVMX_HELPER_MAX_GMX; interface++)
+ {
+ for (port = cvmx_helper_get_first_ipd_port(interface);
+ port < cvmx_helper_get_last_ipd_port(interface); port++)
+ cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port), page_cnt.u64);
+ }
+ }
+
+ for (queue = 0; queue < 8; queue++)
+ cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);
+
+ /*
+ * Shut off dropping based on the per-port page count; SW isn't
+ * decrementing it right now.
+ */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ cvmx_write_csr(CVMX_IPD_ON_BP_DROP_PKTX(0), 0);
+ else
+ cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, 0);
+
+#define IPD_RED_AVG_DLY 1000
+#define IPD_RED_PRB_DLY 1000
+ /*
+ * Setting up avg_dly and prb_dly, enable bits
+ */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ cvmx_ipd_red_delay_t red_delay;
+ cvmx_ipd_red_bpid_enablex_t red_bpid_enable;
+
+ red_delay.u64 = 0;
+ red_delay.s.avg_dly = IPD_RED_AVG_DLY;
+ red_delay.s.prb_dly = IPD_RED_PRB_DLY;
+ cvmx_write_csr(CVMX_IPD_RED_DELAY, red_delay.u64);
+
+ /*
+ * Only enable the gmx ports
+ */
+ red_bpid_enable.u64 = 0;
+ for (interface = 0; interface < CVMX_HELPER_MAX_GMX; interface++)
+ {
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ for (port = 0; port < num_ports; port++)
+ red_bpid_enable.u64 |= (((uint64_t) 1) <<
+ cvmx_helper_get_bpid(interface, port));
+ }
+ cvmx_write_csr(CVMX_IPD_RED_BPID_ENABLEX(0), red_bpid_enable.u64);
+ }
+ else
+ {
+ cvmx_ipd_red_port_enable_t red_port_enable;
+
+ red_port_enable.u64 = 0;
+ red_port_enable.s.prt_enb = 0xfffffffffull;
+ red_port_enable.s.avg_dly = IPD_RED_AVG_DLY;
+ red_port_enable.s.prb_dly = IPD_RED_PRB_DLY;
+ cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);
+
+ /*
+ * Shut off RED-based packet dropping for SRIO ports
+ */
+ if (octeon_has_feature(OCTEON_FEATURE_SRIO))
+ {
+ cvmx_ipd_red_port_enable2_t red_port_enable2;
+ red_port_enable2.u64 = 0;
+ red_port_enable2.s.prt_enb = 0xf0;
+ cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE2, red_port_enable2.u64);
+ }
+ }
+
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_setup_red);
+#endif
+
+
+/**
+ * @INTERNAL
+ * Setup the common GMX settings that determine the number of
+ * ports. These setting apply to almost all configurations of all
+ * chips.
+ *
+ * @param interface Interface to configure
+ * @param num_ports Number of ports on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_setup_gmx(int interface, int num_ports)
+{
+ cvmx_gmxx_tx_prts_t gmx_tx_prts;
+ cvmx_gmxx_rx_prts_t gmx_rx_prts;
+ cvmx_pko_reg_gmx_port_mode_t pko_mode;
+ cvmx_gmxx_txx_thresh_t gmx_tx_thresh;
+ int index;
+
+ /*
+ * Tell GMX the number of TX ports on this interface
+ */
+ gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface));
+ gmx_tx_prts.s.prts = num_ports;
+ cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64);
+
+ /*
+ * Tell GMX the number of RX ports on this interface. This only applies
+ * to GMII and XAUI ports
+ */
+ if (cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_RGMII
+ || cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_SGMII
+ || cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_GMII
+ || cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_XAUI)
+ {
+ if (num_ports > 4)
+ {
+ cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal num_ports\n");
+ return(-1);
+ }
+
+ gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
+ gmx_rx_prts.s.prts = num_ports;
+ cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64);
+ }
+
+ /*
+ * Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, 50XX,
+ * and 68XX.
+ */
+ if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN50XX) && !OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ /* Tell PKO the number of ports on this interface */
+ pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
+ if (interface == 0)
+ {
+ if (num_ports == 1)
+ pko_mode.s.mode0 = 4;
+ else if (num_ports == 2)
+ pko_mode.s.mode0 = 3;
+ else if (num_ports <= 4)
+ pko_mode.s.mode0 = 2;
+ else if (num_ports <= 8)
+ pko_mode.s.mode0 = 1;
+ else
+ pko_mode.s.mode0 = 0;
+ }
+ else
+ {
+ if (num_ports == 1)
+ pko_mode.s.mode1 = 4;
+ else if (num_ports == 2)
+ pko_mode.s.mode1 = 3;
+ else if (num_ports <= 4)
+ pko_mode.s.mode1 = 2;
+ else if (num_ports <= 8)
+ pko_mode.s.mode1 = 1;
+ else
+ pko_mode.s.mode1 = 0;
+ }
+ cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
+ }
+
+ /*
+ * Set GMX to buffer as much data as possible before starting
+ * transmit. This reduces the chance of a TX underrun
+ * due to memory contention. Any packet that fits entirely in the
+ * GMX FIFO can never have an underrun regardless of memory load.
+ */
+ gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface));
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN50XX))
+ /* These chips have a fixed max threshold of 0x40 */
+ gmx_tx_thresh.s.cnt = 0x40;
+ else
+ {
+ /* ccn - common count numerator */
+ int ccn = 0x100;
+
+ /* Choose the max value for the number of ports */
+ if (num_ports <= 1)
+ gmx_tx_thresh.s.cnt = ccn / 1;
+ else if (num_ports == 2)
+ gmx_tx_thresh.s.cnt = ccn / 2;
+ else
+ gmx_tx_thresh.s.cnt = ccn / 4;
+ }
+
+ /*
+ * SPI and XAUI can have lots of ports but the GMX hardware only ever has
+ * a max of 4
+ */
+ if (num_ports > 4)
+ num_ports = 4;
+ for (index = 0; index < num_ports; index++)
+ cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface),
+ gmx_tx_thresh.u64);
+
+ /*
+ * For o68, we need to setup the pipes
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX) && interface < CVMX_HELPER_MAX_GMX)
+ {
+ cvmx_gmxx_txx_pipe_t config;
+
+ for (index = 0; index < num_ports; index++)
+ {
+ config.u64 = 0;
+
+ if (__cvmx_helper_cfg_pko_port_base(interface, index) >= 0)
+ {
+ config.u64 = cvmx_read_csr(
+ CVMX_GMXX_TXX_PIPE(index, interface));
+ config.s.nump = __cvmx_helper_cfg_pko_port_num(interface, index);
+ config.s.base = __cvmx_helper_cfg_pko_port_base(interface, index);
+ cvmx_write_csr(CVMX_GMXX_TXX_PIPE(index, interface),
+ config.u64);
+ }
+ }
+ }
+
+ return 0;
+}
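+
+/* Editor's note (not part of the SDK source): a worked example of the
+   threshold split above. With ccn = 0x100 and two ports, each port's TX
+   threshold becomes 0x100 / 2 = 0x80, so the shared GMX TX FIFO is
+   divided evenly between the ports before transmission starts. */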
+
+int cvmx_helper_get_pko_port(int interface, int port)
+{
+ return cvmx_pko_get_base_pko_port(interface, port);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_get_pko_port);
+#endif
+
+int cvmx_helper_get_ipd_port(int interface, int port)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ if (interface >= 0 && interface <= 4)
+ {
+ cvmx_helper_interface_mode_t mode = cvmx_helper_interface_get_mode(interface);
+ if (mode == CVMX_HELPER_INTERFACE_MODE_XAUI || mode == CVMX_HELPER_INTERFACE_MODE_RXAUI)
+ return 0x840 + (interface * 0x100);
+ else
+ return 0x800 + (interface * 0x100) + (port * 16);
+ }
+ else if (interface == 5 || interface == 6)
+ return 0x400 + (interface - 5) * 0x100 + port;
+ else if (interface == 7)
+ return 0x100 + port;
+ else if (interface == 8)
+ return port;
+ else
+ return -1;
+ }
+ switch (interface)
+ {
+ case 0: return port;
+ case 1: return port + 16;
+ case 2: return port + 32;
+ case 3: return port + 36;
+ case 4: return port + 40;
+ case 5: return port + 42;
+ case 6: return port + 44;
+ }
+ return -1;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_get_ipd_port);
+#endif
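+
+/* Editor's illustration (not part of the SDK source): worked examples of
+   the mapping above. On PKND models, interface 0 port 2 in SGMII mode
+   maps to 0x800 + 0*0x100 + 2*16 = 0x820, while XAUI/RXAUI on the same
+   interface always maps to 0x840. On older models the ports are simply
+   packed, e.g. interface 1 port 3 maps to 3 + 16 = 19. */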
+
+int __cvmx_helper_get_num_ipd_ports(int interface)
+{
+ struct cvmx_iface *piface;
+
+ if (interface >= cvmx_helper_get_number_of_interfaces())
+ return -1;
+
+ piface = &cvmx_interfaces[interface];
+ return piface->cvif_ipd_nports;
+}
+
+enum cvmx_pko_padding __cvmx_helper_get_pko_padding(int interface)
+{
+ struct cvmx_iface *piface;
+
+ if (interface >= cvmx_helper_get_number_of_interfaces())
+ return CVMX_PKO_PADDING_NONE;
+
+ piface = &cvmx_interfaces[interface];
+ return piface->cvif_padding;
+}
+
+int __cvmx_helper_init_interface(int interface, int num_ipd_ports, int has_fcs, enum cvmx_pko_padding pad)
+{
+ struct cvmx_iface *piface;
+ int sz;
+
+ if (interface >= cvmx_helper_get_number_of_interfaces())
+ return -1;
+
+ piface = &cvmx_interfaces[interface];
+ piface->cvif_ipd_nports = num_ipd_ports;
+ piface->cvif_padding = pad;
+
+ piface->cvif_has_fcs = has_fcs;
+
+ /*
+ * allocate the per-ipd_port link_info structure
+ */
+ sz = piface->cvif_ipd_nports * sizeof(cvmx_helper_link_info_t);
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ if (sz == 0)
+ sz = sizeof(cvmx_helper_link_info_t);
+ piface->cvif_ipd_port_link_info = (cvmx_helper_link_info_t *)kmalloc(sz, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(piface->cvif_ipd_port_link_info))
+ panic("Cannot allocate memory in __cvmx_helper_init_interface.");
+#else
+ piface->cvif_ipd_port_link_info = (cvmx_helper_link_info_t *)cvmx_bootmem_alloc(sz, sizeof(cvmx_helper_link_info_t));
+#endif
+ if (!piface->cvif_ipd_port_link_info)
+ return -1;
+
+ /* Initialize 'em */ {
+ int i;
+ cvmx_helper_link_info_t *p;
+ p = piface->cvif_ipd_port_link_info;
+
+ for (i = 0; i < piface->cvif_ipd_nports; i++)
+ {
+ (*p).u64 = 0;
+ p++;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Shut down the interfaces; free the resources.
+ * @INTERNAL
+ */
+void __cvmx_helper_shutdown_interfaces(void)
+{
+ int i;
+ int nifaces; /* number of interfaces */
+ struct cvmx_iface *piface;
+
+ nifaces = cvmx_helper_get_number_of_interfaces();
+ for (i = 0; i < nifaces; i++)
+ {
+ piface = cvmx_interfaces + i;
+ if (piface->cvif_ipd_port_link_info)
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ kfree(piface->cvif_ipd_port_link_info);
+#else
+ /*
+ * For SE apps, bootmem was meant to be allocated and never
+ * freed.
+ */
+#endif
+ piface->cvif_ipd_port_link_info = 0;
+ }
+}
+
+int __cvmx_helper_set_link_info(int interface, int port,
+ cvmx_helper_link_info_t link_info)
+{
+ struct cvmx_iface *piface;
+
+ if (interface >= cvmx_helper_get_number_of_interfaces())
+ return -1;
+
+ piface = &cvmx_interfaces[interface];
+
+ if (piface->cvif_ipd_port_link_info)
+ {
+ piface->cvif_ipd_port_link_info[port] = link_info;
+ return 0;
+ }
+
+ return -1;
+}
+
+cvmx_helper_link_info_t __cvmx_helper_get_link_info(int interface, int port)
+{
+ struct cvmx_iface *piface;
+ cvmx_helper_link_info_t err;
+
+ err.u64 = 0;
+
+ if (interface >= cvmx_helper_get_number_of_interfaces())
+ return err;
+ piface = &cvmx_interfaces[interface];
+
+ if (piface->cvif_ipd_port_link_info)
+ return piface->cvif_ipd_port_link_info[port];
+
+ return err;
+}
+
+int __cvmx_helper_get_has_fcs(int interface)
+{
+ return cvmx_interfaces[interface].cvif_has_fcs;
+}
+
+int cvmx_helper_get_pknd(int interface, int port)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ return __cvmx_helper_cfg_pknd(interface, port);
+
+ return CVMX_INVALID_PKND;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_get_pknd);
+#endif
+
+int cvmx_helper_get_bpid(int interface, int port)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ return __cvmx_helper_cfg_bpid(interface, port);
+
+ return CVMX_INVALID_BPID;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_get_bpid);
+#endif
+
+/**
+ * Display interface statistics.
+ *
+ * @param port IPD/PKO port number
+ *
+ * @return none
+ */
+void cvmx_helper_show_stats(int port)
+{
+ cvmx_pip_port_status_t status;
+ cvmx_pko_port_status_t pko_status;
+
+ /* ILK stats */
+ if (octeon_has_feature(OCTEON_FEATURE_ILK))
+ __cvmx_helper_ilk_show_stats();
+
+ /* PIP stats */
+ cvmx_pip_get_port_status (port, 0, &status);
+ cvmx_dprintf ("port %d: the number of packets - ipd: %d\n", port, (int)status.packets);
+
+ /* PKO stats */
+ cvmx_pko_get_port_status (port, 0, &pko_status);
+ cvmx_dprintf ("port %d: the number of packets - pko: %d\n", port, (int)pko_status.packets);
+
+ /* TODO: other stats */
+}
+#endif /* CVMX_ENABLE_HELPER_FUNCTIONS */
+
+/**
+ * Returns the interface number for an IPD/PKO port number.
+ *
+ * @param ipd_port IPD/PKO port number
+ *
+ * @return Interface number
+ */
+int cvmx_helper_get_interface_num(int ipd_port)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ if (ipd_port >= 0x800 && ipd_port < 0x900)
+ return 0;
+ else if (ipd_port >= 0x900 && ipd_port < 0xa00)
+ return 1;
+ else if (ipd_port >= 0xa00 && ipd_port < 0xb00)
+ return 2;
+ else if (ipd_port >= 0xb00 && ipd_port < 0xc00)
+ return 3;
+ else if (ipd_port >= 0xc00 && ipd_port < 0xd00)
+ return 4;
+ else if (ipd_port >= 0x400 && ipd_port < 0x500)
+ return 5;
+ else if (ipd_port >= 0x500 && ipd_port < 0x600)
+ return 6;
+ else if (ipd_port >= 0x100 && ipd_port < 0x120)
+ return 7;
+ else if (ipd_port < 8)
+ return 8;
+ } else {
+ if (ipd_port < 16)
+ return 0;
+ else if (ipd_port < 32)
+ return 1;
+ else if (ipd_port < 36)
+ return 2;
+ else if (ipd_port < 40)
+ return 3;
+ else if (ipd_port < 42)
+ return 4;
+ else if (ipd_port < 44)
+ return 5;
+ else if (ipd_port < 46)
+ return 6;
+ }
+ cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD port number %d\n", ipd_port);
+ return -1;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_get_interface_num);
+#endif
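+
+/* Editor's illustration (not part of the SDK source): this function is
+   the inverse of cvmx_helper_get_ipd_port(), so a roundtrip should be
+   stable. A hypothetical consistency check: */
+#if 0 /* example only */
+static void example_check_port_mapping(int interface, int port)
+{
+    int ipd_port = cvmx_helper_get_ipd_port(interface, port);
+    if (ipd_port >= 0 && cvmx_helper_get_interface_num(ipd_port) != interface)
+        cvmx_dprintf("port mapping mismatch: %d/%d -> %d\n",
+                     interface, port, ipd_port);
+}
+#endif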
+
+
+/**
+ * Returns the interface index number for an IPD/PKO port
+ * number.
+ *
+ * @param ipd_port IPD/PKO port number
+ *
+ * @return Interface index number
+ */
+int cvmx_helper_get_interface_index_num(int ipd_port)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ if (ipd_port >= 0x800 && ipd_port < 0xd00)
+ {
+ int port = ((ipd_port & 0xff) >> 6);
+ return ((port) ? (port - 1) : ((ipd_port & 0xff) >> 4));
+ }
+ else if (ipd_port >= 0x400 && ipd_port < 0x600)
+ return (ipd_port & 0xff);
+ else if (ipd_port >= 0x100 && ipd_port < 0x120)
+ return (ipd_port & 0xff);
+ else if (ipd_port < 8)
+ return ipd_port;
+ else
+ cvmx_dprintf("cvmx_helper_get_interface_index_num: Illegal IPD port number %d\n", ipd_port);
+ return -1;
+ }
+ if (ipd_port < 32)
+ return ipd_port & 15;
+ else if (ipd_port < 40)
+ return ipd_port & 3;
+ else if (ipd_port < 44)
+ return ipd_port & 1;
+ else if (ipd_port < 46)
+ return ipd_port & 1;
+ else
+ cvmx_dprintf("cvmx_helper_get_interface_index_num: Illegal IPD port number\n");
+
+ return -1;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_get_interface_index_num);
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-util.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-util.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-util.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-util.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,355 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Small helper utilities.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_HELPER_UTIL_H__
+#define __CVMX_HELPER_UTIL_H__
+
+#include "cvmx.h"
+#include "cvmx-mio-defs.h"
+
+#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
+
+typedef char cvmx_pknd_t;
+typedef char cvmx_bpid_t;
+
+#define CVMX_INVALID_PKND ((cvmx_pknd_t) -1)
+#define CVMX_INVALID_BPID ((cvmx_bpid_t) -1)
+#define CVMX_MAX_PKND ((cvmx_pknd_t) 64)
+#define CVMX_MAX_BPID ((cvmx_bpid_t) 64)
+
+#define CVMX_HELPER_MAX_IFACE 9
+
+/**
+ * Convert an interface mode into a human-readable string
+ *
+ * @param mode Mode to convert
+ *
+ * @return String
+ */
+extern const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode);
+
+/**
+ * Debug routine to dump the packet structure to the console
+ *
+ * @param work Work queue entry containing the packet to dump
+ * @return Zero on success.
+ */
+extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
+
+/**
+ * Setup Random Early Drop on a specific input queue
+ *
+ * @param queue Input queue to setup RED on (0-7)
+ * @param pass_thresh
+ * Packets will begin slowly dropping when there are fewer than
+ * this many packet buffers free in FPA 0.
+ * @param drop_thresh
+ * All incoming packets will be dropped when there are fewer
+ * than this many free packet buffers in FPA 0.
+ * @return Zero on success. Negative on failure
+ */
+extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh);
+
+/**
+ * Setup Random Early Drop to automatically begin dropping packets.
+ *
+ * @param pass_thresh
+ * Packets will begin slowly dropping when there are fewer than
+ * this many packet buffers free in FPA 0.
+ * @param drop_thresh
+ * All incoming packets will be dropped when there are fewer
+ * than this many free packet buffers in FPA 0.
+ * @return Zero on success. Negative on failure
+ */
+extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
+
+
+/**
+ * Get the version of the CVMX libraries.
+ *
+ * @return Version string. Note this buffer is allocated statically
+ * and will be shared by all callers.
+ */
+extern const char *cvmx_helper_get_version(void);
+
+
+/**
+ * @INTERNAL
+ * Setup the common GMX settings that determine the number of
+ * ports. These settings apply to almost all configurations of all
+ * chips.
+ *
+ * @param interface Interface to configure
+ * @param num_ports Number of ports on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_setup_gmx(int interface, int num_ports);
+
+/**
+ * @INTERNAL
+ * Get the number of ipd_ports on an interface.
+ *
+ * @param interface
+ *
+ * @return the number of ipd_ports on the interface, or -1 on error.
+ */
+extern int __cvmx_helper_get_num_ipd_ports(int interface);
+
+/**
+ * @INTERNAL
+ * Get the number of pko_ports on an interface.
+ *
+ * @param interface
+ *
+ * @return the number of pko_ports on the interface.
+ */
+extern int __cvmx_helper_get_num_pko_ports(int interface);
+
+/**
+ * @INTERNAL
+ * Store the link state for a port on an interface.
+ *
+ * @param interface
+ * @param port
+ * @param link_info
+ *
+ * @return 0 for success and -1 for failure
+ */
+extern int __cvmx_helper_set_link_info(int interface, int port,
+ cvmx_helper_link_info_t link_info);
+
+/**
+ * @INTERNAL
+ * Return the stored link state for a port on an interface.
+ *
+ * @param interface
+ * @param port
+ *
+ * @return the stored link_info on success, or a zeroed link_info on failure
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_get_link_info(int interface,
+ int port);
+
+enum cvmx_pko_padding {
+ CVMX_PKO_PADDING_NONE = 0,
+ CVMX_PKO_PADDING_60 = 1,
+};
+
+/**
+ * @INTERNAL
+ *
+ * @param interface
+ * @param num_ipd_ports is the number of ipd_ports on the interface
+ * @param has_fcs indicates if PKO does FCS for the ports on this
+ *                interface.
+ * @param pad The padding that PKO should apply.
+ *
+ * @return 0 for success and -1 for failure
+ */
+extern int __cvmx_helper_init_interface(int interface, int num_ipd_ports, int has_fcs, enum cvmx_pko_padding pad);
+
+/**
+ * @INTERNAL
+ *
+ * @param interface
+ *
+ * @return 0 if PKO does not do FCS and 1 otherwise.
+ */
+extern int __cvmx_helper_get_has_fcs(int interface);
+
+
+extern enum cvmx_pko_padding __cvmx_helper_get_pko_padding(int interface);
+
+/**
+ * Returns the IPD port number for a port on the given
+ * interface.
+ *
+ * @param interface Interface to use
+ * @param port Port on the interface
+ *
+ * @return IPD port number
+ */
+extern int cvmx_helper_get_ipd_port(int interface, int port);
+
+/**
+ * Returns the PKO port number for a port on the given interface.
+ * This is the base pko_port for o68 and the ipd_port for older models.
+ *
+ * @param interface Interface to use
+ * @param port Port on the interface
+ *
+ * @return PKO port number, or -1 on error.
+ */
+extern int cvmx_helper_get_pko_port(int interface, int port);
+
+/**
+ * Returns the IPD/PKO port number for the first port on the given
+ * interface.
+ *
+ * @param interface Interface to use
+ *
+ * @return IPD/PKO port number
+ */
+static inline int cvmx_helper_get_first_ipd_port(int interface)
+{
+ return (cvmx_helper_get_ipd_port (interface, 0));
+}
+
+/**
+ * Returns the IPD/PKO port number for the last port on the given
+ * interface.
+ *
+ * @param interface Interface to use
+ *
+ * @return IPD/PKO port number
+ */
+static inline int cvmx_helper_get_last_ipd_port (int interface)
+{
+ return (cvmx_helper_get_first_ipd_port (interface) +
+ cvmx_helper_ports_on_interface (interface) - 1);
+}
+
+
+/**
+ * Free the packet buffers contained in a work queue entry.
+ * The work queue entry is not freed.
+ *
+ * @param work Work queue entry with packet to free
+ */
+static inline void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
+{
+ uint64_t number_buffers;
+ cvmx_buf_ptr_t buffer_ptr;
+ cvmx_buf_ptr_t next_buffer_ptr;
+ uint64_t start_of_buffer;
+
+ number_buffers = work->word2.s.bufs;
+ if (number_buffers == 0)
+ return;
+ buffer_ptr = work->packet_ptr;
+
+ /* Since the number of buffers is not zero, we know this is not a dynamic
+ short packet. We need to check if it is a packet received with
+ IPD_CTL_STATUS[NO_WPTR]. If this is true, we need to free all buffers
+ except for the first one. The caller doesn't expect their WQE pointer
+ to be freed */
+ start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
+ if (cvmx_ptr_to_phys(work) == start_of_buffer)
+ {
+ next_buffer_ptr = *(cvmx_buf_ptr_t*)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+ buffer_ptr = next_buffer_ptr;
+ number_buffers--;
+ }
+
+ while (number_buffers--)
+ {
+ /* Remember the back pointer is in cache lines, not 64-bit words */
+ start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
+ /* Read pointer to next buffer before we free the current buffer. */
+ next_buffer_ptr = *(cvmx_buf_ptr_t*)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+ cvmx_fpa_free(cvmx_phys_to_ptr(start_of_buffer), buffer_ptr.s.pool, 0);
+ buffer_ptr = next_buffer_ptr;
+ }
+}
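+
+/* Editor's note (not part of the SDK source): a worked example of the
+   back-pointer arithmetic above. The back field counts 128-byte cache
+   lines, so with addr = 0x10203c0 and back = 2:
+   start_of_buffer = ((0x10203c0 >> 7) - 2) << 7 = 0x1020280,
+   i.e. the buffer begins two cache lines before the line holding addr. */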
+
+#endif /* CVMX_ENABLE_HELPER_FUNCTIONS */
+
+/**
+ * Returns the interface number for an IPD/PKO port number.
+ *
+ * @param ipd_port IPD/PKO port number
+ *
+ * @return Interface number
+ */
+extern int cvmx_helper_get_interface_num(int ipd_port);
+
+/**
+ * Returns the interface index number for an IPD/PKO port
+ * number.
+ *
+ * @param ipd_port IPD/PKO port number
+ *
+ * @return Interface index number
+ */
+extern int cvmx_helper_get_interface_index_num(int ipd_port);
+
+/**
+ * Get port kind for a given port in an interface.
+ *
+ * @param interface Interface
+ * @param port index of the port in the interface
+ *
+ * @return port kind on success, or -1 on failure
+ */
+extern int cvmx_helper_get_pknd(int interface, int port);
+
+/**
+ * Get bpid for a given port in an interface.
+ *
+ * @param interface Interface
+ * @param port index of the port in the interface
+ *
+ * @return bpid on success, or -1 on failure
+ */
+extern int cvmx_helper_get_bpid(int interface, int port);
+
+
+/**
+ * Internal functions.
+ */
+extern int __cvmx_helper_post_init_interfaces(void);
+extern void __cvmx_helper_shutdown_interfaces(void);
+
+extern void cvmx_helper_show_stats(int port);
+
+#endif /* __CVMX_HELPER_UTIL_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-util.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-xaui.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-xaui.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-xaui.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,477 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for XAUI initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+#include <asm/octeon/cvmx-qlm.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-cfg.h>
+#endif
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-pcsx-defs.h>
+#include <asm/octeon/cvmx-pcsxx-defs.h>
+#include <asm/octeon/cvmx-ciu-defs.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+#include "cvmx.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-cfg.h"
+#include "cvmx-qlm.h"
+#endif
+#else
+#include "cvmx.h"
+#include "cvmx-helper.h"
+#include "cvmx-qlm.h"
+#endif
+#endif
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+int __cvmx_helper_xaui_enumerate(int interface)
+{
+ union cvmx_gmxx_hg2_control gmx_hg2_control;
+
+ /* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
+ gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
+ if (gmx_hg2_control.s.hg2tx_en)
+ return 16;
+ else
+ return 1;
+}
+
+/**
+ * @INTERNAL
+ * Probe a XAUI interface and determine the number of ports
+ * connected to it. The XAUI interface should still be down
+ * after this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_xaui_probe(int interface)
+{
+ int i;
+ cvmx_gmxx_inf_mode_t mode;
+
+ /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
+ {
+ cvmx_ciu_qlm2_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM2);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 0x5;
+ ciu_qlm.s.txmargin = 0x1a;
+ cvmx_write_csr(CVMX_CIU_QLM2, ciu_qlm.u64);
+ }
+
+ /* CN63XX Pass 2.0 and 2.1 errata G-15273 requires the QLM De-emphasis be
+ programmed when using a 156.25 MHz ref clock */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_1))
+ {
+ /* Read the QLM speed pins */
+ cvmx_mio_rst_boot_t mio_rst_boot;
+ mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+
+ if (mio_rst_boot.cn63xx.qlm2_spd == 0xb)
+ {
+ cvmx_ciu_qlm2_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM2);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 0xa;
+ ciu_qlm.s.txmargin = 0x1f;
+ cvmx_write_csr(CVMX_CIU_QLM2, ciu_qlm.u64);
+ }
+ }
+
+ /* Check if the QLM is configured correctly for XAUI/RXAUI; verify the
+ speed as well as the mode */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ int qlm, status;
+
+ qlm = cvmx_qlm_interface(interface);
+ status = cvmx_qlm_get_status(qlm);
+ if (status != 2 && status != 10)
+ return 0;
+ }
+
+ /* Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the interface
+ needs to be enabled before IPD otherwise per port backpressure
+ may not work properly */
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+ mode.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+
+ __cvmx_helper_setup_gmx(interface, 1);
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ /* Setup PKO to support 16 ports for HiGig2 virtual ports. We're pointing
+ all of the PKO packet ports for this interface to the XAUI. This allows
+ us to use HiGig2 backpressure per port */
+ for (i=0; i<16; i++)
+ {
+ cvmx_pko_mem_port_ptrs_t pko_mem_port_ptrs;
+ pko_mem_port_ptrs.u64 = 0;
+ /* We set each PKO port to have equal priority in a round robin
+ fashion */
+ pko_mem_port_ptrs.s.static_p = 0;
+ pko_mem_port_ptrs.s.qos_mask = 0xff;
+ /* All PKO ports map to the same XAUI hardware port */
+ pko_mem_port_ptrs.s.eid = interface*4;
+ pko_mem_port_ptrs.s.pid = interface*16 + i;
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
+ }
+ }
+
+ return __cvmx_helper_xaui_enumerate(interface);
+}
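+
+/* Editor's note (not part of the SDK source): a worked example of the
+   PKO fan-out configured above. For interface 1, the sixteen PKO ports
+   pid = 16..31 all map to engine eid = 4, so HiGig2 backpressure can
+   throttle each logical port even though they share one XAUI link. */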
+
+/**
+ * @INTERNAL
+ * Bring up the XAUI interface. After this call, packet I/O should be
+ * fully functional.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_xaui_link_init(int interface)
+{
+ cvmx_gmxx_prtx_cfg_t gmx_cfg;
+ cvmx_pcsxx_control1_reg_t xauiCtl;
+ cvmx_pcsxx_misc_ctl_reg_t xauiMiscCtl;
+ cvmx_gmxx_tx_xaui_ctl_t gmxXauiTxCtl;
+
+ /* (1) Interface has already been enabled. */
+
+ /* (2) Disable GMX. */
+ xauiMiscCtl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface));
+ xauiMiscCtl.s.gmxeno = 1;
+ cvmx_write_csr (CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
+
+ /* (3) Disable GMX and PCSX interrupts. */
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0,interface), 0x0);
+ cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
+ cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
+
+ /* (4) Bring up the PCSX and GMX reconciliation layer. */
+ /* (4)a Set polarity and lane swapping. */
+ /* (4)b */
+ gmxXauiTxCtl.u64 = cvmx_read_csr (CVMX_GMXX_TX_XAUI_CTL(interface));
+ gmxXauiTxCtl.s.dic_en = 1; /* Enable better IFG packing to improve performance */
+ gmxXauiTxCtl.s.uni_en = 0;
+ cvmx_write_csr (CVMX_GMXX_TX_XAUI_CTL(interface), gmxXauiTxCtl.u64);
+
+ /* (4)c Apply reset sequence */
+ xauiCtl.u64 = cvmx_read_csr (CVMX_PCSXX_CONTROL1_REG(interface));
+ xauiCtl.s.lo_pwr = 0;
+
+ /* Errata G-15618 requires disabling PCS soft reset in some OCTEON II models. */
+ if (!OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)
+ && !OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)
+ && !OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_1)
+ && !OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X)
+ && !OCTEON_IS_MODEL(OCTEON_CN68XX))
+ xauiCtl.s.reset = 1;
+ cvmx_write_csr (CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);
+
+ /* Wait for PCS to come out of reset */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_CONTROL1_REG(interface), cvmx_pcsxx_control1_reg_t, reset, ==, 0, 10000))
+ return -1;
+ /* Wait for PCS to be aligned */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_10GBX_STATUS_REG(interface), cvmx_pcsxx_10gbx_status_reg_t, alignd, ==, 1, 10000))
+ return -1;
+ /* Wait for RX to be ready */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_RX_XAUI_CTL(interface), cvmx_gmxx_rx_xaui_ctl_t, status, ==, 0, 10000))
+ return -1;
+
+ /* (6) Configure GMX */
+
+ /* Wait for GMX RX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface), cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1, 10000))
+ return -1;
+ /* Wait for GMX TX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface), cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1, 10000))
+ return -1;
+
+ /* GMX configure */
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+ gmx_cfg.s.speed = 1;
+ gmx_cfg.s.speed_msb = 0;
+ gmx_cfg.s.slottime = 1;
+ cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), 1);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+ /* Wait for receive link */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS1_REG(interface), cvmx_pcsxx_status1_reg_t, rcv_lnk, ==, 1, 10000))
+ return -1;
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface), cvmx_pcsxx_status2_reg_t, xmtflt, ==, 0, 10000))
+ return -1;
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface), cvmx_pcsxx_status2_reg_t, rcvflt, ==, 0, 10000))
+ return -1;
+
+ /* (8) Enable packet reception */
+ xauiMiscCtl.s.gmxeno = 0;
+ cvmx_write_csr (CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
+
+ /* Clear all error interrupts before enabling the interface. */
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(0,interface), ~0x0ull);
+ cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface), ~0x0ull);
+ cvmx_write_csr(CVMX_PCSXX_INT_REG(interface), ~0x0ull);
+
+ /* Enable GMX */
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Bring up and enable a XAUI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_enable(int interface)
+{
+ /* Setup PKND and BPID */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ cvmx_gmxx_bpid_msk_t bpid_msk;
+ cvmx_gmxx_bpid_mapx_t bpid_map;
+ cvmx_gmxx_prtx_cfg_t gmxx_prtx_cfg;
+ cvmx_gmxx_txx_append_t gmxx_txx_append_cfg;
+
+ /* Setup PKIND */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+ gmxx_prtx_cfg.s.pknd = cvmx_helper_get_pknd(interface, 0);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmxx_prtx_cfg.u64);
+
+ /* Setup BPID */
+ bpid_map.u64 = cvmx_read_csr(CVMX_GMXX_BPID_MAPX(0, interface));
+ bpid_map.s.val = 1;
+ bpid_map.s.bpid = cvmx_helper_get_bpid(interface, 0);
+ cvmx_write_csr(CVMX_GMXX_BPID_MAPX(0, interface), bpid_map.u64);
+
+ bpid_msk.u64 = cvmx_read_csr(CVMX_GMXX_BPID_MSK(interface));
+ bpid_msk.s.msk_or |= 1;
+ bpid_msk.s.msk_and &= ~1;
+ cvmx_write_csr(CVMX_GMXX_BPID_MSK(interface), bpid_msk.u64);
+
+ /* CN68XX adds the padding and FCS in PKO, not GMX */
+ gmxx_txx_append_cfg.u64 = cvmx_read_csr(CVMX_GMXX_TXX_APPEND(0, interface));
+ gmxx_txx_append_cfg.s.fcs = 0;
+ gmxx_txx_append_cfg.s.pad = 0;
+ cvmx_write_csr(CVMX_GMXX_TXX_APPEND(0, interface), gmxx_txx_append_cfg.u64);
+ }
+
+ __cvmx_helper_xaui_link_init(interface);
+
+ return 0;
+}
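+
+/* Illustrative sketch only (not part of the SDK sources): how the
+   @INTERNAL XAUI helpers above fit together. In practice they are
+   driven by the generic cvmx-helper init path rather than called
+   directly; the function name here is hypothetical. */
+#if 0
+static int example_xaui_bringup(int interface)
+{
+    /* Probe first so the port count is known; zero or negative means
+       the interface is unusable and should be skipped */
+    if (__cvmx_helper_xaui_probe(interface) <= 0)
+        return -1;
+    /* Enable packet I/O; this also runs the link init sequence */
+    return __cvmx_helper_xaui_enable(interface);
+}
+#endif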
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ cvmx_gmxx_tx_xaui_ctl_t gmxx_tx_xaui_ctl;
+ cvmx_gmxx_rx_xaui_ctl_t gmxx_rx_xaui_ctl;
+ cvmx_pcsxx_status1_reg_t pcsxx_status1_reg;
+ cvmx_helper_link_info_t result;
+
+ gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+ gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
+ pcsxx_status1_reg.u64 = cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(interface));
+ result.u64 = 0;
+
+ /* Only return a link if both RX and TX are happy */
+ if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0) &&
+ (pcsxx_status1_reg.s.rcv_lnk == 1))
+ {
+ cvmx_pcsxx_misc_ctl_reg_t misc_ctl;
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ cvmx_mio_qlmx_cfg_t qlm_cfg;
+ int lanes;
+ int qlm = (interface == 1) ? 0 : interface;
+
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
+ result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
+ lanes = (qlm_cfg.s.qlm_cfg == 7) ? 2 : 4;
+ result.s.speed *= lanes;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ int qlm = cvmx_qlm_interface(interface);
+ result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
+ result.s.speed *= 4;
+ }
+ else
+ result.s.speed = 10000;
+ misc_ctl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface));
+ if (misc_ctl.s.gmxeno)
+ __cvmx_helper_xaui_link_init(interface);
+ }
+ else
+ {
+ /* Disable GMX and PCSX interrupts. */
+ cvmx_write_csr (CVMX_GMXX_RXX_INT_EN(0,interface), 0x0);
+ cvmx_write_csr (CVMX_GMXX_TX_INT_EN(interface), 0x0);
+ cvmx_write_csr (CVMX_PCSXX_INT_EN_REG(interface), 0x0);
+ }
+ return result;
+}
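+
+/* Illustrative sketch only: polling the XAUI link state through the
+   helper above. The function name is hypothetical; real code would
+   obtain ipd_port from cvmx_helper_get_ipd_port(). */
+#if 0
+static void example_poll_xaui_link(int ipd_port)
+{
+    cvmx_helper_link_info_t link = __cvmx_helper_xaui_link_get(ipd_port);
+    if (link.s.link_up)
+        cvmx_dprintf("Port %d up at %d Mbps, %s duplex\n", ipd_port,
+                     (int)link.s.speed, link.s.full_duplex ? "full" : "half");
+    else
+        cvmx_dprintf("Port %d link down\n", ipd_port);
+}
+#endif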
+
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ cvmx_gmxx_tx_xaui_ctl_t gmxx_tx_xaui_ctl;
+ cvmx_gmxx_rx_xaui_ctl_t gmxx_rx_xaui_ctl;
+
+ gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+ gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
+
+ /* If the link shouldn't be up, then just return */
+ if (!link_info.s.link_up)
+ return 0;
+
+ /* Do nothing if both RX and TX are happy */
+ if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0))
+ return 0;
+
+ /* Bring the link up */
+ return __cvmx_helper_xaui_link_init(interface);
+}
+
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ * Non zero if you want internal loopback
+ * @param enable_external
+ * Non zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+extern int __cvmx_helper_xaui_configure_loopback(int ipd_port, int enable_internal, int enable_external)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ cvmx_pcsxx_control1_reg_t pcsxx_control1_reg;
+ cvmx_gmxx_xaui_ext_loopback_t gmxx_xaui_ext_loopback;
+
+ /* Set the internal loop */
+ pcsxx_control1_reg.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
+ pcsxx_control1_reg.s.loopbck1 = enable_internal;
+ cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), pcsxx_control1_reg.u64);
+
+ /* Set the external loop */
+ gmxx_xaui_ext_loopback.u64 = cvmx_read_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
+ gmxx_xaui_ext_loopback.s.en = enable_external;
+ cvmx_write_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface), gmxx_xaui_ext_loopback.u64);
+
+ /* Take the link through a reset */
+ return __cvmx_helper_xaui_link_init(interface);
+}
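+
+/* Illustrative sketch only: enabling internal loopback on a port so
+   transmitted packets are received back by Octeon, e.g. for a
+   self-test. The function name is hypothetical. */
+#if 0
+static int example_xaui_internal_loopback(int ipd_port)
+{
+    /* Internal loopback on, external loopback off */
+    return __cvmx_helper_xaui_configure_loopback(ipd_port, 1, 0);
+}
+#endif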
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-xaui.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper-xaui.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper-xaui.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper-xaui.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,128 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for XAUI initialization, configuration,
+ * and monitoring.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __CVMX_HELPER_XAUI_H__
+#define __CVMX_HELPER_XAUI_H__
+
+/**
+ * @INTERNAL
+ * Probe a XAUI interface and determine the number of ports
+ * connected to it. The XAUI interface should still be down
+ * after this call.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_xaui_probe(int interface);
+extern int __cvmx_helper_xaui_enumerate(int interface);
+
+/**
+ * @INTERNAL
+ * Bring up and enable a XAUI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_xaui_enable(int interface);
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port);
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ * Non zero if you want internal loopback
+ * @param enable_external
+ * Non zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+extern int __cvmx_helper_xaui_configure_loopback(int ipd_port, int enable_internal, int enable_external);
+
+#endif
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper-xaui.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,2000 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Helper functions for common, but complicated tasks.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#include <asm/octeon/cvmx-sriox-defs.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-mio-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-pip-defs.h>
+#include <asm/octeon/cvmx-asxx-defs.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-smix-defs.h>
+#include <asm/octeon/cvmx-dbg-defs.h>
+#include <asm/octeon/cvmx-sso-defs.h>
+
+#include <asm/octeon/cvmx-gmx.h>
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-spi.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+#include <asm/octeon/cvmx-helper-errata.h>
+#include <asm/octeon/cvmx-helper-cfg.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#endif
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-version.h"
+#include "cvmx-helper-check-defines.h"
+#include "cvmx-gmx.h"
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "cvmx-error.h"
+#include "cvmx-config.h"
+#endif
+
+#include "cvmx-fpa.h"
+#include "cvmx-pip.h"
+#include "cvmx-pko.h"
+#include "cvmx-ipd.h"
+#include "cvmx-spi.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+#include "cvmx-helper-errata.h"
+#include "cvmx-helper-cfg.h"
+#endif
+
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+/**
+ * cvmx_override_pko_queue_priority(int pko_port, uint64_t
+ * priorities[16]) is a function pointer. It is meant to allow
+ * customization of the PKO queue priorities based on the port
+ * number. Users should set this pointer to a function before
+ * calling any cvmx-helper operations.
+ */
+CVMX_SHARED void (*cvmx_override_pko_queue_priority)(int ipd_port,
+ uint64_t *priorities) = NULL;
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_override_pko_queue_priority);
+#endif
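+
+/* Illustrative sketch only: a hypothetical override that restores the
+   strict descending priorities shown in the commented-out vector in
+   __cvmx_helper_interface_setup_pko() below. Install the hook before
+   any cvmx-helper operations, as documented above. */
+#if 0
+static void example_pko_priorities(int ipd_port, uint64_t *priorities)
+{
+    int i;
+    for (i = 0; i < 16; i++)
+        priorities[i] = 8 - (i % 8); /* 8,7,...,1, repeated */
+}
+/* ... cvmx_override_pko_queue_priority = example_pko_priorities; ... */
+#endif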
+
+/**
+ * cvmx_override_ipd_port_setup(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the IPD
+ * port/port kind setup before packet input/output comes online.
+ * It is called after cvmx-helper does the default IPD configuration,
+ * but before IPD is enabled. Users should set this pointer to a
+ * function before calling any cvmx-helper operations.
+ */
+CVMX_SHARED void (*cvmx_override_ipd_port_setup)(int ipd_port) = NULL;
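+
+/* Illustrative sketch only: a hypothetical override that moves all
+   packets from one port into POW group 1 instead of the default
+   group 0 set by __cvmx_helper_port_setup_ipd() below. The hook runs
+   after the default IPD configuration, so a read-modify-write
+   suffices. Indexing by ipd_port assumes a non-PKND chip; PKND chips
+   index these registers by port kind. */
+#if 0
+static void example_ipd_port_setup(int ipd_port)
+{
+    cvmx_pip_prt_tagx_t tag_config;
+    tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));
+    tag_config.s.grp = 1;
+    cvmx_write_csr(CVMX_PIP_PRT_TAGX(ipd_port), tag_config.u64);
+}
+/* ... cvmx_override_ipd_port_setup = example_ipd_port_setup; ... */
+#endif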
+
+/**
+ * Return the number of interfaces the chip has. Each interface
+ * may have multiple ports. Most chips support two interfaces,
+ * but the CNX0XX and CNX1XX are exceptions. These only support
+ * one interface.
+ *
+ * @return Number of interfaces on chip
+ */
+int cvmx_helper_get_number_of_interfaces(void)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+#if defined(OCTEON_VENDOR_LANNER)
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR955:
+ return 2;
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
+ return 1;
+#endif
+#if defined(OCTEON_VENDOR_RADISYS)
+ case CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE:
+ return 2;
+#endif
+ default:
+ break;
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return 9;
+ else if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
+ return 7;
+ else
+ return 8;
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return 6;
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN61XX))
+ return 4;
+ else
+ return 3;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_get_number_of_interfaces);
+#endif
+
+
+/**
+ * Return the number of ports on an interface. Depending on the
+ * chip and configuration, this can be 1-16. A value of 0
+ * specifies that the interface doesn't exist or isn't usable.
+ *
+ * @param interface Interface to get the port count for
+ *
+ * @return Number of ports on interface. Can be Zero.
+ */
+int cvmx_helper_ports_on_interface(int interface)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ return cvmx_helper_interface_enumerate(interface);
+ else
+ return __cvmx_helper_get_num_ipd_ports(interface);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_ports_on_interface);
+#endif
+
+
+/**
+ * Get the operating mode of an interface. Depending on the Octeon
+ * chip and configuration, this function returns an enumeration
+ * of the type of packet I/O supported by an interface.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Mode of the interface. Unknown or unsupported interfaces return
+ * DISABLED.
+ */
+cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
+{
+ cvmx_gmxx_inf_mode_t mode;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ cvmx_mio_qlmx_cfg_t qlm_cfg;
+ switch(interface)
+ {
+ case 0:
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlm_cfg.s.qlm_spd == 15)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (qlm_cfg.s.qlm_cfg == 7)
+ return CVMX_HELPER_INTERFACE_MODE_RXAUI;
+ else if (qlm_cfg.s.qlm_cfg == 2)
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ else if (qlm_cfg.s.qlm_cfg == 3)
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ break;
+ case 1:
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlm_cfg.s.qlm_spd == 15)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (qlm_cfg.s.qlm_cfg == 7)
+ return CVMX_HELPER_INTERFACE_MODE_RXAUI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ break;
+ case 2:
+ case 3:
+ case 4:
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlm_cfg.s.qlm_spd == 15)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (qlm_cfg.s.qlm_cfg == 2)
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ else if (qlm_cfg.s.qlm_cfg == 3)
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ break;
+ case 5:
+ case 6:
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface - 4));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlm_cfg.s.qlm_spd == 15)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (qlm_cfg.s.qlm_cfg == 1)
+ {
+ return CVMX_HELPER_INTERFACE_MODE_ILK;
+ }
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ break;
+ case 7:
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(3));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlm_cfg.s.qlm_spd == 15)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ else if (qlm_cfg.s.qlm_cfg != 0)
+ {
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
+ if (qlm_cfg.s.qlm_cfg != 0)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+ return CVMX_HELPER_INTERFACE_MODE_NPI;
+ break;
+ case 8:
+ return CVMX_HELPER_INTERFACE_MODE_LOOP;
+ break;
+ default:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ break;
+ }
+ }
+
+ if (interface == 2)
+ return CVMX_HELPER_INTERFACE_MODE_NPI;
+
+ if (interface == 3)
+ {
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX)
+ || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ return CVMX_HELPER_INTERFACE_MODE_LOOP;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+
+ /* Only present in CN63XX & CN66XX Octeon model */
+ if ((OCTEON_IS_MODEL(OCTEON_CN63XX) && (interface == 4 || interface == 5))
+ || (OCTEON_IS_MODEL(OCTEON_CN66XX) && interface >= 4 && interface <= 7))
+ {
+ cvmx_sriox_status_reg_t sriox_status_reg;
+
+ /* cn66xx pass1.0 has only 2 SRIO interfaces. */
+ if ((interface == 5 || interface == 7) && OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(interface-4));
+ if (sriox_status_reg.s.srio)
+ return CVMX_HELPER_INTERFACE_MODE_SRIO;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+
+ /* Interface 5 always disabled in CN66XX */
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+ {
+ cvmx_mio_qlmx_cfg_t mio_qlm_cfg;
+
+ /* QLM2 is SGMII0 and QLM1 is SGMII1 */
+ if (interface == 0)
+ mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
+ else if (interface == 1)
+ mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (mio_qlm_cfg.s.qlm_spd == 15)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (mio_qlm_cfg.s.qlm_cfg == 9)
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ else if (mio_qlm_cfg.s.qlm_cfg == 11)
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN61XX))
+ {
+ cvmx_mio_qlmx_cfg_t qlm_cfg;
+
+ if (interface == 0)
+ {
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
+ if (qlm_cfg.s.qlm_cfg == 2)
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ else if (qlm_cfg.s.qlm_cfg == 3)
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+ else if (interface == 1)
+ {
+ /* If QLM 1 is in PEM0/PEM1 mode, then QLM0 cannot be SGMII/XAUI */
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
+ if (qlm_cfg.s.qlm_cfg == 1)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+ if (qlm_cfg.s.qlm_cfg == 2)
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ else if (qlm_cfg.s.qlm_cfg == 3)
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+ }
+
+ if (interface == 0 && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5 && cvmx_sysinfo_get()->board_rev_major == 1)
+ {
+ /* Lie about interface type of CN3005 board. This board has a switch on port 1 like
+ ** the other evaluation boards, but it is connected over RGMII instead of GMII. Report
+ ** GMII mode so that the speed is forced to 1 Gbit full duplex. Other than some initial configuration
+ ** (which does not use the output of this function) there is no difference in setup between GMII and RGMII modes.
+ */
+ return CVMX_HELPER_INTERFACE_MODE_GMII;
+ }
+
+ /* Interface 1 is always disabled on CN31XX and CN30XX */
+ if ((interface == 1)
+ && (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN30XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN63XX)
+ || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ {
+ switch(mode.cn56xx.mode)
+ {
+ case 0: return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ case 1: return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ case 2: return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ case 3: return CVMX_HELPER_INTERFACE_MODE_PICMG;
+ default:return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ {
+ switch(mode.cn63xx.mode)
+ {
+ case 0: return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ case 1: return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ default: return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+ }
+ else
+ {
+ if (!mode.s.en)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (mode.s.type)
+ {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ return CVMX_HELPER_INTERFACE_MODE_SPI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_GMII;
+ }
+ else
+ return CVMX_HELPER_INTERFACE_MODE_RGMII;
+ }
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_interface_get_mode);
+#endif
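+
+/* Illustrative sketch only: walking every interface and reporting its
+   mode and port count with the helpers in this file, mirroring the
+   summary printed by cvmx_helper_initialize_packet_io_global(). The
+   function name is hypothetical. */
+#if 0
+static void example_show_interfaces(void)
+{
+    int num_interfaces = cvmx_helper_get_number_of_interfaces();
+    int interface;
+    for (interface = 0; interface < num_interfaces; interface++)
+        cvmx_dprintf("Interface %d: %s, %d ports\n", interface,
+            cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(interface)),
+            cvmx_helper_ports_on_interface(interface));
+}
+#endif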
+
+/**
+ * @INTERNAL
+ * Configure the IPD/PIP tagging and QoS options for a specific
+ * port. This function determines the POW work queue entry
+ * contents for a port. The setup performed here is controlled by
+ * the defines in executive-config.h.
+ *
+ * @param ipd_port Port/Port kind to configure. This follows the IPD numbering,
+ * not the per interface numbering
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_port_setup_ipd(int ipd_port)
+{
+ cvmx_pip_prt_cfgx_t port_config;
+ cvmx_pip_prt_tagx_t tag_config;
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ int interface, index, pknd;
+ cvmx_pip_prt_cfgbx_t prt_cfgbx;
+
+ interface = cvmx_helper_get_interface_num(ipd_port);
+ index = cvmx_helper_get_interface_index_num(ipd_port);
+ pknd = cvmx_helper_get_pknd(interface, index);
+
+ port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(pknd));
+ tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(pknd));
+
+ port_config.s.qos = pknd & 0x7;
+
+ /* Default BPID to use for packets on this port-kind */
+ prt_cfgbx.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGBX(pknd));
+ prt_cfgbx.s.bpid = pknd;
+ cvmx_write_csr(CVMX_PIP_PRT_CFGBX(pknd), prt_cfgbx.u64);
+ }
+ else
+ {
+ port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));
+
+ /* Have each port go to a different POW queue */
+ port_config.s.qos = ipd_port & 0x7;
+ }
+
+ /* Process the headers and place the IP header in the work queue */
+ port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE;
+
+ tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP;
+ tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP;
+ tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT;
+ tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT;
+ tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER;
+ tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP;
+ tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP;
+ tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT;
+ tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT;
+ tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL;
+ tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT;
+ tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ /* Put all packets in group 0. Other groups can be used by the app */
+ tag_config.s.grp = 0;
+
+ cvmx_pip_config_port(ipd_port, port_config, tag_config);
+
+ /* Give the user a chance to override our setting for each port */
+ if (cvmx_override_ipd_port_setup)
+ cvmx_override_ipd_port_setup(ipd_port);
+
+ return 0;
+}
+
+/**
+ * Enable or disable FCS stripping for all the ports on an interface.
+ *
+ * @param interface Interface to configure
+ * @param nports Number of ports on the interface
+ * @param has_fcs Zero to disable FCS stripping, non-zero to enable it
+ *
+ * @return Zero on success
+ */
+static int cvmx_helper_fcs_op(int interface, int nports, int has_fcs)
+{
+ uint64_t port_bit;
+ int index;
+ int pknd;
+ cvmx_pip_sub_pkind_fcsx_t pkind_fcsx;
+ cvmx_pip_prt_cfgx_t port_cfg;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_PKND))
+ return 0;
+
+ port_bit = 0;
+ for (index = 0; index < nports; index++)
+ port_bit |= ((uint64_t)1 << cvmx_helper_get_pknd(interface, index));
+
+ pkind_fcsx.u64 = cvmx_read_csr(CVMX_PIP_SUB_PKIND_FCSX(0));
+ if (has_fcs)
+ pkind_fcsx.s.port_bit |= port_bit;
+ else
+ pkind_fcsx.s.port_bit &= ~port_bit;
+ cvmx_write_csr(CVMX_PIP_SUB_PKIND_FCSX(0), pkind_fcsx.u64);
+
+ for (pknd = 0; pknd < 64; pknd++)
+ {
+ if ((1ull << pknd) & port_bit)
+ {
+ port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(pknd));
+ port_cfg.s.crc_en = (has_fcs) ? 1 : 0;
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Determine the actual number of hardware ports connected to an
+ * interface. It doesn't setup the ports or enable them.
+ *
+ * @param interface Interface to enumerate
+ *
+ * @return The number of ports on the interface, negative on failure
+ */
+int cvmx_helper_interface_enumerate(int interface)
+{
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ /* XAUI is a single high speed port */
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ return __cvmx_helper_xaui_enumerate(interface);
+ /* RGMII/GMII/MII are all treated about the same. Most functions
+ refer to these ports as RGMII */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ return __cvmx_helper_rgmii_enumerate(interface);
+ /* SPI4 can have 1-16 ports depending on the device at the other end */
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ return __cvmx_helper_spi_enumerate(interface);
+ /* SGMII can have 1-4 ports depending on how many are hooked up */
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ return __cvmx_helper_sgmii_enumerate(interface);
+ /* PCI target Network Packet Interface */
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ return __cvmx_helper_npi_enumerate(interface);
+ /* Special loopback only ports. These are not the same
+ * as other ports in loopback mode */
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ return __cvmx_helper_loop_enumerate(interface);
+ /* SRIO has 2^N ports, where N is number of interfaces */
+ case CVMX_HELPER_INTERFACE_MODE_SRIO:
+ return __cvmx_helper_srio_enumerate(interface);
+
+ case CVMX_HELPER_INTERFACE_MODE_ILK:
+ return __cvmx_helper_ilk_enumerate(interface);
+ /* These types don't support ports to IPD/PKO */
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ default:
+ return 0;
+ }
+}
+
+/**
+ * This function probes an interface to determine the actual number of
+ * hardware ports connected to it. It does some setup of the ports but
+ * doesn't enable them. The main goal here is to set the global
+ * interface_port_count[interface] correctly. Final hardware setup of
+ * the ports will be performed later.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_helper_interface_probe(int interface)
+{
+ /* At this stage in the game we don't want packets to be moving yet.
+ The following probe calls should perform hardware setup
+ needed to determine port counts. Receive must still be disabled */
+ int nports;
+ int has_fcs;
+ enum cvmx_pko_padding padding = CVMX_PKO_PADDING_NONE;
+
+ nports = -1;
+ has_fcs = 0;
+ switch (cvmx_helper_interface_get_mode(interface))
+ {
+ /* These types don't support ports to IPD/PKO */
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ nports = 0;
+ break;
+ /* XAUI is a single high speed port */
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ nports = __cvmx_helper_xaui_probe(interface);
+ has_fcs = 1;
+ padding = CVMX_PKO_PADDING_60;
+ break;
+ /* RGMII/GMII/MII are all treated about the same. Most functions
+ refer to these ports as RGMII */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ nports = __cvmx_helper_rgmii_probe(interface);
+ padding = CVMX_PKO_PADDING_60;
+ break;
+ /* SPI4 can have 1-16 ports depending on the device at the other end */
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ nports = __cvmx_helper_spi_probe(interface);
+ padding = CVMX_PKO_PADDING_60;
+ break;
+ /* SGMII can have 1-4 ports depending on how many are hooked up */
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ padding = CVMX_PKO_PADDING_60;
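+ /* Intentional fall through: SGMII and PICMG share the probe below */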
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ nports = __cvmx_helper_sgmii_probe(interface);
+ has_fcs = 1;
+ break;
+ /* PCI target Network Packet Interface */
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ nports = __cvmx_helper_npi_probe(interface);
+ break;
+ /* Special loopback only ports. These are not the same as other ports
+ in loopback mode */
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ nports = __cvmx_helper_loop_probe(interface);
+ break;
+ /* SRIO has 2^N ports, where N is number of interfaces */
+ case CVMX_HELPER_INTERFACE_MODE_SRIO:
+ nports = __cvmx_helper_srio_probe(interface);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_ILK:
+ nports = __cvmx_helper_ilk_probe(interface);
+ has_fcs = 1;
+ padding = CVMX_PKO_PADDING_60;
+ break;
+ }
+
+ if (nports == -1)
+ return -1;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_PKND))
+ has_fcs = 0;
+
+ nports = __cvmx_helper_board_interface_probe(interface, nports);
+ __cvmx_helper_init_interface(interface, nports, has_fcs, padding);
+ cvmx_helper_fcs_op(interface, nports, has_fcs);
+
+ /* Make sure all global variables propagate to other cores */
+ CVMX_SYNCWS;
+
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Setup the IPD/PIP for the ports on an interface. Packet
+ * classification and tagging are set for every port on the
+ * interface. The number of ports on the interface must already
+ * have been probed.
+ *
+ * @param interface Interface to setup IPD/PIP for
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_interface_setup_ipd(int interface)
+{
+
+ cvmx_helper_interface_mode_t mode;
+ int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int delta;
+
+ if (num_ports == CVMX_HELPER_CFG_INVALID_VALUE)
+ return 0;
+
+ mode = cvmx_helper_interface_get_mode(interface);
+
+ if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
+ __cvmx_helper_loop_enable(interface);
+
+ delta = 1;
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII)
+ delta = 16;
+ }
+
+ while (num_ports--)
+ {
+ __cvmx_helper_port_setup_ipd(ipd_port);
+ ipd_port += delta;
+ }
+
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Setup global setting for IPD/PIP not related to a specific
+ * interface or port. This must be called before IPD is enabled.
+ *
+ * @return Zero on success, negative on failure.
+ */
+static int __cvmx_helper_global_setup_ipd(void)
+{
+#ifndef CVMX_HELPER_IPD_DRAM_MODE
+#define CVMX_HELPER_IPD_DRAM_MODE CVMX_IPD_OPC_MODE_STT
+#endif
+ /* Setup the global packet input options */
+ cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE/8,
+ CVMX_HELPER_FIRST_MBUFF_SKIP/8,
+ CVMX_HELPER_NOT_FIRST_MBUFF_SKIP/8,
+ (CVMX_HELPER_FIRST_MBUFF_SKIP+8) / 128, /* The +8 is to account for the next ptr */
+ (CVMX_HELPER_NOT_FIRST_MBUFF_SKIP+8) / 128, /* The +8 is to account for the next ptr */
+ CVMX_FPA_WQE_POOL,
+ CVMX_HELPER_IPD_DRAM_MODE,
+ 1);
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Setup the PKO for the ports on an interface. The number of
+ * queues per port and the priority of each PKO output queue
+ * is set here. PKO must be disabled when this function is called.
+ *
+ * @param interface Interface to setup PKO for
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_interface_setup_pko(int interface)
+{
+ /* Each packet output queue has an associated priority. The higher the
+ priority, the more often it can send a packet. A priority of 8 means
+ it can send in all 8 rounds of contention. We're going to make each
+ queue one less than the last.
+ The vector of priorities has been extended to support CN5xxx CPUs,
+ where up to 16 queues can be associated to a port.
+ To keep backward compatibility we don't change the initial 8
+ priorities and replicate them in the second half.
+ With per-core PKO queues (PKO lockless operation) all queues have
+ the same priority. */
+ /* uint64_t priorities[16] = {8,7,6,5,4,3,2,1,8,7,6,5,4,3,2,1}; */
+ uint64_t priorities[16] = {[0 ... 15] = 8};
+
+ /* Setup the IPD/PIP and PKO for the ports discovered above. Here packet
+ classification, tagging and output priorities are set */
+ int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ while (num_ports--)
+ {
+ /* Give the user a chance to override the per queue priorities */
+ if (cvmx_override_pko_queue_priority)
+ cvmx_override_pko_queue_priority(ipd_port, priorities);
+
+ cvmx_pko_config_port(ipd_port, cvmx_pko_get_base_queue_per_core(ipd_port, 0),
+ cvmx_pko_get_num_queues(ipd_port), priorities);
+ ipd_port++;
+ }
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Setup global setting for PKO not related to a specific
+ * interface or port. This must be called before PKO is enabled.
+ *
+ * @return Zero on success, negative on failure.
+ */
+static int __cvmx_helper_global_setup_pko(void)
+{
+ /* Disable tagwait FAU timeout. This needs to be done before anyone might
+ start packet output using tags */
+ cvmx_iob_fau_timeout_t fau_to;
+ fau_to.u64 = 0;
+ fau_to.s.tout_val = 0xfff;
+ fau_to.s.tout_enb = 0;
+ cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ cvmx_pko_reg_min_pkt_t min_pkt;
+
+ min_pkt.u64 = 0;
+ min_pkt.s.size1 = 59;
+ min_pkt.s.size2 = 59;
+ min_pkt.s.size3 = 59;
+ min_pkt.s.size4 = 59;
+ min_pkt.s.size5 = 59;
+ min_pkt.s.size6 = 59;
+ min_pkt.s.size7 = 59;
+ cvmx_write_csr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
+ }
+
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Setup global backpressure setting.
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_global_setup_backpressure(void)
+{
+#if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
+ /* Disable backpressure if configured to do so */
+ /* Disable backpressure (pause frame) generation */
+ int num_interfaces = cvmx_helper_get_number_of_interfaces();
+ int interface;
+ for (interface=0; interface<num_interfaces; interface++)
+ {
+ switch (cvmx_helper_interface_get_mode(interface))
+ {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ case CVMX_HELPER_INTERFACE_MODE_SRIO:
+ case CVMX_HELPER_INTERFACE_MODE_ILK:
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ cvmx_gmx_set_backpressure_override(interface, 0xf);
+ break;
+ }
+ }
+ //cvmx_dprintf("Disabling backpressure\n");
+#endif
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Verify the per-port IPD backpressure is aligned properly.
+ * @return Zero if working, non-zero if misaligned
+ */
+static int __cvmx_helper_backpressure_is_misaligned(void)
+{
+ uint64_t ipd_int_enb;
+ cvmx_ipd_ctl_status_t ipd_reg;
+ uint64_t bp_status0;
+ uint64_t bp_status1;
+ const int port0 = 0;
+ const int port1 = 16;
+ cvmx_helper_interface_mode_t mode0 = cvmx_helper_interface_get_mode(0);
+ cvmx_helper_interface_mode_t mode1 = cvmx_helper_interface_get_mode(1);
+
+ /* Disable error interrupts while we check backpressure */
+ ipd_int_enb = cvmx_read_csr(CVMX_IPD_INT_ENB);
+ cvmx_write_csr(CVMX_IPD_INT_ENB, 0);
+
+ /* Enable per port backpressure */
+ ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_reg.s.pbp_en = 1;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
+
+ if (mode0 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
+ {
+ /* Enable backpressure for port with a zero threshold */
+ cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port0), 1<<17);
+ /* Add 1000 to the page count to simulate packets coming in */
+ cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port0<<25) | 1000);
+ }
+
+ if (mode1 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
+ {
+ /* Enable backpressure for port with a zero threshold */
+ cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port1), 1<<17);
+ /* Add 1000 to the page count to simulate packets coming in */
+ cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port1<<25) | 1000);
+ }
+
+ /* Wait 500 cycles for the BP to update */
+ cvmx_wait(500);
+
+ /* Read the BP state from the debug select register */
+ switch (mode0)
+ {
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x9004);
+ bp_status0 = cvmx_read_csr(CVMX_DBG_DATA);
+ bp_status0 = 0xffff & ~bp_status0;
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x0e00);
+ bp_status0 = 0xffff & cvmx_read_csr(CVMX_DBG_DATA);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, 0x0e00);
+ bp_status0 = 0xffff & cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ break;
+ default:
+ bp_status0 = 1<<port0;
+ break;
+ }
+
+ /* Read the BP state from the debug select register */
+ switch (mode1)
+ {
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x9804);
+ bp_status1 = cvmx_read_csr(CVMX_DBG_DATA);
+ bp_status1 = 0xffff & ~bp_status1;
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x1600);
+ bp_status1 = 0xffff & cvmx_read_csr(CVMX_DBG_DATA);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, 0x1600);
+ bp_status1 = 0xffff & cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ break;
+ default:
+ bp_status1 = 1<<(port1-16);
+ break;
+ }
+
+ if (mode0 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
+ {
+ /* Shutdown BP */
+ cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port0<<25) | (0x1ffffff & -1000));
+ cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port0), 0);
+ }
+
+ if (mode1 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
+ {
+ /* Shutdown BP */
+ cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port1<<25) | (0x1ffffff & -1000));
+ cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port1), 0);
+ }
+
+ /* Clear any error interrupts that might have been set */
+ cvmx_write_csr(CVMX_IPD_INT_SUM, 0x1f);
+ cvmx_write_csr(CVMX_IPD_INT_ENB, ipd_int_enb);
+
+ return ((bp_status0 != 1ull<<port0) || (bp_status1 != 1ull<<(port1-16)));
+}
+
+
+/**
+ * @INTERNAL
+ * Enable packet input/output from the hardware. This function is
+ * called after all internal setup is complete and IPD is enabled.
+ * After this function completes, packets will be accepted from the
+ * hardware ports. PKO should still be disabled to make sure packets
+ * aren't sent out of partially set up hardware.
+ *
+ * @param interface Interface to enable
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_packet_hardware_enable(int interface)
+{
+ int result = 0;
+ switch (cvmx_helper_interface_get_mode(interface))
+ {
+ /* These types don't support ports to IPD/PKO */
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ /* Nothing to do */
+ break;
+ /* XAUI is a single high speed port */
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ result = __cvmx_helper_xaui_enable(interface);
+ break;
+ /* RGMII/GMII/MII are all treated about the same. Most functions
+ refer to these ports as RGMII */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ result = __cvmx_helper_rgmii_enable(interface);
+ break;
+ /* SPI4 can have 1-16 ports depending on the device at the other end */
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ result = __cvmx_helper_spi_enable(interface);
+ break;
+ /* SGMII can have 1-4 ports depending on how many are hooked up */
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ result = __cvmx_helper_sgmii_enable(interface);
+ break;
+ /* PCI target Network Packet Interface */
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ result = __cvmx_helper_npi_enable(interface);
+ break;
+ /* SRIO has 2^N ports, where N is number of interfaces */
+ case CVMX_HELPER_INTERFACE_MODE_SRIO:
+ result = __cvmx_helper_srio_enable(interface);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_ILK:
+ result = __cvmx_helper_ilk_enable(interface);
+ break;
+ }
+ result |= __cvmx_helper_board_hardware_enable(interface);
+ return result;
+}
+
+
+/**
+ * Called after all internal packet IO paths are setup. This
+ * function enables IPD/PIP and begins packet input and output.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_helper_ipd_and_packet_input_enable(void)
+{
+ int num_interfaces;
+ int interface;
+
+ /* Enable IPD */
+ cvmx_ipd_enable();
+
+ /* Time to enable hardware ports packet input and output. Note that at this
+ point IPD/PIP must be fully functional and PKO must be disabled */
+ num_interfaces = cvmx_helper_get_number_of_interfaces();
+ for (interface=0; interface<num_interfaces; interface++)
+ {
+ if (cvmx_helper_ports_on_interface(interface) > 0)
+ {
+ //cvmx_dprintf("Enabling packet I/O on interface %d\n", interface);
+ __cvmx_helper_packet_hardware_enable(interface);
+ }
+ }
+
+ /* Finally enable PKO now that the entire path is up and running */
+ cvmx_pko_enable();
+
+ if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1) || OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1)) &&
+ (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM))
+ __cvmx_helper_errata_fix_ipd_ptr_alignment();
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_ipd_and_packet_input_enable);
+#endif
+
+#define __CVMX_SSO_RWQ_SIZE 256
+
+int cvmx_helper_initialize_sso(int wqe_entries)
+{
+ int cvm_oct_sso_number_rwq_bufs;
+ char *mem;
+ int i;
+ cvmx_sso_cfg_t sso_cfg;
+ cvmx_fpa_fpfx_marks_t fpa_marks;
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return 0;
+
+ /*
+ * CN68XX-P1 may reset with the wrong values, put in
+ * the correct values.
+ */
+ fpa_marks.u64 = 0;
+ fpa_marks.s.fpf_wr = 0xa4;
+ fpa_marks.s.fpf_rd = 0x40;
+ cvmx_write_csr(CVMX_FPA_FPF8_MARKS, fpa_marks.u64);
+
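+ /* Sizing note (an assumption based on the 256-byte buffer size and
+ the 8 head/tail rings below): roughly 26 work queue entries fit per
+ RWQ buffer, so allocate ceil(wqe_entries/26) buffers plus 48 spare
+ and 8 to seed the per-QOS head/tail pointers */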
+ cvm_oct_sso_number_rwq_bufs = ((wqe_entries - 1) / 26) + 1 + 48 + 8;
+
+ mem = cvmx_bootmem_alloc(__CVMX_SSO_RWQ_SIZE * cvm_oct_sso_number_rwq_bufs, CVMX_CACHE_LINE_SIZE);
+ if (mem == NULL) {
+ cvmx_dprintf("Out of memory initializing sso pool\n");
+ return -1;
+ }
+ /* Make sure RWI/RWO is disabled. */
+ sso_cfg.u64 = cvmx_read_csr(CVMX_SSO_CFG);
+ sso_cfg.s.rwen = 0;
+ cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);
+
+ for (i = cvm_oct_sso_number_rwq_bufs - 8; i > 0; i--) {
+ cvmx_sso_rwq_psh_fptr_t fptr;
+
+ for (;;) {
+ fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_PSH_FPTR);
+ if (!fptr.s.full)
+ break;
+ cvmx_wait(1000);
+ }
+ fptr.s.fptr = cvmx_ptr_to_phys(mem) >> 7;
+ cvmx_write_csr(CVMX_SSO_RWQ_PSH_FPTR, fptr.u64);
+ mem = mem + __CVMX_SSO_RWQ_SIZE;
+ }
+
+ for (i = 0; i < 8; i++) {
+ cvmx_sso_rwq_head_ptrx_t head_ptr;
+ cvmx_sso_rwq_tail_ptrx_t tail_ptr;
+
+ head_ptr.u64 = 0;
+ tail_ptr.u64 = 0;
+ head_ptr.s.ptr = cvmx_ptr_to_phys(mem) >> 7;
+ tail_ptr.s.ptr = head_ptr.s.ptr;
+ cvmx_write_csr(CVMX_SSO_RWQ_HEAD_PTRX(i), head_ptr.u64);
+ cvmx_write_csr(CVMX_SSO_RWQ_TAIL_PTRX(i), tail_ptr.u64);
+ mem = mem + __CVMX_SSO_RWQ_SIZE;
+ }
+
+ sso_cfg.u64 = cvmx_read_csr(CVMX_SSO_CFG);
+ sso_cfg.s.rwen = 1;
+ sso_cfg.s.dwb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);
+ sso_cfg.s.rwq_byp_dis = 0;
+ sso_cfg.s.rwio_byp_dis = 0;
+ cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);
+
+ return 0;
+}
+
+int cvmx_helper_uninitialize_sso(void)
+{
+ cvmx_fpa_quex_available_t queue_available;
+ cvmx_sso_cfg_t sso_cfg;
+ cvmx_sso_rwq_pop_fptr_t pop_fptr;
+ cvmx_sso_rwq_psh_fptr_t fptr;
+ cvmx_sso_fpage_cnt_t fpage_cnt;
+ int num_to_transfer, i;
+ char *mem;
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return 0;
+
+ sso_cfg.u64 = cvmx_read_csr(CVMX_SSO_CFG);
+ sso_cfg.s.rwen = 0;
+ sso_cfg.s.rwq_byp_dis = 1;
+ cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);
+ cvmx_read_csr(CVMX_SSO_CFG);
+ queue_available.u64 = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(8));
+
+ /* Make CVMX_FPA_QUEX_AVAILABLE(8) % 16 == 0 */
+ for (num_to_transfer = (16 - queue_available.s.que_siz) % 16;
+ num_to_transfer > 0; num_to_transfer--) {
+ do {
+ pop_fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_POP_FPTR);
+ } while (!pop_fptr.s.val);
+ for (;;) {
+ fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_PSH_FPTR);
+ if (!fptr.s.full)
+ break;
+ cvmx_wait(1000);
+ }
+ fptr.s.fptr = pop_fptr.s.fptr;
+ cvmx_write_csr(CVMX_SSO_RWQ_PSH_FPTR, fptr.u64);
+ }
+ cvmx_read_csr(CVMX_SSO_CFG);
+
+ do {
+ queue_available.u64 = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(8));
+ } while (queue_available.s.que_siz % 16);
+
+ sso_cfg.s.rwen = 1;
+ sso_cfg.s.rwq_byp_dis = 0;
+ cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);
+
+ for (i = 0; i < 8; i++) {
+ cvmx_sso_rwq_head_ptrx_t head_ptr;
+ cvmx_sso_rwq_tail_ptrx_t tail_ptr;
+
+ head_ptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_HEAD_PTRX(i));
+ tail_ptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_TAIL_PTRX(i));
+ if (head_ptr.s.ptr != tail_ptr.s.ptr) {
+ cvmx_dprintf("head_ptr.s.ptr != tail_ptr.s.ptr, idx: %d\n", i);
+ }
+
+ mem = cvmx_phys_to_ptr(((uint64_t)head_ptr.s.ptr) << 7);
+ /* Leak the memory */
+ }
+
+ do {
+ do {
+ pop_fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_POP_FPTR);
+ if (pop_fptr.s.val) {
+ mem = cvmx_phys_to_ptr(((uint64_t)pop_fptr.s.fptr) << 7);
+ /* Leak the memory */
+ }
+ } while (pop_fptr.s.val);
+ fpage_cnt.u64 = cvmx_read_csr(CVMX_SSO_FPAGE_CNT);
+ } while (fpage_cnt.s.fpage_cnt);
+
+ sso_cfg.s.rwen = 0;
+ sso_cfg.s.rwq_byp_dis = 0;
+ cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);
+
+ return 0;
+}
+
+/**
+ * Initialize the PIP, IPD, and PKO hardware to support
+ * simple priority based queues for the ethernet ports. Each
+ * port is configured with a number of priority queues based
+ * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
+ * priority than the previous.
+ *
+ * @return Zero on success, non-zero on failure
+ */
+int cvmx_helper_initialize_packet_io_global(void)
+{
+ int result = 0;
+ int interface;
+ cvmx_l2c_cfg_t l2c_cfg;
+ cvmx_smix_en_t smix_en;
+ const int num_interfaces = cvmx_helper_get_number_of_interfaces();
+
+ /* CN52XX pass 1: Due to a bug in 2nd order CDR, it needs to be disabled */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
+ __cvmx_helper_errata_qlm_disable_2nd_order_cdr(1);
+
+ /* Tell L2 to give the IOB statically higher priority compared to the
+ cores. This avoids conditions where IO blocks might be starved under
+ very high L2 loads */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ {
+ cvmx_l2c_ctl_t l2c_ctl;
+ l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
+ l2c_ctl.s.rsp_arb_mode = 1;
+ l2c_ctl.s.xmc_arb_mode = 0;
+ cvmx_write_csr(CVMX_L2C_CTL, l2c_ctl.u64);
+ }
+ else
+ {
+ l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+ l2c_cfg.s.lrf_arb_mode = 0;
+ l2c_cfg.s.rfb_arb_mode = 0;
+ cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
+ }
+
+ if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
+ {
+ int smi_inf = 1;
+ int i;
+
+ /* Newer chips have more than one SMI/MDIO interface */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ smi_inf = 4;
+ else if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)
+ && !OCTEON_IS_MODEL(OCTEON_CN58XX)
+ && !OCTEON_IS_MODEL(OCTEON_CN50XX))
+ smi_inf = 2;
+
+ for (i = 0; i < smi_inf; i++)
+ {
+ /* Make sure SMI/MDIO is enabled so we can query PHYs */
+ smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(i));
+ if (!smix_en.s.en)
+ {
+ smix_en.s.en = 1;
+ cvmx_write_csr(CVMX_SMIX_EN(i), smix_en.u64);
+ }
+ }
+ }
+
+ __cvmx_helper_cfg_init();
+
+ for (interface=0; interface<num_interfaces; interface++)
+ result |= cvmx_helper_interface_probe(interface);
+
+ cvmx_pko_initialize_global();
+ for (interface=0; interface<num_interfaces; interface++)
+ {
+ if (cvmx_helper_ports_on_interface(interface) > 0)
+ cvmx_dprintf("Interface %d has %d ports (%s)\n",
+ interface, cvmx_helper_ports_on_interface(interface),
+ cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(interface)));
+ result |= __cvmx_helper_interface_setup_ipd(interface);
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+ result |= __cvmx_helper_interface_setup_pko(interface);
+ }
+
+ result |= __cvmx_helper_global_setup_ipd();
+ result |= __cvmx_helper_global_setup_pko();
+
+ /* Enable any flow control and backpressure */
+ result |= __cvmx_helper_global_setup_backpressure();
+
+#if CVMX_HELPER_ENABLE_IPD
+ result |= cvmx_helper_ipd_and_packet_input_enable();
+#endif
+ return result;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_initialize_packet_io_global);
+#endif
+
+
+/**
+ * Does core local initialization for packet io
+ *
+ * @return Zero on success, non-zero on failure
+ */
+int cvmx_helper_initialize_packet_io_local(void)
+{
+ return cvmx_pko_initialize_local();
+}
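+
+/* Illustrative sketch only: the bring-up order these helpers expect.
+   One core runs the global init; every core then runs the local init
+   before using PKO. The function name is hypothetical and the boot
+   core check assumes the usual core-0 convention. */
+#if 0
+static int example_packet_io_bringup(void)
+{
+    if (cvmx_get_core_num() == 0)
+        if (cvmx_helper_initialize_packet_io_global())
+            return -1;
+    /* Executed on every core, including core 0 */
+    return cvmx_helper_initialize_packet_io_local();
+}
+#endif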
+
+/**
+ * Wait for a PKO queue to drain.
+ *
+ * @param queue a valid PKO queue
+ * @return The remaining queue length after waiting; zero means the
+ * queue drained within the timeout
+ */
+static int cvmx_helper_wait_pko_queue_drain(int queue)
+{
+ const int timeout = 5; /* Wait up to 5 seconds for timeouts */
+ int count;
+ uint64_t start_cycle, stop_cycle;
+
+ count = cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(queue));
+ start_cycle = cvmx_get_cycle();
+ stop_cycle = start_cycle + cvmx_clock_get_rate(CVMX_CLOCK_CORE) * timeout;
+ while (count && (cvmx_get_cycle() < stop_cycle))
+ {
+ cvmx_wait(10000);
+ count = cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(queue));
+ }
+
+ return count;
+}
+
+struct cvmx_buffer_list {
+ struct cvmx_buffer_list *next;
+};
+
+/**
+ * Undo the initialization performed in
+ * cvmx_helper_initialize_packet_io_global(). After calling this routine and the
+ * local version on each core, packet IO for Octeon will be disabled and placed
+ * in the initial reset state. It will then be safe to call the initialize
+ * later on. Note that this routine does not empty the FPA pools. It frees all
+ * buffers used by the packet IO hardware to the FPA so a function emptying the
+ * FPA after shutdown should find all packet buffers in the FPA.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_helper_shutdown_packet_io_global(void)
+{
+ const int timeout = 5; /* Wait up to 5 seconds for timeouts */
+ int result = 0;
+ int num_interfaces;
+ int interface;
+ int num_ports;
+ int index;
+ struct cvmx_buffer_list *pool0_buffers;
+ struct cvmx_buffer_list *pool0_buffers_tail;
+ cvmx_wqe_t *work;
+
+ /* Step 1: Disable all backpressure */
+ for (interface=0; interface<CVMX_HELPER_MAX_GMX; interface++)
+ if (cvmx_helper_interface_get_mode(interface) != CVMX_HELPER_INTERFACE_MODE_DISABLED)
+ cvmx_gmx_set_backpressure_override(interface, 0xf);
+
+step2:
+ /* Step 2: Wait for the PKO queues to drain */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ int queue, max_queue;
+
+ max_queue = __cvmx_helper_cfg_pko_max_queue();
+ for (queue = 0; queue < max_queue; queue++)
+ {
+ if (cvmx_helper_wait_pko_queue_drain(queue))
+ {
+ result = -1;
+ goto step3;
+ }
+ }
+ }
+ else
+ {
+ num_interfaces = cvmx_helper_get_number_of_interfaces();
+ for (interface=0; interface<num_interfaces; interface++)
+ {
+ num_ports = cvmx_helper_ports_on_interface(interface);
+ for (index=0; index<num_ports; index++)
+ {
+ int pko_port = cvmx_helper_get_ipd_port(interface, index);
+ int queue = cvmx_pko_get_base_queue(pko_port);
+ int max_queue = queue + cvmx_pko_get_num_queues(pko_port);
+ while (queue < max_queue)
+ {
+ if (cvmx_helper_wait_pko_queue_drain(queue))
+ {
+ result = -1;
+ goto step3;
+ }
+ queue++;
+ }
+ }
+ }
+ }
+
+step3:
+ /* Step 3: Disable TX and RX on all ports */
+ for (interface=0; interface<CVMX_HELPER_MAX_GMX; interface++)
+ {
+ switch (cvmx_helper_interface_get_mode(interface))
+ {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ /* Not a packet interface */
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_SRIO:
+ case CVMX_HELPER_INTERFACE_MODE_ILK:
+ /* We don't handle the NPI/NPEI/SRIO packet engines. The caller
+ must know these are idle */
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ /* Nothing needed. Once PKO is idle, the loopback devices
+ must be idle */
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ /* SPI cannot be disabled from Octeon. It is the responsibility
+ of the caller to make sure SPI is idle before doing
+ shutdown */
+ /* Fall through and do the same processing as RGMII/GMII */
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ /* Disable outermost RX at the ASX block */
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), 0);
+ num_ports = cvmx_helper_ports_on_interface(interface);
+ if (num_ports > 4)
+ num_ports = 4;
+ for (index=0; index<num_ports; index++)
+ {
+ cvmx_gmxx_prtx_cfg_t gmx_cfg;
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ /* Poll the GMX state machine waiting for it to become idle */
+ cvmx_write_csr(CVMX_NPI_DBG_SELECT, interface*0x800 + index*0x100 + 0x880);
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&7, ==, 0, timeout*1000000))
+ {
+ cvmx_dprintf("GMX RX path timeout waiting for idle\n");
+ result = -1;
+ }
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&0xf, ==, 0, timeout*1000000))
+ {
+ cvmx_dprintf("GMX TX path timeout waiting for idle\n");
+ result = -1;
+ }
+ }
+ /* Disable outermost TX at the ASX block */
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), 0);
+ /* Disable interrupts for interface */
+ cvmx_write_csr(CVMX_ASXX_INT_EN(interface), 0);
+ cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ num_ports = cvmx_helper_ports_on_interface(interface);
+ if (num_ports > 4)
+ num_ports = 4;
+ for (index=0; index<num_ports; index++)
+ {
+ cvmx_gmxx_prtx_cfg_t gmx_cfg;
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface), cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1, timeout*1000000))
+ {
+ cvmx_dprintf("GMX RX path timeout waiting for idle\n");
+ result = -1;
+ }
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface), cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1, timeout*1000000))
+ {
+ cvmx_dprintf("GMX TX path timeout waiting for idle\n");
+ result = -1;
+ }
+ }
+ break;
+ }
+ }
+
+ /* Step 4: Retrieve all packets from the POW and free them */
+ while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT)))
+ {
+ cvmx_helper_free_packet_data(work);
+ cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 0);
+ }
+
+ /* Step 4b: Special workaround for pass 2 errata */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
+ {
+ cvmx_ipd_ptr_count_t ipd_cnt;
+ int to_add;
+ ipd_cnt.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
+ to_add = (ipd_cnt.s.wqev_cnt + ipd_cnt.s.wqe_pcnt) & 0x7;
+ if (to_add)
+ {
+ int port = -1;
+ cvmx_dprintf("Aligning CN38XX pass 2 IPD counters\n");
+ if (cvmx_helper_interface_get_mode(0) == CVMX_HELPER_INTERFACE_MODE_RGMII)
+ port = 0;
+ else if (cvmx_helper_interface_get_mode(1) == CVMX_HELPER_INTERFACE_MODE_RGMII)
+ port = 16;
+
+ if (port != -1)
+ {
+ char *buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
+ if (buffer)
+ {
+ int queue = cvmx_pko_get_base_queue(port);
+ cvmx_pko_command_word0_t pko_command;
+ cvmx_buf_ptr_t packet;
+ uint64_t start_cycle;
+ uint64_t stop_cycle;
+
+ /* Populate a minimal packet */
+ memset(buffer, 0xff, 6);
+ memset(buffer+6, 0, 54);
+ pko_command.u64 = 0;
+ pko_command.s.dontfree = 1;
+ pko_command.s.total_bytes = 60;
+ pko_command.s.segs = 1;
+ packet.u64 = 0;
+ packet.s.addr = cvmx_ptr_to_phys(buffer);
+ packet.s.size = CVMX_FPA_PACKET_POOL_SIZE;
+ __cvmx_helper_rgmii_configure_loopback(port, 1, 0);
+ while (to_add--)
+ {
+ cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);
+ if (cvmx_pko_send_packet_finish(port, queue, pko_command, packet, CVMX_PKO_LOCK_CMD_QUEUE))
+ {
+ cvmx_dprintf("ERROR: Unable to align IPD counters (PKO failed)\n");
+ break;
+ }
+ }
+ cvmx_fpa_free(buffer, CVMX_FPA_PACKET_POOL, 0);
+
+ /* Wait for the packets to loop back */
+ start_cycle = cvmx_get_cycle();
+ stop_cycle = start_cycle + cvmx_clock_get_rate(CVMX_CLOCK_CORE) * timeout;
+ while (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(queue)) &&
+ (cvmx_get_cycle() < stop_cycle))
+ {
+ cvmx_wait(1000);
+ }
+ cvmx_wait(1000);
+ __cvmx_helper_rgmii_configure_loopback(port, 0, 0);
+ if (to_add == -1)
+ goto step2;
+ }
+ else
+ cvmx_dprintf("ERROR: Unable to align IPD counters (Packet pool empty)\n");
+ }
+ else
+ cvmx_dprintf("ERROR: Unable to align IPD counters\n");
+ }
+ }
+
+ /* Step 5 */
+ cvmx_ipd_disable();
+
+ /* Step 6: Drain all prefetched buffers from IPD/PIP. Note that IPD/PIP
+ have not been reset yet */
+ __cvmx_ipd_free_ptr();
+
+ /* Step 7: Free the PKO command buffers and put PKO in reset */
+ cvmx_pko_shutdown();
+
+ /* Step 8: Disable MAC address filtering */
+ for (interface=0; interface<CVMX_HELPER_MAX_GMX; interface++)
+ {
+ switch (cvmx_helper_interface_get_mode(interface))
+ {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ case CVMX_HELPER_INTERFACE_MODE_SRIO:
+ case CVMX_HELPER_INTERFACE_MODE_ILK:
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ num_ports = cvmx_helper_ports_on_interface(interface);
+ if (num_ports > 4)
+ num_ports = 4;
+ for (index=0; index<num_ports; index++)
+ {
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface), 1);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 0);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), 0);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), 0);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), 0);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), 0);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), 0);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), 0);
+ }
+ break;
+ }
+ }
+
+ /* Step 9: Drain all FPA buffers out of pool 0 before we reset
+ * IPD/PIP. This is needed to keep IPD_QUE0_FREE_PAGE_CNT in
+ * sync. We temporarily keep the buffers in the pool0_buffers
+ * list.
+ */
+ pool0_buffers = NULL;
+ pool0_buffers_tail = NULL;
+ while (1)
+ {
+ struct cvmx_buffer_list *buffer = cvmx_fpa_alloc(0);
+ if (buffer) {
+ buffer->next = NULL;
+
+ if (pool0_buffers == NULL)
+ pool0_buffers = buffer;
+ else
+ pool0_buffers_tail->next = buffer;
+
+ pool0_buffers_tail = buffer;
+ }
+ else
+ break;
+ }
+
+ /* Step 10: Reset IPD and PIP */
+ {
+ cvmx_ipd_ctl_status_t ipd_ctl_status;
+ ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_ctl_status.s.reset = 1;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
+
+ if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
+ (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ {
+ /* only try 1000 times. Normally if this works it will happen in
+ ** the first 50 loops. */
+ int max_loops = 1000;
+ int loop = 0;
+ /* Per port backpressure counters can get misaligned after an
+ IPD reset. This code realigns them by performing repeated
+ resets. See IPD-13473 */
+ cvmx_wait(100);
+ if (__cvmx_helper_backpressure_is_misaligned())
+ {
+ cvmx_dprintf("Starting to align per port backpressure counters.\n");
+ while (__cvmx_helper_backpressure_is_misaligned() && (loop++ < max_loops))
+ {
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
+ cvmx_wait(123);
+ }
+ if (loop < max_loops)
+ cvmx_dprintf("Completed aligning per port backpressure counters (%d loops).\n", loop);
+ else
+ {
+ cvmx_dprintf("ERROR: unable to align per port backpressure counters.\n");
+ /* For now, don't hang.... */
+ }
+ }
+ }
+
+ /* PIP_SFT_RST not present in CN38XXp{1,2} */
+ if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
+ {
+ cvmx_pip_sft_rst_t pip_sft_rst;
+ pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST);
+ pip_sft_rst.s.rst = 1;
+ cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64);
+ }
+
+ /* Make sure IPD has finished reset. */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_IPD_CTL_STATUS, cvmx_ipd_ctl_status_t, rst_done, ==, 0, 1000))
+ {
+ cvmx_dprintf("IPD reset timeout waiting for idle\n");
+ result = -1;
+ }
+ }
+ }
+
+ /* Step 11: Restore the FPA buffers into pool 0 */
+ while (pool0_buffers) {
+ struct cvmx_buffer_list *n = pool0_buffers->next;
+ cvmx_fpa_free(pool0_buffers, 0, 0);
+ pool0_buffers = n;
+ }
+
+ /* Step 12: Release interface structures */
+ __cvmx_helper_shutdown_interfaces();
+
+ return result;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_shutdown_packet_io_global);
+#endif
+
+
+/**
+ * Does core local shutdown of packet io
+ *
+ * @return Zero on success, non-zero on failure
+ */
+int cvmx_helper_shutdown_packet_io_local(void)
+{
+ /* Currently there is nothing to do per core. This may change in
+ the future */
+ return 0;
+}
+
+
+
+/**
+ * Auto configure an IPD/PKO port link state and speed. This
+ * function basically does the equivalent of:
+ * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
+ *
+ * @param ipd_port IPD/PKO port to auto configure
+ *
+ * @return Link state after configure
+ */
+cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port)
+{
+ cvmx_helper_link_info_t link_info;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ if (index >= cvmx_helper_ports_on_interface(interface))
+ {
+ link_info.u64 = 0;
+ return link_info;
+ }
+
+ link_info = cvmx_helper_link_get(ipd_port);
+ if (link_info.u64 == (__cvmx_helper_get_link_info(interface, index)).u64)
+ return link_info;
+
+#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+ if (!link_info.s.link_up)
+ cvmx_error_disable_group(CVMX_ERROR_GROUP_ETHERNET, ipd_port);
+#endif
+
+ /* If we fail to set the link speed, port_link_info will not change */
+ cvmx_helper_link_set(ipd_port, link_info);
+
+#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+ if (link_info.s.link_up)
+ cvmx_error_enable_group(CVMX_ERROR_GROUP_ETHERNET, ipd_port);
+#endif
+
+ return link_info;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_link_autoconf);
+#endif
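+
+/* Usage sketch (illustration only): a typical caller polls every port
+   periodically and lets cvmx_helper_link_autoconf() push any negotiation
+   changes into the hardware. All helpers used here appear elsewhere in
+   this file. */
+static inline void example_poll_all_links(void)
+{
+    int num_interfaces = cvmx_helper_get_number_of_interfaces();
+    int interface;
+    for (interface = 0; interface < num_interfaces; interface++)
+    {
+        int num_ports = cvmx_helper_ports_on_interface(interface);
+        int index;
+        for (index = 0; index < num_ports; index++)
+        {
+            int ipd_port = cvmx_helper_get_ipd_port(interface, index);
+            cvmx_helper_link_info_t link = cvmx_helper_link_autoconf(ipd_port);
+            if (link.s.link_up)
+                cvmx_dprintf("Port %d: up, %d Mbps, %s duplex\n", ipd_port,
+                             (int)link.s.speed, link.s.full_duplex ? "full" : "half");
+        }
+    }
+}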
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ /* The default result will be a down link unless the code below
+ changes it */
+ result.u64 = 0;
+
+ if (index >= cvmx_helper_ports_on_interface(interface))
+ return result;
+
+ switch (cvmx_helper_interface_get_mode(interface))
+ {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ /* Network links are not supported */
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ result = __cvmx_helper_xaui_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ if (index == 0)
+ result = __cvmx_helper_rgmii_link_get(ipd_port);
+ else
+ {
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ result.s.speed = 1000;
+ }
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ result = __cvmx_helper_rgmii_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ result = __cvmx_helper_spi_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ result = __cvmx_helper_sgmii_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SRIO:
+ result = __cvmx_helper_srio_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_ILK:
+ result = __cvmx_helper_ilk_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ /* Network links are not supported */
+ break;
+ }
+ return result;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_link_get);
+#endif
+
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+ int result = -1;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ if (index >= cvmx_helper_ports_on_interface(interface))
+ return -1;
+
+ switch (cvmx_helper_interface_get_mode(interface))
+ {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ result = __cvmx_helper_xaui_link_set(ipd_port, link_info);
+ break;
+ /* RGMII/GMII/MII are all treated about the same. Most functions
+ refer to these ports as RGMII */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ result = __cvmx_helper_rgmii_link_set(ipd_port, link_info);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ result = __cvmx_helper_spi_link_set(ipd_port, link_info);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ result = __cvmx_helper_sgmii_link_set(ipd_port, link_info);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SRIO:
+ result = __cvmx_helper_srio_link_set(ipd_port, link_info);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_ILK:
+ result = __cvmx_helper_ilk_link_set(ipd_port, link_info);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ break;
+ }
+ /* Set the port_link_info here so that the link status is updated
+ no matter how cvmx_helper_link_set is called. We don't change
+ the value if link_set failed */
+ if (result == 0)
+ __cvmx_helper_set_link_info(interface, index, link_info);
+ return result;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_helper_link_set);
+#endif
+
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ * Non zero if you want internal loopback
+ * @param enable_external
+ * Non zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_helper_configure_loopback(int ipd_port, int enable_internal, int enable_external)
+{
+ int result = -1;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ if (index >= cvmx_helper_ports_on_interface(interface))
+ return -1;
+
+ switch (cvmx_helper_interface_get_mode(interface))
+ {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ case CVMX_HELPER_INTERFACE_MODE_SRIO:
+ case CVMX_HELPER_INTERFACE_MODE_ILK:
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ result = __cvmx_helper_xaui_configure_loopback(ipd_port, enable_internal, enable_external);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ result = __cvmx_helper_rgmii_configure_loopback(ipd_port, enable_internal, enable_external);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ result = __cvmx_helper_sgmii_configure_loopback(ipd_port, enable_internal, enable_external);
+ break;
+ }
+ return result;
+}
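+
+/* Usage sketch (illustration only): internal loopback is the same
+   mechanism the CN38XX pass 2 IPD workaround above uses to recirculate
+   packets; the pattern below applies to application self-tests.
+
+       if (cvmx_helper_configure_loopback(ipd_port, 1, 0) == 0)
+       {
+           ... send and receive test traffic on ipd_port ...
+           cvmx_helper_configure_loopback(ipd_port, 0, 0);
+       }
+*/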
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-helper.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-helper.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-helper.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,374 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Helper functions for common, but complicated tasks.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_HELPER_H__
+#define __CVMX_HELPER_H__
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#elif !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#endif
+
+#include "cvmx-fpa.h"
+#include "cvmx-wqe.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Max number of GMXX */
+#define CVMX_HELPER_MAX_GMX (OCTEON_IS_MODEL(OCTEON_CN68XX) ? 5 : 2)
+
+#define CVMX_HELPER_CSR_INIT0 0 /* Do not change as
+ CVMX_HELPER_WRITE_CSR()
+ assumes it */
+#define CVMX_HELPER_CSR_INIT_READ -1
+
+/*
+ * CVMX_HELPER_WRITE_CSR--set a field in a CSR with a value.
+ *
+ * @param chcsr_init initial value of the CSR (CVMX_HELPER_CSR_INIT_READ
+ * means to use the existing csr value as the
+ * initial value.)
+ * @param chcsr_csr the name of the csr
+ * @param chcsr_type the type of the csr (see the -defs.h)
+ * @param chcsr_chip the chip for the csr/field
+ * @param chcsr_fld the field in the csr
+ * @param chcsr_val the value for the field
+ */
+#define CVMX_HELPER_WRITE_CSR(chcsr_init, chcsr_csr, chcsr_type, \
+ chcsr_chip, chcsr_fld, chcsr_val) \
+ do { \
+ chcsr_type csr; \
+ if ((chcsr_init) == CVMX_HELPER_CSR_INIT_READ) \
+ csr.u64 = cvmx_read_csr(chcsr_csr); \
+ else \
+ csr.u64 = (chcsr_init); \
+ csr.chcsr_chip.chcsr_fld = (chcsr_val); \
+ cvmx_write_csr((chcsr_csr), csr.u64); \
+ } while(0)
+
+/*
+ * CVMX_HELPER_WRITE_CSR0--set a field in a CSR with the initial value of 0
+ */
+#define CVMX_HELPER_WRITE_CSR0(chcsr_csr, chcsr_type, chcsr_chip, \
+ chcsr_fld, chcsr_val) \
+ CVMX_HELPER_WRITE_CSR(CVMX_HELPER_CSR_INIT0, chcsr_csr, \
+ chcsr_type, chcsr_chip, chcsr_fld, chcsr_val)
+
+/*
+ * CVMX_HELPER_WRITE_CSR1--set a field in a CSR with the initial value of
+ * the CSR's current value.
+ */
+#define CVMX_HELPER_WRITE_CSR1(chcsr_csr, chcsr_type, chcsr_chip, \
+ chcsr_fld, chcsr_val) \
+ CVMX_HELPER_WRITE_CSR(CVMX_HELPER_CSR_INIT_READ, chcsr_csr, \
+ chcsr_type, chcsr_chip, chcsr_fld, chcsr_val)
+
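+/*
+ * Usage sketch (illustration only): read-modify-write of a single CSR
+ * field. CVMX_L2C_CTL and its rsp_arb_mode field appear in the L2C setup
+ * in cvmx-helper.c; the typedef name cvmx_l2c_ctl_t is assumed to match
+ * the corresponding -defs.h header.
+ *
+ *   CVMX_HELPER_WRITE_CSR1(CVMX_L2C_CTL, cvmx_l2c_ctl_t, s,
+ *                          rsp_arb_mode, 1);
+ */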
+
+typedef enum
+{
+ CVMX_HELPER_INTERFACE_MODE_DISABLED,
+ CVMX_HELPER_INTERFACE_MODE_RGMII,
+ CVMX_HELPER_INTERFACE_MODE_GMII,
+ CVMX_HELPER_INTERFACE_MODE_SPI,
+ CVMX_HELPER_INTERFACE_MODE_PCIE,
+ CVMX_HELPER_INTERFACE_MODE_XAUI,
+ CVMX_HELPER_INTERFACE_MODE_SGMII,
+ CVMX_HELPER_INTERFACE_MODE_PICMG,
+ CVMX_HELPER_INTERFACE_MODE_NPI,
+ CVMX_HELPER_INTERFACE_MODE_LOOP,
+ CVMX_HELPER_INTERFACE_MODE_SRIO,
+ CVMX_HELPER_INTERFACE_MODE_ILK,
+ CVMX_HELPER_INTERFACE_MODE_RXAUI,
+} cvmx_helper_interface_mode_t;
+
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t reserved_20_63 : 44;
+ uint64_t link_up : 1; /**< Is the physical link up? */
+ uint64_t full_duplex : 1; /**< 1 if the link is full duplex */
+ uint64_t speed : 18; /**< Speed of the link in Mbps */
+ } s;
+} cvmx_helper_link_info_t;
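+
+/* Usage sketch (illustration only): composing a link_info value by hand,
+   e.g. the forced 1 Gbps full-duplex state used for fixed GMII ports in
+   cvmx-helper.c. */
+static inline cvmx_helper_link_info_t example_link_1g_full_duplex(void)
+{
+    cvmx_helper_link_info_t link;
+    link.u64 = 0;            /* clear reserved bits */
+    link.s.link_up = 1;
+    link.s.full_duplex = 1;
+    link.s.speed = 1000;     /* Mbps */
+    return link;
+}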
+
+#include "cvmx-helper-fpa.h"
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+#include "cvmx-helper-errata.h"
+#include "cvmx-helper-ilk.h"
+#include "cvmx-helper-loop.h"
+#include "cvmx-helper-npi.h"
+#include "cvmx-helper-rgmii.h"
+#include "cvmx-helper-sgmii.h"
+#include "cvmx-helper-spi.h"
+#include "cvmx-helper-srio.h"
+#include "cvmx-helper-xaui.h"
+
+/**
+ * cvmx_override_pko_queue_priority(int ipd_port, uint64_t
+ * priorities[16]) is a function pointer. It is meant to allow
+ * customization of the PKO queue priorities based on the port
+ * number. Users should set this pointer to a function before
+ * calling any cvmx-helper operations.
+ */
+extern CVMX_SHARED void (*cvmx_override_pko_queue_priority)(int ipd_port, uint64_t *priorities);
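+
+/*
+ * Usage sketch (illustration only): installing a custom priority hook
+ * before any cvmx-helper initialization. The weights are arbitrary
+ * example values.
+ *
+ *   static void my_pko_priorities(int ipd_port, uint64_t *priorities)
+ *   {
+ *       int i;
+ *       for (i = 0; i < 16; i++)
+ *           priorities[i] = 8;   // equal weight for every queue
+ *   }
+ *   ...
+ *   cvmx_override_pko_queue_priority = my_pko_priorities;
+ */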
+
+/**
+ * cvmx_override_ipd_port_setup(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the IPD port/port kind
+ * setup before packet input/output comes online. It is called
+ * after cvmx-helper does the default IPD configuration, but
+ * before IPD is enabled. Users should set this pointer to a
+ * function before calling any cvmx-helper operations.
+ */
+extern CVMX_SHARED void (*cvmx_override_ipd_port_setup)(int ipd_port);
+
+/**
+ * This function enables the IPD and also enables the packet interfaces.
+ * The packet interfaces (RGMII and SPI) must be enabled after the
+ * IPD. This should be called by the user program after any additional
+ * IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
+ * is not set in the executive-config.h file.
+ *
+ * @return 0 on success
+ * -1 on failure
+ */
+extern int cvmx_helper_ipd_and_packet_input_enable(void);
+
+/**
+ * Initialize and allocate memory for the SSO.
+ *
+ * @param wqe_entries The maximum number of work queue entries to be
+ * supported.
+ *
+ * @return Zero on success, non-zero on failure.
+ */
+extern int cvmx_helper_initialize_sso(int wqe_entries);
+
+/**
+ * Undo the effect of cvmx_helper_initialize_sso().
+ *
+ * Warning: since cvmx_bootmem_alloc() memory cannot be freed, the
+ * memory allocated by cvmx_helper_initialize_sso() will be leaked.
+ *
+ * @return Zero on success, non-zero on failure.
+ */
+extern int cvmx_helper_uninitialize_sso(void);
+
+/**
+ * Initialize the PIP, IPD, and PKO hardware to support
+ * simple priority based queues for the ethernet ports. Each
+ * port is configured with a number of priority queues based
+ * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
+ * priority than the previous.
+ *
+ * @return Zero on success, non-zero on failure
+ */
+extern int cvmx_helper_initialize_packet_io_global(void);
+
+/**
+ * Does core local initialization for packet io
+ *
+ * @return Zero on success, non-zero on failure
+ */
+extern int cvmx_helper_initialize_packet_io_local(void);
+
+/**
+ * Undo the initialization performed in
+ * cvmx_helper_initialize_packet_io_global(). After calling this routine and the
+ * local version on each core, packet IO for Octeon will be disabled and placed
+ * in the initial reset state. It will then be safe to call the initialize
+ * later on. Note that this routine does not empty the FPA pools. It frees all
+ * buffers used by the packet IO hardware to the FPA so a function emptying the
+ * FPA after shutdown should find all packet buffers in the FPA.
+ *
+ * @return Zero on success, negative on failure.
+ */
+extern int cvmx_helper_shutdown_packet_io_global(void);
+
+/**
+ * Does core local shutdown of packet io
+ *
+ * @return Zero on success, non-zero on failure
+ */
+extern int cvmx_helper_shutdown_packet_io_local(void);
+
+/**
+ * Returns the number of ports on the given interface.
+ * The interface must be initialized before the port count
+ * can be returned.
+ *
+ * @param interface Which interface to return port count for.
+ *
+ * @return Port count for interface
+ * -1 for uninitialized interface
+ */
+extern int cvmx_helper_ports_on_interface(int interface);
+
+/**
+ * Return the number of interfaces the chip has. Each interface
+ * may have multiple ports. Most chips support two interfaces,
+ * but the CNX0XX and CNX1XX are exceptions. These only support
+ * one interface.
+ *
+ * @return Number of interfaces on chip
+ */
+extern int cvmx_helper_get_number_of_interfaces(void);
+
+/**
+ * Get the operating mode of an interface. Depending on the Octeon
+ * chip and configuration, this function returns an enumeration
+ * of the type of packet I/O supported by an interface.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Mode of the interface. Unknown or unsupported interfaces return
+ * DISABLED.
+ */
+extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface);
+
+/**
+ * Auto configure an IPD/PKO port link state and speed. This
+ * function basically does the equivalent of:
+ * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
+ *
+ * @param ipd_port IPD/PKO port to auto configure
+ *
+ * @return Link state after configure
+ */
+extern cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+extern cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
+
+
+
+/**
+ * This function probes an interface to determine the actual number of
+ * hardware ports connected to it. It does some setup of the ports but
+ * doesn't enable them. The main goal here is to set the global
+ * interface_port_count[interface] correctly. Final hardware setup of
+ * the ports will be performed later.
+ *
+ * @param interface Interface to probe
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int cvmx_helper_interface_probe(int interface);
+
+/**
+ * Determine the actual number of hardware ports connected to an
+ * interface. It doesn't setup the ports or enable them.
+ *
+ * @param interface Interface to enumerate
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int cvmx_helper_interface_enumerate(int interface);
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ * Non zero if you want internal loopback
+ * @param enable_external
+ * Non zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+extern int cvmx_helper_configure_loopback(int ipd_port, int enable_internal, int enable_external);
+
+#include "cvmx-helper-util.h"
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_HELPER_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-helper.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-hfa.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-hfa.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-hfa.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,175 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2011 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support library for the CN63XX, CN68XX hardware HFA engine.
+ *
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-dfa-defs.h>
+#include <asm/octeon/cvmx-hfa.h>
+#else
+#include "executive-config.h"
+#ifdef CVMX_ENABLE_DFA_FUNCTIONS
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-fau.h"
+#include "cvmx-cmd-queue.h"
+#include "cvmx-hfa.h"
+#endif
+#endif
+
+#ifdef CVMX_ENABLE_DFA_FUNCTIONS
+
+/**
+ * Initialize the DFA block
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_hfa_initialize(void)
+{
+ cvmx_dfa_difctl_t control;
+ cvmx_cmd_queue_result_t result;
+ void *initial_base_address;
+ int cmdsize;
+
+ cmdsize = ((CVMX_FPA_DFA_POOL_SIZE - 8) / sizeof (cvmx_dfa_command_t)) *
+ sizeof (cvmx_dfa_command_t);
+ result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_DFA, 0,
+ CVMX_FPA_DFA_POOL, cmdsize + 8);
+ if (result != CVMX_CMD_QUEUE_SUCCESS)
+ return -1;
+
+ control.u64 = 0;
+ control.s.dwbcnt = CVMX_FPA_DFA_POOL_SIZE / 128;
+ control.s.pool = CVMX_FPA_DFA_POOL;
+ control.s.size = cmdsize / sizeof(cvmx_dfa_command_t);
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_DFA_DIFCTL, control.u64);
+ initial_base_address = cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DFA);
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_DFA_DIFRDPTR, cvmx_ptr_to_phys(initial_base_address));
+ cvmx_read_csr(CVMX_DFA_DIFRDPTR); /* Read to make sure setup is complete */
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_hfa_initialize);
+#endif
+
+/**
+ * Shutdown the DFA block. DFA must be idle when
+ * this function is called.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_hfa_shutdown(void)
+{
+ if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_DFA))
+ {
+ cvmx_dprintf("ERROR: cvmx_hfa_shutdown: DFA not idle.\n");
+ return -1;
+ }
+ cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_DFA);
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_hfa_shutdown);
+#endif
+
+/**
+ * Submit a command to the DFA block
+ *
+ * @param command DFA command to submit
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_hfa_submit(cvmx_dfa_command_t *command)
+{
+ cvmx_cmd_queue_result_t result = cvmx_cmd_queue_write(CVMX_CMD_QUEUE_DFA, 1, 4, command->u64);
+ if (result == CVMX_CMD_QUEUE_SUCCESS)
+ cvmx_write_csr(CVMX_DFA_DBELL, 1);
+ return result;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_hfa_submit);
+#endif
+
+void *hfa_bootmem_alloc (uint64_t size, uint64_t alignment)
+{
+ int64_t address;
+
+ address = cvmx_bootmem_phy_alloc(size, 0, 0, alignment, 0);
+
+ if (address > 0)
+ return cvmx_phys_to_ptr(address);
+ else
+ return NULL;
+}
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(hfa_bootmem_alloc);
+#endif
+
+int hfa_bootmem_free (void *ptr, uint64_t size)
+{
+ uint64_t address;
+ address = cvmx_ptr_to_phys (ptr);
+ return __cvmx_bootmem_phy_free (address, size, 0);
+}
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(hfa_bootmem_free);
+#endif
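+
+/*
+ * Usage sketch (illustration only): pairing the bootmem helpers, e.g. to
+ * hold a graph image. The size and alignment are example values; the SDK
+ * does not mandate them.
+ *
+ *   uint64_t size = 1 << 20;
+ *   void *graph = hfa_bootmem_alloc(size, CVMX_CACHE_LINE_SIZE);
+ *   if (graph)
+ *   {
+ *       ... load the HFA graph into the buffer ...
+ *       hfa_bootmem_free(graph, size);
+ *   }
+ */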
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-hfa.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-hfa.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-hfa.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-hfa.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,438 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2011 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the CN63XX, CN68XX hardware HFA engine.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ */
+
+#ifndef __CVMX_HFA_H__
+#define __CVMX_HFA_H__
+
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+#include "cvmx-llm.h"
+#include "cvmx-wqe.h"
+#include "cvmx-fpa.h"
+#include "cvmx-bootmem.h"
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+
+#ifdef CVMX_ENABLE_DFA_FUNCTIONS
+/* DFA queue cmd buffers */
+
+#define CVMX_FPA_DFA_POOL (4) /**< DFA command buffers */
+#define CVMX_FPA_DFA_POOL_SIZE (2 * CVMX_CACHE_LINE_SIZE)
+#endif
+
+#else
+#include "executive-config.h"
+#ifdef CVMX_ENABLE_DFA_FUNCTIONS
+#include "cvmx-config.h"
+#endif
+#endif
+#endif
+
+#define ENABLE_DEPRECATED /* Set to enable the old 18/36 bit names */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_DFA_ITYPE_MEMLOAD 0x0
+#define CVMX_DFA_ITYPE_CACHELOAD 0x1
+#define CVMX_DFA_ITYPE_GRAPHFREE 0x3
+#define CVMX_DFA_ITYPE_GRAPHWALK 0x4
+
+typedef union {
+ uint64_t u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t size:24;
+ uint64_t addr:40;
+#else
+ uint64_t addr:40;
+ uint64_t size:24;
+#endif
+ } s;
+} cvmx_dfa_gather_entry_t;
+
+typedef union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t f1:3;
+ uint64_t unused1:2;
+ uint64_t snode:27;
+ uint64_t gather_mode:1;
+ uint64_t little_endian:1;
+ uint64_t store_full:1;
+ uint64_t load_through:1;
+ uint64_t small:1;
+ uint64_t itype:3;
+ uint64_t unused0:2;
+ uint64_t mbase:22;
+#else
+ uint64_t mbase:22;
+ uint64_t unused0:2;
+ uint64_t itype:3;
+ uint64_t small:1;
+ uint64_t load_through:1;
+ uint64_t store_full:1;
+ uint64_t little_endian:1;
+ uint64_t gather_mode:1;
+ uint64_t snode:27;
+ uint64_t unused1:2;
+ uint64_t f1:3;
+#endif
+ } walk;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused4:7;
+ uint64_t dbase:9;
+ uint64_t unused3:2;
+ uint64_t cbase:14;
+ uint64_t gather_mode:1;
+ uint64_t little_endian:1;
+ uint64_t store_full:1;
+ uint64_t load_through:1;
+ uint64_t unused2:1;
+ uint64_t itype:3;
+ uint64_t unused1:6;
+ uint64_t dsize:10;
+ uint64_t unused0:2;
+ uint64_t pgid:6;
+#else
+ uint64_t pgid:6;
+ uint64_t unused0:2;
+ uint64_t dsize:10;
+ uint64_t unused1:6;
+ uint64_t itype:3;
+ uint64_t unused2:1;
+ uint64_t load_through:1;
+ uint64_t store_full:1;
+ uint64_t little_endian:1;
+ uint64_t gather_mode:1;
+ uint64_t cbase:14;
+ uint64_t unused3:2;
+ uint64_t dbase:9;
+ uint64_t unused4:7;
+#endif
+ } cload;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused2:32;
+ uint64_t gather_mode:1;
+ uint64_t little_endian:1;
+ uint64_t store_full:1;
+ uint64_t load_through:1;
+ uint64_t unused1:1;
+ uint64_t itype:3;
+ uint64_t unused0:2;
+ uint64_t mbase:22;
+#else
+ uint64_t mbase:22;
+ uint64_t unused0:2;
+ uint64_t itype:3;
+ uint64_t unused1:1;
+ uint64_t load_through:1;
+ uint64_t store_full:1;
+ uint64_t little_endian:1;
+ uint64_t gather_mode:1;
+ uint64_t unused2:32;
+#endif
+ } mload;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused2:34;
+ uint64_t store_full:1;
+ uint64_t unused1:2;
+ uint64_t itype:3;
+ uint64_t unused0:24;
+#else
+ uint64_t unused0:24;
+ uint64_t itype:3;
+ uint64_t unused1:2;
+ uint64_t store_full:1;
+ uint64_t unused2:34;
+#endif
+ } free;
+} cvmx_dfa_word0_t;
+
+typedef union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rmax:16;
+ uint64_t f2:8;
+ uint64_t rptr:40;
+#else
+ uint64_t rptr:40;
+ uint64_t f2:8;
+ uint64_t rmax:16;
+#endif
+ } walk;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused1:13;
+ uint64_t rmax:3;
+ uint64_t unused0:8;
+ uint64_t rptr:40;
+#else
+ uint64_t rptr:40;
+ uint64_t unused0:8;
+ uint64_t rmax:3;
+ uint64_t unused1:13;
+#endif
+ } cload;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused1:4;
+ uint64_t rmax:12;
+ uint64_t unused0:8;
+ uint64_t rptr:40;
+#else
+ uint64_t rptr:40;
+ uint64_t unused0:8;
+ uint64_t rmax:12;
+ uint64_t unused1:4;
+#endif
+ } mload;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused:24;
+ uint64_t rptr:40;
+#else
+ uint64_t rptr:40;
+ uint64_t unused:24;
+#endif
+ } free;
+} cvmx_dfa_word1_t;
+
+typedef union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dlen:16;
+ uint64_t srepl:2;
+ uint64_t unused:2;
+ uint64_t clmsk:4;
+ uint64_t dptr:40;
+#else
+ uint64_t dptr:40;
+ uint64_t clmsk:4;
+ uint64_t unused:2;
+ uint64_t srepl:2;
+ uint64_t dlen:16;
+#endif
+ } walk;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dlen:16;
+ uint64_t unused:4;
+ uint64_t clmsk:4;
+ uint64_t dptr:40;
+#else
+ uint64_t dptr:40;
+ uint64_t clmsk:4;
+ uint64_t unused:4;
+ uint64_t dlen:16;
+#endif
+ } cload;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dlen:16;
+ uint64_t repl:2;
+ uint64_t unused:2;
+ uint64_t clmsk:4;
+ uint64_t dptr:40;
+#else
+ uint64_t dptr:40;
+ uint64_t clmsk:4;
+ uint64_t unused:2;
+ uint64_t repl:2;
+ uint64_t dlen:16;
+#endif
+ } mload;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused1:20;
+ uint64_t clmsk:4;
+ uint64_t unused0:40;
+#else
+ uint64_t unused0:40;
+ uint64_t clmsk:4;
+ uint64_t unused1:20;
+#endif
+ } free;
+} cvmx_dfa_word2_t;
+
+typedef union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused1:2;
+ uint64_t vgid:8;
+ uint64_t unused0:5;
+ uint64_t f3:9;
+ uint64_t wqptr:40;
+#else
+ uint64_t wqptr:40;
+ uint64_t f3:9;
+ uint64_t unused0:5;
+ uint64_t vgid:8;
+ uint64_t unused1:2;
+#endif
+ } walk;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused1:2;
+ uint64_t vgid:8;
+ uint64_t unused0:7;
+ uint64_t f4:7;
+ uint64_t wqptr:40;
+#else
+ uint64_t wqptr:40;
+ uint64_t f4:7;
+ uint64_t unused0:7;
+ uint64_t vgid:8;
+ uint64_t unused1:2;
+#endif
+ } cload;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused1:2;
+ uint64_t vgid:8;
+ uint64_t unused0:7;
+ uint64_t f4:7;
+ uint64_t wqptr:40;
+#else
+ uint64_t wqptr:40;
+ uint64_t f4:7;
+ uint64_t unused0:7;
+ uint64_t vgid:8;
+ uint64_t unused1:2;
+#endif
+ } mload;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused1:2;
+ uint64_t vgid:8;
+ uint64_t unused0:14;
+ uint64_t wqptr:40;
+#else
+ uint64_t wqptr:40;
+ uint64_t unused0:14;
+ uint64_t vgid:8;
+ uint64_t unused1:2;
+#endif
+ } free;
+} cvmx_dfa_word3_t;
+
+typedef union {
+ uint64_t u64[4];
+ struct {
+ cvmx_dfa_word0_t word0;
+ cvmx_dfa_word1_t word1;
+ cvmx_dfa_word2_t word2;
+ cvmx_dfa_word3_t word3;
+ };
+} cvmx_dfa_command_t;
+
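+/* Usage sketch (illustration only): commands are built by clearing the
+   union and then filling the overlay that matches the instruction type,
+   here a GRAPHFREE. Fields beyond itype are omitted because they depend
+   on the graph being freed. */
+static inline void example_init_graphfree_command(cvmx_dfa_command_t *cmd)
+{
+    cmd->u64[0] = cmd->u64[1] = cmd->u64[2] = cmd->u64[3] = 0;
+    cmd->word0.free.itype = CVMX_DFA_ITYPE_GRAPHFREE;
+}
+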
+#ifdef CVMX_ENABLE_DFA_FUNCTIONS
+/**
+ * Initialize the DFA hardware before use
+ * Returns 0 on success, -1 on failure
+ */
+int cvmx_hfa_initialize(void);
+
+
+/**
+ * Shut down and clean up resources used by the DFA
+ */
+int cvmx_hfa_shutdown(void);
+
+/**
+ * Submit a command to the HFA block
+ *
+ * @param command HFA command to submit
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_hfa_submit(cvmx_dfa_command_t *command);
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param alignment Alignment required - must be power of 2
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+
+void *hfa_bootmem_alloc (uint64_t size, uint64_t alignment);
+
+/**
+ * Frees a block to the bootmem allocator list.
+ *
+ * @param ptr address of block (memory pointer (void*))
+ * @param size size of block in bytes.
+ *
+ * @return 1 on success,
+ * 0 on failure
+ *
+ */
+
+int hfa_bootmem_free (void *ptr, uint64_t size);
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_HFA_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-hfa.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-higig.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-higig.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-higig.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,419 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions and typedefs for using Octeon in HiGig/HiGig+/HiGig2 mode over
+ * XAUI.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_HIGIG_H__
+#define __CVMX_HIGIG_H__
+#include "cvmx-wqe.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct
+{
+ union
+ {
+ uint32_t u32;
+ struct
+ {
+ uint32_t start : 8; /**< 8 bits of preamble indicating the start of frame */
+ uint32_t hgi : 2; /**< HiGig interface format indicator
+ 00 = Reserved
+ 01 = Pure preamble - IEEE standard framing of 10GE
+ 10 = XGS header - framing based on XGS family definition In this
+ format, the default length of the header is 12 bytes and additional
+ bytes are indicated by the HDR_EXT_LEN field
+ 11 = Reserved */
+ uint32_t cng_high : 1; /**< Congestion Bit High flag */
+ uint32_t hdr_ext_len : 3; /**< This field is valid only if the HGI field is a b'10' and it indicates the extension
+ to the standard 12-bytes of XGS HiGig header. Each unit represents 4
+ bytes, giving a total of 16 additional extension bytes. Values of b'101', b'110'
+ and b'111' are reserved. For HGI field value of b'01' this field should be
+ b'01'. For all other values of HGI it is don't care. */
+ uint32_t src_modid_6 : 1; /**< This field is valid only if the HGI field is a b'10' and it represents Bit 6 of
+ SRC_MODID (bits 4:0 are in Byte 4 and bit 5 is in Byte 9). For HGI field
+ value of b'01' this field should be b'0'. For all other values of HGI it is don't
+ care. */
+ uint32_t dst_modid_6 : 1; /**< This field is valid only if the HGI field is a b'10' and it represents Bit 6 of
+ DST_MODID (bits 4:0 are in Byte 7 and bit 5 is in Byte 9). For HGI field
+ value of b'01' this field should be b'1'. For all other values of HGI it is don't
+ care. */
+ uint32_t vid_high : 8; /**< 8 MSBs of the VLAN tag information */
+ uint32_t vid_low : 8; /**< 8 LSBs of the VLAN tag information */
+ } s;
+ } dw0;
+ union
+ {
+ uint32_t u32;
+ struct
+ {
+ uint32_t src_modid_low : 5; /**< Bits 4:0 of Module ID of the source module on which the packet ingress (bit
+ 5 is in Byte 9 and bit 6 Is in Byte 1) */
+ uint32_t opcode : 3; /**< XGS HiGig op-code, indicating the type of packet
+ 000 = Control frames used for CPU to CPU communications
+ 001 = Unicast packet with destination resolved; The packet can be
+ either Layer 2 unicast packet or L3 unicast packet that was
+ routed in the ingress chip.
+ 010 = Broadcast or unknown Unicast packet or unknown multicast,
+ destined to all members of the VLAN
+ 011 = L2 Multicast packet, destined to all ports of the group indicated
+ in the L2MC_INDEX which is overlayed on DST_PORT/DST_MODID fields
+ 100 = IP Multicast packet, destined to all ports of the group indicated
+ in the IPMC_INDEX which is overlayed on DST_PORT/DST_MODID fields
+ 101 = Reserved
+ 110 = Reserved
+ 111 = Reserved */
+ uint32_t pfm : 2; /**< Three Port Filtering Modes (0, 1, 2) used in handling registered/unregistered
+ multicast (unknown L2 multicast and IPMC) packets. This field is used
+ when OPCODE is 011 or 100. Semantics of the PFM bits are as follows:
+ For registered L2 multicast packets:
+ PFM = 0 - Flood to VLAN
+ PFM = 1 or 2 - Send to group members in the L2MC table
+ For unregistered L2 multicast packets:
+ PFM = 0 or 1 - Flood to VLAN
+ PFM = 2 - Drop the packet */
+ uint32_t src_port_tgid : 6; /**< If the MSB of this field is set, then it indicates the LAG the packet ingressed
+ on, else it represents the physical port the packet ingressed on. */
+ uint32_t dst_port : 5; /**< Port number of destination port on which the packet needs to egress. */
+ uint32_t priority : 3; /**< This is the internal priority of the packet. This internal priority will go through
+ COS_SEL mapping registers to map to the actual MMU queues. */
+ uint32_t header_type : 2; /**< Indicates the format of the next 4 bytes of the XGS HiGig header
+ 00 = Overlay 1 (default)
+ 01 = Overlay 2 (Classification Tag)
+ 10 = Reserved
+ 11 = Reserved */
+ uint32_t cng_low : 1; /**< Semantics of CNG_HIGH and CNG_LOW are as follows: The following
+ encodings are to make it backward compatible:
+ [CNG_HIGH, CNG_LOW] - COLOR
+ [0, 0] - Packet is green
+ [0, 1] - Packet is red
+ [1, 1] - Packet is yellow
+ [1, 0] - Undefined */
+ uint32_t dst_modid_low : 5; /**< Bits [4:0] of Module ID of the destination port on which the packet needs to egress. */
+ } s;
+ } dw1;
+ union
+ {
+ uint32_t u32;
+ struct
+ {
+ uint32_t dst_t : 1; /**< Destination Trunk: Indicates that the destination port is a member of a trunk
+ group. */
+ uint32_t dst_tgid : 3; /**< Destination Trunk Group ID: Trunk group ID of the destination port. The
+ DO_NOT_LEARN bit is overlaid on the second bit of this field. */
+ uint32_t ingress_tagged : 1; /**< Ingress Tagged: Indicates whether the packet was tagged when it originally
+ ingressed the system. */
+ uint32_t mirror_only : 1; /**< Mirror Only: XGS 1/2 mode: Indicates that the packet was switched and only
+ needs to be mirrored. */
+ uint32_t mirror_done : 1; /**< Mirroring Done: XGS1/2 mode: Indicates that the packet was mirrored and
+ may still need to be switched. */
+ uint32_t mirror : 1; /**< Mirror: XGS3 mode: a mirror copy packet. XGS1/2 mode: Indicates that the
+ packet was switched and only needs to be mirrored. */
+
+ uint32_t src_modid_5 : 1; /**< Source Module ID: Bit 5 of Src_ModID (bits 4:0 are in byte 4 and bit 6 is in
+ byte 1) */
+ uint32_t dst_modid_5 : 1; /**< Destination Module ID: Bit 5 of Dst_ModID (bits 4:0 are in byte 7 and bit 6
+ is in byte 1) */
+ uint32_t l3 : 1; /**< L3: Indicates that the packet is L3 switched */
+ uint32_t label_present : 1; /**< Label Present: Indicates that header contains a 20-bit VC label: HiGig+
+ added field. */
+ uint32_t vc_label_16_19 : 4; /**< VC Label: Bits 19:16 of VC label: HiGig+ added field */
+ uint32_t vc_label_0_15 : 16;/**< VC Label: Bits 15:0 of VC label: HiGig+ added field */
+ } o1;
+ struct
+ {
+ uint32_t classification : 16; /**< Classification tag information from the HiGig device FFP */
+ uint32_t reserved_0_15 : 16;
+
+ } o2;
+ } dw2;
+} cvmx_higig_header_t;
+
+typedef struct
+{
+ union
+ {
+ uint32_t u32;
+ struct
+ {
+ uint32_t k_sop : 8; /**< The delimiter indicating the start of a packet transmission */
+ uint32_t reserved_21_23 : 3;
+ uint32_t mcst : 1; /**< MCST indicates whether the packet should be unicast or
+ multicast forwarded through the XGS switching fabric
+ - 0: Unicast
+ - 1: Multicast */
+ uint32_t tc : 4; /**< Traffic Class [3:0] indicates the distinctive Quality of Service (QoS)
+ the switching fabric will provide when forwarding the packet
+ through the fabric */
+ uint32_t dst_modid_mgid : 8; /**< When MCST=0, this field indicates the destination XGS module to
+ which the packet will be delivered. When MCST=1, this field indicates
+ higher order bits of the Multicast Group ID. */
+ uint32_t dst_pid_mgid : 8; /**< When MCST=0, this field indicates a port associated with the
+ module indicated by the DST_MODID, through which the packet
+ will exit the system. When MCST=1, this field indicates lower order
+ bits of the Multicast Group ID */
+ } s;
+ } dw0;
+ union
+ {
+ uint32_t u32;
+ struct
+ {
+ uint32_t src_modid : 8; /**< Source Module ID indicates the source XGS module from which
+ the packet is originated. (It can also be used for the fabric multicast
+ load balancing purpose.) */
+ uint32_t src_pid : 8; /**< Source Port ID indicates a port associated with the module
+ indicated by the SRC_MODID, through which the packet has
+ entered the system */
+ uint32_t lbid : 8; /**< Load Balancing ID indicates a packet flow hashing index
+ computed by the ingress XGS module for statistical distribution of
+ packet flows through a multipath fabric */
+ uint32_t dp : 2; /**< Drop Precedence indicates the traffic rate violation status of the
+ packet measured by the ingress module.
+ - 00: GREEN
+ - 01: RED
+ - 10: Reserved
+ - 11: YELLOW */
+ uint32_t reserved_3_5 : 3;
+ uint32_t ppd_type : 3; /**< Packet Processing Descriptor Type
+ - 000: PPD Overlay1
+ - 001: PPD Overlay2
+ - 010~111: Reserved */
+ } s;
+ } dw1;
+ union
+ {
+ uint32_t u32;
+ struct
+ {
+ uint32_t dst_t : 1; /**< Destination Trunk: Indicates that the destination port is a member of a trunk
+ group. */
+ uint32_t dst_tgid : 3; /**< Destination Trunk Group ID: Trunk group ID of the destination port. The
+ DO_NOT_LEARN bit is overlaid on the second bit of this field. */
+ uint32_t ingress_tagged : 1; /**< Ingress Tagged: Indicates whether the packet was tagged when it originally
+ ingressed the system. */
+ uint32_t mirror_only : 1; /**< Mirror Only: XGS 1/2 mode: Indicates that the packet was switched and only
+ needs to be mirrored. */
+ uint32_t mirror_done : 1; /**< Mirroring Done: XGS1/2 mode: Indicates that the packet was mirrored and
+ may still need to be switched. */
+ uint32_t mirror : 1; /**< Mirror: XGS3 mode: a mirror copy packet. XGS1/2 mode: Indicates that the
+ packet was switched and only needs to be mirrored. */
+ uint32_t reserved_22_23 : 2;
+ uint32_t l3 : 1; /**< L3: Indicates that the packet is L3 switched */
+ uint32_t label_present : 1; /**< Label Present: Indicates that the header contains a 20-bit VC label: HiGig+
+ added field. */
+ uint32_t vc_label : 20; /**< Refer to the HiGig+ Architecture Specification */
+ } o1;
+ struct
+ {
+ uint32_t classification : 16; /**< Classification tag information from the HiGig device FFP */
+ uint32_t reserved_0_15 : 16;
+ } o2;
+ } dw2;
+ union
+ {
+ uint32_t u32;
+ struct
+ {
+ uint32_t vid : 16; /**< VLAN tag information */
+ uint32_t pfm : 2; /**< Three Port Filtering Modes (0, 1, 2) used in handling registered/unregistered
+ multicast (unknown L2 multicast and IPMC) packets. This field is used
+ when OPCODE is 011 or 100. Semantics of the PFM bits are as follows:
+ For registered L2 multicast packets:
+ PFM= 0 - Flood to VLAN
+ PFM= 1 or 2 - Send to group members in the L2MC table
+ For unregistered L2 multicast packets:
+ PFM= 0 or 1 - Flood to VLAN
+ PFM= 2 - Drop the packet */
+ uint32_t src_t : 1; /**< Source Trunk: If set, the packet ingressed on a LAG (trunk);
+ if clear, it ingressed on a physical port. */
+ uint32_t reserved_11_12 : 2;
+ uint32_t opcode : 3; /**< XGS HiGig op-code, indicating the type of packet
+ 000 = Control frames used for CPU to CPU communications
+ 001 = Unicast packet with destination resolved; the packet can be
+ either a Layer 2 unicast packet or an L3 unicast packet that
+ was routed in the ingress chip.
+ 010 = Broadcast, unknown unicast, or unknown multicast packet,
+ destined to all members of the VLAN
+ 011 = L2 multicast packet, destined to all ports of the group indicated
+ in the L2MC_INDEX, which is overlaid on the DST_PORT/DST_MODID fields
+ 100 = IP multicast packet, destined to all ports of the group indicated
+ in the IPMC_INDEX, which is overlaid on the DST_PORT/DST_MODID fields
+ 101 = Reserved
+ 110 = Reserved
+ 111 = Reserved */
+ uint32_t hdr_ext_len : 3; /**< This field is valid only if the HGI field is b'10', and it indicates the extension
+ to the standard 12 bytes of XGS HiGig header. Each unit represents 4
+ bytes, giving up to 16 additional extension bytes. Values b'101', b'110',
+ and b'111' are reserved. For an HGI field value of b'01' this field must be
+ b'01'. For all other values of HGI it is a don't-care. */
+ uint32_t reserved_0_4 : 5;
+ } s;
+ } dw3;
+} cvmx_higig2_header_t;
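
For transmit, the same unions can be filled word by word. A minimal sketch of a HiGig2 unicast header follows; the K.SOP value 0xfb is an assumption taken from the usual HiGig2 start-of-packet convention, not from this file.

/* Sketch: populate a HiGig2 header for a unicast frame. */
static inline void example_higig2_fill(cvmx_higig2_header_t *h,
                                       int dst_mod, int dst_port)
{
    h->dw0.u32 = 0;
    h->dw0.s.k_sop = 0xfb;             /* start-of-packet delimiter (assumed value) */
    h->dw0.s.mcst = 0;                 /* unicast */
    h->dw0.s.tc = 0;                   /* lowest traffic class */
    h->dw0.s.dst_modid_mgid = dst_mod; /* destination XGS module */
    h->dw0.s.dst_pid_mgid = dst_port;  /* port within that module */
    h->dw1.u32 = 0;
    h->dw1.s.ppd_type = 0;             /* PPD Overlay1 format in dw2 */
    h->dw2.u32 = 0;
    h->dw3.u32 = 0;
}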
+
+
+/**
+ * Initialize the HiGig aspects of a XAUI interface. This function
+ * should be called before the cvmx-helper generic init.
+ *
+ * @param interface Interface to initialize HiGig on (0-1)
+ * @param enable_higig2
+ * Non-zero to enable HiGig2 support. Zero to support HiGig
+ * and HiGig+.
+ *
+ * @return Zero on success, negative on failure
+ */
+static inline int cvmx_higig_initialize(int interface, int enable_higig2)
+{
+ cvmx_pip_prt_cfgx_t pip_prt_cfg;
+ cvmx_gmxx_rxx_udd_skp_t gmx_rx_udd_skp;
+ cvmx_gmxx_txx_min_pkt_t gmx_tx_min_pkt;
+ cvmx_gmxx_txx_append_t gmx_tx_append;
+ cvmx_gmxx_tx_ifg_t gmx_tx_ifg;
+ cvmx_gmxx_tx_ovr_bp_t gmx_tx_ovr_bp;
+ cvmx_gmxx_rxx_frm_ctl_t gmx_rx_frm_ctl;
+ cvmx_gmxx_tx_xaui_ctl_t gmx_tx_xaui_ctl;
+ int i, pknd;
+ int header_size = (enable_higig2) ? 16 : 12;
+
+ /* Setup PIP to handle HiGig */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ pknd = cvmx_helper_get_pknd(interface, 0);
+ else
+ pknd = interface*16;
+ pip_prt_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(pknd));
+ pip_prt_cfg.s.dsa_en = 0;
+ pip_prt_cfg.s.higig_en = 1;
+ pip_prt_cfg.s.hg_qos = 1;
+ pip_prt_cfg.s.skip = header_size;
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(pknd), pip_prt_cfg.u64);
+
+ /* Setup some sample QoS defaults. These can be changed later */
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ for (i=0; i<64; i++)
+ {
+ cvmx_pip_hg_pri_qos_t pip_hg_pri_qos;
+ pip_hg_pri_qos.u64 = 0;
+ pip_hg_pri_qos.s.up_qos = 1;
+ pip_hg_pri_qos.s.pri = i;
+ pip_hg_pri_qos.s.qos = i&7;
+ cvmx_write_csr(CVMX_PIP_HG_PRI_QOS, pip_hg_pri_qos.u64);
+ }
+ }
+
+ /* Setup GMX RX to treat the HiGig header as user data to ignore */
+ gmx_rx_udd_skp.u64 = cvmx_read_csr(CVMX_GMXX_RXX_UDD_SKP(0, interface));
+ gmx_rx_udd_skp.s.len = header_size;
+ gmx_rx_udd_skp.s.fcssel = 0;
+ cvmx_write_csr(CVMX_GMXX_RXX_UDD_SKP(0, interface), gmx_rx_udd_skp.u64);
+
+ /* Disable GMX preamble checking */
+ gmx_rx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(0, interface));
+ gmx_rx_frm_ctl.s.pre_chk = 0;
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(0, interface), gmx_rx_frm_ctl.u64);
+
+ /* Setup GMX TX to properly pad minimum-sized packets */
+ gmx_tx_min_pkt.u64 = cvmx_read_csr(CVMX_GMXX_TXX_MIN_PKT(0, interface));
+ gmx_tx_min_pkt.s.min_size = 59 + header_size;
+ cvmx_write_csr(CVMX_GMXX_TXX_MIN_PKT(0, interface), gmx_tx_min_pkt.u64);
+
+ /* Setup GMX TX to not add a preamble */
+ gmx_tx_append.u64 = cvmx_read_csr(CVMX_GMXX_TXX_APPEND(0, interface));
+ gmx_tx_append.s.preamble = 0;
+ cvmx_write_csr(CVMX_GMXX_TXX_APPEND(0, interface), gmx_tx_append.u64);
+
+ /* Reduce the inter frame gap to 8 bytes */
+ gmx_tx_ifg.u64 = cvmx_read_csr(CVMX_GMXX_TX_IFG(interface));
+ gmx_tx_ifg.s.ifg1 = 4;
+ gmx_tx_ifg.s.ifg2 = 4;
+ cvmx_write_csr(CVMX_GMXX_TX_IFG(interface), gmx_tx_ifg.u64);
+
+ /* Disable GMX backpressure */
+ gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
+ gmx_tx_ovr_bp.s.bp = 0;
+ gmx_tx_ovr_bp.s.en = 0xf;
+ gmx_tx_ovr_bp.s.ign_full = 0xf;
+ cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
+
+ if (enable_higig2)
+ {
+ /* Enable HiGig2 support and forwarding of virtual port backpressure
+ to PKO */
+ cvmx_gmxx_hg2_control_t gmx_hg2_control;
+ gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
+ gmx_hg2_control.s.hg2rx_en = 1;
+ gmx_hg2_control.s.hg2tx_en = 1;
+ gmx_hg2_control.s.logl_en = 0xffff;
+ gmx_hg2_control.s.phys_en = 1;
+ cvmx_write_csr(CVMX_GMXX_HG2_CONTROL(interface), gmx_hg2_control.u64);
+ }
+
+ /* Enable HiGig */
+ gmx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+ gmx_tx_xaui_ctl.s.hg_en = 1;
+ cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), gmx_tx_xaui_ctl.u64);
+
+ return 0;
+}
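
A minimal call-order sketch matching the comment above: HiGig init first, then the generic helper init. The entry point cvmx_helper_initialize_packet_io_global() is assumed from the SDK's cvmx-helper code, not defined in this header.

if (cvmx_higig_initialize(0, 1) == 0)          /* HiGig2 on XAUI interface 0 */
    cvmx_helper_initialize_packet_io_global(); /* generic helper init runs after */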
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // __CVMX_HIGIG_H__
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-higig.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ilk-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ilk-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ilk-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,3530 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-ilk-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon ilk.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_ILK_DEFS_H__
+#define __CVMX_ILK_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ILK_BIST_SUM CVMX_ILK_BIST_SUM_FUNC()
+static inline uint64_t CVMX_ILK_BIST_SUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ILK_BIST_SUM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180014000038ull);
+}
+#else
+#define CVMX_ILK_BIST_SUM (CVMX_ADD_IO_SEG(0x0001180014000038ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ILK_GBL_CFG CVMX_ILK_GBL_CFG_FUNC()
+static inline uint64_t CVMX_ILK_GBL_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ILK_GBL_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180014000000ull);
+}
+#else
+#define CVMX_ILK_GBL_CFG (CVMX_ADD_IO_SEG(0x0001180014000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ILK_GBL_INT CVMX_ILK_GBL_INT_FUNC()
+static inline uint64_t CVMX_ILK_GBL_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ILK_GBL_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180014000008ull);
+}
+#else
+#define CVMX_ILK_GBL_INT (CVMX_ADD_IO_SEG(0x0001180014000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ILK_GBL_INT_EN CVMX_ILK_GBL_INT_EN_FUNC()
+static inline uint64_t CVMX_ILK_GBL_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ILK_GBL_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180014000010ull);
+}
+#else
+#define CVMX_ILK_GBL_INT_EN (CVMX_ADD_IO_SEG(0x0001180014000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ILK_INT_SUM CVMX_ILK_INT_SUM_FUNC()
+static inline uint64_t CVMX_ILK_INT_SUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ILK_INT_SUM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180014000030ull);
+}
+#else
+#define CVMX_ILK_INT_SUM (CVMX_ADD_IO_SEG(0x0001180014000030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ILK_LNE_DBG CVMX_ILK_LNE_DBG_FUNC()
+static inline uint64_t CVMX_ILK_LNE_DBG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ILK_LNE_DBG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180014030008ull);
+}
+#else
+#define CVMX_ILK_LNE_DBG (CVMX_ADD_IO_SEG(0x0001180014030008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ILK_LNE_STS_MSG CVMX_ILK_LNE_STS_MSG_FUNC()
+static inline uint64_t CVMX_ILK_LNE_STS_MSG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ILK_LNE_STS_MSG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180014030000ull);
+}
+#else
+#define CVMX_ILK_LNE_STS_MSG (CVMX_ADD_IO_SEG(0x0001180014030000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ILK_RXF_IDX_PMAP CVMX_ILK_RXF_IDX_PMAP_FUNC()
+static inline uint64_t CVMX_ILK_RXF_IDX_PMAP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ILK_RXF_IDX_PMAP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180014000020ull);
+}
+#else
+#define CVMX_ILK_RXF_IDX_PMAP (CVMX_ADD_IO_SEG(0x0001180014000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ILK_RXF_MEM_PMAP CVMX_ILK_RXF_MEM_PMAP_FUNC()
+static inline uint64_t CVMX_ILK_RXF_MEM_PMAP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ILK_RXF_MEM_PMAP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180014000028ull);
+}
+#else
+#define CVMX_ILK_RXF_MEM_PMAP (CVMX_ADD_IO_SEG(0x0001180014000028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_CFG0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_CFG0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020000ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_CFG0(offset) (CVMX_ADD_IO_SEG(0x0001180014020000ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_CFG1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_CFG1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020008ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_CFG1(offset) (CVMX_ADD_IO_SEG(0x0001180014020008ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_FLOW_CTL0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_FLOW_CTL0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020090ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_FLOW_CTL0(offset) (CVMX_ADD_IO_SEG(0x0001180014020090ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_FLOW_CTL1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_FLOW_CTL1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020098ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_FLOW_CTL1(offset) (CVMX_ADD_IO_SEG(0x0001180014020098ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_IDX_CAL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_IDX_CAL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800140200A0ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_IDX_CAL(offset) (CVMX_ADD_IO_SEG(0x00011800140200A0ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_IDX_STAT0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_IDX_STAT0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020070ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_IDX_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014020070ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_IDX_STAT1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_IDX_STAT1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020078ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_IDX_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014020078ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_INT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_INT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020010ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_INT(offset) (CVMX_ADD_IO_SEG(0x0001180014020010ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_INT_EN(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_INT_EN(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020018ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x0001180014020018ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_JABBER(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_JABBER(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800140200B8ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800140200B8ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_MEM_CAL0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_MEM_CAL0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800140200A8ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_MEM_CAL0(offset) (CVMX_ADD_IO_SEG(0x00011800140200A8ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_MEM_CAL1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_MEM_CAL1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800140200B0ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_MEM_CAL1(offset) (CVMX_ADD_IO_SEG(0x00011800140200B0ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_MEM_STAT0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_MEM_STAT0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020080ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_MEM_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014020080ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_MEM_STAT1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_MEM_STAT1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020088ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_MEM_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014020088ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_RID(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_RID(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800140200C0ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_RID(offset) (CVMX_ADD_IO_SEG(0x00011800140200C0ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_STAT0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_STAT0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020020ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014020020ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_STAT1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_STAT1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020028ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014020028ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_STAT2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_STAT2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020030ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x0001180014020030ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_STAT3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_STAT3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020038ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x0001180014020038ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_STAT4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_STAT4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020040ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x0001180014020040ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_STAT5(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_STAT5(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020048ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x0001180014020048ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_STAT6(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_STAT6(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020050ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x0001180014020050ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_STAT7(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_STAT7(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020058ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x0001180014020058ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_STAT8(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_STAT8(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020060ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x0001180014020060ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RXX_STAT9(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_RXX_STAT9(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014020068ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_RXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x0001180014020068ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_CFG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_CFG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038000ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_CFG(offset) (CVMX_ADD_IO_SEG(0x0001180014038000ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_INT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_INT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038008ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_INT(offset) (CVMX_ADD_IO_SEG(0x0001180014038008ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_INT_EN(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_INT_EN(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038010ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x0001180014038010ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_STAT0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_STAT0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038018ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014038018ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_STAT1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_STAT1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038020ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014038020ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_STAT2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_STAT2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038028ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_STAT2(offset) (CVMX_ADD_IO_SEG(0x0001180014038028ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_STAT3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_STAT3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038030ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_STAT3(offset) (CVMX_ADD_IO_SEG(0x0001180014038030ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_STAT4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_STAT4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038038ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_STAT4(offset) (CVMX_ADD_IO_SEG(0x0001180014038038ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_STAT5(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_STAT5(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038040ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_STAT5(offset) (CVMX_ADD_IO_SEG(0x0001180014038040ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_STAT6(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_STAT6(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038048ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_STAT6(offset) (CVMX_ADD_IO_SEG(0x0001180014038048ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_STAT7(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_STAT7(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038050ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_STAT7(offset) (CVMX_ADD_IO_SEG(0x0001180014038050ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_STAT8(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_STAT8(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038058ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_STAT8(offset) (CVMX_ADD_IO_SEG(0x0001180014038058ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_RX_LNEX_STAT9(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_ILK_RX_LNEX_STAT9(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014038060ull) + ((offset) & 7) * 1024;
+}
+#else
+#define CVMX_ILK_RX_LNEX_STAT9(offset) (CVMX_ADD_IO_SEG(0x0001180014038060ull) + ((offset) & 7) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ILK_SER_CFG CVMX_ILK_SER_CFG_FUNC()
+static inline uint64_t CVMX_ILK_SER_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ILK_SER_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180014000018ull);
+}
+#else
+#define CVMX_ILK_SER_CFG (CVMX_ADD_IO_SEG(0x0001180014000018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_CFG0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_CFG0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010000ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_CFG0(offset) (CVMX_ADD_IO_SEG(0x0001180014010000ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_CFG1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_CFG1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010008ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_CFG1(offset) (CVMX_ADD_IO_SEG(0x0001180014010008ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_DBG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_DBG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010070ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_DBG(offset) (CVMX_ADD_IO_SEG(0x0001180014010070ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_FLOW_CTL0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_FLOW_CTL0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010048ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_FLOW_CTL0(offset) (CVMX_ADD_IO_SEG(0x0001180014010048ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_FLOW_CTL1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_FLOW_CTL1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010050ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_FLOW_CTL1(offset) (CVMX_ADD_IO_SEG(0x0001180014010050ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_IDX_CAL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_IDX_CAL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010058ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_IDX_CAL(offset) (CVMX_ADD_IO_SEG(0x0001180014010058ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_IDX_PMAP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_IDX_PMAP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010010ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_IDX_PMAP(offset) (CVMX_ADD_IO_SEG(0x0001180014010010ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_IDX_STAT0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_IDX_STAT0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010020ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_IDX_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014010020ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_IDX_STAT1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_IDX_STAT1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010028ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_IDX_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014010028ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_INT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_INT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010078ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_INT(offset) (CVMX_ADD_IO_SEG(0x0001180014010078ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_INT_EN(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_INT_EN(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010080ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x0001180014010080ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_MEM_CAL0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_MEM_CAL0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010060ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_MEM_CAL0(offset) (CVMX_ADD_IO_SEG(0x0001180014010060ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_MEM_CAL1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_MEM_CAL1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010068ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_MEM_CAL1(offset) (CVMX_ADD_IO_SEG(0x0001180014010068ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_MEM_PMAP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_MEM_PMAP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010018ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_MEM_PMAP(offset) (CVMX_ADD_IO_SEG(0x0001180014010018ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_MEM_STAT0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_MEM_STAT0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010030ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_MEM_STAT0(offset) (CVMX_ADD_IO_SEG(0x0001180014010030ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_MEM_STAT1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_MEM_STAT1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010038ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_MEM_STAT1(offset) (CVMX_ADD_IO_SEG(0x0001180014010038ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_PIPE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_PIPE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010088ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_PIPE(offset) (CVMX_ADD_IO_SEG(0x0001180014010088ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ILK_TXX_RMATCH(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ILK_TXX_RMATCH(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180014010040ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_ILK_TXX_RMATCH(offset) (CVMX_ADD_IO_SEG(0x0001180014010040ull) + ((offset) & 1) * 16384)
+#endif
+
+/**
+ * cvmx_ilk_bist_sum
+ */
+union cvmx_ilk_bist_sum {
+ uint64_t u64;
+ struct cvmx_ilk_bist_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t rxf_x2p1 : 1; /**< Bist status of rxf.x2p_fif_mem1 */
+ uint64_t rxf_x2p0 : 1; /**< Bist status of rxf.x2p_fif_mem0 */
+ uint64_t rxf_pmap : 1; /**< Bist status of rxf.rx_map_mem */
+ uint64_t rxf_mem2 : 1; /**< Bist status of rxf.rx_fif_mem2 */
+ uint64_t rxf_mem1 : 1; /**< Bist status of rxf.rx_fif_mem1 */
+ uint64_t rxf_mem0 : 1; /**< Bist status of rxf.rx_fif_mem0 */
+ uint64_t reserved_36_51 : 16;
+ uint64_t rle7_dsk1 : 1; /**< Bist status of lne.rle7.dsk.dsk_fif_mem1 */
+ uint64_t rle7_dsk0 : 1; /**< Bist status of lne.rle7.dsk.dsk_fif_mem0 */
+ uint64_t rle6_dsk1 : 1; /**< Bist status of lne.rle6.dsk.dsk_fif_mem1 */
+ uint64_t rle6_dsk0 : 1; /**< Bist status of lne.rle6.dsk.dsk_fif_mem0 */
+ uint64_t rle5_dsk1 : 1; /**< Bist status of lne.rle5.dsk.dsk_fif_mem1 */
+ uint64_t rle5_dsk0 : 1; /**< Bist status of lne.rle5.dsk.dsk_fif_mem0 */
+ uint64_t rle4_dsk1 : 1; /**< Bist status of lne.rle4.dsk.dsk_fif_mem1 */
+ uint64_t rle4_dsk0 : 1; /**< Bist status of lne.rle4.dsk.dsk_fif_mem0 */
+ uint64_t rle3_dsk1 : 1; /**< Bist status of lne.rle3.dsk.dsk_fif_mem1 */
+ uint64_t rle3_dsk0 : 1; /**< Bist status of lne.rle3.dsk.dsk_fif_mem0 */
+ uint64_t rle2_dsk1 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem1 */
+ uint64_t rle2_dsk0 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem0 */
+ uint64_t rle1_dsk1 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem1 */
+ uint64_t rle1_dsk0 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem0 */
+ uint64_t rle0_dsk1 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem1 */
+ uint64_t rle0_dsk0 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem0 */
+ uint64_t reserved_19_19 : 1;
+ uint64_t rlk1_stat1 : 1; /**< Bist status of rlk1.csr.stat_mem1 ***NOTE: Added in pass 2.0 */
+ uint64_t rlk1_fwc : 1; /**< Bist status of rlk1.fwc.cal_chan_ram */
+ uint64_t rlk1_stat : 1; /**< Bist status of rlk1.csr.stat_mem */
+ uint64_t reserved_15_15 : 1;
+ uint64_t rlk0_stat1 : 1; /**< Bist status of rlk0.csr.stat_mem1 ***NOTE: Added in pass 2.0 */
+ uint64_t rlk0_fwc : 1; /**< Bist status of rlk0.fwc.cal_chan_ram */
+ uint64_t rlk0_stat : 1; /**< Bist status of rlk0.csr.stat_mem */
+ uint64_t tlk1_stat1 : 1; /**< Bist status of tlk1.csr.stat_mem1 */
+ uint64_t tlk1_fwc : 1; /**< Bist status of tlk1.fwc.cal_chan_ram */
+ uint64_t reserved_9_9 : 1;
+ uint64_t tlk1_txf2 : 1; /**< Bist status of tlk1.txf.tx_map_mem */
+ uint64_t tlk1_txf1 : 1; /**< Bist status of tlk1.txf.tx_fif_mem1 */
+ uint64_t tlk1_txf0 : 1; /**< Bist status of tlk1.txf.tx_fif_mem0 */
+ uint64_t tlk0_stat1 : 1; /**< Bist status of tlk0.csr.stat_mem1 */
+ uint64_t tlk0_fwc : 1; /**< Bist status of tlk0.fwc.cal_chan_ram */
+ uint64_t reserved_3_3 : 1;
+ uint64_t tlk0_txf2 : 1; /**< Bist status of tlk0.txf.tx_map_mem */
+ uint64_t tlk0_txf1 : 1; /**< Bist status of tlk0.txf.tx_fif_mem1 */
+ uint64_t tlk0_txf0 : 1; /**< Bist status of tlk0.txf.tx_fif_mem0 */
+#else
+ uint64_t tlk0_txf0 : 1;
+ uint64_t tlk0_txf1 : 1;
+ uint64_t tlk0_txf2 : 1;
+ uint64_t reserved_3_3 : 1;
+ uint64_t tlk0_fwc : 1;
+ uint64_t tlk0_stat1 : 1;
+ uint64_t tlk1_txf0 : 1;
+ uint64_t tlk1_txf1 : 1;
+ uint64_t tlk1_txf2 : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t tlk1_fwc : 1;
+ uint64_t tlk1_stat1 : 1;
+ uint64_t rlk0_stat : 1;
+ uint64_t rlk0_fwc : 1;
+ uint64_t rlk0_stat1 : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t rlk1_stat : 1;
+ uint64_t rlk1_fwc : 1;
+ uint64_t rlk1_stat1 : 1;
+ uint64_t reserved_19_19 : 1;
+ uint64_t rle0_dsk0 : 1;
+ uint64_t rle0_dsk1 : 1;
+ uint64_t rle1_dsk0 : 1;
+ uint64_t rle1_dsk1 : 1;
+ uint64_t rle2_dsk0 : 1;
+ uint64_t rle2_dsk1 : 1;
+ uint64_t rle3_dsk0 : 1;
+ uint64_t rle3_dsk1 : 1;
+ uint64_t rle4_dsk0 : 1;
+ uint64_t rle4_dsk1 : 1;
+ uint64_t rle5_dsk0 : 1;
+ uint64_t rle5_dsk1 : 1;
+ uint64_t rle6_dsk0 : 1;
+ uint64_t rle6_dsk1 : 1;
+ uint64_t rle7_dsk0 : 1;
+ uint64_t rle7_dsk1 : 1;
+ uint64_t reserved_36_51 : 16;
+ uint64_t rxf_mem0 : 1;
+ uint64_t rxf_mem1 : 1;
+ uint64_t rxf_mem2 : 1;
+ uint64_t rxf_pmap : 1;
+ uint64_t rxf_x2p0 : 1;
+ uint64_t rxf_x2p1 : 1;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } s;
+ struct cvmx_ilk_bist_sum_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t rxf_x2p1 : 1; /**< Bist status of rxf.x2p_fif_mem1 */
+ uint64_t rxf_x2p0 : 1; /**< Bist status of rxf.x2p_fif_mem0 */
+ uint64_t rxf_pmap : 1; /**< Bist status of rxf.rx_map_mem */
+ uint64_t rxf_mem2 : 1; /**< Bist status of rxf.rx_fif_mem2 */
+ uint64_t rxf_mem1 : 1; /**< Bist status of rxf.rx_fif_mem1 */
+ uint64_t rxf_mem0 : 1; /**< Bist status of rxf.rx_fif_mem0 */
+ uint64_t reserved_36_51 : 16;
+ uint64_t rle7_dsk1 : 1; /**< Bist status of lne.rle7.dsk.dsk_fif_mem1 */
+ uint64_t rle7_dsk0 : 1; /**< Bist status of lne.rle7.dsk.dsk_fif_mem0 */
+ uint64_t rle6_dsk1 : 1; /**< Bist status of lne.rle6.dsk.dsk_fif_mem1 */
+ uint64_t rle6_dsk0 : 1; /**< Bist status of lne.rle6.dsk.dsk_fif_mem0 */
+ uint64_t rle5_dsk1 : 1; /**< Bist status of lne.rle5.dsk.dsk_fif_mem1 */
+ uint64_t rle5_dsk0 : 1; /**< Bist status of lne.rle5.dsk.dsk_fif_mem0 */
+ uint64_t rle4_dsk1 : 1; /**< Bist status of lne.rle4.dsk.dsk_fif_mem1 */
+ uint64_t rle4_dsk0 : 1; /**< Bist status of lne.rle4.dsk.dsk_fif_mem0 */
+ uint64_t rle3_dsk1 : 1; /**< Bist status of lne.rle3.dsk.dsk_fif_mem1 */
+ uint64_t rle3_dsk0 : 1; /**< Bist status of lne.rle3.dsk.dsk_fif_mem0 */
+ uint64_t rle2_dsk1 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem1 */
+ uint64_t rle2_dsk0 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem0 */
+ uint64_t rle1_dsk1 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem1 */
+ uint64_t rle1_dsk0 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem0 */
+ uint64_t rle0_dsk1 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem1 */
+ uint64_t rle0_dsk0 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem0 */
+ uint64_t reserved_19_19 : 1;
+ uint64_t rlk1_stat1 : 1; /**< Bist status of rlk1.csr.stat_mem1 ***NOTE: Added in pass 2.0 */
+ uint64_t rlk1_fwc : 1; /**< Bist status of rlk1.fwc.cal_chan_ram */
+ uint64_t rlk1_stat : 1; /**< Bist status of rlk1.csr.stat_mem0 */
+ uint64_t reserved_15_15 : 1;
+ uint64_t rlk0_stat1 : 1; /**< Bist status of rlk0.csr.stat_mem1 ***NOTE: Added in pass 2.0 */
+ uint64_t rlk0_fwc : 1; /**< Bist status of rlk0.fwc.cal_chan_ram */
+ uint64_t rlk0_stat : 1; /**< Bist status of rlk0.csr.stat_mem0 */
+ uint64_t tlk1_stat1 : 1; /**< Bist status of tlk1.csr.stat_mem1 */
+ uint64_t tlk1_fwc : 1; /**< Bist status of tlk1.fwc.cal_chan_ram */
+ uint64_t tlk1_stat0 : 1; /**< Bist status of tlk1.csr.stat_mem0 */
+ uint64_t tlk1_txf2 : 1; /**< Bist status of tlk1.txf.tx_map_mem */
+ uint64_t tlk1_txf1 : 1; /**< Bist status of tlk1.txf.tx_fif_mem1 */
+ uint64_t tlk1_txf0 : 1; /**< Bist status of tlk1.txf.tx_fif_mem0 */
+ uint64_t tlk0_stat1 : 1; /**< Bist status of tlk0.csr.stat_mem1 */
+ uint64_t tlk0_fwc : 1; /**< Bist status of tlk0.fwc.cal_chan_ram */
+ uint64_t tlk0_stat0 : 1; /**< Bist status of tlk0.csr.stat_mem0 */
+ uint64_t tlk0_txf2 : 1; /**< Bist status of tlk0.txf.tx_map_mem */
+ uint64_t tlk0_txf1 : 1; /**< Bist status of tlk0.txf.tx_fif_mem1 */
+ uint64_t tlk0_txf0 : 1; /**< Bist status of tlk0.txf.tx_fif_mem0 */
+#else
+ uint64_t tlk0_txf0 : 1;
+ uint64_t tlk0_txf1 : 1;
+ uint64_t tlk0_txf2 : 1;
+ uint64_t tlk0_stat0 : 1;
+ uint64_t tlk0_fwc : 1;
+ uint64_t tlk0_stat1 : 1;
+ uint64_t tlk1_txf0 : 1;
+ uint64_t tlk1_txf1 : 1;
+ uint64_t tlk1_txf2 : 1;
+ uint64_t tlk1_stat0 : 1;
+ uint64_t tlk1_fwc : 1;
+ uint64_t tlk1_stat1 : 1;
+ uint64_t rlk0_stat : 1;
+ uint64_t rlk0_fwc : 1;
+ uint64_t rlk0_stat1 : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t rlk1_stat : 1;
+ uint64_t rlk1_fwc : 1;
+ uint64_t rlk1_stat1 : 1;
+ uint64_t reserved_19_19 : 1;
+ uint64_t rle0_dsk0 : 1;
+ uint64_t rle0_dsk1 : 1;
+ uint64_t rle1_dsk0 : 1;
+ uint64_t rle1_dsk1 : 1;
+ uint64_t rle2_dsk0 : 1;
+ uint64_t rle2_dsk1 : 1;
+ uint64_t rle3_dsk0 : 1;
+ uint64_t rle3_dsk1 : 1;
+ uint64_t rle4_dsk0 : 1;
+ uint64_t rle4_dsk1 : 1;
+ uint64_t rle5_dsk0 : 1;
+ uint64_t rle5_dsk1 : 1;
+ uint64_t rle6_dsk0 : 1;
+ uint64_t rle6_dsk1 : 1;
+ uint64_t rle7_dsk0 : 1;
+ uint64_t rle7_dsk1 : 1;
+ uint64_t reserved_36_51 : 16;
+ uint64_t rxf_mem0 : 1;
+ uint64_t rxf_mem1 : 1;
+ uint64_t rxf_mem2 : 1;
+ uint64_t rxf_pmap : 1;
+ uint64_t rxf_x2p0 : 1;
+ uint64_t rxf_x2p1 : 1;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } cn68xx;
+ struct cvmx_ilk_bist_sum_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t rxf_x2p1 : 1; /**< Bist status of rxf.x2p_fif_mem1 */
+ uint64_t rxf_x2p0 : 1; /**< Bist status of rxf.x2p_fif_mem0 */
+ uint64_t rxf_pmap : 1; /**< Bist status of rxf.rx_map_mem */
+ uint64_t rxf_mem2 : 1; /**< Bist status of rxf.rx_fif_mem2 */
+ uint64_t rxf_mem1 : 1; /**< Bist status of rxf.rx_fif_mem1 */
+ uint64_t rxf_mem0 : 1; /**< Bist status of rxf.rx_fif_mem0 */
+ uint64_t reserved_36_51 : 16;
+ uint64_t rle7_dsk1 : 1; /**< Bist status of lne.rle7.dsk.dsk_fif_mem1 */
+ uint64_t rle7_dsk0 : 1; /**< Bist status of lne.rle7.dsk.dsk_fif_mem0 */
+ uint64_t rle6_dsk1 : 1; /**< Bist status of lne.rle6.dsk.dsk_fif_mem1 */
+ uint64_t rle6_dsk0 : 1; /**< Bist status of lne.rle6.dsk.dsk_fif_mem0 */
+ uint64_t rle5_dsk1 : 1; /**< Bist status of lne.rle5.dsk.dsk_fif_mem1 */
+ uint64_t rle5_dsk0 : 1; /**< Bist status of lne.rle5.dsk.dsk_fif_mem0 */
+ uint64_t rle4_dsk1 : 1; /**< Bist status of lne.rle4.dsk.dsk_fif_mem1 */
+ uint64_t rle4_dsk0 : 1; /**< Bist status of lne.rle4.dsk.dsk_fif_mem0 */
+ uint64_t rle3_dsk1 : 1; /**< Bist status of lne.rle3.dsk.dsk_fif_mem1 */
+ uint64_t rle3_dsk0 : 1; /**< Bist status of lne.rle3.dsk.dsk_fif_mem0 */
+ uint64_t rle2_dsk1 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem1 */
+ uint64_t rle2_dsk0 : 1; /**< Bist status of lne.rle2.dsk.dsk_fif_mem0 */
+ uint64_t rle1_dsk1 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem1 */
+ uint64_t rle1_dsk0 : 1; /**< Bist status of lne.rle1.dsk.dsk_fif_mem0 */
+ uint64_t rle0_dsk1 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem1 */
+ uint64_t rle0_dsk0 : 1; /**< Bist status of lne.rle0.dsk.dsk_fif_mem0 */
+ uint64_t reserved_18_19 : 2;
+ uint64_t rlk1_fwc : 1; /**< Bist status of rlk1.fwc.cal_chan_ram */
+ uint64_t rlk1_stat : 1; /**< Bist status of rlk1.csr.stat_mem */
+ uint64_t reserved_14_15 : 2;
+ uint64_t rlk0_fwc : 1; /**< Bist status of rlk0.fwc.cal_chan_ram */
+ uint64_t rlk0_stat : 1; /**< Bist status of rlk0.csr.stat_mem */
+ uint64_t reserved_11_11 : 1;
+ uint64_t tlk1_fwc : 1; /**< Bist status of tlk1.fwc.cal_chan_ram */
+ uint64_t tlk1_stat : 1; /**< Bist status of tlk1.csr.stat_mem */
+ uint64_t tlk1_txf2 : 1; /**< Bist status of tlk1.txf.tx_map_mem */
+ uint64_t tlk1_txf1 : 1; /**< Bist status of tlk1.txf.tx_fif_mem1 */
+ uint64_t tlk1_txf0 : 1; /**< Bist status of tlk1.txf.tx_fif_mem0 */
+ uint64_t reserved_5_5 : 1;
+ uint64_t tlk0_fwc : 1; /**< Bist status of tlk0.fwc.cal_chan_ram */
+ uint64_t tlk0_stat : 1; /**< Bist status of tlk0.csr.stat_mem */
+ uint64_t tlk0_txf2 : 1; /**< Bist status of tlk0.txf.tx_map_mem */
+ uint64_t tlk0_txf1 : 1; /**< Bist status of tlk0.txf.tx_fif_mem1 */
+ uint64_t tlk0_txf0 : 1; /**< Bist status of tlk0.txf.tx_fif_mem0 */
+#else
+ uint64_t tlk0_txf0 : 1;
+ uint64_t tlk0_txf1 : 1;
+ uint64_t tlk0_txf2 : 1;
+ uint64_t tlk0_stat : 1;
+ uint64_t tlk0_fwc : 1;
+ uint64_t reserved_5_5 : 1;
+ uint64_t tlk1_txf0 : 1;
+ uint64_t tlk1_txf1 : 1;
+ uint64_t tlk1_txf2 : 1;
+ uint64_t tlk1_stat : 1;
+ uint64_t tlk1_fwc : 1;
+ uint64_t reserved_11_11 : 1;
+ uint64_t rlk0_stat : 1;
+ uint64_t rlk0_fwc : 1;
+ uint64_t reserved_14_15 : 2;
+ uint64_t rlk1_stat : 1;
+ uint64_t rlk1_fwc : 1;
+ uint64_t reserved_18_19 : 2;
+ uint64_t rle0_dsk0 : 1;
+ uint64_t rle0_dsk1 : 1;
+ uint64_t rle1_dsk0 : 1;
+ uint64_t rle1_dsk1 : 1;
+ uint64_t rle2_dsk0 : 1;
+ uint64_t rle2_dsk1 : 1;
+ uint64_t rle3_dsk0 : 1;
+ uint64_t rle3_dsk1 : 1;
+ uint64_t rle4_dsk0 : 1;
+ uint64_t rle4_dsk1 : 1;
+ uint64_t rle5_dsk0 : 1;
+ uint64_t rle5_dsk1 : 1;
+ uint64_t rle6_dsk0 : 1;
+ uint64_t rle6_dsk1 : 1;
+ uint64_t rle7_dsk0 : 1;
+ uint64_t rle7_dsk1 : 1;
+ uint64_t reserved_36_51 : 16;
+ uint64_t rxf_mem0 : 1;
+ uint64_t rxf_mem1 : 1;
+ uint64_t rxf_mem2 : 1;
+ uint64_t rxf_pmap : 1;
+ uint64_t rxf_x2p0 : 1;
+ uint64_t rxf_x2p1 : 1;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ilk_bist_sum cvmx_ilk_bist_sum_t;
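
These unions pair with the address macros above: read the CSR into .u64, then test the named bits. A small sketch, assuming the usual convention that a set BIST bit indicates a failing memory:

/* Sketch: check the ILK BIST summary after reset. */
static inline int example_ilk_bist_ok(void)
{
    cvmx_ilk_bist_sum_t bist;
    bist.u64 = cvmx_read_csr(CVMX_ILK_BIST_SUM);
    if (bist.s.rxf_mem0 || bist.s.rxf_mem1 || bist.s.rxf_mem2)
        cvmx_dprintf("ILK RXF FIFO memory failed BIST\n");
    return bist.u64 == 0;   /* zero means every memory passed */
}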
+
+/**
+ * cvmx_ilk_gbl_cfg
+ */
+union cvmx_ilk_gbl_cfg {
+ uint64_t u64;
+ struct cvmx_ilk_gbl_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t rid_rstdis : 1; /**< Disable automatic reassembly-id error recovery. For diagnostic
+ use only.
+
+ ***NOTE: Added in pass 2.0 */
+ uint64_t reset : 1; /**< Reset ILK. For diagnostic use only.
+
+ ***NOTE: Added in pass 2.0 */
+ uint64_t cclk_dis : 1; /**< Disable ILK conditional clocking. For diagnostic use only. */
+ uint64_t rxf_xlink : 1; /**< Causes external loopback traffic to switch links. Enabling
+ this allows simultaneous use of external and internal loopback. */
+#else
+ uint64_t rxf_xlink : 1;
+ uint64_t cclk_dis : 1;
+ uint64_t reset : 1;
+ uint64_t rid_rstdis : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ilk_gbl_cfg_s cn68xx;
+ struct cvmx_ilk_gbl_cfg_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t cclk_dis : 1; /**< Disable ILK conditional clocking. For diagnostic use only. */
+ uint64_t rxf_xlink : 1; /**< Causes external loopback traffic to switch links. Enabling
+ this allows simultaneous use of external and internal loopback. */
+#else
+ uint64_t rxf_xlink : 1;
+ uint64_t cclk_dis : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ilk_gbl_cfg cvmx_ilk_gbl_cfg_t;
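
The same read-modify-write pattern used by cvmx_higig_initialize() earlier in this commit applies to every CSR here. For example, setting the diagnostic RXF_XLINK bit without disturbing the other fields:

cvmx_ilk_gbl_cfg_t gbl_cfg;
gbl_cfg.u64 = cvmx_read_csr(CVMX_ILK_GBL_CFG);
gbl_cfg.s.rxf_xlink = 1;   /* switch links for external loopback traffic */
cvmx_write_csr(CVMX_ILK_GBL_CFG, gbl_cfg.u64);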
+
+/**
+ * cvmx_ilk_gbl_int
+ */
+union cvmx_ilk_gbl_int {
+ uint64_t u64;
+ struct cvmx_ilk_gbl_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t rxf_push_full : 1; /**< RXF overflow */
+ uint64_t rxf_pop_empty : 1; /**< RXF underflow */
+ uint64_t rxf_ctl_perr : 1; /**< RXF parity error occurred on sideband control signals. Data
+ cycle will be dropped. */
+ uint64_t rxf_lnk1_perr : 1; /**< RXF parity error occurred on RxLink1 packet data.
+ Packet will be marked with error at eop */
+ uint64_t rxf_lnk0_perr : 1; /**< RXF parity error occurred on RxLink0 packet data. Packet will
+ be marked with error at eop */
+#else
+ uint64_t rxf_lnk0_perr : 1;
+ uint64_t rxf_lnk1_perr : 1;
+ uint64_t rxf_ctl_perr : 1;
+ uint64_t rxf_pop_empty : 1;
+ uint64_t rxf_push_full : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_ilk_gbl_int_s cn68xx;
+ struct cvmx_ilk_gbl_int_s cn68xxp1;
+};
+typedef union cvmx_ilk_gbl_int cvmx_ilk_gbl_int_t;
+
+/**
+ * cvmx_ilk_gbl_int_en
+ */
+union cvmx_ilk_gbl_int_en {
+ uint64_t u64;
+ struct cvmx_ilk_gbl_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t rxf_push_full : 1; /**< RXF overflow */
+ uint64_t rxf_pop_empty : 1; /**< RXF underflow */
+ uint64_t rxf_ctl_perr : 1; /**< RXF parity error occurred on sideband control signals. Data
+ cycle will be dropped. */
+ uint64_t rxf_lnk1_perr : 1; /**< RXF parity error occurred on RxLink1 packet data.
+ Packet will be marked with error at eop */
+ uint64_t rxf_lnk0_perr : 1; /**< RXF parity error occurred on RxLink0 packet data.
+ Packet will be marked with error at eop */
+#else
+ uint64_t rxf_lnk0_perr : 1;
+ uint64_t rxf_lnk1_perr : 1;
+ uint64_t rxf_ctl_perr : 1;
+ uint64_t rxf_pop_empty : 1;
+ uint64_t rxf_push_full : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_ilk_gbl_int_en_s cn68xx;
+ struct cvmx_ilk_gbl_int_en_s cn68xxp1;
+};
+typedef union cvmx_ilk_gbl_int_en cvmx_ilk_gbl_int_en_t;
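
A sketch of arming these interrupts: acknowledge anything pending in ILK_GBL_INT, then enable the RXF error sources. Treating the INT register as write-one-to-clear follows the common Octeon CSR convention and is an assumption here, not something this header states.

cvmx_ilk_gbl_int_t gbl_int;
cvmx_ilk_gbl_int_en_t gbl_int_en;

gbl_int.u64 = cvmx_read_csr(CVMX_ILK_GBL_INT);
cvmx_write_csr(CVMX_ILK_GBL_INT, gbl_int.u64);   /* clear pending (assumed W1C) */

gbl_int_en.u64 = 0;
gbl_int_en.s.rxf_push_full = 1;                  /* RXF overflow */
gbl_int_en.s.rxf_pop_empty = 1;                  /* RXF underflow */
gbl_int_en.s.rxf_ctl_perr  = 1;                  /* sideband parity error */
cvmx_write_csr(CVMX_ILK_GBL_INT_EN, gbl_int_en.u64);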
+
+/**
+ * cvmx_ilk_int_sum
+ */
+union cvmx_ilk_int_sum {
+ uint64_t u64;
+ struct cvmx_ilk_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t rle7_int : 1; /**< RxLane7 interrupt status. See ILK_RX_LNE7_INT */
+ uint64_t rle6_int : 1; /**< RxLane6 interrupt status. See ILK_RX_LNE6_INT */
+ uint64_t rle5_int : 1; /**< RxLane5 interrupt status. See ILK_RX_LNE5_INT */
+ uint64_t rle4_int : 1; /**< RxLane4 interrupt status. See ILK_RX_LNE4_INT */
+ uint64_t rle3_int : 1; /**< RxLane3 interrupt status. See ILK_RX_LNE3_INT */
+ uint64_t rle2_int : 1; /**< RxLane2 interrupt status. See ILK_RX_LNE2_INT */
+ uint64_t rle1_int : 1; /**< RxLane1 interrupt status. See ILK_RX_LNE1_INT */
+ uint64_t rle0_int : 1; /**< RxLane0 interrupt status. See ILK_RX_LNE0_INT */
+ uint64_t rlk1_int : 1; /**< RxLink1 interrupt status. See ILK_RX1_INT */
+ uint64_t rlk0_int : 1; /**< RxLink0 interrupt status. See ILK_RX0_INT */
+ uint64_t tlk1_int : 1; /**< TxLink1 interrupt status. See ILK_TX1_INT */
+ uint64_t tlk0_int : 1; /**< TxLink0 interrupt status. See ILK_TX0_INT */
+ uint64_t gbl_int : 1; /**< Global interrupt status. See ILK_GBL_INT */
+#else
+ uint64_t gbl_int : 1;
+ uint64_t tlk0_int : 1;
+ uint64_t tlk1_int : 1;
+ uint64_t rlk0_int : 1;
+ uint64_t rlk1_int : 1;
+ uint64_t rle0_int : 1;
+ uint64_t rle1_int : 1;
+ uint64_t rle2_int : 1;
+ uint64_t rle3_int : 1;
+ uint64_t rle4_int : 1;
+ uint64_t rle5_int : 1;
+ uint64_t rle6_int : 1;
+ uint64_t rle7_int : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_ilk_int_sum_s cn68xx;
+ struct cvmx_ilk_int_sum_s cn68xxp1;
+};
+typedef union cvmx_ilk_int_sum cvmx_ilk_int_sum_t;
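+
+/* A dispatch sketch over the read-only interrupt summary, assuming the
+ * CVMX_ILK_INT_SUM address macro from earlier in this file. Each set bit
+ * points software at the per-block interrupt register named in the field
+ * comments above. */
+#if 0 /* usage sketch */
+static void ilk_int_dispatch(void)
+{
+    cvmx_ilk_int_sum_t sum;
+
+    sum.u64 = cvmx_read_csr(CVMX_ILK_INT_SUM);
+    if (sum.s.gbl_int) {
+        /* service ILK_GBL_INT */
+    }
+    if (sum.s.rlk0_int) {
+        /* service ILK_RX0_INT */
+    }
+    /* ...and likewise for the remaining tlk/rlk/rle bits... */
+}
+#endif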
+
+/**
+ * cvmx_ilk_lne_dbg
+ */
+union cvmx_ilk_lne_dbg {
+ uint64_t u64;
+ struct cvmx_ilk_lne_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t tx_bad_crc32 : 1; /**< Send 1 diagnostic word with bad CRC32 to the selected lane.
+ Note: injects just once */
+ uint64_t tx_bad_6467_cnt : 5; /**< Send N bad 64B/67B codewords on selected lane */
+ uint64_t tx_bad_sync_cnt : 3; /**< Send N bad sync words on selected lane */
+ uint64_t tx_bad_scram_cnt : 3; /**< Send N bad scram state on selected lane */
+ uint64_t reserved_40_47 : 8;
+ uint64_t tx_bad_lane_sel : 8; /**< Select lane to apply error injection counts */
+ uint64_t reserved_24_31 : 8;
+ uint64_t tx_dis_dispr : 8; /**< Per-lane disparity disable */
+ uint64_t reserved_8_15 : 8;
+ uint64_t tx_dis_scram : 8; /**< Per-lane scrambler disable */
+#else
+ uint64_t tx_dis_scram : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t tx_dis_dispr : 8;
+ uint64_t reserved_24_31 : 8;
+ uint64_t tx_bad_lane_sel : 8;
+ uint64_t reserved_40_47 : 8;
+ uint64_t tx_bad_scram_cnt : 3;
+ uint64_t tx_bad_sync_cnt : 3;
+ uint64_t tx_bad_6467_cnt : 5;
+ uint64_t tx_bad_crc32 : 1;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } s;
+ struct cvmx_ilk_lne_dbg_s cn68xx;
+ struct cvmx_ilk_lne_dbg_s cn68xxp1;
+};
+typedef union cvmx_ilk_lne_dbg cvmx_ilk_lne_dbg_t;
+
+/**
+ * cvmx_ilk_lne_sts_msg
+ */
+union cvmx_ilk_lne_sts_msg {
+ uint64_t u64;
+ struct cvmx_ilk_lne_sts_msg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t rx_lnk_stat : 8; /**< Link status received in the diagnostic word (per-lane) */
+ uint64_t reserved_40_47 : 8;
+ uint64_t rx_lne_stat : 8; /**< Lane status received in the diagnostic word (per-lane) */
+ uint64_t reserved_24_31 : 8;
+ uint64_t tx_lnk_stat : 8; /**< Link status transmitted in the diagnostic word (per-lane) */
+ uint64_t reserved_8_15 : 8;
+ uint64_t tx_lne_stat : 8; /**< Lane status transmitted in the diagnostic word (per-lane) */
+#else
+ uint64_t tx_lne_stat : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t tx_lnk_stat : 8;
+ uint64_t reserved_24_31 : 8;
+ uint64_t rx_lne_stat : 8;
+ uint64_t reserved_40_47 : 8;
+ uint64_t rx_lnk_stat : 8;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_ilk_lne_sts_msg_s cn68xx;
+ struct cvmx_ilk_lne_sts_msg_s cn68xxp1;
+};
+typedef union cvmx_ilk_lne_sts_msg cvmx_ilk_lne_sts_msg_t;
+
+/**
+ * cvmx_ilk_rx#_cfg0
+ */
+union cvmx_ilk_rxx_cfg0 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_cfg0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ext_lpbk_fc : 1; /**< Enable Rx-Tx flowcontrol loopback (external) */
+ uint64_t ext_lpbk : 1; /**< Enable Rx-Tx data loopback (external). Note that with differing
+ transmit & receive clocks, skip words are inserted/deleted */
+ uint64_t reserved_60_61 : 2;
+ uint64_t lnk_stats_wrap : 1; /**< Upon overflow, a statistics counter should wrap instead of
+ saturating.
+
+ ***NOTE: Added in pass 2.0 */
+ uint64_t bcw_push : 1; /**< The 8 byte burst control word containing the SOP will be
+ prepended to the corresponding packet.
+
+ ***NOTE: Added in pass 2.0 */
+ uint64_t mproto_ign : 1; /**< When LA_MODE=1 and MPROTO_IGN=0, the multi-protocol bit of the
+ LA control word is used to determine if the burst is an LA or
+ non-LA burst. When LA_MODE=1 and MPROTO_IGN=1, all bursts
+ are treated as LA. When LA_MODE=0, this field is ignored
+
+ ***NOTE: Added in pass 2.0 */
+ uint64_t ptrn_mode : 1; /**< Enable programmable test pattern mode */
+ uint64_t lnk_stats_rdclr : 1; /**< CSR read to ILK_RXx_STAT* clears the counter after returning
+ its current value. */
+ uint64_t lnk_stats_ena : 1; /**< Enable link statistics counters */
+ uint64_t mltuse_fc_ena : 1; /**< Use multi-use field for calendar */
+ uint64_t cal_ena : 1; /**< Enable Rx calendar. When the calendar table is disabled, all
+ port-pipes receive XON. */
+ uint64_t mfrm_len : 13; /**< The quantity of data sent on each lane including one sync word,
+ scrambler state, diag word, zero or more skip words, and the
+ data payload. Must be larger than ILK_RXX_CFG1[SKIP_CNT]+9.
+ Supported range: ILK_RXX_CFG1[SKIP_CNT]+9 < MFRM_LEN <= 4096 */
+ uint64_t brst_shrt : 7; /**< Minimum interval between burst control words, as a multiple of
+ 8 bytes. Supported range from 8 to 512 bytes (ie. 0 <
+ BRST_SHRT <= 64)
+ This field affects the ILK_RX*_STAT4[BRST_SHRT_ERR_CNT]
+ counter. It does not affect correct operation of the link. */
+ uint64_t lane_rev : 1; /**< Lane reversal. When enabled, lane de-striping is performed
+ from most significant lane enabled to least significant lane
+ enabled. LANE_ENA must be zero before changing LANE_REV. */
+ uint64_t brst_max : 5; /**< Maximum size of a data burst, as a multiple of 64 byte blocks.
+ Supported range is from 64 bytes to 1024 bytes. (ie. 0 <
+ BRST_MAX <= 16)
+ This field affects the ILK_RX*_STAT2[BRST_NOT_FULL_CNT] and
+ ILK_RX*_STAT3[BRST_MAX_ERR_CNT] counters. It does not affect
+ correct operation of the link. */
+ uint64_t reserved_25_25 : 1;
+ uint64_t cal_depth : 9; /**< Number of valid entries in the calendar. Supported range from
+ 1 to 288. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t lane_ena : 8; /**< Lane enable mask. Link is enabled if any lane is enabled. The
+ same lane should not be enabled in multiple ILK_RXx_CFG0. Each
+ bit of LANE_ENA maps to a RX lane (RLE) and a QLM lane. NOTE:
+ LANE_REV has no effect on this mapping.
+
+ LANE_ENA[0] = RLE0 = QLM1 lane 0
+ LANE_ENA[1] = RLE1 = QLM1 lane 1
+ LANE_ENA[2] = RLE2 = QLM1 lane 2
+ LANE_ENA[3] = RLE3 = QLM1 lane 3
+ LANE_ENA[4] = RLE4 = QLM2 lane 0
+ LANE_ENA[5] = RLE5 = QLM2 lane 1
+ LANE_ENA[6] = RLE6 = QLM2 lane 2
+ LANE_ENA[7] = RLE7 = QLM2 lane 3 */
+#else
+ uint64_t lane_ena : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t cal_depth : 9;
+ uint64_t reserved_25_25 : 1;
+ uint64_t brst_max : 5;
+ uint64_t lane_rev : 1;
+ uint64_t brst_shrt : 7;
+ uint64_t mfrm_len : 13;
+ uint64_t cal_ena : 1;
+ uint64_t mltuse_fc_ena : 1;
+ uint64_t lnk_stats_ena : 1;
+ uint64_t lnk_stats_rdclr : 1;
+ uint64_t ptrn_mode : 1;
+ uint64_t mproto_ign : 1;
+ uint64_t bcw_push : 1;
+ uint64_t lnk_stats_wrap : 1;
+ uint64_t reserved_60_61 : 2;
+ uint64_t ext_lpbk : 1;
+ uint64_t ext_lpbk_fc : 1;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_cfg0_s cn68xx;
+ struct cvmx_ilk_rxx_cfg0_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ext_lpbk_fc : 1; /**< Enable Rx-Tx flowcontrol loopback (external) */
+ uint64_t ext_lpbk : 1; /**< Enable Rx-Tx data loopback (external). Note that with differing
+ transmit & receive clocks, skip words are inserted/deleted */
+ uint64_t reserved_57_61 : 5;
+ uint64_t ptrn_mode : 1; /**< Enable programmable test pattern mode */
+ uint64_t lnk_stats_rdclr : 1; /**< CSR read to ILK_RXx_STAT* clears the counter after returning
+ its current value. */
+ uint64_t lnk_stats_ena : 1; /**< Enable link statistics counters */
+ uint64_t mltuse_fc_ena : 1; /**< Use multi-use field for calendar */
+ uint64_t cal_ena : 1; /**< Enable Rx calendar. When the calendar table is disabled, all
+ port-pipes receive XON. */
+ uint64_t mfrm_len : 13; /**< The quantity of data sent on each lane including one sync word,
+ scrambler state, diag word, zero or more skip words, and the
+ data payload. Must be larger than ILK_RXX_CFG1[SKIP_CNT]+9.
+ Supported range: ILK_RXX_CFG1[SKIP_CNT]+9 < MFRM_LEN <= 4096 */
+ uint64_t brst_shrt : 7; /**< Minimum interval between burst control words, as a multiple of
+ 8 bytes. Supported range from 8 to 512 bytes (ie. 0 <
+ BRST_SHRT <= 64)
+ This field affects the ILK_RX*_STAT4[BRST_SHRT_ERR_CNT]
+ counter. It does not affect correct operation of the link. */
+ uint64_t lane_rev : 1; /**< Lane reversal. When enabled, lane de-striping is performed
+ from most significant lane enabled to least significant lane
+ enabled. LANE_ENA must be zero before changing LANE_REV. */
+ uint64_t brst_max : 5; /**< Maximum size of a data burst, as a multiple of 64 byte blocks.
+ Supported range is from 64 bytes to 1024 bytes. (ie. 0 <
+ BRST_MAX <= 16)
+ This field affects the ILK_RX*_STAT2[BRST_NOT_FULL_CNT] and
+ ILK_RX*_STAT3[BRST_MAX_ERR_CNT] counters. It does not affect
+ correct operation of the link. */
+ uint64_t reserved_25_25 : 1;
+ uint64_t cal_depth : 9; /**< Number of valid entries in the calendar. Supported range from
+ 1 to 288. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t lane_ena : 8; /**< Lane enable mask. Link is enabled if any lane is enabled. The
+ same lane should not be enabled in multiple ILK_RXx_CFG0. Each
+ bit of LANE_ENA maps to a RX lane (RLE) and a QLM lane. NOTE:
+ LANE_REV has no effect on this mapping.
+
+ LANE_ENA[0] = RLE0 = QLM1 lane 0
+ LANE_ENA[1] = RLE1 = QLM1 lane 1
+ LANE_ENA[2] = RLE2 = QLM1 lane 2
+ LANE_ENA[3] = RLE3 = QLM1 lane 3
+ LANE_ENA[4] = RLE4 = QLM2 lane 0
+ LANE_ENA[5] = RLE5 = QLM2 lane 1
+ LANE_ENA[6] = RLE6 = QLM2 lane 2
+ LANE_ENA[7] = RLE7 = QLM2 lane 3 */
+#else
+ uint64_t lane_ena : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t cal_depth : 9;
+ uint64_t reserved_25_25 : 1;
+ uint64_t brst_max : 5;
+ uint64_t lane_rev : 1;
+ uint64_t brst_shrt : 7;
+ uint64_t mfrm_len : 13;
+ uint64_t cal_ena : 1;
+ uint64_t mltuse_fc_ena : 1;
+ uint64_t lnk_stats_ena : 1;
+ uint64_t lnk_stats_rdclr : 1;
+ uint64_t ptrn_mode : 1;
+ uint64_t reserved_57_61 : 5;
+ uint64_t ext_lpbk : 1;
+ uint64_t ext_lpbk_fc : 1;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_cfg0 cvmx_ilk_rxx_cfg0_t;
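+
+/* A bring-up sketch for one Rx link, assuming a CVMX_ILK_RXX_CFG0(link)
+ * address macro earlier in this file. The values are illustrative only;
+ * the field comments above give the legal ranges. */
+#if 0 /* usage sketch */
+static void ilk_rx_link_cfg(int link)
+{
+    cvmx_ilk_rxx_cfg0_t cfg0;
+
+    cfg0.u64 = cvmx_read_csr(CVMX_ILK_RXX_CFG0(link));
+    cfg0.s.lane_ena = 0x0f;  /* RLE0-RLE3 = QLM1 lanes 0-3 */
+    cfg0.s.brst_max = 4;     /* 256-byte maximum burst (4 x 64B) */
+    cfg0.s.brst_shrt = 4;    /* 32-byte minimum burst interval (4 x 8B) */
+    cfg0.s.mfrm_len = 2048;  /* metaframe length, must exceed SKIP_CNT+9 */
+    cvmx_write_csr(CVMX_ILK_RXX_CFG0(link), cfg0.u64);
+}
+#endif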
+
+/**
+ * cvmx_ilk_rx#_cfg1
+ */
+union cvmx_ilk_rxx_cfg1 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_cfg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t rx_fifo_cnt : 12; /**< Number of 64-bit words currently consumed by this link in the
+ RX fifo. */
+ uint64_t reserved_48_49 : 2;
+ uint64_t rx_fifo_hwm : 12; /**< Number of 64-bit words consumed by this link before switching
+ the transmitted link flow control status from XON to XOFF.
+
+ XON = RX_FIFO_CNT < RX_FIFO_HWM
+ XOFF = RX_FIFO_CNT >= RX_FIFO_HWM. */
+ uint64_t reserved_34_35 : 2;
+ uint64_t rx_fifo_max : 12; /**< Maximum number of 64-bit words consumed by this link in the RX
+ fifo. The sum of all links should be equal to 2048 (16KB) */
+ uint64_t pkt_flush : 1; /**< Packet receive flush. Writing PKT_FLUSH=1 will cause all open
+ packets to be errored out, just as though the link went down. */
+ uint64_t pkt_ena : 1; /**< Packet receive enable. When PKT_ENA=0, any received SOP causes
+ the entire packet to be dropped. */
+ uint64_t la_mode : 1; /**< 0 = Interlaken
+ 1 = Interlaken Look-Aside */
+ uint64_t tx_link_fc : 1; /**< Link flow control status transmitted by the Tx-Link
+ XON when RX_FIFO_CNT < RX_FIFO_HWM and lane alignment is done */
+ uint64_t rx_link_fc : 1; /**< Link flow control status received in burst/idle control words.
+ XOFF will cause Tx-Link to stop transmitting on all channels. */
+ uint64_t rx_align_ena : 1; /**< Enable the lane alignment. This should only be done after all
+ enabled lanes have achieved word boundary lock and scrambler
+ synchronization. Note: Hardware will clear this when any
+ participating lane loses either word boundary lock or scrambler
+ synchronization */
+ uint64_t reserved_8_15 : 8;
+ uint64_t rx_bdry_lock_ena : 8; /**< Enable word boundary lock. While disabled, received data is
+ tossed. Once enabled, received data is searched for legal
+ 2-bit patterns. Automatically cleared for disabled lanes. */
+#else
+ uint64_t rx_bdry_lock_ena : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t rx_align_ena : 1;
+ uint64_t rx_link_fc : 1;
+ uint64_t tx_link_fc : 1;
+ uint64_t la_mode : 1;
+ uint64_t pkt_ena : 1;
+ uint64_t pkt_flush : 1;
+ uint64_t rx_fifo_max : 12;
+ uint64_t reserved_34_35 : 2;
+ uint64_t rx_fifo_hwm : 12;
+ uint64_t reserved_48_49 : 2;
+ uint64_t rx_fifo_cnt : 12;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_cfg1_s cn68xx;
+ struct cvmx_ilk_rxx_cfg1_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_cfg1 cvmx_ilk_rxx_cfg1_t;
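+
+/* A sketch of the alignment sequence implied by RX_ALIGN_ENA above: enable
+ * word boundary lock on the participating lanes, wait for
+ * ILK_RXx_INT[WORD_SYNC_DONE], then enable lane alignment. Assumes
+ * CVMX_ILK_RXX_CFG1(link)/CVMX_ILK_RXX_INT(link) address macros from earlier
+ * in this file; cvmx_ilk_rxx_int_t is defined further below. */
+#if 0 /* usage sketch */
+static void ilk_rx_align(int link, int lane_mask)
+{
+    cvmx_ilk_rxx_cfg1_t cfg1;
+    cvmx_ilk_rxx_int_t rx_int;
+
+    cfg1.u64 = cvmx_read_csr(CVMX_ILK_RXX_CFG1(link));
+    cfg1.s.rx_bdry_lock_ena = lane_mask;
+    cvmx_write_csr(CVMX_ILK_RXX_CFG1(link), cfg1.u64);
+
+    do {
+        rx_int.u64 = cvmx_read_csr(CVMX_ILK_RXX_INT(link));
+    } while (!rx_int.s.word_sync_done);
+
+    cfg1.s.rx_align_ena = 1;
+    cvmx_write_csr(CVMX_ILK_RXX_CFG1(link), cfg1.u64);
+}
+#endif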
+
+/**
+ * cvmx_ilk_rx#_flow_ctl0
+ */
+union cvmx_ilk_rxx_flow_ctl0 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_flow_ctl0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t status : 64; /**< Flow control status for port-pipes 63-0, where a 1 indicates
+ the presence of backpressure (ie. XOFF) and 0 indicates the
+ absence of backpressure (ie. XON) */
+#else
+ uint64_t status : 64;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_flow_ctl0_s cn68xx;
+ struct cvmx_ilk_rxx_flow_ctl0_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_flow_ctl0 cvmx_ilk_rxx_flow_ctl0_t;
+
+/**
+ * cvmx_ilk_rx#_flow_ctl1
+ */
+union cvmx_ilk_rxx_flow_ctl1 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_flow_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t status : 64; /**< Flow control status for port-pipes 127-64, where a 1 indicates
+ the presence of backpressure (ie. XOFF) and 0 indicates the
+ absence of backpressure (ie. XON) */
+#else
+ uint64_t status : 64;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_flow_ctl1_s cn68xx;
+ struct cvmx_ilk_rxx_flow_ctl1_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_flow_ctl1 cvmx_ilk_rxx_flow_ctl1_t;
+
+/**
+ * cvmx_ilk_rx#_idx_cal
+ */
+union cvmx_ilk_rxx_idx_cal {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_idx_cal_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t inc : 6; /**< Increment to add to current index for next index. NOTE:
+ Increment performed after access to ILK_RXx_MEM_CAL1 */
+ uint64_t reserved_6_7 : 2;
+ uint64_t index : 6; /**< Specify the group of 8 entries accessed by the next CSR
+ read/write to calendar table memory. Software must never write
+ IDX >= 36 */
+#else
+ uint64_t index : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t inc : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_idx_cal_s cn68xx;
+ struct cvmx_ilk_rxx_idx_cal_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_idx_cal cvmx_ilk_rxx_idx_cal_t;
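+
+/* A sketch of the indexed-access idiom shared by the ILK_RXx_IDX_*
+ * registers: program INDEX (and optionally INC for auto-advance), then
+ * access the matching MEM register. Assumes a CVMX_ILK_RXX_IDX_CAL(link)
+ * address macro earlier in this file. */
+#if 0 /* usage sketch */
+static void ilk_rx_cal_seek(int link, int group_of_8)
+{
+    cvmx_ilk_rxx_idx_cal_t idx;
+
+    idx.u64 = 0;
+    idx.s.index = group_of_8; /* software must keep this below 36 */
+    idx.s.inc = 1;            /* advance after each ILK_RXx_MEM_CAL1 access */
+    cvmx_write_csr(CVMX_ILK_RXX_IDX_CAL(link), idx.u64);
+}
+#endif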
+
+/**
+ * cvmx_ilk_rx#_idx_stat0
+ */
+union cvmx_ilk_rxx_idx_stat0 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_idx_stat0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t clr : 1; /**< CSR read to ILK_RXx_MEM_STAT0 clears the selected counter after
+ returning its current value. */
+ uint64_t reserved_24_30 : 7;
+ uint64_t inc : 8; /**< Increment to add to current index for next index */
+ uint64_t reserved_8_15 : 8;
+ uint64_t index : 8; /**< Specify the channel accessed during the next CSR read to the
+ ILK_RXx_MEM_STAT0 */
+#else
+ uint64_t index : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t inc : 8;
+ uint64_t reserved_24_30 : 7;
+ uint64_t clr : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_idx_stat0_s cn68xx;
+ struct cvmx_ilk_rxx_idx_stat0_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_idx_stat0 cvmx_ilk_rxx_idx_stat0_t;
+
+/**
+ * cvmx_ilk_rx#_idx_stat1
+ */
+union cvmx_ilk_rxx_idx_stat1 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_idx_stat1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t clr : 1; /**< CSR read to ILK_RXx_MEM_STAT1 clears the selected counter after
+ returning its current value. */
+ uint64_t reserved_24_30 : 7;
+ uint64_t inc : 8; /**< Increment to add to current index for next index */
+ uint64_t reserved_8_15 : 8;
+ uint64_t index : 8; /**< Specify the channel accessed during the next CSR read to the
+ ILK_RXx_MEM_STAT1 */
+#else
+ uint64_t index : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t inc : 8;
+ uint64_t reserved_24_30 : 7;
+ uint64_t clr : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_idx_stat1_s cn68xx;
+ struct cvmx_ilk_rxx_idx_stat1_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_idx_stat1 cvmx_ilk_rxx_idx_stat1_t;
+
+/**
+ * cvmx_ilk_rx#_int
+ */
+union cvmx_ilk_rxx_int {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t pkt_drop_sop : 1; /**< Entire packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX,
+ lack of reassembly-ids, or because ILK_RXX_CFG1[PKT_ENA]=0
+
+ ***NOTE: Added in pass 2.0 */
+ uint64_t pkt_drop_rid : 1; /**< Entire packet dropped due to the lack of reassembly-ids or
+ because ILK_RXX_CFG1[PKT_ENA]=0 */
+ uint64_t pkt_drop_rxf : 1; /**< Some/all of a packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX */
+ uint64_t lane_bad_word : 1; /**< A lane encountered either a bad 64B/67B codeword or an unknown
+ control word type. */
+ uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */
+ uint64_t lane_align_done : 1; /**< Lane alignment successful */
+ uint64_t word_sync_done : 1; /**< All enabled lanes have achieved word boundary lock and
+ scrambler synchronization. Lane alignment may now be enabled. */
+ uint64_t crc24_err : 1; /**< Burst CRC24 error. All open packets will receive an error. */
+ uint64_t lane_align_fail : 1; /**< Lane Alignment fails (4 tries). Hardware will repeat lane
+ alignment until it succeeds or until ILK_RXx_CFG1[RX_ALIGN_ENA]
+ is cleared. */
+#else
+ uint64_t lane_align_fail : 1;
+ uint64_t crc24_err : 1;
+ uint64_t word_sync_done : 1;
+ uint64_t lane_align_done : 1;
+ uint64_t stat_cnt_ovfl : 1;
+ uint64_t lane_bad_word : 1;
+ uint64_t pkt_drop_rxf : 1;
+ uint64_t pkt_drop_rid : 1;
+ uint64_t pkt_drop_sop : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_int_s cn68xx;
+ struct cvmx_ilk_rxx_int_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t pkt_drop_rid : 1; /**< Entire packet dropped due to the lack of reassembly-ids or
+ because ILK_RXX_CFG1[PKT_ENA]=0 */
+ uint64_t pkt_drop_rxf : 1; /**< Some/all of a packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX */
+ uint64_t lane_bad_word : 1; /**< A lane encountered either a bad 64B/67B codeword or an unknown
+ control word type. */
+ uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */
+ uint64_t lane_align_done : 1; /**< Lane alignment successful */
+ uint64_t word_sync_done : 1; /**< All enabled lanes have achieved word boundary lock and
+ scrambler synchronization. Lane alignment may now be enabled. */
+ uint64_t crc24_err : 1; /**< Burst CRC24 error. All open packets will receive an error. */
+ uint64_t lane_align_fail : 1; /**< Lane Alignment fails (4 tries). Hardware will repeat lane
+ alignment until it succeeds or until ILK_RXx_CFG1[RX_ALIGN_ENA]
+ is cleared. */
+#else
+ uint64_t lane_align_fail : 1;
+ uint64_t crc24_err : 1;
+ uint64_t word_sync_done : 1;
+ uint64_t lane_align_done : 1;
+ uint64_t stat_cnt_ovfl : 1;
+ uint64_t lane_bad_word : 1;
+ uint64_t pkt_drop_rxf : 1;
+ uint64_t pkt_drop_rid : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_int cvmx_ilk_rxx_int_t;
+
+/**
+ * cvmx_ilk_rx#_int_en
+ */
+union cvmx_ilk_rxx_int_en {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t pkt_drop_sop : 1; /**< Entire packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX,
+ lack of reassembly-ids, or because ILK_RXX_CFG1[PKT_ENA]=0
+
+ ***NOTE: Added in pass 2.0 */
+ uint64_t pkt_drop_rid : 1; /**< Entire packet dropped due to the lack of reassembly-ids or
+ because ILK_RXX_CFG1[PKT_ENA]=0 */
+ uint64_t pkt_drop_rxf : 1; /**< Some/all of a packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX */
+ uint64_t lane_bad_word : 1; /**< A lane encountered either a bad 64B/67B codeword or an unknown
+ control word type. */
+ uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */
+ uint64_t lane_align_done : 1; /**< Lane alignment successful */
+ uint64_t word_sync_done : 1; /**< All enabled lanes have achieved word boundary lock and
+ scrambler synchronization. Lane alignment may now be enabled. */
+ uint64_t crc24_err : 1; /**< Burst CRC24 error. All open packets will receive an error. */
+ uint64_t lane_align_fail : 1; /**< Lane Alignment fails (4 tries) */
+#else
+ uint64_t lane_align_fail : 1;
+ uint64_t crc24_err : 1;
+ uint64_t word_sync_done : 1;
+ uint64_t lane_align_done : 1;
+ uint64_t stat_cnt_ovfl : 1;
+ uint64_t lane_bad_word : 1;
+ uint64_t pkt_drop_rxf : 1;
+ uint64_t pkt_drop_rid : 1;
+ uint64_t pkt_drop_sop : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_int_en_s cn68xx;
+ struct cvmx_ilk_rxx_int_en_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t pkt_drop_rid : 1; /**< Entire packet dropped due to the lack of reassembly-ids or
+ because ILK_RXX_CFG1[PKT_ENA]=0 */
+ uint64_t pkt_drop_rxf : 1; /**< Some/all of a packet dropped due to RX_FIFO_CNT == RX_FIFO_MAX */
+ uint64_t lane_bad_word : 1; /**< A lane encountered either a bad 64B/67B codeword or an unknown
+ control word type. */
+ uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */
+ uint64_t lane_align_done : 1; /**< Lane alignment successful */
+ uint64_t word_sync_done : 1; /**< All enabled lanes have achieved word boundary lock and
+ scrambler synchronization. Lane alignment may now be enabled. */
+ uint64_t crc24_err : 1; /**< Burst CRC24 error. All open packets will receive an error. */
+ uint64_t lane_align_fail : 1; /**< Lane Alignment fails (4 tries) */
+#else
+ uint64_t lane_align_fail : 1;
+ uint64_t crc24_err : 1;
+ uint64_t word_sync_done : 1;
+ uint64_t lane_align_done : 1;
+ uint64_t stat_cnt_ovfl : 1;
+ uint64_t lane_bad_word : 1;
+ uint64_t pkt_drop_rxf : 1;
+ uint64_t pkt_drop_rid : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_int_en cvmx_ilk_rxx_int_en_t;
+
+/**
+ * cvmx_ilk_rx#_jabber
+ */
+union cvmx_ilk_rxx_jabber {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_jabber_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt : 16; /**< Byte count for jabber check. Failing packets will be
+ truncated to CNT bytes.
+
+ NOTE: Hardware tracks the size of up to two concurrent packets
+ per link. If using segment mode with more than 2 channels,
+ some large packets may not be flagged or truncated.
+
+ NOTE: CNT must be 8-byte aligned such that CNT[2:0] == 0 */
+#else
+ uint64_t cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_jabber_s cn68xx;
+ struct cvmx_ilk_rxx_jabber_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_jabber cvmx_ilk_rxx_jabber_t;
+
+/**
+ * cvmx_ilk_rx#_mem_cal0
+ *
+ * Notes:
+ * Software must program the calendar table prior to enabling the
+ * link.
+ *
+ * Software must always write ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ *
+ * A given calendar table entry has no effect on PKO pipe
+ * backpressure when either:
+ * - ENTRY_CTLx=Link (1), or
+ * - ENTRY_CTLx=XON (3) and PORT_PIPEx is outside the range of ILK_TXx_PIPE[BASE/NUMP].
+ *
+ * Within the 8 calendar table entries of one IDX value, if more
+ * than one affects the same PKO pipe, XOFF always wins over XON,
+ * regardless of the calendar table order.
+ *
+ * Software must always read ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1. Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ */
+union cvmx_ilk_rxx_mem_cal0 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_mem_cal0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t entry_ctl3 : 2; /**< XON/XOFF destination for entry (IDX*8)+3
+
+ - 0: PKO port-pipe Apply backpressure received from the
+ remote transmitter to the PKO pipe selected
+ by PORT_PIPE3.
+
+ - 1: Link Apply the backpressure received from the
+ remote transmitter to link backpressure.
+ PORT_PIPE3 is unused.
+
+ - 2: XOFF Apply XOFF to the PKO pipe selected by
+ PORT_PIPE3.
+
+ - 3: XON Apply XON to the PKO pipe selected by
+ PORT_PIPE3. The calendar table entry is
+ effectively unused if PORT_PIPE3 is out of
+ range of ILK_TXx_PIPE[BASE/NUMP]. */
+ uint64_t port_pipe3 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+3
+
+ PORT_PIPE3 must reside in the range of ILK_TXx_PIPE[BASE/NUMP]
+ when ENTRY_CTL3 is "XOFF" (2) or "PKO port-pipe" (0). */
+ uint64_t entry_ctl2 : 2; /**< XON/XOFF destination for entry (IDX*8)+2
+
+ - 0: PKO port-pipe Apply backpressure received from the
+ remote transmitter to the PKO pipe selected
+ by PORT_PIPE2.
+
+ - 1: Link Apply the backpressure received from the
+ remote transmitter to link backpressure.
+ PORT_PIPE2 is unused.
+
+ - 2: XOFF Apply XOFF to the PKO pipe selected by
+ PORT_PIPE2.
+
+ - 3: XON Apply XON to the PKO pipe selected by
+ PORT_PIPE2. The calendar table entry is
+ effectively unused if PORT_PIPE2 is out of
+ range of ILK_TXx_PIPE[BASE/NUMP]. */
+ uint64_t port_pipe2 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+2
+
+ PORT_PIPE2 must reside in the range of ILK_TXx_PIPE[BASE/NUMP]
+ when ENTRY_CTL2 is "XOFF" (2) or "PKO port-pipe" (0). */
+ uint64_t entry_ctl1 : 2; /**< XON/XOFF destination for entry (IDX*8)+1
+
+ - 0: PKO port-pipe Apply backpressure received from the
+ remote transmitter to the PKO pipe selected
+ by PORT_PIPE1.
+
+ - 1: Link Apply the backpressure received from the
+ remote transmitter to link backpressure.
+ PORT_PIPE1 is unused.
+
+ - 2: XOFF Apply XOFF to the PKO pipe selected by
+ PORT_PIPE1.
+
+ - 3: XON Apply XON to the PKO pipe selected by
+ PORT_PIPE1. The calendar table entry is
+ effectively unused if PORT_PIPE1 is out of
+ range of ILK_TXx_PIPE[BASE/NUMP]. */
+ uint64_t port_pipe1 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+1
+
+ PORT_PIPE1 must reside in the range of ILK_TXx_PIPE[BASE/NUMP]
+ when ENTRY_CTL1 is "XOFF" (2) or "PKO port-pipe" (0). */
+ uint64_t entry_ctl0 : 2; /**< XON/XOFF destination for entry (IDX*8)+0
+
+ - 0: PKO port-pipe Apply backpressure received from the
+ remote transmitter to the PKO pipe selected
+ by PORT_PIPE0.
+
+ - 1: Link Apply the backpressure received from the
+ remote transmitter to link backpressure.
+ PORT_PIPE0 is unused.
+
+ - 2: XOFF Apply XOFF to the PKO pipe selected by
+ PORT_PIPE0.
+
+ - 3: XON Apply XON to the PKO pipe selected by
+ PORT_PIPE0. The calendar table entry is
+ effectively unused if PORT_PIPE0 is out of
+ range of ILK_TXx_PIPE[BASE/NUMP]. */
+ uint64_t port_pipe0 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+0
+
+ PORT_PIPE0 must reside in the range of ILK_TXx_PIPE[BASE/NUMP]
+ when ENTRY_CTL0 is "XOFF" (2) or "PKO port-pipe" (0). */
+#else
+ uint64_t port_pipe0 : 7;
+ uint64_t entry_ctl0 : 2;
+ uint64_t port_pipe1 : 7;
+ uint64_t entry_ctl1 : 2;
+ uint64_t port_pipe2 : 7;
+ uint64_t entry_ctl2 : 2;
+ uint64_t port_pipe3 : 7;
+ uint64_t entry_ctl3 : 2;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_mem_cal0_s cn68xx;
+ struct cvmx_ilk_rxx_mem_cal0_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_mem_cal0 cvmx_ilk_rxx_mem_cal0_t;
+
+/**
+ * cvmx_ilk_rx#_mem_cal1
+ *
+ * Notes:
+ * Software must program the calendar table prior to enabling the
+ * link.
+ *
+ * Software must always write ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ *
+ * A given calendar table entry has no effect on PKO pipe
+ * backpressure when either:
+ * - ENTRY_CTLx=Link (1), or
+ * - ENTRY_CTLx=XON (3) and PORT_PIPEx is outside the range of ILK_TXx_PIPE[BASE/NUMP].
+ *
+ * Within the 8 calendar table entries of one IDX value, if more
+ * than one affects the same PKO pipe, XOFF always wins over XON,
+ * regardless of the calendar table order.
+ *
+ * Software must always read ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1. Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ */
+union cvmx_ilk_rxx_mem_cal1 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_mem_cal1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t entry_ctl7 : 2; /**< XON/XOFF destination for entry (IDX*8)+7
+
+ - 0: PKO port-pipe Apply backpressure received from the
+ remote transmitter to the PKO pipe selected
+ by PORT_PIPE7.
+
+ - 1: Link Apply the backpressure received from the
+ remote transmitter to link backpressure.
+ PORT_PIPE7 is unused.
+
+ - 2: XOFF Apply XOFF to the PKO pipe selected by
+ PORT_PIPE7.
+
+ - 3: XON Apply XON to the PKO pipe selected by
+ PORT_PIPE7. The calendar table entry is
+ effectively unused if PORT_PIPE7 is out of
+ range of ILK_TXx_PIPE[BASE/NUMP]. */
+ uint64_t port_pipe7 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+7
+
+ PORT_PIPE7 must reside in the range of ILK_TXx_PIPE[BASE/NUMP]
+ when ENTRY_CTL7 is "XOFF" (2) or "PKO port-pipe" (0). */
+ uint64_t entry_ctl6 : 2; /**< XON/XOFF destination for entry (IDX*8)+6
+
+ - 0: PKO port-pipe Apply backpressure received from the
+ remote transmitter to the PKO pipe selected
+ by PORT_PIPE6.
+
+ - 1: Link Apply the backpressure received from the
+ remote transmitter to link backpressure.
+ PORT_PIPE6 is unused.
+
+ - 2: XOFF Apply XOFF to the PKO pipe selected by
+ PORT_PIPE6.
+
+ - 3: XON Apply XON to the PKO pipe selected by
+ PORT_PIPE6. The calendar table entry is
+ effectively unused if PORT_PIPE6 is out of
+ range of ILK_TXx_PIPE[BASE/NUMP]. */
+ uint64_t port_pipe6 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+6
+
+ PORT_PIPE6 must reside in the range of ILK_TXx_PIPE[BASE/NUMP]
+ when ENTRY_CTL6 is "XOFF" (2) or "PKO port-pipe" (0). */
+ uint64_t entry_ctl5 : 2; /**< XON/XOFF destination for entry (IDX*8)+5
+
+ - 0: PKO port-pipe Apply backpressure received from the
+ remote transmitter to the PKO pipe selected
+ by PORT_PIPE5.
+
+ - 1: Link Apply the backpressure received from the
+ remote transmitter to link backpressure.
+ PORT_PIPE5 is unused.
+
+ - 2: XOFF Apply XOFF to the PKO pipe selected by
+ PORT_PIPE5.
+
+ - 3: XON Apply XON to the PKO pipe selected by
+ PORT_PIPE5. The calendar table entry is
+ effectively unused if PORT_PIPE5 is out of
+ range of ILK_TXx_PIPE[BASE/NUMP]. */
+ uint64_t port_pipe5 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+5
+
+ PORT_PIPE5 must reside in the range of ILK_TXx_PIPE[BASE/NUMP]
+ when ENTRY_CTL5 is "XOFF" (2) or "PKO port-pipe" (0). */
+ uint64_t entry_ctl4 : 2; /**< XON/XOFF destination for entry (IDX*8)+4
+
+ - 0: PKO port-pipe Apply backpressure received from the
+ remote transmitter to the PKO pipe selected
+ by PORT_PIPE4.
+
+ - 1: Link Apply the backpressure received from the
+ remote transmitter to link backpressure.
+ PORT_PIPE4 is unused.
+
+ - 2: XOFF Apply XOFF to the PKO pipe selected by
+ PORT_PIPE4.
+
+ - 3: XON Apply XON to the PKO pipe selected by
+ PORT_PIPE4. The calendar table entry is
+ effectively unused if PORT_PIPE4 is out of
+ range of ILK_TXx_PIPE[BASE/NUMP]. */
+ uint64_t port_pipe4 : 7; /**< Select PKO port-pipe for calendar table entry (IDX*8)+4
+
+ PORT_PIPE4 must reside in the range of ILK_TXx_PIPE[BASE/NUMP]
+ when ENTRY_CTL4 is "XOFF" (2) or "PKO port-pipe" (0). */
+#else
+ uint64_t port_pipe4 : 7;
+ uint64_t entry_ctl4 : 2;
+ uint64_t port_pipe5 : 7;
+ uint64_t entry_ctl5 : 2;
+ uint64_t port_pipe6 : 7;
+ uint64_t entry_ctl6 : 2;
+ uint64_t port_pipe7 : 7;
+ uint64_t entry_ctl7 : 2;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_mem_cal1_s cn68xx;
+ struct cvmx_ilk_rxx_mem_cal1_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_mem_cal1 cvmx_ilk_rxx_mem_cal1_t;
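+
+/* A sketch of programming one group of eight calendar entries while
+ * honoring the CAL0-before-CAL1 ordering rule from the notes above.
+ * Assumes CVMX_ILK_RXX_MEM_CAL0(link)/CVMX_ILK_RXX_MEM_CAL1(link) address
+ * macros earlier in this file; ilk_rx_cal_seek() is the ILK_RXx_IDX_CAL
+ * sketch shown above. */
+#if 0 /* usage sketch */
+static void ilk_rx_cal_write8(int link, int group_of_8,
+                              cvmx_ilk_rxx_mem_cal0_t cal0,
+                              cvmx_ilk_rxx_mem_cal1_t cal1)
+{
+    ilk_rx_cal_seek(link, group_of_8);
+    cvmx_write_csr(CVMX_ILK_RXX_MEM_CAL0(link), cal0.u64); /* entries 0-3 */
+    cvmx_write_csr(CVMX_ILK_RXX_MEM_CAL1(link), cal1.u64); /* entries 4-7 */
+}
+#endif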
+
+/**
+ * cvmx_ilk_rx#_mem_stat0
+ */
+union cvmx_ilk_rxx_mem_stat0 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_mem_stat0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t rx_pkt : 28; /**< Number of packets received (256M)
+ Channel selected by ILK_RXx_IDX_STAT0[IDX]. Saturates.
+ Interrupt on saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t rx_pkt : 28;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_mem_stat0_s cn68xx;
+ struct cvmx_ilk_rxx_mem_stat0_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_mem_stat0 cvmx_ilk_rxx_mem_stat0_t;
+
+/**
+ * cvmx_ilk_rx#_mem_stat1
+ */
+union cvmx_ilk_rxx_mem_stat1 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_mem_stat1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t rx_bytes : 36; /**< Number of bytes received (64GB)
+ Channel selected by ILK_RXx_IDX_STAT1[IDX]. Saturates.
+ Interrupt on saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t rx_bytes : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_mem_stat1_s cn68xx;
+ struct cvmx_ilk_rxx_mem_stat1_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_mem_stat1 cvmx_ilk_rxx_mem_stat1_t;
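+
+/* A sketch of reading the per-channel Rx counters: select the channel
+ * through ILK_RXx_IDX_STAT0/1, then read the MEM_STAT registers. Assumes
+ * CVMX_ILK_RXX_IDX_STAT0/1(link) and CVMX_ILK_RXX_MEM_STAT0/1(link) address
+ * macros earlier in this file. */
+#if 0 /* usage sketch */
+static void ilk_rx_chan_stats(int link, int channel,
+                              uint64_t *pkts, uint64_t *bytes)
+{
+    cvmx_ilk_rxx_idx_stat0_t idx0;
+    cvmx_ilk_rxx_idx_stat1_t idx1;
+    cvmx_ilk_rxx_mem_stat0_t stat0;
+    cvmx_ilk_rxx_mem_stat1_t stat1;
+
+    idx0.u64 = 0;
+    idx0.s.index = channel;
+    cvmx_write_csr(CVMX_ILK_RXX_IDX_STAT0(link), idx0.u64);
+    idx1.u64 = 0;
+    idx1.s.index = channel;
+    cvmx_write_csr(CVMX_ILK_RXX_IDX_STAT1(link), idx1.u64);
+
+    stat0.u64 = cvmx_read_csr(CVMX_ILK_RXX_MEM_STAT0(link));
+    stat1.u64 = cvmx_read_csr(CVMX_ILK_RXX_MEM_STAT1(link));
+    *pkts  = stat0.s.rx_pkt;
+    *bytes = stat1.s.rx_bytes;
+}
+#endif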
+
+/**
+ * cvmx_ilk_rx#_rid
+ */
+union cvmx_ilk_rxx_rid {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_rid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t max_cnt : 6; /**< Maximum number of reassembly-ids allowed for a given link. If
+ an SOP arrives and the link has already allocated at least
+ MAX_CNT reassembly-ids, the packet will be dropped.
+
+ Note: An SOP allocates a reassembly-id.
+ Note: An EOP frees a reassembly-id.
+
+ ***NOTE: Added in pass 2.0 */
+#else
+ uint64_t max_cnt : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_rid_s cn68xx;
+};
+typedef union cvmx_ilk_rxx_rid cvmx_ilk_rxx_rid_t;
+
+/**
+ * cvmx_ilk_rx#_stat0
+ */
+union cvmx_ilk_rxx_stat0 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_stat0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t crc24_match_cnt : 33; /**< Number of CRC24 matches received. Saturates. Interrupt on
+ saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t crc24_match_cnt : 33;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_stat0_s cn68xx;
+ struct cvmx_ilk_rxx_stat0_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t crc24_match_cnt : 27; /**< Number of CRC24 matches received. Saturates. Interrupt on
+ saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t crc24_match_cnt : 27;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_stat0 cvmx_ilk_rxx_stat0_t;
+
+/**
+ * cvmx_ilk_rx#_stat1
+ */
+union cvmx_ilk_rxx_stat1 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_stat1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t crc24_err_cnt : 18; /**< Number of bursts with a detected CRC error. Saturates.
+ Interrupt on saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t crc24_err_cnt : 18;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_stat1_s cn68xx;
+ struct cvmx_ilk_rxx_stat1_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_stat1 cvmx_ilk_rxx_stat1_t;
+
+/**
+ * cvmx_ilk_rx#_stat2
+ */
+union cvmx_ilk_rxx_stat2 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_stat2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t brst_not_full_cnt : 16; /**< Number of bursts received which terminated without an eop and
+ contained fewer than BurstMax words. Saturates. Interrupt on
+ saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t brst_cnt : 28; /**< Number of bursts correctly received. (ie. good CRC24, not in
+ violation of BurstMax or BurstShort) */
+#else
+ uint64_t brst_cnt : 28;
+ uint64_t reserved_28_31 : 4;
+ uint64_t brst_not_full_cnt : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_stat2_s cn68xx;
+ struct cvmx_ilk_rxx_stat2_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t brst_not_full_cnt : 16; /**< Number of bursts received which terminated without an eop and
+ contained fewer than BurstMax words. Saturates. Interrupt on
+ saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+ uint64_t reserved_16_31 : 16;
+ uint64_t brst_cnt : 16; /**< Number of bursts correctly received. (ie. good CRC24, not in
+ violation of BurstMax or BurstShort) */
+#else
+ uint64_t brst_cnt : 16;
+ uint64_t reserved_16_31 : 16;
+ uint64_t brst_not_full_cnt : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_stat2 cvmx_ilk_rxx_stat2_t;
+
+/**
+ * cvmx_ilk_rx#_stat3
+ */
+union cvmx_ilk_rxx_stat3 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_stat3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t brst_max_err_cnt : 16; /**< Number of bursts received longer than the BurstMax parameter */
+#else
+ uint64_t brst_max_err_cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_stat3_s cn68xx;
+ struct cvmx_ilk_rxx_stat3_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_stat3 cvmx_ilk_rxx_stat3_t;
+
+/**
+ * cvmx_ilk_rx#_stat4
+ */
+union cvmx_ilk_rxx_stat4 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_stat4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t brst_shrt_err_cnt : 16; /**< Number of bursts received that violate the BurstShort
+ parameter. Saturates. Interrupt on saturation if
+ ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t brst_shrt_err_cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_stat4_s cn68xx;
+ struct cvmx_ilk_rxx_stat4_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_stat4 cvmx_ilk_rxx_stat4_t;
+
+/**
+ * cvmx_ilk_rx#_stat5
+ */
+union cvmx_ilk_rxx_stat5 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_stat5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t align_cnt : 23; /**< Number of alignment sequences received (ie. those that do not
+ violate the current alignment). Saturates. Interrupt on
+ saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t align_cnt : 23;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_stat5_s cn68xx;
+ struct cvmx_ilk_rxx_stat5_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t align_cnt : 16; /**< Number of alignment sequences received (ie. those that do not
+ violate the current alignment). Saturates. Interrupt on
+ saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t align_cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_stat5 cvmx_ilk_rxx_stat5_t;
+
+/**
+ * cvmx_ilk_rx#_stat6
+ */
+union cvmx_ilk_rxx_stat6 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_stat6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t align_err_cnt : 16; /**< Number of alignment sequences received in error (ie. those that
+ violate the current alignment). Saturates. Interrupt on
+ saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t align_err_cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_stat6_s cn68xx;
+ struct cvmx_ilk_rxx_stat6_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_stat6 cvmx_ilk_rxx_stat6_t;
+
+/**
+ * cvmx_ilk_rx#_stat7
+ */
+union cvmx_ilk_rxx_stat7 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_stat7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t bad_64b67b_cnt : 16; /**< Number of bad 64B/67B codewords. Saturates. Interrupt on
+ saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t bad_64b67b_cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_stat7_s cn68xx;
+ struct cvmx_ilk_rxx_stat7_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_stat7 cvmx_ilk_rxx_stat7_t;
+
+/**
+ * cvmx_ilk_rx#_stat8
+ */
+union cvmx_ilk_rxx_stat8 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_stat8_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pkt_drop_rid_cnt : 16; /**< Number of packets dropped due to the lack of reassembly-ids or
+ because ILK_RXX_CFG1[PKT_ENA]=0. Saturates. Interrupt on
+ saturation if ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+ uint64_t pkt_drop_rxf_cnt : 16; /**< Number of packets dropped due to RX_FIFO_CNT >= RX_FIFO_MAX.
+ Saturates. Interrupt on saturation if
+ ILK_RXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t pkt_drop_rxf_cnt : 16;
+ uint64_t pkt_drop_rid_cnt : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_stat8_s cn68xx;
+ struct cvmx_ilk_rxx_stat8_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_stat8 cvmx_ilk_rxx_stat8_t;
+
+/**
+ * cvmx_ilk_rx#_stat9
+ */
+union cvmx_ilk_rxx_stat9 {
+ uint64_t u64;
+ struct cvmx_ilk_rxx_stat9_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_ilk_rxx_stat9_s cn68xx;
+ struct cvmx_ilk_rxx_stat9_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxx_stat9 cvmx_ilk_rxx_stat9_t;
+
+/**
+ * cvmx_ilk_rx_lne#_cfg
+ */
+union cvmx_ilk_rx_lnex_cfg {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t rx_dis_psh_skip : 1; /**< When RX_DIS_PSH_SKIP=0, skip words are de-striped.
+ When RX_DIS_PSH_SKIP=1, skip words are discarded in the lane
+ logic.
+
+ If the lane is in internal loopback mode, RX_DIS_PSH_SKIP
+ is ignored and skip words are always discarded in the lane
+ logic.
+
+ ***NOTE: Added in pass 2.0 */
+ uint64_t reserved_6_7 : 2;
+ uint64_t rx_scrm_sync : 1; /**< Rx scrambler synchronization status
+
+ ***NOTE: Added in pass 2.0 */
+ uint64_t rx_bdry_sync : 1; /**< Rx word boundary sync status */
+ uint64_t rx_dis_ukwn : 1; /**< Disable normal response to unknown words. They are still
+ logged but do not cause an error to all open channels */
+ uint64_t rx_dis_scram : 1; /**< Disable lane scrambler (debug) */
+ uint64_t stat_rdclr : 1; /**< CSR read to ILK_RX_LNEx_STAT* clears the selected counter after
+ returning its current value. */
+ uint64_t stat_ena : 1; /**< Enable RX lane statistics counters */
+#else
+ uint64_t stat_ena : 1;
+ uint64_t stat_rdclr : 1;
+ uint64_t rx_dis_scram : 1;
+ uint64_t rx_dis_ukwn : 1;
+ uint64_t rx_bdry_sync : 1;
+ uint64_t rx_scrm_sync : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t rx_dis_psh_skip : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_cfg_s cn68xx;
+ struct cvmx_ilk_rx_lnex_cfg_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t rx_bdry_sync : 1; /**< Rx word boundary sync status */
+ uint64_t rx_dis_ukwn : 1; /**< Disable normal response to unknown words. They are still
+ logged but do not cause an error to all open channels */
+ uint64_t rx_dis_scram : 1; /**< Disable lane scrambler (debug) */
+ uint64_t stat_rdclr : 1; /**< CSR read to ILK_RX_LNEx_STAT* clears the selected counter after
+ returning its current value. */
+ uint64_t stat_ena : 1; /**< Enable RX lane statistics counters */
+#else
+ uint64_t stat_ena : 1;
+ uint64_t stat_rdclr : 1;
+ uint64_t rx_dis_scram : 1;
+ uint64_t rx_dis_ukwn : 1;
+ uint64_t rx_bdry_sync : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_cfg cvmx_ilk_rx_lnex_cfg_t;
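+
+/* A sketch that polls the read-only per-lane sync bits above before lane
+ * alignment is enabled. Assumes a CVMX_ILK_RX_LNEX_CFG(lane) address macro
+ * earlier in this file; RX_SCRM_SYNC exists on pass 2.0 and later parts. */
+#if 0 /* usage sketch */
+static int ilk_lane_is_synced(int lane)
+{
+    cvmx_ilk_rx_lnex_cfg_t lne_cfg;
+
+    lne_cfg.u64 = cvmx_read_csr(CVMX_ILK_RX_LNEX_CFG(lane));
+    return lne_cfg.s.rx_bdry_sync && lne_cfg.s.rx_scrm_sync;
+}
+#endif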
+
+/**
+ * cvmx_ilk_rx_lne#_int
+ */
+union cvmx_ilk_rx_lnex_int {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t bad_64b67b : 1; /**< Bad 64B/67B codeword encountered. Once the bad word reaches
+ the burst control unit (as denoted by
+ ILK_RXx_INT[LANE_BAD_WORD]) it will be tossed and all open
+ packets will receive an error. */
+ uint64_t stat_cnt_ovfl : 1; /**< Rx lane statistic counter overflow */
+ uint64_t stat_msg : 1; /**< Status bits for the link or a lane transitioned from a '1'
+ (healthy) to a '0' (problem) */
+ uint64_t dskew_fifo_ovfl : 1; /**< Rx deskew fifo overflow occurred. */
+ uint64_t scrm_sync_loss : 1; /**< 4 consecutive bad sync words or 3 consecutive scramble state
+ mismatches */
+ uint64_t ukwn_cntl_word : 1; /**< Unknown framing control word. Block type does not match any of
+ (SYNC,SCRAM,SKIP,DIAG) */
+ uint64_t crc32_err : 1; /**< Diagnostic CRC32 errors */
+ uint64_t bdry_sync_loss : 1; /**< Rx logic loses word boundary sync (16 tries). Hardware will
+ automatically attempt to regain word boundary sync */
+ uint64_t serdes_lock_loss : 1; /**< Rx SERDES loses lock */
+#else
+ uint64_t serdes_lock_loss : 1;
+ uint64_t bdry_sync_loss : 1;
+ uint64_t crc32_err : 1;
+ uint64_t ukwn_cntl_word : 1;
+ uint64_t scrm_sync_loss : 1;
+ uint64_t dskew_fifo_ovfl : 1;
+ uint64_t stat_msg : 1;
+ uint64_t stat_cnt_ovfl : 1;
+ uint64_t bad_64b67b : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_int_s cn68xx;
+ struct cvmx_ilk_rx_lnex_int_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_int cvmx_ilk_rx_lnex_int_t;
+
+/**
+ * cvmx_ilk_rx_lne#_int_en
+ */
+union cvmx_ilk_rx_lnex_int_en {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t bad_64b67b : 1; /**< Bad 64B/67B codeword encountered. Once the bad word reaches
+ the burst control unit (as denoted by
+ ILK_RXx_INT[LANE_BAD_WORD]) it will be tossed and all open
+ packets will receive an error. */
+ uint64_t stat_cnt_ovfl : 1; /**< Rx lane statistic counter overflow */
+ uint64_t stat_msg : 1; /**< Status bits for the link or a lane transitioned from a '1'
+ (healthy) to a '0' (problem) */
+ uint64_t dskew_fifo_ovfl : 1; /**< Rx deskew fifo overflow occurred. */
+ uint64_t scrm_sync_loss : 1; /**< 4 consecutive bad sync words or 3 consecutive scramble state
+ mismatches */
+ uint64_t ukwn_cntl_word : 1; /**< Unknown framing control word. Block type does not match any of
+ (SYNC,SCRAM,SKIP,DIAG) */
+ uint64_t crc32_err : 1; /**< Diagnostic CRC32 error */
+ uint64_t bdry_sync_loss : 1; /**< Rx logic loses word boundary sync (16 tries). Hardware will
+ automatically attempt to regain word boundary sync */
+ uint64_t serdes_lock_loss : 1; /**< Rx SERDES loses lock */
+#else
+ uint64_t serdes_lock_loss : 1;
+ uint64_t bdry_sync_loss : 1;
+ uint64_t crc32_err : 1;
+ uint64_t ukwn_cntl_word : 1;
+ uint64_t scrm_sync_loss : 1;
+ uint64_t dskew_fifo_ovfl : 1;
+ uint64_t stat_msg : 1;
+ uint64_t stat_cnt_ovfl : 1;
+ uint64_t bad_64b67b : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_int_en_s cn68xx;
+ struct cvmx_ilk_rx_lnex_int_en_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_int_en cvmx_ilk_rx_lnex_int_en_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat0
+ */
+union cvmx_ilk_rx_lnex_stat0 {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_stat0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t ser_lock_loss_cnt : 18; /**< Number of times the lane lost clock-data-recovery.
+ Saturates. Interrupt on saturation if
+ ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+#else
+ uint64_t ser_lock_loss_cnt : 18;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_stat0_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat0_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_stat0 cvmx_ilk_rx_lnex_stat0_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat1
+ */
+union cvmx_ilk_rx_lnex_stat1 {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_stat1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t bdry_sync_loss_cnt : 18; /**< Number of times a lane lost word boundary synchronization.
+ Saturates. Interrupt on saturation if
+ ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+#else
+ uint64_t bdry_sync_loss_cnt : 18;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_stat1_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat1_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_stat1 cvmx_ilk_rx_lnex_stat1_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat2
+ */
+union cvmx_ilk_rx_lnex_stat2 {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_stat2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63 : 14;
+ uint64_t syncw_good_cnt : 18; /**< Number of good synchronization words. Saturates. Interrupt on
+ saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+ uint64_t reserved_18_31 : 14;
+ uint64_t syncw_bad_cnt : 18; /**< Number of bad synchronization words. Saturates. Interrupt on
+ saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+#else
+ uint64_t syncw_bad_cnt : 18;
+ uint64_t reserved_18_31 : 14;
+ uint64_t syncw_good_cnt : 18;
+ uint64_t reserved_50_63 : 14;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_stat2_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat2_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_stat2 cvmx_ilk_rx_lnex_stat2_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat3
+ */
+union cvmx_ilk_rx_lnex_stat3 {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_stat3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t bad_64b67b_cnt : 18; /**< Number of bad 64B/67B words, meaning bit 65 or 64 has been
+ corrupted. Saturates. Interrupt on saturation if
+ ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+#else
+ uint64_t bad_64b67b_cnt : 18;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_stat3_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat3_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_stat3 cvmx_ilk_rx_lnex_stat3_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat4
+ */
+union cvmx_ilk_rx_lnex_stat4 {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_stat4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t cntl_word_cnt : 27; /**< Number of control words received. Saturates. Interrupt on
+ saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+ uint64_t reserved_27_31 : 5;
+ uint64_t data_word_cnt : 27; /**< Number of data words received. Saturates. Interrupt on
+ saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+#else
+ uint64_t data_word_cnt : 27;
+ uint64_t reserved_27_31 : 5;
+ uint64_t cntl_word_cnt : 27;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_stat4_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat4_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_stat4 cvmx_ilk_rx_lnex_stat4_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat5
+ */
+union cvmx_ilk_rx_lnex_stat5 {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_stat5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t unkwn_word_cnt : 18; /**< Number of unknown control words. Saturates. Interrupt on
+ saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+#else
+ uint64_t unkwn_word_cnt : 18;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_stat5_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat5_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_stat5 cvmx_ilk_rx_lnex_stat5_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat6
+ */
+union cvmx_ilk_rx_lnex_stat6 {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_stat6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t scrm_sync_loss_cnt : 18; /**< Number of times scrambler synchronization was lost (due to
+ either 4 consecutive bad sync words or 3 consecutive scrambler
+ state mismatches). Saturates. Interrupt on saturation if
+ ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+#else
+ uint64_t scrm_sync_loss_cnt : 18;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_stat6_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat6_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_stat6 cvmx_ilk_rx_lnex_stat6_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat7
+ */
+union cvmx_ilk_rx_lnex_stat7 {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_stat7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t scrm_match_cnt : 18; /**< Number of scrambler state matches received. Saturates.
+ Interrupt on saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+#else
+ uint64_t scrm_match_cnt : 18;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_stat7_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat7_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_stat7 cvmx_ilk_rx_lnex_stat7_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat8
+ */
+union cvmx_ilk_rx_lnex_stat8 {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_stat8_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t skipw_good_cnt : 18; /**< Number of good skip words. Saturates. Interrupt on saturation
+ if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+#else
+ uint64_t skipw_good_cnt : 18;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_stat8_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat8_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_stat8 cvmx_ilk_rx_lnex_stat8_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat9
+ */
+union cvmx_ilk_rx_lnex_stat9 {
+ uint64_t u64;
+ struct cvmx_ilk_rx_lnex_stat9_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63 : 14;
+ uint64_t crc32_err_cnt : 18; /**< Number of errors in the lane CRC. Saturates. Interrupt on
+ saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+ uint64_t reserved_27_31 : 5;
+ uint64_t crc32_match_cnt : 27; /**< Number of CRC32 matches received. Saturates. Interrupt on
+ saturation if ILK_RX_LNEX_INT_EN[STAT_CNT_OVFL]=1 */
+#else
+ uint64_t crc32_match_cnt : 27;
+ uint64_t reserved_27_31 : 5;
+ uint64_t crc32_err_cnt : 18;
+ uint64_t reserved_50_63 : 14;
+#endif
+ } s;
+ struct cvmx_ilk_rx_lnex_stat9_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat9_s cn68xxp1;
+};
+typedef union cvmx_ilk_rx_lnex_stat9 cvmx_ilk_rx_lnex_stat9_t;
+
+/**
+ * cvmx_ilk_rxf_idx_pmap
+ */
+union cvmx_ilk_rxf_idx_pmap {
+ uint64_t u64;
+ struct cvmx_ilk_rxf_idx_pmap_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t inc : 9; /**< Increment to add to current index for next index. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t index : 9; /**< Specify the link/channel accessed by the next CSR read/write to
+ port map memory. IDX[8]=link, IDX[7:0]=channel */
+#else
+ uint64_t index : 9;
+ uint64_t reserved_9_15 : 7;
+ uint64_t inc : 9;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_ilk_rxf_idx_pmap_s cn68xx;
+ struct cvmx_ilk_rxf_idx_pmap_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxf_idx_pmap cvmx_ilk_rxf_idx_pmap_t;
+
+/**
+ * cvmx_ilk_rxf_mem_pmap
+ */
+union cvmx_ilk_rxf_mem_pmap {
+ uint64_t u64;
+ struct cvmx_ilk_rxf_mem_pmap_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t port_kind : 6; /**< Specify the port-kind for the link/channel selected by
+ ILK_IDX_PMAP[IDX] */
+#else
+ uint64_t port_kind : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_ilk_rxf_mem_pmap_s cn68xx;
+ struct cvmx_ilk_rxf_mem_pmap_s cn68xxp1;
+};
+typedef union cvmx_ilk_rxf_mem_pmap cvmx_ilk_rxf_mem_pmap_t;
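+
+/* A sketch of the port-map idiom: select a link/channel through
+ * ILK_RXF_IDX_PMAP, then write its port-kind through ILK_RXF_MEM_PMAP.
+ * Assumes CVMX_ILK_RXF_IDX_PMAP/CVMX_ILK_RXF_MEM_PMAP address macros
+ * earlier in this file. */
+#if 0 /* usage sketch */
+static void ilk_map_channel(int link, int channel, int port_kind)
+{
+    cvmx_ilk_rxf_idx_pmap_t idx;
+    cvmx_ilk_rxf_mem_pmap_t map;
+
+    idx.u64 = 0;
+    idx.s.index = (link << 8) | channel; /* IDX[8]=link, IDX[7:0]=channel */
+    cvmx_write_csr(CVMX_ILK_RXF_IDX_PMAP, idx.u64);
+
+    map.u64 = 0;
+    map.s.port_kind = port_kind;
+    cvmx_write_csr(CVMX_ILK_RXF_MEM_PMAP, map.u64);
+}
+#endif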
+
+/**
+ * cvmx_ilk_ser_cfg
+ */
+union cvmx_ilk_ser_cfg {
+ uint64_t u64;
+ struct cvmx_ilk_ser_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_57_63 : 7;
+ uint64_t ser_rxpol_auto : 1; /**< Serdes lane receive polarity auto detection mode */
+ uint64_t reserved_48_55 : 8;
+ uint64_t ser_rxpol : 8; /**< Serdes lane receive polarity
+ - 0: rx without inversion
+ - 1: rx with inversion */
+ uint64_t reserved_32_39 : 8;
+ uint64_t ser_txpol : 8; /**< Serdes lane transmit polarity
+ - 0: tx without inversion
+ - 1: tx with inversion */
+ uint64_t reserved_16_23 : 8;
+ uint64_t ser_reset_n : 8; /**< Serdes lane reset */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ser_pwrup : 2; /**< Serdes modules (QLM) power up. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t ser_haul : 2; /**< Serdes module (QLM) haul mode */
+#else
+ uint64_t ser_haul : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t ser_pwrup : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t ser_reset_n : 8;
+ uint64_t reserved_16_23 : 8;
+ uint64_t ser_txpol : 8;
+ uint64_t reserved_32_39 : 8;
+ uint64_t ser_rxpol : 8;
+ uint64_t reserved_48_55 : 8;
+ uint64_t ser_rxpol_auto : 1;
+ uint64_t reserved_57_63 : 7;
+#endif
+ } s;
+ struct cvmx_ilk_ser_cfg_s cn68xx;
+ struct cvmx_ilk_ser_cfg_s cn68xxp1;
+};
+typedef union cvmx_ilk_ser_cfg cvmx_ilk_ser_cfg_t;
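+
+/* A SERDES bring-up sketch: power up the QLMs, then release the per-lane
+ * resets. Assumes a CVMX_ILK_SER_CFG address macro earlier in this file;
+ * the settling delay is illustrative, not a documented requirement. */
+#if 0 /* usage sketch */
+static void ilk_serdes_up(void)
+{
+    cvmx_ilk_ser_cfg_t ser;
+
+    ser.u64 = cvmx_read_csr(CVMX_ILK_SER_CFG);
+    ser.s.ser_pwrup = 0x3; /* power up QLM1 and QLM2 */
+    cvmx_write_csr(CVMX_ILK_SER_CFG, ser.u64);
+    cvmx_wait(1000); /* illustrative settling time */
+    ser.s.ser_reset_n = 0xff; /* deassert reset on all eight lanes */
+    cvmx_write_csr(CVMX_ILK_SER_CFG, ser.u64);
+}
+#endif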
+
+/**
+ * cvmx_ilk_tx#_cfg0
+ */
+union cvmx_ilk_txx_cfg0 {
+ uint64_t u64;
+ struct cvmx_ilk_txx_cfg0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ext_lpbk_fc : 1; /**< Enable Rx-Tx flowcontrol loopback (external) */
+ uint64_t ext_lpbk : 1; /**< Enable Rx-Tx data loopback (external). Note that with differing
+ transmit & receive clocks, skip word are inserted/deleted */
+ uint64_t int_lpbk : 1; /**< Enable Tx-Rx loopback (internal) */
+ uint64_t reserved_57_60 : 4;
+ uint64_t ptrn_mode : 1; /**< Enable programmable test pattern mode. This mode allows
+ software to send a packet containing a programmable pattern.
+ While in this mode, the scramblers and disparity inversion will
+ be disabled. In addition, no framing layer control words will
+ be transmitted (ie. no SYNC, scrambler state, skip, or
+ diagnostic words will be transmitted).
+
+ NOTE: Software must first write ILK_TXX_CFG0[LANE_ENA]=0 before
+ enabling/disabling this mode. */
+ uint64_t reserved_55_55 : 1;
+ uint64_t lnk_stats_ena : 1; /**< Enable link statistics counters */
+ uint64_t mltuse_fc_ena : 1; /**< When set, the multi-use field of control words will contain
+ flow control status. Otherwise, the multi-use field will
+ contain ILK_TXX_CFG1[TX_MLTUSE] */
+ uint64_t cal_ena : 1; /**< Enable Tx calendar, else default calendar used:
+ First control word:
+ Entry 0 = link
+ Entry 1 = backpressure id 0
+ Entry 2 = backpressure id 1
+ ...etc.
+ Second control word:
+ Entry 15 = link
+ Entry 16 = backpressure id 15
+ Entry 17 = backpressure id 16
+ ...etc.
+ This continues until the status for all 64 backpressure ids gets
+ transmitted (ie. 0-68 calendar table entries). The remaining 3
+ calendar table entries (ie. 69-71) will always transmit XOFF.
+
+ To disable backpressure completely, enable the calendar table
+ and program each calendar table entry to transmit XON */
+ uint64_t mfrm_len : 13; /**< The quantity of data sent on each lane, including one sync word,
+ scrambler state, diag word, zero or more skip words, and the
+ data payload. Must be larger than ILK_TXX_CFG1[SKIP_CNT]+9.
+ Supported range: ILK_TXX_CFG1[SKIP_CNT]+9 < MFRM_LEN <= 4096 */
+ uint64_t brst_shrt : 7; /**< Minimum interval between burst control words, as a multiple of
+ 8 bytes. Supported range from 8 bytes to 512 bytes (ie. 0 <
+ BRST_SHRT <= 64) */
+ uint64_t lane_rev : 1; /**< Lane reversal. When enabled, lane striping is performed from
+ most significant lane enabled to least significant lane
+ enabled. LANE_ENA must be zero before changing LANE_REV. */
+ uint64_t brst_max : 5; /**< Maximum size of a data burst, as a multiple of 64 byte blocks.
+ Supported range is from 64 bytes to 1024 bytes. (ie. 0 <
+ BRST_MAX <= 16) */
+ uint64_t reserved_25_25 : 1;
+ uint64_t cal_depth : 9; /**< Number of valid entries in the calendar. CAL_DEPTH[2:0] must
+ be zero. Supported range from 8 to 288. If CAL_ENA is 0,
+ this field has no effect and the calendar depth is 72 entries. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t lane_ena : 8; /**< Lane enable mask. Link is enabled if any lane is enabled. The
+ same lane should not be enabled in multiple ILK_TXx_CFG0. Each
+ bit of LANE_ENA maps to a TX lane (TLE) and a QLM lane. NOTE:
+ LANE_REV has no effect on this mapping.
+
+ LANE_ENA[0] = TLE0 = QLM1 lane 0
+ LANE_ENA[1] = TLE1 = QLM1 lane 1
+ LANE_ENA[2] = TLE2 = QLM1 lane 2
+ LANE_ENA[3] = TLE3 = QLM1 lane 3
+ LANE_ENA[4] = TLE4 = QLM2 lane 0
+ LANE_ENA[5] = TLE5 = QLM2 lane 1
+ LANE_ENA[6] = TLE6 = QLM2 lane 2
+ LANE_ENA[7] = TLE7 = QLM2 lane 3 */
+#else
+ uint64_t lane_ena : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t cal_depth : 9;
+ uint64_t reserved_25_25 : 1;
+ uint64_t brst_max : 5;
+ uint64_t lane_rev : 1;
+ uint64_t brst_shrt : 7;
+ uint64_t mfrm_len : 13;
+ uint64_t cal_ena : 1;
+ uint64_t mltuse_fc_ena : 1;
+ uint64_t lnk_stats_ena : 1;
+ uint64_t reserved_55_55 : 1;
+ uint64_t ptrn_mode : 1;
+ uint64_t reserved_57_60 : 4;
+ uint64_t int_lpbk : 1;
+ uint64_t ext_lpbk : 1;
+ uint64_t ext_lpbk_fc : 1;
+#endif
+ } s;
+ struct cvmx_ilk_txx_cfg0_s cn68xx;
+ struct cvmx_ilk_txx_cfg0_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_cfg0 cvmx_ilk_txx_cfg0_t;
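+
+/* Illustrative sketch (not part of the original SDK source): per the CAL_ENA
+ * description above, backpressure can be disabled entirely by enabling the
+ * calendar and programming every entry to transmit XON (ENTRY_CTL = 3).
+ * For one group of calendar entries this might look like:
+ *
+ *   cvmx_ilk_txx_mem_cal0_t cal0;
+ *   cal0.u64 = 0;
+ *   cal0.s.entry_ctl0 = 3;   // 3 = XON
+ *   cal0.s.entry_ctl1 = 3;
+ *   cal0.s.entry_ctl2 = 3;
+ *   cal0.s.entry_ctl3 = 3;
+ *   cvmx_write_csr(CVMX_ILK_TXX_MEM_CAL0(interface), cal0.u64);
+ *   // ... then ENTRY_CTL4-7 via ILK_TXx_MEM_CAL1, repeated for each group
+ */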
+
+/**
+ * cvmx_ilk_tx#_cfg1
+ */
+union cvmx_ilk_txx_cfg1 {
+ uint64_t u64;
+ struct cvmx_ilk_txx_cfg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t pkt_busy : 1; /**< Tx-Link is transmitting data. */
+ uint64_t pipe_crd_dis : 1; /**< Disable pipe credits. Should be set when PKO is configured to
+ ignore pipe credits. */
+ uint64_t ptp_delay : 5; /**< Timestamp commit delay. Must not be zero. */
+ uint64_t skip_cnt : 4; /**< Number of skip words to insert after the scrambler state */
+ uint64_t pkt_flush : 1; /**< Packet transmit flush. While PKT_FLUSH=1, the TxFifo will
+ continuously drain; all data will be dropped. Software should
+ first write PKT_ENA=0 and wait for packet transmission to stop. */
+ uint64_t pkt_ena : 1; /**< Packet transmit enable. When PKT_ENA=0, the Tx-Link will stop
+ transmitting packets, as per RX_LINK_FC_PKT */
+ uint64_t la_mode : 1; /**< 0 = Interlaken
+ 1 = Interlaken Look-Aside */
+ uint64_t tx_link_fc : 1; /**< Link flow control status transmitted by the Tx-Link
+ XON when RX_FIFO_CNT <= RX_FIFO_HWM and lane alignment is done */
+ uint64_t rx_link_fc : 1; /**< Link flow control status received in burst/idle control words.
+ When RX_LINK_FC_IGN=0, XOFF will cause Tx-Link to stop
+ transmitting on all channels. */
+ uint64_t reserved_12_16 : 5;
+ uint64_t tx_link_fc_jam : 1; /**< All flow control transmitted in burst/idle control words will
+ be XOFF whenever TX_LINK_FC is XOFF. Enable this to allow
+ link XOFF to automatically XOFF all channels. */
+ uint64_t rx_link_fc_pkt : 1; /**< Link flow control received in burst/idle control words causes
+ Tx-Link to stop transmitting at the end of a packet instead of
+ the end of a burst */
+ uint64_t rx_link_fc_ign : 1; /**< Ignore the link flow control status received in burst/idle
+ control words */
+ uint64_t rmatch : 1; /**< Enable rate matching circuitry */
+ uint64_t tx_mltuse : 8; /**< Multiple Use bits used when ILKx_TX_CFG[LA_MODE=0] and
+ ILKx_TX_CFG[MLTUSE_FC_ENA] is zero */
+#else
+ uint64_t tx_mltuse : 8;
+ uint64_t rmatch : 1;
+ uint64_t rx_link_fc_ign : 1;
+ uint64_t rx_link_fc_pkt : 1;
+ uint64_t tx_link_fc_jam : 1;
+ uint64_t reserved_12_16 : 5;
+ uint64_t rx_link_fc : 1;
+ uint64_t tx_link_fc : 1;
+ uint64_t la_mode : 1;
+ uint64_t pkt_ena : 1;
+ uint64_t pkt_flush : 1;
+ uint64_t skip_cnt : 4;
+ uint64_t ptp_delay : 5;
+ uint64_t pipe_crd_dis : 1;
+ uint64_t pkt_busy : 1;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_ilk_txx_cfg1_s cn68xx;
+ struct cvmx_ilk_txx_cfg1_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pipe_crd_dis : 1; /**< Disable pipe credits. Should be set when PKO is configured to
+ ignore pipe credits. */
+ uint64_t ptp_delay : 5; /**< Timestamp commit delay. Must not be zero. */
+ uint64_t skip_cnt : 4; /**< Number of skip words to insert after the scrambler state */
+ uint64_t pkt_flush : 1; /**< Packet transmit flush. While PKT_FLUSH=1, the TxFifo will
+ continuously drain; all data will be dropped. Software should
+ first write PKT_ENA=0 and wait for packet transmission to stop. */
+ uint64_t pkt_ena : 1; /**< Packet transmit enable. When PKT_ENA=0, the Tx-Link will stop
+ transmitting packets, as per RX_LINK_FC_PKT */
+ uint64_t la_mode : 1; /**< 0 = Interlaken
+ 1 = Interlaken Look-Aside */
+ uint64_t tx_link_fc : 1; /**< Link flow control status transmitted by the Tx-Link
+ XON when RX_FIFO_CNT <= RX_FIFO_HWM and lane alignment is done */
+ uint64_t rx_link_fc : 1; /**< Link flow control status received in burst/idle control words.
+ When RX_LINK_FC_IGN=0, XOFF will cause Tx-Link to stop
+ transmitting on all channels. */
+ uint64_t reserved_12_16 : 5;
+ uint64_t tx_link_fc_jam : 1; /**< All flow control transmitted in burst/idle control words will
+ be XOFF whenever TX_LINK_FC is XOFF. Enable this to allow
+ link XOFF to automatically XOFF all channels. */
+ uint64_t rx_link_fc_pkt : 1; /**< Link flow control received in burst/idle control words causes
+ Tx-Link to stop transmitting at the end of a packet instead of
+ the end of a burst */
+ uint64_t rx_link_fc_ign : 1; /**< Ignore the link flow control status received in burst/idle
+ control words */
+ uint64_t rmatch : 1; /**< Enable rate matching circuitry */
+ uint64_t tx_mltuse : 8; /**< Multiple Use bits used when ILKx_TX_CFG[LA_MODE=0] and
+ ILKx_TX_CFG[MLTUSE_FC_ENA] is zero */
+#else
+ uint64_t tx_mltuse : 8;
+ uint64_t rmatch : 1;
+ uint64_t rx_link_fc_ign : 1;
+ uint64_t rx_link_fc_pkt : 1;
+ uint64_t tx_link_fc_jam : 1;
+ uint64_t reserved_12_16 : 5;
+ uint64_t rx_link_fc : 1;
+ uint64_t tx_link_fc : 1;
+ uint64_t la_mode : 1;
+ uint64_t pkt_ena : 1;
+ uint64_t pkt_flush : 1;
+ uint64_t skip_cnt : 4;
+ uint64_t ptp_delay : 5;
+ uint64_t pipe_crd_dis : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ilk_txx_cfg1 cvmx_ilk_txx_cfg1_t;
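+
+/* Illustrative sketch (not part of the original SDK source): the PKT_FLUSH
+ * description above implies an ordered shutdown. A plausible sequence,
+ * assuming PKT_BUSY is available (cn68xx pass 2; it is absent on pass 1):
+ *
+ *   cvmx_ilk_txx_cfg1_t cfg1;
+ *   cfg1.u64 = cvmx_read_csr(CVMX_ILK_TXX_CFG1(interface));
+ *   cfg1.s.pkt_ena = 0;                               // stop transmitting
+ *   cvmx_write_csr(CVMX_ILK_TXX_CFG1(interface), cfg1.u64);
+ *   do {                                              // wait for tx to idle
+ *       cfg1.u64 = cvmx_read_csr(CVMX_ILK_TXX_CFG1(interface));
+ *   } while (cfg1.s.pkt_busy);
+ *   cfg1.s.pkt_flush = 1;                             // drain the TxFifo
+ *   cvmx_write_csr(CVMX_ILK_TXX_CFG1(interface), cfg1.u64);
+ */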
+
+/**
+ * cvmx_ilk_tx#_dbg
+ */
+union cvmx_ilk_txx_dbg {
+ uint64_t u64;
+ struct cvmx_ilk_txx_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t tx_bad_crc24 : 1; /**< Send a control word with bad CRC24. Hardware will clear this
+ field once the injection is performed. */
+ uint64_t tx_bad_ctlw2 : 1; /**< Send a control word without the control bit set */
+ uint64_t tx_bad_ctlw1 : 1; /**< Send a data word with the control bit set */
+#else
+ uint64_t tx_bad_ctlw1 : 1;
+ uint64_t tx_bad_ctlw2 : 1;
+ uint64_t tx_bad_crc24 : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_ilk_txx_dbg_s cn68xx;
+ struct cvmx_ilk_txx_dbg_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_dbg cvmx_ilk_txx_dbg_t;
+
+/**
+ * cvmx_ilk_tx#_flow_ctl0
+ */
+union cvmx_ilk_txx_flow_ctl0 {
+ uint64_t u64;
+ struct cvmx_ilk_txx_flow_ctl0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t status : 64; /**< IPD flow control status for backpressure id 63-0, where a 0
+ indicates the presence of backpressure (ie. XOFF) and 1
+ indicates the absence of backpressure (ie. XON) */
+#else
+ uint64_t status : 64;
+#endif
+ } s;
+ struct cvmx_ilk_txx_flow_ctl0_s cn68xx;
+ struct cvmx_ilk_txx_flow_ctl0_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_flow_ctl0 cvmx_ilk_txx_flow_ctl0_t;
+
+/**
+ * cvmx_ilk_tx#_flow_ctl1
+ *
+ * Notes:
+ * Do not publish.
+ *
+ */
+union cvmx_ilk_txx_flow_ctl1 {
+ uint64_t u64;
+ struct cvmx_ilk_txx_flow_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_ilk_txx_flow_ctl1_s cn68xx;
+ struct cvmx_ilk_txx_flow_ctl1_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_flow_ctl1 cvmx_ilk_txx_flow_ctl1_t;
+
+/**
+ * cvmx_ilk_tx#_idx_cal
+ */
+union cvmx_ilk_txx_idx_cal {
+ uint64_t u64;
+ struct cvmx_ilk_txx_idx_cal_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t inc : 6; /**< Increment to add to current index for next index. NOTE:
+ Increment only performed after *MEM_CAL1 access (ie. not
+ *MEM_CAL0) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t index : 6; /**< Specify the group of 8 entries accessed by the next CSR
+ read/write to calendar table memory. Software must ensure IDX
+ is <36 whenever writing to *MEM_CAL1 */
+#else
+ uint64_t index : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t inc : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_ilk_txx_idx_cal_s cn68xx;
+ struct cvmx_ilk_txx_idx_cal_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_idx_cal cvmx_ilk_txx_idx_cal_t;
+
+/**
+ * cvmx_ilk_tx#_idx_pmap
+ */
+union cvmx_ilk_txx_idx_pmap {
+ uint64_t u64;
+ struct cvmx_ilk_txx_idx_pmap_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t inc : 7; /**< Increment to add to current index for next index. */
+ uint64_t reserved_7_15 : 9;
+ uint64_t index : 7; /**< Specify the port-pipe accessed by the next CSR read/write to
+ ILK_TXx_MEM_PMAP. Note that IDX=n is always port-pipe n,
+ regardless of ILK_TXx_PIPE[BASE] */
+#else
+ uint64_t index : 7;
+ uint64_t reserved_7_15 : 9;
+ uint64_t inc : 7;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_ilk_txx_idx_pmap_s cn68xx;
+ struct cvmx_ilk_txx_idx_pmap_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_idx_pmap cvmx_ilk_txx_idx_pmap_t;
+
+/**
+ * cvmx_ilk_tx#_idx_stat0
+ */
+union cvmx_ilk_txx_idx_stat0 {
+ uint64_t u64;
+ struct cvmx_ilk_txx_idx_stat0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t clr : 1; /**< CSR read to ILK_TXx_MEM_STAT0 clears the selected counter after
+ returning its current value. */
+ uint64_t reserved_24_30 : 7;
+ uint64_t inc : 8; /**< Increment to add to current index for next index */
+ uint64_t reserved_8_15 : 8;
+ uint64_t index : 8; /**< Specify the channel accessed during the next CSR read to the
+ ILK_TXx_MEM_STAT0 */
+#else
+ uint64_t index : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t inc : 8;
+ uint64_t reserved_24_30 : 7;
+ uint64_t clr : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ilk_txx_idx_stat0_s cn68xx;
+ struct cvmx_ilk_txx_idx_stat0_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_idx_stat0 cvmx_ilk_txx_idx_stat0_t;
+
+/**
+ * cvmx_ilk_tx#_idx_stat1
+ */
+union cvmx_ilk_txx_idx_stat1 {
+ uint64_t u64;
+ struct cvmx_ilk_txx_idx_stat1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t clr : 1; /**< CSR read to ILK_TXx_MEM_STAT1 clears the selected counter after
+ returning its current value. */
+ uint64_t reserved_24_30 : 7;
+ uint64_t inc : 8; /**< Increment to add to current index for next index */
+ uint64_t reserved_8_15 : 8;
+ uint64_t index : 8; /**< Specify the channel accessed during the next CSR read to the
+ ILK_TXx_MEM_STAT1 */
+#else
+ uint64_t index : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t inc : 8;
+ uint64_t reserved_24_30 : 7;
+ uint64_t clr : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ilk_txx_idx_stat1_s cn68xx;
+ struct cvmx_ilk_txx_idx_stat1_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_idx_stat1 cvmx_ilk_txx_idx_stat1_t;
+
+/**
+ * cvmx_ilk_tx#_int
+ */
+union cvmx_ilk_txx_int {
+ uint64_t u64;
+ struct cvmx_ilk_txx_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */
+ uint64_t bad_pipe : 1; /**< Received a PKO port-pipe out of the range specified by
+ ILK_TXX_PIPE */
+ uint64_t bad_seq : 1; /**< Received sequence is not SOP followed by 0 or more data cycles
+ followed by EOP. Can occur when the PKO configuration assigns
+ multiple engines to the same ILK Tx link. */
+ uint64_t txf_err : 1; /**< TX fifo parity error occurred. At EOP time, EOP_Format will
+ reflect the error. */
+#else
+ uint64_t txf_err : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_pipe : 1;
+ uint64_t stat_cnt_ovfl : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ilk_txx_int_s cn68xx;
+ struct cvmx_ilk_txx_int_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_int cvmx_ilk_txx_int_t;
+
+/**
+ * cvmx_ilk_tx#_int_en
+ */
+union cvmx_ilk_txx_int_en {
+ uint64_t u64;
+ struct cvmx_ilk_txx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t stat_cnt_ovfl : 1; /**< Statistics counter overflow */
+ uint64_t bad_pipe : 1; /**< Received a PKO port-pipe out of the range specified by
+ ILK_TXX_PIPE. */
+ uint64_t bad_seq : 1; /**< Received sequence is not SOP followed by 0 or more data cycles
+ followed by EOP. Can occur when the PKO configuration assigns
+ multiple engines to the same ILK Tx link. */
+ uint64_t txf_err : 1; /**< TX fifo parity error occurred. At EOP time, EOP_Format will
+ reflect the error. */
+#else
+ uint64_t txf_err : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_pipe : 1;
+ uint64_t stat_cnt_ovfl : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ilk_txx_int_en_s cn68xx;
+ struct cvmx_ilk_txx_int_en_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_int_en cvmx_ilk_txx_int_en_t;
+
+/**
+ * cvmx_ilk_tx#_mem_cal0
+ *
+ * Notes:
+ * Software must always read ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1. Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ *
+ * Software must always write ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ */
+union cvmx_ilk_txx_mem_cal0 {
+ uint64_t u64;
+ struct cvmx_ilk_txx_mem_cal0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t entry_ctl3 : 2; /**< Select source of XON/XOFF for entry (IDX*8)+3
+ - 0: IPD backpressure id
+ - 1: Link
+ - 2: XOFF
+ - 3: XON */
+ uint64_t reserved_33_33 : 1;
+ uint64_t bpid3 : 6; /**< Select IPD backpressure id for calendar table entry (IDX*8)+3
+ (unused if ENTRY_CTL3 != 0) */
+ uint64_t entry_ctl2 : 2; /**< Select source of XON/XOFF for entry (IDX*8)+2
+ - 0: IPD backpressure id
+ - 1: Link
+ - 2: XOFF
+ - 3: XON */
+ uint64_t reserved_24_24 : 1;
+ uint64_t bpid2 : 6; /**< Select IPD backpressure id for calendar table entry (IDX*8)+2
+ (unused if ENTRY_CTL2 != 0) */
+ uint64_t entry_ctl1 : 2; /**< Select source of XON/XOFF for entry (IDX*8)+1
+ - 0: IPD backpressure id
+ - 1: Link
+ - 2: XOFF
+ - 3: XON */
+ uint64_t reserved_15_15 : 1;
+ uint64_t bpid1 : 6; /**< Select IPD backpressure id for calendar table entry (IDX*8)+1
+ (unused if ENTRY_CTL1 != 0) */
+ uint64_t entry_ctl0 : 2; /**< Select source of XON/XOFF for entry (IDX*8)+0
+ - 0: IPD backpressure id
+ - 1: Link
+ - 2: XOFF
+ - 3: XON */
+ uint64_t reserved_6_6 : 1;
+ uint64_t bpid0 : 6; /**< Select IPD backpressure id for calendar table entry (IDX*8)+0
+ (unused if ENTRY_CTL0 != 0) */
+#else
+ uint64_t bpid0 : 6;
+ uint64_t reserved_6_6 : 1;
+ uint64_t entry_ctl0 : 2;
+ uint64_t bpid1 : 6;
+ uint64_t reserved_15_15 : 1;
+ uint64_t entry_ctl1 : 2;
+ uint64_t bpid2 : 6;
+ uint64_t reserved_24_24 : 1;
+ uint64_t entry_ctl2 : 2;
+ uint64_t bpid3 : 6;
+ uint64_t reserved_33_33 : 1;
+ uint64_t entry_ctl3 : 2;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_ilk_txx_mem_cal0_s cn68xx;
+ struct cvmx_ilk_txx_mem_cal0_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_mem_cal0 cvmx_ilk_txx_mem_cal0_t;
+
+/**
+ * cvmx_ilk_tx#_mem_cal1
+ *
+ * Notes:
+ * Software must always read ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1. Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ *
+ * Software must always write ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ */
+union cvmx_ilk_txx_mem_cal1 {
+ uint64_t u64;
+ struct cvmx_ilk_txx_mem_cal1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t entry_ctl7 : 2; /**< Select source of XON/XOFF for entry (IDX*8)+7
+ - 0: IPD backpressure id
+ - 1: Link
+ - 2: XOFF
+ - 3: XON */
+ uint64_t reserved_33_33 : 1;
+ uint64_t bpid7 : 6; /**< Select IPD backpressure id for calendar table entry (IDX*8)+7
+ (unused if ENTRY_CTL7 != 0) */
+ uint64_t entry_ctl6 : 2; /**< Select source of XON/XOFF for entry (IDX*8)+6
+ - 0: IPD backpressure id
+ - 1: Link
+ - 2: XOFF
+ - 3: XON */
+ uint64_t reserved_24_24 : 1;
+ uint64_t bpid6 : 6; /**< Select IPD backpressure id for calendar table entry (IDX*8)+6
+ (unused if ENTRY_CTL6 != 0) */
+ uint64_t entry_ctl5 : 2; /**< Select source of XON/XOFF for entry (IDX*8)+5
+ - 0: IPD backpressure id
+ - 1: Link
+ - 2: XOFF
+ - 3: XON */
+ uint64_t reserved_15_15 : 1;
+ uint64_t bpid5 : 6; /**< Select IPD backpressure id for calendar table entry (IDX*8)+5
+ (unused if ENTRY_CTL5 != 0) */
+ uint64_t entry_ctl4 : 2; /**< Select source of XON/XOFF for entry (IDX*8)+4
+ - 0: IPD backpressure id
+ - 1: Link
+ - 2: XOFF
+ - 3: XON */
+ uint64_t reserved_6_6 : 1;
+ uint64_t bpid4 : 6; /**< Select IPD backpressure id for calendar table entry (IDX*8)+4
+ (unused if ENTRY_CTL4 != 0) */
+#else
+ uint64_t bpid4 : 6;
+ uint64_t reserved_6_6 : 1;
+ uint64_t entry_ctl4 : 2;
+ uint64_t bpid5 : 6;
+ uint64_t reserved_15_15 : 1;
+ uint64_t entry_ctl5 : 2;
+ uint64_t bpid6 : 6;
+ uint64_t reserved_24_24 : 1;
+ uint64_t entry_ctl6 : 2;
+ uint64_t bpid7 : 6;
+ uint64_t reserved_33_33 : 1;
+ uint64_t entry_ctl7 : 2;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_ilk_txx_mem_cal1_s cn68xx;
+ struct cvmx_ilk_txx_mem_cal1_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_mem_cal1 cvmx_ilk_txx_mem_cal1_t;
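+
+/* Illustrative sketch (not part of the original SDK source): per the notes
+ * above, CAL0 and CAL1 must always be accessed as an ordered pair, and
+ * ILK_TXx_IDX_CAL only increments after the CAL1 access. One group of 8
+ * calendar entries is therefore programmed as:
+ *
+ *   cvmx_write_csr(CVMX_ILK_TXX_MEM_CAL0(interface), cal0.u64); // entries 0-3
+ *   cvmx_write_csr(CVMX_ILK_TXX_MEM_CAL1(interface), cal1.u64); // entries 4-7
+ */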
+
+/**
+ * cvmx_ilk_tx#_mem_pmap
+ */
+union cvmx_ilk_txx_mem_pmap {
+ uint64_t u64;
+ struct cvmx_ilk_txx_mem_pmap_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t remap : 1; /**< Dynamically select channel using bits[39:32] of an 8-byte
+ header prepended to any packet transmitted on the port-pipe
+ selected by ILK_TXx_IDX_PMAP[IDX].
+
+ ***NOTE: Added in pass 2.0 */
+ uint64_t reserved_8_15 : 8;
+ uint64_t channel : 8; /**< Specify the channel for the port-pipe selected by
+ ILK_TXx_IDX_PMAP[IDX] */
+#else
+ uint64_t channel : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t remap : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_ilk_txx_mem_pmap_s cn68xx;
+ struct cvmx_ilk_txx_mem_pmap_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t channel : 8; /**< Specify the channel for the port-pipe selected by
+ ILK_TXx_IDX_PMAP[IDX] */
+#else
+ uint64_t channel : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_ilk_txx_mem_pmap cvmx_ilk_txx_mem_pmap_t;
+
+/**
+ * cvmx_ilk_tx#_mem_stat0
+ */
+union cvmx_ilk_txx_mem_stat0 {
+ uint64_t u64;
+ struct cvmx_ilk_txx_mem_stat0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t tx_pkt : 28; /**< Number of packets transmitted per channel (256M)
+ Channel selected by ILK_TXx_IDX_STAT0[IDX]. Interrupt on
+ saturation if ILK_TXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t tx_pkt : 28;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_ilk_txx_mem_stat0_s cn68xx;
+ struct cvmx_ilk_txx_mem_stat0_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_mem_stat0 cvmx_ilk_txx_mem_stat0_t;
+
+/**
+ * cvmx_ilk_tx#_mem_stat1
+ */
+union cvmx_ilk_txx_mem_stat1 {
+ uint64_t u64;
+ struct cvmx_ilk_txx_mem_stat1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t tx_bytes : 36; /**< Number of bytes transmitted per channel (64GB) Channel selected
+ by ILK_TXx_IDX_STAT1[IDX]. Saturates. Interrupt on
+ saturation if ILK_TXX_INT_EN[STAT_CNT_OVFL]=1. */
+#else
+ uint64_t tx_bytes : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_ilk_txx_mem_stat1_s cn68xx;
+ struct cvmx_ilk_txx_mem_stat1_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_mem_stat1 cvmx_ilk_txx_mem_stat1_t;
+
+/**
+ * cvmx_ilk_tx#_pipe
+ */
+union cvmx_ilk_txx_pipe {
+ uint64_t u64;
+ struct cvmx_ilk_txx_pipe_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t nump : 8; /**< Number of pipes assigned to this Tx Link */
+ uint64_t reserved_7_15 : 9;
+ uint64_t base : 7; /**< When NUMP is non-zero, indicates the base pipe number this
+ Tx link will accept. This Tx will accept PKO packets from
+ pipes in the range of: BASE .. (BASE+(NUMP-1))
+
+ BASE and NUMP must be constrained such that
+ 1) BASE+(NUMP-1) < 127
+ 2) Each used PKO pipe must map to exactly
+ one port|channel
+ 3) The pipe ranges must be consistent with
+ the PKO configuration. */
+#else
+ uint64_t base : 7;
+ uint64_t reserved_7_15 : 9;
+ uint64_t nump : 8;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_ilk_txx_pipe_s cn68xx;
+ struct cvmx_ilk_txx_pipe_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_pipe cvmx_ilk_txx_pipe_t;
+
+/**
+ * cvmx_ilk_tx#_rmatch
+ */
+union cvmx_ilk_txx_rmatch {
+ uint64_t u64;
+ struct cvmx_ilk_txx_rmatch_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63 : 14;
+ uint64_t grnlrty : 2; /**< Granularity of a token, where 1 token equals (1<<GRNLRTY) bytes. */
+ uint64_t brst_limit : 16; /**< Size of token bucket, also the maximum quantity of data that
+ may be burst across the interface before invoking rate limiting
+ logic. */
+ uint64_t time_limit : 16; /**< Number of cycles per time interval. (Must be >= 4) */
+ uint64_t rate_limit : 16; /**< Number of tokens added to the bucket when the interval timer
+ expires. */
+#else
+ uint64_t rate_limit : 16;
+ uint64_t time_limit : 16;
+ uint64_t brst_limit : 16;
+ uint64_t grnlrty : 2;
+ uint64_t reserved_50_63 : 14;
+#endif
+ } s;
+ struct cvmx_ilk_txx_rmatch_s cn68xx;
+ struct cvmx_ilk_txx_rmatch_s cn68xxp1;
+};
+typedef union cvmx_ilk_txx_rmatch cvmx_ilk_txx_rmatch_t;
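+
+/* Illustrative arithmetic (not part of the original SDK source): the rate
+ * matcher is a token bucket in which one token is (1<<GRNLRTY) bytes,
+ * RATE_LIMIT tokens are added every TIME_LIMIT cycles, and BRST_LIMIT bounds
+ * the bucket. The sustained rate is therefore approximately:
+ *
+ *   bytes_per_cycle = (RATE_LIMIT << GRNLRTY) / TIME_LIMIT
+ *
+ * e.g. GRNLRTY=2, RATE_LIMIT=256, TIME_LIMIT=1024 gives about 1 byte/cycle.
+ */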
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ilk-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ilk.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ilk.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ilk.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1401 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Support library for the ILK
+ *
+ * <hr>$Revision: 49448 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-ilk.h>
+#include <asm/octeon/cvmx-ilk-defs.h>
+#include <asm/octeon/cvmx-helper-util.h>
+#include <asm/octeon/cvmx-helper-ilk.h>
+#else
+#include "cvmx.h"
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "cvmx-config.h"
+#endif
+#include "cvmx-sysinfo.h"
+#include "cvmx-pko.h"
+#include "cvmx-ilk.h"
+#include "cvmx-helper-util.h"
+#include "cvmx-helper-ilk.h"
+#endif
+
+#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
+
+/*
+ * global configurations. to disable the 2nd ILK, set
+ * cvmx_ilk_lane_mask[CVMX_NUM_ILK_INTF] = {0xff, 0x0} and
+ * cvmx_ilk_chans[CVMX_NUM_ILK_INTF] = {8, 0}
+ */
+unsigned char cvmx_ilk_lane_mask[CVMX_NUM_ILK_INTF] = {0xf, 0xf0};
+//#define SINGLE_PORT_SIM_ILK
+#ifdef SINGLE_PORT_SIM_ILK
+unsigned char cvmx_ilk_chans[CVMX_NUM_ILK_INTF] = {1, 1};
+unsigned char cvmx_ilk_chan_map[CVMX_NUM_ILK_INTF][CVMX_MAX_ILK_CHANS] =
+{{0},
+ {0}};
+#else /* sample case */
+unsigned char cvmx_ilk_chans[CVMX_NUM_ILK_INTF] = {8, 8};
+unsigned char cvmx_ilk_chan_map[CVMX_NUM_ILK_INTF][CVMX_MAX_ILK_CHANS] =
+{{0, 1, 2, 3, 4, 5, 6, 7},
+ {0, 1, 2, 3, 4, 5, 6, 7}};
+#endif
+
+/* Default callbacks, can be overridden
+ * using cvmx_ilk_get_callbacks/cvmx_ilk_set_callbacks
+ */
+static cvmx_ilk_callbacks_t cvmx_ilk_callbacks = {
+ .calendar_setup_rx = cvmx_ilk_cal_setup_rx,
+};
+
+static cvmx_ilk_intf_t cvmx_ilk_intf_cfg[CVMX_NUM_ILK_INTF];
+
+/**
+ * Get current ILK initialization callbacks
+ *
+ * @param callbacks Pointer to the callbacks structure to fill
+ *
+ * @return nothing
+ */
+void cvmx_ilk_get_callbacks(cvmx_ilk_callbacks_t * callbacks)
+{
+ memcpy(callbacks, &cvmx_ilk_callbacks, sizeof(cvmx_ilk_callbacks));
+}
+
+/**
+ * Set new ILK initialization callbacks
+ *
+ * @param new_callbacks Pointer to an updated callbacks structure.
+ */
+void cvmx_ilk_set_callbacks(cvmx_ilk_callbacks_t * new_callbacks)
+{
+ memcpy(&cvmx_ilk_callbacks, new_callbacks, sizeof(cvmx_ilk_callbacks));
+}
+
+/**
+ * Initialize and start the ILK interface.
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param lane_mask the lane group for this interface
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_start_interface (int interface, unsigned char lane_mask)
+{
+ int res = -1;
+ int other_intf, this_qlm, other_qlm;
+ unsigned char uni_mask;
+ cvmx_mio_qlmx_cfg_t mio_qlmx_cfg, other_mio_qlmx_cfg;
+ cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+ cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+ cvmx_ilk_ser_cfg_t ilk_ser_cfg;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ if (lane_mask == 0)
+ return res;
+
+ /* check conflicts between 2 ilk interfaces. 1 lane can be assigned to 1
+ * interface only */
+ other_intf = !interface;
+ this_qlm = interface + CVMX_ILK_QLM_BASE;
+ other_qlm = other_intf + CVMX_ILK_QLM_BASE;
+ if (cvmx_ilk_intf_cfg[other_intf].lane_en_mask & lane_mask)
+ {
+ cvmx_dprintf ("ILK%d: %s: lane assignment conflict\n", interface,
+ __FUNCTION__);
+ return res;
+ }
+
+ /* check the legality of the lane mask. interface 0 can have 8 lanes,
+ * while interface 1 can have 4 lanes at most */
+ uni_mask = lane_mask >> (interface * 4);
+ if ((uni_mask != 0x1 && uni_mask != 0x3 && uni_mask != 0xf &&
+ uni_mask != 0xff) || (interface == 1 && lane_mask > 0xf0))
+ {
+#if CVMX_ENABLE_DEBUG_PRINTS
+ cvmx_dprintf ("ILK%d: %s: incorrect lane mask: 0x%x \n", interface,
+ __FUNCTION__, uni_mask);
+#endif
+ return res;
+ }
+
+ /* check the availability of qlms. qlm_cfg = 001 means the chip is fused
+ * to give this qlm to ilk */
+ mio_qlmx_cfg.u64 = cvmx_read_csr (CVMX_MIO_QLMX_CFG(this_qlm));
+ other_mio_qlmx_cfg.u64 = cvmx_read_csr (CVMX_MIO_QLMX_CFG(other_qlm));
+ if (mio_qlmx_cfg.s.qlm_cfg != 1 ||
+ (uni_mask == 0xff && other_mio_qlmx_cfg.s.qlm_cfg != 1))
+ {
+#if CVMX_ENABLE_DEBUG_PRINTS
+ cvmx_dprintf ("ILK%d: %s: qlm unavailable\n", interface, __FUNCTION__);
+#endif
+ return res;
+ }
+
+ /* power up the serdes */
+ ilk_ser_cfg.u64 = cvmx_read_csr (CVMX_ILK_SER_CFG);
+ if (ilk_ser_cfg.s.ser_pwrup == 0)
+ {
+ ilk_ser_cfg.s.ser_rxpol_auto = 1;
+ ilk_ser_cfg.s.ser_rxpol = 0;
+ ilk_ser_cfg.s.ser_txpol = 0;
+ ilk_ser_cfg.s.ser_reset_n = 0xff;
+ ilk_ser_cfg.s.ser_haul = 0;
+ }
+ ilk_ser_cfg.s.ser_pwrup |= ((interface == 0) && (lane_mask > 0xf)) ?
+ 0x3 : (1 << interface);
+ cvmx_write_csr (CVMX_ILK_SER_CFG, ilk_ser_cfg.u64);
+
+ /* configure the lane enable of the interface */
+ ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
+ ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
+ ilk_txx_cfg0.s.lane_ena = ilk_rxx_cfg0.s.lane_ena = lane_mask;
+ cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+ cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+
+ /* write to local cache. for lane speed, if interface 0 has 8 lanes,
+ * assume both qlms have the same speed */
+ cvmx_ilk_intf_cfg[interface].intf_en = 1;
+ cvmx_ilk_intf_cfg[interface].lane_en_mask = lane_mask;
+ res = 0;
+
+ return res;
+}
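+
+/* Usage sketch (illustrative only, not part of the original SDK source):
+ * bring up ilk0 on the four lanes of QLM1 and ilk1 on the four lanes of
+ * QLM2, matching the default cvmx_ilk_lane_mask above:
+ *
+ *   if (cvmx_ilk_start_interface(0, 0x0f) < 0)
+ *       cvmx_dprintf("ILK0 bring-up failed\n");
+ *   if (cvmx_ilk_start_interface(1, 0xf0) < 0)
+ *       cvmx_dprintf("ILK1 bring-up failed\n");
+ */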
+
+/**
+ * set pipe group base and length for the interface
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param pipe_base the base of the pipe group
+ * @param pipe_len the length of the pipe group
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_set_pipe (int interface, int pipe_base, unsigned int pipe_len)
+{
+ int res = -1;
+ cvmx_ilk_txx_pipe_t ilk_txx_pipe;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ /* base must be between 0 and 127, and base + length must not exceed 127 */
+ if (!(pipe_base >= 0 && pipe_base <= 127) || (pipe_base + pipe_len > 127))
+ {
+#if CVMX_ENABLE_DEBUG_PRINTS
+ cvmx_dprintf ("ILK%d: %s: pipe base/length out of bounds\n", interface,
+ __FUNCTION__);
+#endif
+ return res;
+ }
+
+ /* set them in ilk tx section */
+ ilk_txx_pipe.u64 = cvmx_read_csr (CVMX_ILK_TXX_PIPE(interface));
+ ilk_txx_pipe.s.base = pipe_base;
+ ilk_txx_pipe.s.nump = pipe_len;
+ cvmx_write_csr (CVMX_ILK_TXX_PIPE(interface), ilk_txx_pipe.u64);
+ res = 0;
+
+ return res;
+}
+
+/**
+ * set logical channels for tx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param pch pointer to an array of pipe-channel pairs
+ * @param num_chs the number of entries in the pipe-channel array
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_tx_set_channel (int interface, cvmx_ilk_pipe_chan_t *pch,
+ unsigned int num_chs)
+{
+ int res = -1;
+ cvmx_ilk_txx_idx_pmap_t ilk_txx_idx_pmap;
+ unsigned int i;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ if (pch == NULL || num_chs > CVMX_MAX_ILK_PIPES)
+ return res;
+
+ /* write the pair to ilk tx */
+ for (i = 0; i < num_chs; i++)
+ {
+ ilk_txx_idx_pmap.u64 = 0;
+ ilk_txx_idx_pmap.s.index = pch->pipe;
+ cvmx_write_csr(CVMX_ILK_TXX_IDX_PMAP(interface), ilk_txx_idx_pmap.u64);
+ cvmx_write_csr(CVMX_ILK_TXX_MEM_PMAP(interface), pch->chan);
+ pch++;
+ }
+ res = 0;
+
+ return res;
+}
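+
+/* Usage sketch (illustrative only, not part of the original SDK source):
+ * map the first two port-pipes of ilk0 to logical channels 0 and 1:
+ *
+ *   cvmx_ilk_pipe_chan_t pch[2] = { {.pipe = 0, .chan = 0},
+ *                                   {.pipe = 1, .chan = 1} };
+ *   cvmx_ilk_tx_set_channel(0, pch, 2);
+ */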
+
+/**
+ * set pkind for rx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param chpknd pointer to an array of channel-pkind pairs
+ * @param num_pknd the number of entries in the channel-pkind array
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_rx_set_pknd (int interface, cvmx_ilk_chan_pknd_t *chpknd,
+ unsigned int num_pknd)
+{
+ int res = -1;
+ cvmx_ilk_rxf_idx_pmap_t ilk_rxf_idx_pmap;
+ unsigned int i;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ if (chpknd == NULL || num_pknd > CVMX_MAX_ILK_PKNDS)
+ return res;
+
+ /* write the pair to ilk rx. the rx port-map memory is shared by both
+ * interfaces, so the index encodes the interface in its upper bit */
+ for (i = 0; i < num_pknd; i++)
+ {
+ ilk_rxf_idx_pmap.u64 = 0;
+ ilk_rxf_idx_pmap.s.index = interface * 256 + chpknd->chan;
+ cvmx_write_csr (CVMX_ILK_RXF_IDX_PMAP, ilk_rxf_idx_pmap.u64);
+ cvmx_write_csr (CVMX_ILK_RXF_MEM_PMAP, chpknd->pknd);
+ chpknd++;
+ }
+ res = 0;
+
+ return res;
+}
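+
+/* Usage sketch (illustrative only, not part of the original SDK source):
+ * bind channels 0 and 1 of ilk0 to packet kinds 8 and 9 (arbitrary example
+ * values) on the rx side:
+ *
+ *   cvmx_ilk_chan_pknd_t chpknd[2] = { {.chan = 0, .pknd = 8},
+ *                                      {.chan = 1, .pknd = 9} };
+ *   cvmx_ilk_rx_set_pknd(0, chpknd, 2);
+ */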
+
+/**
+ * configure calendar for rx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent pointer to calendar entries
+ *
+ * @return Zero on success, negative on failure.
+ */
+static int cvmx_ilk_rx_cal_conf (int interface, int cal_depth,
+ cvmx_ilk_cal_entry_t *pent)
+{
+ int res = -1, num_grp, num_rest, i, j;
+ cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+ cvmx_ilk_rxx_idx_cal_t ilk_rxx_idx_cal;
+ cvmx_ilk_rxx_mem_cal0_t ilk_rxx_mem_cal0;
+ cvmx_ilk_rxx_mem_cal1_t ilk_rxx_mem_cal1;
+ unsigned long int tmp;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ if (cal_depth < CVMX_ILK_RX_MIN_CAL || cal_depth > CVMX_ILK_MAX_CAL
+ || pent == NULL)
+ return res;
+
+ /* mandatory link-level fc as workarounds for ILK-15397 and ILK-15479 */
+ /* TODO: test effectiveness */
+#if 0
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_0) && pent->ent_ctrl == PIPE_BPID)
+ for (i = 0; i < cal_depth; i++)
+ pent->ent_ctrl = LINK;
+#endif
+
+ /* set the depth */
+ ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
+ ilk_rxx_cfg0.s.cal_depth = cal_depth;
+ cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+
+ /* set the calendar index */
+ num_grp = cal_depth / CVMX_ILK_CAL_GRP_SZ;
+ num_rest = cal_depth % CVMX_ILK_CAL_GRP_SZ;
+ ilk_rxx_idx_cal.u64 = 0;
+ ilk_rxx_idx_cal.s.inc = 1;
+ cvmx_write_csr (CVMX_ILK_RXX_IDX_CAL(interface), ilk_rxx_idx_cal.u64);
+
+ /* set the calendar entries. each group of 8 entries is written through a
+ * cal0/cal1 register pair; each entry packs a pipe/bpid value next to its
+ * 2-bit entry control */
+ for (i = 0; i < num_grp; i++)
+ {
+ ilk_rxx_mem_cal0.u64 = 0;
+ for (j = 0; j < CVMX_ILK_CAL_GRP_SZ/2; j++)
+ {
+ tmp = 0;
+ tmp = pent->pipe_bpid & ~(~tmp << CVMX_ILK_PIPE_BPID_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j;
+ ilk_rxx_mem_cal0.u64 |= tmp;
+
+ tmp = 0;
+ tmp = pent->ent_ctrl & ~(~tmp << CVMX_ILK_ENT_CTRL_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j +
+ CVMX_ILK_PIPE_BPID_SZ;
+ ilk_rxx_mem_cal0.u64 |= tmp;
+ pent++;
+ }
+ cvmx_write_csr(CVMX_ILK_RXX_MEM_CAL0(interface), ilk_rxx_mem_cal0.u64);
+
+ ilk_rxx_mem_cal1.u64 = 0;
+ for (j = 0; j < CVMX_ILK_CAL_GRP_SZ/2; j++)
+ {
+ tmp = 0;
+ tmp = pent->pipe_bpid & ~(~tmp << CVMX_ILK_PIPE_BPID_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j;
+ ilk_rxx_mem_cal1.u64 |= tmp;
+
+ tmp = 0;
+ tmp = pent->ent_ctrl & ~(~tmp << CVMX_ILK_ENT_CTRL_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j +
+ CVMX_ILK_PIPE_BPID_SZ;
+ ilk_rxx_mem_cal1.u64 |= tmp;
+ pent++;
+ }
+ cvmx_write_csr(CVMX_ILK_RXX_MEM_CAL1(interface), ilk_rxx_mem_cal1.u64);
+ }
+
+ /* set the remaining calendar entries (the final partial group). both
+ * cal0 and cal1 must still be written */
+ ilk_rxx_mem_cal0.u64 = 0;
+ ilk_rxx_mem_cal1.u64 = 0;
+ for (i = 0; i < num_rest; i++)
+ {
+ if (i < CVMX_ILK_CAL_GRP_SZ/2)
+ {
+ tmp = 0;
+ tmp = pent->pipe_bpid & ~(~tmp << CVMX_ILK_PIPE_BPID_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * i;
+ ilk_rxx_mem_cal0.u64 |= tmp;
+
+ tmp = 0;
+ tmp = pent->ent_ctrl & ~(~tmp << CVMX_ILK_ENT_CTRL_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * i +
+ CVMX_ILK_PIPE_BPID_SZ;
+ ilk_rxx_mem_cal0.u64 |= tmp;
+ pent++;
+ }
+
+ if (i >= CVMX_ILK_CAL_GRP_SZ/2)
+ {
+ tmp = 0;
+ tmp = pent->pipe_bpid & ~(~tmp << CVMX_ILK_PIPE_BPID_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) *
+ (i - CVMX_ILK_CAL_GRP_SZ/2);
+ ilk_rxx_mem_cal1.u64 |= tmp;
+
+ tmp = 0;
+ tmp = pent->ent_ctrl & ~(~tmp << CVMX_ILK_ENT_CTRL_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) *
+ (i - CVMX_ILK_CAL_GRP_SZ/2) + CVMX_ILK_PIPE_BPID_SZ;
+ ilk_rxx_mem_cal1.u64 |= tmp;
+ pent++;
+ }
+ }
+ cvmx_write_csr(CVMX_ILK_RXX_MEM_CAL0(interface), ilk_rxx_mem_cal0.u64);
+ cvmx_write_csr(CVMX_ILK_RXX_MEM_CAL1(interface), ilk_rxx_mem_cal1.u64);
+ cvmx_read_csr (CVMX_ILK_RXX_MEM_CAL1(interface));
+
+ return 0;
+}
+
+/**
+ * set high water mark for rx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param hi_wm high water mark for this interface
+ *
+ * @return Zero on success, negative on failure.
+ */
+static int cvmx_ilk_rx_set_hwm (int interface, int hi_wm)
+{
+ int res = -1;
+ cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ if (hi_wm <= 0)
+ return res;
+
+ /* set the hwm */
+ ilk_rxx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG1(interface));
+ ilk_rxx_cfg1.s.rx_fifo_hwm = hi_wm;
+ cvmx_write_csr (CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
+ res = 0;
+
+ return res;
+}
+
+/**
+ * enable calendar for rx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_ena enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+static int cvmx_ilk_rx_cal_ena (int interface, unsigned char cal_ena)
+{
+ int res = -1;
+ cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ /* set the enable */
+ ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
+ ilk_rxx_cfg0.s.cal_ena = cal_ena;
+ cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+ cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
+ res = 0;
+
+ return res;
+}
+
+/**
+ * set up calendar for rx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent pointer to calendar entries
+ * @param hi_wm high water mark for this interface
+ * @param cal_ena enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_cal_setup_rx (int interface, int cal_depth,
+ cvmx_ilk_cal_entry_t *pent, int hi_wm,
+ unsigned char cal_ena)
+{
+ int res = -1;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ res = cvmx_ilk_rx_cal_conf (interface, cal_depth, pent);
+ if (res < 0)
+ return res;
+
+ res = cvmx_ilk_rx_set_hwm (interface, hi_wm);
+ if (res < 0)
+ return res;
+
+ res = cvmx_ilk_rx_cal_ena (interface, cal_ena);
+ return res;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_ilk_cal_setup_rx);
+#endif
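+
+/* Usage sketch (illustrative only, not part of the original SDK source):
+ * a minimal 8-entry rx calendar where every entry reports link-level flow
+ * control (ENT_CTRL value LINK, as in the errata workaround above), assuming
+ * 8 satisfies CVMX_ILK_RX_MIN_CAL:
+ *
+ *   cvmx_ilk_cal_entry_t cal[8];
+ *   int k;
+ *   for (k = 0; k < 8; k++) {
+ *       cal[k].pipe_bpid = 0;
+ *       cal[k].ent_ctrl = LINK;
+ *   }
+ *   cvmx_ilk_cal_setup_rx(0, 8, cal, 1, 1);   // hi_wm = 1, cal_ena = 1
+ */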
+
+/**
+ * configure calendar for tx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent pointer to calendar entries
+ *
+ * @return Zero on success, negative on failure.
+ */
+static int cvmx_ilk_tx_cal_conf (int interface, int cal_depth,
+ cvmx_ilk_cal_entry_t *pent)
+{
+ int res = -1, num_grp, num_rest, i, j;
+ cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+ cvmx_ilk_txx_idx_cal_t ilk_txx_idx_cal;
+ cvmx_ilk_txx_mem_cal0_t ilk_txx_mem_cal0;
+ cvmx_ilk_txx_mem_cal1_t ilk_txx_mem_cal1;
+ unsigned long int tmp;
+ cvmx_ilk_cal_entry_t *ent_tmp;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ if (cal_depth < CVMX_ILK_TX_MIN_CAL || cal_depth > CVMX_ILK_MAX_CAL
+ || pent == NULL)
+ return res;
+
+ /* mandatory link-level fc as workarounds for ILK-15397 and ILK-15479 */
+ /* TODO: test effectiveness */
+#if 0
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_0) && pent->ent_ctrl == PIPE_BPID)
+ for (i = 0; i < cal_depth; i++)
+ pent->ent_ctrl = LINK;
+#endif
+
+ /* tx calendar depth must be a multiple of 8 */
+ num_grp = (cal_depth - 1) / CVMX_ILK_CAL_GRP_SZ + 1;
+ num_rest = cal_depth % CVMX_ILK_CAL_GRP_SZ;
+ if (num_rest != 0)
+ {
+ ent_tmp = pent + cal_depth;
+ for (i = num_rest; i < 8; i++, ent_tmp++)
+ {
+ ent_tmp->pipe_bpid = 0;
+ ent_tmp->ent_ctrl = XOFF;
+ }
+ }
+ cal_depth = num_grp * 8;
+
+ /* set the depth */
+ ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
+ ilk_txx_cfg0.s.cal_depth = cal_depth;
+ cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+
+ /* set the calendar index */
+ ilk_txx_idx_cal.u64 = 0;
+ ilk_txx_idx_cal.s.inc = 1;
+ cvmx_write_csr (CVMX_ILK_TXX_IDX_CAL(interface), ilk_txx_idx_cal.u64);
+
+ /* set the calendar entries. each group of 8 entries is written through a
+ * cal0/cal1 register pair; each entry packs a pipe/bpid value next to its
+ * 2-bit entry control */
+ for (i = 0; i < num_grp; i++)
+ {
+ ilk_txx_mem_cal0.u64 = 0;
+ for (j = 0; j < CVMX_ILK_CAL_GRP_SZ/2; j++)
+ {
+ tmp = 0;
+ tmp = pent->pipe_bpid & ~(~tmp << CVMX_ILK_PIPE_BPID_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j;
+ ilk_txx_mem_cal0.u64 |= tmp;
+
+ tmp = 0;
+ tmp = pent->ent_ctrl & ~(~tmp << CVMX_ILK_ENT_CTRL_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j +
+ CVMX_ILK_PIPE_BPID_SZ;
+ ilk_txx_mem_cal0.u64 |= tmp;
+ pent++;
+ }
+ cvmx_write_csr(CVMX_ILK_TXX_MEM_CAL0(interface), ilk_txx_mem_cal0.u64);
+
+ ilk_txx_mem_cal1.u64 = 0;
+ for (j = 0; j < CVMX_ILK_CAL_GRP_SZ/2; j++)
+ {
+ tmp = 0;
+ tmp = pent->pipe_bpid & ~(~tmp << CVMX_ILK_PIPE_BPID_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j;
+ ilk_txx_mem_cal1.u64 |= tmp;
+
+ tmp = 0;
+ tmp = pent->ent_ctrl & ~(~tmp << CVMX_ILK_ENT_CTRL_SZ);
+ tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j +
+ CVMX_ILK_PIPE_BPID_SZ;
+ ilk_txx_mem_cal1.u64 |= tmp;
+ pent++;
+ }
+ cvmx_write_csr(CVMX_ILK_TXX_MEM_CAL1(interface), ilk_txx_mem_cal1.u64);
+ }
+ cvmx_read_csr (CVMX_ILK_TXX_MEM_CAL1(interface));
+
+ return 0;
+}
+
+#ifdef CVMX_ILK_BP_CONF_ENA
+/**
+ * configure backpressure for tx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent pointer to calendar entries
+ *
+ * @return Zero on success, negative on failure.
+ */
+static int cvmx_ilk_bp_conf (int interface, int cal_depth, cvmx_ilk_cal_entry_t *pent)
+{
+ int res = -1, i;
+ cvmx_ipd_ctl_status_t ipd_ctl_status;
+ cvmx_ilk_cal_entry_t *tmp;
+ unsigned char bpid;
+ cvmx_ipd_bpidx_mbuf_th_t ipd_bpidx_mbuf_th;
+
+ /* enable bp for the interface */
+ ipd_ctl_status.u64 = cvmx_read_csr (CVMX_IPD_CTL_STATUS);
+ ipd_ctl_status.s.pbp_en = 1;
+ cvmx_write_csr (CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
+
+ /* enable bp for each id */
+ for (i = 0, tmp = pent; i < cal_depth; i++, tmp++)
+ {
+ bpid = tmp->pipe_bpid;
+ ipd_bpidx_mbuf_th.u64 =
+ cvmx_read_csr (CVMX_IPD_BPIDX_MBUF_TH(bpid));
+ ipd_bpidx_mbuf_th.s.page_cnt = 1; /* 256 buffers */
+ ipd_bpidx_mbuf_th.s.bp_enb = 1;
+ cvmx_write_csr (CVMX_IPD_BPIDX_MBUF_TH(bpid), ipd_bpidx_mbuf_th.u64);
+ }
+ res = 0;
+
+ return res;
+}
+#endif
+
+/**
+ * enable calendar for tx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_ena enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+static int cvmx_ilk_tx_cal_ena (int interface, unsigned char cal_ena)
+{
+ int res = -1;
+ cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ /* set the enable */
+ ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
+ ilk_txx_cfg0.s.cal_ena = cal_ena;
+ cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+ cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
+ res = 0;
+
+ return res;
+}
+
+/**
+ * set up calendar for tx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent pointer to calendar entries
+ * @param cal_ena enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_cal_setup_tx (int interface, int cal_depth,
+ cvmx_ilk_cal_entry_t *pent, unsigned char cal_ena)
+{
+ int res = -1;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ res = cvmx_ilk_tx_cal_conf (interface, cal_depth, pent);
+ if (res < 0)
+ return res;
+
+#ifdef CVMX_ILK_BP_CONF_ENA
+ res = cvmx_ilk_bp_conf (interface, cal_depth, pent);
+ if (res < 0)
+ return res;
+#endif
+
+ res = cvmx_ilk_tx_cal_ena (interface, cal_ena);
+ return res;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_ilk_cal_setup_tx);
+#endif
+
+#ifdef CVMX_ILK_STATS_ENA
+static void cvmx_ilk_reg_dump_rx (int interface)
+{
+ int i;
+ cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+ cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+ cvmx_ilk_rxx_int_t ilk_rxx_int;
+ cvmx_ilk_rxx_jabber_t ilk_rxx_jabber;
+ cvmx_ilk_rx_lnex_cfg_t ilk_rx_lnex_cfg;
+ cvmx_ilk_rx_lnex_int_t ilk_rx_lnex_int;
+ cvmx_ilk_gbl_cfg_t ilk_gbl_cfg;
+ cvmx_ilk_ser_cfg_t ilk_ser_cfg;
+ cvmx_ilk_rxf_idx_pmap_t ilk_rxf_idx_pmap;
+ cvmx_ilk_rxf_mem_pmap_t ilk_rxf_mem_pmap;
+ cvmx_ilk_rxx_idx_cal_t ilk_rxx_idx_cal;
+ cvmx_ilk_rxx_mem_cal0_t ilk_rxx_mem_cal0;
+ cvmx_ilk_rxx_mem_cal1_t ilk_rxx_mem_cal1;
+
+ ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
+ cvmx_dprintf ("ilk rxx cfg0: 0x%16lx\n", ilk_rxx_cfg0.u64);
+
+ ilk_rxx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG1(interface));
+ cvmx_dprintf ("ilk rxx cfg1: 0x%16lx\n", ilk_rxx_cfg1.u64);
+
+ ilk_rxx_int.u64 = cvmx_read_csr (CVMX_ILK_RXX_INT(interface));
+ cvmx_dprintf ("ilk rxx int: 0x%16lx\n", ilk_rxx_int.u64);
+ cvmx_write_csr (CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);
+
+ ilk_rxx_jabber.u64 = cvmx_read_csr (CVMX_ILK_RXX_JABBER(interface));
+ cvmx_dprintf ("ilk rxx jabber: 0x%16lx\n", ilk_rxx_jabber.u64);
+
+#define LNE_NUM_DBG 4
+ for (i = 0; i < LNE_NUM_DBG; i++)
+ {
+ ilk_rx_lnex_cfg.u64 = cvmx_read_csr (CVMX_ILK_RX_LNEX_CFG(i));
+ cvmx_dprintf ("ilk rx lnex cfg lane: %d 0x%16lx\n", i,
+ ilk_rx_lnex_cfg.u64);
+ }
+
+ for (i = 0; i < LNE_NUM_DBG; i++)
+ {
+ ilk_rx_lnex_int.u64 = cvmx_read_csr (CVMX_ILK_RX_LNEX_INT(i));
+ cvmx_dprintf ("ilk rx lnex int lane: %d 0x%16lx\n", i,
+ ilk_rx_lnex_int.u64);
+ cvmx_write_csr (CVMX_ILK_RX_LNEX_INT(i), ilk_rx_lnex_int.u64);
+ }
+
+ ilk_gbl_cfg.u64 = cvmx_read_csr (CVMX_ILK_GBL_CFG);
+ cvmx_dprintf ("ilk gbl cfg: 0x%16lx\n", ilk_gbl_cfg.u64);
+
+ ilk_ser_cfg.u64 = cvmx_read_csr (CVMX_ILK_SER_CFG);
+ cvmx_dprintf ("ilk ser cfg: 0x%16lx\n", ilk_ser_cfg.u64);
+
+#define CHAN_NUM_DBG 8
+ ilk_rxf_idx_pmap.u64 = 0;
+ ilk_rxf_idx_pmap.s.index = interface * 256;
+ ilk_rxf_idx_pmap.s.inc = 1;
+ cvmx_write_csr (CVMX_ILK_RXF_IDX_PMAP, ilk_rxf_idx_pmap.u64);
+ for (i = 0; i < CHAN_NUM_DBG; i++)
+ {
+ ilk_rxf_mem_pmap.u64 = cvmx_read_csr (CVMX_ILK_RXF_MEM_PMAP);
+ cvmx_dprintf ("ilk rxf mem pmap chan: %3d 0x%16lx\n", i,
+ ilk_rxf_mem_pmap.u64);
+ }
+
+#define CAL_NUM_DBG 2
+ ilk_rxx_idx_cal.u64 = 0;
+ ilk_rxx_idx_cal.s.inc = 1;
+ cvmx_write_csr (CVMX_ILK_RXX_IDX_CAL(interface), ilk_rxx_idx_cal.u64);
+ for (i = 0; i < CAL_NUM_DBG; i++)
+ {
+ ilk_rxx_idx_cal.u64 = cvmx_read_csr(CVMX_ILK_RXX_IDX_CAL(interface));
+ cvmx_dprintf ("ilk rxx idx cal: 0x%16lx\n", ilk_rxx_idx_cal.u64);
+
+ ilk_rxx_mem_cal0.u64 = cvmx_read_csr(CVMX_ILK_RXX_MEM_CAL0(interface));
+ cvmx_dprintf ("ilk rxx mem cal0: 0x%16lx\n", ilk_rxx_mem_cal0.u64);
+ ilk_rxx_mem_cal1.u64 = cvmx_read_csr(CVMX_ILK_RXX_MEM_CAL1(interface));
+ cvmx_dprintf ("ilk rxx mem cal1: 0x%16lx\n", ilk_rxx_mem_cal1.u64);
+ }
+}
+
+static void cvmx_ilk_reg_dump_tx (int interface)
+{
+ int i;
+ cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+ cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+ cvmx_ilk_txx_idx_pmap_t ilk_txx_idx_pmap;
+ cvmx_ilk_txx_mem_pmap_t ilk_txx_mem_pmap;
+ cvmx_ilk_txx_int_t ilk_txx_int;
+ cvmx_ilk_txx_pipe_t ilk_txx_pipe;
+ cvmx_ilk_txx_idx_cal_t ilk_txx_idx_cal;
+ cvmx_ilk_txx_mem_cal0_t ilk_txx_mem_cal0;
+ cvmx_ilk_txx_mem_cal1_t ilk_txx_mem_cal1;
+
+ ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
+ cvmx_dprintf ("ilk txx cfg0: 0x%16lx\n", ilk_txx_cfg0.u64);
+
+ ilk_txx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));
+ cvmx_dprintf ("ilk txx cfg1: 0x%16lx\n", ilk_txx_cfg1.u64);
+
+ ilk_txx_pipe.u64 = cvmx_read_csr (CVMX_ILK_TXX_PIPE(interface));
+ cvmx_dprintf ("ilk txx pipe: 0x%16lx\n", ilk_txx_pipe.u64);
+
+ ilk_txx_idx_pmap.u64 = 0;
+ ilk_txx_idx_pmap.s.index = ilk_txx_pipe.s.base;
+ ilk_txx_idx_pmap.s.inc = 1;
+ cvmx_write_csr (CVMX_ILK_TXX_IDX_PMAP(interface), ilk_txx_idx_pmap.u64);
+ for (i = 0; i < CHAN_NUM_DBG; i++)
+ {
+ ilk_txx_mem_pmap.u64 = cvmx_read_csr (CVMX_ILK_TXX_MEM_PMAP(interface));
+ cvmx_dprintf ("ilk txx mem pmap pipe: %3d 0x%16lx\n",
+ ilk_txx_pipe.s.base + i, ilk_txx_mem_pmap.u64);
+ }
+
+ ilk_txx_int.u64 = cvmx_read_csr (CVMX_ILK_TXX_INT(interface));
+ cvmx_dprintf ("ilk txx int: 0x%16lx\n", ilk_txx_int.u64);
+
+ ilk_txx_idx_cal.u64 = 0;
+ ilk_txx_idx_cal.s.inc = 1;
+ cvmx_write_csr (CVMX_ILK_TXX_IDX_CAL(interface), ilk_txx_idx_cal.u64);
+ for (i = 0; i < CAL_NUM_DBG; i++)
+ {
+ ilk_txx_idx_cal.u64 = cvmx_read_csr(CVMX_ILK_TXX_IDX_CAL(interface));
+ cvmx_dprintf ("ilk txx idx cal: 0x%16lx\n", ilk_txx_idx_cal.u64);
+
+ ilk_txx_mem_cal0.u64 = cvmx_read_csr(CVMX_ILK_TXX_MEM_CAL0(interface));
+ cvmx_dprintf ("ilk txx mem cal0: 0x%16lx\n", ilk_txx_mem_cal0.u64);
+ ilk_txx_mem_cal1.u64 = cvmx_read_csr(CVMX_ILK_TXX_MEM_CAL1(interface));
+ cvmx_dprintf ("ilk txx mem cal1: 0x%16lx\n", ilk_txx_mem_cal1.u64);
+ }
+}
+#endif
+
+/**
+ * show run time status
+ *
+ * @param interface The identifier of the packet interface to show status for. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return nothing
+ */
+#ifdef CVMX_ILK_RUNTIME_DBG
+void cvmx_ilk_runtime_status (int interface)
+{
+ cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+ cvmx_ilk_txx_flow_ctl0_t ilk_txx_flow_ctl0;
+ cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+ cvmx_ilk_rxx_int_t ilk_rxx_int;
+ cvmx_ilk_rxx_flow_ctl0_t ilk_rxx_flow_ctl0;
+ cvmx_ilk_rxx_flow_ctl1_t ilk_rxx_flow_ctl1;
+ cvmx_ilk_gbl_int_t ilk_gbl_int;
+
+ cvmx_dprintf ("\nilk run-time status: interface: %d\n", interface);
+
+ ilk_txx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));
+ cvmx_dprintf ("\nilk txx cfg1: 0x%16lx\n", ilk_txx_cfg1.u64);
+ if (ilk_txx_cfg1.s.rx_link_fc)
+ cvmx_dprintf ("link flow control received\n");
+ if (ilk_txx_cfg1.s.tx_link_fc)
+ cvmx_dprintf ("link flow control sent\n");
+
+ ilk_txx_flow_ctl0.u64 = cvmx_read_csr (CVMX_ILK_TXX_FLOW_CTL0(interface));
+ cvmx_dprintf ("\nilk txx flow ctl0: 0x%16lx\n", ilk_txx_flow_ctl0.u64);
+
+ ilk_rxx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG1(interface));
+ cvmx_dprintf ("\nilk rxx cfg1: 0x%16lx\n", ilk_rxx_cfg1.u64);
+ cvmx_dprintf ("rx fifo count: %d\n", ilk_rxx_cfg1.s.rx_fifo_cnt);
+
+ ilk_rxx_int.u64 = cvmx_read_csr (CVMX_ILK_RXX_INT(interface));
+ cvmx_dprintf ("\nilk rxx int: 0x%16lx\n", ilk_rxx_int.u64);
+ if (ilk_rxx_int.s.pkt_drop_rxf)
+ cvmx_dprintf ("rx fifo packet drop\n");
+ if (ilk_rxx_int.u64)
+ cvmx_write_csr (CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);
+
+ ilk_rxx_flow_ctl0.u64 = cvmx_read_csr (CVMX_ILK_RXX_FLOW_CTL0(interface));
+ cvmx_dprintf ("\nilk rxx flow ctl0: 0x%16lx\n", ilk_rxx_flow_ctl0.u64);
+
+ ilk_rxx_flow_ctl1.u64 = cvmx_read_csr (CVMX_ILK_RXX_FLOW_CTL1(interface));
+ cvmx_dprintf ("\nilk rxx flow ctl1: 0x%16lx\n", ilk_rxx_flow_ctl1.u64);
+
+ ilk_gbl_int.u64 = cvmx_read_csr (CVMX_ILK_GBL_INT);
+ cvmx_dprintf ("\nilk gbl int: 0x%16lx\n", ilk_gbl_int.u64);
+ if (ilk_gbl_int.s.rxf_push_full)
+ cvmx_dprintf ("rx fifo overflow\n");
+ if (ilk_gbl_int.u64)
+ cvmx_write_csr (CVMX_ILK_GBL_INT, ilk_gbl_int.u64);
+}
+#endif
+
+/**
+ * enable interface
+ *
+ * @param interface The identifier of the packet interface to enable. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return Zero on success, negative on failure.
+ */
+//#define CVMX_ILK_STATS_ENA 1
+int cvmx_ilk_enable (int interface)
+{
+ int res = -1;
+ int retry_count = 0;
+ cvmx_helper_link_info_t result;
+ cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+ cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+#ifdef CVMX_ILK_STATS_ENA
+ cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+ cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+#endif
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ result.u64 = 0;
+
+#ifdef CVMX_ILK_STATS_ENA
+ cvmx_dprintf ("\n");
+ cvmx_dprintf ("<<<< ILK%d: Before enabling ilk\n", interface);
+ cvmx_ilk_reg_dump_rx (interface);
+ cvmx_ilk_reg_dump_tx (interface);
+#endif
+
+ /* RX packet will be enabled only if link is up */
+
+ /* TX side */
+ ilk_txx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));
+ ilk_txx_cfg1.s.pkt_ena = 1;
+ ilk_txx_cfg1.s.rx_link_fc_ign = 1; /* cannot use link fc workaround */
+ cvmx_write_csr (CVMX_ILK_TXX_CFG1(interface), ilk_txx_cfg1.u64);
+ cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));
+
+#ifdef CVMX_ILK_STATS_ENA
+ /* RX side stats */
+ ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
+ ilk_rxx_cfg0.s.lnk_stats_ena = 1;
+ cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+
+ /* TX side stats */
+ ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
+ ilk_txx_cfg0.s.lnk_stats_ena = 1;
+ cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+#endif
+
+retry:
+ retry_count++;
+ if (retry_count > 10)
+ goto out;
+
+ /* Make sure the link is up, so that packets can be sent. */
+ result = __cvmx_helper_ilk_link_get(cvmx_helper_get_ipd_port(interface + CVMX_ILK_GBL_BASE, 0));
+
+ /* Small delay before another retry. */
+ cvmx_wait_usec(100);
+
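+ /* RX packet enable is only set once the link is up (see the comment
+ above), so poll it here. */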
+ ilk_rxx_cfg1.u64 = cvmx_read_csr(CVMX_ILK_RXX_CFG1(interface));
+ if (ilk_rxx_cfg1.s.pkt_ena == 0)
+ goto retry;
+
+out:
+
+#ifdef CVMX_ILK_STATS_ENA
+ cvmx_dprintf (">>>> ILK%d: After ILK is enabled\n", interface);
+ cvmx_ilk_reg_dump_rx (interface);
+ cvmx_ilk_reg_dump_tx (interface);
+#endif
+
+ if (result.s.link_up)
+ return 0;
+
+ return -1;
+}
+
+/**
+ * Disable interface
+ *
+ * @param interface The identifier of the packet interface to disable. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_disable (int interface)
+{
+ int res = -1;
+ cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+ cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+#ifdef CVMX_ILK_STATS_ENA
+ cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+ cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+#endif
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ /* TX side */
+ ilk_txx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));
+ ilk_txx_cfg1.s.pkt_ena = 0;
+ cvmx_write_csr (CVMX_ILK_TXX_CFG1(interface), ilk_txx_cfg1.u64);
+
+ /* RX side */
+ ilk_rxx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG1(interface));
+ ilk_rxx_cfg1.s.pkt_ena = 0;
+ cvmx_write_csr (CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
+
+#ifdef CVMX_ILK_STATS_ENA
+ /* RX side stats */
+ ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
+ ilk_rxx_cfg0.s.lnk_stats_ena = 0;
+ cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+
+ /* TX side stats */
+ ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
+ ilk_txx_cfg0.s.lnk_stats_ena = 0;
+ cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+#endif
+
+ return 0;
+}
+
+/**
+ * Provide interface enable status
+ *
+ * @param interface The identifier of the packet interface to query. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return Zero, not enabled; One, enabled.
+ */
+int cvmx_ilk_get_intf_ena (int interface)
+{
+ return cvmx_ilk_intf_cfg[interface].intf_en;
+}
+
+/**
+ * Count the number of bits set in a byte
+ *
+ * @param uc the byte to be counted
+ *
+ * @return number of bits set
+ */
+unsigned char cvmx_ilk_bit_count (unsigned char uc)
+{
+ unsigned char count;
+
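+ /* Kernighan's method: uc &= uc - 1 clears the lowest set bit, so the
+ loop iterates once per set bit */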
+ for (count = 0; uc > 0; uc &= uc-1)
+ count++;
+
+ return count;
+}
+
+/**
+ * Provide interface lane mask
+ *
+ * @param interface The identifier of the packet interface to query. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return lane mask
+ */
+unsigned char cvmx_ilk_get_intf_ln_msk (int interface)
+{
+ return cvmx_ilk_intf_cfg[interface].lane_en_mask;
+}
+
+/**
+ * Provide channel info
+ *
+ * @param interface The identifier of the packet interface to query. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ * @param chans A pointer to a channel array
+ * @param num_chan A pointer to the number of channels
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_get_chan_info (int interface, unsigned char **chans,
+ unsigned char *num_chan)
+{
+ *chans = cvmx_ilk_chan_map[interface];
+ *num_chan = cvmx_ilk_chans[interface];
+
+ return 0;
+}
+
+/**
+ * Show channel statistics
+ *
+ * @param interface The identifier of the packet interface to query. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ * @param pstats A pointer to cvmx_ilk_stats_ctrl_t that specifies which
+ * logical channels to access
+ *
+ * @return nothing
+ */
+void cvmx_ilk_show_stats (int interface, cvmx_ilk_stats_ctrl_t *pstats)
+{
+ unsigned int i;
+ cvmx_ilk_rxx_idx_stat0_t ilk_rxx_idx_stat0;
+ cvmx_ilk_rxx_idx_stat1_t ilk_rxx_idx_stat1;
+ cvmx_ilk_rxx_mem_stat0_t ilk_rxx_mem_stat0;
+ cvmx_ilk_rxx_mem_stat1_t ilk_rxx_mem_stat1;
+
+ cvmx_ilk_txx_idx_stat0_t ilk_txx_idx_stat0;
+ cvmx_ilk_txx_idx_stat1_t ilk_txx_idx_stat1;
+ cvmx_ilk_txx_mem_stat0_t ilk_txx_mem_stat0;
+ cvmx_ilk_txx_mem_stat1_t ilk_txx_mem_stat1;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return;
+
+ if (pstats == NULL)
+ return;
+
+ /* discrete channels */
+ if (pstats->chan_list != NULL)
+ {
+ /* walk the caller-supplied channel list */
+ for (i = 0; i < pstats->num_chans; i++)
+ {
+ /* get the number of rx packets */
+ ilk_rxx_idx_stat0.u64 = 0;
+ ilk_rxx_idx_stat0.s.index = pstats->chan_list[i];
+ ilk_rxx_idx_stat0.s.clr = pstats->clr_on_rd;
+ cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT0(interface),
+ ilk_rxx_idx_stat0.u64);
+ ilk_rxx_mem_stat0.u64 = cvmx_read_csr
+ (CVMX_ILK_RXX_MEM_STAT0(interface));
+
+ /* get the number of rx bytes */
+ ilk_rxx_idx_stat1.u64 = 0;
+ ilk_rxx_idx_stat1.s.index = pstats->chan_list[i];
+ ilk_rxx_idx_stat1.s.clr = pstats->clr_on_rd;
+ cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT1(interface),
+ ilk_rxx_idx_stat1.u64);
+ ilk_rxx_mem_stat1.u64 = cvmx_read_csr
+ (CVMX_ILK_RXX_MEM_STAT1(interface));
+
+ cvmx_dprintf ("ILK%d Channel%d Rx: %d packets %d bytes\n", interface,
+ pstats->chan_list[i], ilk_rxx_mem_stat0.s.rx_pkt,
+ (unsigned int) ilk_rxx_mem_stat1.s.rx_bytes);
+
+ /* get the number of tx packets */
+ ilk_txx_idx_stat0.u64 = 0;
+ ilk_txx_idx_stat0.s.index = pstats->chan_list[i];
+ ilk_txx_idx_stat0.s.clr = pstats->clr_on_rd;
+ cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT0(interface),
+ ilk_txx_idx_stat0.u64);
+ ilk_txx_mem_stat0.u64 = cvmx_read_csr
+ (CVMX_ILK_TXX_MEM_STAT0(interface));
+
+ /* get the number of tx bytes */
+ ilk_txx_idx_stat1.u64 = 0;
+ ilk_txx_idx_stat1.s.index = pstats->chan_list[i];
+ ilk_txx_idx_stat1.s.clr = pstats->clr_on_rd;
+ cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT1(interface),
+ ilk_txx_idx_stat1.u64);
+ ilk_txx_mem_stat1.u64 = cvmx_read_csr
+ (CVMX_ILK_TXX_MEM_STAT1(interface));
+
+ cvmx_dprintf ("ILK%d Channel%d Tx: %d packets %d bytes\n", interface,
+ pstats->chan_list[i], ilk_txx_mem_stat0.s.tx_pkt,
+ (unsigned int) ilk_txx_mem_stat1.s.tx_bytes);
+ }
+ return;
+ }
+
+ /* continuous channels */
+ ilk_rxx_idx_stat0.u64 = 0;
+ ilk_rxx_idx_stat0.s.index = pstats->chan_start;
+ ilk_rxx_idx_stat0.s.inc = pstats->chan_step;
+ ilk_rxx_idx_stat0.s.clr = pstats->clr_on_rd;
+ cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT0(interface), ilk_rxx_idx_stat0.u64);
+
+ ilk_rxx_idx_stat1.u64 = 0;
+ ilk_rxx_idx_stat1.s.index = pstats->chan_start;
+ ilk_rxx_idx_stat1.s.inc = pstats->chan_step;
+ ilk_rxx_idx_stat1.s.clr = pstats->clr_on_rd;
+ cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT1(interface), ilk_rxx_idx_stat1.u64);
+
+ ilk_txx_idx_stat0.u64 = 0;
+ ilk_txx_idx_stat0.s.index = pstats->chan_start;
+ ilk_txx_idx_stat0.s.inc = pstats->chan_step;
+ ilk_txx_idx_stat0.s.clr = pstats->clr_on_rd;
+ cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT0(interface), ilk_txx_idx_stat0.u64);
+
+ ilk_txx_idx_stat1.u64 = 0;
+ ilk_txx_idx_stat1.s.index = pstats->chan_start;
+ ilk_txx_idx_stat1.s.inc = pstats->chan_step;
+ ilk_txx_idx_stat1.s.clr = pstats->clr_on_rd;
+ cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT1(interface), ilk_txx_idx_stat1.u64);
+
+ for (i = pstats->chan_start; i <= pstats->chan_end; i += pstats->chan_step)
+ {
+ ilk_rxx_mem_stat0.u64 = cvmx_read_csr
+ (CVMX_ILK_RXX_MEM_STAT0(interface));
+ ilk_rxx_mem_stat1.u64 = cvmx_read_csr
+ (CVMX_ILK_RXX_MEM_STAT1(interface));
+ cvmx_dprintf ("ILK%d Channel%d Rx: %d packets %d bytes\n", interface, i,
+ ilk_rxx_mem_stat0.s.rx_pkt,
+ (unsigned int) ilk_rxx_mem_stat1.s.rx_bytes);
+
+ ilk_txx_mem_stat0.u64 = cvmx_read_csr
+ (CVMX_ILK_TXX_MEM_STAT0(interface));
+ ilk_txx_mem_stat1.u64 = cvmx_read_csr
+ (CVMX_ILK_TXX_MEM_STAT1(interface));
+ cvmx_dprintf ("ILK%d Channel%d Tx: %d packets %d bytes\n", interface, i,
+ ilk_rxx_mem_stat0.s.rx_pkt,
+ (unsigned int) ilk_rxx_mem_stat1.s.rx_bytes);
+ }
+
+ return;
+}
+
+/**
+ * Enable or disable loopback
+ *
+ * @param interface The identifier of the packet interface to configure. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ * @param enable Enable or disable loopback
+ * @param mode Internal or external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_lpbk (int interface, cvmx_ilk_lpbk_ena_t enable,
+ cvmx_ilk_lpbk_mode_t mode)
+{
+ int res = -1;
+ cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+ cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ /* Internal loopback. Only one type of loopback can be on at a time,
+ so disable external loopback first. */
+ if (mode == CVMX_ILK_LPBK_INT)
+ {
+ if (enable == CVMX_ILK_LPBK_ENA)
+ {
+ ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
+ ilk_txx_cfg0.s.ext_lpbk = CVMX_ILK_LPBK_DISA;
+ ilk_txx_cfg0.s.ext_lpbk_fc = CVMX_ILK_LPBK_DISA;
+ cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+
+ ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
+ ilk_rxx_cfg0.s.ext_lpbk = CVMX_ILK_LPBK_DISA;
+ ilk_rxx_cfg0.s.ext_lpbk_fc = CVMX_ILK_LPBK_DISA;
+ cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+ }
+
+ ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
+ ilk_txx_cfg0.s.int_lpbk = enable;
+ cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+
+ res = 0;
+ return res;
+ }
+
+ /* External loopback. Only one type of loopback can be on at a time,
+ so disable internal loopback first. */
+ if (enable == CVMX_ILK_LPBK_ENA)
+ {
+ ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
+ ilk_txx_cfg0.s.int_lpbk = CVMX_ILK_LPBK_DISA;
+ cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+ }
+
+ ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
+ ilk_txx_cfg0.s.ext_lpbk = enable;
+ ilk_txx_cfg0.s.ext_lpbk_fc = enable;
+ cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+
+ ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
+ ilk_rxx_cfg0.s.ext_lpbk = enable;
+ ilk_rxx_cfg0.s.ext_lpbk_fc = enable;
+ cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+
+ res = 0;
+ return res;
+}
+
+#endif /* CVMX_ENABLE_HELPER_FUNCTIONS */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ilk.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ilk.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ilk.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ilk.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,184 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * This file contains defines for the ILK interface
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ *
+ */
+#ifndef __CVMX_ILK_H__
+#define __CVMX_ILK_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* CSR typedefs have been moved to cvmx-ilk-defs.h */
+
+#define CVMX_ILK_GBL_BASE 5
+#define CVMX_ILK_QLM_BASE 1
+
+typedef struct
+{
+ int intf_en : 8;
+ int lane_en_mask : 8;
+ int lane_speed : 16;
+ /* add more here */
+} cvmx_ilk_intf_t;
+
+#define CVMX_NUM_ILK_INTF 2
+#define CVMX_MAX_ILK_LANES 8
+extern unsigned char cvmx_ilk_lane_mask[CVMX_NUM_ILK_INTF];
+
+typedef struct
+{
+ unsigned int pipe;
+ unsigned int chan;
+} cvmx_ilk_pipe_chan_t;
+
+#define CVMX_ILK_PIPE_BASE 72
+#define CVMX_MAX_ILK_PIPES 45
+#define CVMX_MAX_ILK_CHANS 8
+extern unsigned char cvmx_ilk_chans[CVMX_NUM_ILK_INTF];
+extern unsigned char cvmx_ilk_chan_map[CVMX_NUM_ILK_INTF][CVMX_MAX_ILK_CHANS];
+
+typedef struct
+{
+ unsigned int chan;
+ unsigned int pknd;
+} cvmx_ilk_chan_pknd_t;
+
+#define CVMX_ILK_PKND_BASE 20
+#define CVMX_MAX_ILK_PKNDS 8 /* must be <45 */
+
+typedef struct
+{
+ unsigned int *chan_list; /* list of discrete channels; must be NULL to use the continuous fields below */
+ unsigned int num_chans;
+
+ unsigned int chan_start; /* for continuous channels */
+ unsigned int chan_end;
+ unsigned int chan_step;
+
+ unsigned int clr_on_rd;
+} cvmx_ilk_stats_ctrl_t;
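+
+/* A minimal usage sketch (illustrative, not part of the API): to dump
+ * statistics for continuous channels 0..7 of interface 0, clearing the
+ * counters on read:
+ *
+ * cvmx_ilk_stats_ctrl_t sc = { NULL, 0, 0, 7, 1, 1 };
+ * cvmx_ilk_show_stats (0, &sc);
+ *
+ * A non-NULL chan_list with num_chans entries selects discrete channels
+ * instead.
+ */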
+
+#define CVMX_ILK_MAX_CAL 288
+#define CVMX_ILK_TX_MIN_CAL 1
+#define CVMX_ILK_RX_MIN_CAL 1
+#define CVMX_ILK_CAL_GRP_SZ 8
+#define CVMX_ILK_PIPE_BPID_SZ 7
+#define CVMX_ILK_ENT_CTRL_SZ 2
+#define CVMX_ILK_RX_FIFO_WM 0x200
+
+typedef enum
+{
+ PIPE_BPID = 0,
+ LINK,
+ XOFF,
+ XON
+} cvmx_ilk_cal_ent_ctrl_t;
+
+typedef struct
+{
+ unsigned char pipe_bpid;
+ cvmx_ilk_cal_ent_ctrl_t ent_ctrl;
+} cvmx_ilk_cal_entry_t;
+
+/** Callbacks structure to customize ILK initialization sequence */
+typedef struct
+{
+ /** Called to setup rx calendar */
+ int (*calendar_setup_rx) (int interface, int cal_depth,
+ cvmx_ilk_cal_entry_t *pent, int hi_wm,
+ unsigned char cal_ena);
+
+ /** add more here */
+} cvmx_ilk_callbacks_t;
+
+typedef enum
+{
+ CVMX_ILK_LPBK_DISA = 0,
+ CVMX_ILK_LPBK_ENA
+} cvmx_ilk_lpbk_ena_t;
+
+typedef enum
+{
+ CVMX_ILK_LPBK_INT = 0,
+ CVMX_ILK_LPBK_EXT
+} cvmx_ilk_lpbk_mode_t;
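+
+/* For example, cvmx_ilk_lpbk (0, CVMX_ILK_LPBK_ENA, CVMX_ILK_LPBK_INT)
+ * turns on internal loopback on ilk0; only one loopback type can be
+ * active at a time. */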
+
+extern void cvmx_ilk_get_callbacks(cvmx_ilk_callbacks_t * callbacks);
+extern void cvmx_ilk_set_callbacks(cvmx_ilk_callbacks_t * new_callbacks);
+
+extern int cvmx_ilk_start_interface (int interface, unsigned char num_lanes);
+extern int cvmx_ilk_set_pipe (int interface, int pipe_base,
+ unsigned int pipe_len);
+extern int cvmx_ilk_tx_set_channel (int interface, cvmx_ilk_pipe_chan_t *pch,
+ unsigned int num_chs);
+extern int cvmx_ilk_rx_set_pknd (int interface, cvmx_ilk_chan_pknd_t *chpknd,
+ unsigned int num_pknd);
+extern int cvmx_ilk_calendar_setup_cb (int interface, int num_ports);
+extern int cvmx_ilk_calendar_sync_cb (int interface, int timeout);
+extern int cvmx_ilk_enable (int interface);
+extern int cvmx_ilk_disable (int interface);
+extern int cvmx_ilk_get_intf_ena (int interface);
+extern unsigned char cvmx_ilk_bit_count (unsigned char uc);
+extern unsigned char cvmx_ilk_get_intf_ln_msk (int interface);
+extern int cvmx_ilk_get_chan_info (int interface, unsigned char **chans,
+ unsigned char *num_chan);
+extern void cvmx_ilk_show_stats (int interface, cvmx_ilk_stats_ctrl_t *pstats);
+extern int cvmx_ilk_cal_setup_rx (int interface, int cal_depth,
+ cvmx_ilk_cal_entry_t *pent, int hi_wm,
+ unsigned char cal_ena);
+extern int cvmx_ilk_cal_setup_tx (int interface, int cal_depth,
+ cvmx_ilk_cal_entry_t *pent,
+ unsigned char cal_ena);
+extern int cvmx_ilk_lpbk (int interface, cvmx_ilk_lpbk_ena_t enable,
+ cvmx_ilk_lpbk_mode_t mode);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ILK_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ilk.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-interrupt-handler.S
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-interrupt-handler.S (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-interrupt-handler.S 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,198 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+#include <machine/asm.h>
+#include <machine/regdef.h>
+
+.set noreorder
+.set noat
+
+LEAF(cvmx_interrupt_stage1)
+ dla k0, cvmx_interrupt_stage2
+ jalr k1, k0 // Save our address in k1, so we can tell which
+ // vector we are coming from.
+ nop
+END(cvmx_interrupt_stage1)
+
+#define STACK_SIZE (36*8)
+LEAF(cvmx_interrupt_stage2)
+ dsubu sp, sp, STACK_SIZE
+ sd zero, 0(sp) // Just a place holder
+ sd $1, 8(sp) // start saving registers
+ sd $2, 16(sp)
+ sd $3, 24(sp)
+ sd $4, 32(sp)
+ sd $5, 40(sp)
+ sd $6, 48(sp)
+ sd $7, 56(sp)
+ sd $8, 64(sp)
+ sd $9, 72(sp)
+ sd $10, 80(sp)
+ sd $11, 88(sp)
+ sd $12, 96(sp)
+ sd $13, 104(sp)
+ sd $14, 112(sp)
+ sd $15, 120(sp)
+ sd $16, 128(sp)
+ sd $17, 136(sp)
+ sd $18, 144(sp)
+ sd $19, 152(sp)
+ sd $20, 160(sp)
+ sd $21, 168(sp)
+ sd $22, 176(sp)
+ sd $23, 184(sp)
+ sd $24, 192(sp)
+ sd $25, 200(sp)
+ sd $26, 208(sp)
+ sd $27, 216(sp)
+ mfhi k0 // Reading hi and lo takes multiple cycles
+ mflo k1 // Do it here so it completes by the time we need it
+ sd $28, 224(sp)
+ daddu $1, sp, STACK_SIZE // Correct the SP for the space we used
+ sd $1, 232(sp)
+ sd $30, 240(sp)
+ sd $31, 248(sp) // saved all general purpose registers
+ sd k0, 256(sp) // save hi
+ sd k1, 264(sp) // save lo
+ /* Save DCACHE error register early, since any non-errored DCACHE accesses will clear
+ ** error bit */
+ dmfc0 k0, $27, 1
+ sd k0, 272(sp)
+ /* Store EPC for GCC's frame unwinder. */
+ dmfc0 k0, $14
+ sd k0, 280(sp)
+
+ dla k0, cvmx_interrupt_in_isr
+ li k1, 1
+ sw k1, 0(k0)
+
+ dla k0, cvmx_interrupt_do_irq
+ jal k0
+ dadd a0, sp, 0 // First argument is array of registers
+
+ dla k0, cvmx_interrupt_in_isr
+ sw $0, 0(k0)
+
+ ld k0, 256(sp) // read hi
+ ld k1, 264(sp) // read lo
+ mthi k0 // restore hi
+ mtlo k1 // restore lo
+
+ ld $1, 8(sp) // start restoring registers
+ ld $2, 16(sp)
+ ld $3, 24(sp)
+ ld $4, 32(sp)
+ ld $5, 40(sp)
+ ld $6, 48(sp)
+ ld $7, 56(sp)
+ ld $8, 64(sp)
+ ld $9, 72(sp)
+ ld $10, 80(sp)
+ ld $11, 88(sp)
+ ld $12, 96(sp)
+ ld $13, 104(sp)
+ ld $14, 112(sp)
+ ld $15, 120(sp)
+ ld $16, 128(sp)
+ ld $17, 136(sp)
+ ld $18, 144(sp)
+ ld $19, 152(sp)
+ ld $20, 160(sp)
+ ld $21, 168(sp)
+ ld $22, 176(sp)
+ ld $23, 184(sp)
+ ld $24, 192(sp)
+ ld $25, 200(sp)
+ ld $26, 208(sp)
+ ld $28, 224(sp)
+ ld $30, 240(sp)
+ ld $31, 248(sp) // restored all general purpose registers
+ ld $29, 232(sp) // No need to correct for STACK_SIZE
+ eret
+ nop
+END(cvmx_interrupt_stage2)
+
+// Icache and Dcache exception handler. This code is executed
+// with ERL set so we can't use virtual addresses. We save and restore
+// K0 to a global memory location so we can handle cache errors from exception
+// context. This means that if two cores get a cache exception at the same time
+// the K0 might be corrupted. This entire handler MUST fit in 128 bytes.
+#define K0_STORE_LOCATION 8
+#define DCACHE_ERROR_COUNT 16
+#define ICACHE_ERROR_COUNT 24
+LEAF(cvmx_interrupt_cache_error)
+ .set push
+ .set noreorder
+ sd k0, K0_STORE_LOCATION($0) // Store K0 into global loc in case we're in an exception
+ dmfc0 k0, $27, 1 // Get Dcache error status before any loads
+ bbit0 k0, 0, not_dcache_error // Skip dcache count if no error
+ dmtc0 k0, $27, 1 // Clear any Dcache errors
+ ld k0, DCACHE_ERROR_COUNT($0) // Load the dcache error count
+ daddu k0, 1 // Increment the dcache error count
+ sd k0, DCACHE_ERROR_COUNT($0) // Store the dcache error count
+not_dcache_error:
+ dmfc0 k0, $27, 0 // Get the Icache error status
+ bbit0 k0, 0, not_icache_error // Skip Icache count if no error
+ dmtc0 k0, $27, 0 // Clear any Icache errors
+ ld k0, ICACHE_ERROR_COUNT($0) // Load the icache error count
+ daddu k0, 1 // Increment the icache error count
+ sd k0, ICACHE_ERROR_COUNT($0) // Store the icache error count
+not_icache_error:
+ ld k0, K0_STORE_LOCATION($0) // Restore K0 since we might have been in an exception
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop // Keep the ERET 8 instructions away
+ nop // from a branch target.
+ eret // Return from the Icache exception
+ .set pop
+END(cvmx_interrupt_cache_error)
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-interrupt-handler.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-interrupt.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-interrupt.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-interrupt.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1289 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the MIPS interrupts.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __U_BOOT__
+#if __GNUC__ >= 4
+/* Backtrace is only available with the new toolchain. */
+#include <execinfo.h>
+#endif
+#endif /* __U_BOOT__ */
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-interrupt.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-uart.h"
+#include "cvmx-pow.h"
+#include "cvmx-ebt3000.h"
+#include "cvmx-coremask.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-atomic.h"
+#include "cvmx-app-init.h"
+#include "cvmx-error.h"
+#include "cvmx-app-hotplug.h"
+#include "cvmx-profiler.h"
+#ifndef __U_BOOT__
+# include <octeon_mem_map.h>
+#else
+# include <asm/arch/octeon_mem_map.h>
+#endif
+EXTERN_ASM void cvmx_interrupt_stage1(void);
+EXTERN_ASM void cvmx_debug_handler_stage1(void);
+EXTERN_ASM void cvmx_interrupt_cache_error(void);
+
+int cvmx_interrupt_in_isr = 0;
+
+struct __cvmx_interrupt_handler {
+ cvmx_interrupt_func_t handler; /**< One function to call per interrupt */
+ void *data; /**< User data per interrupt */
+ int handler_data; /**< Used internally */
+};
+
+/**
+ * Internal status of the interrupt registration
+ */
+typedef struct
+{
+ struct __cvmx_interrupt_handler handlers[CVMX_IRQ_MAX];
+ cvmx_interrupt_exception_t exception_handler;
+} cvmx_interrupt_state_t;
+
+/**
+ * Internal state of the interrupt registration
+ */
+#ifndef __U_BOOT__
+static CVMX_SHARED cvmx_interrupt_state_t cvmx_interrupt_state;
+static CVMX_SHARED cvmx_spinlock_t cvmx_interrupt_default_lock;
+/* Incremented once first core processing is finished. */
+static CVMX_SHARED int32_t cvmx_interrupt_initialize_flag;
+#endif /* __U_BOOT__ */
+
+#define ULL unsigned long long
+
+#define HI32(data64) ((uint32_t)(data64 >> 32))
+#define LO32(data64) ((uint32_t)(data64 & 0xFFFFFFFF))
+
+static const char reg_names[][32] = { "r0","at","v0","v1","a0","a1","a2","a3",
+ "t0","t1","t2","t3","t4","t5","t6","t7",
+ "s0","s1","s2","s3","s4","s5", "s6","s7",
+ "t8","t9", "k0","k1","gp","sp","s8","ra" };
+
+/**
+ * Version of printf that works better in exception context.
+ *
+ * @param format printf-style format string
+ */
+void cvmx_safe_printf(const char *format, ...)
+{
+ char buffer[256];
+ char *ptr = buffer;
+ int count;
+ va_list args;
+
+ va_start(args, format);
+#ifndef __U_BOOT__
+ count = vsnprintf(buffer, sizeof(buffer), format, args);
+#else
+ count = vsprintf(buffer, format, args);
+#endif
+ va_end(args);
+
+ while (count-- > 0)
+ {
+ cvmx_uart_lsr_t lsrval;
+
+ /* Spin until there is room */
+ do
+ {
+ lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(0));
+#if !defined(CONFIG_OCTEON_SIM_SPEED)
+ if (lsrval.s.temt == 0)
+ cvmx_wait(10000); /* Just to reduce the load on the system */
+#endif
+ }
+ while (lsrval.s.temt == 0);
+
+ if (*ptr == '\n')
+ cvmx_write_csr(CVMX_MIO_UARTX_THR(0), '\r');
+ cvmx_write_csr(CVMX_MIO_UARTX_THR(0), *ptr++);
+ }
+}
+
+/* Textual descriptions of cause codes */
+static const char cause_names[][128] = {
+ /* 0 */ "Interrupt",
+ /* 1 */ "TLB modification",
+ /* 2 */ "tlb load/fetch",
+ /* 3 */ "tlb store",
+ /* 4 */ "address exc, load/fetch",
+ /* 5 */ "address exc, store",
+ /* 6 */ "bus error, instruction fetch",
+ /* 7 */ "bus error, load/store",
+ /* 8 */ "syscall",
+ /* 9 */ "breakpoint",
+ /* 10 */ "reserved instruction",
+ /* 11 */ "cop unusable",
+ /* 12 */ "arithmetic overflow",
+ /* 13 */ "trap",
+ /* 14 */ "",
+ /* 15 */ "floating point exc",
+ /* 16 */ "",
+ /* 17 */ "",
+ /* 18 */ "cop2 exception",
+ /* 19 */ "",
+ /* 20 */ "",
+ /* 21 */ "",
+ /* 22 */ "mdmx unusable",
+ /* 23 */ "watch",
+ /* 24 */ "machine check",
+ /* 25 */ "",
+ /* 26 */ "",
+ /* 27 */ "",
+ /* 28 */ "",
+ /* 29 */ "",
+ /* 30 */ "cache error",
+ /* 31 */ ""
+};
+
+/**
+ * @INTERNAL
+ * print_reg64
+ * @param name Name of the value to print
+ * @param reg Value to print
+ */
+static inline void print_reg64(const char *name, uint64_t reg)
+{
+ cvmx_safe_printf("%16s: 0x%08x%08x\n", name, (unsigned int)HI32(reg),(unsigned int)LO32(reg));
+}
+
+/**
+ * @INTERNAL
+ * Dump all useful registers to the console
+ *
+ * @param registers CPU register to dump
+ */
+static void __cvmx_interrupt_dump_registers(uint64_t *registers)
+{
+ uint64_t r1, r2;
+ int reg;
+ for (reg=0; reg<16; reg++)
+ {
+ r1 = registers[reg]; r2 = registers[reg+16];
+ cvmx_safe_printf("%3s ($%02d): 0x%08x%08x \t %3s ($%02d): 0x%08x%08x\n",
+ reg_names[reg], reg, (unsigned int)HI32(r1), (unsigned int)LO32(r1),
+ reg_names[reg+16], reg+16, (unsigned int)HI32(r2), (unsigned int)LO32(r2));
+ }
+ CVMX_MF_COP0 (r1, COP0_CAUSE);
+ print_reg64 ("COP0_CAUSE", r1);
+ CVMX_MF_COP0 (r2, COP0_STATUS);
+ print_reg64 ("COP0_STATUS", r2);
+ CVMX_MF_COP0 (r1, COP0_BADVADDR);
+ print_reg64 ("COP0_BADVADDR", r1);
+ CVMX_MF_COP0 (r2, COP0_EPC);
+ print_reg64 ("COP0_EPC", r2);
+}
+
+/**
+ * @INTERNAL
+ * Default exception handler. Prints out the exception
+ * cause decode and all relevant registers.
+ *
+ * @param registers Registers at time of the exception
+ */
+#ifndef __U_BOOT__
+static
+#endif /* __U_BOOT__ */
+void __cvmx_interrupt_default_exception_handler(uint64_t *registers)
+{
+ uint64_t trap_print_cause;
+ const char *str;
+#ifndef __U_BOOT__
+ int modified_zero_pc = 0;
+
+ ebt3000_str_write("Trap");
+ cvmx_spinlock_lock(&cvmx_interrupt_default_lock);
+#endif
+ CVMX_MF_COP0 (trap_print_cause, COP0_CAUSE);
+ str = cause_names [(trap_print_cause >> 2) & 0x1f];
+ cvmx_safe_printf("Core %d: Unhandled Exception. Cause register decodes to:\n%s\n", (int)cvmx_get_core_num(), str && *str ? str : "Reserved exception cause");
+ cvmx_safe_printf("******************************************************************\n");
+ __cvmx_interrupt_dump_registers(registers);
+
+#ifndef __U_BOOT__
+
+ cvmx_safe_printf("******************************************************************\n");
+#if __GNUC__ >= 4 && !defined(OCTEON_DISABLE_BACKTRACE)
+ cvmx_safe_printf("Backtrace:\n\n");
+ if (registers[35] == 0) {
+ modified_zero_pc = 1;
+ /* If PC is zero we probably did jalr $zero, in which case $31 - 8 is the call site. */
+ registers[35] = registers[31] - 8;
+ }
+ __octeon_print_backtrace_func ((__octeon_backtrace_printf_t)cvmx_safe_printf);
+ if (modified_zero_pc)
+ registers[35] = 0;
+ cvmx_safe_printf("******************************************************************\n");
+#endif
+
+ cvmx_spinlock_unlock(&cvmx_interrupt_default_lock);
+
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ CVMX_BREAK;
+
+ while (1)
+ {
+ /* Interrupts are suppressed when we are in the exception
+ handler (because of SR[EXL]). Spin and poll the uart
+ status and see if the debugger is trying to stop us. */
+ cvmx_uart_lsr_t lsrval;
+ lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(cvmx_debug_uart));
+ if (lsrval.s.dr)
+ {
+ uint64_t tmp;
+ /* Pulse the MCD0 signal. */
+ asm volatile (
+ ".set push\n"
+ ".set noreorder\n"
+ ".set mips64\n"
+ "dmfc0 %0, $22\n"
+ "ori %0, %0, 0x10\n"
+ "dmtc0 %0, $22\n"
+ ".set pop\n"
+ : "=r" (tmp));
+ }
+ }
+#endif /* __U_BOOT__ */
+}
+
+#ifndef __U_BOOT__
+/**
+ * @INTERNAL
+ * Default interrupt handler if the user doesn't register one.
+ *
+ * @param irq_number IRQ that caused this interrupt
+ * @param registers Register at the time of the interrupt
+ * @param user_arg Unused optional user data
+ */
+static void __cvmx_interrupt_default(int irq_number, uint64_t *registers, void *user_arg)
+{
+ cvmx_safe_printf("cvmx_interrupt_default: Received interrupt %d\n", irq_number);
+ __cvmx_interrupt_dump_registers(registers);
+}
+
+/**
+ * Map a ciu bit to an irq number. 0xff for invalid.
+ * 0-63 for en0.
+ * 64-127 for en1.
+ */
+
+static CVMX_SHARED uint8_t cvmx_ciu_to_irq[8][64];
+#define cvmx_ciu_en0_to_irq cvmx_ciu_to_irq[0]
+#define cvmx_ciu_en1_to_irq cvmx_ciu_to_irq[1]
+#define cvmx_ciu2_wrkq_to_irq cvmx_ciu_to_irq[0]
+#define cvmx_ciu2_wdog_to_irq cvmx_ciu_to_irq[1]
+#define cvmx_ciu2_rml_to_irq cvmx_ciu_to_irq[2]
+#define cvmx_ciu2_mio_to_irq cvmx_ciu_to_irq[3]
+#define cvmx_ciu2_io_to_irq cvmx_ciu_to_irq[4]
+#define cvmx_ciu2_mem_to_irq cvmx_ciu_to_irq[5]
+#define cvmx_ciu2_eth_to_irq cvmx_ciu_to_irq[6]
+#define cvmx_ciu2_gpio_to_irq cvmx_ciu_to_irq[7]
+
+static CVMX_SHARED uint8_t cvmx_ciu2_mbox_to_irq[64];
+static CVMX_SHARED uint8_t cvmx_ciu_61xx_timer_to_irq[64];
+
+static void __cvmx_interrupt_set_mapping(int irq, unsigned int en, unsigned int bit)
+{
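+ /* handler_data packs the enable-register index (en) into bits 6 and up
+ and the bit position into bits 5:0; the mask/unmask helpers decode it
+ with ">> 6" and "& 0x3f" */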
+ cvmx_interrupt_state.handlers[irq].handler_data = (en << 6) | bit;
+ if (en <= 7)
+ cvmx_ciu_to_irq[en][bit] = irq;
+ else if (en == 8)
+ cvmx_ciu_61xx_timer_to_irq[bit] = irq;
+ else
+ cvmx_ciu2_mbox_to_irq[bit] = irq;
+}
+
+static uint64_t cvmx_interrupt_ciu_en0_mirror;
+static uint64_t cvmx_interrupt_ciu_en1_mirror;
+static uint64_t cvmx_interrupt_ciu_61xx_timer_mirror;
+
+/**
+ * @INTERNAL
+ * Called for all Performance Counter interrupts. Handler for
+ * interrupt line 6
+ *
+ * @param irq_number Interrupt number that we're being called for
+ * @param registers Registers at the time of the interrupt
+ * @param user_arg Unused user argument*
+ */
+static void __cvmx_interrupt_perf(int irq_number, uint64_t *registers, void *user_arg)
+{
+ uint64_t perf_counter;
+ CVMX_MF_COP0(perf_counter, COP0_PERFVALUE0);
+ if (perf_counter & (1ull << 63))
+ cvmx_collect_sample();
+}
+
+/**
+ * @INTERNAL
+ * Handler for interrupt lines 2 and 3. These are directly tied
+ * to the CIU. The handler queries the status of the CIU and
+ * calls the secondary handler for the CIU interrupt that
+ * occurred.
+ *
+ * @param irq_number Interrupt number that fired (2 or 3)
+ * @param registers Registers at the time of the interrupt
+ * @param user_arg Unused user argument
+ */
+static void __cvmx_interrupt_ciu(int irq_number, uint64_t *registers, void *user_arg)
+{
+ int ciu_offset;
+ uint64_t irq_mask;
+ uint64_t irq;
+ int bit;
+ int core = cvmx_get_core_num();
+
+ if (irq_number == CVMX_IRQ_MIPS2) {
+ /* Handle EN0 sources */
+ ciu_offset = core * 2;
+ irq_mask = cvmx_read_csr(CVMX_CIU_INTX_SUM0(ciu_offset)) & cvmx_interrupt_ciu_en0_mirror;
+ CVMX_DCLZ(bit, irq_mask);
+ bit = 63 - bit;
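+ /* CVMX_DCLZ returns the number of leading zeros, so 63 - count is the
+ bit index of the highest-numbered pending source (-1 if none) */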
+ /* If the SUM2 summary bit (bit 51) is set, it's a timer interrupt */
+ if (bit == 51 && (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2))) {
+ uint64_t irq_mask;
+ int bit;
+ irq_mask = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP2(core)) & cvmx_interrupt_ciu_61xx_timer_mirror;
+ CVMX_DCLZ(bit, irq_mask);
+ bit = 63 - bit;
+ /* Handle TIMER(4..9) interrupts */
+ if (bit <= 9 && bit >= 4) {
+ uint64_t irq = cvmx_ciu_61xx_timer_to_irq[bit];
+ if (cvmx_unlikely(irq == 0xff)) {
+ /* No mapping */
+ cvmx_interrupt_ciu_61xx_timer_mirror &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_EN2_PPX_IP2(core), cvmx_interrupt_ciu_61xx_timer_mirror);
+ return;
+ }
+ struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
+ h->handler(irq, registers, h->data);
+ return;
+ }
+ }
+
+ if (bit >= 0) {
+ irq = cvmx_ciu_en0_to_irq[bit];
+ if (cvmx_unlikely(irq == 0xff)) {
+ /* No mapping. */
+ cvmx_interrupt_ciu_en0_mirror &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(ciu_offset), cvmx_interrupt_ciu_en0_mirror);
+ return;
+ }
+ struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
+ h->handler(irq, registers, h->data);
+ return;
+ }
+ } else {
+ /* Handle EN1 sources */
+ ciu_offset = cvmx_get_core_num() * 2 + 1;
+ irq_mask = cvmx_read_csr(CVMX_CIU_INT_SUM1) & cvmx_interrupt_ciu_en1_mirror;
+ CVMX_DCLZ(bit, irq_mask);
+ bit = 63 - bit;
+ if (bit >= 0) {
+ irq = cvmx_ciu_en1_to_irq[bit];
+ if (cvmx_unlikely(irq == 0xff)) {
+ /* No mapping. */
+ cvmx_interrupt_ciu_en1_mirror &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(ciu_offset), cvmx_interrupt_ciu_en1_mirror);
+ return;
+ }
+ struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
+ h->handler(irq, registers, h->data);
+ return;
+ }
+ }
+}
+
+/**
+ * @INTERNAL
+ * Handler for interrupt line 3. The DPI_DMA bit has a different value
+ * per core; all other field values are identical across cores.
+ * These are directly tied to the CIU. The handler queries the status of
+ * the CIU and calls the secondary handler for the CIU interrupt that
+ * occurred.
+ *
+ * @param irq_number Interrupt number that fired (3)
+ * @param registers Registers at the time of the interrupt
+ * @param user_arg Unused user argument
+ */
+static void __cvmx_interrupt_ciu_cn61xx(int irq_number, uint64_t *registers, void *user_arg)
+{
+ /* Handle EN1 sources */
+ int core = cvmx_get_core_num();
+ int ciu_offset;
+ uint64_t irq_mask;
+ uint64_t irq;
+ int bit;
+
+ ciu_offset = core * 2 + 1;
+ irq_mask = cvmx_read_csr(CVMX_CIU_SUM1_PPX_IP3(core)) & cvmx_interrupt_ciu_en1_mirror;
+ CVMX_DCLZ(bit, irq_mask);
+ bit = 63 - bit;
+ if (bit >= 0) {
+ irq = cvmx_ciu_en1_to_irq[bit];
+ if (cvmx_unlikely(irq == 0xff)) {
+ /* No mapping. */
+ cvmx_interrupt_ciu_en1_mirror &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(ciu_offset), cvmx_interrupt_ciu_en1_mirror);
+ return;
+ }
+ struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
+ h->handler(irq, registers, h->data);
+ return;
+ }
+}
+
+/**
+ * @INTERNAL
+ * Handler for interrupt line 2 on 68XX. These are directly tied
+ * to the CIU2. The handler queries the status of the CIU and
+ * calls the secondary handler for the CIU interrupt that
+ * occurred.
+ *
+ * @param irq_number Interrupt number that fired (2 or 3)
+ * @param registers Registers at the time of the interrupt
+ * @param user_arg Unused user argument
+ */
+static void __cvmx_interrupt_ciu2(int irq_number, uint64_t *registers, void *user_arg)
+{
+ int sum_bit, src_bit;
+ uint64_t irq;
+ uint64_t src_reg, src_val;
+ struct __cvmx_interrupt_handler *h;
+ int core = cvmx_get_core_num();
+ uint64_t sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core));
+
+ CVMX_DCLZ(sum_bit, sum);
+ sum_bit = 63 - sum_bit;
+
+ if (sum_bit >= 0) {
+ switch (sum_bit) {
+ case 63:
+ case 62:
+ case 61:
+ case 60:
+ irq = cvmx_ciu2_mbox_to_irq[sum_bit - 60];
+ if (cvmx_unlikely(irq == 0xff)) {
+ /* No mapping. */
+ uint64_t mask_reg = CVMX_CIU2_EN_PPX_IP2_MBOX_W1C(core);
+ cvmx_write_csr(mask_reg, 1ull << (sum_bit - 60));
+ break;
+ }
+ h = cvmx_interrupt_state.handlers + irq;
+ h->handler(irq, registers, h->data);
+ break;
+
+ case 7:
+ case 6:
+ case 5:
+ case 4:
+ case 3:
+ case 2:
+ case 1:
+ case 0:
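+ /* The per-category SRC registers are spaced 0x1000 apart, so the
+ sum bit indexes the right register; the CVMX_BUILD_ASSERTs in
+ cvmx_interrupt_initialize() verify this layout */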
+ src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core) + (0x1000 * sum_bit);
+ src_val = cvmx_read_csr(src_reg);
+ if (!src_val)
+ break;
+ CVMX_DCLZ(src_bit, src_val);
+ src_bit = 63 - src_bit;
+ irq = cvmx_ciu_to_irq[sum_bit][src_bit];
+ if (cvmx_unlikely(irq == 0xff)) {
+ /* No mapping. */
+ uint64_t mask_reg = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(core) + (0x1000 * sum_bit);
+ cvmx_write_csr(mask_reg, 1ull << src_bit);
+ break;
+ }
+ h = cvmx_interrupt_state.handlers + irq;
+ h->handler(irq, registers, h->data);
+ break;
+
+ default:
+ cvmx_safe_printf("Unknown CIU2 bit: %d\n", sum_bit);
+ break;
+ }
+ }
+ /* Clear the source to reduce the chance for spurious interrupts. */
+
+ /* CN68XX has a CIU-15786 erratum whereby accessing the ACK registers
+ * can stop interrupts from propagating
+ */
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
+ else
+ cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core));
+}
+
+
+/**
+ * @INTERNAL
+ * Called for all RML interrupts. This is usually an ECC error
+ *
+ * @param irq_number Interrupt number that we're being called for
+ * @param registers Registers at the time of the interrupt
+ * @param user_arg Unused user argument
+ */
+static void __cvmx_interrupt_ecc(int irq_number, uint64_t *registers, void *user_arg)
+{
+ cvmx_error_poll();
+}
+
+
+/**
+ * Process an interrupt request
+ *
+ * @param registers Registers at time of interrupt / exception
+ * Registers 0-31 are standard MIPS, others specific to this routine
+ * @return
+ */
+void cvmx_interrupt_do_irq(uint64_t *registers);
+void cvmx_interrupt_do_irq(uint64_t *registers)
+{
+ uint64_t mask;
+ uint64_t cause;
+ uint64_t status;
+ uint64_t cache_err;
+ int i;
+ uint32_t exc_vec;
+ /* Determine the cause of the interrupt */
+ asm volatile ("dmfc0 %0,$13,0" : "=r" (cause));
+ asm volatile ("dmfc0 %0,$12,0" : "=r" (status));
+ /* In case of exception, clear all interrupts to avoid recursive interrupts.
+ Also clear EXL bit to display the correct PC value. */
+ if ((cause & 0x7c) == 0)
+ {
+ asm volatile ("dmtc0 %0, $12, 0" : : "r" (status & ~(0xff02)));
+ }
+ /* The assembly stub at each exception vector saves its address in k1 when
+ ** it calls the stage 2 handler. We use this to compute the exception vector
+ ** that brought us here */
+ exc_vec = (uint32_t)(registers[27] & 0x780); /* Mask off bits we need to ignore */
+
+ /* Check for cache errors. The cache errors go to a separate exception vector,
+ ** so we will only check these if we got here from a cache error exception, and
+ ** the ERL (error level) bit is set. */
+ i = cvmx_get_core_num();
+ if (exc_vec == 0x100 && (status & 0x4))
+ {
+ CVMX_MF_CACHE_ERR(cache_err);
+
+ /* Use copy of DCACHE_ERR register that early exception stub read */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ {
+ if (registers[34] & 0x1)
+ cvmx_safe_printf("Dcache error detected: core: %d, way: %d, va 7:3: 0x%x\n", i, (int)(registers[34] >> 8) & 0x3f, (int)(registers[34] >> 3) & 0x1f);
+ else if (cache_err & 0x1)
+ cvmx_safe_printf("Icache error detected: core: %d, set: %d, way : %d, va 6:3 = 0x%x\n", i, (int)(cache_err >> 5) & 0x3f, (int)(cache_err >> 3) & 0x3, (int)(cache_err >> 11) & 0xf);
+ else
+ cvmx_safe_printf("Cache error exception: core %d\n", i);
+ }
+ else
+ {
+ if (registers[34] & 0x1)
+ cvmx_safe_printf("Dcache error detected: core: %d, way: %d, va 9:7: 0x%x\n", i, (int)(registers[34] >> 10) & 0x1f, (int)(registers[34] >> 7) & 0x3);
+ else if (cache_err & 0x1)
+ cvmx_safe_printf("Icache error detected: core: %d, way : %d, va 9:3 = 0x%x\n", i, (int)(cache_err >> 10) & 0x3f, (int)(cache_err >> 3) & 0x7f);
+ else
+ cvmx_safe_printf("Cache error exception: core %d\n", i);
+ }
+ CVMX_MT_DCACHE_ERR(1);
+ CVMX_MT_CACHE_ERR(0);
+ }
+
+ /* The bus error exceptions can occur due to DID timeout or write buffer,
+ check by reading COP0_CACHEERRD */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ {
+ i = cvmx_get_core_num();
+ if (registers[34] & 0x4)
+ {
+ cvmx_safe_printf("Bus error detected due to DID timeout: core: %d\n", i);
+ CVMX_MT_DCACHE_ERR(4);
+ }
+ else if (registers[34] & 0x2)
+ {
+ cvmx_safe_printf("Bus error detected due to write buffer parity: core: %d\n", i);
+ CVMX_MT_DCACHE_ERR(2);
+ }
+ }
+
+ if ((cause & 0x7c) != 0)
+ {
+ cvmx_interrupt_state.exception_handler(registers);
+ goto return_from_interrupt;
+ }
+
+ /* Convert the cause into an active mask */
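+ /* CAUSE[15:8] holds the pending IP bits and STATUS[15:8] the IM
+ enables; only lines both pending and enabled are serviced */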
+ mask = ((cause & status) >> 8) & 0xff;
+ if (mask == 0)
+ {
+ goto return_from_interrupt; /* Spurious interrupt */
+ }
+
+ for (i=0; i<8; i++)
+ {
+ if (mask & (1<<i))
+ {
+ struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + i;
+ h->handler(i, registers, h->data);
+ goto return_from_interrupt;
+ }
+ }
+
+ /* We should never get here */
+ __cvmx_interrupt_default_exception_handler(registers);
+
+return_from_interrupt:
+ /* Restore Status register before returning from exception. */
+ asm volatile ("dmtc0 %0, $12, 0" : : "r" (status));
+}
+
+void (*cvmx_interrupt_mask_irq)(int irq_number);
+void (*cvmx_interrupt_unmask_irq)(int irq_number);
+
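+/* O != 0 clears M in V (mask the source); O == 0 sets M (unmask) */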
+#define CLEAR_OR_MASK(V,M,O) ({\
+ if (O) \
+ (V) &= ~(M); \
+ else \
+ (V) |= (M); \
+ })
+
+static void __cvmx_interrupt_ciu2_mask_unmask_irq(int irq_number, int op)
+{
+
+ if (irq_number < 0 || irq_number >= CVMX_IRQ_MAX)
+ return;
+
+ if (irq_number <= CVMX_IRQ_MIPS7) {
+ uint32_t flags, mask;
+
+ flags = cvmx_interrupt_disable_save();
+ asm volatile ("mfc0 %0,$12,0" : "=r" (mask));
+ CLEAR_OR_MASK(mask, 1 << (8 + irq_number), op);
+ asm volatile ("mtc0 %0,$12,0" : : "r" (mask));
+ cvmx_interrupt_restore(flags);
+ } else {
+ int idx;
+ uint64_t reg;
+ int core = cvmx_get_core_num();
+
+ int bit = cvmx_interrupt_state.handlers[irq_number].handler_data;
+
+ if (bit < 0)
+ return;
+
+ idx = bit >> 6;
+ bit &= 0x3f;
+ if (idx > 7) {
+ /* MBOX */
+ if (op)
+ reg = CVMX_CIU2_EN_PPX_IP2_MBOX_W1C(core);
+ else
+ reg = CVMX_CIU2_EN_PPX_IP2_MBOX_W1S(core);
+ } else {
+ if (op)
+ reg = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(core) + (0x1000 * idx);
+ else
+ reg = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(core) + (0x1000 * idx);
+ }
+ cvmx_write_csr(reg, 1ull << bit);
+ }
+}
+
+static void __cvmx_interrupt_ciu2_mask_irq(int irq_number)
+{
+ __cvmx_interrupt_ciu2_mask_unmask_irq(irq_number, 1);
+}
+
+static void __cvmx_interrupt_ciu2_unmask_irq(int irq_number)
+{
+ __cvmx_interrupt_ciu2_mask_unmask_irq(irq_number, 0);
+}
+
+static void __cvmx_interrupt_ciu_mask_unmask_irq(int irq_number, int op)
+{
+ uint32_t flags;
+
+ if (irq_number < 0 || irq_number >= CVMX_IRQ_MAX)
+ return;
+
+ flags = cvmx_interrupt_disable_save();
+ if (irq_number <= CVMX_IRQ_MIPS7) {
+ uint32_t mask;
+ asm volatile ("mfc0 %0,$12,0" : "=r" (mask));
+ CLEAR_OR_MASK(mask, 1 << (8 + irq_number), op);
+ asm volatile ("mtc0 %0,$12,0" : : "r" (mask));
+ } else {
+ int ciu_bit, ciu_offset;
+ int bit = cvmx_interrupt_state.handlers[irq_number].handler_data;
+ int is_timer_intr = bit >> 6;
+ int core = cvmx_get_core_num();
+
+ if (bit < 0)
+ goto out;
+
+ ciu_bit = bit & 0x3f;
+ ciu_offset = core * 2;
+
+ if (is_timer_intr == 8)
+ {
+ CLEAR_OR_MASK(cvmx_interrupt_ciu_61xx_timer_mirror, 1ull << ciu_bit, op);
+ CLEAR_OR_MASK(cvmx_interrupt_ciu_en0_mirror, 1ull << 51, op); // SUM2 bit
+ cvmx_write_csr(CVMX_CIU_EN2_PPX_IP2(core), cvmx_interrupt_ciu_61xx_timer_mirror);
+ }
+ else if (bit & 0x40) {
+ /* EN1 */
+ ciu_offset += 1;
+ CLEAR_OR_MASK(cvmx_interrupt_ciu_en1_mirror, 1ull << ciu_bit, op);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(ciu_offset), cvmx_interrupt_ciu_en1_mirror);
+ } else {
+ /* EN0 */
+ CLEAR_OR_MASK(cvmx_interrupt_ciu_en0_mirror, 1ull << ciu_bit, op);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(ciu_offset), cvmx_interrupt_ciu_en0_mirror);
+ }
+ }
+out:
+ cvmx_interrupt_restore(flags);
+}
+
+static void __cvmx_interrupt_ciu_mask_irq(int irq_number)
+{
+ __cvmx_interrupt_ciu_mask_unmask_irq(irq_number, 1);
+}
+
+static void __cvmx_interrupt_ciu_unmask_irq(int irq_number)
+{
+ __cvmx_interrupt_ciu_mask_unmask_irq(irq_number, 0);
+}
+
+/**
+ * Register an interrupt handler for the specified interrupt number.
+ *
+ * @param irq_number Interrupt number to register for. See
+ * cvmx-interrupt.h for the enumeration and description of sources.
+ * @param func Function to call on interrupt.
+ * @param user_arg User data to pass to the interrupt handler
+ */
+void cvmx_interrupt_register(int irq_number, cvmx_interrupt_func_t func, void *user_arg)
+{
+ if (irq_number >= CVMX_IRQ_MAX || irq_number < 0) {
+ cvmx_warn("cvmx_interrupt_register: Illegal irq_number %d\n", irq_number);
+ return;
+ }
+ cvmx_interrupt_state.handlers[irq_number].handler = func;
+ cvmx_interrupt_state.handlers[irq_number].data = user_arg;
+ CVMX_SYNCWS;
+}
+
+
+static void cvmx_interrupt_ciu_initialize(cvmx_sysinfo_t *sys_info_ptr)
+{
+ int i;
+ int core = cvmx_get_core_num();
+
+ /* Disable all CIU interrupts by default */
+ cvmx_interrupt_ciu_en0_mirror = 0;
+ cvmx_interrupt_ciu_en1_mirror = 0;
+ cvmx_interrupt_ciu_61xx_timer_mirror = 0;
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(core * 2), cvmx_interrupt_ciu_en0_mirror);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((core * 2)+1), cvmx_interrupt_ciu_en0_mirror);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(core * 2), cvmx_interrupt_ciu_en1_mirror);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((core * 2)+1), cvmx_interrupt_ciu_en1_mirror);
+ if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2))
+ cvmx_write_csr(CVMX_CIU_EN2_PPX_IP2(cvmx_get_core_num()), cvmx_interrupt_ciu_61xx_timer_mirror);
+
+ if (!cvmx_coremask_first_core(sys_info_ptr->core_mask) || is_core_being_hot_plugged())
+ return;
+
+ /* On the first core, set up the maps */
+ for (i = 0; i < 64; i++) {
+ cvmx_ciu_en0_to_irq[i] = 0xff;
+ cvmx_ciu_en1_to_irq[i] = 0xff;
+ cvmx_ciu_61xx_timer_to_irq[i] = 0xff;
+ }
+
+ /* WORKQ */
+ for (i = 0; i < 16; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_WORKQ0 + i, 0, i);
+ /* GPIO */
+ for (i = 0; i < 16; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_GPIO0 + i, 0, i + 16);
+
+ /* MBOX */
+ for (i = 0; i < 2; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_MBOX0 + i, 0, i + 32);
+
+ /* UART */
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + 0, 0, 34);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + 1, 0, 35);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + 2, 1, 16);
+
+ /* PCI */
+ for (i = 0; i < 4; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_INT0 + i, 0, i + 36);
+
+ /* MSI */
+ for (i = 0; i < 4; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_MSI0 + i, 0, i + 40);
+
+ /* TWSI */
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_TWSI0 + 0, 0, 45);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_TWSI0 + 1, 0, 59);
+
+ /* other */
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_RML, 0, 46);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_TRACE0, 0, 47);
+
+ /* GMX_DRP */
+ for (i = 0; i < 2; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_GMX_DRP0 + i, 0, i + 48);
+
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD_DRP, 0, 50);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_KEY_ZERO, 0, 51);
+
+ /* TIMER0 */
+ for (i = 0; i < 4; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_TIMER0 + i, 0, i + 52);
+
+ /* TIMER4..9 */
+ for(i = 0; i < 6; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_TIMER4 + i, 8, i + 4);
+
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_USB0 + 0, 0, 56);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_USB0 + 1, 1, 17);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PCM, 0, 57);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_MPI, 0, 58);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_POWIQ, 0, 60);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_IPDPPTHR, 0, 61);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_MII0 + 0, 0, 62);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_MII0 + 1, 1, 18);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_BOOTDMA, 0, 63);
+
+ /* WDOG */
+ for (i = 0; i < 16; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_WDOG0 + i, 1, i);
+
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_NAND, 1, 19);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_MIO, 1, 20);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_IOB, 1, 21);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_FPA, 1, 22);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_POW, 1, 23);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_L2C, 1, 24);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD, 1, 25);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PIP, 1, 26);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PKO, 1, 27);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_ZIP, 1, 28);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_TIM, 1, 29);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_RAD, 1, 30);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_KEY, 1, 31);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_DFA, 1, 32);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_USBCTL, 1, 33);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_SLI, 1, 34);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI, 1, 35);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_AGX0, 1, 36);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_AGX0 + 1, 1, 37);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI_DMA, 1, 40);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_AGL, 1, 46);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PTP, 1, 47);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM0, 1, 48);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM1, 1, 49);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_SRIO0, 1, 50);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_SRIO1, 1, 51);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_LMC0, 1, 52);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_DFM, 1, 56);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_SRIO2, 1, 60);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_RST, 1, 63);
+}
+
+static void cvmx_interrupt_ciu2_initialize(cvmx_sysinfo_t *sys_info_ptr)
+{
+ int i;
+
+ /* Disable all CIU2 interrupts by default */
+
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_WRKQ(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_WRKQ(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_WRKQ(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_WDOG(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_WDOG(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_WDOG(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_RML(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_RML(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_RML(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_MIO(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_MIO(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_MIO(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_IO(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_IO(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_IO(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_MEM(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_MEM(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_MEM(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_PKT(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_PKT(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_PKT(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_GPIO(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_GPIO(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_GPIO(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_MBOX(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_MBOX(cvmx_get_core_num()), 0);
+ cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_MBOX(cvmx_get_core_num()), 0);
+
+ if (!cvmx_coremask_first_core(sys_info_ptr->core_mask) || is_core_being_hot_plugged())
+ return;
+
+ /* On the first core, set up the maps */
+ for (i = 0; i < 64; i++) {
+ cvmx_ciu2_wrkq_to_irq[i] = 0xff;
+ cvmx_ciu2_wdog_to_irq[i] = 0xff;
+ cvmx_ciu2_rml_to_irq[i] = 0xff;
+ cvmx_ciu2_mio_to_irq[i] = 0xff;
+ cvmx_ciu2_io_to_irq[i] = 0xff;
+ cvmx_ciu2_mem_to_irq[i] = 0xff;
+ cvmx_ciu2_eth_to_irq[i] = 0xff;
+ cvmx_ciu2_gpio_to_irq[i] = 0xff;
+ cvmx_ciu2_mbox_to_irq[i] = 0xff;
+ }
+
+ /* WORKQ */
+ for (i = 0; i < 64; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_WORKQ0 + i, 0, i);
+
+ /* GPIO */
+ for (i = 0; i < 16; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_GPIO0 + i, 7, i);
+
+ /* MBOX */
+ for (i = 0; i < 4; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_MBOX0 + i, 60, i);
+
+ /* UART */
+ for (i = 0; i < 2; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + i, 3, 36 + i);
+
+ /* PCI */
+ for (i = 0; i < 4; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_INT0 + i, 4, 16 + i);
+
+ /* MSI */
+ for (i = 0; i < 4; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_MSI0 + i, 4, 8 + i);
+
+ /* TWSI */
+ for (i = 0; i < 2; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_TWSI0 + i, 3, 32 + i);
+
+ /* TRACE */
+ for (i = 0; i < 4; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_TRACE0 + i, 2, 52 + i);
+
+ /* GMX_DRP */
+ for (i = 0; i < 5; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_GMX_DRP0 + i, 6, 8 + i);
+
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD_DRP, 3, 2);
+
+ /* TIMER0 */
+ for (i = 0; i < 4; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_TIMER0 + i, 3, 8 + i);
+
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_USB0, 3, 44);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_IPDPPTHR, 3, 0);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_MII0, 6, 40);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_BOOTDMA, 3, 18);
+
+ /* WDOG */
+ for (i = 0; i < 32; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_WDOG0 + i, 1, i);
+
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_NAND, 3, 16);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_MIO, 3, 17);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_IOB, 2, 0);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_FPA, 2, 4);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_POW, 2, 16);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_L2C, 2, 48);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD, 2, 5);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PIP, 2, 6);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PKO, 2, 7);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_ZIP, 2, 24);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_TIM, 2, 28);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_RAD, 2, 29);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_KEY, 2, 30);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_DFA, 2, 40);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_USBCTL, 3, 40);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_SLI, 2, 32);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI, 2, 33);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI_DMA, 2, 36);
+
+ /* AGX */
+ for (i = 0; i < 5; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_AGX0 + i, 6, i);
+
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_AGL, 6, 32);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PTP, 3, 48);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM0, 4, 32);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM1, 4, 32);
+
+ /* LMC */
+ for (i = 0; i < 4; i++)
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_LMC0 + i, 5, i);
+
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_RST, 3, 63);
+ __cvmx_interrupt_set_mapping(CVMX_IRQ_ILK, 6, 48);
+}
+
+/**
+ * Initialize the interrupt routine and copy the low level
+ * stub into the correct interrupt vector. This is called
+ * automatically during application startup.
+ */
+void cvmx_interrupt_initialize(void)
+{
+ void *low_level_loc;
+ cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+ int i;
+
+ if (cvmx_coremask_first_core(sys_info_ptr->core_mask) && !is_core_being_hot_plugged()) {
+#ifndef CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ /* We assume this relationship between the registers. */
+ CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x1000 == CVMX_CIU2_SRC_PPX_IP2_WDOG(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x2000 == CVMX_CIU2_SRC_PPX_IP2_RML(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x3000 == CVMX_CIU2_SRC_PPX_IP2_MIO(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x4000 == CVMX_CIU2_SRC_PPX_IP2_IO(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x5000 == CVMX_CIU2_SRC_PPX_IP2_MEM(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x6000 == CVMX_CIU2_SRC_PPX_IP2_PKT(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x7000 == CVMX_CIU2_SRC_PPX_IP2_GPIO(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x1000 == CVMX_CIU2_EN_PPX_IP2_WDOG_W1C(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x2000 == CVMX_CIU2_EN_PPX_IP2_RML_W1C(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x3000 == CVMX_CIU2_EN_PPX_IP2_MIO_W1C(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x4000 == CVMX_CIU2_EN_PPX_IP2_IO_W1C(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x5000 == CVMX_CIU2_EN_PPX_IP2_MEM_W1C(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x6000 == CVMX_CIU2_EN_PPX_IP2_PKT_W1C(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x7000 == CVMX_CIU2_EN_PPX_IP2_GPIO_W1C(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x1000 == CVMX_CIU2_EN_PPX_IP2_WDOG_W1S(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x2000 == CVMX_CIU2_EN_PPX_IP2_RML_W1S(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x3000 == CVMX_CIU2_EN_PPX_IP2_MIO_W1S(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x4000 == CVMX_CIU2_EN_PPX_IP2_IO_W1S(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x5000 == CVMX_CIU2_EN_PPX_IP2_MEM_W1S(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x6000 == CVMX_CIU2_EN_PPX_IP2_PKT_W1S(0));
+ CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x7000 == CVMX_CIU2_EN_PPX_IP2_GPIO_W1S(0));
+#endif /* !CVMX_ENABLE_CSR_ADDRESS_CHECKING */
+
+ for (i = 0; i < CVMX_IRQ_MAX; i++) {
+ cvmx_interrupt_state.handlers[i].handler = __cvmx_interrupt_default;
+ cvmx_interrupt_state.handlers[i].data = NULL;
+ cvmx_interrupt_state.handlers[i].handler_data = -1;
+ }
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ cvmx_interrupt_mask_irq = __cvmx_interrupt_ciu2_mask_irq;
+ cvmx_interrupt_unmask_irq = __cvmx_interrupt_ciu2_unmask_irq;
+ cvmx_interrupt_ciu2_initialize(sys_info_ptr);
+ /* Add an interrupt handler for the chained CIU interrupt */
+ cvmx_interrupt_register(CVMX_IRQ_MIPS2, __cvmx_interrupt_ciu2, NULL);
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2))
+ {
+ cvmx_interrupt_mask_irq = __cvmx_interrupt_ciu_mask_irq;
+ cvmx_interrupt_unmask_irq = __cvmx_interrupt_ciu_unmask_irq;
+ cvmx_interrupt_ciu_initialize(sys_info_ptr);
+
+ /* Add interrupt handlers for the chained CIU interrupts */
+ cvmx_interrupt_register(CVMX_IRQ_MIPS2, __cvmx_interrupt_ciu, NULL);
+ cvmx_interrupt_register(CVMX_IRQ_MIPS3, __cvmx_interrupt_ciu_cn61xx, NULL);
+ }
+ else
+ {
+ cvmx_interrupt_mask_irq = __cvmx_interrupt_ciu_mask_irq;
+ cvmx_interrupt_unmask_irq = __cvmx_interrupt_ciu_unmask_irq;
+ cvmx_interrupt_ciu_initialize(sys_info_ptr);
+
+ /* Add interrupt handlers for the chained CIU interrupts */
+ cvmx_interrupt_register(CVMX_IRQ_MIPS2, __cvmx_interrupt_ciu, NULL);
+ cvmx_interrupt_register(CVMX_IRQ_MIPS3, __cvmx_interrupt_ciu, NULL);
+ }
+
+ /* Move performance counter interrupts to IRQ 6 */
+ cvmx_update_perfcnt_irq();
+
+ /* Add an interrupt handler for Perf counter interrupts */
+ cvmx_interrupt_register(CVMX_IRQ_MIPS6, __cvmx_interrupt_perf, NULL);
+
+ if (cvmx_coremask_first_core(sys_info_ptr->core_mask) && !is_core_being_hot_plugged())
+ {
+ cvmx_interrupt_state.exception_handler = __cvmx_interrupt_default_exception_handler;
+
+ low_level_loc = CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,sys_info_ptr->exception_base_addr));
+ memcpy(low_level_loc + 0x80, (void*)cvmx_interrupt_stage1, 0x80);
+ memcpy(low_level_loc + 0x100, (void*)cvmx_interrupt_cache_error, 0x80);
+ memcpy(low_level_loc + 0x180, (void*)cvmx_interrupt_stage1, 0x80);
+ memcpy(low_level_loc + 0x200, (void*)cvmx_interrupt_stage1, 0x80);
+
+ /* Make sure the locations used to count Icache and Dcache exceptions
+ start out as zero */
+ cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 8), 0);
+ cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 16), 0);
+ cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 24), 0);
+ CVMX_SYNC;
+
+ /* Add an interrupt handler for ECC failures */
+ if (cvmx_error_initialize(0 /* || CVMX_ERROR_FLAGS_ECC_SINGLE_BIT */))
+ cvmx_warn("cvmx_error_initialize() failed\n");
+
+ /* Enable PIP/IPD, POW, PKO, FPA, NAND, KEY, RAD, L2C, LMC, GMX, AGL,
+ DFM, DFA, error handling interrupts. */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ int i;
+
+ for (i = 0; i < 5; i++)
+ {
+ cvmx_interrupt_register(CVMX_IRQ_AGX0+i, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_AGX0+i);
+ }
+ cvmx_interrupt_register(CVMX_IRQ_NAND, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_NAND);
+ cvmx_interrupt_register(CVMX_IRQ_MIO, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_MIO);
+ cvmx_interrupt_register(CVMX_IRQ_FPA, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_FPA);
+ cvmx_interrupt_register(CVMX_IRQ_IPD, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_IPD);
+ cvmx_interrupt_register(CVMX_IRQ_PIP, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_PIP);
+ cvmx_interrupt_register(CVMX_IRQ_POW, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_POW);
+ cvmx_interrupt_register(CVMX_IRQ_L2C, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_L2C);
+ cvmx_interrupt_register(CVMX_IRQ_PKO, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_PKO);
+ cvmx_interrupt_register(CVMX_IRQ_ZIP, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_ZIP);
+ cvmx_interrupt_register(CVMX_IRQ_RAD, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_RAD);
+ cvmx_interrupt_register(CVMX_IRQ_KEY, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_KEY);
+ /* Before enabling SLI interrupt clear any RML_TO interrupt */
+ if (cvmx_read_csr(CVMX_PEXP_SLI_INT_SUM) & 0x1)
+ {
+ cvmx_safe_printf("clearing pending SLI_INT_SUM[RML_TO] interrupt (ignore)\n");
+ cvmx_write_csr(CVMX_PEXP_SLI_INT_SUM, 1);
+ }
+ cvmx_interrupt_register(CVMX_IRQ_SLI, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_SLI);
+ cvmx_interrupt_register(CVMX_IRQ_DPI, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_DPI);
+ cvmx_interrupt_register(CVMX_IRQ_DFA, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_DFA);
+ cvmx_interrupt_register(CVMX_IRQ_AGL, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_AGL);
+ for (i = 0; i < 4; i++)
+ {
+ cvmx_interrupt_register(CVMX_IRQ_LMC0+i, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_LMC0+i);
+ }
+ cvmx_interrupt_register(CVMX_IRQ_DFM, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_DFM);
+ cvmx_interrupt_register(CVMX_IRQ_RST, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_RST);
+ cvmx_interrupt_register(CVMX_IRQ_ILK, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_ILK);
+ }
+ else
+ {
+ cvmx_interrupt_register(CVMX_IRQ_RML, __cvmx_interrupt_ecc, NULL);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_RML);
+ }
+
+ cvmx_atomic_set32(&cvmx_interrupt_initialize_flag, 1);
+ }
+
+ while (!cvmx_atomic_get32(&cvmx_interrupt_initialize_flag))
+ ; /* Wait for first core to finish above. */
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_MIPS2);
+ } else {
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_MIPS2);
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_MIPS3);
+ }
+
+ CVMX_ICACHE_INVALIDATE;
+
+ /* Enable interrupts for each core (bit0 of COP0 Status) */
+ cvmx_interrupt_restore(1);
+}
+
+
+
+/**
+ * Set the exception handler for all non interrupt sources.
+ *
+ * @param handler New exception handler
+ * @return Old exception handler
+ */
+cvmx_interrupt_exception_t cvmx_interrupt_set_exception(cvmx_interrupt_exception_t handler)
+{
+ cvmx_interrupt_exception_t result = cvmx_interrupt_state.exception_handler;
+ cvmx_interrupt_state.exception_handler = handler;
+ CVMX_SYNCWS;
+ return result;
+}
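+
+/* Illustrative sketch (editor's note, not part of the vendor SDK): a custom
+ * exception handler can be installed while keeping the previous one so it
+ * can be chained to or restored later. "my_exception_handler" and
+ * "old_handler" are hypothetical names.
+ *
+ *   static cvmx_interrupt_exception_t old_handler;
+ *
+ *   static void my_exception_handler(uint64_t *registers)
+ *   {
+ *       // ... inspect the saved register frame here ...
+ *       old_handler(registers);  // fall back to the previous handler
+ *   }
+ *
+ *   old_handler = cvmx_interrupt_set_exception(my_exception_handler);
+ */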
+#endif /* !__U_BOOT__ */
+
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-interrupt.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-interrupt.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-interrupt.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-interrupt.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,242 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the MIPS interrupts.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifndef __CVMX_INTERRUPT_H__
+#define __CVMX_INTERRUPT_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Enumeration of Interrupt numbers
+ */
+typedef enum
+{
+ /* 0 - 7 represent the 8 MIPS standard interrupt sources */
+ CVMX_IRQ_SW0 = 0,
+ CVMX_IRQ_SW1,
+ CVMX_IRQ_MIPS2,
+ CVMX_IRQ_MIPS3,
+ CVMX_IRQ_MIPS4,
+ CVMX_IRQ_MIPS5,
+ CVMX_IRQ_MIPS6,
+ CVMX_IRQ_MIPS7,
+ /* 64 WORKQ interrupts. */
+ CVMX_IRQ_WORKQ0,
+ /* 16 GPIO interrupts. */
+ CVMX_IRQ_GPIO0 = CVMX_IRQ_WORKQ0 + 64,
+ /* 4 MBOX interrupts. */
+ CVMX_IRQ_MBOX0 = CVMX_IRQ_GPIO0 + 16,
+ /* 3 UART interrupts. */
+ CVMX_IRQ_UART0 = CVMX_IRQ_MBOX0 + 4,
+ CVMX_IRQ_PCI_INT0 = CVMX_IRQ_UART0 + 3,
+ CVMX_IRQ_PCI_INT1,
+ CVMX_IRQ_PCI_INT2,
+ CVMX_IRQ_PCI_INT3,
+ CVMX_IRQ_PCI_MSI0,
+ CVMX_IRQ_PCI_MSI1,
+ CVMX_IRQ_PCI_MSI2,
+ CVMX_IRQ_PCI_MSI3,
+ /* 2 TWSI interrupts */
+ CVMX_IRQ_TWSI0,
+ CVMX_IRQ_RML = CVMX_IRQ_TWSI0 + 2,
+ /* 4 TRACE interrupts added in CN68XX */
+ CVMX_IRQ_TRACE0,
+ /* 5 GMX_DRP interrupts added in CN68XX */
+ CVMX_IRQ_GMX_DRP0 = CVMX_IRQ_TRACE0 + 4,
+ CVMX_IRQ_GMX_DRP1, /* Doesn't apply on CN52XX or CN63XX */
+ CVMX_IRQ_IPD_DRP = CVMX_IRQ_GMX_DRP0 + 5,
+ CVMX_IRQ_KEY_ZERO, /* Doesn't apply on CN52XX or CN63XX */
+ /* 4 TIMER interrupts. */
+ CVMX_IRQ_TIMER0,
+ /* 2 USB interrupts. */
+ CVMX_IRQ_USB0 = CVMX_IRQ_TIMER0 + 4, /* Doesn't apply on CN38XX or CN58XX */
+ CVMX_IRQ_PCM = CVMX_IRQ_USB0 + 2, /* Doesn't apply on CN52XX or CN63XX */
+ CVMX_IRQ_MPI, /* Doesn't apply on CN52XX or CN63XX */
+ CVMX_IRQ_POWIQ, /* Added in CN56XX */
+ CVMX_IRQ_IPDPPTHR, /* Added in CN56XX */
+ /* 2 MII interrupts. */
+ CVMX_IRQ_MII0, /* Added in CN56XX */
+ CVMX_IRQ_BOOTDMA = CVMX_IRQ_MII0 + 2, /* Added in CN56XX */
+
+ /* 32 WDOG interrupts. */
+ CVMX_IRQ_WDOG0,
+ CVMX_IRQ_NAND = CVMX_IRQ_WDOG0 + 32, /* Added in CN52XX */
+ CVMX_IRQ_MIO, /* Added in CN63XX */
+ CVMX_IRQ_IOB, /* Added in CN63XX */
+ CVMX_IRQ_FPA, /* Added in CN63XX */
+ CVMX_IRQ_POW, /* Added in CN63XX */
+ CVMX_IRQ_L2C, /* Added in CN63XX */
+ CVMX_IRQ_IPD, /* Added in CN63XX */
+ CVMX_IRQ_PIP, /* Added in CN63XX */
+ CVMX_IRQ_PKO, /* Added in CN63XX */
+ CVMX_IRQ_ZIP, /* Added in CN63XX */
+ CVMX_IRQ_TIM, /* Added in CN63XX */
+ CVMX_IRQ_RAD, /* Added in CN63XX */
+ CVMX_IRQ_KEY, /* Added in CN63XX */
+ CVMX_IRQ_DFA, /* Added in CN63XX */
+ CVMX_IRQ_USBCTL, /* Added in CN63XX */
+ CVMX_IRQ_SLI, /* Added in CN63XX */
+ CVMX_IRQ_DPI, /* Added in CN63XX */
+ /* 5 AGX interrupts added in CN68XX. */
+ CVMX_IRQ_AGX0, /* Added in CN63XX */
+
+ CVMX_IRQ_AGL = CVMX_IRQ_AGX0 + 5, /* Added in CN63XX */
+ CVMX_IRQ_PTP, /* Added in CN63XX */
+ CVMX_IRQ_PEM0, /* Added in CN63XX */
+ CVMX_IRQ_PEM1, /* Added in CN63XX */
+ CVMX_IRQ_SRIO0, /* Added in CN63XX */
+ CVMX_IRQ_SRIO1, /* Added in CN63XX */
+ CVMX_IRQ_LMC0, /* Added in CN63XX */
+ /* 4 LMC interrupts added in CN68XX. */
+ CVMX_IRQ_DFM = CVMX_IRQ_LMC0 + 4, /* Added in CN63XX */
+ CVMX_IRQ_RST, /* Added in CN63XX */
+ CVMX_IRQ_ILK, /* Added for CN68XX */
+ CVMX_IRQ_SRIO2, /* Added in CN66XX */
+ CVMX_IRQ_DPI_DMA, /* Added in CN61XX */
+ /* 6 additional timers added in CN61XX */
+ CVMX_IRQ_TIMER4, /* Added in CN61XX */
+ CVMX_IRQ_MAX = CVMX_IRQ_TIMER4 + 6 /* One greater than the last valid number.*/
+} cvmx_irq_t;
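+
+/* Illustrative note (editor's note, not part of the vendor SDK): related
+ * sources occupy contiguous blocks of this enumeration, so instance N of a
+ * source is addressed by adding N to the block's base enumerator, matching
+ * the per-block loops in cvmx-interrupt.c. For example, assuming a group
+ * index in [0, 63]:
+ *
+ *   int irq = CVMX_IRQ_WORKQ0 + group;   // POW work-queue group interrupt
+ *   int wdog_irq = CVMX_IRQ_WDOG0 + 3;   // watchdog for core 3
+ */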
+
+/**
+ * Function prototype for the exception handler
+ */
+typedef void (*cvmx_interrupt_exception_t)(uint64_t *registers);
+
+/**
+ * Function prototype for interrupt handlers
+ */
+typedef void (*cvmx_interrupt_func_t)(int irq_number, uint64_t *registers, void *user_arg);
+
+/**
+ * Register an interrupt handler for the specified interrupt number.
+ *
+ * @param irq_number Interrupt number to register for (0-135)
+ * @param func Function to call on interrupt.
+ * @param user_arg User data to pass to the interrupt handler
+ */
+void cvmx_interrupt_register(int irq_number, cvmx_interrupt_func_t func, void *user_arg);
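+
+/* Illustrative sketch (editor's note, not part of the vendor SDK): a minimal
+ * registration, assuming a hypothetical handler name. The handler receives
+ * the IRQ number, the saved register frame, and the user_arg supplied here;
+ * the IRQ must still be unmasked before it can fire.
+ *
+ *   static void my_wqe_handler(int irq_number, uint64_t *registers, void *user_arg)
+ *   {
+ *       cvmx_safe_printf("IRQ %d fired\n", irq_number);
+ *   }
+ *
+ *   cvmx_interrupt_register(CVMX_IRQ_WORKQ0, my_wqe_handler, NULL);
+ *   cvmx_interrupt_unmask_irq(CVMX_IRQ_WORKQ0);
+ */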
+
+/**
+ * Set the exception handler for all non interrupt sources.
+ *
+ * @param handler New exception handler
+ * @return Old exception handler
+ */
+cvmx_interrupt_exception_t cvmx_interrupt_set_exception(cvmx_interrupt_exception_t handler);
+
+
+/**
+ * Masks a given interrupt number.
+ *
+ * @param irq_number interrupt number to mask
+ */
+extern void (*cvmx_interrupt_mask_irq)(int irq_number);
+
+
+/**
+ * Unmasks a given interrupt number
+ *
+ * @param irq_number interrupt number to unmask
+ */
+extern void (*cvmx_interrupt_unmask_irq)(int irq_number);
+
+
+/* Disable interrupts by clearing bit 0 of the COP0 status register,
+** and return the previous contents of the status register.
+** Note: this is only used to track interrupt status. */
+static inline uint32_t cvmx_interrupt_disable_save(void)
+{
+ uint32_t flags;
+ asm volatile (
+ "DI %[flags]\n"
+ : [flags]"=r" (flags));
+ return flags;
+}
+
+/* Restore the contents of the cop0 status register. Used with
+** cvmx_interrupt_disable_save to allow recursive interrupt disabling */
+static inline void cvmx_interrupt_restore(uint32_t flags)
+{
+ /* If flags value indicates interrupts should be enabled, then enable them */
+ if (flags & 1)
+ {
+ asm volatile (
+ "EI \n"
+ ::);
+ }
+}
+
+#define cvmx_local_irq_save(x) ({x = cvmx_interrupt_disable_save();})
+#define cvmx_local_irq_restore(x) cvmx_interrupt_restore(x)
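+
+/* Illustrative sketch (editor's note, not part of the vendor SDK): the
+ * save/restore pair brackets a per-core critical section and nests safely,
+ * because restore only re-enables interrupts if they were enabled when the
+ * matching save was taken.
+ *
+ *   uint32_t flags;
+ *   cvmx_local_irq_save(flags);
+ *   // ... code that must not be interrupted on this core ...
+ *   cvmx_local_irq_restore(flags);
+ */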
+
+/**
+ * Utility function to do interrupt safe printf
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ #define cvmx_safe_printf printk
+#elif defined(CVMX_BUILD_FOR_LINUX_USER)
+ #define cvmx_safe_printf printf
+#else
+ extern void cvmx_safe_printf(const char* format, ... ) __attribute__ ((format(printf, 1, 2)));
+#endif
+
+#define PRINT_ERROR(format, ...) cvmx_safe_printf("ERROR " format, ##__VA_ARGS__)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-interrupt.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-iob-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-iob-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-iob-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1952 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-iob-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon iob.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_IOB_DEFS_H__
+#define __CVMX_IOB_DEFS_H__
+
+#define CVMX_IOB_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011800F00007F8ull))
+#define CVMX_IOB_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011800F0000050ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_DWB_PRI_CNT CVMX_IOB_DWB_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_DWB_PRI_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IOB_DWB_PRI_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000028ull);
+}
+#else
+#define CVMX_IOB_DWB_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000028ull))
+#endif
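+
+/* Editor's note (not part of the generated file): when
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING is set, each conditionally-present CSR
+ * macro routes through an inline function that warns if the register does
+ * not exist on the running chip model before returning the address;
+ * otherwise the macro collapses to the bare address. The same pattern
+ * repeats for every checked register below. */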
+#define CVMX_IOB_FAU_TIMEOUT (CVMX_ADD_IO_SEG(0x00011800F0000000ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_I2C_PRI_CNT CVMX_IOB_I2C_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_I2C_PRI_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IOB_I2C_PRI_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000010ull);
+}
+#else
+#define CVMX_IOB_I2C_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000010ull))
+#endif
+#define CVMX_IOB_INB_CONTROL_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000078ull))
+#define CVMX_IOB_INB_CONTROL_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F0000088ull))
+#define CVMX_IOB_INB_DATA_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000070ull))
+#define CVMX_IOB_INB_DATA_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F0000080ull))
+#define CVMX_IOB_INT_ENB (CVMX_ADD_IO_SEG(0x00011800F0000060ull))
+#define CVMX_IOB_INT_SUM (CVMX_ADD_IO_SEG(0x00011800F0000058ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_N2C_L2C_PRI_CNT CVMX_IOB_N2C_L2C_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_N2C_L2C_PRI_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IOB_N2C_L2C_PRI_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000020ull);
+}
+#else
+#define CVMX_IOB_N2C_L2C_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_N2C_RSP_PRI_CNT CVMX_IOB_N2C_RSP_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_N2C_RSP_PRI_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IOB_N2C_RSP_PRI_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000008ull);
+}
+#else
+#define CVMX_IOB_N2C_RSP_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_OUTB_COM_PRI_CNT CVMX_IOB_OUTB_COM_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_OUTB_COM_PRI_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IOB_OUTB_COM_PRI_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000040ull);
+}
+#else
+#define CVMX_IOB_OUTB_COM_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000040ull))
+#endif
+#define CVMX_IOB_OUTB_CONTROL_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000098ull))
+#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F00000A8ull))
+#define CVMX_IOB_OUTB_DATA_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000090ull))
+#define CVMX_IOB_OUTB_DATA_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F00000A0ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_OUTB_FPA_PRI_CNT CVMX_IOB_OUTB_FPA_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_OUTB_FPA_PRI_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IOB_OUTB_FPA_PRI_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000048ull);
+}
+#else
+#define CVMX_IOB_OUTB_FPA_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000048ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_OUTB_REQ_PRI_CNT CVMX_IOB_OUTB_REQ_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_OUTB_REQ_PRI_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IOB_OUTB_REQ_PRI_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000038ull);
+}
+#else
+#define CVMX_IOB_OUTB_REQ_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000038ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_P2C_REQ_PRI_CNT CVMX_IOB_P2C_REQ_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_P2C_REQ_PRI_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IOB_P2C_REQ_PRI_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000018ull);
+}
+#else
+#define CVMX_IOB_P2C_REQ_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_PKT_ERR CVMX_IOB_PKT_ERR_FUNC()
+static inline uint64_t CVMX_IOB_PKT_ERR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IOB_PKT_ERR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000068ull);
+}
+#else
+#define CVMX_IOB_PKT_ERR (CVMX_ADD_IO_SEG(0x00011800F0000068ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_CMB_CREDITS CVMX_IOB_TO_CMB_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_CMB_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IOB_TO_CMB_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F00000B0ull);
+}
+#else
+#define CVMX_IOB_TO_CMB_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00000B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_NCB_DID_00_CREDITS CVMX_IOB_TO_NCB_DID_00_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_NCB_DID_00_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB_TO_NCB_DID_00_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000800ull);
+}
+#else
+#define CVMX_IOB_TO_NCB_DID_00_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000800ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_NCB_DID_111_CREDITS CVMX_IOB_TO_NCB_DID_111_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_NCB_DID_111_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB_TO_NCB_DID_111_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000B78ull);
+}
+#else
+#define CVMX_IOB_TO_NCB_DID_111_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000B78ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_NCB_DID_223_CREDITS CVMX_IOB_TO_NCB_DID_223_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_NCB_DID_223_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB_TO_NCB_DID_223_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000EF8ull);
+}
+#else
+#define CVMX_IOB_TO_NCB_DID_223_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000EF8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_NCB_DID_24_CREDITS CVMX_IOB_TO_NCB_DID_24_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_NCB_DID_24_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB_TO_NCB_DID_24_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F00008C0ull);
+}
+#else
+#define CVMX_IOB_TO_NCB_DID_24_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00008C0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_NCB_DID_32_CREDITS CVMX_IOB_TO_NCB_DID_32_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_NCB_DID_32_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB_TO_NCB_DID_32_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000900ull);
+}
+#else
+#define CVMX_IOB_TO_NCB_DID_32_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000900ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_NCB_DID_40_CREDITS CVMX_IOB_TO_NCB_DID_40_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_NCB_DID_40_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB_TO_NCB_DID_40_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000940ull);
+}
+#else
+#define CVMX_IOB_TO_NCB_DID_40_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000940ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_NCB_DID_55_CREDITS CVMX_IOB_TO_NCB_DID_55_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_NCB_DID_55_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB_TO_NCB_DID_55_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F00009B8ull);
+}
+#else
+#define CVMX_IOB_TO_NCB_DID_55_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00009B8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_NCB_DID_64_CREDITS CVMX_IOB_TO_NCB_DID_64_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_NCB_DID_64_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB_TO_NCB_DID_64_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000A00ull);
+}
+#else
+#define CVMX_IOB_TO_NCB_DID_64_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000A00ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_NCB_DID_79_CREDITS CVMX_IOB_TO_NCB_DID_79_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_NCB_DID_79_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB_TO_NCB_DID_79_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000A78ull);
+}
+#else
+#define CVMX_IOB_TO_NCB_DID_79_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000A78ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_NCB_DID_96_CREDITS CVMX_IOB_TO_NCB_DID_96_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_NCB_DID_96_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB_TO_NCB_DID_96_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000B00ull);
+}
+#else
+#define CVMX_IOB_TO_NCB_DID_96_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000B00ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB_TO_NCB_DID_98_CREDITS CVMX_IOB_TO_NCB_DID_98_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_NCB_DID_98_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB_TO_NCB_DID_98_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0000B10ull);
+}
+#else
+#define CVMX_IOB_TO_NCB_DID_98_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000B10ull))
+#endif
+
+/**
+ * cvmx_iob_bist_status
+ *
+ * IOB_BIST_STATUS = BIST Status of IOB Memories
+ *
+ * The result of the BIST run on the IOB memories.
+ */
+union cvmx_iob_bist_status {
+ uint64_t u64;
+ struct cvmx_iob_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t ibd : 1; /**< ibd_bist_mem0_status */
+ uint64_t icd : 1; /**< icd_ncb_fifo_bist_status */
+#else
+ uint64_t icd : 1;
+ uint64_t ibd : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_iob_bist_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t icnrcb : 1; /**< Reserved */
+ uint64_t icr0 : 1; /**< Reserved */
+ uint64_t icr1 : 1; /**< Reserved */
+ uint64_t icnr1 : 1; /**< Reserved */
+ uint64_t icnr0 : 1; /**< icnr_reg_mem0_bist_status */
+ uint64_t ibdr0 : 1; /**< ibdr_bist_req_fifo0_status */
+ uint64_t ibdr1 : 1; /**< ibdr_bist_req_fifo1_status */
+ uint64_t ibr0 : 1; /**< ibr_bist_rsp_fifo0_status */
+ uint64_t ibr1 : 1; /**< ibr_bist_rsp_fifo1_status */
+ uint64_t icnrt : 1; /**< Reserved */
+ uint64_t ibrq0 : 1; /**< ibrq_bist_req_fifo0_status */
+ uint64_t ibrq1 : 1; /**< ibrq_bist_req_fifo1_status */
+ uint64_t icrn0 : 1; /**< icr_ncb_bist_mem0_status */
+ uint64_t icrn1 : 1; /**< icr_ncb_bist_mem1_status */
+ uint64_t icrp0 : 1; /**< icr_pko_bist_mem0_status */
+ uint64_t icrp1 : 1; /**< icr_pko_bist_mem1_status */
+ uint64_t ibd : 1; /**< ibd_bist_mem0_status */
+ uint64_t icd : 1; /**< icd_ncb_fifo_bist_status */
+#else
+ uint64_t icd : 1;
+ uint64_t ibd : 1;
+ uint64_t icrp1 : 1;
+ uint64_t icrp0 : 1;
+ uint64_t icrn1 : 1;
+ uint64_t icrn0 : 1;
+ uint64_t ibrq1 : 1;
+ uint64_t ibrq0 : 1;
+ uint64_t icnrt : 1;
+ uint64_t ibr1 : 1;
+ uint64_t ibr0 : 1;
+ uint64_t ibdr1 : 1;
+ uint64_t ibdr0 : 1;
+ uint64_t icnr0 : 1;
+ uint64_t icnr1 : 1;
+ uint64_t icr1 : 1;
+ uint64_t icr0 : 1;
+ uint64_t icnrcb : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn30xx;
+ struct cvmx_iob_bist_status_cn30xx cn31xx;
+ struct cvmx_iob_bist_status_cn30xx cn38xx;
+ struct cvmx_iob_bist_status_cn30xx cn38xxp2;
+ struct cvmx_iob_bist_status_cn30xx cn50xx;
+ struct cvmx_iob_bist_status_cn30xx cn52xx;
+ struct cvmx_iob_bist_status_cn30xx cn52xxp1;
+ struct cvmx_iob_bist_status_cn30xx cn56xx;
+ struct cvmx_iob_bist_status_cn30xx cn56xxp1;
+ struct cvmx_iob_bist_status_cn30xx cn58xx;
+ struct cvmx_iob_bist_status_cn30xx cn58xxp1;
+ struct cvmx_iob_bist_status_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t xmdfif : 1; /**< xmdfif_bist_status */
+ uint64_t xmcfif : 1; /**< xmcfif_bist_status */
+ uint64_t iorfif : 1; /**< iorfif_bist_status */
+ uint64_t rsdfif : 1; /**< rsdfif_bist_status */
+ uint64_t iocfif : 1; /**< iocfif_bist_status */
+ uint64_t icnrcb : 1; /**< icnr_cb_reg_fifo_bist_status */
+ uint64_t icr0 : 1; /**< icr_bist_req_fifo0_status */
+ uint64_t icr1 : 1; /**< icr_bist_req_fifo1_status */
+ uint64_t icnr1 : 1; /**< Reserved */
+ uint64_t icnr0 : 1; /**< icnr_reg_mem0_bist_status */
+ uint64_t ibdr0 : 1; /**< ibdr_bist_req_fifo0_status */
+ uint64_t ibdr1 : 1; /**< ibdr_bist_req_fifo1_status */
+ uint64_t ibr0 : 1; /**< ibr_bist_rsp_fifo0_status */
+ uint64_t ibr1 : 1; /**< ibr_bist_rsp_fifo1_status */
+ uint64_t icnrt : 1; /**< icnr_tag_cb_reg_fifo_bist_status */
+ uint64_t ibrq0 : 1; /**< ibrq_bist_req_fifo0_status */
+ uint64_t ibrq1 : 1; /**< ibrq_bist_req_fifo1_status */
+ uint64_t icrn0 : 1; /**< icr_ncb_bist_mem0_status */
+ uint64_t icrn1 : 1; /**< icr_ncb_bist_mem1_status */
+ uint64_t icrp0 : 1; /**< icr_pko_bist_mem0_status */
+ uint64_t icrp1 : 1; /**< icr_pko_bist_mem1_status */
+ uint64_t ibd : 1; /**< ibd_bist_mem0_status */
+ uint64_t icd : 1; /**< icd_ncb_fifo_bist_status */
+#else
+ uint64_t icd : 1;
+ uint64_t ibd : 1;
+ uint64_t icrp1 : 1;
+ uint64_t icrp0 : 1;
+ uint64_t icrn1 : 1;
+ uint64_t icrn0 : 1;
+ uint64_t ibrq1 : 1;
+ uint64_t ibrq0 : 1;
+ uint64_t icnrt : 1;
+ uint64_t ibr1 : 1;
+ uint64_t ibr0 : 1;
+ uint64_t ibdr1 : 1;
+ uint64_t ibdr0 : 1;
+ uint64_t icnr0 : 1;
+ uint64_t icnr1 : 1;
+ uint64_t icr1 : 1;
+ uint64_t icr0 : 1;
+ uint64_t icnrcb : 1;
+ uint64_t iocfif : 1;
+ uint64_t rsdfif : 1;
+ uint64_t iorfif : 1;
+ uint64_t xmcfif : 1;
+ uint64_t xmdfif : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } cn61xx;
+ struct cvmx_iob_bist_status_cn61xx cn63xx;
+ struct cvmx_iob_bist_status_cn61xx cn63xxp1;
+ struct cvmx_iob_bist_status_cn61xx cn66xx;
+ struct cvmx_iob_bist_status_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t xmdfif : 1; /**< xmdfif_bist_status */
+ uint64_t xmcfif : 1; /**< xmcfif_bist_status */
+ uint64_t iorfif : 1; /**< iorfif_bist_status */
+ uint64_t rsdfif : 1; /**< rsdfif_bist_status */
+ uint64_t iocfif : 1; /**< iocfif_bist_status */
+ uint64_t icnrcb : 1; /**< icnr_cb_reg_fifo_bist_status */
+ uint64_t icr0 : 1; /**< icr_bist_req_fifo0_status */
+ uint64_t icr1 : 1; /**< icr_bist_req_fifo1_status */
+ uint64_t icnr0 : 1; /**< icnr_reg_mem0_bist_status */
+ uint64_t ibr0 : 1; /**< ibr_bist_rsp_fifo0_status */
+ uint64_t ibr1 : 1; /**< ibr_bist_rsp_fifo1_status */
+ uint64_t icnrt : 1; /**< icnr_tag_cb_reg_fifo_bist_status */
+ uint64_t ibrq0 : 1; /**< ibrq_bist_req_fifo0_status */
+ uint64_t ibrq1 : 1; /**< ibrq_bist_req_fifo1_status */
+ uint64_t icrn0 : 1; /**< icr_ncb_bist_mem0_status */
+ uint64_t icrn1 : 1; /**< icr_ncb_bist_mem1_status */
+ uint64_t ibd : 1; /**< ibd_bist_mem0_status */
+ uint64_t icd : 1; /**< icd_ncb_fifo_bist_status */
+#else
+ uint64_t icd : 1;
+ uint64_t ibd : 1;
+ uint64_t icrn1 : 1;
+ uint64_t icrn0 : 1;
+ uint64_t ibrq1 : 1;
+ uint64_t ibrq0 : 1;
+ uint64_t icnrt : 1;
+ uint64_t ibr1 : 1;
+ uint64_t ibr0 : 1;
+ uint64_t icnr0 : 1;
+ uint64_t icr1 : 1;
+ uint64_t icr0 : 1;
+ uint64_t icnrcb : 1;
+ uint64_t iocfif : 1;
+ uint64_t rsdfif : 1;
+ uint64_t iorfif : 1;
+ uint64_t xmcfif : 1;
+ uint64_t xmdfif : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn68xx;
+ struct cvmx_iob_bist_status_cn68xx cn68xxp1;
+ struct cvmx_iob_bist_status_cn61xx cnf71xx;
+};
+typedef union cvmx_iob_bist_status cvmx_iob_bist_status_t;
+
+/**
+ * cvmx_iob_ctl_status
+ *
+ * IOB Control Status = IOB Control and Status Register
+ *
+ * Provides control for IOB functions.
+ */
+union cvmx_iob_ctl_status {
+ uint64_t u64;
+ struct cvmx_iob_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t fif_dly : 1; /**< Delay async FIFO counts to be used when clock ratio
+ is greater than 3:1. Writes should be followed by an
+ immediate read. */
+ uint64_t xmc_per : 4; /**< IBC XMC PUSH EARLY */
+ uint64_t reserved_5_5 : 1;
+ uint64_t outb_mat : 1; /**< Was a match on the outbound bus to the inb pattern
+ matchers. PASS2 FIELD. */
+ uint64_t inb_mat : 1; /**< Was a match on the inbound bus to the inb pattern
+ matchers. PASS2 FIELD. */
+ uint64_t pko_enb : 1; /**< Toggles the endian style of the FAU for the PKO.
+ '0' is for big-endian and '1' is for little-endian. */
+ uint64_t dwb_enb : 1; /**< Enables the DWB function of the IOB. */
+ uint64_t fau_end : 1; /**< Toggles the endian style of the FAU. '0' is for
+ big-endian and '1' is for little-endian. */
+#else
+ uint64_t fau_end : 1;
+ uint64_t dwb_enb : 1;
+ uint64_t pko_enb : 1;
+ uint64_t inb_mat : 1;
+ uint64_t outb_mat : 1;
+ uint64_t reserved_5_5 : 1;
+ uint64_t xmc_per : 4;
+ uint64_t fif_dly : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_iob_ctl_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t outb_mat : 1; /**< Was a match on the outbound bus to the inb pattern
+ matchers. */
+ uint64_t inb_mat : 1; /**< Was a match on the inbound bus to the inb pattern
+ matchers. */
+ uint64_t pko_enb : 1; /**< Toggles the endian style of the FAU for the PKO.
+ '0' is for big-endian and '1' is for little-endian. */
+ uint64_t dwb_enb : 1; /**< Enables the DWB function of the IOB. */
+ uint64_t fau_end : 1; /**< Toggles the endian style of the FAU. '0' is for
+ big-endian and '1' is for little-endian. */
+#else
+ uint64_t fau_end : 1;
+ uint64_t dwb_enb : 1;
+ uint64_t pko_enb : 1;
+ uint64_t inb_mat : 1;
+ uint64_t outb_mat : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn30xx;
+ struct cvmx_iob_ctl_status_cn30xx cn31xx;
+ struct cvmx_iob_ctl_status_cn30xx cn38xx;
+ struct cvmx_iob_ctl_status_cn30xx cn38xxp2;
+ struct cvmx_iob_ctl_status_cn30xx cn50xx;
+ struct cvmx_iob_ctl_status_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t rr_mode : 1; /**< When set to '1' will enable Round-Robin mode of next
+ transaction that could arbitrate for the XMB. */
+ uint64_t outb_mat : 1; /**< Was a match on the outbound bus to the inb pattern
+ matchers. PASS2 FIELD. */
+ uint64_t inb_mat : 1; /**< Was a match on the inbound bus to the inb pattern
+ matchers. PASS2 FIELD. */
+ uint64_t pko_enb : 1; /**< Toggles the endian style of the FAU for the PKO.
+ '0' is for big-endian and '1' is for little-endian. */
+ uint64_t dwb_enb : 1; /**< Enables the DWB function of the IOB. */
+ uint64_t fau_end : 1; /**< Toggles the endian style of the FAU. '0' is for
+ big-endian and '1' is for little-endian. */
+#else
+ uint64_t fau_end : 1;
+ uint64_t dwb_enb : 1;
+ uint64_t pko_enb : 1;
+ uint64_t inb_mat : 1;
+ uint64_t outb_mat : 1;
+ uint64_t rr_mode : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn52xx;
+ struct cvmx_iob_ctl_status_cn30xx cn52xxp1;
+ struct cvmx_iob_ctl_status_cn30xx cn56xx;
+ struct cvmx_iob_ctl_status_cn30xx cn56xxp1;
+ struct cvmx_iob_ctl_status_cn30xx cn58xx;
+ struct cvmx_iob_ctl_status_cn30xx cn58xxp1;
+ struct cvmx_iob_ctl_status_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t fif_dly : 1; /**< Delay async FIFO counts to be used when clock ratio
+ is greater than 3:1. Writes should be followed by an
+ immediate read. */
+ uint64_t xmc_per : 4; /**< IBC XMC PUSH EARLY */
+ uint64_t rr_mode : 1; /**< When set to '1' will enable Round-Robin mode of next
+ transaction that could arbitrate for the XMB. */
+ uint64_t outb_mat : 1; /**< Was a match on the outbound bus to the inb pattern
+ matchers. PASS2 FIELD. */
+ uint64_t inb_mat : 1; /**< Was a match on the inbound bus to the inb pattern
+ matchers. PASS2 FIELD. */
+ uint64_t pko_enb : 1; /**< Toggles the endian style of the FAU for the PKO.
+ '0' is for big-endian and '1' is for little-endian. */
+ uint64_t dwb_enb : 1; /**< Enables the DWB function of the IOB. */
+ uint64_t fau_end : 1; /**< Toggles the endian style of the FAU. '0' is for
+ big-endian and '1' is for little-endian. */
+#else
+ uint64_t fau_end : 1;
+ uint64_t dwb_enb : 1;
+ uint64_t pko_enb : 1;
+ uint64_t inb_mat : 1;
+ uint64_t outb_mat : 1;
+ uint64_t rr_mode : 1;
+ uint64_t xmc_per : 4;
+ uint64_t fif_dly : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn61xx;
+ struct cvmx_iob_ctl_status_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t xmc_per : 4; /**< IBC XMC PUSH EARLY */
+ uint64_t rr_mode : 1; /**< When set to '1' will enable Round-Robin mode of next
+ transaction that could arbitrate for the XMB. */
+ uint64_t outb_mat : 1; /**< Was a match on the outbound bus to the inb pattern
+ matchers. PASS2 FIELD. */
+ uint64_t inb_mat : 1; /**< Was a match on the inbound bus to the inb pattern
+ matchers. PASS2 FIELD. */
+ uint64_t pko_enb : 1; /**< Toggles the endian style of the FAU for the PKO.
+ '0' is for big-endian and '1' is for little-endian. */
+ uint64_t dwb_enb : 1; /**< Enables the DWB function of the IOB. */
+ uint64_t fau_end : 1; /**< Toggles the endian style of the FAU. '0' is for
+ big-endian and '1' is for little-endian. */
+#else
+ uint64_t fau_end : 1;
+ uint64_t dwb_enb : 1;
+ uint64_t pko_enb : 1;
+ uint64_t inb_mat : 1;
+ uint64_t outb_mat : 1;
+ uint64_t rr_mode : 1;
+ uint64_t xmc_per : 4;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn63xx;
+ struct cvmx_iob_ctl_status_cn63xx cn63xxp1;
+ struct cvmx_iob_ctl_status_cn61xx cn66xx;
+ struct cvmx_iob_ctl_status_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t fif_dly : 1; /**< Delay async FIFO counts to be used when clock ratio
+ is greater than 3:1. Writes should be followed by an
+ immediate read. */
+ uint64_t xmc_per : 4; /**< IBC XMC PUSH EARLY */
+ uint64_t rsvr5 : 1; /**< Reserved */
+ uint64_t outb_mat : 1; /**< Was a match on the outbound bus to the inb pattern
+ matchers. */
+ uint64_t inb_mat : 1; /**< Was a match on the inbound bus to the inb pattern
+ matchers. */
+ uint64_t pko_enb : 1; /**< Toggles the endian style of the FAU for the PKO.
+ '0' is for big-endian and '1' is for little-endian. */
+ uint64_t dwb_enb : 1; /**< Enables the DWB function of the IOB. */
+ uint64_t fau_end : 1; /**< Toggles the endian style of the FAU. '0' is for
+ big-endian and '1' is for little-endian. */
+#else
+ uint64_t fau_end : 1;
+ uint64_t dwb_enb : 1;
+ uint64_t pko_enb : 1;
+ uint64_t inb_mat : 1;
+ uint64_t outb_mat : 1;
+ uint64_t rsvr5 : 1;
+ uint64_t xmc_per : 4;
+ uint64_t fif_dly : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn68xx;
+ struct cvmx_iob_ctl_status_cn68xx cn68xxp1;
+ struct cvmx_iob_ctl_status_cn61xx cnf71xx;
+};
+typedef union cvmx_iob_ctl_status cvmx_iob_ctl_status_t;
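+
+/* Illustrative sketch (editor's note, not part of the generated file): these
+ * CSR unions are normally used in a read-modify-write through the named
+ * bitfields, e.g. to enable the Don't-Write-Back function:
+ *
+ *   cvmx_iob_ctl_status_t ctl;
+ *   ctl.u64 = cvmx_read_csr(CVMX_IOB_CTL_STATUS);
+ *   ctl.s.dwb_enb = 1;
+ *   cvmx_write_csr(CVMX_IOB_CTL_STATUS, ctl.u64);
+ */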
+
+/**
+ * cvmx_iob_dwb_pri_cnt
+ *
+ * DWB To CMB Priority Counter = Don't Write Back to CMB Priority Counter Enable and Timer Value
+ *
+ * Enables and supplies the timeout count for raising the priority of Don't Write Back requests to the L2C.
+ */
+union cvmx_iob_dwb_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_dwb_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt_enb : 1; /**< Enables the raising of CMB access priority
+ when CNT_VAL is reached. */
+ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising
+ the priority for access to CMB. */
+#else
+ uint64_t cnt_val : 15;
+ uint64_t cnt_enb : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_iob_dwb_pri_cnt_s cn38xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_dwb_pri_cnt_s cn52xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn56xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn58xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn61xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn63xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn66xx;
+ struct cvmx_iob_dwb_pri_cnt_s cnf71xx;
+};
+typedef union cvmx_iob_dwb_pri_cnt cvmx_iob_dwb_pri_cnt_t;
+
+/**
+ * cvmx_iob_fau_timeout
+ *
+ * FAU Timeout = Fetch and Add Unit Tag-Switch Timeout
+ *
+ * How many clock ticks the FAU unit will wait for a tag-switch before timing
+ * out, for Queue 0.
+ */
+union cvmx_iob_fau_timeout {
+ uint64_t u64;
+ struct cvmx_iob_fau_timeout_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t tout_enb : 1; /**< The enable for the FAU timeout feature.
+ '1' will enable the timeout, '0' will disable. */
+ uint64_t tout_val : 12; /**< When a tag request arrives from the PP a timer
+ associated with that PP is started. The timer, which
+ increments every 256 eclks, is compared to TOUT_VAL.
+ When the two are equal the IOB will flag the tag
+ request to complete as a time-out tag operation.
+ The 256 count timer used to increment the PP
+ associated timer is always running, so the first
+ increment of the PP associated timer may occur
+ anywhere within the first 256 eclks. Note that '0'
+ is an illegal value. */
+#else
+ uint64_t tout_val : 12;
+ uint64_t tout_enb : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_iob_fau_timeout_s cn30xx;
+ struct cvmx_iob_fau_timeout_s cn31xx;
+ struct cvmx_iob_fau_timeout_s cn38xx;
+ struct cvmx_iob_fau_timeout_s cn38xxp2;
+ struct cvmx_iob_fau_timeout_s cn50xx;
+ struct cvmx_iob_fau_timeout_s cn52xx;
+ struct cvmx_iob_fau_timeout_s cn52xxp1;
+ struct cvmx_iob_fau_timeout_s cn56xx;
+ struct cvmx_iob_fau_timeout_s cn56xxp1;
+ struct cvmx_iob_fau_timeout_s cn58xx;
+ struct cvmx_iob_fau_timeout_s cn58xxp1;
+ struct cvmx_iob_fau_timeout_s cn61xx;
+ struct cvmx_iob_fau_timeout_s cn63xx;
+ struct cvmx_iob_fau_timeout_s cn63xxp1;
+ struct cvmx_iob_fau_timeout_s cn66xx;
+ struct cvmx_iob_fau_timeout_s cn68xx;
+ struct cvmx_iob_fau_timeout_s cn68xxp1;
+ struct cvmx_iob_fau_timeout_s cnf71xx;
+};
+typedef union cvmx_iob_fau_timeout cvmx_iob_fau_timeout_t;
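+
+/* Illustrative sketch (editor's note, not part of the generated file):
+ * programming the tag-switch timeout. The comparison timer ticks once every
+ * 256 eclks, so TOUT_VAL = 16 times out after roughly 16 * 256 = 4096 eclks
+ * (plus up to 256 eclks of start-up skew); '0' is illegal.
+ *
+ *   cvmx_iob_fau_timeout_t fau_to;
+ *   fau_to.u64 = cvmx_read_csr(CVMX_IOB_FAU_TIMEOUT);
+ *   fau_to.s.tout_val = 16;
+ *   fau_to.s.tout_enb = 1;
+ *   cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
+ */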
+
+/**
+ * cvmx_iob_i2c_pri_cnt
+ *
+ * IPD To CMB Store Priority Counter = IPD to CMB Store Priority Counter Enable and Timer Value
+ *
+ * Enables and supplies the timeout count for raising the priority of IPD Store access to the CMB.
+ */
+union cvmx_iob_i2c_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_i2c_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt_enb : 1; /**< Enables the raising of CMB access priority
+ when CNT_VAL is reached. */
+ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising
+ the priority for access to CMB. */
+#else
+ uint64_t cnt_val : 15;
+ uint64_t cnt_enb : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_iob_i2c_pri_cnt_s cn38xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_i2c_pri_cnt_s cn52xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn56xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn58xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn61xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn63xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn66xx;
+ struct cvmx_iob_i2c_pri_cnt_s cnf71xx;
+};
+typedef union cvmx_iob_i2c_pri_cnt cvmx_iob_i2c_pri_cnt_t;
+
+/**
+ * cvmx_iob_inb_control_match
+ *
+ * IOB_INB_CONTROL_MATCH = IOB Inbound Control Match
+ *
+ * Match pattern for the inbound control to set the INB_MATCH_BIT. PASS-2 Register
+ */
+union cvmx_iob_inb_control_match {
+ uint64_t u64;
+ struct cvmx_iob_inb_control_match_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t mask : 8; /**< Pattern to match on the inbound NCB. */
+ uint64_t opc : 4; /**< Pattern to match on the inbound NCB. */
+ uint64_t dst : 9; /**< Pattern to match on the inbound NCB. */
+ uint64_t src : 8; /**< Pattern to match on the inbound NCB. */
+#else
+ uint64_t src : 8;
+ uint64_t dst : 9;
+ uint64_t opc : 4;
+ uint64_t mask : 8;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_iob_inb_control_match_s cn30xx;
+ struct cvmx_iob_inb_control_match_s cn31xx;
+ struct cvmx_iob_inb_control_match_s cn38xx;
+ struct cvmx_iob_inb_control_match_s cn38xxp2;
+ struct cvmx_iob_inb_control_match_s cn50xx;
+ struct cvmx_iob_inb_control_match_s cn52xx;
+ struct cvmx_iob_inb_control_match_s cn52xxp1;
+ struct cvmx_iob_inb_control_match_s cn56xx;
+ struct cvmx_iob_inb_control_match_s cn56xxp1;
+ struct cvmx_iob_inb_control_match_s cn58xx;
+ struct cvmx_iob_inb_control_match_s cn58xxp1;
+ struct cvmx_iob_inb_control_match_s cn61xx;
+ struct cvmx_iob_inb_control_match_s cn63xx;
+ struct cvmx_iob_inb_control_match_s cn63xxp1;
+ struct cvmx_iob_inb_control_match_s cn66xx;
+ struct cvmx_iob_inb_control_match_s cn68xx;
+ struct cvmx_iob_inb_control_match_s cn68xxp1;
+ struct cvmx_iob_inb_control_match_s cnf71xx;
+};
+typedef union cvmx_iob_inb_control_match cvmx_iob_inb_control_match_t;
+
+/**
+ * cvmx_iob_inb_control_match_enb
+ *
+ * IOB_INB_CONTROL_MATCH_ENB = IOB Inbound Control Match Enable
+ *
+ * Enables the match of the corresponding bit in the IOB_INB_CONTROL_MATCH register. PASS-2 Register
+ */
+union cvmx_iob_inb_control_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_inb_control_match_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t mask : 8; /**< Pattern to match on the inbound NCB. */
+ uint64_t opc : 4; /**< Pattern to match on the inbound NCB. */
+ uint64_t dst : 9; /**< Pattern to match on the inbound NCB. */
+ uint64_t src : 8; /**< Pattern to match on the inbound NCB. */
+#else
+ uint64_t src : 8;
+ uint64_t dst : 9;
+ uint64_t opc : 4;
+ uint64_t mask : 8;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_iob_inb_control_match_enb_s cn30xx;
+ struct cvmx_iob_inb_control_match_enb_s cn31xx;
+ struct cvmx_iob_inb_control_match_enb_s cn38xx;
+ struct cvmx_iob_inb_control_match_enb_s cn38xxp2;
+ struct cvmx_iob_inb_control_match_enb_s cn50xx;
+ struct cvmx_iob_inb_control_match_enb_s cn52xx;
+ struct cvmx_iob_inb_control_match_enb_s cn52xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn56xx;
+ struct cvmx_iob_inb_control_match_enb_s cn56xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn58xx;
+ struct cvmx_iob_inb_control_match_enb_s cn58xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn61xx;
+ struct cvmx_iob_inb_control_match_enb_s cn63xx;
+ struct cvmx_iob_inb_control_match_enb_s cn63xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn66xx;
+ struct cvmx_iob_inb_control_match_enb_s cn68xx;
+ struct cvmx_iob_inb_control_match_enb_s cn68xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cnf71xx;
+};
+typedef union cvmx_iob_inb_control_match_enb cvmx_iob_inb_control_match_enb_t;
+
+/**
+ * cvmx_iob_inb_data_match
+ *
+ * IOB_INB_DATA_MATCH = IOB Inbound Data Match
+ *
+ * Match pattern for the inbound data to set the INB_MATCH_BIT. PASS-2 Register
+ */
+union cvmx_iob_inb_data_match {
+ uint64_t u64;
+ struct cvmx_iob_inb_data_match_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Pattern to match on the inbound NCB. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_iob_inb_data_match_s cn30xx;
+ struct cvmx_iob_inb_data_match_s cn31xx;
+ struct cvmx_iob_inb_data_match_s cn38xx;
+ struct cvmx_iob_inb_data_match_s cn38xxp2;
+ struct cvmx_iob_inb_data_match_s cn50xx;
+ struct cvmx_iob_inb_data_match_s cn52xx;
+ struct cvmx_iob_inb_data_match_s cn52xxp1;
+ struct cvmx_iob_inb_data_match_s cn56xx;
+ struct cvmx_iob_inb_data_match_s cn56xxp1;
+ struct cvmx_iob_inb_data_match_s cn58xx;
+ struct cvmx_iob_inb_data_match_s cn58xxp1;
+ struct cvmx_iob_inb_data_match_s cn61xx;
+ struct cvmx_iob_inb_data_match_s cn63xx;
+ struct cvmx_iob_inb_data_match_s cn63xxp1;
+ struct cvmx_iob_inb_data_match_s cn66xx;
+ struct cvmx_iob_inb_data_match_s cn68xx;
+ struct cvmx_iob_inb_data_match_s cn68xxp1;
+ struct cvmx_iob_inb_data_match_s cnf71xx;
+};
+typedef union cvmx_iob_inb_data_match cvmx_iob_inb_data_match_t;
+
+/**
+ * cvmx_iob_inb_data_match_enb
+ *
+ * IOB_INB_DATA_MATCH_ENB = IOB Inbound Data Match Enable
+ *
+ * Enables the match of the corresponding bit in the IOB_INB_DATA_MATCH register. PASS-2 Register
+ */
+union cvmx_iob_inb_data_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_inb_data_match_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Bit to enable match of. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_iob_inb_data_match_enb_s cn30xx;
+ struct cvmx_iob_inb_data_match_enb_s cn31xx;
+ struct cvmx_iob_inb_data_match_enb_s cn38xx;
+ struct cvmx_iob_inb_data_match_enb_s cn38xxp2;
+ struct cvmx_iob_inb_data_match_enb_s cn50xx;
+ struct cvmx_iob_inb_data_match_enb_s cn52xx;
+ struct cvmx_iob_inb_data_match_enb_s cn52xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn56xx;
+ struct cvmx_iob_inb_data_match_enb_s cn56xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn58xx;
+ struct cvmx_iob_inb_data_match_enb_s cn58xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn61xx;
+ struct cvmx_iob_inb_data_match_enb_s cn63xx;
+ struct cvmx_iob_inb_data_match_enb_s cn63xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn66xx;
+ struct cvmx_iob_inb_data_match_enb_s cn68xx;
+ struct cvmx_iob_inb_data_match_enb_s cn68xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cnf71xx;
+};
+typedef union cvmx_iob_inb_data_match_enb cvmx_iob_inb_data_match_enb_t;
+
+/**
+ * cvmx_iob_int_enb
+ *
+ * IOB_INT_ENB = IOB's Interrupt Enable
+ *
+ * The IOB's interrupt enable register. This is a PASS-2 register.
+ */
+union cvmx_iob_int_enb {
+ uint64_t u64;
+ struct cvmx_iob_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t p_dat : 1; /**< When set (1) and bit 5 of the IOB_INT_SUM
+ register is asserted the IOB will assert an
+ interrupt. */
+ uint64_t np_dat : 1; /**< When set (1) and bit 4 of the IOB_INT_SUM
+ register is asserted the IOB will assert an
+ interrupt. */
+ uint64_t p_eop : 1; /**< When set (1) and bit 3 of the IOB_INT_SUM
+ register is asserted the IOB will assert an
+ interrupt. */
+ uint64_t p_sop : 1; /**< When set (1) and bit 2 of the IOB_INT_SUM
+ register is asserted the IOB will assert an
+ interrupt. */
+ uint64_t np_eop : 1; /**< When set (1) and bit 1 of the IOB_INT_SUM
+ register is asserted the IOB will assert an
+ interrupt. */
+ uint64_t np_sop : 1; /**< When set (1) and bit 0 of the IOB_INT_SUM
+ register is asserted the IOB will assert an
+ interrupt. */
+#else
+ uint64_t np_sop : 1;
+ uint64_t np_eop : 1;
+ uint64_t p_sop : 1;
+ uint64_t p_eop : 1;
+ uint64_t np_dat : 1;
+ uint64_t p_dat : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_iob_int_enb_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t p_eop : 1; /**< When set (1) and bit 3 of the IOB_INT_SUM
+ register is asserted the IOB will assert an
+ interrupt. */
+ uint64_t p_sop : 1; /**< When set (1) and bit 2 of the IOB_INT_SUM
+ register is asserted the IOB will assert an
+ interrupt. */
+ uint64_t np_eop : 1; /**< When set (1) and bit 1 of the IOB_INT_SUM
+ register is asserted the IOB will assert an
+ interrupt. */
+ uint64_t np_sop : 1; /**< When set (1) and bit 0 of the IOB_INT_SUM
+ register is asserted the IOB will assert an
+ interrupt. */
+#else
+ uint64_t np_sop : 1;
+ uint64_t np_eop : 1;
+ uint64_t p_sop : 1;
+ uint64_t p_eop : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn30xx;
+ struct cvmx_iob_int_enb_cn30xx cn31xx;
+ struct cvmx_iob_int_enb_cn30xx cn38xx;
+ struct cvmx_iob_int_enb_cn30xx cn38xxp2;
+ struct cvmx_iob_int_enb_s cn50xx;
+ struct cvmx_iob_int_enb_s cn52xx;
+ struct cvmx_iob_int_enb_s cn52xxp1;
+ struct cvmx_iob_int_enb_s cn56xx;
+ struct cvmx_iob_int_enb_s cn56xxp1;
+ struct cvmx_iob_int_enb_s cn58xx;
+ struct cvmx_iob_int_enb_s cn58xxp1;
+ struct cvmx_iob_int_enb_s cn61xx;
+ struct cvmx_iob_int_enb_s cn63xx;
+ struct cvmx_iob_int_enb_s cn63xxp1;
+ struct cvmx_iob_int_enb_s cn66xx;
+ struct cvmx_iob_int_enb_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } cn68xx;
+ struct cvmx_iob_int_enb_cn68xx cn68xxp1;
+ struct cvmx_iob_int_enb_s cnf71xx;
+};
+typedef union cvmx_iob_int_enb cvmx_iob_int_enb_t;
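These auto-generated unions are meant to be used read-modify-write through the u64 member, with the bitfield struct giving named access. A minimal sketch of turning on all six error interrupts, assuming the SDK's generic cvmx_read_csr()/cvmx_write_csr() accessors and a CVMX_IOB_INT_ENB address macro defined earlier in this header:

    cvmx_iob_int_enb_t enb;
    enb.u64 = cvmx_read_csr(CVMX_IOB_INT_ENB);
    enb.s.np_sop = 1; /* SOP-after-SOP, non-passthrough */
    enb.s.np_eop = 1; /* EOP-after-EOP, non-passthrough */
    enb.s.p_sop  = 1; /* SOP-after-SOP, passthrough */
    enb.s.p_eop  = 1; /* EOP-after-EOP, passthrough */
    enb.s.np_dat = 1; /* data before SOP, non-passthrough */
    enb.s.p_dat  = 1; /* data before SOP, passthrough */
    cvmx_write_csr(CVMX_IOB_INT_ENB, enb.u64);

On cn68xx the register is entirely reserved (see the cn68xx variant above), so this applies only to the other listed models.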
+
+/**
+ * cvmx_iob_int_sum
+ *
+ * IOB_INT_SUM = IOB's Interrupt Summary Register
+ *
+ * Contains the different interrupt summary bits of the IOB. This is a PASS-2 register.
+ */
+union cvmx_iob_int_sum {
+ uint64_t u64;
+ struct cvmx_iob_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+	uint64_t p_dat : 1; /**< Set when data arrives before an SOP for the same
+	                         port for a passthrough packet.
+	                         The first detected error associated with bits [5:0]
+	                         of this register will only be set here. A new bit
+	                         can be set when the previously reported bit is cleared. */
+	uint64_t np_dat : 1; /**< Set when data arrives before an SOP for the same
+	                         port for a non-passthrough packet.
+	                         The first detected error associated with bits [5:0]
+	                         of this register will only be set here. A new bit
+	                         can be set when the previously reported bit is cleared. */
+	uint64_t p_eop : 1; /**< Set when an EOP is followed by an EOP for the same
+	                         port for a passthrough packet.
+	                         The first detected error associated with bits [5:0]
+	                         of this register will only be set here. A new bit
+	                         can be set when the previously reported bit is cleared. */
+	uint64_t p_sop : 1; /**< Set when an SOP is followed by an SOP for the same
+	                         port for a passthrough packet.
+	                         The first detected error associated with bits [5:0]
+	                         of this register will only be set here. A new bit
+	                         can be set when the previously reported bit is cleared. */
+	uint64_t np_eop : 1; /**< Set when an EOP is followed by an EOP for the same
+	                         port for a non-passthrough packet.
+	                         The first detected error associated with bits [5:0]
+	                         of this register will only be set here. A new bit
+	                         can be set when the previously reported bit is cleared. */
+	uint64_t np_sop : 1; /**< Set when an SOP is followed by an SOP for the same
+	                         port for a non-passthrough packet.
+	                         The first detected error associated with bits [5:0]
+	                         of this register will only be set here. A new bit
+	                         can be set when the previously reported bit is cleared. */
+#else
+ uint64_t np_sop : 1;
+ uint64_t np_eop : 1;
+ uint64_t p_sop : 1;
+ uint64_t p_eop : 1;
+ uint64_t np_dat : 1;
+ uint64_t p_dat : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_iob_int_sum_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+	uint64_t p_eop : 1; /**< Set when an EOP is followed by an EOP for the same
+	                         port for a passthrough packet.
+	                         The first detected error associated with bits [3:0]
+	                         of this register will only be set here. A new bit
+	                         can be set when the previously reported bit is cleared. */
+	uint64_t p_sop : 1; /**< Set when an SOP is followed by an SOP for the same
+	                         port for a passthrough packet.
+	                         The first detected error associated with bits [3:0]
+	                         of this register will only be set here. A new bit
+	                         can be set when the previously reported bit is cleared. */
+	uint64_t np_eop : 1; /**< Set when an EOP is followed by an EOP for the same
+	                         port for a non-passthrough packet.
+	                         The first detected error associated with bits [3:0]
+	                         of this register will only be set here. A new bit
+	                         can be set when the previously reported bit is cleared. */
+	uint64_t np_sop : 1; /**< Set when an SOP is followed by an SOP for the same
+	                         port for a non-passthrough packet.
+	                         The first detected error associated with bits [3:0]
+	                         of this register will only be set here. A new bit
+	                         can be set when the previously reported bit is cleared. */
+#else
+ uint64_t np_sop : 1;
+ uint64_t np_eop : 1;
+ uint64_t p_sop : 1;
+ uint64_t p_eop : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn30xx;
+ struct cvmx_iob_int_sum_cn30xx cn31xx;
+ struct cvmx_iob_int_sum_cn30xx cn38xx;
+ struct cvmx_iob_int_sum_cn30xx cn38xxp2;
+ struct cvmx_iob_int_sum_s cn50xx;
+ struct cvmx_iob_int_sum_s cn52xx;
+ struct cvmx_iob_int_sum_s cn52xxp1;
+ struct cvmx_iob_int_sum_s cn56xx;
+ struct cvmx_iob_int_sum_s cn56xxp1;
+ struct cvmx_iob_int_sum_s cn58xx;
+ struct cvmx_iob_int_sum_s cn58xxp1;
+ struct cvmx_iob_int_sum_s cn61xx;
+ struct cvmx_iob_int_sum_s cn63xx;
+ struct cvmx_iob_int_sum_s cn63xxp1;
+ struct cvmx_iob_int_sum_s cn66xx;
+ struct cvmx_iob_int_sum_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } cn68xx;
+ struct cvmx_iob_int_sum_cn68xx cn68xxp1;
+ struct cvmx_iob_int_sum_s cnf71xx;
+};
+typedef union cvmx_iob_int_sum cvmx_iob_int_sum_t;
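The polling side, as a sketch. Whether a summary bit clears on a write of one is model-specific, so the final write-back is an assumption to verify against the chip's hardware reference manual; CVMX_IOB_INT_SUM and cvmx_dprintf() are likewise assumed from elsewhere in the SDK:

    cvmx_iob_int_sum_t sum;
    sum.u64 = cvmx_read_csr(CVMX_IOB_INT_SUM);
    if (sum.s.np_sop)
        cvmx_dprintf("IOB: SOP followed by SOP on a non-passthrough port\n");
    cvmx_write_csr(CVMX_IOB_INT_SUM, sum.u64); /* assumed write-1-to-clear */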
+
+/**
+ * cvmx_iob_n2c_l2c_pri_cnt
+ *
+ * NCB To CMB L2C Priority Counter = NCB to CMB L2C Priority Counter Enable and Timer Value
+ *
+ * Enables and supplies the timeout count for raising the priority of NCB Store/Load access to the CMB.
+ */
+union cvmx_iob_n2c_l2c_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt_enb : 1; /**< Enables the raising of CMB access priority
+ when CNT_VAL is reached. */
+ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising
+ the priority for access to CMB. */
+#else
+ uint64_t cnt_val : 15;
+ uint64_t cnt_enb : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn61xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn66xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cnf71xx;
+};
+typedef union cvmx_iob_n2c_l2c_pri_cnt cvmx_iob_n2c_l2c_pri_cnt_t;
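This register and the several *_PRI_CNT registers that follow share the same CNT_ENB/CNT_VAL layout, so one pattern covers them all. A sketch, assuming cvmx_write_csr() and a CVMX_IOB_N2C_L2C_PRI_CNT address macro defined earlier in this header:

    /* Raise NCB->CMB priority after 'clocks' core clocks (15-bit value). */
    cvmx_iob_n2c_l2c_pri_cnt_t pri;
    pri.u64 = 0;
    pri.s.cnt_val = clocks & 0x7fff;
    pri.s.cnt_enb = 1;
    cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri.u64);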
+
+/**
+ * cvmx_iob_n2c_rsp_pri_cnt
+ *
+ * NCB To CMB Response Priority Counter = NCB to CMB Response Priority Counter Enable and Timer Value
+ *
+ * Enables and supplies the timeout count for raising the priority of NCB Response accesses to the CMB.
+ */
+union cvmx_iob_n2c_rsp_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt_enb : 1; /**< Enables the raising of CMB access priority
+ when CNT_VAL is reached. */
+ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising
+ the priority for access to CMB. */
+#else
+ uint64_t cnt_val : 15;
+ uint64_t cnt_enb : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn61xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn66xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cnf71xx;
+};
+typedef union cvmx_iob_n2c_rsp_pri_cnt cvmx_iob_n2c_rsp_pri_cnt_t;
+
+/**
+ * cvmx_iob_outb_com_pri_cnt
+ *
+ * Commit To NCB Priority Counter = Commit to NCB Priority Counter Enable and Timer Value
+ *
+ * Enables and supplies the timeout count for raising the priority of Commit request to the Outbound NCB.
+ */
+union cvmx_iob_outb_com_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_outb_com_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt_enb : 1; /**< Enables the raising of NCB access priority
+ when CNT_VAL is reached. */
+ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising
+ the priority for access to NCB. */
+#else
+ uint64_t cnt_val : 15;
+ uint64_t cnt_enb : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_iob_outb_com_pri_cnt_s cn38xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_outb_com_pri_cnt_s cn52xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn56xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn58xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn61xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn66xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn68xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn68xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cnf71xx;
+};
+typedef union cvmx_iob_outb_com_pri_cnt cvmx_iob_outb_com_pri_cnt_t;
+
+/**
+ * cvmx_iob_outb_control_match
+ *
+ * IOB_OUTB_CONTROL_MATCH = IOB Outbound Control Match
+ *
+ * Match pattern for the outbound control to set the OUTB_MATCH_BIT. PASS-2 Register
+ */
+union cvmx_iob_outb_control_match {
+ uint64_t u64;
+ struct cvmx_iob_outb_control_match_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63 : 38;
+ uint64_t mask : 8; /**< Pattern to match on the outbound NCB. */
+ uint64_t eot : 1; /**< Pattern to match on the outbound NCB. */
+ uint64_t dst : 8; /**< Pattern to match on the outbound NCB. */
+ uint64_t src : 9; /**< Pattern to match on the outbound NCB. */
+#else
+ uint64_t src : 9;
+ uint64_t dst : 8;
+ uint64_t eot : 1;
+ uint64_t mask : 8;
+ uint64_t reserved_26_63 : 38;
+#endif
+ } s;
+ struct cvmx_iob_outb_control_match_s cn30xx;
+ struct cvmx_iob_outb_control_match_s cn31xx;
+ struct cvmx_iob_outb_control_match_s cn38xx;
+ struct cvmx_iob_outb_control_match_s cn38xxp2;
+ struct cvmx_iob_outb_control_match_s cn50xx;
+ struct cvmx_iob_outb_control_match_s cn52xx;
+ struct cvmx_iob_outb_control_match_s cn52xxp1;
+ struct cvmx_iob_outb_control_match_s cn56xx;
+ struct cvmx_iob_outb_control_match_s cn56xxp1;
+ struct cvmx_iob_outb_control_match_s cn58xx;
+ struct cvmx_iob_outb_control_match_s cn58xxp1;
+ struct cvmx_iob_outb_control_match_s cn61xx;
+ struct cvmx_iob_outb_control_match_s cn63xx;
+ struct cvmx_iob_outb_control_match_s cn63xxp1;
+ struct cvmx_iob_outb_control_match_s cn66xx;
+ struct cvmx_iob_outb_control_match_s cn68xx;
+ struct cvmx_iob_outb_control_match_s cn68xxp1;
+ struct cvmx_iob_outb_control_match_s cnf71xx;
+};
+typedef union cvmx_iob_outb_control_match cvmx_iob_outb_control_match_t;
+
+/**
+ * cvmx_iob_outb_control_match_enb
+ *
+ * IOB_OUTB_CONTROL_MATCH_ENB = IOB Outbound Control Match Enable
+ *
+ * Enables the match of the corresponding bit in the IOB_OUTB_CONTROL_MATCH register. PASS-2 Register
+ */
+union cvmx_iob_outb_control_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_outb_control_match_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63 : 38;
+ uint64_t mask : 8; /**< Pattern to match on the outbound NCB. */
+ uint64_t eot : 1; /**< Pattern to match on the outbound NCB. */
+ uint64_t dst : 8; /**< Pattern to match on the outbound NCB. */
+ uint64_t src : 9; /**< Pattern to match on the outbound NCB. */
+#else
+ uint64_t src : 9;
+ uint64_t dst : 8;
+ uint64_t eot : 1;
+ uint64_t mask : 8;
+ uint64_t reserved_26_63 : 38;
+#endif
+ } s;
+ struct cvmx_iob_outb_control_match_enb_s cn30xx;
+ struct cvmx_iob_outb_control_match_enb_s cn31xx;
+ struct cvmx_iob_outb_control_match_enb_s cn38xx;
+ struct cvmx_iob_outb_control_match_enb_s cn38xxp2;
+ struct cvmx_iob_outb_control_match_enb_s cn50xx;
+ struct cvmx_iob_outb_control_match_enb_s cn52xx;
+ struct cvmx_iob_outb_control_match_enb_s cn52xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn56xx;
+ struct cvmx_iob_outb_control_match_enb_s cn56xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn58xx;
+ struct cvmx_iob_outb_control_match_enb_s cn58xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn61xx;
+ struct cvmx_iob_outb_control_match_enb_s cn63xx;
+ struct cvmx_iob_outb_control_match_enb_s cn63xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn66xx;
+ struct cvmx_iob_outb_control_match_enb_s cn68xx;
+ struct cvmx_iob_outb_control_match_enb_s cn68xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cnf71xx;
+};
+typedef union cvmx_iob_outb_control_match_enb cvmx_iob_outb_control_match_enb_t;
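The MATCH and MATCH_ENB registers work as a pair: the ENB register selects which pattern bits participate in the compare, so cleared enable bits are don't-cares. A hypothetical sketch that matches only on the 9-bit SRC field of outbound control cycles (the 0x42 source ID is made up for illustration, and the two address macros are assumed from the register list earlier in this header):

    cvmx_iob_outb_control_match_t match;
    cvmx_iob_outb_control_match_enb_t enb;
    match.u64 = 0;
    match.s.src = 0x42;  /* hypothetical source ID to watch for */
    enb.u64 = 0;
    enb.s.src = 0x1ff;   /* compare all 9 SRC bits; ignore DST/EOT/MASK */
    cvmx_write_csr(CVMX_IOB_OUTB_CONTROL_MATCH, match.u64);
    cvmx_write_csr(CVMX_IOB_OUTB_CONTROL_MATCH_ENB, enb.u64);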
+
+/**
+ * cvmx_iob_outb_data_match
+ *
+ * IOB_OUTB_DATA_MATCH = IOB Outbound Data Match
+ *
+ * Match pattern for the outbound data to set the OUTB_MATCH_BIT. PASS-2 Register
+ */
+union cvmx_iob_outb_data_match {
+ uint64_t u64;
+ struct cvmx_iob_outb_data_match_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Pattern to match on the outbound NCB. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_iob_outb_data_match_s cn30xx;
+ struct cvmx_iob_outb_data_match_s cn31xx;
+ struct cvmx_iob_outb_data_match_s cn38xx;
+ struct cvmx_iob_outb_data_match_s cn38xxp2;
+ struct cvmx_iob_outb_data_match_s cn50xx;
+ struct cvmx_iob_outb_data_match_s cn52xx;
+ struct cvmx_iob_outb_data_match_s cn52xxp1;
+ struct cvmx_iob_outb_data_match_s cn56xx;
+ struct cvmx_iob_outb_data_match_s cn56xxp1;
+ struct cvmx_iob_outb_data_match_s cn58xx;
+ struct cvmx_iob_outb_data_match_s cn58xxp1;
+ struct cvmx_iob_outb_data_match_s cn61xx;
+ struct cvmx_iob_outb_data_match_s cn63xx;
+ struct cvmx_iob_outb_data_match_s cn63xxp1;
+ struct cvmx_iob_outb_data_match_s cn66xx;
+ struct cvmx_iob_outb_data_match_s cn68xx;
+ struct cvmx_iob_outb_data_match_s cn68xxp1;
+ struct cvmx_iob_outb_data_match_s cnf71xx;
+};
+typedef union cvmx_iob_outb_data_match cvmx_iob_outb_data_match_t;
+
+/**
+ * cvmx_iob_outb_data_match_enb
+ *
+ * IOB_OUTB_DATA_MATCH_ENB = IOB Outbound Data Match Enable
+ *
+ * Enables the match of the corresponding bit in the IOB_OUTB_DATA_MATCH register. PASS-2 Register
+ */
+union cvmx_iob_outb_data_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_outb_data_match_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Bit to enable match of. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_iob_outb_data_match_enb_s cn30xx;
+ struct cvmx_iob_outb_data_match_enb_s cn31xx;
+ struct cvmx_iob_outb_data_match_enb_s cn38xx;
+ struct cvmx_iob_outb_data_match_enb_s cn38xxp2;
+ struct cvmx_iob_outb_data_match_enb_s cn50xx;
+ struct cvmx_iob_outb_data_match_enb_s cn52xx;
+ struct cvmx_iob_outb_data_match_enb_s cn52xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn56xx;
+ struct cvmx_iob_outb_data_match_enb_s cn56xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn58xx;
+ struct cvmx_iob_outb_data_match_enb_s cn58xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn61xx;
+ struct cvmx_iob_outb_data_match_enb_s cn63xx;
+ struct cvmx_iob_outb_data_match_enb_s cn63xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn66xx;
+ struct cvmx_iob_outb_data_match_enb_s cn68xx;
+ struct cvmx_iob_outb_data_match_enb_s cn68xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cnf71xx;
+};
+typedef union cvmx_iob_outb_data_match_enb cvmx_iob_outb_data_match_enb_t;
+
+/**
+ * cvmx_iob_outb_fpa_pri_cnt
+ *
+ * FPA To NCB Priority Counter = FPA Returns to NCB Priority Counter Enable and Timer Value
+ *
+ * Enables and supplies the timeout count for raising the priority of FPA Return Page requests to the Outbound NCB.
+ */
+union cvmx_iob_outb_fpa_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_outb_fpa_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt_enb : 1; /**< Enables the raising of NCB access priority
+ when CNT_VAL is reached. */
+ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising
+ the priority for access to NCB. */
+#else
+ uint64_t cnt_val : 15;
+ uint64_t cnt_enb : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn38xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn52xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn56xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn58xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn61xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn66xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn68xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn68xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cnf71xx;
+};
+typedef union cvmx_iob_outb_fpa_pri_cnt cvmx_iob_outb_fpa_pri_cnt_t;
+
+/**
+ * cvmx_iob_outb_req_pri_cnt
+ *
+ * Request To NCB Priority Counter = Request to NCB Priority Counter Enable and Timer Value
+ *
+ * Enables and supplies the timeout count for raising the priority of Request transfers to the Outbound NCB.
+ */
+union cvmx_iob_outb_req_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_outb_req_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt_enb : 1; /**< Enables the raising of NCB access priority
+ when CNT_VAL is reached. */
+ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising
+ the priority for access to NCB. */
+#else
+ uint64_t cnt_val : 15;
+ uint64_t cnt_enb : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_iob_outb_req_pri_cnt_s cn38xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_outb_req_pri_cnt_s cn52xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn56xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn58xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn61xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn66xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn68xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn68xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cnf71xx;
+};
+typedef union cvmx_iob_outb_req_pri_cnt cvmx_iob_outb_req_pri_cnt_t;
+
+/**
+ * cvmx_iob_p2c_req_pri_cnt
+ *
+ * PKO To CMB Response Priority Counter = PKO to CMB Response Priority Counter Enable and Timer Value
+ *
+ * Enables and supplies the timeout count for raising the priority of PKO Load access to the CMB.
+ */
+union cvmx_iob_p2c_req_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_p2c_req_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt_enb : 1; /**< Enables the raising of CMB access priority
+ when CNT_VAL is reached. */
+ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising
+ the priority for access to CMB. */
+#else
+ uint64_t cnt_val : 15;
+ uint64_t cnt_enb : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn38xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn52xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn56xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn58xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn61xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn63xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn66xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cnf71xx;
+};
+typedef union cvmx_iob_p2c_req_pri_cnt cvmx_iob_p2c_req_pri_cnt_t;
+
+/**
+ * cvmx_iob_pkt_err
+ *
+ * IOB_PKT_ERR = IOB Packet Error Register
+ *
+ * Provides status about the failing packet receive error. This is a PASS-2 register.
+ */
+union cvmx_iob_pkt_err {
+ uint64_t u64;
+ struct cvmx_iob_pkt_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+	uint64_t vport : 6; /**< When an IOB_INT_SUM[3:0] bit is set, this field
+	                         latches the failing vport associated with the
+	                         IOB_INT_SUM[3:0] bit that is set. */
+	uint64_t port : 6; /**< When an IOB_INT_SUM[3:0] bit is set, this field
+	                         latches the failing port associated with the
+	                         IOB_INT_SUM[3:0] bit that is set. */
+#else
+ uint64_t port : 6;
+ uint64_t vport : 6;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_iob_pkt_err_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+	uint64_t port : 6; /**< When an IOB_INT_SUM[3:0] bit is set, this field
+	                         latches the failing port associated with the
+	                         IOB_INT_SUM[3:0] bit that is set. */
+#else
+ uint64_t port : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn30xx;
+ struct cvmx_iob_pkt_err_cn30xx cn31xx;
+ struct cvmx_iob_pkt_err_cn30xx cn38xx;
+ struct cvmx_iob_pkt_err_cn30xx cn38xxp2;
+ struct cvmx_iob_pkt_err_cn30xx cn50xx;
+ struct cvmx_iob_pkt_err_cn30xx cn52xx;
+ struct cvmx_iob_pkt_err_cn30xx cn52xxp1;
+ struct cvmx_iob_pkt_err_cn30xx cn56xx;
+ struct cvmx_iob_pkt_err_cn30xx cn56xxp1;
+ struct cvmx_iob_pkt_err_cn30xx cn58xx;
+ struct cvmx_iob_pkt_err_cn30xx cn58xxp1;
+ struct cvmx_iob_pkt_err_s cn61xx;
+ struct cvmx_iob_pkt_err_s cn63xx;
+ struct cvmx_iob_pkt_err_s cn63xxp1;
+ struct cvmx_iob_pkt_err_s cn66xx;
+ struct cvmx_iob_pkt_err_s cnf71xx;
+};
+typedef union cvmx_iob_pkt_err cvmx_iob_pkt_err_t;
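A sketch of reporting the latched failing port once one of the IOB_INT_SUM packet-error bits fires. The VPORT field exists only in the cn61xx-and-later layout, so the older cn30xx variant exposes PORT alone; cvmx_dprintf() and CVMX_IOB_PKT_ERR are assumed from elsewhere in the SDK:

    cvmx_iob_pkt_err_t err;
    err.u64 = cvmx_read_csr(CVMX_IOB_PKT_ERR);
    cvmx_dprintf("IOB packet error: port %u vport %u\n",
                 (unsigned)err.s.port, (unsigned)err.s.vport);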
+
+/**
+ * cvmx_iob_to_cmb_credits
+ *
+ * IOB_TO_CMB_CREDITS = IOB To CMB Credits
+ *
+ * Controls the number of reads and writes that may be outstanding to the L2C (via the CMB).
+ */
+union cvmx_iob_to_cmb_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_cmb_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t ncb_rd : 3; /**< Number of NCB reads that can be out to L2C where
+ 0 == 8-credits. */
+ uint64_t ncb_wr : 3; /**< Number of NCB/PKI writes that can be out to L2C
+ where 0 == 8-credits. */
+#else
+ uint64_t ncb_wr : 3;
+ uint64_t ncb_rd : 3;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_iob_to_cmb_credits_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t pko_rd : 3; /**< Number of PKO reads that can be out to L2C where
+ 0 == 8-credits. */
+ uint64_t ncb_rd : 3; /**< Number of NCB reads that can be out to L2C where
+ 0 == 8-credits. */
+ uint64_t ncb_wr : 3; /**< Number of NCB/PKI writes that can be out to L2C
+ where 0 == 8-credits. */
+#else
+ uint64_t ncb_wr : 3;
+ uint64_t ncb_rd : 3;
+ uint64_t pko_rd : 3;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn52xx;
+ struct cvmx_iob_to_cmb_credits_cn52xx cn61xx;
+ struct cvmx_iob_to_cmb_credits_cn52xx cn63xx;
+ struct cvmx_iob_to_cmb_credits_cn52xx cn63xxp1;
+ struct cvmx_iob_to_cmb_credits_cn52xx cn66xx;
+ struct cvmx_iob_to_cmb_credits_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t dwb : 3; /**< Number of DWBs that can be out to L2C where
+ 0 == 8-credits. */
+ uint64_t ncb_rd : 3; /**< Number of NCB reads that can be out to L2C where
+ 0 == 8-credits. */
+ uint64_t ncb_wr : 3; /**< Number of NCB/PKI writes that can be out to L2C
+ where 0 == 8-credits. */
+#else
+ uint64_t ncb_wr : 3;
+ uint64_t ncb_rd : 3;
+ uint64_t dwb : 3;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn68xx;
+ struct cvmx_iob_to_cmb_credits_cn68xx cn68xxp1;
+ struct cvmx_iob_to_cmb_credits_cn52xx cnf71xx;
+};
+typedef union cvmx_iob_to_cmb_credits cvmx_iob_to_cmb_credits_t;
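Note the credit encoding: a field value of 0 means the maximum (8 credits for the 3-bit fields), so writing N with 1 <= N <= 7 caps outstanding operations at N. A sketch that limits NCB reads to 4 on a cn52xx-layout part while keeping writes at the full 8, assuming a CVMX_IOB_TO_CMB_CREDITS macro from the register list earlier in this header:

    cvmx_iob_to_cmb_credits_t credits;
    credits.u64 = cvmx_read_csr(CVMX_IOB_TO_CMB_CREDITS);
    credits.cn52xx.ncb_rd = 4; /* at most 4 outstanding NCB reads to L2C */
    credits.cn52xx.ncb_wr = 0; /* 0 encodes the full 8 write credits */
    cvmx_write_csr(CVMX_IOB_TO_CMB_CREDITS, credits.u64);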
+
+/**
+ * cvmx_iob_to_ncb_did_00_credits
+ *
+ * IOB_TO_NCB_DID_00_CREDITS = IOB NCB DID 00 Credits
+ *
+ * Number of credits for NCB DID 00.
+ */
+union cvmx_iob_to_ncb_did_00_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_00_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t crd : 7; /**< Number of credits for DID. Writing this field will
+	                         cause the credits to be set to the value written.
+ Reading this field will give the number of credits
+ PRESENTLY available. */
+#else
+ uint64_t crd : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_iob_to_ncb_did_00_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_00_credits_s cn68xxp1;
+};
+typedef union cvmx_iob_to_ncb_did_00_credits cvmx_iob_to_ncb_did_00_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_111_credits
+ *
+ * IOB_TO_NCB_DID_111_CREDITS = IOB NCB DID 111 Credits
+ *
+ * Number of credits for NCB DID 111.
+ */
+union cvmx_iob_to_ncb_did_111_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_111_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t crd : 7; /**< Number of credits for DID. Writing this field will
+	                         cause the credits to be set to the value written.
+ Reading this field will give the number of credits
+ PRESENTLY available. */
+#else
+ uint64_t crd : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_iob_to_ncb_did_111_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_111_credits_s cn68xxp1;
+};
+typedef union cvmx_iob_to_ncb_did_111_credits cvmx_iob_to_ncb_did_111_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_223_credits
+ *
+ * IOB_TO_NCB_DID_223_CREDITS = IOB NCB DID 223 Credits
+ *
+ * Number of credits for NCB DID 223.
+ */
+union cvmx_iob_to_ncb_did_223_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_223_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t crd : 7; /**< Number of credits for DID. Writing this field will
+	                         cause the credits to be set to the value written.
+ Reading this field will give the number of credits
+ PRESENTLY available. */
+#else
+ uint64_t crd : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_iob_to_ncb_did_223_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_223_credits_s cn68xxp1;
+};
+typedef union cvmx_iob_to_ncb_did_223_credits cvmx_iob_to_ncb_did_223_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_24_credits
+ *
+ * IOB_TO_NCB_DID_24_CREDITS = IOB NCB DID 24 Credits
+ *
+ * Number of credits for NCB DID 24.
+ */
+union cvmx_iob_to_ncb_did_24_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_24_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t crd : 7; /**< Number of credits for DID. Writing this field will
+	                         cause the credits to be set to the value written.
+ Reading this field will give the number of credits
+ PRESENTLY available. */
+#else
+ uint64_t crd : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_iob_to_ncb_did_24_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_24_credits_s cn68xxp1;
+};
+typedef union cvmx_iob_to_ncb_did_24_credits cvmx_iob_to_ncb_did_24_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_32_credits
+ *
+ * IOB_TO_NCB_DID_32_CREDITS = IOB NCB DID 32 Credits
+ *
+ * Number of credits for NCB DID 32.
+ */
+union cvmx_iob_to_ncb_did_32_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_32_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t crd : 7; /**< Number of credits for DID. Writing this field will
+	                         cause the credits to be set to the value written.
+ Reading this field will give the number of credits
+ PRESENTLY available. */
+#else
+ uint64_t crd : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_iob_to_ncb_did_32_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_32_credits_s cn68xxp1;
+};
+typedef union cvmx_iob_to_ncb_did_32_credits cvmx_iob_to_ncb_did_32_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_40_credits
+ *
+ * IOB_TO_NCB_DID_40_CREDITS = IOB NCB DID 40 Credits
+ *
+ * Number of credits for NCB DID 40.
+ */
+union cvmx_iob_to_ncb_did_40_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_40_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t crd : 7; /**< Number of credits for DID. Writing this field will
+	                         cause the credits to be set to the value written.
+ Reading this field will give the number of credits
+ PRESENTLY available. */
+#else
+ uint64_t crd : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_iob_to_ncb_did_40_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_40_credits_s cn68xxp1;
+};
+typedef union cvmx_iob_to_ncb_did_40_credits cvmx_iob_to_ncb_did_40_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_55_credits
+ *
+ * IOB_TO_NCB_DID_55_CREDITS = IOB NCB DID 55 Credits
+ *
+ * Number of credits for NCB DID 55.
+ */
+union cvmx_iob_to_ncb_did_55_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_55_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t crd : 7; /**< Number of credits for DID. Writing this field will
+	                         cause the credits to be set to the value written.
+ Reading this field will give the number of credits
+ PRESENTLY available. */
+#else
+ uint64_t crd : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_iob_to_ncb_did_55_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_55_credits_s cn68xxp1;
+};
+typedef union cvmx_iob_to_ncb_did_55_credits cvmx_iob_to_ncb_did_55_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_64_credits
+ *
+ * IOB_TO_NCB_DID_64_CREDITS = IOB NCB DID 64 Credits
+ *
+ * Number of credits for NCB DID 64.
+ */
+union cvmx_iob_to_ncb_did_64_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_64_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t crd : 7; /**< Number of credits for DID. Writing this field will
+	                         cause the credits to be set to the value written.
+ Reading this field will give the number of credits
+ PRESENTLY available. */
+#else
+ uint64_t crd : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_iob_to_ncb_did_64_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_64_credits_s cn68xxp1;
+};
+typedef union cvmx_iob_to_ncb_did_64_credits cvmx_iob_to_ncb_did_64_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_79_credits
+ *
+ * IOB_TO_NCB_DID_79_CREDITS = IOB NCB DID 79 Credits
+ *
+ * Number of credits for NCB DID 79.
+ */
+union cvmx_iob_to_ncb_did_79_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_79_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t crd : 7; /**< Number of credits for DID. Writing this field will
+	                         cause the credits to be set to the value written.
+ Reading this field will give the number of credits
+ PRESENTLY available. */
+#else
+ uint64_t crd : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_iob_to_ncb_did_79_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_79_credits_s cn68xxp1;
+};
+typedef union cvmx_iob_to_ncb_did_79_credits cvmx_iob_to_ncb_did_79_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_96_credits
+ *
+ * IOB_TO_NCB_DID_96_CREDITS = IOB NCB DID 96 Credits
+ *
+ * Number of credits for NCB DID 96.
+ */
+union cvmx_iob_to_ncb_did_96_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_96_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t crd : 7; /**< Number of credits for DID. Writing this field will
+	                         cause the credits to be set to the value written.
+ Reading this field will give the number of credits
+ PRESENTLY available. */
+#else
+ uint64_t crd : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_iob_to_ncb_did_96_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_96_credits_s cn68xxp1;
+};
+typedef union cvmx_iob_to_ncb_did_96_credits cvmx_iob_to_ncb_did_96_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_98_credits
+ *
+ * IOB_TO_NCB_DID_98_CREDITS = IOB NCB DID 98 Credits
+ *
+ * Number of credits for NCB DID 98.
+ */
+union cvmx_iob_to_ncb_did_98_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_98_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t crd : 7; /**< Number of credits for DID. Writing this field will
+	                         cause the credits to be set to the value written.
+ Reading this field will give the number of credits
+ PRESENTLY available. */
+#else
+ uint64_t crd : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_iob_to_ncb_did_98_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_98_credits_s cn68xxp1;
+};
+typedef union cvmx_iob_to_ncb_did_98_credits cvmx_iob_to_ncb_did_98_credits_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-iob-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-iob1-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-iob1-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-iob1-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,185 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-iob1-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon iob1.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_IOB1_DEFS_H__
+#define __CVMX_IOB1_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB1_BIST_STATUS CVMX_IOB1_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_IOB1_BIST_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB1_BIST_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F00107F8ull);
+}
+#else
+#define CVMX_IOB1_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011800F00107F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB1_CTL_STATUS CVMX_IOB1_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_IOB1_CTL_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB1_CTL_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F0010050ull);
+}
+#else
+#define CVMX_IOB1_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011800F0010050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IOB1_TO_CMB_CREDITS CVMX_IOB1_TO_CMB_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB1_TO_CMB_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IOB1_TO_CMB_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800F00100B0ull);
+}
+#else
+#define CVMX_IOB1_TO_CMB_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00100B0ull))
+#endif
+
+/**
+ * cvmx_iob1_bist_status
+ *
+ * IOB_BIST_STATUS = BIST Status of IOB Memories
+ *
+ * The result of the BIST run on the IOB memories.
+ */
+union cvmx_iob1_bist_status {
+ uint64_t u64;
+ struct cvmx_iob1_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t xmdfif : 1; /**< xmdfif_bist_status */
+ uint64_t xmcfif : 1; /**< xmcfif_bist_status */
+ uint64_t iorfif : 1; /**< iorfif_bist_status */
+ uint64_t rsdfif : 1; /**< rsdfif_bist_status */
+ uint64_t iocfif : 1; /**< iocfif_bist_status */
+ uint64_t reserved_2_3 : 2;
+ uint64_t icrp0 : 1; /**< icr_pko_bist_mem0_status */
+ uint64_t icrp1 : 1; /**< icr_pko_bist_mem1_status */
+#else
+ uint64_t icrp1 : 1;
+ uint64_t icrp0 : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t iocfif : 1;
+ uint64_t rsdfif : 1;
+ uint64_t iorfif : 1;
+ uint64_t xmcfif : 1;
+ uint64_t xmdfif : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_iob1_bist_status_s cn68xx;
+ struct cvmx_iob1_bist_status_s cn68xxp1;
+};
+typedef union cvmx_iob1_bist_status cvmx_iob1_bist_status_t;
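A sketch of consuming the BIST result: any set bit names a failing memory, so a healthy part reads zero. Only cvmx_read_csr() and cvmx_dprintf() are assumed; CVMX_IOB1_BIST_STATUS is defined just above:

    cvmx_iob1_bist_status_t bist;
    bist.u64 = cvmx_read_csr(CVMX_IOB1_BIST_STATUS);
    if (bist.u64 != 0)
        cvmx_dprintf("IOB1 BIST failure mask: 0x%llx\n",
                     (unsigned long long)bist.u64);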
+
+/**
+ * cvmx_iob1_ctl_status
+ *
+ * IOB Control Status = IOB Control and Status Register
+ *
+ * Provides control for IOB functions.
+ */
+union cvmx_iob1_ctl_status {
+ uint64_t u64;
+ struct cvmx_iob1_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t fif_dly : 1; /**< Delay async FIFO counts to be used when clock ratio
+	                         is greater than 3:1. Writes should be followed by an
+ immediate read. */
+ uint64_t xmc_per : 4; /**< IBC XMC PUSH EARLY */
+ uint64_t reserved_0_5 : 6;
+#else
+ uint64_t reserved_0_5 : 6;
+ uint64_t xmc_per : 4;
+ uint64_t fif_dly : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_iob1_ctl_status_s cn68xx;
+ struct cvmx_iob1_ctl_status_s cn68xxp1;
+};
+typedef union cvmx_iob1_ctl_status cvmx_iob1_ctl_status_t;
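The FIF_DLY description above carries a real ordering rule: a write to this register should be followed by an immediate read. A sketch honoring that, using the CVMX_IOB1_CTL_STATUS macro defined above and the assumed generic accessors:

    cvmx_iob1_ctl_status_t ctl;
    ctl.u64 = cvmx_read_csr(CVMX_IOB1_CTL_STATUS);
    ctl.s.fif_dly = 1; /* clock ratio above 3:1 */
    cvmx_write_csr(CVMX_IOB1_CTL_STATUS, ctl.u64);
    (void)cvmx_read_csr(CVMX_IOB1_CTL_STATUS); /* required read-back */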
+
+/**
+ * cvmx_iob1_to_cmb_credits
+ *
+ * IOB_TO_CMB_CREDITS = IOB To CMB Credits
+ *
+ * Controls the number of reads and writes that may be outstanding to the L2C (via the CMB).
+ */
+union cvmx_iob1_to_cmb_credits {
+ uint64_t u64;
+ struct cvmx_iob1_to_cmb_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t pko_rd : 4; /**< Number of PKO reads that can be out to L2C where
+ 0 == 16-credits. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t ncb_wr : 3; /**< Number of NCB/PKI writes that can be out to L2C
+ where 0 == 8-credits. */
+#else
+ uint64_t ncb_wr : 3;
+ uint64_t reserved_3_5 : 3;
+ uint64_t pko_rd : 4;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_iob1_to_cmb_credits_s cn68xx;
+ struct cvmx_iob1_to_cmb_credits_s cn68xxp1;
+};
+typedef union cvmx_iob1_to_cmb_credits cvmx_iob1_to_cmb_credits_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-iob1-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ipd-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ipd-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ipd-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,3589 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-ipd-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon ipd.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_IPD_DEFS_H__
+#define __CVMX_IPD_DEFS_H__
+
+#define CVMX_IPD_1ST_MBUFF_SKIP (CVMX_ADD_IO_SEG(0x00014F0000000000ull))
+#define CVMX_IPD_1st_NEXT_PTR_BACK (CVMX_ADD_IO_SEG(0x00014F0000000150ull))
+#define CVMX_IPD_2nd_NEXT_PTR_BACK (CVMX_ADD_IO_SEG(0x00014F0000000158ull))
+#define CVMX_IPD_BIST_STATUS (CVMX_ADD_IO_SEG(0x00014F00000007F8ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_BPIDX_MBUF_TH(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_IPD_BPIDX_MBUF_TH(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F0000002000ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_IPD_BPIDX_MBUF_TH(offset) (CVMX_ADD_IO_SEG(0x00014F0000002000ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_BPID_BP_COUNTERX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_IPD_BPID_BP_COUNTERX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F0000003000ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_IPD_BPID_BP_COUNTERX(offset) (CVMX_ADD_IO_SEG(0x00014F0000003000ull) + ((offset) & 63) * 8)
+#endif
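All of the indexed IPD accessors follow this shape: with CVMX_ENABLE_CSR_ADDRESS_CHECKING defined, the inline function validates the index against the per-model ranges and warns before computing the address; without it, the bare macro computes the address directly. Usage is the same either way, for example (offset 5 chosen arbitrarily, cvmx_read_csr() assumed):

    uint64_t th = cvmx_read_csr(CVMX_IPD_BPIDX_MBUF_TH(5));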
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_BP_PRT_RED_END CVMX_IPD_BP_PRT_RED_END_FUNC()
+static inline uint64_t CVMX_IPD_BP_PRT_RED_END_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IPD_BP_PRT_RED_END not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000328ull);
+}
+#else
+#define CVMX_IPD_BP_PRT_RED_END (CVMX_ADD_IO_SEG(0x00014F0000000328ull))
+#endif
+#define CVMX_IPD_CLK_COUNT (CVMX_ADD_IO_SEG(0x00014F0000000338ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_CREDITS CVMX_IPD_CREDITS_FUNC()
+static inline uint64_t CVMX_IPD_CREDITS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IPD_CREDITS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000004410ull);
+}
+#else
+#define CVMX_IPD_CREDITS (CVMX_ADD_IO_SEG(0x00014F0000004410ull))
+#endif
+#define CVMX_IPD_CTL_STATUS (CVMX_ADD_IO_SEG(0x00014F0000000018ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_ECC_CTL CVMX_IPD_ECC_CTL_FUNC()
+static inline uint64_t CVMX_IPD_ECC_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IPD_ECC_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000004408ull);
+}
+#else
+#define CVMX_IPD_ECC_CTL (CVMX_ADD_IO_SEG(0x00014F0000004408ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_FREE_PTR_FIFO_CTL CVMX_IPD_FREE_PTR_FIFO_CTL_FUNC()
+static inline uint64_t CVMX_IPD_FREE_PTR_FIFO_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IPD_FREE_PTR_FIFO_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000780ull);
+}
+#else
+#define CVMX_IPD_FREE_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000780ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_FREE_PTR_VALUE CVMX_IPD_FREE_PTR_VALUE_FUNC()
+static inline uint64_t CVMX_IPD_FREE_PTR_VALUE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IPD_FREE_PTR_VALUE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000788ull);
+}
+#else
+#define CVMX_IPD_FREE_PTR_VALUE (CVMX_ADD_IO_SEG(0x00014F0000000788ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_HOLD_PTR_FIFO_CTL CVMX_IPD_HOLD_PTR_FIFO_CTL_FUNC()
+static inline uint64_t CVMX_IPD_HOLD_PTR_FIFO_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IPD_HOLD_PTR_FIFO_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000790ull);
+}
+#else
+#define CVMX_IPD_HOLD_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000790ull))
+#endif
+#define CVMX_IPD_INT_ENB (CVMX_ADD_IO_SEG(0x00014F0000000160ull))
+#define CVMX_IPD_INT_SUM (CVMX_ADD_IO_SEG(0x00014F0000000168ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_NEXT_PKT_PTR CVMX_IPD_NEXT_PKT_PTR_FUNC()
+static inline uint64_t CVMX_IPD_NEXT_PKT_PTR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IPD_NEXT_PKT_PTR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F00000007A0ull);
+}
+#else
+#define CVMX_IPD_NEXT_PKT_PTR (CVMX_ADD_IO_SEG(0x00014F00000007A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_NEXT_WQE_PTR CVMX_IPD_NEXT_WQE_PTR_FUNC()
+static inline uint64_t CVMX_IPD_NEXT_WQE_PTR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IPD_NEXT_WQE_PTR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F00000007A8ull);
+}
+#else
+#define CVMX_IPD_NEXT_WQE_PTR (CVMX_ADD_IO_SEG(0x00014F00000007A8ull))
+#endif
+#define CVMX_IPD_NOT_1ST_MBUFF_SKIP (CVMX_ADD_IO_SEG(0x00014F0000000008ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_ON_BP_DROP_PKTX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_IPD_ON_BP_DROP_PKTX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00014F0000004100ull);
+}
+#else
+#define CVMX_IPD_ON_BP_DROP_PKTX(block_id) (CVMX_ADD_IO_SEG(0x00014F0000004100ull))
+#endif
+#define CVMX_IPD_PACKET_MBUFF_SIZE (CVMX_ADD_IO_SEG(0x00014F0000000010ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_PKT_ERR CVMX_IPD_PKT_ERR_FUNC()
+static inline uint64_t CVMX_IPD_PKT_ERR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IPD_PKT_ERR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F00000003F0ull);
+}
+#else
+#define CVMX_IPD_PKT_ERR (CVMX_ADD_IO_SEG(0x00014F00000003F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_PKT_PTR_VALID CVMX_IPD_PKT_PTR_VALID_FUNC()
+static inline uint64_t CVMX_IPD_PKT_PTR_VALID_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IPD_PKT_PTR_VALID not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000358ull);
+}
+#else
+#define CVMX_IPD_PKT_PTR_VALID (CVMX_ADD_IO_SEG(0x00014F0000000358ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_PORTX_BP_PAGE_CNT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35))))))
+ cvmx_warn("CVMX_IPD_PORTX_BP_PAGE_CNT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F0000000028ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_IPD_PORTX_BP_PAGE_CNT(offset) (CVMX_ADD_IO_SEG(0x00014F0000000028ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_PORTX_BP_PAGE_CNT2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_IPD_PORTX_BP_PAGE_CNT2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F0000000368ull) + ((offset) & 63) * 8 - 8*36;
+}
+#else
+#define CVMX_IPD_PORTX_BP_PAGE_CNT2(offset) (CVMX_ADD_IO_SEG(0x00014F0000000368ull) + ((offset) & 63) * 8 - 8*36)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_PORTX_BP_PAGE_CNT3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset >= 40) && (offset <= 47)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset >= 40) && (offset <= 47))))))
+ cvmx_warn("CVMX_IPD_PORTX_BP_PAGE_CNT3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F00000003D0ull) + ((offset) & 63) * 8 - 8*40;
+}
+#else
+#define CVMX_IPD_PORTX_BP_PAGE_CNT3(offset) (CVMX_ADD_IO_SEG(0x00014F00000003D0ull) + ((offset) & 63) * 8 - 8*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F0000000388ull) + ((offset) & 63) * 8 - 8*36;
+}
+#else
+#define CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000388ull) + ((offset) & 63) * 8 - 8*36)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_PORT_BP_COUNTERS3_PAIRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset >= 40) && (offset <= 43))))))
+ cvmx_warn("CVMX_IPD_PORT_BP_COUNTERS3_PAIRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F00000003B0ull) + ((offset) & 63) * 8 - 8*40;
+}
+#else
+#define CVMX_IPD_PORT_BP_COUNTERS3_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F00000003B0ull) + ((offset) & 63) * 8 - 8*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_PORT_BP_COUNTERS4_PAIRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset >= 44) && (offset <= 47)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 44) && (offset <= 47)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_IPD_PORT_BP_COUNTERS4_PAIRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F0000000410ull) + ((offset) & 63) * 8 - 8*44;
+}
+#else
+#define CVMX_IPD_PORT_BP_COUNTERS4_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000410ull) + ((offset) & 63) * 8 - 8*44)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_PORT_BP_COUNTERS_PAIRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35))))))
+ cvmx_warn("CVMX_IPD_PORT_BP_COUNTERS_PAIRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F00000001B8ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_IPD_PORT_BP_COUNTERS_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F00000001B8ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_PORT_PTR_FIFO_CTL CVMX_IPD_PORT_PTR_FIFO_CTL_FUNC()
+static inline uint64_t CVMX_IPD_PORT_PTR_FIFO_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IPD_PORT_PTR_FIFO_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000798ull);
+}
+#else
+#define CVMX_IPD_PORT_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000798ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_PORT_QOS_INTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset == 0) || (offset == 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0) || (offset == 2) || (offset == 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0) || (offset == 2) || (offset == 4) || (offset == 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0) || (offset == 4) || (offset == 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0) || (offset == 2) || (offset == 4) || (offset == 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0) || (offset == 2) || (offset == 4) || (offset == 5)))))
+ cvmx_warn("CVMX_IPD_PORT_QOS_INTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F0000000808ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_IPD_PORT_QOS_INTX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000808ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_PORT_QOS_INT_ENBX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset == 0) || (offset == 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0) || (offset == 2) || (offset == 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0) || (offset == 2) || (offset == 4) || (offset == 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0) || (offset == 4) || (offset == 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0) || (offset == 2) || (offset == 4) || (offset == 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0) || (offset == 2) || (offset == 4) || (offset == 5)))))
+ cvmx_warn("CVMX_IPD_PORT_QOS_INT_ENBX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F0000000848ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_IPD_PORT_QOS_INT_ENBX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000848ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_PORT_QOS_X_CNT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31) || ((offset >= 256) && (offset <= 319)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31) || ((offset >= 128) && (offset <= 159)) || ((offset >= 256) && (offset <= 319)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31) || ((offset >= 128) && (offset <= 159)) || ((offset >= 256) && (offset <= 383)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31) || ((offset >= 256) && (offset <= 351)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31) || ((offset >= 128) && (offset <= 159)) || ((offset >= 256) && (offset <= 335)) || ((offset >= 352) && (offset <= 383)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 511))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31) || ((offset >= 128) && (offset <= 159)) || ((offset >= 256) && (offset <= 383))))))
+ cvmx_warn("CVMX_IPD_PORT_QOS_X_CNT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F0000000888ull) + ((offset) & 511) * 8;
+}
+#else
+#define CVMX_IPD_PORT_QOS_X_CNT(offset) (CVMX_ADD_IO_SEG(0x00014F0000000888ull) + ((offset) & 511) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_PORT_SOPX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_IPD_PORT_SOPX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00014F0000004400ull);
+}
+#else
+#define CVMX_IPD_PORT_SOPX(block_id) (CVMX_ADD_IO_SEG(0x00014F0000004400ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL_FUNC()
+static inline uint64_t CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000348ull);
+}
+#else
+#define CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000348ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_PRC_PORT_PTR_FIFO_CTL CVMX_IPD_PRC_PORT_PTR_FIFO_CTL_FUNC()
+static inline uint64_t CVMX_IPD_PRC_PORT_PTR_FIFO_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IPD_PRC_PORT_PTR_FIFO_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000350ull);
+}
+#else
+#define CVMX_IPD_PRC_PORT_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000350ull))
+#endif
+#define CVMX_IPD_PTR_COUNT (CVMX_ADD_IO_SEG(0x00014F0000000320ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_PWP_PTR_FIFO_CTL CVMX_IPD_PWP_PTR_FIFO_CTL_FUNC()
+static inline uint64_t CVMX_IPD_PWP_PTR_FIFO_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IPD_PWP_PTR_FIFO_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000340ull);
+}
+#else
+#define CVMX_IPD_PWP_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000340ull))
+#endif
+#define CVMX_IPD_QOS0_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(0)
+#define CVMX_IPD_QOS1_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(1)
+#define CVMX_IPD_QOS2_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(2)
+#define CVMX_IPD_QOS3_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(3)
+#define CVMX_IPD_QOS4_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(4)
+#define CVMX_IPD_QOS5_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(5)
+#define CVMX_IPD_QOS6_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(6)
+#define CVMX_IPD_QOS7_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(7)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_QOSX_RED_MARKS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_IPD_QOSX_RED_MARKS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F0000000178ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_IPD_QOSX_RED_MARKS(offset) (CVMX_ADD_IO_SEG(0x00014F0000000178ull) + ((offset) & 7) * 8)
+#endif
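+
+/* Usage sketch (editor's note): the eight per-QOS RED mark registers can
+ * be walked with the indexed form instead of the CVMX_IPD_QOS0..7_RED_MARKS
+ * aliases above; cvmx_read_csr() is assumed from cvmx.h, and the field
+ * layout is presumably given by a cvmx_ipd_qosx_red_marks union further
+ * down in this header.
+ *
+ *   unsigned q;
+ *   for (q = 0; q < 8; q++) {
+ *       uint64_t marks = cvmx_read_csr(CVMX_IPD_QOSX_RED_MARKS(q));
+ *       (decode marks with the union noted above)
+ *   }
+ */
+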
+#define CVMX_IPD_QUE0_FREE_PAGE_CNT (CVMX_ADD_IO_SEG(0x00014F0000000330ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_RED_BPID_ENABLEX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_IPD_RED_BPID_ENABLEX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00014F0000004200ull);
+}
+#else
+#define CVMX_IPD_RED_BPID_ENABLEX(block_id) (CVMX_ADD_IO_SEG(0x00014F0000004200ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_RED_DELAY CVMX_IPD_RED_DELAY_FUNC()
+static inline uint64_t CVMX_IPD_RED_DELAY_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IPD_RED_DELAY not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000004300ull);
+}
+#else
+#define CVMX_IPD_RED_DELAY (CVMX_ADD_IO_SEG(0x00014F0000004300ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_RED_PORT_ENABLE CVMX_IPD_RED_PORT_ENABLE_FUNC()
+static inline uint64_t CVMX_IPD_RED_PORT_ENABLE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IPD_RED_PORT_ENABLE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F00000002D8ull);
+}
+#else
+#define CVMX_IPD_RED_PORT_ENABLE (CVMX_ADD_IO_SEG(0x00014F00000002D8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_RED_PORT_ENABLE2 CVMX_IPD_RED_PORT_ENABLE2_FUNC()
+static inline uint64_t CVMX_IPD_RED_PORT_ENABLE2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IPD_RED_PORT_ENABLE2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F00000003A8ull);
+}
+#else
+#define CVMX_IPD_RED_PORT_ENABLE2 (CVMX_ADD_IO_SEG(0x00014F00000003A8ull))
+#endif
+#define CVMX_IPD_RED_QUE0_PARAM CVMX_IPD_RED_QUEX_PARAM(0)
+#define CVMX_IPD_RED_QUE1_PARAM CVMX_IPD_RED_QUEX_PARAM(1)
+#define CVMX_IPD_RED_QUE2_PARAM CVMX_IPD_RED_QUEX_PARAM(2)
+#define CVMX_IPD_RED_QUE3_PARAM CVMX_IPD_RED_QUEX_PARAM(3)
+#define CVMX_IPD_RED_QUE4_PARAM CVMX_IPD_RED_QUEX_PARAM(4)
+#define CVMX_IPD_RED_QUE5_PARAM CVMX_IPD_RED_QUEX_PARAM(5)
+#define CVMX_IPD_RED_QUE6_PARAM CVMX_IPD_RED_QUEX_PARAM(6)
+#define CVMX_IPD_RED_QUE7_PARAM CVMX_IPD_RED_QUEX_PARAM(7)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_IPD_RED_QUEX_PARAM(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_IPD_RED_QUEX_PARAM(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00014F00000002E0ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_IPD_RED_QUEX_PARAM(offset) (CVMX_ADD_IO_SEG(0x00014F00000002E0ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_REQ_WGT CVMX_IPD_REQ_WGT_FUNC()
+static inline uint64_t CVMX_IPD_REQ_WGT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_IPD_REQ_WGT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000004418ull);
+}
+#else
+#define CVMX_IPD_REQ_WGT (CVMX_ADD_IO_SEG(0x00014F0000004418ull))
+#endif
+#define CVMX_IPD_SUB_PORT_BP_PAGE_CNT (CVMX_ADD_IO_SEG(0x00014F0000000148ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_SUB_PORT_FCS CVMX_IPD_SUB_PORT_FCS_FUNC()
+static inline uint64_t CVMX_IPD_SUB_PORT_FCS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IPD_SUB_PORT_FCS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000170ull);
+}
+#else
+#define CVMX_IPD_SUB_PORT_FCS (CVMX_ADD_IO_SEG(0x00014F0000000170ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_SUB_PORT_QOS_CNT CVMX_IPD_SUB_PORT_QOS_CNT_FUNC()
+static inline uint64_t CVMX_IPD_SUB_PORT_QOS_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IPD_SUB_PORT_QOS_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000800ull);
+}
+#else
+#define CVMX_IPD_SUB_PORT_QOS_CNT (CVMX_ADD_IO_SEG(0x00014F0000000800ull))
+#endif
+#define CVMX_IPD_WQE_FPA_QUEUE (CVMX_ADD_IO_SEG(0x00014F0000000020ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_IPD_WQE_PTR_VALID CVMX_IPD_WQE_PTR_VALID_FUNC()
+static inline uint64_t CVMX_IPD_WQE_PTR_VALID_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_IPD_WQE_PTR_VALID not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00014F0000000360ull);
+}
+#else
+#define CVMX_IPD_WQE_PTR_VALID (CVMX_ADD_IO_SEG(0x00014F0000000360ull))
+#endif
+
+/**
+ * cvmx_ipd_1st_mbuff_skip
+ *
+ * IPD_1ST_MBUFF_SKIP = IPD First MBUFF Word Skip Size
+ *
+ * The number of words that the IPD will skip when writing the first MBUFF.
+ */
+union cvmx_ipd_1st_mbuff_skip {
+ uint64_t u64;
+ struct cvmx_ipd_1st_mbuff_skip_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t skip_sz : 6; /**< The number of 8-byte words from the top of the
+ 1st MBUFF at which the IPD will store the next-pointer.
+ Legal values are 0 to 32, where the MAX value
+ is also limited to:
+ IPD_PACKET_MBUFF_SIZE[MB_SIZE] - 18.
+ Must be at least 16 when IPD_CTL_STATUS[NO_WPTR]
+ is set. */
+#else
+ uint64_t skip_sz : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_ipd_1st_mbuff_skip_s cn30xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn31xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn38xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn38xxp2;
+ struct cvmx_ipd_1st_mbuff_skip_s cn50xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn52xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn52xxp1;
+ struct cvmx_ipd_1st_mbuff_skip_s cn56xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn56xxp1;
+ struct cvmx_ipd_1st_mbuff_skip_s cn58xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn58xxp1;
+ struct cvmx_ipd_1st_mbuff_skip_s cn61xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn63xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn63xxp1;
+ struct cvmx_ipd_1st_mbuff_skip_s cn66xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn68xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn68xxp1;
+ struct cvmx_ipd_1st_mbuff_skip_s cnf71xx;
+};
+typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_1st_mbuff_skip_t;
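+
+/* Usage sketch (editor's note): programming the first-MBUFF skip while
+ * honoring the constraints documented above (SKIP_SZ at most
+ * IPD_PACKET_MBUFF_SIZE[MB_SIZE] - 18, and at least 16 when
+ * IPD_CTL_STATUS[NO_WPTR] is set). The CVMX_IPD_1ST_MBUFF_SKIP address
+ * macro and cvmx_write_csr() are assumed from earlier in the SDK headers.
+ *
+ *   cvmx_ipd_1st_mbuff_skip_t skip;
+ *   skip.u64 = 0;
+ *   skip.s.skip_sz = 16;   (16 8-byte words = 128 bytes skipped)
+ *   cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, skip.u64);
+ */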
+
+/**
+ * cvmx_ipd_1st_next_ptr_back
+ *
+ * IPD_1st_NEXT_PTR_BACK = IPD First Next Pointer Back Values
+ *
+ * Contains the Back Field for use in creating the Next Pointer Header for the First MBUF
+ */
+union cvmx_ipd_1st_next_ptr_back {
+ uint64_t u64;
+ struct cvmx_ipd_1st_next_ptr_back_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t back : 4; /**< Used to find head of buffer from the nxt-hdr-ptr. */
+#else
+ uint64_t back : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ipd_1st_next_ptr_back_s cn30xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn31xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn38xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn38xxp2;
+ struct cvmx_ipd_1st_next_ptr_back_s cn50xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn52xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn52xxp1;
+ struct cvmx_ipd_1st_next_ptr_back_s cn56xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn56xxp1;
+ struct cvmx_ipd_1st_next_ptr_back_s cn58xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn58xxp1;
+ struct cvmx_ipd_1st_next_ptr_back_s cn61xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn63xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn63xxp1;
+ struct cvmx_ipd_1st_next_ptr_back_s cn66xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn68xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn68xxp1;
+ struct cvmx_ipd_1st_next_ptr_back_s cnf71xx;
+};
+typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_1st_next_ptr_back_t;
+
+/**
+ * cvmx_ipd_2nd_next_ptr_back
+ *
+ * IPD_2nd_NEXT_PTR_BACK = IPD Second Next Pointer Back Value
+ *
+ * Contains the Back Field for use in creating the Next Pointer Header for the Second MBUF
+ */
+union cvmx_ipd_2nd_next_ptr_back {
+ uint64_t u64;
+ struct cvmx_ipd_2nd_next_ptr_back_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t back : 4; /**< Used to find head of buffer from the nxt-hdr-ptr. */
+#else
+ uint64_t back : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn30xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn31xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn38xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn38xxp2;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn50xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn52xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn52xxp1;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn56xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn56xxp1;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn58xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn58xxp1;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn61xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn63xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn63xxp1;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn66xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn68xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn68xxp1;
+ struct cvmx_ipd_2nd_next_ptr_back_s cnf71xx;
+};
+typedef union cvmx_ipd_2nd_next_ptr_back cvmx_ipd_2nd_next_ptr_back_t;
+
+/**
+ * cvmx_ipd_bist_status
+ *
+ * IPD_BIST_STATUS = IPD BIST STATUS
+ *
+ * BIST Status for IPD's Memories.
+ */
+union cvmx_ipd_bist_status {
+ uint64_t u64;
+ struct cvmx_ipd_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t iiwo1 : 1; /**< IPD IOB WQE Dataout MEM1 Bist Status. */
+ uint64_t iiwo0 : 1; /**< IPD IOB WQE Dataout MEM0 Bist Status. */
+ uint64_t iio1 : 1; /**< IPD IOB Dataout MEM1 Bist Status. */
+ uint64_t iio0 : 1; /**< IPD IOB Dataout MEM0 Bist Status. */
+ uint64_t pbm4 : 1; /**< PBM4 Memory Bist Status. */
+ uint64_t csr_mem : 1; /**< CSR Register Memory Bist Status. */
+ uint64_t csr_ncmd : 1; /**< CSR NCB Commands Memory Bist Status. */
+ uint64_t pwq_wqed : 1; /**< PWQ PIP WQE DONE Memory Bist Status. */
+ uint64_t pwq_wp1 : 1; /**< PWQ WQE PAGE1 PTR Memory Bist Status. */
+ uint64_t pwq_pow : 1; /**< PWQ POW MEM Memory Bist Status. */
+ uint64_t ipq_pbe1 : 1; /**< IPQ PBE1 Memory Bist Status. */
+ uint64_t ipq_pbe0 : 1; /**< IPQ PBE0 Memory Bist Status. */
+ uint64_t pbm3 : 1; /**< PBM3 Memory Bist Status. */
+ uint64_t pbm2 : 1; /**< PBM2 Memory Bist Status. */
+ uint64_t pbm1 : 1; /**< PBM1 Memory Bist Status. */
+ uint64_t pbm0 : 1; /**< PBM0 Memory Bist Status. */
+ uint64_t pbm_word : 1; /**< PBM_WORD Memory Bist Status. */
+ uint64_t pwq1 : 1; /**< PWQ1 Memory Bist Status. */
+ uint64_t pwq0 : 1; /**< PWQ0 Memory Bist Status. */
+ uint64_t prc_off : 1; /**< PRC_OFF Memory Bist Status. */
+ uint64_t ipd_old : 1; /**< IPD_OLD Memory Bist Status. */
+ uint64_t ipd_new : 1; /**< IPD_NEW Memory Bist Status. */
+ uint64_t pwp : 1; /**< PWP Memory Bist Status. */
+#else
+ uint64_t pwp : 1;
+ uint64_t ipd_new : 1;
+ uint64_t ipd_old : 1;
+ uint64_t prc_off : 1;
+ uint64_t pwq0 : 1;
+ uint64_t pwq1 : 1;
+ uint64_t pbm_word : 1;
+ uint64_t pbm0 : 1;
+ uint64_t pbm1 : 1;
+ uint64_t pbm2 : 1;
+ uint64_t pbm3 : 1;
+ uint64_t ipq_pbe0 : 1;
+ uint64_t ipq_pbe1 : 1;
+ uint64_t pwq_pow : 1;
+ uint64_t pwq_wp1 : 1;
+ uint64_t pwq_wqed : 1;
+ uint64_t csr_ncmd : 1;
+ uint64_t csr_mem : 1;
+ uint64_t pbm4 : 1;
+ uint64_t iio0 : 1;
+ uint64_t iio1 : 1;
+ uint64_t iiwo0 : 1;
+ uint64_t iiwo1 : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_ipd_bist_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t pwq_wqed : 1; /**< PWQ PIP WQE DONE Memory Bist Status. */
+ uint64_t pwq_wp1 : 1; /**< PWQ WQE PAGE1 PTR Memory Bist Status. */
+ uint64_t pwq_pow : 1; /**< PWQ POW MEM Memory Bist Status. */
+ uint64_t ipq_pbe1 : 1; /**< IPQ PBE1 Memory Bist Status. */
+ uint64_t ipq_pbe0 : 1; /**< IPQ PBE0 Memory Bist Status. */
+ uint64_t pbm3 : 1; /**< PBM3 Memory Bist Status. */
+ uint64_t pbm2 : 1; /**< PBM2 Memory Bist Status. */
+ uint64_t pbm1 : 1; /**< PBM1 Memory Bist Status. */
+ uint64_t pbm0 : 1; /**< PBM0 Memory Bist Status. */
+ uint64_t pbm_word : 1; /**< PBM_WORD Memory Bist Status. */
+ uint64_t pwq1 : 1; /**< PWQ1 Memory Bist Status. */
+ uint64_t pwq0 : 1; /**< PWQ0 Memory Bist Status. */
+ uint64_t prc_off : 1; /**< PRC_OFF Memory Bist Status. */
+ uint64_t ipd_old : 1; /**< IPD_OLD Memory Bist Status. */
+ uint64_t ipd_new : 1; /**< IPD_NEW Memory Bist Status. */
+ uint64_t pwp : 1; /**< PWP Memory Bist Status. */
+#else
+ uint64_t pwp : 1;
+ uint64_t ipd_new : 1;
+ uint64_t ipd_old : 1;
+ uint64_t prc_off : 1;
+ uint64_t pwq0 : 1;
+ uint64_t pwq1 : 1;
+ uint64_t pbm_word : 1;
+ uint64_t pbm0 : 1;
+ uint64_t pbm1 : 1;
+ uint64_t pbm2 : 1;
+ uint64_t pbm3 : 1;
+ uint64_t ipq_pbe0 : 1;
+ uint64_t ipq_pbe1 : 1;
+ uint64_t pwq_pow : 1;
+ uint64_t pwq_wp1 : 1;
+ uint64_t pwq_wqed : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_bist_status_cn30xx cn31xx;
+ struct cvmx_ipd_bist_status_cn30xx cn38xx;
+ struct cvmx_ipd_bist_status_cn30xx cn38xxp2;
+ struct cvmx_ipd_bist_status_cn30xx cn50xx;
+ struct cvmx_ipd_bist_status_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t csr_mem : 1; /**< CSR Register Memory Bist Status. */
+ uint64_t csr_ncmd : 1; /**< CSR NCB Commands Memory Bist Status. */
+ uint64_t pwq_wqed : 1; /**< PWQ PIP WQE DONE Memory Bist Status. */
+ uint64_t pwq_wp1 : 1; /**< PWQ WQE PAGE1 PTR Memory Bist Status. */
+ uint64_t pwq_pow : 1; /**< PWQ POW MEM Memory Bist Status. */
+ uint64_t ipq_pbe1 : 1; /**< IPQ PBE1 Memory Bist Status. */
+ uint64_t ipq_pbe0 : 1; /**< IPQ PBE0 Memory Bist Status. */
+ uint64_t pbm3 : 1; /**< PBM3 Memory Bist Status. */
+ uint64_t pbm2 : 1; /**< PBM2 Memory Bist Status. */
+ uint64_t pbm1 : 1; /**< PBM1 Memory Bist Status. */
+ uint64_t pbm0 : 1; /**< PBM0 Memory Bist Status. */
+ uint64_t pbm_word : 1; /**< PBM_WORD Memory Bist Status. */
+ uint64_t pwq1 : 1; /**< PWQ1 Memory Bist Status. */
+ uint64_t pwq0 : 1; /**< PWQ0 Memory Bist Status. */
+ uint64_t prc_off : 1; /**< PRC_OFF Memory Bist Status. */
+ uint64_t ipd_old : 1; /**< IPD_OLD Memory Bist Status. */
+ uint64_t ipd_new : 1; /**< IPD_NEW Memory Bist Status. */
+ uint64_t pwp : 1; /**< PWP Memory Bist Status. */
+#else
+ uint64_t pwp : 1;
+ uint64_t ipd_new : 1;
+ uint64_t ipd_old : 1;
+ uint64_t prc_off : 1;
+ uint64_t pwq0 : 1;
+ uint64_t pwq1 : 1;
+ uint64_t pbm_word : 1;
+ uint64_t pbm0 : 1;
+ uint64_t pbm1 : 1;
+ uint64_t pbm2 : 1;
+ uint64_t pbm3 : 1;
+ uint64_t ipq_pbe0 : 1;
+ uint64_t ipq_pbe1 : 1;
+ uint64_t pwq_pow : 1;
+ uint64_t pwq_wp1 : 1;
+ uint64_t pwq_wqed : 1;
+ uint64_t csr_ncmd : 1;
+ uint64_t csr_mem : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn52xx;
+ struct cvmx_ipd_bist_status_cn52xx cn52xxp1;
+ struct cvmx_ipd_bist_status_cn52xx cn56xx;
+ struct cvmx_ipd_bist_status_cn52xx cn56xxp1;
+ struct cvmx_ipd_bist_status_cn30xx cn58xx;
+ struct cvmx_ipd_bist_status_cn30xx cn58xxp1;
+ struct cvmx_ipd_bist_status_cn52xx cn61xx;
+ struct cvmx_ipd_bist_status_cn52xx cn63xx;
+ struct cvmx_ipd_bist_status_cn52xx cn63xxp1;
+ struct cvmx_ipd_bist_status_cn52xx cn66xx;
+ struct cvmx_ipd_bist_status_s cn68xx;
+ struct cvmx_ipd_bist_status_s cn68xxp1;
+ struct cvmx_ipd_bist_status_cn52xx cnf71xx;
+};
+typedef union cvmx_ipd_bist_status cvmx_ipd_bist_status_t;
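+
+/* Usage sketch (editor's note): a minimal BIST health check. On these
+ * status registers a set bit conventionally flags a failing memory, so
+ * testing the whole word is usually enough. CVMX_IPD_BIST_STATUS and
+ * cvmx_read_csr() are assumed from the SDK.
+ *
+ *   cvmx_ipd_bist_status_t bist;
+ *   bist.u64 = cvmx_read_csr(CVMX_IPD_BIST_STATUS);
+ *   if (bist.u64 != 0)
+ *       cvmx_warn("IPD BIST failure: 0x%016llx\n",
+ *                 (unsigned long long)bist.u64);
+ */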
+
+/**
+ * cvmx_ipd_bp_prt_red_end
+ *
+ * IPD_BP_PRT_RED_END = IPD Backpressure Port RED Enable
+ *
+ * When IPD applies backpressure to a PORT and the corresponding bit in this register is set,
+ * the RED Unit will drop packets for that port.
+ */
+union cvmx_ipd_bp_prt_red_end {
+ uint64_t u64;
+ struct cvmx_ipd_bp_prt_red_end_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t prt_enb : 48; /**< The port corresponding to the bit position in this
+ field will drop all NON-RAW packets to that port
+ when port level backpressure is applied to that
+ port. The applying of port-level backpressure for
+ this dropping does not take into consideration the
+ value of IPD_PORTX_BP_PAGE_CNT[BP_ENB], nor
+ IPD_RED_PORT_ENABLE[PRT_ENB]. */
+#else
+ uint64_t prt_enb : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t prt_enb : 36; /**< The port corresponding to the bit position in this
+ field, will allow RED to drop packets when port level
+ backpressure is applied to the port. The applying
+ of port-level backpressure for this RED dropping
+ does not take into consideration the value of
+ IPD_PORTX_BP_PAGE_CNT[BP_ENB]. */
+#else
+ uint64_t prt_enb : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn31xx;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xx;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xxp2;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn50xx;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t prt_enb : 40; /**< The port corresponding to the bit position in this
+ field, will allow RED to drop packets when port level
+ backpressure is applied to the port. The applying
+ of port-level backpressure for this RED dropping
+ does not take into consideration the value of
+ IPD_PORTX_BP_PAGE_CNT[BP_ENB]. */
+#else
+ uint64_t prt_enb : 40;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } cn52xx;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx cn52xxp1;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xx;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xxp1;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xx;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xxp1;
+ struct cvmx_ipd_bp_prt_red_end_s cn61xx;
+ struct cvmx_ipd_bp_prt_red_end_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t prt_enb : 44; /**< The port corresponding to the bit position in this
+ field will drop all NON-RAW packets to that port
+ when port level backpressure is applied to that
+ port. The applying of port-level backpressure for
+ this dropping does not take into consideration the
+ value of IPD_PORTX_BP_PAGE_CNT[BP_ENB], nor
+ IPD_RED_PORT_ENABLE[PRT_ENB]. */
+#else
+ uint64_t prt_enb : 44;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } cn63xx;
+ struct cvmx_ipd_bp_prt_red_end_cn63xx cn63xxp1;
+ struct cvmx_ipd_bp_prt_red_end_s cn66xx;
+ struct cvmx_ipd_bp_prt_red_end_s cnf71xx;
+};
+typedef union cvmx_ipd_bp_prt_red_end cvmx_ipd_bp_prt_red_end_t;
+
+/**
+ * cvmx_ipd_bpid#_mbuf_th
+ *
+ * 0x2000 - 0x2FFF
+ *
+ * IPD_BPIDX_MBUF_TH = IPD BPID MBUFF Threshold
+ *
+ * The number of MBUFFs in use by the BPID that, when exceeded, causes backpressure to be applied to the BPID.
+ */
+union cvmx_ipd_bpidx_mbuf_th {
+ uint64_t u64;
+ struct cvmx_ipd_bpidx_mbuf_th_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t bp_enb : 1; /**< When set '1' BP will be applied, if '0' BP will
+ not be applied to bpid. */
+ uint64_t page_cnt : 17; /**< The number of page pointers assigned to
+ the BPID, that when exceeded will cause
+ back-pressure to be applied to the BPID.
+ This value is in 256 page-pointer increments,
+ (i.e. 0 = 0-page-ptrs, 1 = 256-page-ptrs,..) */
+#else
+ uint64_t page_cnt : 17;
+ uint64_t bp_enb : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ipd_bpidx_mbuf_th_s cn68xx;
+ struct cvmx_ipd_bpidx_mbuf_th_s cn68xxp1;
+};
+typedef union cvmx_ipd_bpidx_mbuf_th cvmx_ipd_bpidx_mbuf_th_t;
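+
+/* Worked example (editor's note): PAGE_CNT is in 256 page-pointer
+ * increments, so a threshold of 1024 page pointers is PAGE_CNT = 4.
+ * The CVMX_IPD_BPIDX_MBUF_TH(bpid) address macro is assumed from earlier
+ * in this header.
+ *
+ *   cvmx_ipd_bpidx_mbuf_th_t th;
+ *   unsigned bpid = 0;     (example BPID)
+ *   th.u64 = 0;
+ *   th.s.page_cnt = 4;     (4 * 256 = 1024 page pointers)
+ *   th.s.bp_enb = 1;       (apply backpressure once exceeded)
+ *   cvmx_write_csr(CVMX_IPD_BPIDX_MBUF_TH(bpid), th.u64);
+ */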
+
+/**
+ * cvmx_ipd_bpid_bp_counter#
+ *
+ * Reserved space up to 0x2FFF.
+ *
+ * 0x3000 - 0x3ffff
+ *
+ * IPD_BPID_BP_COUNTERX = MBUF BPID Counters used to generate Back Pressure Per BPID.
+ */
+union cvmx_ipd_bpid_bp_counterx {
+ uint64_t u64;
+ struct cvmx_ipd_bpid_bp_counterx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t cnt_val : 25; /**< Number of MBUFs being used by data on this BPID. */
+#else
+ uint64_t cnt_val : 25;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_ipd_bpid_bp_counterx_s cn68xx;
+ struct cvmx_ipd_bpid_bp_counterx_s cn68xxp1;
+};
+typedef union cvmx_ipd_bpid_bp_counterx cvmx_ipd_bpid_bp_counterx_t;
+
+/**
+ * cvmx_ipd_clk_count
+ *
+ * IPD_CLK_COUNT = IPD Clock Count
+ *
+ * Counts the number of core clock periods since the de-assertion of reset.
+ */
+union cvmx_ipd_clk_count {
+ uint64_t u64;
+ struct cvmx_ipd_clk_count_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clk_cnt : 64; /**< This counter will be zeroed when reset is applied
+ and will increment every rising edge of the
+ core-clock. */
+#else
+ uint64_t clk_cnt : 64;
+#endif
+ } s;
+ struct cvmx_ipd_clk_count_s cn30xx;
+ struct cvmx_ipd_clk_count_s cn31xx;
+ struct cvmx_ipd_clk_count_s cn38xx;
+ struct cvmx_ipd_clk_count_s cn38xxp2;
+ struct cvmx_ipd_clk_count_s cn50xx;
+ struct cvmx_ipd_clk_count_s cn52xx;
+ struct cvmx_ipd_clk_count_s cn52xxp1;
+ struct cvmx_ipd_clk_count_s cn56xx;
+ struct cvmx_ipd_clk_count_s cn56xxp1;
+ struct cvmx_ipd_clk_count_s cn58xx;
+ struct cvmx_ipd_clk_count_s cn58xxp1;
+ struct cvmx_ipd_clk_count_s cn61xx;
+ struct cvmx_ipd_clk_count_s cn63xx;
+ struct cvmx_ipd_clk_count_s cn63xxp1;
+ struct cvmx_ipd_clk_count_s cn66xx;
+ struct cvmx_ipd_clk_count_s cn68xx;
+ struct cvmx_ipd_clk_count_s cn68xxp1;
+ struct cvmx_ipd_clk_count_s cnf71xx;
+};
+typedef union cvmx_ipd_clk_count cvmx_ipd_clk_count_t;
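+
+/* Usage sketch (editor's note): since CLK_CNT increments once per core
+ * clock after reset de-asserts, two reads bracket an interval in
+ * core-clock cycles. CVMX_IPD_CLK_COUNT is assumed from earlier in this
+ * header; do_some_work() is a hypothetical workload.
+ *
+ *   uint64_t t0 = cvmx_read_csr(CVMX_IPD_CLK_COUNT);
+ *   do_some_work();
+ *   uint64_t t1 = cvmx_read_csr(CVMX_IPD_CLK_COUNT);
+ *   uint64_t cycles = t1 - t0;   (elapsed core-clock cycles)
+ */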
+
+/**
+ * cvmx_ipd_credits
+ *
+ * IPD_CREDITS = IPD Credits
+ *
+ * The credits allowed for IPD.
+ */
+union cvmx_ipd_credits {
+ uint64_t u64;
+ struct cvmx_ipd_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t iob_wrc : 8; /**< The present number of credits available for
+ stores to the IOB. */
+ uint64_t iob_wr : 8; /**< The number of command credits the IPD has to send
+ stores to the IOB. Legal values for this field
+ are 1-8 (a value of 0 will be treated as a 1 and
+ a value greater than 8 will be treated as an 8). */
+#else
+ uint64_t iob_wr : 8;
+ uint64_t iob_wrc : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ipd_credits_s cn68xx;
+ struct cvmx_ipd_credits_s cn68xxp1;
+};
+typedef union cvmx_ipd_credits cvmx_ipd_credits_t;
+
+/**
+ * cvmx_ipd_ctl_status
+ *
+ * IPD_CTL_STATUS = IPD's Control Status Register
+ *
+ * Control and status bits for the IPD.
+ */
+union cvmx_ipd_ctl_status {
+ uint64_t u64;
+ struct cvmx_ipd_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t use_sop : 1; /**< When '1' the SOP sent by the MAC will be used in
+ place of the SOP generated by the IPD. */
+ uint64_t rst_done : 1; /**< When '0' IPD has finished reset. No access
+ except the reading of this bit should occur to the
+ IPD until this bit is clear, or 1000 core clock
+ cycles have passed after the de-assertion of reset. */
+ uint64_t clken : 1; /**< Controls the conditional clocking within IPD
+ 0=Allow HW to control the clocks
+ 1=Force the clocks to be always on */
+ uint64_t no_wptr : 1; /**< When set '1' the WQE pointers will not be used and
+ the WQE will be located at the front of the packet.
+ When set:
+ - IPD_WQE_FPA_QUEUE[WQE_QUE] is not used
+ - IPD_1ST_MBUFF_SKIP[SKIP_SZ] must be at least 16
+ - If 16 <= IPD_1ST_MBUFF_SKIP[SKIP_SZ] <= 31 then
+ the WQE will be written into the first 128B
+ cache block in the first buffer that contains
+ the packet.
+ - If IPD_1ST_MBUFF_SKIP[SKIP_SZ] == 32 then
+ the WQE will be written into the second 128B
+ cache block in the first buffer that contains
+ the packet. */
+ uint64_t pq_apkt : 1; /**< When set IPD_PORT_QOS_X_CNT WILL be incremented
+ by one for every work queue entry that is sent to
+ POW. */
+ uint64_t pq_nabuf : 1; /**< When set IPD_PORT_QOS_X_CNT WILL NOT be
+ incremented when IPD allocates a buffer for a
+ packet. */
+ uint64_t ipd_full : 1; /**< When clear '0' the IPD acts normally.
+ When set '1' the IPD drives the IPD_BUFF_FULL line to
+ the IOB-arbiter, telling it to not give grants to
+ NCB devices sending packet data. */
+ uint64_t pkt_off : 1; /**< When clear '0' the IPD works normally,
+ buffering the received packet data. When set '1'
+ the IPD will not buffer the received packet data. */
+ uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the
+ data-length field in the header written to the
+ POW and the top of a MBUFF.
+ OCTEON generates a length that includes the
+ length of the data + 8 for the header-field. By
+ setting this bit the 8 for the instr-field will
+ not be included in the length field of the header.
+ NOTE: IPD is compliant with the spec when this
+ field is '1'. */
+ uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except
+ RSL. */
+ uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL],
+ IPD_PORT_BP_COUNTERS2_PAIR(port)[CNT_VAL] and
+ IPD_PORT_BP_COUNTERS3_PAIR(port)[CNT_VAL]
+ WILL be incremented by one for every work
+ queue entry that is sent to POW. */
+ uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL],
+ IPD_PORT_BP_COUNTERS2_PAIR(port)[CNT_VAL] and
+ IPD_PORT_BP_COUNTERS3_PAIR(port)[CNT_VAL]
+ WILL NOT be incremented when IPD allocates a
+ buffer for a packet on the port. */
+ uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */
+ uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */
+ uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables
+ the sending of port level backpressure to the
+ Octane input-ports. The application should NOT
+ de-assert this bit after asserting it. The
+ receivers of this bit may have been put into
+ backpressure mode and can only be released by
+ IPD informing them that the backpressure has
+ been released.
+ GMXX_INF_MODE[EN] must be set to '1' for each
+ packet interface which requires port back pressure
+ prior to setting PBP_EN to '1'. */
+ cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers)
+ is written through to memory.
+ 1 ==> All packet data (and next buffer pointers) is
+ written into the cache.
+ 2 ==> The first aligned cache block holding the
+ packet data (and initial next buffer pointer) is
+ written to the L2 cache, all remaining cache blocks
+ are not written to the L2 cache.
+ 3 ==> The first two aligned cache blocks holding
+ the packet data (and initial next buffer pointer)
+ are written to the L2 cache, all remaining cache
+ blocks are not written to the L2 cache. */
+ uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD.
+ When clear '0', the IPD will appear to the
+ IOB-arbiter to be applying backpressure, this
+ causes the IOB-Arbiter to not send grants to NCB
+ devices requesting to send packet data to the IPD. */
+#else
+ uint64_t ipd_en : 1;
+ cvmx_ipd_mode_t opc_mode : 2;
+ uint64_t pbp_en : 1;
+ uint64_t wqe_lend : 1;
+ uint64_t pkt_lend : 1;
+ uint64_t naddbuf : 1;
+ uint64_t addpkt : 1;
+ uint64_t reset : 1;
+ uint64_t len_m8 : 1;
+ uint64_t pkt_off : 1;
+ uint64_t ipd_full : 1;
+ uint64_t pq_nabuf : 1;
+ uint64_t pq_apkt : 1;
+ uint64_t no_wptr : 1;
+ uint64_t clken : 1;
+ uint64_t rst_done : 1;
+ uint64_t use_sop : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ipd_ctl_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the
+ data-length field in the header written to the
+ POW and the top of a MBUFF.
+ OCTEON generates a length that includes the
+ length of the data + 8 for the header-field. By
+ setting this bit the 8 for the instr-field will
+ not be included in the length field of the header.
+ NOTE: IPD is compliant with the spec when this
+ field is '1'. */
+ uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except
+ RSL. */
+ uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL]
+ WILL be incremented by one for every work
+ queue entry that is sent to POW. */
+ uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL]
+ WILL NOT be incremented when IPD allocates a
+ buffer for a packet on the port. */
+ uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */
+ uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */
+ uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables
+ the sending of port level backpressure to the
+ Octane input-ports. Once enabled the sending of
+ port-level-backpressure can not be disabled by
+ changing the value of this bit.
+ GMXX_INF_MODE[EN] must be set to '1' for each
+ packet interface which requires port back pressure
+ prior to setting PBP_EN to '1'. */
+ cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers)
+ is written through to memory.
+ 1 ==> All packet data (and next buffer pointers) is
+ written into the cache.
+ 2 ==> The first aligned cache block holding the
+ packet data (and initial next buffer pointer) is
+ written to the L2 cache, all remaining cache blocks
+ are not written to the L2 cache.
+ 3 ==> The first two aligned cache blocks holding
+ the packet data (and initial next buffer pointer)
+ are written to the L2 cache, all remaining cache
+ blocks are not written to the L2 cache. */
+ uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD. */
+#else
+ uint64_t ipd_en : 1;
+ cvmx_ipd_mode_t opc_mode : 2;
+ uint64_t pbp_en : 1;
+ uint64_t wqe_lend : 1;
+ uint64_t pkt_lend : 1;
+ uint64_t naddbuf : 1;
+ uint64_t addpkt : 1;
+ uint64_t reset : 1;
+ uint64_t len_m8 : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_ctl_status_cn30xx cn31xx;
+ struct cvmx_ipd_ctl_status_cn30xx cn38xx;
+ struct cvmx_ipd_ctl_status_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except
+ RSL. */
+ uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL]
+ WILL be incremented by one for every work
+ queue entry that is sent to POW.
+ PASS-2 Field. */
+ uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL]
+ WILL NOT be incremented when IPD allocates a
+ buffer for a packet on the port.
+ PASS-2 Field. */
+ uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */
+ uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */
+ uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables
+ the sending of port level backpressure to the
+ Octane input-ports. Once enabled the sending of
+ port-level-backpressure can not be disabled by
+ changing the value of this bit. */
+ cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers)
+ is written through to memory.
+ 1 ==> All packet data (and next buffer pointers) is
+ written into the cache.
+ 2 ==> The first aligned cache block holding the
+ packet data (and initial next buffer pointer) is
+ written to the L2 cache, all remaining cache blocks
+ are not written to the L2 cache.
+ 3 ==> The first two aligned cache blocks holding
+ the packet data (and initial next buffer pointer)
+ are written to the L2 cache, all remaining cache
+ blocks are not written to the L2 cache. */
+ uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD. */
+#else
+ uint64_t ipd_en : 1;
+ cvmx_ipd_mode_t opc_mode : 2;
+ uint64_t pbp_en : 1;
+ uint64_t wqe_lend : 1;
+ uint64_t pkt_lend : 1;
+ uint64_t naddbuf : 1;
+ uint64_t addpkt : 1;
+ uint64_t reset : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn38xxp2;
+ struct cvmx_ipd_ctl_status_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t no_wptr : 1; /**< When set '1' the WQE pointers will not be used and
+ the WQE will be located at the front of the packet. */
+ uint64_t pq_apkt : 1; /**< Reserved. */
+ uint64_t pq_nabuf : 1; /**< Reserved. */
+ uint64_t ipd_full : 1; /**< When clear '0' the IPD acts normally.
+ When set '1' the IPD drives the IPD_BUFF_FULL line to
+ the IOB-arbiter, telling it to not give grants to
+ NCB devices sending packet data. */
+ uint64_t pkt_off : 1; /**< When clear '0' the IPD works normally,
+ buffering the received packet data. When set '1'
+ the IPD will not buffer the received packet data. */
+ uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the
+ data-length field in the header written to the
+ POW and the top of a MBUFF.
+ OCTEON generates a length that includes the
+ length of the data + 8 for the header-field. By
+ setting this bit the 8 for the instr-field will
+ not be included in the length field of the header.
+ NOTE: IPD is compliant with the spec when this
+ field is '1'. */
+ uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except
+ RSL. */
+ uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL]
+ WILL be incremented by one for every work
+ queue entry that is sent to POW. */
+ uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL]
+ WILL NOT be incremented when IPD allocates a
+ buffer for a packet on the port. */
+ uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */
+ uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */
+ uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables
+ the sending of port level backpressure to the
+ Octane input-ports. Once enabled the sending of
+ port-level-backpressure can not be disabled by
+ changing the value of this bit.
+ GMXX_INF_MODE[EN] must be set to '1' for each
+ packet interface which requires port back pressure
+ prior to setting PBP_EN to '1'. */
+ cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers)
+ is written through to memory.
+ 1 ==> All packet data (and next buffer pointers) is
+ written into the cache.
+ 2 ==> The first aligned cache block holding the
+ packet data (and initial next buffer pointer) is
+ written to the L2 cache, all remaining cache blocks
+ are not written to the L2 cache.
+ 3 ==> The first two aligned cache blocks holding
+ the packet data (and initial next buffer pointer)
+ are written to the L2 cache, all remaining cache
+ blocks are not written to the L2 cache. */
+ uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD.
+ When clear '0', the IPD will appear to the
+ IOB-arbiter to be applying backpressure, this
+ causes the IOB-Arbiter to not send grants to NCB
+ devices requesting to send packet data to the IPD. */
+#else
+ uint64_t ipd_en : 1;
+ cvmx_ipd_mode_t opc_mode : 2;
+ uint64_t pbp_en : 1;
+ uint64_t wqe_lend : 1;
+ uint64_t pkt_lend : 1;
+ uint64_t naddbuf : 1;
+ uint64_t addpkt : 1;
+ uint64_t reset : 1;
+ uint64_t len_m8 : 1;
+ uint64_t pkt_off : 1;
+ uint64_t ipd_full : 1;
+ uint64_t pq_nabuf : 1;
+ uint64_t pq_apkt : 1;
+ uint64_t no_wptr : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } cn50xx;
+ struct cvmx_ipd_ctl_status_cn50xx cn52xx;
+ struct cvmx_ipd_ctl_status_cn50xx cn52xxp1;
+ struct cvmx_ipd_ctl_status_cn50xx cn56xx;
+ struct cvmx_ipd_ctl_status_cn50xx cn56xxp1;
+ struct cvmx_ipd_ctl_status_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t ipd_full : 1; /**< When clear '0' the IPD acts normally.
+ When set '1' the IPD drives the IPD_BUFF_FULL line to
+ the IOB-arbiter, telling it to not give grants to
+ NCB devices sending packet data. */
+ uint64_t pkt_off : 1; /**< When clear '0' the IPD works normally,
+ buffering the received packet data. When set '1'
+ the IPD will not buffer the received packet data. */
+ uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the
+ data-length field in the header written to the
+ POW and the top of a MBUFF.
+ OCTEON PASS2 generates a length that includes the
+ length of the data + 8 for the header-field. By
+ setting this bit the 8 for the instr-field will
+ not be included in the length field of the header.
+ NOTE: IPD is compliant with the spec when this
+ field is '1'. */
+ uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except
+ RSL. */
+ uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL]
+ WILL be incremented by one for every work
+ queue entry that is sent to POW.
+ PASS-2 Field. */
+ uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL]
+ WILL NOT be incremented when IPD allocates a
+ buffer for a packet on the port.
+ PASS-2 Field. */
+ uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */
+ uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */
+ uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables
+ the sending of port level backpressure to the
+ Octane input-ports. Once enabled the sending of
+ port-level-backpressure can not be disabled by
+ changing the value of this bit. */
+ cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers)
+ is written through to memory.
+ 1 ==> All packet data (and next buffer pointers) is
+ written into the cache.
+ 2 ==> The first aligned cache block holding the
+ packet data (and initial next buffer pointer) is
+ written to the L2 cache, all remaining cache blocks
+ are not written to the L2 cache.
+ 3 ==> The first two aligned cache blocks holding
+ the packet data (and initial next buffer pointer)
+ are written to the L2 cache, all remaining cache
+ blocks are not written to the L2 cache. */
+ uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD.
+ When clear '0', the IPD will appear to the
+ IOB-arbiter to be applying backpressure, this
+ causes the IOB-Arbiter to not send grants to NCB
+ devices requesting to send packet data to the IPD. */
+#else
+ uint64_t ipd_en : 1;
+ cvmx_ipd_mode_t opc_mode : 2;
+ uint64_t pbp_en : 1;
+ uint64_t wqe_lend : 1;
+ uint64_t pkt_lend : 1;
+ uint64_t naddbuf : 1;
+ uint64_t addpkt : 1;
+ uint64_t reset : 1;
+ uint64_t len_m8 : 1;
+ uint64_t pkt_off : 1;
+ uint64_t ipd_full : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn58xx;
+ struct cvmx_ipd_ctl_status_cn58xx cn58xxp1;
+ struct cvmx_ipd_ctl_status_s cn61xx;
+ struct cvmx_ipd_ctl_status_s cn63xx;
+ struct cvmx_ipd_ctl_status_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t clken : 1; /**< Controls the conditional clocking within IPD
+ 0=Allow HW to control the clocks
+ 1=Force the clocks to be always on */
+ uint64_t no_wptr : 1; /**< When set '1' the WQE pointers will not be used and
+ the WQE will be located at the front of the packet.
+ When set:
+ - IPD_WQE_FPA_QUEUE[WQE_QUE] is not used
+ - IPD_1ST_MBUFF_SKIP[SKIP_SZ] must be at least 16
+ - If 16 <= IPD_1ST_MBUFF_SKIP[SKIP_SZ] <= 31 then
+ the WQE will be written into the first 128B
+ cache block in the first buffer that contains
+ the packet.
+ - If IPD_1ST_MBUFF_SKIP[SKIP_SZ] == 32 then
+ the WQE will be written into the second 128B
+ cache block in the first buffer that contains
+ the packet. */
+ uint64_t pq_apkt : 1; /**< When set IPD_PORT_QOS_X_CNT WILL be incremented
+ by one for every work queue entry that is sent to
+ POW. */
+ uint64_t pq_nabuf : 1; /**< When set IPD_PORT_QOS_X_CNT WILL NOT be
+ incremented when IPD allocates a buffer for a
+ packet. */
+ uint64_t ipd_full : 1; /**< When clear '0' the IPD acts normally.
+ When set '1' the IPD drives the IPD_BUFF_FULL line to
+ the IOB-arbiter, telling it to not give grants to
+ NCB devices sending packet data. */
+ uint64_t pkt_off : 1; /**< When clear '0' the IPD works normally,
+ buffering the received packet data. When set '1'
+ the IPD will not buffer the received packet data. */
+ uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the
+ data-length field in the header written to the
+ POW and the top of a MBUFF.
+ OCTEON generates a length that includes the
+ length of the data + 8 for the header-field. By
+ setting this bit the 8 for the instr-field will
+ not be included in the length field of the header.
+ NOTE: IPD is compliant with the spec when this
+ field is '1'. */
+ uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except
+ RSL. */
+ uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL],
+ IPD_PORT_BP_COUNTERS2_PAIR(port)[CNT_VAL] and
+ IPD_PORT_BP_COUNTERS3_PAIR(port)[CNT_VAL]
+ WILL be incremented by one for every work
+ queue entry that is sent to POW. */
+ uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set,
+ IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL],
+ IPD_PORT_BP_COUNTERS2_PAIR(port)[CNT_VAL] and
+ IPD_PORT_BP_COUNTERS3_PAIR(port)[CNT_VAL]
+ WILL NOT be incremented when IPD allocates a
+ buffer for a packet on the port. */
+ uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */
+ uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */
+ uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables
+ the sending of port level backpressure to the
+ Octane input-ports. The application should NOT
+ de-assert this bit after asserting it. The
+ receivers of this bit may have been put into
+ backpressure mode and can only be released by
+ IPD informing them that the backpressure has
+ been released.
+ GMXX_INF_MODE[EN] must be set to '1' for each
+ packet interface which requires port back pressure
+ prior to setting PBP_EN to '1'. */
+ cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers)
+ is written through to memory.
+ 1 ==> All packet data (and next buffer pointers) is
+ written into the cache.
+ 2 ==> The first aligned cache block holding the
+ packet data (and initial next buffer pointer) is
+ written to the L2 cache, all remaining cache blocks
+ are not written to the L2 cache.
+ 3 ==> The first two aligned cache blocks holding
+ the packet data (and initial next buffer pointer)
+ are written to the L2 cache, all remaining cache
+ blocks are not written to the L2 cache. */
+ uint64_t ipd_en : 1; /**< When set '1' enable the operation of the IPD.
+ When clear '0', the IPD will appear to the
+ IOB-arbiter to be applying backpressure, this
+ causes the IOB-Arbiter to not send grants to NCB
+ devices requesting to send packet data to the IPD. */
+#else
+ uint64_t ipd_en : 1;
+ cvmx_ipd_mode_t opc_mode : 2;
+ uint64_t pbp_en : 1;
+ uint64_t wqe_lend : 1;
+ uint64_t pkt_lend : 1;
+ uint64_t naddbuf : 1;
+ uint64_t addpkt : 1;
+ uint64_t reset : 1;
+ uint64_t len_m8 : 1;
+ uint64_t pkt_off : 1;
+ uint64_t ipd_full : 1;
+ uint64_t pq_nabuf : 1;
+ uint64_t pq_apkt : 1;
+ uint64_t no_wptr : 1;
+ uint64_t clken : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn63xxp1;
+ struct cvmx_ipd_ctl_status_s cn66xx;
+ struct cvmx_ipd_ctl_status_s cn68xx;
+ struct cvmx_ipd_ctl_status_s cn68xxp1;
+ struct cvmx_ipd_ctl_status_s cnf71xx;
+};
+typedef union cvmx_ipd_ctl_status cvmx_ipd_ctl_status_t;
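+
+/* Usage sketch (editor's note): a typical read-modify-write enable
+ * sequence following the field descriptions above: choose an OPC
+ * write-back mode, optionally assert PBP_EN (which must not be
+ * de-asserted afterwards), and set IPD_EN last. CVMX_IPD_CTL_STATUS and
+ * the CSR accessors are assumed from the SDK.
+ *
+ *   cvmx_ipd_ctl_status_t ctl;
+ *   ctl.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ *   ctl.s.opc_mode = 0;    (write packet data through to memory)
+ *   ctl.s.pbp_en = 1;      (port-level backpressure, see caveats above)
+ *   ctl.s.ipd_en = 1;      (enable the IPD last)
+ *   cvmx_write_csr(CVMX_IPD_CTL_STATUS, ctl.u64);
+ */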
+
+/**
+ * cvmx_ipd_ecc_ctl
+ *
+ * IPD_ECC_CTL = IPD ECC Control
+ *
+ * Allows inserting ECC errors for testing.
+ */
+union cvmx_ipd_ecc_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t pm3_syn : 2; /**< Flip the syndrome to generate a 1-bit/2-bit error
+ for testing of Packet Memory 3.
+ 2'b00 : No Error Generation
+ 2'b10, 2'b01: Flip 1 bit
+ 2'b11 : Flip 2 bits */
+ uint64_t pm2_syn : 2; /**< Flip the syndrome to generate a 1-bit/2-bit error
+ for testing of Packet Memory 2.
+ 2'b00 : No Error Generation
+ 2'b10, 2'b01: Flip 1 bit
+ 2'b11 : Flip 2 bits */
+ uint64_t pm1_syn : 2; /**< Flip the syndrome to generate a 1-bit/2-bit error
+ for testing of Packet Memory 1.
+ 2'b00 : No Error Generation
+ 2'b10, 2'b01: Flip 1 bit
+ 2'b11 : Flip 2 bits */
+ uint64_t pm0_syn : 2; /**< Flip the syndrome to generate a 1-bit/2-bit error
+ for testing of Packet Memory 0.
+ 2'b00 : No Error Generation
+ 2'b10, 2'b01: Flip 1 bit
+ 2'b11 : Flip 2 bits */
+#else
+ uint64_t pm0_syn : 2;
+ uint64_t pm1_syn : 2;
+ uint64_t pm2_syn : 2;
+ uint64_t pm3_syn : 2;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_ipd_ecc_ctl_s cn68xx;
+ struct cvmx_ipd_ecc_ctl_s cn68xxp1;
+};
+typedef union cvmx_ipd_ecc_ctl cvmx_ipd_ecc_ctl_t;
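+
+/* Usage sketch (editor's note): injecting a correctable single-bit ECC
+ * error into Packet Memory 0 for test, per the encoding above (2'b01 or
+ * 2'b10 flips one bit, 2'b11 flips two). CVMX_IPD_ECC_CTL is assumed
+ * from earlier in this header.
+ *
+ *   cvmx_ipd_ecc_ctl_t ecc;
+ *   ecc.u64 = 0;
+ *   ecc.s.pm0_syn = 1;     (flip one bit in PM0)
+ *   cvmx_write_csr(CVMX_IPD_ECC_CTL, ecc.u64);
+ *   (run traffic, confirm a single-bit error report, then...)
+ *   ecc.s.pm0_syn = 0;     (stop injecting)
+ *   cvmx_write_csr(CVMX_IPD_ECC_CTL, ecc.u64);
+ */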
+
+/**
+ * cvmx_ipd_free_ptr_fifo_ctl
+ *
+ * IPD_FREE_PTR_FIFO_CTL = IPD's FREE Pointer FIFO Control
+ *
+ * Allows reading of the Page-Pointers stored in the IPD's FREE Fifo.
+ * See also the IPD_FREE_PTR_VALUE register.
+ */
+union cvmx_ipd_free_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_free_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t max_cnts : 7; /**< Maximum number of Packet-Pointers or WQE-Pointers
+ that COULD be in the FIFO.
+ When IPD_CTL_STATUS[NO_WPTR] is set '1' this field
+ only represents the Max number of Packet-Pointers,
+ WQE-Pointers are not used in this mode. */
+ uint64_t wraddr : 8; /**< Present FIFO WQE Read address. */
+ uint64_t praddr : 8; /**< Present FIFO Packet Read address. */
+ uint64_t cena : 1; /**< Active low Chip Enable to the read the
+ pwp_fifo. This bit also controls the MUX-select
+ that steers [RADDR] to the pwp_fifo.
+ *WARNING - Setting this field to '0' will allow
+ reading of the memories through the PTR field,
+ but will cause unpredictable operation of the IPD
+ under normal operation. */
+ uint64_t raddr : 8; /**< Sets the address to read from in the pwp_fifo.
+ Addresses 0 through 63 contain Packet-Pointers and
+ addresses 64 through 127 contain WQE-Pointers.
+ When IPD_CTL_STATUS[NO_WPTR] is set '1' addresses
+ 64 through 127 are not valid. */
+#else
+ uint64_t raddr : 8;
+ uint64_t cena : 1;
+ uint64_t praddr : 8;
+ uint64_t wraddr : 8;
+ uint64_t max_cnts : 7;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ipd_free_ptr_fifo_ctl_s cn68xx;
+ struct cvmx_ipd_free_ptr_fifo_ctl_s cn68xxp1;
+};
+typedef union cvmx_ipd_free_ptr_fifo_ctl cvmx_ipd_free_ptr_fifo_ctl_t;
+
+/**
+ * cvmx_ipd_free_ptr_value
+ *
+ * IPD_FREE_PTR_VALUE = IPD's FREE Pointer Value
+ *
+ * The value of the pointer selected through the IPD_FREE_PTR_FIFO_CTL
+ */
+union cvmx_ipd_free_ptr_value {
+ uint64_t u64;
+ struct cvmx_ipd_free_ptr_value_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t ptr : 33; /**< The output of the pwp_fifo. */
+#else
+ uint64_t ptr : 33;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_ipd_free_ptr_value_s cn68xx;
+ struct cvmx_ipd_free_ptr_value_s cn68xxp1;
+};
+typedef union cvmx_ipd_free_ptr_value cvmx_ipd_free_ptr_value_t;
+
+/**
+ * cvmx_ipd_hold_ptr_fifo_ctl
+ *
+ * IPD_HOLD_PTR_FIFO_CTL = IPD's Holding Pointer FIFO Control
+ *
+ * Allows reading of the Page-Pointers stored in the IPD's Holding Fifo.
+ */
+union cvmx_ipd_hold_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_hold_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63 : 21;
+ uint64_t ptr : 33; /**< The output of the holding-fifo. */
+ uint64_t max_pkt : 3; /**< Maximum number of Packet-Pointers that COULD be
+ in the FIFO. */
+ uint64_t praddr : 3; /**< Present Packet-Pointer read address. */
+ uint64_t cena : 1; /**< Active low Chip Enable that controls the
+ MUX-select that steers [RADDR] to the fifo.
+ *WARNING - Setting this field to '0' will allow
+ reading of the memories through the PTR field,
+ but will cause unpredictable operation of the IPD
+ under normal operation. */
+ uint64_t raddr : 3; /**< Sets the address to read from in the holding
+ fifo in the IPD. This FIFO holds Packet-Pointers
+ to be used for packet data storage. */
+#else
+ uint64_t raddr : 3;
+ uint64_t cena : 1;
+ uint64_t praddr : 3;
+ uint64_t max_pkt : 3;
+ uint64_t ptr : 33;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } s;
+ struct cvmx_ipd_hold_ptr_fifo_ctl_s cn68xx;
+ struct cvmx_ipd_hold_ptr_fifo_ctl_s cn68xxp1;
+};
+typedef union cvmx_ipd_hold_ptr_fifo_ctl cvmx_ipd_hold_ptr_fifo_ctl_t;
+
+/**
+ * cvmx_ipd_int_enb
+ *
+ * IPD_INTERRUPT_ENB = IPD Interrupt Enable Register
+ *
+ * Used to enable the various interrupting conditions of IPD
+ */
+union cvmx_ipd_int_enb {
+ uint64_t u64;
+ struct cvmx_ipd_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t pw3_dbe : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t pw3_sbe : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t pw2_dbe : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t pw2_sbe : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t pw1_dbe : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t pw1_sbe : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t pw0_dbe : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t pw0_sbe : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t dat : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t eop : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t sop : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t pq_sub : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t pq_add : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t bc_ovr : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t d_coll : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t c_coll : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t cc_ovr : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t dc_ovr : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t bp_sub : 1; /**< Enables interrupts when a backpressure subtract
+ has an illegal value. */
+ uint64_t prc_par3 : 1; /**< Enable parity error interrupts for bits
+ [127:96] of the PBM memory. */
+ uint64_t prc_par2 : 1; /**< Enable parity error interrupts for bits
+ [95:64] of the PBM memory. */
+ uint64_t prc_par1 : 1; /**< Enable parity error interrupts for bits
+ [63:32] of the PBM memory. */
+ uint64_t prc_par0 : 1; /**< Enable parity error interrupts for bits
+ [31:0] of the PBM memory. */
+#else
+ uint64_t prc_par0 : 1;
+ uint64_t prc_par1 : 1;
+ uint64_t prc_par2 : 1;
+ uint64_t prc_par3 : 1;
+ uint64_t bp_sub : 1;
+ uint64_t dc_ovr : 1;
+ uint64_t cc_ovr : 1;
+ uint64_t c_coll : 1;
+ uint64_t d_coll : 1;
+ uint64_t bc_ovr : 1;
+ uint64_t pq_add : 1;
+ uint64_t pq_sub : 1;
+ uint64_t sop : 1;
+ uint64_t eop : 1;
+ uint64_t dat : 1;
+ uint64_t pw0_sbe : 1;
+ uint64_t pw0_dbe : 1;
+ uint64_t pw1_sbe : 1;
+ uint64_t pw1_dbe : 1;
+ uint64_t pw2_sbe : 1;
+ uint64_t pw2_dbe : 1;
+ uint64_t pw3_sbe : 1;
+ uint64_t pw3_dbe : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_ipd_int_enb_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t bp_sub : 1; /**< Enables interrupts when a backpressure subtract
+ has an illegal value. */
+ uint64_t prc_par3 : 1; /**< Enable parity error interrupts for bits
+ [127:96] of the PBM memory. */
+ uint64_t prc_par2 : 1; /**< Enable parity error interrupts for bits
+ [95:64] of the PBM memory. */
+ uint64_t prc_par1 : 1; /**< Enable parity error interrupts for bits
+ [63:32] of the PBM memory. */
+ uint64_t prc_par0 : 1; /**< Enable parity error interrupts for bits
+ [31:0] of the PBM memory. */
+#else
+ uint64_t prc_par0 : 1;
+ uint64_t prc_par1 : 1;
+ uint64_t prc_par2 : 1;
+ uint64_t prc_par3 : 1;
+ uint64_t bp_sub : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_int_enb_cn30xx cn31xx;
+ struct cvmx_ipd_int_enb_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t bc_ovr : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set.
+ This is a PASS-3 Field. */
+ uint64_t d_coll : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set.
+ This is a PASS-3 Field. */
+ uint64_t c_coll : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set.
+ This is a PASS-3 Field. */
+ uint64_t cc_ovr : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set.
+ This is a PASS-3 Field. */
+ uint64_t dc_ovr : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set.
+ This is a PASS-3 Field. */
+ uint64_t bp_sub : 1; /**< Enables interrupts when a backpressure subtract
+ has an illegal value. */
+ uint64_t prc_par3 : 1; /**< Enable parity error interrupts for bits
+ [127:96] of the PBM memory. */
+ uint64_t prc_par2 : 1; /**< Enable parity error interrupts for bits
+ [95:64] of the PBM memory. */
+ uint64_t prc_par1 : 1; /**< Enable parity error interrupts for bits
+ [63:32] of the PBM memory. */
+ uint64_t prc_par0 : 1; /**< Enable parity error interrupts for bits
+ [31:0] of the PBM memory. */
+#else
+ uint64_t prc_par0 : 1;
+ uint64_t prc_par1 : 1;
+ uint64_t prc_par2 : 1;
+ uint64_t prc_par3 : 1;
+ uint64_t bp_sub : 1;
+ uint64_t dc_ovr : 1;
+ uint64_t cc_ovr : 1;
+ uint64_t c_coll : 1;
+ uint64_t d_coll : 1;
+ uint64_t bc_ovr : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn38xx;
+ struct cvmx_ipd_int_enb_cn30xx cn38xxp2;
+ struct cvmx_ipd_int_enb_cn38xx cn50xx;
+ struct cvmx_ipd_int_enb_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t pq_sub : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t pq_add : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t bc_ovr : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t d_coll : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t c_coll : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t cc_ovr : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t dc_ovr : 1; /**< Allows an interrupt to be sent when the
+ corresponding bit in the IPD_INT_SUM is set. */
+ uint64_t bp_sub : 1; /**< Enables interrupts when a backpressure subtract
+ has an illegal value. */
+ uint64_t prc_par3 : 1; /**< Enable parity error interrupts for bits
+ [127:96] of the PBM memory. */
+ uint64_t prc_par2 : 1; /**< Enable parity error interrupts for bits
+ [95:64] of the PBM memory. */
+ uint64_t prc_par1 : 1; /**< Enable parity error interrupts for bits
+ [63:32] of the PBM memory. */
+ uint64_t prc_par0 : 1; /**< Enable parity error interrupts for bits
+ [31:0] of the PBM memory. */
+#else
+ uint64_t prc_par0 : 1;
+ uint64_t prc_par1 : 1;
+ uint64_t prc_par2 : 1;
+ uint64_t prc_par3 : 1;
+ uint64_t bp_sub : 1;
+ uint64_t dc_ovr : 1;
+ uint64_t cc_ovr : 1;
+ uint64_t c_coll : 1;
+ uint64_t d_coll : 1;
+ uint64_t bc_ovr : 1;
+ uint64_t pq_add : 1;
+ uint64_t pq_sub : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn52xx;
+ struct cvmx_ipd_int_enb_cn52xx cn52xxp1;
+ struct cvmx_ipd_int_enb_cn52xx cn56xx;
+ struct cvmx_ipd_int_enb_cn52xx cn56xxp1;
+ struct cvmx_ipd_int_enb_cn38xx cn58xx;
+ struct cvmx_ipd_int_enb_cn38xx cn58xxp1;
+ struct cvmx_ipd_int_enb_cn52xx cn61xx;
+ struct cvmx_ipd_int_enb_cn52xx cn63xx;
+ struct cvmx_ipd_int_enb_cn52xx cn63xxp1;
+ struct cvmx_ipd_int_enb_cn52xx cn66xx;
+ struct cvmx_ipd_int_enb_s cn68xx;
+ struct cvmx_ipd_int_enb_s cn68xxp1;
+ struct cvmx_ipd_int_enb_cn52xx cnf71xx;
+};
+typedef union cvmx_ipd_int_enb cvmx_ipd_int_enb_t;
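+
+/*
+ * A minimal sketch of enabling the PBM parity-error interrupts; the
+ * CVMX_IPD_INT_ENB address macro is assumed:
+ *
+ *   cvmx_ipd_int_enb_t enb;
+ *   enb.u64 = cvmx_read_csr(CVMX_IPD_INT_ENB);
+ *   enb.s.prc_par0 = 1;    // parity errors on PBM bits [31:0]
+ *   enb.s.prc_par1 = 1;    // [63:32]
+ *   enb.s.prc_par2 = 1;    // [95:64]
+ *   enb.s.prc_par3 = 1;    // [127:96]
+ *   cvmx_write_csr(CVMX_IPD_INT_ENB, enb.u64);
+ */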
+
+/**
+ * cvmx_ipd_int_sum
+ *
+ * IPD_INTERRUPT_SUM = IPD Interrupt Summary Register
+ *
+ * Set when an interrupt condition occurs, write '1' to clear.
+ */
+union cvmx_ipd_int_sum {
+ uint64_t u64;
+ struct cvmx_ipd_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t pw3_dbe : 1; /**< Packet memory 3 had ECC DBE. */
+ uint64_t pw3_sbe : 1; /**< Packet memory 3 had ECC SBE. */
+ uint64_t pw2_dbe : 1; /**< Packet memory 2 had ECC DBE. */
+ uint64_t pw2_sbe : 1; /**< Packet memory 2 had ECC SBE. */
+ uint64_t pw1_dbe : 1; /**< Packet memory 1 had ECC DBE. */
+ uint64_t pw1_sbe : 1; /**< Packet memory 1 had ECC SBE. */
+ uint64_t pw0_dbe : 1; /**< Packet memory 0 had ECC DBE. */
+ uint64_t pw0_sbe : 1; /**< Packet memory 0 had ECC SBE. */
+ uint64_t dat : 1; /**< Set when data arrives before an SOP for the same
+ reasm-id of a packet.
+ Only the first detected error associated with bits
+ [14:12] of this register is recorded here. A new bit
+ can be set once the previously reported bit is cleared.
+ Also see IPD_PKT_ERR. */
+ uint64_t eop : 1; /**< Set when an EOP is followed by an EOP for the same
+ reasm-id of a packet.
+ Only the first detected error associated with bits
+ [14:12] of this register is recorded here. A new bit
+ can be set once the previously reported bit is cleared.
+ Also see IPD_PKT_ERR. */
+ uint64_t sop : 1; /**< Set when an SOP is followed by an SOP for the same
+ reasm-id of a packet.
+ Only the first detected error associated with bits
+ [14:12] of this register is recorded here. A new bit
+ can be set once the previously reported bit is cleared.
+ Also see IPD_PKT_ERR. */
+ uint64_t pq_sub : 1; /**< Set when a port-qos does a subtract from the count
+ that causes the counter to wrap. */
+ uint64_t pq_add : 1; /**< Set when a port-qos does an add to the count
+ that causes the counter to wrap. */
+ uint64_t bc_ovr : 1; /**< Set when the byte-count to send to IOB overflows. */
+ uint64_t d_coll : 1; /**< Set when the packet/WQE data to be sent to IOB
+ collides. */
+ uint64_t c_coll : 1; /**< Set when the packet/WQE commands to be sent to IOB
+ collides. */
+ uint64_t cc_ovr : 1; /**< Set when the command credits to the IOB overflow. */
+ uint64_t dc_ovr : 1; /**< Set when the data credits to the IOB overflow. */
+ uint64_t bp_sub : 1; /**< Set when a backpressure subtract is done with a
+ supplied illegal value. */
+ uint64_t prc_par3 : 1; /**< Set when a parity error is detected for bits
+ [127:96] of the PBM memory. */
+ uint64_t prc_par2 : 1; /**< Set when a parity error is detected for bits
+ [95:64] of the PBM memory. */
+ uint64_t prc_par1 : 1; /**< Set when a parity error is detected for bits
+ [63:32] of the PBM memory. */
+ uint64_t prc_par0 : 1; /**< Set when a parity error is detected for bits
+ [31:0] of the PBM memory. */
+#else
+ uint64_t prc_par0 : 1;
+ uint64_t prc_par1 : 1;
+ uint64_t prc_par2 : 1;
+ uint64_t prc_par3 : 1;
+ uint64_t bp_sub : 1;
+ uint64_t dc_ovr : 1;
+ uint64_t cc_ovr : 1;
+ uint64_t c_coll : 1;
+ uint64_t d_coll : 1;
+ uint64_t bc_ovr : 1;
+ uint64_t pq_add : 1;
+ uint64_t pq_sub : 1;
+ uint64_t sop : 1;
+ uint64_t eop : 1;
+ uint64_t dat : 1;
+ uint64_t pw0_sbe : 1;
+ uint64_t pw0_dbe : 1;
+ uint64_t pw1_sbe : 1;
+ uint64_t pw1_dbe : 1;
+ uint64_t pw2_sbe : 1;
+ uint64_t pw2_dbe : 1;
+ uint64_t pw3_sbe : 1;
+ uint64_t pw3_dbe : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_ipd_int_sum_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t bp_sub : 1; /**< Set when a backpressure subtract is done with a
+ supplied illegal value. */
+ uint64_t prc_par3 : 1; /**< Set when a parity error is detected for bits
+ [127:96] of the PBM memory. */
+ uint64_t prc_par2 : 1; /**< Set when a parity error is detected for bits
+ [95:64] of the PBM memory. */
+ uint64_t prc_par1 : 1; /**< Set when a parity error is detected for bits
+ [63:32] of the PBM memory. */
+ uint64_t prc_par0 : 1; /**< Set when a parity error is detected for bits
+ [31:0] of the PBM memory. */
+#else
+ uint64_t prc_par0 : 1;
+ uint64_t prc_par1 : 1;
+ uint64_t prc_par2 : 1;
+ uint64_t prc_par3 : 1;
+ uint64_t bp_sub : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_int_sum_cn30xx cn31xx;
+ struct cvmx_ipd_int_sum_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t bc_ovr : 1; /**< Set when the byte-count to send to IOB overflows.
+ This is a PASS-3 Field. */
+ uint64_t d_coll : 1; /**< Set when the packet/WQE data to be sent to IOB
+ collides.
+ This is a PASS-3 Field. */
+ uint64_t c_coll : 1; /**< Set when the packet/WQE commands to be sent to IOB
+ collides.
+ This is a PASS-3 Field. */
+ uint64_t cc_ovr : 1; /**< Set when the command credits to the IOB overflow.
+ This is a PASS-3 Field. */
+ uint64_t dc_ovr : 1; /**< Set when the data credits to the IOB overflow.
+ This is a PASS-3 Field. */
+ uint64_t bp_sub : 1; /**< Set when a backpressure subtract is done with a
+ supplied illegal value. */
+ uint64_t prc_par3 : 1; /**< Set when a parity error is detected for bits
+ [127:96] of the PBM memory. */
+ uint64_t prc_par2 : 1; /**< Set when a parity error is detected for bits
+ [95:64] of the PBM memory. */
+ uint64_t prc_par1 : 1; /**< Set when a parity error is detected for bits
+ [63:32] of the PBM memory. */
+ uint64_t prc_par0 : 1; /**< Set when a parity error is detected for bits
+ [31:0] of the PBM memory. */
+#else
+ uint64_t prc_par0 : 1;
+ uint64_t prc_par1 : 1;
+ uint64_t prc_par2 : 1;
+ uint64_t prc_par3 : 1;
+ uint64_t bp_sub : 1;
+ uint64_t dc_ovr : 1;
+ uint64_t cc_ovr : 1;
+ uint64_t c_coll : 1;
+ uint64_t d_coll : 1;
+ uint64_t bc_ovr : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn38xx;
+ struct cvmx_ipd_int_sum_cn30xx cn38xxp2;
+ struct cvmx_ipd_int_sum_cn38xx cn50xx;
+ struct cvmx_ipd_int_sum_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t pq_sub : 1; /**< Set when a port-qos does a subtract from the count
+ that causes the counter to wrap. */
+ uint64_t pq_add : 1; /**< Set when a port-qos does an add to the count
+ that causes the counter to wrap. */
+ uint64_t bc_ovr : 1; /**< Set when the byte-count to send to IOB overflows. */
+ uint64_t d_coll : 1; /**< Set when the packet/WQE data to be sent to IOB
+ collides. */
+ uint64_t c_coll : 1; /**< Set when the packet/WQE commands to be sent to IOB
+ collides. */
+ uint64_t cc_ovr : 1; /**< Set when the command credits to the IOB overflow. */
+ uint64_t dc_ovr : 1; /**< Set when the data credits to the IOB overflow. */
+ uint64_t bp_sub : 1; /**< Set when a backpressure subtract is done with a
+ supplied illegal value. */
+ uint64_t prc_par3 : 1; /**< Set when a parity error is detected for bits
+ [127:96] of the PBM memory. */
+ uint64_t prc_par2 : 1; /**< Set when a parity error is detected for bits
+ [95:64] of the PBM memory. */
+ uint64_t prc_par1 : 1; /**< Set when a parity error is detected for bits
+ [63:32] of the PBM memory. */
+ uint64_t prc_par0 : 1; /**< Set when a parity error is detected for bits
+ [31:0] of the PBM memory. */
+#else
+ uint64_t prc_par0 : 1;
+ uint64_t prc_par1 : 1;
+ uint64_t prc_par2 : 1;
+ uint64_t prc_par3 : 1;
+ uint64_t bp_sub : 1;
+ uint64_t dc_ovr : 1;
+ uint64_t cc_ovr : 1;
+ uint64_t c_coll : 1;
+ uint64_t d_coll : 1;
+ uint64_t bc_ovr : 1;
+ uint64_t pq_add : 1;
+ uint64_t pq_sub : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn52xx;
+ struct cvmx_ipd_int_sum_cn52xx cn52xxp1;
+ struct cvmx_ipd_int_sum_cn52xx cn56xx;
+ struct cvmx_ipd_int_sum_cn52xx cn56xxp1;
+ struct cvmx_ipd_int_sum_cn38xx cn58xx;
+ struct cvmx_ipd_int_sum_cn38xx cn58xxp1;
+ struct cvmx_ipd_int_sum_cn52xx cn61xx;
+ struct cvmx_ipd_int_sum_cn52xx cn63xx;
+ struct cvmx_ipd_int_sum_cn52xx cn63xxp1;
+ struct cvmx_ipd_int_sum_cn52xx cn66xx;
+ struct cvmx_ipd_int_sum_s cn68xx;
+ struct cvmx_ipd_int_sum_s cn68xxp1;
+ struct cvmx_ipd_int_sum_cn52xx cnf71xx;
+};
+typedef union cvmx_ipd_int_sum cvmx_ipd_int_sum_t;
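+
+/*
+ * IPD_INT_SUM is write-1-to-clear, so a handler should acknowledge only
+ * the bits it actually observed. A sketch, assuming the CVMX_IPD_INT_SUM
+ * address macro:
+ *
+ *   cvmx_ipd_int_sum_t sum;
+ *   sum.u64 = cvmx_read_csr(CVMX_IPD_INT_SUM);
+ *   if (sum.s.sop || sum.s.eop || sum.s.dat) {
+ *       // read IPD_PKT_ERR[REASM] first to identify the failing reasm-id
+ *   }
+ *   cvmx_write_csr(CVMX_IPD_INT_SUM, sum.u64);   // clear exactly what was read
+ */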
+
+/**
+ * cvmx_ipd_next_pkt_ptr
+ *
+ * IPD_NEXT_PKT_PTR = IPD's Next Packet Pointer
+ *
+ * The value of the packet-pointer that has been fetched and is held in the valid register.
+ */
+union cvmx_ipd_next_pkt_ptr {
+ uint64_t u64;
+ struct cvmx_ipd_next_pkt_ptr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t ptr : 33; /**< Pointer value. */
+#else
+ uint64_t ptr : 33;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_ipd_next_pkt_ptr_s cn68xx;
+ struct cvmx_ipd_next_pkt_ptr_s cn68xxp1;
+};
+typedef union cvmx_ipd_next_pkt_ptr cvmx_ipd_next_pkt_ptr_t;
+
+/**
+ * cvmx_ipd_next_wqe_ptr
+ *
+ * IPD_NEXT_WQE_PTR = IPD's NEXT_WQE Pointer
+ *
+ * The value of the WQE-pointer that has been fetched and is held in the valid register.
+ */
+union cvmx_ipd_next_wqe_ptr {
+ uint64_t u64;
+ struct cvmx_ipd_next_wqe_ptr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t ptr : 33; /**< Pointer value.
+ When IPD_CTL_STATUS[NO_WPTR] is set '1' this field
+ represents a Packet-Pointer NOT a WQE pointer. */
+#else
+ uint64_t ptr : 33;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_ipd_next_wqe_ptr_s cn68xx;
+ struct cvmx_ipd_next_wqe_ptr_s cn68xxp1;
+};
+typedef union cvmx_ipd_next_wqe_ptr cvmx_ipd_next_wqe_ptr_t;
+
+/**
+ * cvmx_ipd_not_1st_mbuff_skip
+ *
+ * IPD_NOT_1ST_MBUFF_SKIP = IPD Not First MBUFF Word Skip Size
+ *
+ * The number of words that the IPD will skip when writing any MBUFF that is not the first.
+ */
+union cvmx_ipd_not_1st_mbuff_skip {
+ uint64_t u64;
+ struct cvmx_ipd_not_1st_mbuff_skip_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t skip_sz : 6; /**< The number of 8-byte words from the top of any
+ MBUFF, that is not the 1st MBUFF, at which the IPD
+ will write the next-pointer.
+ Legal values are 0 to 32, where the MAX value
+ is also limited to:
+ IPD_PACKET_MBUFF_SIZE[MB_SIZE] - 16. */
+#else
+ uint64_t skip_sz : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn30xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn31xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn38xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn38xxp2;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn50xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn52xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn52xxp1;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn56xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn56xxp1;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn58xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn58xxp1;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn61xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn63xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn63xxp1;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn66xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn68xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn68xxp1;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cnf71xx;
+};
+typedef union cvmx_ipd_not_1st_mbuff_skip cvmx_ipd_not_1st_mbuff_skip_t;
+
+/**
+ * cvmx_ipd_on_bp_drop_pkt#
+ *
+ * RESERVE SPACE UP TO 0x3FFF
+ *
+ *
+ * RESERVED FOR FORMER IPD_SUB_PKIND_FCS - MOVED TO PIP
+ *
+ * RESERVE 4008 - 40FF
+ *
+ *
+ * IPD_ON_BP_DROP_PKT = IPD On Backpressure Drop Packet
+ *
+ * When IPD applies backpressure to a BPID and the corresponding bit in this register is set,
+ * then previously received packets will be dropped when processed.
+ */
+union cvmx_ipd_on_bp_drop_pktx {
+ uint64_t u64;
+ struct cvmx_ipd_on_bp_drop_pktx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prt_enb : 64; /**< The BPID corresponding to the bit position in this
+ field will drop all NON-RAW packets to that BPID
+ when BPID level backpressure is applied to that
+ BPID. The applying of BPID-level backpressure for
+ this dropping does not take into consideration the
+ value of IPD_BPIDX_MBUF_TH[BP_ENB], nor
+ IPD_RED_BPID_ENABLE[PRT_ENB]. */
+#else
+ uint64_t prt_enb : 64;
+#endif
+ } s;
+ struct cvmx_ipd_on_bp_drop_pktx_s cn68xx;
+ struct cvmx_ipd_on_bp_drop_pktx_s cn68xxp1;
+};
+typedef union cvmx_ipd_on_bp_drop_pktx cvmx_ipd_on_bp_drop_pktx_t;
+
+/**
+ * cvmx_ipd_packet_mbuff_size
+ *
+ * IPD_PACKET_MBUFF_SIZE = IPD's PACKET MBUF Size In Words
+ *
+ * The number of words in a MBUFF used for packet data store.
+ */
+union cvmx_ipd_packet_mbuff_size {
+ uint64_t u64;
+ struct cvmx_ipd_packet_mbuff_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t mb_size : 12; /**< The number of 8-byte words in a MBUF.
+ This must be a number in the range of 32 to
+ 2048.
+ This is also the size of the FPA's
+ Queue-0 Free-Page. */
+#else
+ uint64_t mb_size : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_ipd_packet_mbuff_size_s cn30xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn31xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn38xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn38xxp2;
+ struct cvmx_ipd_packet_mbuff_size_s cn50xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn52xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn52xxp1;
+ struct cvmx_ipd_packet_mbuff_size_s cn56xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn56xxp1;
+ struct cvmx_ipd_packet_mbuff_size_s cn58xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn58xxp1;
+ struct cvmx_ipd_packet_mbuff_size_s cn61xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn63xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn63xxp1;
+ struct cvmx_ipd_packet_mbuff_size_s cn66xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn68xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn68xxp1;
+ struct cvmx_ipd_packet_mbuff_size_s cnf71xx;
+};
+typedef union cvmx_ipd_packet_mbuff_size cvmx_ipd_packet_mbuff_size_t;
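+
+/*
+ * A sketch of deriving MB_SIZE from the FPA Queue-0 block size and keeping
+ * IPD_NOT_1ST_MBUFF_SKIP within its documented limit
+ * (SKIP_SZ <= MB_SIZE - 16); both CVMX_IPD_* address macros are assumed:
+ *
+ *   const int bytes_per_buf = 2048;          // FPA Queue-0 free-page size
+ *   cvmx_ipd_packet_mbuff_size_t mbsz;
+ *   cvmx_ipd_not_1st_mbuff_skip_t skip;
+ *
+ *   mbsz.u64 = 0;
+ *   mbsz.s.mb_size = bytes_per_buf / 8;      // 8-byte words, range 32..2048
+ *   cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, mbsz.u64);
+ *
+ *   skip.u64 = 0;
+ *   skip.s.skip_sz = 4;                      // legal: 0..32 and <= MB_SIZE - 16
+ *   cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, skip.u64);
+ */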
+
+/**
+ * cvmx_ipd_pkt_err
+ *
+ * IPD_PKT_ERR = IPD Packet Error Register
+ *
+ * Provides status about the failing packet receive error.
+ */
+union cvmx_ipd_pkt_err {
+ uint64_t u64;
+ struct cvmx_ipd_pkt_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t reasm : 6; /**< When an IPD_INT_SUM[14:12] bit is set, this field
+ latches the failing reasm number associated with
+ that bit.
+ Values 0-62 can be seen here; reasm-id 63 is not
+ used. */
+#else
+ uint64_t reasm : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_ipd_pkt_err_s cn68xx;
+ struct cvmx_ipd_pkt_err_s cn68xxp1;
+};
+typedef union cvmx_ipd_pkt_err cvmx_ipd_pkt_err_t;
+
+/**
+ * cvmx_ipd_pkt_ptr_valid
+ *
+ * IPD_PKT_PTR_VALID = IPD's Packet Pointer Valid
+ *
+ * The value of the packet-pointer that has been fetched and is held in the valid register.
+ */
+union cvmx_ipd_pkt_ptr_valid {
+ uint64_t u64;
+ struct cvmx_ipd_pkt_ptr_valid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t ptr : 29; /**< Pointer value. */
+#else
+ uint64_t ptr : 29;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_ipd_pkt_ptr_valid_s cn30xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn31xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn38xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn50xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn52xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn52xxp1;
+ struct cvmx_ipd_pkt_ptr_valid_s cn56xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn56xxp1;
+ struct cvmx_ipd_pkt_ptr_valid_s cn58xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn58xxp1;
+ struct cvmx_ipd_pkt_ptr_valid_s cn61xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn63xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn63xxp1;
+ struct cvmx_ipd_pkt_ptr_valid_s cn66xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cnf71xx;
+};
+typedef union cvmx_ipd_pkt_ptr_valid cvmx_ipd_pkt_ptr_valid_t;
+
+/**
+ * cvmx_ipd_port#_bp_page_cnt
+ *
+ * IPD_PORTX_BP_PAGE_CNT = IPD Port Backpressure Page Count
+ *
+ * The number of pages in use by the port that, when exceeded, causes backpressure to be applied to the port.
+ * See also IPD_PORTX_BP_PAGE_CNT2
+ * See also IPD_PORTX_BP_PAGE_CNT3
+ */
+union cvmx_ipd_portx_bp_page_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_portx_bp_page_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t bp_enb : 1; /**< When set to '1' BP will be applied; if '0' BP will
+ not be applied to the port. */
+ uint64_t page_cnt : 17; /**< The number of page pointers assigned to
+ the port, that when exceeded will cause
+ back-pressure to be applied to the port.
+ This value is in 256 page-pointer increments,
+ (i.e. 0 = 0-page-ptrs, 1 = 256-page-ptrs,..) */
+#else
+ uint64_t page_cnt : 17;
+ uint64_t bp_enb : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn30xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn31xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn38xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn38xxp2;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn50xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn52xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn52xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn56xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn56xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn58xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn58xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn61xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn63xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn63xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn66xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cnf71xx;
+};
+typedef union cvmx_ipd_portx_bp_page_cnt cvmx_ipd_portx_bp_page_cnt_t;
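+
+/*
+ * PAGE_CNT is in 256-page-pointer increments, so a threshold of N pages is
+ * programmed as N/256 (rounded up). A sketch, assuming an indexed
+ * CVMX_IPD_PORTX_BP_PAGE_CNT(port) address macro:
+ *
+ *   const int port = 0;
+ *   const int pages = 4096;                  // backpressure above ~4096 pages
+ *   cvmx_ipd_portx_bp_page_cnt_t bp;
+ *   bp.u64 = 0;
+ *   bp.s.page_cnt = (pages + 255) / 256;     // 16 increments of 256 pages
+ *   bp.s.bp_enb = 1;                         // enable backpressure on this port
+ *   cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port), bp.u64);
+ */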
+
+/**
+ * cvmx_ipd_port#_bp_page_cnt2
+ *
+ * IPD_PORTX_BP_PAGE_CNT2 = IPD Port Backpressure Page Count
+ *
+ * The number of pages in use by the port that, when exceeded, causes backpressure to be applied to the port.
+ * See also IPD_PORTX_BP_PAGE_CNT
+ * See also IPD_PORTX_BP_PAGE_CNT3
+ * 0x368-0x380
+ */
+union cvmx_ipd_portx_bp_page_cnt2 {
+ uint64_t u64;
+ struct cvmx_ipd_portx_bp_page_cnt2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t bp_enb : 1; /**< When set to '1' BP will be applied; if '0' BP will
+ not be applied to the port. */
+ uint64_t page_cnt : 17; /**< The number of page pointers assigned to
+ the port, that when exceeded will cause
+ back-pressure to be applied to the port.
+ This value is in 256 page-pointer increments,
+ (i.e. 0 = 0-page-ptrs, 1 = 256-page-ptrs,..) */
+#else
+ uint64_t page_cnt : 17;
+ uint64_t bp_enb : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn52xx;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn52xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn56xx;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn56xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn61xx;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn63xx;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn63xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn66xx;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cnf71xx;
+};
+typedef union cvmx_ipd_portx_bp_page_cnt2 cvmx_ipd_portx_bp_page_cnt2_t;
+
+/**
+ * cvmx_ipd_port#_bp_page_cnt3
+ *
+ * IPD_PORTX_BP_PAGE_CNT3 = IPD Port Backpressure Page Count
+ *
+ * The number of pages in use by the port that, when exceeded, causes backpressure to be applied to the port.
+ * See also IPD_PORTX_BP_PAGE_CNT
+ * See also IPD_PORTX_BP_PAGE_CNT2
+ * 0x3d0-408
+ */
+union cvmx_ipd_portx_bp_page_cnt3 {
+ uint64_t u64;
+ struct cvmx_ipd_portx_bp_page_cnt3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t bp_enb : 1; /**< When set to '1' BP will be applied; if '0' BP will
+ not be applied to the port. */
+ uint64_t page_cnt : 17; /**< The number of page pointers assigned to
+ the port, that when exceeded will cause
+ back-pressure to be applied to the port.
+ This value is in 256 page-pointer increments,
+ (i.e. 0 = 0-page-ptrs, 1 = 256-page-ptrs,..) */
+#else
+ uint64_t page_cnt : 17;
+ uint64_t bp_enb : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_ipd_portx_bp_page_cnt3_s cn61xx;
+ struct cvmx_ipd_portx_bp_page_cnt3_s cn63xx;
+ struct cvmx_ipd_portx_bp_page_cnt3_s cn63xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt3_s cn66xx;
+ struct cvmx_ipd_portx_bp_page_cnt3_s cnf71xx;
+};
+typedef union cvmx_ipd_portx_bp_page_cnt3 cvmx_ipd_portx_bp_page_cnt3_t;
+
+/**
+ * cvmx_ipd_port_bp_counters2_pair#
+ *
+ * IPD_PORT_BP_COUNTERS2_PAIRX = MBUF Counters for Ports, used to generate Back Pressure Per Port.
+ * See also IPD_PORT_BP_COUNTERS_PAIRX
+ * See also IPD_PORT_BP_COUNTERS3_PAIRX
+ * 0x388-0x3a0
+ */
+union cvmx_ipd_port_bp_counters2_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters2_pairx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t cnt_val : 25; /**< Number of MBUFs being used by data on this port. */
+#else
+ uint64_t cnt_val : 25;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn52xx;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn52xxp1;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn56xx;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn56xxp1;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn61xx;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn63xx;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn63xxp1;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn66xx;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cnf71xx;
+};
+typedef union cvmx_ipd_port_bp_counters2_pairx cvmx_ipd_port_bp_counters2_pairx_t;
+
+/**
+ * cvmx_ipd_port_bp_counters3_pair#
+ *
+ * IPD_PORT_BP_COUNTERS3_PAIRX = MBUF Counters for Ports, used to generate Back Pressure Per Port.
+ * See also IPD_PORT_BP_COUNTERS_PAIRX
+ * See also IPD_PORT_BP_COUNTERS2_PAIRX
+ * 0x3b0-0x3c8
+ */
+union cvmx_ipd_port_bp_counters3_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters3_pairx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t cnt_val : 25; /**< Number of MBUFs being used by data on this port. */
+#else
+ uint64_t cnt_val : 25;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_ipd_port_bp_counters3_pairx_s cn61xx;
+ struct cvmx_ipd_port_bp_counters3_pairx_s cn63xx;
+ struct cvmx_ipd_port_bp_counters3_pairx_s cn63xxp1;
+ struct cvmx_ipd_port_bp_counters3_pairx_s cn66xx;
+ struct cvmx_ipd_port_bp_counters3_pairx_s cnf71xx;
+};
+typedef union cvmx_ipd_port_bp_counters3_pairx cvmx_ipd_port_bp_counters3_pairx_t;
+
+/**
+ * cvmx_ipd_port_bp_counters4_pair#
+ *
+ * IPD_PORT_BP_COUNTERS4_PAIRX = MBUF Counters for Ports, used to generate Back Pressure Per Port.
+ * See also IPD_PORT_BP_COUNTERS_PAIRX
+ * See also IPD_PORT_BP_COUNTERS2_PAIRX
+ * 0x410-0x3c8
+ */
+union cvmx_ipd_port_bp_counters4_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters4_pairx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t cnt_val : 25; /**< Number of MBUFs being used by data on this port. */
+#else
+ uint64_t cnt_val : 25;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_ipd_port_bp_counters4_pairx_s cn61xx;
+ struct cvmx_ipd_port_bp_counters4_pairx_s cn66xx;
+ struct cvmx_ipd_port_bp_counters4_pairx_s cnf71xx;
+};
+typedef union cvmx_ipd_port_bp_counters4_pairx cvmx_ipd_port_bp_counters4_pairx_t;
+
+/**
+ * cvmx_ipd_port_bp_counters_pair#
+ *
+ * IPD_PORT_BP_COUNTERS_PAIRX = MBUF Counters for Ports, used to generate Back Pressure Per Port.
+ * See also IPD_PORT_BP_COUNTERS2_PAIRX
+ * See also IPD_PORT_BP_COUNTERS3_PAIRX
+ * 0x1b8-0x2d0
+ */
+union cvmx_ipd_port_bp_counters_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters_pairx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t cnt_val : 25; /**< Number of MBUFs being used by data on this port. */
+#else
+ uint64_t cnt_val : 25;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn30xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn31xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn38xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn38xxp2;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn50xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn52xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn52xxp1;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn56xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn56xxp1;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn58xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn58xxp1;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn61xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn63xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn63xxp1;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn66xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cnf71xx;
+};
+typedef union cvmx_ipd_port_bp_counters_pairx cvmx_ipd_port_bp_counters_pairx_t;
+
+/**
+ * cvmx_ipd_port_ptr_fifo_ctl
+ *
+ * IPD_PORT_PTR_FIFO_CTL = IPD's Reasm-Id Pointer FIFO Control
+ *
+ * Allows reading of the Page-Pointers stored in the IPD's Reasm-Id Fifo.
+ */
+union cvmx_ipd_port_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_port_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t ptr : 33; /**< The output of the reasm-id-ptr-fifo. */
+ uint64_t max_pkt : 7; /**< Maximum number of Packet-Pointers that are
+ in the FIFO. */
+ uint64_t cena : 1; /**< Active low Chip Enable to the read port of the
+ pwp_fifo. This bit also controls the MUX-select
+ that steers [RADDR] to the pwp_fifo.
+ *WARNING - Setting this field to '0' will allow
+ reading of the memories through the PTR field,
+ but will cause unpredictable operation of the IPD
+ under normal operation. */
+ uint64_t raddr : 7; /**< Sets the address to read from in the reasm-id
+ fifo in the IPD. This FIFO holds Packet-Pointers
+ to be used for packet data storage. */
+#else
+ uint64_t raddr : 7;
+ uint64_t cena : 1;
+ uint64_t max_pkt : 7;
+ uint64_t ptr : 33;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_ipd_port_ptr_fifo_ctl_s cn68xx;
+ struct cvmx_ipd_port_ptr_fifo_ctl_s cn68xxp1;
+};
+typedef union cvmx_ipd_port_ptr_fifo_ctl cvmx_ipd_port_ptr_fifo_ctl_t;
+
+/**
+ * cvmx_ipd_port_qos_#_cnt
+ *
+ * IPD_PORT_QOS_X_CNT = IPD PortX QOS-0 Count
+ *
+ * A counter per port/qos. Counters are organized in sequence: the first 8 counters (0-7) belong to Port-0
+ * QOS 0-7 respectively, followed by Port-1 at (8-15), etc.
+ * Ports 0-3, 32-43
+ */
+union cvmx_ipd_port_qos_x_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_port_qos_x_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wmark : 32; /**< When the CNT field, after being modified, is equal
+ to or crosses this value (i.e. the value was greater
+ than and then becomes less than, or was less than
+ and becomes greater than) the corresponding bit in
+ IPD_PORT_QOS_INTX is set. */
+ uint64_t cnt : 32; /**< The packet related count that is incremented as
+ specified by IPD_SUB_PORT_QOS_CNT. */
+#else
+ uint64_t cnt : 32;
+ uint64_t wmark : 32;
+#endif
+ } s;
+ struct cvmx_ipd_port_qos_x_cnt_s cn52xx;
+ struct cvmx_ipd_port_qos_x_cnt_s cn52xxp1;
+ struct cvmx_ipd_port_qos_x_cnt_s cn56xx;
+ struct cvmx_ipd_port_qos_x_cnt_s cn56xxp1;
+ struct cvmx_ipd_port_qos_x_cnt_s cn61xx;
+ struct cvmx_ipd_port_qos_x_cnt_s cn63xx;
+ struct cvmx_ipd_port_qos_x_cnt_s cn63xxp1;
+ struct cvmx_ipd_port_qos_x_cnt_s cn66xx;
+ struct cvmx_ipd_port_qos_x_cnt_s cn68xx;
+ struct cvmx_ipd_port_qos_x_cnt_s cn68xxp1;
+ struct cvmx_ipd_port_qos_x_cnt_s cnf71xx;
+};
+typedef union cvmx_ipd_port_qos_x_cnt cvmx_ipd_port_qos_x_cnt_t;
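+
+/*
+ * Counters are laid out as port*8 + qos, so Port-1/QOS-3 is counter 11.
+ * A read sketch, assuming an indexed CVMX_IPD_PORT_QOS_X_CNT(index)
+ * address macro:
+ *
+ *   const int port = 1, qos = 3;
+ *   cvmx_ipd_port_qos_x_cnt_t qcnt;
+ *   qcnt.u64 = cvmx_read_csr(CVMX_IPD_PORT_QOS_X_CNT(port * 8 + qos));
+ *   // qcnt.s.cnt is the running count; crossing qcnt.s.wmark sets the
+ *   // corresponding bit in IPD_PORT_QOS_INTX
+ */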
+
+/**
+ * cvmx_ipd_port_qos_int#
+ *
+ * IPD_PORT_QOS_INTX = IPD PORT-QOS Interrupt
+ *
+ * See the description for IPD_PORT_QOS_X_CNT
+ *
+ * 0=P0-7; 1=P8-15; 2=P16-23; 3=P24-31; 4=P32-39; 5=P40-47; 6=P48-55; 7=P56-63
+ *
+ * The only ports used are P0-3, P32-39, and P40-47. Therefore only IPD_PORT_QOS_INT0, IPD_PORT_QOS_INT4,
+ * and IPD_PORT_QOS_INT5 exist and, furthermore, <63:32> of IPD_PORT_QOS_INT0
+ * are reserved.
+ */
+union cvmx_ipd_port_qos_intx {
+ uint64_t u64;
+ struct cvmx_ipd_port_qos_intx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr : 64; /**< Interrupt bits. */
+#else
+ uint64_t intr : 64;
+#endif
+ } s;
+ struct cvmx_ipd_port_qos_intx_s cn52xx;
+ struct cvmx_ipd_port_qos_intx_s cn52xxp1;
+ struct cvmx_ipd_port_qos_intx_s cn56xx;
+ struct cvmx_ipd_port_qos_intx_s cn56xxp1;
+ struct cvmx_ipd_port_qos_intx_s cn61xx;
+ struct cvmx_ipd_port_qos_intx_s cn63xx;
+ struct cvmx_ipd_port_qos_intx_s cn63xxp1;
+ struct cvmx_ipd_port_qos_intx_s cn66xx;
+ struct cvmx_ipd_port_qos_intx_s cn68xx;
+ struct cvmx_ipd_port_qos_intx_s cn68xxp1;
+ struct cvmx_ipd_port_qos_intx_s cnf71xx;
+};
+typedef union cvmx_ipd_port_qos_intx cvmx_ipd_port_qos_intx_t;
+
+/**
+ * cvmx_ipd_port_qos_int_enb#
+ *
+ * IPD_PORT_QOS_INT_ENBX = IPD PORT-QOS Interrupt Enable
+ *
+ * When the IPD_PORT_QOS_INTX[\#] is '1' and IPD_PORT_QOS_INT_ENBX[\#] is '1' an interrupt will be generated.
+ */
+union cvmx_ipd_port_qos_int_enbx {
+ uint64_t u64;
+ struct cvmx_ipd_port_qos_int_enbx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb : 64; /**< Enable bits. */
+#else
+ uint64_t enb : 64;
+#endif
+ } s;
+ struct cvmx_ipd_port_qos_int_enbx_s cn52xx;
+ struct cvmx_ipd_port_qos_int_enbx_s cn52xxp1;
+ struct cvmx_ipd_port_qos_int_enbx_s cn56xx;
+ struct cvmx_ipd_port_qos_int_enbx_s cn56xxp1;
+ struct cvmx_ipd_port_qos_int_enbx_s cn61xx;
+ struct cvmx_ipd_port_qos_int_enbx_s cn63xx;
+ struct cvmx_ipd_port_qos_int_enbx_s cn63xxp1;
+ struct cvmx_ipd_port_qos_int_enbx_s cn66xx;
+ struct cvmx_ipd_port_qos_int_enbx_s cn68xx;
+ struct cvmx_ipd_port_qos_int_enbx_s cn68xxp1;
+ struct cvmx_ipd_port_qos_int_enbx_s cnf71xx;
+};
+typedef union cvmx_ipd_port_qos_int_enbx cvmx_ipd_port_qos_int_enbx_t;
+
+/**
+ * cvmx_ipd_port_sop#
+ *
+ * IPD_PORT_SOP = IPD Reasm-Id SOP
+ *
+ * Set when an SOP is detected on a reasm-num; the reasm-num value selects the bit set in this register.
+ */
+union cvmx_ipd_port_sopx {
+ uint64_t u64;
+ struct cvmx_ipd_port_sopx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t sop : 64; /**< When set to '1' an SOP was detected on a reasm-num;
+ when clear ('0') no SOP has yet been received, or an
+ EOP was received on the reasm-num.
+ IPD only supports 63 reasm-nums, so bit [63]
+ should never be set. */
+#else
+ uint64_t sop : 64;
+#endif
+ } s;
+ struct cvmx_ipd_port_sopx_s cn68xx;
+ struct cvmx_ipd_port_sopx_s cn68xxp1;
+};
+typedef union cvmx_ipd_port_sopx cvmx_ipd_port_sopx_t;
+
+/**
+ * cvmx_ipd_prc_hold_ptr_fifo_ctl
+ *
+ * IPD_PRC_HOLD_PTR_FIFO_CTL = IPD's PRC Holding Pointer FIFO Control
+ *
+ * Allows reading of the Page-Pointers stored in the IPD's PRC Holding Fifo.
+ */
+union cvmx_ipd_prc_hold_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t max_pkt : 3; /**< Maximum number of Packet-Pointers that COULD be
+ in the FIFO. */
+ uint64_t praddr : 3; /**< Present Packet-Pointer read address. */
+ uint64_t ptr : 29; /**< The output of the prc-holding-fifo. */
+ uint64_t cena : 1; /**< Active low Chip Enable that controls the
+ MUX-select that steers [RADDR] to the fifo.
+ *WARNING - Setting this field to '0' will allow
+ reading of the memories through the PTR field,
+ but will cause unpredictable operation of the IPD
+ under normal operation. */
+ uint64_t raddr : 3; /**< Sets the address to read from in the holding
+ fifo in the PRC. This FIFO holds Packet-Pointers
+ to be used for packet data storage. */
+#else
+ uint64_t raddr : 3;
+ uint64_t cena : 1;
+ uint64_t ptr : 29;
+ uint64_t praddr : 3;
+ uint64_t max_pkt : 3;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } s;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn30xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn31xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn38xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn50xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xxp1;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xxp1;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xxp1;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn61xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xxp1;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn66xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cnf71xx;
+};
+typedef union cvmx_ipd_prc_hold_ptr_fifo_ctl cvmx_ipd_prc_hold_ptr_fifo_ctl_t;
+
+/**
+ * cvmx_ipd_prc_port_ptr_fifo_ctl
+ *
+ * IPD_PRC_PORT_PTR_FIFO_CTL = IPD's PRC PORT Pointer FIFO Control
+ *
+ * Allows reading of the Page-Pointers stored in the IPD's PRC PORT Fifo.
+ */
+union cvmx_ipd_prc_port_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t max_pkt : 7; /**< Maximum number of Packet-Pointers that are
+ in the FIFO. */
+ uint64_t ptr : 29; /**< The output of the prc-port-ptr-fifo. */
+ uint64_t cena : 1; /**< Active low Chip Enable to the read port of the
+ pwp_fifo. This bit also controls the MUX-select
+ that steers [RADDR] to the pwp_fifo.
+ *WARNING - Setting this field to '0' will allow
+ reading of the memories through the PTR field,
+ but will cause unpredictable operation of the IPD
+ under normal operation. */
+ uint64_t raddr : 7; /**< Sets the address to read from in the port
+ fifo in the PRC. This FIFO holds Packet-Pointers
+ to be used for packet data storage. */
+#else
+ uint64_t raddr : 7;
+ uint64_t cena : 1;
+ uint64_t ptr : 29;
+ uint64_t max_pkt : 7;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn30xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn31xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn38xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn50xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xxp1;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xxp1;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xxp1;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn61xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xxp1;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn66xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cnf71xx;
+};
+typedef union cvmx_ipd_prc_port_ptr_fifo_ctl cvmx_ipd_prc_port_ptr_fifo_ctl_t;
+
+/**
+ * cvmx_ipd_ptr_count
+ *
+ * IPD_PTR_COUNT = IPD Page Pointer Count
+ *
+ * Shows the number of WQE and Packet Page Pointers stored in the IPD.
+ */
+union cvmx_ipd_ptr_count {
+ uint64_t u64;
+ struct cvmx_ipd_ptr_count_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t pktv_cnt : 1; /**< PKT Ptr Valid. */
+ uint64_t wqev_cnt : 1; /**< WQE Ptr Valid. This value is '1' when a WQE
+ is being held for use by the IPD. The value of this
+ field should be added to the value of the
+ WQE_PCNT field of this register for a total
+ count of the WQE Page Pointers being held by IPD.
+ When IPD_CTL_STATUS[NO_WPTR] is set '1' this field
+ represents a Packet-Pointer NOT a WQE pointer. */
+ uint64_t pfif_cnt : 3; /**< See PKT_PCNT. */
+ uint64_t pkt_pcnt : 7; /**< This value plus PFIF_CNT plus
+ IPD_PRC_PORT_PTR_FIFO_CTL[MAX_PKT] is the number
+ of PKT Page Pointers in IPD. */
+ uint64_t wqe_pcnt : 7; /**< Number of page pointers for WQE storage that are
+ buffered in the IPD. The total count is the value
+ of this field plus the field [WQEV_CNT]. For
+ PASS-1 (which does not have the WQEV_CNT field)
+ when the value of this register is '0' there still
+ may be 1 pointer being held by IPD. */
+#else
+ uint64_t wqe_pcnt : 7;
+ uint64_t pkt_pcnt : 7;
+ uint64_t pfif_cnt : 3;
+ uint64_t wqev_cnt : 1;
+ uint64_t pktv_cnt : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_ipd_ptr_count_s cn30xx;
+ struct cvmx_ipd_ptr_count_s cn31xx;
+ struct cvmx_ipd_ptr_count_s cn38xx;
+ struct cvmx_ipd_ptr_count_s cn38xxp2;
+ struct cvmx_ipd_ptr_count_s cn50xx;
+ struct cvmx_ipd_ptr_count_s cn52xx;
+ struct cvmx_ipd_ptr_count_s cn52xxp1;
+ struct cvmx_ipd_ptr_count_s cn56xx;
+ struct cvmx_ipd_ptr_count_s cn56xxp1;
+ struct cvmx_ipd_ptr_count_s cn58xx;
+ struct cvmx_ipd_ptr_count_s cn58xxp1;
+ struct cvmx_ipd_ptr_count_s cn61xx;
+ struct cvmx_ipd_ptr_count_s cn63xx;
+ struct cvmx_ipd_ptr_count_s cn63xxp1;
+ struct cvmx_ipd_ptr_count_s cn66xx;
+ struct cvmx_ipd_ptr_count_s cn68xx;
+ struct cvmx_ipd_ptr_count_s cn68xxp1;
+ struct cvmx_ipd_ptr_count_s cnf71xx;
+};
+typedef union cvmx_ipd_ptr_count cvmx_ipd_ptr_count_t;
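+
+/*
+ * The pointer totals are split across several fields; a sketch of combining
+ * them per the field descriptions above (CVMX_IPD_PTR_COUNT and
+ * CVMX_IPD_PRC_PORT_PTR_FIFO_CTL address macros assumed):
+ *
+ *   cvmx_ipd_ptr_count_t pc;
+ *   cvmx_ipd_prc_port_ptr_fifo_ctl_t prc;
+ *   pc.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
+ *   prc.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
+ *   int wqe_total = pc.s.wqe_pcnt + pc.s.wqev_cnt;                  // WQE ptrs
+ *   int pkt_total = pc.s.pkt_pcnt + pc.s.pfif_cnt + prc.s.max_pkt;  // PKT ptrs
+ */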
+
+/**
+ * cvmx_ipd_pwp_ptr_fifo_ctl
+ *
+ * IPD_PWP_PTR_FIFO_CTL = IPD's PWP Pointer FIFO Control
+ *
+ * Allows reading of the Page-Pointers stored in the IPD's PWP Fifo.
+ */
+union cvmx_ipd_pwp_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t max_cnts : 7; /**< Maximum number of Packet-Pointers or WQE-Pointers
+ that COULD be in the FIFO.
+ When IPD_CTL_STATUS[NO_WPTR] is set '1' this field
+ only represents the Max number of Packet-Pointers,
+ WQE-Pointers are not used in this mode. */
+ uint64_t wraddr : 8; /**< Present FIFO WQE Read address. */
+ uint64_t praddr : 8; /**< Present FIFO Packet Read address. */
+ uint64_t ptr : 29; /**< The output of the pwp_fifo. */
+ uint64_t cena : 1; /**< Active low Chip Enable to the read port of the
+ pwp_fifo. This bit also controls the MUX-select
+ that steers [RADDR] to the pwp_fifo.
+ *WARNING - Setting this field to '0' will allow
+ reading of the memories through the PTR field,
+ but will cause unpredictable operation of the IPD
+ under normal operation. */
+ uint64_t raddr : 8; /**< Sets the address to read from in the pwp_fifo.
+ Addresses 0 through 63 contain Packet-Pointers and
+ addresses 64 through 127 contain WQE-Pointers.
+ When IPD_CTL_STATUS[NO_WPTR] is set '1' addresses
+ 64 through 127 are not valid. */
+#else
+ uint64_t raddr : 8;
+ uint64_t cena : 1;
+ uint64_t ptr : 29;
+ uint64_t praddr : 8;
+ uint64_t wraddr : 8;
+ uint64_t max_cnts : 7;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } s;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn30xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn31xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn38xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn50xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xxp1;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xxp1;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xxp1;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn61xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xxp1;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn66xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cnf71xx;
+};
+typedef union cvmx_ipd_pwp_ptr_fifo_ctl cvmx_ipd_pwp_ptr_fifo_ctl_t;
+
+/**
+ * cvmx_ipd_qos#_red_marks
+ *
+ * IPD_QOS0_RED_MARKS = IPD QOS 0 RED Marks, High/Low
+ *
+ * Sets the pass/drop marks for a QOS level.
+ */
+union cvmx_ipd_qosx_red_marks {
+ uint64_t u64;
+ struct cvmx_ipd_qosx_red_marks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t drop : 32; /**< Packets will be dropped when the average value of
+ IPD_QUE0_FREE_PAGE_CNT is equal to or less than
+ this value. */
+ uint64_t pass : 32; /**< Packets will be passed when the average value of
+ IPD_QUE0_FREE_PAGE_CNT is larger than this value. */
+#else
+ uint64_t pass : 32;
+ uint64_t drop : 32;
+#endif
+ } s;
+ struct cvmx_ipd_qosx_red_marks_s cn30xx;
+ struct cvmx_ipd_qosx_red_marks_s cn31xx;
+ struct cvmx_ipd_qosx_red_marks_s cn38xx;
+ struct cvmx_ipd_qosx_red_marks_s cn38xxp2;
+ struct cvmx_ipd_qosx_red_marks_s cn50xx;
+ struct cvmx_ipd_qosx_red_marks_s cn52xx;
+ struct cvmx_ipd_qosx_red_marks_s cn52xxp1;
+ struct cvmx_ipd_qosx_red_marks_s cn56xx;
+ struct cvmx_ipd_qosx_red_marks_s cn56xxp1;
+ struct cvmx_ipd_qosx_red_marks_s cn58xx;
+ struct cvmx_ipd_qosx_red_marks_s cn58xxp1;
+ struct cvmx_ipd_qosx_red_marks_s cn61xx;
+ struct cvmx_ipd_qosx_red_marks_s cn63xx;
+ struct cvmx_ipd_qosx_red_marks_s cn63xxp1;
+ struct cvmx_ipd_qosx_red_marks_s cn66xx;
+ struct cvmx_ipd_qosx_red_marks_s cn68xx;
+ struct cvmx_ipd_qosx_red_marks_s cn68xxp1;
+ struct cvmx_ipd_qosx_red_marks_s cnf71xx;
+};
+typedef union cvmx_ipd_qosx_red_marks cvmx_ipd_qosx_red_marks_t;
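+
+/*
+ * A sketch of setting the RED pass/drop watermarks for one QOS level,
+ * assuming an indexed CVMX_IPD_QOSX_RED_MARKS(qos) address macro. PASS must
+ * sit above DROP: packets pass while the average free-page count exceeds
+ * PASS and drop once it falls to DROP or below:
+ *
+ *   const int qos = 0;
+ *   cvmx_ipd_qosx_red_marks_t marks;
+ *   marks.u64 = 0;
+ *   marks.s.pass = 1000;   // pass while avg free pages > 1000
+ *   marks.s.drop = 250;    // drop once avg free pages <= 250
+ *   cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(qos), marks.u64);
+ */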
+
+/**
+ * cvmx_ipd_que0_free_page_cnt
+ *
+ * IPD_QUE0_FREE_PAGE_CNT = IPD Queue0 Free Page Count
+ *
+ * Number of Free-Page Pointers that are available for use in the FPA for Queue-0.
+ */
+union cvmx_ipd_que0_free_page_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_que0_free_page_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t q0_pcnt : 32; /**< Number of Queue-0 Page Pointers Available. */
+#else
+ uint64_t q0_pcnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ipd_que0_free_page_cnt_s cn30xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn31xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn38xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn38xxp2;
+ struct cvmx_ipd_que0_free_page_cnt_s cn50xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn52xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn52xxp1;
+ struct cvmx_ipd_que0_free_page_cnt_s cn56xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn56xxp1;
+ struct cvmx_ipd_que0_free_page_cnt_s cn58xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn58xxp1;
+ struct cvmx_ipd_que0_free_page_cnt_s cn61xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn63xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn63xxp1;
+ struct cvmx_ipd_que0_free_page_cnt_s cn66xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn68xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn68xxp1;
+ struct cvmx_ipd_que0_free_page_cnt_s cnf71xx;
+};
+typedef union cvmx_ipd_que0_free_page_cnt cvmx_ipd_que0_free_page_cnt_t;
+
+/**
+ * cvmx_ipd_red_bpid_enable#
+ *
+ * IPD_RED_BPID_ENABLE = IPD RED BPID Enable
+ *
+ * Enables RED dropping on a per-BPID basis.
+ */
+union cvmx_ipd_red_bpid_enablex {
+ uint64_t u64;
+ struct cvmx_ipd_red_bpid_enablex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prt_enb : 64; /**< Each bit position enables the corresponding
+ BPID's ability to have packets dropped by RED
+ probability. */
+#else
+ uint64_t prt_enb : 64;
+#endif
+ } s;
+ struct cvmx_ipd_red_bpid_enablex_s cn68xx;
+ struct cvmx_ipd_red_bpid_enablex_s cn68xxp1;
+};
+typedef union cvmx_ipd_red_bpid_enablex cvmx_ipd_red_bpid_enablex_t;
+
+/**
+ * cvmx_ipd_red_delay
+ *
+ * IPD_RED_DELAY = IPD RED Delay
+ *
+ * Sets the delays used by the RED engine when computing moving averages and drop probabilities.
+ */
+union cvmx_ipd_red_delay {
+ uint64_t u64;
+ struct cvmx_ipd_red_delay_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t prb_dly : 14; /**< Number of (core clock periods + 68) * 8 to wait
+ before calculating the new packet drop
+ probability for each QOS level. */
+ uint64_t avg_dly : 14; /**< Number of (core clock periods + 10) * 8 to wait
+ before calculating the moving average for each
+ QOS level.
+ Larger AVG_DLY values cause the moving averages
+ of ALL QOS levels to track changes in the actual
+ free space more slowly. Smaller NEW_CON (and
+ larger AVG_CON) values can have a similar effect,
+ but only affect an individual QOS level, rather
+ than all. */
+#else
+ uint64_t avg_dly : 14;
+ uint64_t prb_dly : 14;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_ipd_red_delay_s cn68xx;
+ struct cvmx_ipd_red_delay_s cn68xxp1;
+};
+typedef union cvmx_ipd_red_delay cvmx_ipd_red_delay_t;
+
+/**
+ * cvmx_ipd_red_port_enable
+ *
+ * IPD_RED_PORT_ENABLE = IPD RED Port Enable
+ *
+ * Per-port enable for RED packet dropping; also holds the RED delay controls.
+ */
+union cvmx_ipd_red_port_enable {
+ uint64_t u64;
+ struct cvmx_ipd_red_port_enable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prb_dly : 14; /**< Number of (core clock periods + 68) * 8 to wait
+ before calculating the new packet drop
+ probability for each QOS level. */
+ uint64_t avg_dly : 14; /**< Number of (core clock periods + 10) * 8 to wait
+ before calculating the moving average for each
+ QOS level.
+ Larger AVG_DLY values cause the moving averages
+ of ALL QOS levels to track changes in the actual
+ free space more slowly. Smaller NEW_CON (and
+ larger AVG_CON) values can have a similar effect,
+ but only affect an individual QOS level, rather
+ than all. */
+ uint64_t prt_enb : 36; /**< Each bit position enables the corresponding
+ Port's ability to have packets dropped by RED
+ probability. */
+#else
+ uint64_t prt_enb : 36;
+ uint64_t avg_dly : 14;
+ uint64_t prb_dly : 14;
+#endif
+ } s;
+ struct cvmx_ipd_red_port_enable_s cn30xx;
+ struct cvmx_ipd_red_port_enable_s cn31xx;
+ struct cvmx_ipd_red_port_enable_s cn38xx;
+ struct cvmx_ipd_red_port_enable_s cn38xxp2;
+ struct cvmx_ipd_red_port_enable_s cn50xx;
+ struct cvmx_ipd_red_port_enable_s cn52xx;
+ struct cvmx_ipd_red_port_enable_s cn52xxp1;
+ struct cvmx_ipd_red_port_enable_s cn56xx;
+ struct cvmx_ipd_red_port_enable_s cn56xxp1;
+ struct cvmx_ipd_red_port_enable_s cn58xx;
+ struct cvmx_ipd_red_port_enable_s cn58xxp1;
+ struct cvmx_ipd_red_port_enable_s cn61xx;
+ struct cvmx_ipd_red_port_enable_s cn63xx;
+ struct cvmx_ipd_red_port_enable_s cn63xxp1;
+ struct cvmx_ipd_red_port_enable_s cn66xx;
+ struct cvmx_ipd_red_port_enable_s cnf71xx;
+};
+typedef union cvmx_ipd_red_port_enable cvmx_ipd_red_port_enable_t;
+
+/**
+ * cvmx_ipd_red_port_enable2
+ *
+ * IPD_RED_PORT_ENABLE2 = IPD RED Port Enable2
+ *
+ * Extends the PRT_ENB field of IPD_RED_PORT_ENABLE to ports above 35.
+ */
+union cvmx_ipd_red_port_enable2 {
+ uint64_t u64;
+ struct cvmx_ipd_red_port_enable2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t prt_enb : 12; /**< Bits 11-0 correspond to ports 47-36. These bits
+ have the same meaning as the PRT_ENB field of
+ IPD_RED_PORT_ENABLE. */
+#else
+ uint64_t prt_enb : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_ipd_red_port_enable2_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t prt_enb : 4; /**< Bits 3-0 correspond to ports 39-36. These bits
+ have the same meaning as the PRT_ENB field of
+ IPD_RED_PORT_ENABLE. */
+#else
+ uint64_t prt_enb : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ipd_red_port_enable2_cn52xx cn52xxp1;
+ struct cvmx_ipd_red_port_enable2_cn52xx cn56xx;
+ struct cvmx_ipd_red_port_enable2_cn52xx cn56xxp1;
+ struct cvmx_ipd_red_port_enable2_s cn61xx;
+ struct cvmx_ipd_red_port_enable2_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t prt_enb : 8; /**< Bits 7-0 correspond to ports 43-36. These bits
+ have the same meaning as the PRT_ENB field of
+ IPD_RED_PORT_ENABLE. */
+#else
+ uint64_t prt_enb : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn63xx;
+ struct cvmx_ipd_red_port_enable2_cn63xx cn63xxp1;
+ struct cvmx_ipd_red_port_enable2_s cn66xx;
+ struct cvmx_ipd_red_port_enable2_s cnf71xx;
+};
+typedef union cvmx_ipd_red_port_enable2 cvmx_ipd_red_port_enable2_t;
+
+/**
+ * cvmx_ipd_red_que#_param
+ *
+ * IPD_RED_QUE0_PARAM = IPD RED Queue-0 Parameters
+ *
+ * Values that control the passing and dropping of packets by the RED engine for QOS Level-0.
+ */
+union cvmx_ipd_red_quex_param {
+ uint64_t u64;
+ struct cvmx_ipd_red_quex_param_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t use_pcnt : 1; /**< When set to '1', RED will use the actual Packet-Page
+ Count in place of the Average for RED calculations. */
+ uint64_t new_con : 8; /**< This value is used to control how much of the present
+ Actual Queue Size is used to calculate the new
+ Average Queue Size. The value is a number from 0
+ to 256, which represents NEW_CON/256 of the Actual
+ Queue Size that will be used in the calculation.
+ The number in this field plus the value of
+ AVG_CON must be equal to 256.
+ Larger AVG_DLY values cause the moving averages
+ of ALL QOS levels to track changes in the actual
+ free space more slowly. Smaller NEW_CON (and
+ larger AVG_CON) values can have a similar effect,
+ but only affect an individual QOS level, rather
+ than all. */
+ uint64_t avg_con : 8; /**< This value is used to control how much of the present
+ Average Queue Size is used to calculate the new
+ Average Queue Size. The value is a number from 0
+ to 256, which represents AVG_CON/256 of the Average
+ Queue Size that will be used in the calculation.
+ The number in this field plus the value of
+ NEW_CON must be equal to 256.
+ Larger AVG_DLY values cause the moving averages
+ of ALL QOS levels to track changes in the actual
+ free space more slowly. Smaller NEW_CON (and
+ larger AVG_CON) values can have a similar effect,
+ but only affect an individual QOS level, rather
+ than all. */
+ uint64_t prb_con : 32; /**< Used in computing the probability of a packet being
+ passed or dropped by the WRED engine. The field is
+ calculated to be (255 * 2^24)/(PASS-DROP), where
+ PASS and DROP are the fields from the
+ IPD_QOS0_RED_MARKS CSR. */
+#else
+ uint64_t prb_con : 32;
+ uint64_t avg_con : 8;
+ uint64_t new_con : 8;
+ uint64_t use_pcnt : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } s;
+ struct cvmx_ipd_red_quex_param_s cn30xx;
+ struct cvmx_ipd_red_quex_param_s cn31xx;
+ struct cvmx_ipd_red_quex_param_s cn38xx;
+ struct cvmx_ipd_red_quex_param_s cn38xxp2;
+ struct cvmx_ipd_red_quex_param_s cn50xx;
+ struct cvmx_ipd_red_quex_param_s cn52xx;
+ struct cvmx_ipd_red_quex_param_s cn52xxp1;
+ struct cvmx_ipd_red_quex_param_s cn56xx;
+ struct cvmx_ipd_red_quex_param_s cn56xxp1;
+ struct cvmx_ipd_red_quex_param_s cn58xx;
+ struct cvmx_ipd_red_quex_param_s cn58xxp1;
+ struct cvmx_ipd_red_quex_param_s cn61xx;
+ struct cvmx_ipd_red_quex_param_s cn63xx;
+ struct cvmx_ipd_red_quex_param_s cn63xxp1;
+ struct cvmx_ipd_red_quex_param_s cn66xx;
+ struct cvmx_ipd_red_quex_param_s cn68xx;
+ struct cvmx_ipd_red_quex_param_s cn68xxp1;
+ struct cvmx_ipd_red_quex_param_s cnf71xx;
+};
+typedef union cvmx_ipd_red_quex_param cvmx_ipd_red_quex_param_t;
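+
+/* Illustrative sketch (editor's addition): program one QOS level's RED
+ * parameters from PASS/DROP page-count marks using the formula given above,
+ * keeping NEW_CON + AVG_CON = 256. The CVMX_IPD_RED_QUEX_PARAM(qos) address
+ * macro is assumed from earlier in this file; PASS must exceed DROP. */
+static inline void example_ipd_red_que_param(int qos, uint64_t pass, uint64_t drop)
+{
+    cvmx_ipd_red_quex_param_t param;
+    param.u64 = 0;
+    param.s.use_pcnt = 0;                             /* use the moving average */
+    param.s.new_con = 255;                            /* NEW_CON + AVG_CON == 256 */
+    param.s.avg_con = 1;
+    param.s.prb_con = (255ull << 24) / (pass - drop); /* (255 * 2^24)/(PASS-DROP) */
+    cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(qos), param.u64);
+}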
+
+/**
+ * cvmx_ipd_req_wgt
+ *
+ * IPD_REQ_WGT = IPD REQ weights
+ *
+ * There are 8 devices that can request to send packet traffic to the IPD. These weights are used for the Weighted Round Robin
+ * grant generated by the IPD to requestors.
+ */
+union cvmx_ipd_req_wgt {
+ uint64_t u64;
+ struct cvmx_ipd_req_wgt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wgt7 : 8; /**< Weight for ILK REQ */
+ uint64_t wgt6 : 8; /**< Weight for PKO REQ */
+ uint64_t wgt5 : 8; /**< Weight for DPI REQ */
+ uint64_t wgt4 : 8; /**< Weight for AGX4 REQ */
+ uint64_t wgt3 : 8; /**< Weight for AGX3 REQ */
+ uint64_t wgt2 : 8; /**< Weight for AGX2 REQ */
+ uint64_t wgt1 : 8; /**< Weight for AGX1 REQ */
+ uint64_t wgt0 : 8; /**< Weight for AGX0 REQ */
+#else
+ uint64_t wgt0 : 8;
+ uint64_t wgt1 : 8;
+ uint64_t wgt2 : 8;
+ uint64_t wgt3 : 8;
+ uint64_t wgt4 : 8;
+ uint64_t wgt5 : 8;
+ uint64_t wgt6 : 8;
+ uint64_t wgt7 : 8;
+#endif
+ } s;
+ struct cvmx_ipd_req_wgt_s cn68xx;
+};
+typedef union cvmx_ipd_req_wgt cvmx_ipd_req_wgt_t;
+
+/**
+ * cvmx_ipd_sub_port_bp_page_cnt
+ *
+ * IPD_SUB_PORT_BP_PAGE_CNT = IPD Subtract Port Backpressure Page Count
+ *
+ * Adds the supplied value (a number of pages) to the indicated port count register. The value added should
+ * be the 2's complement of the value that needs to be subtracted. Users add 2's complement values to the
+ * port-mbuf-count register to return mbufs to the counter (lowering the count) in order to avoid port-level
+ * backpressure being applied to the port. Backpressure is applied when the MBUF used count of a port exceeds the
+ * value in the IPD_PORTX_BP_PAGE_CNT, IPD_PORTX_BP_PAGE_CNT2, and IPD_PORTX_BP_PAGE_CNT3 registers.
+ *
+ * This register can't be written from the PCI via a window write.
+ */
+union cvmx_ipd_sub_port_bp_page_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t port : 6; /**< The port to add the PAGE_CNT field to. */
+ uint64_t page_cnt : 25; /**< The number of page pointers to add to
+ the port counter pointed to by the
+ PORT Field. */
+#else
+ uint64_t page_cnt : 25;
+ uint64_t port : 6;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn30xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn31xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xxp2;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn50xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xxp1;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xxp1;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xxp1;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn61xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xxp1;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn66xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn68xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn68xxp1;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cnf71xx;
+};
+typedef union cvmx_ipd_sub_port_bp_page_cnt cvmx_ipd_sub_port_bp_page_cnt_t;
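+
+/* Illustrative sketch (editor's addition): lower a port's backpressure page
+ * count by writing the 2's complement of the amount to subtract, masked to
+ * the 25-bit PAGE_CNT field. Assumes the CVMX_IPD_SUB_PORT_BP_PAGE_CNT
+ * address macro from earlier in this file. */
+static inline void example_ipd_subtract_port_pages(int port, uint64_t pages)
+{
+    cvmx_ipd_sub_port_bp_page_cnt_t sub;
+    sub.u64 = 0;
+    sub.s.port = port;
+    sub.s.page_cnt = (0 - pages) & 0x1ffffff; /* 25-bit 2's complement of pages */
+    cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, sub.u64);
+}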
+
+/**
+ * cvmx_ipd_sub_port_fcs
+ *
+ * IPD_SUB_PORT_FCS = IPD Subtract Ports FCS Register
+ *
+ * When a bit is set to '1', the corresponding port will subtract 4 bytes (the FCS) from the end of
+ * the packet.
+ */
+union cvmx_ipd_sub_port_fcs {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_fcs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t port_bit2 : 4; /**< When a bit is set to '1', the corresponding
+ port will subtract the FCS for packets
+ on that port. */
+ uint64_t reserved_32_35 : 4;
+ uint64_t port_bit : 32; /**< When a bit is set to '1', the corresponding
+ port will subtract the FCS for packets
+ on that port. */
+#else
+ uint64_t port_bit : 32;
+ uint64_t reserved_32_35 : 4;
+ uint64_t port_bit2 : 4;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_ipd_sub_port_fcs_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t port_bit : 3; /**< When a bit is set to '1', the corresponding
+ port will subtract the FCS for packets
+ on that port. */
+#else
+ uint64_t port_bit : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_sub_port_fcs_cn30xx cn31xx;
+ struct cvmx_ipd_sub_port_fcs_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t port_bit : 32; /**< When a bit is set to '1', the corresponding
+ port will subtract the FCS for packets
+ on that port. */
+#else
+ uint64_t port_bit : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn38xx;
+ struct cvmx_ipd_sub_port_fcs_cn38xx cn38xxp2;
+ struct cvmx_ipd_sub_port_fcs_cn30xx cn50xx;
+ struct cvmx_ipd_sub_port_fcs_s cn52xx;
+ struct cvmx_ipd_sub_port_fcs_s cn52xxp1;
+ struct cvmx_ipd_sub_port_fcs_s cn56xx;
+ struct cvmx_ipd_sub_port_fcs_s cn56xxp1;
+ struct cvmx_ipd_sub_port_fcs_cn38xx cn58xx;
+ struct cvmx_ipd_sub_port_fcs_cn38xx cn58xxp1;
+ struct cvmx_ipd_sub_port_fcs_s cn61xx;
+ struct cvmx_ipd_sub_port_fcs_s cn63xx;
+ struct cvmx_ipd_sub_port_fcs_s cn63xxp1;
+ struct cvmx_ipd_sub_port_fcs_s cn66xx;
+ struct cvmx_ipd_sub_port_fcs_s cnf71xx;
+};
+typedef union cvmx_ipd_sub_port_fcs cvmx_ipd_sub_port_fcs_t;
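+
+/* Illustrative sketch (editor's addition): enable FCS stripping for a port.
+ * PORT_BIT covers ports 0-31; the PORT_BIT2 mapping to ports 36-39 is
+ * inferred from the reserved_32_35 gap above, so treat it as an assumption.
+ * Assumes the CVMX_IPD_SUB_PORT_FCS address macro from earlier in this file. */
+static inline void example_ipd_strip_fcs(int port)
+{
+    cvmx_ipd_sub_port_fcs_t fcs;
+    fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
+    if (port < 32)
+        fcs.s.port_bit |= 1ull << port;
+    else
+        fcs.s.port_bit2 |= 1ull << (port - 36);
+    cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, fcs.u64);
+}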
+
+/**
+ * cvmx_ipd_sub_port_qos_cnt
+ *
+ * IPD_SUB_PORT_QOS_CNT = IPD Subtract Port QOS Count
+ *
+ * Adds the value (CNT) to the indicated Port-QOS register (PORT_QOS). The value added must be
+ * the 2's complement of the value that needs to be subtracted.
+ */
+union cvmx_ipd_sub_port_qos_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_qos_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_41_63 : 23;
+ uint64_t port_qos : 9; /**< The port to add the CNT field to. */
+ uint64_t cnt : 32; /**< The value to be added to the register selected
+ in the PORT_QOS field. */
+#else
+ uint64_t cnt : 32;
+ uint64_t port_qos : 9;
+ uint64_t reserved_41_63 : 23;
+#endif
+ } s;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn52xx;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn52xxp1;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn56xx;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn56xxp1;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn61xx;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn63xx;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn63xxp1;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn66xx;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn68xx;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn68xxp1;
+ struct cvmx_ipd_sub_port_qos_cnt_s cnf71xx;
+};
+typedef union cvmx_ipd_sub_port_qos_cnt cvmx_ipd_sub_port_qos_cnt_t;
+
+/**
+ * cvmx_ipd_wqe_fpa_queue
+ *
+ * IPD_WQE_FPA_QUEUE = IPD Work-Queue-Entry FPA Page Size
+ *
+ * Selects which FPA queue (0-7) to fetch page pointers from for WQEs.
+ */
+union cvmx_ipd_wqe_fpa_queue {
+ uint64_t u64;
+ struct cvmx_ipd_wqe_fpa_queue_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t wqe_pool : 3; /**< Which FPA queue to fetch page pointers
+ from for WQEs.
+ Not used when IPD_CTL_STATUS[NO_WPTR] is set. */
+#else
+ uint64_t wqe_pool : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_ipd_wqe_fpa_queue_s cn30xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn31xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn38xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn38xxp2;
+ struct cvmx_ipd_wqe_fpa_queue_s cn50xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn52xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn52xxp1;
+ struct cvmx_ipd_wqe_fpa_queue_s cn56xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn56xxp1;
+ struct cvmx_ipd_wqe_fpa_queue_s cn58xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn58xxp1;
+ struct cvmx_ipd_wqe_fpa_queue_s cn61xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn63xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn63xxp1;
+ struct cvmx_ipd_wqe_fpa_queue_s cn66xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn68xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn68xxp1;
+ struct cvmx_ipd_wqe_fpa_queue_s cnf71xx;
+};
+typedef union cvmx_ipd_wqe_fpa_queue cvmx_ipd_wqe_fpa_queue_t;
+
+/**
+ * cvmx_ipd_wqe_ptr_valid
+ *
+ * IPD_WQE_PTR_VALID = IPD's WQE Pointer Valid
+ *
+ * The value of the WQE pointer that has been fetched and is held in the valid register.
+ */
+union cvmx_ipd_wqe_ptr_valid {
+ uint64_t u64;
+ struct cvmx_ipd_wqe_ptr_valid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t ptr : 29; /**< Pointer value.
+ When IPD_CTL_STATUS[NO_WPTR] is set to '1', this field
+ represents a Packet-Pointer, NOT a WQE pointer. */
+#else
+ uint64_t ptr : 29;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_ipd_wqe_ptr_valid_s cn30xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn31xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn38xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn50xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn52xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn52xxp1;
+ struct cvmx_ipd_wqe_ptr_valid_s cn56xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn56xxp1;
+ struct cvmx_ipd_wqe_ptr_valid_s cn58xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn58xxp1;
+ struct cvmx_ipd_wqe_ptr_valid_s cn61xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn63xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn63xxp1;
+ struct cvmx_ipd_wqe_ptr_valid_s cn66xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cnf71xx;
+};
+typedef union cvmx_ipd_wqe_ptr_valid cvmx_ipd_wqe_ptr_valid_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ipd-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ipd.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ipd.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ipd.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,319 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * IPD Support.
+ *
+ * <hr>$Revision: 58943 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#include <asm/octeon/cvmx-pip-defs.h>
+#include <asm/octeon/cvmx-dbg-defs.h>
+#include <asm/octeon/cvmx-sso-defs.h>
+
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-wqe.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-helper-errata.h>
+#include <asm/octeon/cvmx-helper-cfg.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#endif
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-version.h"
+#include "cvmx-helper-check-defines.h"
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "cvmx-error.h"
+#include "cvmx-config.h"
+#endif
+
+#include "cvmx-fpa.h"
+#include "cvmx-wqe.h"
+#include "cvmx-ipd.h"
+#include "cvmx-helper-errata.h"
+#include "cvmx-helper-cfg.h"
+#endif
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+static void __cvmx_ipd_free_ptr_v1(void)
+{
+ /* Only CN38XX pass 1 and pass 2 cannot read pointers out of the IPD */
+ if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
+ int no_wptr = 0;
+ cvmx_ipd_ptr_count_t ipd_ptr_count;
+ ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
+
+ /* Handle Work Queue Entry in cn56xx and cn52xx */
+ if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
+ cvmx_ipd_ctl_status_t ipd_ctl_status;
+ ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ if (ipd_ctl_status.s.no_wptr)
+ no_wptr = 1;
+ }
+
+ /* Free the prefetched WQE */
+ if (ipd_ptr_count.s.wqev_cnt) {
+ cvmx_ipd_wqe_ptr_valid_t ipd_wqe_ptr_valid;
+ ipd_wqe_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
+ if (no_wptr)
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_wqe_ptr_valid.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ else
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_wqe_ptr_valid.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
+ }
+
+ /* Free all WQE in the fifo */
+ if (ipd_ptr_count.s.wqe_pcnt) {
+ int i;
+ cvmx_ipd_pwp_ptr_fifo_ctl_t ipd_pwp_ptr_fifo_ctl;
+ ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
+ ipd_pwp_ptr_fifo_ctl.s.cena = 0;
+ ipd_pwp_ptr_fifo_ctl.s.raddr = ipd_pwp_ptr_fifo_ctl.s.max_cnts + (ipd_pwp_ptr_fifo_ctl.s.wraddr+i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
+ ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ if (no_wptr)
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ else
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
+ }
+ ipd_pwp_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
+ }
+
+ /* Free the prefetched packet */
+ if (ipd_ptr_count.s.pktv_cnt) {
+ cvmx_ipd_pkt_ptr_valid_t ipd_pkt_ptr_valid;
+ ipd_pkt_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pkt_ptr_valid.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ }
+
+ /* Free the per port prefetched packets */
+ if (1) {
+ int i;
+ cvmx_ipd_prc_port_ptr_fifo_ctl_t ipd_prc_port_ptr_fifo_ctl;
+ ipd_prc_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt; i++) {
+ ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
+ ipd_prc_port_ptr_fifo_ctl.s.raddr = i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
+ cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, ipd_prc_port_ptr_fifo_ctl.u64);
+ ipd_prc_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_prc_port_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ }
+ ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, ipd_prc_port_ptr_fifo_ctl.u64);
+ }
+
+ /* Free all packets in the holding fifo */
+ if (ipd_ptr_count.s.pfif_cnt) {
+ int i;
+ cvmx_ipd_prc_hold_ptr_fifo_ctl_t ipd_prc_hold_ptr_fifo_ctl;
+
+ ipd_prc_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
+ ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
+ ipd_prc_hold_ptr_fifo_ctl.s.raddr = (ipd_prc_hold_ptr_fifo_ctl.s.praddr + i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
+ cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, ipd_prc_hold_ptr_fifo_ctl.u64);
+ ipd_prc_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_prc_hold_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ }
+ ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, ipd_prc_hold_ptr_fifo_ctl.u64);
+ }
+
+ /* Free all packets in the fifo */
+ if (ipd_ptr_count.s.pkt_pcnt) {
+ int i;
+ cvmx_ipd_pwp_ptr_fifo_ctl_t ipd_pwp_ptr_fifo_ctl;
+ ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
+ ipd_pwp_ptr_fifo_ctl.s.cena = 0;
+ ipd_pwp_ptr_fifo_ctl.s.raddr = (ipd_pwp_ptr_fifo_ctl.s.praddr+i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
+ ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ }
+ ipd_pwp_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
+ }
+ }
+}
+
+static void __cvmx_ipd_free_ptr_v2(void)
+{
+ int no_wptr = 0;
+ int i;
+ cvmx_ipd_port_ptr_fifo_ctl_t ipd_port_ptr_fifo_ctl;
+ cvmx_ipd_ptr_count_t ipd_ptr_count;
+ ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
+
+ /* Handle Work Queue Entry in cn68xx */
+ if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
+ cvmx_ipd_ctl_status_t ipd_ctl_status;
+ ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ if (ipd_ctl_status.s.no_wptr)
+ no_wptr = 1;
+ }
+
+ /* Free the prefetched WQE */
+ if (ipd_ptr_count.s.wqev_cnt) {
+ cvmx_ipd_next_wqe_ptr_t ipd_next_wqe_ptr;
+ ipd_next_wqe_ptr.u64 = cvmx_read_csr(CVMX_IPD_NEXT_WQE_PTR);
+ if (no_wptr)
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_next_wqe_ptr.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ else
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_next_wqe_ptr.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
+ }
+
+
+ /* Free all WQE in the fifo */
+ if (ipd_ptr_count.s.wqe_pcnt) {
+ cvmx_ipd_free_ptr_fifo_ctl_t ipd_free_ptr_fifo_ctl;
+ cvmx_ipd_free_ptr_value_t ipd_free_ptr_value;
+ ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
+ for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
+ ipd_free_ptr_fifo_ctl.s.cena = 0;
+ ipd_free_ptr_fifo_ctl.s.raddr = ipd_free_ptr_fifo_ctl.s.max_cnts + (ipd_free_ptr_fifo_ctl.s.wraddr+i) % ipd_free_ptr_fifo_ctl.s.max_cnts;
+ cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
+ ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
+ ipd_free_ptr_value.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_VALUE);
+ if (no_wptr)
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_free_ptr_value.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ else
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_free_ptr_value.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
+ }
+ ipd_free_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
+ }
+
+ /* Free the prefetched packet */
+ if (ipd_ptr_count.s.pktv_cnt) {
+ cvmx_ipd_next_pkt_ptr_t ipd_next_pkt_ptr;
+ ipd_next_pkt_ptr.u64 = cvmx_read_csr(CVMX_IPD_NEXT_PKT_PTR);
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_next_pkt_ptr.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ }
+
+ /* Free the per port prefetched packets */
+ ipd_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PORT_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_port_ptr_fifo_ctl.s.max_pkt; i++) {
+ ipd_port_ptr_fifo_ctl.s.cena = 0;
+ ipd_port_ptr_fifo_ctl.s.raddr = i % ipd_port_ptr_fifo_ctl.s.max_pkt;
+ cvmx_write_csr(CVMX_IPD_PORT_PTR_FIFO_CTL, ipd_port_ptr_fifo_ctl.u64);
+ ipd_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PORT_PTR_FIFO_CTL);
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_port_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ }
+ ipd_port_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PORT_PTR_FIFO_CTL, ipd_port_ptr_fifo_ctl.u64);
+
+ /* Free all packets in the holding fifo */
+ if (ipd_ptr_count.s.pfif_cnt) {
+ cvmx_ipd_hold_ptr_fifo_ctl_t ipd_hold_ptr_fifo_ctl;
+
+ ipd_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
+ ipd_hold_ptr_fifo_ctl.s.cena = 0;
+ ipd_hold_ptr_fifo_ctl.s.raddr = (ipd_hold_ptr_fifo_ctl.s.praddr + i) % ipd_hold_ptr_fifo_ctl.s.max_pkt;
+ cvmx_write_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL, ipd_hold_ptr_fifo_ctl.u64);
+ ipd_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL);
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_hold_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ }
+ ipd_hold_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL, ipd_hold_ptr_fifo_ctl.u64);
+ }
+
+ /* Free all packets in the fifo */
+ if (ipd_ptr_count.s.pkt_pcnt) {
+ cvmx_ipd_free_ptr_fifo_ctl_t ipd_free_ptr_fifo_ctl;
+ cvmx_ipd_free_ptr_value_t ipd_free_ptr_value;
+ ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
+ ipd_free_ptr_fifo_ctl.s.cena = 0;
+ ipd_free_ptr_fifo_ctl.s.raddr = (ipd_free_ptr_fifo_ctl.s.praddr+i) % ipd_free_ptr_fifo_ctl.s.max_cnts;
+ cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
+ ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
+ ipd_free_ptr_value.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_VALUE);
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_free_ptr_value.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
+ }
+ ipd_free_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
+ }
+}
+
+/**
+ * @INTERNAL
+ * This function is called by cvmx_helper_shutdown() to extract
+ * all FPA buffers out of the IPD and PIP. After this function
+ * completes, all FPA buffers that were prefetched by IPD and PIP
+ * will be in the appropriate FPA pool. This function does not reset
+ * PIP or IPD, as FPA pool zero must be empty before the reset can
+ * be performed. WARNING: It is very important that IPD and PIP be
+ * reset soon after a call to this function.
+ */
+void __cvmx_ipd_free_ptr(void)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ __cvmx_ipd_free_ptr_v2();
+ else
+ __cvmx_ipd_free_ptr_v1();
+}
+
+#endif
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ipd.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ipd.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ipd.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ipd.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,204 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the hardware Input Packet Data unit.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+
+#ifndef __CVMX_IPD_H__
+#define __CVMX_IPD_H__
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-ipd-defs.h>
+#else
+# ifndef CVMX_DONT_INCLUDE_CONFIG
+# include "executive-config.h"
+# ifdef CVMX_ENABLE_PKO_FUNCTIONS
+# include "cvmx-config.h"
+# endif
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef CVMX_ENABLE_LEN_M8_FIX
+#define CVMX_ENABLE_LEN_M8_FIX 0
+#endif
+
+/* CSR typedefs have been moved to cvmx-ipd-defs.h */
+
+typedef cvmx_ipd_1st_mbuff_skip_t cvmx_ipd_mbuff_not_first_skip_t;
+typedef cvmx_ipd_1st_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
+
+
+/**
+ * Configure IPD
+ *
+ * @param mbuff_size Packet buffer size in 8-byte words
+ * @param first_mbuff_skip
+ * Number of 8-byte words to skip in the first buffer
+ * @param not_first_mbuff_skip
+ * Number of 8-byte words to skip in each following buffer
+ * @param first_back Must be same as first_mbuff_skip / 128
+ * @param second_back
+ * Must be same as not_first_mbuff_skip / 128
+ * @param wqe_fpa_pool
+ * FPA pool to get work entries from
+ * @param cache_mode
+ * @param back_pres_enable_flag
+ * Enable or disable port back pressure at a global level.
+ * This should always be 1 as more accurate control can be
+ * found in IPD_PORTX_BP_PAGE_CNT[BP_ENB].
+ */
+static inline void cvmx_ipd_config(uint64_t mbuff_size,
+ uint64_t first_mbuff_skip,
+ uint64_t not_first_mbuff_skip,
+ uint64_t first_back,
+ uint64_t second_back,
+ uint64_t wqe_fpa_pool,
+ cvmx_ipd_mode_t cache_mode,
+ uint64_t back_pres_enable_flag
+ )
+{
+ cvmx_ipd_1st_mbuff_skip_t first_skip;
+ cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
+ cvmx_ipd_packet_mbuff_size_t size;
+ cvmx_ipd_1st_next_ptr_back_t first_back_struct;
+ cvmx_ipd_second_next_ptr_back_t second_back_struct;
+ cvmx_ipd_wqe_fpa_queue_t wqe_pool;
+ cvmx_ipd_ctl_status_t ipd_ctl_reg;
+
+ first_skip.u64 = 0;
+ first_skip.s.skip_sz = first_mbuff_skip;
+ cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);
+
+ not_first_skip.u64 = 0;
+ not_first_skip.s.skip_sz = not_first_mbuff_skip;
+ cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);
+
+ size.u64 = 0;
+ size.s.mb_size = mbuff_size;
+ cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);
+
+ first_back_struct.u64 = 0;
+ first_back_struct.s.back = first_back;
+ cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);
+
+ second_back_struct.u64 = 0;
+ second_back_struct.s.back = second_back;
+ cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK,second_back_struct.u64);
+
+ wqe_pool.u64 = 0;
+ wqe_pool.s.wqe_pool = wqe_fpa_pool;
+ cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);
+
+ ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_ctl_reg.s.opc_mode = cache_mode;
+ ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);
+
+ /* Note: the example RED code that used to be here has been moved to
+ cvmx_helper_setup_red */
+}
+
+
+/**
+ * Enable IPD
+ */
+static inline void cvmx_ipd_enable(void)
+{
+ cvmx_ipd_ctl_status_t ipd_reg;
+
+ ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+
+ /*
+ * Busy-wait for RST_DONE to clear on CN68XX
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ while(ipd_reg.s.rst_done != 0)
+ ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+
+ if (ipd_reg.s.ipd_en)
+ cvmx_dprintf("Warning: Enabling IPD when IPD already enabled.\n");
+
+ ipd_reg.s.ipd_en = 1;
+
+#if CVMX_ENABLE_LEN_M8_FIX
+ if(!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
+ ipd_reg.s.len_m8 = 1;
+#endif
+
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
+}
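+
+/* Illustrative bring-up sketch (editor's addition). The buffer size, skip
+ * values, and WQE pool number below are assumptions, not SDK defaults; see
+ * the cvmx-helper code for the real setup. The *_back arguments are taken
+ * here as the byte skip divided by 128, per the parameter notes above. */
+static inline void example_ipd_setup(void)
+{
+    uint64_t mbuf_bytes = 2048;      /* assumed packet buffer size */
+    uint64_t first_skip_bytes = 256; /* assumed skip in the first buffer */
+    uint64_t later_skip_bytes = 128; /* assumed skip in later buffers */
+
+    cvmx_ipd_config(mbuf_bytes / 8,         /* buffer size in 8-byte words */
+                    first_skip_bytes / 8,   /* first skip in 8-byte words */
+                    later_skip_bytes / 8,
+                    first_skip_bytes / 128, /* back pointer, 128-byte units */
+                    later_skip_bytes / 128,
+                    1,                      /* assumed WQE FPA pool number */
+                    CVMX_IPD_OPC_MODE_STT,  /* cache mode from cvmx_ipd_mode_t */
+                    1);                     /* keep global backpressure enabled */
+    cvmx_ipd_enable();
+}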
+
+
+/**
+ * Disable IPD
+ */
+static inline void cvmx_ipd_disable(void)
+{
+ cvmx_ipd_ctl_status_t ipd_reg;
+ ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_reg.s.ipd_en = 0;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
+}
+
+extern void __cvmx_ipd_free_ptr(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_IPD_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ipd.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ixf18201.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ixf18201.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ixf18201.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,363 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+/* This file contains support functions for the Cortina IXF18201 SPI->XAUI dual
+** MAC. The IXF18201 has dual SPI and dual XAUI interfaces to provide 2 10 gigabit
+** interfaces.
+** This file supports the EBT5810 evaluation board. To support a different board,
+** the 16 bit read/write functions would need to be customized for that board, and the
+** IXF18201 may need to be initialized differently as well.
+**
+** The IXF18201 and Octeon are configured for 2 SPI channels per interface (ports 0/1, and 16/17).
+** Ports 0 and 16 are the ports that are connected to the XAUI MACs (which are connected to the SFP+ modules)
+** Ports 1 and 17 are connected to the hairpin loopback port on the IXF SPI interface. All packets sent out
+** of these ports are looped back on the same port they were sent on. The loopback ports are always enabled.
+**
+** MAC address filtering on the IXF is not enabled. Link up/down events are not detected; only SPI status
+** is monitored by default, which is independent of the XAUI/SFP+ link status.
+**
+**
+*/
+#include "cvmx.h"
+#include "cvmx-swap.h"
+
+
+
+
+
+#define PAL_BASE (1ull << 63 | 0x1d030000)
+#define IXF_ADDR_HI (PAL_BASE + 0xa)
+#define IXF_ADDR_LO (PAL_BASE + 0xb)
+#define IXF_ADDR_16 IXF_ADDR_HI /* 16 bit access */
+
+#define IXF_WR_DATA_HI (PAL_BASE + 0xc)
+#define IXF_WR_DATA_LO (PAL_BASE + 0xd)
+#define IXF_WR_DATA_16 IXF_WR_DATA_HI
+
+#define IXF_RD_DATA_HI (PAL_BASE + 0x10)
+#define IXF_RD_DATA_LO (PAL_BASE + 0x11)
+#define IXF_RD_DATA_16 IXF_RD_DATA_HI
+
+#define IXF_TRANS_TYPE (PAL_BASE + 0xe)
+#define IXF_TRANS_STATUS (PAL_BASE + 0xf)
+
+
+uint16_t cvmx_ixf18201_read16(uint16_t reg_addr)
+{
+ cvmx_write64_uint16(IXF_ADDR_16, reg_addr);
+ cvmx_write64_uint8(IXF_TRANS_TYPE, 1); // Do read
+ cvmx_wait(800000);
+
+ /* Read result */
+ return(cvmx_read64_uint16(IXF_RD_DATA_16));
+}
+
+void cvmx_ixf18201_write16(uint16_t reg_addr, uint16_t data)
+{
+ cvmx_write64_uint16(IXF_ADDR_16, reg_addr);
+ cvmx_write64_uint16(IXF_WR_DATA_16, data);
+ cvmx_write64_uint8(IXF_TRANS_TYPE, 0);
+ cvmx_wait(800000);
+}
+
+
+
+uint32_t cvmx_ixf18201_read32(uint16_t reg_addr)
+{
+ uint32_t hi, lo;
+
+ if (reg_addr & 0x1)
+ {
+ return(0xdeadbeef);
+ }
+ lo = cvmx_ixf18201_read16(reg_addr);
+ hi = cvmx_ixf18201_read16(reg_addr + 1);
+ return((hi << 16) | lo);
+}
+void cvmx_ixf18201_write32(uint16_t reg_addr, uint32_t data)
+{
+ uint16_t hi, lo;
+
+ if (reg_addr & 0x1)
+ {
+ return;
+ }
+ lo = data & 0xFFFF;
+ hi = data >> 16;
+ cvmx_ixf18201_write16(reg_addr, lo);
+ cvmx_ixf18201_write16(reg_addr + 1, hi);
+
+}
+
+
+#define IXF_REG_MDI_CMD_ADDR1 0x310E
+#define IXF_REG_MDI_RD_WR1 0x3110
+void cvmx_ixf18201_mii_write(int mii_addr, int mmd, uint16_t reg, uint16_t val)
+{
+ uint32_t cmd_val = 0;
+
+
+ cmd_val = reg;
+ cmd_val |= 0x0 << 26; // Set address operation
+ cmd_val |= (mii_addr & 0x1f) << 21; // Set PHY addr
+ cmd_val |= (mmd & 0x1f) << 16; // Set MMD
+ cmd_val |= 1 << 30; // Do operation
+ cmd_val |= 1 << 31; // enable in progress bit
+
+
+
+ /* Set up address */
+ cvmx_ixf18201_write32(IXF_REG_MDI_CMD_ADDR1, cmd_val);
+
+ while (cvmx_ixf18201_read32(IXF_REG_MDI_CMD_ADDR1) & ( 1 << 30))
+ ; /* Wait for operation to complete */
+
+
+ cvmx_ixf18201_write32(IXF_REG_MDI_RD_WR1, val);
+
+ /* Do read operation */
+ cmd_val = 0;
+ cmd_val |= 0x1 << 26; // Set write operation
+ cmd_val |= (mii_addr & 0x1f) << 21; // Set PHY addr
+ cmd_val |= (mmd & 0x1f) << 16; // Set MMD
+ cmd_val |= 1 << 30; // Do operation
+ cmd_val |= 1 << 31; // enable in progress bit
+ cvmx_ixf18201_write32(IXF_REG_MDI_CMD_ADDR1, cmd_val);
+
+ while (cvmx_ixf18201_read32(IXF_REG_MDI_CMD_ADDR1) & ( 1 << 30))
+ ; /* Wait for operation to complete */
+
+
+}
+
+
+int cvmx_ixf18201_mii_read(int mii_addr, int mmd, uint16_t reg)
+{
+ uint32_t cmd_val = 0;
+
+
+ cmd_val = reg;
+ cmd_val |= 0x0 << 26; // Set address operation
+ cmd_val |= (mii_addr & 0x1f) << 21; // Set PHY addr
+ cmd_val |= (mmd & 0x1f) << 16; // Set MMD
+ cmd_val |= 1 << 30; // Do operation
+ cmd_val |= 1 << 31; // enable in progress bit
+
+
+
+ /* Set up address */
+ cvmx_ixf18201_write32(IXF_REG_MDI_CMD_ADDR1, cmd_val);
+
+ while (cvmx_ixf18201_read32(IXF_REG_MDI_CMD_ADDR1) & ( 1 << 30))
+ ; /* Wait for operation to complete */
+
+ /* Do read operation */
+ cmd_val = 0;
+ cmd_val |= 0x3 << 26; // Set read operation
+ cmd_val |= (mii_addr & 0x1f) << 21; // Set PHY addr
+ cmd_val |= (mmd & 0x1f) << 16; // Set MMD
+ cmd_val |= 1 << 30; // Do operation
+ cmd_val |= 1 << 31; // enable in progress bit
+ cvmx_ixf18201_write32(IXF_REG_MDI_CMD_ADDR1, cmd_val);
+
+ while (cvmx_ixf18201_read32(IXF_REG_MDI_CMD_ADDR1) & ( 1 << 30))
+ ; /* Wait for operation to complete */
+
+ cmd_val = cvmx_ixf18201_read32(IXF_REG_MDI_RD_WR1);
+
+ return(cmd_val >> 16);
+
+}
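+
+/* Illustrative sketch (editor's addition): reset a PCS block (MMD 3,
+ * register 0) through the clause 45 helpers above and poll until the
+ * IEEE-defined self-clearing reset bit (0x8000) drops. MII address 1
+ * matches the PCS reset issued at the end of cvmx_ixf18201_init(). */
+static inline void example_reset_pcs(void)
+{
+    cvmx_ixf18201_mii_write(1, 3, 0, 0x8000);
+    while (cvmx_ixf18201_mii_read(1, 3, 0) & 0x8000)
+        ; /* wait for the reset bit to self-clear */
+}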
+
+
+
+int cvmx_ixf18201_init(void)
+{
+ int index; /* For indexing the two 'ports' on ixf */
+ int offset;
+
+ /* Reset IXF, and take all blocks out of reset */
+
+/*
+Initializing...
+PP0:~CONSOLE-> Changing register value, addr 0x0003, old: 0x0000, new: 0x0001
+PP0:~CONSOLE-> Changing register value, addr 0x0003, old: 0x0001, new: 0x0000
+PP0:~CONSOLE-> **** LLM201(Lochlomond) Driver loaded ****
+PP0:~CONSOLE-> LLM201 Driver - Released on Tue Aug 28 09:51:30 2007.
+PP0:~CONSOLE-> retval is: 0
+PP0:~CONSOLE-> Changing register value, addr 0x0003, old: 0x0000, new: 0x0001
+PP0:~CONSOLE-> Changing register value, addr 0x0003, old: 0x0001, new: 0x0000
+PP0:~CONSOLE-> Brought all blocks out of reset
+PP0:~CONSOLE-> Getting default config.
+*/
+
+
+ cvmx_ixf18201_write16(0x0003, 0x0001);
+ cvmx_ixf18201_write16(0x0003, 0);
+
+ /*
+PP0:~CONSOLE-> Changing register value, addr 0x0000, old: 0x4014, new: 0x4010
+PP0:~CONSOLE-> Changing register value, addr 0x0000, old: 0x4010, new: 0x4014
+PP0:~CONSOLE-> Changing register value, addr 0x0004, old: 0x01ff, new: 0x0140
+PP0:~CONSOLE-> Changing register value, addr 0x0009, old: 0x007f, new: 0x0000
+ */
+ cvmx_ixf18201_write16(0x0000, 0x4010);
+ cvmx_ixf18201_write16(0x0000, 0x4014);
+ cvmx_ixf18201_write16(0x0004, 0x0140);
+ cvmx_ixf18201_write16(0x0009, 0);
+
+
+ /*
+PP0:~CONSOLE-> Changing register value, addr 0x000e, old: 0x0000, new: 0x000f
+PP0:~CONSOLE-> Changing register value, addr 0x000f, old: 0x0000, new: 0x0004
+PP0:~CONSOLE-> Changing register value, addr 0x000f, old: 0x0004, new: 0x0006
+PP0:~CONSOLE-> Changing register value, addr 0x000e, old: 0x000f, new: 0x00f0
+PP0:~CONSOLE-> Changing register value, addr 0x000f, old: 0x0006, new: 0x0040
+PP0:~CONSOLE-> Changing register value, addr 0x000f, old: 0x0040, new: 0x0060
+ */
+ // skip GPIO, 0xe/0xf
+
+
+ /*
+PP0:~CONSOLE-> Changing register value, addr 0x3100, old: 0x57fb, new: 0x7f7b
+PP0:~CONSOLE-> Changing register value, addr 0x3600, old: 0x57fb, new: 0x7f7b
+PP0:~CONSOLE-> Changing register value, addr 0x3005, old: 0x8010, new: 0x0040
+PP0:~CONSOLE-> Changing register value, addr 0x3006, old: 0x061a, new: 0x0000
+PP0:~CONSOLE-> Changing register value, addr 0x3505, old: 0x8010, new: 0x0040
+PP0:~CONSOLE-> Changing register value, addr 0x3506, old: 0x061a, new: 0x0000
+ */
+ for (index = 0; index < 2;index++ )
+ {
+ offset = 0x500 * index;
+ cvmx_ixf18201_write32(0x3100 + offset, 0x47f7b);
+ cvmx_ixf18201_write16(0x3005 + offset, 0x0040);
+ cvmx_ixf18201_write16(0x3006 + offset, 0);
+ }
+
+ /*PP0:~CONSOLE-> *** SPI soft reset ***, block id: 0
+PP0:~CONSOLE-> Changing register value, addr 0x3007, old: 0xf980, new: 0xf9c0
+PP0:~CONSOLE-> Changing register value, addr 0x3008, old: 0xa6f0, new: 0x36f0
+PP0:~CONSOLE-> Changing register value, addr 0x3000, old: 0x0080, new: 0x0060
+PP0:~CONSOLE-> Changing register value, addr 0x3002, old: 0x0200, new: 0x0040
+PP0:~CONSOLE-> Changing register value, addr 0x3003, old: 0x0100, new: 0x0000
+PP0:~CONSOLE-> Changing register value, addr 0x30c2, old: 0x0080, new: 0x0060
+PP0:~CONSOLE-> Changing register value, addr 0x300a, old: 0x0800, new: 0x0000
+PP0:~CONSOLE-> Changing register value, addr 0x3007, old: 0xf9c0, new: 0x89c0
+PP0:~CONSOLE-> Changing register value, addr 0x3016, old: 0x0000, new: 0x0010
+PP0:~CONSOLE-> Changing register value, addr 0x3008, old: 0x36f0, new: 0x3610
+PP0:~CONSOLE-> Changing register value, addr 0x3012, old: 0x0000, new: 0x0010
+PP0:~CONSOLE-> Changing register value, addr 0x3007, old: 0x89c0, new: 0x8980
+PP0:~CONSOLE-> Changing register value, addr 0x3008, old: 0x3610, new: 0xa210
+PP0:~CONSOLE->
+
+ */
+
+
+ for (index = 0; index < 2;index++ )
+ {
+ offset = 0x500 * index;
+ int cal_len_min_1 = 0; /* Calendar length -1. Must match number
+ ** of ports configured for interface.*/
+ cvmx_ixf18201_write16(0x3007 + offset, 0x81c0 | (cal_len_min_1 << 11));
+ cvmx_ixf18201_write16(0x3008 + offset, 0x3600 | (cal_len_min_1 << 4));
+ cvmx_ixf18201_write16(0x3000 + offset, 0x0060);
+ cvmx_ixf18201_write16(0x3002 + offset, 0x0040);
+ cvmx_ixf18201_write16(0x3003 + offset, 0x0000);
+ cvmx_ixf18201_write16(0x30c2 + offset, 0x0060);
+ cvmx_ixf18201_write16(0x300a + offset, 0x0000);
+ cvmx_ixf18201_write16(0x3007 + offset, 0x81c0 | (cal_len_min_1 << 11));
+ cvmx_ixf18201_write16(0x3016 + offset, 0x0010);
+ cvmx_ixf18201_write16(0x3008 + offset, 0x3600 | (cal_len_min_1 << 4));
+ cvmx_ixf18201_write16(0x3012 + offset, 0x0010);
+ cvmx_ixf18201_write16(0x3007 + offset, 0x8180 | (cal_len_min_1 << 11));
+ cvmx_ixf18201_write16(0x3008 + offset, 0xa200 | (cal_len_min_1 << 4));
+
+ cvmx_ixf18201_write16(0x3090 + offset, 0x0301); /* Enable hairpin loopback */
+ }
+
+
+
+ /*
+PP0:~CONSOLE-> Changing register value, addr 0x0004, old: 0x0140, new: 0x1fff
+PP0:~CONSOLE-> Changing register value, addr 0x0009, old: 0x0000, new: 0x007f
+PP0:~CONSOLE-> Changing register value, addr 0x310b, old: 0x0004, new: 0xffff
+PP0:~CONSOLE-> Changing register value, addr 0x310a, old: 0x7f7b, new: 0xffff
+
+ */
+
+ cvmx_ixf18201_write16(0x0004, 0x1fff);
+ cvmx_ixf18201_write16(0x0009, 0x007f);
+#if 0
+ /* MDI autoscan */
+ cvmx_ixf18201_write16(0x310b, 0xffff);
+ cvmx_ixf18201_write16(0x310a, 0xffff);
+#endif
+
+
+ /*
+ *** 32 bit register, trace only captures part of it...
+PP0:~CONSOLE-> Changing register value, addr 0x3100, old: 0x7f7b, new: 0x7f78
+PP0:~CONSOLE-> Changing register value, addr 0x3600, old: 0x7f7b, new: 0x7f78
+ */
+
+ for (index = 0; index < 2;index++ )
+ {
+ offset = 0x500 * index;
+ cvmx_ixf18201_write32(0x3100 + offset, 0x47f7c); /* Also enable jumbo frames */
+ /* Set max packet size to 9600 bytes, max supported by IXF18201 */
+ cvmx_ixf18201_write32(0x3114 + offset, 0x25800000);
+ }
+
+
+ cvmx_wait(100000000);
+
+ /* Now reset the PCS blocks in the phy. This seems to be required after
+ ** bringing up the Cortina. */
+ cvmx_ixf18201_mii_write(1, 3, 0, 0x8000);
+ cvmx_ixf18201_mii_write(5, 3, 0, 0x8000);
+
+
+ return 1;
+
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ixf18201.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ixf18201.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ixf18201.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ixf18201.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,113 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+#ifndef __CVMX_IXF18201_H__
+#define __CVMX_IXF18201_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Initialize the IXF18201 SPI<->XAUI MAC.
+ * @return 1 on success
+ * 0 on failure
+ */
+int cvmx_ixf18201_init(void);
+
+/**
+ * Read a 16 bit register from the IXF18201
+ *
+ * @param reg_addr Register address
+ *
+ * @return 16 bit register value
+ */
+uint16_t cvmx_ixf18201_read16(uint16_t reg_addr);
+/**
+ * Write a 16 bit IXF18201 register
+ *
+ * @param reg_addr Register address
+ * @param data Value to write
+ *
+ */
+void cvmx_ixf18201_write16(uint16_t reg_addr, uint16_t data);
+/**
+ * Read a 32 bit IXF18201 register
+ *
+ * @param reg_addr Register address (must be 4 byte aligned)
+ *
+ * @return 32 bit register value
+ */
+uint32_t cvmx_ixf18201_read32(uint16_t reg_addr);
+/**
+ * Write a 32 bit IXF18201 register
+ *
+ * @param reg_addr Register address (must be 4 byte aligned)
+ * @param data Value to write
+ *
+ */
+void cvmx_ixf18201_write32(uint16_t reg_addr, uint32_t data);
+
+/**
+ * Performs an MII clause 45 write using the MII block in IXF18201.
+ *
+ * @param mii_addr Device MII address
+ * @param mmd MMD address (block within device)
+ * @param reg Register address
+ * @param val Value to write
+ */
+void cvmx_ixf18201_mii_write(int mii_addr, int mmd, uint16_t reg, uint16_t val);
+/**
+ * Performs an MII clause 45 read using the MII block in IXF18201.
+ *
+ * @param mii_addr Device MII address
+ * @param mmd MMD address (block within device)
+ * @param reg Register address
+ * @return register value read from device
+ */
+int cvmx_ixf18201_mii_read(int mii_addr, int mmd, uint16_t reg);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_IXF18201_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ixf18201.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-key-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-key-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-key-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,267 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-key-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon key.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_KEY_DEFS_H__
+#define __CVMX_KEY_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_KEY_BIST_REG CVMX_KEY_BIST_REG_FUNC()
+static inline uint64_t CVMX_KEY_BIST_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_KEY_BIST_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180020000018ull);
+}
+#else
+#define CVMX_KEY_BIST_REG (CVMX_ADD_IO_SEG(0x0001180020000018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_KEY_CTL_STATUS CVMX_KEY_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_KEY_CTL_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_KEY_CTL_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180020000010ull);
+}
+#else
+#define CVMX_KEY_CTL_STATUS (CVMX_ADD_IO_SEG(0x0001180020000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_KEY_INT_ENB CVMX_KEY_INT_ENB_FUNC()
+static inline uint64_t CVMX_KEY_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_KEY_INT_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180020000008ull);
+}
+#else
+#define CVMX_KEY_INT_ENB (CVMX_ADD_IO_SEG(0x0001180020000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_KEY_INT_SUM CVMX_KEY_INT_SUM_FUNC()
+static inline uint64_t CVMX_KEY_INT_SUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_KEY_INT_SUM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180020000000ull);
+}
+#else
+#define CVMX_KEY_INT_SUM (CVMX_ADD_IO_SEG(0x0001180020000000ull))
+#endif
+
+/**
+ * cvmx_key_bist_reg
+ *
+ * KEY_BIST_REG = KEY's BIST Status Register
+ *
+ * The KEY's BIST status for memories.
+ */
+union cvmx_key_bist_reg {
+ uint64_t u64;
+ struct cvmx_key_bist_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t rrc : 1; /**< RRC bist status. */
+ uint64_t mem1 : 1; /**< MEM - 1 bist status. */
+ uint64_t mem0 : 1; /**< MEM - 0 bist status. */
+#else
+ uint64_t mem0 : 1;
+ uint64_t mem1 : 1;
+ uint64_t rrc : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_key_bist_reg_s cn38xx;
+ struct cvmx_key_bist_reg_s cn38xxp2;
+ struct cvmx_key_bist_reg_s cn56xx;
+ struct cvmx_key_bist_reg_s cn56xxp1;
+ struct cvmx_key_bist_reg_s cn58xx;
+ struct cvmx_key_bist_reg_s cn58xxp1;
+ struct cvmx_key_bist_reg_s cn61xx;
+ struct cvmx_key_bist_reg_s cn63xx;
+ struct cvmx_key_bist_reg_s cn63xxp1;
+ struct cvmx_key_bist_reg_s cn66xx;
+ struct cvmx_key_bist_reg_s cn68xx;
+ struct cvmx_key_bist_reg_s cn68xxp1;
+ struct cvmx_key_bist_reg_s cnf71xx;
+};
+typedef union cvmx_key_bist_reg cvmx_key_bist_reg_t;
+
+/**
+ * cvmx_key_ctl_status
+ *
+ * KEY_CTL_STATUS = KEY's Control/Status Register
+ *
+ * The KEY's control/status register.
+ */
+union cvmx_key_ctl_status {
+ uint64_t u64;
+ struct cvmx_key_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t mem1_err : 7; /**< Causes a flip of the ECC bits <38:32>
+ corresponding to bits <13:7> of this field, for FPF
+ FIFO 1. */
+ uint64_t mem0_err : 7; /**< Causes a flip of the ECC bits <38:32>
+ corresponding to bits <6:0> of this field, for FPF
+ FIFO 0. */
+#else
+ uint64_t mem0_err : 7;
+ uint64_t mem1_err : 7;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_key_ctl_status_s cn38xx;
+ struct cvmx_key_ctl_status_s cn38xxp2;
+ struct cvmx_key_ctl_status_s cn56xx;
+ struct cvmx_key_ctl_status_s cn56xxp1;
+ struct cvmx_key_ctl_status_s cn58xx;
+ struct cvmx_key_ctl_status_s cn58xxp1;
+ struct cvmx_key_ctl_status_s cn61xx;
+ struct cvmx_key_ctl_status_s cn63xx;
+ struct cvmx_key_ctl_status_s cn63xxp1;
+ struct cvmx_key_ctl_status_s cn66xx;
+ struct cvmx_key_ctl_status_s cn68xx;
+ struct cvmx_key_ctl_status_s cn68xxp1;
+ struct cvmx_key_ctl_status_s cnf71xx;
+};
+typedef union cvmx_key_ctl_status cvmx_key_ctl_status_t;
+
+/**
+ * cvmx_key_int_enb
+ *
+ * KEY_INT_ENB = KEY's Interrupt Enable
+ *
+ * The KEY's interrupt enable register.
+ */
+union cvmx_key_int_enb {
+ uint64_t u64;
+ struct cvmx_key_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t ked1_dbe : 1; /**< When set (1) and bit 3 of the KEY_INT_SUM
+ register is asserted the KEY will assert an
+ interrupt. */
+ uint64_t ked1_sbe : 1; /**< When set (1) and bit 2 of the KEY_INT_SUM
+ register is asserted the KEY will assert an
+ interrupt. */
+ uint64_t ked0_dbe : 1; /**< When set (1) and bit 1 of the KEY_INT_SUM
+ register is asserted the KEY will assert an
+ interrupt. */
+ uint64_t ked0_sbe : 1; /**< When set (1) and bit 0 of the KEY_INT_SUM
+ register is asserted the KEY will assert an
+ interrupt. */
+#else
+ uint64_t ked0_sbe : 1;
+ uint64_t ked0_dbe : 1;
+ uint64_t ked1_sbe : 1;
+ uint64_t ked1_dbe : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_key_int_enb_s cn38xx;
+ struct cvmx_key_int_enb_s cn38xxp2;
+ struct cvmx_key_int_enb_s cn56xx;
+ struct cvmx_key_int_enb_s cn56xxp1;
+ struct cvmx_key_int_enb_s cn58xx;
+ struct cvmx_key_int_enb_s cn58xxp1;
+ struct cvmx_key_int_enb_s cn61xx;
+ struct cvmx_key_int_enb_s cn63xx;
+ struct cvmx_key_int_enb_s cn63xxp1;
+ struct cvmx_key_int_enb_s cn66xx;
+ struct cvmx_key_int_enb_s cn68xx;
+ struct cvmx_key_int_enb_s cn68xxp1;
+ struct cvmx_key_int_enb_s cnf71xx;
+};
+typedef union cvmx_key_int_enb cvmx_key_int_enb_t;
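+
+/* Illustrative sketch (not part of the generated header): each union above
+ * pairs a raw u64 view with bitfield views, so fields can be set
+ * symbolically and the packed value written back. Assuming the standard
+ * cvmx_read_csr()/cvmx_write_csr() accessors:
+ *
+ *   cvmx_key_int_enb_t enb;
+ *   enb.u64 = cvmx_read_csr(CVMX_KEY_INT_ENB);
+ *   enb.s.ked0_sbe = 1;           // report single-bit ECC errors on KED0
+ *   enb.s.ked0_dbe = 1;           // and double-bit errors
+ *   cvmx_write_csr(CVMX_KEY_INT_ENB, enb.u64);
+ *
+ * The per-model members (cn38xx, cn56xx, ...) all alias the same layout
+ * for this register; they matter for CSRs whose fields differ by chip.
+ */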
+
+/**
+ * cvmx_key_int_sum
+ *
+ * KEY_INT_SUM = KEY's Interrupt Summary Register
+ *
+ * Contains the different interrupt summary bits of the KEY.
+ */
+union cvmx_key_int_sum {
+ uint64_t u64;
+ struct cvmx_key_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t ked1_dbe : 1;
+ uint64_t ked1_sbe : 1;
+ uint64_t ked0_dbe : 1;
+ uint64_t ked0_sbe : 1;
+#else
+ uint64_t ked0_sbe : 1;
+ uint64_t ked0_dbe : 1;
+ uint64_t ked1_sbe : 1;
+ uint64_t ked1_dbe : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_key_int_sum_s cn38xx;
+ struct cvmx_key_int_sum_s cn38xxp2;
+ struct cvmx_key_int_sum_s cn56xx;
+ struct cvmx_key_int_sum_s cn56xxp1;
+ struct cvmx_key_int_sum_s cn58xx;
+ struct cvmx_key_int_sum_s cn58xxp1;
+ struct cvmx_key_int_sum_s cn61xx;
+ struct cvmx_key_int_sum_s cn63xx;
+ struct cvmx_key_int_sum_s cn63xxp1;
+ struct cvmx_key_int_sum_s cn66xx;
+ struct cvmx_key_int_sum_s cn68xx;
+ struct cvmx_key_int_sum_s cn68xxp1;
+ struct cvmx_key_int_sum_s cnf71xx;
+};
+typedef union cvmx_key_int_sum cvmx_key_int_sum_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-key-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-key.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-key.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-key.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,116 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the on-chip key memory. Key memory is 8KB of
+ * on-chip storage that is inaccessible from off chip. It can
+ * also be cleared using an external hardware pin.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifndef __CVMX_KEY_H__
+#define __CVMX_KEY_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_KEY_MEM_SIZE 8192 /* Size in bytes */
+
+
+/**
+ * Read from KEY memory
+ *
+ * @param address Address (byte) in key memory to read
+ * 0 <= address < CVMX_KEY_MEM_SIZE
+ * @return Value from key memory
+ */
+static inline uint64_t cvmx_key_read(uint64_t address)
+{
+ cvmx_addr_t ptr;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_KEY_RW;
+ ptr.sio.offset = address;
+
+ return cvmx_read_csr(ptr.u64);
+}
+
+
+/**
+ * Write to KEY memory
+ *
+ * @param address Address (byte) in key memory to write
+ * 0 <= address < CVMX_KEY_MEM_SIZE
+ * @param value Value to write to key memory
+ */
+static inline void cvmx_key_write(uint64_t address, uint64_t value)
+{
+ cvmx_addr_t ptr;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_KEY_RW;
+ ptr.sio.offset = address;
+
+ cvmx_write_io(ptr.u64, value);
+}
+
+
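+/* Illustrative sketch (not part of the SDK source): round-trip a value
+ * through KEY memory. Addresses are byte offsets and accesses are 64-bit,
+ * so offsets should be 8-byte aligned and below CVMX_KEY_MEM_SIZE:
+ *
+ *   uint64_t secret = 0x0123456789abcdefull;
+ *   cvmx_key_write(0, secret);            // store at byte offset 0
+ *   uint64_t check = cvmx_key_read(0);    // read it back
+ *   // check == secret unless the external clear pin was asserted
+ */
+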
+/* CSR typedefs have been moved to cvmx-key-defs.h */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_KEY_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-key.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-l2c-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-l2c-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-l2c-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,6544 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-l2c-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon l2c.
+ *
+ * This file is auto-generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_L2C_DEFS_H__
+#define __CVMX_L2C_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_BIG_CTL CVMX_L2C_BIG_CTL_FUNC()
+static inline uint64_t CVMX_L2C_BIG_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_BIG_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080800030ull);
+}
+#else
+#define CVMX_L2C_BIG_CTL (CVMX_ADD_IO_SEG(0x0001180080800030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_BST CVMX_L2C_BST_FUNC()
+static inline uint64_t CVMX_L2C_BST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_BST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800808007F8ull);
+}
+#else
+#define CVMX_L2C_BST (CVMX_ADD_IO_SEG(0x00011800808007F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_BST0 CVMX_L2C_BST0_FUNC()
+static inline uint64_t CVMX_L2C_BST0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_BST0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800007F8ull);
+}
+#else
+#define CVMX_L2C_BST0 (CVMX_ADD_IO_SEG(0x00011800800007F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_BST1 CVMX_L2C_BST1_FUNC()
+static inline uint64_t CVMX_L2C_BST1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_BST1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800007F0ull);
+}
+#else
+#define CVMX_L2C_BST1 (CVMX_ADD_IO_SEG(0x00011800800007F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_BST2 CVMX_L2C_BST2_FUNC()
+static inline uint64_t CVMX_L2C_BST2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_BST2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800007E8ull);
+}
+#else
+#define CVMX_L2C_BST2 (CVMX_ADD_IO_SEG(0x00011800800007E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_BST_MEMX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_BST_MEMX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080C007F8ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_BST_MEMX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F8ull) + ((block_id) & 3) * 0x40000ull)
+#endif
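+
+/* Illustrative note (not part of the generated header): indexed CSR
+ * accessors such as CVMX_L2C_BST_MEMX() take a TAD/block number and stride
+ * the base address by 0x40000 per block. Of the models listed, only CN68XX
+ * has four TADs; the rest accept index 0 only, and the checking build
+ * warns on out-of-range indices. A sketch of polling every TAD:
+ *
+ *   int ntad = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 : 1;
+ *   for (int tad = 0; tad < ntad; tad++) {
+ *       uint64_t bst = cvmx_read_csr(CVMX_L2C_BST_MEMX(tad));
+ *       if (bst)     // any set bit reports a BIST failure on that TAD
+ *           cvmx_warn("L2C_BST_MEM%d: 0x%016llx\n", tad,
+ *                     (unsigned long long)bst);
+ *   }
+ */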
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_BST_TDTX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_BST_TDTX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A007F0ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_BST_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F0ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_BST_TTGX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_BST_TTGX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A007F8ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_BST_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F8ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_CFG CVMX_L2C_CFG_FUNC()
+static inline uint64_t CVMX_L2C_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000000ull);
+}
+#else
+#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_COP0_MAPX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1023) || ((offset >= 16128) && (offset <= 16383)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1535) || ((offset >= 16128) && (offset <= 16383)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 2559) || ((offset >= 16128) && (offset <= 16383)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 8191) || ((offset >= 16128) && (offset <= 16383)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1023) || ((offset >= 16128) && (offset <= 16383))))))
+ cvmx_warn("CVMX_L2C_COP0_MAPX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080940000ull) + ((offset) & 16383) * 8;
+}
+#else
+#define CVMX_L2C_COP0_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080940000ull) + ((offset) & 16383) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_CTL CVMX_L2C_CTL_FUNC()
+static inline uint64_t CVMX_L2C_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080800000ull);
+}
+#else
+#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_DBG CVMX_L2C_DBG_FUNC()
+static inline uint64_t CVMX_L2C_DBG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_DBG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000030ull);
+}
+#else
+#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_DUT CVMX_L2C_DUT_FUNC()
+static inline uint64_t CVMX_L2C_DUT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_DUT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000050ull);
+}
+#else
+#define CVMX_L2C_DUT (CVMX_ADD_IO_SEG(0x0001180080000050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_DUT_MAPX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1023))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1535))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 2559))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 8191))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1023)))))
+ cvmx_warn("CVMX_L2C_DUT_MAPX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080E00000ull) + ((offset) & 8191) * 8;
+}
+#else
+#define CVMX_L2C_DUT_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080E00000ull) + ((offset) & 8191) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_ERR_TDTX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_ERR_TDTX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_ERR_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_ERR_TTGX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_ERR_TTGX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_ERR_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_ERR_VBFX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_ERR_VBFX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080C007F0ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_ERR_VBFX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F0ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_ERR_XMC CVMX_L2C_ERR_XMC_FUNC()
+static inline uint64_t CVMX_L2C_ERR_XMC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_ERR_XMC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800808007D8ull);
+}
+#else
+#define CVMX_L2C_ERR_XMC (CVMX_ADD_IO_SEG(0x00011800808007D8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_GRPWRR0 CVMX_L2C_GRPWRR0_FUNC()
+static inline uint64_t CVMX_L2C_GRPWRR0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_GRPWRR0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800000C8ull);
+}
+#else
+#define CVMX_L2C_GRPWRR0 (CVMX_ADD_IO_SEG(0x00011800800000C8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_GRPWRR1 CVMX_L2C_GRPWRR1_FUNC()
+static inline uint64_t CVMX_L2C_GRPWRR1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_GRPWRR1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800000D0ull);
+}
+#else
+#define CVMX_L2C_GRPWRR1 (CVMX_ADD_IO_SEG(0x00011800800000D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_INT_EN CVMX_L2C_INT_EN_FUNC()
+static inline uint64_t CVMX_L2C_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000100ull);
+}
+#else
+#define CVMX_L2C_INT_EN (CVMX_ADD_IO_SEG(0x0001180080000100ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_INT_ENA CVMX_L2C_INT_ENA_FUNC()
+static inline uint64_t CVMX_L2C_INT_ENA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_INT_ENA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080800020ull);
+}
+#else
+#define CVMX_L2C_INT_ENA (CVMX_ADD_IO_SEG(0x0001180080800020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_INT_REG CVMX_L2C_INT_REG_FUNC()
+static inline uint64_t CVMX_L2C_INT_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_INT_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080800018ull);
+}
+#else
+#define CVMX_L2C_INT_REG (CVMX_ADD_IO_SEG(0x0001180080800018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_INT_STAT CVMX_L2C_INT_STAT_FUNC()
+static inline uint64_t CVMX_L2C_INT_STAT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_INT_STAT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800000F8ull);
+}
+#else
+#define CVMX_L2C_INT_STAT (CVMX_ADD_IO_SEG(0x00011800800000F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_IOCX_PFC(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_IOCX_PFC(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080800420ull);
+}
+#else
+#define CVMX_L2C_IOCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800420ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_IORX_PFC(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_IORX_PFC(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080800428ull);
+}
+#else
+#define CVMX_L2C_IORX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800428ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_LCKBASE CVMX_L2C_LCKBASE_FUNC()
+static inline uint64_t CVMX_L2C_LCKBASE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_LCKBASE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000058ull);
+}
+#else
+#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_LCKOFF CVMX_L2C_LCKOFF_FUNC()
+static inline uint64_t CVMX_L2C_LCKOFF_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_LCKOFF not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000060ull);
+}
+#else
+#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_LFB0 CVMX_L2C_LFB0_FUNC()
+static inline uint64_t CVMX_L2C_LFB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_LFB0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000038ull);
+}
+#else
+#define CVMX_L2C_LFB0 (CVMX_ADD_IO_SEG(0x0001180080000038ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_LFB1 CVMX_L2C_LFB1_FUNC()
+static inline uint64_t CVMX_L2C_LFB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_LFB1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000040ull);
+}
+#else
+#define CVMX_L2C_LFB1 (CVMX_ADD_IO_SEG(0x0001180080000040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_LFB2 CVMX_L2C_LFB2_FUNC()
+static inline uint64_t CVMX_L2C_LFB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_LFB2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000048ull);
+}
+#else
+#define CVMX_L2C_LFB2 (CVMX_ADD_IO_SEG(0x0001180080000048ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_LFB3 CVMX_L2C_LFB3_FUNC()
+static inline uint64_t CVMX_L2C_LFB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_LFB3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800000B8ull);
+}
+#else
+#define CVMX_L2C_LFB3 (CVMX_ADD_IO_SEG(0x00011800800000B8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_OOB CVMX_L2C_OOB_FUNC()
+static inline uint64_t CVMX_L2C_OOB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_OOB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800000D8ull);
+}
+#else
+#define CVMX_L2C_OOB (CVMX_ADD_IO_SEG(0x00011800800000D8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_OOB1 CVMX_L2C_OOB1_FUNC()
+static inline uint64_t CVMX_L2C_OOB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_OOB1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800000E0ull);
+}
+#else
+#define CVMX_L2C_OOB1 (CVMX_ADD_IO_SEG(0x00011800800000E0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_OOB2 CVMX_L2C_OOB2_FUNC()
+static inline uint64_t CVMX_L2C_OOB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_OOB2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800000E8ull);
+}
+#else
+#define CVMX_L2C_OOB2 (CVMX_ADD_IO_SEG(0x00011800800000E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_OOB3 CVMX_L2C_OOB3_FUNC()
+static inline uint64_t CVMX_L2C_OOB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_OOB3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800000F0ull);
+}
+#else
+#define CVMX_L2C_OOB3 (CVMX_ADD_IO_SEG(0x00011800800000F0ull))
+#endif
+#define CVMX_L2C_PFC0 CVMX_L2C_PFCX(0)
+#define CVMX_L2C_PFC1 CVMX_L2C_PFCX(1)
+#define CVMX_L2C_PFC2 CVMX_L2C_PFCX(2)
+#define CVMX_L2C_PFC3 CVMX_L2C_PFCX(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_PFCTL CVMX_L2C_PFCTL_FUNC()
+static inline uint64_t CVMX_L2C_PFCTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_PFCTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000090ull);
+}
+#else
+#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_PFCX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_L2C_PFCX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080000098ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_L2C_PFCX(offset) (CVMX_ADD_IO_SEG(0x0001180080000098ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_PPGRP CVMX_L2C_PPGRP_FUNC()
+static inline uint64_t CVMX_L2C_PPGRP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_PPGRP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800000C0ull);
+}
+#else
+#define CVMX_L2C_PPGRP (CVMX_ADD_IO_SEG(0x00011800800000C0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_QOS_IOBX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_L2C_QOS_IOBX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080880200ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_L2C_QOS_IOBX(offset) (CVMX_ADD_IO_SEG(0x0001180080880200ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_QOS_PPX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_L2C_QOS_PPX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080880000ull) + ((offset) & 31) * 8;
+}
+#else
+#define CVMX_L2C_QOS_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080880000ull) + ((offset) & 31) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_QOS_WGT CVMX_L2C_QOS_WGT_FUNC()
+static inline uint64_t CVMX_L2C_QOS_WGT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_QOS_WGT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080800008ull);
+}
+#else
+#define CVMX_L2C_QOS_WGT (CVMX_ADD_IO_SEG(0x0001180080800008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_RSCX_PFC(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_L2C_RSCX_PFC(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080800410ull) + ((offset) & 3) * 64;
+}
+#else
+#define CVMX_L2C_RSCX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800410ull) + ((offset) & 3) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_RSDX_PFC(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_L2C_RSDX_PFC(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080800418ull) + ((offset) & 3) * 64;
+}
+#else
+#define CVMX_L2C_RSDX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800418ull) + ((offset) & 3) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_SPAR0 CVMX_L2C_SPAR0_FUNC()
+static inline uint64_t CVMX_L2C_SPAR0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_SPAR0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000068ull);
+}
+#else
+#define CVMX_L2C_SPAR0 (CVMX_ADD_IO_SEG(0x0001180080000068ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_SPAR1 CVMX_L2C_SPAR1_FUNC()
+static inline uint64_t CVMX_L2C_SPAR1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_L2C_SPAR1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000070ull);
+}
+#else
+#define CVMX_L2C_SPAR1 (CVMX_ADD_IO_SEG(0x0001180080000070ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_SPAR2 CVMX_L2C_SPAR2_FUNC()
+static inline uint64_t CVMX_L2C_SPAR2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_L2C_SPAR2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000078ull);
+}
+#else
+#define CVMX_L2C_SPAR2 (CVMX_ADD_IO_SEG(0x0001180080000078ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_SPAR3 CVMX_L2C_SPAR3_FUNC()
+static inline uint64_t CVMX_L2C_SPAR3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_L2C_SPAR3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000080ull);
+}
+#else
+#define CVMX_L2C_SPAR3 (CVMX_ADD_IO_SEG(0x0001180080000080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_SPAR4 CVMX_L2C_SPAR4_FUNC()
+static inline uint64_t CVMX_L2C_SPAR4_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2C_SPAR4 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000088ull);
+}
+#else
+#define CVMX_L2C_SPAR4 (CVMX_ADD_IO_SEG(0x0001180080000088ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_TADX_ECC0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_TADX_ECC0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A00018ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_TADX_ECC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00018ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_TADX_ECC1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_TADX_ECC1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A00020ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_TADX_ECC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00020ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_TADX_IEN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_TADX_IEN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A00000ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_TADX_IEN(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00000ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_TADX_INT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_TADX_INT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A00028ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_TADX_INT(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00028ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_TADX_PFC0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_TADX_PFC0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A00400ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_TADX_PFC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00400ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_TADX_PFC1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_TADX_PFC1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A00408ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_TADX_PFC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00408ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_TADX_PFC2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_TADX_PFC2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A00410ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_TADX_PFC2(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00410ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_TADX_PFC3(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_TADX_PFC3(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A00418ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_TADX_PFC3(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00418ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_TADX_PRF(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_TADX_PRF(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A00008ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_TADX_PRF(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00008ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_TADX_TAG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_L2C_TADX_TAG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180080A00010ull) + ((block_id) & 3) * 0x40000ull;
+}
+#else
+#define CVMX_L2C_TADX_TAG(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00010ull) + ((block_id) & 3) * 0x40000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_VER_ID CVMX_L2C_VER_ID_FUNC()
+static inline uint64_t CVMX_L2C_VER_ID_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_VER_ID not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800808007E0ull);
+}
+#else
+#define CVMX_L2C_VER_ID (CVMX_ADD_IO_SEG(0x00011800808007E0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_VER_IOB CVMX_L2C_VER_IOB_FUNC()
+static inline uint64_t CVMX_L2C_VER_IOB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_VER_IOB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800808007F0ull);
+}
+#else
+#define CVMX_L2C_VER_IOB (CVMX_ADD_IO_SEG(0x00011800808007F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_VER_MSC CVMX_L2C_VER_MSC_FUNC()
+static inline uint64_t CVMX_L2C_VER_MSC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_VER_MSC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800808007D0ull);
+}
+#else
+#define CVMX_L2C_VER_MSC (CVMX_ADD_IO_SEG(0x00011800808007D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_VER_PP CVMX_L2C_VER_PP_FUNC()
+static inline uint64_t CVMX_L2C_VER_PP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_VER_PP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800808007E8ull);
+}
+#else
+#define CVMX_L2C_VER_PP (CVMX_ADD_IO_SEG(0x00011800808007E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_VIRTID_IOBX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_L2C_VIRTID_IOBX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800808C0200ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_L2C_VIRTID_IOBX(offset) (CVMX_ADD_IO_SEG(0x00011800808C0200ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_VIRTID_PPX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_L2C_VIRTID_PPX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800808C0000ull) + ((offset) & 31) * 8;
+}
+#else
+#define CVMX_L2C_VIRTID_PPX(offset) (CVMX_ADD_IO_SEG(0x00011800808C0000ull) + ((offset) & 31) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_VRT_CTL CVMX_L2C_VRT_CTL_FUNC()
+static inline uint64_t CVMX_L2C_VRT_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_VRT_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080800010ull);
+}
+#else
+#define CVMX_L2C_VRT_CTL (CVMX_ADD_IO_SEG(0x0001180080800010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_VRT_MEMX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1023))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1023))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1023))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1023))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1023)))))
+ cvmx_warn("CVMX_L2C_VRT_MEMX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080900000ull) + ((offset) & 1023) * 8;
+}
+#else
+#define CVMX_L2C_VRT_MEMX(offset) (CVMX_ADD_IO_SEG(0x0001180080900000ull) + ((offset) & 1023) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_WPAR_IOBX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_L2C_WPAR_IOBX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080840200ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_L2C_WPAR_IOBX(offset) (CVMX_ADD_IO_SEG(0x0001180080840200ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_WPAR_PPX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_L2C_WPAR_PPX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080840000ull) + ((offset) & 31) * 8;
+}
+#else
+#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + ((offset) & 31) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_XMCX_PFC(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_L2C_XMCX_PFC(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080800400ull) + ((offset) & 3) * 64;
+}
+#else
+#define CVMX_L2C_XMCX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800400ull) + ((offset) & 3) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2C_XMC_CMD CVMX_L2C_XMC_CMD_FUNC()
+static inline uint64_t CVMX_L2C_XMC_CMD_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_L2C_XMC_CMD not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080800028ull);
+}
+#else
+#define CVMX_L2C_XMC_CMD (CVMX_ADD_IO_SEG(0x0001180080800028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_L2C_XMDX_PFC(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_L2C_XMDX_PFC(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180080800408ull) + ((offset) & 3) * 64;
+}
+#else
+#define CVMX_L2C_XMDX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800408ull) + ((offset) & 3) * 64)
+#endif
+
+/**
+ * cvmx_l2c_big_ctl
+ *
+ * L2C_BIG_CTL = L2C Big memory control register
+ *
+ *
+ * Notes:
+ * (1) BIGRD interrupts can occur during normal operation because the PPs are allowed to prefetch
+ * from non-existent memory locations. Therefore, BIGRD is for informational purposes only.
+ *
+ * (2) When HOLEWR/BIGWR blocks a store, L2C_VER_ID, L2C_VER_PP, L2C_VER_IOB, and L2C_VER_MSC will be
+ * loaded just as for a store which is blocked by VRTWR. Additionally, L2C_ERR_XMC will be loaded.
+ */
+union cvmx_l2c_big_ctl {
+ uint64_t u64;
+ struct cvmx_l2c_big_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t maxdram : 4; /**< Amount of configured DRAM
+ 0 = reserved
+ 1 = 512MB
+ 2 = 1GB
+ 3 = 2GB
+ 4 = 4GB
+ 5 = 8GB
+ 6 = 16GB
+ 7 = 32GB
+ 8 = 64GB (**reserved in 63xx**)
+ 9 = 128GB (**reserved in 63xx**)
+ 10-15 reserved
+ Violations of this limit cause
+ L2C to set L2C_INT_REG[BIGRD/BIGWR]. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t disable : 1; /**< When set, disables the BIGWR/BIGRD logic completely
+ and reverts HOLEWR to 63xx pass 1.x behavior.
+ When clear, BIGWR and HOLEWR block stores in the
+ same manner as the VRT logic, and BIGRD is reported. */
+#else
+ uint64_t disable : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t maxdram : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_l2c_big_ctl_s cn61xx;
+ struct cvmx_l2c_big_ctl_s cn63xx;
+ struct cvmx_l2c_big_ctl_s cn66xx;
+ struct cvmx_l2c_big_ctl_s cn68xx;
+ struct cvmx_l2c_big_ctl_s cn68xxp1;
+ struct cvmx_l2c_big_ctl_s cnf71xx;
+};
+typedef union cvmx_l2c_big_ctl cvmx_l2c_big_ctl_t;
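+
+/* Illustrative sketch (not part of the generated header): decode the
+ * configured DRAM limit from L2C_BIG_CTL. MAXDRAM encodes powers of two
+ * starting at 512MB (1 => 512MB, 2 => 1GB, ...), so for the defined
+ * encodings the limit in bytes is (256MB << MAXDRAM):
+ *
+ *   cvmx_l2c_big_ctl_t big;
+ *   big.u64 = cvmx_read_csr(CVMX_L2C_BIG_CTL);
+ *   if (!big.s.disable && big.s.maxdram >= 1 && big.s.maxdram <= 9) {
+ *       uint64_t limit = (256ull << 20) << big.s.maxdram;
+ *       // accesses at or above 'limit' set L2C_INT_REG[BIGRD/BIGWR]
+ *   }
+ */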
+
+/**
+ * cvmx_l2c_bst
+ *
+ * L2C_BST = L2C BIST Status
+ *
+ */
+union cvmx_l2c_bst {
+ uint64_t u64;
+ struct cvmx_l2c_bst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dutfl : 32; /**< BIST failure status for PP0-3 DUT */
+ uint64_t rbffl : 4; /**< BIST failure status for RBF0-3 */
+ uint64_t xbffl : 4; /**< BIST failure status for XBF0-3 */
+ uint64_t tdpfl : 4; /**< BIST failure status for TDP0-3 */
+ uint64_t ioccmdfl : 4; /**< BIST failure status for IOCCMD */
+ uint64_t iocdatfl : 4; /**< BIST failure status for IOCDAT */
+ uint64_t dutresfl : 4; /**< BIST failure status for DUTRES */
+ uint64_t vrtfl : 4; /**< BIST failure status for VRT0 */
+ uint64_t tdffl : 4; /**< BIST failure status for TDF0 */
+#else
+ uint64_t tdffl : 4;
+ uint64_t vrtfl : 4;
+ uint64_t dutresfl : 4;
+ uint64_t iocdatfl : 4;
+ uint64_t ioccmdfl : 4;
+ uint64_t tdpfl : 4;
+ uint64_t xbffl : 4;
+ uint64_t rbffl : 4;
+ uint64_t dutfl : 32;
+#endif
+ } s;
+ struct cvmx_l2c_bst_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t dutfl : 4; /**< BIST failure status for PP0-3 DUT */
+ uint64_t reserved_17_31 : 15;
+ uint64_t ioccmdfl : 1; /**< BIST failure status for IOCCMD */
+ uint64_t reserved_13_15 : 3;
+ uint64_t iocdatfl : 1; /**< BIST failure status for IOCDAT */
+ uint64_t reserved_9_11 : 3;
+ uint64_t dutresfl : 1; /**< BIST failure status for DUTRES */
+ uint64_t reserved_5_7 : 3;
+ uint64_t vrtfl : 1; /**< BIST failure status for VRT0 */
+ uint64_t reserved_1_3 : 3;
+ uint64_t tdffl : 1; /**< BIST failure status for TDF0 */
+#else
+ uint64_t tdffl : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t vrtfl : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t dutresfl : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t iocdatfl : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t ioccmdfl : 1;
+ uint64_t reserved_17_31 : 15;
+ uint64_t dutfl : 4;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn61xx;
+ struct cvmx_l2c_bst_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t dutfl : 6; /**< BIST failure status for PP0-5 DUT */
+ uint64_t reserved_17_31 : 15;
+ uint64_t ioccmdfl : 1; /**< BIST failure status for IOCCMD */
+ uint64_t reserved_13_15 : 3;
+ uint64_t iocdatfl : 1; /**< BIST failure status for IOCDAT */
+ uint64_t reserved_9_11 : 3;
+ uint64_t dutresfl : 1; /**< BIST failure status for DUTRES */
+ uint64_t reserved_5_7 : 3;
+ uint64_t vrtfl : 1; /**< BIST failure status for VRT0 */
+ uint64_t reserved_1_3 : 3;
+ uint64_t tdffl : 1; /**< BIST failure status for TDF0 */
+#else
+ uint64_t tdffl : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t vrtfl : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t dutresfl : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t iocdatfl : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t ioccmdfl : 1;
+ uint64_t reserved_17_31 : 15;
+ uint64_t dutfl : 6;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } cn63xx;
+ struct cvmx_l2c_bst_cn63xx cn63xxp1;
+ struct cvmx_l2c_bst_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63 : 22;
+ uint64_t dutfl : 10; /**< BIST failure status for PP0-9 DUT */
+ uint64_t reserved_17_31 : 15;
+ uint64_t ioccmdfl : 1; /**< BIST failure status for IOCCMD */
+ uint64_t reserved_13_15 : 3;
+ uint64_t iocdatfl : 1; /**< BIST failure status for IOCDAT */
+ uint64_t reserved_9_11 : 3;
+ uint64_t dutresfl : 1; /**< BIST failure status for DUTRES */
+ uint64_t reserved_5_7 : 3;
+ uint64_t vrtfl : 1; /**< BIST failure status for VRT0 */
+ uint64_t reserved_1_3 : 3;
+ uint64_t tdffl : 1; /**< BIST failure status for TDF0 */
+#else
+ uint64_t tdffl : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t vrtfl : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t dutresfl : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t iocdatfl : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t ioccmdfl : 1;
+ uint64_t reserved_17_31 : 15;
+ uint64_t dutfl : 10;
+ uint64_t reserved_42_63 : 22;
+#endif
+ } cn66xx;
+ struct cvmx_l2c_bst_s cn68xx;
+ struct cvmx_l2c_bst_s cn68xxp1;
+ struct cvmx_l2c_bst_cn61xx cnf71xx;
+};
+typedef union cvmx_l2c_bst cvmx_l2c_bst_t;
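+
+/* Illustrative note (not part of the generated header): L2C_BST is one of
+ * the registers whose field widths differ by chip, so the per-model views
+ * are not mere aliases. DUTFL is 4 bits on CN61XX, 6 on CN63XX, 10 on
+ * CN66XX, and 32 in the common layout used by CN68XX, so decoders should
+ * pick the view that matches the running model, e.g. on CN61XX:
+ *
+ *   cvmx_l2c_bst_t bst;
+ *   bst.u64 = cvmx_read_csr(CVMX_L2C_BST);
+ *   if (OCTEON_IS_MODEL(OCTEON_CN61XX) && bst.cn61xx.dutfl)
+ *       cvmx_warn("PP DUT BIST failure mask: 0x%llx\n",
+ *                 (unsigned long long)bst.cn61xx.dutfl);
+ */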
+
+/**
+ * cvmx_l2c_bst0
+ *
+ * L2C_BST0 = L2C BIST 0 CTL/STAT
+ *
+ */
+union cvmx_l2c_bst0 {
+ uint64_t u64;
+ struct cvmx_l2c_bst0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t dtbnk : 1; /**< DuTag Bank#
+ When DT=1(BAD), this field provides additional information
+ about which DuTag Bank (0/1) failed. */
+ uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3]
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t dtcnt : 13; /**< DuTag BiST Counter (used to help isolate the failure)
+ [12]: i (0=FORWARD/1=REVERSE pass)
+ [11:10]: j (Pattern# 1 of 4)
+ [9:4]: k (DT Index 1 of 64)
+ [3:0]: l (DT# 1 of 16 DTs) */
+ uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3]
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t wlb_dat : 4;
+ uint64_t stin_msk : 1;
+ uint64_t dt : 1;
+ uint64_t dtcnt : 13;
+ uint64_t wlb_msk : 4;
+ uint64_t dtbnk : 1;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_l2c_bst0_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3]
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_15_18 : 4;
+ uint64_t dtcnt : 9; /**< DuTag BiST Counter (used to help isolate the failure)
+ [8]: i (0=FORWARD/1=REVERSE pass)
+ [7:6]: j (Pattern# 1 of 4)
+ [5:0]: k (DT Index 1 of 64) */
+ uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_4_4 : 1;
+ uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3]
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t wlb_dat : 4;
+ uint64_t reserved_4_4 : 1;
+ uint64_t dt : 1;
+ uint64_t dtcnt : 9;
+ uint64_t reserved_15_18 : 4;
+ uint64_t wlb_msk : 4;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } cn30xx;
+ struct cvmx_l2c_bst0_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3]
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_16_18 : 3;
+ uint64_t dtcnt : 10; /**< DuTag BiST Counter (used to help isolate the failure)
+ [9]: i (0=FORWARD/1=REVERSE pass)
+ [8:7]: j (Pattern# 1 of 4)
+ [6:1]: k (DT Index 1 of 64)
+ [0]: l (DT# 1 of 2 DTs) */
+ uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3]
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t wlb_dat : 4;
+ uint64_t stin_msk : 1;
+ uint64_t dt : 1;
+ uint64_t dtcnt : 10;
+ uint64_t reserved_16_18 : 3;
+ uint64_t wlb_msk : 4;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } cn31xx;
+ struct cvmx_l2c_bst0_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t dtcnt : 13; /**< DuTag BiST Counter (used to help isolate the failure)
+ [12]: i (0=FORWARD/1=REVERSE pass)
+ [11:10]: j (Pattern# 1 of 4)
+ [9:4]: k (DT Index 1 of 64)
+ [3:0]: l (DT# 1 of 16 DTs) */
+ uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3]
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t wlb_dat : 4;
+ uint64_t stin_msk : 1;
+ uint64_t dt : 1;
+ uint64_t dtcnt : 13;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn38xx;
+ struct cvmx_l2c_bst0_cn38xx cn38xxp2;
+ struct cvmx_l2c_bst0_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t dtbnk : 1; /**< DuTag Bank#
+ When DT=1(BAD), this field provides additional information
+ about which DuTag Bank (0/1) failed. */
+ uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3]
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_16_18 : 3;
+ uint64_t dtcnt : 10; /**< DuTag BiST Counter (used to help isolate the failure)
+ [9]: i (0=FORWARD/1=REVERSE pass)
+ [8:7]: j (Pattern# 1 of 4)
+ [6:1]: k (DT Index 1 of 64)
+ [0]: l (DT# 1 of 2 DTs) */
+ uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3]
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t wlb_dat : 4;
+ uint64_t stin_msk : 1;
+ uint64_t dt : 1;
+ uint64_t dtcnt : 10;
+ uint64_t reserved_16_18 : 3;
+ uint64_t wlb_msk : 4;
+ uint64_t dtbnk : 1;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } cn50xx;
+ struct cvmx_l2c_bst0_cn50xx cn52xx;
+ struct cvmx_l2c_bst0_cn50xx cn52xxp1;
+ struct cvmx_l2c_bst0_s cn56xx;
+ struct cvmx_l2c_bst0_s cn56xxp1;
+ struct cvmx_l2c_bst0_s cn58xx;
+ struct cvmx_l2c_bst0_s cn58xxp1;
+};
+typedef union cvmx_l2c_bst0 cvmx_l2c_bst0_t;
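+
+/* Illustrative example (not part of the SDK sources): split L2C_BST0[DTCNT]
+ * into the i/j/k/l subfields documented above. A minimal sketch for the
+ * 13-bit CN38XX/CN56XX layout; the CN30XX/CN31XX/CN50XX counters are 9 and
+ * 10 bits wide with correspondingly smaller subfields. */
+static inline void cvmx_l2c_bst0_decode_dtcnt_example(uint64_t dtcnt)
+{
+    unsigned i = (dtcnt >> 12) & 0x1;   /* 0=FORWARD/1=REVERSE pass */
+    unsigned j = (dtcnt >> 10) & 0x3;   /* pattern# (1 of 4) */
+    unsigned k = (dtcnt >> 4) & 0x3f;   /* DT index (1 of 64) */
+    unsigned l = dtcnt & 0xf;           /* DT# (1 of 16 DTs) */
+    cvmx_dprintf("DuTag BIST counter: i=%u j=%u k=%u l=%u\n", i, j, k, l);
+}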
+
+/**
+ * cvmx_l2c_bst1
+ *
+ * L2C_BST1 = L2C BIST 1 CTL/STAT
+ *
+ */
+union cvmx_l2c_bst1 {
+ uint64_t u64;
+ struct cvmx_l2c_bst1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t l2t : 9;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_l2c_bst1_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t vwdf : 4; /**< Bist Results for VWDF RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t lrf : 2; /**< Bist Results for LRF RAMs (PLC+ILC)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t vab_vwcf : 1; /**< Bist Results for VAB VWCF_MEM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_5_8 : 4;
+ uint64_t l2t : 5; /**< Bist Results for L2T (USE+4SET RAMs)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t l2t : 5;
+ uint64_t reserved_5_8 : 4;
+ uint64_t vab_vwcf : 1;
+ uint64_t lrf : 2;
+ uint64_t vwdf : 4;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn30xx;
+ struct cvmx_l2c_bst1_cn30xx cn31xx;
+ struct cvmx_l2c_bst1_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t vwdf : 4; /**< Bist Results for VWDF RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t lrf : 2; /**< Bist Results for LRF RAMs (PLC+ILC)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t vab_vwcf : 1; /**< Bist Results for VAB VWCF_MEM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t l2t : 9;
+ uint64_t vab_vwcf : 1;
+ uint64_t lrf : 2;
+ uint64_t vwdf : 4;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_l2c_bst1_cn38xx cn38xxp2;
+ struct cvmx_l2c_bst1_cn38xx cn50xx;
+ struct cvmx_l2c_bst1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t plc2 : 1; /**< Bist Results for PLC2 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t plc1 : 1; /**< Bist Results for PLC1 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t plc0 : 1; /**< Bist Results for PLC0 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t vwdf : 4; /**< Bist Results for VWDF RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_11_11 : 1;
+ uint64_t ilc : 1; /**< Bist Results for ILC RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t vab_vwcf : 1; /**< Bist Results for VAB VWCF_MEM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t l2t : 9;
+ uint64_t vab_vwcf : 1;
+ uint64_t ilc : 1;
+ uint64_t reserved_11_11 : 1;
+ uint64_t vwdf : 4;
+ uint64_t plc0 : 1;
+ uint64_t plc1 : 1;
+ uint64_t plc2 : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn52xx;
+ struct cvmx_l2c_bst1_cn52xx cn52xxp1;
+ struct cvmx_l2c_bst1_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t plc2 : 1; /**< Bist Results for PLC2 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t plc1 : 1; /**< Bist Results for PLC1 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t plc0 : 1; /**< Bist Results for PLC0 RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ilc : 1; /**< Bist Results for ILC RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t vwdf1 : 4; /**< Bist Results for VWDF1 RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t vwdf0 : 4; /**< Bist Results for VWDF0 RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t vab_vwcf1 : 1; /**< Bist Results for VAB VWCF1_MEM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_10_10 : 1;
+ uint64_t vab_vwcf0 : 1; /**< Bist Results for VAB VWCF0_MEM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t l2t : 9;
+ uint64_t vab_vwcf0 : 1;
+ uint64_t reserved_10_10 : 1;
+ uint64_t vab_vwcf1 : 1;
+ uint64_t vwdf0 : 4;
+ uint64_t vwdf1 : 4;
+ uint64_t ilc : 1;
+ uint64_t plc0 : 1;
+ uint64_t plc1 : 1;
+ uint64_t plc2 : 1;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } cn56xx;
+ struct cvmx_l2c_bst1_cn56xx cn56xxp1;
+ struct cvmx_l2c_bst1_cn38xx cn58xx;
+ struct cvmx_l2c_bst1_cn38xx cn58xxp1;
+};
+typedef union cvmx_l2c_bst1 cvmx_l2c_bst1_t;
+
+/**
+ * cvmx_l2c_bst2
+ *
+ * L2C_BST2 = L2C BIST 2 CTL/STAT
+ *
+ */
+union cvmx_l2c_bst2 {
+ uint64_t u64;
+ struct cvmx_l2c_bst2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mrb : 4; /**< Bist Results for MRB RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_4_11 : 8;
+ uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t picbst : 1; /**< Bist Results for RFB PIC RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t xrddat : 1;
+ uint64_t xrdmsk : 1;
+ uint64_t picbst : 1;
+ uint64_t ipcbst : 1;
+ uint64_t reserved_4_11 : 8;
+ uint64_t mrb : 4;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_l2c_bst2_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mrb : 4; /**< Bist Results for MRB RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t rmdf : 4; /**< Bist Results for RMDF RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_4_7 : 4;
+ uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_2_2 : 1;
+ uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t xrddat : 1;
+ uint64_t xrdmsk : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t ipcbst : 1;
+ uint64_t reserved_4_7 : 4;
+ uint64_t rmdf : 4;
+ uint64_t mrb : 4;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn30xx;
+ struct cvmx_l2c_bst2_cn30xx cn31xx;
+ struct cvmx_l2c_bst2_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mrb : 4; /**< Bist Results for MRB RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t rmdf : 4; /**< Bist Results for RMDF RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t rhdf : 4; /**< Bist Results for RHDF RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t picbst : 1; /**< Bist Results for RFB PIC RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t xrddat : 1;
+ uint64_t xrdmsk : 1;
+ uint64_t picbst : 1;
+ uint64_t ipcbst : 1;
+ uint64_t rhdf : 4;
+ uint64_t rmdf : 4;
+ uint64_t mrb : 4;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_l2c_bst2_cn38xx cn38xxp2;
+ struct cvmx_l2c_bst2_cn30xx cn50xx;
+ struct cvmx_l2c_bst2_cn30xx cn52xx;
+ struct cvmx_l2c_bst2_cn30xx cn52xxp1;
+ struct cvmx_l2c_bst2_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mrb : 4; /**< Bist Results for MRB RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t rmdb : 4; /**< Bist Results for RMDB RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t rhdb : 4; /**< Bist Results for RHDB RAMs
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t picbst : 1; /**< Bist Results for RFB PIC RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t xrddat : 1;
+ uint64_t xrdmsk : 1;
+ uint64_t picbst : 1;
+ uint64_t ipcbst : 1;
+ uint64_t rhdb : 4;
+ uint64_t rmdb : 4;
+ uint64_t mrb : 4;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn56xx;
+ struct cvmx_l2c_bst2_cn56xx cn56xxp1;
+ struct cvmx_l2c_bst2_cn56xx cn58xx;
+ struct cvmx_l2c_bst2_cn56xx cn58xxp1;
+};
+typedef union cvmx_l2c_bst2 cvmx_l2c_bst2_t;
+
+/**
+ * cvmx_l2c_bst_mem#
+ *
+ * L2C_BST_MEM = L2C MEM BIST Status
+ *
+ *
+ * Notes:
+ * (1) CLEAR_BIST must be written to 1 before START_BIST is written to 1 using a separate CSR write.
+ *
+ * (2) CLEAR_BIST must not be changed after writing START_BIST to 1 until the BIST operation completes
+ * (indicated by START_BIST returning to 0) or the operation is undefined.
+ */
+union cvmx_l2c_bst_memx {
+ uint64_t u64;
+ struct cvmx_l2c_bst_memx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t start_bist : 1; /**< When written to 1, starts BIST. Will read 1 until
+ BIST is complete (see Note). */
+ uint64_t clear_bist : 1; /**< When BIST is triggered, run clear BIST (see Note) */
+ uint64_t reserved_5_61 : 57;
+ uint64_t rdffl : 1; /**< BIST failure status for RDF */
+ uint64_t vbffl : 4; /**< BIST failure status for VBF0-3 */
+#else
+ uint64_t vbffl : 4;
+ uint64_t rdffl : 1;
+ uint64_t reserved_5_61 : 57;
+ uint64_t clear_bist : 1;
+ uint64_t start_bist : 1;
+#endif
+ } s;
+ struct cvmx_l2c_bst_memx_s cn61xx;
+ struct cvmx_l2c_bst_memx_s cn63xx;
+ struct cvmx_l2c_bst_memx_s cn63xxp1;
+ struct cvmx_l2c_bst_memx_s cn66xx;
+ struct cvmx_l2c_bst_memx_s cn68xx;
+ struct cvmx_l2c_bst_memx_s cn68xxp1;
+ struct cvmx_l2c_bst_memx_s cnf71xx;
+};
+typedef union cvmx_l2c_bst_memx cvmx_l2c_bst_memx_t;
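+
+/* Illustrative example (not part of the SDK sources): run a clear-BIST pass
+ * on L2C_BST_MEM(0) following notes (1) and (2) above: CLEAR_BIST is set in
+ * a separate CSR write before START_BIST, and neither bit changes until
+ * START_BIST reads back 0. A sketch assuming the CVMX_L2C_BST_MEMX(offset)
+ * address macro and cvmx_read_csr()/cvmx_write_csr(). */
+static inline uint64_t cvmx_l2c_bst_mem_clear_bist_example(void)
+{
+    cvmx_l2c_bst_memx_t bst;
+
+    bst.u64 = 0;
+    bst.s.clear_bist = 1;                          /* note (1): separate write */
+    cvmx_write_csr(CVMX_L2C_BST_MEMX(0), bst.u64);
+    bst.s.start_bist = 1;                          /* CLEAR_BIST stays 1 */
+    cvmx_write_csr(CVMX_L2C_BST_MEMX(0), bst.u64);
+    do {                                           /* note (2): poll until done */
+        bst.u64 = cvmx_read_csr(CVMX_L2C_BST_MEMX(0));
+    } while (bst.s.start_bist);
+    return bst.s.rdffl | bst.s.vbffl;              /* nonzero on RDF/VBF failure */
+}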
+
+/**
+ * cvmx_l2c_bst_tdt#
+ *
+ * L2C_BST_TDT = L2C TAD DaTa BIST Status
+ *
+ */
+union cvmx_l2c_bst_tdtx {
+ uint64_t u64;
+ struct cvmx_l2c_bst_tdtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t fbfrspfl : 8; /**< BIST failure status for quad 0-7 FBF RSP read port */
+ uint64_t sbffl : 8; /**< BIST failure status for quad 0-7 SBF */
+ uint64_t fbffl : 8; /**< BIST failure status for quad 0-7 FBF WRP read port */
+ uint64_t l2dfl : 8; /**< BIST failure status for quad 0-7 L2D */
+#else
+ uint64_t l2dfl : 8;
+ uint64_t fbffl : 8;
+ uint64_t sbffl : 8;
+ uint64_t fbfrspfl : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_l2c_bst_tdtx_s cn61xx;
+ struct cvmx_l2c_bst_tdtx_s cn63xx;
+ struct cvmx_l2c_bst_tdtx_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t sbffl : 8; /**< BIST failure status for quad 0-7 SBF */
+ uint64_t fbffl : 8; /**< BIST failure status for quad 0-7 FBF */
+ uint64_t l2dfl : 8; /**< BIST failure status for quad 0-7 L2D */
+#else
+ uint64_t l2dfl : 8;
+ uint64_t fbffl : 8;
+ uint64_t sbffl : 8;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } cn63xxp1;
+ struct cvmx_l2c_bst_tdtx_s cn66xx;
+ struct cvmx_l2c_bst_tdtx_s cn68xx;
+ struct cvmx_l2c_bst_tdtx_s cn68xxp1;
+ struct cvmx_l2c_bst_tdtx_s cnf71xx;
+};
+typedef union cvmx_l2c_bst_tdtx cvmx_l2c_bst_tdtx_t;
+
+/**
+ * cvmx_l2c_bst_ttg#
+ *
+ * L2C_BST_TTG = L2C TAD TaG BIST Status
+ *
+ */
+union cvmx_l2c_bst_ttgx {
+ uint64_t u64;
+ struct cvmx_l2c_bst_ttgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t lrufl : 1; /**< BIST failure status for tag LRU */
+ uint64_t tagfl : 16; /**< BIST failure status for tag ways 0-15 */
+#else
+ uint64_t tagfl : 16;
+ uint64_t lrufl : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_l2c_bst_ttgx_s cn61xx;
+ struct cvmx_l2c_bst_ttgx_s cn63xx;
+ struct cvmx_l2c_bst_ttgx_s cn63xxp1;
+ struct cvmx_l2c_bst_ttgx_s cn66xx;
+ struct cvmx_l2c_bst_ttgx_s cn68xx;
+ struct cvmx_l2c_bst_ttgx_s cn68xxp1;
+ struct cvmx_l2c_bst_ttgx_s cnf71xx;
+};
+typedef union cvmx_l2c_bst_ttgx cvmx_l2c_bst_ttgx_t;
+
+/**
+ * cvmx_l2c_cfg
+ *
+ * Specify the RSL base addresses for the block
+ *
+ * L2C_CFG = L2C Configuration
+ *
+ * Description:
+ */
+union cvmx_l2c_cfg {
+ uint64_t u64;
+ struct cvmx_l2c_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t bstrun : 1; /**< L2 Data Store Bist Running
+ Indicates when the L2C HW Bist sequence(short or long) is
+ running. [L2C ECC Bist FSM is not in the RESET/DONE state] */
+ uint64_t lbist : 1; /**< L2C Data Store Long Bist Sequence
+ When the previous state was '0' and SW writes a '1',
+ the long bist sequence (enhanced 13N March) is performed.
+ SW can then read the L2C_CFG[BSTRUN] which will indicate
+ that the long bist sequence is running. When BSTRUN=0,
+ the L2D_BST[0-3] registers contain information
+ which reflects the status of the recent long bist sequence.
+ NOTE: SW must never write LBIST=0 while Long Bist is running
+ (ie: when BSTRUN=1 never write LBIST=0).
+ NOTE: LBIST is disabled if the MIO_FUS_DAT2.BIST_DIS
+ Fuse is blown. */
+ uint64_t xor_bank : 1; /**< L2C XOR Bank Bit
+ When both LMC's are enabled(DPRES1=1/DPRES0=1), this
+ bit determines how addresses are assigned to
+ LMC port(s).
+ XOR_BANK| LMC#
+ ----------+---------------------------------
+ 0 | byte address[7]
+ 1 | byte address[7] XOR byte address[12]
+ Example: If both LMC ports are enabled (DPRES1=1/DPRES0=1)
+ and XOR_BANK=1, then addr[7] XOR addr[12] is used to determine
+ which LMC Port# a reference is directed to. */
+ uint64_t dpres1 : 1; /**< DDR1 Present/LMC1 Enable
+ When DPRES1 is set, LMC#1 is enabled(DDR1 pins at
+ the BOTTOM of the chip are active).
+ NOTE: When both LMC ports are enabled(DPRES1=1/DPRES0=1),
+ see XOR_BANK bit to determine how a reference is
+ assigned to a DDR/LMC port. (Also, in dual-LMC configuration,
+ the address sent to the targeted LMC port is the
+ address shifted right by one).
+ NOTE: For power-savings, the DPRES1 is also used to
+ disable DDR1/LMC1 clocks. */
+ uint64_t dpres0 : 1; /**< DDR0 Present/LMC0 Enable
+ When DPRES0 is set, LMC#0 is enabled(DDR0 pins at
+ the BOTTOM of the chip are active).
+ NOTE: When both LMC ports are enabled(DPRES1=1/DPRES0=1),
+ see XOR_BANK bit to determine how a reference is
+ assigned to a DDR/LMC port. (Also, in dual-LMC configuration,
+ the address sent to the targeted LMC port is the
+ address shifted right by one).
+ NOTE: For power-savings, the DPRES0 is also used to
+ disable DDR0/LMC0 clocks. */
+ uint64_t dfill_dis : 1; /**< L2C Dual Fill Disable
+ When set, the L2C dual-fill performance feature is
+ disabled.
+ NOTE: This bit is only intended to evaluate the
+ effectiveness of the dual-fill feature. For OPTIMAL
+ performance, this bit should ALWAYS be zero. */
+ uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When FPEN is enabled and the LFB is empty, the
+ forward progress counter (FPCNT) is initialized to:
+ FPCNT[24:0] = 2^(9+FPEXP)
+ When the LFB is non-empty the FPCNT is decremented
+ (every eclk interval). If the FPCNT reaches zero,
+ the LFB no longer accepts new requests until either
+ a) all of the current LFB entries have completed
+ (to ensure forward progress).
+ b) FPEMPTY=0 and another forward progress count
+ interval timeout expires.
+ EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks.
+ (For eclk=500MHz(2ns), this would be ~4us). */
+ uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When set, if the forward progress counter expires,
+ all new LFB-NQs are stopped UNTIL all current LFB
+ entries have completed.
+ When clear, if the forward progress counter expires,
+ all new LFB-NQs are stopped UNTIL either
+ a) all current LFB entries have completed.
+ b) another forward progress interval expires
+ NOTE: We may want to FREEZE/HANG the system when
+ we encounter an LFB entry that cannot complete, and there
+ may be times when we want to allow further LFB-NQs
+ to be permitted to help in further analyzing the
+ source */
+ uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When set, enables the Forward Progress Counter to
+ prevent new LFB entries from enqueueing until ALL
+ current LFB entries have completed. */
+ uint64_t idxalias : 1; /**< L2C Index Alias Enable
+ When set, the L2 Tag/Data Store will alias the 11-bit
+ index with the low order 11-bits of the tag.
+ index[17:7] = (tag[28:18] ^ index[17:7])
+ NOTE: This bit must only be modified at boot time,
+ when it can be guaranteed that no blocks have been
+ loaded into the L2 Cache.
+ The index aliasing is a performance enhancement feature
+ which reduces the L2 cache thrashing experienced for
+ regular stride references.
+ NOTE: The index alias is stored in the LFB and VAB, and
+ its effects are reversed for memory references (Victims,
+ STT-Misses and Read-Misses) */
+ uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits
+ become less than or equal to the MWF_CRD, the L2C will
+ assert l2c__lmi_mwd_hiwater_a to signal the LMC to give
+ writes (victims) higher priority. */
+ uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode:
+ - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC]
+ - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss),
+ RHCF(RdHit), STRSP(ST RSP w/ invalidate),
+ STRSC(ST RSP no invalidate)] */
+ uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode:
+ - 0: Fixed Priority -
+ IOB->PP requests are higher priority than
+ PP->IOB requests
+ - 1: Round Robin -
+ I/O requests from PP and IOB are serviced in
+ round robin */
+ uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode:
+ - 0: Fixed Priority -
+ IOB memory requests are higher priority than PP
+ memory requests.
+ - 1: Round Robin -
+ Memory requests from PP and IOB are serviced in
+ round robin. */
+#else
+ uint64_t lrf_arb_mode : 1;
+ uint64_t rfb_arb_mode : 1;
+ uint64_t rsp_arb_mode : 1;
+ uint64_t mwf_crd : 4;
+ uint64_t idxalias : 1;
+ uint64_t fpen : 1;
+ uint64_t fpempty : 1;
+ uint64_t fpexp : 4;
+ uint64_t dfill_dis : 1;
+ uint64_t dpres0 : 1;
+ uint64_t dpres1 : 1;
+ uint64_t xor_bank : 1;
+ uint64_t lbist : 1;
+ uint64_t bstrun : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_l2c_cfg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When FPEN is enabled and the LFB is empty, the
+ forward progress counter (FPCNT) is initialized to:
+ FPCNT[24:0] = 2^(9+FPEXP)
+ When the LFB is non-empty the FPCNT is decremented
+ (every eclk interval). If the FPCNT reaches zero,
+ the LFB no longer accepts new requests until either
+ a) all of the current LFB entries have completed
+ (to ensure forward progress).
+ b) FPEMPTY=0 and another forward progress count
+ interval timeout expires.
+ EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks.
+ (For eclk=500MHz(2ns), this would be ~4us). */
+ uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When set, if the forward progress counter expires,
+ all new LFB-NQs are stopped UNTIL all current LFB
+ entries have completed.
+ When clear, if the forward progress counter expires,
+ all new LFB-NQs are stopped UNTIL either
+ a) all current LFB entries have completed.
+ b) another forward progress interval expires
+ NOTE: We may want to FREEZE/HANG the system when
+ we encounter an LFB entry that cannot complete, and there
+ may be times when we want to allow further LFB-NQs
+ to be permitted to help in further analyzing the
+ source */
+ uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When set, enables the Forward Progress Counter to
+ prevent new LFB entries from enqueueing until ALL
+ current LFB entries have completed. */
+ uint64_t idxalias : 1; /**< L2C Index Alias Enable
+ When set, the L2 Tag/Data Store will alias the 8-bit
+ index with the low order 8-bits of the tag.
+ index[14:7] = (tag[22:15] ^ index[14:7])
+ NOTE: This bit must only be modified at boot time,
+ when it can be guaranteed that no blocks have been
+ loaded into the L2 Cache.
+ The index aliasing is a performance enhancement feature
+ which reduces the L2 cache thrashing experienced for
+ regular stride references.
+ NOTE: The index alias is stored in the LFB and VAB, and
+ its effects are reversed for memory references (Victims,
+ STT-Misses and Read-Misses) */
+ uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits
+ become less than or equal to the MWF_CRD, the L2C will
+ assert l2c__lmi_mwd_hiwater_a to signal the LMC to give
+ writes (victims) higher priority. */
+ uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode:
+ - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC]
+ - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss),
+ RHCF(RdHit), STRSP(ST RSP w/ invalidate),
+ STRSC(ST RSP no invalidate)] */
+ uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode:
+ - 0: Fixed Priority -
+ IOB->PP requests are higher priority than
+ PP->IOB requests
+ - 1: Round Robin -
+ I/O requests from PP and IOB are serviced in
+ round robin */
+ uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode:
+ - 0: Fixed Priority -
+ IOB memory requests are higher priority than PP
+ memory requests.
+ - 1: Round Robin -
+ Memory requests from PP and IOB are serviced in
+ round robin. */
+#else
+ uint64_t lrf_arb_mode : 1;
+ uint64_t rfb_arb_mode : 1;
+ uint64_t rsp_arb_mode : 1;
+ uint64_t mwf_crd : 4;
+ uint64_t idxalias : 1;
+ uint64_t fpen : 1;
+ uint64_t fpempty : 1;
+ uint64_t fpexp : 4;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn30xx;
+ struct cvmx_l2c_cfg_cn30xx cn31xx;
+ struct cvmx_l2c_cfg_cn30xx cn38xx;
+ struct cvmx_l2c_cfg_cn30xx cn38xxp2;
+ struct cvmx_l2c_cfg_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t bstrun : 1; /**< L2 Data Store Bist Running
+ Indicates when the L2C HW Bist sequence(short or long) is
+ running. [L2C ECC Bist FSM is not in the RESET/DONE state] */
+ uint64_t lbist : 1; /**< L2C Data Store Long Bist Sequence
+ When the previous state was '0' and SW writes a '1',
+ the long bist sequence (enhanced 13N March) is performed.
+ SW can then read the L2C_CFG[BSTRUN] which will indicate
+ that the long bist sequence is running. When BSTRUN=0,
+ the L2D_BST[0-3] registers contain information
+ which reflects the status of the recent long bist sequence.
+ NOTE: SW must never write LBIST=0 while Long Bist is running
+ (ie: when BSTRUN=1 never write LBIST=0). */
+ uint64_t reserved_14_17 : 4;
+ uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When FPEN is enabled and the LFB is empty, the
+ forward progress counter (FPCNT) is initialized to:
+ FPCNT[24:0] = 2^(9+FPEXP)
+ When the LFB is non-empty the FPCNT is decremented
+ (every eclk interval). If the FPCNT reaches zero,
+ the LFB no longer accepts new requests until either
+ a) all of the current LFB entries have completed
+ (to ensure forward progress).
+ b) FPEMPTY=0 and another forward progress count
+ interval timeout expires.
+ EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks.
+ (For eclk=500MHz(2ns), this would be ~4us). */
+ uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When set, if the forward progress counter expires,
+ all new LFB-NQs are stopped UNTIL all current LFB
+ entries have completed.
+ When clear, if the forward progress counter expires,
+ all new LFB-NQs are stopped UNTIL either
+ a) all current LFB entries have completed.
+ b) another forward progress interval expires
+ NOTE: We may want to FREEZE/HANG the system when
+ we encounter an LFB entry that cannot complete, and there
+ may be times when we want to allow further LFB-NQs
+ to be permitted to help in further analyzing the
+ source */
+ uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When set, enables the Forward Progress Counter to
+ prevent new LFB entries from enqueueing until ALL
+ current LFB entries have completed. */
+ uint64_t idxalias : 1; /**< L2C Index Alias Enable
+ When set, the L2 Tag/Data Store will alias the 7-bit
+ index with the low order 7-bits of the tag.
+ index[13:7] = (tag[20:14] ^ index[13:7])
+ NOTE: This bit must only be modified at boot time,
+ when it can be guaranteed that no blocks have been
+ loaded into the L2 Cache.
+ The index aliasing is a performance enhancement feature
+ which reduces the L2 cache thrashing experienced for
+ regular stride references.
+ NOTE: The index alias is stored in the LFB and VAB, and
+ its effects are reversed for memory references (Victims,
+ STT-Misses and Read-Misses) */
+ uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits
+ become less than or equal to the MWF_CRD, the L2C will
+ assert l2c__lmi_mwd_hiwater_a to signal the LMC to give
+ writes (victims) higher priority. */
+ uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode:
+ - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC]
+ - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss),
+ RHCF(RdHit), STRSP(ST RSP w/ invalidate),
+ STRSC(ST RSP no invalidate)] */
+ uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode:
+ - 0: Fixed Priority -
+ IOB->PP requests are higher priority than
+ PP->IOB requests
+ - 1: Round Robin -
+ I/O requests from PP and IOB are serviced in
+ round robin */
+ uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode:
+ - 0: Fixed Priority -
+ IOB memory requests are higher priority than PP
+ memory requests.
+ - 1: Round Robin -
+ Memory requests from PP and IOB are serviced in
+ round robin. */
+#else
+ uint64_t lrf_arb_mode : 1;
+ uint64_t rfb_arb_mode : 1;
+ uint64_t rsp_arb_mode : 1;
+ uint64_t mwf_crd : 4;
+ uint64_t idxalias : 1;
+ uint64_t fpen : 1;
+ uint64_t fpempty : 1;
+ uint64_t fpexp : 4;
+ uint64_t reserved_14_17 : 4;
+ uint64_t lbist : 1;
+ uint64_t bstrun : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn50xx;
+ struct cvmx_l2c_cfg_cn50xx cn52xx;
+ struct cvmx_l2c_cfg_cn50xx cn52xxp1;
+ struct cvmx_l2c_cfg_s cn56xx;
+ struct cvmx_l2c_cfg_s cn56xxp1;
+ struct cvmx_l2c_cfg_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t bstrun : 1; /**< L2 Data Store Bist Running
+ Indicates when the L2C HW Bist sequence(short or long) is
+ running. [L2C ECC Bist FSM is not in the RESET/DONE state] */
+ uint64_t lbist : 1; /**< L2C Data Store Long Bist Sequence
+ When the previous state was '0' and SW writes a '1',
+ the long bist sequence (enhanced 13N March) is performed.
+ SW can then read the L2C_CFG[BSTRUN] which will indicate
+ that the long bist sequence is running. When BSTRUN=0,
+ the L2D_BST[0-3] registers contain information
+ which reflects the status of the recent long bist sequence.
+ NOTE: SW must never write LBIST=0 while Long Bist is running
+ (ie: when BSTRUN=1 never write LBIST=0).
+ NOTE: LBIST is disabled if the MIO_FUS_DAT2.BIST_DIS
+ Fuse is blown. */
+ uint64_t reserved_15_17 : 3;
+ uint64_t dfill_dis : 1; /**< L2C Dual Fill Disable
+ When set, the L2C dual-fill performance feature is
+ disabled.
+ NOTE: This bit is only intended to evaluate the
+ effectiveness of the dual-fill feature. For OPTIMAL
+ performance, this bit should ALWAYS be zero. */
+ uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When FPEN is enabled and the LFB is empty, the
+ forward progress counter (FPCNT) is initialized to:
+ FPCNT[24:0] = 2^(9+FPEXP)
+ When the LFB is non-empty the FPCNT is decremented
+ (every eclk interval). If the FPCNT reaches zero,
+ the LFB no longer accepts new requests until either
+ a) all of the current LFB entries have completed
+ (to ensure forward progress).
+ b) FPEMPTY=0 and another forward progress count
+ interval timeout expires.
+ EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks.
+ (For eclk=500MHz(2ns), this would be ~4us). */
+ uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When set, if the forward progress counter expires,
+ all new LFB-NQs are stopped UNTIL all current LFB
+ entries have completed.
+ When clear, if the forward progress counter expires,
+ all new LFB-NQs are stopped UNTIL either
+ a) all current LFB entries have completed.
+ b) another forward progress interval expires
+ NOTE: We may want to FREEZE/HANG the system when
+ we encounter an LFB entry that cannot complete, and there
+ may be times when we want to allow further LFB-NQs
+ to be permitted to help in further analyzing the
+ source */
+ uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When set, enables the Forward Progress Counter to
+ prevent new LFB entries from enqueueing until ALL
+ current LFB entries have completed. */
+ uint64_t idxalias : 1; /**< L2C Index Alias Enable
+ When set, the L2 Tag/Data Store will alias the 11-bit
+ index with the low order 11-bits of the tag.
+ index[17:7] = (tag[28:18] ^ index[17:7])
+ NOTE: This bit must only be modified at boot time,
+ when it can be guaranteed that no blocks have been
+ loaded into the L2 Cache.
+ The index aliasing is a performance enhancement feature
+ which reduces the L2 cache thrashing experienced for
+ regular stride references.
+ NOTE: The index alias is stored in the LFB and VAB, and
+ its effects are reversed for memory references (Victims,
+ STT-Misses and Read-Misses) */
+ uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits
+ become less than or equal to the MWF_CRD, the L2C will
+ assert l2c__lmi_mwd_hiwater_a to signal the LMC to give
+ writes (victims) higher priority. */
+ uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode:
+ - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC]
+ - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss),
+ RHCF(RdHit), STRSP(ST RSP w/ invalidate),
+ STRSC(ST RSP no invalidate)] */
+ uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode:
+ - 0: Fixed Priority -
+ IOB->PP requests are higher priority than
+ PP->IOB requests
+ - 1: Round Robin -
+ I/O requests from PP and IOB are serviced in
+ round robin */
+ uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode:
+ - 0: Fixed Priority -
+ IOB memory requests are higher priority than PP
+ memory requests.
+ - 1: Round Robin -
+ Memory requests from PP and IOB are serviced in
+ round robin. */
+#else
+ uint64_t lrf_arb_mode : 1;
+ uint64_t rfb_arb_mode : 1;
+ uint64_t rsp_arb_mode : 1;
+ uint64_t mwf_crd : 4;
+ uint64_t idxalias : 1;
+ uint64_t fpen : 1;
+ uint64_t fpempty : 1;
+ uint64_t fpexp : 4;
+ uint64_t dfill_dis : 1;
+ uint64_t reserved_15_17 : 3;
+ uint64_t lbist : 1;
+ uint64_t bstrun : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn58xx;
+ struct cvmx_l2c_cfg_cn58xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t dfill_dis : 1; /**< L2C Dual Fill Disable
+ When set, the L2C dual-fill performance feature is
+ disabled.
+ NOTE: This bit is only intended to evaluate the
+ effectiveness of the dual-fill feature. For OPTIMAL
+ performance, this bit should ALWAYS be zero. */
+ uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When FPEN is enabled and the LFB is empty, the
+ forward progress counter (FPCNT) is initialized to:
+ FPCNT[24:0] = 2^(9+FPEXP)
+ When the LFB is non-empty the FPCNT is decremented
+ (every eclk interval). If the FPCNT reaches zero,
+ the LFB no longer accepts new requests until either
+ a) all of the current LFB entries have completed
+ (to ensure forward progress).
+ b) FPEMPTY=0 and another forward progress count
+ interval timeout expires.
+ EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks.
+ (For eclk=500MHz(2ns), this would be ~4us). */
+ uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When set, if the forward progress counter expires,
+ all new LFB-NQs are stopped UNTIL all current LFB
+ entries have completed.
+ When clear, if the forward progress counter expires,
+ all new LFB-NQs are stopped UNTIL either
+ a) all current LFB entries have completed.
+ b) another forward progress interval expires
+ NOTE: We may want to FREEZE/HANG the system when
+ we encounter an LFB entry that cannot complete, and there
+ may be times when we want to allow further LFB-NQs
+ to be permitted to help in further analyzing the
+ source */
+ uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable
+ NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY]
+ When set, enables the Forward Progress Counter to
+ prevent new LFB entries from enqueueing until ALL
+ current LFB entries have completed. */
+ uint64_t idxalias : 1; /**< L2C Index Alias Enable
+ When set, the L2 Tag/Data Store will alias the 11-bit
+ index with the low order 11-bits of the tag.
+ index[17:7] = (tag[28:18] ^ index[17:7])
+ NOTE: This bit must only be modified at boot time,
+ when it can be guaranteed that no blocks have been
+ loaded into the L2 Cache.
+ The index aliasing is a performance enhancement feature
+ which reduces the L2 cache thrashing experienced for
+ regular stride references.
+ NOTE: The index alias is stored in the LFB and VAB, and
+ its effects are reversed for memory references (Victims,
+ STT-Misses and Read-Misses) */
+ uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits
+ become less than or equal to the MWF_CRD, the L2C will
+ assert l2c__lmi_mwd_hiwater_a to signal the LMC to give
+ writes (victims) higher priority. */
+ uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode:
+ - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC]
+ - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss),
+ RHCF(RdHit), STRSP(ST RSP w/ invalidate),
+ STRSC(ST RSP no invalidate)] */
+ uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode:
+ - 0: Fixed Priority -
+ IOB->PP requests are higher priority than
+ PP->IOB requests
+ - 1: Round Robin -
+ I/O requests from PP and IOB are serviced in
+ round robin */
+ uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode:
+ - 0: Fixed Priority -
+ IOB memory requests are higher priority than PP
+ memory requests.
+ - 1: Round Robin -
+ Memory requests from PP and IOB are serviced in
+ round robin. */
+#else
+ uint64_t lrf_arb_mode : 1;
+ uint64_t rfb_arb_mode : 1;
+ uint64_t rsp_arb_mode : 1;
+ uint64_t mwf_crd : 4;
+ uint64_t idxalias : 1;
+ uint64_t fpen : 1;
+ uint64_t fpempty : 1;
+ uint64_t fpexp : 4;
+ uint64_t dfill_dis : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } cn58xxp1;
+};
+typedef union cvmx_l2c_cfg cvmx_l2c_cfg_t;
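+
+/* Illustrative example (not part of the SDK sources): compute the aliased L2
+ * index applied when L2C_CFG[IDXALIAS]=1, per the IDXALIAS description above
+ * for the 11-bit (CN38XX/CN58XX-class) geometry:
+ *   index[17:7] = tag[28:18] ^ index[17:7]
+ * CN30XX uses index[14:7]^tag[22:15] and CN50XX uses index[13:7]^tag[20:14]. */
+static inline uint64_t cvmx_l2c_aliased_index_example(uint64_t paddr)
+{
+    uint64_t index = (paddr >> 7) & 0x7ff;   /* index[17:7], 11 bits */
+    uint64_t tag_lo = (paddr >> 18) & 0x7ff; /* tag[28:18], 11 bits */
+    return index ^ tag_lo;                   /* aliased set index */
+}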
+
+/**
+ * cvmx_l2c_cop0_map#
+ *
+ * L2C_COP0_MAP = PP COP0 register memory mapped region
+ *
+ * Description: PP COP0 register mapped region.
+ *
+ * NOTE: for 63xx, if the PPID is outside the range of 0-3,63 the write will be ignored and reads
+ * will return 0x2bad2bad2bad2bad
+ *
+ * Notes:
+ * (1) There are 256 COP0 registers per PP. Registers 0-255 map to PP0's COP0 registers, 256-511 are
+ * mapped to PP1's, etc. A special set, PP63 (registers 16128-16383), is for broadcast writes.
+ * Any write done to these registers will take effect in ALL PPs. Note this means the L2C_COP0_MAP
+ * register number to access is given by:
+ *
+ * REGNUM = [ PPID[5:0], rd[4:0], sel[2:0] ]
+ *
+ * where rd and sel are as defined in the HRM description of Core Coprocessor 0 registers
+ * and note 4 below.
+ *
+ * (2) if a COP0 register cannot be accessed by this mechanism the write will be silently ignored and the
+ * read data will be 0xBADDEED.
+ *
+ * (3) for 61xx, if the PPID is outside the range of 0-3,63 or if the PP in question is in reset a
+ * write will be ignored and reads will timeout the RSL bus.
+ *
+ * (4) Referring to note (1) above, the following rd/sel values are supported:
+ *
+ * NOTE: Put only the "Customer type" in the HRM. Do not put the "Real type" in the HRM.
+ *
+ * Customer Real
+ * rd sel type Description type
+ * ======+=======+==========+==============================================+=========
+ * 4 2 RO COP0 UserLocal RW
+ * 7 0 RO COP0 HWREna RW
+ * 9 0 RO COP0 Count RW
+ * 9 6 RO COP0 CvmCount RW
+ * 9 7 RO COP0 CvmCtl RW
+ * 11 0 RO COP0 Compare RW
+ * 11 6 RW COP0 PowThrottle RW
+ * 12 0 RO COP0 Status RW
+ * 12 1 RO COP0 IntCtl RO
+ * 12 2 RO COP0 SRSCtl RO
+ * 13 0 RO COP0 Cause RW
+ * 14 0 RO COP0 EPC RW
+ * 15 0 RO COP0 PrID RO
+ * 15 1 RO COP0 EBase RW
+ * 16 0 RO PC Issue Debug Info (see details below) RO
+ * 16 1 RO PC Fetch Debug Info (see details below) RO
+ * 16 2 RO PC Fill Debug Info (see details below) RO
+ * 16 3 RO PC Misc Debug Info (see details below) RO
+ * 18 0 RO COP0 WatchLo0 RW
+ * 19 0 RO COP0 WatchHi0 RW
+ * 22 0 RO COP0 MultiCoreDebug RW
+ * 22 1 COP0 VoltageMonitor RW
+ * 23 0 RO COP0 Debug RW
+ * 23 6 RO COP0 Debug2 RO
+ * 24 0 RO COP0 DEPC RW
+ * 25 0 RO COP0 PerfCnt Control0 RW
+ * 25 1 RO COP0 PerfCnt Counter0 RW
+ * 25 2 RO COP0 PerfCnt Control1 RW
+ * 25 3 RO COP0 PerfCnt Counter1 RW
+ * 27 0 RO COP0 CacheErr (icache) RW
+ * 28 0 RO COP0 TagLo (icache) RW
+ * 28 1 RO COP0 DataLo (icache) RW
+ * 29 1 RO COP0 DataHi (icache) RW
+ * 30 0 RO COP0 ErrorEPC RW
+ * 31 0 RO COP0 DESAVE RW
+ * 31 2 RO COP0 Scratch RW
+ * 31 3 RO COP0 Scratch1 RW
+ * 31 4 RO COP0 Scratch2 RW
+ *
+ * - PC Issue Debug Info
+ *
+ * - 63:2 pc0_5a<63:2> // often VA<63:2> of the next instruction to issue
+ * // but can also be the VA of an instruction executing/replaying on pipe 0
+ * // or can also be a VA being filled into the instruction cache
+ * // or can also be unpredictable
+ * // <61:49> RAZ
+ * 1 illegal // set when illegal VA
+ * 0 delayslot // set when VA is delayslot (prior branch may be either taken or not taken)
+ *
+ * - PC Fetch Debug Info
+ *
+ * - 63:0 fetch_address_3a // VA being fetched from the instruction cache
+ * // <61:49>, <1:0> RAZ
+ *
+ * - PC Fill Debug Info
+ *
+ * - 63:0 fill_address_4a<63:2> // VA<63:2> being filled into instruction cache
+ * // valid when waiting_for_ifill_4a is set (see PC Misc Debug Info below)
+ * // <61:49> RAZ
+ * 1 illegal // set when illegal VA
+ * 0 RAZ
+ *
+ * - PC Misc Debug Info
+ *
+ * - 63:3 RAZ
+ * 2 mem_stall_3a // stall term from L1 memory system
+ * 1 waiting_for_pfill_4a // when waiting_for_ifill_4a is set, indicates whether instruction cache fill is due to a prefetch
+ * 0 waiting_for_ifill_4a // set when there is an outstanding instruction cache fill
+ */
+union cvmx_l2c_cop0_mapx {
+ uint64_t u64;
+ struct cvmx_l2c_cop0_mapx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Data to write to/read from designated PP's COP0
+ register. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_l2c_cop0_mapx_s cn61xx;
+ struct cvmx_l2c_cop0_mapx_s cn63xx;
+ struct cvmx_l2c_cop0_mapx_s cn63xxp1;
+ struct cvmx_l2c_cop0_mapx_s cn66xx;
+ struct cvmx_l2c_cop0_mapx_s cn68xx;
+ struct cvmx_l2c_cop0_mapx_s cn68xxp1;
+ struct cvmx_l2c_cop0_mapx_s cnf71xx;
+};
+typedef union cvmx_l2c_cop0_mapx cvmx_l2c_cop0_mapx_t;
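+
+/* Illustrative example (not part of the SDK sources): form the L2C_COP0_MAP
+ * register number from note (1) above, REGNUM = [ PPID[5:0], rd[4:0], sel[2:0] ].
+ * 256 registers per PP (rd: 5 bits, sel: 3 bits); PPID 63 selects the
+ * broadcast set, registers 16128-16383. */
+static inline unsigned cvmx_l2c_cop0_regnum_example(unsigned ppid, unsigned rd,
+                                                    unsigned sel)
+{
+    return ((ppid & 0x3f) << 8) | ((rd & 0x1f) << 3) | (sel & 0x7);
+}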
+
+/**
+ * cvmx_l2c_ctl
+ *
+ * L2C_CTL = L2C Control
+ *
+ *
+ * Notes:
+ * (1) If MAXVAB is != 0, VAB_THRESH should be less than MAXVAB.
+ *
+ * (2) L2DFDBE and L2DFSBE allow software to generate L2DSBE, L2DDBE, VBFSBE, and VBFDBE errors for
+ * the purposes of testing error handling code. When one (or both) of these bits are set, a PL2
+ * which misses in the L2 will fill with the appropriate error in the first 2 OWs of the fill.
+ * Software can determine which OW pair gets the error by choosing the desired fill order
+ * (address<6:5>). A PL2 which hits in the L2 will not inject any errors. Therefore sending a
+ * WBIL2 prior to the PL2 is recommended to make a miss likely (if multiple processors are involved
+ * software must be careful to be sure no other processor or IO device can bring the block into the
+ * L2).
+ *
+ * To generate a VBFSBE or VBFDBE, software must first get the cache block into the cache with an
+ * error using a PL2 which misses the L2. Then a store partial to a portion of the cache block
+ * without the error must change the block to dirty. Then, a subsequent WBL2/WBIL2/victim will
+ * trigger the VBFSBE/VBFDBE error.
+ */
+union cvmx_l2c_ctl {
+ uint64_t u64;
+ struct cvmx_l2c_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_30_63 : 34;
+ uint64_t sepcmt : 1; /**< Sends all invals before the corresponding commit. */
+ uint64_t rdf_fast : 1; /**< When 0, delay read data fifo from DCLK to RCLK by one
+ cycle. Needed when DCLK:RCLK ratio > 3:1. Should be
+ set before DDR traffic begins and only changed when
+ memory traffic is idle. */
+ uint64_t disstgl2i : 1; /**< Disable STGL2I's from changing the tags */
+ uint64_t l2dfsbe : 1; /**< Force single bit ECC error on PL2 allocates (2) */
+ uint64_t l2dfdbe : 1; /**< Force double bit ECC error on PL2 allocates (2) */
+ uint64_t discclk : 1; /**< Disable conditional clocking in L2C PNR blocks */
+ uint64_t maxvab : 4; /**< Maximum VABs in use at once
+ (0 means 16, 1-15 as expected) */
+ uint64_t maxlfb : 4; /**< Maximum LFBs in use at once
+ (0 means 16, 1-15 as expected) */
+ uint64_t rsp_arb_mode : 1; /**< Arbitration mode for RSC/RSD bus
+ == 0, round-robin
+ == 1, static priority
+ 1. IOR data
+ 2. STIN/FILLs
+ 3. STDN/SCDN/SCFL */
+ uint64_t xmc_arb_mode : 1; /**< Arbitration mode for XMC QOS queues
+ == 0, fully determined through QOS
+ == 1, QOS0 highest priority, QOS1-3 use normal mode */
+ uint64_t ef_ena : 1; /**< LMC early fill enable */
+ uint64_t ef_cnt : 7; /**< LMC early fill count
+ Specifies the number of cycles after the first LMC
+ fill cycle to wait before requesting a fill on the
+ RSC/RSD bus.
+ // 7 dclks (we've received 1st out of 8
+ // by the time we start counting)
+ ef_cnt = ((LMCn_CONFIG[MODE32b] ? 14 : 7) *
+ dclk0_period) / rclk_period;
+ // + 1 rclk if the dclk and rclk edges don't
+ // stay in the same position
+ if ((dclk0_gen.period % rclk_gen.period) != 0)
+ ef_cnt = ef_cnt + 1;
+ // + 2 rclk synchronization uncertainty
+ ef_cnt = ef_cnt + 2;
+ // - 3 rclks to recognize first write
+ ef_cnt = ef_cnt - 3;
+ // + 3 rclks to perform first write
+ ef_cnt = ef_cnt + 3;
+ // - 9 rclks minimum latency from counter expire
+ // to final fbf read
+ ef_cnt = ef_cnt - 9; */
+ uint64_t vab_thresh : 4; /**< VAB Threshold
+ When the number of valid VABs exceeds this number the
+ L2C increases the priority of all writes in the LMC. */
+ uint64_t disecc : 1; /**< Tag and Data ECC Disable */
+ uint64_t disidxalias : 1; /**< Index Alias Disable */
+#else
+ uint64_t disidxalias : 1;
+ uint64_t disecc : 1;
+ uint64_t vab_thresh : 4;
+ uint64_t ef_cnt : 7;
+ uint64_t ef_ena : 1;
+ uint64_t xmc_arb_mode : 1;
+ uint64_t rsp_arb_mode : 1;
+ uint64_t maxlfb : 4;
+ uint64_t maxvab : 4;
+ uint64_t discclk : 1;
+ uint64_t l2dfdbe : 1;
+ uint64_t l2dfsbe : 1;
+ uint64_t disstgl2i : 1;
+ uint64_t rdf_fast : 1;
+ uint64_t sepcmt : 1;
+ uint64_t reserved_30_63 : 34;
+#endif
+ } s;
+ struct cvmx_l2c_ctl_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t rdf_fast : 1; /**< When 0, delay read data fifo from DCLK to RCLK by one
+ cycle. Needed when DCLK:RCLK ratio > 3:1. Should be
+ set before DDR traffic begins and only changed when
+ memory traffic is idle. */
+ uint64_t disstgl2i : 1; /**< Disable STGL2I's from changing the tags */
+ uint64_t l2dfsbe : 1; /**< Force single bit ECC error on PL2 allocates (2) */
+ uint64_t l2dfdbe : 1; /**< Force double bit ECC error on PL2 allocates (2) */
+ uint64_t discclk : 1; /**< Disable conditional clocking in L2C PNR blocks */
+ uint64_t maxvab : 4; /**< Maximum VABs in use at once
+ (0 means 16, 1-15 as expected) */
+ uint64_t maxlfb : 4; /**< Maximum LFBs in use at once
+ (0 means 16, 1-15 as expected) */
+ uint64_t rsp_arb_mode : 1; /**< Arbitration mode for RSC/RSD bus
+ == 0, round-robin
+ == 1, static priority
+ 1. IOR data
+ 2. STIN/FILLs
+ 3. STDN/SCDN/SCFL */
+ uint64_t xmc_arb_mode : 1; /**< Arbitration mode for XMC QOS queues
+ == 0, fully determined through QOS
+ == 1, QOS0 highest priority, QOS1-3 use normal mode */
+ uint64_t ef_ena : 1; /**< LMC early fill enable */
+ uint64_t ef_cnt : 7; /**< LMC early fill count
+ Specifies the number of cycles after the first LMC
+ fill cycle to wait before requesting a fill on the
+ RSC/RSD bus.
+ // 7 dclks (we've received 1st out of 8
+ // by the time we start counting)
+ ef_cnt = ((LMCn_CONFIG[MODE32b] ? 14 : 7) *
+ dclk0_period) / rclk_period;
+ // + 1 rclk if the dclk and rclk edges don't
+ // stay in the same position
+ if ((dclk0_gen.period % rclk_gen.period) != 0)
+ ef_cnt = ef_cnt + 1;
+ // + 2 rclk synchronization uncertainty
+ ef_cnt = ef_cnt + 2;
+ // - 3 rclks to recognize first write
+ ef_cnt = ef_cnt - 3;
+ // + 3 rclks to perform first write
+ ef_cnt = ef_cnt + 3;
+ // - 9 rclks minimum latency from counter expire
+ // to final fbf read
+ ef_cnt = ef_cnt - 9; */
+ uint64_t vab_thresh : 4; /**< VAB Threshold
+ When the number of valid VABs exceeds this number the
+ L2C increases the priority of all writes in the LMC. */
+ uint64_t disecc : 1; /**< Tag and Data ECC Disable */
+ uint64_t disidxalias : 1; /**< Index Alias Disable */
+#else
+ uint64_t disidxalias : 1;
+ uint64_t disecc : 1;
+ uint64_t vab_thresh : 4;
+ uint64_t ef_cnt : 7;
+ uint64_t ef_ena : 1;
+ uint64_t xmc_arb_mode : 1;
+ uint64_t rsp_arb_mode : 1;
+ uint64_t maxlfb : 4;
+ uint64_t maxvab : 4;
+ uint64_t discclk : 1;
+ uint64_t l2dfdbe : 1;
+ uint64_t l2dfsbe : 1;
+ uint64_t disstgl2i : 1;
+ uint64_t rdf_fast : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn61xx;
+ struct cvmx_l2c_ctl_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t disstgl2i : 1; /**< Disable STGL2I's from changing the tags */
+ uint64_t l2dfsbe : 1; /**< Force single bit ECC error on PL2 allocates (2) */
+ uint64_t l2dfdbe : 1; /**< Force double bit ECC error on PL2 allocates (2) */
+ uint64_t discclk : 1; /**< Disable conditional clocking in L2C PNR blocks */
+ uint64_t maxvab : 4; /**< Maximum VABs in use at once
+ (0 means 16, 1-15 as expected) */
+ uint64_t maxlfb : 4; /**< Maximum LFBs in use at once
+ (0 means 16, 1-15 as expected) */
+ uint64_t rsp_arb_mode : 1; /**< Arbitration mode for RSC/RSD bus
+ == 0, round-robin
+ == 1, static priority
+ 1. IOR data
+ 2. STIN/FILLs
+ 3. STDN/SCDN/SCFL */
+ uint64_t xmc_arb_mode : 1; /**< Arbitration mode for XMC QOS queues
+ == 0, fully determined through QOS
+ == 1, QOS0 highest priority, QOS1-3 use normal mode */
+ uint64_t ef_ena : 1; /**< LMC early fill enable */
+ uint64_t ef_cnt : 7; /**< LMC early fill count
+ Specifies the number of cycles after the first LMC
+ fill cycle to wait before requesting a fill on the
+ RSC/RSD bus.
+ // 7 dclks (we've received 1st out of 8
+ // by the time we start counting)
+ ef_cnt = (7 * dclk0_period) / rclk_period;
+ // + 1 rclk if the dclk and rclk edges don't
+ // stay in the same position
+ if ((dclk0_gen.period % rclk_gen.period) != 0)
+ ef_cnt = ef_cnt + 1;
+ // + 2 rclk synchronization uncertainty
+ ef_cnt = ef_cnt + 2;
+ // - 3 rclks to recognize first write
+ ef_cnt = ef_cnt - 3;
+ // + 3 rclks to perform first write
+ ef_cnt = ef_cnt + 3;
+ // - 9 rclks minimum latency from counter expire
+ // to final fbf read
+ ef_cnt = ef_cnt - 9; */
+ uint64_t vab_thresh : 4; /**< VAB Threshold
+ When the number of valid VABs exceeds this number the
+ L2C increases the priority of all writes in the LMC. */
+ uint64_t disecc : 1; /**< Tag and Data ECC Disable */
+ uint64_t disidxalias : 1; /**< Index Alias Disable */
+#else
+ uint64_t disidxalias : 1;
+ uint64_t disecc : 1;
+ uint64_t vab_thresh : 4;
+ uint64_t ef_cnt : 7;
+ uint64_t ef_ena : 1;
+ uint64_t xmc_arb_mode : 1;
+ uint64_t rsp_arb_mode : 1;
+ uint64_t maxlfb : 4;
+ uint64_t maxvab : 4;
+ uint64_t discclk : 1;
+ uint64_t l2dfdbe : 1;
+ uint64_t l2dfsbe : 1;
+ uint64_t disstgl2i : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn63xx;
+ struct cvmx_l2c_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t discclk : 1; /**< Disable conditional clocking in L2C PNR blocks */
+ uint64_t maxvab : 4; /**< Maximum VABs in use at once
+ (0 means 16, 1-15 as expected) */
+ uint64_t maxlfb : 4; /**< Maximum LFBs in use at once
+ (0 means 16, 1-15 as expected) */
+ uint64_t rsp_arb_mode : 1; /**< Arbitration mode for RSC/RSD bus
+ == 0, round-robin
+ == 1, static priority
+ 1. IOR data
+ 2. STIN/FILLs
+ 3. STDN/SCDN/SCFL */
+ uint64_t xmc_arb_mode : 1; /**< Arbitration mode for XMC QOS queues
+ == 0, fully determined through QOS
+ == 1, QOS0 highest priority, QOS1-3 use normal mode */
+ uint64_t ef_ena : 1; /**< LMC early fill enable */
+ uint64_t ef_cnt : 7; /**< LMC early fill count
+ Specifies the number of cycles after the first LMC
+ fill cycle to wait before requesting a fill on the
+ RSC/RSD bus.
+ // 7 dclks (we've received 1st out of 8
+ // by the time we start counting)
+ ef_cnt = (7 * dclk0_period) / rclk_period;
+ // + 1 rclk if the dclk and rclk edges don't
+ // stay in the same position
+ if ((dclk0_gen.period % rclk_gen.period) != 0)
+ ef_cnt = ef_cnt + 1;
+ // + 2 rclk synchronization uncertainty
+ ef_cnt = ef_cnt + 2;
+ // - 3 rclks to recognize first write
+ ef_cnt = ef_cnt - 3;
+ // + 3 rclks to perform first write
+ ef_cnt = ef_cnt + 3;
+ // - 9 rclks minimum latency from counter expire
+ // to final fbf read
+ ef_cnt = ef_cnt - 9; */
+ uint64_t vab_thresh : 4; /**< VAB Threshold
+ When the number of valid VABs exceeds this number the
+ L2C increases the priority of all writes in the LMC. */
+ uint64_t disecc : 1; /**< Tag and Data ECC Disable */
+ uint64_t disidxalias : 1; /**< Index Alias Disable */
+#else
+ uint64_t disidxalias : 1;
+ uint64_t disecc : 1;
+ uint64_t vab_thresh : 4;
+ uint64_t ef_cnt : 7;
+ uint64_t ef_ena : 1;
+ uint64_t xmc_arb_mode : 1;
+ uint64_t rsp_arb_mode : 1;
+ uint64_t maxlfb : 4;
+ uint64_t maxvab : 4;
+ uint64_t discclk : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } cn63xxp1;
+ struct cvmx_l2c_ctl_cn61xx cn66xx;
+ struct cvmx_l2c_ctl_s cn68xx;
+ struct cvmx_l2c_ctl_cn63xx cn68xxp1;
+ struct cvmx_l2c_ctl_cn61xx cnf71xx;
+};
+typedef union cvmx_l2c_ctl cvmx_l2c_ctl_t;
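+
+/* Illustrative example (not part of the SDK sources): the EF_CNT formula from
+ * the field description above written out as C. dclk0_period/rclk_period are
+ * assumed inputs in a common unit (e.g. picoseconds, rclk_period nonzero);
+ * mode32b mirrors LMCn_CONFIG[MODE32b] on chips that have it. */
+static inline int cvmx_l2c_ctl_ef_cnt_example(int mode32b,
+                                              uint64_t dclk0_period,
+                                              uint64_t rclk_period)
+{
+    int ef_cnt = (int)(((mode32b ? 14 : 7) * dclk0_period) / rclk_period);
+
+    if ((dclk0_period % rclk_period) != 0)
+        ef_cnt += 1;   /* dclk and rclk edges drift */
+    ef_cnt += 2;       /* rclk synchronization uncertainty */
+    ef_cnt -= 3;       /* rclks to recognize first write */
+    ef_cnt += 3;       /* rclks to perform first write */
+    ef_cnt -= 9;       /* minimum latency from counter expire to final FBF read */
+    return ef_cnt;
+}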
+
+/**
+ * cvmx_l2c_dbg
+ *
+ * L2C_DBG = L2C DEBUG Register
+ *
+ * Description: L2C Tag/Data Store Debug Register
+ *
+ * Notes:
+ * (1) When using the L2T, L2D or FINV Debug probe feature, the LDD command WILL NOT update the DuTags.
+ * (2) L2T, L2D, FINV MUST BE mutually exclusive (only one set)
+ * (3) Force Invalidate is intended as a means for SW to invalidate the L2 Cache while also writing back
+ * dirty data to memory to maintain coherency.
+ * (4) L2 Cache Lock Down feature MUST BE disabled (L2C_LCKBASE[LCK_ENA]=0) if ANY of the L2C debug
+ * features (L2T, L2D, FINV) are enabled.
+ */
+union cvmx_l2c_dbg {
+ uint64_t u64;
+ struct cvmx_l2c_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t lfb_enum : 4; /**< Specifies the LFB Entry# which is to be captured. */
+ uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of
+ the LFB specified by LFB_ENUM[3:0] are captured
+ into the L2C_LFB(0/1/2) registers.
+ NOTE: Some fields of the LFB entry are unpredictable
+ and dependent on usage. This is only intended to be
+ used for HW debug. */
+ uint64_t ppnum : 4; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
+ is enabled, this field determines which one-of-16
+ PPs is selected as the diagnostic PP. */
+ uint64_t set : 3; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
+ is enabled, this field determines 1-of-n targeted
+ sets to act upon.
+ NOTE: L2C_DBG[SET] must never equal a crippled or
+ unusable set (see UMSK* registers and Cripple mode
+ fuses). */
+ uint64_t finv : 1; /**< Flush-Invalidate.
+ When flush-invalidate is enabled (FINV=1), all STF
+ (L1 store-miss) commands generated from the diagnostic PP
+ (L2C_DBG[PPNUM]) will invalidate the specified set
+ (L2C_DBG[SET]) at the index specified in the STF
+ address[17:7]. If a dirty block is detected (D=1), it is
+ written back to memory. The contents of the invalidated
+ L2 Cache line are also 'scrubbed' with the STF write data.
+ NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in
+ STF address[17:7] refers to the 'aliased' address.
+ NOTE: An STF command with write data=ZEROES can be
+ generated by SW using the Prefetch instruction with
+ Hint=30d "prepare for Store", followed by a SYNCW.
+ This is seen at the L2C as an STF w/wrdcnt=0 with all
+ of its mask bits clear (indicating zero-fill data).
+ A flush-invalidate will 'force-hit' the L2 cache at
+ [index,set] and invalidate the entry (V=0/D=0/L=0/U=0).
+ If the cache block is dirty, it is also written back
+ to memory. The DuTag state is probed/updated as normal
+ for an STF request.
+ TYPICAL APPLICATIONS:
+ 1) L2 Tag/Data ECC SW Recovery
+ 2) Cache Unlocking
+ NOTE: If the cacheline had been previously LOCKED(L=1),
+ a flush-invalidate operation will explicitly UNLOCK
+ (L=0) the set/index specified.
+ NOTE: The diagnostic PP cores can generate STF
+ commands to the L2 Cache whenever all 128 bytes in a
+ block are written. SW must take this into consideration
+ to avoid 'errant' Flush-Invalidates. */
+ uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is
+ returned directly from the L2 Data Store
+ (regardless of hit/miss) when an LDD(L1 load-miss) command
+ is issued from a PP determined by the L2C_DBG[PPNUM]
+ field. The selected set# is determined by the
+ L2C_DBG[SET] field, and the index is determined
+ from the address[17:7] associated with the LDD
+ command.
+ This 'force-hit' will NOT alter the current L2 Tag
+ state OR the DuTag state. */
+ uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:18]]
+ is returned on the data bus starting at +32(and +96) bytes
+ offset from the beginning of cacheline when an LDD
+ (L1 load-miss) command is issued from a PP determined by
+ the L2C_DBG[PPNUM] field.
+ The selected L2 set# is determined by the L2C_DBG[SET]
+ field, and the L2 index is determined from the
+ phys_addr[17:7] associated with the LDD command.
+ This 'L2 force-hit' will NOT alter the current L2 Tag
+ state OR the DuTag state.
+ NOTE: The diagnostic PP should issue a d-stream load
+ to an aligned cacheline+0x20(+0x60) in order to have the
+ return VDLUTAG information (in OW2/OW6) written directly
+ into the proper PP register. The diagnostic PP should also
+ flush its local L1 cache after use (to ensure data
+ coherency).
+ NOTE: The position of the VDLUTAG data in the destination
+ register is dependent on the endian mode(big/little).
+ NOTE: N3K-Pass2 modification. (This bit's functionality
+ has changed since Pass1 in the following way).
+ NOTE: (For L2C BitMap testing of L2 Data Store OW ECC):
+ If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected
+ half cacheline (see L2D_ERR[BMHCLSEL]) is also
+ conditionally latched into the L2D_FSYN0/1 CSRs if an
+ LDD command is detected from the diagnostic PP(L2C_DBG[PPNUM]). */
+#else
+ uint64_t l2t : 1;
+ uint64_t l2d : 1;
+ uint64_t finv : 1;
+ uint64_t set : 3;
+ uint64_t ppnum : 4;
+ uint64_t lfb_dmp : 1;
+ uint64_t lfb_enum : 4;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_l2c_dbg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t lfb_enum : 2; /**< Specifies the LFB Entry# which is to be captured. */
+ uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of
+ the LFB specified by LFB_ENUM are captured
+ into the L2C_LFB(0/1/2) registers.
+ NOTE: Some fields of the LFB entry are unpredictable
+ and dependent on usage. This is only intended to be
+ used for HW debug. */
+ uint64_t reserved_7_9 : 3;
+ uint64_t ppnum : 1; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
+ is enabled, this field determines which
+ PP is selected as the diagnostic PP.
+ NOTE: For CN30XX single core PPNUM=0 (MBZ) */
+ uint64_t reserved_5_5 : 1;
+ uint64_t set : 2; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
+ is enabled, this field determines 1-of-n targeted
+ sets to act upon.
+ NOTE: L2C_DBG[SET] must never equal a crippled or
+ unusable set (see UMSK* registers and Cripple mode
+ fuses). */
+ uint64_t finv : 1; /**< Flush-Invalidate.
+ When flush-invalidate is enabled (FINV=1), all STF
+ (L1 store-miss) commands generated from the PP will invalidate
+ the specified set(L2C_DBG[SET]) at the index specified
+ in the STF address[14:7]. If a dirty block is detected(D=1),
+ it is written back to memory. The contents of the invalid
+ L2 Cache line is also 'scrubbed' with the STF write data.
+ NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in
+ STF address[14:7] refers to the 'aliased' address.
+ NOTE: An STF command with write data=ZEROES can be
+ generated by SW using the Prefetch instruction with
+ Hint=30d "prepare for Store", followed by a SYNCW.
+ This is seen at the L2C as an STF w/wrdcnt=0 with all
+ of its mask bits clear (indicating zero-fill data).
+ A flush-invalidate will 'force-hit' the L2 cache at
+ [index,set] and invalidate the entry (V=0/D=0/L=0/U=0).
+ If the cache block is dirty, it is also written back
+ to memory. The DuTag state is probed/updated as normal
+ for an STF request.
+ TYPICAL APPLICATIONS:
+ 1) L2 Tag/Data ECC SW Recovery
+ 2) Cache Unlocking
+ NOTE: If the cacheline had been previously LOCKED(L=1),
+ a flush-invalidate operation will explicitly UNLOCK
+ (L=0) the set/index specified.
+ NOTE: The PP can generate STF(L1 store-miss)
+ commands to the L2 Cache whenever all 128 bytes in a
+ block are written. SW must take this into consideration
+ to avoid 'errant' Flush-Invalidates. */
+ uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is
+ returned directly from the L2 Data Store
+ (regardless of hit/miss) when an LDD(L1 load-miss)
+ command is issued from the PP.
+ The selected set# is determined by the
+ L2C_DBG[SET] field, and the index is determined
+ from the address[14:7] associated with the LDD
+ command.
+ This 'force-hit' will NOT alter the current L2 Tag
+ state OR the DuTag state. */
+ uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:15]]
+ is returned on the data bus starting at +32(and +96) bytes
+ offset from the beginning of cacheline when an LDD
+ (L1 load-miss) command is issued from the PP.
+ The selected L2 set# is determined by the L2C_DBG[SET]
+ field, and the L2 index is determined from the
+ phys_addr[14:7] associated with the LDD command.
+ This 'L2 force-hit' will NOT alter the current L2 Tag
+ state OR the DuTag state.
+ NOTE: The diagnostic PP should issue a d-stream load
+ to an aligned cacheline+0x20(+0x60) in order to have the
+ return VDLUTAG information (in OW2/OW6) written directly
+ into the proper PP register. The diagnostic PP should also
+ flush its local L1 cache after use (to ensure data
+ coherency).
+ NOTE: The position of the VDLUTAG data in the destination
+ register is dependent on the endian mode(big/little).
+ NOTE: (For L2C BitMap testing of L2 Data Store OW ECC):
+ If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected
+ half cacheline (see L2D_ERR[BMHCLSEL]) is also
+ conditionally latched into the L2D_FSYN0/1 CSRs if an
+ LDD(L1 load-miss) is detected. */
+#else
+ uint64_t l2t : 1;
+ uint64_t l2d : 1;
+ uint64_t finv : 1;
+ uint64_t set : 2;
+ uint64_t reserved_5_5 : 1;
+ uint64_t ppnum : 1;
+ uint64_t reserved_7_9 : 3;
+ uint64_t lfb_dmp : 1;
+ uint64_t lfb_enum : 2;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } cn30xx;
+ struct cvmx_l2c_dbg_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t lfb_enum : 3; /**< Specifies the LFB Entry# which is to be captured. */
+ uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of
+ the LFB specified by LFB_ENUM are captured
+ into the L2C_LFB(0/1/2) registers.
+ NOTE: Some fields of the LFB entry are unpredictable
+ and dependent on usage. This is only intended to be
+ used for HW debug. */
+ uint64_t reserved_7_9 : 3;
+ uint64_t ppnum : 1; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
+ is enabled, this field determines which
+ PP is selected as the diagnostic PP. */
+ uint64_t reserved_5_5 : 1;
+ uint64_t set : 2; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
+ is enabled, this field determines 1-of-n targeted
+ sets to act upon.
+ NOTE: L2C_DBG[SET] must never equal a crippled or
+ unusable set (see UMSK* registers and Cripple mode
+ fuses). */
+ uint64_t finv : 1; /**< Flush-Invalidate.
+ When flush-invalidate is enabled (FINV=1), all STF
+ (L1 store-miss) commands generated from the diagnostic PP
+ (L2C_DBG[PPNUM]) will invalidate the specified set
+ (L2C_DBG[SET]) at the index specified in the STF
+ address[15:7]. If a dirty block is detected (D=1), it is
+ written back to memory. The contents of the invalid
+ L2 Cache line is also 'scrubbed' with the STF write data.
+ NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in
+ STF address[15:7] refers to the 'aliased' address.
+ NOTE: An STF command with write data=ZEROES can be
+ generated by SW using the Prefetch instruction with
+ Hint=30d "prepare for Store", followed by a SYNCW.
+ This is seen at the L2C as an STF w/wrdcnt=0 with all
+ of its mask bits clear (indicating zero-fill data).
+ A flush-invalidate will 'force-hit' the L2 cache at
+ [index,set] and invalidate the entry (V=0/D=0/L=0/U=0).
+ If the cache block is dirty, it is also written back
+ to memory. The DuTag state is probed/updated as normal
+ for an STF request.
+ TYPICAL APPLICATIONS:
+ 1) L2 Tag/Data ECC SW Recovery
+ 2) Cache Unlocking
+ NOTE: If the cacheline had been previously LOCKED(L=1),
+ a flush-invalidate operation will explicitly UNLOCK
+ (L=0) the set/index specified.
+ NOTE: The diagnostic PP cores can generate STF(L1 store-miss)
+ commands to the L2 Cache whenever all 128 bytes in a
+ block are written. SW must take this into consideration
+ to avoid 'errant' Flush-Invalidates. */
+ uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is
+ returned directly from the L2 Data Store
+ (regardless of hit/miss) when an LDD(L1 load-miss)
+ command is issued from a PP determined by the
+ L2C_DBG[PPNUM] field. The selected set# is determined
+ by the L2C_DBG[SET] field, and the index is determined
+ from the address[15:7] associated with the LDD command.
+ This 'L2 force-hit' will NOT alter the current L2 Tag
+ state OR the DuTag state. */
+ uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:16]]
+ is returned on the data bus starting at +32(and +96) bytes
+ offset from the beginning of cacheline when an LDD
+ (L1 load-miss) command is issued from a PP determined by
+ the L2C_DBG[PPNUM] field.
+ The selected L2 set# is determined by the L2C_DBG[SET]
+ field, and the L2 index is determined from the
+ phys_addr[15:7] associated with the LDD command.
+ This 'L2 force-hit' will NOT alter the current L2 Tag
+ state OR the DuTag state.
+ NOTE: The diagnostic PP should issue a d-stream load
+ to an aligned cacheline+0x20(+0x60) in order to have the
+ return VDLUTAG information (in OW2/OW6) written directly
+ into the proper PP register. The diagnostic PP should also
+ flush its local L1 cache after use (to ensure data
+ coherency).
+ NOTE: The position of the VDLUTAG data in the destination
+ register is dependent on the endian mode(big/little).
+ NOTE: (For L2C BitMap testing of L2 Data Store OW ECC):
+ If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected
+ half cacheline (see L2D_ERR[BMHCLSEL]) is also
+ conditionally latched into the L2D_FSYN0/1 CSRs if an
+ LDD(L1 load-miss) is detected from the diagnostic PP
+ (L2C_DBG[PPNUM]). */
+#else
+ uint64_t l2t : 1;
+ uint64_t l2d : 1;
+ uint64_t finv : 1;
+ uint64_t set : 2;
+ uint64_t reserved_5_5 : 1;
+ uint64_t ppnum : 1;
+ uint64_t reserved_7_9 : 3;
+ uint64_t lfb_dmp : 1;
+ uint64_t lfb_enum : 3;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn31xx;
+ struct cvmx_l2c_dbg_s cn38xx;
+ struct cvmx_l2c_dbg_s cn38xxp2;
+ struct cvmx_l2c_dbg_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t lfb_enum : 3; /**< Specifies the LFB Entry# which is to be captured. */
+ uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of
+ the LFB specified by LFB_ENUM[2:0] are captured
+ into the L2C_LFB(0/1/2) registers.
+ NOTE: Some fields of the LFB entry are unpredictable
+ and dependent on usage. This is only intended to be
+ used for HW debug. */
+ uint64_t reserved_7_9 : 3;
+ uint64_t ppnum : 1; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
+ is enabled, this field determines which 1-of-2
+ PPs is selected as the diagnostic PP. */
+ uint64_t set : 3; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
+ is enabled, this field determines 1-of-n targeted
+ sets to act upon.
+ NOTE: L2C_DBG[SET] must never equal a crippled or
+ unusable set (see UMSK* registers and Cripple mode
+ fuses). */
+ uint64_t finv : 1; /**< Flush-Invalidate.
+ When flush-invalidate is enabled (FINV=1), all STF
+ (L1 store-miss) commands generated from the diagnostic PP
+ (L2C_DBG[PPNUM]) will invalidate the specified set
+ (L2C_DBG[SET]) at the index specified in the STF
+ address[13:7]. If a dirty block is detected (D=1), it is
+ written back to memory. The contents of the invalid
+ L2 Cache line is also 'scrubbed' with the STF write data.
+ NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in
+ STF address[13:7] refers to the 'aliased' address.
+ NOTE: An STF command with write data=ZEROES can be
+ generated by SW using the Prefetch instruction with
+ Hint=30d "prepare for Store", followed by a SYNCW.
+ This is seen at the L2C as an STF w/wrdcnt=0 with all
+ of its mask bits clear (indicating zero-fill data).
+ A flush-invalidate will 'force-hit' the L2 cache at
+ [index,set] and invalidate the entry (V=0/D=0/L=0/U=0).
+ If the cache block is dirty, it is also written back
+ to memory. The DuTag state is probed/updated as normal
+ for an STF request.
+ TYPICAL APPLICATIONS:
+ 1) L2 Tag/Data ECC SW Recovery
+ 2) Cache Unlocking
+ NOTE: If the cacheline had been previously LOCKED(L=1),
+ a flush-invalidate operation will explicitly UNLOCK
+ (L=0) the set/index specified.
+ NOTE: The diagnostic PP cores can generate STF
+ commands to the L2 Cache whenever all 128 bytes in a
+ block are written. SW must take this into consideration
+ to avoid 'errant' Flush-Invalidates. */
+ uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is
+ returned directly from the L2 Data Store
+ (regardless of hit/miss) when an LDD(L1 load-miss) command
+ is issued from a PP determined by the L2C_DBG[PPNUM]
+ field. The selected set# is determined by the
+ L2C_DBG[SET] field, and the index is determined
+ from the address[13:7] associated with the LDD
+ command.
+ This 'force-hit' will NOT alter the current L2 Tag
+ state OR the DuTag state. */
+ uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:14]]
+ is returned on the data bus starting at +32(and +96) bytes
+ offset from the beginning of cacheline when an LDD
+ (L1 load-miss) command is issued from a PP determined by
+ the L2C_DBG[PPNUM] field.
+ The selected L2 set# is determined by the L2C_DBG[SET]
+ field, and the L2 index is determined from the
+ phys_addr[13:7] associated with the LDD command.
+ This 'L2 force-hit' will NOT alter the current L2 Tag
+ state OR the DuTag state.
+ NOTE: The diagnostic PP should issue a d-stream load
+ to an aligned cacheline+0x20(+0x60) in order to have the
+ return VDLUTAG information (in OW2/OW6) written directly
+ into the proper PP register. The diagnostic PP should also
+ flush its local L1 cache after use (to ensure data
+ coherency).
+ NOTE: The position of the VDLUTAG data in the destination
+ register is dependent on the endian mode(big/little).
+ NOTE: (For L2C BitMap testing of L2 Data Store OW ECC):
+ If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected
+ half cacheline (see L2D_ERR[BMHCLSEL]) is also
+ conditionally latched into the L2D_FSYN0/1 CSRs if an
+ LDD command is detected from the diagnostic PP(L2C_DBG[PPNUM]). */
+#else
+ uint64_t l2t : 1;
+ uint64_t l2d : 1;
+ uint64_t finv : 1;
+ uint64_t set : 3;
+ uint64_t ppnum : 1;
+ uint64_t reserved_7_9 : 3;
+ uint64_t lfb_dmp : 1;
+ uint64_t lfb_enum : 3;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn50xx;
+ struct cvmx_l2c_dbg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t lfb_enum : 3; /**< Specifies the LFB Entry# which is to be captured. */
+ uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of
+ the LFB specified by LFB_ENUM[2:0] are captured
+ into the L2C_LFB(0/1/2) registers.
+ NOTE: Some fields of the LFB entry are unpredictable
+ and dependent on usage. This is only intended to be
+ used for HW debug. */
+ uint64_t reserved_8_9 : 2;
+ uint64_t ppnum : 2; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
+ is enabled, this field determines which 1-of-4
+ PPs is selected as the diagnostic PP. */
+ uint64_t set : 3; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
+ is enabled, this field determines 1-of-n targeted
+ sets to act upon.
+ NOTE: L2C_DBG[SET] must never equal a crippled or
+ unusable set (see UMSK* registers and Cripple mode
+ fuses). */
+ uint64_t finv : 1; /**< Flush-Invalidate.
+ When flush-invalidate is enabled (FINV=1), all STF
+ (L1 store-miss) commands generated from the diagnostic PP
+ (L2C_DBG[PPNUM]) will invalidate the specified set
+ (L2C_DBG[SET]) at the index specified in the STF
+ address[15:7]. If a dirty block is detected (D=1), it is
+ written back to memory. The contents of the invalid
+ L2 Cache line is also 'scrubbed' with the STF write data.
+ NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in
+ STF address[15:7] refers to the 'aliased' address.
+ NOTE: An STF command with write data=ZEROES can be
+ generated by SW using the Prefetch instruction with
+ Hint=30d "prepare for Store", followed by a SYNCW.
+ This is seen at the L2C as an STF w/wrdcnt=0 with all
+ of its mask bits clear (indicating zero-fill data).
+ A flush-invalidate will 'force-hit' the L2 cache at
+ [index,set] and invalidate the entry (V=0/D=0/L=0/U=0).
+ If the cache block is dirty, it is also written back
+ to memory. The DuTag state is probed/updated as normal
+ for an STF request.
+ TYPICAL APPLICATIONS:
+ 1) L2 Tag/Data ECC SW Recovery
+ 2) Cache Unlocking
+ NOTE: If the cacheline had been previously LOCKED(L=1),
+ a flush-invalidate operation will explicitly UNLOCK
+ (L=0) the set/index specified.
+ NOTE: The diagnostic PP cores can generate STF
+ commands to the L2 Cache whenever all 128 bytes in a
+ block are written. SW must take this into consideration
+ to avoid 'errant' Flush-Invalidates. */
+ uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is
+ returned directly from the L2 Data Store
+ (regardless of hit/miss) when an LDD(L1 load-miss) command
+ is issued from a PP determined by the L2C_DBG[PPNUM]
+ field. The selected set# is determined by the
+ L2C_DBG[SET] field, and the index is determined
+ from the address[15:7] associated with the LDD
+ command.
+ This 'force-hit' will NOT alter the current L2 Tag
+ state OR the DuTag state. */
+ uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:16]]
+ is returned on the data bus starting at +32(and +96) bytes
+ offset from the beginning of cacheline when an LDD
+ (L1 load-miss) command is issued from a PP determined by
+ the L2C_DBG[PPNUM] field.
+ The selected L2 set# is determined by the L2C_DBG[SET]
+ field, and the L2 index is determined from the
+ phys_addr[15:7] associated with the LDD command.
+ This 'L2 force-hit' will NOT alter the current L2 Tag
+ state OR the DuTag state.
+ NOTE: The diagnostic PP should issue a d-stream load
+ to an aligned cacheline+0x20(+0x60) in order to have the
+ return VDLUTAG information (in OW2/OW6) written directly
+ into the proper PP register. The diagnostic PP should also
+ flush its local L1 cache after use (to ensure data
+ coherency).
+ NOTE: The position of the VDLUTAG data in the destination
+ register is dependent on the endian mode(big/little).
+ NOTE: (For L2C BitMap testing of L2 Data Store OW ECC):
+ If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected
+ half cacheline (see L2D_ERR[BMHCLSEL]) is also
+ conditionally latched into the L2D_FSYN0/1 CSRs if an
+ LDD command is detected from the diagnostic PP(L2C_DBG[PPNUM]). */
+#else
+ uint64_t l2t : 1;
+ uint64_t l2d : 1;
+ uint64_t finv : 1;
+ uint64_t set : 3;
+ uint64_t ppnum : 2;
+ uint64_t reserved_8_9 : 2;
+ uint64_t lfb_dmp : 1;
+ uint64_t lfb_enum : 3;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn52xx;
+ struct cvmx_l2c_dbg_cn52xx cn52xxp1;
+ struct cvmx_l2c_dbg_s cn56xx;
+ struct cvmx_l2c_dbg_s cn56xxp1;
+ struct cvmx_l2c_dbg_s cn58xx;
+ struct cvmx_l2c_dbg_s cn58xxp1;
+};
+typedef union cvmx_l2c_dbg cvmx_l2c_dbg_t;
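+
+/* Illustrative sketch (not part of the SDK header): programming L2C_DBG for
+ * a tag probe while honoring notes (2) and (4) above. Assumes the generic
+ * cvmx_read_csr()/cvmx_write_csr() accessors and the CVMX_L2C_DBG and
+ * CVMX_L2C_LCKBASE address macros provided elsewhere in this SDK. */
+static inline void l2c_dbg_select_l2t(int ppnum, int set)
+{
+ cvmx_l2c_dbg_t dbg;
+ /* Note (4): lockdown (L2C_LCKBASE[LCK_ENA], bit 0) must be off first */
+ cvmx_write_csr(CVMX_L2C_LCKBASE,
+ cvmx_read_csr(CVMX_L2C_LCKBASE) & ~1ull);
+ dbg.u64 = 0; /* note (2): L2T, L2D and FINV are mutually exclusive */
+ dbg.s.l2t = 1; /* return tag info on LDDs from the diagnostic PP */
+ dbg.s.ppnum = ppnum; /* which PP acts as the diagnostic PP */
+ dbg.s.set = set; /* target set; must not be a crippled set */
+ cvmx_write_csr(CVMX_L2C_DBG, dbg.u64);
+}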
+
+/**
+ * cvmx_l2c_dut
+ *
+ * L2C_DUT = L2C DUTAG Register
+ *
+ * Description: L2C Duplicate Tag State Register
+ *
+ * Notes:
+ * (1) When using the L2T, L2D or FINV Debug probe feature, an LDD command issued by the diagnostic PP
+ * WILL NOT update the DuTags.
+ * (2) L2T, L2D, FINV MUST BE mutually exclusive (only one enabled at a time).
+ * (3) Force Invalidate is intended as a means for SW to invalidate the L2 Cache while also writing back
+ * dirty data to memory to maintain coherency. (A side effect of FINV is that an LDD L2 fill is
+ * launched which fills data into the L2 DS).
+ */
+union cvmx_l2c_dut {
+ uint64_t u64;
+ struct cvmx_l2c_dut_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t dtena : 1; /**< DuTag Diagnostic read enable.
+ When L2C_DUT[DTENA]=1, all LDD(L1 load-miss)
+ commands issued from the diagnostic PP
+ (L2C_DBG[PPNUM]) will capture the DuTag state (V|L1TAG)
+ of the PP# (specified in the LDD address[29:26]) into
+ the L2C_DUT CSR register. This allows the diagPP to
+ read ALL DuTags (from any PP).
+ The DuTag Set# to capture is extracted from the LDD
+ address[25:20]. The diagnostic PP would issue the
+ LDD then read the L2C_DUT register (one at a time).
+ This LDD 'L2 force-hit' will NOT alter the current L2
+ Tag State OR the DuTag state.
+ NOTE: For CN58XX the DuTag SIZE has doubled (to 16KB)
+ where each DuTag is organized as 2x 64-way entries.
+ The LDD address[7] determines which 1-of-2 internal
+ 64-ways to select.
+ The fill data is returned directly from the L2 Data
+ Store(regardless of hit/miss) when an LDD command
+ is issued from a PP determined by the L2C_DBG[PPNUM]
+ field. The selected L2 Set# is determined by the
+ L2C_DBG[SET] field, and the index is determined
+ from the address[17:7] associated with the LDD
+ command.
+ This 'L2 force-hit' will NOT alter the current L2 Tag
+ state OR the DuTag state.
+ NOTE: In order for the DiagPP to generate an LDD command
+ to the L2C, it must first force an L1 Dcache flush. */
+ uint64_t reserved_30_30 : 1;
+ uint64_t dt_vld : 1; /**< Duplicate L1 Tag Valid bit latched in for previous
+ LDD(L1 load-miss) command sourced by diagnostic PP. */
+ uint64_t dt_tag : 29; /**< Duplicate L1 Tag[35:7] latched in for previous
+ LDD(L1 load-miss) command sourced by diagnostic PP. */
+#else
+ uint64_t dt_tag : 29;
+ uint64_t dt_vld : 1;
+ uint64_t reserved_30_30 : 1;
+ uint64_t dtena : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_l2c_dut_s cn30xx;
+ struct cvmx_l2c_dut_s cn31xx;
+ struct cvmx_l2c_dut_s cn38xx;
+ struct cvmx_l2c_dut_s cn38xxp2;
+ struct cvmx_l2c_dut_s cn50xx;
+ struct cvmx_l2c_dut_s cn52xx;
+ struct cvmx_l2c_dut_s cn52xxp1;
+ struct cvmx_l2c_dut_s cn56xx;
+ struct cvmx_l2c_dut_s cn56xxp1;
+ struct cvmx_l2c_dut_s cn58xx;
+ struct cvmx_l2c_dut_s cn58xxp1;
+};
+typedef union cvmx_l2c_dut cvmx_l2c_dut_t;
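+
+/* Illustrative sketch: composing the LDD probe address that the DTENA
+ * description above calls out (target PP# in address[29:26], DuTag set# in
+ * address[25:20]). 'base' is a hypothetical cacheable window; a real probe
+ * would load from this address and then read L2C_DUT. */
+static inline uint64_t l2c_dut_probe_addr(uint64_t base, int pp, int dutag_set)
+{
+ return base | ((uint64_t)(pp & 0xf) << 26)
+ | ((uint64_t)(dutag_set & 0x3f) << 20);
+}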
+
+/**
+ * cvmx_l2c_dut_map#
+ *
+ * L2C_DUT_MAP = L2C DUT memory map region
+ *
+ * Description: Address of the start of the region mapped to the duplicate tag. Can be used to read
+ * and write the raw duplicate tag CAM. Writes should be used only with great care as they can easily
+ * destroy the coherency of the memory system. In any case, this region is expected to be used
+ * only for debug.
+ *
+ * This base address should be combined with PP virtual ID, L1 way and L1 set to produce the final
+ * address as follows:
+ * addr<63:13> L2C_DUT_MAP<63:13>
+ * addr<12:11> PP VID
+ * addr<10:6> L1 way
+ * addr<5:3> L1 set
+ * addr<2:0> UNUSED
+ *
+ * Notes:
+ * (1) The tag is 37:10 from the 38-bit OCTEON physical address after hole removal. (The hole is between DR0
+ * and DR1. Remove the hole by subtracting 256MB from 38-bit OCTEON L2/DRAM physical addresses >= 512 MB.)
+ */
+union cvmx_l2c_dut_mapx {
+ uint64_t u64;
+ struct cvmx_l2c_dut_mapx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t tag : 28; /**< The tag value (see Note 1) */
+ uint64_t reserved_1_9 : 9;
+ uint64_t valid : 1; /**< The valid bit */
+#else
+ uint64_t valid : 1;
+ uint64_t reserved_1_9 : 9;
+ uint64_t tag : 28;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_l2c_dut_mapx_s cn61xx;
+ struct cvmx_l2c_dut_mapx_s cn63xx;
+ struct cvmx_l2c_dut_mapx_s cn63xxp1;
+ struct cvmx_l2c_dut_mapx_s cn66xx;
+ struct cvmx_l2c_dut_mapx_s cn68xx;
+ struct cvmx_l2c_dut_mapx_s cn68xxp1;
+ struct cvmx_l2c_dut_mapx_s cnf71xx;
+};
+typedef union cvmx_l2c_dut_mapx cvmx_l2c_dut_mapx_t;
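+
+/* Illustrative sketch: building a duplicate-tag map address from the bit
+ * layout documented above (addr<63:13> from L2C_DUT_MAP, PP VID in <12:11>,
+ * L1 way in <10:6>, L1 set in <5:3>, <2:0> unused). */
+static inline uint64_t l2c_dut_map_addr(uint64_t dut_map_base, int pp_vid,
+ int l1_way, int l1_set)
+{
+ return (dut_map_base & ~0x1fffull) /* addr<63:13> */
+ | ((uint64_t)(pp_vid & 0x3) << 11) /* addr<12:11> PP VID */
+ | ((uint64_t)(l1_way & 0x1f) << 6) /* addr<10:6> L1 way */
+ | ((uint64_t)(l1_set & 0x7) << 3); /* addr<5:3> L1 set */
+}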
+
+/**
+ * cvmx_l2c_err_tdt#
+ *
+ * L2C_ERR_TDT = L2C TAD DaTa Error Info
+ *
+ *
+ * Notes:
+ * (1) If the status bit corresponding to the value of the TYPE field is not set, the WAYIDX/SYN fields
+ * are not associated with the errors currently logged by the status bits and should be ignored.
+ * This can occur, for example, because of a race between a write to clear a DBE and a new, lower
+ * priority, SBE error occurring. If the SBE arrives prior to the DBE clear, the WAYIDX/SYN fields
+ * will still be locked, but the new SBE error status bit will still be set.
+ *
+ * (2) The four types of errors have differing priorities. Priority (from lowest to highest) is SBE,
+ * VSBE, DBE, VDBE. An error will lock the WAYIDX and SYN fields for other errors of equal or
+ * lower priority until cleared by software. This means that the error information is always
+ * (assuming the TYPE field matches) for the highest priority error logged in the status bits.
+ *
+ * (3) If VSBE or VDBE are set (and the TYPE field matches), the WAYIDX fields are valid and the
+ * syndrome can be found in L2C_ERR_VBF.
+ *
+ * (4) The syndrome is recorded for DBE errors, though the utility of the value is not clear.
+ */
+union cvmx_l2c_err_tdtx {
+ uint64_t u64;
+ struct cvmx_l2c_err_tdtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dbe : 1; /**< L2D Double-Bit error has occurred */
+ uint64_t sbe : 1; /**< L2D Single-Bit error has occurred */
+ uint64_t vdbe : 1; /**< VBF Double-Bit error has occurred */
+ uint64_t vsbe : 1; /**< VBF Single-Bit error has occurred */
+ uint64_t syn : 10; /**< L2D syndrome (valid only for SBE/DBE, not VSBE/VDBE) */
+ uint64_t reserved_22_49 : 28;
+ uint64_t wayidx : 18; /**< Way, index, OW of the L2 block containing the error */
+ uint64_t reserved_2_3 : 2;
+ uint64_t type : 2; /**< The type of error the WAYIDX,SYN were latched for.
+ 0 - VSBE
+ 1 - VDBE
+ 2 - SBE
+ 3 - DBE */
+#else
+ uint64_t type : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t wayidx : 18;
+ uint64_t reserved_22_49 : 28;
+ uint64_t syn : 10;
+ uint64_t vsbe : 1;
+ uint64_t vdbe : 1;
+ uint64_t sbe : 1;
+ uint64_t dbe : 1;
+#endif
+ } s;
+ struct cvmx_l2c_err_tdtx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dbe : 1; /**< L2D Double-Bit error has occurred */
+ uint64_t sbe : 1; /**< L2D Single-Bit error has occurred */
+ uint64_t vdbe : 1; /**< VBF Double-Bit error has occurred */
+ uint64_t vsbe : 1; /**< VBF Single-Bit error has occurred */
+ uint64_t syn : 10; /**< L2D syndrome (valid only for SBE/DBE, not VSBE/VDBE) */
+ uint64_t reserved_20_49 : 30;
+ uint64_t wayidx : 16; /**< Way, index, OW of the L2 block containing the error */
+ uint64_t reserved_2_3 : 2;
+ uint64_t type : 2; /**< The type of error the WAYIDX,SYN were latched for.
+ 0 - VSBE
+ 1 - VDBE
+ 2 - SBE
+ 3 - DBE */
+#else
+ uint64_t type : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t wayidx : 16;
+ uint64_t reserved_20_49 : 30;
+ uint64_t syn : 10;
+ uint64_t vsbe : 1;
+ uint64_t vdbe : 1;
+ uint64_t sbe : 1;
+ uint64_t dbe : 1;
+#endif
+ } cn61xx;
+ struct cvmx_l2c_err_tdtx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dbe : 1; /**< L2D Double-Bit error has occurred */
+ uint64_t sbe : 1; /**< L2D Single-Bit error has occurred */
+ uint64_t vdbe : 1; /**< VBF Double-Bit error has occurred */
+ uint64_t vsbe : 1; /**< VBF Single-Bit error has occurred */
+ uint64_t syn : 10; /**< L2D syndrome (valid only for SBE/DBE, not VSBE/VDBE) */
+ uint64_t reserved_21_49 : 29;
+ uint64_t wayidx : 17; /**< Way, index, OW of the L2 block containing the error */
+ uint64_t reserved_2_3 : 2;
+ uint64_t type : 2; /**< The type of error the WAYIDX,SYN were latched for.
+ 0 - VSBE
+ 1 - VDBE
+ 2 - SBE
+ 3 - DBE */
+#else
+ uint64_t type : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t wayidx : 17;
+ uint64_t reserved_21_49 : 29;
+ uint64_t syn : 10;
+ uint64_t vsbe : 1;
+ uint64_t vdbe : 1;
+ uint64_t sbe : 1;
+ uint64_t dbe : 1;
+#endif
+ } cn63xx;
+ struct cvmx_l2c_err_tdtx_cn63xx cn63xxp1;
+ struct cvmx_l2c_err_tdtx_cn63xx cn66xx;
+ struct cvmx_l2c_err_tdtx_s cn68xx;
+ struct cvmx_l2c_err_tdtx_s cn68xxp1;
+ struct cvmx_l2c_err_tdtx_cn61xx cnf71xx;
+};
+typedef union cvmx_l2c_err_tdtx cvmx_l2c_err_tdtx_t;
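+
+/* Illustrative sketch: note (1) above in code - WAYIDX/SYN are only
+ * meaningful when the status bit matching TYPE is still set. Assumes
+ * cvmx_read_csr() and a CVMX_L2C_ERR_TDTX(tad) address macro as provided
+ * elsewhere in this SDK. */
+static inline int l2c_err_tdt_info_valid(int tad)
+{
+ cvmx_l2c_err_tdtx_t tdt;
+ tdt.u64 = cvmx_read_csr(CVMX_L2C_ERR_TDTX(tad));
+ switch (tdt.s.type) {
+ case 0: return tdt.s.vsbe; /* WAYIDX latched for a VSBE */
+ case 1: return tdt.s.vdbe; /* WAYIDX latched for a VDBE */
+ case 2: return tdt.s.sbe; /* WAYIDX/SYN latched for an SBE */
+ default: return tdt.s.dbe; /* 3: WAYIDX/SYN latched for a DBE */
+ }
+}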
+
+/**
+ * cvmx_l2c_err_ttg#
+ *
+ * L2C_ERR_TTG = L2C TAD TaG Error Info
+ *
+ *
+ * Notes:
+ * (1) The priority of errors (highest to lowest) is DBE, SBE, NOWAY. An error will lock the SYN and
+ * WAYIDX fields for equal or lower priority errors until cleared by software.
+ *
+ * (2) The syndrome is recorded for DBE errors, though the utility of the value is not clear.
+ *
+ * (3) A NOWAY error does not change the value of the SYN field, and leaves WAYIDX[20:17]
+ * unpredictable. WAYIDX[16:7] is the L2 block index associated with the command which had no way
+ * to allocate.
+ *
+ * (4) If the status bit corresponding to the value of the TYPE field is not set, the WAYIDX/SYN fields
+ * are not associated with the errors currently logged by the status bits and should be ignored.
+ * This can occur, for example, because of a race between a write to clear a DBE and a new, lower
+ * priority, SBE error occurring. If the SBE arrives prior to the DBE clear, the WAYIDX/SYN fields
+ * will still be locked, but the new SBE error status bit will still be set.
+ */
+union cvmx_l2c_err_ttgx {
+ uint64_t u64;
+ struct cvmx_l2c_err_ttgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dbe : 1; /**< Double-Bit ECC error */
+ uint64_t sbe : 1; /**< Single-Bit ECC error */
+ uint64_t noway : 1; /**< No way was available for allocation.
+ L2C sets NOWAY during its processing of a
+ transaction whenever it needed/wanted to allocate
+ a WAY in the L2 cache, but was unable to. NOWAY==1
+ is (generally) not an indication that L2C failed to
+ complete transactions. Rather, it is a hint of
+ possible performance degradation. (For example, L2C
+ must read-modify-write DRAM for every transaction
+ that updates some, but not all, of the bytes in a
+ cache block, misses in the L2 cache, and cannot
+ allocate a WAY.) There is one "failure" case where
+ L2C will set NOWAY: when it cannot leave a block
+ locked in the L2 cache as part of a LCKL2
+ transaction. */
+ uint64_t reserved_56_60 : 5;
+ uint64_t syn : 6; /**< Syndrome for the single-bit error */
+ uint64_t reserved_22_49 : 28;
+ uint64_t wayidx : 15; /**< Way and index of the L2 block containing the error */
+ uint64_t reserved_2_6 : 5;
+ uint64_t type : 2; /**< The type of error the WAYIDX,SYN were latched for.
+ 0 - not valid
+ 1 - NOWAY
+ 2 - SBE
+ 3 - DBE */
+#else
+ uint64_t type : 2;
+ uint64_t reserved_2_6 : 5;
+ uint64_t wayidx : 15;
+ uint64_t reserved_22_49 : 28;
+ uint64_t syn : 6;
+ uint64_t reserved_56_60 : 5;
+ uint64_t noway : 1;
+ uint64_t sbe : 1;
+ uint64_t dbe : 1;
+#endif
+ } s;
+ struct cvmx_l2c_err_ttgx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dbe : 1; /**< Double-Bit ECC error */
+ uint64_t sbe : 1; /**< Single-Bit ECC error */
+ uint64_t noway : 1; /**< No way was available for allocation.
+ L2C sets NOWAY during its processing of a
+ transaction whenever it needed/wanted to allocate
+ a WAY in the L2 cache, but was unable to. NOWAY==1
+ is (generally) not an indication that L2C failed to
+ complete transactions. Rather, it is a hint of
+ possible performance degradation. (For example, L2C
+ must read-modify-write DRAM for every transaction
+ that updates some, but not all, of the bytes in a
+ cache block, misses in the L2 cache, and cannot
+ allocate a WAY.) There is one "failure" case where
+ L2C will set NOWAY: when it cannot leave a block
+ locked in the L2 cache as part of a LCKL2
+ transaction. */
+ uint64_t reserved_56_60 : 5;
+ uint64_t syn : 6; /**< Syndrome for the single-bit error */
+ uint64_t reserved_20_49 : 30;
+ uint64_t wayidx : 13; /**< Way and index of the L2 block containing the error */
+ uint64_t reserved_2_6 : 5;
+ uint64_t type : 2; /**< The type of error the WAYIDX,SYN were latched for.
+ 0 - not valid
+ 1 - NOWAY
+ 2 - SBE
+ 3 - DBE */
+#else
+ uint64_t type : 2;
+ uint64_t reserved_2_6 : 5;
+ uint64_t wayidx : 13;
+ uint64_t reserved_20_49 : 30;
+ uint64_t syn : 6;
+ uint64_t reserved_56_60 : 5;
+ uint64_t noway : 1;
+ uint64_t sbe : 1;
+ uint64_t dbe : 1;
+#endif
+ } cn61xx;
+ struct cvmx_l2c_err_ttgx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dbe : 1; /**< Double-Bit ECC error */
+ uint64_t sbe : 1; /**< Single-Bit ECC error */
+ uint64_t noway : 1; /**< No way was available for allocation.
+ L2C sets NOWAY during its processing of a
+ transaction whenever it needed/wanted to allocate
+ a WAY in the L2 cache, but was unable to. NOWAY==1
+ is (generally) not an indication that L2C failed to
+ complete transactions. Rather, it is a hint of
+ possible performance degradation. (For example, L2C
+ must read-modify-write DRAM for every transaction
+ that updates some, but not all, of the bytes in a
+ cache block, misses in the L2 cache, and cannot
+ allocate a WAY.) There is one "failure" case where
+ L2C will set NOWAY: when it cannot leave a block
+ locked in the L2 cache as part of a LCKL2
+ transaction. */
+ uint64_t reserved_56_60 : 5;
+ uint64_t syn : 6; /**< Syndrome for the single-bit error */
+ uint64_t reserved_21_49 : 29;
+ uint64_t wayidx : 14; /**< Way and index of the L2 block containing the error */
+ uint64_t reserved_2_6 : 5;
+ uint64_t type : 2; /**< The type of error the WAYIDX,SYN were latched for.
+ 0 - not valid
+ 1 - NOWAY
+ 2 - SBE
+ 3 - DBE */
+#else
+ uint64_t type : 2;
+ uint64_t reserved_2_6 : 5;
+ uint64_t wayidx : 14;
+ uint64_t reserved_21_49 : 29;
+ uint64_t syn : 6;
+ uint64_t reserved_56_60 : 5;
+ uint64_t noway : 1;
+ uint64_t sbe : 1;
+ uint64_t dbe : 1;
+#endif
+ } cn63xx;
+ struct cvmx_l2c_err_ttgx_cn63xx cn63xxp1;
+ struct cvmx_l2c_err_ttgx_cn63xx cn66xx;
+ struct cvmx_l2c_err_ttgx_s cn68xx;
+ struct cvmx_l2c_err_ttgx_s cn68xxp1;
+ struct cvmx_l2c_err_ttgx_cn61xx cnf71xx;
+};
+typedef union cvmx_l2c_err_ttgx cvmx_l2c_err_ttgx_t;
+
+/**
+ * cvmx_l2c_err_vbf#
+ *
+ * L2C_ERR_VBF = L2C VBF Error Info
+ *
+ *
+ * Notes:
+ * (1) The way/index information is stored in L2C_ERR_TDT, assuming no later interrupt occurred to
+ * overwrite the information. See the notes associated with L2C_ERR_TDT for full details.
+ *
+ * (2) The first VSBE will lock the register for other VSBEs. A VDBE, however, will overwrite a
+ * previously logged VSBE. Once a VDBE has been logged, all later errors will not be logged. This
+ * means that if VDBE is set the information in the register is for the VDBE, if VDBE is clear and
+ * VSBE is set the register contains information about the VSBE.
+ *
+ * (3) The syndrome is recorded for VDBE errors, though the utility of the value is not clear.
+ *
+ * (4) If the status bit corresponding to the value of the TYPE field is not set, the SYN field is not
+ * associated with the errors currently logged by the status bits and should be ignored. This can
+ * occur, for example, because of a race between a write to clear a VDBE and a new, lower priority,
+ * VSBE error occurring. If the VSBE arrives prior to the VDBE clear, the SYN field will still be
+ * locked, but the new VSBE error status bit will still be set.
+ */
+union cvmx_l2c_err_vbfx {
+ uint64_t u64;
+ struct cvmx_l2c_err_vbfx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t vdbe : 1; /**< VBF Double-Bit error has occurred */
+ uint64_t vsbe : 1; /**< VBF Single-Bit error has occurred */
+ uint64_t vsyn : 10; /**< VBF syndrome (valid only if VSBE/VDBE is set) */
+ uint64_t reserved_2_49 : 48;
+ uint64_t type : 2; /**< The type of error the SYN was latched for.
+ 0 - VSBE
+ 1 - VDBE */
+#else
+ uint64_t type : 2;
+ uint64_t reserved_2_49 : 48;
+ uint64_t vsyn : 10;
+ uint64_t vsbe : 1;
+ uint64_t vdbe : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_l2c_err_vbfx_s cn61xx;
+ struct cvmx_l2c_err_vbfx_s cn63xx;
+ struct cvmx_l2c_err_vbfx_s cn63xxp1;
+ struct cvmx_l2c_err_vbfx_s cn66xx;
+ struct cvmx_l2c_err_vbfx_s cn68xx;
+ struct cvmx_l2c_err_vbfx_s cn68xxp1;
+ struct cvmx_l2c_err_vbfx_s cnf71xx;
+};
+typedef union cvmx_l2c_err_vbfx cvmx_l2c_err_vbfx_t;
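+
+/* Illustrative sketch of note (2) above: if VDBE is set the register
+ * describes the VDBE; if only VSBE is set it describes the VSBE. */
+static inline const char *l2c_err_vbf_kind(cvmx_l2c_err_vbfx_t vbf)
+{
+ if (vbf.s.vdbe)
+ return "VDBE"; /* a VDBE overwrites any previously logged VSBE */
+ if (vbf.s.vsbe)
+ return "VSBE"; /* the first VSBE locks out later VSBEs */
+ return "none"; /* SYN/TYPE are not currently meaningful */
+}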
+
+/**
+ * cvmx_l2c_err_xmc
+ *
+ * L2C_ERR_XMC = L2C XMC request error
+ *
+ * Description: records error information for HOLE*, BIG* and VRT* interrupts.
+ *
+ * Notes:
+ * (1) The first BIGWR/HOLEWR/VRT* interrupt will lock the register until L2C_INT_REG[6:1] are
+ * cleared.
+ *
+ * (2) ADDR<15:0> will always be zero for VRT* interrupts.
+ *
+ * (3) ADDR is the 38-bit OCTEON physical address after hole removal. (The hole is between DR0
+ * and DR1. Remove the hole by subtracting 256MB from all 38-bit OCTEON L2/DRAM physical addresses
+ * >= 512 MB.)
+ *
+ * (4) For 63xx pass 2.0 and all 68xx ADDR<15:0> will ALWAYS be zero.
+ */
+union cvmx_l2c_err_xmc {
+ uint64_t u64;
+ struct cvmx_l2c_err_xmc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cmd : 6; /**< XMC command or request causing error */
+ uint64_t reserved_54_57 : 4;
+ uint64_t sid : 6; /**< XMC sid of request causing error */
+ uint64_t reserved_38_47 : 10;
+ uint64_t addr : 38; /**< XMC address causing the error (see Notes 2 and 3) */
+#else
+ uint64_t addr : 38;
+ uint64_t reserved_38_47 : 10;
+ uint64_t sid : 6;
+ uint64_t reserved_54_57 : 4;
+ uint64_t cmd : 6;
+#endif
+ } s;
+ struct cvmx_l2c_err_xmc_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cmd : 6; /**< XMC command or request causing error */
+ uint64_t reserved_52_57 : 6;
+ uint64_t sid : 4; /**< XMC sid of request causing error */
+ uint64_t reserved_38_47 : 10;
+ uint64_t addr : 38; /**< XMC address causing the error (see Notes 2 and 3) */
+#else
+ uint64_t addr : 38;
+ uint64_t reserved_38_47 : 10;
+ uint64_t sid : 4;
+ uint64_t reserved_52_57 : 6;
+ uint64_t cmd : 6;
+#endif
+ } cn61xx;
+ struct cvmx_l2c_err_xmc_cn61xx cn63xx;
+ struct cvmx_l2c_err_xmc_cn61xx cn63xxp1;
+ struct cvmx_l2c_err_xmc_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cmd : 6; /**< XMC command or request causing error */
+ uint64_t reserved_53_57 : 5;
+ uint64_t sid : 5; /**< XMC sid of request causing error */
+ uint64_t reserved_38_47 : 10;
+ uint64_t addr : 38; /**< XMC address causing the error (see Notes 2 and 3) */
+#else
+ uint64_t addr : 38;
+ uint64_t reserved_38_47 : 10;
+ uint64_t sid : 5;
+ uint64_t reserved_53_57 : 5;
+ uint64_t cmd : 6;
+#endif
+ } cn66xx;
+ struct cvmx_l2c_err_xmc_s cn68xx;
+ struct cvmx_l2c_err_xmc_s cn68xxp1;
+ struct cvmx_l2c_err_xmc_cn61xx cnf71xx;
+};
+typedef union cvmx_l2c_err_xmc cvmx_l2c_err_xmc_t;
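+
+/* Illustrative sketch inverting the hole removal in note (3): physical
+ * addresses >= 512MB had 256MB subtracted before logging, so logged
+ * addresses at or above 256MB map back by adding the hole again. */
+static inline uint64_t l2c_err_xmc_phys_addr(uint64_t logged_addr)
+{
+ if (logged_addr >= (256ull << 20)) /* came from DR1 (>= 512MB raw) */
+ logged_addr += (256ull << 20); /* re-insert the DR0/DR1 hole */
+ return logged_addr;
+}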
+
+/**
+ * cvmx_l2c_grpwrr0
+ *
+ * L2C_GRPWRR0 = L2C PP Weighted Round \#0 Register
+ *
+ * Description: Defines Weighted rounds(32) for Group PLC0,PLC1
+ *
+ * Notes:
+ * - Starvation of a group 'could' occur, unless SW takes the precaution to ensure that each GROUP
+ * participates in at least 1(of 32) rounds (ie: At least 1 bit(of 32) should be clear).
+ */
+union cvmx_l2c_grpwrr0 {
+ uint64_t u64;
+ struct cvmx_l2c_grpwrr0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t plc1rmsk : 32; /**< PLC1 Group#1 Weighted Round Mask
+ Each bit represents 1 of 32 rounds
+ for Group \#1's participation. When a 'round' bit is
+ set, Group#1 is 'masked' and DOES NOT participate.
+ When a 'round' bit is clear, Group#1 WILL
+ participate in the arbitration for this round. */
+ uint64_t plc0rmsk : 32; /**< PLC Group#0 Weighted Round Mask
+ Each bit represents 1 of 32 rounds
+ for Group \#0's participation. When a 'round' bit is
+ set, Group#0 is 'masked' and DOES NOT participate.
+ When a 'round' bit is clear, Group#0 WILL
+ participate in the arbitration for this round. */
+#else
+ uint64_t plc0rmsk : 32;
+ uint64_t plc1rmsk : 32;
+#endif
+ } s;
+ struct cvmx_l2c_grpwrr0_s cn52xx;
+ struct cvmx_l2c_grpwrr0_s cn52xxp1;
+ struct cvmx_l2c_grpwrr0_s cn56xx;
+ struct cvmx_l2c_grpwrr0_s cn56xxp1;
+};
+typedef union cvmx_l2c_grpwrr0 cvmx_l2c_grpwrr0_t;
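+
+/* Illustrative sketch: a 3:1 weighting between groups 0 and 1 that honors
+ * the starvation note above (a set bit masks the group for that round, so
+ * each 32-bit mask must leave at least one bit clear). Assumes
+ * cvmx_write_csr() and the CVMX_L2C_GRPWRR0 address macro. */
+static inline void l2c_grpwrr0_weight_3_to_1(void)
+{
+ cvmx_l2c_grpwrr0_t wrr;
+ wrr.u64 = 0;
+ wrr.s.plc0rmsk = 0x88888888; /* group 0 sits out 1 round in 4 */
+ wrr.s.plc1rmsk = 0x77777777; /* group 1 sits out 3 rounds in 4 */
+ /* neither mask is all ones, so neither group can starve */
+ cvmx_write_csr(CVMX_L2C_GRPWRR0, wrr.u64);
+}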
+
+/**
+ * cvmx_l2c_grpwrr1
+ *
+ * L2C_GRPWRR1 = L2C PP Weighted Round \#1 Register
+ *
+ * Description: Defines Weighted Rounds(32) for Group PLC2,ILC
+ *
+ * Notes:
+ * - Starvation of a group 'could' occur, unless SW takes the precaution to ensure that each GROUP
+ * participates in at least 1(of 32) rounds (ie: At least 1 bit(of 32) should be clear).
+ */
+union cvmx_l2c_grpwrr1 {
+ uint64_t u64;
+ struct cvmx_l2c_grpwrr1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ilcrmsk : 32; /**< ILC (IOB) Weighted Round Mask
+ Each bit represents 1 of 32 rounds
+ for IOB participation. When a 'round' bit is
+ set, IOB is 'masked' and DOES NOT participate.
+ When a 'round' bit is clear, IOB WILL
+ participate in the arbitration for this round. */
+ uint64_t plc2rmsk : 32; /**< PLC Group#2 Weighted Round Mask
+ Each bit represents 1 of 32 rounds
+ for Group \#2's participation. When a 'round' bit is
+ set, Group#2 is 'masked' and DOES NOT participate.
+ When a 'round' bit is clear, Group#2 WILL
+ participate in the arbitration for this round. */
+#else
+ uint64_t plc2rmsk : 32;
+ uint64_t ilcrmsk : 32;
+#endif
+ } s;
+ struct cvmx_l2c_grpwrr1_s cn52xx;
+ struct cvmx_l2c_grpwrr1_s cn52xxp1;
+ struct cvmx_l2c_grpwrr1_s cn56xx;
+ struct cvmx_l2c_grpwrr1_s cn56xxp1;
+};
+typedef union cvmx_l2c_grpwrr1 cvmx_l2c_grpwrr1_t;
+
+/**
+ * cvmx_l2c_int_en
+ *
+ * L2C_INT_EN = L2C Global Interrupt Enable Register
+ *
+ * Description:
+ */
+union cvmx_l2c_int_en {
+ uint64_t u64;
+ struct cvmx_l2c_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t lck2ena : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit
+ NOTE: This is the 'same' bit as L2T_ERR[LCK_INTENA2] */
+ uint64_t lckena : 1; /**< L2 Tag Lock Error Interrupt Enable bit
+ NOTE: This is the 'same' bit as L2T_ERR[LCK_INTENA] */
+ uint64_t l2ddeden : 1; /**< L2 Data ECC Double Error Detect(DED) Interrupt Enable bit
+ When set, allows interrupts to be reported on double bit
+ (uncorrectable) errors from the L2 Data Arrays.
+ NOTE: This is the 'same' bit as L2D_ERR[DED_INTENA] */
+ uint64_t l2dsecen : 1; /**< L2 Data ECC Single Error Correct(SEC) Interrupt Enable bit
+ When set, allows interrupts to be reported on single bit
+ (correctable) errors from the L2 Data Arrays.
+ NOTE: This is the 'same' bit as L2D_ERR[SEC_INTENA] */
+ uint64_t l2tdeden : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt
+ NOTE: This is the 'same' bit as L2T_ERR[DED_INTENA] */
+ uint64_t l2tsecen : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on single bit (correctable) errors from
+ the L2 Tag Arrays.
+ NOTE: This is the 'same' bit as L2T_ERR[SEC_INTENA] */
+ uint64_t oob3en : 1; /**< DMA Out of Bounds Interrupt Enable Range#3 */
+ uint64_t oob2en : 1; /**< DMA Out of Bounds Interrupt Enable Range#2 */
+ uint64_t oob1en : 1; /**< DMA Out of Bounds Interrupt Enable Range#1 */
+#else
+ uint64_t oob1en : 1;
+ uint64_t oob2en : 1;
+ uint64_t oob3en : 1;
+ uint64_t l2tsecen : 1;
+ uint64_t l2tdeden : 1;
+ uint64_t l2dsecen : 1;
+ uint64_t l2ddeden : 1;
+ uint64_t lckena : 1;
+ uint64_t lck2ena : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_l2c_int_en_s cn52xx;
+ struct cvmx_l2c_int_en_s cn52xxp1;
+ struct cvmx_l2c_int_en_s cn56xx;
+ struct cvmx_l2c_int_en_s cn56xxp1;
+};
+typedef union cvmx_l2c_int_en cvmx_l2c_int_en_t;
+
+/**
+ * cvmx_l2c_int_ena
+ *
+ * L2C_INT_ENA = L2C Interrupt Enable
+ *
+ */
+union cvmx_l2c_int_ena {
+ uint64_t u64;
+ struct cvmx_l2c_int_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t bigrd : 1; /**< Read reference past MAXDRAM enable */
+ uint64_t bigwr : 1; /**< Write reference past MAXDRAM enable */
+ uint64_t vrtpe : 1; /**< Virtualization memory parity error */
+ uint64_t vrtadrng : 1; /**< Address outside of virtualization range enable */
+ uint64_t vrtidrng : 1; /**< Virtualization ID out of range enable */
+ uint64_t vrtwr : 1; /**< Virtualization ID prevented a write enable */
+ uint64_t holewr : 1; /**< Write reference to 256MB hole enable */
+ uint64_t holerd : 1; /**< Read reference to 256MB hole enable */
+#else
+ uint64_t holerd : 1;
+ uint64_t holewr : 1;
+ uint64_t vrtwr : 1;
+ uint64_t vrtidrng : 1;
+ uint64_t vrtadrng : 1;
+ uint64_t vrtpe : 1;
+ uint64_t bigwr : 1;
+ uint64_t bigrd : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_l2c_int_ena_s cn61xx;
+ struct cvmx_l2c_int_ena_s cn63xx;
+ struct cvmx_l2c_int_ena_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t vrtpe : 1; /**< Virtualization memory parity error */
+ uint64_t vrtadrng : 1; /**< Address outside of virtualization range enable */
+ uint64_t vrtidrng : 1; /**< Virtualization ID out of range enable */
+ uint64_t vrtwr : 1; /**< Virtualization ID prevented a write enable */
+ uint64_t holewr : 1; /**< Write reference to 256MB hole enable */
+ uint64_t holerd : 1; /**< Read reference to 256MB hole enable */
+#else
+ uint64_t holerd : 1;
+ uint64_t holewr : 1;
+ uint64_t vrtwr : 1;
+ uint64_t vrtidrng : 1;
+ uint64_t vrtadrng : 1;
+ uint64_t vrtpe : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn63xxp1;
+ struct cvmx_l2c_int_ena_s cn66xx;
+ struct cvmx_l2c_int_ena_s cn68xx;
+ struct cvmx_l2c_int_ena_s cn68xxp1;
+ struct cvmx_l2c_int_ena_s cnf71xx;
+};
+typedef union cvmx_l2c_int_ena cvmx_l2c_int_ena_t;
+
+/**
+ * cvmx_l2c_int_reg
+ *
+ * L2C_INT_REG = L2C Interrupt Register
+ *
+ */
+union cvmx_l2c_int_reg {
+ uint64_t u64;
+ struct cvmx_l2c_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t tad3 : 1; /**< When set, the enabled interrupt is in
+ the L2C_TAD3_INT CSR */
+ uint64_t tad2 : 1; /**< When set, the enabled interrupt is in
+ the L2C_TAD2_INT CSR */
+ uint64_t tad1 : 1; /**< When set, the enabled interrupt is in
+ the L2C_TAD1_INT CSR */
+ uint64_t tad0 : 1; /**< When set, the enabled interrupt is in
+ the L2C_TAD0_INT CSR */
+ uint64_t reserved_8_15 : 8;
+ uint64_t bigrd : 1; /**< Read reference past L2C_BIG_CTL[MAXDRAM] occurred */
+ uint64_t bigwr : 1; /**< Write reference past L2C_BIG_CTL[MAXDRAM] occurred */
+ uint64_t vrtpe : 1; /**< L2C_VRT_MEM read found a parity error
+ Whenever an L2C_VRT_MEM read finds a parity error,
+ that L2C_VRT_MEM cannot cause stores to be blocked.
+ Software should correct the error. */
+ uint64_t vrtadrng : 1; /**< Address outside of virtualization range
+ Set when a L2C_VRT_CTL[MEMSZ] violation blocked a
+ store.
+ L2C_VRT_CTL[OOBERR] must be set for L2C to set this. */
+ uint64_t vrtidrng : 1; /**< Virtualization ID out of range
+ Set when a L2C_VRT_CTL[NUMID] violation blocked a
+ store. */
+ uint64_t vrtwr : 1; /**< Virtualization ID prevented a write
+ Set when L2C_VRT_MEM blocked a store. */
+ uint64_t holewr : 1; /**< Write reference to 256MB hole occurred */
+ uint64_t holerd : 1; /**< Read reference to 256MB hole occurred */
+#else
+ uint64_t holerd : 1;
+ uint64_t holewr : 1;
+ uint64_t vrtwr : 1;
+ uint64_t vrtidrng : 1;
+ uint64_t vrtadrng : 1;
+ uint64_t vrtpe : 1;
+ uint64_t bigwr : 1;
+ uint64_t bigrd : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t tad0 : 1;
+ uint64_t tad1 : 1;
+ uint64_t tad2 : 1;
+ uint64_t tad3 : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_l2c_int_reg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t tad0 : 1; /**< When set, the enabled interrupt is in
+ the L2C_TAD0_INT CSR */
+ uint64_t reserved_8_15 : 8;
+ uint64_t bigrd : 1; /**< Read reference past L2C_BIG_CTL[MAXDRAM] occurred */
+ uint64_t bigwr : 1; /**< Write reference past L2C_BIG_CTL[MAXDRAM] occurred */
+ uint64_t vrtpe : 1; /**< L2C_VRT_MEM read found a parity error
+ Whenever an L2C_VRT_MEM read finds a parity error,
+ that L2C_VRT_MEM cannot cause stores to be blocked.
+ Software should correct the error. */
+ uint64_t vrtadrng : 1; /**< Address outside of virtualization range
+ Set when a L2C_VRT_CTL[MEMSZ] violation blocked a
+ store.
+ L2C_VRT_CTL[OOBERR] must be set for L2C to set this. */
+ uint64_t vrtidrng : 1; /**< Virtualization ID out of range
+ Set when a L2C_VRT_CTL[NUMID] violation blocked a
+ store. */
+ uint64_t vrtwr : 1; /**< Virtualization ID prevented a write
+ Set when L2C_VRT_MEM blocked a store. */
+ uint64_t holewr : 1; /**< Write reference to 256MB hole occurred */
+ uint64_t holerd : 1; /**< Read reference to 256MB hole occurred */
+#else
+ uint64_t holerd : 1;
+ uint64_t holewr : 1;
+ uint64_t vrtwr : 1;
+ uint64_t vrtidrng : 1;
+ uint64_t vrtadrng : 1;
+ uint64_t vrtpe : 1;
+ uint64_t bigwr : 1;
+ uint64_t bigrd : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t tad0 : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn61xx;
+ struct cvmx_l2c_int_reg_cn61xx cn63xx;
+ struct cvmx_l2c_int_reg_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t tad0 : 1; /**< When set, the enabled interrupt is in either
+ the L2C_ERR_TDT0 or L2C_ERR_TTG0 CSR */
+ uint64_t reserved_6_15 : 10;
+ uint64_t vrtpe : 1; /**< L2C_VRT_MEM read found a parity error
+ Whenever an L2C_VRT_MEM read finds a parity error,
+ that L2C_VRT_MEM cannot cause stores to be blocked.
+ Software should correct the error. */
+ uint64_t vrtadrng : 1; /**< Address outside of virtualization range
+ Set when a L2C_VRT_CTL[MEMSZ] violation blocked a
+ store.
+ L2C_VRT_CTL[OOBERR] must be set for L2C to set this. */
+ uint64_t vrtidrng : 1; /**< Virtualization ID out of range
+ Set when a L2C_VRT_CTL[NUMID] violation blocked a
+ store. */
+ uint64_t vrtwr : 1; /**< Virtualization ID prevented a write
+ Set when L2C_VRT_MEM blocked a store. */
+ uint64_t holewr : 1; /**< Write reference to 256MB hole occurred */
+ uint64_t holerd : 1; /**< Read reference to 256MB hole occurred */
+#else
+ uint64_t holerd : 1;
+ uint64_t holewr : 1;
+ uint64_t vrtwr : 1;
+ uint64_t vrtidrng : 1;
+ uint64_t vrtadrng : 1;
+ uint64_t vrtpe : 1;
+ uint64_t reserved_6_15 : 10;
+ uint64_t tad0 : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn63xxp1;
+ struct cvmx_l2c_int_reg_cn61xx cn66xx;
+ struct cvmx_l2c_int_reg_s cn68xx;
+ struct cvmx_l2c_int_reg_s cn68xxp1;
+ struct cvmx_l2c_int_reg_cn61xx cnf71xx;
+};
+typedef union cvmx_l2c_int_reg cvmx_l2c_int_reg_t;
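+
+/* Illustrative sketch: the dispatch that the TAD bits above imply - read
+ * the summary register, then follow each set TADn bit (bits 16..19) to its
+ * per-TAD CSR. Assumes cvmx_read_csr() and the CVMX_L2C_INT_REG and
+ * CVMX_L2C_TADX_INT(tad) address macros provided elsewhere in this SDK. */
+static inline void l2c_int_dispatch(void)
+{
+ cvmx_l2c_int_reg_t sum;
+ int tad;
+ sum.u64 = cvmx_read_csr(CVMX_L2C_INT_REG);
+ for (tad = 0; tad < 4; tad++) {
+ if ((sum.u64 >> (16 + tad)) & 1) {
+ uint64_t cause = cvmx_read_csr(CVMX_L2C_TADX_INT(tad));
+ (void)cause; /* decode and clear the per-TAD cause here */
+ }
+ }
+}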
+
+/**
+ * cvmx_l2c_int_stat
+ *
+ * L2C_INT_STAT = L2C Global Interrupt Status Register
+ *
+ * Description:
+ */
+union cvmx_l2c_int_stat {
+ uint64_t u64;
+ struct cvmx_l2c_int_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t lck2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n
+ could not find an available/unlocked set (for
+ replacement).
+ Most likely, this is a result of SW mixing SET
+ PARTITIONING with ADDRESS LOCKING. If SW allows
+ another PP to LOCKDOWN all SETs available to PP#n,
+ then a Rd/Wr Miss from PP#n will be unable
+ to determine a 'valid' replacement set (since LOCKED
+ addresses should NEVER be replaced).
+ If such an event occurs, the HW will select the smallest
+ available SET (specified by UMSK'x') as the replacement
+ set, and the address is unlocked.
+ NOTE: This is the 'same' bit as L2T_ERR[LCKERR2] */
+ uint64_t lck : 1; /**< SW attempted to LOCK DOWN the last available set of
+ the INDEX (which is ignored by HW - but reported to SW).
+ The LDD(L1 load-miss) for the LOCK operation is completed
+ successfully; however, the address is NOT locked.
+ NOTE: 'Available' sets take the L2C_SPAR*[UMSK*]
+ into account. For example, if diagnostic PPx has
+ UMSKx defined to only use SETs [1:0], and SET1 had
+ been previously LOCKED, then an attempt to LOCK the
+ last available SET0 would result in a LCKERR. (This
+ is to ensure that at least 1 SET at each INDEX is
+ not LOCKED for general use by other PPs).
+ NOTE: This is the 'same' bit as L2T_ERR[LCKERR] */
+ uint64_t l2dded : 1; /**< L2D Double Error detected (DED)
+ NOTE: This is the 'same' bit as L2D_ERR[DED_ERR] */
+ uint64_t l2dsec : 1; /**< L2D Single Error corrected (SEC)
+ NOTE: This is the 'same' bit as L2D_ERR[SEC_ERR] */
+ uint64_t l2tded : 1; /**< L2T Double Bit Error detected (DED)
+ During every L2 Tag Probe, all 8 sets' Tags (at a
+ given index) are checked for double bit errors(DBEs).
+ This bit is set if ANY of the 8 sets contains a DBE.
+ DBEs also generate an interrupt(if enabled).
+ NOTE: This is the 'same' bit as L2T_ERR[DED_ERR] */
+ uint64_t l2tsec : 1; /**< L2T Single Bit Error corrected (SEC) status
+ During every L2 Tag Probe, all 8 sets' Tags (at a
+ given index) are checked for single bit errors(SBEs).
+ This bit is set if ANY of the 8 sets contains an SBE.
+ SBEs are auto corrected in HW and generate an
+ interrupt(if enabled).
+ NOTE: This is the 'same' bit as L2T_ERR[SEC_ERR] */
+ uint64_t oob3 : 1; /**< DMA Out of Bounds Interrupt Status Range#3 */
+ uint64_t oob2 : 1; /**< DMA Out of Bounds Interrupt Status Range#2 */
+ uint64_t oob1 : 1; /**< DMA Out of Bounds Interrupt Status Range#1 */
+#else
+ uint64_t oob1 : 1;
+ uint64_t oob2 : 1;
+ uint64_t oob3 : 1;
+ uint64_t l2tsec : 1;
+ uint64_t l2tded : 1;
+ uint64_t l2dsec : 1;
+ uint64_t l2dded : 1;
+ uint64_t lck : 1;
+ uint64_t lck2 : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_l2c_int_stat_s cn52xx;
+ struct cvmx_l2c_int_stat_s cn52xxp1;
+ struct cvmx_l2c_int_stat_s cn56xx;
+ struct cvmx_l2c_int_stat_s cn56xxp1;
+};
+typedef union cvmx_l2c_int_stat cvmx_l2c_int_stat_t;
+
+/**
+ * cvmx_l2c_ioc#_pfc
+ *
+ * L2C_IOC_PFC = L2C IOC Performance Counter(s)
+ *
+ */
+union cvmx_l2c_iocx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_iocx_pfc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t count : 64; /**< Current counter value */
+#else
+ uint64_t count : 64;
+#endif
+ } s;
+ struct cvmx_l2c_iocx_pfc_s cn61xx;
+ struct cvmx_l2c_iocx_pfc_s cn63xx;
+ struct cvmx_l2c_iocx_pfc_s cn63xxp1;
+ struct cvmx_l2c_iocx_pfc_s cn66xx;
+ struct cvmx_l2c_iocx_pfc_s cn68xx;
+ struct cvmx_l2c_iocx_pfc_s cn68xxp1;
+ struct cvmx_l2c_iocx_pfc_s cnf71xx;
+};
+typedef union cvmx_l2c_iocx_pfc cvmx_l2c_iocx_pfc_t;
+
+/**
+ * cvmx_l2c_ior#_pfc
+ *
+ * L2C_IOR_PFC = L2C IOR Performance Counter(s)
+ *
+ */
+union cvmx_l2c_iorx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_iorx_pfc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t count : 64; /**< Current counter value */
+#else
+ uint64_t count : 64;
+#endif
+ } s;
+ struct cvmx_l2c_iorx_pfc_s cn61xx;
+ struct cvmx_l2c_iorx_pfc_s cn63xx;
+ struct cvmx_l2c_iorx_pfc_s cn63xxp1;
+ struct cvmx_l2c_iorx_pfc_s cn66xx;
+ struct cvmx_l2c_iorx_pfc_s cn68xx;
+ struct cvmx_l2c_iorx_pfc_s cn68xxp1;
+ struct cvmx_l2c_iorx_pfc_s cnf71xx;
+};
+typedef union cvmx_l2c_iorx_pfc cvmx_l2c_iorx_pfc_t;
+
+/**
+ * cvmx_l2c_lckbase
+ *
+ * L2C_LCKBASE = L2C LockDown Base Register
+ *
+ * Description: L2C LockDown Base Register
+ *
+ * Notes:
+ * (1) SW RESTRICTION \#1: SW must manage the L2 Data Store lockdown space such that at least 1
+ * set per cache line remains in the 'unlocked' (normal) state to allow general caching operations.
+ * If SW violates this restriction, a status bit is set (LCK_ERR) and an interrupt is posted.
+ * [this limits the total lockdown space to 7/8ths of the total L2 data store = 896KB]
+ * (2) IOB initiated LDI commands are ignored (only PP initiated LDI/LDD commands are considered
+ * for lockdown).
+ * (3) To 'unlock' a locked cache line, SW can use the FLUSH-INVAL CSR mechanism (see L2C_DBG[FINV]).
+ * (4) LCK_ENA MUST only be activated when debug modes are disabled (L2C_DBG[L2T], L2C_DBG[L2D], L2C_DBG[FINV]).
+ */
+union cvmx_l2c_lckbase {
+ uint64_t u64;
+ struct cvmx_l2c_lckbase_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t lck_base : 27; /**< Base Memory block address[33:7]. Specifies the
+ starting address of the lockdown region. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t lck_ena : 1; /**< L2 Cache Lock Enable
+ When the LCK_ENA=1, all LDI(I-stream Load) or
+ LDD(L1 load-miss) commands issued from the
+ diagnostic PP (specified by the L2C_DBG[PPNUM]),
+ which fall within a predefined lockdown address
+ range (specified by: [lck_base:lck_base+lck_offset])
+ are LOCKED in the L2 cache. The LOCKED state is
+ denoted using an explicit L2 Tag bit (L=1).
+ If the LOCK request L2-Hits (on ANY SET), then data is
+ returned from the L2 and the hit set is updated to the
+ LOCKED state. NOTE: If the Hit Set# is outside the
+ available sets for a given PP (see UMSK'x'),
+ the LOCK bit is still SET. If the programmer's intent
+ is to explicitly LOCK addresses into 'available' sets,
+ care must be taken to flush-invalidate the cache first
+ (to avoid such situations). Not following this procedure
+ can lead to LCKERR2 interrupts.
+ If the LOCK request L2-Misses, a replacement set is
+ chosen (from the available sets, UMSK'x').
+ If the replacement set contains a dirty-victim it is
+ written back to memory. Memory read data is then written
+ into the replacement set, and the replacement SET is
+ updated to the LOCKED state(L=1).
+ NOTE: SETs that contain LOCKED addresses are
+ excluded from the replacement set selection algorithm.
+ NOTE: The LDD command will allocate the DuTag as normal.
+ NOTE: If L2C_CFG[IDXALIAS]=1, the address is 'aliased' first
+ before being checked against the lockdown address
+ range. To ensure an 'aliased' address is properly locked,
+ it is recommended that SW preload the 'aliased' locked address
+ into the L2C_LCKBASE[LCK_BASE] register (while keeping
+ L2C_LCKOFF[LCK_OFFSET]=0).
+ NOTE: The OCTEON(N3) implementation only supports 16GB(MAX) of
+ physical memory. Therefore, only byte address[33:0] are used
+ (ie: address[35:34] are ignored). */
+#else
+ uint64_t lck_ena : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t lck_base : 27;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_l2c_lckbase_s cn30xx;
+ struct cvmx_l2c_lckbase_s cn31xx;
+ struct cvmx_l2c_lckbase_s cn38xx;
+ struct cvmx_l2c_lckbase_s cn38xxp2;
+ struct cvmx_l2c_lckbase_s cn50xx;
+ struct cvmx_l2c_lckbase_s cn52xx;
+ struct cvmx_l2c_lckbase_s cn52xxp1;
+ struct cvmx_l2c_lckbase_s cn56xx;
+ struct cvmx_l2c_lckbase_s cn56xxp1;
+ struct cvmx_l2c_lckbase_s cn58xx;
+ struct cvmx_l2c_lckbase_s cn58xxp1;
+};
+typedef union cvmx_l2c_lckbase cvmx_l2c_lckbase_t;
+
+/**
+ * cvmx_l2c_lckoff
+ *
+ * L2C_LCKOFF = L2C LockDown OFFSET Register
+ *
+ * Description: L2C LockDown OFFSET Register
+ *
+ * Notes:
+ * (1) The generation of the end lockdown block address will 'wrap'.
+ * (2) The minimum granularity for lockdown is 1 cache line (= 128B block)
+ */
+union cvmx_l2c_lckoff {
+ uint64_t u64;
+ struct cvmx_l2c_lckoff_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t lck_offset : 10; /**< LockDown block Offset. Used in determining
+ the ending block address of the lockdown
+ region:
+ End Lockdown block Address[33:7] =
+ LCK_BASE[33:7]+LCK_OFFSET[9:0] */
+#else
+ uint64_t lck_offset : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_l2c_lckoff_s cn30xx;
+ struct cvmx_l2c_lckoff_s cn31xx;
+ struct cvmx_l2c_lckoff_s cn38xx;
+ struct cvmx_l2c_lckoff_s cn38xxp2;
+ struct cvmx_l2c_lckoff_s cn50xx;
+ struct cvmx_l2c_lckoff_s cn52xx;
+ struct cvmx_l2c_lckoff_s cn52xxp1;
+ struct cvmx_l2c_lckoff_s cn56xx;
+ struct cvmx_l2c_lckoff_s cn56xxp1;
+ struct cvmx_l2c_lckoff_s cn58xx;
+ struct cvmx_l2c_lckoff_s cn58xxp1;
+};
+typedef union cvmx_l2c_lckoff cvmx_l2c_lckoff_t;
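+
+/* Editorial usage sketch, not part of the original commit: lock a physical
+ * byte range into the L2 via L2C_LCKBASE/L2C_LCKOFF, per the notes above
+ * (base and offset are in 128-byte cache lines; keep at least one set per
+ * index unlocked). Assumes <cvmx.h> (cvmx_read_csr()/cvmx_write_csr()) and
+ * the CVMX_L2C_LCKBASE/CVMX_L2C_LCKOFF address macros from this header.
+ */
+static inline void __cvmx_l2c_lock_range_sketch(uint64_t addr, uint64_t len)
+{
+    cvmx_l2c_lckoff_t lckoff;
+    cvmx_l2c_lckbase_t lckbase;
+
+    lckoff.u64 = 0;
+    lckoff.s.lck_offset = (len - 1) >> 7;   /* trailing 128B blocks */
+    cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
+
+    lckbase.u64 = cvmx_read_csr(CVMX_L2C_LCKBASE);
+    lckbase.s.lck_base = addr >> 7;         /* byte address[33:7] */
+    lckbase.s.lck_ena = 1;
+    cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+    cvmx_read_csr(CVMX_L2C_LCKBASE);        /* flush the CSR write */
+}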
+
+/**
+ * cvmx_l2c_lfb0
+ *
+ * L2C_LFB0 = L2C LFB DEBUG 0 Register
+ *
+ * Description: L2C LFB Contents (Status Bits)
+ */
+union cvmx_l2c_lfb0 {
+ uint64_t u64;
+ struct cvmx_l2c_lfb0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t stcpnd : 1; /**< LFB STC Pending Status */
+ uint64_t stpnd : 1; /**< LFB ST* Pending Status */
+ uint64_t stinv : 1; /**< LFB ST* Invalidate Status */
+ uint64_t stcfl : 1; /**< LFB STC=FAIL Status */
+ uint64_t vam : 1; /**< Valid Full Address Match Status */
+ uint64_t inxt : 4; /**< Next LFB Pointer(invalid if ITL=1) */
+ uint64_t itl : 1; /**< LFB Tail of List Indicator */
+ uint64_t ihd : 1; /**< LFB Head of List Indicator */
+ uint64_t set : 3; /**< SET# used for DS-OP (hit=hset/miss=rset) */
+ uint64_t vabnum : 4; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */
+ uint64_t sid : 9; /**< LFB Source ID */
+ uint64_t cmd : 4; /**< LFB Command */
+ uint64_t vld : 1; /**< LFB Valid */
+#else
+ uint64_t vld : 1;
+ uint64_t cmd : 4;
+ uint64_t sid : 9;
+ uint64_t vabnum : 4;
+ uint64_t set : 3;
+ uint64_t ihd : 1;
+ uint64_t itl : 1;
+ uint64_t inxt : 4;
+ uint64_t vam : 1;
+ uint64_t stcfl : 1;
+ uint64_t stinv : 1;
+ uint64_t stpnd : 1;
+ uint64_t stcpnd : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_l2c_lfb0_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t stcpnd : 1; /**< LFB STC Pending Status */
+ uint64_t stpnd : 1; /**< LFB ST* Pending Status */
+ uint64_t stinv : 1; /**< LFB ST* Invalidate Status */
+ uint64_t stcfl : 1; /**< LFB STC=FAIL Status */
+ uint64_t vam : 1; /**< Valid Full Address Match Status */
+ uint64_t reserved_25_26 : 2;
+ uint64_t inxt : 2; /**< Next LFB Pointer(invalid if ITL=1) */
+ uint64_t itl : 1; /**< LFB Tail of List Indicator */
+ uint64_t ihd : 1; /**< LFB Head of List Indicator */
+ uint64_t reserved_20_20 : 1;
+ uint64_t set : 2; /**< SET# used for DS-OP (hit=hset/miss=rset) */
+ uint64_t reserved_16_17 : 2;
+ uint64_t vabnum : 2; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */
+ uint64_t sid : 9; /**< LFB Source ID */
+ uint64_t cmd : 4; /**< LFB Command */
+ uint64_t vld : 1; /**< LFB Valid */
+#else
+ uint64_t vld : 1;
+ uint64_t cmd : 4;
+ uint64_t sid : 9;
+ uint64_t vabnum : 2;
+ uint64_t reserved_16_17 : 2;
+ uint64_t set : 2;
+ uint64_t reserved_20_20 : 1;
+ uint64_t ihd : 1;
+ uint64_t itl : 1;
+ uint64_t inxt : 2;
+ uint64_t reserved_25_26 : 2;
+ uint64_t vam : 1;
+ uint64_t stcfl : 1;
+ uint64_t stinv : 1;
+ uint64_t stpnd : 1;
+ uint64_t stcpnd : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn30xx;
+ struct cvmx_l2c_lfb0_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t stcpnd : 1; /**< LFB STC Pending Status */
+ uint64_t stpnd : 1; /**< LFB ST* Pending Status */
+ uint64_t stinv : 1; /**< LFB ST* Invalidate Status */
+ uint64_t stcfl : 1; /**< LFB STC=FAIL Status */
+ uint64_t vam : 1; /**< Valid Full Address Match Status */
+ uint64_t reserved_26_26 : 1;
+ uint64_t inxt : 3; /**< Next LFB Pointer(invalid if ITL=1) */
+ uint64_t itl : 1; /**< LFB Tail of List Indicator */
+ uint64_t ihd : 1; /**< LFB Head of List Indicator */
+ uint64_t reserved_20_20 : 1;
+ uint64_t set : 2; /**< SET# used for DS-OP (hit=hset/miss=rset) */
+ uint64_t reserved_17_17 : 1;
+ uint64_t vabnum : 3; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */
+ uint64_t sid : 9; /**< LFB Source ID */
+ uint64_t cmd : 4; /**< LFB Command */
+ uint64_t vld : 1; /**< LFB Valid */
+#else
+ uint64_t vld : 1;
+ uint64_t cmd : 4;
+ uint64_t sid : 9;
+ uint64_t vabnum : 3;
+ uint64_t reserved_17_17 : 1;
+ uint64_t set : 2;
+ uint64_t reserved_20_20 : 1;
+ uint64_t ihd : 1;
+ uint64_t itl : 1;
+ uint64_t inxt : 3;
+ uint64_t reserved_26_26 : 1;
+ uint64_t vam : 1;
+ uint64_t stcfl : 1;
+ uint64_t stinv : 1;
+ uint64_t stpnd : 1;
+ uint64_t stcpnd : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn31xx;
+ struct cvmx_l2c_lfb0_s cn38xx;
+ struct cvmx_l2c_lfb0_s cn38xxp2;
+ struct cvmx_l2c_lfb0_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t stcpnd : 1; /**< LFB STC Pending Status */
+ uint64_t stpnd : 1; /**< LFB ST* Pending Status */
+ uint64_t stinv : 1; /**< LFB ST* Invalidate Status */
+ uint64_t stcfl : 1; /**< LFB STC=FAIL Status */
+ uint64_t vam : 1; /**< Valid Full Address Match Status */
+ uint64_t reserved_26_26 : 1;
+ uint64_t inxt : 3; /**< Next LFB Pointer(invalid if ITL=1) */
+ uint64_t itl : 1; /**< LFB Tail of List Indicator */
+ uint64_t ihd : 1; /**< LFB Head of List Indicator */
+ uint64_t set : 3; /**< SET# used for DS-OP (hit=hset/miss=rset) */
+ uint64_t reserved_17_17 : 1;
+ uint64_t vabnum : 3; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */
+ uint64_t sid : 9; /**< LFB Source ID */
+ uint64_t cmd : 4; /**< LFB Command */
+ uint64_t vld : 1; /**< LFB Valid */
+#else
+ uint64_t vld : 1;
+ uint64_t cmd : 4;
+ uint64_t sid : 9;
+ uint64_t vabnum : 3;
+ uint64_t reserved_17_17 : 1;
+ uint64_t set : 3;
+ uint64_t ihd : 1;
+ uint64_t itl : 1;
+ uint64_t inxt : 3;
+ uint64_t reserved_26_26 : 1;
+ uint64_t vam : 1;
+ uint64_t stcfl : 1;
+ uint64_t stinv : 1;
+ uint64_t stpnd : 1;
+ uint64_t stcpnd : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn50xx;
+ struct cvmx_l2c_lfb0_cn50xx cn52xx;
+ struct cvmx_l2c_lfb0_cn50xx cn52xxp1;
+ struct cvmx_l2c_lfb0_s cn56xx;
+ struct cvmx_l2c_lfb0_s cn56xxp1;
+ struct cvmx_l2c_lfb0_s cn58xx;
+ struct cvmx_l2c_lfb0_s cn58xxp1;
+};
+typedef union cvmx_l2c_lfb0 cvmx_l2c_lfb0_t;
+
+/**
+ * cvmx_l2c_lfb1
+ *
+ * L2C_LFB1 = L2C LFB DEBUG 1 Register
+ *
+ * Description: L2C LFB Contents (Wait Bits)
+ */
+union cvmx_l2c_lfb1 {
+ uint64_t u64;
+ struct cvmx_l2c_lfb1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t dsgoing : 1; /**< LFB DS Going (in flight) */
+ uint64_t bid : 2; /**< LFB DS Bid# */
+ uint64_t wtrsp : 1; /**< LFB Waiting for RSC Response [FILL,STRSP] completion */
+ uint64_t wtdw : 1; /**< LFB Waiting for DS-WR completion */
+ uint64_t wtdq : 1; /**< LFB Waiting for LFB-DQ */
+ uint64_t wtwhp : 1; /**< LFB Waiting for Write-Hit Partial L2 DS-WR completion */
+ uint64_t wtwhf : 1; /**< LFB Waiting for Write-Hit Full L2 DS-WR completion */
+ uint64_t wtwrm : 1; /**< LFB Waiting for Write-Miss L2 DS-WR completion */
+ uint64_t wtstm : 1; /**< LFB Waiting for Store-Miss L2 DS-WR completion */
+ uint64_t wtrda : 1; /**< LFB Waiting for Read-Miss L2 DS-WR completion */
+ uint64_t wtstdt : 1; /**< LFB Waiting for all ST write Data to arrive on XMD bus */
+ uint64_t wtstrsp : 1; /**< LFB Waiting for ST RSC/RSD to be issued on RSP
+ (with invalidates) */
+ uint64_t wtstrsc : 1; /**< LFB Waiting for ST RSC-Only to be issued on RSP
+ (no-invalidates) */
+ uint64_t wtvtm : 1; /**< LFB Waiting for Victim Read L2 DS-RD completion */
+ uint64_t wtmfl : 1; /**< LFB Waiting for Memory Fill completion to MRB */
+ uint64_t prbrty : 1; /**< Probe-Retry Detected - waiting for probe completion */
+ uint64_t wtprb : 1; /**< LFB Waiting for Probe */
+ uint64_t vld : 1; /**< LFB Valid */
+#else
+ uint64_t vld : 1;
+ uint64_t wtprb : 1;
+ uint64_t prbrty : 1;
+ uint64_t wtmfl : 1;
+ uint64_t wtvtm : 1;
+ uint64_t wtstrsc : 1;
+ uint64_t wtstrsp : 1;
+ uint64_t wtstdt : 1;
+ uint64_t wtrda : 1;
+ uint64_t wtstm : 1;
+ uint64_t wtwrm : 1;
+ uint64_t wtwhf : 1;
+ uint64_t wtwhp : 1;
+ uint64_t wtdq : 1;
+ uint64_t wtdw : 1;
+ uint64_t wtrsp : 1;
+ uint64_t bid : 2;
+ uint64_t dsgoing : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_l2c_lfb1_s cn30xx;
+ struct cvmx_l2c_lfb1_s cn31xx;
+ struct cvmx_l2c_lfb1_s cn38xx;
+ struct cvmx_l2c_lfb1_s cn38xxp2;
+ struct cvmx_l2c_lfb1_s cn50xx;
+ struct cvmx_l2c_lfb1_s cn52xx;
+ struct cvmx_l2c_lfb1_s cn52xxp1;
+ struct cvmx_l2c_lfb1_s cn56xx;
+ struct cvmx_l2c_lfb1_s cn56xxp1;
+ struct cvmx_l2c_lfb1_s cn58xx;
+ struct cvmx_l2c_lfb1_s cn58xxp1;
+};
+typedef union cvmx_l2c_lfb1 cvmx_l2c_lfb1_t;
+
+/**
+ * cvmx_l2c_lfb2
+ *
+ * L2C_LFB2 = L2C LFB DEBUG 2 Register
+ *
+ * Description: L2C LFB Contents Tag/Index
+ */
+union cvmx_l2c_lfb2 {
+ uint64_t u64;
+ struct cvmx_l2c_lfb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_l2c_lfb2_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t lfb_tag : 19; /**< LFB TAG[33:15] */
+ uint64_t lfb_idx : 8; /**< LFB IDX[14:7] */
+#else
+ uint64_t lfb_idx : 8;
+ uint64_t lfb_tag : 19;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn30xx;
+ struct cvmx_l2c_lfb2_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t lfb_tag : 17; /**< LFB TAG[33:17] */
+ uint64_t lfb_idx : 10; /**< LFB IDX[16:7] */
+#else
+ uint64_t lfb_idx : 10;
+ uint64_t lfb_tag : 17;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn31xx;
+ struct cvmx_l2c_lfb2_cn31xx cn38xx;
+ struct cvmx_l2c_lfb2_cn31xx cn38xxp2;
+ struct cvmx_l2c_lfb2_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t lfb_tag : 20; /**< LFB TAG[33:14] */
+ uint64_t lfb_idx : 7; /**< LFB IDX[13:7] */
+#else
+ uint64_t lfb_idx : 7;
+ uint64_t lfb_tag : 20;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn50xx;
+ struct cvmx_l2c_lfb2_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t lfb_tag : 18; /**< LFB TAG[33:16] */
+ uint64_t lfb_idx : 9; /**< LFB IDX[15:7] */
+#else
+ uint64_t lfb_idx : 9;
+ uint64_t lfb_tag : 18;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn52xx;
+ struct cvmx_l2c_lfb2_cn52xx cn52xxp1;
+ struct cvmx_l2c_lfb2_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t lfb_tag : 16; /**< LFB TAG[33:18] */
+ uint64_t lfb_idx : 11; /**< LFB IDX[17:7] */
+#else
+ uint64_t lfb_idx : 11;
+ uint64_t lfb_tag : 16;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn56xx;
+ struct cvmx_l2c_lfb2_cn56xx cn56xxp1;
+ struct cvmx_l2c_lfb2_cn56xx cn58xx;
+ struct cvmx_l2c_lfb2_cn56xx cn58xxp1;
+};
+typedef union cvmx_l2c_lfb2 cvmx_l2c_lfb2_t;
+
+/**
+ * cvmx_l2c_lfb3
+ *
+ * L2C_LFB3 = L2C LFB DEBUG 3 Register
+ *
+ * Description: LFB High Water Mark Register
+ */
+union cvmx_l2c_lfb3 {
+ uint64_t u64;
+ struct cvmx_l2c_lfb3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable
+ When clear, all STP/C(store partials) will take 2 cycles
+ to complete (power-on default).
+ When set, all STP/C(store partials) will take 4 cycles
+ to complete.
+ NOTE: It is recommended to keep this bit ALWAYS ZERO. */
+ uint64_t lfb_hwm : 4; /**< LFB High Water Mark
+ Determines \# of LFB Entries in use before backpressure
+ is asserted.
+ HWM=0: 1 LFB Entry available
+ - ...
+ HWM=15: 16 LFB Entries available */
+#else
+ uint64_t lfb_hwm : 4;
+ uint64_t stpartdis : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_l2c_lfb3_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable
+ When clear, all STP/C(store partials) will take 2 cycles
+ to complete (power-on default).
+ When set, all STP/C(store partials) will take 4 cycles
+ to complete.
+ NOTE: It is recommended to keep this bit ALWAYS ZERO. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t lfb_hwm : 2; /**< LFB High Water Mark
+ Determines \# of LFB Entries in use before backpressure
+ is asserted.
+ HWM=0: 1 LFB Entry available
+ - ...
+ HWM=3: 4 LFB Entries available */
+#else
+ uint64_t lfb_hwm : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t stpartdis : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn30xx;
+ struct cvmx_l2c_lfb3_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable
+ When clear, all STP/C(store partials) will take 2 cycles
+ to complete (power-on default).
+ When set, all STP/C(store partials) will take 4 cycles
+ to complete.
+ NOTE: It is recommended to keep this bit ALWAYS ZERO. */
+ uint64_t reserved_3_3 : 1;
+ uint64_t lfb_hwm : 3; /**< LFB High Water Mark
+ Determines \# of LFB Entries in use before backpressure
+ is asserted.
+ HWM=0: 1 LFB Entry available
+ - ...
+ HWM=7: 8 LFB Entries available */
+#else
+ uint64_t lfb_hwm : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t stpartdis : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn31xx;
+ struct cvmx_l2c_lfb3_s cn38xx;
+ struct cvmx_l2c_lfb3_s cn38xxp2;
+ struct cvmx_l2c_lfb3_cn31xx cn50xx;
+ struct cvmx_l2c_lfb3_cn31xx cn52xx;
+ struct cvmx_l2c_lfb3_cn31xx cn52xxp1;
+ struct cvmx_l2c_lfb3_s cn56xx;
+ struct cvmx_l2c_lfb3_s cn56xxp1;
+ struct cvmx_l2c_lfb3_s cn58xx;
+ struct cvmx_l2c_lfb3_s cn58xxp1;
+};
+typedef union cvmx_l2c_lfb3 cvmx_l2c_lfb3_t;
+
+/**
+ * cvmx_l2c_oob
+ *
+ * L2C_OOB = L2C Out of Bounds Global Enables
+ *
+ * Description: Defines DMA "Out of Bounds" global enables.
+ */
+union cvmx_l2c_oob {
+ uint64_t u64;
+ struct cvmx_l2c_oob_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t dwbena : 1; /**< DMA Out of Bounds Range Checker for DMA DWB
+ commands (Don't WriteBack).
+ When enabled, any DMA DWB commands which hit 1-of-3
+ out of bounds regions will be logged into
+ L2C_INT_STAT[OOB*] CSRs and the DMA store WILL
+ NOT occur. If the corresponding L2C_INT_EN[OOB*]
+ is enabled, an interrupt will also be reported. */
+ uint64_t stena : 1; /**< DMA Out of Bounds Range Checker for DMA store
+ commands (STF/P/T).
+ When enabled, any DMA store commands (STF/P/T) which
+ hit 1-of-3 out of bounds regions will be logged into
+ L2C_INT_STAT[OOB*] CSRs and the DMA store WILL
+ NOT occur. If the corresponding L2C_INT_EN[OOB*]
+ is enabled, an interrupt will also be reported. */
+#else
+ uint64_t stena : 1;
+ uint64_t dwbena : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_l2c_oob_s cn52xx;
+ struct cvmx_l2c_oob_s cn52xxp1;
+ struct cvmx_l2c_oob_s cn56xx;
+ struct cvmx_l2c_oob_s cn56xxp1;
+};
+typedef union cvmx_l2c_oob cvmx_l2c_oob_t;
+
+/**
+ * cvmx_l2c_oob1
+ *
+ * L2C_OOB1 = L2C Out of Bounds Range Checker
+ *
+ * Description: Defines DMA "Out of Bounds" region \#1. If a DMA initiated write transaction generates an address
+ * within the specified region, the write is 'ignored' and an interrupt is generated to alert software.
+ */
+union cvmx_l2c_oob1 {
+ uint64_t u64;
+ struct cvmx_l2c_oob1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fadr : 27; /**< DMA initiated Memory Range Checker Failing Address
+ When L2C_INT_STAT[OOB1]=1, this field indicates the
+ DMA cacheline address.
+ (addr[33:7] = full cacheline address captured)
+ NOTE: FADR is locked down until L2C_INT_STAT[OOB1]
+ is cleared. */
+ uint64_t fsrc : 1; /**< DMA Out of Bounds Failing Source Command
+ When L2C_INT_STAT[OOB1]=1, this field indicates the
+ type of DMA command.
+ - 0: ST* (STF/P/T)
+ - 1: DWB (Don't WriteBack)
+ NOTE: FSRC is locked down until L2C_INT_STAT[OOB1]
+ is cleared. */
+ uint64_t reserved_34_35 : 2;
+ uint64_t sadr : 14; /**< DMA initiated Memory Range Checker Starting Address
+ (1MB granularity) */
+ uint64_t reserved_14_19 : 6;
+ uint64_t size : 14; /**< DMA Out of Bounds Range Checker Size
+ (1MB granularity)
+ Example: 0: 0MB / 1: 1MB
+ The range check is for:
+ (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20)
+ SW NOTE: SADR+SIZE could be set up to potentially wrap
+ the 34bit ending bounds address. */
+#else
+ uint64_t size : 14;
+ uint64_t reserved_14_19 : 6;
+ uint64_t sadr : 14;
+ uint64_t reserved_34_35 : 2;
+ uint64_t fsrc : 1;
+ uint64_t fadr : 27;
+#endif
+ } s;
+ struct cvmx_l2c_oob1_s cn52xx;
+ struct cvmx_l2c_oob1_s cn52xxp1;
+ struct cvmx_l2c_oob1_s cn56xx;
+ struct cvmx_l2c_oob1_s cn56xxp1;
+};
+typedef union cvmx_l2c_oob1 cvmx_l2c_oob1_t;
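+
+/* Editorial usage sketch, not part of the original commit: program DMA
+ * out-of-bounds region #1 and enable checking, per the range formula in
+ * the SIZE field above. Assumes <cvmx.h> (cvmx_write_csr()) and the
+ * CVMX_L2C_OOB/CVMX_L2C_OOB1 address macros from this header.
+ */
+static inline void __cvmx_l2c_oob1_sketch(uint64_t start_mb, uint64_t size_mb)
+{
+    cvmx_l2c_oob1_t oob1;
+    cvmx_l2c_oob_t oob;
+
+    oob1.u64 = 0;
+    oob1.s.sadr = start_mb;   /* starting address, 1MB granularity */
+    oob1.s.size = size_mb;    /* region size, 1MB granularity */
+    cvmx_write_csr(CVMX_L2C_OOB1, oob1.u64);
+
+    /* DMA stores/DWBs to [SADR<<20, ((SADR+SIZE)&0x3FFF)<<20) are now
+     * dropped and logged in L2C_INT_STAT[OOB1]. */
+    oob.u64 = 0;
+    oob.s.stena = 1;          /* check DMA STF/P/T stores */
+    oob.s.dwbena = 1;         /* check DMA DWB commands */
+    cvmx_write_csr(CVMX_L2C_OOB, oob.u64);
+}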
+
+/**
+ * cvmx_l2c_oob2
+ *
+ * L2C_OOB2 = L2C Out of Bounds Range Checker
+ *
+ * Description: Defines DMA "Out of Bounds" region \#2. If a DMA initiated write transaction generates an address
+ * within the specified region, the write is 'ignored' and an interrupt is generated to alert software.
+ */
+union cvmx_l2c_oob2 {
+ uint64_t u64;
+ struct cvmx_l2c_oob2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fadr : 27; /**< DMA initiated Memory Range Checker Failing Address
+ When L2C_INT_STAT[OOB2]=1, this field indicates the
+ DMA cacheline address.
+ (addr[33:7] = full cacheline address captured)
+ NOTE: FADR is locked down until L2C_INT_STAT[OOB2]
+ is cleared. */
+ uint64_t fsrc : 1; /**< DMA Out of Bounds Failing Source Command
+ When L2C_INT_STAT[OOB2]=1, this field indicates the
+ type of DMA command.
+ - 0: ST* (STF/P/T)
+ - 1: DWB (Don't WriteBack)
+ NOTE: FSRC is locked down until L2C_INT_STAT[OOB2]
+ is cleared. */
+ uint64_t reserved_34_35 : 2;
+ uint64_t sadr : 14; /**< DMA initiated Memory Range Checker Starting Address
+ (1MB granularity) */
+ uint64_t reserved_14_19 : 6;
+ uint64_t size : 14; /**< DMA Out of Bounds Range Checker Size
+ (1MB granularity)
+ Example: 0: 0MB / 1: 1MB
+ The range check is for:
+ (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20)
+ SW NOTE: SADR+SIZE could be set up to potentially wrap
+ the 34bit ending bounds address. */
+#else
+ uint64_t size : 14;
+ uint64_t reserved_14_19 : 6;
+ uint64_t sadr : 14;
+ uint64_t reserved_34_35 : 2;
+ uint64_t fsrc : 1;
+ uint64_t fadr : 27;
+#endif
+ } s;
+ struct cvmx_l2c_oob2_s cn52xx;
+ struct cvmx_l2c_oob2_s cn52xxp1;
+ struct cvmx_l2c_oob2_s cn56xx;
+ struct cvmx_l2c_oob2_s cn56xxp1;
+};
+typedef union cvmx_l2c_oob2 cvmx_l2c_oob2_t;
+
+/**
+ * cvmx_l2c_oob3
+ *
+ * L2C_OOB3 = L2C Out of Bounds Range Checker
+ *
+ * Description: Defines DMA "Out of Bounds" region \#3. If a DMA initiated write transaction generates an address
+ * within the specified region, the write is 'ignored' and an interrupt is generated to alert software.
+ */
+union cvmx_l2c_oob3 {
+ uint64_t u64;
+ struct cvmx_l2c_oob3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fadr : 27; /**< DMA initiated Memory Range Checker Failing Address
+ When L2C_INT_STAT[OOB3]=1, this field indicates the
+ DMA cacheline address.
+ (addr[33:7] = full cacheline address captured)
+ NOTE: FADR is locked down until L2C_INT_STAT[OOB3]
+ is cleared. */
+ uint64_t fsrc : 1; /**< DMA Out of Bounds Failing Source Command
+ When L2C_INT_STAT[OOB3]=1, this field indicates the
+ type of DMA command.
+ - 0: ST* (STF/P/T)
+ - 1: DWB (Don't WriteBack)
+ NOTE: FSRC is locked down until L2C_INT_STAT[OOB3]
+ is cleared. */
+ uint64_t reserved_34_35 : 2;
+ uint64_t sadr : 14; /**< DMA initiated Memory Range Checker Starting Address
+ (1MB granularity) */
+ uint64_t reserved_14_19 : 6;
+ uint64_t size : 14; /**< DMA Out of Bounds Range Checker Size
+ (1MB granularity)
+ Example: 0: 0MB / 1: 1MB
+ The range check is for:
+ (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20)
+ SW NOTE: SADR+SIZE could be set up to potentially wrap
+ the 34bit ending bounds address. */
+#else
+ uint64_t size : 14;
+ uint64_t reserved_14_19 : 6;
+ uint64_t sadr : 14;
+ uint64_t reserved_34_35 : 2;
+ uint64_t fsrc : 1;
+ uint64_t fadr : 27;
+#endif
+ } s;
+ struct cvmx_l2c_oob3_s cn52xx;
+ struct cvmx_l2c_oob3_s cn52xxp1;
+ struct cvmx_l2c_oob3_s cn56xx;
+ struct cvmx_l2c_oob3_s cn56xxp1;
+};
+typedef union cvmx_l2c_oob3 cvmx_l2c_oob3_t;
+
+/**
+ * cvmx_l2c_pfc#
+ *
+ * L2C_PFC0 = L2 Performance Counter \#0
+ *
+ * Description:
+ */
+union cvmx_l2c_pfcx {
+ uint64_t u64;
+ struct cvmx_l2c_pfcx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t pfcnt0 : 36; /**< Performance Counter \#0 */
+#else
+ uint64_t pfcnt0 : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_l2c_pfcx_s cn30xx;
+ struct cvmx_l2c_pfcx_s cn31xx;
+ struct cvmx_l2c_pfcx_s cn38xx;
+ struct cvmx_l2c_pfcx_s cn38xxp2;
+ struct cvmx_l2c_pfcx_s cn50xx;
+ struct cvmx_l2c_pfcx_s cn52xx;
+ struct cvmx_l2c_pfcx_s cn52xxp1;
+ struct cvmx_l2c_pfcx_s cn56xx;
+ struct cvmx_l2c_pfcx_s cn56xxp1;
+ struct cvmx_l2c_pfcx_s cn58xx;
+ struct cvmx_l2c_pfcx_s cn58xxp1;
+};
+typedef union cvmx_l2c_pfcx cvmx_l2c_pfcx_t;
+
+/**
+ * cvmx_l2c_pfctl
+ *
+ * L2C_PFCTL = L2 Performance Counter Control Register
+ *
+ * Description: Controls the actions of the 4 Performance Counters
+ *
+ * Notes:
+ * - There are four 36b performance counter registers which can simultaneously count events.
+ * Each Counter's event is programmably selected via the corresponding CNTxSEL field:
+ * CNTxSEL[5:0] Event
+ * -----------------+-----------------------
+ * 0 | Cycles
+ * 1 | L2 LDI Command Miss (NOTE: Both PP and IOB are capable of generating LDI)
+ * 2 | L2 LDI Command Hit (NOTE: Both PP and IOB are capable of generating LDI)
+ * 3 | L2 non-LDI Command Miss
+ * 4 | L2 non-LDI Command Hit
+ * 5 | L2 Miss (total)
+ * 6 | L2 Hit (total)
+ * 7 | L2 Victim Buffer Hit (Retry Probe)
+ * 8 | LFB-NQ Index Conflict
+ * 9 | L2 Tag Probe (issued - could be VB-Retried)
+ * 10 | L2 Tag Update (completed - note: some CMD types do not update)
+ * 11 | L2 Tag Probe Completed (beyond VB-RTY window)
+ * 12 | L2 Tag Dirty Victim
+ * 13 | L2 Data Store NOP
+ * 14 | L2 Data Store READ
+ * 15 | L2 Data Store WRITE
+ * 16 | Memory Fill Data valid (1 strobe/32B)
+ * 17 | Memory Write Request
+ * 18 | Memory Read Request
+ * 19 | Memory Write Data valid (1 strobe/32B)
+ * 20 | XMC NOP (XMC Bus Idle)
+ * 21 | XMC LDT (Load-Through Request)
+ * 22 | XMC LDI (L2 Load I-Stream Request)
+ * 23 | XMC LDD (L2 Load D-stream Request)
+ * 24 | XMC STF (L2 Store Full cacheline Request)
+ * 25 | XMC STT (L2 Store Through Request)
+ * 26 | XMC STP (L2 Store Partial Request)
+ * 27 | XMC STC (L2 Store Conditional Request)
+ * 28 | XMC DWB (L2 Don't WriteBack Request)
+ * 29 | XMC PL2 (L2 Prefetch Request)
+ * 30 | XMC PSL1 (L1 Prefetch Request)
+ * 31 | XMC IOBLD
+ * 32 | XMC IOBST
+ * 33 | XMC IOBDMA
+ * 34 | XMC IOBRSP
+ * 35 | XMD Bus valid (all)
+ * 36 | XMD Bus valid (DST=L2C) Memory Data
+ * 37 | XMD Bus valid (DST=IOB) REFL Data
+ * 38 | XMD Bus valid (DST=PP) IOBRSP Data
+ * 39 | RSC NOP
+ * 40 | RSC STDN
+ * 41 | RSC FILL
+ * 42 | RSC REFL
+ * 43 | RSC STIN
+ * 44 | RSC SCIN
+ * 45 | RSC SCFL
+ * 46 | RSC SCDN
+ * 47 | RSD Data Valid
+ * 48 | RSD Data Valid (FILL)
+ * 49 | RSD Data Valid (STRSP)
+ * 50 | RSD Data Valid (REFL)
+ * 51 | LRF-REQ (LFB-NQ)
+ * 52 | DT RD-ALLOC (LDD/PSL1 Commands)
+ * 53 | DT WR-INVAL (ST* Commands)
+ */
+union cvmx_l2c_pfctl {
+ uint64_t u64;
+ struct cvmx_l2c_pfctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t cnt3rdclr : 1; /**< Performance Counter 3 Read Clear
+ When set, all CSR reads of the L2C_PFC3
+ register will auto-clear the counter. This allows
+ SW to maintain 'cumulative' counters in SW.
+ NOTE: If the CSR read occurs in the same cycle as
+ the 'event' to be counted, the counter will
+ properly reflect the event. */
+ uint64_t cnt2rdclr : 1; /**< Performance Counter 2 Read Clear
+ When set, all CSR reads of the L2C_PFC2
+ register will auto-clear the counter. This allows
+ SW to maintain 'cumulative' counters in SW.
+ NOTE: If the CSR read occurs in the same cycle as
+ the 'event' to be counted, the counter will
+ properly reflect the event. */
+ uint64_t cnt1rdclr : 1; /**< Performance Counter 1 Read Clear
+ When set, all CSR reads of the L2C_PFC1
+ register will auto-clear the counter. This allows
+ SW to maintain 'cumulative' counters in SW.
+ NOTE: If the CSR read occurs in the same cycle as
+ the 'event' to be counted, the counter will
+ properly reflect the event. */
+ uint64_t cnt0rdclr : 1; /**< Performance Counter 0 Read Clear
+ When set, all CSR reads of the L2C_PFC0
+ register will 'auto-clear' the counter. This allows
+ SW to maintain accurate 'cumulative' counters.
+ NOTE: If the CSR read occurs in the same cycle as
+ the 'event' to be counted, the counter will
+ properly reflect the event. */
+ uint64_t cnt3ena : 1; /**< Performance Counter 3 Enable
+ When this bit is set, the performance counter
+ is enabled. */
+ uint64_t cnt3clr : 1; /**< Performance Counter 3 Clear
+ When the CSR write occurs, if this bit is set,
+ the performance counter is cleared. Otherwise,
+ it will resume counting from its current value. */
+ uint64_t cnt3sel : 6; /**< Performance Counter 3 Event Selector
+ (see list of selectable events to count in NOTES) */
+ uint64_t cnt2ena : 1; /**< Performance Counter 2 Enable
+ When this bit is set, the performance counter
+ is enabled. */
+ uint64_t cnt2clr : 1; /**< Performance Counter 2 Clear
+ When the CSR write occurs, if this bit is set,
+ the performance counter is cleared. Otherwise,
+ it will resume counting from its current value. */
+ uint64_t cnt2sel : 6; /**< Performance Counter 2 Event Selector
+ (see list of selectable events to count in NOTES) */
+ uint64_t cnt1ena : 1; /**< Performance Counter 1 Enable
+ When this bit is set, the performance counter
+ is enabled. */
+ uint64_t cnt1clr : 1; /**< Performance Counter 1 Clear
+ When the CSR write occurs, if this bit is set,
+ the performance counter is cleared. Otherwise,
+ it will resume counting from its current value. */
+ uint64_t cnt1sel : 6; /**< Performance Counter 1 Event Selector
+ (see list of selectable events to count in NOTES) */
+ uint64_t cnt0ena : 1; /**< Performance Counter 0 Enable
+ When this bit is set, the performance counter
+ is enabled. */
+ uint64_t cnt0clr : 1; /**< Performance Counter 0 Clear
+ When the CSR write occurs, if this bit is set,
+ the performance counter is cleared. Otherwise,
+ it will resume counting from its current value. */
+ uint64_t cnt0sel : 6; /**< Performance Counter 0 Event Selector
+ (see list of selectable events to count in NOTES) */
+#else
+ uint64_t cnt0sel : 6;
+ uint64_t cnt0clr : 1;
+ uint64_t cnt0ena : 1;
+ uint64_t cnt1sel : 6;
+ uint64_t cnt1clr : 1;
+ uint64_t cnt1ena : 1;
+ uint64_t cnt2sel : 6;
+ uint64_t cnt2clr : 1;
+ uint64_t cnt2ena : 1;
+ uint64_t cnt3sel : 6;
+ uint64_t cnt3clr : 1;
+ uint64_t cnt3ena : 1;
+ uint64_t cnt0rdclr : 1;
+ uint64_t cnt1rdclr : 1;
+ uint64_t cnt2rdclr : 1;
+ uint64_t cnt3rdclr : 1;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_l2c_pfctl_s cn30xx;
+ struct cvmx_l2c_pfctl_s cn31xx;
+ struct cvmx_l2c_pfctl_s cn38xx;
+ struct cvmx_l2c_pfctl_s cn38xxp2;
+ struct cvmx_l2c_pfctl_s cn50xx;
+ struct cvmx_l2c_pfctl_s cn52xx;
+ struct cvmx_l2c_pfctl_s cn52xxp1;
+ struct cvmx_l2c_pfctl_s cn56xx;
+ struct cvmx_l2c_pfctl_s cn56xxp1;
+ struct cvmx_l2c_pfctl_s cn58xx;
+ struct cvmx_l2c_pfctl_s cn58xxp1;
+};
+typedef union cvmx_l2c_pfctl cvmx_l2c_pfctl_t;
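+
+/* Editorial usage sketch, not part of the original commit: count total L2
+ * misses (event 5) and hits (event 6) from the selector table above, with
+ * read-to-clear counters. Assumes <cvmx.h> (cvmx_read_csr()/cvmx_write_csr())
+ * and the CVMX_L2C_PFCTL/CVMX_L2C_PFC0/CVMX_L2C_PFC1 address macros from
+ * this header.
+ */
+static inline void __cvmx_l2c_pfc_sketch(void)
+{
+    cvmx_l2c_pfctl_t pfctl;
+
+    pfctl.u64 = 0;
+    pfctl.s.cnt0sel = 5;     /* L2 Miss (total) */
+    pfctl.s.cnt0clr = 1;     /* clear counter 0 on this CSR write */
+    pfctl.s.cnt0ena = 1;
+    pfctl.s.cnt0rdclr = 1;   /* auto-clear counter 0 on CSR read */
+    pfctl.s.cnt1sel = 6;     /* L2 Hit (total) */
+    pfctl.s.cnt1clr = 1;
+    pfctl.s.cnt1ena = 1;
+    pfctl.s.cnt1rdclr = 1;
+    cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+    /* ... run the workload, then:
+     *   uint64_t misses = cvmx_read_csr(CVMX_L2C_PFC0);
+     *   uint64_t hits   = cvmx_read_csr(CVMX_L2C_PFC1);   */
+}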
+
+/**
+ * cvmx_l2c_ppgrp
+ *
+ * L2C_PPGRP = L2C PP Group Number
+ *
+ * Description: Defines the PP(Packet Processor) PLC Group \# (0,1,2)
+ */
+union cvmx_l2c_ppgrp {
+ uint64_t u64;
+ struct cvmx_l2c_ppgrp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t pp11grp : 2; /**< PP11 PLC Group# (0,1,2) */
+ uint64_t pp10grp : 2; /**< PP10 PLC Group# (0,1,2) */
+ uint64_t pp9grp : 2; /**< PP9 PLC Group# (0,1,2) */
+ uint64_t pp8grp : 2; /**< PP8 PLC Group# (0,1,2) */
+ uint64_t pp7grp : 2; /**< PP7 PLC Group# (0,1,2) */
+ uint64_t pp6grp : 2; /**< PP6 PLC Group# (0,1,2) */
+ uint64_t pp5grp : 2; /**< PP5 PLC Group# (0,1,2) */
+ uint64_t pp4grp : 2; /**< PP4 PLC Group# (0,1,2) */
+ uint64_t pp3grp : 2; /**< PP3 PLC Group# (0,1,2) */
+ uint64_t pp2grp : 2; /**< PP2 PLC Group# (0,1,2) */
+ uint64_t pp1grp : 2; /**< PP1 PLC Group# (0,1,2) */
+ uint64_t pp0grp : 2; /**< PP0 PLC Group# (0,1,2) */
+#else
+ uint64_t pp0grp : 2;
+ uint64_t pp1grp : 2;
+ uint64_t pp2grp : 2;
+ uint64_t pp3grp : 2;
+ uint64_t pp4grp : 2;
+ uint64_t pp5grp : 2;
+ uint64_t pp6grp : 2;
+ uint64_t pp7grp : 2;
+ uint64_t pp8grp : 2;
+ uint64_t pp9grp : 2;
+ uint64_t pp10grp : 2;
+ uint64_t pp11grp : 2;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_l2c_ppgrp_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t pp3grp : 2; /**< PP3 PLC Group# (0,1,2) */
+ uint64_t pp2grp : 2; /**< PP2 PLC Group# (0,1,2) */
+ uint64_t pp1grp : 2; /**< PP1 PLC Group# (0,1,2) */
+ uint64_t pp0grp : 2; /**< PP0 PLC Group# (0,1,2) */
+#else
+ uint64_t pp0grp : 2;
+ uint64_t pp1grp : 2;
+ uint64_t pp2grp : 2;
+ uint64_t pp3grp : 2;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn52xx;
+ struct cvmx_l2c_ppgrp_cn52xx cn52xxp1;
+ struct cvmx_l2c_ppgrp_s cn56xx;
+ struct cvmx_l2c_ppgrp_s cn56xxp1;
+};
+typedef union cvmx_l2c_ppgrp cvmx_l2c_ppgrp_t;
+
+/**
+ * cvmx_l2c_qos_iob#
+ *
+ * L2C_QOS_IOB = L2C IOB QOS level
+ *
+ * Description:
+ */
+union cvmx_l2c_qos_iobx {
+ uint64_t u64;
+ struct cvmx_l2c_qos_iobx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t dwblvl : 3; /**< QOS level for DWB commands. */
+ uint64_t reserved_3_3 : 1;
+ uint64_t lvl : 3; /**< QOS level for non-DWB commands. */
+#else
+ uint64_t lvl : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t dwblvl : 3;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_l2c_qos_iobx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t dwblvl : 2; /**< QOS level for DWB commands. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t lvl : 2; /**< QOS level for non-DWB commands. */
+#else
+ uint64_t lvl : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t dwblvl : 2;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn61xx;
+ struct cvmx_l2c_qos_iobx_cn61xx cn63xx;
+ struct cvmx_l2c_qos_iobx_cn61xx cn63xxp1;
+ struct cvmx_l2c_qos_iobx_cn61xx cn66xx;
+ struct cvmx_l2c_qos_iobx_s cn68xx;
+ struct cvmx_l2c_qos_iobx_s cn68xxp1;
+ struct cvmx_l2c_qos_iobx_cn61xx cnf71xx;
+};
+typedef union cvmx_l2c_qos_iobx cvmx_l2c_qos_iobx_t;
+
+/**
+ * cvmx_l2c_qos_pp#
+ *
+ * L2C_QOS_PP = L2C PP QOS level
+ *
+ * Description:
+ */
+union cvmx_l2c_qos_ppx {
+ uint64_t u64;
+ struct cvmx_l2c_qos_ppx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t lvl : 3; /**< QOS level to use for this PP. */
+#else
+ uint64_t lvl : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_l2c_qos_ppx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t lvl : 2; /**< QOS level to use for this PP. */
+#else
+ uint64_t lvl : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn61xx;
+ struct cvmx_l2c_qos_ppx_cn61xx cn63xx;
+ struct cvmx_l2c_qos_ppx_cn61xx cn63xxp1;
+ struct cvmx_l2c_qos_ppx_cn61xx cn66xx;
+ struct cvmx_l2c_qos_ppx_s cn68xx;
+ struct cvmx_l2c_qos_ppx_s cn68xxp1;
+ struct cvmx_l2c_qos_ppx_cn61xx cnf71xx;
+};
+typedef union cvmx_l2c_qos_ppx cvmx_l2c_qos_ppx_t;
+
+/**
+ * cvmx_l2c_qos_wgt
+ *
+ * L2C_QOS_WGT = L2C QOS weights
+ *
+ */
+union cvmx_l2c_qos_wgt {
+ uint64_t u64;
+ struct cvmx_l2c_qos_wgt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wgt7 : 8; /**< Weight for QOS level 7 */
+ uint64_t wgt6 : 8; /**< Weight for QOS level 6 */
+ uint64_t wgt5 : 8; /**< Weight for QOS level 5 */
+ uint64_t wgt4 : 8; /**< Weight for QOS level 4 */
+ uint64_t wgt3 : 8; /**< Weight for QOS level 3 */
+ uint64_t wgt2 : 8; /**< Weight for QOS level 2 */
+ uint64_t wgt1 : 8; /**< Weight for QOS level 1 */
+ uint64_t wgt0 : 8; /**< Weight for QOS level 0 */
+#else
+ uint64_t wgt0 : 8;
+ uint64_t wgt1 : 8;
+ uint64_t wgt2 : 8;
+ uint64_t wgt3 : 8;
+ uint64_t wgt4 : 8;
+ uint64_t wgt5 : 8;
+ uint64_t wgt6 : 8;
+ uint64_t wgt7 : 8;
+#endif
+ } s;
+ struct cvmx_l2c_qos_wgt_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wgt3 : 8; /**< Weight for QOS level 3 */
+ uint64_t wgt2 : 8; /**< Weight for QOS level 2 */
+ uint64_t wgt1 : 8; /**< Weight for QOS level 1 */
+ uint64_t wgt0 : 8; /**< Weight for QOS level 0 */
+#else
+ uint64_t wgt0 : 8;
+ uint64_t wgt1 : 8;
+ uint64_t wgt2 : 8;
+ uint64_t wgt3 : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn61xx;
+ struct cvmx_l2c_qos_wgt_cn61xx cn63xx;
+ struct cvmx_l2c_qos_wgt_cn61xx cn63xxp1;
+ struct cvmx_l2c_qos_wgt_cn61xx cn66xx;
+ struct cvmx_l2c_qos_wgt_s cn68xx;
+ struct cvmx_l2c_qos_wgt_s cn68xxp1;
+ struct cvmx_l2c_qos_wgt_cn61xx cnf71xx;
+};
+typedef union cvmx_l2c_qos_wgt cvmx_l2c_qos_wgt_t;
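+
+/* Editorial usage sketch, not part of the original commit: raise PP0 to QOS
+ * level 1 and weight level 1 twice as heavily as level 0 in arbitration.
+ * Assumes <cvmx.h> (cvmx_write_csr()); the CVMX_L2C_QOS_PPX(x) and
+ * CVMX_L2C_QOS_WGT address-macro names are assumed from this header's
+ * conventions.
+ */
+static inline void __cvmx_l2c_qos_sketch(void)
+{
+    cvmx_l2c_qos_ppx_t qpp;
+    cvmx_l2c_qos_wgt_t wgt;
+
+    qpp.u64 = 0;
+    qpp.s.lvl = 1;                 /* PP0 issues at QOS level 1 */
+    cvmx_write_csr(CVMX_L2C_QOS_PPX(0), qpp.u64);
+
+    wgt.u64 = 0;
+    wgt.s.wgt0 = 1;                /* weight for level 0 */
+    wgt.s.wgt1 = 2;                /* level 1 wins 2:1 over level 0 */
+    cvmx_write_csr(CVMX_L2C_QOS_WGT, wgt.u64);
+}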
+
+/**
+ * cvmx_l2c_rsc#_pfc
+ *
+ * L2C_RSC_PFC = L2C RSC Performance Counter(s)
+ *
+ */
+union cvmx_l2c_rscx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_rscx_pfc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t count : 64; /**< Current counter value */
+#else
+ uint64_t count : 64;
+#endif
+ } s;
+ struct cvmx_l2c_rscx_pfc_s cn61xx;
+ struct cvmx_l2c_rscx_pfc_s cn63xx;
+ struct cvmx_l2c_rscx_pfc_s cn63xxp1;
+ struct cvmx_l2c_rscx_pfc_s cn66xx;
+ struct cvmx_l2c_rscx_pfc_s cn68xx;
+ struct cvmx_l2c_rscx_pfc_s cn68xxp1;
+ struct cvmx_l2c_rscx_pfc_s cnf71xx;
+};
+typedef union cvmx_l2c_rscx_pfc cvmx_l2c_rscx_pfc_t;
+
+/**
+ * cvmx_l2c_rsd#_pfc
+ *
+ * L2C_RSD_PFC = L2C RSD Performance Counter(s)
+ *
+ */
+union cvmx_l2c_rsdx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_rsdx_pfc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t count : 64; /**< Current counter value */
+#else
+ uint64_t count : 64;
+#endif
+ } s;
+ struct cvmx_l2c_rsdx_pfc_s cn61xx;
+ struct cvmx_l2c_rsdx_pfc_s cn63xx;
+ struct cvmx_l2c_rsdx_pfc_s cn63xxp1;
+ struct cvmx_l2c_rsdx_pfc_s cn66xx;
+ struct cvmx_l2c_rsdx_pfc_s cn68xx;
+ struct cvmx_l2c_rsdx_pfc_s cn68xxp1;
+ struct cvmx_l2c_rsdx_pfc_s cnf71xx;
+};
+typedef union cvmx_l2c_rsdx_pfc cvmx_l2c_rsdx_pfc_t;
+
+/**
+ * cvmx_l2c_spar0
+ *
+ * L2C_SPAR0 = L2 Set Partitioning Register (PP0-3)
+ *
+ * Description: L2 Set Partitioning Register
+ *
+ * Notes:
+ * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that
+ * set for replacement.
+ * - There MUST ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation
+ * - NOTES: When L2C FUSE[136] is blown(CRIP_256K), then SETS#7-4 are SET in all UMSK'x' registers
+ * When L2C FUSE[137] is blown(CRIP_128K), then SETS#7-2 are SET in all UMSK'x' registers
+ */
+union cvmx_l2c_spar0 {
+ uint64_t u64;
+ struct cvmx_l2c_spar0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t umsk3 : 8; /**< PP[3] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk2 : 8; /**< PP[2] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk1 : 8; /**< PP[1] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk0 : 8; /**< PP[0] L2 'DO NOT USE' set partition mask */
+#else
+ uint64_t umsk0 : 8;
+ uint64_t umsk1 : 8;
+ uint64_t umsk2 : 8;
+ uint64_t umsk3 : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_l2c_spar0_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t umsk0 : 4; /**< PP[0] L2 'DO NOT USE' set partition mask */
+#else
+ uint64_t umsk0 : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn30xx;
+ struct cvmx_l2c_spar0_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t umsk1 : 4; /**< PP[1] L2 'DO NOT USE' set partition mask */
+ uint64_t reserved_4_7 : 4;
+ uint64_t umsk0 : 4; /**< PP[0] L2 'DO NOT USE' set partition mask */
+#else
+ uint64_t umsk0 : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t umsk1 : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn31xx;
+ struct cvmx_l2c_spar0_s cn38xx;
+ struct cvmx_l2c_spar0_s cn38xxp2;
+ struct cvmx_l2c_spar0_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t umsk1 : 8; /**< PP[1] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk0 : 8; /**< PP[0] L2 'DO NOT USE' set partition mask */
+#else
+ uint64_t umsk0 : 8;
+ uint64_t umsk1 : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn50xx;
+ struct cvmx_l2c_spar0_s cn52xx;
+ struct cvmx_l2c_spar0_s cn52xxp1;
+ struct cvmx_l2c_spar0_s cn56xx;
+ struct cvmx_l2c_spar0_s cn56xxp1;
+ struct cvmx_l2c_spar0_s cn58xx;
+ struct cvmx_l2c_spar0_s cn58xxp1;
+};
+typedef union cvmx_l2c_spar0 cvmx_l2c_spar0_t;
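+
+/* Editorial usage sketch, not part of the original commit: partition the
+ * 8-way L2 so PP0 only allocates into sets 0-3 and PP1 only into sets 4-7.
+ * Each UMSK bit marks a set the PP must NOT use; per the notes above, at
+ * least one bit must stay clear in every mask. Assumes <cvmx.h>
+ * (cvmx_read_csr()/cvmx_write_csr()) and the CVMX_L2C_SPAR0 address macro.
+ */
+static inline void __cvmx_l2c_spar0_sketch(void)
+{
+    cvmx_l2c_spar0_t spar0;
+
+    spar0.u64 = cvmx_read_csr(CVMX_L2C_SPAR0);
+    spar0.s.umsk0 = 0xF0;    /* PP0: never replace into sets 7-4 */
+    spar0.s.umsk1 = 0x0F;    /* PP1: never replace into sets 3-0 */
+    cvmx_write_csr(CVMX_L2C_SPAR0, spar0.u64);
+}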
+
+/**
+ * cvmx_l2c_spar1
+ *
+ * L2C_SPAR1 = L2 Set Partitioning Register (PP4-7)
+ *
+ * Description: L2 Set Partitioning Register
+ *
+ * Notes:
+ * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that
+ * set for replacement.
+ * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation
+ * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers
+ * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers
+ */
+union cvmx_l2c_spar1 {
+ uint64_t u64;
+ struct cvmx_l2c_spar1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t umsk7 : 8; /**< PP[7] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk6 : 8; /**< PP[6] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk5 : 8; /**< PP[5] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk4 : 8; /**< PP[4] L2 'DO NOT USE' set partition mask */
+#else
+ uint64_t umsk4 : 8;
+ uint64_t umsk5 : 8;
+ uint64_t umsk6 : 8;
+ uint64_t umsk7 : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_l2c_spar1_s cn38xx;
+ struct cvmx_l2c_spar1_s cn38xxp2;
+ struct cvmx_l2c_spar1_s cn56xx;
+ struct cvmx_l2c_spar1_s cn56xxp1;
+ struct cvmx_l2c_spar1_s cn58xx;
+ struct cvmx_l2c_spar1_s cn58xxp1;
+};
+typedef union cvmx_l2c_spar1 cvmx_l2c_spar1_t;
+
+/**
+ * cvmx_l2c_spar2
+ *
+ * L2C_SPAR2 = L2 Set Partitioning Register (PP8-11)
+ *
+ * Description: L2 Set Partitioning Register
+ *
+ * Notes:
+ * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that
+ * set for replacement.
+ * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation
+ * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers
+ * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers
+ */
+union cvmx_l2c_spar2 {
+ uint64_t u64;
+ struct cvmx_l2c_spar2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t umsk11 : 8; /**< PP[11] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk10 : 8; /**< PP[10] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk9 : 8; /**< PP[9] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk8 : 8; /**< PP[8] L2 'DO NOT USE' set partition mask */
+#else
+ uint64_t umsk8 : 8;
+ uint64_t umsk9 : 8;
+ uint64_t umsk10 : 8;
+ uint64_t umsk11 : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_l2c_spar2_s cn38xx;
+ struct cvmx_l2c_spar2_s cn38xxp2;
+ struct cvmx_l2c_spar2_s cn56xx;
+ struct cvmx_l2c_spar2_s cn56xxp1;
+ struct cvmx_l2c_spar2_s cn58xx;
+ struct cvmx_l2c_spar2_s cn58xxp1;
+};
+typedef union cvmx_l2c_spar2 cvmx_l2c_spar2_t;
+
+/**
+ * cvmx_l2c_spar3
+ *
+ * L2C_SPAR3 = L2 Set Partitioning Register (PP12-15)
+ *
+ * Description: L2 Set Partitioning Register
+ *
+ * Notes:
+ * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that
+ * set for replacement.
+ * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation
+ * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers
+ * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers
+ */
+union cvmx_l2c_spar3 {
+ uint64_t u64;
+ struct cvmx_l2c_spar3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t umsk15 : 8; /**< PP[15] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk14 : 8; /**< PP[14] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk13 : 8; /**< PP[13] L2 'DO NOT USE' set partition mask */
+ uint64_t umsk12 : 8; /**< PP[12] L2 'DO NOT USE' set partition mask */
+#else
+ uint64_t umsk12 : 8;
+ uint64_t umsk13 : 8;
+ uint64_t umsk14 : 8;
+ uint64_t umsk15 : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_l2c_spar3_s cn38xx;
+ struct cvmx_l2c_spar3_s cn38xxp2;
+ struct cvmx_l2c_spar3_s cn58xx;
+ struct cvmx_l2c_spar3_s cn58xxp1;
+};
+typedef union cvmx_l2c_spar3 cvmx_l2c_spar3_t;
+
+/**
+ * cvmx_l2c_spar4
+ *
+ * L2C_SPAR4 = L2 Set Partitioning Register (IOB)
+ *
+ * Description: L2 Set Partitioning Register
+ *
+ * Notes:
+ * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that
+ * set for replacement.
+ * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation
+ * - NOTES: When L2C FUSE[136] is blown(CRIP_256K), then SETS#7-4 are SET in all UMSK'x' registers
+ * When L2C FUSE[137] is blown(CRIP_128K), then SETS#7-2 are SET in all UMSK'x' registers
+ */
+union cvmx_l2c_spar4 {
+ uint64_t u64;
+ struct cvmx_l2c_spar4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t umskiob : 8; /**< IOB L2 'DO NOT USE' set partition mask */
+#else
+ uint64_t umskiob : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_l2c_spar4_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t umskiob : 4; /**< IOB L2 'DO NOT USE' set partition mask */
+#else
+ uint64_t umskiob : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn30xx;
+ struct cvmx_l2c_spar4_cn30xx cn31xx;
+ struct cvmx_l2c_spar4_s cn38xx;
+ struct cvmx_l2c_spar4_s cn38xxp2;
+ struct cvmx_l2c_spar4_s cn50xx;
+ struct cvmx_l2c_spar4_s cn52xx;
+ struct cvmx_l2c_spar4_s cn52xxp1;
+ struct cvmx_l2c_spar4_s cn56xx;
+ struct cvmx_l2c_spar4_s cn56xxp1;
+ struct cvmx_l2c_spar4_s cn58xx;
+ struct cvmx_l2c_spar4_s cn58xxp1;
+};
+typedef union cvmx_l2c_spar4 cvmx_l2c_spar4_t;
+
+/**
+ * cvmx_l2c_tad#_ecc0
+ *
+ * L2C_TAD_ECC0 = L2C ECC logging
+ *
+ * Description: holds the syndromes for an L2D read generated from L2C_XMC_CMD
+ */
+union cvmx_l2c_tadx_ecc0 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_ecc0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t ow3ecc : 10; /**< ECC for OW3 of cache block */
+ uint64_t reserved_42_47 : 6;
+ uint64_t ow2ecc : 10; /**< ECC for OW2 of cache block */
+ uint64_t reserved_26_31 : 6;
+ uint64_t ow1ecc : 10; /**< ECC for OW1 of cache block */
+ uint64_t reserved_10_15 : 6;
+ uint64_t ow0ecc : 10; /**< ECC for OW0 of cache block */
+#else
+ uint64_t ow0ecc : 10;
+ uint64_t reserved_10_15 : 6;
+ uint64_t ow1ecc : 10;
+ uint64_t reserved_26_31 : 6;
+ uint64_t ow2ecc : 10;
+ uint64_t reserved_42_47 : 6;
+ uint64_t ow3ecc : 10;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } s;
+ struct cvmx_l2c_tadx_ecc0_s cn61xx;
+ struct cvmx_l2c_tadx_ecc0_s cn63xx;
+ struct cvmx_l2c_tadx_ecc0_s cn63xxp1;
+ struct cvmx_l2c_tadx_ecc0_s cn66xx;
+ struct cvmx_l2c_tadx_ecc0_s cn68xx;
+ struct cvmx_l2c_tadx_ecc0_s cn68xxp1;
+ struct cvmx_l2c_tadx_ecc0_s cnf71xx;
+};
+typedef union cvmx_l2c_tadx_ecc0 cvmx_l2c_tadx_ecc0_t;
+
+/**
+ * cvmx_l2c_tad#_ecc1
+ *
+ * L2C_TAD_ECC1 = L2C ECC logging
+ *
+ * Description: holds the syndromes for an L2D read generated from L2C_XMC_CMD
+ */
+union cvmx_l2c_tadx_ecc1 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_ecc1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t ow7ecc : 10; /**< ECC for OW7 of cache block */
+ uint64_t reserved_42_47 : 6;
+ uint64_t ow6ecc : 10; /**< ECC for OW6 of cache block */
+ uint64_t reserved_26_31 : 6;
+ uint64_t ow5ecc : 10; /**< ECC for OW5 of cache block */
+ uint64_t reserved_10_15 : 6;
+ uint64_t ow4ecc : 10; /**< ECC for OW4 of cache block */
+#else
+ uint64_t ow4ecc : 10;
+ uint64_t reserved_10_15 : 6;
+ uint64_t ow5ecc : 10;
+ uint64_t reserved_26_31 : 6;
+ uint64_t ow6ecc : 10;
+ uint64_t reserved_42_47 : 6;
+ uint64_t ow7ecc : 10;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } s;
+ struct cvmx_l2c_tadx_ecc1_s cn61xx;
+ struct cvmx_l2c_tadx_ecc1_s cn63xx;
+ struct cvmx_l2c_tadx_ecc1_s cn63xxp1;
+ struct cvmx_l2c_tadx_ecc1_s cn66xx;
+ struct cvmx_l2c_tadx_ecc1_s cn68xx;
+ struct cvmx_l2c_tadx_ecc1_s cn68xxp1;
+ struct cvmx_l2c_tadx_ecc1_s cnf71xx;
+};
+typedef union cvmx_l2c_tadx_ecc1 cvmx_l2c_tadx_ecc1_t;
+
+/**
+ * cvmx_l2c_tad#_ien
+ *
+ * L2C_TAD_IEN = L2C TAD Interrupt Enable
+ *
+ */
+union cvmx_l2c_tadx_ien {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_ien_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t wrdislmc : 1; /**< Illegal Write to Disabled LMC Error enable
+ Enables L2C_TADX_INT[WRDISLMC] to
+ assert L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t rddislmc : 1; /**< Illegal Read to Disabled LMC Error enable
+ Enables L2C_TADX_INT[RDDISLMC] to
+ assert L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t noway : 1; /**< No way available interrupt enable
+ Enables L2C_ERR_TTGX[NOWAY]/L2C_TADX_INT[NOWAY] to
+ assert L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t vbfdbe : 1; /**< VBF Double-Bit Error enable
+ Enables L2C_ERR_TDTX[VDBE]/L2C_TADX_INT[VBFDBE] to
+ assert L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t vbfsbe : 1; /**< VBF Single-Bit Error enable
+ Enables L2C_ERR_TDTX[VSBE]/L2C_TADX_INT[VBFSBE] to
+ assert L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t tagdbe : 1; /**< TAG Double-Bit Error enable
+ Enables L2C_ERR_TTGX[DBE]/L2C_TADX_INT[TAGDBE] to
+ assert L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t tagsbe : 1; /**< TAG Single-Bit Error enable
+ Enables L2C_ERR_TTGX[SBE]/L2C_TADX_INT[TAGSBE] to
+ assert L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t l2ddbe : 1; /**< L2D Double-Bit Error enable
+ Enables L2C_ERR_TDTX[DBE]/L2C_TADX_INT[L2DDBE] to
+ assert L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t l2dsbe : 1; /**< L2D Single-Bit Error enable
+ Enables L2C_ERR_TDTX[SBE]/L2C_TADX_INT[L2DSBE] to
+ assert L2C_INT_REG[TADX] (and cause an interrupt) */
+#else
+ uint64_t l2dsbe : 1;
+ uint64_t l2ddbe : 1;
+ uint64_t tagsbe : 1;
+ uint64_t tagdbe : 1;
+ uint64_t vbfsbe : 1;
+ uint64_t vbfdbe : 1;
+ uint64_t noway : 1;
+ uint64_t rddislmc : 1;
+ uint64_t wrdislmc : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_l2c_tadx_ien_s cn61xx;
+ struct cvmx_l2c_tadx_ien_s cn63xx;
+ struct cvmx_l2c_tadx_ien_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t noway : 1; /**< No way available interrupt enable
+ Enables L2C_ERR_TTGX[NOWAY] to assert
+ L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t vbfdbe : 1; /**< VBF Double-Bit Error enable
+ Enables L2C_ERR_TDTX[VDBE] to assert
+ L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t vbfsbe : 1; /**< VBF Single-Bit Error enable
+ Enables L2C_ERR_TDTX[VSBE] to assert
+ L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t tagdbe : 1; /**< TAG Double-Bit Error enable
+ Enables L2C_ERR_TTGX[DBE] to assert
+ L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t tagsbe : 1; /**< TAG Single-Bit Error enable
+ Enables L2C_ERR_TTGX[SBE] to assert
+ L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t l2ddbe : 1; /**< L2D Double-Bit Error enable
+ Enables L2C_ERR_TDTX[DBE] to assert
+ L2C_INT_REG[TADX] (and cause an interrupt) */
+ uint64_t l2dsbe : 1; /**< L2D Single-Bit Error enable
+ Enables L2C_ERR_TDTX[SBE] to assert
+ L2C_INT_REG[TADX] (and cause an interrupt) */
+#else
+ uint64_t l2dsbe : 1;
+ uint64_t l2ddbe : 1;
+ uint64_t tagsbe : 1;
+ uint64_t tagdbe : 1;
+ uint64_t vbfsbe : 1;
+ uint64_t vbfdbe : 1;
+ uint64_t noway : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } cn63xxp1;
+ struct cvmx_l2c_tadx_ien_s cn66xx;
+ struct cvmx_l2c_tadx_ien_s cn68xx;
+ struct cvmx_l2c_tadx_ien_s cn68xxp1;
+ struct cvmx_l2c_tadx_ien_s cnf71xx;
+};
+typedef union cvmx_l2c_tadx_ien cvmx_l2c_tadx_ien_t;
+
+/**
+ * cvmx_l2c_tad#_int
+ *
+ * L2C_TAD_INT = L2C TAD Interrupt Register (not present in pass 1 O63)
+ *
+ *
+ * Notes:
+ * L2C_TAD_IEN is the interrupt enable register corresponding to this register.
+ *
+ */
+union cvmx_l2c_tadx_int {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t wrdislmc : 1; /**< Illegal Write to Disabled LMC Error
+ A DRAM write arrived before the LMC(s) were enabled */
+ uint64_t rddislmc : 1; /**< Illegal Read to Disabled LMC Error
+ A DRAM read arrived before the LMC(s) were enabled */
+ uint64_t noway : 1; /**< No way available interrupt
+ Shadow copy of L2C_ERR_TTGX[NOWAY]
+ Writes of 1 also clear L2C_ERR_TTGX[NOWAY] */
+ uint64_t vbfdbe : 1; /**< VBF Double-Bit Error
+ Shadow copy of L2C_ERR_TDTX[VDBE]
+ Writes of 1 also clear L2C_ERR_TDTX[VDBE] */
+ uint64_t vbfsbe : 1; /**< VBF Single-Bit Error
+ Shadow copy of L2C_ERR_TDTX[VSBE]
+ Writes of 1 also clear L2C_ERR_TDTX[VSBE] */
+ uint64_t tagdbe : 1; /**< TAG Double-Bit Error
+ Shadow copy of L2C_ERR_TTGX[DBE]
+ Writes of 1 also clear L2C_ERR_TTGX[DBE] */
+ uint64_t tagsbe : 1; /**< TAG Single-Bit Error
+ Shadow copy of L2C_ERR_TTGX[SBE]
+ Writes of 1 also clear L2C_ERR_TTGX[SBE] */
+ uint64_t l2ddbe : 1; /**< L2D Double-Bit Error
+ Shadow copy of L2C_ERR_TDTX[DBE]
+ Writes of 1 also clear L2C_ERR_TDTX[DBE] */
+ uint64_t l2dsbe : 1; /**< L2D Single-Bit Error
+ Shadow copy of L2C_ERR_TDTX[SBE]
+ Writes of 1 also clear L2C_ERR_TDTX[SBE] */
+#else
+ uint64_t l2dsbe : 1;
+ uint64_t l2ddbe : 1;
+ uint64_t tagsbe : 1;
+ uint64_t tagdbe : 1;
+ uint64_t vbfsbe : 1;
+ uint64_t vbfdbe : 1;
+ uint64_t noway : 1;
+ uint64_t rddislmc : 1;
+ uint64_t wrdislmc : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_l2c_tadx_int_s cn61xx;
+ struct cvmx_l2c_tadx_int_s cn63xx;
+ struct cvmx_l2c_tadx_int_s cn66xx;
+ struct cvmx_l2c_tadx_int_s cn68xx;
+ struct cvmx_l2c_tadx_int_s cn68xxp1;
+ struct cvmx_l2c_tadx_int_s cnf71xx;
+};
+typedef union cvmx_l2c_tadx_int cvmx_l2c_tadx_int_t;
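+
+/* Editorial usage sketch, not part of the original commit: acknowledge all
+ * pending interrupts on one TAD. The register is write-1-to-clear, and
+ * clearing a shadow bit also clears the underlying L2C_ERR_TTGX/
+ * L2C_ERR_TDTX bit. Assumes <cvmx.h> (cvmx_read_csr()/cvmx_write_csr());
+ * the CVMX_L2C_TADX_INT(x) address-macro name is assumed.
+ */
+static inline uint64_t __cvmx_l2c_tad_int_ack_sketch(int tad)
+{
+    cvmx_l2c_tadx_int_t tad_int;
+
+    tad_int.u64 = cvmx_read_csr(CVMX_L2C_TADX_INT(tad));
+    /* Writing the read value back clears exactly the bits that were set. */
+    cvmx_write_csr(CVMX_L2C_TADX_INT(tad), tad_int.u64);
+    return tad_int.u64;    /* caller can inspect l2dsbe, tagdbe, etc. */
+}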
+
+/**
+ * cvmx_l2c_tad#_pfc0
+ *
+ * L2C_TAD_PFC0 = L2C TAD Performance Counter 0
+ *
+ */
+union cvmx_l2c_tadx_pfc0 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t count : 64; /**< Current counter value */
+#else
+ uint64_t count : 64;
+#endif
+ } s;
+ struct cvmx_l2c_tadx_pfc0_s cn61xx;
+ struct cvmx_l2c_tadx_pfc0_s cn63xx;
+ struct cvmx_l2c_tadx_pfc0_s cn63xxp1;
+ struct cvmx_l2c_tadx_pfc0_s cn66xx;
+ struct cvmx_l2c_tadx_pfc0_s cn68xx;
+ struct cvmx_l2c_tadx_pfc0_s cn68xxp1;
+ struct cvmx_l2c_tadx_pfc0_s cnf71xx;
+};
+typedef union cvmx_l2c_tadx_pfc0 cvmx_l2c_tadx_pfc0_t;
+
+/**
+ * cvmx_l2c_tad#_pfc1
+ *
+ * L2C_TAD_PFC1 = L2C TAD Performance Counter 1
+ *
+ */
+union cvmx_l2c_tadx_pfc1 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t count : 64; /**< Current counter value */
+#else
+ uint64_t count : 64;
+#endif
+ } s;
+ struct cvmx_l2c_tadx_pfc1_s cn61xx;
+ struct cvmx_l2c_tadx_pfc1_s cn63xx;
+ struct cvmx_l2c_tadx_pfc1_s cn63xxp1;
+ struct cvmx_l2c_tadx_pfc1_s cn66xx;
+ struct cvmx_l2c_tadx_pfc1_s cn68xx;
+ struct cvmx_l2c_tadx_pfc1_s cn68xxp1;
+ struct cvmx_l2c_tadx_pfc1_s cnf71xx;
+};
+typedef union cvmx_l2c_tadx_pfc1 cvmx_l2c_tadx_pfc1_t;
+
+/**
+ * cvmx_l2c_tad#_pfc2
+ *
+ * L2C_TAD_PFC2 = L2C TAD Performance Counter 2
+ *
+ */
+union cvmx_l2c_tadx_pfc2 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t count : 64; /**< Current counter value */
+#else
+ uint64_t count : 64;
+#endif
+ } s;
+ struct cvmx_l2c_tadx_pfc2_s cn61xx;
+ struct cvmx_l2c_tadx_pfc2_s cn63xx;
+ struct cvmx_l2c_tadx_pfc2_s cn63xxp1;
+ struct cvmx_l2c_tadx_pfc2_s cn66xx;
+ struct cvmx_l2c_tadx_pfc2_s cn68xx;
+ struct cvmx_l2c_tadx_pfc2_s cn68xxp1;
+ struct cvmx_l2c_tadx_pfc2_s cnf71xx;
+};
+typedef union cvmx_l2c_tadx_pfc2 cvmx_l2c_tadx_pfc2_t;
+
+/**
+ * cvmx_l2c_tad#_pfc3
+ *
+ * L2C_TAD_PFC3 = L2C TAD Performance Counter 3
+ *
+ */
+union cvmx_l2c_tadx_pfc3 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t count : 64; /**< Current counter value */
+#else
+ uint64_t count : 64;
+#endif
+ } s;
+ struct cvmx_l2c_tadx_pfc3_s cn61xx;
+ struct cvmx_l2c_tadx_pfc3_s cn63xx;
+ struct cvmx_l2c_tadx_pfc3_s cn63xxp1;
+ struct cvmx_l2c_tadx_pfc3_s cn66xx;
+ struct cvmx_l2c_tadx_pfc3_s cn68xx;
+ struct cvmx_l2c_tadx_pfc3_s cn68xxp1;
+ struct cvmx_l2c_tadx_pfc3_s cnf71xx;
+};
+typedef union cvmx_l2c_tadx_pfc3 cvmx_l2c_tadx_pfc3_t;
+
+/**
+ * cvmx_l2c_tad#_prf
+ *
+ * L2C_TAD_PRF = L2C TAD Performance Counter Control
+ *
+ *
+ * Notes:
+ * (1) All four counters are equivalent and can use any of the defined selects.
+ *
+ * (2) the CNTnSEL legal values are:
+ * 0x00 -- Nothing (disabled)
+ * 0x01 -- L2 Tag Hit
+ * 0x02 -- L2 Tag Miss
+ * 0x03 -- L2 Tag NoAlloc (forced no-allocate)
+ * 0x04 -- L2 Victim
+ * 0x05 -- SC Fail
+ * 0x06 -- SC Pass
+ * 0x07 -- LFB Occupancy (each cycle adds \# of LFBs valid)
+ * 0x08 -- LFB Wait LFB (each cycle adds \# LFBs waiting for other LFBs)
+ * 0x09 -- LFB Wait VAB (each cycle adds \# LFBs waiting for VAB)
+ * 0x80 -- Quad 0 index bus inuse
+ * 0x81 -- Quad 0 read data bus inuse
+ * 0x82 -- Quad 0 \# banks inuse (0-4/cycle)
+ * 0x83 -- Quad 0 wdat flops inuse (0-4/cycle)
+ * 0x90 -- Quad 1 index bus inuse
+ * 0x91 -- Quad 1 read data bus inuse
+ * 0x92 -- Quad 1 \# banks inuse (0-4/cycle)
+ * 0x93 -- Quad 1 wdat flops inuse (0-4/cycle)
+ * 0xA0 -- Quad 2 index bus inuse
+ * 0xA1 -- Quad 2 read data bus inuse
+ * 0xA2 -- Quad 2 \# banks inuse (0-4/cycle)
+ * 0xA3 -- Quad 2 wdat flops inuse (0-4/cycle)
+ * 0xB0 -- Quad 3 index bus inuse
+ * 0xB1 -- Quad 3 read data bus inuse
+ * 0xB2 -- Quad 3 \# banks inuse (0-4/cycle)
+ * 0xB3 -- Quad 3 wdat flops inuse (0-4/cycle)
+ */
+union cvmx_l2c_tadx_prf {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_prf_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt3sel : 8; /**< Selects event to count for L2C_TAD_PFC3 */
+ uint64_t cnt2sel : 8; /**< Selects event to count for L2C_TAD_PFC2 */
+ uint64_t cnt1sel : 8; /**< Selects event to count for L2C_TAD_PFC1 */
+ uint64_t cnt0sel : 8; /**< Selects event to count for L2C_TAD_PFC0 */
+#else
+ uint64_t cnt0sel : 8;
+ uint64_t cnt1sel : 8;
+ uint64_t cnt2sel : 8;
+ uint64_t cnt3sel : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_l2c_tadx_prf_s cn61xx;
+ struct cvmx_l2c_tadx_prf_s cn63xx;
+ struct cvmx_l2c_tadx_prf_s cn63xxp1;
+ struct cvmx_l2c_tadx_prf_s cn66xx;
+ struct cvmx_l2c_tadx_prf_s cn68xx;
+ struct cvmx_l2c_tadx_prf_s cn68xxp1;
+ struct cvmx_l2c_tadx_prf_s cnf71xx;
+};
+typedef union cvmx_l2c_tadx_prf cvmx_l2c_tadx_prf_t;
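+
+/*
+ * Illustrative sketch (not part of the imported SDK sources): programming
+ * one TAD performance-counter select through the union above. The event
+ * value 0x02 (L2 Tag Miss) comes from the CNTnSEL table in the notes;
+ * cvmx_read_csr()/cvmx_write_csr() are the usual CSR accessors and
+ * CVMX_L2C_TADX_PRF() the address macro for this register.
+ */
+static inline void example_count_tag_misses(int tad)
+{
+ cvmx_l2c_tadx_prf_t prf;
+
+ prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(tad));
+ prf.s.cnt0sel = 0x02; /* L2 Tag Miss -> counted by L2C_TAD_PFC0 */
+ cvmx_write_csr(CVMX_L2C_TADX_PRF(tad), prf.u64);
+}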
+
+/**
+ * cvmx_l2c_tad#_tag
+ *
+ * L2C_TAD_TAG = L2C tag data
+ *
+ * Description: holds the tag information for LTGL2I and STGL2I commands
+ *
+ * Notes:
+ * (1) For 63xx, TAG[35] must be written zero for STGL2I's or the operation is undefined. During normal
+ * operation, TAG[35] will also read 0.
+ *
+ * (2) If setting the LOCK bit, the USE bit should also be set or operation is undefined.
+ *
+ * (3) The tag is the corresponding bits from the L2C+LMC internal L2/DRAM byte address.
+ */
+union cvmx_l2c_tadx_tag {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_tag_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t ecc : 6; /**< The tag ECC */
+ uint64_t reserved_36_39 : 4;
+ uint64_t tag : 19; /**< The tag (see notes 1 and 3) */
+ uint64_t reserved_4_16 : 13;
+ uint64_t use : 1; /**< The LRU use bit */
+ uint64_t valid : 1; /**< The valid bit */
+ uint64_t dirty : 1; /**< The dirty bit */
+ uint64_t lock : 1; /**< The lock bit */
+#else
+ uint64_t lock : 1;
+ uint64_t dirty : 1;
+ uint64_t valid : 1;
+ uint64_t use : 1;
+ uint64_t reserved_4_16 : 13;
+ uint64_t tag : 19;
+ uint64_t reserved_36_39 : 4;
+ uint64_t ecc : 6;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } s;
+ struct cvmx_l2c_tadx_tag_s cn61xx;
+ struct cvmx_l2c_tadx_tag_s cn63xx;
+ struct cvmx_l2c_tadx_tag_s cn63xxp1;
+ struct cvmx_l2c_tadx_tag_s cn66xx;
+ struct cvmx_l2c_tadx_tag_s cn68xx;
+ struct cvmx_l2c_tadx_tag_s cn68xxp1;
+ struct cvmx_l2c_tadx_tag_s cnf71xx;
+};
+typedef union cvmx_l2c_tadx_tag cvmx_l2c_tadx_tag_t;
+
+/**
+ * cvmx_l2c_ver_id
+ *
+ * L2C_VER_ID = L2C Virtualization ID Error Register
+ *
+ * Description: records virtualization IDs associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts.
+ */
+union cvmx_l2c_ver_id {
+ uint64_t u64;
+ struct cvmx_l2c_ver_id_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Mask of virtualization IDs which had a
+ HOLEWR/BIGWR/VRTWR error */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_l2c_ver_id_s cn61xx;
+ struct cvmx_l2c_ver_id_s cn63xx;
+ struct cvmx_l2c_ver_id_s cn63xxp1;
+ struct cvmx_l2c_ver_id_s cn66xx;
+ struct cvmx_l2c_ver_id_s cn68xx;
+ struct cvmx_l2c_ver_id_s cn68xxp1;
+ struct cvmx_l2c_ver_id_s cnf71xx;
+};
+typedef union cvmx_l2c_ver_id cvmx_l2c_ver_id_t;
+
+/**
+ * cvmx_l2c_ver_iob
+ *
+ * L2C_VER_IOB = L2C Virtualization ID IOB Error Register
+ *
+ * Description: records IOBs associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts.
+ */
+union cvmx_l2c_ver_iob {
+ uint64_t u64;
+ struct cvmx_l2c_ver_iob_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t mask : 2; /**< Mask of IOBs which had a HOLEWR/BIGWR/VRTWR error */
+#else
+ uint64_t mask : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_l2c_ver_iob_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t mask : 1; /**< Mask of IOBs which had a HOLEWR/BIGWR/VRTWR error */
+#else
+ uint64_t mask : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn61xx;
+ struct cvmx_l2c_ver_iob_cn61xx cn63xx;
+ struct cvmx_l2c_ver_iob_cn61xx cn63xxp1;
+ struct cvmx_l2c_ver_iob_cn61xx cn66xx;
+ struct cvmx_l2c_ver_iob_s cn68xx;
+ struct cvmx_l2c_ver_iob_s cn68xxp1;
+ struct cvmx_l2c_ver_iob_cn61xx cnf71xx;
+};
+typedef union cvmx_l2c_ver_iob cvmx_l2c_ver_iob_t;
+
+/**
+ * cvmx_l2c_ver_msc
+ *
+ * L2C_VER_MSC = L2C Virtualization Miscellaneous Error Register (not in 63xx pass 1.x)
+ *
+ * Description: records type of command associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts
+ */
+union cvmx_l2c_ver_msc {
+ uint64_t u64;
+ struct cvmx_l2c_ver_msc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t invl2 : 1; /**< If set, an INVL2 caused HOLEWR/BIGWR/VRT* to set */
+ uint64_t dwb : 1; /**< If set, a DWB caused HOLEWR/BIGWR/VRT* to set */
+#else
+ uint64_t dwb : 1;
+ uint64_t invl2 : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_l2c_ver_msc_s cn61xx;
+ struct cvmx_l2c_ver_msc_s cn63xx;
+ struct cvmx_l2c_ver_msc_s cn66xx;
+ struct cvmx_l2c_ver_msc_s cn68xx;
+ struct cvmx_l2c_ver_msc_s cn68xxp1;
+ struct cvmx_l2c_ver_msc_s cnf71xx;
+};
+typedef union cvmx_l2c_ver_msc cvmx_l2c_ver_msc_t;
+
+/**
+ * cvmx_l2c_ver_pp
+ *
+ * L2C_VER_PP = L2C Virtualization ID PP Error Register
+ *
+ * Description: records PPs associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts.
+ */
+union cvmx_l2c_ver_pp {
+ uint64_t u64;
+ struct cvmx_l2c_ver_pp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t mask : 32; /**< Mask of PPs which had a HOLEWR/BIGWR/VRTWR error */
+#else
+ uint64_t mask : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_l2c_ver_pp_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mask : 4; /**< Mask of PPs which had a HOLEWR/BIGWR/VRTWR error */
+#else
+ uint64_t mask : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn61xx;
+ struct cvmx_l2c_ver_pp_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t mask : 6; /**< Mask of PPs which had a HOLEWR/BIGWR/VRTWR error */
+#else
+ uint64_t mask : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn63xx;
+ struct cvmx_l2c_ver_pp_cn63xx cn63xxp1;
+ struct cvmx_l2c_ver_pp_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t mask : 10; /**< Mask of PPs which had a HOLEWR/BIGWR/VRTWR error */
+#else
+ uint64_t mask : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn66xx;
+ struct cvmx_l2c_ver_pp_s cn68xx;
+ struct cvmx_l2c_ver_pp_s cn68xxp1;
+ struct cvmx_l2c_ver_pp_cn61xx cnf71xx;
+};
+typedef union cvmx_l2c_ver_pp cvmx_l2c_ver_pp_t;
+
+/**
+ * cvmx_l2c_virtid_iob#
+ *
+ * L2C_VIRTID_IOB = L2C IOB virtualization ID
+ *
+ * Description:
+ */
+union cvmx_l2c_virtid_iobx {
+ uint64_t u64;
+ struct cvmx_l2c_virtid_iobx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t dwbid : 6; /**< Virtualization ID to use for DWB commands */
+ uint64_t reserved_6_7 : 2;
+ uint64_t id : 6; /**< Virtualization ID to use for non-DWB commands */
+#else
+ uint64_t id : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t dwbid : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_l2c_virtid_iobx_s cn61xx;
+ struct cvmx_l2c_virtid_iobx_s cn63xx;
+ struct cvmx_l2c_virtid_iobx_s cn63xxp1;
+ struct cvmx_l2c_virtid_iobx_s cn66xx;
+ struct cvmx_l2c_virtid_iobx_s cn68xx;
+ struct cvmx_l2c_virtid_iobx_s cn68xxp1;
+ struct cvmx_l2c_virtid_iobx_s cnf71xx;
+};
+typedef union cvmx_l2c_virtid_iobx cvmx_l2c_virtid_iobx_t;
+
+/**
+ * cvmx_l2c_virtid_pp#
+ *
+ * L2C_VIRTID_PP = L2C PP virtualization ID
+ *
+ * Description:
+ */
+union cvmx_l2c_virtid_ppx {
+ uint64_t u64;
+ struct cvmx_l2c_virtid_ppx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t id : 6; /**< Virtualization ID to use for this PP. */
+#else
+ uint64_t id : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_l2c_virtid_ppx_s cn61xx;
+ struct cvmx_l2c_virtid_ppx_s cn63xx;
+ struct cvmx_l2c_virtid_ppx_s cn63xxp1;
+ struct cvmx_l2c_virtid_ppx_s cn66xx;
+ struct cvmx_l2c_virtid_ppx_s cn68xx;
+ struct cvmx_l2c_virtid_ppx_s cn68xxp1;
+ struct cvmx_l2c_virtid_ppx_s cnf71xx;
+};
+typedef union cvmx_l2c_virtid_ppx cvmx_l2c_virtid_ppx_t;
+
+/**
+ * cvmx_l2c_vrt_ctl
+ *
+ * L2C_VRT_CTL = L2C Virtualization control register
+ *
+ */
+union cvmx_l2c_vrt_ctl {
+ uint64_t u64;
+ struct cvmx_l2c_vrt_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t ooberr : 1; /**< Whether out of bounds writes are an error
+ Determines virtualization hardware behavior for
+ a store to an L2/DRAM address larger than
+ indicated by MEMSZ. If OOBERR is set, all these
+ stores (from any virtualization ID) are blocked. If
+ OOBERR is clear, none of these stores are blocked. */
+ uint64_t reserved_7_7 : 1;
+ uint64_t memsz : 3; /**< Memory space coverage of L2C_VRT_MEM (encoded)
+ 0 = 1GB
+ 1 = 2GB
+ 2 = 4GB
+ 3 = 8GB
+ 4 = 16GB
+ 5 = 32GB
+ 6 = 64GB (**reserved in 63xx**)
+ 7 = 128GB (**reserved in 63xx**) */
+ uint64_t numid : 3; /**< Number of allowed virtualization IDs (encoded)
+ 0 = 2
+ 1 = 4
+ 2 = 8
+ 3 = 16
+ 4 = 32
+ 5 = 64
+ 6,7 illegal
+ Violations of this limit cause
+ L2C to set L2C_INT_REG[VRTIDRNG]. */
+ uint64_t enable : 1; /**< Global virtualization enable
+ When ENABLE is clear, stores are never blocked by
+ the L2C virtualization hardware and none of NUMID,
+ MEMSZ, OOBERR are used. */
+#else
+ uint64_t enable : 1;
+ uint64_t numid : 3;
+ uint64_t memsz : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t ooberr : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_l2c_vrt_ctl_s cn61xx;
+ struct cvmx_l2c_vrt_ctl_s cn63xx;
+ struct cvmx_l2c_vrt_ctl_s cn63xxp1;
+ struct cvmx_l2c_vrt_ctl_s cn66xx;
+ struct cvmx_l2c_vrt_ctl_s cn68xx;
+ struct cvmx_l2c_vrt_ctl_s cn68xxp1;
+ struct cvmx_l2c_vrt_ctl_s cnf71xx;
+};
+typedef union cvmx_l2c_vrt_ctl cvmx_l2c_vrt_ctl_t;
+
+/**
+ * cvmx_l2c_vrt_mem#
+ *
+ * L2C_VRT_MEM = L2C Virtualization Memory
+ *
+ * Description: Virtualization memory mapped region. There are 1024 32b
+ * byte-parity protected entries.
+ *
+ * Notes:
+ * When a DATA bit is set in L2C_VRT_MEM when L2C virtualization is enabled, L2C
+ * prevents the selected virtual machine from storing to the selected L2/DRAM region.
+ * L2C uses L2C_VRT_MEM to block stores when:
+ * - L2C_VRT_CTL[ENABLE] is set, and
+ * - the address of the store exists in L2C+LMC internal L2/DRAM Address space
+ * and is within the L2C_VRT_CTL[MEMSZ] bounds, and
+ * - the virtID of the store is within the L2C_VRT_CTL[NUMID] bounds
+ *
+ * L2C_VRT_MEM is never used for these L2C transactions which are always allowed:
+ * - L2C CMI L2/DRAM transactions that cannot modify L2/DRAM, and
+ * - any L2/DRAM transaction originated from L2C_XMC_CMD
+ *
+ * L2C_VRT_MEM contains one DATA bit per L2C+LMC internal L2/DRAM region and virtID indicating whether the store
+ * to the region is allowed. The granularity of the checking is the region size, which is:
+ * 2 ^^ (L2C_VRT_CTL[NUMID]+L2C_VRT_CTL[MEMSZ]+16)
+ * which ranges from a minimum of 64KB to a maximum of 256MB, depending on the size
+ * of L2/DRAM that is protected and the number of virtual machines.
+ *
+ * The L2C_VRT_MEM DATA bit that L2C uses is:
+ *
+ * l2c_vrt_mem_bit_index = address >> (L2C_VRT_CTL[MEMSZ]+L2C_VRT_CTL[NUMID]+16); // address is a byte address
+ * l2c_vrt_mem_bit_index = l2c_vrt_mem_bit_index | (virtID << (14-L2C_VRT_CTL[NUMID]));
+ *
+ * L2C_VRT_MEM(l2c_vrt_mem_bit_index >> 5)[DATA<l2c_vrt_mem_bit_index & 0x1F>] is used
+ *
+ * A specific example:
+ *
+ * L2C_VRT_CTL[NUMID]=2 (i.e. 8 virtual machine ID's used)
+ * L2C_VRT_CTL[MEMSZ]=4 (i.e. L2C_VRT_MEM covers 16 GB)
+ *
+ * L2/DRAM region size (granularity) is 4MB
+ *
+ * l2c_vrt_mem_bit_index<14:12> = virtID<2:0>
+ * l2c_vrt_mem_bit_index<11:0> = address<33:22>
+ *
+ * For L2/DRAM physical address 0x51000000 with virtID=5:
+ * L2C_VRT_MEM648[DATA<4>] determines when the store is allowed (648 is decimal, not hex)
+ */
+union cvmx_l2c_vrt_memx {
+ uint64_t u64;
+ struct cvmx_l2c_vrt_memx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t parity : 4; /**< Parity to write into (or read from) the
+ virtualization memory.
+ PARITY<i> is the even parity of DATA<(i*8)+7:i*8> */
+ uint64_t data : 32; /**< Data to write into (or read from) the
+ virtualization memory. */
+#else
+ uint64_t data : 32;
+ uint64_t parity : 4;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_l2c_vrt_memx_s cn61xx;
+ struct cvmx_l2c_vrt_memx_s cn63xx;
+ struct cvmx_l2c_vrt_memx_s cn63xxp1;
+ struct cvmx_l2c_vrt_memx_s cn66xx;
+ struct cvmx_l2c_vrt_memx_s cn68xx;
+ struct cvmx_l2c_vrt_memx_s cn68xxp1;
+ struct cvmx_l2c_vrt_memx_s cnf71xx;
+};
+typedef union cvmx_l2c_vrt_memx cvmx_l2c_vrt_memx_t;
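+
+/*
+ * Illustrative sketch (not part of the imported SDK sources): locating the
+ * L2C_VRT_MEM entry and DATA bit for a store, following the formula in the
+ * notes above. 'addr' is the L2C+LMC internal (hole-removed) byte address,
+ * which is why the worked example above yields entry 648 rather than 650
+ * for physical address 0x51000000; 'memsz' and 'numid' are the current
+ * L2C_VRT_CTL[MEMSZ] and L2C_VRT_CTL[NUMID] values.
+ */
+static inline void example_vrt_mem_locate(uint64_t addr, uint64_t virtid,
+ int memsz, int numid)
+{
+ uint64_t bit = addr >> (memsz + numid + 16);
+ bit |= virtid << (14 - numid);
+ /* CSR number is bit/32; the guarding bit is DATA<bit%32> */
+ cvmx_dprintf("L2C_VRT_MEM%llu[DATA<%llu>]\n",
+ (unsigned long long)(bit >> 5), (unsigned long long)(bit & 0x1F));
+}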
+
+/**
+ * cvmx_l2c_wpar_iob#
+ *
+ * L2C_WPAR_IOB = L2C IOB way partitioning
+ *
+ *
+ * Notes:
+ * (1) The read value of MASK will include bits set because of the L2C cripple fuses.
+ *
+ */
+union cvmx_l2c_wpar_iobx {
+ uint64_t u64;
+ struct cvmx_l2c_wpar_iobx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mask : 16; /**< Way partitioning mask. (1 means do not use) */
+#else
+ uint64_t mask : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_l2c_wpar_iobx_s cn61xx;
+ struct cvmx_l2c_wpar_iobx_s cn63xx;
+ struct cvmx_l2c_wpar_iobx_s cn63xxp1;
+ struct cvmx_l2c_wpar_iobx_s cn66xx;
+ struct cvmx_l2c_wpar_iobx_s cn68xx;
+ struct cvmx_l2c_wpar_iobx_s cn68xxp1;
+ struct cvmx_l2c_wpar_iobx_s cnf71xx;
+};
+typedef union cvmx_l2c_wpar_iobx cvmx_l2c_wpar_iobx_t;
+
+/**
+ * cvmx_l2c_wpar_pp#
+ *
+ * L2C_WPAR_PP = L2C PP way partitioning
+ *
+ *
+ * Notes:
+ * (1) The read value of MASK will include bits set because of the L2C cripple fuses.
+ *
+ */
+union cvmx_l2c_wpar_ppx {
+ uint64_t u64;
+ struct cvmx_l2c_wpar_ppx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mask : 16; /**< Way partitioning mask. (1 means do not use) */
+#else
+ uint64_t mask : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_l2c_wpar_ppx_s cn61xx;
+ struct cvmx_l2c_wpar_ppx_s cn63xx;
+ struct cvmx_l2c_wpar_ppx_s cn63xxp1;
+ struct cvmx_l2c_wpar_ppx_s cn66xx;
+ struct cvmx_l2c_wpar_ppx_s cn68xx;
+ struct cvmx_l2c_wpar_ppx_s cn68xxp1;
+ struct cvmx_l2c_wpar_ppx_s cnf71xx;
+};
+typedef union cvmx_l2c_wpar_ppx cvmx_l2c_wpar_ppx_t;
+
+/**
+ * cvmx_l2c_xmc#_pfc
+ *
+ * L2C_XMC_PFC = L2C XMC Performance Counter(s)
+ *
+ */
+union cvmx_l2c_xmcx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_xmcx_pfc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t count : 64; /**< Current counter value */
+#else
+ uint64_t count : 64;
+#endif
+ } s;
+ struct cvmx_l2c_xmcx_pfc_s cn61xx;
+ struct cvmx_l2c_xmcx_pfc_s cn63xx;
+ struct cvmx_l2c_xmcx_pfc_s cn63xxp1;
+ struct cvmx_l2c_xmcx_pfc_s cn66xx;
+ struct cvmx_l2c_xmcx_pfc_s cn68xx;
+ struct cvmx_l2c_xmcx_pfc_s cn68xxp1;
+ struct cvmx_l2c_xmcx_pfc_s cnf71xx;
+};
+typedef union cvmx_l2c_xmcx_pfc cvmx_l2c_xmcx_pfc_t;
+
+/**
+ * cvmx_l2c_xmc_cmd
+ *
+ * L2C_XMC_CMD = L2C XMC command register
+ *
+ *
+ * Notes:
+ * (1) the XMC command chosen MUST NOT be an IOB-destined command or operation is UNDEFINED.
+ *
+ * (2) the XMC command will have sid forced to IOB, did forced to L2C, no virtualization checks
+ * performed (always pass), and xmdmsk forced to 0. Note that this implies that commands which
+ * REQUIRE an XMD cycle (STP,STC,SAA,FAA,FAS) should not be used or the results are unpredictable.
+ * The sid=IOB means that the way partitioning used for the command is L2C_WPAR_IOB.
+ * None of L2C_QOS_IOB, L2C_QOS_PP, L2C_VIRTID_IOB, L2C_VIRTID_PP are used for these commands.
+ *
+ * (3) any responses generated by the XMC command will be forced to PP7 (a non-existent PP) effectively
+ * causing them to be ignored. Generated STINs, however, will correctly invalidate the required
+ * PPs.
+ *
+ * (4) any L2D read generated by the XMC command will record the syndrome information in
+ * L2C_TAD_ECC0/1. If ECC is disabled prior to the CSR write this provides the ability to read the
+ * ECC bits directly. If ECC is not disabled this should log 0's (assuming no ECC errors were
+ * found in the block).
+ *
+ * (5) A write which arrives while the INUSE bit is set will block until the INUSE bit clears. This
+ * gives software 2 options when needing to issue a stream of writes to L2C_XMC_CMD: polling on the
+ * INUSE bit, or allowing HW to handle the interlock -- at the expense of locking up the RSL bus
+ * for potentially tens of cycles at a time while waiting for an available LFB/VAB entry.
+ *
+ * (6) The address written to L2C_XMC_CMD is a 38-bit OCTEON physical address. L2C performs hole removal and
+ * index aliasing (if enabled) on the written address and uses that for the command. This hole
+ * removed/index aliased 38-bit address is what is returned on a read of the L2C_XMC_CMD register.
+ */
+union cvmx_l2c_xmc_cmd {
+ uint64_t u64;
+ struct cvmx_l2c_xmc_cmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t inuse : 1; /**< Set to 1 by HW upon receiving a write, cleared when
+ command has issued (not necessarily completed, but
+ ordered relative to other traffic) and HW can accept
+ another command. */
+ uint64_t cmd : 6; /**< Command to use for simulated XMC request */
+ uint64_t reserved_38_56 : 19;
+ uint64_t addr : 38; /**< Address to use for simulated XMC request (see Note 6) */
+#else
+ uint64_t addr : 38;
+ uint64_t reserved_38_56 : 19;
+ uint64_t cmd : 6;
+ uint64_t inuse : 1;
+#endif
+ } s;
+ struct cvmx_l2c_xmc_cmd_s cn61xx;
+ struct cvmx_l2c_xmc_cmd_s cn63xx;
+ struct cvmx_l2c_xmc_cmd_s cn63xxp1;
+ struct cvmx_l2c_xmc_cmd_s cn66xx;
+ struct cvmx_l2c_xmc_cmd_s cn68xx;
+ struct cvmx_l2c_xmc_cmd_s cn68xxp1;
+ struct cvmx_l2c_xmc_cmd_s cnf71xx;
+};
+typedef union cvmx_l2c_xmc_cmd cvmx_l2c_xmc_cmd_t;
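+
+/*
+ * Illustrative sketch (not part of the imported SDK sources): the polling
+ * option from Note 5. Software spins on INUSE before writing the next
+ * command instead of letting hardware stall the RSL bus. CVMX_L2C_XMC_CMD
+ * is assumed to be the address macro defined alongside these unions; 'cmd'
+ * and 'addr' must satisfy Notes 1, 2 and 6 above.
+ */
+static inline void example_xmc_cmd_issue(uint64_t cmd, uint64_t addr)
+{
+ cvmx_l2c_xmc_cmd_t xmc;
+
+ do {
+ xmc.u64 = cvmx_read_csr(CVMX_L2C_XMC_CMD);
+ } while (xmc.s.inuse);
+
+ xmc.u64 = 0;
+ xmc.s.cmd = cmd;
+ xmc.s.addr = addr;
+ cvmx_write_csr(CVMX_L2C_XMC_CMD, xmc.u64);
+}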
+
+/**
+ * cvmx_l2c_xmd#_pfc
+ *
+ * L2C_XMD_PFC = L2C XMD Performance Counter(s)
+ *
+ */
+union cvmx_l2c_xmdx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_xmdx_pfc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t count : 64; /**< Current counter value */
+#else
+ uint64_t count : 64;
+#endif
+ } s;
+ struct cvmx_l2c_xmdx_pfc_s cn61xx;
+ struct cvmx_l2c_xmdx_pfc_s cn63xx;
+ struct cvmx_l2c_xmdx_pfc_s cn63xxp1;
+ struct cvmx_l2c_xmdx_pfc_s cn66xx;
+ struct cvmx_l2c_xmdx_pfc_s cn68xx;
+ struct cvmx_l2c_xmdx_pfc_s cn68xxp1;
+ struct cvmx_l2c_xmdx_pfc_s cnf71xx;
+};
+typedef union cvmx_l2c_xmdx_pfc cvmx_l2c_xmdx_pfc_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-l2c-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-l2c.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-l2c.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-l2c.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1629 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Implementation of the Level 2 Cache (L2C) control,
+ * measurement, and debugging facilities.
+ *
+ * <hr>$Revision: 70215 $<hr>
+ *
+ */
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-l2c.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "cvmx-config.h"
+#endif
+#include "cvmx.h"
+#include "cvmx-l2c.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-interrupt.h"
+#endif
+
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+/*
+ * This spinlock is used internally to ensure that only one core is
+ * performing certain L2 operations at a time.
+ *
+ * NOTE: This only protects calls from within a single application -
+ * if multiple applications or operating systems are running, then it
+ * is up to the user program to coordinate between them.
+ */
+CVMX_SHARED cvmx_spinlock_t cvmx_l2c_spinlock;
+#endif
+
+int cvmx_l2c_get_core_way_partition(uint32_t core)
+{
+ uint32_t field;
+
+ /* Validate the core number */
+ if (core >= cvmx_octeon_num_cores())
+ return -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ return (cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff);
+
+ /*
+ * Use the lower two bits of the coreNumber to determine the
+ * bit offset of the UMSK[] field in the L2C_SPAR register.
+ */
+ field = (core & 0x3) * 8;
+
+ /*
+ * Return the UMSK[] field from the appropriate L2C_SPAR
+ * register based on the coreNumber.
+ */
+
+ switch (core & 0xC) {
+ case 0x0:
+ return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field;
+ case 0x4:
+ return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field;
+ case 0x8:
+ return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field;
+ case 0xC:
+ return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field;
+ }
+ return 0;
+}
+
+int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
+{
+ uint32_t field;
+ uint32_t valid_mask;
+
+ valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
+
+ mask &= valid_mask;
+
+ /* A UMSK setting which blocks all L2C Ways is an error on some chips */
+ if (mask == valid_mask && (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ return -1;
+
+ /* Validate the core number */
+ if (core >= cvmx_octeon_num_cores())
+ return -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
+ return 0;
+ }
+
+ /*
+ * Use the lower two bits of core to determine the bit offset of the
+ * UMSK[] field in the L2C_SPAR register.
+ */
+ field = (core & 0x3) * 8;
+
+ /*
+ * Assign the new mask setting to the UMSK[] field in the appropriate
+ * L2C_SPAR register based on the core_num.
+ *
+ */
+ switch (core & 0xC) {
+ case 0x0:
+ cvmx_write_csr(CVMX_L2C_SPAR0,
+ (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
+ mask << field);
+ break;
+ case 0x4:
+ cvmx_write_csr(CVMX_L2C_SPAR1,
+ (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
+ mask << field);
+ break;
+ case 0x8:
+ cvmx_write_csr(CVMX_L2C_SPAR2,
+ (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
+ mask << field);
+ break;
+ case 0xC:
+ cvmx_write_csr(CVMX_L2C_SPAR3,
+ (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
+ mask << field);
+ break;
+ }
+ return 0;
+}
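+
+/*
+ * Illustrative usage sketch (not part of the imported SDK sources):
+ * reserving way 0 for core 0 by masking it off for every other core. A set
+ * bit in the mask means "do not use this way", so OR-ing in bit 0 removes
+ * way 0 from a core's allocation.
+ */
+static void example_reserve_way0_for_core0(void)
+{
+ uint32_t core;
+
+ for (core = 1; core < cvmx_octeon_num_cores(); core++)
+ cvmx_l2c_set_core_way_partition(core,
+ cvmx_l2c_get_core_way_partition(core) | 1);
+}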
+
+int cvmx_l2c_set_hw_way_partition(uint32_t mask)
+{
+ uint32_t valid_mask;
+
+ valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
+ mask &= valid_mask;
+
+ /* A UMSK setting which blocks all L2C Ways is an error on some chips */
+ if (mask == valid_mask && (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ return -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
+ else
+ cvmx_write_csr(CVMX_L2C_SPAR4,
+ (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
+ return 0;
+}
+
+int cvmx_l2c_get_hw_way_partition(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff;
+ else
+ return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
+}
+
+int cvmx_l2c_set_hw_way_partition2(uint32_t mask)
+{
+ uint32_t valid_mask;
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return -1;
+
+ valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
+ mask &= valid_mask;
+ cvmx_write_csr(CVMX_L2C_WPAR_IOBX(1), mask);
+ return 0;
+}
+
+int cvmx_l2c_get_hw_way_partition2(void)
+{
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ cvmx_warn("only one IOB on this chip");
+ return -1;
+ }
+ return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(1)) & 0xffff;
+}
+
+
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
+ uint32_t clear_on_read)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_l2c_pfctl pfctl;
+
+ pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
+
+ switch (counter) {
+ case 0:
+ pfctl.s.cnt0sel = event;
+ pfctl.s.cnt0ena = 1;
+ pfctl.s.cnt0rdclr = clear_on_read;
+ break;
+ case 1:
+ pfctl.s.cnt1sel = event;
+ pfctl.s.cnt1ena = 1;
+ pfctl.s.cnt1rdclr = clear_on_read;
+ break;
+ case 2:
+ pfctl.s.cnt2sel = event;
+ pfctl.s.cnt2ena = 1;
+ pfctl.s.cnt2rdclr = clear_on_read;
+ break;
+ case 3:
+ default:
+ pfctl.s.cnt3sel = event;
+ pfctl.s.cnt3ena = 1;
+ pfctl.s.cnt3rdclr = clear_on_read;
+ break;
+ }
+
+ cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+ } else {
+ union cvmx_l2c_tadx_prf l2c_tadx_prf;
+ int tad;
+
+ cvmx_warn("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
+
+ cvmx_warn_if(clear_on_read, "L2C counters don't support clear on read for this chip\n");
+
+ l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));
+
+ switch (counter) {
+ case 0:
+ l2c_tadx_prf.s.cnt0sel = event;
+ break;
+ case 1:
+ l2c_tadx_prf.s.cnt1sel = event;
+ break;
+ case 2:
+ l2c_tadx_prf.s.cnt2sel = event;
+ break;
+ default:
+ case 3:
+ l2c_tadx_prf.s.cnt3sel = event;
+ break;
+ }
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ cvmx_write_csr(CVMX_L2C_TADX_PRF(tad),
+ l2c_tadx_prf.u64);
+ }
+}
+
+uint64_t cvmx_l2c_read_perf(uint32_t counter)
+{
+ switch (counter) {
+ case 0:
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC0);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
+ return counter;
+ }
+ case 1:
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC1);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
+ return counter;
+ }
+ case 2:
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC2);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
+ return counter;
+ }
+ case 3:
+ default:
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC3);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
+ return counter;
+ }
+ }
+}
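+
+/*
+ * Illustrative usage sketch (not part of the imported SDK sources):
+ * configure counter 0 and sample it around a workload. On CN3XXX/CN5XXX
+ * 'event' is a cvmx_l2c_event enumerator; on later chips it is
+ * reinterpreted as a TAD event select, as the warning in
+ * cvmx_l2c_config_perf explains.
+ */
+static uint64_t example_measure_events(void (*workload)(void),
+ enum cvmx_l2c_event event)
+{
+ uint64_t before, after;
+
+ cvmx_l2c_config_perf(0, event, 0); /* counter 0, no clear-on-read */
+ before = cvmx_l2c_read_perf(0);
+ workload();
+ after = cvmx_l2c_read_perf(0);
+ return after - before;
+}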
+
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+/**
+ * @INTERNAL
+ * Helper function used to fault in cache lines for L2 cache locking
+ *
+ * @param addr Address of base of memory region to read into L2 cache
+ * @param len Length (in bytes) of region to fault in
+ */
+static void fault_in(uint64_t addr, int len)
+{
+ volatile char *ptr;
+ volatile char dummy;
+ /*
+ * Adjust addr and length so we get all cache lines even for
+ * small ranges spanning two cache lines.
+ */
+ len += addr & CVMX_CACHE_LINE_MASK;
+ addr &= ~CVMX_CACHE_LINE_MASK;
+ ptr = (volatile char *)cvmx_phys_to_ptr(addr);
+ /*
+ * Invalidate L1 cache to make sure all loads result in data
+ * being in L2.
+ */
+ CVMX_DCACHE_INVALIDATE;
+ while (len > 0) {
+ dummy += *ptr;
+ len -= CVMX_CACHE_LINE_SIZE;
+ ptr += CVMX_CACHE_LINE_SIZE;
+ }
+}
+
+int cvmx_l2c_lock_line(uint64_t addr)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+ uint64_t assoc = cvmx_l2c_get_num_assoc();
+ uint32_t tag = cvmx_l2c_v2_address_to_tag(addr);
+ uint64_t indext = cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT;
+ uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, indext);
+ uint64_t way;
+ uint32_t tad;
+ union cvmx_l2c_tadx_tag l2c_tadx_tag;
+
+ if (tag == 0xFFFFFFFF) {
+ cvmx_dprintf("ERROR: cvmx_l2c_lock_line: addr 0x%llx in LMC hole."
+ "\n", (unsigned long long) addr);
+ return -1;
+ }
+
+ tad = cvmx_l2c_address_to_tad(addr);
+
+ /* cvmx_dprintf("shift=%d index=%lx tag=%x\n",shift, index, tag); */
+ CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);
+ CVMX_SYNCW;
+ /* Make sure we were able to lock the line */
+ for (way = 0; way < assoc; way++) {
+ uint64_t caddr = index | (way << shift);
+ CVMX_CACHE_LTGL2I(caddr, 0);
+ /* make sure CVMX_L2C_TADX_TAG is updated */
+ CVMX_SYNC;
+ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(tad));
+ if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
+ break;
+ /* cvmx_printf("caddr=%lx tad=%d tagu64=%lx valid=%x tag=%x \n", caddr,
+ tad, l2c_tadx_tag.u64, l2c_tadx_tag.s.valid, l2c_tadx_tag.s.tag); */
+ }
+
+ /* Check if a valid line is found */
+ if (way >= assoc) {
+ /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at"
+ " 0x%llx address\n", (unsigned long long)addr); */
+ return -1;
+ }
+
+ /* Check if lock bit is not set */
+ if (!l2c_tadx_tag.s.lock) {
+ /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at "
+ "0x%llx address\n", (unsigned long long)addr); */
+ return -1;
+ }
+ return 0;
+ } else {
+ int retval = 0;
+ union cvmx_l2c_dbg l2cdbg;
+ union cvmx_l2c_lckbase lckbase;
+ union cvmx_l2c_lckoff lckoff;
+ union cvmx_l2t_err l2t_err;
+
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+
+ l2cdbg.u64 = 0;
+ lckbase.u64 = 0;
+ lckoff.u64 = 0;
+
+ /* Clear l2t error bits if set */
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ l2t_err.s.lckerr = 1;
+ l2t_err.s.lckerr2 = 1;
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+
+ addr &= ~CVMX_CACHE_LINE_MASK;
+
+ /* Set this core as debug core */
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
+ cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
+ cvmx_read_csr(CVMX_L2C_LCKOFF);
+
+ if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
+ int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * cvmx_l2c_get_set_bits() - 1;
+ uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> cvmx_l2c_get_set_bits();
+ lckbase.s.lck_base = addr_tmp >> 7;
+ } else {
+ lckbase.s.lck_base = addr >> 7;
+ }
+
+ lckbase.s.lck_ena = 1;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ /* Make sure it gets there */
+ cvmx_read_csr(CVMX_L2C_LCKBASE);
+
+ fault_in(addr, CVMX_CACHE_LINE_SIZE);
+
+ lckbase.s.lck_ena = 0;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ /* Make sure it gets there */
+ cvmx_read_csr(CVMX_L2C_LCKBASE);
+
+ /* Stop being debug core */
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
+ retval = 1; /* We were unable to lock the line */
+
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ return retval;
+ }
+}
+
+int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
+{
+ int retval = 0;
+
+ /* Round start/end to cache line boundaries */
+ len += start & CVMX_CACHE_LINE_MASK;
+ start &= ~CVMX_CACHE_LINE_MASK;
+ len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+
+ while (len) {
+ if (cvmx_l2c_lock_line(start) != 0)
+ retval--;
+ start += CVMX_CACHE_LINE_SIZE;
+ len -= CVMX_CACHE_LINE_SIZE;
+ }
+ return retval;
+}
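+
+/*
+ * Illustrative usage sketch (not part of the imported SDK sources): pinning
+ * a buffer in L2. cvmx_l2c_lock_mem_region() returns 0 minus the number of
+ * lines that could not be locked, so 0 means the whole region is resident.
+ * cvmx_ptr_to_phys() is the usual pointer-to-physical helper from cvmx.h.
+ */
+static int example_pin_buffer(void *buf, uint64_t len)
+{
+ int failed = cvmx_l2c_lock_mem_region(cvmx_ptr_to_phys(buf), len);
+
+ if (failed)
+ cvmx_dprintf("WARNING: %d line(s) could not be locked\n", -failed);
+ return failed;
+}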
+
+void cvmx_l2c_flush(void)
+{
+ uint64_t assoc, set;
+ uint64_t n_assoc, n_set;
+
+ n_set = cvmx_l2c_get_num_sets();
+ n_assoc = cvmx_l2c_get_num_assoc();
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ uint64_t address;
+ /* These may look like constants, but they aren't... */
+ int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+ int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
+ for (set = 0; set < n_set; set++) {
+ for (assoc = 0; assoc < n_assoc; assoc++) {
+ address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (assoc << assoc_shift) | (set << set_shift));
+ CVMX_CACHE_WBIL2I(address, 0);
+ }
+ }
+ } else {
+ for (set = 0; set < n_set; set++)
+ for (assoc = 0; assoc < n_assoc; assoc++)
+ cvmx_l2c_flush_line(assoc, set);
+ }
+}
+
+int cvmx_l2c_unlock_line(uint64_t address)
+{
+ uint32_t tad = cvmx_l2c_address_to_tad(address);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ int assoc;
+ union cvmx_l2c_tag tag;
+ uint32_t tag_addr;
+ uint32_t index = cvmx_l2c_address_to_index(address);
+
+ tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+
+ /*
+ * For OcteonII, we can flush a line by using the physical
+ * address directly, so finding the cache line used by
+ * the address is only required to provide the proper
+ * return value for the function.
+ */
+ for (assoc = 0; assoc < cvmx_l2c_get_num_assoc(); assoc++) {
+ tag = cvmx_l2c_get_tag_v2(assoc, index, tad);
+
+ if (tag.s.V && (tag.s.addr == tag_addr)) {
+ CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
+ return tag.s.L;
+ }
+ }
+ } else {
+ int assoc;
+ union cvmx_l2c_tag tag;
+ uint32_t tag_addr;
+
+ uint32_t index = cvmx_l2c_address_to_index(address);
+
+ /* Compute portion of address that is stored in tag */
+ tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+ for (assoc = 0; assoc < cvmx_l2c_get_num_assoc(); assoc++) {
+ tag = cvmx_l2c_get_tag_v2(assoc, index, tad);
+
+ if (tag.s.V && (tag.s.addr == tag_addr)) {
+ cvmx_l2c_flush_line(assoc, index);
+ return tag.s.L;
+ }
+ }
+ }
+ return 0;
+}
+
+int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
+{
+ int num_unlocked = 0;
+ /* Round start/end to cache line boundaries */
+ len += start & CVMX_CACHE_LINE_MASK;
+ start &= ~CVMX_CACHE_LINE_MASK;
+ len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+ while (len > 0) {
+ num_unlocked += cvmx_l2c_unlock_line(start);
+ start += CVMX_CACHE_LINE_SIZE;
+ len -= CVMX_CACHE_LINE_SIZE;
+ }
+
+ return num_unlocked;
+}
+
+/*
+ * Internal l2c tag types. These are converted to a generic structure
+ * that can be used on all chips.
+ */
+union __cvmx_l2c_tag {
+ uint64_t u64;
+#ifdef __BIG_ENDIAN_BITFIELD
+ struct cvmx_l2c_tag_cn50xx {
+ uint64_t reserved:40;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:20; /* Phys mem addr (33..14) */
+ } cn50xx;
+ struct cvmx_l2c_tag_cn30xx {
+ uint64_t reserved:41;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:19; /* Phys mem addr (33..15) */
+ } cn30xx;
+ struct cvmx_l2c_tag_cn31xx {
+ uint64_t reserved:42;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:18; /* Phys mem addr (33..16) */
+ } cn31xx;
+ struct cvmx_l2c_tag_cn38xx {
+ uint64_t reserved:43;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:17; /* Phys mem addr (33..17) */
+ } cn38xx;
+ struct cvmx_l2c_tag_cn58xx {
+ uint64_t reserved:44;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:16; /* Phys mem addr (33..18) */
+ } cn58xx;
+#else
+ struct cvmx_l2c_tag_cn50xx {
+ uint64_t addr:20; /* Phys mem addr (33..14) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:40;
+ } cn50xx;
+ struct cvmx_l2c_tag_cn30xx {
+ uint64_t addr:19; /* Phys mem addr (33..15) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:41;
+ } cn30xx;
+ struct cvmx_l2c_tag_cn31xx {
+ uint64_t addr:18; /* Phys mem addr (33..16) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:42;
+ } cn31xx;
+ struct cvmx_l2c_tag_cn38xx {
+ uint64_t addr:17; /* Phys mem addr (33..17) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:43;
+ } cn38xx;
+ struct cvmx_l2c_tag_cn58xx {
+ uint64_t addr:16; /* Phys mem addr (33..18) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:44;
+ } cn58xx;
+#endif
+ struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
+ struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
+};
+
+
+/**
+ * @INTERNAL
+ * Function to read an L2C tag. This code makes the current core
+ * the 'debug core' for the L2. This code must only be executed by
+ * one core at a time.
+ *
+ * @param assoc Association (way) of the tag to dump
+ * @param index Index of the cacheline
+ *
+ * @return The Octeon model specific tag structure. This is
+ * translated by a wrapper function to a generic form that is
+ * easier for applications to use.
+ */
+static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
+{
+ uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
+ uint64_t core = cvmx_get_core_num();
+ union __cvmx_l2c_tag tag_val;
+ uint64_t dbg_addr = CVMX_L2C_DBG;
+ unsigned long flags;
+
+ union cvmx_l2c_dbg debug_val;
+ debug_val.u64 = 0;
+ /*
+ * For low core count parts, the core number is always small
+ * enough to stay in the correct field and not set any
+ * reserved bits.
+ */
+ debug_val.s.ppnum = core;
+ debug_val.s.l2t = 1;
+ debug_val.s.set = assoc;
+
+ cvmx_local_irq_save(flags);
+ /*
+ * Make sure core is quiet (no prefetches, etc.) before
+ * entering debug mode.
+ */
+ CVMX_SYNC;
+ /* Flush L1 to make sure debug load misses L1 */
+ CVMX_DCACHE_INVALIDATE;
+
+ /*
+ * The following must be done in assembly as when in debug
+ * mode all data loads from L2 return special debug data, not
+ * normal memory contents. Also, interrupts must be disabled,
+ * since if an interrupt occurs while in debug mode the ISR
+ * will get debug data from all its memory reads instead of
+ * the contents of memory.
+ */
+
+ asm volatile (
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noreorder\n\t"
+ "sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */
+ "ld $0, 0(%[dbg_addr])\n\t"
+ "ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */
+ "sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */
+ "ld $0, 0(%[dbg_addr])\n\t"
+ "cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */
+ ".set pop"
+ : [tag_val] "=r" (tag_val)
+ : [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
+ : "memory");
+
+ cvmx_local_irq_restore(flags);
+
+ return tag_val;
+}
+
+
+union cvmx_l2c_tag cvmx_l2c_get_tag_v2(uint32_t association, uint32_t index, uint32_t tad)
+{
+ union cvmx_l2c_tag tag;
+ tag.u64 = 0;
+
+ if ((int)association >= cvmx_l2c_get_num_assoc()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
+ return tag;
+ }
+ if ((int)index >= cvmx_l2c_get_num_sets()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
+ (int)index, cvmx_l2c_get_num_sets());
+ return tag;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ union cvmx_l2c_tadx_tag l2c_tadx_tag;
+ uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+ (index << CVMX_L2C_IDX_ADDR_SHIFT));
+ /*
+ * Use L2 cache Index load tag cache instruction, as
+ * hardware loads the virtual tag for the L2 cache
+ * block with the contents of L2C_TAD0_TAG
+ * register.
+ */
+ if (tad >= CVMX_L2C_TADS) {
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag_v2: TAD#%d out of range\n", (unsigned int)tad);
+ return tag;
+ }
+ CVMX_CACHE_LTGL2I(address, 0);
+ CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */
+ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(tad));
+
+ tag.s.V = l2c_tadx_tag.s.valid;
+ tag.s.D = l2c_tadx_tag.s.dirty;
+ tag.s.L = l2c_tadx_tag.s.lock;
+ tag.s.U = l2c_tadx_tag.s.use;
+ tag.s.addr = l2c_tadx_tag.s.tag;
+ } else {
+ union __cvmx_l2c_tag tmp_tag;
+ /* __read_l2_tag is intended for internal use only */
+ tmp_tag = __read_l2_tag(association, index);
+
+ /*
+ * Convert all tag structure types to generic version,
+ * as it can represent all models.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ tag.s.V = tmp_tag.cn58xx.V;
+ tag.s.D = tmp_tag.cn58xx.D;
+ tag.s.L = tmp_tag.cn58xx.L;
+ tag.s.U = tmp_tag.cn58xx.U;
+ tag.s.addr = tmp_tag.cn58xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ tag.s.V = tmp_tag.cn38xx.V;
+ tag.s.D = tmp_tag.cn38xx.D;
+ tag.s.L = tmp_tag.cn38xx.L;
+ tag.s.U = tmp_tag.cn38xx.U;
+ tag.s.addr = tmp_tag.cn38xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ tag.s.V = tmp_tag.cn31xx.V;
+ tag.s.D = tmp_tag.cn31xx.D;
+ tag.s.L = tmp_tag.cn31xx.L;
+ tag.s.U = tmp_tag.cn31xx.U;
+ tag.s.addr = tmp_tag.cn31xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+ tag.s.V = tmp_tag.cn30xx.V;
+ tag.s.D = tmp_tag.cn30xx.D;
+ tag.s.L = tmp_tag.cn30xx.L;
+ tag.s.U = tmp_tag.cn30xx.U;
+ tag.s.addr = tmp_tag.cn30xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ tag.s.V = tmp_tag.cn50xx.V;
+ tag.s.D = tmp_tag.cn50xx.D;
+ tag.s.L = tmp_tag.cn50xx.L;
+ tag.s.U = tmp_tag.cn50xx.U;
+ tag.s.addr = tmp_tag.cn50xx.addr;
+ } else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ }
+ }
+ return tag;
+}
+
+union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
+{
+ union cvmx_l2c_tag tag;
+ tag.u64 = 0;
+
+ if ((int)association >= cvmx_l2c_get_num_assoc()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
+ return tag;
+ }
+ if ((int)index >= cvmx_l2c_get_num_sets()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
+ (int)index, cvmx_l2c_get_num_sets());
+ return tag;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ union cvmx_l2c_tadx_tag l2c_tadx_tag;
+ uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+ (index << CVMX_L2C_IDX_ADDR_SHIFT));
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ cvmx_dprintf("ERROR: Cannot use %s on OCTEON CN68XX, use cvmx_l2c_get_tag_v2 instead!\n",
+ __func__);
+ return tag;
+ }
+ /*
+ * Use L2 cache Index load tag cache instruction, as
+ * hardware loads the virtual tag for the L2 cache
+ * block with the contents of L2C_TAD0_TAG
+ * register.
+ */
+ CVMX_CACHE_LTGL2I(address, 0);
+ CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */
+ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
+
+ tag.s.V = l2c_tadx_tag.s.valid;
+ tag.s.D = l2c_tadx_tag.s.dirty;
+ tag.s.L = l2c_tadx_tag.s.lock;
+ tag.s.U = l2c_tadx_tag.s.use;
+ tag.s.addr = l2c_tadx_tag.s.tag;
+ } else {
+ union __cvmx_l2c_tag tmp_tag;
+ /* __read_l2_tag is intended for internal use only */
+ tmp_tag = __read_l2_tag(association, index);
+
+ /*
+ * Convert all tag structure types to generic version,
+ * as it can represent all models.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ tag.s.V = tmp_tag.cn58xx.V;
+ tag.s.D = tmp_tag.cn58xx.D;
+ tag.s.L = tmp_tag.cn58xx.L;
+ tag.s.U = tmp_tag.cn58xx.U;
+ tag.s.addr = tmp_tag.cn58xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ tag.s.V = tmp_tag.cn38xx.V;
+ tag.s.D = tmp_tag.cn38xx.D;
+ tag.s.L = tmp_tag.cn38xx.L;
+ tag.s.U = tmp_tag.cn38xx.U;
+ tag.s.addr = tmp_tag.cn38xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ tag.s.V = tmp_tag.cn31xx.V;
+ tag.s.D = tmp_tag.cn31xx.D;
+ tag.s.L = tmp_tag.cn31xx.L;
+ tag.s.U = tmp_tag.cn31xx.U;
+ tag.s.addr = tmp_tag.cn31xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+ tag.s.V = tmp_tag.cn30xx.V;
+ tag.s.D = tmp_tag.cn30xx.D;
+ tag.s.L = tmp_tag.cn30xx.L;
+ tag.s.U = tmp_tag.cn30xx.U;
+ tag.s.addr = tmp_tag.cn30xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ tag.s.V = tmp_tag.cn50xx.V;
+ tag.s.D = tmp_tag.cn50xx.D;
+ tag.s.L = tmp_tag.cn50xx.L;
+ tag.s.U = tmp_tag.cn50xx.U;
+ tag.s.addr = tmp_tag.cn50xx.addr;
+ } else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ }
+ }
+ return tag;
+}
+#endif
+
+int cvmx_l2c_address_to_tad(uint64_t addr)
+{
+ uint32_t tad;
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ cvmx_l2c_ctl_t l2c_ctl;
+ l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
+ if (!l2c_ctl.s.disidxalias) {
+ tad = ((addr >> 7) ^ (addr >> 12) ^ (addr >> 18)) & 3;
+ } else {
+ tad = (addr >> 7) & 3;
+ }
+ } else {
+ tad = 0;
+ }
+ return tad;
+}
+
+uint32_t cvmx_l2c_v2_address_to_tag(uint64_t addr)
+{
+#define DR0_END ((256 * 1024 * 1024) - 1)
+#define DR1_START (512 * 1024 * 1024)
+#define L2_HOLE (256 * 1024 * 1024)
+
+ if ((addr > DR0_END) && (addr < DR1_START))
+ return (uint32_t)-1;
+ if (addr > DR1_START)
+ addr = addr - L2_HOLE;
+ addr = addr & 0x7FFFFFFFFULL;
+ return (uint32_t)(addr >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+}
+
+uint32_t cvmx_l2c_address_to_index(uint64_t addr)
+{
+ uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
+ int indxalias = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ union cvmx_l2c_ctl l2c_ctl;
+ l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
+ indxalias = !l2c_ctl.s.disidxalias;
+ } else {
+ union cvmx_l2c_cfg l2c_cfg;
+ l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+ indxalias = l2c_cfg.s.idxalias;
+ }
+
+ if (indxalias) {
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
+ idx ^= (idx / cvmx_l2c_get_num_sets()) & 0x3ff;
+ idx ^= a_14_12 & 0x3;
+ idx ^= a_14_12 << 2;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
+ idx ^= idx / cvmx_l2c_get_num_sets();
+ idx ^= a_14_12;
+ } else {
+ idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+ }
+ }
+ idx &= CVMX_L2C_IDX_MASK;
+ return idx;
+}
+
+int cvmx_l2c_get_cache_size_bytes(void)
+{
+ return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
+ CVMX_CACHE_LINE_SIZE;
+}
+
+/**
+ * Return log base 2 of the number of sets in the L2 cache
+ * @return Log base 2 of the number of sets.
+ */
+int cvmx_l2c_get_set_bits(void)
+{
+ int l2_set_bits;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))
+ l2_set_bits = 11; /* 2048 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX))
+ l2_set_bits = 10; /* 1024 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ l2_set_bits = 9; /* 512 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2_set_bits = 8; /* 256 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+ l2_set_bits = 7; /* 128 sets */
+ else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ l2_set_bits = 11; /* 2048 sets */
+ }
+ return l2_set_bits;
+}
+
+/* Return the number of sets in the L2 Cache */
+int cvmx_l2c_get_num_sets(void)
+{
+ return 1 << cvmx_l2c_get_set_bits();
+}
+
+/* Return the number of associations in the L2 Cache */
+int cvmx_l2c_get_num_assoc(void)
+{
+ int l2_assoc;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN58XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN50XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN38XX))
+ l2_assoc = 8;
+ else if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ l2_assoc = 16;
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2_assoc = 4;
+ else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ l2_assoc = 8;
+ }
+
+ /* Check to see if part of the cache is disabled */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ union cvmx_mio_fus_dat3 mio_fus_dat3;
+
+ mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+ /*
+ * cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows
+ * <2> will be not used for 63xx
+ * <1> disables 1/2 ways
+ * <0> disables 1/4 ways
+ * They are cumulative, so for 63xx:
+ * <1> <0>
+ * 0 0 16-way 2MB cache
+ * 0 1 12-way 1.5MB cache
+ * 1 0 8-way 1MB cache
+ * 1 1 4-way 512KB cache
+ */
+
+ if (mio_fus_dat3.cn63xx.l2c_crip == 3)
+ l2_assoc = 4;
+ else if (mio_fus_dat3.cn63xx.l2c_crip == 2)
+ l2_assoc = 8;
+ else if (mio_fus_dat3.cn63xx.l2c_crip == 1)
+ l2_assoc = 12;
+ } else {
+ union cvmx_l2d_fus3 val;
+ val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+ /*
+ * Using shifts here, as bit position names are
+ * different for each model but they all mean the
+ * same.
+ */
+ if ((val.u64 >> 35) & 0x1)
+ l2_assoc = l2_assoc >> 2;
+ else if ((val.u64 >> 34) & 0x1)
+ l2_assoc = l2_assoc >> 1;
+ }
+ return l2_assoc;
+}
+
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+/**
+ * Flush a line from the L2 cache
+ * This should only be called from one core at a time, as this routine
+ * sets the core to the 'debug' core in order to flush the line.
+ *
+ * @param assoc Association (or way) to flush
+ * @param index Index to flush
+ */
+void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
+{
+ /* Check the range of the index. */
+ if (index >= (uint32_t)cvmx_l2c_get_num_sets()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
+ return;
+ }
+
+ /* Check the range of association. */
+ if (assoc >= (uint32_t)cvmx_l2c_get_num_assoc()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
+ return;
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ uint64_t address;
+ /* Create the address based on index and association.
+ * Bits<20:17> select the way of the cache block involved in
+ * the operation
+ * Bits<16:7> of the effective address select the index
+ */
+ address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+ (index << CVMX_L2C_IDX_ADDR_SHIFT));
+ CVMX_CACHE_WBIL2I(address, 0);
+ } else {
+ union cvmx_l2c_dbg l2cdbg;
+
+ l2cdbg.u64 = 0;
+ if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ l2cdbg.s.finv = 1;
+
+ l2cdbg.s.set = assoc;
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+ /*
+ * Enter debug mode, and make sure all other writes
+ * complete before we enter debug mode
+ */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ index * CVMX_CACHE_LINE_SIZE),
+ 0);
+ /* Exit debug mode */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ }
+}
+#endif
+
+/**
+ * Initialize the BIG address in L2C+DRAM to generate a proper error
+ * on reading from or writing to a non-existent memory location.
+ *
+ * @param mem_size Amount of DRAM configured in MB.
+ * @param mode Allow/disallow reporting of errors via L2C_INT_SUM[BIGRD,BIGWR].
+ */
+void cvmx_l2c_set_big_size(uint64_t mem_size, int mode)
+{
+ if ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ && !OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ {
+ cvmx_l2c_big_ctl_t big_ctl;
+ int bits = 0, zero_bits = 0;
+ uint64_t mem;
+
+ if (mem_size > (CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024))
+ {
+ cvmx_dprintf("WARNING: Invalid memory size(%lld) requested, should be <= %lld\n",
+ (unsigned long long)mem_size, (unsigned long long)CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024);
+ mem_size = CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024;
+ }
+
+ mem = mem_size;
+ while (mem)
+ {
+ if ((mem & 1) == 0)
+ zero_bits++;
+ bits++;
+ mem >>= 1;
+ }
+
+ if ((bits - zero_bits) != 1 || (bits - 9) <= 0)
+ {
+ cvmx_dprintf("ERROR: Invalid DRAM size (%lld) requested, refer to L2C_BIG_CTL[maxdram] for valid options.\n", (unsigned long long)mem_size);
+ return;
+ }
+
+ big_ctl.u64 = 0;
+ big_ctl.s.maxdram = bits - 9;
+ big_ctl.s.disable = mode;
+ cvmx_write_csr(CVMX_L2C_BIG_CTL, big_ctl.u64);
+ }
+}
+
+#if !defined(CVMX_BUILD_FOR_LINUX_HOST) && !defined(CVMX_BUILD_FOR_LINUX_KERNEL)
+/* L2C Virtualization APIs. These APIs are based on Octeon II documentation. */
+
+/*
+ * These could be used by the Linux kernel, but currently are not, so
+ * disable them to save space.
+ */
+
+/**
+ * @INTERNAL
+ * Helper function to decode VALUE to the number of allowed virtualization IDs.
+ * Returns L2C_VRT_CTL[NUMID].
+ *
+ * @param nvid Number of virtual Ids.
+ * @return On success decode to NUMID, or to -1 on failure.
+ */
+static inline int __cvmx_l2c_vrt_decode_numid(int nvid)
+{
+ int bits = -1;
+ int zero_bits = -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ return -1;
+
+ if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED) {
+ cvmx_dprintf("WARNING: Invalid number of virtual ids(%d) requested, should be <= 64\n",
+ nvid);
+ return bits;
+ }
+
+ while (nvid) {
+ if ((nvid & 1) == 0)
+ zero_bits++;
+
+ bits++;
+ nvid >>= 1;
+ }
+
+ if (bits == 1 || (zero_bits && ((bits - zero_bits) == 1)))
+ return zero_bits;
+ return -1;
+}
+
+/**
+ * Set the maximum number of Virtual IDs allowed in a machine.
+ *
+ * @param nvid Number of virtual IDs allowed in a machine.
+ * @return Return 0 on success or -1 on failure.
+ */
+int cvmx_l2c_vrt_set_max_virtids(int nvid)
+{
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ return -1;
+
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+
+ if (l2c_vrt_ctl.s.enable) {
+ cvmx_dprintf("WARNING: Changing number of Virtual Machine IDs is not allowed after Virtualization is enabled\n");
+ return -1;
+ }
+
+ if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_max_virtids: Invalid number of Virtual Machine IDs(%d) requested, max allowed %d\n",
+ nvid, CVMX_L2C_VRT_MAX_VIRTID_ALLOWED);
+ return -1;
+ }
+
+ /* Calculate the numid based on nvid */
+ l2c_vrt_ctl.s.numid = __cvmx_l2c_vrt_decode_numid(nvid);
+ cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
+ return 0;
+}
+
+/**
+ * Get the maximum number of virtual IDs allowed in a machine.
+ *
+ * @return Return number of virtual machine IDs or -1 on failure.
+ */
+int cvmx_l2c_vrt_get_max_virtids(void)
+{
+ int virtids;
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ return -1;
+
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+ virtids = 1 << (l2c_vrt_ctl.s.numid + 1);
+ if (virtids > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_get_max_virtids: Invalid number of Virtual IDs initialized (%d)\n",
+ virtids);
+ return -1;
+ }
+ return virtids;
+}
+
+/**
+ * @INTERNAL
+ * Helper function to decode VALUE to memory space coverage of L2C_VRT_MEM.
+ * Returns L2C_VRT_CTL[MEMSZ].
+ *
+ * @param memsz Memory in GB.
+ * @return On success the decoded MEMSZ, or -1 on failure.
+ */
+static inline int __cvmx_l2c_vrt_decode_memsize(int memsz)
+{
+ int bits = 0;
+ int zero_bits = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ return -1;
+
+ if (memsz == 0 || memsz > CVMX_L2C_MAX_MEMSZ_ALLOWED) {
+ cvmx_dprintf("WARNING: Invalid virtual memory size(%d) requested, should be <= %d\n",
+ memsz, CVMX_L2C_MAX_MEMSZ_ALLOWED);
+ return -1;
+ }
+
+ while (memsz) {
+ if ((memsz & 1) == 0)
+ zero_bits++;
+
+ bits++;
+ memsz >>= 1;
+ }
+
+ if (bits == 1 || (bits - zero_bits) == 1)
+ return zero_bits;
+ return -1;
+}
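For example, memsz = 8 (GB, binary 1000) gives bits = 4 and zero_bits = 3,
so the helper returns MEMSZ = 3; in general a power-of-two size in GB
decodes to its base-2 logarithm, and any other value fails with -1.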
+
+/**
+ * Set the maximum size of memory space to be allocated for virtualization.
+ *
+ * @param memsz Size of the virtual memory in GB
+ * @return Return 0 on success or -1 on failure.
+ */
+int cvmx_l2c_vrt_set_max_memsz(int memsz)
+{
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+ int decode = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ return -1;
+
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+
+ if (l2c_vrt_ctl.s.enable) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Changing the size of the memory after Virtualization is enabled is not allowed.\n");
+ return -1;
+ }
+
+ if (memsz >= (int)(cvmx_sysinfo_get()->system_dram_size / 1000000000ull)) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Invalid memory size (%d GB), greater than available on the chip\n",
+ memsz);
+ return -1;
+ }
+
+ decode = __cvmx_l2c_vrt_decode_memsize(memsz);
+ if (decode == -1) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Invalid memory size (%d GB), refer to L2C_VRT_CTL[MEMSZ] for more information\n",
+ memsz);
+ return -1;
+ }
+
+ l2c_vrt_ctl.s.memsz = decode;
+ cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
+ return 0;
+}
+
+/**
+ * Set a Virtual ID to a set of cores.
+ *
+ * @param virtid Assign virtid to a set of cores.
+ * @param coremask The group of cores to assign a unique virtual id.
+ * @return Return 0 on success, otherwise -1.
+ */
+int cvmx_l2c_vrt_assign_virtid(int virtid, uint32_t coremask)
+{
+ uint32_t core = 0;
+ int found = 0;
+ int max_virtid;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ return -1;
+
+ max_virtid = cvmx_l2c_vrt_get_max_virtids();
+
+ if (virtid > max_virtid) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Max %d number of virtids are allowed, passed %d.\n",
+ max_virtid, virtid);
+ return -1;
+ }
+
+ while (core < cvmx_octeon_num_cores()) {
+ if ((coremask >> core) & 1) {
+ cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
+ cvmx_l2c_virtid_iobx_t l2c_virtid_iobx;
+ l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
+
+ /* Check if the core already has a virtid assigned. */
+ if (l2c_virtid_ppx.s.id) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Changing virtid of core #%d to %d from %d.\n",
+ (unsigned int)core, virtid,
+ l2c_virtid_ppx.s.id);
+
+ /* Flush L2 cache to avoid write errors */
+ cvmx_l2c_flush();
+ }
+ cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), virtid & 0x3f);
+
+ /* Set the IOB to normal mode. */
+ l2c_virtid_iobx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_IOBX(core));
+ l2c_virtid_iobx.s.id = 1;
+ l2c_virtid_iobx.s.dwbid = 0;
+ cvmx_write_csr(CVMX_L2C_VIRTID_IOBX(core),
+ l2c_virtid_iobx.u64);
+ found = 1;
+ }
+ core++;
+ }
+
+ /* Invalid coremask passed. */
+ if (!found) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Invalid coremask(0x%x) passed\n",
+ (unsigned int)coremask);
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Remove a virtual ID assigned to a set of cores. Update the virtid mask
+ * and virtid stored for each core.
+ *
+ * @param virtid The virtual machine ID to remove.
+ */
+void cvmx_l2c_vrt_remove_virtid(int virtid)
+{
+ uint32_t core;
+ cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ return;
+
+ for (core = 0; core < cvmx_octeon_num_cores(); core++) {
+ l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
+ if (virtid == l2c_virtid_ppx.s.id)
+ cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), 0);
+ }
+}
+
+/**
+ * Helper function to compute the protection granularity implied by the
+ * current virtualization configuration (memory size and virtid count).
+ */
+static uint64_t __cvmx_l2c_vrt_get_granularity(void)
+{
+ uint64_t gran = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ int nvid;
+ uint64_t szd;
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+ nvid = cvmx_l2c_vrt_get_max_virtids();
+ szd = (1ull << l2c_vrt_ctl.s.memsz) * 1024 * 1024 * 1024;
+ gran = (unsigned long long)(szd * nvid)/(32ull * 1024);
+ }
+ return gran;
+}
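As a worked example: with L2C_VRT_CTL[MEMSZ] = 0 (1 GB of protectable
memory) and 64 virtual IDs, the granularity is (1 GB * 64) / 32768 = 2 MB,
i.e. each of the 32K protection bits held in L2C_VRT_MEM(0..1023) covers
one 2 MB block on behalf of one virtid.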
+
+CVMX_SHARED cvmx_spinlock_t cvmx_l2c_vrt_spinlock;
+
+/**
+ * Protect a memory region for a given virtual ID, allowing or
+ * disallowing write access to it.
+ *
+ * @param start_addr Starting address of memory region
+ * @param size Size of the memory to protect
+ * @param virtid Virtual ID to use
+ * @param mode Allow/Disallow write access
+ * = 0, Allow write access by virtid
+ * = 1, Disallow write access by virtid
+ * @return Return 0 on success, otherwise -1.
+ */
+int cvmx_l2c_vrt_memprotect(uint64_t start_addr, int size, int virtid, int mode)
+{
+ uint64_t gran;
+ uint64_t end_addr;
+ int byte_offset, virtid_offset;
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+ cvmx_l2c_vrt_memx_t l2c_vrt_mem;
+ cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
+ int found;
+ uint32_t core;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ return -1;
+ /*
+ * Check the alignment of start address, should be aligned to the
+ * granularity.
+ */
+ gran = __cvmx_l2c_vrt_get_granularity();
+ end_addr = start_addr + size;
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+
+ /* No need to protect if virtualization is not enabled */
+ if (!l2c_vrt_ctl.s.enable) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization is not enabled.\n");
+ return -1;
+ }
+
+ if (virtid > cvmx_l2c_vrt_get_max_virtids()) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id is greater than max allowed\n");
+ return -1;
+ }
+
+ /* No need to protect if virtid is not assigned to a core */
+ found = 0;
+ for (core = 0; core < cvmx_octeon_num_cores(); core++) {
+ l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
+ if (l2c_virtid_ppx.s.id == virtid) {
+ found = 1;
+ break;
+ }
+ }
+ if (found == 0) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id (%d) is not assigned to any core.\n",
+ virtid);
+ return -1;
+ }
+
+ /*
+ * Make sure previous stores are through before protecting the
+ * memory.
+ */
+ CVMX_SYNCW;
+
+ /*
+ * If the L2/DRAM physical address is >= 512 MB, subtract 256
+ * MB to get the address to use. This is because L2C removes
+ * the 256MB "hole" between DR0 and DR1.
+ */
+ if (start_addr >= (512 * 1024 * 1024))
+ start_addr -= 256 * 1024 * 1024;
+
+ if (start_addr != ((start_addr + (gran - 1)) & ~(gran - 1))) {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Start address is not aligned\n");
+ return -1;
+ }
+
+ /*
+ * Check the size of the memory to protect, should be aligned
+ * to the granularity.
+ */
+ if (end_addr != ((end_addr + (gran - 1)) & ~(gran - 1))) {
+ end_addr = (end_addr + (gran - 1)) & ~(gran - 1);
+ size = end_addr - start_addr;
+ }
+
+ byte_offset = l2c_vrt_ctl.s.memsz + l2c_vrt_ctl.s.numid + 16;
+ virtid_offset = 14 - l2c_vrt_ctl.s.numid;
+
+ cvmx_spinlock_lock(&cvmx_l2c_vrt_spinlock);
+
+ /* Update the protection bit for the given virtid for each block in the range. */
+ while (start_addr < end_addr) {
+ /*
+ * When L2C virtualization is enabled and a bit is set
+ * in L2C_VRT_MEM(0..1023), then L2C prevents the
+ * selected virtual machine from storing to the
+ * selected L2C/DRAM region.
+ */
+ int offset, position, i;
+ int l2c_vrt_mem_bit_index = start_addr >> byte_offset;
+ l2c_vrt_mem_bit_index |= (virtid << virtid_offset);
+
+ offset = l2c_vrt_mem_bit_index >> 5;
+ position = l2c_vrt_mem_bit_index & 0x1f;
+
+ l2c_vrt_mem.u64 = cvmx_read_csr(CVMX_L2C_VRT_MEMX(offset));
+ /* Allow/Disallow write access to memory. */
+ if (mode == 0)
+ l2c_vrt_mem.s.data &= ~(1 << position);
+ else
+ l2c_vrt_mem.s.data |= 1 << position;
+ l2c_vrt_mem.s.parity = 0;
+ /* PARITY<i> is the even parity of DATA<i*8+7:i*8>, which means
+ * that each bit<i> in PARITY[0..3], is the XOR of all the bits
+ * in the corresponding byte in DATA.
+ */
+ for (i = 0; i <= 4; i++) {
+ uint64_t mask = 0xffull << (i*8);
+ if ((cvmx_pop(l2c_vrt_mem.s.data & mask) & 0x1))
+ l2c_vrt_mem.s.parity |= (1ull << i);
+ }
+ cvmx_write_csr(CVMX_L2C_VRT_MEMX(offset), l2c_vrt_mem.u64);
+ start_addr += gran;
+ }
+
+ cvmx_spinlock_unlock(&cvmx_l2c_vrt_spinlock);
+
+ return 0;
+}
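For orientation, a minimal end-to-end sketch of how these APIs fit together
(the virtid, coremask, region base, and sizes are hypothetical values chosen
for illustration, and error handling is reduced to early returns):

    #include "cvmx.h"       /* assumed SDK umbrella header */
    #include "cvmx-l2c.h"

    /* Hypothetical setup: virtual machine ID 1 runs on core 0 and must
     * not write a 2 MB region at a hypothetical physical base address. */
    static void vrt_setup_sketch(void)
    {
        uint64_t base = 0x20000000ull;          /* granularity aligned */
        int size = 2 * 1024 * 1024;             /* 2 MB */

        if (cvmx_l2c_vrt_set_max_virtids(64))   /* power of two, <= 64 */
            return;
        if (cvmx_l2c_vrt_set_max_memsz(1))      /* 1 GB protectable */
            return;
        if (cvmx_l2c_vrt_assign_virtid(1, 0x1)) /* virtid 1 -> core 0 */
            return;
        cvmx_l2c_vrt_enable(1);                 /* out-of-bound writes error */
        if (cvmx_l2c_vrt_memprotect(base, size, 1, 1)) /* disallow writes */
            return;
    }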
+
+/**
+ * Enable virtualization.
+ *
+ * @param mode Whether out-of-bound writes are an error.
+ */
+void cvmx_l2c_vrt_enable(int mode)
+{
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ return;
+
+ /* Enable global virtualization */
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+ l2c_vrt_ctl.s.ooberr = mode;
+ l2c_vrt_ctl.s.enable = 1;
+ cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
+}
+
+/**
+ * Disable virtualization.
+ */
+void cvmx_l2c_vrt_disable(void)
+{
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ return;
+
+ /* Disable global virtualization */
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+ l2c_vrt_ctl.s.enable = 0;
+ cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
+}
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-l2c.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-l2c.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-l2c.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-l2c.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,530 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2011 Cavium, Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the Level 2 Cache (L2C) control, measurement, and debugging
+ * facilities.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifndef __CVMX_L2C_H__
+#define __CVMX_L2C_H__
+
+#define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
+#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
+
+/* Defines for index aliasing computations */
+#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
+#define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
+#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
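As a hypothetical illustration of how the defines above compose, the basic
(non-aliased) set index of a physical address is its line address masked to
the set count; cvmx_l2c_address_to_index(), declared later in this file, is
the authoritative routine and also handles model-specific details:

    /* Sketch: non-aliased set index, assuming 128-byte lines and a
     * power-of-two set count as the defines above encode. */
    static inline uint32_t l2_basic_index(uint64_t paddr)
    {
        return (uint32_t)((paddr >> CVMX_L2C_IDX_ADDR_SHIFT) & CVMX_L2C_IDX_MASK);
    }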
+
+/* Defines for virtualization, valid only on Octeon II and newer. */
+#define CVMX_L2C_VRT_MAX_VIRTID_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 64 : 0)
+#define CVMX_L2C_MAX_MEMSZ_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 32 : 0)
+
+ /*------------*/
+ /* TYPEDEFS */
+ /*------------*/
+
+union cvmx_l2c_tag {
+ uint64_t u64;
+#ifdef __BIG_ENDIAN_BITFIELD
+ struct {
+ uint64_t reserved:28;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:32; /* Phys mem (not all bits valid) */
+ } s;
+#else
+ struct {
+ uint64_t addr:32; /* Phys mem (not all bits valid) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:28;
+ } s;
+
+#endif
+};
+typedef union cvmx_l2c_tag cvmx_l2c_tag_t;
+
+/* Maximum number of TADs */
+#define CVMX_L2C_MAX_TADS 4
+/* Maximum number of L2C performance counters */
+#define CVMX_L2C_MAX_PCNT 4
+
+/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
+#define CVMX_L2C_TADS ((OCTEON_IS_MODEL(OCTEON_CN68XX)) ? 4 : 1)
+/* Number of L2C IOBs connected to LMC. */
+#define CVMX_L2C_IOBS ((OCTEON_IS_MODEL(OCTEON_CN68XX)) ? 2 : 1)
+
+ /* L2C Performance Counter events. */
+enum cvmx_l2c_event {
+ CVMX_L2C_EVENT_CYCLES = 0, /**< Cycles */
+ CVMX_L2C_EVENT_INSTRUCTION_MISS = 1, /**< L2 Instruction Miss */
+ CVMX_L2C_EVENT_INSTRUCTION_HIT = 2, /**< L2 Instruction Hit */
+ CVMX_L2C_EVENT_DATA_MISS = 3, /**< L2 Data Miss */
+ CVMX_L2C_EVENT_DATA_HIT = 4, /**< L2 Data Hit */
+ CVMX_L2C_EVENT_MISS = 5, /**< L2 Miss (I/D) */
+ CVMX_L2C_EVENT_HIT = 6, /**< L2 Hit (I/D) */
+ CVMX_L2C_EVENT_VICTIM_HIT = 7, /**< L2 Victim Buffer Hit (Retry Probe) */
+ CVMX_L2C_EVENT_INDEX_CONFLICT = 8, /**< LFB-NQ Index Conflict */
+ CVMX_L2C_EVENT_TAG_PROBE = 9, /**< L2 Tag Probe (issued - could be VB-Retried) */
+ CVMX_L2C_EVENT_TAG_UPDATE = 10, /**< L2 Tag Update (completed). Note: Some CMD types do not update */
+ CVMX_L2C_EVENT_TAG_COMPLETE = 11, /**< L2 Tag Probe Completed (beyond VB-RTY window) */
+ CVMX_L2C_EVENT_TAG_DIRTY = 12, /**< L2 Tag Dirty Victim */
+ CVMX_L2C_EVENT_DATA_STORE_NOP = 13, /**< L2 Data Store NOP */
+ CVMX_L2C_EVENT_DATA_STORE_READ = 14, /**< L2 Data Store READ */
+ CVMX_L2C_EVENT_DATA_STORE_WRITE = 15, /**< L2 Data Store WRITE */
+ CVMX_L2C_EVENT_FILL_DATA_VALID = 16, /**< Memory Fill Data valid */
+ CVMX_L2C_EVENT_WRITE_REQUEST = 17, /**< Memory Write Request */
+ CVMX_L2C_EVENT_READ_REQUEST = 18, /**< Memory Read Request */
+ CVMX_L2C_EVENT_WRITE_DATA_VALID = 19, /**< Memory Write Data valid */
+ CVMX_L2C_EVENT_XMC_NOP = 20, /**< XMC NOP */
+ CVMX_L2C_EVENT_XMC_LDT = 21, /**< XMC LDT */
+ CVMX_L2C_EVENT_XMC_LDI = 22, /**< XMC LDI */
+ CVMX_L2C_EVENT_XMC_LDD = 23, /**< XMC LDD */
+ CVMX_L2C_EVENT_XMC_STF = 24, /**< XMC STF */
+ CVMX_L2C_EVENT_XMC_STT = 25, /**< XMC STT */
+ CVMX_L2C_EVENT_XMC_STP = 26, /**< XMC STP */
+ CVMX_L2C_EVENT_XMC_STC = 27, /**< XMC STC */
+ CVMX_L2C_EVENT_XMC_DWB = 28, /**< XMC DWB */
+ CVMX_L2C_EVENT_XMC_PL2 = 29, /**< XMC PL2 */
+ CVMX_L2C_EVENT_XMC_PSL1 = 30, /**< XMC PSL1 */
+ CVMX_L2C_EVENT_XMC_IOBLD = 31, /**< XMC IOBLD */
+ CVMX_L2C_EVENT_XMC_IOBST = 32, /**< XMC IOBST */
+ CVMX_L2C_EVENT_XMC_IOBDMA = 33, /**< XMC IOBDMA */
+ CVMX_L2C_EVENT_XMC_IOBRSP = 34, /**< XMC IOBRSP */
+ CVMX_L2C_EVENT_XMC_BUS_VALID = 35, /**< XMC Bus valid (all) */
+ CVMX_L2C_EVENT_XMC_MEM_DATA = 36, /**< XMC Bus valid (DST=L2C) Memory */
+ CVMX_L2C_EVENT_XMC_REFL_DATA = 37, /**< XMC Bus valid (DST=IOB) REFL Data */
+ CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38, /**< XMC Bus valid (DST=PP) IOBRSP Data */
+ CVMX_L2C_EVENT_RSC_NOP = 39, /**< RSC NOP */
+ CVMX_L2C_EVENT_RSC_STDN = 40, /**< RSC STDN */
+ CVMX_L2C_EVENT_RSC_FILL = 41, /**< RSC FILL */
+ CVMX_L2C_EVENT_RSC_REFL = 42, /**< RSC REFL */
+ CVMX_L2C_EVENT_RSC_STIN = 43, /**< RSC STIN */
+ CVMX_L2C_EVENT_RSC_SCIN = 44, /**< RSC SCIN */
+ CVMX_L2C_EVENT_RSC_SCFL = 45, /**< RSC SCFL */
+ CVMX_L2C_EVENT_RSC_SCDN = 46, /**< RSC SCDN */
+ CVMX_L2C_EVENT_RSC_DATA_VALID = 47, /**< RSC Data Valid */
+ CVMX_L2C_EVENT_RSC_VALID_FILL = 48, /**< RSC Data Valid (FILL) */
+ CVMX_L2C_EVENT_RSC_VALID_STRSP = 49, /**< RSC Data Valid (STRSP) */
+ CVMX_L2C_EVENT_RSC_VALID_REFL = 50, /**< RSC Data Valid (REFL) */
+ CVMX_L2C_EVENT_LRF_REQ = 51, /**< LRF-REQ (LFB-NQ) */
+ CVMX_L2C_EVENT_DT_RD_ALLOC = 52, /**< DT RD-ALLOC */
+ CVMX_L2C_EVENT_DT_WR_INVAL = 53, /**< DT WR-INVAL */
+ CVMX_L2C_EVENT_MAX
+};
+typedef enum cvmx_l2c_event cvmx_l2c_event_t;
+
+/* L2C Performance Counter events for Octeon2. */
+enum cvmx_l2c_tad_event {
+ CVMX_L2C_TAD_EVENT_NONE = 0, /* None */
+ CVMX_L2C_TAD_EVENT_TAG_HIT = 1, /* L2 Tag Hit */
+ CVMX_L2C_TAD_EVENT_TAG_MISS = 2, /* L2 Tag Miss */
+ CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3, /* L2 Tag NoAlloc (forced no-allocate) */
+ CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4, /* L2 Tag Victim */
+ CVMX_L2C_TAD_EVENT_SC_FAIL = 5, /* SC Fail */
+ CVMX_L2C_TAD_EVENT_SC_PASS = 6, /* SC Pass */
+ CVMX_L2C_TAD_EVENT_LFB_VALID = 7, /* LFB Occupancy (each cycle adds \# of LFBs valid) */
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8, /* LFB Wait LFB (each cycle adds \# LFBs waiting for other LFBs) */
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9, /* LFB Wait VAB (each cycle adds \# LFBs waiting for VAB) */
+ CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128, /* Quad 0 index bus in use */
+ CVMX_L2C_TAD_EVENT_QUAD0_READ = 129, /* Quad 0 read data bus in use */
+ CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130, /* Quad 0 \# banks in use (0-4/cycle) */
+ CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131, /* Quad 0 wdat flops in use (0-4/cycle) */
+ CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144, /* Quad 1 index bus in use */
+ CVMX_L2C_TAD_EVENT_QUAD1_READ = 145, /* Quad 1 read data bus in use */
+ CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146, /* Quad 1 \# banks in use (0-4/cycle) */
+ CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147, /* Quad 1 wdat flops in use (0-4/cycle) */
+ CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160, /* Quad 2 index bus in use */
+ CVMX_L2C_TAD_EVENT_QUAD2_READ = 161, /* Quad 2 read data bus in use */
+ CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162, /* Quad 2 \# banks in use (0-4/cycle) */
+ CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163, /* Quad 2 wdat flops in use (0-4/cycle) */
+ CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176, /* Quad 3 index bus in use */
+ CVMX_L2C_TAD_EVENT_QUAD3_READ = 177, /* Quad 3 read data bus in use */
+ CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178, /* Quad 3 \# banks in use (0-4/cycle) */
+ CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179, /* Quad 3 wdat flops in use (0-4/cycle) */
+ CVMX_L2C_TAD_EVENT_MAX
+};
+typedef enum cvmx_l2c_tad_event cvmx_l2c_tad_event_t;
+
+/**
+ * Configure one of the four L2 Cache performance counters to capture event
+ * occurrences.
+ *
+ * @param counter The counter to configure. Range 0..3.
+ * @param event The type of L2 Cache event occurrence to count.
+ * @param clear_on_read When asserted, any read of the performance counter
+ * clears the counter.
+ *
+ * @note The routine does not clear the counter.
+ */
+void cvmx_l2c_config_perf(uint32_t counter, cvmx_l2c_event_t event, uint32_t clear_on_read);
+
+/**
+ * Read the given L2 Cache performance counter. The counter must be configured
+ * before reading, but this routine does not enforce this requirement.
+ *
+ * @param counter The counter to configure. Range 0..3.
+ *
+ * @return The current counter value.
+ */
+uint64_t cvmx_l2c_read_perf(uint32_t counter);
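A minimal usage sketch for the two routines above (the counter number and
event choice are arbitrary):

    /* Count combined L2 misses on counter 0, without clear-on-read. */
    cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_MISS, 0);
    /* ... run the workload of interest ... */
    uint64_t l2_misses = cvmx_l2c_read_perf(0);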
+
+/**
+ * Return the L2 Cache way partitioning for a given core.
+ *
+ * @param core The core processor of interest.
+ *
+ * @return The mask specifying the partitioning. 0 bits in mask indicate
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
+ */
+int cvmx_l2c_get_core_way_partition(uint32_t core);
+
+/**
+ * Partitions the L2 cache for a core
+ *
+ * @param core The core that the partitioning applies to.
+ * @param mask The partitioning of the ways expressed as a binary
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
+ *
+ * @note If any ways are blocked for all cores and the HW blocks, then
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
+ */
+int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
+
+/**
+ * Return the L2 Cache way partitioning for the hw blocks.
+ *
+ * @return The mask specifying the reserved way. 0 bits in mask indicate
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
+ */
+int cvmx_l2c_get_hw_way_partition(void);
+
+/**
+ * Partitions the L2 cache for the hardware blocks.
+ *
+ * @param mask The partitioning of the ways expressed as a binary
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
+ *
+ * @note If any ways are blocked for all cores and the HW blocks, then
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
+ */
+int cvmx_l2c_set_hw_way_partition(uint32_t mask);
+
+
+/**
+ * Return the L2 Cache way partitioning for the second set of hw blocks.
+ *
+ * @return The mask specifying the reserved way. 0 bits in mask indicate
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
+ */
+int cvmx_l2c_get_hw_way_partition2(void);
+
+/**
+ * Partitions the L2 cache for the second set of blocks.
+ *
+ * @param mask The partitioning of the ways expressed as a binary
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
+ *
+ * @note If any ways are blocked for all cores and the HW blocks, then
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
+ */
+int cvmx_l2c_set_hw_way_partition2(uint32_t mask);
+
+/**
+ * Locks a line in the L2 cache at the specified physical address
+ *
+ * @param addr physical address of line to lock
+ *
+ * @return 0 on success,
+ * 1 if line not locked.
+ */
+int cvmx_l2c_lock_line(uint64_t addr);
+
+/**
+ * Locks a specified memory region in the L2 cache.
+ *
+ * Note that if not all lines can be locked, that means that all
+ * but one of the ways (associations) available to the locking
+ * core are locked. Having only 1 association available for
+ * normal caching may have a significant adverse affect on performance.
+ * Care should be taken to ensure that enough of the L2 cache is left
+ * unlocked to allow for normal caching of DRAM.
+ *
+ * @param start Physical address of the start of the region to lock
+ * @param len Length (in bytes) of region to lock
+ *
+ * @return Number of requested lines that were not locked.
+ * 0 on success (all locked)
+ */
+int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
+
+
+/**
+ * Unlock and flush a cache line from the L2 cache.
+ * IMPORTANT: Must only be run by one core at a time due to use
+ * of L2C debug features.
+ * Note that this function will flush a matching but unlocked cache line.
+ * (If address is not in L2, no lines are flushed.)
+ *
+ * @param address Physical address to unlock
+ *
+ * @return 0: line not unlocked
+ * 1: line unlocked
+ */
+int cvmx_l2c_unlock_line(uint64_t address);
+
+/**
+ * Unlocks a region of memory that is locked in the L2 cache
+ *
+ * @param start start physical address
+ * @param len length (in bytes) to unlock
+ *
+ * @return Number of locked lines that the call unlocked
+ */
+int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
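A usage sketch for the lock/unlock pair (the buffer and size are
hypothetical, and cvmx_ptr_to_phys() is assumed from the SDK's address
helpers):

    /* Pin a 16 KB buffer in L2 for a latency-critical phase. */
    uint64_t phys = cvmx_ptr_to_phys(buf);
    int missed = cvmx_l2c_lock_mem_region(phys, 16384);
    if (missed)
        cvmx_dprintf("L2 lock: %d lines not locked\n", missed);
    /* ... latency-critical work ... */
    cvmx_l2c_unlock_mem_region(phys, 16384);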
+
+
+/**
+ * Read the L2 controller tag for a given location in L2
+ *
+ * @param association
+ * Which association to read line from
+ * @param index Which index (set) to read from.
+ *
+ * @return l2c tag structure for line requested.
+ *
+ * NOTE: This function is deprecated and cannot be used on devices with
+ * multiple L2C interfaces such as the OCTEON CN68XX.
+ * Please use cvmx_l2c_get_tag_v2 instead.
+ */
+cvmx_l2c_tag_t cvmx_l2c_get_tag(uint32_t association, uint32_t index)
+ __attribute__ ((deprecated));
+
+/**
+ * Read the L2 controller tag for a given location in L2
+ *
+ * @param association
+ * Which association to read line from
+ * @param index Which index (set) to read from.
+ *
+ * @param tad Which TAD to read from, set to 0 except on OCTEON CN68XX.
+ *
+ * @return l2c tag structure for line requested.
+ */
+cvmx_l2c_tag_t cvmx_l2c_get_tag_v2(uint32_t association, uint32_t index, uint32_t tad);
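A small sketch of reading one tag with the v2 interface (the way, index,
and TAD values are arbitrary; the field names come from the cvmx_l2c_tag
union defined above):

    cvmx_l2c_tag_t tag = cvmx_l2c_get_tag_v2(0, 0, 0); /* way 0, index 0, TAD 0 */
    if (tag.s.V)
        cvmx_dprintf("L2 line: %s, addr bits 0x%x\n",
                     tag.s.D ? "dirty" : "clean", (unsigned int)tag.s.addr);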
+
+/**
+ * Find the TAD for the specified address
+ *
+ * @param addr physical address to get TAD for
+ *
+ * @return TAD number for address.
+ */
+int cvmx_l2c_address_to_tad(uint64_t addr);
+
+/**
+ * Returns the cache index for a given physical address
+ *
+ * @param addr physical address
+ *
+ * @return L2 cache index
+ */
+uint32_t cvmx_l2c_address_to_index (uint64_t addr);
+
+/**
+ * Returns the L2 tag that will be used for the given physical address
+ *
+ * @param addr physical address
+ * @return L2 cache tag. Addresses in the LMC hole are not valid.
+ * Returns 0xFFFFFFFF if the address specified is in the LMC hole.
+ */
+uint32_t cvmx_l2c_v2_address_to_tag(uint64_t addr);
+
+/**
+ * Flushes (and unlocks) the entire L2 cache.
+ * IMPORTANT: Must only be run by one core at a time due to use
+ * of L2C debug features.
+ */
+void cvmx_l2c_flush(void);
+
+/**
+ *
+ * @return Returns the size of the L2 cache in bytes,
+ * -1 on error (unrecognized model)
+ */
+int cvmx_l2c_get_cache_size_bytes(void);
+
+/**
+ * Return the number of sets in the L2 Cache
+ *
+ * @return Number of sets in the L2 cache.
+ */
+int cvmx_l2c_get_num_sets(void);
+
+/**
+ * Return log base 2 of the number of sets in the L2 cache
+ * @return Log base 2 of the number of sets.
+ */
+int cvmx_l2c_get_set_bits(void);
+/**
+ * Return the number of associations in the L2 Cache
+ *
+ * @return Number of associations (ways) in the L2 cache.
+ */
+int cvmx_l2c_get_num_assoc(void);
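Taken together, the geometry routines let a caller re-derive the cache
size; a sketch, assuming the 128-byte line size noted at
CVMX_L2C_IDX_ADDR_SHIFT (cvmx_l2c_get_cache_size_bytes() is the
authoritative answer):

    int derived_bytes = cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() * 128;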
+
+/**
+ * Flush a line from the L2 cache
+ * This should only be called from one core at a time, as this routine
+ * sets the core to the 'debug' core in order to flush the line.
+ *
+ * @param assoc Association (or way) to flush
+ * @param index Index to flush
+ */
+void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index);
+
+/**
+ * Initialize the BIG address in L2C+DRAM to generate a proper error
+ * on reading/writing to a non-existent memory location.
+ *
+ * @param mem_size Amount of DRAM configured in MB.
+ * @param mode Allow/Disallow reporting errors L2C_INT_SUM[BIGRD,BIGWR].
+ */
+void cvmx_l2c_set_big_size(uint64_t mem_size, int mode);
+
+#if !defined(CVMX_BUILD_FOR_LINUX_HOST) && !defined(CVMX_BUILD_FOR_LINUX_KERNEL)
+
+/**
+ * Set the maximum number of virtual IDs allowed in a machine.
+ *
+ * @param nvid Number of virtual IDs allowed in a machine.
+ * @return Return 0 on success or -1 on failure.
+ */
+int cvmx_l2c_vrt_set_max_virtids(int nvid);
+
+/**
+ * Get the maximum number of virtual IDs allowed in a machine.
+ *
+ * @return Return number of virtual machine IDs. Return -1 on failure.
+ */
+int cvmx_l2c_vrt_get_max_virtids(void);
+
+/**
+ * Set the maximum size of memory space to be allocated for virtualization.
+ *
+ * @param memsz Size of the virtual memory in GB
+ * @return Return 0 on success or -1 on failure.
+ */
+int cvmx_l2c_vrt_set_max_memsz(int memsz);
+
+/**
+ * Set a Virtual ID to a set of cores.
+ *
+ * @param virtid Assign virtid to a set of cores.
+ * @param coremask The group of cores to assign a unique virtual id.
+ * @return Return 0 on success, otherwise -1.
+ */
+int cvmx_l2c_vrt_assign_virtid(int virtid, uint32_t coremask);
+
+/**
+ * Remove a virtual ID assigned to a set of cores. Update the virtid mask
+ * and virtid stored for each core.
+ *
+ * @param virtid The virtual machine ID to remove.
+ */
+void cvmx_l2c_vrt_remove_virtid(int virtid);
+
+/**
+ * Protect a memory region for a given virtual ID, allowing or
+ * disallowing write access to it.
+ *
+ * @param start_addr Starting address of memory region
+ * @param size Size of the memory to protect
+ * @param virtid Virtual ID to use
+ * @param mode Allow/Disallow write access
+ * = 0, Allow write access by virtid
+ * = 1, Disallow write access by virtid
+ */
+int cvmx_l2c_vrt_memprotect(uint64_t start_addr, int size, int virtid, int mode);
+
+/**
+ * Enable virtualization.
+ *
+ * @param mode Whether out-of-bound writes are an error.
+ */
+void cvmx_l2c_vrt_enable(int mode);
+
+/**
+ * Disable virtualization.
+ */
+void cvmx_l2c_vrt_disable(void);
+
+#endif /* !CVMX_BUILD_FOR_LINUX_HOST && !CVMX_BUILD_FOR_LINUX_KERNEL */
+
+#endif /* __CVMX_L2C_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-l2c.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-l2d-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-l2d-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-l2d-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1137 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-l2d-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon l2d.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_L2D_DEFS_H__
+#define __CVMX_L2D_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_BST0 CVMX_L2D_BST0_FUNC()
+static inline uint64_t CVMX_L2D_BST0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_BST0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000780ull);
+}
+#else
+#define CVMX_L2D_BST0 (CVMX_ADD_IO_SEG(0x0001180080000780ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_BST1 CVMX_L2D_BST1_FUNC()
+static inline uint64_t CVMX_L2D_BST1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_BST1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000788ull);
+}
+#else
+#define CVMX_L2D_BST1 (CVMX_ADD_IO_SEG(0x0001180080000788ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_BST2 CVMX_L2D_BST2_FUNC()
+static inline uint64_t CVMX_L2D_BST2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_BST2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000790ull);
+}
+#else
+#define CVMX_L2D_BST2 (CVMX_ADD_IO_SEG(0x0001180080000790ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_BST3 CVMX_L2D_BST3_FUNC()
+static inline uint64_t CVMX_L2D_BST3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_BST3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000798ull);
+}
+#else
+#define CVMX_L2D_BST3 (CVMX_ADD_IO_SEG(0x0001180080000798ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_ERR CVMX_L2D_ERR_FUNC()
+static inline uint64_t CVMX_L2D_ERR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_ERR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000010ull);
+}
+#else
+#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_FADR CVMX_L2D_FADR_FUNC()
+static inline uint64_t CVMX_L2D_FADR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_FADR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000018ull);
+}
+#else
+#define CVMX_L2D_FADR (CVMX_ADD_IO_SEG(0x0001180080000018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_FSYN0 CVMX_L2D_FSYN0_FUNC()
+static inline uint64_t CVMX_L2D_FSYN0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_FSYN0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000020ull);
+}
+#else
+#define CVMX_L2D_FSYN0 (CVMX_ADD_IO_SEG(0x0001180080000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_FSYN1 CVMX_L2D_FSYN1_FUNC()
+static inline uint64_t CVMX_L2D_FSYN1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_FSYN1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000028ull);
+}
+#else
+#define CVMX_L2D_FSYN1 (CVMX_ADD_IO_SEG(0x0001180080000028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_FUS0 CVMX_L2D_FUS0_FUNC()
+static inline uint64_t CVMX_L2D_FUS0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_FUS0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800007A0ull);
+}
+#else
+#define CVMX_L2D_FUS0 (CVMX_ADD_IO_SEG(0x00011800800007A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_FUS1 CVMX_L2D_FUS1_FUNC()
+static inline uint64_t CVMX_L2D_FUS1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_FUS1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800007A8ull);
+}
+#else
+#define CVMX_L2D_FUS1 (CVMX_ADD_IO_SEG(0x00011800800007A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_FUS2 CVMX_L2D_FUS2_FUNC()
+static inline uint64_t CVMX_L2D_FUS2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_FUS2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800007B0ull);
+}
+#else
+#define CVMX_L2D_FUS2 (CVMX_ADD_IO_SEG(0x00011800800007B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2D_FUS3 CVMX_L2D_FUS3_FUNC()
+static inline uint64_t CVMX_L2D_FUS3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2D_FUS3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800800007B8ull);
+}
+#else
+#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
+#endif
+
+/**
+ * cvmx_l2d_bst0
+ *
+ * L2D_BST0 = L2C Data Store QUAD0 BIST Status Register
+ *
+ */
+union cvmx_l2d_bst0 {
+ uint64_t u64;
+ struct cvmx_l2d_bst0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63 : 29;
+ uint64_t ftl : 1; /**< L2C Data Store Fatal Defect(across all QUADs)
+ 2 or more columns were detected bad across all
+ QUADs[0-3]. Please refer to individual quad failures
+ for bad column = 0x7e to determine which QUAD was in
+ error. */
+ uint64_t q0stat : 34; /**< Bist Results for QUAD0
+ Failure \#1 Status
+ [16:14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Status
+ [33:31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column
+ NOTES: For bad high/low column reporting:
+ 0x7f: No failure
+ 0x7e: Fatal Defect: 2 or more bad columns
+ 0-0x45: Bad column
+ NOTE: If there are less than 2 failures then the
+ bad bank will be 0x7. */
+#else
+ uint64_t q0stat : 34;
+ uint64_t ftl : 1;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } s;
+ struct cvmx_l2d_bst0_s cn30xx;
+ struct cvmx_l2d_bst0_s cn31xx;
+ struct cvmx_l2d_bst0_s cn38xx;
+ struct cvmx_l2d_bst0_s cn38xxp2;
+ struct cvmx_l2d_bst0_s cn50xx;
+ struct cvmx_l2d_bst0_s cn52xx;
+ struct cvmx_l2d_bst0_s cn52xxp1;
+ struct cvmx_l2d_bst0_s cn56xx;
+ struct cvmx_l2d_bst0_s cn56xxp1;
+ struct cvmx_l2d_bst0_s cn58xx;
+ struct cvmx_l2d_bst0_s cn58xxp1;
+};
+typedef union cvmx_l2d_bst0 cvmx_l2d_bst0_t;
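Given the field layout described above, a hypothetical decoder for failure
record #1 might look like this (the shifts mirror the [16:14]/[13:7]/[6:0]
bit assignments in the comment):

    /* Sketch: unpack failure #1 from the QUAD0 BIST status register. */
    cvmx_l2d_bst0_t bst;
    bst.u64 = cvmx_read_csr(CVMX_L2D_BST0);
    unsigned int bank   = (bst.s.q0stat >> 14) & 0x7;  /* [16:14] bad bank */
    unsigned int hi_col = (bst.s.q0stat >> 7) & 0x7f;  /* [13:7] bad high column */
    unsigned int lo_col = bst.s.q0stat & 0x7f;         /* [6:0]  bad low column */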
+
+/**
+ * cvmx_l2d_bst1
+ *
+ * L2D_BST1 = L2C Data Store QUAD1 BIST Status Register
+ *
+ */
+union cvmx_l2d_bst1 {
+ uint64_t u64;
+ struct cvmx_l2d_bst1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t q1stat : 34; /**< Bist Results for QUAD1
+ Failure \#1 Status
+ [16:14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Status
+ [33:31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column
+ NOTES: For bad high/low column reporting:
+ 0x7f: No failure
+ 0x7e: Fatal Defect: 2 or more bad columns
+ 0-0x45: Bad column
+ NOTE: If there are less than 2 failures then the
+ bad bank will be 0x7. */
+#else
+ uint64_t q1stat : 34;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_l2d_bst1_s cn30xx;
+ struct cvmx_l2d_bst1_s cn31xx;
+ struct cvmx_l2d_bst1_s cn38xx;
+ struct cvmx_l2d_bst1_s cn38xxp2;
+ struct cvmx_l2d_bst1_s cn50xx;
+ struct cvmx_l2d_bst1_s cn52xx;
+ struct cvmx_l2d_bst1_s cn52xxp1;
+ struct cvmx_l2d_bst1_s cn56xx;
+ struct cvmx_l2d_bst1_s cn56xxp1;
+ struct cvmx_l2d_bst1_s cn58xx;
+ struct cvmx_l2d_bst1_s cn58xxp1;
+};
+typedef union cvmx_l2d_bst1 cvmx_l2d_bst1_t;
+
+/**
+ * cvmx_l2d_bst2
+ *
+ * L2D_BST2 = L2C Data Store QUAD2 BIST Status Register
+ *
+ */
+union cvmx_l2d_bst2 {
+ uint64_t u64;
+ struct cvmx_l2d_bst2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t q2stat : 34; /**< Bist Results for QUAD2
+ Failure \#1 Status
+ [16:14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Status
+ [33:31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column
+ NOTES: For bad high/low column reporting:
+ 0x7f: No failure
+ 0x7e: Fatal Defect: 2 or more bad columns
+ 0-0x45: Bad column
+ NOTE: If there are less than 2 failures then the
+ bad bank will be 0x7. */
+#else
+ uint64_t q2stat : 34;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_l2d_bst2_s cn30xx;
+ struct cvmx_l2d_bst2_s cn31xx;
+ struct cvmx_l2d_bst2_s cn38xx;
+ struct cvmx_l2d_bst2_s cn38xxp2;
+ struct cvmx_l2d_bst2_s cn50xx;
+ struct cvmx_l2d_bst2_s cn52xx;
+ struct cvmx_l2d_bst2_s cn52xxp1;
+ struct cvmx_l2d_bst2_s cn56xx;
+ struct cvmx_l2d_bst2_s cn56xxp1;
+ struct cvmx_l2d_bst2_s cn58xx;
+ struct cvmx_l2d_bst2_s cn58xxp1;
+};
+typedef union cvmx_l2d_bst2 cvmx_l2d_bst2_t;
+
+/**
+ * cvmx_l2d_bst3
+ *
+ * L2D_BST3 = L2C Data Store QUAD3 BIST Status Register
+ *
+ */
+union cvmx_l2d_bst3 {
+ uint64_t u64;
+ struct cvmx_l2d_bst3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t q3stat : 34; /**< Bist Results for QUAD3
+ Failure \#1 Status
+ [16:14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Status
+ [33:31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column
+ NOTES: For bad high/low column reporting:
+ 0x7f: No failure
+ 0x7e: Fatal Defect: 2 or more bad columns
+ 0-0x45: Bad column
+ NOTE: If there are less than 2 failures then the
+ bad bank will be 0x7. */
+#else
+ uint64_t q3stat : 34;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_l2d_bst3_s cn30xx;
+ struct cvmx_l2d_bst3_s cn31xx;
+ struct cvmx_l2d_bst3_s cn38xx;
+ struct cvmx_l2d_bst3_s cn38xxp2;
+ struct cvmx_l2d_bst3_s cn50xx;
+ struct cvmx_l2d_bst3_s cn52xx;
+ struct cvmx_l2d_bst3_s cn52xxp1;
+ struct cvmx_l2d_bst3_s cn56xx;
+ struct cvmx_l2d_bst3_s cn56xxp1;
+ struct cvmx_l2d_bst3_s cn58xx;
+ struct cvmx_l2d_bst3_s cn58xxp1;
+};
+typedef union cvmx_l2d_bst3 cvmx_l2d_bst3_t;
+
+/**
+ * cvmx_l2d_err
+ *
+ * L2D_ERR = L2 Data Errors
+ *
+ * Description: L2 Data ECC SEC/DED Errors and Interrupt Enable
+ */
+union cvmx_l2d_err {
+ uint64_t u64;
+ struct cvmx_l2d_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t bmhclsel : 1; /**< L2 Bit Map Half CacheLine ECC Selector
+
+ When L2C_DBG[L2T]=1/L2D_ERR[ECC_ENA]=0, the BMHCLSEL selects
+ which half cacheline to conditionally latch into
+ the L2D_FSYN0/L2D_FSYN1 registers when an LDD command
+ is detected from the diagnostic PP (see L2C_DBG[PPNUM]).
+ - 0: OW[0-3] ECC (from first 1/2 cacheline) is selected to
+ be conditionally latched into the L2D_FSYN0/1 CSRs.
+ - 1: OW[4-7] ECC (from last 1/2 cacheline) is selected to
+ be conditionally latched into
+ the L2D_FSYN0/1 CSRs. */
+ uint64_t ded_err : 1; /**< L2D Double Error detected (DED) */
+ uint64_t sec_err : 1; /**< L2D Single Error corrected (SEC) */
+ uint64_t ded_intena : 1; /**< L2 Data ECC Double Error Detect(DED) Interrupt Enable bit
+ When set, allows interrupts to be reported on double bit
+ (uncorrectable) errors from the L2 Data Arrays. */
+ uint64_t sec_intena : 1; /**< L2 Data ECC Single Error Correct(SEC) Interrupt Enable bit
+ When set, allows interrupts to be reported on single bit
+ (correctable) errors from the L2 Data Arrays. */
+ uint64_t ecc_ena : 1; /**< L2 Data ECC Enable
+ When set, enables 10-bit SEC/DED codeword for 128bit L2
+ Data Arrays. */
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t sec_intena : 1;
+ uint64_t ded_intena : 1;
+ uint64_t sec_err : 1;
+ uint64_t ded_err : 1;
+ uint64_t bmhclsel : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_l2d_err_s cn30xx;
+ struct cvmx_l2d_err_s cn31xx;
+ struct cvmx_l2d_err_s cn38xx;
+ struct cvmx_l2d_err_s cn38xxp2;
+ struct cvmx_l2d_err_s cn50xx;
+ struct cvmx_l2d_err_s cn52xx;
+ struct cvmx_l2d_err_s cn52xxp1;
+ struct cvmx_l2d_err_s cn56xx;
+ struct cvmx_l2d_err_s cn56xxp1;
+ struct cvmx_l2d_err_s cn58xx;
+ struct cvmx_l2d_err_s cn58xxp1;
+};
+typedef union cvmx_l2d_err cvmx_l2d_err_t;
+
+/**
+ * cvmx_l2d_fadr
+ *
+ * L2D_FADR = L2 Failing Address
+ *
+ * Description: L2 Data ECC SEC/DED Failing Address
+ *
+ * Notes:
+ * When L2D_SEC_ERR or L2D_DED_ERR are set, this field contains the failing L2 Data store index.
+ * (A DED Error will always overwrite a SEC Error SYNDROME and FADR).
+ */
+union cvmx_l2d_fadr {
+ uint64_t u64;
+ struct cvmx_l2d_fadr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t fadru : 1; /**< Failing L2 Data Store Upper Index bit(MSB) */
+ uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED
+ error) */
+ uint64_t fset : 3; /**< Failing SET# */
+ uint64_t fadr : 11; /**< Failing L2 Data Store Lower Index bits
+ (NOTE: L2 Data Store Index is for each 1/2 cacheline)
+ [FADRU, FADR[10:1]]: cacheline index[17:7]
+ FADR[0]: 1/2 cacheline index
+ NOTE: FADR[1] is used to select between upper/lower 1MB
+ physical L2 Data Store banks. */
+#else
+ uint64_t fadr : 11;
+ uint64_t fset : 3;
+ uint64_t fowmsk : 4;
+ uint64_t fadru : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_l2d_fadr_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED
+ error) */
+ uint64_t reserved_13_13 : 1;
+ uint64_t fset : 2; /**< Failing SET# */
+ uint64_t reserved_9_10 : 2;
+ uint64_t fadr : 9; /**< Failing L2 Data Store Index(1of512 = 1/2 CL address) */
+#else
+ uint64_t fadr : 9;
+ uint64_t reserved_9_10 : 2;
+ uint64_t fset : 2;
+ uint64_t reserved_13_13 : 1;
+ uint64_t fowmsk : 4;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn30xx;
+ struct cvmx_l2d_fadr_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED
+ error) */
+ uint64_t reserved_13_13 : 1;
+ uint64_t fset : 2; /**< Failing SET# */
+ uint64_t reserved_10_10 : 1;
+ uint64_t fadr : 10; /**< Failing L2 Data Store Index
+ (1 of 1024 = half cacheline indices) */
+#else
+ uint64_t fadr : 10;
+ uint64_t reserved_10_10 : 1;
+ uint64_t fset : 2;
+ uint64_t reserved_13_13 : 1;
+ uint64_t fowmsk : 4;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn31xx;
+ struct cvmx_l2d_fadr_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED
+ error) */
+ uint64_t fset : 3; /**< Failing SET# */
+ uint64_t fadr : 11; /**< Failing L2 Data Store Index (1of2K = 1/2 CL address) */
+#else
+ uint64_t fadr : 11;
+ uint64_t fset : 3;
+ uint64_t fowmsk : 4;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn38xx;
+ struct cvmx_l2d_fadr_cn38xx cn38xxp2;
+ struct cvmx_l2d_fadr_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED
+ error) */
+ uint64_t fset : 3; /**< Failing SET# */
+ uint64_t reserved_8_10 : 3;
+ uint64_t fadr : 8; /**< Failing L2 Data Store Lower Index bits
+ (NOTE: L2 Data Store Index is for each 1/2 cacheline)
+ FADR[7:1]: cacheline index[13:7]
+ FADR[0]: 1/2 cacheline index */
+#else
+ uint64_t fadr : 8;
+ uint64_t reserved_8_10 : 3;
+ uint64_t fset : 3;
+ uint64_t fowmsk : 4;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn50xx;
+ struct cvmx_l2d_fadr_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED
+ error) */
+ uint64_t fset : 3; /**< Failing SET# */
+ uint64_t reserved_10_10 : 1;
+ uint64_t fadr : 10; /**< Failing L2 Data Store Lower Index bits
+ (NOTE: L2 Data Store Index is for each 1/2 cacheline)
+ FADR[9:1]: cacheline index[15:7]
+ FADR[0]: 1/2 cacheline index */
+#else
+ uint64_t fadr : 10;
+ uint64_t reserved_10_10 : 1;
+ uint64_t fset : 3;
+ uint64_t fowmsk : 4;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn52xx;
+ struct cvmx_l2d_fadr_cn52xx cn52xxp1;
+ struct cvmx_l2d_fadr_s cn56xx;
+ struct cvmx_l2d_fadr_s cn56xxp1;
+ struct cvmx_l2d_fadr_s cn58xx;
+ struct cvmx_l2d_fadr_s cn58xxp1;
+};
+typedef union cvmx_l2d_fadr cvmx_l2d_fadr_t;
+
+/**
+ * cvmx_l2d_fsyn0
+ *
+ * L2D_FSYN0 = L2 Failing Syndrome [OW0,4 / OW1,5]
+ *
+ * Description: L2 Data ECC SEC/DED Failing Syndrome for lower cache line
+ *
+ * Notes:
+ * When L2D_SEC_ERR or L2D_DED_ERR are set, this field contains the failing L2 Data ECC 10b syndrome.
+ * (A DED Error will always overwrite a SEC Error SYNDROME and FADR).
+ */
+union cvmx_l2d_fsyn0 {
+ uint64_t u64;
+ struct cvmx_l2d_fsyn0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t fsyn_ow1 : 10; /**< Failing L2 Data Store SYNDROME OW[1,5]
+ When L2D_ERR[ECC_ENA]=1 and either L2D_ERR[SEC_ERR]
+ or L2D_ERR[DED_ERR] are set, this field represents
+ the failing OWECC syndrome for the half cacheline
+ indexed by L2D_FADR[FADR].
+ NOTE: The L2D_FADR[FOWMSK] further qualifies which
+ OW lane(1of4) detected the error.
+ When L2C_DBG[L2T]=1 and L2D_ERR[ECC_ENA]=0, an LDD
+ command from the diagnostic PP will conditionally latch
+ the raw OWECC for the selected half cacheline.
+ (see: L2D_ERR[BMHCLSEL] */
+ uint64_t fsyn_ow0 : 10; /**< Failing L2 Data Store SYNDROME OW[0,4]
+ When L2D_ERR[ECC_ENA]=1 and either L2D_ERR[SEC_ERR]
+ or L2D_ERR[DED_ERR] are set, this field represents
+ the failing OWECC syndrome for the half cacheline
+ indexed by L2D_FADR[FADR].
+ NOTE: The L2D_FADR[FOWMSK] further qualifies which
+ OW lane(1of4) detected the error.
+ When L2C_DBG[L2T]=1 and L2D_ERR[ECC_ENA]=0, an LDD
+ (L1 load-miss) from the diagnostic PP will conditionally
+ latch the raw OWECC for the selected half cacheline.
+ (see: L2D_ERR[BMHCLSEL] */
+#else
+ uint64_t fsyn_ow0 : 10;
+ uint64_t fsyn_ow1 : 10;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_l2d_fsyn0_s cn30xx;
+ struct cvmx_l2d_fsyn0_s cn31xx;
+ struct cvmx_l2d_fsyn0_s cn38xx;
+ struct cvmx_l2d_fsyn0_s cn38xxp2;
+ struct cvmx_l2d_fsyn0_s cn50xx;
+ struct cvmx_l2d_fsyn0_s cn52xx;
+ struct cvmx_l2d_fsyn0_s cn52xxp1;
+ struct cvmx_l2d_fsyn0_s cn56xx;
+ struct cvmx_l2d_fsyn0_s cn56xxp1;
+ struct cvmx_l2d_fsyn0_s cn58xx;
+ struct cvmx_l2d_fsyn0_s cn58xxp1;
+};
+typedef union cvmx_l2d_fsyn0 cvmx_l2d_fsyn0_t;
+
+/**
+ * cvmx_l2d_fsyn1
+ *
+ * L2D_FSYN1 = L2 Failing Syndrome [OW2,6 / OW3,7]
+ *
+ * Description: L2 Data ECC SEC/DED Failing Syndrome for upper cache line
+ *
+ * Notes:
+ * When L2D_SEC_ERR or L2D_DED_ERR are set, this field contains the failing L2 Data ECC 10b syndrome.
+ * (A DED Error will always overwrite a SEC Error SYNDROME and FADR).
+ */
+union cvmx_l2d_fsyn1 {
+ uint64_t u64;
+ struct cvmx_l2d_fsyn1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t fsyn_ow3 : 10; /**< Failing L2 Data Store SYNDROME OW[3,7] */
+ uint64_t fsyn_ow2 : 10; /**< Failing L2 Data Store SYNDROME OW[2,6] */
+#else
+ uint64_t fsyn_ow2 : 10;
+ uint64_t fsyn_ow3 : 10;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_l2d_fsyn1_s cn30xx;
+ struct cvmx_l2d_fsyn1_s cn31xx;
+ struct cvmx_l2d_fsyn1_s cn38xx;
+ struct cvmx_l2d_fsyn1_s cn38xxp2;
+ struct cvmx_l2d_fsyn1_s cn50xx;
+ struct cvmx_l2d_fsyn1_s cn52xx;
+ struct cvmx_l2d_fsyn1_s cn52xxp1;
+ struct cvmx_l2d_fsyn1_s cn56xx;
+ struct cvmx_l2d_fsyn1_s cn56xxp1;
+ struct cvmx_l2d_fsyn1_s cn58xx;
+ struct cvmx_l2d_fsyn1_s cn58xxp1;
+};
+typedef union cvmx_l2d_fsyn1 cvmx_l2d_fsyn1_t;
+
+/**
+ * cvmx_l2d_fus0
+ *
+ * L2D_FUS0 = L2C Data Store QUAD0 Fuse Register
+ *
+ */
+union cvmx_l2d_fus0 {
+ uint64_t u64;
+ struct cvmx_l2d_fus0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t q0fus : 34; /**< Fuse Register for QUAD0
+ This is purely for debug and not needed in the general
+ manufacturing flow.
+ * Note that the fuses are complementary (Assigning a
+ * fuse to 1 will read as a zero). This means the case
+ * where no fuses are blown results in these CSRs showing
+ * all ones.
+ Failure \#1 Fuse Mapping
+ [16:14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Fuse Mapping
+ [33:31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column */
+#else
+ uint64_t q0fus : 34;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_l2d_fus0_s cn30xx;
+ struct cvmx_l2d_fus0_s cn31xx;
+ struct cvmx_l2d_fus0_s cn38xx;
+ struct cvmx_l2d_fus0_s cn38xxp2;
+ struct cvmx_l2d_fus0_s cn50xx;
+ struct cvmx_l2d_fus0_s cn52xx;
+ struct cvmx_l2d_fus0_s cn52xxp1;
+ struct cvmx_l2d_fus0_s cn56xx;
+ struct cvmx_l2d_fus0_s cn56xxp1;
+ struct cvmx_l2d_fus0_s cn58xx;
+ struct cvmx_l2d_fus0_s cn58xxp1;
+};
+typedef union cvmx_l2d_fus0 cvmx_l2d_fus0_t;
+
+/**
+ * cvmx_l2d_fus1
+ *
+ * L2D_FUS1 = L2C Data Store QUAD1 Fuse Register
+ *
+ */
+union cvmx_l2d_fus1 {
+ uint64_t u64;
+ struct cvmx_l2d_fus1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t q1fus : 34; /**< Fuse Register for QUAD1
+ This is purely for debug and not needed in the general
+ manufacturing flow.
+ * Note that the fuses are complementary (Assigning a
+ * fuse to 1 will read as a zero). This means the case
+ * where no fuses are blown results in these CSRs showing
+ * all ones.
+ Failure \#1 Fuse Mapping
+ [16:14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Fuse Mapping
+ [33:31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column */
+#else
+ uint64_t q1fus : 34;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_l2d_fus1_s cn30xx;
+ struct cvmx_l2d_fus1_s cn31xx;
+ struct cvmx_l2d_fus1_s cn38xx;
+ struct cvmx_l2d_fus1_s cn38xxp2;
+ struct cvmx_l2d_fus1_s cn50xx;
+ struct cvmx_l2d_fus1_s cn52xx;
+ struct cvmx_l2d_fus1_s cn52xxp1;
+ struct cvmx_l2d_fus1_s cn56xx;
+ struct cvmx_l2d_fus1_s cn56xxp1;
+ struct cvmx_l2d_fus1_s cn58xx;
+ struct cvmx_l2d_fus1_s cn58xxp1;
+};
+typedef union cvmx_l2d_fus1 cvmx_l2d_fus1_t;
+
+/**
+ * cvmx_l2d_fus2
+ *
+ * L2D_FUS2 = L2C Data Store QUAD2 Fuse Register
+ *
+ */
+union cvmx_l2d_fus2 {
+ uint64_t u64;
+ struct cvmx_l2d_fus2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t q2fus : 34; /**< Fuse Register for QUAD2
+ This is purely for debug and not needed in the general
+ manufacturing flow.
+ * Note that the fuses are complementary (Assigning a
+ * fuse to 1 will read as a zero). This means the case
+ * where no fuses are blown results in these CSRs showing
+ * all ones.
+ Failure \#1 Fuse Mapping
+ [16:14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Fuse Mapping
+ [33:31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column */
+#else
+ uint64_t q2fus : 34;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_l2d_fus2_s cn30xx;
+ struct cvmx_l2d_fus2_s cn31xx;
+ struct cvmx_l2d_fus2_s cn38xx;
+ struct cvmx_l2d_fus2_s cn38xxp2;
+ struct cvmx_l2d_fus2_s cn50xx;
+ struct cvmx_l2d_fus2_s cn52xx;
+ struct cvmx_l2d_fus2_s cn52xxp1;
+ struct cvmx_l2d_fus2_s cn56xx;
+ struct cvmx_l2d_fus2_s cn56xxp1;
+ struct cvmx_l2d_fus2_s cn58xx;
+ struct cvmx_l2d_fus2_s cn58xxp1;
+};
+typedef union cvmx_l2d_fus2 cvmx_l2d_fus2_t;
+
+/**
+ * cvmx_l2d_fus3
+ *
+ * L2D_FUS3 = L2C Data Store QUAD3 Fuse Register
+ *
+ */
+union cvmx_l2d_fus3 {
+ uint64_t u64;
+ struct cvmx_l2d_fus3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t ema_ctl : 3; /**< L2 Data Store EMA Control
+ These bits are used to 'observe' the EMA[1:0] inputs
+ for the L2 Data Store RAMs which are controlled by
+ either FUSES[141:140] or by MIO_FUSE_EMA[EMA] CSR.
+ From poweron (dc_ok), the EMA_CTL are driven from
+ FUSE[141:140]. However after the 1st CSR write to the
+ MIO_FUSE_EMA[EMA] bits, the EMA_CTL will source
+ from the MIO_FUSE_EMA[EMA] register permanently
+ (until dc_ok). */
+ uint64_t reserved_34_36 : 3;
+ uint64_t q3fus : 34; /**< Fuse Register for QUAD3
+ This is purely for debug and not needed in the general
+ manufacturing flow.
+ Note that the fuses are complementary (Assigning a
+ fuse to 1 will read as a zero). This means the case
+ * where no fuses are blown results in these CSRs showing
+ all ones.
+ Failure \#1 Fuse Mapping
+ [16:14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Fuse Mapping
+ [33:31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column */
+#else
+ uint64_t q3fus : 34;
+ uint64_t reserved_34_36 : 3;
+ uint64_t ema_ctl : 3;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_l2d_fus3_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63 : 29;
+ uint64_t crip_64k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1. */
+ uint64_t q3fus : 34; /**< Fuse Register for QUAD3
+ This is purely for debug and not needed in the general
+ manufacturing flow.
+ Note that the fuses are complementary (Assigning a
+ fuse to 1 will read as a zero). This means the case
+ where no fuses are blown results in these CSRs showing
+ all ones.
+ Failure \#1 Fuse Mapping
+ [16:15] UNUSED
+ [14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Fuse Mapping
+ [33:32] UNUSED
+ [31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column */
+#else
+ uint64_t q3fus : 34;
+ uint64_t crip_64k : 1;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } cn30xx;
+ struct cvmx_l2d_fus3_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63 : 29;
+ uint64_t crip_128k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1. */
+ uint64_t q3fus : 34; /**< Fuse Register for QUAD3
+ This is purely for debug and not needed in the general
+ manufacturing flow.
+ Note that the fuses are complementary (Assigning a
+ fuse to 1 will read as a zero). This means the case
+ where no fuses are blown results in these CSRs showing
+ all ones.
+ Failure \#1 Fuse Mapping
+ [16:15] UNUSED
+ [14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Fuse Mapping
+ [33:32] UNUSED
+ [31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column */
+#else
+ uint64_t q3fus : 34;
+ uint64_t crip_128k : 1;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } cn31xx;
+ struct cvmx_l2d_fus3_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t crip_256k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1.
+ *** NOTE: Pass2 Addition */
+ uint64_t crip_512k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1.
+ *** NOTE: Pass2 Addition */
+ uint64_t q3fus : 34; /**< Fuse Register for QUAD3
+ This is purely for debug and not needed in the general
+ manufacturing flow.
+ Note that the fuses are complementary (Assigning a
+ fuse to 1 will read as a zero). This means the case
+ where no fuses are blown results in these CSRs showing
+ all ones.
+ Failure \#1 Fuse Mapping
+ [16:14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Fuse Mapping
+ [33:31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column */
+#else
+ uint64_t q3fus : 34;
+ uint64_t crip_512k : 1;
+ uint64_t crip_256k : 1;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn38xx;
+ struct cvmx_l2d_fus3_cn38xx cn38xxp2;
+ struct cvmx_l2d_fus3_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t ema_ctl : 3; /**< L2 Data Store EMA Control
+ These bits are used to 'observe' the EMA[2:0] inputs
+ for the L2 Data Store RAMs which are controlled by
+ either FUSES[142:140] or by MIO_FUSE_EMA[EMA] CSR.
+ From poweron (dc_ok), the EMA_CTL are driven from
+ FUSE[141:140]. However after the 1st CSR write to the
+ MIO_FUSE_EMA[EMA] bits, the EMA_CTL will source
+ from the MIO_FUSE_EMA[EMA] register permanently
+ (until dc_ok). */
+ uint64_t reserved_36_36 : 1;
+ uint64_t crip_32k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1. */
+ uint64_t crip_64k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1. */
+ uint64_t q3fus : 34; /**< Fuse Register for QUAD3
+ This is purely for debug and not needed in the general
+ manufacturing flow.
+ Note that the fuses are complementary (Assigning a
+ fuse to 1 will read as a zero). This means the case
+ where no fuses are blown results in these CSRs showing
+ all ones.
+ Failure \#1 Fuse Mapping
+ [16:14] UNUSED (5020 uses single physical bank per quad)
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Fuse Mapping
+ [33:31] UNUSED (5020 uses single physical bank per quad)
+ [30:24] bad high column
+ [23:17] bad low column */
+#else
+ uint64_t q3fus : 34;
+ uint64_t crip_64k : 1;
+ uint64_t crip_32k : 1;
+ uint64_t reserved_36_36 : 1;
+ uint64_t ema_ctl : 3;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } cn50xx;
+ struct cvmx_l2d_fus3_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t ema_ctl : 3; /**< L2 Data Store EMA Control
+ These bits are used to 'observe' the EMA[2:0] inputs
+ for the L2 Data Store RAMs which are controlled by
+ either FUSES[142:140] or by MIO_FUSE_EMA[EMA] CSR.
+ From poweron (dc_ok), the EMA_CTL are driven from
+ FUSE[141:140]. However after the 1st CSR write to the
+ MIO_FUSE_EMA[EMA] bits, the EMA_CTL will source
+ from the MIO_FUSE_EMA[EMA] register permanently
+ (until dc_ok). */
+ uint64_t reserved_36_36 : 1;
+ uint64_t crip_128k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1. */
+ uint64_t crip_256k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1. */
+ uint64_t q3fus : 34; /**< Fuse Register for QUAD3
+ This is purely for debug and not needed in the general
+ manufacturing flow.
+ Note that the fuses are complementary (Assigning a
+ fuse to 1 will read as a zero). This means the case
+ where no fuses are blown results in these CSRs showing
+ all ones.
+ Failure \#1 Fuse Mapping
+ [16:14] UNUSED (5020 uses single physical bank per quad)
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Fuse Mapping
+ [33:31] UNUSED (5020 uses single physical bank per quad)
+ [30:24] bad high column
+ [23:17] bad low column */
+#else
+ uint64_t q3fus : 34;
+ uint64_t crip_256k : 1;
+ uint64_t crip_128k : 1;
+ uint64_t reserved_36_36 : 1;
+ uint64_t ema_ctl : 3;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } cn52xx;
+ struct cvmx_l2d_fus3_cn52xx cn52xxp1;
+ struct cvmx_l2d_fus3_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t ema_ctl : 3; /**< L2 Data Store EMA Control
+ These bits are used to 'observe' the EMA[2:0] inputs
+ for the L2 Data Store RAMs which are controlled by
+ either FUSES[142:140] or by MIO_FUSE_EMA[EMA] CSR.
+ From poweron (dc_ok), the EMA_CTL are driven from
+ FUSE[141:140]. However after the 1st CSR write to the
+ MIO_FUSE_EMA[EMA] bits, the EMA_CTL will source
+ from the MIO_FUSE_EMA[EMA] register permanently
+ (until dc_ok). */
+ uint64_t reserved_36_36 : 1;
+ uint64_t crip_512k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1.
+ *** NOTE: Pass2 Addition */
+ uint64_t crip_1024k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1.
+ *** NOTE: Pass2 Addition */
+ uint64_t q3fus : 34; /**< Fuse Register for QUAD3
+ This is purely for debug and not needed in the general
+ manufacturing flow.
+ Note that the fuses are complementary (Assigning a
+ fuse to 1 will read as a zero). This means the case
+ where no fuses are blown results in these CSRs showing
+ all ones.
+ Failure \#1 Fuse Mapping
+ [16:14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Fuse Mapping
+ [33:31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column */
+#else
+ uint64_t q3fus : 34;
+ uint64_t crip_1024k : 1;
+ uint64_t crip_512k : 1;
+ uint64_t reserved_36_36 : 1;
+ uint64_t ema_ctl : 3;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } cn56xx;
+ struct cvmx_l2d_fus3_cn56xx cn56xxp1;
+ struct cvmx_l2d_fus3_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t ema_ctl : 2; /**< L2 Data Store EMA Control
+ These bits are used to 'observe' the EMA[1:0] inputs
+ for the L2 Data Store RAMs which are controlled by
+ either FUSES[141:140] or by MIO_FUSE_EMA[EMA] CSR.
+ From poweron (dc_ok), the EMA_CTL are driven from
+ FUSE[141:140]. However after the 1st CSR write to the
+ MIO_FUSE_EMA[EMA] bits, the EMA_CTL will source
+ from the MIO_FUSE_EMA[EMA] register permanently
+ (until dc_ok). */
+ uint64_t reserved_36_36 : 1;
+ uint64_t crip_512k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1.
+ *** NOTE: Pass2 Addition */
+ uint64_t crip_1024k : 1; /**< This is purely for debug and not needed in the general
+ manufacturing flow.
+ If the FUSE is not-blown, then this bit should read
+ as 0. If the FUSE is blown, then this bit should read
+ as 1.
+ *** NOTE: Pass2 Addition */
+ uint64_t q3fus : 34; /**< Fuse Register for QUAD3
+ This is purely for debug and not needed in the general
+ manufacturing flow.
+ Note that the fuses are complementary (Assigning a
+ fuse to 1 will read as a zero). This means the case
+ where no fuses are blown results in these CSRs showing
+ all ones.
+ Failure \#1 Fuse Mapping
+ [16:14] bad bank
+ [13:7] bad high column
+ [6:0] bad low column
+ Failure \#2 Fuse Mapping
+ [33:31] bad bank
+ [30:24] bad high column
+ [23:17] bad low column */
+#else
+ uint64_t q3fus : 34;
+ uint64_t crip_1024k : 1;
+ uint64_t crip_512k : 1;
+ uint64_t reserved_36_36 : 1;
+ uint64_t ema_ctl : 2;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } cn58xx;
+ struct cvmx_l2d_fus3_cn58xx cn58xxp1;
+};
+typedef union cvmx_l2d_fus3 cvmx_l2d_fus3_t;
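The crip_* cripple fuses sit at different bit positions (and under different names) in each model-specific layout above, so portable code has to select the matching union member at run time. A minimal sketch, assuming cvmx_read_csr(), OCTEON_IS_MODEL(), and a CVMX_L2D_FUS3 address macro from this SDK; only two models are shown and the helper name is hypothetical.

    /* Hypothetical helper: has any L2 cache-cripple fuse been blown? */
    static int l2d_cache_is_crippled(void)
    {
        cvmx_l2d_fus3_t fus3;
        fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);

        if (OCTEON_IS_MODEL(OCTEON_CN38XX))
            return fus3.cn38xx.crip_256k || fus3.cn38xx.crip_512k;
        if (OCTEON_IS_MODEL(OCTEON_CN58XX))
            return fus3.cn58xx.crip_512k || fus3.cn58xx.crip_1024k;
        return 0; /* other models: consult their matching member */
    }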
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-l2d-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-l2t-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-l2t-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-l2t-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,650 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-l2t-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon l2t.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_L2T_DEFS_H__
+#define __CVMX_L2T_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_L2T_ERR CVMX_L2T_ERR_FUNC()
+static inline uint64_t CVMX_L2T_ERR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
+ cvmx_warn("CVMX_L2T_ERR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180080000008ull);
+}
+#else
+#define CVMX_L2T_ERR (CVMX_ADD_IO_SEG(0x0001180080000008ull))
+#endif
+
+/**
+ * cvmx_l2t_err
+ *
+ * L2T_ERR = L2 Tag Errors
+ *
+ * Description: L2 Tag ECC SEC/DED Errors and Interrupt Enable
+ */
+union cvmx_l2t_err {
+ uint64_t u64;
+ struct cvmx_l2t_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t fadru : 1; /**< Failing L2 Tag Upper Address Bit (Index[10])
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the FADRU contains the upper (MSB) bit of the cacheline index
+ into the L2 Tag Store. */
+ uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit */
+ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n
+ could not find an available/unlocked set (for
+ replacement).
+ Most likely, this is a result of SW mixing SET
+ PARTITIONING with ADDRESS LOCKING. If SW allows
+ another PP to LOCKDOWN all SETs available to PP#n,
+ then a Rd/Wr Miss from PP#n will be unable
+ to determine a 'valid' replacement set (since LOCKED
+ addresses should NEVER be replaced).
+ If such an event occurs, the HW will select the smallest
+ available SET (specified by UMSKx) as the replacement
+ set, and the address is unlocked. */
+ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */
+ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of
+ the INDEX (which is ignored by HW - but reported to SW).
+ The LDD(L1 load-miss) for the LOCK operation is completed
+ successfully, however the address is NOT locked.
+ NOTE: 'Available' sets take the L2C_SPAR*[UMSK*]
+ into account. For example, if diagnostic PPx has
+ UMSKx defined to only use SETs [1:0], and SET1 had
+ been previously LOCKED, then an attempt to LOCK the
+ last available SET0 would result in a LCKERR. (This
+ is to ensure that at least 1 SET at each INDEX is
+ not LOCKED for general use by other PPs). */
+ uint64_t fset : 3; /**< Failing L2 Tag Hit Set# (1-of-8)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and
+ (FSYN != 0), the FSET specifies the failing hit-set.
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set
+ is specified by the L2C_DBG[SET]. */
+ uint64_t fadr : 10; /**< Failing L2 Tag Address (10-bit Index)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the FADR contains the lower 10bit cacheline index
+ into the L2 Tag Store. */
+ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the contents of this register contain the 6-bit
+ syndrome for the hit set only.
+ If (FSYN = 0), the SBE or DBE reported was for one of
+ the "non-hit" sets at the failing index(FADR).
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set
+ is specified by the L2C_DBG[SET].
+ If (FSYN != 0), the SBE or DBE reported was for the
+ hit set at the failing index(FADR) and failing
+ set(FSET).
+ SW NOTE: To determine which "non-hit" set was in error,
+ SW can use the L2C_DBG[L2T] debug feature to explicitly
+ read the other sets at the failing index(FADR). When
+ (FSYN !=0), then the FSET contains the failing hit-set.
+ NOTE: A DED Error will always overwrite a SEC Error
+ (SYNDROME and FADR). */
+ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for double bit errors(DBEs).
+ This bit is set if ANY of the 8 sets contains a DBE.
+ DBEs also generate an interrupt (if enabled). */
+ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for single bit errors(SBEs).
+ This bit is set if ANY of the 8 sets contains an SBE.
+ SBEs are auto corrected in HW and generate an
+ interrupt(if enabled). */
+ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on double bit (uncorrectable) errors from
+ the L2 Tag Arrays. */
+ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on single bit (correctable) errors from
+ the L2 Tag Arrays. */
+ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable
+ When set, enables 6-bit SEC/DED codeword for 19-bit
+ L2 Tag Arrays [V,D,L,TAG[33:18]] */
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t sec_intena : 1;
+ uint64_t ded_intena : 1;
+ uint64_t sec_err : 1;
+ uint64_t ded_err : 1;
+ uint64_t fsyn : 6;
+ uint64_t fadr : 10;
+ uint64_t fset : 3;
+ uint64_t lckerr : 1;
+ uint64_t lck_intena : 1;
+ uint64_t lckerr2 : 1;
+ uint64_t lck_intena2 : 1;
+ uint64_t fadru : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_l2t_err_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit */
+ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n
+ could not find an available/unlocked set (for
+ replacement).
+ Most likely, this is a result of SW mixing SET
+ PARTITIONING with ADDRESS LOCKING. If SW allows
+ another PP to LOCKDOWN all SETs available to PP#n,
+ then a Rd/Wr Miss from PP#n will be unable
+ to determine a 'valid' replacement set (since LOCKED
+ addresses should NEVER be replaced).
+ If such an event occurs, the HW will select the smallest
+ available SET (specified by UMSKx) as the replacement
+ set, and the address is unlocked. */
+ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */
+ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of
+ the INDEX (which is ignored by HW - but reported to SW).
+ The LDD(L1 load-miss) for the LOCK operation is
+ completed successfully, however the address is NOT
+ locked.
+ NOTE: 'Available' sets take the L2C_SPAR*[UMSK*]
+ into account. For example, if diagnostic PPx has
+ UMSKx defined to only use SETs [1:0], and SET1 had
+ been previously LOCKED, then an attempt to LOCK the
+ last available SET0 would result in a LCKERR. (This
+ is to ensure that at least 1 SET at each INDEX is
+ not LOCKED for general use by other PPs). */
+ uint64_t reserved_23_23 : 1;
+ uint64_t fset : 2; /**< Failing L2 Tag Hit Set# (1-of-4)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and
+ (FSYN != 0), the FSET specifies the failing hit-set.
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set
+ is specified by the L2C_DBG[SET]. */
+ uint64_t reserved_19_20 : 2;
+ uint64_t fadr : 8; /**< Failing L2 Tag Store Index (8-bit)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the FADR contains the 8bit cacheline index into the
+ L2 Tag Store. */
+ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the contents of this register contain the 6-bit
+ syndrome for the hit set only.
+ If (FSYN = 0), the SBE or DBE reported was for one of
+ the "non-hit" sets at the failing index(FADR).
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set
+ is specified by the L2C_DBG[SET].
+ If (FSYN != 0), the SBE or DBE reported was for the
+ hit set at the failing index(FADR) and failing
+ set(FSET).
+ SW NOTE: To determine which "non-hit" set was in error,
+ SW can use the L2C_DBG[L2T] debug feature to explicitly
+ read the other sets at the failing index(FADR). When
+ (FSYN !=0), then the FSET contains the failing hit-set.
+ NOTE: A DED Error will always overwrite a SEC Error
+ (SYNDROME and FADR). */
+ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for double bit errors(DBEs).
+ This bit is set if ANY of the 8 sets contains a DBE.
+ DBEs also generate an interrupt (if enabled). */
+ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for single bit errors(SBEs).
+ This bit is set if ANY of the 8 sets contains an SBE.
+ SBEs are auto corrected in HW and generate an
+ interrupt(if enabled). */
+ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on double bit (uncorrectable) errors from
+ the L2 Tag Arrays. */
+ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on single bit (correctable) errors from
+ the L2 Tag Arrays. */
+ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable
+ When set, enables 6-bit SEC/DED codeword for 22-bit
+ L2 Tag Arrays [V,D,L,TAG[33:15]] */
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t sec_intena : 1;
+ uint64_t ded_intena : 1;
+ uint64_t sec_err : 1;
+ uint64_t ded_err : 1;
+ uint64_t fsyn : 6;
+ uint64_t fadr : 8;
+ uint64_t reserved_19_20 : 2;
+ uint64_t fset : 2;
+ uint64_t reserved_23_23 : 1;
+ uint64_t lckerr : 1;
+ uint64_t lck_intena : 1;
+ uint64_t lckerr2 : 1;
+ uint64_t lck_intena2 : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn30xx;
+ struct cvmx_l2t_err_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit */
+ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n
+ could not find an available/unlocked set (for
+ replacement).
+ Most likely, this is a result of SW mixing SET
+ PARTITIONING with ADDRESS LOCKING. If SW allows
+ another PP to LOCKDOWN all SETs available to PP#n,
+ then a Rd/Wr Miss from PP#n will be unable
+ to determine a 'valid' replacement set (since LOCKED
+ addresses should NEVER be replaced).
+ If such an event occurs, the HW will select the smallest
+ available SET (specified by UMSKx) as the replacement
+ set, and the address is unlocked. */
+ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */
+ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of
+ the INDEX (which is ignored by HW - but reported to SW).
+ The LDD(L1 load-miss) for the LOCK operation is completed
+ successfully, however the address is NOT locked.
+ NOTE: 'Available' sets take the L2C_SPAR*[UMSK*]
+ into account. For example, if diagnostic PPx has
+ UMSKx defined to only use SETs [1:0], and SET1 had
+ been previously LOCKED, then an attempt to LOCK the
+ last available SET0 would result in a LCKERR. (This
+ is to ensure that at least 1 SET at each INDEX is
+ not LOCKED for general use by other PPs). */
+ uint64_t reserved_23_23 : 1;
+ uint64_t fset : 2; /**< Failing L2 Tag Hit Set# (1-of-4)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and
+ (FSYN != 0), the FSET specifies the failing hit-set.
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set
+ is specified by the L2C_DBG[SET]. */
+ uint64_t reserved_20_20 : 1;
+ uint64_t fadr : 9; /**< Failing L2 Tag Address (9-bit Index)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the FADR contains the 9-bit cacheline index into the
+ L2 Tag Store. */
+ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the contents of this register contain the 6-bit
+ syndrome for the hit set only.
+ If (FSYN = 0), the SBE or DBE reported was for one of
+ the "non-hit" sets at the failing index(FADR).
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set
+ is specified by the L2C_DBG[SET].
+ If (FSYN != 0), the SBE or DBE reported was for the
+ hit set at the failing index(FADR) and failing
+ set(FSET).
+ SW NOTE: To determine which "non-hit" set was in error,
+ SW can use the L2C_DBG[L2T] debug feature to explicitly
+ read the other sets at the failing index(FADR). When
+ (FSYN !=0), then the FSET contains the failing hit-set.
+ NOTE: A DED Error will always overwrite a SEC Error
+ (SYNDROME and FADR). */
+ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for double bit errors(DBEs).
+ This bit is set if ANY of the 8 sets contains a DBE.
+ DBEs also generate an interrupt (if enabled). */
+ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for single bit errors(SBEs).
+ This bit is set if ANY of the 8 sets contains an SBE.
+ SBEs are auto corrected in HW and generate an
+ interrupt(if enabled). */
+ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on double bit (uncorrectable) errors from
+ the L2 Tag Arrays. */
+ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on single bit (correctable) errors from
+ the L2 Tag Arrays. */
+ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable
+ When set, enables 6-bit SEC/DED codeword for 21-bit
+ L2 Tag Arrays [V,D,L,TAG[33:16]] */
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t sec_intena : 1;
+ uint64_t ded_intena : 1;
+ uint64_t sec_err : 1;
+ uint64_t ded_err : 1;
+ uint64_t fsyn : 6;
+ uint64_t fadr : 9;
+ uint64_t reserved_20_20 : 1;
+ uint64_t fset : 2;
+ uint64_t reserved_23_23 : 1;
+ uint64_t lckerr : 1;
+ uint64_t lck_intena : 1;
+ uint64_t lckerr2 : 1;
+ uint64_t lck_intena2 : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn31xx;
+ struct cvmx_l2t_err_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit */
+ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n
+ could not find an available/unlocked set (for
+ replacement).
+ Most likely, this is a result of SW mixing SET
+ PARTITIONING with ADDRESS LOCKING. If SW allows
+ another PP to LOCKDOWN all SETs available to PP#n,
+ then a Rd/Wr Miss from PP#n will be unable
+ to determine a 'valid' replacement set (since LOCKED
+ addresses should NEVER be replaced).
+ If such an event occurs, the HW will select the smallest
+ available SET (specified by UMSKx) as the replacement
+ set, and the address is unlocked. */
+ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */
+ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of
+ the INDEX (which is ignored by HW - but reported to SW).
+ The LDD(L1 load-miss) for the LOCK operation is completed
+ successfully, however the address is NOT locked.
+ NOTE: 'Available' sets take the L2C_SPAR*[UMSK*]
+ into account. For example, if diagnostic PPx has
+ UMSKx defined to only use SETs [1:0], and SET1 had
+ been previously LOCKED, then an attempt to LOCK the
+ last available SET0 would result in a LCKERR. (This
+ is to ensure that at least 1 SET at each INDEX is
+ not LOCKED for general use by other PPs). */
+ uint64_t fset : 3; /**< Failing L2 Tag Hit Set# (1-of-8)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and
+ (FSYN != 0), the FSET specifies the failing hit-set.
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set
+ is specified by the L2C_DBG[SET]. */
+ uint64_t fadr : 10; /**< Failing L2 Tag Address (10-bit Index)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the FADR contains the 10bit cacheline index into the
+ L2 Tag Store. */
+ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the contents of this register contain the 6-bit
+ syndrome for the hit set only.
+ If (FSYN = 0), the SBE or DBE reported was for one of
+ the "non-hit" sets at the failing index(FADR).
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set
+ is specified by the L2C_DBG[SET].
+ If (FSYN != 0), the SBE or DBE reported was for the
+ hit set at the failing index(FADR) and failing
+ set(FSET).
+ SW NOTE: To determine which "non-hit" set was in error,
+ SW can use the L2C_DBG[L2T] debug feature to explicitly
+ read the other sets at the failing index(FADR). When
+ (FSYN !=0), then the FSET contains the failing hit-set.
+ NOTE: A DED Error will always overwrite a SEC Error
+ (SYNDROME and FADR). */
+ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for double bit errors(DBEs).
+ This bit is set if ANY of the 8 sets contains a DBE.
+ DBEs also generate an interrupt (if enabled). */
+ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for single bit errors(SBEs).
+ This bit is set if ANY of the 8 sets contains an SBE.
+ SBEs are auto corrected in HW and generate an
+ interrupt(if enabled). */
+ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on double bit (uncorrectable) errors from
+ the L2 Tag Arrays. */
+ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on single bit (correctable) errors from
+ the L2 Tag Arrays. */
+ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable
+ When set, enables 6-bit SEC/DED codeword for 20-bit
+ L2 Tag Arrays [V,D,L,TAG[33:17]] */
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t sec_intena : 1;
+ uint64_t ded_intena : 1;
+ uint64_t sec_err : 1;
+ uint64_t ded_err : 1;
+ uint64_t fsyn : 6;
+ uint64_t fadr : 10;
+ uint64_t fset : 3;
+ uint64_t lckerr : 1;
+ uint64_t lck_intena : 1;
+ uint64_t lckerr2 : 1;
+ uint64_t lck_intena2 : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn38xx;
+ struct cvmx_l2t_err_cn38xx cn38xxp2;
+ struct cvmx_l2t_err_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit */
+ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n
+ could not find an available/unlocked set (for
+ replacement).
+ Most likely, this is a result of SW mixing SET
+ PARTITIONING with ADDRESS LOCKING. If SW allows
+ another PP to LOCKDOWN all SETs available to PP#n,
+ then a Rd/Wr Miss from PP#n will be unable
+ to determine a 'valid' replacement set (since LOCKED
+ addresses should NEVER be replaced).
+ If such an event occurs, the HW will select the smallest
+ available SET (specified by UMSKx) as the replacement
+ set, and the address is unlocked. */
+ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */
+ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of
+ the INDEX (which is ignored by HW - but reported to SW).
+ The LDD(L1 load-miss) for the LOCK operation is completed
+ successfully, however the address is NOT locked.
+ NOTE: 'Available' sets take the L2C_SPAR*[UMSK*]
+ into account. For example, if diagnostic PPx has
+ UMSKx defined to only use SETs [1:0], and SET1 had
+ been previously LOCKED, then an attempt to LOCK the
+ last available SET0 would result in a LCKERR. (This
+ is to ensure that at least 1 SET at each INDEX is
+ not LOCKED for general use by other PPs). */
+ uint64_t fset : 3; /**< Failing L2 Tag Hit Set# (1-of-8)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and
+ (FSYN != 0), the FSET specifies the failing hit-set.
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set
+ is specified by the L2C_DBG[SET]. */
+ uint64_t reserved_18_20 : 3;
+ uint64_t fadr : 7; /**< Failing L2 Tag Address (7-bit Index)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the FADR contains the lower 7bit cacheline index
+ into the L2 Tag Store. */
+ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the contents of this register contain the 6-bit
+ syndrome for the hit set only.
+ If (FSYN = 0), the SBE or DBE reported was for one of
+ the "non-hit" sets at the failing index(FADR).
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set
+ is specified by the L2C_DBG[SET].
+ If (FSYN != 0), the SBE or DBE reported was for the
+ hit set at the failing index(FADR) and failing
+ set(FSET).
+ SW NOTE: To determine which "non-hit" set was in error,
+ SW can use the L2C_DBG[L2T] debug feature to explicitly
+ read the other sets at the failing index(FADR). When
+ (FSYN !=0), then the FSET contains the failing hit-set.
+ NOTE: A DED Error will always overwrite a SEC Error
+ (SYNDROME and FADR). */
+ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for double bit errors(DBEs).
+ This bit is set if ANY of the 8 sets contains a DBE.
+ DBEs also generate an interrupt (if enabled). */
+ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for single bit errors(SBEs).
+ This bit is set if ANY of the 8 sets contains an SBE.
+ SBEs are auto corrected in HW and generate an
+ interrupt(if enabled). */
+ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on double bit (uncorrectable) errors from
+ the L2 Tag Arrays. */
+ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on single bit (correctable) errors from
+ the L2 Tag Arrays. */
+ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable
+ When set, enables 6-bit SEC/DED codeword for 23-bit
+ L2 Tag Arrays [V,D,L,TAG[33:14]] */
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t sec_intena : 1;
+ uint64_t ded_intena : 1;
+ uint64_t sec_err : 1;
+ uint64_t ded_err : 1;
+ uint64_t fsyn : 6;
+ uint64_t fadr : 7;
+ uint64_t reserved_18_20 : 3;
+ uint64_t fset : 3;
+ uint64_t lckerr : 1;
+ uint64_t lck_intena : 1;
+ uint64_t lckerr2 : 1;
+ uint64_t lck_intena2 : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn50xx;
+ struct cvmx_l2t_err_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit */
+ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n
+ could not find an available/unlocked set (for
+ replacement).
+ Most likely, this is a result of SW mixing SET
+ PARTITIONING with ADDRESS LOCKING. If SW allows
+ another PP to LOCKDOWN all SETs available to PP#n,
+ then a Rd/Wr Miss from PP#n will be unable
+ to determine a 'valid' replacement set (since LOCKED
+ addresses should NEVER be replaced).
+ If such an event occurs, the HW will select the smallest
+ available SET (specified by UMSKx) as the replacement
+ set, and the address is unlocked. */
+ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */
+ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of
+ the INDEX (which is ignored by HW - but reported to SW).
+ The LDD(L1 load-miss) for the LOCK operation is completed
+ successfully, however the address is NOT locked.
+ NOTE: 'Available' sets take the L2C_SPAR*[UMSK*]
+ into account. For example, if diagnostic PPx has
+ UMSKx defined to only use SETs [1:0], and SET1 had
+ been previously LOCKED, then an attempt to LOCK the
+ last available SET0 would result in a LCKERR. (This
+ is to ensure that at least 1 SET at each INDEX is
+ not LOCKED for general use by other PPs). */
+ uint64_t fset : 3; /**< Failing L2 Tag Hit Set# (1-of-8)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and
+ (FSYN != 0), the FSET specifies the failing hit-set.
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set
+ is specified by the L2C_DBG[SET]. */
+ uint64_t reserved_20_20 : 1;
+ uint64_t fadr : 9; /**< Failing L2 Tag Address (9-bit Index)
+ When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the FADR contains the lower 9bit cacheline index
+ into the L2 Tag Store. */
+ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set,
+ the contents of this register contain the 6-bit
+ syndrome for the hit set only.
+ If (FSYN = 0), the SBE or DBE reported was for one of
+ the "non-hit" sets at the failing index(FADR).
+ NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set
+ is specified by the L2C_DBG[SET].
+ If (FSYN != 0), the SBE or DBE reported was for the
+ hit set at the failing index(FADR) and failing
+ set(FSET).
+ SW NOTE: To determine which "non-hit" set was in error,
+ SW can use the L2C_DBG[L2T] debug feature to explicitly
+ read the other sets at the failing index(FADR). When
+ (FSYN !=0), then the FSET contains the failing hit-set.
+ NOTE: A DED Error will always overwrite a SEC Error
+ (SYNDROME and FADR). */
+ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for double bit errors(DBEs).
+ This bit is set if ANY of the 8 sets contains a DBE.
+ DBEs also generate an interrupt (if enabled). */
+ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC)
+ During every L2 Tag Probe, all 8 sets Tag's (at a
+ given index) are checked for single bit errors(SBEs).
+ This bit is set if ANY of the 8 sets contains an SBE.
+ SBEs are auto corrected in HW and generate an
+ interrupt(if enabled). */
+ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on double bit (uncorrectable) errors from
+ the L2 Tag Arrays. */
+ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt
+ Enable bit. When set, allows interrupts to be
+ reported on single bit (correctable) errors from
+ the L2 Tag Arrays. */
+ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable
+ When set, enables 6-bit SEC/DED codeword for 21-bit
+ L2 Tag Arrays [V,D,L,TAG[33:16]] */
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t sec_intena : 1;
+ uint64_t ded_intena : 1;
+ uint64_t sec_err : 1;
+ uint64_t ded_err : 1;
+ uint64_t fsyn : 6;
+ uint64_t fadr : 9;
+ uint64_t reserved_20_20 : 1;
+ uint64_t fset : 3;
+ uint64_t lckerr : 1;
+ uint64_t lck_intena : 1;
+ uint64_t lckerr2 : 1;
+ uint64_t lck_intena2 : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn52xx;
+ struct cvmx_l2t_err_cn52xx cn52xxp1;
+ struct cvmx_l2t_err_s cn56xx;
+ struct cvmx_l2t_err_s cn56xxp1;
+ struct cvmx_l2t_err_s cn58xx;
+ struct cvmx_l2t_err_s cn58xxp1;
+};
+typedef union cvmx_l2t_err cvmx_l2t_err_t;
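In practice this register is used in two steps: a read-modify-write to turn on ECC and the two interrupt enables, and a decode of FADR/FSET/FSYN when an error is reported. Below is a minimal sketch against the common (s) layout, using the CVMX_L2T_ERR macro defined above; clearing the error bits and wiring up an actual interrupt handler are chip- and OS-specific and omitted here.

    static void l2t_ecc_enable(void)
    {
        cvmx_l2t_err_t err;
        err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
        err.s.ecc_ena = 1;    /* generate/check the SEC/DED codeword */
        err.s.sec_intena = 1; /* interrupt on corrected single-bit errors */
        err.s.ded_intena = 1; /* interrupt on double-bit errors */
        cvmx_write_csr(CVMX_L2T_ERR, err.u64);
    }

    static void l2t_err_report(void)
    {
        cvmx_l2t_err_t err;
        err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
        if (err.s.sec_err || err.s.ded_err)
            /* FSYN == 0 means a non-hit set failed at index FADR. */
            cvmx_dprintf("L2T %s: index=%u set=%u syn=0x%x\n",
                         err.s.ded_err ? "DBE" : "SBE",
                         (unsigned)err.s.fadr, (unsigned)err.s.fset,
                         (unsigned)err.s.fsyn);
    }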
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-l2t-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-led-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-led-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-led-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,631 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-led-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon led.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_LED_DEFS_H__
+#define __CVMX_LED_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_LED_BLINK CVMX_LED_BLINK_FUNC()
+static inline uint64_t CVMX_LED_BLINK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_BLINK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001A48ull);
+}
+#else
+#define CVMX_LED_BLINK (CVMX_ADD_IO_SEG(0x0001180000001A48ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_LED_CLK_PHASE CVMX_LED_CLK_PHASE_FUNC()
+static inline uint64_t CVMX_LED_CLK_PHASE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_CLK_PHASE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001A08ull);
+}
+#else
+#define CVMX_LED_CLK_PHASE (CVMX_ADD_IO_SEG(0x0001180000001A08ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_LED_CYLON CVMX_LED_CYLON_FUNC()
+static inline uint64_t CVMX_LED_CYLON_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_CYLON not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001AF8ull);
+}
+#else
+#define CVMX_LED_CYLON (CVMX_ADD_IO_SEG(0x0001180000001AF8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_LED_DBG CVMX_LED_DBG_FUNC()
+static inline uint64_t CVMX_LED_DBG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_DBG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001A18ull);
+}
+#else
+#define CVMX_LED_DBG (CVMX_ADD_IO_SEG(0x0001180000001A18ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_LED_EN CVMX_LED_EN_FUNC()
+static inline uint64_t CVMX_LED_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001A00ull);
+}
+#else
+#define CVMX_LED_EN (CVMX_ADD_IO_SEG(0x0001180000001A00ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_LED_POLARITY CVMX_LED_POLARITY_FUNC()
+static inline uint64_t CVMX_LED_POLARITY_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_POLARITY not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001A50ull);
+}
+#else
+#define CVMX_LED_POLARITY (CVMX_ADD_IO_SEG(0x0001180000001A50ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_LED_PRT CVMX_LED_PRT_FUNC()
+static inline uint64_t CVMX_LED_PRT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_PRT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001A10ull);
+}
+#else
+#define CVMX_LED_PRT (CVMX_ADD_IO_SEG(0x0001180000001A10ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_LED_PRT_FMT CVMX_LED_PRT_FMT_FUNC()
+static inline uint64_t CVMX_LED_PRT_FMT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_PRT_FMT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001A30ull);
+}
+#else
+#define CVMX_LED_PRT_FMT (CVMX_ADD_IO_SEG(0x0001180000001A30ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LED_PRT_STATUSX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_LED_PRT_STATUSX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001A80ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_LED_PRT_STATUSX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A80ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LED_UDD_CNTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_LED_UDD_CNTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001A20ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_LED_UDD_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A20ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LED_UDD_DATX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_LED_UDD_DATX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001A38ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_LED_UDD_DATX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A38ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LED_UDD_DAT_CLRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_LED_UDD_DAT_CLRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001AC8ull) + ((offset) & 1) * 16;
+}
+#else
+#define CVMX_LED_UDD_DAT_CLRX(offset) (CVMX_ADD_IO_SEG(0x0001180000001AC8ull) + ((offset) & 1) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LED_UDD_DAT_SETX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_LED_UDD_DAT_SETX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001AC0ull) + ((offset) & 1) * 16;
+}
+#else
+#define CVMX_LED_UDD_DAT_SETX(offset) (CVMX_ADD_IO_SEG(0x0001180000001AC0ull) + ((offset) & 1) * 16)
+#endif
+
+/**
+ * cvmx_led_blink
+ *
+ * LED_BLINK = LED Blink Rate (in led_clks)
+ *
+ */
+union cvmx_led_blink {
+ uint64_t u64;
+ struct cvmx_led_blink_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t rate : 8; /**< LED Blink rate in led_latch clks
+ RATE must be > 0 */
+#else
+ uint64_t rate : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_led_blink_s cn38xx;
+ struct cvmx_led_blink_s cn38xxp2;
+ struct cvmx_led_blink_s cn56xx;
+ struct cvmx_led_blink_s cn56xxp1;
+ struct cvmx_led_blink_s cn58xx;
+ struct cvmx_led_blink_s cn58xxp1;
+};
+typedef union cvmx_led_blink cvmx_led_blink_t;
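Programming the blink rate is a single write; the only constraint noted above is that RATE must be non-zero. A short sketch, assuming cvmx_write_csr() and the CVMX_LED_BLINK macro defined earlier in this file:

    /* Sketch: blink period is RATE led_latch clocks; RATE must be > 0. */
    static void led_set_blink(uint8_t rate)
    {
        cvmx_led_blink_t blink;
        blink.u64 = 0;
        blink.s.rate = rate ? rate : 1; /* enforce RATE > 0 */
        cvmx_write_csr(CVMX_LED_BLINK, blink.u64);
    }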
+
+/**
+ * cvmx_led_clk_phase
+ *
+ * LED_CLK_PHASE = LED Clock Phase (in 64 eclks)
+ *
+ *
+ * Notes:
+ * Example:
+ * Given a 2ns eclk, an LED_CLK_PHASE[PHASE] of 1 indicates that each
+ * led_clk phase is 64 eclks, or 128ns. The led_clk period is 2*phase,
+ * or 256ns, which corresponds to 3.9MHz. The default value of 4 yields
+ * an led_clk period of 64*4*2ns*2 = 1024ns or ~1MHz (977KHz).
+ */
+union cvmx_led_clk_phase {
+ uint64_t u64;
+ struct cvmx_led_clk_phase_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t phase : 7; /**< Number of 64 eclks in order to create the led_clk */
+#else
+ uint64_t phase : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_led_clk_phase_s cn38xx;
+ struct cvmx_led_clk_phase_s cn38xxp2;
+ struct cvmx_led_clk_phase_s cn56xx;
+ struct cvmx_led_clk_phase_s cn56xxp1;
+ struct cvmx_led_clk_phase_s cn58xx;
+ struct cvmx_led_clk_phase_s cn58xxp1;
+};
+typedef union cvmx_led_clk_phase cvmx_led_clk_phase_t;
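The arithmetic in the note above generalizes to period = 64 * PHASE * 2 * eclk_ns. A tiny helper capturing that, with the eclk period supplied by the caller (hypothetical, for illustration only):

    /* led_clk period in ns: each phase is 64 eclks and a full period
     * is two phases. E.g. phase=4, eclk_ns=2 -> 1024ns (~977KHz). */
    static uint64_t led_clk_period_ns(uint64_t phase, uint64_t eclk_ns)
    {
        return 64ull * phase * 2 * eclk_ns;
    }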
+
+/**
+ * cvmx_led_cylon
+ *
+ * LED_CYLON = LED CYLON Effect (should remain undocumented)
+ *
+ */
+union cvmx_led_cylon {
+ uint64_t u64;
+ struct cvmx_led_cylon_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t rate : 16; /**< LED Cylon Effect when RATE!=0
+ Changes at RATE*LATCH period */
+#else
+ uint64_t rate : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_led_cylon_s cn38xx;
+ struct cvmx_led_cylon_s cn38xxp2;
+ struct cvmx_led_cylon_s cn56xx;
+ struct cvmx_led_cylon_s cn56xxp1;
+ struct cvmx_led_cylon_s cn58xx;
+ struct cvmx_led_cylon_s cn58xxp1;
+};
+typedef union cvmx_led_cylon cvmx_led_cylon_t;
+
+/**
+ * cvmx_led_dbg
+ *
+ * LED_DBG = LED Debug Port information
+ *
+ */
+union cvmx_led_dbg {
+ uint64_t u64;
+ struct cvmx_led_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t dbg_en : 1; /**< Add Debug Port Data to the LED shift chain
+ Debug Data is shifted out LSB to MSB */
+#else
+ uint64_t dbg_en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_led_dbg_s cn38xx;
+ struct cvmx_led_dbg_s cn38xxp2;
+ struct cvmx_led_dbg_s cn56xx;
+ struct cvmx_led_dbg_s cn56xxp1;
+ struct cvmx_led_dbg_s cn58xx;
+ struct cvmx_led_dbg_s cn58xxp1;
+};
+typedef union cvmx_led_dbg cvmx_led_dbg_t;
+
+/**
+ * cvmx_led_en
+ *
+ * LED_EN = LED Interface Enable
+ *
+ *
+ * Notes:
+ * The LED interface consists of a shift chain with a parallel latch. LED
+ * data is shifted out on each falling edge of led_clk and then captured by
+ * led_lat.
+ *
+ * The LED shift chain consists of the following...
+ *
+ * 32 - UDD header
+ * 6x8 - per port status
+ * 17 - debug port
+ * 32 - UDD trailer
+ *
+ * for a total of 129 bits.
+ *
+ * UDD header is programmable from 0-32 bits (LED_UDD_CNT0) and will shift out
+ * LSB to MSB (LED_UDD_DAT0[0], LED_UDD_DAT0[1],
+ * ... LED_UDD_DAT0[LED_UDD_CNT0]).
+ *
+ * The per port status is also variable. Systems can control which ports send
+ * data (LED_PRT) as well as the status content (LED_PRT_FMT and
+ * LED_PRT_STATUS*). When multiple ports are enabled, they come out in lowest
+ * port to highest port (prt0, prt1, ...).
+ *
+ * The debug port data can also be added to the LED chain (LED_DBG). When
+ * enabled, the debug data shifts out LSB to MSB.
+ *
+ * The UDD trailer data is identical to the header data, but uses LED_UDD_CNT1
+ * and LED_UDD_DAT1.
+ */
+union cvmx_led_en {
+ uint64_t u64;
+ struct cvmx_led_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t en : 1; /**< Enable the LED interface shift-chain */
+#else
+ uint64_t en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_led_en_s cn38xx;
+ struct cvmx_led_en_s cn38xxp2;
+ struct cvmx_led_en_s cn56xx;
+ struct cvmx_led_en_s cn56xxp1;
+ struct cvmx_led_en_s cn58xx;
+ struct cvmx_led_en_s cn58xxp1;
+};
+typedef union cvmx_led_en cvmx_led_en_t;
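Putting the shift-chain pieces together, a bring-up sequence touches the clock phase, the UDD counts, the port enables and format, and finally LED_EN. The sketch below uses only macros defined earlier in this file; the particular values are illustrative, not board-validated.

    static void led_chain_init(void)
    {
        cvmx_led_clk_phase_t phase;
        cvmx_led_prt_t prt;
        cvmx_led_prt_fmt_t fmt;
        cvmx_led_en_t en;

        phase.u64 = 0;
        phase.s.phase = 4;                 /* default, ~1MHz led_clk */
        cvmx_write_csr(CVMX_LED_CLK_PHASE, phase.u64);

        cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 0); /* no UDD header */
        cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 0); /* no UDD trailer */

        prt.u64 = 0;
        prt.s.prt_en = 0x0f;               /* status for RGMII ports 0-3 */
        cvmx_write_csr(CVMX_LED_PRT, prt.u64);

        fmt.u64 = 0;
        fmt.s.format = 0x8;                /* [Tx, Rx, LED_PRT_STATUS[0]] */
        cvmx_write_csr(CVMX_LED_PRT_FMT, fmt.u64);

        en.u64 = 0;
        en.s.en = 1;                       /* start shifting the chain */
        cvmx_write_csr(CVMX_LED_EN, en.u64);
    }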
+
+/**
+ * cvmx_led_polarity
+ *
+ * LED_POLARITY = LED Polarity
+ *
+ */
+union cvmx_led_polarity {
+ uint64_t u64;
+ struct cvmx_led_polarity_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t polarity : 1; /**< LED active polarity
+ 0 = active HIGH LED
+ 1 = active LOW LED (invert led_dat) */
+#else
+ uint64_t polarity : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_led_polarity_s cn38xx;
+ struct cvmx_led_polarity_s cn38xxp2;
+ struct cvmx_led_polarity_s cn56xx;
+ struct cvmx_led_polarity_s cn56xxp1;
+ struct cvmx_led_polarity_s cn58xx;
+ struct cvmx_led_polarity_s cn58xxp1;
+};
+typedef union cvmx_led_polarity cvmx_led_polarity_t;
+
+/**
+ * cvmx_led_prt
+ *
+ * LED_PRT = LED Port status information
+ *
+ *
+ * Notes:
+ * The PRT vector enables status information for the 8 RGMII ports
+ * connected to Octeon. It does not reflect the actual programmed PHY
+ * addresses.
+ */
+union cvmx_led_prt {
+ uint64_t u64;
+ struct cvmx_led_prt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t prt_en : 8; /**< Which ports are enabled to display status
+ PRT_EN<3:0> corresponds to RGMII ports 3-0 on int0
+ PRT_EN<7:4> corresponds to RGMII ports 7-4 on int1
+ Only applies when interface is in RGMII mode
+ The status format is defined by LED_PRT_FMT */
+#else
+ uint64_t prt_en : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_led_prt_s cn38xx;
+ struct cvmx_led_prt_s cn38xxp2;
+ struct cvmx_led_prt_s cn56xx;
+ struct cvmx_led_prt_s cn56xxp1;
+ struct cvmx_led_prt_s cn58xx;
+ struct cvmx_led_prt_s cn58xxp1;
+};
+typedef union cvmx_led_prt cvmx_led_prt_t;
+
+/**
+ * cvmx_led_prt_fmt
+ *
+ * LED_PRT_FMT = LED Port Status Information Format
+ *
+ *
+ * Notes:
+ * TX: RGMII TX block is sending packet data or extends on the port
+ * RX: RGMII RX block has received non-idle cycle
+ *
+ * For short transfers, LEDs will remain on for at least one blink cycle
+ */
+union cvmx_led_prt_fmt {
+ uint64_t u64;
+ struct cvmx_led_prt_fmt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t format : 4; /**< Port Status Information for each enabled port in
+ LED_PRT. The formats are below
+ 0x0: [ LED_PRT_STATUS[0] ]
+ 0x1: [ LED_PRT_STATUS[1:0] ]
+ 0x2: [ LED_PRT_STATUS[3:0] ]
+ 0x3: [ LED_PRT_STATUS[5:0] ]
+ 0x4: [ (RX|TX), LED_PRT_STATUS[0] ]
+ 0x5: [ (RX|TX), LED_PRT_STATUS[1:0] ]
+ 0x6: [ (RX|TX), LED_PRT_STATUS[3:0] ]
+ 0x8: [ Tx, Rx, LED_PRT_STATUS[0] ]
+ 0x9: [ Tx, Rx, LED_PRT_STATUS[1:0] ]
+ 0xa: [ Tx, Rx, LED_PRT_STATUS[3:0] ] */
+#else
+ uint64_t format : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_led_prt_fmt_s cn38xx;
+ struct cvmx_led_prt_fmt_s cn38xxp2;
+ struct cvmx_led_prt_fmt_s cn56xx;
+ struct cvmx_led_prt_fmt_s cn56xxp1;
+ struct cvmx_led_prt_fmt_s cn58xx;
+ struct cvmx_led_prt_fmt_s cn58xxp1;
+};
+typedef union cvmx_led_prt_fmt cvmx_led_prt_fmt_t;
+
+/**
+ * cvmx_led_prt_status#
+ *
+ * LED_PRT_STATUS = LED Port Status information
+ *
+ */
+union cvmx_led_prt_statusx {
+ uint64_t u64;
+ struct cvmx_led_prt_statusx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t status : 6; /**< Bits that software can set to be added to the
+ LED shift chain - depending on LED_PRT_FMT
+ LED_PRT_STATUS(3..0) correspond to RGMII ports 3-0
+ on interface0
+ LED_PRT_STATUS(7..4) correspond to RGMII ports 7-4
+ on interface1
+ Only applies when interface is in RGMII mode */
+#else
+ uint64_t status : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_led_prt_statusx_s cn38xx;
+ struct cvmx_led_prt_statusx_s cn38xxp2;
+ struct cvmx_led_prt_statusx_s cn56xx;
+ struct cvmx_led_prt_statusx_s cn56xxp1;
+ struct cvmx_led_prt_statusx_s cn58xx;
+ struct cvmx_led_prt_statusx_s cn58xxp1;
+};
+typedef union cvmx_led_prt_statusx cvmx_led_prt_statusx_t;
+
+/**
+ * cvmx_led_udd_cnt#
+ *
+ * LED_UDD_CNT = LED UDD Counts
+ *
+ */
+union cvmx_led_udd_cntx {
+ uint64_t u64;
+ struct cvmx_led_udd_cntx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t cnt : 6; /**< Number of bits of user-defined data to include in
+ the LED shift chain. Legal values: 0-32. */
+#else
+ uint64_t cnt : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_led_udd_cntx_s cn38xx;
+ struct cvmx_led_udd_cntx_s cn38xxp2;
+ struct cvmx_led_udd_cntx_s cn56xx;
+ struct cvmx_led_udd_cntx_s cn56xxp1;
+ struct cvmx_led_udd_cntx_s cn58xx;
+ struct cvmx_led_udd_cntx_s cn58xxp1;
+};
+typedef union cvmx_led_udd_cntx cvmx_led_udd_cntx_t;
+
+/**
+ * cvmx_led_udd_dat#
+ *
+ * LED_UDD_DAT = User defined data (header or trailer)
+ *
+ *
+ * Notes:
+ * Bits come out LSB to MSB on the shift chain. If LED_UDD_CNT is set to 4
+ * then the bits comes out LED_UDD_DAT[0], LED_UDD_DAT[1], LED_UDD_DAT[2],
+ * LED_UDD_DAT[3].
+ */
+union cvmx_led_udd_datx {
+ uint64_t u64;
+ struct cvmx_led_udd_datx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t dat : 32; /**< Header or trailer UDD data to be displayed on
+ the LED shift chain. Number of bits to include
+ is controlled by LED_UDD_CNT */
+#else
+ uint64_t dat : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_led_udd_datx_s cn38xx;
+ struct cvmx_led_udd_datx_s cn38xxp2;
+ struct cvmx_led_udd_datx_s cn56xx;
+ struct cvmx_led_udd_datx_s cn56xxp1;
+ struct cvmx_led_udd_datx_s cn58xx;
+ struct cvmx_led_udd_datx_s cn58xxp1;
+};
+typedef union cvmx_led_udd_datx cvmx_led_udd_datx_t;
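+
+/* Hedged usage sketch (assumes the CVMX_LED_UDD_CNTX()/CVMX_LED_UDD_DATX()
+ * address macros defined earlier in this file): include four UDD bits in the
+ * shift chain; per the note above they emerge LSB first.
+ *
+ *   cvmx_led_udd_cntx_t cnt;
+ *   cvmx_led_udd_datx_t dat;
+ *   cnt.u64 = 0;
+ *   cnt.s.cnt = 4;                       // shift out 4 user-defined bits
+ *   cvmx_write_csr(CVMX_LED_UDD_CNTX(0), cnt.u64);
+ *   dat.u64 = 0;
+ *   dat.s.dat = 0x5;                     // emerges as 1,0,1,0
+ *   cvmx_write_csr(CVMX_LED_UDD_DATX(0), dat.u64);
+ */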
+
+/**
+ * cvmx_led_udd_dat_clr#
+ *
+ * LED_UDD_DAT_CLR = User defined data (header or trailer)
+ *
+ */
+union cvmx_led_udd_dat_clrx {
+ uint64_t u64;
+ struct cvmx_led_udd_dat_clrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t clr : 32; /**< Bitwise clear for the Header or trailer UDD data to
+ be displayed on the LED shift chain. */
+#else
+ uint64_t clr : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_led_udd_dat_clrx_s cn38xx;
+ struct cvmx_led_udd_dat_clrx_s cn38xxp2;
+ struct cvmx_led_udd_dat_clrx_s cn56xx;
+ struct cvmx_led_udd_dat_clrx_s cn56xxp1;
+ struct cvmx_led_udd_dat_clrx_s cn58xx;
+ struct cvmx_led_udd_dat_clrx_s cn58xxp1;
+};
+typedef union cvmx_led_udd_dat_clrx cvmx_led_udd_dat_clrx_t;
+
+/**
+ * cvmx_led_udd_dat_set#
+ *
+ * LED_UDD_DAT_SET = User defined data (header or trailer)
+ *
+ */
+union cvmx_led_udd_dat_setx {
+ uint64_t u64;
+ struct cvmx_led_udd_dat_setx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t set : 32; /**< Bitwise set for the Header or trailer UDD data to
+ be displayed on the LED shift chain. */
+#else
+ uint64_t set : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_led_udd_dat_setx_s cn38xx;
+ struct cvmx_led_udd_dat_setx_s cn38xxp2;
+ struct cvmx_led_udd_dat_setx_s cn56xx;
+ struct cvmx_led_udd_dat_setx_s cn56xxp1;
+ struct cvmx_led_udd_dat_setx_s cn58xx;
+ struct cvmx_led_udd_dat_setx_s cn58xxp1;
+};
+typedef union cvmx_led_udd_dat_setx cvmx_led_udd_dat_setx_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-led-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-llm.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-llm.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-llm.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,930 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Configuration functions for low latency memory.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-llm.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-csr-db.h"
+
+#define MIN(a,b) (((a)<(b))?(a):(b))
+
+typedef struct
+{
+ uint32_t dfa_memcfg0_base;
+ uint32_t dfa_memcfg1_base;
+ uint32_t mrs_dat_p0bunk0;
+ uint32_t mrs_dat_p0bunk1;
+ uint32_t mrs_dat_p1bunk0;
+ uint32_t mrs_dat_p1bunk1;
+ uint8_t p0_ena;
+ uint8_t p1_ena;
+ uint8_t bunkport;
+} rldram_csr_config_t;
+
+
+
+
+
+int rld_csr_config_generate(llm_descriptor_t *llm_desc_ptr, rldram_csr_config_t *cfg_ptr);
+
+
+void print_rld_cfg(rldram_csr_config_t *cfg_ptr);
+void write_rld_cfg(rldram_csr_config_t *cfg_ptr);
+static void cn31xx_dfa_memory_init(void);
+
+static uint32_t process_address_map_str(uint32_t mrs_dat, char *addr_str);
+
+
+
+#ifndef CVMX_LLM_NUM_PORTS
+#warning WARNING: default CVMX_LLM_NUM_PORTS used. Defaults deprecated, please set in executive-config.h
+#define CVMX_LLM_NUM_PORTS 1
+#endif
+
+
+#if (CVMX_LLM_NUM_PORTS != 1) && (CVMX_LLM_NUM_PORTS != 2)
+#error "Invalid CVMX_LLM_NUM_PORTS value: must be 1 or 2\n"
+#endif
+
+int cvmx_llm_initialize()
+{
+ if (cvmx_llm_initialize_desc(NULL) < 0)
+ return -1;
+
+ return 0;
+}
+
+
+int cvmx_llm_get_default_descriptor(llm_descriptor_t *llm_desc_ptr)
+{
+ cvmx_sysinfo_t *sys_ptr;
+ sys_ptr = cvmx_sysinfo_get();
+
+ if (!llm_desc_ptr)
+ return -1;
+
+ memset(llm_desc_ptr, 0, sizeof(llm_descriptor_t));
+
+ llm_desc_ptr->cpu_hz = cvmx_clock_get_rate(CVMX_CLOCK_CORE);
+
+ if (sys_ptr->board_type == CVMX_BOARD_TYPE_EBT3000)
+ { // N3K->RLD0 Address Swizzle
+ strcpy(llm_desc_ptr->addr_rld0_fb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld0_bb_str, "22 21 19 20 08 07 06 05 04 03 02 01 00 09 18 17 16 15 14 13 12 11 10");
+ // N3K->RLD1 Address Swizzle
+ strcpy(llm_desc_ptr->addr_rld1_fb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld1_bb_str, "22 21 20 00 08 07 06 05 04 13 02 01 03 09 18 17 16 15 14 10 12 11 19");
+ /* NOTE: The ebt3000 has a strange RLDRAM configuration for validation purposes. It is not recommended to have
+ ** different amounts of memory on different ports as that renders some memory unusable */
+ llm_desc_ptr->rld0_bunks = 2;
+ llm_desc_ptr->rld1_bunks = 2;
+ llm_desc_ptr->rld0_mbytes = 128; // RLD0: 4x 32Mx9
+ llm_desc_ptr->rld1_mbytes = 64; // RLD1: 2x 16Mx18
+ }
+ else if (sys_ptr->board_type == CVMX_BOARD_TYPE_EBT5800)
+ {
+ strcpy(llm_desc_ptr->addr_rld0_fb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld0_bb_str, "22 21 20 00 08 07 06 05 04 13 02 01 03 09 18 17 16 15 14 10 12 11 19");
+ strcpy(llm_desc_ptr->addr_rld1_fb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld1_bb_str, "22 21 20 00 08 07 06 05 04 13 02 01 03 09 18 17 16 15 14 10 12 11 19");
+ llm_desc_ptr->rld0_bunks = 2;
+ llm_desc_ptr->rld1_bunks = 2;
+ llm_desc_ptr->rld0_mbytes = 128;
+ llm_desc_ptr->rld1_mbytes = 128;
+ llm_desc_ptr->max_rld_clock_mhz = 400; /* CN58XX needs a max clock speed for selecting optimal divisor */
+ }
+ else if (sys_ptr->board_type == CVMX_BOARD_TYPE_EBH3000)
+ {
+ strcpy(llm_desc_ptr->addr_rld0_fb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld0_bb_str, "22 21 19 20 08 07 06 05 04 03 02 01 00 09 18 17 16 15 14 13 12 11 10");
+ strcpy(llm_desc_ptr->addr_rld1_fb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld1_bb_str, "22 21 19 20 08 07 06 05 04 03 02 01 00 09 18 17 16 15 14 13 12 11 10");
+ llm_desc_ptr->rld0_bunks = 2;
+ llm_desc_ptr->rld1_bunks = 2;
+ llm_desc_ptr->rld0_mbytes = 128;
+ llm_desc_ptr->rld1_mbytes = 128;
+ }
+ else if (sys_ptr->board_type == CVMX_BOARD_TYPE_THUNDER)
+ {
+
+ if (sys_ptr->board_rev_major >= 4)
+ {
+ strcpy(llm_desc_ptr->addr_rld0_fb_str, "22 21 13 11 01 02 07 19 03 18 10 12 20 06 04 08 17 05 14 16 00 09 15");
+ strcpy(llm_desc_ptr->addr_rld0_bb_str, "22 21 11 13 04 08 17 05 14 16 00 09 15 06 01 02 07 19 03 18 10 12 20");
+ strcpy(llm_desc_ptr->addr_rld1_fb_str, "22 21 02 19 18 17 16 09 14 13 20 11 10 01 08 03 06 15 04 07 05 12 00");
+ strcpy(llm_desc_ptr->addr_rld1_bb_str, "22 21 19 02 08 03 06 15 04 07 05 12 00 01 18 17 16 09 14 13 20 11 10");
+ }
+ else
+ {
+ strcpy(llm_desc_ptr->addr_rld0_fb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld0_bb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld1_fb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld1_bb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ }
+
+ llm_desc_ptr->rld0_bunks = 2;
+ llm_desc_ptr->rld1_bunks = 2;
+ llm_desc_ptr->rld0_mbytes = 128;
+ llm_desc_ptr->rld1_mbytes = 128;
+ }
+ else if (sys_ptr->board_type == CVMX_BOARD_TYPE_NICPRO2)
+ {
+ strcpy(llm_desc_ptr->addr_rld0_fb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld0_bb_str, "22 21 19 20 08 07 06 05 04 03 02 01 00 09 18 17 16 15 14 13 12 11 10");
+ strcpy(llm_desc_ptr->addr_rld1_fb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld1_bb_str, "22 21 19 20 08 07 06 05 04 03 02 01 00 09 18 17 16 15 14 13 12 11 10");
+ llm_desc_ptr->rld0_bunks = 2;
+ llm_desc_ptr->rld1_bunks = 2;
+ llm_desc_ptr->rld0_mbytes = 256;
+ llm_desc_ptr->rld1_mbytes = 256;
+ llm_desc_ptr->max_rld_clock_mhz = 400; /* CN58XX needs a max clock speed for selecting optimal divisor */
+ }
+ else if (sys_ptr->board_type == CVMX_BOARD_TYPE_EBH3100)
+ {
+ /* CN31xx DFA memory is DDR based, so it is completely different from the CN38XX DFA memory */
+ llm_desc_ptr->rld0_bunks = 1;
+ llm_desc_ptr->rld0_mbytes = 256;
+ }
+ else if (sys_ptr->board_type == CVMX_BOARD_TYPE_KBP)
+ {
+ strcpy(llm_desc_ptr->addr_rld0_fb_str, "");
+ strcpy(llm_desc_ptr->addr_rld0_bb_str, "");
+ llm_desc_ptr->rld0_bunks = 0;
+ llm_desc_ptr->rld0_mbytes = 0;
+ strcpy(llm_desc_ptr->addr_rld1_fb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ strcpy(llm_desc_ptr->addr_rld1_bb_str, "22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00");
+ llm_desc_ptr->rld1_bunks = 2;
+ llm_desc_ptr->rld1_mbytes = 64;
+ }
+ else
+ {
+ cvmx_dprintf("No default LLM configuration available for board %s (%d)\n", cvmx_board_type_to_string(sys_ptr->board_type), sys_ptr->board_type);
+ return -1;
+ }
+
+ return(0);
+}
+
+int cvmx_llm_initialize_desc(llm_descriptor_t *llm_desc_ptr)
+{
+ cvmx_sysinfo_t *sys_ptr;
+ sys_ptr = cvmx_sysinfo_get();
+ llm_descriptor_t default_llm_desc;
+
+ memset(&default_llm_desc, 0, sizeof(default_llm_desc));
+ if (sys_ptr->board_type == CVMX_BOARD_TYPE_SIM)
+ {
+ cvmx_dprintf("Skipping llm configuration for simulator.\n");
+ return 0;
+ }
+
+ if (sys_ptr->board_type == CVMX_BOARD_TYPE_EBH3100)
+ {
+ /* CN31xx DFA memory is DDR based, so it is completely different from the CN38XX DFA memory
+ ** config descriptors are not supported yet.*/
+ cvmx_dprintf("Warning: preliminary DFA memory configuration\n");
+ cn31xx_dfa_memory_init();
+ return(256*1024*1024);
+ }
+
+ /* If no descriptor passed, generate default descriptor based on board type.
+ ** Fail if no default available for given board type
+ */
+ if (!llm_desc_ptr)
+ {
+ /* Get default descriptor */
+ if (0 > cvmx_llm_get_default_descriptor(&default_llm_desc))
+ return -1;
+
+ /* Disable second port depending on CVMX config */
+ if (CVMX_LLM_NUM_PORTS == 1)
+ default_llm_desc.rld0_bunks = 0; // For single port: Force RLD0(P1) to appear EMPTY
+
+ cvmx_dprintf("Using default LLM configuration for board %s (%d)\n", cvmx_board_type_to_string(sys_ptr->board_type), sys_ptr->board_type);
+
+ llm_desc_ptr = &default_llm_desc;
+ }
+
+
+
+ rldram_csr_config_t ebt3000_rld_cfg;
+ if (!rld_csr_config_generate(llm_desc_ptr, &ebt3000_rld_cfg))
+ {
+ cvmx_dprintf("Configuring %d llm port(s).\n", !!llm_desc_ptr->rld0_bunks + !!llm_desc_ptr->rld1_bunks);
+ write_rld_cfg(&ebt3000_rld_cfg);
+ }
+ else
+ {
+ cvmx_dprintf("Error creating rldram configuration\n");
+ return(-1);
+ }
+
+ /* Compute how much memory is configured
+ ** Memory is interleaved, so if one port has more than the other some memory is not usable */
+
+ /* If both ports are enabled, handle the case where one port has more than the other.
+ ** This is an unusual and not recommended configuration that exists on the ebt3000 board */
+ if (!!llm_desc_ptr->rld0_bunks && !!llm_desc_ptr->rld1_bunks)
+ llm_desc_ptr->rld0_mbytes = llm_desc_ptr->rld1_mbytes = MIN(llm_desc_ptr->rld0_mbytes, llm_desc_ptr->rld1_mbytes);
+
+ return(((!!llm_desc_ptr->rld0_bunks) * llm_desc_ptr->rld0_mbytes
+ + (!!llm_desc_ptr->rld1_bunks) * llm_desc_ptr->rld1_mbytes) * 1024*1024);
+}
+
+//======================
+// SUPPORT FUNCTIONS:
+//======================
+//======================================================================
+// Extracts srcvec[srcbitpos] and places it in return int (bit[0])
+int bit_extract ( int srcvec, // source word (to extract)
+ int srcbitpos // source bit position
+ )
+{
+ return(((1 << srcbitpos) & srcvec) >> srcbitpos);
+}
+//======================================================================
+// Inserts srcvec[0] into dstvec[dstbitpos] (without affecting other bits)
+int bit_insert ( int srcvec, // srcvec[0] = bit to be inserted
+ int dstbitpos, // Bit position to insert into returned int
+ int dstvec // dstvec (destination vector)
+ )
+{
+ return((srcvec << dstbitpos) | dstvec); // Shift bit to insert into bit position/OR with accumulated number
+}
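+// Illustrative example composing the two helpers above:
+// bit_extract(0x08, 3) == 1 and bit_insert(1, 0, 0) == 1, so
+// bit_insert(bit_extract(0x08, 3), 0, 0) moves bit 3 of the source into bit 0.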
+//======================================================================
+
+int rld_csr_config_generate(llm_descriptor_t *llm_desc_ptr, rldram_csr_config_t *cfg_ptr)
+{
+ char *addr_rld0_fb_str;
+ char *addr_rld0_bb_str;
+ char *addr_rld1_fb_str;
+ char *addr_rld1_bb_str;
+ int eclk_ps;
+ int mtype = 0; // MTYPE (0: RLDRAM, 1: FCRAM)
+ int trcmin = 20; // tRC(min) - from RLDRAM data sheet
+ int trc_cyc; // TRC(cyc)
+ int trc_mod;
+ int trl_cyc; // TRL(cyc)
+ int twl_cyc; // TWL(cyc)
+ int tmrsc_cyc = 6; // tMRSC(cyc) [2-7]
+ int mclk_ps; // DFA Memory Clock(in ps) = 2x eclk
+ int rldcfg = 99; // RLDRAM-II CFG (1,2,3)
+ int mrs_odt = 0; // RLDRAM MRS A[9]=ODT (default)
+ int mrs_impmatch = 0; // RLDRAM MRS A[8]=Impedance Matching (default)
+ int mrs_dllrst = 1; // RLDRAM MRS A[7]=DLL Reset (default)
+ uint32_t mrs_dat;
+ int mrs_dat_p0bunk0 = 0; // MRS Register Data After Address Map (for Port0 Bunk0)
+ int mrs_dat_p0bunk1 = 0; // MRS Register Data After Address Map (for Port0 Bunk1)
+ int mrs_dat_p1bunk0 = 0; // MRS Register Data After Address Map (for Port1 Bunk0)
+ int mrs_dat_p1bunk1 = 0; // MRS Register Data After Address Map (for Port1 Bunk1)
+ int p0_ena = 0; // DFA Port#0 Enabled
+ int p1_ena = 0; // DFA Port#1 Enabled
+ int memport = 0; // Memory(MB) per Port [MAX=512]
+ int membunk; // Memory(MB) per Bunk
+ int bunkport = 0; // Bunks/Port [1/2]
+ int pbunk = 0; // Physical Bunk(or Rank) encoding for address bit
+ int tref_ms = 32; // tREF(ms) (RLDRAM-II overall device refresh interval)
+ int trefi_ns; // tREFI(ns) = tREF(ns)/#rows/bank
+ int rows = 8; // #rows/bank (K) typically 8K
+ int ref512int;
+ int ref512mod;
+ int tskw_cyc = 0;
+ int fprch = 1;
+ int bprch = 0;
+ int dfa_memcfg0_base = 0;
+ int dfa_memcfg1_base = 0;
+ int tbl = 1; // tBL (1: 2-burst /2: 4-burst)
+ int rw_dly;
+ int wr_dly;
+ int r2r = 1;
+ int sil_lat = 1;
+ int clkdiv = 2; /* CN38XX is fixed at 2, CN58XX supports 2,3,4 */
+ int clkdiv_enc = 0x0; /* Encoded clock divisor, only used for CN58XX */
+
+ if (!llm_desc_ptr)
+ return -1;
+
+ /* Setup variables from descriptor */
+
+ addr_rld0_fb_str = llm_desc_ptr->addr_rld0_fb_str;
+ addr_rld0_bb_str = llm_desc_ptr->addr_rld0_bb_str;
+ addr_rld1_fb_str = llm_desc_ptr->addr_rld1_fb_str;
+ addr_rld1_bb_str = llm_desc_ptr->addr_rld1_bb_str;
+
+ p0_ena = !!llm_desc_ptr->rld1_bunks; // NOTE: P0 == RLD1
+ p1_ena = !!llm_desc_ptr->rld0_bunks; // NOTE: P1 == RLD0
+
+ // Massage the code, so that if the user had imbalanced memory per-port (or imbalanced bunks/port), we
+ // at least try to configure 'workable' memory.
+ if (p0_ena && p1_ena) // IF BOTH PORTS Enabled (imbalanced memory), select smaller of BOTH
+ {
+ memport = MIN(llm_desc_ptr->rld0_mbytes, llm_desc_ptr->rld1_mbytes);
+ bunkport = MIN(llm_desc_ptr->rld0_bunks, llm_desc_ptr->rld1_bunks);
+ }
+ else if (p0_ena) // P0=RLD1 Enabled
+ {
+ memport = llm_desc_ptr->rld1_mbytes;
+ bunkport = llm_desc_ptr->rld1_bunks;
+ }
+ else if (p1_ena) // P1=RLD0 Enabled
+ {
+ memport = llm_desc_ptr->rld0_mbytes;
+ bunkport = llm_desc_ptr->rld0_bunks;
+ }
+ else
+ return -1;
+
+ uint32_t eclk_mhz = llm_desc_ptr->cpu_hz/1000000;
+
+
+
+ /* Tweak skew based on cpu clock */
+ if (eclk_mhz <= 367)
+ {
+ tskw_cyc = 0;
+ }
+ else
+ {
+ tskw_cyc = 1;
+ }
+
+ /* Determine clock divider ratio (only required for CN58XX) */
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX))
+ {
+ uint32_t max_llm_clock_mhz = llm_desc_ptr->max_rld_clock_mhz;
+ if (!max_llm_clock_mhz)
+ {
+ max_llm_clock_mhz = 400; /* Default to 400 MHz */
+ cvmx_dprintf("Warning, using default max_rld_clock_mhz of: %lu MHz\n", (unsigned long)max_llm_clock_mhz);
+ }
+
+ /* Compute the divisor, and round up */
+ clkdiv = eclk_mhz/max_llm_clock_mhz;
+ if (clkdiv * max_llm_clock_mhz < eclk_mhz)
+ clkdiv++;
+
+ if (clkdiv > 4)
+ {
+ cvmx_dprintf("ERROR: CN58XX LLM clock divisor out of range\n");
+ goto TERMINATE;
+ }
+ if (clkdiv < 2)
+ clkdiv = 2;
+
+ cvmx_dprintf("Using llm clock divisor: %d, llm clock is: %lu MHz\n", clkdiv, (unsigned long)eclk_mhz/clkdiv);
+ /* Translate divisor into bit encoding for register */
+ /* 0 -> div 2
+ ** 1 -> reserved
+ ** 2 -> div 3
+ ** 3 -> div 4
+ */
+ if (clkdiv == 2)
+ clkdiv_enc = 0;
+ else
+ clkdiv_enc = clkdiv - 1;
+
+ /* Odd divisor needs sil_lat to be 2 */
+ if (clkdiv == 0x3)
+ sil_lat = 2;
+
+ /* Increment tskw for high clock speeds */
+ if ((unsigned long)eclk_mhz/clkdiv >= 375)
+ tskw_cyc += 1;
+ }
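+
+ /* Worked example (illustrative): with eclk_mhz = 800 and max_llm_clock_mhz = 400,
+ ** clkdiv = 2 exactly (no round-up) and clkdiv_enc = 0; sil_lat stays 1, and the
+ ** resulting 400 MHz llm clock is >= 375, so tskw_cyc picks up the extra +1. */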
+
+ eclk_ps = (1000000+(eclk_mhz-1)) / eclk_mhz; // round up if nonzero remainder
+ //=======================================================================
+
+ //=======================================================================
+ // Now, Query User for DFA Memory Type
+ if (mtype != 0)
+ {
+ goto TERMINATE; // Complete this code for FCRAM usage on N3K-P2
+ }
+ //=======================================================================
+ // Query what the tRC(min) value is from the data sheets
+ //=======================================================================
+ // Now determine the Best CFG based on Memory clock(ps) and tRCmin(ns)
+ mclk_ps = eclk_ps * clkdiv;
+ trc_cyc = ((trcmin * 1000)/mclk_ps);
+ trc_mod = ((trcmin * 1000) % mclk_ps);
+ // If remainder exists, bump up to the next integer multiple
+ if (trc_mod != 0)
+ {
+ trc_cyc = trc_cyc + 1;
+ }
+ // If tRC is now ODD, then bump it to the next EVEN integer (RLDRAM-II does not support odd tRC values at this time).
+ if (trc_cyc & 1)
+ {
+ trc_cyc = trc_cyc + 1; // Bump it to an even #
+ }
+ // RLDRAM CFG Range Check: If the computed trc_cyc is less than 4, then set it to min CFG1 [tRC=4]
+ if (trc_cyc < 4)
+ {
+ trc_cyc = 4; // If computed trc_cyc < 4 then clamp to 4
+ }
+ else if (trc_cyc > 8)
+ { // If the computed trc_cyc > 8, then report an error (because RLDRAM cannot support a tRC > 8)
+ goto TERMINATE;
+ }
+ // Assuming all is ok (up to here):
+ // at this point trc_cyc has been clamped between 4 and 8 (and is even), so it can only be 4, 6, or 8, which are
+ // the RLDRAM valid CFG range values.
+ trl_cyc = trc_cyc; // tRL = tRC (for RLDRAM=II)
+ twl_cyc = trl_cyc + 1; // tWL = tRL + 1 (for RLDRAM-II)
+ // NOTE: RLDRAM-II (as of 4/25/05) only has 3 supported CFG encodings:
+ if (trc_cyc == 4)
+ {
+ rldcfg = 1; // CFG #1 (tRL=4/tRC=4/tWL=5)
+ }
+ else if (trc_cyc == 6)
+ {
+ rldcfg = 2; // CFG #2 (tRL=6/tRC=6/tWL=7)
+ }
+ else if (trc_cyc == 8)
+ {
+ rldcfg = 3; // CFG #3 (tRL=8/tRC=8/tWL=9)
+ }
+ else
+ {
+ goto TERMINATE;
+ }
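+ // Worked example (illustrative): at mclk = 400 MHz, mclk_ps = 2500, so
+ // trc_cyc = ceil(20000 / 2500) = 8 (already even), selecting CFG #3
+ // (tRL=8/tRC=8/tWL=9) above.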
+ //=======================================================================
+ mrs_dat = ( (mrs_odt << 9) | (mrs_impmatch << 8) | (mrs_dllrst << 7) | rldcfg );
+ //=======================================================================
+ // If there is only a single bunk, then skip over address mapping queries (which are not required)
+ if (bunkport == 1)
+ {
+ goto CALC_PBUNK;
+ }
+
+ /* Process the address mappings */
+ /* Note that the RLD0 pins correspond to Port#1, and
+ ** the RLD1 pins correspond to Port#0.
+ */
+ mrs_dat_p1bunk0 = process_address_map_str(mrs_dat, addr_rld0_fb_str);
+ mrs_dat_p1bunk1 = process_address_map_str(mrs_dat, addr_rld0_bb_str);
+ mrs_dat_p0bunk0 = process_address_map_str(mrs_dat, addr_rld1_fb_str);
+ mrs_dat_p0bunk1 = process_address_map_str(mrs_dat, addr_rld1_bb_str);
+
+
+ //=======================================================================
+ CALC_PBUNK:
+ // Determine the PBUNK field (based on Memory/Bunk)
+ // This determines the addr bit used to distinguish when crossing a bunk.
+ // NOTE: For RLDRAM, the bunk bit is extracted from a programmably selected
+ // high-order addr bit. [linear address per-bunk]
+ if (bunkport == 2)
+ {
+ membunk = (memport / 2);
+ }
+ else
+ {
+ membunk = memport;
+ }
+ if (membunk == 16)
+ { // 16MB/bunk MA[19]
+ pbunk = 0;
+ }
+ else if (membunk == 32)
+ { // 32MB/bunk MA[20]
+ pbunk = 1;
+ }
+ else if (membunk == 64)
+ { // 64MB/bunk MA[21]
+ pbunk = 2;
+ }
+ else if (membunk == 128)
+ { // 128MB/bunk MA[22]
+ pbunk = 3;
+ }
+ else if (membunk == 256)
+ { // 256MB/bunk MA[23]
+ pbunk = 4;
+ }
+ else if (membunk == 512)
+ { // 512MB/bunk
+ }
+ //=======================================================================
+ //=======================================================================
+ //=======================================================================
+ // Now determine N3K REFINT
+ trefi_ns = (tref_ms * 1000 * 1000) / (rows * 1024);
+ ref512int = ((trefi_ns * 1000) / (eclk_ps * 512));
+ ref512mod = ((trefi_ns * 1000) % (eclk_ps * 512));
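+ // Worked example (illustrative): tref_ms = 32 and rows = 8 give
+ // trefi_ns = 32000000 / 8192 = 3906; at 600 MHz (eclk_ps = 1667),
+ // ref512int = 3906000 / (1667 * 512) = 4.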
+ //=======================================================================
+ // Ask about tSKW
+#if 0
+ if (tskw_ps == 0)
+ {
+ tskw_cyc = 0;
+ }
+ else
+ { // CEILING function
+ tskw_cyc = (tskw_ps / eclk_ps);
+ tskw_mod = (tskw_ps % eclk_ps);
+ if (tskw_mod != 0)
+ { // If there's a remainder - then bump to next (+1)
+ tskw_cyc = tskw_cyc + 1;
+ }
+ }
+#endif
+ if (tskw_cyc > 3)
+ {
+ goto TERMINATE;
+ }
+
+ tbl = 1; // BLEN=2 (ALWAYs for RLDRAM)
+ //=======================================================================
+ // RW_DLY = (ROUND_UP{[[(TRL+TBL)*2 + tSKW + BPRCH] + 1] / 2}) - tWL
+ rw_dly = ((((trl_cyc + tbl) * 2 + tskw_cyc + bprch) + 1) / 2);
+ if (rw_dly & 1)
+ { // If it's ODD then round up
+ rw_dly = rw_dly + 1;
+ }
+ rw_dly = rw_dly - twl_cyc + 1;
+ if (rw_dly < 0)
+ { // range check - is it positive
+ goto TERMINATE;
+ }
+ //=======================================================================
+ // WR_DLY = (ROUND_UP[[(tWL + tBL)*2 - tSKW + FPRCH] / 2]) - tRL
+ wr_dly = (((twl_cyc + tbl) * 2 - tskw_cyc + fprch) / 2);
+ if (wr_dly & 1)
+ { // If it's ODD then round up
+ wr_dly = wr_dly + 1;
+ }
+ wr_dly = wr_dly - trl_cyc + 1;
+ if (wr_dly < 0)
+ { // range check - is it positive
+ goto TERMINATE;
+ }
+
+
+ dfa_memcfg0_base = 0;
+ dfa_memcfg0_base = ( p0_ena |
+ (p1_ena << 1) |
+ (mtype << 3) |
+ (sil_lat << 4) |
+ (rw_dly << 6) |
+ (wr_dly << 10) |
+ (fprch << 14) |
+ (bprch << 16) |
+ (0 << 18) | // BLEN=0(2-burst for RLDRAM)
+ (pbunk << 19) |
+ (r2r << 22) | // R2R=1
+ (clkdiv_enc << 28 )
+ );
+
+
+ dfa_memcfg1_base = 0;
+ dfa_memcfg1_base = ( ref512int |
+ (tskw_cyc << 4) |
+ (trl_cyc << 8) |
+ (twl_cyc << 12) |
+ (trc_cyc << 16) |
+ (tmrsc_cyc << 20)
+ );
+
+
+
+
+ cfg_ptr->dfa_memcfg0_base = dfa_memcfg0_base;
+ cfg_ptr->dfa_memcfg1_base = dfa_memcfg1_base;
+ cfg_ptr->mrs_dat_p0bunk0 = mrs_dat_p0bunk0;
+ cfg_ptr->mrs_dat_p1bunk0 = mrs_dat_p1bunk0;
+ cfg_ptr->mrs_dat_p0bunk1 = mrs_dat_p0bunk1;
+ cfg_ptr->mrs_dat_p1bunk1 = mrs_dat_p1bunk1;
+ cfg_ptr->p0_ena = p0_ena;
+ cfg_ptr->p1_ena = p1_ena;
+ cfg_ptr->bunkport = bunkport;
+ //=======================================================================
+
+ return(0);
+ TERMINATE:
+ return(-1);
+
+}
+
+
+
+static uint32_t process_address_map_str(uint32_t mrs_dat, char *addr_str)
+{
+ int count = 0;
+ int amap [23];
+ uint32_t new_mrs_dat = 0;
+
+// cvmx_dprintf("mrs_dat: 0x%x, str: %x\n", mrs_dat, addr_str);
+ char *charptr = strtok(addr_str," ");
+ while ((charptr != NULL) && (count <= 22))
+ {
+ amap[22-count] = atoi(charptr); // Assign the AMAP Array
+ charptr = strtok(NULL," "); // Get Next char string (which represents next addr bit mapping)
+ count++;
+ }
+ // Now do the bit swap of MRSDAT (based on address mapping)
+ uint32_t mrsdat_bit;
+ for (count=0;count<=22;count++)
+ {
+ mrsdat_bit = bit_extract(mrs_dat, count);
+ new_mrs_dat = bit_insert(mrsdat_bit, amap[count], new_mrs_dat);
+ }
+
+ return new_mrs_dat;
+}
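+
+// Worked example (illustrative): with the identity map "22 21 20 ... 00",
+// amap[i] == i for every bit, so new_mrs_dat == mrs_dat. With a map whose
+// third and fourth tokens are "19 20" (as in the back-bunk strings above),
+// MRS bits 19 and 20 simply trade places.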
+
+
+//#define PRINT_LLM_CONFIG
+#ifdef PRINT_LLM_CONFIG
+#define ll_printf printf
+#else
+#define ll_printf(...)
+#define cvmx_csr_db_decode(...)
+#endif
+
+static void cn31xx_dfa_memory_init(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ {
+ cvmx_dfa_ddr2_cfg_t dfaCfg;
+ cvmx_dfa_eclkcfg_t dfaEcklCfg;
+ cvmx_dfa_ddr2_addr_t dfaAddr;
+ cvmx_dfa_ddr2_tmg_t dfaTmg;
+ cvmx_dfa_ddr2_pll_t dfaPll;
+ int mem_freq_hz = 533*1000000;
+ int ref_freq_hz = cvmx_sysinfo_get()->dfa_ref_clock_hz;
+ if (!ref_freq_hz)
+ ref_freq_hz = 33*1000000;
+
+ cvmx_dprintf ("Configuring DFA memory for %d MHz operation.\n",mem_freq_hz/1000000);
+
+ /* Turn on the DFA memory port. */
+ dfaCfg.u64 = cvmx_read_csr (CVMX_DFA_DDR2_CFG);
+ dfaCfg.s.prtena = 1;
+ cvmx_write_csr (CVMX_DFA_DDR2_CFG, dfaCfg.u64);
+
+ /* Start the PLL alignment sequence */
+ dfaPll.u64 = 0;
+ dfaPll.s.pll_ratio = mem_freq_hz/ref_freq_hz /*400Mhz / 33MHz*/;
+ dfaPll.s.pll_div2 = 1 /*400 - 1 */;
+ dfaPll.s.pll_bypass = 0;
+ cvmx_write_csr (CVMX_DFA_DDR2_PLL, dfaPll.u64);
+
+ dfaPll.s.pll_init = 1;
+ cvmx_write_csr (CVMX_DFA_DDR2_PLL, dfaPll.u64);
+
+ cvmx_wait (RLD_INIT_DELAY); //want 150uS
+ dfaPll.s.qdll_ena = 1;
+ cvmx_write_csr (CVMX_DFA_DDR2_PLL, dfaPll.u64);
+
+ cvmx_wait (RLD_INIT_DELAY); //want 10us
+ dfaEcklCfg.u64 = 0;
+ dfaEcklCfg.s.dfa_frstn = 1;
+ cvmx_write_csr (CVMX_DFA_ECLKCFG, dfaEcklCfg.u64);
+
+ /* Configure the DFA Memory */
+ dfaCfg.s.silo_hc = 1 /*400 - 1 */;
+ dfaCfg.s.silo_qc = 0 /*400 - 0 */;
+ dfaCfg.s.tskw = 1 /*400 - 1 */;
+ dfaCfg.s.ref_int = 0x820 /*533 - 0x820 400 - 0x618*/;
+ dfaCfg.s.trfc = 0x1A /*533 - 0x23 400 - 0x1A*/;
+ dfaCfg.s.fprch = 0; /* 1 more conservative*/
+ dfaCfg.s.bprch = 0; /* 1 */
+ cvmx_write_csr (CVMX_DFA_DDR2_CFG, dfaCfg.u64);
+
+ dfaEcklCfg.u64 = cvmx_read_csr (CVMX_DFA_ECLKCFG);
+ dfaEcklCfg.s.maxbnk = 1;
+ cvmx_write_csr (CVMX_DFA_ECLKCFG, dfaEcklCfg.u64);
+
+ dfaAddr.u64 = cvmx_read_csr (CVMX_DFA_DDR2_ADDR);
+ dfaAddr.s.num_cols = 0x1;
+ dfaAddr.s.num_colrows = 0x2;
+ dfaAddr.s.num_rnks = 0x1;
+ cvmx_write_csr (CVMX_DFA_DDR2_ADDR, dfaAddr.u64);
+
+ dfaTmg.u64 = cvmx_read_csr (CVMX_DFA_DDR2_TMG);
+ dfaTmg.s.ddr2t = 0;
+ dfaTmg.s.tmrd = 0x2;
+ dfaTmg.s.caslat = 0x4 /*400 - 0x3, 500 - 0x4*/;
+ dfaTmg.s.pocas = 0;
+ dfaTmg.s.addlat = 0;
+ dfaTmg.s.trcd = 4 /*400 - 3, 500 - 4*/;
+ dfaTmg.s.trrd = 2;
+ dfaTmg.s.tras = 0xB /*400 - 8, 500 - 0xB*/;
+ dfaTmg.s.trp = 4 /*400 - 3, 500 - 4*/;
+ dfaTmg.s.twr = 4 /*400 - 3, 500 - 4*/;
+ dfaTmg.s.twtr = 2 /*400 - 2 */;
+ dfaTmg.s.tfaw = 0xE /*400 - 0xA, 500 - 0xE*/;
+ dfaTmg.s.r2r_slot = 0;
+ dfaTmg.s.dic = 0; /*400 - 0 */
+ dfaTmg.s.dqsn_ena = 0;
+ dfaTmg.s.odt_rtt = 0;
+ cvmx_write_csr (CVMX_DFA_DDR2_TMG, dfaTmg.u64);
+
+ /* Turn on the DDR2 interface and wait a bit for the hardware to setup. */
+ dfaCfg.s.init = 1;
+ cvmx_write_csr (CVMX_DFA_DDR2_CFG, dfaCfg.u64);
+ cvmx_wait(RLD_INIT_DELAY); // want at least 64K cycles
+ }
+}
+
+void write_rld_cfg(rldram_csr_config_t *cfg_ptr)
+{
+ cvmx_dfa_memcfg0_t memcfg0;
+ cvmx_dfa_memcfg2_t memcfg2;
+
+ memcfg0.u64 = cfg_ptr->dfa_memcfg0_base;
+
+ if ((OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ {
+ uint32_t dfa_memcfg0;
+
+ if (OCTEON_IS_MODEL (OCTEON_CN58XX)) {
+ // Set RLDQCK90_RST and RLDCK_RST to reset all three DLLs.
+ memcfg0.s.rldck_rst = 1;
+ memcfg0.s.rldqck90_rst = 1;
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, memcfg0.u64);
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x clk/qk90 reset\n", (uint32_t) memcfg0.u64);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), memcfg0.u64);
+
+ // Clear RLDCK_RST while asserting RLDQCK90_RST to bring the RLDCK DLL out of reset.
+ memcfg0.s.rldck_rst = 0;
+ memcfg0.s.rldqck90_rst = 1;
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, memcfg0.u64);
+ cvmx_wait(4000000); /* Wait */
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x qk90 reset\n", (uint32_t) memcfg0.u64);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), memcfg0.u64);
+
+ // Clear both RLDCK_RST and RLDQCK90_RST to bring the RLDQCK90 DLL out of reset.
+ memcfg0.s.rldck_rst = 0;
+ memcfg0.s.rldqck90_rst = 0;
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, memcfg0.u64);
+ cvmx_wait(4000000); /* Wait */
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x DLL out of reset\n", (uint32_t) memcfg0.u64);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), memcfg0.u64);
+ }
+
+ //=======================================================================
+ // Now step through the configuration sequence:
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg_ptr->dfa_memcfg0_base);
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x port enables\n", cfg_ptr->dfa_memcfg0_base);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), cfg_ptr->dfa_memcfg0_base);
+ cvmx_wait(4000000); /* Wait */
+
+ cvmx_write_csr(CVMX_DFA_MEMCFG1, cfg_ptr->dfa_memcfg1_base);
+ ll_printf("CVMX_DFA_MEMCFG1: 0x%08x\n", cfg_ptr->dfa_memcfg1_base);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG1 & ~(1ull<<63), cfg_ptr->dfa_memcfg1_base);
+
+ if (cfg_ptr->p0_ena == 1)
+ {
+ cvmx_write_csr(CVMX_DFA_MEMRLD, cfg_ptr->mrs_dat_p0bunk0);
+ ll_printf("CVMX_DFA_MEMRLD : 0x%08x p0_ena memrld\n", cfg_ptr->mrs_dat_p0bunk0);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMRLD & ~(1ull<<63), cfg_ptr->mrs_dat_p0bunk0);
+
+ dfa_memcfg0 = ( cfg_ptr->dfa_memcfg0_base |
+ (1 << 23) | // P0_INIT
+ (1 << 25) // BUNK_INIT[1:0]=Bunk#0
+ );
+
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, dfa_memcfg0);
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x p0_init/bunk_init\n", dfa_memcfg0);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), dfa_memcfg0);
+ cvmx_wait(RLD_INIT_DELAY);
+ ll_printf("Delay.....\n");
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg_ptr->dfa_memcfg0_base);
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x back to base\n", cfg_ptr->dfa_memcfg0_base);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), cfg_ptr->dfa_memcfg0_base);
+ }
+
+ if (cfg_ptr->p1_ena == 1)
+ {
+ cvmx_write_csr(CVMX_DFA_MEMRLD, cfg_ptr->mrs_dat_p1bunk0);
+ ll_printf("CVMX_DFA_MEMRLD : 0x%08x p1_ena memrld\n", cfg_ptr->mrs_dat_p1bunk0);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMRLD & ~(1ull<<63), cfg_ptr->mrs_dat_p1bunk0);
+
+ dfa_memcfg0 = ( cfg_ptr->dfa_memcfg0_base |
+ (1 << 24) | // P1_INIT
+ (1 << 25) // BUNK_INIT[1:0]=Bunk#0
+ );
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, dfa_memcfg0);
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x p1_init/bunk_init\n", dfa_memcfg0);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), dfa_memcfg0);
+ cvmx_wait(RLD_INIT_DELAY);
+ ll_printf("Delay.....\n");
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg_ptr->dfa_memcfg0_base);
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x back to base\n", cfg_ptr->dfa_memcfg0_base);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), cfg_ptr->dfa_memcfg0_base);
+ }
+
+ // P0 Bunk#1
+ if ((cfg_ptr->p0_ena == 1) && (cfg_ptr->bunkport == 2))
+ {
+ cvmx_write_csr(CVMX_DFA_MEMRLD, cfg_ptr->mrs_dat_p0bunk1);
+ ll_printf("CVMX_DFA_MEMRLD : 0x%08x p0_ena memrld\n", cfg_ptr->mrs_dat_p0bunk1);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMRLD & ~(1ull<<63), cfg_ptr->mrs_dat_p0bunk1);
+
+ dfa_memcfg0 = ( cfg_ptr->dfa_memcfg0_base |
+ (1 << 23) | // P0_INIT
+ (2 << 25) // BUNK_INIT[1:0]=Bunk#1
+ );
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, dfa_memcfg0);
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x p0_init/bunk_init\n", dfa_memcfg0);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), dfa_memcfg0);
+ cvmx_wait(RLD_INIT_DELAY);
+ ll_printf("Delay.....\n");
+
+ if (cfg_ptr->p1_ena == 1)
+ { // Re-arm Px_INIT if P1-B1 init is required
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg_ptr->dfa_memcfg0_base);
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x px_init rearm\n", cfg_ptr->dfa_memcfg0_base);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), cfg_ptr->dfa_memcfg0_base);
+ }
+ }
+
+ if ((cfg_ptr->p1_ena == 1) && (cfg_ptr->bunkport == 2))
+ {
+ cvmx_write_csr(CVMX_DFA_MEMRLD, cfg_ptr->mrs_dat_p1bunk1);
+ ll_printf("CVMX_DFA_MEMRLD : 0x%08x p1_ena memrld\n", cfg_ptr->mrs_dat_p1bunk1);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMRLD & ~(1ull<<63), cfg_ptr->mrs_dat_p1bunk1);
+
+ dfa_memcfg0 = ( cfg_ptr->dfa_memcfg0_base |
+ (1 << 24) | // P1_INIT
+ (2 << 25) // BUNK_INIT[1:0]=10
+ );
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, dfa_memcfg0);
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x p1_init/bunk_init\n", dfa_memcfg0);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), dfa_memcfg0);
+ }
+ cvmx_wait(4000000); // 1/100S, 0.01S, 10mS
+ ll_printf("Delay.....\n");
+
+ /* Enable bunks */
+ dfa_memcfg0 = cfg_ptr->dfa_memcfg0_base |((cfg_ptr->bunkport >= 1) << 25) | ((cfg_ptr->bunkport == 2) << 26);
+ cvmx_write_csr(CVMX_DFA_MEMCFG0, dfa_memcfg0);
+ ll_printf("CVMX_DFA_MEMCFG0: 0x%08x enable bunks\n", dfa_memcfg0);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), dfa_memcfg0);
+ cvmx_wait(RLD_INIT_DELAY);
+ ll_printf("Delay.....\n");
+
+ /* Issue a Silo reset by toggling SILRST in memcfg2. */
+ memcfg2.u64 = cvmx_read_csr (CVMX_DFA_MEMCFG2);
+ memcfg2.s.silrst = 1;
+ cvmx_write_csr (CVMX_DFA_MEMCFG2, memcfg2.u64);
+ ll_printf("CVMX_DFA_MEMCFG2: 0x%08x silo reset start\n", (uint32_t) memcfg2.u64);
+ memcfg2.s.silrst = 0;
+ cvmx_write_csr (CVMX_DFA_MEMCFG2, memcfg2.u64);
+ ll_printf("CVMX_DFA_MEMCFG2: 0x%08x silo reset done\n", (uint32_t) memcfg2.u64);
+ }
+}
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-llm.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-llm.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-llm.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-llm.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,392 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * interface to the low latency DRAM
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifndef __CVMX_LLM_H__
+#define __CVMX_LLM_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ENABLE_DEPRECATED /* Set to enable the old 18/36 bit names */
+
+typedef enum
+{
+ CVMX_LLM_REPLICATION_NONE = 0,
+ CVMX_LLM_REPLICATION_2X = 1, // on both interfaces, or 2x if only one interface
+ CVMX_LLM_REPLICATION_4X = 2, // both interfaces, 2x, or 4x if only one interface
+ CVMX_LLM_REPLICATION_8X = 3, // both interfaces, 4x, or 8x if only one interface
+} cvmx_llm_replication_t;
+
+/**
+ * This structure defines the address used to the low-latency memory.
+ * This address format is used for both loads and stores.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t mbz :30;
+ cvmx_llm_replication_t repl : 2;
+ uint64_t address :32; // address<1:0> mbz, address<31:30> mbz
+ } s;
+} cvmx_llm_address_t;
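+
+/* A minimal sketch of building an address word (illustrative; byte_offset is
+ * a hypothetical 32-bit offset with <1:0> and <31:30> zero, per the note above):
+ *
+ *   cvmx_llm_address_t addr;
+ *   addr.u64 = 0;
+ *   addr.s.repl = CVMX_LLM_REPLICATION_NONE;
+ *   addr.s.address = byte_offset;
+ */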
+
+/**
+ * This structure defines the data format in the low-latency memory
+ */
+typedef union
+{
+ uint64_t u64;
+
+ /**
+ * this format defines the format returned on a load
+ * a load returns the 32/36-bits in memory, plus xxor = even_parity(dat<35:0>)
+ * typically, dat<35> = parity(dat<34:0>), so the xor bit directly indicates parity error
+ * Note that the data field size is 36 bits on the 36XX/38XX, and 32 bits on the 31XX
+ */
+ struct
+ {
+ uint64_t mbz1 :27;
+ uint64_t xxor : 1;
+ uint64_t mbz : 4;
+ uint64_t dat :32;
+ } cn31xx;
+
+ struct
+ {
+ uint64_t mbz :27;
+ uint64_t xxor : 1;
+ uint64_t dat :36;
+ } s;
+
+ /**
+ * This format defines what should be used if parity is desired. Hardware returns
+ * the XOR of all the bits in the 36/32 bit data word, so for parity software must use
+ * one of the data field bits as a parity bit.
+ */
+ struct cn31xx_par_struct
+ {
+ uint64_t mbz :32;
+ uint64_t par : 1;
+ uint64_t dat :31;
+ } cn31xx_par;
+ struct cn38xx_par_struct
+ {
+ uint64_t mbz :28;
+ uint64_t par : 1;
+ uint64_t dat :35;
+ } cn38xx_par;
+#if !OCTEON_IS_COMMON_BINARY()
+#if CVMX_COMPILED_FOR(OCTEON_CN31XX)
+ struct cn31xx_par_struct spar;
+#else
+ struct cn38xx_par_struct spar;
+#endif
+#endif
+} cvmx_llm_data_t;
+
+#define CVMX_LLM_NARROW_DATA_WIDTH ((CVMX_COMPILED_FOR(OCTEON_CN31XX)) ? 32 : 36)
+
+/**
+ * Calculate the parity value of a number
+ *
+ * @param value
+ * @return parity value
+ */
+static inline uint64_t cvmx_llm_parity(uint64_t value)
+{
+ uint64_t result;
+ CVMX_DPOP(result, value);
+ return result;
+}
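+
+/* Hedged sketch of the parity convention described in cvmx_llm_data_t above:
+ * store even parity of the low data bits in the top data bit so that a load's
+ * xxor flag directly signals a parity error (value35 is a hypothetical
+ * 35-bit value; uses the cn38xx_par view):
+ *
+ *   cvmx_llm_data_t d;
+ *   d.u64 = 0;
+ *   d.cn38xx_par.dat = value35;
+ *   d.cn38xx_par.par = cvmx_llm_parity(value35) & 1;
+ */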
+
+
+/**
+ * Calculate the ECC needed for 36b LLM mode
+ *
+ * @param value
+ * @return ECC value
+ */
+static inline int cvmx_llm_ecc(uint64_t value)
+{
+ /* FIXME: This needs a re-write */
+ static const uint32_t ecc_code_29[7] = {
+ 0x08962595,
+ 0x112a4aaa,
+ 0x024c934f,
+ 0x04711c73,
+ 0x0781e07c,
+ 0x1801ff80,
+ 0x1ffe0000};
+ uint64_t pop0, pop1, pop2, pop3, pop4, pop5, pop6;
+
+ pop0 = ecc_code_29[0];
+ pop1 = ecc_code_29[1];
+ pop2 = ecc_code_29[2];
+ pop0 &= value;
+ pop3 = ecc_code_29[3];
+ CVMX_DPOP(pop0, pop0);
+ pop4 = ecc_code_29[4];
+ pop1 &= value;
+ CVMX_DPOP(pop1, pop1);
+ pop2 &= value;
+ pop5 = ecc_code_29[5];
+ CVMX_DPOP(pop2, pop2);
+ pop6 = ecc_code_29[6];
+ pop3 &= value;
+ CVMX_DPOP(pop3, pop3);
+ pop4 &= value;
+ CVMX_DPOP(pop4, pop4);
+ pop5 &= value;
+ CVMX_DPOP(pop5, pop5);
+ pop6 &= value;
+ CVMX_DPOP(pop6, pop6);
+
+ return((pop6&1)<<6) | ((pop5&1)<<5) | ((pop4&1)<<4) | ((pop3&1)<<3) | ((pop2&1)<<2) | ((pop1&1)<<1) | (pop0&1);
+}
+
+
+#ifdef ENABLE_DEPRECATED
+/* These macros are provided for compatibility with code that uses
+** the old names for the llm access functions. The names were changed
+** when support for the 31XX llm was added, as the widths differ between Octeon models.
+** The wide/narrow names are preferred and should be used in all new code. */
+#define cvmx_llm_write36 cvmx_llm_write_narrow
+#define cvmx_llm_read36 cvmx_llm_read_narrow
+#define cvmx_llm_write64 cvmx_llm_write_wide
+#define cvmx_llm_read64 cvmx_llm_read_wide
+#endif
+/**
+ * Write to LLM memory - 36 bit
+ *
+ * @param address Address in LLM to write. Consecutive writes increment the
+ * address by 4. The replication mode is also encoded in this
+ * address.
+ * @param value Value to write to LLM. Only the low 36 bits will be used.
+ * @param set Which of the two coprocessor 2 register sets to use for the
+ * write. May be used to get two outstanding LLM access at once
+ * per core. Range: 0-1
+ */
+static inline void cvmx_llm_write_narrow(cvmx_llm_address_t address, uint64_t value, int set)
+{
+ cvmx_llm_data_t data;
+ data.s.mbz = 0;
+
+ data.s.dat = value;
+
+ data.s.xxor = 0;
+
+ if (set)
+ {
+ CVMX_MT_LLM_DATA(1, data.u64);
+ CVMX_MT_LLM_WRITE_ADDR_INTERNAL(1, address.u64);
+ }
+ else
+ {
+ CVMX_MT_LLM_DATA(0, data.u64);
+ CVMX_MT_LLM_WRITE_ADDR_INTERNAL(0, address.u64);
+ }
+}
+
+
+/**
+ * Write to LLM memory - 64 bit
+ *
+ * @param address Address in LLM to write. Consecutive writes increment the
+ * address by 8. The replication mode is also encoded in this
+ * address.
+ * @param value Value to write to LLM.
+ * @param set Which of the two coprocessor 2 register sets to use for the
+ * write. May be used to get two outstanding LLM access at once
+ * per core. Range: 0-1
+ */
+static inline void cvmx_llm_write_wide(cvmx_llm_address_t address, uint64_t value, int set)
+{
+ if (set)
+ {
+ CVMX_MT_LLM_DATA(1, value);
+ CVMX_MT_LLM_WRITE64_ADDR_INTERNAL(1, address.u64);
+ }
+ else
+ {
+ CVMX_MT_LLM_DATA(0, value);
+ CVMX_MT_LLM_WRITE64_ADDR_INTERNAL(0, address.u64);
+ }
+}
+
+
+/**
+ * Read from LLM memory - 36 bit
+ *
+ * @param address Address in LLM to read. Consecutive reads increment the
+ * address by 4. The replication mode is also encoded in this
+ * address.
+ * @param set Which of the two coprocessor 2 register sets to use for the
+ * write. May be used to get two outstanding LLM access at once
+ * per core. Range: 0-1
+ * @return The lower 36 bits contain the result of the read
+ */
+static inline cvmx_llm_data_t cvmx_llm_read_narrow(cvmx_llm_address_t address, int set)
+{
+ cvmx_llm_data_t value;
+ if (set)
+ {
+ CVMX_MT_LLM_READ_ADDR(1, address.u64);
+ CVMX_MF_LLM_DATA(1, value.u64);
+ }
+ else
+ {
+ CVMX_MT_LLM_READ_ADDR(0, address.u64);
+ CVMX_MF_LLM_DATA(0, value.u64);
+ }
+ return value;
+}
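+
+/* Hedged round-trip sketch using the narrow accessors (llm_byte_addr is a
+ * hypothetical, properly aligned LLM offset; register set 0):
+ *
+ *   cvmx_llm_address_t a;
+ *   a.u64 = 0;
+ *   a.s.repl = CVMX_LLM_REPLICATION_NONE;
+ *   a.s.address = llm_byte_addr;
+ *   cvmx_llm_write_narrow(a, 0x123456789ull, 0);
+ *   cvmx_llm_data_t r = cvmx_llm_read_narrow(a, 0);
+ *   // r.s.dat now holds the 36-bit word; r.s.xxor is the parity-check bit
+ */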
+
+
+/**
+ * Read from LLM memory - 64 bit
+ *
+ * @param address Address in LLM to read. Consecutive reads increment the
+ * address by 8. The replication mode is also encoded in this
+ * address.
+ * @param set Which of the two coprocessor 2 register sets to use for the
+ * write. May be used to get two outstanding LLM access at once
+ * per core. Range: 0-1
+ * @return The result of the read
+ */
+static inline uint64_t cvmx_llm_read_wide(cvmx_llm_address_t address, int set)
+{
+ uint64_t value;
+ if (set)
+ {
+ CVMX_MT_LLM_READ64_ADDR(1, address);
+ CVMX_MF_LLM_DATA(1, value);
+ }
+ else
+ {
+ CVMX_MT_LLM_READ64_ADDR(0, address);
+ CVMX_MF_LLM_DATA(0, value);
+ }
+ return value;
+}
+
+
+#define RLD_INIT_DELAY (1<<18)
+
+
+
+/* This structure describes the RLDRAM configuration for a board. This structure
+** must be populated with the correct values and passed to the initialization function.
+*/
+typedef struct
+{
+ uint32_t cpu_hz; /* CPU frequency in Hz */
+ char addr_rld0_fb_str [100]; /* String describing RLDRAM connections on rld 0 front (0) bunk*/
+ char addr_rld0_bb_str [100]; /* String describing RLDRAM connections on rld 0 back (1) bunk*/
+ char addr_rld1_fb_str [100]; /* String describing RLDRAM connections on rld 1 front (0) bunk*/
+ char addr_rld1_bb_str [100]; /* String describing RLDRAM connections on rld 1 back (1) bunk*/
+ uint8_t rld0_bunks; /* Number of bunks on rld 0 (0 is disabled) */
+ uint8_t rld1_bunks; /* Number of bunks on rld 1 (0 is disabled) */
+ uint16_t rld0_mbytes; /* mbytes on rld 0 */
+ uint16_t rld1_mbytes; /* mbytes on rld 1 */
+ uint16_t max_rld_clock_mhz; /* Maximum RLD clock in MHz, only used for CN58XX */
+} llm_descriptor_t;
+
+/**
+ * Initialize LLM memory controller. This must be done
+ * before the low latency memory can be used.
+ * This is simply a wrapper around cvmx_llm_initialize_desc(),
+ * and is deprecated.
+ *
+ * @return -1 on error
+ * 0 on success
+ */
+int cvmx_llm_initialize(void);
+
+
+/**
+ * Initialize LLM memory controller. This must be done
+ * before the low latency memory can be used.
+ *
+ * @param llm_desc_ptr
+ * Pointer to descriptor structure. If NULL
+ * is passed, a default setting is used if available.
+ *
+ * @return -1 on error
+ * Size of llm in bytes on success
+ */
+int cvmx_llm_initialize_desc(llm_descriptor_t *llm_desc_ptr);
+
+
+
+/**
+ * Gets the default llm descriptor for the board code is being run on.
+ *
+ * @param llm_desc_ptr
+ * Pointer to descriptor structure to be filled in. Contents are only
+ * valid after successful completion. Must not be NULL.
+ *
+ * @return -1 on error
+ * 0 on success
+ */
+int cvmx_llm_get_default_descriptor(llm_descriptor_t *llm_desc_ptr);
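+
+/* A minimal initialization sketch (illustrative): fetch the board default,
+ * optionally adjust it, then initialize and check the returned size.
+ *
+ *   llm_descriptor_t desc;
+ *   if (cvmx_llm_get_default_descriptor(&desc) == 0)
+ *   {
+ *       int llm_bytes = cvmx_llm_initialize_desc(&desc);
+ *       if (llm_bytes < 0)
+ *           cvmx_dprintf("LLM init failed\n");
+ *   }
+ */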
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_LLM_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-llm.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-lmcx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-lmcx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-lmcx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,8422 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-lmcx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon lmcx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_LMCX_DEFS_H__
+#define __CVMX_LMCX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_BIST_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_LMCX_BIST_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880000F0ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_BIST_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F0ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
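+
+/* Note: every accessor in this file follows the pattern above -- with
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING the inline function validates block_id
+ * against the models that implement the register, then computes
+ * base + block_id * stride; otherwise the macro computes the address directly.
+ * For example, CVMX_LMCX_BIST_CTL(1) resolves to
+ * 0x00011800880000F0ull + 1 * 0x60000000ull (valid only on CN56XX). */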
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_BIST_RESULT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_LMCX_BIST_RESULT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880000F8ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_BIST_RESULT(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F8ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_CHAR_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CHAR_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000220ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_CHAR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000220ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_CHAR_MASK0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CHAR_MASK0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000228ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_CHAR_MASK0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000228ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_CHAR_MASK1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CHAR_MASK1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000230ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_CHAR_MASK1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000230ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_CHAR_MASK2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CHAR_MASK2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000238ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_CHAR_MASK2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000238ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_CHAR_MASK3(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CHAR_MASK3(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000240ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_CHAR_MASK3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000240ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_CHAR_MASK4(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CHAR_MASK4(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000318ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_CHAR_MASK4(block_id) (CVMX_ADD_IO_SEG(0x0001180088000318ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_COMP_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_COMP_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000028ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000028ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_COMP_CTL2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_COMP_CTL2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880001B8ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_COMP_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B8ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_CONFIG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CONFIG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000188ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_CONFIG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000188ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_CONTROL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CONTROL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000190ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_CONTROL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000190ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000010ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000010ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_CTL1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CTL1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000090ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000090ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DCLK_CNT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DCLK_CNT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880001E0ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_DCLK_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E0ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DCLK_CNT_HI(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DCLK_CNT_HI(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000070ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_DCLK_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000070ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DCLK_CNT_LO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DCLK_CNT_LO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000068ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_DCLK_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000068ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DCLK_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_LMCX_DCLK_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880000B8ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_DCLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B8ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DDR2_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DDR2_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000018ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_DDR2_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000018ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DDR_PLL_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DDR_PLL_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000258ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_DDR_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000258ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DELAY_CFG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DELAY_CFG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000088ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_DELAY_CFG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000088ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DIMMX_PARAMS(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset <= 1)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 1)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 1)) && ((block_id <= 3)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset <= 1)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_LMCX_DIMMX_PARAMS(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000270ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8;
+}
+#else
+#define CVMX_LMCX_DIMMX_PARAMS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000270ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DIMM_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DIMM_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000310ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_DIMM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000310ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DLL_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_LMCX_DLL_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880000C0ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_DLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000C0ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DLL_CTL2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DLL_CTL2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880001C8ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_DLL_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001C8ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_DLL_CTL3(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DLL_CTL3(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000218ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_DLL_CTL3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000218ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+static inline uint64_t CVMX_LMCX_DUAL_MEMCFG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 0) * 0x60000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 1) * 0x60000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 3))
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 3) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_LMCX_DUAL_MEMCFG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 0) * 0x60000000ull;
+}
+static inline uint64_t CVMX_LMCX_ECC_SYND(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 0) * 0x60000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 1) * 0x60000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 3))
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 3) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_LMCX_ECC_SYND (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 0) * 0x60000000ull;
+}
+static inline uint64_t CVMX_LMCX_FADR(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 0) * 0x60000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 1) * 0x60000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 3))
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 3) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_LMCX_FADR (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 0) * 0x60000000ull;
+}
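+/*
+ * Editor's note (not part of the original SDK header): CVMX_LMCX_DUAL_MEMCFG
+ * above -- like CVMX_LMCX_ECC_SYND, CVMX_LMCX_FADR and CVMX_LMCX_NXM below --
+ * stays a function even when CSR address checking is compiled out, because
+ * the register exists on chip families with different address strides
+ * (0x60000000 per controller on CN56XX vs 0x1000000 on CN68XX), so the
+ * stride must be chosen at run time via cvmx_get_octeon_family().
+ */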
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_IFB_CNT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_IFB_CNT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880001D0ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_IFB_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D0ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_IFB_CNT_HI(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_IFB_CNT_HI(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000050ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_IFB_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000050ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_IFB_CNT_LO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_IFB_CNT_LO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000048ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_IFB_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000048ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_INT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_INT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880001F0ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_INT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F0ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_INT_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_INT_EN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880001E8ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E8ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_MEM_CFG0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_MEM_CFG0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000000ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_MEM_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000000ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_MEM_CFG1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_MEM_CFG1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000008ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_MEM_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000008ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_MODEREG_PARAMS0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_MODEREG_PARAMS0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880001A8ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_MODEREG_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A8ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_MODEREG_PARAMS1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_MODEREG_PARAMS1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000260ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_MODEREG_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000260ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+static inline uint64_t CVMX_LMCX_NXM(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 0) * 0x60000000ull;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 1) * 0x60000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 3))
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 3) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_LMCX_NXM (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 0) * 0x60000000ull;
+}
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_OPS_CNT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_OPS_CNT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880001D8ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_OPS_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D8ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_OPS_CNT_HI(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_OPS_CNT_HI(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000060ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_OPS_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000060ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_OPS_CNT_LO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_OPS_CNT_LO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000058ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_OPS_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000058ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_PHY_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_PHY_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000210ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_PHY_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000210ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_PLL_BWCTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_PLL_BWCTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000040ull);
+}
+#else
+#define CVMX_LMCX_PLL_BWCTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_PLL_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_PLL_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880000A8ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A8ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_PLL_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_PLL_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880000B0ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_PLL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B0ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_READ_LEVEL_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_LMCX_READ_LEVEL_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000140ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_READ_LEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000140ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_READ_LEVEL_DBG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_LMCX_READ_LEVEL_DBG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000148ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_READ_LEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000148ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_READ_LEVEL_RANKX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_LMCX_READ_LEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000100ull) + (((offset) & 3) + ((block_id) & 1) * 0xC000000ull) * 8;
+}
+#else
+#define CVMX_LMCX_READ_LEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000100ull) + (((offset) & 3) + ((block_id) & 1) * 0xC000000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_RESET_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_RESET_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000180ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_RESET_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000180ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_RLEVEL_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_RLEVEL_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880002A0ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_RLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A0ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_RLEVEL_DBG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_RLEVEL_DBG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880002A8ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_RLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A8ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_RLEVEL_RANKX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset <= 3)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 3)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 3)) && ((block_id <= 3)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_LMCX_RLEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000280ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8;
+}
+#else
+#define CVMX_LMCX_RLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000280ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_RODT_COMP_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_RODT_COMP_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880000A0ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_RODT_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A0ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_RODT_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_RODT_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000078ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_RODT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000078ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_RODT_MASK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_RODT_MASK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000268ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_RODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x0001180088000268ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_SCRAMBLED_FADR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_SCRAMBLED_FADR(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000330ull);
+}
+#else
+#define CVMX_LMCX_SCRAMBLED_FADR(block_id) (CVMX_ADD_IO_SEG(0x0001180088000330ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_SCRAMBLE_CFG0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_SCRAMBLE_CFG0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000320ull);
+}
+#else
+#define CVMX_LMCX_SCRAMBLE_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000320ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_SCRAMBLE_CFG1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_SCRAMBLE_CFG1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000328ull);
+}
+#else
+#define CVMX_LMCX_SCRAMBLE_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000328ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_SLOT_CTL0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_SLOT_CTL0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880001F8ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_SLOT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F8ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_SLOT_CTL1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_SLOT_CTL1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000200ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_SLOT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000200ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_SLOT_CTL2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_SLOT_CTL2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000208ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_SLOT_CTL2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000208ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_TIMING_PARAMS0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_TIMING_PARAMS0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000198ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_TIMING_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000198ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_TIMING_PARAMS1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_TIMING_PARAMS1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880001A0ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_TIMING_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A0ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_TRO_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_TRO_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000248ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_TRO_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000248ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_TRO_STAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_TRO_STAT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000250ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_TRO_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180088000250ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_WLEVEL_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_WLEVEL_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000300ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_WLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000300ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_WLEVEL_DBG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_WLEVEL_DBG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000308ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_WLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000308ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_WLEVEL_RANKX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset <= 3)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 3)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 3)) && ((block_id <= 3)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_LMCX_WLEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880002B0ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8;
+}
+#else
+#define CVMX_LMCX_WLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800880002B0ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_WODT_CTL0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_WODT_CTL0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000030ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_WODT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000030ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_WODT_CTL1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_LMCX_WODT_CTL1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180088000080ull) + ((block_id) & 1) * 0x60000000ull;
+}
+#else
+#define CVMX_LMCX_WODT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000080ull) + ((block_id) & 1) * 0x60000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_LMCX_WODT_MASK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_WODT_MASK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800880001B0ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_LMCX_WODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B0ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+
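+/*
+ * Editor's usage sketch (illustrative, not part of the original SDK header):
+ * each CVMX_LMCX_* accessor above yields the 64-bit I/O address of one CSR
+ * instance, which plugs directly into the cvmx_read_csr()/cvmx_write_csr()
+ * helpers from cvmx.h; the unions that follow then decode the raw value
+ * field by field. A minimal read, assuming those helpers:
+ *
+ *     uint64_t raw = cvmx_read_csr(CVMX_LMCX_CTL(0));     LMC controller 0
+ */
+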
+/**
+ * cvmx_lmc#_bist_ctl
+ *
+ * Notes:
+ * This controls BiST only for the memories that operate on DCLK. The normal, chip-wide BiST flow
+ * controls BiST for the memories that operate on ECLK.
+ */
+union cvmx_lmcx_bist_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_bist_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t start : 1; /**< A 0->1 transition causes BiST to run. */
+#else
+ uint64_t start : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_lmcx_bist_ctl_s cn50xx;
+ struct cvmx_lmcx_bist_ctl_s cn52xx;
+ struct cvmx_lmcx_bist_ctl_s cn52xxp1;
+ struct cvmx_lmcx_bist_ctl_s cn56xx;
+ struct cvmx_lmcx_bist_ctl_s cn56xxp1;
+};
+typedef union cvmx_lmcx_bist_ctl cvmx_lmcx_bist_ctl_t;
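+
+/*
+ * Editor's sketch (assumptions: cvmx_write_csr() from cvmx.h and the
+ * CVMX_LMCX_BIST_CTL() address accessor defined earlier in this header).
+ * Because START triggers on a 0->1 transition, a BiST run is kicked off by
+ * writing the bit low and then high:
+ *
+ *     cvmx_lmcx_bist_ctl_t ctl;
+ *     ctl.u64 = 0;
+ *     cvmx_write_csr(CVMX_LMCX_BIST_CTL(0), ctl.u64);     START = 0
+ *     ctl.s.start = 1;
+ *     cvmx_write_csr(CVMX_LMCX_BIST_CTL(0), ctl.u64);     0->1 starts BiST
+ */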
+
+/**
+ * cvmx_lmc#_bist_result
+ *
+ * Notes:
+ * Access to the internal BiST results.
+ * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail).
+ */
+union cvmx_lmcx_bist_result {
+ uint64_t u64;
+ struct cvmx_lmcx_bist_result_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t csrd2e : 1; /**< BiST result of CSRD2E memory (0=pass, !0=fail) */
+ uint64_t csre2d : 1; /**< BiST result of CSRE2D memory (0=pass, !0=fail) */
+ uint64_t mwf : 1; /**< BiST result of MWF memories (0=pass, !0=fail) */
+ uint64_t mwd : 3; /**< BiST result of MWD memories (0=pass, !0=fail) */
+ uint64_t mwc : 1; /**< BiST result of MWC memories (0=pass, !0=fail) */
+ uint64_t mrf : 1; /**< BiST result of MRF memories (0=pass, !0=fail) */
+ uint64_t mrd : 3; /**< BiST result of MRD memories (0=pass, !0=fail) */
+#else
+ uint64_t mrd : 3;
+ uint64_t mrf : 1;
+ uint64_t mwc : 1;
+ uint64_t mwd : 3;
+ uint64_t mwf : 1;
+ uint64_t csre2d : 1;
+ uint64_t csrd2e : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_lmcx_bist_result_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t mwf : 1; /**< BiST result of MWF memories (0=pass, !0=fail) */
+ uint64_t mwd : 3; /**< BiST result of MWD memories (0=pass, !0=fail) */
+ uint64_t mwc : 1; /**< BiST result of MWC memories (0=pass, !0=fail) */
+ uint64_t mrf : 1; /**< BiST result of MRF memories (0=pass, !0=fail) */
+ uint64_t mrd : 3; /**< BiST result of MRD memories (0=pass, !0=fail) */
+#else
+ uint64_t mrd : 3;
+ uint64_t mrf : 1;
+ uint64_t mwc : 1;
+ uint64_t mwd : 3;
+ uint64_t mwf : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_bist_result_s cn52xx;
+ struct cvmx_lmcx_bist_result_s cn52xxp1;
+ struct cvmx_lmcx_bist_result_s cn56xx;
+ struct cvmx_lmcx_bist_result_s cn56xxp1;
+};
+typedef union cvmx_lmcx_bist_result cvmx_lmcx_bist_result_t;
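+
+/*
+ * Editor's sketch (assumptions: cvmx_read_csr() from cvmx.h and a
+ * CVMX_LMCX_BIST_RESULT() address accessor defined earlier in this header).
+ * Each field is a per-memory pass/fail vector, so any nonzero bit indicates
+ * a BiST failure:
+ *
+ *     cvmx_lmcx_bist_result_t r;
+ *     r.u64 = cvmx_read_csr(CVMX_LMCX_BIST_RESULT(0));
+ *     if (r.s.mrd || r.s.mrf || r.s.mwc || r.s.mwd || r.s.mwf)
+ *         cvmx_warn("LMC0 BiST failed: 0x%016llx\n",
+ *                   (unsigned long long)r.u64);
+ */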
+
+/**
+ * cvmx_lmc#_char_ctl
+ *
+ * LMC_CHAR_CTL = LMC Characterization Control
+ * This register is an assortment of control fields needed to characterize the DDR3 interface.
+ */
+union cvmx_lmcx_char_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_char_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t dr : 1; /**< Pattern at Data Rate (not Clock Rate) */
+ uint64_t skew_on : 1; /**< Skew adjacent bits */
+ uint64_t en : 1; /**< Enable characterization */
+ uint64_t sel : 1; /**< Pattern select
+ 0 = PRBS
+ 1 = Programmable pattern */
+ uint64_t prog : 8; /**< Programmable pattern */
+ uint64_t prbs : 32; /**< PRBS Polynomial */
+#else
+ uint64_t prbs : 32;
+ uint64_t prog : 8;
+ uint64_t sel : 1;
+ uint64_t en : 1;
+ uint64_t skew_on : 1;
+ uint64_t dr : 1;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_lmcx_char_ctl_s cn61xx;
+ struct cvmx_lmcx_char_ctl_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63 : 22;
+ uint64_t en : 1; /**< Enable characterization */
+ uint64_t sel : 1; /**< Pattern select
+ 0 = PRBS
+ 1 = Programmable pattern */
+ uint64_t prog : 8; /**< Programmable pattern */
+ uint64_t prbs : 32; /**< PRBS Polynomial */
+#else
+ uint64_t prbs : 32;
+ uint64_t prog : 8;
+ uint64_t sel : 1;
+ uint64_t en : 1;
+ uint64_t reserved_42_63 : 22;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_char_ctl_cn63xx cn63xxp1;
+ struct cvmx_lmcx_char_ctl_s cn66xx;
+ struct cvmx_lmcx_char_ctl_s cn68xx;
+ struct cvmx_lmcx_char_ctl_cn63xx cn68xxp1;
+ struct cvmx_lmcx_char_ctl_s cnf71xx;
+};
+typedef union cvmx_lmcx_char_ctl cvmx_lmcx_char_ctl_t;
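+
+/*
+ * Editor's sketch (assumptions: cvmx_write_csr() from cvmx.h and a
+ * CVMX_LMCX_CHAR_CTL() address accessor defined earlier in this header).
+ * Driving the 8-bit programmable pattern instead of the PRBS stream:
+ *
+ *     cvmx_lmcx_char_ctl_t cc;
+ *     cc.u64 = 0;
+ *     cc.s.sel = 1;                    1 = programmable pattern, 0 = PRBS
+ *     cc.s.prog = 0xA5;                example pattern value
+ *     cc.s.en = 1;                     enable characterization
+ *     cvmx_write_csr(CVMX_LMCX_CHAR_CTL(0), cc.u64);
+ */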
+
+/**
+ * cvmx_lmc#_char_mask0
+ *
+ * LMC_CHAR_MASK0 = LMC Characterization Mask0
+ * This register is an assortment of control fields needed to characterize the DDR3 interface.
+ */
+union cvmx_lmcx_char_mask0 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Mask for DQ0[63:0] */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_lmcx_char_mask0_s cn61xx;
+ struct cvmx_lmcx_char_mask0_s cn63xx;
+ struct cvmx_lmcx_char_mask0_s cn63xxp1;
+ struct cvmx_lmcx_char_mask0_s cn66xx;
+ struct cvmx_lmcx_char_mask0_s cn68xx;
+ struct cvmx_lmcx_char_mask0_s cn68xxp1;
+ struct cvmx_lmcx_char_mask0_s cnf71xx;
+};
+typedef union cvmx_lmcx_char_mask0 cvmx_lmcx_char_mask0_t;
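+
+/*
+ * Editor's note (covers CHAR_MASK0 through CHAR_MASK4 below): per the field
+ * descriptions, each mask register selects which interface pins take part in
+ * characterization. A sketch that includes all of DQ0[63:0], assuming
+ * cvmx_write_csr() from cvmx.h:
+ *
+ *     cvmx_lmcx_char_mask0_t m0;
+ *     m0.s.mask = 0xFFFFFFFFFFFFFFFFull;
+ *     cvmx_write_csr(CVMX_LMCX_CHAR_MASK0(0), m0.u64);
+ */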
+
+/**
+ * cvmx_lmc#_char_mask1
+ *
+ * LMC_CHAR_MASK1 = LMC Characterization Mask1
+ * This register is an assortment of control fields needed to characterize the DDR3 interface.
+ */
+union cvmx_lmcx_char_mask1 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t mask : 8; /**< Mask for DQ0[71:64] */
+#else
+ uint64_t mask : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_lmcx_char_mask1_s cn61xx;
+ struct cvmx_lmcx_char_mask1_s cn63xx;
+ struct cvmx_lmcx_char_mask1_s cn63xxp1;
+ struct cvmx_lmcx_char_mask1_s cn66xx;
+ struct cvmx_lmcx_char_mask1_s cn68xx;
+ struct cvmx_lmcx_char_mask1_s cn68xxp1;
+ struct cvmx_lmcx_char_mask1_s cnf71xx;
+};
+typedef union cvmx_lmcx_char_mask1 cvmx_lmcx_char_mask1_t;
+
+/**
+ * cvmx_lmc#_char_mask2
+ *
+ * LMC_CHAR_MASK2 = LMC Characterization Mask2
+ * This register is an assortment of control fields needed to characterize the DDR3 interface.
+ */
+union cvmx_lmcx_char_mask2 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Mask for DQ1[63:0] */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_lmcx_char_mask2_s cn61xx;
+ struct cvmx_lmcx_char_mask2_s cn63xx;
+ struct cvmx_lmcx_char_mask2_s cn63xxp1;
+ struct cvmx_lmcx_char_mask2_s cn66xx;
+ struct cvmx_lmcx_char_mask2_s cn68xx;
+ struct cvmx_lmcx_char_mask2_s cn68xxp1;
+ struct cvmx_lmcx_char_mask2_s cnf71xx;
+};
+typedef union cvmx_lmcx_char_mask2 cvmx_lmcx_char_mask2_t;
+
+/**
+ * cvmx_lmc#_char_mask3
+ *
+ * LMC_CHAR_MASK3 = LMC Characterization Mask3
+ * This register is an assortment of control fields needed to characterize the DDR3 interface.
+ */
+union cvmx_lmcx_char_mask3 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t mask : 8; /**< Mask for DQ1[71:64] */
+#else
+ uint64_t mask : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_lmcx_char_mask3_s cn61xx;
+ struct cvmx_lmcx_char_mask3_s cn63xx;
+ struct cvmx_lmcx_char_mask3_s cn63xxp1;
+ struct cvmx_lmcx_char_mask3_s cn66xx;
+ struct cvmx_lmcx_char_mask3_s cn68xx;
+ struct cvmx_lmcx_char_mask3_s cn68xxp1;
+ struct cvmx_lmcx_char_mask3_s cnf71xx;
+};
+typedef union cvmx_lmcx_char_mask3 cvmx_lmcx_char_mask3_t;
+
+/**
+ * cvmx_lmc#_char_mask4
+ *
+ * LMC_CHAR_MASK4 = LMC Characterization Mask4
+ * This register is an assortment of control fields needed to characterize the DDR3 interface.
+ */
+union cvmx_lmcx_char_mask4 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t reset_n_mask : 1; /**< Mask for RESET_L */
+ uint64_t a_mask : 16; /**< Mask for A[15:0] */
+ uint64_t ba_mask : 3; /**< Mask for BA[2:0] */
+ uint64_t we_n_mask : 1; /**< Mask for WE_N */
+ uint64_t cas_n_mask : 1; /**< Mask for CAS_N */
+ uint64_t ras_n_mask : 1; /**< Mask for RAS_N */
+ uint64_t odt1_mask : 2; /**< Mask for ODT1 */
+ uint64_t odt0_mask : 2; /**< Mask for ODT0 */
+ uint64_t cs1_n_mask : 2; /**< Mask for CS1_N */
+ uint64_t cs0_n_mask : 2; /**< Mask for CS0_N */
+ uint64_t cke_mask : 2; /**< Mask for CKE* */
+#else
+ uint64_t cke_mask : 2;
+ uint64_t cs0_n_mask : 2;
+ uint64_t cs1_n_mask : 2;
+ uint64_t odt0_mask : 2;
+ uint64_t odt1_mask : 2;
+ uint64_t ras_n_mask : 1;
+ uint64_t cas_n_mask : 1;
+ uint64_t we_n_mask : 1;
+ uint64_t ba_mask : 3;
+ uint64_t a_mask : 16;
+ uint64_t reset_n_mask : 1;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_lmcx_char_mask4_s cn61xx;
+ struct cvmx_lmcx_char_mask4_s cn63xx;
+ struct cvmx_lmcx_char_mask4_s cn63xxp1;
+ struct cvmx_lmcx_char_mask4_s cn66xx;
+ struct cvmx_lmcx_char_mask4_s cn68xx;
+ struct cvmx_lmcx_char_mask4_s cn68xxp1;
+ struct cvmx_lmcx_char_mask4_s cnf71xx;
+};
+typedef union cvmx_lmcx_char_mask4 cvmx_lmcx_char_mask4_t;
+
+/**
+ * cvmx_lmc#_comp_ctl
+ *
+ * LMC_COMP_CTL = LMC Compensation control
+ *
+ */
+union cvmx_lmcx_comp_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_comp_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t nctl_csr : 4; /**< Compensation control bits */
+ uint64_t nctl_clk : 4; /**< Compensation control bits */
+ uint64_t nctl_cmd : 4; /**< Compensation control bits */
+ uint64_t nctl_dat : 4; /**< Compensation control bits */
+ uint64_t pctl_csr : 4; /**< Compensation control bits */
+ uint64_t pctl_clk : 4; /**< Compensation control bits */
+ uint64_t reserved_0_7 : 8;
+#else
+ uint64_t reserved_0_7 : 8;
+ uint64_t pctl_clk : 4;
+ uint64_t pctl_csr : 4;
+ uint64_t nctl_dat : 4;
+ uint64_t nctl_cmd : 4;
+ uint64_t nctl_clk : 4;
+ uint64_t nctl_csr : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_comp_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t nctl_csr : 4; /**< Compensation control bits */
+ uint64_t nctl_clk : 4; /**< Compensation control bits */
+ uint64_t nctl_cmd : 4; /**< Compensation control bits */
+ uint64_t nctl_dat : 4; /**< Compensation control bits */
+ uint64_t pctl_csr : 4; /**< Compensation control bits */
+ uint64_t pctl_clk : 4; /**< Compensation control bits */
+ uint64_t pctl_cmd : 4; /**< Compensation control bits */
+ uint64_t pctl_dat : 4; /**< Compensation control bits */
+#else
+ uint64_t pctl_dat : 4;
+ uint64_t pctl_cmd : 4;
+ uint64_t pctl_clk : 4;
+ uint64_t pctl_csr : 4;
+ uint64_t nctl_dat : 4;
+ uint64_t nctl_cmd : 4;
+ uint64_t nctl_clk : 4;
+ uint64_t nctl_csr : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_comp_ctl_cn30xx cn31xx;
+ struct cvmx_lmcx_comp_ctl_cn30xx cn38xx;
+ struct cvmx_lmcx_comp_ctl_cn30xx cn38xxp2;
+ struct cvmx_lmcx_comp_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t nctl_csr : 4; /**< Compensation control bits */
+ uint64_t reserved_20_27 : 8;
+ uint64_t nctl_dat : 4; /**< Compensation control bits */
+ uint64_t pctl_csr : 4; /**< Compensation control bits */
+ uint64_t reserved_5_11 : 7;
+ uint64_t pctl_dat : 5; /**< Compensation control bits */
+#else
+ uint64_t pctl_dat : 5;
+ uint64_t reserved_5_11 : 7;
+ uint64_t pctl_csr : 4;
+ uint64_t nctl_dat : 4;
+ uint64_t reserved_20_27 : 8;
+ uint64_t nctl_csr : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_comp_ctl_cn50xx cn52xx;
+ struct cvmx_lmcx_comp_ctl_cn50xx cn52xxp1;
+ struct cvmx_lmcx_comp_ctl_cn50xx cn56xx;
+ struct cvmx_lmcx_comp_ctl_cn50xx cn56xxp1;
+ struct cvmx_lmcx_comp_ctl_cn50xx cn58xx;
+ struct cvmx_lmcx_comp_ctl_cn58xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t nctl_csr : 4; /**< Compensation control bits */
+ uint64_t reserved_20_27 : 8;
+ uint64_t nctl_dat : 4; /**< Compensation control bits */
+ uint64_t pctl_csr : 4; /**< Compensation control bits */
+ uint64_t reserved_4_11 : 8;
+ uint64_t pctl_dat : 4; /**< Compensation control bits */
+#else
+ uint64_t pctl_dat : 4;
+ uint64_t reserved_4_11 : 8;
+ uint64_t pctl_csr : 4;
+ uint64_t nctl_dat : 4;
+ uint64_t reserved_20_27 : 8;
+ uint64_t nctl_csr : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn58xxp1;
+};
+typedef union cvmx_lmcx_comp_ctl cvmx_lmcx_comp_ctl_t;
+
+/**
+ * cvmx_lmc#_comp_ctl2
+ *
+ * LMC_COMP_CTL2 = LMC Compensation control
+ *
+ */
+union cvmx_lmcx_comp_ctl2 {
+ uint64_t u64;
+ struct cvmx_lmcx_comp_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ddr__ptune : 4; /**< DDR PCTL from compensation circuit
+ The encoded value provides debug information for the
+ compensation impedance on P-pullup */
+ uint64_t ddr__ntune : 4; /**< DDR NCTL from compensation circuit
+ The encoded value provides debug information for the
+ compensation impedance on N-pulldown */
+ uint64_t m180 : 1; /**< Cap impedance at 180 Ohm (instead of 240 Ohm) */
+ uint64_t byp : 1; /**< Bypass mode
+ When set, PTUNE,NTUNE are the compensation setting.
+ When clear, DDR_PTUNE,DDR_NTUNE are the compensation setting. */
+ uint64_t ptune : 4; /**< PCTL impedance control in bypass mode */
+ uint64_t ntune : 4; /**< NCTL impedance control in bypass mode */
+ uint64_t rodt_ctl : 4; /**< NCTL RODT impedance control bits
+ This field controls ODT values during a memory read
+ on the Octeon side
+ 0000 = No ODT
+ 0001 = 20 ohm
+ 0010 = 30 ohm
+ 0011 = 40 ohm
+ 0100 = 60 ohm
+ 0101 = 120 ohm
+ 0110-1111 = Reserved */
+ uint64_t cmd_ctl : 4; /**< Drive strength control for CMD/A/RESET_L drivers
+ 0001 = 24 ohm
+ 0010 = 26.67 ohm
+ 0011 = 30 ohm
+ 0100 = 34.3 ohm
+ 0101 = 40 ohm
+ 0110 = 48 ohm
+ 0111 = 60 ohm
+ 0000,1000-1111 = Reserved */
+ uint64_t ck_ctl : 4; /**< Drive strength control for CK/CS*_L/ODT/CKE* drivers
+ 0001 = 24 ohm
+ 0010 = 26.67 ohm
+ 0011 = 30 ohm
+ 0100 = 34.3 ohm
+ 0101 = 40 ohm
+ 0110 = 48 ohm
+ 0111 = 60 ohm
+ 0000,1000-1111 = Reserved */
+ uint64_t dqx_ctl : 4; /**< Drive strength control for DQ/DQS drivers
+ 0001 = 24 ohm
+ 0010 = 26.67 ohm
+ 0011 = 30 ohm
+ 0100 = 34.3 ohm
+ 0101 = 40 ohm
+ 0110 = 48 ohm
+ 0111 = 60 ohm
+ 0000,1000-1111 = Reserved */
+#else
+ uint64_t dqx_ctl : 4;
+ uint64_t ck_ctl : 4;
+ uint64_t cmd_ctl : 4;
+ uint64_t rodt_ctl : 4;
+ uint64_t ntune : 4;
+ uint64_t ptune : 4;
+ uint64_t byp : 1;
+ uint64_t m180 : 1;
+ uint64_t ddr__ntune : 4;
+ uint64_t ddr__ptune : 4;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_lmcx_comp_ctl2_s cn61xx;
+ struct cvmx_lmcx_comp_ctl2_s cn63xx;
+ struct cvmx_lmcx_comp_ctl2_s cn63xxp1;
+ struct cvmx_lmcx_comp_ctl2_s cn66xx;
+ struct cvmx_lmcx_comp_ctl2_s cn68xx;
+ struct cvmx_lmcx_comp_ctl2_s cn68xxp1;
+ struct cvmx_lmcx_comp_ctl2_s cnf71xx;
+};
+typedef union cvmx_lmcx_comp_ctl2 cvmx_lmcx_comp_ctl2_t;
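+
+/*
+ * Editorial sketch (not part of the original SDK sources): programming the
+ * drive-strength and read-ODT encodings documented above, using the 40 ohm
+ * entry from each table. Assumes the CVMX_LMCX_COMP_CTL2() address macro
+ * defined earlier in this file and cvmx_read_csr()/cvmx_write_csr() from
+ * cvmx.h; the 40 ohm choices are illustrative, not board recommendations.
+ */
+static inline void example_lmcx_comp_ctl2_setup(int lmc)
+{
+    cvmx_lmcx_comp_ctl2_t ctl;
+
+    ctl.u64 = cvmx_read_csr(CVMX_LMCX_COMP_CTL2(lmc));
+    ctl.s.dqx_ctl = 0x5;  /* DQ/DQS drivers: 0101 = 40 ohm */
+    ctl.s.ck_ctl = 0x5;   /* CK/CS*_L/ODT/CKE* drivers: 0101 = 40 ohm */
+    ctl.s.cmd_ctl = 0x5;  /* CMD/A/RESET_L drivers: 0101 = 40 ohm */
+    ctl.s.rodt_ctl = 0x3; /* read ODT: 0011 = 40 ohm */
+    cvmx_write_csr(CVMX_LMCX_COMP_CTL2(lmc), ctl.u64);
+}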
+
+/**
+ * cvmx_lmc#_config
+ *
+ * LMC_CONFIG = LMC Configuration Register
+ *
+ * This register controls certain parameters of Memory Configuration
+ *
+ * Notes:
+ * a. Priority order for hardware writes to LMC*_CONFIG/LMC*_FADR/LMC*_SCRAMBLED_FADR/LMC*_ECC_SYND: DED error >= NXM error > SEC error
+ * b. The self refresh entry sequence(s) power the DLL up/down (depending on LMC*_MODEREG_PARAMS0[DLL])
+ * when LMC*_CONFIG[SREF_WITH_DLL] is set
+ * c. Prior to the self-refresh exit sequence, LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 should be re-programmed (if needed) to the
+ * appropriate values
+ *
+ * LMC Bringup Sequence:
+ * 1. SW must ensure there are no pending DRAM transactions and that the DDR PLL and the DLL have been initialized.
+ * 2. Write LMC*_COMP_CTL2, LMC*_CONTROL, LMC*_WODT_MASK, LMC*_DUAL_MEMCFG, LMC*_TIMING_PARAMS0, LMC*_TIMING_PARAMS1,
+ * LMC*_MODEREG_PARAMS0, LMC*_MODEREG_PARAMS1, LMC*_RESET_CTL (with DDR3RST=0), LMC*_CONFIG (with INIT_START=0)
+ * with appropriate values, if necessary.
+ * 3. Wait 200us, then write LMC*_RESET_CTL[DDR3RST] = 1.
+ * 4. Initialize all ranks at once by writing LMC*_CONFIG[RANKMASK][n] = 1, LMC*_CONFIG[INIT_STATUS][n] = 1, and LMC*_CONFIG[INIT_START] = 1
+ * where n is a valid rank index for the specific board configuration.
+ * 5. for each rank n to be write-leveled [
+ * if auto write-leveling is desired [
+ * write LMC*_CONFIG[RANKMASK][n] = 1, LMC*_WLEVEL_CTL appropriately and LMC*_CONFIG[INIT_START] = 1
+ * wait until LMC*_WLEVEL_RANKn[STATUS] = 3
+ * ] else [
+ * write LMC*_WLEVEL_RANKn with appropriate values
+ * ]
+ * ]
+ * 6. for each rank n to be read-leveled [
+ * if auto read-leveling is desired [
+ * write LMC*_CONFIG[RANKMASK][n] = 1, LMC*_RLEVEL_CTL appropriately and LMC*_CONFIG[INIT_START] = 1
+ * wait until LMC*_RLEVEL_RANKn[STATUS] = 3
+ * ] else [
+ * write LMC*_RLEVEL_RANKn with appropriate values
+ * ]
+ * ]
+ */
+union cvmx_lmcx_config {
+ uint64_t u64;
+ struct cvmx_lmcx_config_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t mode32b : 1; /**< 32b Datapath Mode NS
+ Set to 1 if we use only 32 DQ pins
+ 0 for 64b DQ mode. */
+ uint64_t scrz : 1; /**< Hide LMC*_SCRAMBLE_CFG0 and LMC*_SCRAMBLE_CFG1 when set */
+ uint64_t early_unload_d1_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 3
+ reads.
+ The recommended EARLY_UNLOAD_D1_R1 value can be calculated
+ after the final LMC*_RLEVEL_RANK3[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 3 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK3[BYTEi])
+ across all i), then set EARLY_UNLOAD_D1_R1
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset<1:0>!=3)). */
+ uint64_t early_unload_d1_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 2
+ reads.
+ The recommended EARLY_UNLOAD_D1_R0 value can be calculated
+ after the final LMC*_RLEVEL_RANK2[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 2 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK2[BYTEi])
+ across all i), then set EARLY_UNLOAD_D1_R0
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset<1:0>!=3)). */
+ uint64_t early_unload_d0_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 1
+ reads.
+ The recommended EARLY_UNLOAD_D0_R1 value can be calculated
+ after the final LMC*_RLEVEL_RANK1[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 1 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK1[BYTEi])
+ across all i), then set EARLY_UNLOAD_D0_R1
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */
+ uint64_t early_unload_d0_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 0
+ reads.
+ The recommended EARLY_UNLOAD_D0_R0 value can be calculated
+ after the final LMC*_RLEVEL_RANK0[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 0 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK0[BYTEi])
+ across all i), then set EARLY_UNLOAD_D0_R0
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */
+ uint64_t init_status : 4; /**< Indicates status of initialization
+ INIT_STATUS[n] = 1 implies rank n has been initialized
+ SW must set necessary INIT_STATUS bits with the
+ same LMC*_CONFIG write that initiates
+ power-up/init and self-refresh exit sequences
+ (if the required INIT_STATUS bits are not already
+ set before LMC initiates the sequence).
+ INIT_STATUS determines the chip-selects that assert
+ during refresh, ZQCS, and precharge power-down and
+ self-refresh entry/exit SEQUENCE's. */
+ uint64_t mirrmask : 4; /**< Mask determining which ranks are address-mirrored.
+ MIRRMASK<n> = 1 means Rank n addresses are mirrored
+ for 0 <= n <= 3
+ A mirrored read/write has these differences:
+ - DDR_BA<1> is swapped with DDR_BA<0>
+ - DDR_A<8> is swapped with DDR_A<7>
+ - DDR_A<6> is swapped with DDR_A<5>
+ - DDR_A<4> is swapped with DDR_A<3>
+ When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
+ uint64_t rankmask : 4; /**< Mask to select rank to be leveled/initialized.
+ To write-level/read-level/initialize rank i, set RANKMASK<i>
+ RANK_ENA=1 RANK_ENA=0
+ RANKMASK<0> = DIMM0_CS0 DIMM0_CS0
+ RANKMASK<1> = DIMM0_CS1 MBZ
+ RANKMASK<2> = DIMM1_CS0 DIMM1_CS0
+ RANKMASK<3> = DIMM1_CS1 MBZ
+ For read/write leveling, each rank has to be leveled separately,
+ so RANKMASK should only have one bit set.
+ RANKMASK is not used during self-refresh entry/exit and
+ precharge power-down entry/exit instruction sequences.
+ When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
+ uint64_t rank_ena : 1; /**< RANK ena (for use with dual-rank DIMMs)
+ For dual-rank DIMMs, the rank_ena bit will enable
+ the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
+ (pbank_lsb-1) address bit.
+ Write 0 for SINGLE ranked DIMM's. */
+ uint64_t sref_with_dll : 1; /**< Self-refresh entry/exit write MR1 and MR2
+ When set, self-refresh entry and exit instruction sequences
+ write MR1 and MR2 (in all ranks). (The writes occur before
+ self-refresh entry, and after self-refresh exit.)
+ When clear, self-refresh entry and exit instruction sequences
+ do not write any registers in the DDR3 parts. */
+ uint64_t early_dqx : 1; /**< Send DQx signals one CK cycle earlier for the case when
+ the shortest DQx lines have a larger delay than the CK line */
+ uint64_t sequence : 3; /**< Selects the sequence that LMC runs after a 0->1
+ transition on LMC*_CONFIG[INIT_START].
+ SEQUENCE=0=power-up/init:
+ - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
+ - INIT_STATUS must equal RANKMASK
+ - DDR_DIMM*_CKE signals activated (if they weren't already active)
+ - RDIMM register control words 0-15 will be written to RANKMASK-selected
+ RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
+ LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
+ LMC*_DIMM_CTL descriptions below for more details.)
+ - MR0, MR1, MR2, and MR3 will be written to selected ranks
+ SEQUENCE=1=read-leveling:
+ - RANKMASK selects the rank to be read-leveled
+ - MR3 written to selected rank
+ SEQUENCE=2=self-refresh entry:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
+ - DDR_DIMM*_CKE signals de-activated
+ SEQUENCE=3=self-refresh exit:
+ - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
+ - DDR_DIMM*_CKE signals activated
+ - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
+ SEQUENCE=4=precharge power-down entry:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - DDR_DIMM*_CKE signals de-activated
+ SEQUENCE=5=precharge power-down exit:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - DDR_DIMM*_CKE signals activated
+ SEQUENCE=6=write-leveling:
+ - RANKMASK selects the rank to be write-leveled
+ - INIT_STATUS must indicate all ranks with attached DRAM
+ - MR1 and MR2 written to INIT_STATUS-selected ranks
+ SEQUENCE=7=illegal
+ Precharge power-down entry and exit SEQUENCE's may also
+ be automatically generated by the HW when IDLEPOWER!=0.
+ Self-refresh entry SEQUENCE's may also be automatically
+ generated by hardware upon a chip warm or soft reset
+ sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
+ LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
+ to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
+ Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
+ If there are two consecutive power-up/init's without
+ a DRESET assertion between them, LMC asserts DDR_DIMM*_CKE as part of
+ the first power-up/init, and continues to assert DDR_DIMM*_CKE
+ through the remainder of the first and the second power-up/init.
+ If DDR_DIMM*_CKE deactivation and reactivation is needed for
+ a second power-up/init, a DRESET assertion is required
+ between the first and the second. */
+ uint64_t ref_zqcs_int : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
+ increments. A Refresh sequence is triggered when bits
+ [24:18] are equal to 0, and a ZQCS sequence is triggered
+ when [36:18] are equal to 0.
+ Program [24:18] to RND-DN(tREFI/clkPeriod/512)
+ Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
+ that this value should always be greater than 32, to account for
+ resistor calibration delays.
+ 000_00000000_00000000: RESERVED
+ Max Refresh interval = 127 * 512 = 65024 CKs
+ Max ZQCS interval = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for an 800 MHz CK
+ LMC*_CONFIG[INIT_STATUS] determines which ranks receive
+ the REF / ZQCS. LMC does not send any refreshes / ZQCS's
+ when LMC*_CONFIG[INIT_STATUS]=0. */
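+ /* Worked example (editorial note, not from the original SDK text):
+ at 800 MHz CK, clkPeriod = 1.25 ns. With DDR3 tREFI = 7.8 us,
+ RND-DN(7.8us/1.25ns/512) = RND-DN(12.19) = 12, so program [24:18] = 12.
+ Assuming a 128 ms ZQCS interval,
+ RND-DN(128ms/1.25ns/(512*64)) = 3125, so program [36:25] = 3125. */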
+ uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter,
+ and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
+ CSR's. SW should write this to a one, then re-write
+ it to a zero to cause the reset. */
+ uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation
+ 0=disabled, 1=enabled */
+ uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after
+ having waited for 2^FORCEWRITE CK cycles. 0=disabled. */
+ uint64_t idlepower : 3; /**< Enter precharge power-down mode after the memory
+ controller has been idle for 2^(2+IDLEPOWER) CK cycles.
+ 0=disabled.
+ This field should only be programmed after initialization.
+ LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
+ is disabled during the precharge power-down. */
+ uint64_t pbank_lsb : 4; /**< DIMM address bit select
+ Referring to the explanation for ROW_LSB,
+ PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
+ In the 512MB DIMM Example, assuming no rank bits:
+ pbank_lsb=mem_addr[15+13] for 64b mode
+ =mem_addr[14+13] for 32b mode
+ Decoding for pbank_lsb
+ - 0000:DIMM = mem_adr[28] / rank = mem_adr[27] (if RANK_ENA)
+ - 0001:DIMM = mem_adr[29] / rank = mem_adr[28] "
+ - 0010:DIMM = mem_adr[30] / rank = mem_adr[29] "
+ - 0011:DIMM = mem_adr[31] / rank = mem_adr[30] "
+ - 0100:DIMM = mem_adr[32] / rank = mem_adr[31] "
+ - 0101:DIMM = mem_adr[33] / rank = mem_adr[32] "
+ - 0110:DIMM = mem_adr[34] / rank = mem_adr[33] "
+ - 0111:DIMM = 0 / rank = mem_adr[34] "
+ - 1000-1111: RESERVED
+ For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
+ DDR3 parts, the column address width = 10, so with
+ 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
+ With rank_ena = 0, pbank_lsb = 2
+ With rank_ena = 1, pbank_lsb = 3 */
+ uint64_t row_lsb : 3; /**< Row Address bit select
+ Encoding used to determine which memory address
+ bit position represents the low order DDR ROW address.
+ The processor's memory address[34:7] needs to be
+ translated to DRAM addresses (bnk,row,col,rank and DIMM)
+ and that is a function of the following:
+ 1. Datapath Width (64 or 32)
+ 2. \# Banks (8)
+ 3. \# Column Bits of the memory part - spec'd indirectly
+ by this register.
+ 4. \# Row Bits of the memory part - spec'd indirectly
+ 5. \# Ranks in a DIMM - spec'd by RANK_ENA
+ 6. \# DIMM's in the system by the register below (PBANK_LSB).
+ Col Address starts from mem_addr[2] for 32b (4Bytes)
+ dq width or from mem_addr[3] for 64b (8Bytes) dq width
+ \# col + \# bank = 12. Hence row_lsb is mem_adr[15] for
+ 64b mode or mem_adr[14] for 32b mode. Hence row_lsb
+ parameter should be set to 001 (64b) or 000 (32b).
+ Decoding for row_lsb
+ - 000: row_lsb = mem_adr[14]
+ - 001: row_lsb = mem_adr[15]
+ - 010: row_lsb = mem_adr[16]
+ - 011: row_lsb = mem_adr[17]
+ - 100: row_lsb = mem_adr[18]
+ - 101: row_lsb = mem_adr[19]
+ - 110: row_lsb = mem_adr[20]
+ - 111: RESERVED
+ For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
+ DDR3 parts, the column address width = 10, so with
+ 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
+ uint64_t ecc_ena : 1; /**< ECC Enable: When set, enables the 8b ECC
+ check/correct logic. Should be 1 when used with DIMMs
+ with ECC; 0 otherwise.
+ When this mode is turned on, DQ[71:64]
+ on writes will contain the ECC code generated for
+ the 64 bits of data to be
+ written in the memory, and on later reads will be used
+ to check for Single bit errors (which will be auto-
+ corrected) and Double bit errors (which will be
+ reported). When not turned on, DQ[71:64]
+ are driven to 0. Please refer to the SEC_ERR, DED_ERR,
+ LMC*_FADR, LMC*_SCRAMBLED_FADR and LMC*_ECC_SYND registers
+ for diagnostic information when there is an error. */
+ uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory sequence that is
+ selected by LMC*_CONFIG[SEQUENCE]. This register is a
+ oneshot and clears itself each time it is set. */
+#else
+ uint64_t init_start : 1;
+ uint64_t ecc_ena : 1;
+ uint64_t row_lsb : 3;
+ uint64_t pbank_lsb : 4;
+ uint64_t idlepower : 3;
+ uint64_t forcewrite : 4;
+ uint64_t ecc_adr : 1;
+ uint64_t reset : 1;
+ uint64_t ref_zqcs_int : 19;
+ uint64_t sequence : 3;
+ uint64_t early_dqx : 1;
+ uint64_t sref_with_dll : 1;
+ uint64_t rank_ena : 1;
+ uint64_t rankmask : 4;
+ uint64_t mirrmask : 4;
+ uint64_t init_status : 4;
+ uint64_t early_unload_d0_r0 : 1;
+ uint64_t early_unload_d0_r1 : 1;
+ uint64_t early_unload_d1_r0 : 1;
+ uint64_t early_unload_d1_r1 : 1;
+ uint64_t scrz : 1;
+ uint64_t mode32b : 1;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } s;
+ struct cvmx_lmcx_config_s cn61xx;
+ struct cvmx_lmcx_config_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t early_unload_d1_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 3
+ reads.
+ The recommended EARLY_UNLOAD_D1_R1 value can be calculated
+ after the final LMC*_RLEVEL_RANK3[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 3 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK3[BYTEi])
+ across all i), then set EARLY_UNLOAD_D1_R1
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset<1:0>!=3)). */
+ uint64_t early_unload_d1_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 2
+ reads.
+ The recommended EARLY_UNLOAD_D1_R0 value can be calculated
+ after the final LMC*_RLEVEL_RANK2[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 2 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK2[BYTEi])
+ across all i), then set EARLY_UNLOAD_D1_R0
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset<1:0>!=3)). */
+ uint64_t early_unload_d0_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 1
+ reads.
+ The recommended EARLY_UNLOAD_D0_R1 value can be calculated
+ after the final LMC*_RLEVEL_RANK1[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 1 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK1[BYTEi])
+ across all i), then set EARLY_UNLOAD_D0_R1
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */
+ uint64_t early_unload_d0_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 0
+ reads.
+ The recommended EARLY_UNLOAD_D0_R0 value can be calculated
+ after the final LMC*_RLEVEL_RANK0[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 0 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK0[BYTEi])
+ across all i), then set EARLY_UNLOAD_D0_R0
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */
+ uint64_t init_status : 4; /**< Indicates status of initialization
+ INIT_STATUS[n] = 1 implies rank n has been initialized
+ SW must set necessary INIT_STATUS bits with the
+ same LMC*_CONFIG write that initiates
+ power-up/init and self-refresh exit sequences
+ (if the required INIT_STATUS bits are not already
+ set before LMC initiates the sequence).
+ INIT_STATUS determines the chip-selects that assert
+ during refresh, ZQCS, and precharge power-down and
+ self-refresh entry/exit SEQUENCE's. */
+ uint64_t mirrmask : 4; /**< Mask determining which ranks are address-mirrored.
+ MIRRMASK<n> = 1 means Rank n addresses are mirrored
+ for 0 <= n <= 3
+ A mirrored read/write has these differences:
+ - DDR_BA<1> is swapped with DDR_BA<0>
+ - DDR_A<8> is swapped with DDR_A<7>
+ - DDR_A<6> is swapped with DDR_A<5>
+ - DDR_A<4> is swapped with DDR_A<3>
+ When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
+ uint64_t rankmask : 4; /**< Mask to select rank to be leveled/initialized.
+ To write-level/read-level/initialize rank i, set RANKMASK<i>
+ RANK_ENA=1 RANK_ENA=0
+ RANKMASK<0> = DIMM0_CS0 DIMM0_CS0
+ RANKMASK<1> = DIMM0_CS1 MBZ
+ RANKMASK<2> = DIMM1_CS0 DIMM1_CS0
+ RANKMASK<3> = DIMM1_CS1 MBZ
+ For read/write leveling, each rank has to be leveled separately,
+ so RANKMASK should only have one bit set.
+ RANKMASK is not used during self-refresh entry/exit and
+ precharge power-down entry/exit instruction sequences.
+ When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
+ uint64_t rank_ena : 1; /**< RANK ena (for use with dual-rank DIMMs)
+ For dual-rank DIMMs, the rank_ena bit will enable
+ the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
+ (pbank_lsb-1) address bit.
+ Write 0 for SINGLE ranked DIMM's. */
+ uint64_t sref_with_dll : 1; /**< Self-refresh entry/exit write MR1 and MR2
+ When set, self-refresh entry and exit instruction sequences
+ write MR1 and MR2 (in all ranks). (The writes occur before
+ self-refresh entry, and after self-refresh exit.)
+ When clear, self-refresh entry and exit instruction sequences
+ do not write any registers in the DDR3 parts. */
+ uint64_t early_dqx : 1; /**< Send DQx signals one CK cycle earlier for the case when
+ the shortest DQx lines have a larger delay than the CK line */
+ uint64_t sequence : 3; /**< Selects the sequence that LMC runs after a 0->1
+ transition on LMC*_CONFIG[INIT_START].
+ SEQUENCE=0=power-up/init:
+ - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
+ - INIT_STATUS must equal RANKMASK
+ - DDR_CKE* signals activated (if they weren't already active)
+ - RDIMM register control words 0-15 will be written to RANKMASK-selected
+ RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
+ LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
+ LMC*_DIMM_CTL descriptions below for more details.)
+ - MR0, MR1, MR2, and MR3 will be written to selected ranks
+ SEQUENCE=1=read-leveling:
+ - RANKMASK selects the rank to be read-leveled
+ - MR3 written to selected rank
+ SEQUENCE=2=self-refresh entry:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
+ - DDR_CKE* signals de-activated
+ SEQUENCE=3=self-refresh exit:
+ - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
+ - DDR_CKE* signals activated
+ - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
+ SEQUENCE=4=precharge power-down entry:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - DDR_CKE* signals de-activated
+ SEQUENCE=5=precharge power-down exit:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - DDR_CKE* signals activated
+ SEQUENCE=6=write-leveling:
+ - RANKMASK selects the rank to be write-leveled
+ - INIT_STATUS must indicate all ranks with attached DRAM
+ - MR1 and MR2 written to INIT_STATUS-selected ranks
+ SEQUENCE=7=illegal
+ Precharge power-down entry and exit SEQUENCE's may also
+ be automatically generated by the HW when IDLEPOWER!=0.
+ Self-refresh entry SEQUENCE's may also be automatically
+ generated by hardware upon a chip warm or soft reset
+ sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
+ LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
+ to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
+ Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
+ If there are two consecutive power-up/init's without
+ a DRESET assertion between them, LMC asserts DDR_CKE* as part of
+ the first power-up/init, and continues to assert DDR_CKE*
+ through the remainder of the first and the second power-up/init.
+ If DDR_CKE* deactivation and reactivation is needed for
+ a second power-up/init, a DRESET assertion is required
+ between the first and the second. */
+ uint64_t ref_zqcs_int : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
+ increments. A Refresh sequence is triggered when bits
+ [24:18] are equal to 0, and a ZQCS sequence is triggered
+ when [36:18] are equal to 0.
+ Program [24:18] to RND-DN(tREFI/clkPeriod/512)
+ Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
+ that this value should always be greater than 32, to account for
+ resistor calibration delays.
+ 000_00000000_00000000: RESERVED
+ Max Refresh interval = 127 * 512 = 65024 CKs
+ Max ZQCS interval = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for an 800 MHz CK
+ LMC*_CONFIG[INIT_STATUS] determines which ranks receive
+ the REF / ZQCS. LMC does not send any refreshes / ZQCS's
+ when LMC*_CONFIG[INIT_STATUS]=0. */
+ uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter,
+ and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
+ CSR's. SW should write this to a one, then re-write
+ it to a zero to cause the reset. */
+ uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation
+ 0=disabled, 1=enabled */
+ uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after
+ having waited for 2^FORCEWRITE CK cycles. 0=disabled. */
+ uint64_t idlepower : 3; /**< Enter precharge power-down mode after the memory
+ controller has been idle for 2^(2+IDLEPOWER) CK cycles.
+ 0=disabled.
+ This field should only be programmed after initialization.
+ LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
+ is disabled during the precharge power-down. */
+ uint64_t pbank_lsb : 4; /**< DIMM address bit select
+ Referring to the explanation for ROW_LSB,
+ PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
+ Decoding for pbank_lsb
+ - 0000:DIMM = mem_adr[28] / rank = mem_adr[27] (if RANK_ENA)
+ - 0001:DIMM = mem_adr[29] / rank = mem_adr[28] "
+ - 0010:DIMM = mem_adr[30] / rank = mem_adr[29] "
+ - 0011:DIMM = mem_adr[31] / rank = mem_adr[30] "
+ - 0100:DIMM = mem_adr[32] / rank = mem_adr[31] "
+ - 0101:DIMM = mem_adr[33] / rank = mem_adr[32] "
+ - 0110:DIMM = mem_adr[34] / rank = mem_adr[33] "
+ - 0111:DIMM = 0 / rank = mem_adr[34] "
+ - 1000-1111: RESERVED
+ For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
+ DDR3 parts, the column address width = 10, so with
+ 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
+ With rank_ena = 0, pbank_lsb = 2
+ With rank_ena = 1, pbank_lsb = 3 */
+ uint64_t row_lsb : 3; /**< Row Address bit select
+ Encoding used to determine which memory address
+ bit position represents the low order DDR ROW address.
+ The processor's memory address[34:7] needs to be
+ translated to DRAM addresses (bnk,row,col,rank and DIMM)
+ and that is a function of the following:
+ 1. Datapath Width (64)
+ 2. \# Banks (8)
+ 3. \# Column Bits of the memory part - spec'd indirectly
+ by this register.
+ 4. \# Row Bits of the memory part - spec'd indirectly
+ 5. \# Ranks in a DIMM - spec'd by RANK_ENA
+ 6. \# DIMM's in the system by the register below (PBANK_LSB).
+ Decoding for row_lsb
+ - 000: row_lsb = mem_adr[14]
+ - 001: row_lsb = mem_adr[15]
+ - 010: row_lsb = mem_adr[16]
+ - 011: row_lsb = mem_adr[17]
+ - 100: row_lsb = mem_adr[18]
+ - 101: row_lsb = mem_adr[19]
+ - 110: row_lsb = mem_adr[20]
+ - 111: RESERVED
+ For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
+ DDR3 parts, the column address width = 10, so with
+ 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
+ uint64_t ecc_ena : 1; /**< ECC Enable: When set, enables the 8b ECC
+ check/correct logic. Should be 1 when used with DIMMs
+ with ECC; 0 otherwise.
+ When this mode is turned on, DQ[71:64]
+ on writes will contain the ECC code generated for
+ the 64 bits of data to be
+ written in the memory, and on later reads will be used
+ to check for Single bit errors (which will be auto-
+ corrected) and Double bit errors (which will be
+ reported). When not turned on, DQ[71:64]
+ are driven to 0. Please refer to the SEC_ERR, DED_ERR,
+ LMC*_FADR, and LMC*_ECC_SYND registers
+ for diagnostic information when there is an error. */
+ uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory sequence that is
+ selected by LMC*_CONFIG[SEQUENCE]. This register is a
+ oneshot and clears itself each time it is set. */
+#else
+ uint64_t init_start : 1;
+ uint64_t ecc_ena : 1;
+ uint64_t row_lsb : 3;
+ uint64_t pbank_lsb : 4;
+ uint64_t idlepower : 3;
+ uint64_t forcewrite : 4;
+ uint64_t ecc_adr : 1;
+ uint64_t reset : 1;
+ uint64_t ref_zqcs_int : 19;
+ uint64_t sequence : 3;
+ uint64_t early_dqx : 1;
+ uint64_t sref_with_dll : 1;
+ uint64_t rank_ena : 1;
+ uint64_t rankmask : 4;
+ uint64_t mirrmask : 4;
+ uint64_t init_status : 4;
+ uint64_t early_unload_d0_r0 : 1;
+ uint64_t early_unload_d0_r1 : 1;
+ uint64_t early_unload_d1_r0 : 1;
+ uint64_t early_unload_d1_r1 : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_config_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63 : 9;
+ uint64_t init_status : 4; /**< Indicates status of initialization
+ INIT_STATUS[n] = 1 implies rank n has been initialized
+ SW must set necessary INIT_STATUS bits with the
+ same LMC*_CONFIG write that initiates
+ power-up/init and self-refresh exit sequences
+ (if the required INIT_STATUS bits are not already
+ set before LMC initiates the sequence).
+ INIT_STATUS determines the chip-selects that assert
+ during refresh, ZQCS, and precharge power-down and
+ self-refresh entry/exit SEQUENCE's. */
+ uint64_t mirrmask : 4; /**< Mask determining which ranks are address-mirrored.
+ MIRRMASK<n> = 1 means Rank n addresses are mirrored
+ for 0 <= n <= 3
+ A mirrored read/write has these differences:
+ - DDR_BA<1> is swapped with DDR_BA<0>
+ - DDR_A<8> is swapped with DDR_A<7>
+ - DDR_A<6> is swapped with DDR_A<5>
+ - DDR_A<4> is swapped with DDR_A<3>
+ When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
+ uint64_t rankmask : 4; /**< Mask to select rank to be leveled/initialized.
+ To write-level/read-level/initialize rank i, set RANKMASK<i>
+ RANK_ENA=1 RANK_ENA=0
+ RANKMASK<0> = DIMM0_CS0 DIMM0_CS0
+ RANKMASK<1> = DIMM0_CS1 MBZ
+ RANKMASK<2> = DIMM1_CS0 DIMM1_CS0
+ RANKMASK<3> = DIMM1_CS1 MBZ
+ For read/write leveling, each rank has to be leveled separately,
+ so RANKMASK should only have one bit set.
+ RANKMASK is not used during self-refresh entry/exit and
+ precharge power-down entry/exit instruction sequences.
+ When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
+ uint64_t rank_ena : 1; /**< RANK ena (for use with dual-rank DIMMs)
+ For dual-rank DIMMs, the rank_ena bit will enable
+ the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
+ (pbank_lsb-1) address bit.
+ Write 0 for SINGLE ranked DIMM's. */
+ uint64_t sref_with_dll : 1; /**< Self-refresh entry/exit write MR1 and MR2
+ When set, self-refresh entry and exit instruction sequences
+ write MR1 and MR2 (in all ranks). (The writes occur before
+ self-refresh entry, and after self-refresh exit.)
+ When clear, self-refresh entry and exit instruction sequences
+ do not write any registers in the DDR3 parts. */
+ uint64_t early_dqx : 1; /**< Send DQx signals one CK cycle earlier for the case when
+ the shortest DQx lines have a larger delay than the CK line */
+ uint64_t sequence : 3; /**< Selects the sequence that LMC runs after a 0->1
+ transition on LMC*_CONFIG[INIT_START].
+ SEQUENCE=0=power-up/init:
+ - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
+ - INIT_STATUS must equal RANKMASK
+ - DDR_CKE* signals activated (if they weren't already active)
+ - RDIMM register control words 0-15 will be written to RANKMASK-selected
+ RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
+ LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
+ LMC*_DIMM_CTL descriptions below for more details.)
+ - MR0, MR1, MR2, and MR3 will be written to selected ranks
+ SEQUENCE=1=read-leveling:
+ - RANKMASK selects the rank to be read-leveled
+ - MR3 written to selected rank
+ SEQUENCE=2=self-refresh entry:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
+ - DDR_CKE* signals de-activated
+ SEQUENCE=3=self-refresh exit:
+ - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
+ - DDR_CKE* signals activated
+ - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
+ SEQUENCE=4=precharge power-down entry:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - DDR_CKE* signals de-activated
+ SEQUENCE=5=precharge power-down exit:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - DDR_CKE* signals activated
+ SEQUENCE=6=write-leveling:
+ - RANKMASK selects the rank to be write-leveled
+ - INIT_STATUS must indicate all ranks with attached DRAM
+ - MR1 and MR2 written to INIT_STATUS-selected ranks
+ SEQUENCE=7=illegal
+ Precharge power-down entry and exit SEQUENCE's may also
+ be automatically generated by the HW when IDLEPOWER!=0.
+ Self-refresh entry SEQUENCE's may also be automatically
+ generated by hardware upon a chip warm or soft reset
+ sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
+ LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
+ to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
+ Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
+ If there are two consecutive power-up/init's without
+ a DRESET assertion between them, LMC asserts DDR_CKE* as part of
+ the first power-up/init, and continues to assert DDR_CKE*
+ through the remainder of the first and the second power-up/init.
+ If DDR_CKE* deactivation and reactivation is needed for
+ a second power-up/init, a DRESET assertion is required
+ between the first and the second. */
+ uint64_t ref_zqcs_int : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
+ increments. A Refresh sequence is triggered when bits
+ [24:18] are equal to 0, and a ZQCS sequence is triggered
+ when [36:18] are equal to 0.
+ Program [24:18] to RND-DN(tREFI/clkPeriod/512)
+ Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
+ that this value should always be greater than 32, to account for
+ resistor calibration delays.
+ 000_00000000_00000000: RESERVED
+ Max Refresh interval = 127 * 512 = 65024 CKs
+ Max ZQCS interval = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for an 800 MHz CK
+ LMC*_CONFIG[INIT_STATUS] determines which ranks receive
+ the REF / ZQCS. LMC does not send any refreshes / ZQCS's
+ when LMC*_CONFIG[INIT_STATUS]=0. */
+ uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter,
+ and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
+ CSR's. SW should write this to a one, then re-write
+ it to a zero to cause the reset. */
+ uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation
+ 0=disabled, 1=enabled */
+ uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after
+ having waited for 2^FORCEWRITE CK cycles. 0=disabled. */
+ uint64_t idlepower : 3; /**< Enter precharge power-down mode after the memory
+ controller has been idle for 2^(2+IDLEPOWER) CK cycles.
+ 0=disabled.
+ This field should only be programmed after initialization.
+ LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
+ is disabled during the precharge power-down. */
+ uint64_t pbank_lsb : 4; /**< DIMM address bit select
+ Referring to the explanation for ROW_LSB,
+ PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
+ Decoding for pbank_lsb
+ - 0000:DIMM = mem_adr[28] / rank = mem_adr[27] (if RANK_ENA)
+ - 0001:DIMM = mem_adr[29] / rank = mem_adr[28] "
+ - 0010:DIMM = mem_adr[30] / rank = mem_adr[29] "
+ - 0011:DIMM = mem_adr[31] / rank = mem_adr[30] "
+ - 0100:DIMM = mem_adr[32] / rank = mem_adr[31] "
+ - 0101:DIMM = mem_adr[33] / rank = mem_adr[32] "
+ - 0110:DIMM = mem_adr[34] / rank = mem_adr[33] "
+ - 0111:DIMM = 0 / rank = mem_adr[34] "
+ - 1000-1111: RESERVED
+ For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
+ DDR3 parts, the column address width = 10, so with
+ 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
+ With rank_ena = 0, pbank_lsb = 2
+ With rank_ena = 1, pbank_lsb = 3 */
+ uint64_t row_lsb : 3; /**< Row Address bit select
+ Encoding used to determine which memory address
+ bit position represents the low order DDR ROW address.
+ The processor's memory address[34:7] needs to be
+ translated to DRAM addresses (bnk,row,col,rank and DIMM)
+ and that is a function of the following:
+ 1. Datapath Width (64)
+ 2. \# Banks (8)
+ 3. \# Column Bits of the memory part - spec'd indirectly
+ by this register.
+ 4. \# Row Bits of the memory part - spec'd indirectly
+ 5. \# Ranks in a DIMM - spec'd by RANK_ENA
+ 6. \# DIMM's in the system by the register below (PBANK_LSB).
+ Decoding for row_lsb
+ - 000: row_lsb = mem_adr[14]
+ - 001: row_lsb = mem_adr[15]
+ - 010: row_lsb = mem_adr[16]
+ - 011: row_lsb = mem_adr[17]
+ - 100: row_lsb = mem_adr[18]
+ - 101: row_lsb = mem_adr[19]
+ - 110: row_lsb = mem_adr[20]
+ - 111: RESERVED
+ For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
+ DDR3 parts, the column address width = 10, so with
+ 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
+ uint64_t ecc_ena : 1; /**< ECC Enable: When set, enables the 8b ECC
+ check/correct logic. Should be 1 when used with DIMMs
+ with ECC; 0 otherwise.
+ When this mode is turned on, DQ[71:64]
+ on writes will contain the ECC code generated for
+ the 64 bits of data to be
+ written in the memory, and on later reads will be used
+ to check for Single bit errors (which will be auto-
+ corrected) and Double bit errors (which will be
+ reported). When not turned on, DQ[71:64]
+ are driven to 0. Please refer to the SEC_ERR, DED_ERR,
+ LMC*_FADR, and LMC*_ECC_SYND registers
+ for diagnostic information when there is an error. */
+ uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory sequence that is
+ selected by LMC*_CONFIG[SEQUENCE]. This register is a
+ oneshot and clears itself each time it is set. */
+#else
+ uint64_t init_start : 1;
+ uint64_t ecc_ena : 1;
+ uint64_t row_lsb : 3;
+ uint64_t pbank_lsb : 4;
+ uint64_t idlepower : 3;
+ uint64_t forcewrite : 4;
+ uint64_t ecc_adr : 1;
+ uint64_t reset : 1;
+ uint64_t ref_zqcs_int : 19;
+ uint64_t sequence : 3;
+ uint64_t early_dqx : 1;
+ uint64_t sref_with_dll : 1;
+ uint64_t rank_ena : 1;
+ uint64_t rankmask : 4;
+ uint64_t mirrmask : 4;
+ uint64_t init_status : 4;
+ uint64_t reserved_55_63 : 9;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_config_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t scrz : 1; /**< Hide LMC*_SCRAMBLE_CFG0 and LMC*_SCRAMBLE_CFG1 when set */
+ uint64_t early_unload_d1_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 3
+ reads.
+ The recommended EARLY_UNLOAD_D1_R1 value can be calculated
+ after the final LMC*_RLEVEL_RANK3[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 3 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK3[BYTEi])
+ across all i), then set EARLY_UNLOAD_D1_R1
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset<1:0>!=3)). */
+ uint64_t early_unload_d1_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 2
+ reads.
+ The recommended EARLY_UNLOAD_D1_R0 value can be calculated
+ after the final LMC*_RLEVEL_RANK2[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 2 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK2[BYTEi])
+ across all i), then set EARLY_UNLOAD_D1_R0
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset<1:0>!=3)). */
+ uint64_t early_unload_d0_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 1
+ reads.
+ The recommended EARLY_UNLOAD_D0_R1 value can be calculated
+ after the final LMC*_RLEVEL_RANK1[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 1 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK1[BYTEi])
+ across all i), then set EARLY_UNLOAD_D0_R1
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */
+ uint64_t early_unload_d0_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 0
+ reads.
+ The recommended EARLY_UNLOAD_D0_R0 value can be calculated
+ after the final LMC*_RLEVEL_RANK0[BYTE*] values are
+ selected (as part of read-leveling initialization).
+ Then, determine the largest read-leveling setting
+ for rank 0 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK0[BYTEi])
+ across all i), then set EARLY_UNLOAD_D0_R0
+ when the low two bits of this largest setting are not
+ 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */
+ uint64_t init_status : 4; /**< Indicates status of initialization
+ INIT_STATUS[n] = 1 implies rank n has been initialized
+ SW must set necessary INIT_STATUS bits with the
+ same LMC*_CONFIG write that initiates
+ power-up/init and self-refresh exit sequences
+ (if the required INIT_STATUS bits are not already
+ set before LMC initiates the sequence).
+ INIT_STATUS determines the chip-selects that assert
+ during refresh, ZQCS, and precharge power-down and
+ self-refresh entry/exit SEQUENCE's. */
+ uint64_t mirrmask : 4; /**< Mask determining which ranks are address-mirrored.
+ MIRRMASK<n> = 1 means Rank n addresses are mirrored
+ for 0 <= n <= 3
+ A mirrored read/write has these differences:
+ - DDR_BA<1> is swapped with DDR_BA<0>
+ - DDR_A<8> is swapped with DDR_A<7>
+ - DDR_A<6> is swapped with DDR_A<5>
+ - DDR_A<4> is swapped with DDR_A<3>
+ When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
+ uint64_t rankmask : 4; /**< Mask to select rank to be leveled/initialized.
+ To write-level/read-level/initialize rank i, set RANKMASK<i>
+ RANK_ENA=1 RANK_ENA=0
+ RANKMASK<0> = DIMM0_CS0 DIMM0_CS0
+ RANKMASK<1> = DIMM0_CS1 MBZ
+ RANKMASK<2> = DIMM1_CS0 DIMM1_CS0
+ RANKMASK<3> = DIMM1_CS1 MBZ
+ For read/write leveling, each rank has to be leveled separately,
+ so RANKMASK should only have one bit set.
+ RANKMASK is not used during self-refresh entry/exit and
+ precharge power-down entry/exit instruction sequences.
+ When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
+ uint64_t rank_ena : 1; /**< RANK ena (for use with dual-rank DIMMs)
+ For dual-rank DIMMs, the rank_ena bit will enable
+ the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
+ (pbank_lsb-1) address bit.
+ Write 0 for SINGLE ranked DIMM's. */
+ uint64_t sref_with_dll : 1; /**< Self-refresh entry/exit write MR1 and MR2
+ When set, self-refresh entry and exit instruction sequences
+ write MR1 and MR2 (in all ranks). (The writes occur before
+ self-refresh entry, and after self-refresh exit.)
+ When clear, self-refresh entry and exit instruction sequences
+ do not write any registers in the DDR3 parts. */
+ uint64_t early_dqx : 1; /**< Send DQx signals one CK cycle earlier for the case when
+ the shortest DQx lines have a larger delay than the CK line */
+ uint64_t sequence : 3; /**< Selects the sequence that LMC runs after a 0->1
+ transition on LMC*_CONFIG[INIT_START].
+ SEQUENCE=0=power-up/init:
+ - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
+ - INIT_STATUS must equal RANKMASK
+ - DDR_CKE* signals activated (if they weren't already active)
+ - RDIMM register control words 0-15 will be written to RANKMASK-selected
+ RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
+ LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
+ LMC*_DIMM_CTL descriptions below for more details.)
+ - MR0, MR1, MR2, and MR3 will be written to selected ranks
+ SEQUENCE=1=read-leveling:
+ - RANKMASK selects the rank to be read-leveled
+ - MR3 written to selected rank
+ SEQUENCE=2=self-refresh entry:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
+ - DDR_CKE* signals de-activated
+ SEQUENCE=3=self-refresh exit:
+ - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
+ - DDR_CKE* signals activated
+ - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
+ SEQUENCE=4=precharge power-down entry:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - DDR_CKE* signals de-activated
+ SEQUENCE=5=precharge power-down exit:
+ - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
+ - DDR_CKE* signals activated
+ SEQUENCE=6=write-leveling:
+ - RANKMASK selects the rank to be write-leveled
+ - INIT_STATUS must indicate all ranks with attached DRAM
+ - MR1 and MR2 written to INIT_STATUS-selected ranks
+ SEQUENCE=7=illegal
+ Precharge power-down entry and exit SEQUENCE's may also
+ be automatically generated by the HW when IDLEPOWER!=0.
+ Self-refresh entry SEQUENCE's may also be automatically
+ generated by hardware upon a chip warm or soft reset
+ sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
+ LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
+ to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
+ Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
+ If there are two consecutive power-up/init's without
+ a DRESET assertion between them, LMC asserts DDR_CKE* as part of
+ the first power-up/init, and continues to assert DDR_CKE*
+ through the remainder of the first and the second power-up/init.
+ If DDR_CKE* deactivation and reactivation is needed for
+ a second power-up/init, a DRESET assertion is required
+ between the first and the second. */
+ uint64_t ref_zqcs_int : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
+ increments. A Refresh sequence is triggered when bits
+ [24:18] are equal to 0, and a ZQCS sequence is triggered
+ when [36:18] are equal to 0.
+ Program [24:18] to RND-DN(tREFI/clkPeriod/512)
+ Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
+ that this value should always be greater than 32, to account for
+ resistor calibration delays.
+ 000_00000000_00000000: RESERVED
+ Max Refresh interval = 127 * 512 = 65024 CKs
+ Max ZQCS interval = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for an 800 MHz CK
+ LMC*_CONFIG[INIT_STATUS] determines which ranks receive
+ the REF / ZQCS. LMC does not send any refreshes / ZQCS's
+ when LMC*_CONFIG[INIT_STATUS]=0. */
+ uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter,
+ and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
+ CSR's. SW should write this to a one, then re-write
+ it to a zero to cause the reset. */
+ uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation
+ 0=disabled, 1=enabled */
+ uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after
+ having waited for 2^FORCEWRITE CK cycles. 0=disabled. */
+ uint64_t idlepower : 3; /**< Enter precharge power-down mode after the memory
+ controller has been idle for 2^(2+IDLEPOWER) CK cycles.
+ 0=disabled.
+ This field should only be programmed after initialization.
+ LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
+ is disabled during the precharge power-down. */
+ uint64_t pbank_lsb : 4; /**< DIMM address bit select
+ Referring to the explanation for ROW_LSB,
+ PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
+ Decoding for pbank_lsb
+ - 0000:DIMM = mem_adr[28] / rank = mem_adr[27] (if RANK_ENA)
+ - 0001:DIMM = mem_adr[29] / rank = mem_adr[28] "
+ - 0010:DIMM = mem_adr[30] / rank = mem_adr[29] "
+ - 0011:DIMM = mem_adr[31] / rank = mem_adr[30] "
+ - 0100:DIMM = mem_adr[32] / rank = mem_adr[31] "
+ - 0101:DIMM = mem_adr[33] / rank = mem_adr[32] "
+ - 0110:DIMM = mem_adr[34] / rank = mem_adr[33] "
+ - 0111:DIMM = 0 / rank = mem_adr[34] "
+ - 1000-1111: RESERVED
+ For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
+ DDR3 parts, the column address width = 10, so with
+ 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
+ With rank_ena = 0, pbank_lsb = 2
+ With rank_ena = 1, pbank_lsb = 3 */
+ uint64_t row_lsb : 3; /**< Row Address bit select
+ Encoding used to determine which memory address
+ bit position represents the low order DDR ROW address.
+ The processor's memory address[34:7] needs to be
+ translated to DRAM addresses (bnk,row,col,rank and DIMM)
+ and that is a function of the following:
+ 1. Datapath Width (64)
+ 2. \# Banks (8)
+ 3. \# Column Bits of the memory part - spec'd indirectly
+ by this register.
+ 4. \# Row Bits of the memory part - spec'd indirectly
+ 5. \# Ranks in a DIMM - spec'd by RANK_ENA
+ 6. \# DIMM's in the system by the register below (PBANK_LSB).
+ Decoding for row_lsb
+ - 000: row_lsb = mem_adr[14]
+ - 001: row_lsb = mem_adr[15]
+ - 010: row_lsb = mem_adr[16]
+ - 011: row_lsb = mem_adr[17]
+ - 100: row_lsb = mem_adr[18]
+ - 101: row_lsb = mem_adr[19]
+ - 110: row_lsb = mem_adr[20]
+ - 111: RESERVED
+ For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
+ DDR3 parts, the column address width = 10, so with
+ 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
+ uint64_t ecc_ena : 1; /**< ECC Enable: When set, enables the 8b ECC
+ check/correct logic. Should be 1 when used with DIMMs
+ with ECC; 0 otherwise.
+ When this mode is turned on, DQ[71:64]
+ on writes will contain the ECC code generated for
+ the 64 bits of data to be
+ written in the memory, and on later reads will be used
+ to check for Single bit errors (which will be auto-
+ corrected) and Double bit errors (which will be
+ reported). When not turned on, DQ[71:64]
+ are driven to 0. Please refer to the SEC_ERR, DED_ERR,
+ LMC*_FADR, LMC*_SCRAMBLED_FADR and LMC*_ECC_SYND registers
+ for diagnostic information when there is an error. */
+ uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory sequence that is
+ selected by LMC*_CONFIG[SEQUENCE]. This register is a
+ oneshot and clears itself each time it is set. */
+#else
+ uint64_t init_start : 1;
+ uint64_t ecc_ena : 1;
+ uint64_t row_lsb : 3;
+ uint64_t pbank_lsb : 4;
+ uint64_t idlepower : 3;
+ uint64_t forcewrite : 4;
+ uint64_t ecc_adr : 1;
+ uint64_t reset : 1;
+ uint64_t ref_zqcs_int : 19;
+ uint64_t sequence : 3;
+ uint64_t early_dqx : 1;
+ uint64_t sref_with_dll : 1;
+ uint64_t rank_ena : 1;
+ uint64_t rankmask : 4;
+ uint64_t mirrmask : 4;
+ uint64_t init_status : 4;
+ uint64_t early_unload_d0_r0 : 1;
+ uint64_t early_unload_d0_r1 : 1;
+ uint64_t early_unload_d1_r0 : 1;
+ uint64_t early_unload_d1_r1 : 1;
+ uint64_t scrz : 1;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } cn66xx;
+ struct cvmx_lmcx_config_cn63xx cn68xx;
+ struct cvmx_lmcx_config_cn63xx cn68xxp1;
+ struct cvmx_lmcx_config_s cnf71xx;
+};
+typedef union cvmx_lmcx_config cvmx_lmcx_config_t;
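+
+/*
+ * Editorial sketch (not part of the original SDK sources): step 4 of the
+ * LMC Bringup Sequence described above, starting a power-up/init on a
+ * single-rank configuration. Assumes the CVMX_LMCX_CONFIG() address macro
+ * defined earlier in this file and cvmx_read_csr()/cvmx_write_csr() from
+ * cvmx.h; the rank mask is a placeholder for a board-specific value.
+ */
+static inline void example_lmcx_config_init(int lmc)
+{
+    cvmx_lmcx_config_t cfg;
+
+    cfg.u64 = cvmx_read_csr(CVMX_LMCX_CONFIG(lmc));
+    cfg.s.rankmask = 0x1;    /* rank 0 only (board-specific placeholder) */
+    cfg.s.init_status = 0x1; /* must equal RANKMASK for power-up/init */
+    cfg.s.sequence = 0;      /* SEQUENCE=0: power-up/init */
+    cfg.s.init_start = 1;    /* 0->1 transition starts the sequence */
+    cvmx_write_csr(CVMX_LMCX_CONFIG(lmc), cfg.u64);
+}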
+
+/**
+ * cvmx_lmc#_control
+ *
+ * LMC_CONTROL = LMC Control
+ * This register is an assortment of various control fields needed by the memory controller
+ */
+union cvmx_lmcx_control {
+ uint64_t u64;
+ struct cvmx_lmcx_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t scramble_ena : 1; /**< When set, will enable the scramble/descramble logic */
+ uint64_t thrcnt : 12; /**< Fine Count */
+ uint64_t persub : 8; /**< Offset for DFA rate-matching */
+ uint64_t thrmax : 4; /**< Fine Rate Matching Max Bucket Size
+ 0 = Reserved
+ In conjunction with the Coarse Rate Matching Logic, the Fine Rate
+ Matching Logic gives SW the ability to prioritize DFA Rds over
+ L2C Writes. Higher PERSUB values result in a lower DFA Rd
+ bandwidth. */
+ uint64_t crm_cnt : 5; /**< Coarse Count */
+ uint64_t crm_thr : 5; /**< Coarse Rate Matching Threshold */
+ uint64_t crm_max : 5; /**< Coarse Rate Matching Max Bucket Size
+ 0 = Reserved
+ The Coarse Rate Matching Logic is used to control the bandwidth
+ allocated to DFA Rds. CRM_MAX is subdivided into two regions
+ with DFA Rds being preferred over LMC Rd/Wrs when
+ CRM_CNT < CRM_THR. CRM_CNT increments by 1 when a DFA Rd is
+ slotted and by 2 when an LMC Rd/Wr is slotted, and rolls over
+ when CRM_MAX is reached. */
+ uint64_t rodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a
+ RD cmd is delayed an additional CK cycle. */
+ uint64_t wodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a
+ WR cmd is delayed an additional CK cycle. */
+ uint64_t bprch : 2; /**< Back Porch Enable: When set, the turn-on time for
+ the default DDR_DQ/DQS drivers is delayed an additional BPRCH
+ CK cycles.
+ 00 = 0 CKs
+ 01 = 1 CKs
+ 10 = 2 CKs
+ 11 = 3 CKs */
+ uint64_t ext_zqcs_dis : 1; /**< Disable (external) auto-zqcs calibration
+ When clear, LMC runs external ZQ calibration
+ every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t int_zqcs_dis : 1; /**< Disable (internal) auto-zqcs calibration
+ When clear, LMC runs internal ZQ calibration
+ every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t auto_dclkdis : 1; /**< When 1, LMC will automatically shut off its internal
+ clock to conserve power when there is no traffic. Note
+ that this has no effect on the DDR3 PHY and pads clocks. */
+ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then
+ bank[2:0]=address[9:7] ^ address[14:12]
+ else
+ bank[2:0]=address[9:7] */
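+ /* Worked example (editorial note, not from the original SDK text):
+ with XOR_BANK=1, address[9:7]=3'b101 and address[14:12]=3'b011 give
+ bank[2:0] = 101 ^ 011 = 110. */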
+ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before
+ forcing reads to interrupt. */
+ uint64_t nxm_write_en : 1; /**< NXM Write mode
+ When clear, LMC discards writes to addresses that don't
+ exist in the DRAM (as defined by LMC*_NXM configuration).
+ When set, LMC completes writes to addresses that don't
+ exist in the DRAM at an aliased address. */
+ uint64_t elev_prio_dis : 1; /**< Disable elevate priority logic.
+ When set, writes are sent in
+ regardless of priority information from L2C. */
+ uint64_t inorder_wr : 1; /**< Send writes in order (regardless of priority) */
+ uint64_t inorder_rd : 1; /**< Send reads in order (regardless of priority) */
+ uint64_t throttle_wr : 1; /**< When set, use at most one IFB for writes */
+ uint64_t throttle_rd : 1; /**< When set, use at most one IFB for reads */
+ uint64_t fprch2 : 2; /**< Front Porch Enable: When set, the turn-off
+ time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
+ 00 = 0 CKs
+ 01 = 1 CKs
+ 10 = 2 CKs
+ 11 = RESERVED */
+ uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR3.
+ This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0,
+ and clear otherwise. */
+ uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and
+ address. This mode helps relieve setup time pressure
+ on the Address and command bus which nominally have
+ a very large fanout. Please refer to Micron's tech
+ note tn_47_01 titled "DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems" for physical details. */
+ uint64_t bwcnt : 1; /**< Bus utilization counter Clear.
+ Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and
+ LMC*_DCLK_CNT registers. SW should first write this
+ field to a one, then write this field to a zero to
+ clear the CSRs (see the sketch after this union). */
+ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set, allows the use
+ of JEDEC Registered DIMMs which require address and
+ control bits to be registered in the controller. */
+#else
+ uint64_t rdimm_ena : 1;
+ uint64_t bwcnt : 1;
+ uint64_t ddr2t : 1;
+ uint64_t pocas : 1;
+ uint64_t fprch2 : 2;
+ uint64_t throttle_rd : 1;
+ uint64_t throttle_wr : 1;
+ uint64_t inorder_rd : 1;
+ uint64_t inorder_wr : 1;
+ uint64_t elev_prio_dis : 1;
+ uint64_t nxm_write_en : 1;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t auto_dclkdis : 1;
+ uint64_t int_zqcs_dis : 1;
+ uint64_t ext_zqcs_dis : 1;
+ uint64_t bprch : 2;
+ uint64_t wodt_bprch : 1;
+ uint64_t rodt_bprch : 1;
+ uint64_t crm_max : 5;
+ uint64_t crm_thr : 5;
+ uint64_t crm_cnt : 5;
+ uint64_t thrmax : 4;
+ uint64_t persub : 8;
+ uint64_t thrcnt : 12;
+ uint64_t scramble_ena : 1;
+#endif
+ } s;
+ struct cvmx_lmcx_control_s cn61xx;
+ struct cvmx_lmcx_control_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t rodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a
+ RD cmd is delayed an additional CK cycle. */
+ uint64_t wodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a
+ WR cmd is delayed an additional CK cycle. */
+ uint64_t bprch : 2; /**< Back Porch Enable: When set, the turn-on time for
+ the default DDR_DQ/DQS drivers is delayed by BPRCH
+ additional CK cycles.
+ 00 = 0 CKs
+ 01 = 1 CKs
+ 10 = 2 CKs
+ 11 = 3 CKs */
+ uint64_t ext_zqcs_dis : 1; /**< Disable (external) auto-zqcs calibration
+ When clear, LMC runs external ZQ calibration
+ every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t int_zqcs_dis : 1; /**< Disable (internal) auto-zqcs calibration
+ When clear, LMC runs internal ZQ calibration
+ every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t auto_dclkdis : 1; /**< When 1, LMC will automatically shut off its internal
+ clock to conserve power when there is no traffic. Note
+ that this has no effect on the DDR3 PHY and pads clocks. */
+ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then
+ bank[2:0]=address[9:7] ^ address[14:12]
+ else
+ bank[2:0]=address[9:7] */
+ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before
+ forcing reads to interrupt. */
+ uint64_t nxm_write_en : 1; /**< NXM Write mode
+ When clear, LMC discards writes to addresses that don't
+ exist in the DRAM (as defined by LMC*_NXM configuration).
+ When set, LMC completes writes to addresses that don't
+ exist in the DRAM at an aliased address. */
+ uint64_t elev_prio_dis : 1; /**< Disable elevate priority logic.
+ When set, writes are sent in
+ regardless of priority information from L2C. */
+ uint64_t inorder_wr : 1; /**< Send writes in order (regardless of priority) */
+ uint64_t inorder_rd : 1; /**< Send reads in order (regardless of priority) */
+ uint64_t throttle_wr : 1; /**< When set, use at most one IFB for writes */
+ uint64_t throttle_rd : 1; /**< When set, use at most one IFB for reads */
+ uint64_t fprch2 : 2; /**< Front Porch Enable: When set, the turn-off
+ time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
+ 00 = 0 CKs
+ 01 = 1 CKs
+ 10 = 2 CKs
+ 11 = RESERVED */
+ uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR3.
+ This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0,
+ and clear otherwise. */
+ uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and
+ address. This mode helps relieve setup time pressure
+ on the Address and command bus which nominally have
+ a very large fanout. Please refer to Micron's tech
+ note tn_47_01 titled "DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems" for physical details. */
+ uint64_t bwcnt : 1; /**< Bus utilization counter Clear.
+ Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and
+ LMC*_DCLK_CNT registers. SW should first write this
+ field to a one, then write this field to a zero to
+ clear the CSRs. */
+ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set, allows the use
+ of JEDEC Registered DIMMs which require address and
+ control bits to be registered in the controller. */
+#else
+ uint64_t rdimm_ena : 1;
+ uint64_t bwcnt : 1;
+ uint64_t ddr2t : 1;
+ uint64_t pocas : 1;
+ uint64_t fprch2 : 2;
+ uint64_t throttle_rd : 1;
+ uint64_t throttle_wr : 1;
+ uint64_t inorder_rd : 1;
+ uint64_t inorder_wr : 1;
+ uint64_t elev_prio_dis : 1;
+ uint64_t nxm_write_en : 1;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t auto_dclkdis : 1;
+ uint64_t int_zqcs_dis : 1;
+ uint64_t ext_zqcs_dis : 1;
+ uint64_t bprch : 2;
+ uint64_t wodt_bprch : 1;
+ uint64_t rodt_bprch : 1;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_control_cn63xx cn63xxp1;
+ struct cvmx_lmcx_control_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t scramble_ena : 1; /**< When set, enables the scramble/descramble logic */
+ uint64_t reserved_24_62 : 39;
+ uint64_t rodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a
+ RD cmd is delayed an additional CK cycle. */
+ uint64_t wodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a
+ WR cmd is delayed an additional CK cycle. */
+ uint64_t bprch : 2; /**< Back Porch Enable: When set, the turn-on time for
+ the default DDR_DQ/DQS drivers is delayed by BPRCH
+ additional CK cycles.
+ 00 = 0 CKs
+ 01 = 1 CKs
+ 10 = 2 CKs
+ 11 = 3 CKs */
+ uint64_t ext_zqcs_dis : 1; /**< Disable (external) auto-zqcs calibration
+ When clear, LMC runs external ZQ calibration
+ every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t int_zqcs_dis : 1; /**< Disable (internal) auto-zqcs calibration
+ When clear, LMC runs internal ZQ calibration
+ every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t auto_dclkdis : 1; /**< When 1, LMC will automatically shut off its internal
+ clock to conserve power when there is no traffic. Note
+ that this has no effect on the DDR3 PHY and pads clocks. */
+ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then
+ bank[2:0]=address[9:7] ^ address[14:12]
+ else
+ bank[2:0]=address[9:7] */
+ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before
+ forcing reads to interrupt. */
+ uint64_t nxm_write_en : 1; /**< NXM Write mode
+ When clear, LMC discards writes to addresses that don't
+ exist in the DRAM (as defined by LMC*_NXM configuration).
+ When set, LMC completes writes to addresses that don't
+ exist in the DRAM at an aliased address. */
+ uint64_t elev_prio_dis : 1; /**< Disable elevate priority logic.
+ When set, writes are sent in
+ regardless of priority information from L2C. */
+ uint64_t inorder_wr : 1; /**< Send writes in order (regardless of priority) */
+ uint64_t inorder_rd : 1; /**< Send reads in order (regardless of priority) */
+ uint64_t throttle_wr : 1; /**< When set, use at most one IFB for writes */
+ uint64_t throttle_rd : 1; /**< When set, use at most one IFB for reads */
+ uint64_t fprch2 : 2; /**< Front Porch Enable: When set, the turn-off
+ time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
+ 00 = 0 CKs
+ 01 = 1 CKs
+ 10 = 2 CKs
+ 11 = RESERVED */
+ uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR3.
+ This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0,
+ and clear otherwise. */
+ uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and
+ address. This mode helps relieve setup time pressure
+ on the Address and command bus which nominally have
+ a very large fanout. Please refer to Micron's tech
+ note tn_47_01 titled "DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems" for physical details. */
+ uint64_t bwcnt : 1; /**< Bus utilization counter Clear.
+ Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and
+ LMC*_DCLK_CNT registers. SW should first write this
+ field to a one, then write this field to a zero to
+ clear the CSRs. */
+ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set, allows the use
+ of JEDEC Registered DIMMs which require address and
+ control bits to be registered in the controller. */
+#else
+ uint64_t rdimm_ena : 1;
+ uint64_t bwcnt : 1;
+ uint64_t ddr2t : 1;
+ uint64_t pocas : 1;
+ uint64_t fprch2 : 2;
+ uint64_t throttle_rd : 1;
+ uint64_t throttle_wr : 1;
+ uint64_t inorder_rd : 1;
+ uint64_t inorder_wr : 1;
+ uint64_t elev_prio_dis : 1;
+ uint64_t nxm_write_en : 1;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t auto_dclkdis : 1;
+ uint64_t int_zqcs_dis : 1;
+ uint64_t ext_zqcs_dis : 1;
+ uint64_t bprch : 2;
+ uint64_t wodt_bprch : 1;
+ uint64_t rodt_bprch : 1;
+ uint64_t reserved_24_62 : 39;
+ uint64_t scramble_ena : 1;
+#endif
+ } cn66xx;
+ struct cvmx_lmcx_control_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63 : 1;
+ uint64_t thrcnt : 12; /**< Fine Count */
+ uint64_t persub : 8; /**< Offset for DFA rate-matching */
+ uint64_t thrmax : 4; /**< Fine Rate Matching Max Bucket Size
+ 0 = Reserved
+ In conjunction with the Coarse Rate Matching Logic, the Fine Rate
+ Matching Logic gives SW the ability to prioritize DFA Rds over
+ L2C Writes. Higher PERSUB values result in a lower DFA Rd
+ bandwidth. */
+ uint64_t crm_cnt : 5; /**< Coarse Count */
+ uint64_t crm_thr : 5; /**< Coarse Rate Matching Threshold */
+ uint64_t crm_max : 5; /**< Coarse Rate Matching Max Bucket Size
+ 0 = Reserved
+ The Coarse Rate Matching Logic is used to control the bandwidth
+ allocated to DFA Rds. CRM_MAX is subdivided into two regions
+ with DFA Rds being preferred over LMC Rd/Wrs when
+ CRM_CNT < CRM_THR. CRM_CNT increments by 1 when a DFA Rd is
+ slotted and by 2 when an LMC Rd/Wr is slotted, and rolls over
+ when CRM_MAX is reached. */
+ uint64_t rodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a
+ RD cmd is delayed an additional CK cycle. */
+ uint64_t wodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a
+ WR cmd is delayed an additional CK cycle. */
+ uint64_t bprch : 2; /**< Back Porch Enable: When set, the turn-on time for
+ the default DDR_DQ/DQS drivers is delayed by BPRCH
+ additional CK cycles.
+ 00 = 0 CKs
+ 01 = 1 CKs
+ 10 = 2 CKs
+ 11 = 3 CKs */
+ uint64_t ext_zqcs_dis : 1; /**< Disable (external) auto-zqcs calibration
+ When clear, LMC runs external ZQ calibration
+ every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t int_zqcs_dis : 1; /**< Disable (internal) auto-zqcs calibration
+ When clear, LMC runs internal ZQ calibration
+ every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t auto_dclkdis : 1; /**< When 1, LMC will automatically shut off its internal
+ clock to conserve power when there is no traffic. Note
+ that this has no effect on the DDR3 PHY and pads clocks. */
+ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then
+ bank[2:0]=address[9:7] ^ address[14:12]
+ else
+ bank[2:0]=address[9:7] */
+ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before
+ forcing reads to interrupt. */
+ uint64_t nxm_write_en : 1; /**< NXM Write mode
+ When clear, LMC discards writes to addresses that don't
+ exist in the DRAM (as defined by LMC*_NXM configuration).
+ When set, LMC completes writes to addresses that don't
+ exist in the DRAM at an aliased address. */
+ uint64_t elev_prio_dis : 1; /**< Disable elevate priority logic.
+ When set, writes are sent in
+ regardless of priority information from L2C. */
+ uint64_t inorder_wr : 1; /**< Send writes in order (regardless of priority) */
+ uint64_t inorder_rd : 1; /**< Send reads in order (regardless of priority) */
+ uint64_t throttle_wr : 1; /**< When set, use at most one IFB for writes */
+ uint64_t throttle_rd : 1; /**< When set, use at most one IFB for reads */
+ uint64_t fprch2 : 2; /**< Front Porch Enable: When set, the turn-off
+ time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
+ 00 = 0 CKs
+ 01 = 1 CKs
+ 10 = 2 CKs
+ 11 = RESERVED */
+ uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR3.
+ This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0,
+ and clear otherwise. */
+ uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and
+ address. This mode helps relieve setup time pressure
+ on the Address and command bus which nominally have
+ a very large fanout. Please refer to Micron's tech
+ note tn_47_01 titled "DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems" for physical details. */
+ uint64_t bwcnt : 1; /**< Bus utilization counter Clear.
+ Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and
+ LMC*_DCLK_CNT registers. SW should first write this
+ field to a one, then write this field to a zero to
+ clear the CSRs. */
+ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set, allows the use
+ of JEDEC Registered DIMMs which require address and
+ control bits to be registered in the controller. */
+#else
+ uint64_t rdimm_ena : 1;
+ uint64_t bwcnt : 1;
+ uint64_t ddr2t : 1;
+ uint64_t pocas : 1;
+ uint64_t fprch2 : 2;
+ uint64_t throttle_rd : 1;
+ uint64_t throttle_wr : 1;
+ uint64_t inorder_rd : 1;
+ uint64_t inorder_wr : 1;
+ uint64_t elev_prio_dis : 1;
+ uint64_t nxm_write_en : 1;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t auto_dclkdis : 1;
+ uint64_t int_zqcs_dis : 1;
+ uint64_t ext_zqcs_dis : 1;
+ uint64_t bprch : 2;
+ uint64_t wodt_bprch : 1;
+ uint64_t rodt_bprch : 1;
+ uint64_t crm_max : 5;
+ uint64_t crm_thr : 5;
+ uint64_t crm_cnt : 5;
+ uint64_t thrmax : 4;
+ uint64_t persub : 8;
+ uint64_t thrcnt : 12;
+ uint64_t reserved_63_63 : 1;
+#endif
+ } cn68xx;
+ struct cvmx_lmcx_control_cn68xx cn68xxp1;
+ struct cvmx_lmcx_control_cn66xx cnf71xx;
+};
+typedef union cvmx_lmcx_control cvmx_lmcx_control_t;
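+
+/* Editor's example (not part of the original SDK sources): the BWCNT
+ * field above clears the LMC*_OPS_CNT/IFB_CNT/DCLK_CNT counters with a
+ * write-one-then-zero sequence, sketched here. Assumes cvmx_read_csr(),
+ * cvmx_write_csr() and the CVMX_LMCX_CONTROL(block) address macro from
+ * elsewhere in this header. */
+static inline void example_clear_lmc_counters(int lmc)
+{
+ cvmx_lmcx_control_t ctl;
+ ctl.u64 = cvmx_read_csr(CVMX_LMCX_CONTROL(lmc));
+ ctl.s.bwcnt = 1; /* first write a one... */
+ cvmx_write_csr(CVMX_LMCX_CONTROL(lmc), ctl.u64);
+ ctl.s.bwcnt = 0; /* ...then a zero to finish the clear */
+ cvmx_write_csr(CVMX_LMCX_CONTROL(lmc), ctl.u64);
+}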
+
+/**
+ * cvmx_lmc#_ctl
+ *
+ * LMC_CTL = LMC Control
+ * This register is an assortment of various control fields needed by the memory controller
+ */
+union cvmx_lmcx_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pulldowns. */
+ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pullups. */
+ uint64_t slow_scf : 1; /**< Should be cleared to zero */
+ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then
+ bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
+ else
+ bank[n:0]=address[n+7:7]
+ where n=1 for a 4 bank part and n=2 for an 8 bank part
+ (see the sketch after this union) */
+ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before
+ allowing reads to interrupt. */
+ uint64_t pll_div2 : 1; /**< PLL Div2. */
+ uint64_t pll_bypass : 1; /**< PLL Bypass. */
+ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set, allows the use
+ of JEDEC Registered DIMMs which require Write
+ data to be registered in the controller. */
+ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans
+ will slot an additional 1 cycle data bus bubble to
+ avoid DQ/DQS bus contention. This is only a CYA bit,
+ in case the "built-in" DIMM and RANK crossing logic,
+ which should auto-detect and perfectly slot
+ read-to-reads to the same DIMM/RANK, fails to do so. */
+ uint64_t inorder_mwf : 1; /**< Reads as zero */
+ uint64_t inorder_mrf : 1; /**< Always clear to zero */
+ uint64_t reserved_10_11 : 2;
+ uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off
+ time for the DDR_DQ/DQS drivers is 1 dclk earlier.
+ This bit should typically be set. */
+ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for
+ the DDR_DQ/DQS drivers is delayed an additional DCLK
+ cycle. This should be set to one whenever both SILO_HC
+ and SILO_QC are set. */
+ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional
+ dclks to wait (on top of TCL+1+TSKW) before pulling
+ data out of the pad silos.
+ - 00: illegal
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: illegal
+ This should always be set to 1. */
+ uint64_t tskw : 2; /**< This component is a representation of total BOARD
+ DELAY on DQ (used in the controller to determine the
+ R->W spacing to avoid DQS/DQ bus conflicts). Enter
+ the largest of the per-byte Board delays
+ - 00: 0 dclk
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: 3 dclks */
+ uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting
+ A non-zero value in this register
+ enables the On Die Termination (ODT) in DDR parts.
+ These two bits are loaded into the RTT
+ portion of the EMRS register bits A6 & A2. If DDR2's
+ termination (for the memory's DQ/DQS/DM pads) is not
+ desired, set it to 00. If it is, choose between
+ 01 for 75 ohm and 10 for 150 ohm termination.
+ 00 = ODT Disabled
+ 01 = 75 ohm Termination
+ 10 = 150 ohm Termination
+ 11 = 50 ohm Termination
+ Octeon, on writes, by default, drives the 4/8 ODT
+ pins (64/128b mode) based on what the masks
+ (LMC_WODT_CTL) are programmed to.
+ LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
+ for READS. LMC_RODT_CTL needs to be programmed based
+ on the system's needs for ODT. */
+ uint64_t dic : 2; /**< Drive Strength Control:
+ DIC[0] is
+ loaded into the Extended Mode Register (EMRS) A1 bit
+ during initialization.
+ 0 = Normal
+ 1 = Reduced
+ DIC[1] is used to load into EMRS
+ bit 10 - DQSN Enable/Disable field. By default, we
+ program the DDRs to drive the DQSN also. Set it to
+ 1 if DQSN should be Hi-Z.
+ 0 - DQSN Enable
+ 1 - DQSN Disable */
+#else
+ uint64_t dic : 2;
+ uint64_t qs_dic : 2;
+ uint64_t tskw : 2;
+ uint64_t sil_lat : 2;
+ uint64_t bprch : 1;
+ uint64_t fprch2 : 1;
+ uint64_t reserved_10_11 : 2;
+ uint64_t inorder_mrf : 1;
+ uint64_t inorder_mwf : 1;
+ uint64_t r2r_slot : 1;
+ uint64_t rdimm_ena : 1;
+ uint64_t pll_bypass : 1;
+ uint64_t pll_div2 : 1;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t slow_scf : 1;
+ uint64_t ddr__pctl : 4;
+ uint64_t ddr__nctl : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pulldowns. */
+ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pullups. */
+ uint64_t slow_scf : 1; /**< 1=SCF has pass1 latency, 0=SCF has 1 cycle lower latency
+ when compared to pass1 */
+ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then
+ bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
+ else
+ bank[n:0]=address[n+7:7]
+ where n=1 for a 4 bank part and n=2 for an 8 bank part */
+ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before
+ allowing reads to interrupt. */
+ uint64_t pll_div2 : 1; /**< PLL Div2. */
+ uint64_t pll_bypass : 1; /**< PLL Bypass. */
+ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set, allows the use
+ of JEDEC Registered DIMMs which require Write
+ data to be registered in the controller. */
+ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans
+ will slot an additional 1 cycle data bus bubble to
+ avoid DQ/DQS bus contention. This is only a CYA bit,
+ in case the "built-in" DIMM and RANK crossing logic,
+ which should auto-detect and perfectly slot
+ read-to-reads to the same DIMM/RANK, fails to do so. */
+ uint64_t inorder_mwf : 1; /**< Reads as zero */
+ uint64_t inorder_mrf : 1; /**< Always set to zero */
+ uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the
+ Dclk domain is (DRESET || ECLK_RESET). */
+ uint64_t mode32b : 1; /**< 32b data Path Mode
+ Set to 1 if we use only 32 DQ pins
+ 0 for 16b DQ mode. */
+ uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off
+ time for the DDR_DQ/DQS drivers is 1 dclk earlier.
+ This bit should typically be set. */
+ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for
+ the DDR_DQ/DQS drivers is delayed an additional DCLK
+ cycle. This should be set to one whenever both SILO_HC
+ and SILO_QC are set. */
+ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional
+ dclks to wait (on top of TCL+1+TSKW) before pulling
+ data out of the pad silos.
+ - 00: illegal
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: illegal
+ This should always be set to 1. */
+ uint64_t tskw : 2; /**< This component is a representation of total BOARD
+ DELAY on DQ (used in the controller to determine the
+ R->W spacing to avoid DQS/DQ bus conflicts). Enter
+ the largest of the per-byte Board delays
+ - 00: 0 dclk
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: 3 dclks */
+ uint64_t qs_dic : 2; /**< QS Drive Strength Control (DDR1):
+ & DDR2 Termination Resistor Setting
+ When in DDR2, a non-zero value in this register
+ enables the On Die Termination (ODT) in DDR parts.
+ These two bits are loaded into the RTT
+ portion of the EMRS register bits A6 & A2. If DDR2's
+ termination (for the memory's DQ/DQS/DM pads) is not
+ desired, set it to 00. If it is, choose between
+ 01 for 75 ohm and 10 for 150 ohm termination.
+ 00 = ODT Disabled
+ 01 = 75 ohm Termination
+ 10 = 150 ohm Termination
+ 11 = 50 ohm Termination
+ Octeon, on writes, by default, drives the 8 ODT
+ pins based on what the masks (LMC_WODT_CTL1 & 2)
+ are programmed to. LMC_DDR2_CTL->ODT_ENA
+ enables Octeon to drive ODT pins for READS.
+ LMC_RODT_CTL needs to be programmed based on
+ the system's needs for ODT. */
+ uint64_t dic : 2; /**< Drive Strength Control:
+ For DDR-I/II Mode, DIC[0] is
+ loaded into the Extended Mode Register (EMRS) A1 bit
+ during initialization. (see DDR-I data sheet EMRS
+ description)
+ 0 = Normal
+ 1 = Reduced
+ For DDR-II Mode, DIC[1] is used to load into EMRS
+ bit 10 - DQSN Enable/Disable field. By default, we
+ program the DDRs to drive the DQSN also. Set it to
+ 1 if DQSN should be Hi-Z.
+ 0 - DQSN Enable
+ 1 - DQSN Disable */
+#else
+ uint64_t dic : 2;
+ uint64_t qs_dic : 2;
+ uint64_t tskw : 2;
+ uint64_t sil_lat : 2;
+ uint64_t bprch : 1;
+ uint64_t fprch2 : 1;
+ uint64_t mode32b : 1;
+ uint64_t dreset : 1;
+ uint64_t inorder_mrf : 1;
+ uint64_t inorder_mwf : 1;
+ uint64_t r2r_slot : 1;
+ uint64_t rdimm_ena : 1;
+ uint64_t pll_bypass : 1;
+ uint64_t pll_div2 : 1;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t slow_scf : 1;
+ uint64_t ddr__pctl : 4;
+ uint64_t ddr__nctl : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_ctl_cn30xx cn31xx;
+ struct cvmx_lmcx_ctl_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pulldowns. */
+ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pullups. */
+ uint64_t slow_scf : 1; /**< 1=SCF has pass1 latency, 0=SCF has 1 cycle lower latency
+ when compared to pass1
+ NOTE - This bit has NO effect in PASS1 */
+ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then
+ bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
+ else
+ bank[n:0]=address[n+7:7]
+ where n=1 for a 4 bank part and n=2 for an 8 bank part */
+ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before
+ allowing reads to interrupt. */
+ uint64_t reserved_16_17 : 2;
+ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set, allows the use
+ of JEDEC Registered DIMMs which require Write
+ data to be registered in the controller. */
+ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans
+ will slot an additional 1 cycle data bus bubble to
+ avoid DQ/DQS bus contention. This is only a CYA bit,
+ in case the "built-in" DIMM and RANK crossing logic,
+ which should auto-detect and perfectly slot
+ read-to-reads to the same DIMM/RANK, fails to do so. */
+ uint64_t inorder_mwf : 1; /**< When set, forces LMC_MWF (writes) into strict, in-order
+ mode. When clear, writes may be serviced out of order
+ (optimized to keep multiple banks active).
+ This bit is ONLY to be set at power-on and
+ should not be set for normal use.
+ NOTE: For PASS1, set as follows:
+ DDR-I -> 1
+ DDR-II -> 0
+ For Pass2, this bit is RA0, write ignore (this feature
+ is permanently disabled) */
+ uint64_t inorder_mrf : 1; /**< When set, forces LMC_MRF (reads) into strict, in-order
+ mode. When clear, reads may be serviced out of order
+ (optimized to keep multiple banks active).
+ This bit is ONLY to be set at power-on and
+ should not be set for normal use.
+ NOTE: For PASS1, set as follows:
+ DDR-I -> 1
+ DDR-II -> 0
+ For Pass2, this bit should be written ZERO for
+ DDR I & II */
+ uint64_t set_zero : 1; /**< Reserved. Always Set this Bit to Zero */
+ uint64_t mode128b : 1; /**< 128b data Path Mode
+ Set to 1 if we use all 128 DQ pins
+ 0 for 64b DQ mode. */
+ uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off
+ time for the DDR_DQ/DQS drivers is 1 dclk earlier.
+ This bit should typically be set. */
+ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for
+ the DDR_DQ/DQS drivers is delayed an additional DCLK
+ cycle. This should be set to one whenever both SILO_HC
+ and SILO_QC are set. */
+ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional
+ dclks to wait (on top of TCL+1+TSKW) before pulling
+ data out of the pad silos.
+ - 00: illegal
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: illegal
+ This should always be set to 1. */
+ uint64_t tskw : 2; /**< This component is a representation of total BOARD
+ DELAY on DQ (used in the controller to determine the
+ R->W spacing to avoid DQS/DQ bus conflicts). Enter
+ the largest of the per-byte Board delays
+ - 00: 0 dclk
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: 3 dclks */
+ uint64_t qs_dic : 2; /**< QS Drive Strength Control (DDR1):
+ & DDR2 Termination Resistor Setting
+ When in DDR2, a non-zero value in this register
+ enables the On Die Termination (ODT) in DDR parts.
+ These two bits are loaded into the RTT
+ portion of the EMRS register bits A6 & A2. If DDR2's
+ termination (for the memory's DQ/DQS/DM pads) is not
+ desired, set it to 00. If it is, choose between
+ 01 for 75 ohm and 10 for 150 ohm termination.
+ 00 = ODT Disabled
+ 01 = 75 ohm Termination
+ 10 = 150 ohm Termination
+ 11 = 50 ohm Termination
+ Octeon, on writes, by default, drives the 4/8 ODT
+ pins (64/128b mode) based on what the masks
+ (LMC_WODT_CTL) are programmed to.
+ LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
+ for READS. LMC_RODT_CTL needs to be programmed based
+ on the system's needs for ODT. */
+ uint64_t dic : 2; /**< Drive Strength Control:
+ For DDR-I/II Mode, DIC[0] is
+ loaded into the Extended Mode Register (EMRS) A1 bit
+ during initialization. (see DDR-I data sheet EMRS
+ description)
+ 0 = Normal
+ 1 = Reduced
+ For DDR-II Mode, DIC[1] is used to load into EMRS
+ bit 10 - DQSN Enable/Disable field. By default, we
+ program the DDRs to drive the DQSN also. Set it to
+ 1 if DQSN should be Hi-Z.
+ 0 - DQSN Enable
+ 1 - DQSN Disable */
+#else
+ uint64_t dic : 2;
+ uint64_t qs_dic : 2;
+ uint64_t tskw : 2;
+ uint64_t sil_lat : 2;
+ uint64_t bprch : 1;
+ uint64_t fprch2 : 1;
+ uint64_t mode128b : 1;
+ uint64_t set_zero : 1;
+ uint64_t inorder_mrf : 1;
+ uint64_t inorder_mwf : 1;
+ uint64_t r2r_slot : 1;
+ uint64_t rdimm_ena : 1;
+ uint64_t reserved_16_17 : 2;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t slow_scf : 1;
+ uint64_t ddr__pctl : 4;
+ uint64_t ddr__nctl : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn38xx;
+ struct cvmx_lmcx_ctl_cn38xx cn38xxp2;
+ struct cvmx_lmcx_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pulldowns. */
+ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pullups. */
+ uint64_t slow_scf : 1; /**< Should be cleared to zero */
+ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then
+ bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
+ else
+ bank[n:0]=address[n+7:7]
+ where n=1 for a 4 bank part and n=2 for an 8 bank part */
+ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before
+ allowing reads to interrupt. */
+ uint64_t reserved_17_17 : 1;
+ uint64_t pll_bypass : 1; /**< PLL Bypass. */
+ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set, allows the use
+ of JEDEC Registered DIMMs which require Write
+ data to be registered in the controller. */
+ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans
+ will slot an additional 1 cycle data bus bubble to
+ avoid DQ/DQS bus contention. This is only a CYA bit,
+ in case the "built-in" DIMM and RANK crossing logic,
+ which should auto-detect and perfectly slot
+ read-to-reads to the same DIMM/RANK, fails to do so. */
+ uint64_t inorder_mwf : 1; /**< Reads as zero */
+ uint64_t inorder_mrf : 1; /**< Always clear to zero */
+ uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the
+ Dclk domain is (DRESET || ECLK_RESET). */
+ uint64_t mode32b : 1; /**< 32b data Path Mode
+ Set to 1 if we use 32 DQ pins
+ 0 for 16b DQ mode. */
+ uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off
+ time for the DDR_DQ/DQS drivers is 1 dclk earlier.
+ This bit should typically be set. */
+ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for
+ the DDR_DQ/DQS drivers is delayed an additional DCLK
+ cycle. This should be set to one whenever both SILO_HC
+ and SILO_QC are set. */
+ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional
+ dclks to wait (on top of TCL+1+TSKW) before pulling
+ data out of the pad silos.
+ - 00: illegal
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: illegal
+ This should always be set to 1. */
+ uint64_t tskw : 2; /**< This component is a representation of total BOARD
+ DELAY on DQ (used in the controller to determine the
+ R->W spacing to avoid DQS/DQ bus conflicts). Enter
+ the largest of the per-byte Board delays
+ - 00: 0 dclk
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: 3 dclks */
+ uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting
+ When in DDR2, a non-zero value in this register
+ enables the On Die Termination (ODT) in DDR parts.
+ These two bits are loaded into the RTT
+ portion of the EMRS register bits A6 & A2. If DDR2's
+ termination (for the memory's DQ/DQS/DM pads) is not
+ desired, set it to 00. If it is, choose between
+ 01 for 75 ohm and 10 for 150 ohm termination.
+ 00 = ODT Disabled
+ 01 = 75 ohm Termination
+ 10 = 150 ohm Termination
+ 11 = 50 ohm Termination
+ Octeon, on writes, by default, drives the ODT
+ pins based on what the masks
+ (LMC_WODT_CTL) are programmed to.
+ LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
+ for READS. LMC_RODT_CTL needs to be programmed based
+ on the system's needs for ODT. */
+ uint64_t dic : 2; /**< Drive Strength Control:
+ DIC[0] is
+ loaded into the Extended Mode Register (EMRS) A1 bit
+ during initialization.
+ 0 = Normal
+ 1 = Reduced
+ DIC[1] is used to load into EMRS
+ bit 10 - DQSN Enable/Disable field. By default, we
+ program the DDRs to drive the DQSN also. Set it to
+ 1 if DQSN should be Hi-Z.
+ 0 - DQSN Enable
+ 1 - DQSN Disable */
+#else
+ uint64_t dic : 2;
+ uint64_t qs_dic : 2;
+ uint64_t tskw : 2;
+ uint64_t sil_lat : 2;
+ uint64_t bprch : 1;
+ uint64_t fprch2 : 1;
+ uint64_t mode32b : 1;
+ uint64_t dreset : 1;
+ uint64_t inorder_mrf : 1;
+ uint64_t inorder_mwf : 1;
+ uint64_t r2r_slot : 1;
+ uint64_t rdimm_ena : 1;
+ uint64_t pll_bypass : 1;
+ uint64_t reserved_17_17 : 1;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t slow_scf : 1;
+ uint64_t ddr__pctl : 4;
+ uint64_t ddr__nctl : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_ctl_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pulldowns. */
+ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pullups. */
+ uint64_t slow_scf : 1; /**< Always clear to zero */
+ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then
+ bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
+ else
+ bank[n:0]=address[n+7:7]
+ where n=1 for a 4 bank part and n=2 for an 8 bank part */
+ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before
+ allowing reads to interrupt. */
+ uint64_t reserved_16_17 : 2;
+ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set, allows the use
+ of JEDEC Registered DIMMs which require Write
+ data to be registered in the controller. */
+ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans
+ will slot an additional 1 cycle data bus bubble to
+ avoid DQ/DQS bus contention. This is only a CYA bit,
+ in case the "built-in" DIMM and RANK crossing logic,
+ which should auto-detect and perfectly slot
+ read-to-reads to the same DIMM/RANK, fails to do so. */
+ uint64_t inorder_mwf : 1; /**< Reads as zero */
+ uint64_t inorder_mrf : 1; /**< Always set to zero */
+ uint64_t dreset : 1; /**< MBZ
+ THIS IS OBSOLETE. Use LMC_DLL_CTL[DRESET] instead. */
+ uint64_t mode32b : 1; /**< 32b data Path Mode
+ Set to 1 if we use only 32 DQ pins
+ 0 for 64b DQ mode. */
+ uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off
+ time for the DDR_DQ/DQS drivers is 1 dclk earlier.
+ This bit should typically be set. */
+ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for
+ the DDR_DQ/DQS drivers is delayed an additional DCLK
+ cycle. This should be set to one whenever both SILO_HC
+ and SILO_QC are set. */
+ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional
+ dclks to wait (on top of TCL+1+TSKW) before pulling
+ data out of the pad silos.
+ - 00: illegal
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: illegal
+ This should always be set to 1.
+ THIS IS OBSOLETE. Use READ_LEVEL_RANK instead. */
+ uint64_t tskw : 2; /**< This component is a representation of total BOARD
+ DELAY on DQ (used in the controller to determine the
+ R->W spacing to avoid DQS/DQ bus conflicts). Enter
+ the largest of the per-byte Board delays
+ - 00: 0 dclk
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: 3 dclks
+ THIS IS OBSOLETE. Use READ_LEVEL_RANK instead. */
+ uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting
+ When in DDR2, a non-zero value in this register
+ enables the On Die Termination (ODT) in DDR parts.
+ These two bits are loaded into the RTT
+ portion of the EMRS register bits A6 & A2. If DDR2's
+ termination (for the memory's DQ/DQS/DM pads) is not
+ desired, set it to 00. If it is, choose between
+ 01 for 75 ohm and 10 for 150 ohm termination.
+ 00 = ODT Disabled
+ 01 = 75 ohm Termination
+ 10 = 150 ohm Termination
+ 11 = 50 ohm Termination
+ Octeon, on writes, by default, drives the 4/8 ODT
+ pins (64/128b mode) based on what the masks
+ (LMC_WODT_CTL0 & 1) are programmed to.
+ LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
+ for READS. LMC_RODT_CTL needs to be programmed based
+ on the system's needs for ODT. */
+ uint64_t dic : 2; /**< Drive Strength Control:
+ DIC[0] is
+ loaded into the Extended Mode Register (EMRS) A1 bit
+ during initialization.
+ 0 = Normal
+ 1 = Reduced
+ DIC[1] is used to load into EMRS
+ bit 10 - DQSN Enable/Disable field. By default, we
+ program the DDRs to drive the DQSN also. Set it to
+ 1 if DQSN should be Hi-Z.
+ 0 - DQSN Enable
+ 1 - DQSN Disable */
+#else
+ uint64_t dic : 2;
+ uint64_t qs_dic : 2;
+ uint64_t tskw : 2;
+ uint64_t sil_lat : 2;
+ uint64_t bprch : 1;
+ uint64_t fprch2 : 1;
+ uint64_t mode32b : 1;
+ uint64_t dreset : 1;
+ uint64_t inorder_mrf : 1;
+ uint64_t inorder_mwf : 1;
+ uint64_t r2r_slot : 1;
+ uint64_t rdimm_ena : 1;
+ uint64_t reserved_16_17 : 2;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t slow_scf : 1;
+ uint64_t ddr__pctl : 4;
+ uint64_t ddr__nctl : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn52xx;
+ struct cvmx_lmcx_ctl_cn52xx cn52xxp1;
+ struct cvmx_lmcx_ctl_cn52xx cn56xx;
+ struct cvmx_lmcx_ctl_cn52xx cn56xxp1;
+ struct cvmx_lmcx_ctl_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pulldowns. */
+ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit
+ The encoded value adjusts the drive strength
+ of the DDR DQ pullups. */
+ uint64_t slow_scf : 1; /**< Should be cleared to zero */
+ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then
+ bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
+ else
+ bank[n:0]=address[n+7:7]
+ where n=1 for a 4 bank part and n=2 for an 8 bank part */
+ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before
+ allowing reads to interrupt. */
+ uint64_t reserved_16_17 : 2;
+ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set, allows the use
+ of JEDEC Registered DIMMs which require Write
+ data to be registered in the controller. */
+ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans
+ will slot an additional 1 cycle data bus bubble to
+ avoid DQ/DQS bus contention. This is only a CYA bit,
+ in case the "built-in" DIMM and RANK crossing logic,
+ which should auto-detect and perfectly slot
+ read-to-reads to the same DIMM/RANK, fails to do so. */
+ uint64_t inorder_mwf : 1; /**< Reads as zero */
+ uint64_t inorder_mrf : 1; /**< Always clear to zero */
+ uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the
+ Dclk domain is (DRESET || ECLK_RESET). */
+ uint64_t mode128b : 1; /**< 128b data Path Mode
+ Set to 1 if we use all 128 DQ pins
+ 0 for 64b DQ mode. */
+ uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off
+ time for the DDR_DQ/DQS drivers is 1 dclk earlier.
+ This bit should typically be set. */
+ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for
+ the DDR_DQ/DQS drivers is delayed an additional DCLK
+ cycle. This should be set to one whenever both SILO_HC
+ and SILO_QC are set. */
+ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional
+ dclks to wait (on top of TCL+1+TSKW) before pulling
+ data out of the pad silos.
+ - 00: illegal
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: illegal
+ This should always be set to 1. */
+ uint64_t tskw : 2; /**< This component is a representation of total BOARD
+ DELAY on DQ (used in the controller to determine the
+ R->W spacing to avoid DQS/DQ bus conflicts). Enter
+ the largest of the per-byte Board delays
+ - 00: 0 dclk
+ - 01: 1 dclks
+ - 10: 2 dclks
+ - 11: 3 dclks */
+ uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting
+ A non-zero value in this register
+ enables the On Die Termination (ODT) in DDR parts.
+ These two bits are loaded into the RTT
+ portion of the EMRS register bits A6 & A2. If DDR2's
+ termination (for the memory's DQ/DQS/DM pads) is not
+ desired, set it to 00. If it is, choose between
+ 01 for 75 ohm and 10 for 150 ohm termination.
+ 00 = ODT Disabled
+ 01 = 75 ohm Termination
+ 10 = 150 ohm Termination
+ 11 = 50 ohm Termination
+ Octeon, on writes, by default, drives the 4/8 ODT
+ pins (64/128b mode) based on what the masks
+ (LMC_WODT_CTL) are programmed to.
+ LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
+ for READS. LMC_RODT_CTL needs to be programmed based
+ on the system's needs for ODT. */
+ uint64_t dic : 2; /**< Drive Strength Control:
+ DIC[0] is
+ loaded into the Extended Mode Register (EMRS) A1 bit
+ during initialization.
+ 0 = Normal
+ 1 = Reduced
+ DIC[1] is used to load into EMRS
+ bit 10 - DQSN Enable/Disable field. By default, we
+ program the DDRs to drive the DQSN also. Set it to
+ 1 if DQSN should be Hi-Z.
+ 0 - DQSN Enable
+ 1 - DQSN Disable */
+#else
+ uint64_t dic : 2;
+ uint64_t qs_dic : 2;
+ uint64_t tskw : 2;
+ uint64_t sil_lat : 2;
+ uint64_t bprch : 1;
+ uint64_t fprch2 : 1;
+ uint64_t mode128b : 1;
+ uint64_t dreset : 1;
+ uint64_t inorder_mrf : 1;
+ uint64_t inorder_mwf : 1;
+ uint64_t r2r_slot : 1;
+ uint64_t rdimm_ena : 1;
+ uint64_t reserved_16_17 : 2;
+ uint64_t max_write_batch : 4;
+ uint64_t xor_bank : 1;
+ uint64_t slow_scf : 1;
+ uint64_t ddr__pctl : 4;
+ uint64_t ddr__nctl : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn58xx;
+ struct cvmx_lmcx_ctl_cn58xx cn58xxp1;
+};
+typedef union cvmx_lmcx_ctl cvmx_lmcx_ctl_t;
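+
+/* Editor's example (not part of the original SDK sources): the XOR_BANK
+ * address-to-bank mapping described in the comments above, written out
+ * in plain C. Per those comments, n=1 for a 4-bank part and n=2 for an
+ * 8-bank part. */
+static inline uint64_t example_xor_bank(uint64_t address, int n, int xor_bank)
+{
+ uint64_t mask = (1ull << (n + 1)) - 1; /* n+1 bank bits */
+ uint64_t bank = (address >> 7) & mask; /* bank[n:0] = address[n+7:7] */
+ if (xor_bank)
+ bank ^= (address >> 12) & mask; /* ... ^ address[n+7+5:7+5] */
+ return bank;
+}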
+
+/**
+ * cvmx_lmc#_ctl1
+ *
+ * LMC_CTL1 = LMC Control1
+ * This register is an assortment of various control fields needed by the memory controller
+ */
+union cvmx_lmcx_ctl1 {
+ uint64_t u64;
+ struct cvmx_lmcx_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation
+ 0=disabled, 1=enabled */
+ uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after
+ having waited for 2^FORCEWRITE cycles. 0=disabled. */
+ uint64_t idlepower : 3; /**< Enter power-down mode after the memory controller has
+ been idle for 2^(2+IDLEPOWER) cycles. 0=disabled. */
+ uint64_t sequence : 3; /**< Instruction sequence that is run after a 0->1 transition
+ on LMC_MEM_CFG0[INIT_START].
+ 0=DDR2 power-up/init, 1=read-leveling
+ 2=self-refresh entry, 3=self-refresh exit,
+ 4=power-down entry, 5=power-down exit, 6,7=illegal */
+ uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */
+ uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable.
+ 0=disable, 1=enable
+ If the memory part does not support DCC, then this bit
+ must be set to 0. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t data_layout : 2; /**< Logical data layout per DQ byte lane:
+ In 32b mode, this setting has no effect and the data
+ layout DQ[35:0] is the following:
+ [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]]
+ In 16b mode, the DQ[35:0] layouts are the following:
+ 0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]]
+ 1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]]
+ 2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]]
+ where E means ecc, D means data, and 0 means unused
+ (ignored on reads and written as 0 on writes) */
+#else
+ uint64_t data_layout : 2;
+ uint64_t reserved_2_7 : 6;
+ uint64_t dcc_enable : 1;
+ uint64_t sil_mode : 1;
+ uint64_t sequence : 3;
+ uint64_t idlepower : 3;
+ uint64_t forcewrite : 4;
+ uint64_t ecc_adr : 1;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } s;
+ struct cvmx_lmcx_ctl1_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t data_layout : 2; /**< Logical data layout per DQ byte lane:
+ In 32b mode, this setting has no effect and the data
+ layout DQ[35:0] is the following:
+ [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]]
+ In 16b mode, the DQ[35:0] layouts are the following:
+ 0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]]
+ 1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]]
+ 2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]]
+ where E means ecc, D means data, and 0 means unused
+ (ignored on reads and written as 0 on writes) */
+#else
+ uint64_t data_layout : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_ctl1_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */
+ uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable.
+ 0=disable, 1=enable
+ If the memory part does not support DCC, then this bit
+ must be set to 0. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t data_layout : 2; /**< Logical data layout per DQ byte lane:
+ In 32b mode, this setting has no effect and the data
+ layout DQ[35:0] is the following:
+ [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]]
+ In 16b mode, the DQ[35:0] layouts are the following:
+ 0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]]
+ 1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]]
+ 2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]]
+ where E means ecc, D means data, and 0 means unused
+ (ignored on reads and written as 0 on writes) */
+#else
+ uint64_t data_layout : 2;
+ uint64_t reserved_2_7 : 6;
+ uint64_t dcc_enable : 1;
+ uint64_t sil_mode : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_ctl1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation
+ 0=disabled, 1=enabled */
+ uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after
+ having waited for 2^FORCEWRITE cycles. 0=disabled. */
+ uint64_t idlepower : 3; /**< Enter power-down mode after the memory controller has
+ been idle for 2^(2+IDLEPOWER) cycles. 0=disabled. */
+ uint64_t sequence : 3; /**< Instruction sequence that is run after a 0->1 transition
+ on LMC_MEM_CFG0[INIT_START].
+ 0=DDR2 power-up/init, 1=read-leveling
+ 2=self-refresh entry, 3=self-refresh exit,
+ 4=power-down entry, 5=power-down exit, 6,7=illegal */
+ uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */
+ uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable.
+ 0=disable, 1=enable
+ If the memory part does not support DCC, then this bit
+ must be set to 0. */
+ uint64_t reserved_0_7 : 8;
+#else
+ uint64_t reserved_0_7 : 8;
+ uint64_t dcc_enable : 1;
+ uint64_t sil_mode : 1;
+ uint64_t sequence : 3;
+ uint64_t idlepower : 3;
+ uint64_t forcewrite : 4;
+ uint64_t ecc_adr : 1;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } cn52xx;
+ struct cvmx_lmcx_ctl1_cn52xx cn52xxp1;
+ struct cvmx_lmcx_ctl1_cn52xx cn56xx;
+ struct cvmx_lmcx_ctl1_cn52xx cn56xxp1;
+ struct cvmx_lmcx_ctl1_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */
+ uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable.
+ 0=disable, 1=enable
+ If the memory part does not support DCC, then this bit
+ must be set to 0. */
+ uint64_t reserved_0_7 : 8;
+#else
+ uint64_t reserved_0_7 : 8;
+ uint64_t dcc_enable : 1;
+ uint64_t sil_mode : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn58xx;
+ struct cvmx_lmcx_ctl1_cn58xx cn58xxp1;
+};
+typedef union cvmx_lmcx_ctl1 cvmx_lmcx_ctl1_t;
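+
+/* Editor's example (not part of the original SDK sources): FORCEWRITE
+ * and IDLEPOWER above are exponents rather than cycle counts; these
+ * helpers show the thresholds they encode (0 means disabled). */
+static inline uint64_t example_forcewrite_cycles(unsigned forcewrite)
+{
+ return forcewrite ? (1ull << forcewrite) : 0; /* 2^FORCEWRITE cycles */
+}
+static inline uint64_t example_idlepower_cycles(unsigned idlepower)
+{
+ return idlepower ? (1ull << (2 + idlepower)) : 0; /* 2^(2+IDLEPOWER) cycles */
+}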
+
+/**
+ * cvmx_lmc#_dclk_cnt
+ *
+ * LMC_DCLK_CNT = Performance Counters
+ *
+ */
+union cvmx_lmcx_dclk_cnt {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dclkcnt : 64; /**< Performance Counter
+ 64-bit counter that increments every CK cycle */
+#else
+ uint64_t dclkcnt : 64;
+#endif
+ } s;
+ struct cvmx_lmcx_dclk_cnt_s cn61xx;
+ struct cvmx_lmcx_dclk_cnt_s cn63xx;
+ struct cvmx_lmcx_dclk_cnt_s cn63xxp1;
+ struct cvmx_lmcx_dclk_cnt_s cn66xx;
+ struct cvmx_lmcx_dclk_cnt_s cn68xx;
+ struct cvmx_lmcx_dclk_cnt_s cn68xxp1;
+ struct cvmx_lmcx_dclk_cnt_s cnf71xx;
+};
+typedef union cvmx_lmcx_dclk_cnt cvmx_lmcx_dclk_cnt_t;
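+
+/* Editor's example (not part of the original SDK sources): DCLKCNT is a
+ * free-running cycle counter, so bus utilization can be estimated from
+ * deltas of LMC*_OPS_CNT (cleared via BWCNT above) and LMC*_DCLK_CNT
+ * taken over the same interval, as sketched here. */
+static inline unsigned example_bus_utilization_pct(uint64_t ops_delta,
+ uint64_t dclk_delta)
+{
+ return dclk_delta ? (unsigned)((ops_delta * 100) / dclk_delta) : 0;
+}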
+
+/**
+ * cvmx_lmc#_dclk_cnt_hi
+ *
+ * LMC_DCLK_CNT_HI = Performance Counters
+ *
+ */
+union cvmx_lmcx_dclk_cnt_hi {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_cnt_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t dclkcnt_hi : 32; /**< Performance Counter that counts dclks
+ Upper 32-bits of a 64-bit counter. */
+#else
+ uint64_t dclkcnt_hi : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn30xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn31xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn38xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn38xxp2;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn50xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn52xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn52xxp1;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn56xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn56xxp1;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn58xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn58xxp1;
+};
+typedef union cvmx_lmcx_dclk_cnt_hi cvmx_lmcx_dclk_cnt_hi_t;
+
+/**
+ * cvmx_lmc#_dclk_cnt_lo
+ *
+ * LMC_DCLK_CNT_LO = Performance Counters
+ *
+ */
+union cvmx_lmcx_dclk_cnt_lo {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_cnt_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t dclkcnt_lo : 32; /**< Performance Counter that counts dclks
+ Lower 32-bits of a 64-bit counter. */
+#else
+ uint64_t dclkcnt_lo : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn30xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn31xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn38xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn38xxp2;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn50xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn52xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn52xxp1;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn56xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn56xxp1;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn58xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn58xxp1;
+};
+typedef union cvmx_lmcx_dclk_cnt_lo cvmx_lmcx_dclk_cnt_lo_t;
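+
+/* Editor's example (not part of the original SDK sources): on chips that
+ * expose the dclk counter as the HI/LO pair above, a 64-bit value must
+ * be assembled from two 32-bit reads. Re-reading HI guards against a
+ * carry between the two reads. Assumes cvmx_read_csr(); the CSR
+ * addresses are passed in rather than naming the address macros. */
+static inline uint64_t example_read_dclk_cnt(uint64_t hi_addr, uint64_t lo_addr)
+{
+ uint64_t hi, lo;
+ do {
+ hi = cvmx_read_csr(hi_addr) & 0xffffffffull;
+ lo = cvmx_read_csr(lo_addr) & 0xffffffffull;
+ } while ((cvmx_read_csr(hi_addr) & 0xffffffffull) != hi); /* retry on carry */
+ return (hi << 32) | lo;
+}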
+
+/**
+ * cvmx_lmc#_dclk_ctl
+ *
+ * LMC_DCLK_CTL = LMC DCLK generation control
+ *
+ *
+ * Notes:
+ * This CSR is only relevant for LMC1. LMC0_DCLK_CTL is not used.
+ *
+ */
+union cvmx_lmcx_dclk_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t off90_ena : 1; /**< 0=use global DCLK (i.e. the PLL) directly for LMC1
+ 1=use the 90 degree DCLK DLL to offset LMC1 DCLK */
+ uint64_t dclk90_byp : 1; /**< 0=90 degree DCLK DLL uses sampled delay from LMC0
+ 1=90 degree DCLK DLL uses DCLK90_VLU
+ See DCLK90_VLU. */
+ uint64_t dclk90_ld : 1; /**< The 90 degree DCLK DLL samples the delay setting
+ from LMC0's DLL when this field transitions 0->1 */
+ uint64_t dclk90_vlu : 5; /**< Manual open-loop delay setting.
+ The LMC1 90 degree DCLK DLL uses DCLK90_VLU rather
+ than the delay setting sampled from LMC0 when
+ DCLK90_BYP=1. */
+#else
+ uint64_t dclk90_vlu : 5;
+ uint64_t dclk90_ld : 1;
+ uint64_t dclk90_byp : 1;
+ uint64_t off90_ena : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_lmcx_dclk_ctl_s cn56xx;
+ struct cvmx_lmcx_dclk_ctl_s cn56xxp1;
+};
+typedef union cvmx_lmcx_dclk_ctl cvmx_lmcx_dclk_ctl_t;
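+
+/* Editor's example (not part of the original SDK sources): forcing the
+ * LMC1 90-degree DCLK DLL to a manual delay with the fields above. Per
+ * the descriptions, DCLK90_VLU is used only when DCLK90_BYP=1. Assumes
+ * cvmx_read_csr()/cvmx_write_csr(); addr stands in for the LMC1
+ * DCLK_CTL CSR address macro. */
+static inline void example_dclk90_manual(uint64_t addr, unsigned delay)
+{
+ cvmx_lmcx_dclk_ctl_t dctl;
+ dctl.u64 = cvmx_read_csr(addr);
+ dctl.s.dclk90_vlu = delay; /* manual open-loop delay setting */
+ dctl.s.dclk90_byp = 1; /* use DCLK90_VLU, not the sampled delay */
+ dctl.s.off90_ena = 1; /* offset LMC1 DCLK with the 90-degree DLL */
+ cvmx_write_csr(addr, dctl.u64);
+}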
+
+/**
+ * cvmx_lmc#_ddr2_ctl
+ *
+ * LMC_DDR2_CTL = LMC DDR2 & DLL Control Register
+ *
+ */
+union cvmx_lmcx_ddr2_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_ddr2_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bank8 : 1; /**< For 8 bank DDR2 parts
+ 1 - DDR2 parts have 8 internal banks (BA is 3 bits
+ wide).
+ 0 - DDR2 parts have 4 internal banks (BA is 2 bits
+ wide). */
+ uint64_t burst8 : 1; /**< 8-burst mode.
+ 1 - DDR data transfer happens in burst of 8
+ 0 - DDR data transfer happens in burst of 4
+ BURST8 should be set when DDR2T is set
+ to minimize the command bandwidth loss. */
+ uint64_t addlat : 3; /**< Additional Latency for posted CAS
+ When Posted CAS is on, this configures the additional
+ latency. This should be set to
+ 1 .. LMC_MEM_CFG1[TRCD]-2
+ (Note the implication that posted CAS should not
+ be used when tRCD is two.) */
+ uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR2. */
+ uint64_t bwcnt : 1; /**< Bus utilization counter Clear.
+ Clears the LMC_OPS_CNT_*, LMC_IFB_CNT_*, and
+ LMC_DCLK_CNT_* registers. SW should first write this
+ field to a one, then write this field to a zero to
+ clear the CSRs. */
+ uint64_t twr : 3; /**< DDR Write Recovery time (tWR). Last Wr Brst to Pre delay
+ This is not a direct encoding of the value. It is
+ programmed as shown below, per the DDR2 spec. The decimal number
+ on the right is RNDUP(tWR(ns) / tCYC(ns))
+ TYP=15ns
+ - 000: RESERVED
+ - 001: 2
+ - 010: 3
+ - 011: 4
+ - 100: 5
+ - 101: 6
+ - 110: 7
+ - 111: 8 */
+ uint64_t silo_hc : 1; /**< Delays the read sample window by a Half Cycle. */
+ uint64_t ddr_eof : 4; /**< Early Fill Counter Init.
+ L2 needs to know a few cycles before a fill completes so
+ it can get its Control pipe started (for better overall
+ performance). This counter contains an init value which
+ is a function of Eclk/Dclk ratio to account for the
+ asynchronous boundary between L2 cache and the DRAM
+ controller. This init value will
+ determine when to safely let the L2 know that a fill
+ termination is coming up.
+ Set DDR_EOF according to the following rule:
+ eclkFreq/dclkFreq = dclkPeriod/eclkPeriod = RATIO
+ RATIO < 6/6 -> illegal
+ 6/6 <= RATIO < 6/5 -> DDR_EOF=3
+ 6/5 <= RATIO < 6/4 -> DDR_EOF=3
+ 6/4 <= RATIO < 6/3 -> DDR_EOF=2
+ 6/3 <= RATIO < 6/2 -> DDR_EOF=1
+ 6/2 <= RATIO < 6/1 -> DDR_EOF=0
+ 6/1 <= RATIO -> DDR_EOF=0 */
+ uint64_t tfaw : 5; /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1
+ Four Access Window time. Relevant only in DDR2 AND in
+ 8-bank parts.
+ tFAW = 5'b0 in DDR2-4bank
+ tFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1
+ in DDR2-8bank */
+ uint64_t crip_mode : 1; /**< Cripple Mode - When set, the LMC allows only
+ 1 inflight transaction (vs. 8 in normal mode).
+ This bit is ONLY to be set at power-on and
+ should not be set for normal use. */
+ uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 cycle window for CMD and
+ address. This mode helps relieve setup time pressure
+ on the Address and command bus which nominally have
+ a very large fanout. Please refer to Micron's tech
+ note tn_47_01 titled "DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems" for physical details.
+ BURST8 should be set when DDR2T is set to minimize
+ add/cmd loss. */
+ uint64_t odt_ena : 1; /**< Enable Obsolete ODT on Reads
+ Obsolete Read ODT wiggles DDR_ODT_* pins on reads.
+ Should normally be cleared to zero.
+ When this is on, the following fields must also be
+ programmed:
+ LMC_CTL->QS_DIC - programs the termination value
+ LMC_RODT_CTL - programs the ODT I/O mask for Reads */
+ uint64_t qdll_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after
+ DCLK init sequence will reset the DDR 90 DLL. Should
+ happen at startup before any activity in DDR.
+ DRESET should be asserted before and for 10 usec
+ following the 0->1 transition on QDLL_ENA. */
+ uint64_t dll90_vlu : 5; /**< Contains the open loop setting value for the DDR90 delay
+ line. */
+ uint64_t dll90_byp : 1; /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be
+ bypassed and the setting is defined by DLL90_VLU */
+ uint64_t rdqs : 1; /**< DDR2 RDQS mode. When set, configures memory subsystem to
+ use unidirectional DQS pins. RDQS/DM - Rcv & DQS - Xmit */
+ uint64_t ddr2 : 1; /**< Should be set */
+#else
+ uint64_t ddr2 : 1;
+ uint64_t rdqs : 1;
+ uint64_t dll90_byp : 1;
+ uint64_t dll90_vlu : 5;
+ uint64_t qdll_ena : 1;
+ uint64_t odt_ena : 1;
+ uint64_t ddr2t : 1;
+ uint64_t crip_mode : 1;
+ uint64_t tfaw : 5;
+ uint64_t ddr_eof : 4;
+ uint64_t silo_hc : 1;
+ uint64_t twr : 3;
+ uint64_t bwcnt : 1;
+ uint64_t pocas : 1;
+ uint64_t addlat : 3;
+ uint64_t burst8 : 1;
+ uint64_t bank8 : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_ddr2_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bank8 : 1; /**< For 8 bank DDR2 parts
+ 1 - DDR2 parts have 8 internal banks (BA is 3 bits
+ wide).
+ 0 - DDR2 parts have 4 internal banks (BA is 2 bits
+ wide). */
+ uint64_t burst8 : 1; /**< 8-burst mode.
+ 1 - DDR data transfer happens in burst of 8
+ 0 - DDR data transfer happens in burst of 4
+ BURST8 should be set when DDR2T is set to minimize
+ add/cmd bandwidth loss. */
+ uint64_t addlat : 3; /**< Additional Latency for posted CAS
+ When Posted CAS is on, this configures the additional
+ latency. This should be set to
+ 1 .. LMC_MEM_CFG1[TRCD]-2
+ (Note the implication that posted CAS should not
+ be used when tRCD is two.) */
+ uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR2. */
+ uint64_t bwcnt : 1; /**< Bus utilization counter Clear.
+ Clears the LMC_OPS_CNT_*, LMC_IFB_CNT_*, and
+ LMC_DCLK_CNT_* registers. SW should first write this
+ field to a one, then write this field to a zero to
+ clear the CSR's. */
+ uint64_t twr : 3; /**< DDR Write Recovery time (tWR). Last Wr Brst to Pre delay
+ This is not a direct encoding of the value. It is
+ programmed as below, per the DDR2 spec. The decimal number
+ on the right is RNDUP(tWR(ns) / tCYC(ns))
+ TYP=15ns
+ - 000: RESERVED
+ - 001: 2
+ - 010: 3
+ - 011: 4
+ - 100: 5
+ - 101: 6
+ - 110-111: RESERVED */
+ uint64_t silo_hc : 1; /**< Delays the read sample window by a Half Cycle. */
+ uint64_t ddr_eof : 4; /**< Early Fill Counter Init.
+ L2 needs to know a few cycles before a fill completes so
+ it can get its Control pipe started (for better overall
+ performance). This counter contains an init value which
+ is a function of Eclk/Dclk ratio to account for the
+ asynchronous boundary between L2 cache and the DRAM
+ controller. This init value will
+ determine when to safely let the L2 know that a fill
+ termination is coming up.
+ DDR_EOF = RNDUP (DCLK period/Eclk Period). If the ratio
+ is above 3, set DDR_EOF to 3.
+ DCLK/ECLK period -> DDR_EOF
+ - Less than 1: 1
+ - Less than 2: 2
+ - More than 2: 3 */
+ uint64_t tfaw : 5; /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1
+ Four Access Window time. Relevant only in
+ 8-bank parts.
+ TFAW = 5'b0 for DDR2-4bank
+ TFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1 in DDR2-8bank */
+ uint64_t crip_mode : 1; /**< Cripple Mode - When set, the LMC allows only
+ 1 in-flight transaction (vs. 8 in normal mode).
+ This bit is ONLY to be set at power-on and
+ should not be set for normal use. */
+ uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 cycle window for CMD and
+ address. This mode helps relieve setup time pressure
+ on the Address and command bus which nominally have
+ a very large fanout. Please refer to Micron's tech
+ note tn_47_01 titled "DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems" for physical details.
+ BURST8 should be used when DDR2T is set to minimize
+ add/cmd bandwidth loss. */
+ uint64_t odt_ena : 1; /**< Enable ODT for DDR2 on Reads
+ When this is on, the following fields must also be
+ programmed:
+ LMC_CTL->QS_DIC - programs the termination value
+ LMC_RODT_CTL - programs the ODT I/O mask for Reads
+ Program as 0 in DDR1 mode, or whenever ODT needs to be
+ off on Octeon Reads */
+ uint64_t qdll_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after
+ erst deassertion will reset the DDR 90 DLL. Should
+ happen at startup before any activity in DDR. */
+ uint64_t dll90_vlu : 5; /**< Contains the open loop setting value for the DDR90 delay
+ line. */
+ uint64_t dll90_byp : 1; /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be
+ bypassed and the setting is defined by DLL90_VLU */
+ uint64_t reserved_1_1 : 1;
+ uint64_t ddr2 : 1; /**< DDR2 Enable: When set, configures memory subsystem for
+ DDR-II SDRAMs. */
+#else
+ uint64_t ddr2 : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t dll90_byp : 1;
+ uint64_t dll90_vlu : 5;
+ uint64_t qdll_ena : 1;
+ uint64_t odt_ena : 1;
+ uint64_t ddr2t : 1;
+ uint64_t crip_mode : 1;
+ uint64_t tfaw : 5;
+ uint64_t ddr_eof : 4;
+ uint64_t silo_hc : 1;
+ uint64_t twr : 3;
+ uint64_t bwcnt : 1;
+ uint64_t pocas : 1;
+ uint64_t addlat : 3;
+ uint64_t burst8 : 1;
+ uint64_t bank8 : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_ddr2_ctl_cn30xx cn31xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn38xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn38xxp2;
+ struct cvmx_lmcx_ddr2_ctl_s cn50xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn52xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn52xxp1;
+ struct cvmx_lmcx_ddr2_ctl_s cn56xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn56xxp1;
+ struct cvmx_lmcx_ddr2_ctl_s cn58xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn58xxp1;
+};
+typedef union cvmx_lmcx_ddr2_ctl cvmx_lmcx_ddr2_ctl_t;
+
+/**
+ * cvmx_lmc#_ddr_pll_ctl
+ *
+ * LMC_DDR_PLL_CTL = LMC DDR PLL control
+ *
+ *
+ * Notes:
+ * DDR PLL Bringup sequence:
+ * 1. Write CLKF, DDR_PS_EN, DFM_PS_EN, DIFFAMP, CPS, CPB.
+ * If test mode is going to be activated, then also write jtg__ddr_pll_tm_en1, jtg__ddr_pll_tm_en2, jtg__ddr_pll_tm_en3,
+ * jtg__ddr_pll_tm_en4, jtg__dfa_pll_tm_en1, jtg__dfa_pll_tm_en2, jtg__dfa_pll_tm_en3, jtg__dfa_pll_tm_en4, JTAG_TEST_MODE
+ * 2. Wait 128 ref clock cycles (7680 rclk cycles)
+ * 3. Write 1 to RESET_N
+ * 4. Wait 1152 ref clocks (1152*16 rclk cycles)
+ * 5. Write 0 to DDR_DIV_RESET and DFM_DIV_RESET
+ * 6. Wait 10 ref clock cycles (160 rclk cycles) before bringing up the DDR interface
+ * If test mode is going to be activated, wait an additional 8191 ref clocks (8191*16 rclk cycles) to allow PLL
+ * clock alignment
+ */
+union cvmx_lmcx_ddr_pll_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_ddr_pll_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t jtg_test_mode : 1; /**< JTAG Test Mode
+ Clock alignment between DCLK & REFCLK as well as FCLK &
+ REFCLK can only be performed after the ddr_pll_divider_reset
+ is deasserted. SW needs to wait at least 10 reference clock
+ cycles after deasserting pll_divider_reset before asserting
+ LMC(0)_DDR_PLL_CTL[JTG_TEST_MODE]. During alignment (which can
+ take up to 160 microseconds), DCLK and FCLK can exhibit some
+ high frequency pulses. Therefore, all bring up activities in
+ that clock domain need to be delayed (when the chip operates
+ in jtg_test_mode) by about 160 microseconds to ensure that
+ lock is achieved. */
+ uint64_t dfm_div_reset : 1; /**< DFM postscalar divider reset */
+ uint64_t dfm_ps_en : 3; /**< DFM postscalar divide ratio
+ Determines the DFM CK speed.
+ 0x0 : Divide LMC+DFM PLL output by 1
+ 0x1 : Divide LMC+DFM PLL output by 2
+ 0x2 : Divide LMC+DFM PLL output by 3
+ 0x3 : Divide LMC+DFM PLL output by 4
+ 0x4 : Divide LMC+DFM PLL output by 6
+ 0x5 : Divide LMC+DFM PLL output by 8
+ 0x6 : Divide LMC+DFM PLL output by 12
+ 0x7 : Divide LMC+DFM PLL output by 12
+ DFM_PS_EN is not used when DFM_DIV_RESET = 1 */
+ uint64_t ddr_div_reset : 1; /**< DDR postscalar divider reset */
+ uint64_t ddr_ps_en : 3; /**< DDR postscalar divide ratio
+ Determines the LMC CK speed.
+ 0x0 : Divide LMC+DFM PLL output by 1
+ 0x1 : Divide LMC+DFM PLL output by 2
+ 0x2 : Divide LMC+DFM PLL output by 3
+ 0x3 : Divide LMC+DFM PLL output by 4
+ 0x4 : Divide LMC+DFM PLL output by 6
+ 0x5 : Divide LMC+DFM PLL output by 8
+ 0x6 : Divide LMC+DFM PLL output by 12
+ 0x7 : Divide LMC+DFM PLL output by 12
+ DDR_PS_EN is not used when DDR_DIV_RESET = 1 */
+ uint64_t diffamp : 4; /**< PLL diffamp input transconductance */
+ uint64_t cps : 3; /**< PLL charge-pump current */
+ uint64_t cpb : 3; /**< PLL charge-pump current */
+ uint64_t reset_n : 1; /**< PLL reset */
+ uint64_t clkf : 7; /**< Multiply reference by CLKF
+ 32 <= CLKF <= 64
+ LMC+DFM PLL frequency = 50 * CLKF
+ min = 1.6 GHz, max = 3.2 GHz */
+#else
+ uint64_t clkf : 7;
+ uint64_t reset_n : 1;
+ uint64_t cpb : 3;
+ uint64_t cps : 3;
+ uint64_t diffamp : 4;
+ uint64_t ddr_ps_en : 3;
+ uint64_t ddr_div_reset : 1;
+ uint64_t dfm_ps_en : 3;
+ uint64_t dfm_div_reset : 1;
+ uint64_t jtg_test_mode : 1;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } s;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn61xx;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn63xx;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn63xxp1;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn66xx;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn68xx;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn68xxp1;
+ struct cvmx_lmcx_ddr_pll_ctl_s cnf71xx;
+};
+typedef union cvmx_lmcx_ddr_pll_ctl cvmx_lmcx_ddr_pll_ctl_t;
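+
+/*
+ * Illustrative sketch (not part of the SDK): one possible coding of the
+ * DDR PLL bringup sequence described in the notes above.  Assumes the
+ * cvmx_read_csr()/cvmx_write_csr()/cvmx_wait_usec() accessors from cvmx.h
+ * and the CVMX_LMCX_DDR_PLL_CTL() address macro defined elsewhere in this
+ * header; the CLKF and divider values are placeholders that real code
+ * derives from the desired DDR frequency.
+ */
+static inline void cvmx_lmcx_ddr_pll_bringup_example(int lmc)
+{
+    cvmx_lmcx_ddr_pll_ctl_t pll;
+
+    /* Step 1: program CLKF and the postscalar dividers with the PLL and
+     * dividers held in reset */
+    pll.u64 = cvmx_read_csr(CVMX_LMCX_DDR_PLL_CTL(lmc));
+    pll.s.clkf = 40;            /* placeholder: 50 MHz * 40 = 2.0 GHz */
+    pll.s.ddr_ps_en = 1;        /* placeholder: divide PLL output by 2 */
+    pll.s.reset_n = 0;
+    pll.s.ddr_div_reset = 1;
+    pll.s.dfm_div_reset = 1;
+    cvmx_write_csr(CVMX_LMCX_DDR_PLL_CTL(lmc), pll.u64);
+
+    /* Steps 2-3: wait >= 128 ref clocks, then deassert the PLL reset */
+    cvmx_wait_usec(10);
+    pll.s.reset_n = 1;
+    cvmx_write_csr(CVMX_LMCX_DDR_PLL_CTL(lmc), pll.u64);
+
+    /* Steps 4-5: wait >= 1152 ref clocks, then release the dividers */
+    cvmx_wait_usec(100);
+    pll.s.ddr_div_reset = 0;
+    pll.s.dfm_div_reset = 0;
+    cvmx_write_csr(CVMX_LMCX_DDR_PLL_CTL(lmc), pll.u64);
+
+    /* Step 6: wait >= 10 ref clocks before touching the DDR interface */
+    cvmx_wait_usec(1);
+}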
+
+/**
+ * cvmx_lmc#_delay_cfg
+ *
+ * LMC_DELAY_CFG = Open-loop delay line settings
+ *
+ *
+ * Notes:
+ * The DQ bits add OUTGOING delay only to dq, dqs_[p,n], cb, cbs_[p,n], dqm. Delay is approximately
+ * 50-80ps per setting depending on process/voltage. There is no need to add incoming delay since by
+ * default all strobe bits are delayed internally by 90 degrees (as was always the case in previous
+ * passes and past chips).
+ *
+ * The CMD bits add delay to all command bits DDR_RAS, DDR_CAS, DDR_A<15:0>, DDR_BA<2:0>, DDR_n_CS<1:0>_L,
+ * DDR_WE, DDR_CKE and DDR_ODT_<7:0>. Again, delay is 50-80ps per tap.
+ *
+ * The CLK bits add delay to all clock signals DDR_CK_<5:0>_P and DDR_CK_<5:0>_N. Again, delay is
+ * 50-80ps per tap.
+ *
+ * The usage scenario is the following: There is too much delay on command signals and setup on command
+ * is not met. The user can then delay the clock until setup is met.
+ *
+ * At the same time, though, dq/dqs should be delayed as well, because the DDR spec also ties dqs to
+ * clock. If the clock is delayed too much with respect to dqs, writes will start to fail.
+ *
+ * This scheme should eliminate the need to add board routing delay to clock signals to make high
+ * frequencies work.
+ */
+union cvmx_lmcx_delay_cfg {
+ uint64_t u64;
+ struct cvmx_lmcx_delay_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t dq : 5; /**< Setting for DQ delay line */
+ uint64_t cmd : 5; /**< Setting for CMD delay line */
+ uint64_t clk : 5; /**< Setting for CLK delay line */
+#else
+ uint64_t clk : 5;
+ uint64_t cmd : 5;
+ uint64_t dq : 5;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_lmcx_delay_cfg_s cn30xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t dq : 4; /**< Setting for DQ delay line */
+ uint64_t reserved_9_9 : 1;
+ uint64_t cmd : 4; /**< Setting for CMD delay line */
+ uint64_t reserved_4_4 : 1;
+ uint64_t clk : 4; /**< Setting for CLK delay line */
+#else
+ uint64_t clk : 4;
+ uint64_t reserved_4_4 : 1;
+ uint64_t cmd : 4;
+ uint64_t reserved_9_9 : 1;
+ uint64_t dq : 4;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn38xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn50xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn52xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn52xxp1;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn56xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn56xxp1;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn58xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn58xxp1;
+};
+typedef union cvmx_lmcx_delay_cfg cvmx_lmcx_delay_cfg_t;
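+
+/*
+ * Illustrative sketch (not part of the SDK): the usage scenario in the
+ * notes above, coded directly.  When command setup is not met, the clock
+ * is delayed, and DQ is delayed by a similar amount so the DQS-to-clock
+ * relationship required by the DDR spec still holds.  Tap counts are
+ * placeholders; each tap is roughly 50-80ps depending on process/voltage.
+ */
+static inline void cvmx_lmcx_delay_cfg_example(int lmc)
+{
+    cvmx_lmcx_delay_cfg_t cfg;
+
+    cfg.u64 = 0;
+    cfg.s.clk = 6;    /* placeholder: ~300-480ps of clock delay */
+    cfg.s.dq  = 6;    /* track the clock so writes keep working */
+    cfg.s.cmd = 0;    /* leave the command signals undelayed */
+    cvmx_write_csr(CVMX_LMCX_DELAY_CFG(lmc), cfg.u64);
+}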
+
+/**
+ * cvmx_lmc#_dimm#_params
+ *
+ * LMC_DIMMX_PARAMS = LMC DIMMX Params
+ * This register contains values to be programmed into each control word in the corresponding (registered) DIMM. The control words allow
+ * optimization of the device properties for different raw card designs.
+ *
+ * Notes:
+ * LMC only uses this CSR when LMC*_CONTROL[RDIMM_ENA]=1. During a power-up/init sequence, LMC writes
+ * these fields into the control words in the JEDEC standard SSTE32882 registering clock driver on an
+ * RDIMM when corresponding LMC*_DIMM_CTL[DIMM*_WMASK] bits are set.
+ */
+union cvmx_lmcx_dimmx_params {
+ uint64_t u64;
+ struct cvmx_lmcx_dimmx_params_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rc15 : 4; /**< RC15, Reserved */
+ uint64_t rc14 : 4; /**< RC14, Reserved */
+ uint64_t rc13 : 4; /**< RC13, Reserved */
+ uint64_t rc12 : 4; /**< RC12, Reserved */
+ uint64_t rc11 : 4; /**< RC11, Encoding for RDIMM Operating VDD */
+ uint64_t rc10 : 4; /**< RC10, Encoding for RDIMM Operating Speed */
+ uint64_t rc9 : 4; /**< RC9 , Power Savings Settings Control Word */
+ uint64_t rc8 : 4; /**< RC8 , Additional IBT Settings Control Word */
+ uint64_t rc7 : 4; /**< RC7 , Reserved */
+ uint64_t rc6 : 4; /**< RC6 , Reserved */
+ uint64_t rc5 : 4; /**< RC5 , CK Driver Characteristics Control Word */
+ uint64_t rc4 : 4; /**< RC4 , Control Signals Driver Characteristics Control Word */
+ uint64_t rc3 : 4; /**< RC3 , CA Signals Driver Characteristics Control Word */
+ uint64_t rc2 : 4; /**< RC2 , Timing Control Word */
+ uint64_t rc1 : 4; /**< RC1 , Clock Driver Enable Control Word */
+ uint64_t rc0 : 4; /**< RC0 , Global Features Control Word */
+#else
+ uint64_t rc0 : 4;
+ uint64_t rc1 : 4;
+ uint64_t rc2 : 4;
+ uint64_t rc3 : 4;
+ uint64_t rc4 : 4;
+ uint64_t rc5 : 4;
+ uint64_t rc6 : 4;
+ uint64_t rc7 : 4;
+ uint64_t rc8 : 4;
+ uint64_t rc9 : 4;
+ uint64_t rc10 : 4;
+ uint64_t rc11 : 4;
+ uint64_t rc12 : 4;
+ uint64_t rc13 : 4;
+ uint64_t rc14 : 4;
+ uint64_t rc15 : 4;
+#endif
+ } s;
+ struct cvmx_lmcx_dimmx_params_s cn61xx;
+ struct cvmx_lmcx_dimmx_params_s cn63xx;
+ struct cvmx_lmcx_dimmx_params_s cn63xxp1;
+ struct cvmx_lmcx_dimmx_params_s cn66xx;
+ struct cvmx_lmcx_dimmx_params_s cn68xx;
+ struct cvmx_lmcx_dimmx_params_s cn68xxp1;
+ struct cvmx_lmcx_dimmx_params_s cnf71xx;
+};
+typedef union cvmx_lmcx_dimmx_params cvmx_lmcx_dimmx_params_t;
+
+/**
+ * cvmx_lmc#_dimm_ctl
+ *
+ * LMC_DIMM_CTL = LMC DIMM Control
+ *
+ *
+ * Notes:
+ * This CSR is only used when LMC*_CONTROL[RDIMM_ENA]=1. During a power-up/init sequence, this CSR
+ * controls LMC's writes to the control words in the JEDEC standard SSTE32882 registering clock driver
+ * on an RDIMM.
+ */
+union cvmx_lmcx_dimm_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_dimm_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t parity : 1; /**< Parity
+ The PAR_IN input of a registered DIMM should be
+ tied off. LMC adjusts the value of the DDR_WE_L (DWE#)
+ pin during DDR3 register part control word writes to
+ ensure the parity is observed correctly by the receiving
+ SSTE32882 register part.
+ When PAR_IN is grounded, PARITY should be cleared to 0. */
+ uint64_t tcws : 13; /**< LMC waits for this time period before and after a RDIMM
+ Control Word Access during a power-up/init SEQUENCE.
+ TCWS is in multiples of 8 CK cycles.
+ Set TCWS (CSR field) = RNDUP[tCWS(ns)/(8*tCYC(ns))],
+ where tCWS is the desired time (ns) and tCYC(ns)
+ is the DDR clock period (from the clock frequency, not the data rate).
+ TYP=0x4e0 (equivalent to 15us) when changing
+ clock timing (RC2.DBA1, RC6.DA4, RC10.DA3, RC10.DA4,
+ RC11.DA3, and RC11.DA4)
+ TYP=0x8, otherwise
+ 0x0 = Reserved */
+ uint64_t dimm1_wmask : 16; /**< DIMM1 Write Mask
+ if (DIMM1_WMASK[n] = 1)
+ Write DIMM1.RCn */
+ uint64_t dimm0_wmask : 16; /**< DIMM0 Write Mask
+ if (DIMM0_WMASK[n] = 1)
+ Write DIMM0.RCn */
+#else
+ uint64_t dimm0_wmask : 16;
+ uint64_t dimm1_wmask : 16;
+ uint64_t tcws : 13;
+ uint64_t parity : 1;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } s;
+ struct cvmx_lmcx_dimm_ctl_s cn61xx;
+ struct cvmx_lmcx_dimm_ctl_s cn63xx;
+ struct cvmx_lmcx_dimm_ctl_s cn63xxp1;
+ struct cvmx_lmcx_dimm_ctl_s cn66xx;
+ struct cvmx_lmcx_dimm_ctl_s cn68xx;
+ struct cvmx_lmcx_dimm_ctl_s cn68xxp1;
+ struct cvmx_lmcx_dimm_ctl_s cnf71xx;
+};
+typedef union cvmx_lmcx_dimm_ctl cvmx_lmcx_dimm_ctl_t;
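+
+/*
+ * Illustrative sketch (not part of the SDK): programming RDIMM control
+ * words through LMC*_DIMM*_PARAMS and LMC*_DIMM_CTL as described above.
+ * The RC values are raw-card dependent placeholders, and the
+ * CVMX_LMCX_DIMMX_PARAMS(dimm, lmc) argument order follows the macro's
+ * definition elsewhere in this header.
+ */
+static inline void cvmx_lmcx_rdimm_cw_example(int lmc)
+{
+    cvmx_lmcx_dimmx_params_t params;
+    cvmx_lmcx_dimm_ctl_t ctl;
+
+    params.u64 = 0;
+    params.s.rc0 = 0x0;     /* placeholder Global Features control word */
+    params.s.rc2 = 0x0;     /* placeholder Timing Control word */
+    cvmx_write_csr(CVMX_LMCX_DIMMX_PARAMS(0, lmc), params.u64);
+
+    ctl.u64 = 0;
+    /* TYP=0x4e0 from the TCWS description: 0x4e0 * 8 * 1.5ns (DDR3-1333)
+     * is roughly 15us, the wait needed around clock-timing words */
+    ctl.s.tcws = 0x4e0;
+    ctl.s.dimm0_wmask = 0xffff;  /* write all 16 control words on DIMM0 */
+    ctl.s.dimm1_wmask = 0x0000;  /* no DIMM1 on this hypothetical board */
+    ctl.s.parity = 0;            /* PAR_IN assumed grounded */
+    cvmx_write_csr(CVMX_LMCX_DIMM_CTL(lmc), ctl.u64);
+}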
+
+/**
+ * cvmx_lmc#_dll_ctl
+ *
+ * LMC_DLL_CTL = LMC DLL control and DCLK reset
+ *
+ */
+union cvmx_lmcx_dll_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_dll_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the
+ Dclk domain is (DRESET || ECLK_RESET). */
+ uint64_t dll90_byp : 1; /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be
+ bypassed and the setting is defined by DLL90_VLU */
+ uint64_t dll90_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after
+ DCLK init sequence resets the DDR 90 DLL. Should
+ happen at startup before any activity in DDR. QDLL_ENA
+ must not transition 1->0 outside of a DRESET sequence
+ (i.e. it must remain 1 until the next DRESET).
+ DRESET should be asserted before and for 10 usec
+ following the 0->1 transition on QDLL_ENA. */
+ uint64_t dll90_vlu : 5; /**< Contains the open loop setting value for the DDR90 delay
+ line. */
+#else
+ uint64_t dll90_vlu : 5;
+ uint64_t dll90_ena : 1;
+ uint64_t dll90_byp : 1;
+ uint64_t dreset : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_lmcx_dll_ctl_s cn52xx;
+ struct cvmx_lmcx_dll_ctl_s cn52xxp1;
+ struct cvmx_lmcx_dll_ctl_s cn56xx;
+ struct cvmx_lmcx_dll_ctl_s cn56xxp1;
+};
+typedef union cvmx_lmcx_dll_ctl cvmx_lmcx_dll_ctl_t;
+
+/**
+ * cvmx_lmc#_dll_ctl2
+ *
+ * LMC_DLL_CTL2 = LMC (Octeon) DLL control and DCLK reset
+ *
+ *
+ * Notes:
+ * DLL Bringup sequence:
+ * 1. If not done already, set LMC*_DLL_CTL2 = 0, except when LMC*_DLL_CTL2[DRESET] = 1.
+ * 2. Write 1 to LMC*_DLL_CTL2[DLL_BRINGUP]
+ * 3. Wait for 10 CK cycles, then write 1 to LMC*_DLL_CTL2[QUAD_DLL_ENA]. It may not be feasible to count 10 CK cycles, but the
+ * idea is to configure the delay line into DLL mode by asserting DLL_BRINGUP earlier than [QUAD_DLL_ENA], even if it is one
+ * cycle early. LMC*_DLL_CTL2[QUAD_DLL_ENA] must not change after this point without restarting the LMC and/or DRESET initialization
+ * sequence.
+ * 4. Read L2D_BST0 and wait for the result. (L2D_BST0 is subject to change depending on how it is called in o63. It is still OK to go
+ * without step 4, since step 5 allows enough time)
+ * 5. Wait 10 us.
+ * 6. Write 0 to LMC*_DLL_CTL2[DLL_BRINGUP]. LMC*_DLL_CTL2[DLL_BRINGUP] must not change after this point without restarting the LMC
+ * and/or DRESET initialization sequence.
+ * 7. Read L2D_BST0 and wait for the result. (same as step 4, but the idea here is to wait some time before going to step 8; even
+ * one cycle is fine)
+ * 8. Write 0 to LMC*_DLL_CTL2[DRESET]. LMC*_DLL_CTL2[DRESET] must not change after this point without restarting the LMC and/or
+ * DRESET initialization sequence.
+ */
+union cvmx_lmcx_dll_ctl2 {
+ uint64_t u64;
+ struct cvmx_lmcx_dll_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t intf_en : 1; /**< Interface Enable */
+ uint64_t dll_bringup : 1; /**< DLL Bringup */
+ uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the
+ Dclk domain is (DRESET || ECLK_RESET). */
+ uint64_t quad_dll_ena : 1; /**< DLL Enable */
+ uint64_t byp_sel : 4; /**< Bypass select
+ 0000 : no byte
+ 0001 : byte 0
+ - ...
+ 1001 : byte 8
+ 1010 : all bytes
+ 1011-1111 : Reserved */
+ uint64_t byp_setting : 8; /**< Bypass setting
+ DDR3-1600: 00100010
+ DDR3-1333: 00110010
+ DDR3-1066: 01001011
+ DDR3-800 : 01110101
+ DDR3-667 : 10010110
+ DDR3-600 : 10101100 */
+#else
+ uint64_t byp_setting : 8;
+ uint64_t byp_sel : 4;
+ uint64_t quad_dll_ena : 1;
+ uint64_t dreset : 1;
+ uint64_t dll_bringup : 1;
+ uint64_t intf_en : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_lmcx_dll_ctl2_s cn61xx;
+ struct cvmx_lmcx_dll_ctl2_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t dll_bringup : 1; /**< DLL Bringup */
+ uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the
+ Dclk domain is (DRESET || ECLK_RESET). */
+ uint64_t quad_dll_ena : 1; /**< DLL Enable */
+ uint64_t byp_sel : 4; /**< Bypass select
+ 0000 : no byte
+ 0001 : byte 0
+ - ...
+ 1001 : byte 8
+ 1010 : all bytes
+ 1011-1111 : Reserved */
+ uint64_t byp_setting : 8; /**< Bypass setting
+ DDR3-1600: 00100010
+ DDR3-1333: 00110010
+ DDR3-1066: 01001011
+ DDR3-800 : 01110101
+ DDR3-667 : 10010110
+ DDR3-600 : 10101100 */
+#else
+ uint64_t byp_setting : 8;
+ uint64_t byp_sel : 4;
+ uint64_t quad_dll_ena : 1;
+ uint64_t dreset : 1;
+ uint64_t dll_bringup : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_dll_ctl2_cn63xx cn63xxp1;
+ struct cvmx_lmcx_dll_ctl2_cn63xx cn66xx;
+ struct cvmx_lmcx_dll_ctl2_s cn68xx;
+ struct cvmx_lmcx_dll_ctl2_s cn68xxp1;
+ struct cvmx_lmcx_dll_ctl2_s cnf71xx;
+};
+typedef union cvmx_lmcx_dll_ctl2 cvmx_lmcx_dll_ctl2_t;
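+
+/*
+ * Illustrative sketch (not part of the SDK): the DLL bringup sequence
+ * from the notes above, with cvmx_wait_usec() standing in for the CK-cycle
+ * and L2D_BST0-read delays (a coarse over-wait is harmless here).
+ */
+static inline void cvmx_lmcx_dll_bringup_example(int lmc)
+{
+    cvmx_lmcx_dll_ctl2_t ctl;
+
+    /* Steps 1-2: clear everything but DRESET, then assert DLL_BRINGUP */
+    ctl.u64 = 0;
+    ctl.s.dreset = 1;
+    cvmx_write_csr(CVMX_LMCX_DLL_CTL2(lmc), ctl.u64);
+    ctl.s.dll_bringup = 1;
+    cvmx_write_csr(CVMX_LMCX_DLL_CTL2(lmc), ctl.u64);
+
+    /* Step 3: roughly 10 CK later, enable the quad DLL; it must then
+     * stay enabled until the next DRESET sequence */
+    cvmx_wait_usec(1);
+    ctl.s.quad_dll_ena = 1;
+    cvmx_write_csr(CVMX_LMCX_DLL_CTL2(lmc), ctl.u64);
+
+    /* Steps 4-6: wait 10 us, then drop DLL_BRINGUP */
+    cvmx_wait_usec(10);
+    ctl.s.dll_bringup = 0;
+    cvmx_write_csr(CVMX_LMCX_DLL_CTL2(lmc), ctl.u64);
+
+    /* Steps 7-8: settle briefly, then release the Dclk domain reset */
+    cvmx_wait_usec(1);
+    ctl.s.dreset = 0;
+    cvmx_write_csr(CVMX_LMCX_DLL_CTL2(lmc), ctl.u64);
+}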
+
+/**
+ * cvmx_lmc#_dll_ctl3
+ *
+ * LMC_DLL_CTL3 = LMC DLL control and DCLK reset
+ *
+ */
+union cvmx_lmcx_dll_ctl3 {
+ uint64_t u64;
+ struct cvmx_lmcx_dll_ctl3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_41_63 : 23;
+ uint64_t dclk90_fwd : 1; /**< Forward setting
+ 0 : disable
+ 1 : forward (generates a 1 cycle pulse to forward setting)
+ This register is oneshot and clears itself each time
+ it is set */
+ uint64_t ddr_90_dly_byp : 1; /**< Bypass DDR90_DLY in Clock Tree */
+ uint64_t dclk90_recal_dis : 1; /**< Disable periodic recalibration of the DDR90 Delay Line */
+ uint64_t dclk90_byp_sel : 1; /**< Bypass Setting Select for DDR90 Delay Line */
+ uint64_t dclk90_byp_setting : 8; /**< Bypass Setting for DDR90 Delay Line */
+ uint64_t dll_fast : 1; /**< DLL lock
+ 0 = DLL locked */
+ uint64_t dll90_setting : 8; /**< Encoded DLL settings. Works in conjunction with
+ DLL90_BYTE_SEL */
+ uint64_t fine_tune_mode : 1; /**< DLL Fine Tune Mode
+ 0 = disabled
+ 1 = enable.
+ When enabled, calibrate internal PHY DLL every
+ LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t dll_mode : 1; /**< DLL Mode */
+ uint64_t dll90_byte_sel : 4; /**< Observe DLL settings for selected byte
+ 0001 : byte 0
+ - ...
+ 1001 : byte 8
+ 0000,1010-1111 : Reserved */
+ uint64_t offset_ena : 1; /**< Offset enable
+ 0 = disable
+ 1 = enable */
+ uint64_t load_offset : 1; /**< Load offset
+ 0 : disable
+ 1 : load (generates a 1 cycle pulse to the PHY)
+ This register is oneshot and clears itself each time
+ it is set */
+ uint64_t mode_sel : 2; /**< Mode select
+ 00 : reset
+ 01 : write
+ 10 : read
+ 11 : write & read */
+ uint64_t byte_sel : 4; /**< Byte select
+ 0000 : no byte
+ 0001 : byte 0
+ - ...
+ 1001 : byte 8
+ 1010 : all bytes
+ 1011-1111 : Reserved */
+ uint64_t offset : 6; /**< Write/read offset setting
+ [4:0] : offset
+ [5] : 0 = increment, 1 = decrement
+ Not a 2's complement value */
+#else
+ uint64_t offset : 6;
+ uint64_t byte_sel : 4;
+ uint64_t mode_sel : 2;
+ uint64_t load_offset : 1;
+ uint64_t offset_ena : 1;
+ uint64_t dll90_byte_sel : 4;
+ uint64_t dll_mode : 1;
+ uint64_t fine_tune_mode : 1;
+ uint64_t dll90_setting : 8;
+ uint64_t dll_fast : 1;
+ uint64_t dclk90_byp_setting : 8;
+ uint64_t dclk90_byp_sel : 1;
+ uint64_t dclk90_recal_dis : 1;
+ uint64_t ddr_90_dly_byp : 1;
+ uint64_t dclk90_fwd : 1;
+ uint64_t reserved_41_63 : 23;
+#endif
+ } s;
+ struct cvmx_lmcx_dll_ctl3_s cn61xx;
+ struct cvmx_lmcx_dll_ctl3_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t dll_fast : 1; /**< DLL lock
+ 0 = DLL locked */
+ uint64_t dll90_setting : 8; /**< Encoded DLL settings. Works in conjunction with
+ DLL90_BYTE_SEL */
+ uint64_t fine_tune_mode : 1; /**< DLL Fine Tune Mode
+ 0 = disabled
+ 1 = enable.
+ When enabled, calibrate internal PHY DLL every
+ LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t dll_mode : 1; /**< DLL Mode */
+ uint64_t dll90_byte_sel : 4; /**< Observe DLL settings for selected byte
+ 0001 : byte 0
+ - ...
+ 1001 : byte 8
+ 0000,1010-1111 : Reserved */
+ uint64_t offset_ena : 1; /**< Offset enable
+ 0 = disable
+ 1 = enable */
+ uint64_t load_offset : 1; /**< Load offset
+ 0 : disable
+ 1 : load (generates a 1 cycle pulse to the PHY)
+ This register is oneshot and clears itself each time
+ it is set */
+ uint64_t mode_sel : 2; /**< Mode select
+ 00 : reset
+ 01 : write
+ 10 : read
+ 11 : write & read */
+ uint64_t byte_sel : 4; /**< Byte select
+ 0000 : no byte
+ 0001 : byte 0
+ - ...
+ 1001 : byte 8
+ 1010 : all bytes
+ 1011-1111 : Reserved */
+ uint64_t offset : 6; /**< Write/read offset setting
+ [4:0] : offset
+ [5] : 0 = increment, 1 = decrement
+ Not a 2's complement value */
+#else
+ uint64_t offset : 6;
+ uint64_t byte_sel : 4;
+ uint64_t mode_sel : 2;
+ uint64_t load_offset : 1;
+ uint64_t offset_ena : 1;
+ uint64_t dll90_byte_sel : 4;
+ uint64_t dll_mode : 1;
+ uint64_t fine_tune_mode : 1;
+ uint64_t dll90_setting : 8;
+ uint64_t dll_fast : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_dll_ctl3_cn63xx cn63xxp1;
+ struct cvmx_lmcx_dll_ctl3_cn63xx cn66xx;
+ struct cvmx_lmcx_dll_ctl3_s cn68xx;
+ struct cvmx_lmcx_dll_ctl3_s cn68xxp1;
+ struct cvmx_lmcx_dll_ctl3_s cnf71xx;
+};
+typedef union cvmx_lmcx_dll_ctl3 cvmx_lmcx_dll_ctl3_t;
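+
+/*
+ * Illustrative sketch (not part of the SDK): applying a read offset to
+ * one byte lane through LMC*_DLL_CTL3.  LOAD_OFFSET is a self-clearing
+ * oneshot, so the select/enable fields are programmed first and the load
+ * pulse is issued by a second write.  The tap value is a placeholder.
+ */
+static inline void cvmx_lmcx_dll_offset_example(int lmc)
+{
+    cvmx_lmcx_dll_ctl3_t ctl;
+
+    ctl.u64 = cvmx_read_csr(CVMX_LMCX_DLL_CTL3(lmc));
+    ctl.s.offset_ena = 1;
+    ctl.s.mode_sel = 2;     /* 10: apply the offset to reads */
+    ctl.s.byte_sel = 1;     /* 0001: byte 0 */
+    ctl.s.offset = 3;       /* [5]=0 increment, [4:0]=3 taps */
+    cvmx_write_csr(CVMX_LMCX_DLL_CTL3(lmc), ctl.u64);
+
+    ctl.s.load_offset = 1;  /* oneshot: pulses the load into the PHY */
+    cvmx_write_csr(CVMX_LMCX_DLL_CTL3(lmc), ctl.u64);
+}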
+
+/**
+ * cvmx_lmc#_dual_memcfg
+ *
+ * LMC_DUAL_MEMCFG = LMC Dual Memory Configuration Register
+ *
+ * This register controls certain parameters of Dual Memory Configuration
+ *
+ * Notes:
+ * This register enables the design to have two, separate memory configurations, selected dynamically
+ * by the reference address. Note, however, that both configurations share
+ * LMC*_CONTROL[XOR_BANK], LMC*_CONFIG[PBANK_LSB], LMC*_CONFIG[RANK_ENA], and all timing parameters.
+ * In this description, "config0" refers to the normal memory configuration that is defined by the
+ * LMC*_CONFIG[ROW_LSB] parameters and "config1" refers to the dual (or second)
+ * memory configuration that is defined by this register.
+ *
+ * Enable mask to chip select mapping is shown below:
+ * CS_MASK[3] -> DIMM1_CS_<1>
+ * CS_MASK[2] -> DIMM1_CS_<0>
+ *
+ * CS_MASK[1] -> DIMM0_CS_<1>
+ * CS_MASK[0] -> DIMM0_CS_<0>
+ *
+ * DIMM n uses the pair of chip selects DIMMn_CS_<1:0>.
+ *
+ * Programming restrictions for CS_MASK:
+ * when LMC*_CONFIG[RANK_ENA] == 0, CS_MASK[2n + 1] = CS_MASK[2n]
+ */
+union cvmx_lmcx_dual_memcfg {
+ uint64_t u64;
+ struct cvmx_lmcx_dual_memcfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t bank8 : 1; /**< See LMC_DDR2_CTL[BANK8] */
+ uint64_t row_lsb : 3; /**< See LMC*_CONFIG[ROW_LSB] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t cs_mask : 8; /**< Chip select mask.
+ This mask corresponds to the 8 chip selects for a memory
+ configuration. Each reference address will assert one of
+ the chip selects. If that chip select has its
+ corresponding CS_MASK bit set, then the "config1"
+ parameters are used, otherwise the "config0" parameters
+ are used. See additional notes below.
+ [7:4] *UNUSED IN 6xxx* */
+#else
+ uint64_t cs_mask : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t row_lsb : 3;
+ uint64_t bank8 : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_lmcx_dual_memcfg_s cn50xx;
+ struct cvmx_lmcx_dual_memcfg_s cn52xx;
+ struct cvmx_lmcx_dual_memcfg_s cn52xxp1;
+ struct cvmx_lmcx_dual_memcfg_s cn56xx;
+ struct cvmx_lmcx_dual_memcfg_s cn56xxp1;
+ struct cvmx_lmcx_dual_memcfg_s cn58xx;
+ struct cvmx_lmcx_dual_memcfg_s cn58xxp1;
+ struct cvmx_lmcx_dual_memcfg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t row_lsb : 3; /**< See LMC*_CONFIG[ROW_LSB] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t cs_mask : 8; /**< Chip select mask.
+ This mask corresponds to the 8 chip selects for a memory
+ configuration. Each reference address will assert one of
+ the chip selects. If that chip select has its
+ corresponding CS_MASK bit set, then the "config1"
+ parameters are used, otherwise the "config0" parameters
+ are used. See additional notes below.
+ [7:4] *UNUSED IN 6xxx* */
+#else
+ uint64_t cs_mask : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t row_lsb : 3;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn61xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn63xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn63xxp1;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn66xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn68xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn68xxp1;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cnf71xx;
+};
+typedef union cvmx_lmcx_dual_memcfg cvmx_lmcx_dual_memcfg_t;
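+
+/*
+ * Illustrative sketch (not part of the SDK): selecting the second
+ * ("config1") memory configuration for both chip selects of DIMM1, per
+ * the CS_MASK mapping in the notes above.  The ROW_LSB encoding is a
+ * placeholder for whatever the second DIMM type requires.
+ */
+static inline void cvmx_lmcx_dual_memcfg_example(int lmc)
+{
+    cvmx_lmcx_dual_memcfg_t cfg;
+
+    cfg.u64 = 0;
+    cfg.s.cs_mask = 0xc;  /* CS_MASK[3:2] -> DIMM1_CS_<1:0> use config1 */
+    cfg.s.row_lsb = 2;    /* placeholder config1 row_lsb encoding */
+    cvmx_write_csr(CVMX_LMCX_DUAL_MEMCFG(lmc), cfg.u64);
+}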
+
+/**
+ * cvmx_lmc#_ecc_synd
+ *
+ * LMC_ECC_SYND = MRD ECC Syndromes
+ *
+ */
+union cvmx_lmcx_ecc_synd {
+ uint64_t u64;
+ struct cvmx_lmcx_ecc_synd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t mrdsyn3 : 8; /**< MRD ECC Syndrome Quad3
+ MRDSYN3 corresponds to DQ[63:0]_c1_p1
+ In 32b mode, ecc is calculated on 4 cycles' worth of data
+ MRDSYN3 corresponds to [DQ[31:0]_c3_p1, DQ[31:0]_c3_p0]
+ where _cC_pP denotes cycle C and phase P */
+ uint64_t mrdsyn2 : 8; /**< MRD ECC Syndrome Quad2
+ MRDSYN2 corresponds to DQ[63:0]_c1_p0
+ In 32b mode, ecc is calculated on 4 cycles' worth of data
+ MRDSYN2 corresponds to [DQ[31:0]_c2_p1, DQ[31:0]_c2_p0]
+ where _cC_pP denotes cycle C and phase P */
+ uint64_t mrdsyn1 : 8; /**< MRD ECC Syndrome Quad1
+ MRDSYN1 corresponds to DQ[63:0]_c0_p1
+ In 32b mode, ecc is calculated on 4 cycles' worth of data
+ MRDSYN1 corresponds to [DQ[31:0]_c1_p1, DQ[31:0]_c1_p0]
+ where _cC_pP denotes cycle C and phase P */
+ uint64_t mrdsyn0 : 8; /**< MRD ECC Syndrome Quad0
+ MRDSYN0 corresponds to DQ[63:0]_c0_p0
+ In 32b mode, ecc is calculated on 4 cycles' worth of data
+ MRDSYN0 corresponds to [DQ[31:0]_c0_p1, DQ[31:0]_c0_p0]
+ where _cC_pP denotes cycle C and phase P */
+#else
+ uint64_t mrdsyn0 : 8;
+ uint64_t mrdsyn1 : 8;
+ uint64_t mrdsyn2 : 8;
+ uint64_t mrdsyn3 : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_ecc_synd_s cn30xx;
+ struct cvmx_lmcx_ecc_synd_s cn31xx;
+ struct cvmx_lmcx_ecc_synd_s cn38xx;
+ struct cvmx_lmcx_ecc_synd_s cn38xxp2;
+ struct cvmx_lmcx_ecc_synd_s cn50xx;
+ struct cvmx_lmcx_ecc_synd_s cn52xx;
+ struct cvmx_lmcx_ecc_synd_s cn52xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn56xx;
+ struct cvmx_lmcx_ecc_synd_s cn56xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn58xx;
+ struct cvmx_lmcx_ecc_synd_s cn58xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn61xx;
+ struct cvmx_lmcx_ecc_synd_s cn63xx;
+ struct cvmx_lmcx_ecc_synd_s cn63xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn66xx;
+ struct cvmx_lmcx_ecc_synd_s cn68xx;
+ struct cvmx_lmcx_ecc_synd_s cn68xxp1;
+ struct cvmx_lmcx_ecc_synd_s cnf71xx;
+};
+typedef union cvmx_lmcx_ecc_synd cvmx_lmcx_ecc_synd_t;
+
+/**
+ * cvmx_lmc#_fadr
+ *
+ * LMC_FADR = LMC Failing Address Register (SEC/DED/NXM)
+ *
+ * This register only captures the first transaction with ecc/nxm errors. A DED/NXM error can
+ * overwrite this register with its failing address if the first error was a SEC. Writing
+ * LMC*_CONFIG->SEC_ERR/DED_ERR/NXM_ERR clears the error bits and captures the
+ * next failing address.
+ *
+ * If FDIMM is 2, the error is in the DIMM carrying the upper data bits.
+ *
+ * Notes:
+ * LMC*_FADR captures the failing pre-scrambled address location (split into dimm, bunk, bank, etc). If
+ * scrambling is off, then LMC*_FADR will also capture the failing physical location in the DRAM parts.
+ *
+ * LMC*_SCRAMBLED_FADR captures the actual failing address location in the physical DRAM parts, i.e.,
+ * a. if scrambling is on, LMC*_SCRAMBLE_FADR contains the failing physical location in the DRAM parts (split
+ * into dimm, bunk, bank, etc)
+ * b. if scrambling is off, the pre-scramble and post-scramble addresses are the same, and so the contents of
+ * LMC*_SCRAMBLED_FADR match the contents of LMC*_FADR
+ */
+union cvmx_lmcx_fadr {
+ uint64_t u64;
+ struct cvmx_lmcx_fadr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_lmcx_fadr_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t fdimm : 2; /**< Failing DIMM# */
+ uint64_t fbunk : 1; /**< Failing Rank */
+ uint64_t fbank : 3; /**< Failing Bank[2:0] */
+ uint64_t frow : 14; /**< Failing Row Address[13:0] */
+ uint64_t fcol : 12; /**< Failing Column Start Address[11:0]
+ Represents the Failing read's starting column address
+ (and not the exact column address in which the SEC/DED
+ was detected) */
+#else
+ uint64_t fcol : 12;
+ uint64_t frow : 14;
+ uint64_t fbank : 3;
+ uint64_t fbunk : 1;
+ uint64_t fdimm : 2;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_fadr_cn30xx cn31xx;
+ struct cvmx_lmcx_fadr_cn30xx cn38xx;
+ struct cvmx_lmcx_fadr_cn30xx cn38xxp2;
+ struct cvmx_lmcx_fadr_cn30xx cn50xx;
+ struct cvmx_lmcx_fadr_cn30xx cn52xx;
+ struct cvmx_lmcx_fadr_cn30xx cn52xxp1;
+ struct cvmx_lmcx_fadr_cn30xx cn56xx;
+ struct cvmx_lmcx_fadr_cn30xx cn56xxp1;
+ struct cvmx_lmcx_fadr_cn30xx cn58xx;
+ struct cvmx_lmcx_fadr_cn30xx cn58xxp1;
+ struct cvmx_lmcx_fadr_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t fdimm : 2; /**< Failing DIMM# */
+ uint64_t fbunk : 1; /**< Failing Rank */
+ uint64_t fbank : 3; /**< Failing Bank[2:0] */
+ uint64_t frow : 16; /**< Failing Row Address[15:0] */
+ uint64_t fcol : 14; /**< Failing Column Address[13:0]
+ Technically, represents the address of the 128b data
+ that had an ecc error, i.e., fcol[0] is always 0. Can
+ be used in conjunction with LMC*_CONFIG[DED_ERR] to
+ isolate the 64b chunk of data in error */
+#else
+ uint64_t fcol : 14;
+ uint64_t frow : 16;
+ uint64_t fbank : 3;
+ uint64_t fbunk : 1;
+ uint64_t fdimm : 2;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn61xx;
+ struct cvmx_lmcx_fadr_cn61xx cn63xx;
+ struct cvmx_lmcx_fadr_cn61xx cn63xxp1;
+ struct cvmx_lmcx_fadr_cn61xx cn66xx;
+ struct cvmx_lmcx_fadr_cn61xx cn68xx;
+ struct cvmx_lmcx_fadr_cn61xx cn68xxp1;
+ struct cvmx_lmcx_fadr_cn61xx cnf71xx;
+};
+typedef union cvmx_lmcx_fadr cvmx_lmcx_fadr_t;
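+
+/*
+ * Illustrative sketch (not part of the SDK): dumping the first failing
+ * address after an ECC error on a 6xxx-class part, together with one of
+ * the syndromes from LMC*_ECC_SYND.  Assumes cvmx_dprintf() from cvmx.h.
+ */
+static inline void cvmx_lmcx_fadr_dump_example(int lmc)
+{
+    cvmx_lmcx_fadr_t fadr;
+    cvmx_lmcx_ecc_synd_t synd;
+
+    fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(lmc));
+    synd.u64 = cvmx_read_csr(CVMX_LMCX_ECC_SYND(lmc));
+    cvmx_dprintf("LMC%d ECC fail: dimm %u rank %u bank %u row 0x%x "
+                 "col 0x%x synd0 0x%02x\n", lmc,
+                 (unsigned)fadr.cn61xx.fdimm, (unsigned)fadr.cn61xx.fbunk,
+                 (unsigned)fadr.cn61xx.fbank, (unsigned)fadr.cn61xx.frow,
+                 (unsigned)fadr.cn61xx.fcol, (unsigned)synd.s.mrdsyn0);
+}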
+
+/**
+ * cvmx_lmc#_ifb_cnt
+ *
+ * LMC_IFB_CNT = Performance Counters
+ *
+ */
+union cvmx_lmcx_ifb_cnt {
+ uint64_t u64;
+ struct cvmx_lmcx_ifb_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ifbcnt : 64; /**< Performance Counter
+ 64-bit counter that increments every
+ CK cycle there is something in the in-flight buffer. */
+#else
+ uint64_t ifbcnt : 64;
+#endif
+ } s;
+ struct cvmx_lmcx_ifb_cnt_s cn61xx;
+ struct cvmx_lmcx_ifb_cnt_s cn63xx;
+ struct cvmx_lmcx_ifb_cnt_s cn63xxp1;
+ struct cvmx_lmcx_ifb_cnt_s cn66xx;
+ struct cvmx_lmcx_ifb_cnt_s cn68xx;
+ struct cvmx_lmcx_ifb_cnt_s cn68xxp1;
+ struct cvmx_lmcx_ifb_cnt_s cnf71xx;
+};
+typedef union cvmx_lmcx_ifb_cnt cvmx_lmcx_ifb_cnt_t;
+
+/**
+ * cvmx_lmc#_ifb_cnt_hi
+ *
+ * LMC_IFB_CNT_HI = Performance Counters
+ *
+ */
+union cvmx_lmcx_ifb_cnt_hi {
+ uint64_t u64;
+ struct cvmx_lmcx_ifb_cnt_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ifbcnt_hi : 32; /**< Performance Counter to measure Bus Utilization
+ Upper 32-bits of 64-bit counter that increments every
+ cycle there is something in the in-flight buffer. */
+#else
+ uint64_t ifbcnt_hi : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn30xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn31xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn38xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn38xxp2;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn50xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn52xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn52xxp1;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn56xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn56xxp1;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn58xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn58xxp1;
+};
+typedef union cvmx_lmcx_ifb_cnt_hi cvmx_lmcx_ifb_cnt_hi_t;
+
+/**
+ * cvmx_lmc#_ifb_cnt_lo
+ *
+ * LMC_IFB_CNT_LO = Performance Counters
+ *
+ */
+union cvmx_lmcx_ifb_cnt_lo {
+ uint64_t u64;
+ struct cvmx_lmcx_ifb_cnt_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ifbcnt_lo : 32; /**< Performance Counter
+ Low 32-bits of 64-bit counter that increments every
+ cycle there is something in the in-flight buffer. */
+#else
+ uint64_t ifbcnt_lo : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn30xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn31xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn38xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn38xxp2;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn50xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn52xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn52xxp1;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn56xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn56xxp1;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn58xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn58xxp1;
+};
+typedef union cvmx_lmcx_ifb_cnt_lo cvmx_lmcx_ifb_cnt_lo_t;
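+
+/*
+ * Illustrative sketch (not part of the SDK): on the older parts that
+ * split this counter into HI/LO halves, reading HI twice guards against
+ * a LO wraparound between the two reads producing a torn 64-bit value.
+ */
+static inline uint64_t cvmx_lmcx_ifb_cnt_read_example(int lmc)
+{
+    uint64_t hi, lo, hi2;
+
+    do {
+        hi  = cvmx_read_csr(CVMX_LMCX_IFB_CNT_HI(lmc));
+        lo  = cvmx_read_csr(CVMX_LMCX_IFB_CNT_LO(lmc));
+        hi2 = cvmx_read_csr(CVMX_LMCX_IFB_CNT_HI(lmc));
+    } while (hi != hi2);  /* LO wrapped between the reads; retry */
+    return (hi << 32) | lo;
+}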
+
+/**
+ * cvmx_lmc#_int
+ *
+ * LMC_INT = LMC Interrupt Register
+ *
+ */
+union cvmx_lmcx_int {
+ uint64_t u64;
+ struct cvmx_lmcx_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t ded_err : 4; /**< Double Error detected (DED) of Rd Data
+ [0] corresponds to DQ[63:0]_c0_p0
+ [1] corresponds to DQ[63:0]_c0_p1
+ [2] corresponds to DQ[63:0]_c1_p0
+ [3] corresponds to DQ[63:0]_c1_p1
+ In 32b mode, ecc is calculated on 4 cycles' worth of data
+ [0] corresponds to [DQ[31:0]_c0_p1, DQ[31:0]_c0_p0]
+ [1] corresponds to [DQ[31:0]_c1_p1, DQ[31:0]_c1_p0]
+ [2] corresponds to [DQ[31:0]_c2_p1, DQ[31:0]_c2_p0]
+ [3] corresponds to [DQ[31:0]_c3_p1, DQ[31:0]_c3_p0]
+ where _cC_pP denotes cycle C and phase P
+ Write of 1 will clear the corresponding error bit */
+ uint64_t sec_err : 4; /**< Single Error (corrected) of Rd Data
+ [0] corresponds to DQ[63:0]_c0_p0
+ [1] corresponds to DQ[63:0]_c0_p1
+ [2] corresponds to DQ[63:0]_c1_p0
+ [3] corresponds to DQ[63:0]_c1_p1
+ In 32b mode, ecc is calculated on 4 cycles' worth of data
+ [0] corresponds to [DQ[31:0]_c0_p1, DQ[31:0]_c0_p0]
+ [1] corresponds to [DQ[31:0]_c1_p1, DQ[31:0]_c1_p0]
+ [2] corresponds to [DQ[31:0]_c2_p1, DQ[31:0]_c2_p0]
+ [3] corresponds to [DQ[31:0]_c3_p1, DQ[31:0]_c3_p0]
+ where _cC_pP denotes cycle C and phase P
+ Write of 1 will clear the corresponding error bit */
+ uint64_t nxm_wr_err : 1; /**< Write to non-existent memory
+ Write of 1 will clear the corresponding error bit */
+#else
+ uint64_t nxm_wr_err : 1;
+ uint64_t sec_err : 4;
+ uint64_t ded_err : 4;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_lmcx_int_s cn61xx;
+ struct cvmx_lmcx_int_s cn63xx;
+ struct cvmx_lmcx_int_s cn63xxp1;
+ struct cvmx_lmcx_int_s cn66xx;
+ struct cvmx_lmcx_int_s cn68xx;
+ struct cvmx_lmcx_int_s cn68xxp1;
+ struct cvmx_lmcx_int_s cnf71xx;
+};
+typedef union cvmx_lmcx_int cvmx_lmcx_int_t;
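+
+/*
+ * Illustrative sketch (not part of the SDK): servicing LMC*_INT.  The
+ * error bits are write-1-to-clear, so writing back the value just read
+ * acknowledges exactly the errors that were observed.  Assumes
+ * cvmx_dprintf() from cvmx.h.
+ */
+static inline void cvmx_lmcx_int_ack_example(int lmc)
+{
+    cvmx_lmcx_int_t irq;
+
+    irq.u64 = cvmx_read_csr(CVMX_LMCX_INT(lmc));
+    if (irq.s.ded_err)
+        cvmx_dprintf("LMC%d: uncorrectable ECC, quad mask 0x%x\n",
+                     lmc, (unsigned)irq.s.ded_err);
+    if (irq.s.nxm_wr_err)
+        cvmx_dprintf("LMC%d: write to non-existent memory\n", lmc);
+    cvmx_write_csr(CVMX_LMCX_INT(lmc), irq.u64);  /* W1C acknowledge */
+}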
+
+/**
+ * cvmx_lmc#_int_en
+ *
+ * LMC_INT_EN = LMC Interrupt Enable Register
+ *
+ */
+union cvmx_lmcx_int_en {
+ uint64_t u64;
+ struct cvmx_lmcx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t intr_ded_ena : 1; /**< ECC Double Error Detect(DED) Interrupt Enable bit
+ When set, the memory controller raises a processor
+ interrupt on detecting an uncorrectable Dbl Bit ECC
+ error. */
+ uint64_t intr_sec_ena : 1; /**< ECC Single Error Correct(SEC) Interrupt Enable bit
+ When set, the memory controller raises a processor
+ interrupt on detecting a correctable Single Bit ECC
+ error. */
+ uint64_t intr_nxm_wr_ena : 1; /**< NXM Write Error Interrupt Enable bit
+ When set, the memory controller raises a processor
+ interrupt on detecting a non-existent memory write */
+#else
+ uint64_t intr_nxm_wr_ena : 1;
+ uint64_t intr_sec_ena : 1;
+ uint64_t intr_ded_ena : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_lmcx_int_en_s cn61xx;
+ struct cvmx_lmcx_int_en_s cn63xx;
+ struct cvmx_lmcx_int_en_s cn63xxp1;
+ struct cvmx_lmcx_int_en_s cn66xx;
+ struct cvmx_lmcx_int_en_s cn68xx;
+ struct cvmx_lmcx_int_en_s cn68xxp1;
+ struct cvmx_lmcx_int_en_s cnf71xx;
+};
+typedef union cvmx_lmcx_int_en cvmx_lmcx_int_en_t;
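+
+/*
+ * Illustrative sketch (not part of the SDK): enabling all three LMC
+ * interrupt sources described above.
+ */
+static inline void cvmx_lmcx_int_enable_example(int lmc)
+{
+    cvmx_lmcx_int_en_t en;
+
+    en.u64 = 0;
+    en.s.intr_ded_ena = 1;     /* uncorrectable (DED) ECC errors */
+    en.s.intr_sec_ena = 1;     /* corrected (SEC) ECC errors */
+    en.s.intr_nxm_wr_ena = 1;  /* writes to non-existent memory */
+    cvmx_write_csr(CVMX_LMCX_INT_EN(lmc), en.u64);
+}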
+
+/**
+ * cvmx_lmc#_mem_cfg0
+ *
+ * Specify the RSL base addresses for the block
+ *
+ * LMC_MEM_CFG0 = LMC Memory Configuration Register0
+ *
+ * This register controls certain parameters of Memory Configuration
+ */
+union cvmx_lmcx_mem_cfg0 {
+ uint64_t u64;
+ struct cvmx_lmcx_mem_cfg0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter,
+ and LMC_OPS_CNT_*, LMC_IFB_CNT_*, and LMC_DCLK_CNT_*
+ CSR's. SW should write this to a one, then re-write
+ it to a zero to cause the reset. */
+ uint64_t silo_qc : 1; /**< Adds a Quarter Cycle granularity to generate
+ dqs pulse generation for silo.
+ Combination of Silo_HC and Silo_QC gives the
+ ability to position the read enable with quarter
+ cycle resolution. This is applied on all the bytes
+ uniformly. */
+ uint64_t bunk_ena : 1; /**< Bunk Enable aka RANK ena (for use with dual-rank DIMMs)
+ For dual-rank DIMMs, the bunk_ena bit will enable
+ the drive of the CS_N[1:0] pins based on the
+ (pbank_lsb-1) address bit.
+ Write 0 for SINGLE ranked DIMMs. */
+ uint64_t ded_err : 4; /**< Double Error detected (DED) of Rd Data
+ In 128b mode, ecc is calculated on 1 cycle's worth of data
+ [25] corresponds to DQ[63:0], Phase0
+ [26] corresponds to DQ[127:64], Phase0
+ [27] corresponds to DQ[63:0], Phase1
+ [28] corresponds to DQ[127:64], Phase1
+ In 64b mode, ecc is calculated on 2 cycles' worth of data
+ [25] corresponds to DQ[63:0], Phase0, cycle0
+ [26] corresponds to DQ[63:0], Phase0, cycle1
+ [27] corresponds to DQ[63:0], Phase1, cycle0
+ [28] corresponds to DQ[63:0], Phase1, cycle1
+ Write of 1 will clear the corresponding error bit */
+ uint64_t sec_err : 4; /**< Single Error (corrected) of Rd Data
+ In 128b mode, ecc is calculated on 1 cycle's worth of data
+ [21] corresponds to DQ[63:0], Phase0
+ [22] corresponds to DQ[127:64], Phase0
+ [23] corresponds to DQ[63:0], Phase1
+ [24] corresponds to DQ[127:64], Phase1
+ In 64b mode, ecc is calculated on 2 cycles' worth of data
+ [21] corresponds to DQ[63:0], Phase0, cycle0
+ [22] corresponds to DQ[63:0], Phase0, cycle1
+ [23] corresponds to DQ[63:0], Phase1, cycle0
+ [24] corresponds to DQ[63:0], Phase1, cycle1
+ Write of 1 will clear the corresponding error bit */
+ uint64_t intr_ded_ena : 1; /**< ECC Double Error Detect(DED) Interrupt Enable bit
+ When set, the memory controller raises a processor
+ interrupt on detecting an uncorrectable Dbl Bit ECC
+ error. */
+ uint64_t intr_sec_ena : 1; /**< ECC Single Error Correct(SEC) Interrupt Enable bit
+ When set, the memory controller raises a processor
+ interrupt on detecting a correctable Single Bit ECC
+ error. */
+ uint64_t tcl : 4; /**< This register is not used */
+ uint64_t ref_int : 6; /**< Refresh interval, represented in \# of 512 dclk increments.
+ Program this to RND-DN(tREFI/clkPeriod/512)
+ - 000000: RESERVED
+ - 000001: 1 * 512 = 512 dclks
+ - ...
+ - 111111: 63 * 512 = 32256 dclks */
+ uint64_t pbank_lsb : 4; /**< Physical Bank address select
+ Referring to the explanation for ROW_LSB,
+ PBank_LSB would be Row_LSB bit + \#rowbits
+ + \#rankbits
+ In the 512MB DIMM Example, assuming no rank bits:
+ pbank_lsb=mem_addr[15+13] for 64 b mode
+ =mem_addr[16+13] for 128b mode
+ Hence the parameter
+ 0000:pbank[1:0] = mem_adr[28:27] / rank = mem_adr[26] (if bunk_ena)
+ 0001:pbank[1:0] = mem_adr[29:28] / rank = mem_adr[27] "
+ 0010:pbank[1:0] = mem_adr[30:29] / rank = mem_adr[28] "
+ 0011:pbank[1:0] = mem_adr[31:30] / rank = mem_adr[29] "
+ 0100:pbank[1:0] = mem_adr[32:31] / rank = mem_adr[30] "
+ 0101:pbank[1:0] = mem_adr[33:32] / rank = mem_adr[31] "
+ 0110:pbank[1:0] =[1'b0,mem_adr[33]] / rank = mem_adr[32] "
+ 0111:pbank[1:0] =[2'b0] / rank = mem_adr[33] "
+ 1000-1111: RESERVED */
+ uint64_t row_lsb : 3; /**< Encoding used to determine which memory address
+ bit position represents the low order DDR ROW address.
+ The processor's memory address[33:7] needs to be
+ translated to DRAM addresses (bnk,row,col,rank and dimm)
+ and that is a function of the following:
+ 1. \# Banks (4 or 8) - spec'd by BANK8
+ 2. Datapath Width(64 or 128) - MODE128b
+ 3. \# Ranks in a DIMM - spec'd by BUNK_ENA
+ 4. \# DIMM's in the system
+ 5. \# Column Bits of the memory part - spec'd indirectly
+ by this register.
+ 6. \# Row Bits of the memory part - spec'd indirectly
+ by the register below (PBANK_LSB).
+ Illustration: For Micron's MT18HTF6472A,512MB DDR2
+ Unbuffered DIMM which uses 256Mb parts (8M x 8 x 4),
+ \# Banks = 4 -> 2 bits of BA
+ \# Columns = 1K -> 10 bits of Col
+ \# Rows = 8K -> 13 bits of Row
+ Assuming that the total Data width is 128, this is how
+ we arrive at row_lsb:
+ Col Address starts from mem_addr[4] for 128b (16Bytes)
+ dq width or from mem_addr[3] for 64b (8Bytes) dq width
+ \# col + \# bank = 12. Hence row_lsb is mem_adr[15] for
+ 64bmode or mem_adr[16] for 128b mode. Hence row_lsb
+ parameter should be set to 001 (64b) or 010 (128b).
+ - 000: row_lsb = mem_adr[14]
+ - 001: row_lsb = mem_adr[15]
+ - 010: row_lsb = mem_adr[16]
+ - 011: row_lsb = mem_adr[17]
+ - 100: row_lsb = mem_adr[18]
+ - 101-111:row_lsb = RESERVED */
+ uint64_t ecc_ena : 1; /**< ECC Enable: When set, enables the 8b ECC
+ check/correct logic. Should be 1 when used with DIMMs
+ with ECC; 0 otherwise.
+ When this mode is turned on, DQ[71:64] and DQ[143:136],
+ on writes, carry the ECC code generated for
+ the lower 64 and upper 64 bits of data, which is
+ written to the memory and then, on reads, used
+ to check for Single bit errors (which are auto-
+ corrected) and Double Bit errors (which are
+ reported). When not turned on, DQ[71:64] and DQ[143:136]
+ are driven to 0. Please refer to the SEC_ERR, DED_ERR,
+ LMC_FADR, and LMC_ECC_SYND registers
+ for diagnostic information when there is an error. */
+ uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory initialization
+ sequence. */
+#else
+ uint64_t init_start : 1;
+ uint64_t ecc_ena : 1;
+ uint64_t row_lsb : 3;
+ uint64_t pbank_lsb : 4;
+ uint64_t ref_int : 6;
+ uint64_t tcl : 4;
+ uint64_t intr_sec_ena : 1;
+ uint64_t intr_ded_ena : 1;
+ uint64_t sec_err : 4;
+ uint64_t ded_err : 4;
+ uint64_t bunk_ena : 1;
+ uint64_t silo_qc : 1;
+ uint64_t reset : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_mem_cfg0_s cn30xx;
+ struct cvmx_lmcx_mem_cfg0_s cn31xx;
+ struct cvmx_lmcx_mem_cfg0_s cn38xx;
+ struct cvmx_lmcx_mem_cfg0_s cn38xxp2;
+ struct cvmx_lmcx_mem_cfg0_s cn50xx;
+ struct cvmx_lmcx_mem_cfg0_s cn52xx;
+ struct cvmx_lmcx_mem_cfg0_s cn52xxp1;
+ struct cvmx_lmcx_mem_cfg0_s cn56xx;
+ struct cvmx_lmcx_mem_cfg0_s cn56xxp1;
+ struct cvmx_lmcx_mem_cfg0_s cn58xx;
+ struct cvmx_lmcx_mem_cfg0_s cn58xxp1;
+};
+typedef union cvmx_lmcx_mem_cfg0 cvmx_lmcx_mem_cfg0_t;
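+
+/*
+ * Illustrative sketch (not part of the SDK): the REF_INT arithmetic from
+ * the field description above, in picoseconds to stay integer-only.  For
+ * tREFI = 7.8us at a 3.75ns dclk: 7800000/(3750*512) rounds down to 4,
+ * i.e. a refresh every 4 * 512 = 2048 dclks.
+ */
+static inline uint64_t cvmx_lmcx_ref_int_example(uint64_t trefi_ps,
+                                                 uint64_t dclk_ps)
+{
+    /* RND-DN(tREFI/clkPeriod/512), clamped to the legal 1..63 range */
+    uint64_t ref = trefi_ps / (dclk_ps * 512);
+    if (ref < 1)
+        ref = 1;
+    if (ref > 63)
+        ref = 63;
+    return ref;
+}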
+
+/**
+ * cvmx_lmc#_mem_cfg1
+ *
+ * LMC_MEM_CFG1 = LMC Memory Configuration Register1
+ *
+ * This register controls the External Memory Configuration Timing Parameters. Please refer to the
+ * appropriate DDR part spec from your memory vendor for the various values in this CSR.
+ * The details of each of these timing parameters can be found in the JEDEC spec or the vendor
+ * spec of the memory parts.
+ */
+union cvmx_lmcx_mem_cfg1 {
+ uint64_t u64;
+ struct cvmx_lmcx_mem_cfg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t comp_bypass : 1; /**< Compensation bypass. */
+ uint64_t trrd : 3; /**< tRRD cycles: ACT-ACT timing parameter for different
+ banks. (Represented in tCYC cycles == 1dclks)
+ TYP=15ns (66MHz=1,167MHz=3,200MHz=3)
+ For DDR2, TYP=7.5ns
+ - 000: RESERVED
+ - 001: 1 tCYC
+ - 010: 2 tCYC
+ - 011: 3 tCYC
+ - 100: 4 tCYC
+ - 101: 5 tCYC
+ - 110: 6 tCYC
+ - 111: 7 tCYC */
+ uint64_t caslat : 3; /**< CAS Latency Encoding which is loaded into each DDR
+ SDRAM device (MRS[6:4]) upon power-up (INIT_START=1).
+ (Represented in tCYC cycles == 1 dclks)
+ 000 RESERVED
+ 001 RESERVED
+ 010 2.0 tCYC
+ 011 3.0 tCYC
+ 100 4.0 tCYC
+ 101 5.0 tCYC
+ 110 6.0 tCYC
+ 111 RESERVED
+ e.g., the parameters TSKW, SILO_HC, and SILO_QC can
+ account for 1/4-cycle granularity in board/etch delays. */
+ uint64_t tmrd : 3; /**< tMRD Cycles
+ (Represented in dclk tCYC)
+ For DDR2, TYP is 2*tCYC.
+ - 000: RESERVED
+ - 001: 1
+ - 010: 2
+ - 011: 3
+ - 100: 4
+ - 101-111: RESERVED */
+ uint64_t trfc : 5; /**< Indicates tRFC constraints.
+ Set TRFC (CSR field) = RNDUP[tRFC(ns)/(4*tcyc(ns))],
+ where tRFC is from the DDR2 spec, and tcyc(ns)
+ is the DDR clock period (from the clock frequency, not the data rate).
+ For example, with 2Gb, DDR2-667 parts,
+ typ tRFC=195ns, so TRFC (CSR field) = 0x11.
+ TRFC (binary): Corresponding tRFC Cycles
+ ----------------------------------------
+ - 00000-00001: RESERVED
+ - 00010: 0-8
+ - 00011: 9-12
+ - 00100: 13-16
+ - ...
+ - 11110: 117-120
+ - 11111: 121-124 */
+ uint64_t trp : 4; /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)]
+ (Represented in tCYC cycles == 1dclk)
+ TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
+ - 0000: RESERVED
+ - 0001: 1
+ - ...
+ - 1001: 9
+ - 1010-1111: RESERVED
+ When using parts with 8 banks (LMC_DDR2_CTL->BANK8
+ is 1), load tRP cycles + 1 into this register. */
+ uint64_t twtr : 4; /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)]
+ Last Wr Data to Rd Command time.
+ (Represented in tCYC cycles == 1dclks)
+ TYP=15ns (66MHz=1,167MHz=3,400MHz=6, for TYP)
+ - 0000: RESERVED
+ - 0001: 1
+ - ...
+ - 0111: 7
+ - 1000-1111: RESERVED */
+ uint64_t trcd : 4; /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)]
+ (Represented in tCYC cycles == 1dclk)
+ TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
+ - 0000: RESERVED
+ - 0001: 2 (2 is the smallest value allowed)
+ - 0010: 2
+ - ...
+ - 1001: 9
+ - 1010-1111: RESERVED
+ In 2T mode, make this register TRCD-1, not going
+ below 2. */
+ uint64_t tras : 5; /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)]
+ (Represented in tCYC cycles == 1 dclk)
+ - 00000-0001: RESERVED
+ - 00010: 2
+ - ...
+ - 11111: 31 */
+#else
+ uint64_t tras : 5;
+ uint64_t trcd : 4;
+ uint64_t twtr : 4;
+ uint64_t trp : 4;
+ uint64_t trfc : 5;
+ uint64_t tmrd : 3;
+ uint64_t caslat : 3;
+ uint64_t trrd : 3;
+ uint64_t comp_bypass : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_mem_cfg1_s cn30xx;
+ struct cvmx_lmcx_mem_cfg1_s cn31xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t trrd : 3; /**< tRRD cycles: ACT-ACT timing parameter for different
+ banks. (Represented in tCYC cycles == 1dclks)
+ TYP=15ns (66MHz=1,167MHz=3,200MHz=3)
+ For DDR2, TYP=7.5ns
+ - 000: RESERVED
+ - 001: 1 tCYC
+ - 010: 2 tCYC
+ - 011: 3 tCYC
+ - 100: 4 tCYC
+ - 101: 5 tCYC
+ - 110-111: RESERVED */
+ uint64_t caslat : 3; /**< CAS Latency Encoding which is loaded into each DDR
+ SDRAM device (MRS[6:4]) upon power-up (INIT_START=1).
+ (Represented in tCYC cycles == 1 dclks)
+ 000 RESERVED
+ 001 RESERVED
+ 010 2.0 tCYC
+ 011 3.0 tCYC
+ 100 4.0 tCYC
+ 101 5.0 tCYC
+ 110 6.0 tCYC (DDR2)
+ 2.5 tCYC (DDR1)
+ 111 RESERVED
+ e.g., the parameters TSKW, SILO_HC, and SILO_QC can
+ account for 1/4-cycle granularity in board/etch delays. */
+ uint64_t tmrd : 3; /**< tMRD Cycles
+ (Represented in dclk tCYC)
+ For DDR2, TYP is 2*tCYC.
+ - 000: RESERVED
+ - 001: 1
+ - 010: 2
+ - 011: 3
+ - 100: 4
+ - 101-111: RESERVED */
+ uint64_t trfc : 5; /**< Indicates tRFC constraints.
+ Set TRFC (CSR field) = RNDUP[tRFC(ns)/(4*tcyc(ns))],
+ where tRFC is from the DDR2 spec, and tcyc(ns)
+ is the DDR clock period (from the clock frequency, not the data rate).
+ For example, with 2Gb, DDR2-667 parts,
+ typ tRFC=195ns, so TRFC (CSR field) = 0x11.
+ TRFC (binary): Corresponding tRFC Cycles
+ ----------------------------------------
+ - 00000-00001: RESERVED
+ - 00010: 0-8
+ - 00011: 9-12
+ - 00100: 13-16
+ - ...
+ - 11110: 117-120
+ - 11111: 121-124 */
+ uint64_t trp : 4; /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)]
+ (Represented in tCYC cycles == 1dclk)
+ TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
+ - 0000: RESERVED
+ - 0001: 1
+ - ...
+ - 0111: 7
+ - 1000-1111: RESERVED
+ When using parts with 8 banks (LMC_DDR2_CTL->BANK8
+ is 1), load tRP cycles + 1 into this register. */
+ uint64_t twtr : 4; /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)]
+ Last Wr Data to Rd Command time.
+ (Represented in tCYC cycles == 1dclks)
+ TYP=15ns (66MHz=1,167MHz=3,400MHz=6, for TYP)
+ - 0000: RESERVED
+ - 0001: 1
+ - ...
+ - 0111: 7
+ - 1000-1111: RESERVED */
+ uint64_t trcd : 4; /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)]
+ (Represented in tCYC cycles == 1dclk)
+ TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
+ - 0000: RESERVED
+ - 0001: 2 (2 is the smallest value allowed)
+ - 0010: 2
+ - ...
+ - 0111: 7
+ - 1000-1111: RESERVED
+ In 2T mode, make this register TRCD-1, not going
+ below 2. */
+ uint64_t tras : 5; /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)]
+ (Represented in tCYC cycles == 1 dclk)
+ For DDR-I mode:
+ TYP=45ns (66MHz=3,167MHz=8,400MHz=18)
+ - 00000-0001: RESERVED
+ - 00010: 2
+ - ...
+ - 10100: 20
+ - 10101-11111: RESERVED */
+#else
+ uint64_t tras : 5;
+ uint64_t trcd : 4;
+ uint64_t twtr : 4;
+ uint64_t trp : 4;
+ uint64_t trfc : 5;
+ uint64_t tmrd : 3;
+ uint64_t caslat : 3;
+ uint64_t trrd : 3;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn38xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn38xxp2;
+ struct cvmx_lmcx_mem_cfg1_s cn50xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn52xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn52xxp1;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn56xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn56xxp1;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn58xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn58xxp1;
+};
+typedef union cvmx_lmcx_mem_cfg1 cvmx_lmcx_mem_cfg1_t;
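+
+/*
+ * Illustrative sketch (not part of the SDK): the TRFC arithmetic from the
+ * field description above, reproducing its worked example in picoseconds:
+ * 2Gb DDR2-667 parts have tRFC = 195000ps and tcyc = 3000ps, so
+ * RNDUP[195000/(4*3000)] = RNDUP[16.25] = 17 = 0x11.
+ */
+static inline uint64_t cvmx_lmcx_trfc_example(uint64_t trfc_ps,
+                                              uint64_t tcyc_ps)
+{
+    /* ceiling division implements RNDUP[tRFC/(4*tcyc)] */
+    return (trfc_ps + 4 * tcyc_ps - 1) / (4 * tcyc_ps);
+}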
+
+/**
+ * cvmx_lmc#_modereg_params0
+ *
+ * Notes:
+ * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
+ *
+ */
+union cvmx_lmcx_modereg_params0 {
+ uint64_t u64;
+ struct cvmx_lmcx_modereg_params0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t ppd : 1; /**< DLL Control for precharge powerdown
+ 0 = Slow exit (DLL off)
+ 1 = Fast exit (DLL on)
+ LMC writes this value to MR0[PPD] in the selected DDR3 parts
+ during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ This value must equal the MR0[PPD] value in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t wrp : 3; /**< Write recovery for auto precharge
+ Should be programmed to be equal to or greater than
+ RNDUP[tWR(ns)/tCYC(ns)]
+ 000 = 5
+ 001 = 5
+ 010 = 6
+ 011 = 7
+ 100 = 8
+ 101 = 10
+ 110 = 12
+ 111 = 14
+ LMC writes this value to MR0[WR] in the selected DDR3 parts
+ during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ This value must equal the MR0[WR] value in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t dllr : 1; /**< DLL Reset
+ LMC writes this value to MR0[DLL] in the selected DDR3 parts
+ during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR0[DLL] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t tm : 1; /**< Test Mode
+ LMC writes this value to MR0[TM] in the selected DDR3 parts
+ during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR0[TM] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t rbt : 1; /**< Read Burst Type
+ 1 = interleaved (fixed)
+ LMC writes this value to MR0[RBT] in the selected DDR3 parts
+ during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR0[RBT] value must be 1 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t cl : 4; /**< CAS Latency
+ 0010 = 5
+ 0100 = 6
+ 0110 = 7
+ 1000 = 8
+ 1010 = 9
+ 1100 = 10
+ 1110 = 11
+ 0001 = 12
+ 0011 = 13
+ 0101 = 14
+ 0111 = 15
+ 1001 = 16
+ 0000, 1011, 1101, 1111 = Reserved
+ LMC writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts
+ during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ This value must equal the MR0[CAS Latency / CL] value in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t bl : 2; /**< Burst Length
+ 0 = 8 (fixed)
+ LMC writes this value to MR0[BL] in the selected DDR3 parts
+ during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR0[BL] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t qoff : 1; /**< Qoff Enable
+ 0 = enable
+ 1 = disable
+ LMC writes this value to MR1[Qoff] in the DDR3 parts in the selected ranks
+ during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK,INIT_STATUS] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ The MR1[Qoff] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t tdqs : 1; /**< TDQS Enable
+ 0 = disable
+ LMC writes this value to MR1[TDQS] in the DDR3 parts in the selected ranks
+ during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK,INIT_STATUS] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t wlev : 1; /**< Write Leveling Enable
+ 0 = disable
+ LMC writes MR1[Level]=0 in the DDR3 parts in the selected ranks
+ during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ (Write-leveling can only be initiated via the
+ write-leveling instruction sequence.)
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK,INIT_STATUS] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t al : 2; /**< Additive Latency
+ 00 = 0
+ 01 = CL-1
+ 10 = CL-2
+ 11 = Reserved
+ LMC writes this value to MR1[AL] in the selected DDR3 parts
+ during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ This value must equal the MR1[AL] value in all the DDR3
+ parts attached to all ranks during normal operation.
+ See also LMC*_CONTROL[POCAS]. */
+ uint64_t dll : 1; /**< DLL Enable
+ 0 = enable
+ 1 = disable.
+ LMC writes this value to MR1[DLL] in the selected DDR3 parts
+ during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ This value must equal the MR1[DLL] value in all the DDR3
+ parts attached to all ranks during normal operation.
+ In dll-off mode, CL/CWL must be programmed
+ equal to 6/6, respectively, as per the DDR3 specifications. */
+ uint64_t mpr : 1; /**< MPR
+ LMC writes this value to MR3[MPR] in the selected DDR3 parts
+ during power-up/init, read-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences.
+ (LMC also writes MR3[MPR]=1 at the beginning of the
+ read-leveling instruction sequence. Read-leveling should only be initiated via the
+ read-leveling instruction sequence.)
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR3[MPR] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t mprloc : 2; /**< MPR Location
+ LMC writes this value to MR3[MPRLoc] in the selected DDR3 parts
+ during power-up/init, read-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences.
+ (LMC also writes MR3[MPRLoc]=0 at the beginning of the
+ read-leveling instruction sequence.)
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
+ The MR3[MPRLoc] value must be 0 in all the DDR3
+ parts attached to all ranks during normal operation. */
+ uint64_t cwl : 3; /**< CAS Write Latency
+ - 000: 5
+ - 001: 6
+ - 010: 7
+ - 011: 8
+ - 100: 9
+ - 101: 10
+ - 110: 11
+ - 111: 12
+ LMC writes this value to MR2[CWL] in the selected DDR3 parts
+ during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ This value must equal the MR2[CWL] value in all the DDR3
+ parts attached to all ranks during normal operation. */
+#else
+ uint64_t cwl : 3;
+ uint64_t mprloc : 2;
+ uint64_t mpr : 1;
+ uint64_t dll : 1;
+ uint64_t al : 2;
+ uint64_t wlev : 1;
+ uint64_t tdqs : 1;
+ uint64_t qoff : 1;
+ uint64_t bl : 2;
+ uint64_t cl : 4;
+ uint64_t rbt : 1;
+ uint64_t tm : 1;
+ uint64_t dllr : 1;
+ uint64_t wrp : 3;
+ uint64_t ppd : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_lmcx_modereg_params0_s cn61xx;
+ struct cvmx_lmcx_modereg_params0_s cn63xx;
+ struct cvmx_lmcx_modereg_params0_s cn63xxp1;
+ struct cvmx_lmcx_modereg_params0_s cn66xx;
+ struct cvmx_lmcx_modereg_params0_s cn68xx;
+ struct cvmx_lmcx_modereg_params0_s cn68xxp1;
+ struct cvmx_lmcx_modereg_params0_s cnf71xx;
+};
+typedef union cvmx_lmcx_modereg_params0 cvmx_lmcx_modereg_params0_t;
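+
+/* Editor's note: a minimal sketch, not part of the SDK, showing how a DDR3
+ * CAS latency in CKs maps onto the MR0[CL] encoding tabulated above. The
+ * encoding is not a simple offset: CL 5-11 use the even codes and CL 12-16
+ * the odd ones. */
+static inline uint64_t cvmx_lmcx_cl_encode_sketch(unsigned cas_latency)
+{
+    if (cas_latency >= 5 && cas_latency <= 11)
+        return (cas_latency - 4) << 1;        /* 5 -> 0010 ... 11 -> 1110 */
+    if (cas_latency >= 12 && cas_latency <= 16)
+        return ((cas_latency - 12) << 1) | 1; /* 12 -> 0001 ... 16 -> 1001 */
+    return 0; /* 0000 is reserved; callers must treat this as an error */
+}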
+
+/**
+ * cvmx_lmc#_modereg_params1
+ *
+ * Notes:
+ * These parameters are written into the DDR3 MR1 and MR2 registers.
+ *
+ */
+union cvmx_lmcx_modereg_params1 {
+ uint64_t u64;
+ struct cvmx_lmcx_modereg_params1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t rtt_nom_11 : 3; /**< RTT_NOM Rank 3
+ LMC writes this value to MR1[Rtt_Nom] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
+ only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
+ Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
+ uint64_t dic_11 : 2; /**< Output Driver Impedance Control Rank 3
+ LMC writes this value to MR1[D.I.C.] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_11 : 2; /**< RTT_WR Rank 3
+ LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_11 : 1; /**< Self-refresh temperature range Rank 3
+ LMC writes this value to MR2[SRT] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_11 : 1; /**< Auto self-refresh Rank 3
+ LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_11 : 3; /**< Partial array self-refresh Rank 3
+ LMC writes this value to MR2[PASR] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_10 : 3; /**< RTT_NOM Rank 2
+ LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
+ only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
+ Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
+ uint64_t dic_10 : 2; /**< Output Driver Impedance Control Rank 2
+ LMC writes this value to MR1[D.I.C.] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_10 : 2; /**< RTT_WR Rank 2
+ LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_10 : 1; /**< Self-refresh temperature range Rank 2
+ LMC writes this value to MR2[SRT] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_10 : 1; /**< Auto self-refresh Rank 2
+ LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_10 : 3; /**< Partial array self-refresh Rank 2
+ LMC writes this value to MR2[PASR] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_01 : 3; /**< RTT_NOM Rank 1
+ LMC writes this value to MR1[Rtt_Nom] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
+ only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
+ Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
+ uint64_t dic_01 : 2; /**< Output Driver Impedance Control Rank 1
+ LMC writes this value to MR1[D.I.C.] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_01 : 2; /**< RTT_WR Rank 1
+ LMC writes this value to MR2[Rtt_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_01 : 1; /**< Self-refresh temperature range Rank 1
+ LMC writes this value to MR2[SRT] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_01 : 1; /**< Auto self-refresh Rank 1
+ LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_01 : 3; /**< Partial array self-refresh Rank 1
+ LMC writes this value to MR2[PASR] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_00 : 3; /**< RTT_NOM Rank 0
+ LMC writes this value to MR1[Rtt_Nom] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
+ Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
+ only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
+ Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
+ uint64_t dic_00 : 2; /**< Output Driver Impedance Control Rank 0
+ LMC writes this value to MR1[D.I.C.] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_00 : 2; /**< RTT_WR Rank 0
+ LMC writes this value to MR2[Rtt_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_00 : 1; /**< Self-refresh temperature range Rank 0
+ LMC writes this value to MR2[SRT] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_00 : 1; /**< Auto self-refresh Rank 0
+ LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_00 : 3; /**< Partial array self-refresh Rank 0
+ LMC writes this value to MR2[PASR] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
+ when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh entry and exit instruction sequences.
+ See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
+ LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+#else
+ uint64_t pasr_00 : 3;
+ uint64_t asr_00 : 1;
+ uint64_t srt_00 : 1;
+ uint64_t rtt_wr_00 : 2;
+ uint64_t dic_00 : 2;
+ uint64_t rtt_nom_00 : 3;
+ uint64_t pasr_01 : 3;
+ uint64_t asr_01 : 1;
+ uint64_t srt_01 : 1;
+ uint64_t rtt_wr_01 : 2;
+ uint64_t dic_01 : 2;
+ uint64_t rtt_nom_01 : 3;
+ uint64_t pasr_10 : 3;
+ uint64_t asr_10 : 1;
+ uint64_t srt_10 : 1;
+ uint64_t rtt_wr_10 : 2;
+ uint64_t dic_10 : 2;
+ uint64_t rtt_nom_10 : 3;
+ uint64_t pasr_11 : 3;
+ uint64_t asr_11 : 1;
+ uint64_t srt_11 : 1;
+ uint64_t rtt_wr_11 : 2;
+ uint64_t dic_11 : 2;
+ uint64_t rtt_nom_11 : 3;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_lmcx_modereg_params1_s cn61xx;
+ struct cvmx_lmcx_modereg_params1_s cn63xx;
+ struct cvmx_lmcx_modereg_params1_s cn63xxp1;
+ struct cvmx_lmcx_modereg_params1_s cn66xx;
+ struct cvmx_lmcx_modereg_params1_s cn68xx;
+ struct cvmx_lmcx_modereg_params1_s cn68xxp1;
+ struct cvmx_lmcx_modereg_params1_s cnf71xx;
+};
+typedef union cvmx_lmcx_modereg_params1 cvmx_lmcx_modereg_params1_t;
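+
+/* Editor's note: illustrative helper, not SDK code. It encodes the JEDEC
+ * constraint quoted in the RTT_NOM field descriptions above: when RTT_Nom
+ * is used during writes, only codes 1 (RQZ/4), 2 (RQZ/2) and 3 (RQZ/6) are
+ * legal; otherwise codes 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
+static inline int cvmx_lmcx_rtt_nom_valid_sketch(unsigned code, int used_during_writes)
+{
+    if (used_during_writes)
+        return code >= 1 && code <= 3;
+    return code >= 1 && code <= 5;
+}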
+
+/**
+ * cvmx_lmc#_nxm
+ *
+ * LMC_NXM = LMC non-existent memory
+ *
+ *
+ * Notes:
+ * Decoding for mem_msb/rank
+ * - 0000: mem_msb = mem_adr[25]
+ * - 0001: mem_msb = mem_adr[26]
+ * - 0010: mem_msb = mem_adr[27]
+ * - 0011: mem_msb = mem_adr[28]
+ * - 0100: mem_msb = mem_adr[29]
+ * - 0101: mem_msb = mem_adr[30]
+ * - 0110: mem_msb = mem_adr[31]
+ * - 0111: mem_msb = mem_adr[32]
+ * - 1000: mem_msb = mem_adr[33]
+ * - 1001: mem_msb = mem_adr[34]
+ * - 1010-1111: Reserved
+ * For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
+ * DDR3 parts, the column address width = 10, so with
+ * 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] and
+ * mem_msb = 4
+ *
+ * Note that addresses greater than the max defined space (pbank_msb) are also treated
+ * as NXM accesses
+ */
+union cvmx_lmcx_nxm {
+ uint64_t u64;
+ struct cvmx_lmcx_nxm_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t mem_msb_d3_r1 : 4; /**< Max Row MSB for DIMM3, RANK1/DIMM3 in Single Ranked
+ *UNUSED IN 6xxx* */
+ uint64_t mem_msb_d3_r0 : 4; /**< Max Row MSB for DIMM3, RANK0
+ *UNUSED IN 6xxx* */
+ uint64_t mem_msb_d2_r1 : 4; /**< Max Row MSB for DIMM2, RANK1/DIMM2 in Single Ranked
+ *UNUSED IN 6xxx* */
+ uint64_t mem_msb_d2_r0 : 4; /**< Max Row MSB for DIMM2, RANK0
+ *UNUSED IN 6xxx* */
+ uint64_t mem_msb_d1_r1 : 4; /**< Max Row MSB for DIMM1, RANK1/DIMM1 in Single Ranked */
+ uint64_t mem_msb_d1_r0 : 4; /**< Max Row MSB for DIMM1, RANK0 */
+ uint64_t mem_msb_d0_r1 : 4; /**< Max Row MSB for DIMM0, RANK1/DIMM0 in Single Ranked */
+ uint64_t mem_msb_d0_r0 : 4; /**< Max Row MSB for DIMM0, RANK0 */
+ uint64_t cs_mask : 8; /**< Chip select mask.
+ This mask corresponds to the 8 chip selects for a memory
+ configuration. If LMC*_CONFIG[RANK_ENA]==0 then this
+ mask must be set in pairs because each reference address
+ will assert a pair of chip selects. If the chip
+ select(s) have a corresponding CS_MASK bit set, then the
+ reference is to non-existent memory (NXM). LMC will alias an
+ NXM read reference to use the lowest, legal chip select(s)
+ and return 0's. LMC normally discards NXM writes, but will
+ also alias them when LMC*_CONTROL[NXM_WRITE_EN]=1.
+ CS_MASK<7:4> MBZ in 6xxx */
+#else
+ uint64_t cs_mask : 8;
+ uint64_t mem_msb_d0_r0 : 4;
+ uint64_t mem_msb_d0_r1 : 4;
+ uint64_t mem_msb_d1_r0 : 4;
+ uint64_t mem_msb_d1_r1 : 4;
+ uint64_t mem_msb_d2_r0 : 4;
+ uint64_t mem_msb_d2_r1 : 4;
+ uint64_t mem_msb_d3_r0 : 4;
+ uint64_t mem_msb_d3_r1 : 4;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_lmcx_nxm_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t cs_mask : 8; /**< Chip select mask.
+ This mask corresponds to the 8 chip selects for a memory
+ configuration. If LMC_MEM_CFG0[BUNK_ENA]==0 then this
+ mask must be set in pairs because each reference address
+ will assert a pair of chip selects. If the chip
+ select(s) have a corresponding CS_MASK bit set, then the
+ reference is to non-existent memory. LMC will alias the
+ reference to use the lowest, legal chip select(s) in
+ that case. */
+#else
+ uint64_t cs_mask : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn52xx;
+ struct cvmx_lmcx_nxm_cn52xx cn56xx;
+ struct cvmx_lmcx_nxm_cn52xx cn58xx;
+ struct cvmx_lmcx_nxm_s cn61xx;
+ struct cvmx_lmcx_nxm_s cn63xx;
+ struct cvmx_lmcx_nxm_s cn63xxp1;
+ struct cvmx_lmcx_nxm_s cn66xx;
+ struct cvmx_lmcx_nxm_s cn68xx;
+ struct cvmx_lmcx_nxm_s cn68xxp1;
+ struct cvmx_lmcx_nxm_s cnf71xx;
+};
+typedef union cvmx_lmcx_nxm cvmx_lmcx_nxm_t;
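+
+/* Editor's note: a worked sketch, not SDK code, of the MEM_MSB decoding in
+ * the notes above: encoding N selects mem_adr[25+N]. For the Samsung
+ * example, row = mem_adr[29:16] (row_lsb = 16, 14 row bits), so the field
+ * value is 29 - 25 = 4. Callers must keep the result in the 0-9 range. */
+static inline uint64_t cvmx_lmcx_nxm_mem_msb_sketch(unsigned row_lsb, unsigned row_width)
+{
+    unsigned msb_bit = row_lsb + row_width - 1; /* highest row address bit */
+    return msb_bit - 25; /* 0 -> mem_adr[25] ... 9 -> mem_adr[34] */
+}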
+
+/**
+ * cvmx_lmc#_ops_cnt
+ *
+ * LMC_OPS_CNT = Performance Counters
+ *
+ */
+union cvmx_lmcx_ops_cnt {
+ uint64_t u64;
+ struct cvmx_lmcx_ops_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t opscnt : 64; /**< Performance Counter
+ 64-bit counter that increments when the DDR3 data bus
+ is being used
+ DRAM bus utilization = LMC*_OPS_CNT/LMC*_DCLK_CNT */
+#else
+ uint64_t opscnt : 64;
+#endif
+ } s;
+ struct cvmx_lmcx_ops_cnt_s cn61xx;
+ struct cvmx_lmcx_ops_cnt_s cn63xx;
+ struct cvmx_lmcx_ops_cnt_s cn63xxp1;
+ struct cvmx_lmcx_ops_cnt_s cn66xx;
+ struct cvmx_lmcx_ops_cnt_s cn68xx;
+ struct cvmx_lmcx_ops_cnt_s cn68xxp1;
+ struct cvmx_lmcx_ops_cnt_s cnf71xx;
+};
+typedef union cvmx_lmcx_ops_cnt cvmx_lmcx_ops_cnt_t;
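+
+/* Editor's note: hedged usage sketch assuming the cvmx_read_csr() accessor
+ * and the CVMX_LMCX_OPS_CNT/CVMX_LMCX_DCLK_CNT address macros provided
+ * elsewhere in this SDK. It computes the bus utilization ratio given in
+ * the OPSCNT field description above. */
+static inline double cvmx_lmcx_bus_util_sketch(int lmc)
+{
+    uint64_t ops  = cvmx_read_csr(CVMX_LMCX_OPS_CNT(lmc));
+    uint64_t dclk = cvmx_read_csr(CVMX_LMCX_DCLK_CNT(lmc));
+    return dclk ? (double)ops / (double)dclk : 0.0;
+}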
+
+/**
+ * cvmx_lmc#_ops_cnt_hi
+ *
+ * LMC_OPS_CNT_HI = Performance Counters
+ *
+ */
+union cvmx_lmcx_ops_cnt_hi {
+ uint64_t u64;
+ struct cvmx_lmcx_ops_cnt_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t opscnt_hi : 32; /**< Performance Counter to measure Bus Utilization
+ Upper 32 bits of the 64-bit counter
+ DRAM bus utilization = LMC_OPS_CNT_*/LMC_DCLK_CNT_* */
+#else
+ uint64_t opscnt_hi : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_ops_cnt_hi_s cn30xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn31xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn38xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn38xxp2;
+ struct cvmx_lmcx_ops_cnt_hi_s cn50xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn52xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn52xxp1;
+ struct cvmx_lmcx_ops_cnt_hi_s cn56xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn56xxp1;
+ struct cvmx_lmcx_ops_cnt_hi_s cn58xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn58xxp1;
+};
+typedef union cvmx_lmcx_ops_cnt_hi cvmx_lmcx_ops_cnt_hi_t;
+
+/**
+ * cvmx_lmc#_ops_cnt_lo
+ *
+ * LMC_OPS_CNT_LO = Performance Counters
+ *
+ */
+union cvmx_lmcx_ops_cnt_lo {
+ uint64_t u64;
+ struct cvmx_lmcx_ops_cnt_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t opscnt_lo : 32; /**< Performance Counter
+ Lower 32 bits of the 64-bit counter
+ DRAM bus utilization = LMC_OPS_CNT_*/LMC_DCLK_CNT_* */
+#else
+ uint64_t opscnt_lo : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_ops_cnt_lo_s cn30xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn31xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn38xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn38xxp2;
+ struct cvmx_lmcx_ops_cnt_lo_s cn50xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn52xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn52xxp1;
+ struct cvmx_lmcx_ops_cnt_lo_s cn56xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn56xxp1;
+ struct cvmx_lmcx_ops_cnt_lo_s cn58xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn58xxp1;
+};
+typedef union cvmx_lmcx_ops_cnt_lo cvmx_lmcx_ops_cnt_lo_t;
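+
+/* Editor's note: illustrative sketch for the older chips that split this
+ * counter across the *_HI/*_LO CSRs (address macros assumed from elsewhere
+ * in this SDK). Re-reading HI after LO guards against the low half rolling
+ * over between the two reads. */
+static inline uint64_t cvmx_lmcx_ops_cnt64_sketch(int lmc)
+{
+    uint64_t hi, lo;
+    do {
+        hi = cvmx_read_csr(CVMX_LMCX_OPS_CNT_HI(lmc));
+        lo = cvmx_read_csr(CVMX_LMCX_OPS_CNT_LO(lmc));
+    } while (cvmx_read_csr(CVMX_LMCX_OPS_CNT_HI(lmc)) != hi);
+    return (hi << 32) | lo;
+}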
+
+/**
+ * cvmx_lmc#_phy_ctl
+ *
+ * LMC_PHY_CTL = LMC PHY Control
+ *
+ */
+union cvmx_lmcx_phy_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_phy_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t rx_always_on : 1; /**< Disable dynamic DDR3 IO Rx power gating */
+ uint64_t lv_mode : 1; /**< Low Voltage Mode (1.35V) */
+ uint64_t ck_tune1 : 1; /**< Clock Tune */
+ uint64_t ck_dlyout1 : 4; /**< Clock delay out setting */
+ uint64_t ck_tune0 : 1; /**< Clock Tune */
+ uint64_t ck_dlyout0 : 4; /**< Clock delay out setting */
+ uint64_t loopback : 1; /**< Loopback enable */
+ uint64_t loopback_pos : 1; /**< Loopback pos mode */
+ uint64_t ts_stagger : 1; /**< TS Stagger mode
+ This mode configures output drivers with 2-stage drive
+ strength to avoid undershoot issues on the bus when strong
+ drivers are suddenly turned on. When this mode is asserted,
+ Octeon will configure output drivers to be weak drivers
+ (60 ohm output impedance) at the first CK cycle, and
+ change drivers to the designated drive strengths specified
+ in LMC*_COMP_CTL2[CMD_CTL/CK_CTL/DQX_CTL] starting
+ at the following cycle */
+#else
+ uint64_t ts_stagger : 1;
+ uint64_t loopback_pos : 1;
+ uint64_t loopback : 1;
+ uint64_t ck_dlyout0 : 4;
+ uint64_t ck_tune0 : 1;
+ uint64_t ck_dlyout1 : 4;
+ uint64_t ck_tune1 : 1;
+ uint64_t lv_mode : 1;
+ uint64_t rx_always_on : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_lmcx_phy_ctl_s cn61xx;
+ struct cvmx_lmcx_phy_ctl_s cn63xx;
+ struct cvmx_lmcx_phy_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t lv_mode : 1; /**< Low Voltage Mode (1.35V) */
+ uint64_t ck_tune1 : 1; /**< Clock Tune */
+ uint64_t ck_dlyout1 : 4; /**< Clock delay out setting */
+ uint64_t ck_tune0 : 1; /**< Clock Tune */
+ uint64_t ck_dlyout0 : 4; /**< Clock delay out setting */
+ uint64_t loopback : 1; /**< Loopback enable */
+ uint64_t loopback_pos : 1; /**< Loopback pos mode */
+ uint64_t ts_stagger : 1; /**< TS Stagger mode
+ This mode configures output drivers with 2-stage drive
+ strength to avoid undershoot issues on the bus when strong
+ drivers are suddenly turned on. When this mode is asserted,
+ Octeon will configure output drivers to be weak drivers
+ (60 ohm output impedance) at the first CK cycle, and
+ change drivers to the designated drive strengths specified
+ in LMC*_COMP_CTL2[CMD_CTL/CK_CTL/DQX_CTL] starting
+ at the following cycle */
+#else
+ uint64_t ts_stagger : 1;
+ uint64_t loopback_pos : 1;
+ uint64_t loopback : 1;
+ uint64_t ck_dlyout0 : 4;
+ uint64_t ck_tune0 : 1;
+ uint64_t ck_dlyout1 : 4;
+ uint64_t ck_tune1 : 1;
+ uint64_t lv_mode : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_phy_ctl_s cn66xx;
+ struct cvmx_lmcx_phy_ctl_s cn68xx;
+ struct cvmx_lmcx_phy_ctl_s cn68xxp1;
+ struct cvmx_lmcx_phy_ctl_s cnf71xx;
+};
+typedef union cvmx_lmcx_phy_ctl cvmx_lmcx_phy_ctl_t;
+
+/**
+ * cvmx_lmc#_pll_bwctl
+ *
+ * LMC_PLL_BWCTL = DDR PLL Bandwidth Control Register
+ *
+ */
+union cvmx_lmcx_pll_bwctl {
+ uint64_t u64;
+ struct cvmx_lmcx_pll_bwctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t bwupd : 1; /**< Load this Bandwidth Register value into the PLL */
+ uint64_t bwctl : 4; /**< Bandwidth Control Register for DDR PLL */
+#else
+ uint64_t bwctl : 4;
+ uint64_t bwupd : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_lmcx_pll_bwctl_s cn30xx;
+ struct cvmx_lmcx_pll_bwctl_s cn31xx;
+ struct cvmx_lmcx_pll_bwctl_s cn38xx;
+ struct cvmx_lmcx_pll_bwctl_s cn38xxp2;
+};
+typedef union cvmx_lmcx_pll_bwctl cvmx_lmcx_pll_bwctl_t;
+
+/**
+ * cvmx_lmc#_pll_ctl
+ *
+ * LMC_PLL_CTL = LMC pll control
+ *
+ *
+ * Notes:
+ * This CSR is only relevant for LMC0. LMC1_PLL_CTL is not used.
+ *
+ * Exactly one of EN2, EN4, EN6, EN8, EN12, EN16 must be set.
+ *
+ * The resultant DDR_CK frequency is the DDR2_REF_CLK
+ * frequency multiplied by:
+ *
+ * (CLKF + 1) / ((CLKR + 1) * EN(2,4,6,8,12,16))
+ *
+ * The PLL frequency, which is:
+ *
+ * (DDR2_REF_CLK freq) * ((CLKF + 1) / (CLKR + 1))
+ *
+ * must reside between 1.2 and 2.5 GHz. A faster PLL frequency is desirable if there is a choice.
+ */
+union cvmx_lmcx_pll_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_pll_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_30_63 : 34;
+ uint64_t bypass : 1; /**< PLL Bypass */
+ uint64_t fasten_n : 1; /**< Should be set, especially when CLKF > ~80 */
+ uint64_t div_reset : 1; /**< Analog pll divider reset
+ De-assert at least 500*(CLKR+1) reference clock
+ cycles following RESET_N de-assertion. */
+ uint64_t reset_n : 1; /**< Analog pll reset
+ De-assert at least 5 usec after CLKF, CLKR,
+ and EN* are set up. */
+ uint64_t clkf : 12; /**< Multiply reference by CLKF + 1
+ CLKF must be <= 128 */
+ uint64_t clkr : 6; /**< Divide reference by CLKR + 1 */
+ uint64_t reserved_6_7 : 2;
+ uint64_t en16 : 1; /**< Divide output by 16 */
+ uint64_t en12 : 1; /**< Divide output by 12 */
+ uint64_t en8 : 1; /**< Divide output by 8 */
+ uint64_t en6 : 1; /**< Divide output by 6 */
+ uint64_t en4 : 1; /**< Divide output by 4 */
+ uint64_t en2 : 1; /**< Divide output by 2 */
+#else
+ uint64_t en2 : 1;
+ uint64_t en4 : 1;
+ uint64_t en6 : 1;
+ uint64_t en8 : 1;
+ uint64_t en12 : 1;
+ uint64_t en16 : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t clkr : 6;
+ uint64_t clkf : 12;
+ uint64_t reset_n : 1;
+ uint64_t div_reset : 1;
+ uint64_t fasten_n : 1;
+ uint64_t bypass : 1;
+ uint64_t reserved_30_63 : 34;
+#endif
+ } s;
+ struct cvmx_lmcx_pll_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t fasten_n : 1; /**< Should be set, especially when CLKF > ~80 */
+ uint64_t div_reset : 1; /**< Analog pll divider reset
+ De-assert at least 500*(CLKR+1) reference clock
+ cycles following RESET_N de-assertion. */
+ uint64_t reset_n : 1; /**< Analog pll reset
+ De-assert at least 5 usec after CLKF, CLKR,
+ and EN* are set up. */
+ uint64_t clkf : 12; /**< Multiply reference by CLKF + 1
+ CLKF must be <= 256 */
+ uint64_t clkr : 6; /**< Divide reference by CLKR + 1 */
+ uint64_t reserved_6_7 : 2;
+ uint64_t en16 : 1; /**< Divide output by 16 */
+ uint64_t en12 : 1; /**< Divide output by 12 */
+ uint64_t en8 : 1; /**< Divide output by 8 */
+ uint64_t en6 : 1; /**< Divide output by 6 */
+ uint64_t en4 : 1; /**< Divide output by 4 */
+ uint64_t en2 : 1; /**< Divide output by 2 */
+#else
+ uint64_t en2 : 1;
+ uint64_t en4 : 1;
+ uint64_t en6 : 1;
+ uint64_t en8 : 1;
+ uint64_t en12 : 1;
+ uint64_t en16 : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t clkr : 6;
+ uint64_t clkf : 12;
+ uint64_t reset_n : 1;
+ uint64_t div_reset : 1;
+ uint64_t fasten_n : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_pll_ctl_s cn52xx;
+ struct cvmx_lmcx_pll_ctl_s cn52xxp1;
+ struct cvmx_lmcx_pll_ctl_cn50xx cn56xx;
+ struct cvmx_lmcx_pll_ctl_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t div_reset : 1; /**< Analog pll divider reset
+ De-assert at least 500*(CLKR+1) reference clock
+ cycles following RESET_N de-assertion. */
+ uint64_t reset_n : 1; /**< Analog pll reset
+ De-assert at least 5 usec after CLKF, CLKR,
+ and EN* are set up. */
+ uint64_t clkf : 12; /**< Multiply reference by CLKF + 1
+ CLKF must be <= 128 */
+ uint64_t clkr : 6; /**< Divide reference by CLKR + 1 */
+ uint64_t reserved_6_7 : 2;
+ uint64_t en16 : 1; /**< Divide output by 16 */
+ uint64_t en12 : 1; /**< Divide output by 12 */
+ uint64_t en8 : 1; /**< Divide output by 8 */
+ uint64_t en6 : 1; /**< Divide output by 6 */
+ uint64_t en4 : 1; /**< Divide output by 4 */
+ uint64_t en2 : 1; /**< Divide output by 2 */
+#else
+ uint64_t en2 : 1;
+ uint64_t en4 : 1;
+ uint64_t en6 : 1;
+ uint64_t en8 : 1;
+ uint64_t en12 : 1;
+ uint64_t en16 : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t clkr : 6;
+ uint64_t clkf : 12;
+ uint64_t reset_n : 1;
+ uint64_t div_reset : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn56xxp1;
+ struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xx;
+ struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xxp1;
+};
+typedef union cvmx_lmcx_pll_ctl cvmx_lmcx_pll_ctl_t;
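+
+/* Editor's note: a small arithmetic sketch, not SDK code, of the frequency
+ * relation in the notes above: DDR_CK = ref * (CLKF+1) / ((CLKR+1) * ENx),
+ * where en_div is the 2/4/6/8/12/16 divider whose EN* bit is set. Callers
+ * must also keep ref_hz*(CLKF+1)/(CLKR+1) within the 1.2-2.5 GHz PLL range. */
+static inline uint64_t cvmx_lmcx_ddr_ck_hz_sketch(uint64_t ref_hz, unsigned clkf,
+                                                  unsigned clkr, unsigned en_div)
+{
+    return ref_hz * (clkf + 1) / ((clkr + 1) * en_div);
+}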
+
+/**
+ * cvmx_lmc#_pll_status
+ *
+ * LMC_PLL_STATUS = LMC pll status
+ *
+ */
+union cvmx_lmcx_pll_status {
+ uint64_t u64;
+ struct cvmx_lmcx_pll_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ddr__nctl : 5; /**< DDR nctl from compensation circuit */
+ uint64_t ddr__pctl : 5; /**< DDR pctl from compensation circuit */
+ uint64_t reserved_2_21 : 20;
+ uint64_t rfslip : 1; /**< Reference clock slip */
+ uint64_t fbslip : 1; /**< Feedback clock slip */
+#else
+ uint64_t fbslip : 1;
+ uint64_t rfslip : 1;
+ uint64_t reserved_2_21 : 20;
+ uint64_t ddr__pctl : 5;
+ uint64_t ddr__nctl : 5;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_pll_status_s cn50xx;
+ struct cvmx_lmcx_pll_status_s cn52xx;
+ struct cvmx_lmcx_pll_status_s cn52xxp1;
+ struct cvmx_lmcx_pll_status_s cn56xx;
+ struct cvmx_lmcx_pll_status_s cn56xxp1;
+ struct cvmx_lmcx_pll_status_s cn58xx;
+ struct cvmx_lmcx_pll_status_cn58xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t rfslip : 1; /**< Reference clock slip */
+ uint64_t fbslip : 1; /**< Feedback clock slip */
+#else
+ uint64_t fbslip : 1;
+ uint64_t rfslip : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn58xxp1;
+};
+typedef union cvmx_lmcx_pll_status cvmx_lmcx_pll_status_t;
+
+/**
+ * cvmx_lmc#_read_level_ctl
+ *
+ * Notes:
+ * The HW writes and reads the cache block selected by ROW, COL, BNK and the rank as part of a read-leveling sequence for a rank.
+ * A cache block write is 16 72-bit words. PATTERN selects the write value. For the first 8
+ * words, the write value is the bit PATTERN<i> duplicated into a 72-bit vector. The write value of
+ * the last 8 words is the inverse of the write value of the first 8 words.
+ * See LMC*_READ_LEVEL_RANK*.
+ */
+union cvmx_lmcx_read_level_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_read_level_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t rankmask : 4; /**< Selects ranks to be leveled
+ to read-level rank i, set RANKMASK<i> */
+ uint64_t pattern : 8; /**< All DQ driven to PATTERN[burst], 0 <= burst <= 7
+ All DQ driven to ~PATTERN[burst-8], 8 <= burst <= 15 */
+ uint64_t row : 16; /**< Row address used to write/read data pattern */
+ uint64_t col : 12; /**< Column address used to write/read data pattern */
+ uint64_t reserved_3_3 : 1;
+ uint64_t bnk : 3; /**< Bank address used to write/read data pattern */
+#else
+ uint64_t bnk : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t col : 12;
+ uint64_t row : 16;
+ uint64_t pattern : 8;
+ uint64_t rankmask : 4;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_lmcx_read_level_ctl_s cn52xx;
+ struct cvmx_lmcx_read_level_ctl_s cn52xxp1;
+ struct cvmx_lmcx_read_level_ctl_s cn56xx;
+ struct cvmx_lmcx_read_level_ctl_s cn56xxp1;
+};
+typedef union cvmx_lmcx_read_level_ctl cvmx_lmcx_read_level_ctl_t;
+
+/**
+ * cvmx_lmc#_read_level_dbg
+ *
+ * Notes:
+ * A given read of LMC*_READ_LEVEL_DBG returns the read-leveling pass/fail results for all possible
+ * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled.
+ * LMC*_READ_LEVEL_DBG[BYTE] selects the particular byte.
+ * To get these pass/fail results for a different rank, you must run the hardware read-leveling
+ * again. For example, it is possible to get the BITMASK results for every byte of every rank
+ * if you run read-leveling separately for each rank, probing LMC*_READ_LEVEL_DBG between each
+ * read-leveling.
+ */
+union cvmx_lmcx_read_level_dbg {
+ uint64_t u64;
+ struct cvmx_lmcx_read_level_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bitmask : 16; /**< Bitmask generated during deskew settings sweep
+ BITMASK[n]=0 means deskew setting n failed
+ BITMASK[n]=1 means deskew setting n passed
+ for 0 <= n <= 15 */
+ uint64_t reserved_4_15 : 12;
+ uint64_t byte : 4; /**< 0 <= BYTE <= 8 */
+#else
+ uint64_t byte : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t bitmask : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_read_level_dbg_s cn52xx;
+ struct cvmx_lmcx_read_level_dbg_s cn52xxp1;
+ struct cvmx_lmcx_read_level_dbg_s cn56xx;
+ struct cvmx_lmcx_read_level_dbg_s cn56xxp1;
+};
+typedef union cvmx_lmcx_read_level_dbg cvmx_lmcx_read_level_dbg_t;
+
+/**
+ * cvmx_lmc#_read_level_rank#
+ *
+ * Notes:
+ * These are four CSRs per LMC, one per rank.
+ * Each CSR is written by HW during a read-leveling sequence for the rank. (HW sets STATUS==3 after HW read-leveling completes for the rank.)
+ * Each CSR may also be written by SW, but not while a read-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
+ * Deskew setting is measured in units of 1/4 DCLK, so the above BYTE* values can range over 4 DCLKs.
+ * SW initiates a HW read-leveling sequence by programming LMC*_READ_LEVEL_CTL and writing INIT_START=1 with SEQUENCE=1.
+ * See LMC*_READ_LEVEL_CTL.
+ */
+union cvmx_lmcx_read_level_rankx {
+ uint64_t u64;
+ struct cvmx_lmcx_read_level_rankx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t status : 2; /**< Indicates status of the read-leveling and where
+ the BYTE* programmings in <35:0> came from:
+ 0 = BYTE* values are their reset value
+ 1 = BYTE* values were set via a CSR write to this register
+ 2 = read-leveling sequence currently in progress (BYTE* values are unpredictable)
+ 3 = BYTE* values came from a complete read-leveling sequence */
+ uint64_t byte8 : 4; /**< Deskew setting */
+ uint64_t byte7 : 4; /**< Deskew setting */
+ uint64_t byte6 : 4; /**< Deskew setting */
+ uint64_t byte5 : 4; /**< Deskew setting */
+ uint64_t byte4 : 4; /**< Deskew setting */
+ uint64_t byte3 : 4; /**< Deskew setting */
+ uint64_t byte2 : 4; /**< Deskew setting */
+ uint64_t byte1 : 4; /**< Deskew setting */
+ uint64_t byte0 : 4; /**< Deskew setting */
+#else
+ uint64_t byte0 : 4;
+ uint64_t byte1 : 4;
+ uint64_t byte2 : 4;
+ uint64_t byte3 : 4;
+ uint64_t byte4 : 4;
+ uint64_t byte5 : 4;
+ uint64_t byte6 : 4;
+ uint64_t byte7 : 4;
+ uint64_t byte8 : 4;
+ uint64_t status : 2;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_lmcx_read_level_rankx_s cn52xx;
+ struct cvmx_lmcx_read_level_rankx_s cn52xxp1;
+ struct cvmx_lmcx_read_level_rankx_s cn56xx;
+ struct cvmx_lmcx_read_level_rankx_s cn56xxp1;
+};
+typedef union cvmx_lmcx_read_level_rankx cvmx_lmcx_read_level_rankx_t;
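+
+/* Editor's note: hedged sketch assuming cvmx_read_csr() and a
+ * CVMX_LMCX_READ_LEVEL_RANKX(rank, lmc) address macro from elsewhere in
+ * this SDK. After software triggers the read-leveling sequence described
+ * in the notes above, STATUS==3 means hardware completed the rank. */
+static inline int cvmx_lmcx_read_level_done_sketch(int rank, int lmc)
+{
+    cvmx_lmcx_read_level_rankx_t r;
+    r.u64 = cvmx_read_csr(CVMX_LMCX_READ_LEVEL_RANKX(rank, lmc));
+    return r.s.status == 3;
+}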
+
+/**
+ * cvmx_lmc#_reset_ctl
+ *
+ * Specify the RSL base addresses for the block
+ *
+ *
+ * Notes:
+ * DDR3RST - DDR3 DRAM parts have a new RESET#
+ * pin that wasn't present in DDR2 parts. The
+ * DDR3RST CSR field controls the assertion of
+ * the new 6xxx pin that attaches to RESET#.
+ * When DDR3RST is set, 6xxx asserts RESET#.
+ * When DDR3RST is clear, 6xxx de-asserts
+ * RESET#.
+ *
+ * DDR3RST is set on a cold reset. Warm and
+ * soft chip resets do not affect the DDR3RST
+ * value. Outside of cold reset, only software
+ * CSR writes change the DDR3RST value.
+ *
+ * DDR3PWARM - Enables preserve mode during a warm
+ * reset. When set, the DDR3 controller hardware
+ * automatically puts the attached DDR3 DRAM parts
+ * into self-refresh (see LMC*_CONFIG[SEQUENCE] below) at the beginning of a warm
+ * reset sequence, provided that the DDR3 controller
+ * is up. When clear, the DDR3 controller hardware
+ * does not put the attached DDR3 DRAM parts into
+ * self-refresh during a warm reset sequence.
+ *
+ * DDR3PWARM is cleared on a cold reset. Warm and
+ * soft chip resets do not affect the DDR3PWARM
+ * value. Outside of cold reset, only software
+ * CSR writes change the DDR3PWARM value.
+ *
+ * Note that if a warm reset follows a soft reset,
+ * DDR3PWARM has no effect, as the DDR3 controller
+ * is no longer up after any cold/warm/soft
+ * reset sequence.
+ *
+ * DDR3PSOFT - Enables preserve mode during a soft
+ * reset. When set, the DDR3 controller hardware
+ * automatically puts the attached DDR3 DRAM parts
+ * into self-refresh (see LMC*_CONFIG[SEQUENCE] below) at the beginning of a soft
+ * reset sequence, provided that the DDR3 controller
+ * is up. When clear, the DDR3 controller hardware
+ * does not put the attached DDR3 DRAM parts into
+ * self-refresh during a soft reset sequence.
+ *
+ * DDR3PSOFT is cleared on a cold reset. Warm and
+ * soft chip resets do not affect the DDR3PSOFT
+ * value. Outside of cold reset, only software
+ * CSR writes change the DDR3PSOFT value.
+ *
+ * DDR3PSV - May be useful for system software to
+ * determine when the DDR3 contents have been
+ * preserved.
+ *
+ * Cleared by hardware during a cold reset. Never
+ * cleared by hardware during a warm/soft reset.
+ * Set by hardware during a warm/soft reset if
+ * the hardware automatically put the DDR3 DRAM
+ * into self-refresh during the reset sequence.
+ *
+ * Can also be written by software (to any value).
+ */
+union cvmx_lmcx_reset_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_reset_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t ddr3psv : 1; /**< Memory Reset
+ 1 = DDR contents preserved */
+ uint64_t ddr3psoft : 1; /**< Memory Reset
+ 1 = Enable Preserve mode during soft reset */
+ uint64_t ddr3pwarm : 1; /**< Memory Reset
+ 1 = Enable Preserve mode during warm reset */
+ uint64_t ddr3rst : 1; /**< Memory Reset
+ 0 = Reset asserted
+ 1 = Reset de-asserted */
+#else
+ uint64_t ddr3rst : 1;
+ uint64_t ddr3pwarm : 1;
+ uint64_t ddr3psoft : 1;
+ uint64_t ddr3psv : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_lmcx_reset_ctl_s cn61xx;
+ struct cvmx_lmcx_reset_ctl_s cn63xx;
+ struct cvmx_lmcx_reset_ctl_s cn63xxp1;
+ struct cvmx_lmcx_reset_ctl_s cn66xx;
+ struct cvmx_lmcx_reset_ctl_s cn68xx;
+ struct cvmx_lmcx_reset_ctl_s cn68xxp1;
+ struct cvmx_lmcx_reset_ctl_s cnf71xx;
+};
+typedef union cvmx_lmcx_reset_ctl cvmx_lmcx_reset_ctl_t;
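+
+/* Editor's note: hedged sketch of the preserve-mode handshake described in
+ * the notes above, assuming a CVMX_LMCX_RESET_CTL(lmc) address macro and
+ * cvmx_read_csr() from elsewhere in this SDK. Software arms DDR3PWARM or
+ * DDR3PSOFT before the reset, then tests DDR3PSV afterwards to learn
+ * whether hardware parked the DRAM in self-refresh. */
+static inline int cvmx_lmcx_contents_preserved_sketch(int lmc)
+{
+    cvmx_lmcx_reset_ctl_t r;
+    r.u64 = cvmx_read_csr(CVMX_LMCX_RESET_CTL(lmc));
+    return r.s.ddr3psv != 0;
+}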
+
+/**
+ * cvmx_lmc#_rlevel_ctl
+ */
+union cvmx_lmcx_rlevel_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_rlevel_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t delay_unload_3 : 1; /**< When set, unload the PHY silo one cycle later
+ during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 3
+ DELAY_UNLOAD_3 should normally be set, particularly at higher speeds. */
+ uint64_t delay_unload_2 : 1; /**< When set, unload the PHY silo one cycle later
+ during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 2
+ DELAY_UNLOAD_2 should normally not be set. */
+ uint64_t delay_unload_1 : 1; /**< When set, unload the PHY silo one cycle later
+ during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 1
+ DELAY_UNLOAD_1 should normally not be set. */
+ uint64_t delay_unload_0 : 1; /**< When set, unload the PHY silo one cycle later
+ during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 0
+ DELAY_UNLOAD_0 should normally not be set. */
+ uint64_t bitmask : 8; /**< Mask to select bit lanes on which read-leveling
+ feedback is returned when OR_DIS is set to 1 */
+ uint64_t or_dis : 1; /**< Disable or'ing of bits in a byte lane when computing
+ the read-leveling bitmask
+ OR_DIS should normally not be set. */
+ uint64_t offset_en : 1; /**< When set, LMC attempts to select the read-leveling
+ setting that is LMC*_RLEVEL_CTL[OFFSET] settings earlier than the
+ last passing read-leveling setting in the largest
+ contiguous sequence of passing settings.
+ When clear, or if the setting selected by LMC*_RLEVEL_CTL[OFFSET]
+ did not pass, LMC selects the middle setting in the
+ largest contiguous sequence of passing settings,
+ rounding earlier when necessary. */
+ uint64_t offset : 4; /**< The offset used when LMC*_RLEVEL_CTL[OFFSET_EN] is set */
+ uint64_t byte : 4; /**< 0 <= BYTE <= 8
+ Byte index for which bitmask results are saved
+ in LMC*_RLEVEL_DBG */
+#else
+ uint64_t byte : 4;
+ uint64_t offset : 4;
+ uint64_t offset_en : 1;
+ uint64_t or_dis : 1;
+ uint64_t bitmask : 8;
+ uint64_t delay_unload_0 : 1;
+ uint64_t delay_unload_1 : 1;
+ uint64_t delay_unload_2 : 1;
+ uint64_t delay_unload_3 : 1;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_lmcx_rlevel_ctl_s cn61xx;
+ struct cvmx_lmcx_rlevel_ctl_s cn63xx;
+ struct cvmx_lmcx_rlevel_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t offset_en : 1; /**< When set, LMC attempts to select the read-leveling
+ setting that is LMC*_RLEVEL_CTL[OFFSET] settings earlier than the
+ last passing read-leveling setting in the largest
+ contiguous sequence of passing settings.
+ When clear, or if the setting selected by LMC*_RLEVEL_CTL[OFFSET]
+ did not pass, LMC selects the middle setting in the
+ largest contiguous sequence of passing settings,
+ rounding earlier when necessary. */
+ uint64_t offset : 4; /**< The offset used when LMC*_RLEVEL_CTL[OFFSET_EN] is set */
+ uint64_t byte : 4; /**< 0 <= BYTE <= 8
+ Byte index for which bitmask results are saved
+ in LMC*_RLEVEL_DBG */
+#else
+ uint64_t byte : 4;
+ uint64_t offset : 4;
+ uint64_t offset_en : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_rlevel_ctl_s cn66xx;
+ struct cvmx_lmcx_rlevel_ctl_s cn68xx;
+ struct cvmx_lmcx_rlevel_ctl_s cn68xxp1;
+ struct cvmx_lmcx_rlevel_ctl_s cnf71xx;
+};
+typedef union cvmx_lmcx_rlevel_ctl cvmx_lmcx_rlevel_ctl_t;
+
+/**
+ * cvmx_lmc#_rlevel_dbg
+ *
+ * Notes:
+ * A given read of LMC*_RLEVEL_DBG returns the read-leveling pass/fail results for all possible
+ * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled.
+ * LMC*_RLEVEL_CTL[BYTE] selects the particular byte.
+ *
+ * To get these pass/fail results for a different rank, you must run the hardware read-leveling
+ * again. For example, it is possible to get the BITMASK results for every byte of every rank
+ * if you run read-leveling separately for each rank, probing LMC*_RLEVEL_DBG between each
+ * read-leveling.
+ */
+union cvmx_lmcx_rlevel_dbg {
+ uint64_t u64;
+ struct cvmx_lmcx_rlevel_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bitmask : 64; /**< Bitmask generated during deskew settings sweep
+ BITMASK[n]=0 means deskew setting n failed
+ BITMASK[n]=1 means deskew setting n passed
+ for 0 <= n <= 63 */
+#else
+ uint64_t bitmask : 64;
+#endif
+ } s;
+ struct cvmx_lmcx_rlevel_dbg_s cn61xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn63xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn63xxp1;
+ struct cvmx_lmcx_rlevel_dbg_s cn66xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn68xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn68xxp1;
+ struct cvmx_lmcx_rlevel_dbg_s cnf71xx;
+};
+typedef union cvmx_lmcx_rlevel_dbg cvmx_lmcx_rlevel_dbg_t;
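+
+/* Editor's note: illustrative sketch, not SDK code, of the selection rule
+ * described for LMC*_RLEVEL_CTL above: find the largest contiguous run of
+ * passing settings in BITMASK and return its middle, rounding toward the
+ * earlier setting when the run length is even. Returns -1 if nothing passed. */
+static inline int cvmx_lmcx_rlevel_pick_sketch(uint64_t bitmask)
+{
+    int best_start = -1, best_len = 0, start = -1, n;
+    for (n = 0; n < 64; n++) {
+        if ((bitmask >> n) & 1) {
+            if (start < 0)
+                start = n;
+            if (n - start + 1 > best_len) {
+                best_len = n - start + 1;
+                best_start = start;
+            }
+        } else {
+            start = -1;
+        }
+    }
+    return best_len ? best_start + (best_len - 1) / 2 : -1;
+}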
+
+/**
+ * cvmx_lmc#_rlevel_rank#
+ *
+ * Notes:
+ * These are four CSRs per LMC, one per rank.
+ *
+ * Deskew setting is measured in units of 1/4 CK, so the above BYTE* values can range over 16 CKs.
+ *
+ * Each CSR is written by HW during a read-leveling sequence for the rank. (HW sets STATUS==3 after HW read-leveling completes for the rank.)
+ * If HW is unable to find a match per LMC*_RLEVEL_CTL[OFFSET_EN] and LMC*_RLEVEL_CTL[OFFSET], then HW will set LMC*_RLEVEL_RANKi[BYTE*<5:0>]
+ * to 0.
+ *
+ * Each CSR may also be written by SW, but not while a read-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
+ *
+ * SW initiates a HW read-leveling sequence by programming LMC*_RLEVEL_CTL and writing INIT_START=1 with SEQUENCE=1.
+ * See LMC*_RLEVEL_CTL.
+ *
+ * LMC*_RLEVEL_RANKi values for ranks i without attached DRAM should be set such that
+ * they do not increase the range of possible BYTE values for any byte
+ * lane. The easiest way to do this is to set
+ * LMC*_RLEVEL_RANKi = LMC*_RLEVEL_RANKj,
+ * where j is some rank with attached DRAM whose LMC*_RLEVEL_RANKj is already fully initialized.
+ */
+union cvmx_lmcx_rlevel_rankx {
+ uint64_t u64;
+ struct cvmx_lmcx_rlevel_rankx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t status : 2; /**< Indicates status of the read-leveling and where
+ the BYTE* programmings in <53:0> came from:
+ 0 = BYTE* values are their reset value
+ 1 = BYTE* values were set via a CSR write to this register
+ 2 = read-leveling sequence currently in progress (BYTE* values are unpredictable)
+ 3 = BYTE* values came from a complete read-leveling sequence */
+ uint64_t byte8 : 6; /**< Deskew setting
+ When ECC DRAM is not present (i.e. when DRAM is not
+ attached to chip signals DDR_CBS_0_* and DDR_CB[7:0]),
+ SW should write BYTE8 to a value that does
+ not increase the range of possible BYTE* values. The
+ easiest way to do this is to set
+ LMC*_RLEVEL_RANK*[BYTE8] = LMC*_RLEVEL_RANK*[BYTE0]
+ when there is no ECC DRAM, using the final BYTE0 value. */
+ uint64_t byte7 : 6; /**< Deskew setting */
+ uint64_t byte6 : 6; /**< Deskew setting */
+ uint64_t byte5 : 6; /**< Deskew setting */
+ uint64_t byte4 : 6; /**< Deskew setting */
+ uint64_t byte3 : 6; /**< Deskew setting */
+ uint64_t byte2 : 6; /**< Deskew setting */
+ uint64_t byte1 : 6; /**< Deskew setting */
+ uint64_t byte0 : 6; /**< Deskew setting */
+#else
+ uint64_t byte0 : 6;
+ uint64_t byte1 : 6;
+ uint64_t byte2 : 6;
+ uint64_t byte3 : 6;
+ uint64_t byte4 : 6;
+ uint64_t byte5 : 6;
+ uint64_t byte6 : 6;
+ uint64_t byte7 : 6;
+ uint64_t byte8 : 6;
+ uint64_t status : 2;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_lmcx_rlevel_rankx_s cn61xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn63xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn63xxp1;
+ struct cvmx_lmcx_rlevel_rankx_s cn66xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn68xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn68xxp1;
+ struct cvmx_lmcx_rlevel_rankx_s cnf71xx;
+};
+typedef union cvmx_lmcx_rlevel_rankx cvmx_lmcx_rlevel_rankx_t;
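+
+/* Editor's note: hedged sketch of the recommendation in the notes above:
+ * for a rank i with no attached DRAM, mirror a fully initialized rank j so
+ * the unused rank does not widen the per-byte deskew range. Assumes the
+ * CVMX_LMCX_RLEVEL_RANKX(rank, lmc) address macro and the CSR accessors
+ * from elsewhere in this SDK. */
+static inline void cvmx_lmcx_rlevel_mirror_sketch(int i, int j, int lmc)
+{
+    uint64_t v = cvmx_read_csr(CVMX_LMCX_RLEVEL_RANKX(j, lmc));
+    cvmx_write_csr(CVMX_LMCX_RLEVEL_RANKX(i, lmc), v);
+}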
+
+/**
+ * cvmx_lmc#_rodt_comp_ctl
+ *
+ * LMC_RODT_COMP_CTL = LMC Compensation control
+ *
+ */
+union cvmx_lmcx_rodt_comp_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_rodt_comp_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t enable : 1; /**< 0=not enabled, 1=enable */
+ uint64_t reserved_12_15 : 4;
+ uint64_t nctl : 4; /**< Compensation control bits */
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5; /**< Compensation control bits */
+#else
+ uint64_t pctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t enable : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn50xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn52xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn52xxp1;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn56xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn56xxp1;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn58xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn58xxp1;
+};
+typedef union cvmx_lmcx_rodt_comp_ctl cvmx_lmcx_rodt_comp_ctl_t;
+
+/**
+ * cvmx_lmc#_rodt_ctl
+ *
+ * LMC_RODT_CTL = Obsolete LMC Read OnDieTermination control
+ * See the description in LMC_WODT_CTL1. On reads, Octeon only supports turning on ODTs in
+ * the lower 2 DIMMs with the masks as below.
+ *
+ * Notes:
+ * When a given RANK in position N is selected, the RODT _HI and _LO masks for that position are used.
+ * Mask[3:0] is used for RODT control of the RANKs in positions 3, 2, 1, and 0, respectively.
+ * In 64b mode, DIMMs are assumed to be ordered in the following order:
+ * position 3: [unused , DIMM1_RANK1_LO]
+ * position 2: [unused , DIMM1_RANK0_LO]
+ * position 1: [unused , DIMM0_RANK1_LO]
+ * position 0: [unused , DIMM0_RANK0_LO]
+ * In 128b mode, DIMMs are assumed to be ordered in the following order:
+ * position 3: [DIMM3_RANK1_HI, DIMM1_RANK1_LO]
+ * position 2: [DIMM3_RANK0_HI, DIMM1_RANK0_LO]
+ * position 1: [DIMM2_RANK1_HI, DIMM0_RANK1_LO]
+ * position 0: [DIMM2_RANK0_HI, DIMM0_RANK0_LO]
+ */
+union cvmx_lmcx_rodt_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_rodt_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rodt_hi3 : 4; /**< Read ODT mask for position 3, data[127:64] */
+ uint64_t rodt_hi2 : 4; /**< Read ODT mask for position 2, data[127:64] */
+ uint64_t rodt_hi1 : 4; /**< Read ODT mask for position 1, data[127:64] */
+ uint64_t rodt_hi0 : 4; /**< Read ODT mask for position 0, data[127:64] */
+ uint64_t rodt_lo3 : 4; /**< Read ODT mask for position 3, data[ 63: 0] */
+ uint64_t rodt_lo2 : 4; /**< Read ODT mask for position 2, data[ 63: 0] */
+ uint64_t rodt_lo1 : 4; /**< Read ODT mask for position 1, data[ 63: 0] */
+ uint64_t rodt_lo0 : 4; /**< Read ODT mask for position 0, data[ 63: 0] */
+#else
+ uint64_t rodt_lo0 : 4;
+ uint64_t rodt_lo1 : 4;
+ uint64_t rodt_lo2 : 4;
+ uint64_t rodt_lo3 : 4;
+ uint64_t rodt_hi0 : 4;
+ uint64_t rodt_hi1 : 4;
+ uint64_t rodt_hi2 : 4;
+ uint64_t rodt_hi3 : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_rodt_ctl_s cn30xx;
+ struct cvmx_lmcx_rodt_ctl_s cn31xx;
+ struct cvmx_lmcx_rodt_ctl_s cn38xx;
+ struct cvmx_lmcx_rodt_ctl_s cn38xxp2;
+ struct cvmx_lmcx_rodt_ctl_s cn50xx;
+ struct cvmx_lmcx_rodt_ctl_s cn52xx;
+ struct cvmx_lmcx_rodt_ctl_s cn52xxp1;
+ struct cvmx_lmcx_rodt_ctl_s cn56xx;
+ struct cvmx_lmcx_rodt_ctl_s cn56xxp1;
+ struct cvmx_lmcx_rodt_ctl_s cn58xx;
+ struct cvmx_lmcx_rodt_ctl_s cn58xxp1;
+};
+typedef union cvmx_lmcx_rodt_ctl cvmx_lmcx_rodt_ctl_t;
+
+/**
+ * cvmx_lmc#_rodt_mask
+ *
+ * LMC_RODT_MASK = LMC Read OnDieTermination mask
+ * System designers may desire to terminate DQ/DQS lines for higher frequency DDR operations,
+ * especially on a multi-rank system. DDR3 DQ/DQS I/Os have a built-in
+ * termination resistor that can be turned on or off by the controller after meeting tAOND and tAOF
+ * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts
+ * in that DIMM. System designers may prefer different combinations of ODT ON's for reads
+ * into different ranks. Octeon supports full programmability by way of the mask register below.
+ * Each Rank position has its own 8-bit programmable field.
+ * When the controller does a read to that rank, it sets the 4 ODT pins to the MASK pins below.
+ * For example, when doing a read from Rank0, a system designer may desire to terminate the lines
+ * with the resistor on DIMM0/Rank1. The mask RODT_D0_R0 would then be [00000010].
+ * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
+ * required, write 0 in this register. Note that, as per the DDR3 specifications, the ODT pin
+ * for the rank that is being read should always be 0.
+ *
+ * Notes:
+ * When a given RANK is selected, the RODT mask for that RANK is used. The resulting RODT mask is
+ * driven to the DIMMs in the following manner:
+ * RANK_ENA=1 RANK_ENA=0
+ * Mask[3] -> DIMM1_ODT_1 MBZ
+ * Mask[2] -> DIMM1_ODT_0 DIMM1_ODT_0
+ * Mask[1] -> DIMM0_ODT_1 MBZ
+ * Mask[0] -> DIMM0_ODT_0 DIMM0_ODT_0
+ *
+ * LMC always reads entire cache blocks and always reads them via two consecutive
+ * read CAS operations to the same rank+bank+row spaced exactly 4 CK's apart.
+ * When a RODT mask bit is set, LMC asserts the OCTEON ODT output
+ * pin(s) starting (CL - CWL) CK's after the first read CAS operation. Then, OCTEON
+ * normally continues to assert the ODT output pin(s) for 9+LMC*_CONTROL[RODT_BPRCH] more CK's
+ * - for a total of 10+LMC*_CONTROL[RODT_BPRCH] CK's for the entire cache block read -
+ * through the second read CAS operation of the cache block,
+ * satisfying the 6 CK DDR3 ODTH8 requirements.
+ * But it is possible for OCTEON to issue two cache block reads separated by as few as
+ * RtR = 8 or 9 (10 if LMC*_CONTROL[RODT_BPRCH]=1) CK's. In that case, OCTEON asserts the ODT output pin(s)
+ * for the RODT mask of the first cache block read for RtR CK's, then asserts
+ * the ODT output pin(s) for the RODT mask of the second cache block read for 10+LMC*_CONTROL[RODT_BPRCH] CK's
+ * (or less if a third cache block read follows within 8 or 9 (or 10) CK's of this second cache block read).
+ * Note that it may be necessary to force LMC to space back-to-back cache block reads
+ * to different ranks apart by at least 10+LMC*_CONTROL[RODT_BPRCH] CK's to prevent DDR3 ODTH8 violations.
+ */
+union cvmx_lmcx_rodt_mask {
+ uint64_t u64;
+ struct cvmx_lmcx_rodt_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rodt_d3_r1 : 8; /**< Read ODT mask DIMM3, RANK1/DIMM3 in SingleRanked
+ *UNUSED IN 6xxx, and MBZ* */
+ uint64_t rodt_d3_r0 : 8; /**< Read ODT mask DIMM3, RANK0
+ *UNUSED IN 6xxx, and MBZ* */
+ uint64_t rodt_d2_r1 : 8; /**< Read ODT mask DIMM2, RANK1/DIMM2 in SingleRanked
+ *UNUSED IN 6xxx, and MBZ* */
+ uint64_t rodt_d2_r0 : 8; /**< Read ODT mask DIMM2, RANK0
+ *UNUSED IN 6xxx, and MBZ* */
+ uint64_t rodt_d1_r1 : 8; /**< Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked
+ if (RANK_ENA) then
+ RODT_D1_R1[3] must be 0
+ else
+ RODT_D1_R1[3:0] is not used and MBZ
+ *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
+ uint64_t rodt_d1_r0 : 8; /**< Read ODT mask DIMM1, RANK0
+ if (RANK_ENA) then
+ RODT_D1_R0[2] must be 0
+ else
+ RODT_D1_R0[3:2,1] must be 0
+ *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
+ uint64_t rodt_d0_r1 : 8; /**< Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked
+ if (RANK_ENA) then
+ RODT_D0_R1[1] must be 0
+ else
+ RODT_D0_R1[3:0] is not used and MBZ
+ *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
+ uint64_t rodt_d0_r0 : 8; /**< Read ODT mask DIMM0, RANK0
+ if (RANK_ENA) then
+ RODT_D0_R0[0] must be 0
+ else
+ RODT_D0_R0[1:0,3] must be 0
+ *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
+#else
+ uint64_t rodt_d0_r0 : 8;
+ uint64_t rodt_d0_r1 : 8;
+ uint64_t rodt_d1_r0 : 8;
+ uint64_t rodt_d1_r1 : 8;
+ uint64_t rodt_d2_r0 : 8;
+ uint64_t rodt_d2_r1 : 8;
+ uint64_t rodt_d3_r0 : 8;
+ uint64_t rodt_d3_r1 : 8;
+#endif
+ } s;
+ struct cvmx_lmcx_rodt_mask_s cn61xx;
+ struct cvmx_lmcx_rodt_mask_s cn63xx;
+ struct cvmx_lmcx_rodt_mask_s cn63xxp1;
+ struct cvmx_lmcx_rodt_mask_s cn66xx;
+ struct cvmx_lmcx_rodt_mask_s cn68xx;
+ struct cvmx_lmcx_rodt_mask_s cn68xxp1;
+ struct cvmx_lmcx_rodt_mask_s cnf71xx;
+};
+typedef union cvmx_lmcx_rodt_mask cvmx_lmcx_rodt_mask_t;
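+
+/* Editor's note: worked example only, mirroring the scenario in the block
+ * comment above: on reads from rank 0, terminate on DIMM0/Rank1, so
+ * RODT_D0_R0 = 00000010 and all other masks stay 0 (per the DDR3 spec,
+ * the rank being read never terminates itself). */
+static inline uint64_t cvmx_lmcx_rodt_mask_example_sketch(void)
+{
+    cvmx_lmcx_rodt_mask_t m;
+    m.u64 = 0;
+    m.s.rodt_d0_r0 = 0x2; /* Mask[1] -> DIMM0_ODT_1 when RANK_ENA=1 */
+    return m.u64;
+}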
+
+/**
+ * cvmx_lmc#_scramble_cfg0
+ *
+ * LMC_SCRAMBLE_CFG0 = LMC Scramble Config0
+ *
+ */
+union cvmx_lmcx_scramble_cfg0 {
+ uint64_t u64;
+ struct cvmx_lmcx_scramble_cfg0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t key : 64; /**< Scramble Key for Data */
+#else
+ uint64_t key : 64;
+#endif
+ } s;
+ struct cvmx_lmcx_scramble_cfg0_s cn61xx;
+ struct cvmx_lmcx_scramble_cfg0_s cn66xx;
+ struct cvmx_lmcx_scramble_cfg0_s cnf71xx;
+};
+typedef union cvmx_lmcx_scramble_cfg0 cvmx_lmcx_scramble_cfg0_t;
+
+/**
+ * cvmx_lmc#_scramble_cfg1
+ *
+ * LMC_SCRAMBLE_CFG1 = LMC Scramble Config1
+ *
+ *
+ * Notes:
+ * Address scrambling usually maps addresses into the same rank. Exceptions are when LMC_NXM[CS_MASK] requires
+ * aliasing that uses the lowest, legal chip select(s).
+ */
+union cvmx_lmcx_scramble_cfg1 {
+ uint64_t u64;
+ struct cvmx_lmcx_scramble_cfg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t key : 64; /**< Scramble Key for Addresses */
+#else
+ uint64_t key : 64;
+#endif
+ } s;
+ struct cvmx_lmcx_scramble_cfg1_s cn61xx;
+ struct cvmx_lmcx_scramble_cfg1_s cn66xx;
+ struct cvmx_lmcx_scramble_cfg1_s cnf71xx;
+};
+typedef union cvmx_lmcx_scramble_cfg1 cvmx_lmcx_scramble_cfg1_t;
+
+/**
+ * cvmx_lmc#_scrambled_fadr
+ *
+ * LMC_SCRAMBLED_FADR = LMC Scrambled Failing Address Register (SEC/DED/NXM)
+ *
+ * This register only captures the first transaction with ECC/NXM errors. A DED/NXM error can
+ * overwrite this register with its failing address if the first error was a SEC. Writing
+ * LMC*_CONFIG[SEC_ERR/DED_ERR/NXM_ERR] clears the error bits and captures the
+ * next failing address.
+ *
+ * If FDIMM is 2, the error is in the DIMM that carries the upper data bits.
+ *
+ * Notes:
+ * LMC*_FADR captures the failing pre-scrambled address location (split into dimm, bunk, bank, etc). If
+ * scrambling is off, then LMC*_FADR will also capture the failing physical location in the DRAM parts.
+ *
+ * LMC*_SCRAMBLED_FADR captures the actual failing address location in the physical DRAM parts, i.e.,
+ * a. if scrambling is on, LMC*_SCRAMBLED_FADR contains the failing physical location in the DRAM parts (split
+ * into dimm, bunk, bank, etc)
+ * b. if scrambling is off, the pre-scramble and post-scramble addresses are the same, and so the contents of
+ * LMC*_SCRAMBLED_FADR match the contents of LMC*_FADR
+ */
+union cvmx_lmcx_scrambled_fadr {
+ uint64_t u64;
+ struct cvmx_lmcx_scrambled_fadr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t fdimm : 2; /**< Failing DIMM# */
+ uint64_t fbunk : 1; /**< Failing Rank */
+ uint64_t fbank : 3; /**< Failing Bank[2:0] */
+ uint64_t frow : 16; /**< Failing Row Address[15:0] */
+ uint64_t fcol : 14; /**< Failing Column Address[13:0]
+ Technically, represents the address of the 128b data
+ that had an ecc error, i.e., fcol[0] is always 0. Can
+ be used in conjunction with LMC*_CONFIG[DED_ERR] to
+ isolate the 64b chunk of data in error */
+#else
+ uint64_t fcol : 14;
+ uint64_t frow : 16;
+ uint64_t fbank : 3;
+ uint64_t fbunk : 1;
+ uint64_t fdimm : 2;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_lmcx_scrambled_fadr_s cn61xx;
+ struct cvmx_lmcx_scrambled_fadr_s cn66xx;
+ struct cvmx_lmcx_scrambled_fadr_s cnf71xx;
+};
+typedef union cvmx_lmcx_scrambled_fadr cvmx_lmcx_scrambled_fadr_t;
+
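+/* Illustrative sketch: decoding the failing address captured above. Assumes
+ * the CVMX_LMCX_SCRAMBLED_FADR address macro from earlier in this file,
+ * cvmx_read_csr(), and printf() for demonstration only.
+ */
+#if 0
+static inline void example_lmc_show_scrambled_fadr(int lmc)
+{
+ cvmx_lmcx_scrambled_fadr_t fadr;
+
+ fadr.u64 = cvmx_read_csr(CVMX_LMCX_SCRAMBLED_FADR(lmc));
+ /* FCOL addresses the failing 128b chunk, so FCOL[0] is always 0 */
+ printf("DIMM%d rank%d bank%d row 0x%x col 0x%x\n",
+ (int)fadr.s.fdimm, (int)fadr.s.fbunk, (int)fadr.s.fbank,
+ (unsigned)fadr.s.frow, (unsigned)fadr.s.fcol);
+}
+#endif
+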
+/**
+ * cvmx_lmc#_slot_ctl0
+ *
+ * LMC_SLOT_CTL0 = LMC Slot Control0
+ * This register is an assortment of various control fields needed by the memory controller
+ *
+ * Notes:
+ * If SW has not previously written to this register (since the last DRESET),
+ * HW updates the fields in this register to the minimum allowed value
+ * when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn, LMC*_CONTROL and
+ * LMC*_MODEREG_PARAMS0 CSR's change. Ideally, only read this register
+ * after LMC has been initialized and LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn
+ * have valid data.
+ *
+ * The interpretation of the fields in this CSR depends on LMC*_CONFIG[DDR2T]:
+ * - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the 1st and 2nd types
+ * from different cache blocks.
+ * - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the 1st and 2nd types
+ * from different cache blocks. FieldValue = 0 is always illegal in this
+ * case.
+ *
+ * The hardware-calculated minimums are:
+ *
+ * min R2R_INIT = 1 - LMC*_CONFIG[DDR2T]
+ * min R2W_INIT = 5 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew) - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
+ * min W2R_INIT = 2 - LMC*_CONFIG[DDR2T] + LMC*_TIMING_PARAMS1[TWTR] + WL
+ * min W2W_INIT = 1 - LMC*_CONFIG[DDR2T]
+ *
+ * where
+ *
+ * RL = CL + AL (LMC*_MODEREG_PARAMS0[CL] selects CL, LMC*_MODEREG_PARAMS0[AL] selects AL)
+ * WL = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
+ * MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1 (max is across all ranks i (0..3) and bytes j (0..8))
+ * MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] (min is across all ranks i (0..3) and bytes j (0..8))
+ *
+ * R2W_INIT has 1 CK cycle built in for OCTEON-internal ODT settling/channel turnaround time.
+ */
+union cvmx_lmcx_slot_ctl0 {
+ uint64_t u64;
+ struct cvmx_lmcx_slot_ctl0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t w2w_init : 6; /**< Write-to-write spacing control
+ for back to back write followed by write cache block
+ accesses to the same rank and DIMM */
+ uint64_t w2r_init : 6; /**< Write-to-read spacing control
+ for back to back write followed by read cache block
+ accesses to the same rank and DIMM */
+ uint64_t r2w_init : 6; /**< Read-to-write spacing control
+ for back to back read followed by write cache block
+ accesses to the same rank and DIMM */
+ uint64_t r2r_init : 6; /**< Read-to-read spacing control
+ for back to back read followed by read cache block
+ accesses to the same rank and DIMM */
+#else
+ uint64_t r2r_init : 6;
+ uint64_t r2w_init : 6;
+ uint64_t w2r_init : 6;
+ uint64_t w2w_init : 6;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_lmcx_slot_ctl0_s cn61xx;
+ struct cvmx_lmcx_slot_ctl0_s cn63xx;
+ struct cvmx_lmcx_slot_ctl0_s cn63xxp1;
+ struct cvmx_lmcx_slot_ctl0_s cn66xx;
+ struct cvmx_lmcx_slot_ctl0_s cn68xx;
+ struct cvmx_lmcx_slot_ctl0_s cn68xxp1;
+ struct cvmx_lmcx_slot_ctl0_s cnf71xx;
+};
+typedef union cvmx_lmcx_slot_ctl0 cvmx_lmcx_slot_ctl0_t;
+
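+/* Illustrative sketch: evaluating the hardware-calculated minimums from the
+ * notes above, given values the caller has already extracted from
+ * LMC*_CONFIG, LMC*_CONTROL, LMC*_TIMING_PARAMS1 and the leveling CSRs.
+ * All names are local to the example.
+ */
+#if 0
+static inline void example_lmc_slot_ctl0_mins(int ddr2t, int rl, int wl,
+ int max_rd_skew, int min_wr_skew, int twtr, int bprch,
+ int *r2r, int *r2w, int *w2r, int *w2w)
+{
+ *r2r = 1 - ddr2t; /* min R2R_INIT */
+ *r2w = 5 - ddr2t + (rl + max_rd_skew) - (wl + min_wr_skew) + bprch; /* min R2W_INIT */
+ *w2r = 2 - ddr2t + twtr + wl; /* min W2R_INIT */
+ *w2w = 1 - ddr2t; /* min W2W_INIT */
+}
+#endif
+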
+/**
+ * cvmx_lmc#_slot_ctl1
+ *
+ * LMC_SLOT_CTL1 = LMC Slot Control1
+ * This register is an assortment of various control fields needed by the memory controller
+ *
+ * Notes:
+ * If SW has not previously written to this register (since the last DRESET),
+ * HW updates the fields in this register to the minimum allowed value
+ * when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn, LMC*_CONTROL and
+ * LMC*_MODEREG_PARAMS0 CSR's change. Ideally, only read this register
+ * after LMC has been initialized and LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn
+ * have valid data.
+ *
+ * The interpretation of the fields in this CSR depends on LMC*_CONFIG[DDR2T]:
+ * - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the 1st and 2nd types
+ * from different cache blocks.
+ * - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the 1st and 2nd types
+ * from different cache blocks. FieldValue = 0 is always illegal in this
+ * case.
+ *
+ * The hardware-calculated minimums are:
+ *
+ * min R2R_XRANK_INIT = 2 - LMC*_CONFIG[DDR2T] + MaxRdSkew - MinRdSkew + LMC*_CONTROL[RODT_BPRCH]
+ * min R2W_XRANK_INIT = 5 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew) - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
+ * min W2R_XRANK_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxWrSkew + LMC*_CONTROL[FPRCH2]
+ * min W2W_XRANK_INIT = 4 - LMC*_CONFIG[DDR2T] + MaxWrSkew - MinWrSkew
+ *
+ * where
+ *
+ * RL = CL + AL (LMC*_MODEREG_PARAMS0[CL] selects CL, LMC*_MODEREG_PARAMS0[AL] selects AL)
+ * WL = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
+ * MinRdSkew = min(LMC*_RLEVEL_RANKi[BYTEj]/4) (min is across all ranks i (0..3) and bytes j (0..8))
+ * MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1 (max is across all ranks i (0..3) and bytes j (0..8))
+ * MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] (min is across all ranks i (0..3) and bytes j (0..8))
+ * MaxWrSkew = max(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] + 1 (max is across all ranks i (0..3) and bytes j (0..8))
+ *
+ * R2W_XRANK_INIT has 1 extra CK cycle built in for OCTEON-internal ODT settling/channel turnaround time.
+ *
+ * W2R_XRANK_INIT has 1 extra CK cycle built in for channel turnaround time.
+ */
+union cvmx_lmcx_slot_ctl1 {
+ uint64_t u64;
+ struct cvmx_lmcx_slot_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t w2w_xrank_init : 6; /**< Write-to-write spacing control
+ for back to back write followed by write cache block
+ accesses across ranks of the same DIMM */
+ uint64_t w2r_xrank_init : 6; /**< Write-to-read spacing control
+ for back to back write followed by read cache block
+ accesses across ranks of the same DIMM */
+ uint64_t r2w_xrank_init : 6; /**< Read-to-write spacing control
+ for back to back read followed by write cache block
+ accesses across ranks of the same DIMM */
+ uint64_t r2r_xrank_init : 6; /**< Read-to-read spacing control
+ for back to back read followed by read cache block
+ accesses across ranks of the same DIMM */
+#else
+ uint64_t r2r_xrank_init : 6;
+ uint64_t r2w_xrank_init : 6;
+ uint64_t w2r_xrank_init : 6;
+ uint64_t w2w_xrank_init : 6;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_lmcx_slot_ctl1_s cn61xx;
+ struct cvmx_lmcx_slot_ctl1_s cn63xx;
+ struct cvmx_lmcx_slot_ctl1_s cn63xxp1;
+ struct cvmx_lmcx_slot_ctl1_s cn66xx;
+ struct cvmx_lmcx_slot_ctl1_s cn68xx;
+ struct cvmx_lmcx_slot_ctl1_s cn68xxp1;
+ struct cvmx_lmcx_slot_ctl1_s cnf71xx;
+};
+typedef union cvmx_lmcx_slot_ctl1 cvmx_lmcx_slot_ctl1_t;
+
+/**
+ * cvmx_lmc#_slot_ctl2
+ *
+ * LMC_SLOT_CTL2 = LMC Slot Control2
+ * This register is an assortment of various control fields needed by the memory controller
+ *
+ * Notes:
+ * If SW has not previously written to this register (since the last DRESET),
+ * HW updates the fields in this register to the minimum allowed value
+ * when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn, LMC*_CONTROL and
+ * LMC*_MODEREG_PARAMS0 CSR's change. Ideally, only read this register
+ * after LMC has been initialized and LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn
+ * have valid data.
+ *
+ * The interpretation of the fields in this CSR depends on LMC*_CONFIG[DDR2T]:
+ * - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the 1st and 2nd types
+ * from different cache blocks.
+ * - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the 1st and 2nd types
+ * from different cache blocks. FieldValue = 0 is always illegal in this
+ * case.
+ *
+ * The hardware-calculated minimums are:
+ *
+ * min R2R_XDIMM_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxRdSkew - MinRdSkew + LMC*_CONTROL[RODT_BPRCH]
+ * min R2W_XDIMM_INIT = 6 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew) - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
+ * min W2R_XDIMM_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxWrSkew + LMC*_CONTROL[FPRCH2]
+ * min W2W_XDIMM_INIT = 5 - LMC*_CONFIG[DDR2T] + MaxWrSkew - MinWrSkew
+ *
+ * where
+ *
+ * RL = CL + AL (LMC*_MODEREG_PARAMS0[CL] selects CL, LMC*_MODEREG_PARAMS0[AL] selects AL)
+ * WL = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
+ * MinRdSkew = min(LMC*_RLEVEL_RANKi[BYTEj]/4) (min is across all ranks i (0..3) and bytes j (0..8))
+ * MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1 (max is across all ranks i (0..3) and bytes j (0..8))
+ * MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] (min is across all ranks i (0..3) and bytes j (0..8))
+ * MaxWrSkew = max(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] + 1 (max is across all ranks i (0..3) and bytes j (0..8))
+ *
+ * R2W_XDIMM_INIT has 2 extra CK cycles built in for OCTEON-internal ODT settling/channel turnaround time.
+ *
+ * R2R_XDIMM_INIT, W2R_XDIMM_INIT, W2W_XDIMM_INIT have 1 extra CK cycle built in for channel turnaround time.
+ */
+union cvmx_lmcx_slot_ctl2 {
+ uint64_t u64;
+ struct cvmx_lmcx_slot_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t w2w_xdimm_init : 6; /**< Write-to-write spacing control
+ for back to back write followed by write cache block
+ accesses across DIMMs */
+ uint64_t w2r_xdimm_init : 6; /**< Write-to-read spacing control
+ for back to back write followed by read cache block
+ accesses across DIMMs */
+ uint64_t r2w_xdimm_init : 6; /**< Read-to-write spacing control
+ for back to back read followed by write cache block
+ accesses across DIMMs */
+ uint64_t r2r_xdimm_init : 6; /**< Read-to-read spacing control
+ for back to back read followed by read cache block
+ accesses across DIMMs */
+#else
+ uint64_t r2r_xdimm_init : 6;
+ uint64_t r2w_xdimm_init : 6;
+ uint64_t w2r_xdimm_init : 6;
+ uint64_t w2w_xdimm_init : 6;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_lmcx_slot_ctl2_s cn61xx;
+ struct cvmx_lmcx_slot_ctl2_s cn63xx;
+ struct cvmx_lmcx_slot_ctl2_s cn63xxp1;
+ struct cvmx_lmcx_slot_ctl2_s cn66xx;
+ struct cvmx_lmcx_slot_ctl2_s cn68xx;
+ struct cvmx_lmcx_slot_ctl2_s cn68xxp1;
+ struct cvmx_lmcx_slot_ctl2_s cnf71xx;
+};
+typedef union cvmx_lmcx_slot_ctl2 cvmx_lmcx_slot_ctl2_t;
+
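+/* Illustrative sketch: the Min/MaxRdSkew and Min/MaxWrSkew terms shared by
+ * all three LMC_SLOT_CTL* notes, computed from per-byte LMC*_RLEVEL_RANKi /
+ * LMC*_WLEVEL_RANKi settings the caller has already read out. early_dqx is
+ * LMC*_CONFIG[EARLY_DQX]; INT_MAX/INT_MIN come from <limits.h>.
+ */
+#if 0
+static inline void example_lmc_skews(const int rlevel_byte[4][9],
+ const int wlevel_byte[4][9], int num_ranks, int early_dqx,
+ int *min_rd, int *max_rd, int *min_wr, int *max_wr)
+{
+ int i, j;
+
+ *min_rd = *min_wr = INT_MAX;
+ *max_rd = *max_wr = INT_MIN;
+ for (i = 0; i < num_ranks; i++)
+ for (j = 0; j < 9; j++) {
+ int rd = rlevel_byte[i][j] / 4;
+ int wr = wlevel_byte[i][j] / 8 - early_dqx;
+ if (rd < *min_rd) *min_rd = rd;
+ if (rd + 1 > *max_rd) *max_rd = rd + 1; /* MaxRdSkew = max(...)+1 */
+ if (wr < *min_wr) *min_wr = wr;
+ if (wr + 1 > *max_wr) *max_wr = wr + 1; /* MaxWrSkew = max(...)+1 */
+ }
+}
+#endif
+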
+/**
+ * cvmx_lmc#_timing_params0
+ */
+union cvmx_lmcx_timing_params0 {
+ uint64_t u64;
+ struct cvmx_lmcx_timing_params0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t trp_ext : 1; /**< Indicates tRP constraints.
+ Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
+ + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
+ where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP tRP=10-15ns
+ TYP tRTP=max(4nCK, 7.5ns) */
+ uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
+ Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
+ where tCKSRE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, 10ns) */
+ uint64_t trp : 4; /**< Indicates tRP constraints.
+ Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
+ + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
+ where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP tRP=10-15ns
+ TYP tRTP=max(4nCK, 7.5ns) */
+ uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
+ Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
+ where tZQINIT is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512) */
+ uint64_t tdllk : 4; /**< Indicates tDLLK constraints.
+ Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
+ where tDLLK is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512)
+ This parameter is used in self-refresh exit
+ and assumed to be greater than tRFC */
+ uint64_t tmod : 4; /**< Indicates tMOD constraints.
+ Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
+ where tMOD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(12nCK, 15ns) */
+ uint64_t tmrd : 4; /**< Indicates tMRD constraints.
+ Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
+ where tMRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4nCK */
+ uint64_t txpr : 4; /**< Indicates tXPR constraints.
+ Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
+ where tXPR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, tRFC+10ns) */
+ uint64_t tcke : 4; /**< Indicates tCKE constraints.
+ Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
+ where tCKE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
+ uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
+ Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
+ where tZQCS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4 (equivalent to 64) */
+ uint64_t tckeon : 10; /**< Reserved. Should be written to zero. */
+#else
+ uint64_t tckeon : 10;
+ uint64_t tzqcs : 4;
+ uint64_t tcke : 4;
+ uint64_t txpr : 4;
+ uint64_t tmrd : 4;
+ uint64_t tmod : 4;
+ uint64_t tdllk : 4;
+ uint64_t tzqinit : 4;
+ uint64_t trp : 4;
+ uint64_t tcksre : 4;
+ uint64_t trp_ext : 1;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_lmcx_timing_params0_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t trp_ext : 1; /**< Indicates tRP constraints.
+ Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
+ + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
+ where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP tRP=10-15ns
+ TYP tRTP=max(4nCK, 7.5ns) */
+ uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
+ Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
+ where tCKSRE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, 10ns) */
+ uint64_t trp : 4; /**< Indicates tRP constraints.
+ Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
+ + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
+ where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP tRP=10-15ns
+ TYP tRTP=max(4nCK, 7.5ns) */
+ uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
+ Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
+ where tZQINIT is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512) */
+ uint64_t tdllk : 4; /**< Indicates tDLLK constraints.
+ Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
+ where tDLLK is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512)
+ This parameter is used in self-refresh exit
+ and assumed to be greater than tRFC */
+ uint64_t tmod : 4; /**< Indicates tMOD constraints.
+ Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
+ where tMOD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(12nCK, 15ns) */
+ uint64_t tmrd : 4; /**< Indicates tMRD constraints.
+ Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
+ where tMRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4nCK */
+ uint64_t txpr : 4; /**< Indicates tXPR constraints.
+ Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
+ where tXPR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, tRFC+10ns) */
+ uint64_t tcke : 4; /**< Indicates tCKE constraints.
+ Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
+ where tCKE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
+ uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
+ Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
+ where tZQCS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4 (equivalent to 64) */
+ uint64_t reserved_0_9 : 10;
+#else
+ uint64_t reserved_0_9 : 10;
+ uint64_t tzqcs : 4;
+ uint64_t tcke : 4;
+ uint64_t txpr : 4;
+ uint64_t tmrd : 4;
+ uint64_t tmod : 4;
+ uint64_t tdllk : 4;
+ uint64_t tzqinit : 4;
+ uint64_t trp : 4;
+ uint64_t tcksre : 4;
+ uint64_t trp_ext : 1;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } cn61xx;
+ struct cvmx_lmcx_timing_params0_cn61xx cn63xx;
+ struct cvmx_lmcx_timing_params0_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
+ Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
+ where tCKSRE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, 10ns) */
+ uint64_t trp : 4; /**< Indicates tRP constraints.
+ Set TRP (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
+ + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
+ where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP tRP=10-15ns
+ TYP tRTP=max(4nCK, 7.5ns) */
+ uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
+ Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
+ where tZQINIT is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512) */
+ uint64_t tdllk : 4; /**< Indicates tDLLK constraints.
+ Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
+ where tDLLK is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=2 (equivalent to 512)
+ This parameter is used in self-refresh exit
+ and assumed to be greater than tRFC */
+ uint64_t tmod : 4; /**< Indicates tMOD constraints.
+ Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
+ where tMOD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(12nCK, 15ns) */
+ uint64_t tmrd : 4; /**< Indicates tMRD constraints.
+ Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
+ where tMRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4nCK */
+ uint64_t txpr : 4; /**< Indicates tXPR constraints.
+ Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
+ where tXPR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(5nCK, tRFC+10ns) */
+ uint64_t tcke : 4; /**< Indicates tCKE constraints.
+ Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
+ where tCKE is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
+ uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
+ Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
+ where tZQCS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=4 (equivalent to 64) */
+ uint64_t tckeon : 10; /**< Reserved. Should be written to zero. */
+#else
+ uint64_t tckeon : 10;
+ uint64_t tzqcs : 4;
+ uint64_t tcke : 4;
+ uint64_t txpr : 4;
+ uint64_t tmrd : 4;
+ uint64_t tmod : 4;
+ uint64_t tdllk : 4;
+ uint64_t tzqinit : 4;
+ uint64_t trp : 4;
+ uint64_t tcksre : 4;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_timing_params0_cn61xx cn66xx;
+ struct cvmx_lmcx_timing_params0_cn61xx cn68xx;
+ struct cvmx_lmcx_timing_params0_cn61xx cn68xxp1;
+ struct cvmx_lmcx_timing_params0_cn61xx cnf71xx;
+};
+typedef union cvmx_lmcx_timing_params0 cvmx_lmcx_timing_params0_t;
+
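+/* Illustrative sketch: the RNDUP[...] recipes above, shown for two
+ * representative fields. Times are passed in picoseconds so plain integer
+ * math can be used; the ns-based formulas above give the same ratios.
+ * All names are local to the example.
+ */
+#if 0
+static inline uint64_t example_rndup(uint64_t num_ps, uint64_t den_ps)
+{
+ return (num_ps + den_ps - 1) / den_ps; /* RNDUP[num/den] */
+}
+
+static inline void example_timing_params0(uint64_t tcyc_ps, uint64_t tzqcs_ps,
+ uint64_t tcke_ps, cvmx_lmcx_timing_params0_t *tp0)
+{
+ /* TZQCS = RNDUP[tZQCS(ns)/(16*tCYC(ns))] */
+ tp0->s.tzqcs = example_rndup(tzqcs_ps, 16 * tcyc_ps);
+ /* TCKE = RNDUP[tCKE(ns)/tCYC(ns)] - 1 */
+ tp0->s.tcke = example_rndup(tcke_ps, tcyc_ps) - 1;
+}
+#endif
+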
+/**
+ * cvmx_lmc#_timing_params1
+ */
+union cvmx_lmcx_timing_params1 {
+ uint64_t u64;
+ struct cvmx_lmcx_timing_params1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t tras_ext : 1; /**< Indicates tRAS constraints.
+ Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
+ where tRAS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=35ns-9*tREFI
+ - 000000: RESERVED
+ - 000001: 2 tCYC
+ - 000010: 3 tCYC
+ - ...
+ - 111111: 64 tCYC */
+ uint64_t txpdll : 5; /**< Indicates tXPDLL constraints.
+ Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
+ where tXPDLL is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(10nCK, 24ns) */
+ uint64_t tfaw : 5; /**< Indicates tFAW constraints.
+ Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
+ where tFAW is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=30-40ns */
+ uint64_t twldqsen : 4; /**< Indicates tWLDQSEN constraints.
+ Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
+ where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(25nCK) */
+ uint64_t twlmrd : 4; /**< Indicates tWLMRD constraints.
+ Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
+ where tWLMRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(40nCK) */
+ uint64_t txp : 3; /**< Indicates tXP constraints.
+ Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
+ where tXP is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(3nCK, 7.5ns) */
+ uint64_t trrd : 3; /**< Indicates tRRD constraints.
+ Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
+ where tRRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(4nCK, 10ns)
+ - 000: RESERVED
+ - 001: 3 tCYC
+ - ...
+ - 110: 8 tCYC
+ - 111: 9 tCYC */
+ uint64_t trfc : 5; /**< Indicates tRFC constraints.
+ Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
+ where tRFC is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=90-350ns
+ - 00000: RESERVED
+ - 00001: 8 tCYC
+ - 00010: 16 tCYC
+ - 00011: 24 tCYC
+ - 00100: 32 tCYC
+ - ...
+ - 11110: 240 tCYC
+ - 11111: 248 tCYC */
+ uint64_t twtr : 4; /**< Indicates tWTR constraints.
+ Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
+ where tWTR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(4nCK, 7.5ns)
+ - 0000: RESERVED
+ - 0001: 2
+ - ...
+ - 0111: 8
+ - 1000-1111: RESERVED */
+ uint64_t trcd : 4; /**< Indicates tRCD constraints.
+ Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
+ where tRCD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=10-15ns
+ - 0000: RESERVED
+ - 0001: 2 (2 is the smallest value allowed)
+ - 0010: 2
+ - ...
+ - 1110: 14
+ - 1111: RESERVED
+ In 2T mode, make this register TRCD-1, not going
+ below 2. */
+ uint64_t tras : 5; /**< Indicates tRAS constraints.
+ Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
+ where tRAS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=35ns-9*tREFI
+ - 000000: RESERVED
+ - 000001: 2 tCYC
+ - 000010: 3 tCYC
+ - ...
+ - 111111: 64 tCYC */
+ uint64_t tmprr : 4; /**< Indicates tMPRR constraints.
+ Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
+ where tMPRR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=1nCK */
+#else
+ uint64_t tmprr : 4;
+ uint64_t tras : 5;
+ uint64_t trcd : 4;
+ uint64_t twtr : 4;
+ uint64_t trfc : 5;
+ uint64_t trrd : 3;
+ uint64_t txp : 3;
+ uint64_t twlmrd : 4;
+ uint64_t twldqsen : 4;
+ uint64_t tfaw : 5;
+ uint64_t txpdll : 5;
+ uint64_t tras_ext : 1;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_lmcx_timing_params1_s cn61xx;
+ struct cvmx_lmcx_timing_params1_s cn63xx;
+ struct cvmx_lmcx_timing_params1_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t txpdll : 5; /**< Indicates tXPDLL constraints.
+ Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
+ where tXPDLL is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(10nCK, 24ns) */
+ uint64_t tfaw : 5; /**< Indicates tFAW constraints.
+ Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
+ where tFAW is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=30-40ns */
+ uint64_t twldqsen : 4; /**< Indicates tWLDQSEN constraints.
+ Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
+ where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(25nCK) */
+ uint64_t twlmrd : 4; /**< Indicates tWLMRD constraints.
+ Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
+ where tWLMRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(40nCK) */
+ uint64_t txp : 3; /**< Indicates tXP constraints.
+ Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
+ where tXP is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(3nCK, 7.5ns) */
+ uint64_t trrd : 3; /**< Indicates tRRD constraints.
+ Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
+ where tRRD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(4nCK, 10ns)
+ - 000: RESERVED
+ - 001: 3 tCYC
+ - ...
+ - 110: 8 tCYC
+ - 111: 9 tCYC */
+ uint64_t trfc : 5; /**< Indicates tRFC constraints.
+ Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
+ where tRFC is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=90-350ns
+ - 00000: RESERVED
+ - 00001: 8 tCYC
+ - 00010: 16 tCYC
+ - 00011: 24 tCYC
+ - 00100: 32 tCYC
+ - ...
+ - 11110: 240 tCYC
+ - 11111: 248 tCYC */
+ uint64_t twtr : 4; /**< Indicates tWTR constraints.
+ Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
+ where tWTR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=max(4nCK, 7.5ns)
+ - 0000: RESERVED
+ - 0001: 2
+ - ...
+ - 0111: 8
+ - 1000-1111: RESERVED */
+ uint64_t trcd : 4; /**< Indicates tRCD constraints.
+ Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
+ where tRCD is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=10-15ns
+ - 0000: RESERVED
+ - 0001: 2 (2 is the smallest value allowed)
+ - 0010: 2
+ - ...
+ - 1001: 9
+ - 1010-1111: RESERVED
+ In 2T mode, make this register TRCD-1, not going
+ below 2. */
+ uint64_t tras : 5; /**< Indicates tRAS constraints.
+ Set TRAS (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
+ where tRAS is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=35ns-9*tREFI
+ - 00000: RESERVED
+ - 00001: 2 tCYC
+ - 00010: 3 tCYC
+ - ...
+ - 11111: 32 tCYC */
+ uint64_t tmprr : 4; /**< Indicates tMPRR constraints.
+ Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
+ where tMPRR is from the DDR3 spec, and tCYC(ns)
+ is the DDR clock frequency (not data rate).
+ TYP=1nCK */
+#else
+ uint64_t tmprr : 4;
+ uint64_t tras : 5;
+ uint64_t trcd : 4;
+ uint64_t twtr : 4;
+ uint64_t trfc : 5;
+ uint64_t trrd : 3;
+ uint64_t txp : 3;
+ uint64_t twlmrd : 4;
+ uint64_t twldqsen : 4;
+ uint64_t tfaw : 5;
+ uint64_t txpdll : 5;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_timing_params1_s cn66xx;
+ struct cvmx_lmcx_timing_params1_s cn68xx;
+ struct cvmx_lmcx_timing_params1_s cn68xxp1;
+ struct cvmx_lmcx_timing_params1_s cnf71xx;
+};
+typedef union cvmx_lmcx_timing_params1 cvmx_lmcx_timing_params1_t;
+
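+/* Illustrative sketch: two LMC_TIMING_PARAMS1 recipes whose scaling differs
+ * from the per-CK pattern, using the same picosecond convention (and the
+ * example_rndup() helper) as the LMC_TIMING_PARAMS0 sketch above.
+ */
+#if 0
+static inline void example_timing_params1(uint64_t tcyc_ps, uint64_t tfaw_ps,
+ uint64_t trrd_ps, cvmx_lmcx_timing_params1_t *tp1)
+{
+ /* TFAW = RNDUP[tFAW(ns)/(4*tCYC(ns))] */
+ tp1->s.tfaw = example_rndup(tfaw_ps, 4 * tcyc_ps);
+ /* TRRD = RNDUP[tRRD(ns)/tCYC(ns)] - 2 (encoding 001 = 3 tCYC) */
+ tp1->s.trrd = example_rndup(trrd_ps, tcyc_ps) - 2;
+}
+#endif
+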
+/**
+ * cvmx_lmc#_tro_ctl
+ *
+ * LMC_TRO_CTL = LMC Temperature Ring Osc Control
+ * This register is an assortment of various control fields needed to control the temperature ring oscillator
+ *
+ * Notes:
+ * To bring up the temperature ring oscillator, write TRESET to 0, then initialize RCLK_CNT to the desired
+ * value.
+ */
+union cvmx_lmcx_tro_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_tro_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t rclk_cnt : 32; /**< rclk counter */
+ uint64_t treset : 1; /**< Reset ring oscillator */
+#else
+ uint64_t treset : 1;
+ uint64_t rclk_cnt : 32;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_lmcx_tro_ctl_s cn61xx;
+ struct cvmx_lmcx_tro_ctl_s cn63xx;
+ struct cvmx_lmcx_tro_ctl_s cn63xxp1;
+ struct cvmx_lmcx_tro_ctl_s cn66xx;
+ struct cvmx_lmcx_tro_ctl_s cn68xx;
+ struct cvmx_lmcx_tro_ctl_s cn68xxp1;
+ struct cvmx_lmcx_tro_ctl_s cnf71xx;
+};
+typedef union cvmx_lmcx_tro_ctl cvmx_lmcx_tro_ctl_t;
+
+/**
+ * cvmx_lmc#_tro_stat
+ *
+ * LMC_TRO_STAT = LMC Temperature Ring Osc Status
+ * This register reports the status of the temperature ring oscillator
+ */
+union cvmx_lmcx_tro_stat {
+ uint64_t u64;
+ struct cvmx_lmcx_tro_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ring_cnt : 32; /**< ring counter */
+#else
+ uint64_t ring_cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_tro_stat_s cn61xx;
+ struct cvmx_lmcx_tro_stat_s cn63xx;
+ struct cvmx_lmcx_tro_stat_s cn63xxp1;
+ struct cvmx_lmcx_tro_stat_s cn66xx;
+ struct cvmx_lmcx_tro_stat_s cn68xx;
+ struct cvmx_lmcx_tro_stat_s cn68xxp1;
+ struct cvmx_lmcx_tro_stat_s cnf71xx;
+};
+typedef union cvmx_lmcx_tro_stat cvmx_lmcx_tro_stat_t;
+
+/**
+ * cvmx_lmc#_wlevel_ctl
+ */
+union cvmx_lmcx_wlevel_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_wlevel_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t rtt_nom : 3; /**< RTT_NOM
+ LMC writes a decoded value to MR1[Rtt_Nom] of the rank during
+ write leveling. Per JEDEC DDR3 specifications,
+ only values MR1[Rtt_Nom] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6)
+ are allowed during write leveling with output buffer enabled.
+ 000 : LMC writes 001 (RZQ/4) to MR1[Rtt_Nom]
+ 001 : LMC writes 010 (RZQ/2) to MR1[Rtt_Nom]
+ 010 : LMC writes 011 (RZQ/6) to MR1[Rtt_Nom]
+ 011 : LMC writes 100 (RZQ/12) to MR1[Rtt_Nom]
+ 100 : LMC writes 101 (RZQ/8) to MR1[Rtt_Nom]
+ 101 : LMC writes 110 (Rsvd) to MR1[Rtt_Nom]
+ 110 : LMC writes 111 (Rsvd) to MR1[Rtt_Nom]
+ 111 : LMC writes 000 (Disabled) to MR1[Rtt_Nom] */
+ uint64_t bitmask : 8; /**< Mask to select bit lanes on which write-leveling
+ feedback is returned when OR_DIS is set to 1 */
+ uint64_t or_dis : 1; /**< Disable or'ing of bits in a byte lane when computing
+ the write-leveling bitmask */
+ uint64_t sset : 1; /**< Run write-leveling on the current setting only. */
+ uint64_t lanemask : 9; /**< One-hot mask to select byte lane to be leveled by
+ the write-leveling sequence
+ Used with x16 parts where the upper and lower byte
+ lanes need to be leveled independently */
+#else
+ uint64_t lanemask : 9;
+ uint64_t sset : 1;
+ uint64_t or_dis : 1;
+ uint64_t bitmask : 8;
+ uint64_t rtt_nom : 3;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_lmcx_wlevel_ctl_s cn61xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn63xx;
+ struct cvmx_lmcx_wlevel_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t sset : 1; /**< Run write-leveling on the current setting only. */
+ uint64_t lanemask : 9; /**< One-hot mask to select byte lane to be leveled by
+ the write-leveling sequence
+ Used with x16 parts where the upper and lower byte
+ lanes need to be leveled independently */
+#else
+ uint64_t lanemask : 9;
+ uint64_t sset : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_wlevel_ctl_s cn66xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn68xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn68xxp1;
+ struct cvmx_lmcx_wlevel_ctl_s cnf71xx;
+};
+typedef union cvmx_lmcx_wlevel_ctl cvmx_lmcx_wlevel_ctl_t;
+
+/**
+ * cvmx_lmc#_wlevel_dbg
+ *
+ * Notes:
+ * A given read of LMC*_WLEVEL_DBG returns the write-leveling pass/fail results for all possible
+ * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW write-leveled.
+ * LMC*_WLEVEL_DBG[BYTE] selects the particular byte.
+ * To get these pass/fail results for a different rank, you must run the hardware write-leveling
+ * again. For example, it is possible to get the BITMASK results for every byte of every rank
+ * if you run write-leveling separately for each rank, probing LMC*_WLEVEL_DBG between each
+ * write-leveling.
+ */
+union cvmx_lmcx_wlevel_dbg {
+ uint64_t u64;
+ struct cvmx_lmcx_wlevel_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t bitmask : 8; /**< Bitmask generated during deskew settings sweep
+ if LMCX_WLEVEL_CTL[SSET]=0
+ BITMASK[n]=0 means deskew setting n failed
+ BITMASK[n]=1 means deskew setting n passed
+ for 0 <= n <= 7
+ BITMASK contains the first 8 results of the total 16
+ collected by LMC during the write-leveling sequence
+ else if LMCX_WLEVEL_CTL[SSET]=1
+ BITMASK[0]=0 means curr deskew setting failed
+ BITMASK[0]=1 means curr deskew setting passed */
+ uint64_t byte : 4; /**< 0 <= BYTE <= 8 */
+#else
+ uint64_t byte : 4;
+ uint64_t bitmask : 8;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_lmcx_wlevel_dbg_s cn61xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn63xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn63xxp1;
+ struct cvmx_lmcx_wlevel_dbg_s cn66xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn68xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn68xxp1;
+ struct cvmx_lmcx_wlevel_dbg_s cnf71xx;
+};
+typedef union cvmx_lmcx_wlevel_dbg cvmx_lmcx_wlevel_dbg_t;
+
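+/* Illustrative sketch: after a write-leveling run, selecting each byte via
+ * LMC*_WLEVEL_DBG[BYTE] and reading back its BITMASK for the last rank
+ * leveled. Assumes the CVMX_LMCX_WLEVEL_DBG address macro from earlier in
+ * this file and the cvmx_read_csr()/cvmx_write_csr() accessors.
+ */
+#if 0
+static inline void example_lmc_dump_wlevel_bitmasks(int lmc, uint8_t bitmask[9])
+{
+ int byte;
+ cvmx_lmcx_wlevel_dbg_t dbg;
+
+ for (byte = 0; byte <= 8; byte++) {
+ dbg.u64 = 0;
+ dbg.s.byte = byte; /* select the byte lane */
+ cvmx_write_csr(CVMX_LMCX_WLEVEL_DBG(lmc), dbg.u64);
+ dbg.u64 = cvmx_read_csr(CVMX_LMCX_WLEVEL_DBG(lmc));
+ bitmask[byte] = dbg.s.bitmask; /* pass/fail per delay setting */
+ }
+}
+#endif
+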
+/**
+ * cvmx_lmc#_wlevel_rank#
+ *
+ * Notes:
+ * There are four of these CSRs per LMC, one per rank.
+ *
+ * Deskew setting is measured in units of 1/8 CK, so the above BYTE* values can range over 4 CKs.
+ *
+ * Assuming LMC*_WLEVEL_CTL[SSET]=0, the BYTE*<2:0> values are not used during write-leveling, and
+ * they are over-written by the hardware as part of the write-leveling sequence. (HW sets STATUS==3
+ * after HW write-leveling completes for the rank). SW needs to set BYTE*<4:3> bits.
+ *
+ * Each CSR may also be written by SW, but not while a write-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
+ *
+ * SW initiates a HW write-leveling sequence by programming LMC*_WLEVEL_CTL and writing RANKMASK and INIT_START=1 with SEQUENCE=6 in LMC*_CONFIG.
+ * LMC will then step through and accumulate write leveling results for 8 unique delay settings (twice), starting at a delay of
+ * LMC*_WLEVEL_RANKn[BYTE*<4:3>]*8 CK increasing by 1/8 CK each setting. HW will then set LMC*_WLEVEL_RANKi[BYTE*<2:0>] to indicate the
+ * first write leveling result of '1' that followed a result of '0' during the sequence by searching for a '1100' pattern in the generated
+ * bitmask, except that LMC will always write LMC*_WLEVEL_RANKi[BYTE*<0>]=0. If HW is unable to find a match for a '1100' pattern, then HW will
+ * set LMC*_WLEVEL_RANKi[BYTE*<2:0>] to 4.
+ * See LMC*_WLEVEL_CTL.
+ *
+ * LMC*_WLEVEL_RANKi values for ranks i without attached DRAM should be set such that
+ * they do not increase the range of possible BYTE values for any byte
+ * lane. The easiest way to do this is to set
+ * LMC*_WLEVEL_RANKi = LMC*_WLEVEL_RANKj,
+ * where j is some rank with attached DRAM whose LMC*_WLEVEL_RANKj is already fully initialized.
+ */
+union cvmx_lmcx_wlevel_rankx {
+ uint64_t u64;
+ struct cvmx_lmcx_wlevel_rankx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t status : 2; /**< Indicates status of the write-leveling and where
+ the BYTE* programmings in <44:0> came from:
+ 0 = BYTE* values are their reset value
+ 1 = BYTE* values were set via a CSR write to this register
+ 2 = write-leveling sequence currently in progress (BYTE* values are unpredictable)
+ 3 = BYTE* values came from a complete write-leveling sequence, irrespective of
+ which lanes are masked via LMC*WLEVEL_CTL[LANEMASK] */
+ uint64_t byte8 : 5; /**< Deskew setting
+ Bit 0 of BYTE8 must be zero during normal operation.
+ When ECC DRAM is not present (i.e. when DRAM is not
+ attached to chip signals DDR_CBS_0_* and DDR_CB[7:0]),
+ SW should write BYTE8 with a value that does
+ not increase the range of possible BYTE* values. The
+ easiest way to do this is to set
+ LMC*_WLEVEL_RANK*[BYTE8] = LMC*_WLEVEL_RANK*[BYTE0]
+ when there is no ECC DRAM, using the final BYTE0 value. */
+ uint64_t byte7 : 5; /**< Deskew setting
+ Bit 0 of BYTE7 must be zero during normal operation */
+ uint64_t byte6 : 5; /**< Deskew setting
+ Bit 0 of BYTE6 must be zero during normal operation */
+ uint64_t byte5 : 5; /**< Deskew setting
+ Bit 0 of BYTE5 must be zero during normal operation */
+ uint64_t byte4 : 5; /**< Deskew setting
+ Bit 0 of BYTE4 must be zero during normal operation */
+ uint64_t byte3 : 5; /**< Deskew setting
+ Bit 0 of BYTE3 must be zero during normal operation */
+ uint64_t byte2 : 5; /**< Deskew setting
+ Bit 0 of BYTE2 must be zero during normal operation */
+ uint64_t byte1 : 5; /**< Deskew setting
+ Bit 0 of BYTE1 must be zero during normal operation */
+ uint64_t byte0 : 5; /**< Deskew setting
+ Bit 0 of BYTE0 must be zero during normal operation */
+#else
+ uint64_t byte0 : 5;
+ uint64_t byte1 : 5;
+ uint64_t byte2 : 5;
+ uint64_t byte3 : 5;
+ uint64_t byte4 : 5;
+ uint64_t byte5 : 5;
+ uint64_t byte6 : 5;
+ uint64_t byte7 : 5;
+ uint64_t byte8 : 5;
+ uint64_t status : 2;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_lmcx_wlevel_rankx_s cn61xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn63xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn63xxp1;
+ struct cvmx_lmcx_wlevel_rankx_s cn66xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn68xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn68xxp1;
+ struct cvmx_lmcx_wlevel_rankx_s cnf71xx;
+};
+typedef union cvmx_lmcx_wlevel_rankx cvmx_lmcx_wlevel_rankx_t;
+
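+/* Illustrative sketch: initializing the WLEVEL CSR of a rank with no
+ * attached DRAM by copying a fully leveled rank, per the note above. The
+ * (rank, lmc) argument order of the CVMX_LMCX_WLEVEL_RANKX address macro
+ * follows the usual (offset, block_id) convention of these headers and is
+ * an assumption here.
+ */
+#if 0
+static inline void example_lmc_copy_wlevel(int lmc, int unused_rank,
+ int leveled_rank)
+{
+ uint64_t v = cvmx_read_csr(CVMX_LMCX_WLEVEL_RANKX(leveled_rank, lmc));
+ cvmx_write_csr(CVMX_LMCX_WLEVEL_RANKX(unused_rank, lmc), v);
+}
+#endif
+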
+/**
+ * cvmx_lmc#_wodt_ctl0
+ *
+ * LMC_WODT_CTL0 = LMC Write OnDieTermination control
+ * See the description in LMC_WODT_CTL1.
+ *
+ * Notes:
+ * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write ODT mask. See LMC_WODT_CTL1.
+ *
+ */
+union cvmx_lmcx_wodt_ctl0 {
+ uint64_t u64;
+ struct cvmx_lmcx_wodt_ctl0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wodt_d1_r1 : 8; /**< Write ODT mask DIMM1, RANK1 */
+ uint64_t wodt_d1_r0 : 8; /**< Write ODT mask DIMM1, RANK0 */
+ uint64_t wodt_d0_r1 : 8; /**< Write ODT mask DIMM0, RANK1 */
+ uint64_t wodt_d0_r0 : 8; /**< Write ODT mask DIMM0, RANK0 */
+#else
+ uint64_t wodt_d0_r0 : 8;
+ uint64_t wodt_d0_r1 : 8;
+ uint64_t wodt_d1_r0 : 8;
+ uint64_t wodt_d1_r1 : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn31xx;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wodt_hi3 : 4; /**< Write ODT mask for position 3, data[127:64] */
+ uint64_t wodt_hi2 : 4; /**< Write ODT mask for position 2, data[127:64] */
+ uint64_t wodt_hi1 : 4; /**< Write ODT mask for position 1, data[127:64] */
+ uint64_t wodt_hi0 : 4; /**< Write ODT mask for position 0, data[127:64] */
+ uint64_t wodt_lo3 : 4; /**< Write ODT mask for position 3, data[ 63: 0] */
+ uint64_t wodt_lo2 : 4; /**< Write ODT mask for position 2, data[ 63: 0] */
+ uint64_t wodt_lo1 : 4; /**< Write ODT mask for position 1, data[ 63: 0] */
+ uint64_t wodt_lo0 : 4; /**< Write ODT mask for position 0, data[ 63: 0] */
+#else
+ uint64_t wodt_lo0 : 4;
+ uint64_t wodt_lo1 : 4;
+ uint64_t wodt_lo2 : 4;
+ uint64_t wodt_lo3 : 4;
+ uint64_t wodt_hi0 : 4;
+ uint64_t wodt_hi1 : 4;
+ uint64_t wodt_hi2 : 4;
+ uint64_t wodt_hi3 : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn38xx;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn38xxp2;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn50xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xxp1;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xxp1;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xx;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xxp1;
+};
+typedef union cvmx_lmcx_wodt_ctl0 cvmx_lmcx_wodt_ctl0_t;
+
+/**
+ * cvmx_lmc#_wodt_ctl1
+ *
+ * LMC_WODT_CTL1 = LMC Write OnDieTermination control
+ * System designers may desire to terminate DQ/DQS/DM lines for higher frequency DDR operations
+ * (667MHz and faster), especially on a multi-rank system. DDR2 DQ/DM/DQS I/Os have a built-in
+ * termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
+ * timing requirements. Each rank has its own ODT pin that fans out to all the memory parts
+ * in that DIMM. System designers may prefer different ODT combinations for reads and writes
+ * into different ranks. Octeon supports full programmability by way of the mask register below.
+ * Each rank position has its own 8-bit programmable field.
+ * When the controller does a write to that rank, it sets the 8 ODT pins to the MASK bits below.
+ * For example, when doing a write into Rank0, a system designer may desire to terminate the lines
+ * with the resistor on DIMM0/Rank1. The mask WODT_D0_R0 would then be [00000010].
+ * If the ODT feature is not desired, the DDR parts can be programmed to not look at these pins by
+ * writing 0 in QS_DIC. Octeon drives the appropriate mask values on the ODT pins by default.
+ * If this feature is not required, write 0 in this register.
+ *
+ * Notes:
+ * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write ODT mask.
+ * When a given RANK is selected, the WODT mask for that RANK is used. The resulting WODT mask is
+ * driven to the DIMMs in the following manner:
+ * BUNK_ENA=1 BUNK_ENA=0
+ * Mask[7] -> DIMM3, RANK1 DIMM3
+ * Mask[6] -> DIMM3, RANK0
+ * Mask[5] -> DIMM2, RANK1 DIMM2
+ * Mask[4] -> DIMM2, RANK0
+ * Mask[3] -> DIMM1, RANK1 DIMM1
+ * Mask[2] -> DIMM1, RANK0
+ * Mask[1] -> DIMM0, RANK1 DIMM0
+ * Mask[0] -> DIMM0, RANK0
+ */
+union cvmx_lmcx_wodt_ctl1 {
+ uint64_t u64;
+ struct cvmx_lmcx_wodt_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wodt_d3_r1 : 8; /**< Write ODT mask DIMM3, RANK1/DIMM3 in SingleRanked */
+ uint64_t wodt_d3_r0 : 8; /**< Write ODT mask DIMM3, RANK0 */
+ uint64_t wodt_d2_r1 : 8; /**< Write ODT mask DIMM2, RANK1/DIMM2 in SingleRanked */
+ uint64_t wodt_d2_r0 : 8; /**< Write ODT mask DIMM2, RANK0 */
+#else
+ uint64_t wodt_d2_r0 : 8;
+ uint64_t wodt_d2_r1 : 8;
+ uint64_t wodt_d3_r0 : 8;
+ uint64_t wodt_d3_r1 : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_lmcx_wodt_ctl1_s cn30xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn31xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn52xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn52xxp1;
+ struct cvmx_lmcx_wodt_ctl1_s cn56xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn56xxp1;
+};
+typedef union cvmx_lmcx_wodt_ctl1 cvmx_lmcx_wodt_ctl1_t;
+
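+/* Illustrative sketch: with BUNK_ENA=1, the table above maps mask bit
+ * (2*dimm + rank) to DIMMn/RANKm, so terminating writes on DIMM0/RANK1
+ * means setting bit 1, i.e. the 00000010 example in the description.
+ */
+#if 0
+static inline uint8_t example_wodt_mask_bit(int dimm, int rank)
+{
+ return (uint8_t)(1u << (2 * dimm + rank));
+}
+/* example_wodt_mask_bit(0, 1) == 0x02 */
+#endif
+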
+/**
+ * cvmx_lmc#_wodt_mask
+ *
+ * LMC_WODT_MASK = LMC Write OnDieTermination mask
+ * System designers may desire to terminate DQ/DQS lines for higher frequency DDR operations,
+ * especially on a multi-rank system. DDR3 DQ/DQS I/Os have a built-in
+ * termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
+ * timing requirements. Each rank has its own ODT pin that fans out to all the memory parts
+ * in that DIMM. System designers may prefer different ODT combinations for writes
+ * into different ranks. Octeon supports full programmability by way of the mask register below.
+ * Each rank position has its own 8-bit programmable field.
+ * When the controller does a write to that rank, it sets the 4 ODT pins to the MASK bits below.
+ * For example, when doing a write into Rank0, a system designer may desire to terminate the lines
+ * with the resistor on DIMM0/Rank1. The mask WODT_D0_R0 would then be [00000010].
+ * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
+ * required, write 0 in this register.
+ *
+ * Notes:
+ * When a given RANK is selected, the WODT mask for that RANK is used. The resulting WODT mask is
+ * driven to the DIMMs in the following manner:
+ * RANK_ENA=1 RANK_ENA=0
+ * Mask[3] -> DIMM1_ODT_1 MBZ
+ * Mask[2] -> DIMM1_ODT_0 DIMM1_ODT_0
+ * Mask[1] -> DIMM0_ODT_1 MBZ
+ * Mask[0] -> DIMM0_ODT_0 DIMM0_ODT_0
+ *
+ * LMC always writes entire cache blocks and always writes them via two consecutive
+ * write CAS operations to the same rank+bank+row spaced exactly 4 CK's apart.
+ * When a WODT mask bit is set, LMC asserts the OCTEON ODT output
+ * pin(s) starting the same CK as the first write CAS operation. Then, OCTEON
+ * normally continues to assert the ODT output pin(s) for 9+LMC*_CONTROL[WODT_BPRCH] more CK's
+ * - for a total of 10+LMC*_CONTROL[WODT_BPRCH] CK's for the entire cache block write -
+ * through the second write CAS operation of the cache block,
+ * satisfying the 6 CK DDR3 ODTH8 requirements.
+ * But it is possible for OCTEON to issue two cache block writes separated by as few as
+ * WtW = 8 or 9 (10 if LMC*_CONTROL[WODT_BPRCH]=1) CK's. In that case, OCTEON asserts the ODT output pin(s)
+ * for the WODT mask of the first cache block write for WtW CK's, then asserts
+ * the ODT output pin(s) for the WODT mask of the second cache block write for 10+LMC*_CONTROL[WODT_BPRCH] CK's
+ * (or less if a third cache block write follows within 8 or 9 (or 10) CK's of this second cache block write).
+ * Note that it may be necessary to force LMC to space back-to-back cache block writes
+ * to different ranks apart by at least 10+LMC*_CONTROL[WODT_BPRCH] CK's to prevent DDR3 ODTH8 violations.
+ */
+union cvmx_lmcx_wodt_mask {
+ uint64_t u64;
+ struct cvmx_lmcx_wodt_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wodt_d3_r1 : 8; /**< Write ODT mask DIMM3, RANK1/DIMM3 in SingleRanked
+ *UNUSED IN 6xxx, and MBZ* */
+ uint64_t wodt_d3_r0 : 8; /**< Write ODT mask DIMM3, RANK0
+ *UNUSED IN 6xxx, and MBZ* */
+ uint64_t wodt_d2_r1 : 8; /**< Write ODT mask DIMM2, RANK1/DIMM2 in SingleRanked
+ *UNUSED IN 6xxx, and MBZ* */
+ uint64_t wodt_d2_r0 : 8; /**< Write ODT mask DIMM2, RANK0
+ *UNUSED IN 6xxx, and MBZ* */
+ uint64_t wodt_d1_r1 : 8; /**< Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked
+ if (!RANK_ENA) then WODT_D1_R1[3:0] MBZ
+ *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
+ uint64_t wodt_d1_r0 : 8; /**< Write ODT mask DIMM1, RANK0
+ if (!RANK_ENA) then WODT_D1_R0[3,1] MBZ
+ *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
+ uint64_t wodt_d0_r1 : 8; /**< Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked
+ if (!RANK_ENA) then WODT_D0_R1[3:0] MBZ
+ *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
+ uint64_t wodt_d0_r0 : 8; /**< Write ODT mask DIMM0, RANK0
+ if (!RANK_ENA) then WODT_D0_R0[3,1] MBZ
+ *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
+#else
+ uint64_t wodt_d0_r0 : 8;
+ uint64_t wodt_d0_r1 : 8;
+ uint64_t wodt_d1_r0 : 8;
+ uint64_t wodt_d1_r1 : 8;
+ uint64_t wodt_d2_r0 : 8;
+ uint64_t wodt_d2_r1 : 8;
+ uint64_t wodt_d3_r0 : 8;
+ uint64_t wodt_d3_r1 : 8;
+#endif
+ } s;
+ struct cvmx_lmcx_wodt_mask_s cn61xx;
+ struct cvmx_lmcx_wodt_mask_s cn63xx;
+ struct cvmx_lmcx_wodt_mask_s cn63xxp1;
+ struct cvmx_lmcx_wodt_mask_s cn66xx;
+ struct cvmx_lmcx_wodt_mask_s cn68xx;
+ struct cvmx_lmcx_wodt_mask_s cn68xxp1;
+ struct cvmx_lmcx_wodt_mask_s cnf71xx;
+};
+typedef union cvmx_lmcx_wodt_mask cvmx_lmcx_wodt_mask_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-lmcx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-log-arc.S
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-log-arc.S (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-log-arc.S 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,179 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+//
+// The function defined here is called for every function as it is executed.
+// These calls are automatically inserted by GCC when the switch "-pg" is
+// used. This allows cvmx-log to add a PC entry as each function is executed.
+// This information, along with the timestamps, can give the user a good idea
+// of the performance characteristics of their program. This function normally
+// takes about 22 cycles to execute.
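+// For example, building with something like "gcc -pg -c app.c" (any
+// Octeon-targeted GCC) makes the compiler insert an _mcount call in each
+// function prologue, which lands here.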
+//
+
+#ifdef __linux__
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#define LA dla
+#else
+#include <machine/asm.h>
+#include <machine/regdef.h>
+#define LA la
+#endif
+
+.set noreorder
+.set noat
+LEAF(_mcount)
+ //
+ // All registers we use must be saved since calls are added by gcc
+ // after register allocation. The at register ($1) will contain the
+ // original ra register before the _mcount call. Also the compiler
+ // automatically performs a "dsubu sp, sp, 16" before we're called.
+ // At the end of this function all registers must have their original
+ // values and the stack pointer must be adjusted by 16. This code is
+ // pretty unreadable since it has been arranged to promote dual issue.
+ //
+#ifdef __linux__
+ dsubu sp, sp, 32
+#else
+ dsubu sp, sp, 16
+#endif
+ sd s3, 24(sp) // Save register
+ rdhwr s3, $31 // Read the cycle count
+ sd s0, 0(sp) // Save register
+ LA s0, cvmx_log_buffer_end_ptr // Load the address of the end of the log buffer
+ sd s1, 8(sp) // Save register
+ LA s1, cvmx_log_buffer_write_ptr // Load the address of the location in the log buffer
+ sd s2, 16(sp) // Save register
+ ld s0, 0(s0) // Get value of the current log buffer end location
+ ld s2, 0(s1) // Get value of the current log buffer location
+ dsubu s0, s0, s2 // Subtract the end pointer and the write pointer
+ sltiu s0, s0, 16 // Check if there are at least 16 bytes
+ bne s0, $0, call_c_pc // Call the slow C function if we don't have room in the log
+ li s0, 0x001 // 11 bit constant that matches the first 11 bits of a CVMX_LOG_TYPE_PC header
+ sd ra, 8(s2) // Write the pc to the log
+ dins s3, s0, 53, 11 // Overwrite the upper cycle count bits with the CVMX_LOG_TYPE_PC header
+ sd s3, 0(s2) // Write the log header
+ daddu s2, s2, 16 // Increment the write location ptr
+ sd s2, 0(s1) // Store the write location ptr
+return_c_pc:
+ ld s0, 0(sp) // Restore register
+ ld s1, 8(sp) // Restore register
+ ld s2, 16(sp) // Restore register
+ ld s3, 24(sp) // Restore register
+ daddu sp, sp, 32 // Pop everything off the stack, even the 16 bytes done by gcc
+ jr ra // Return to the caller and
+ or ra, $1, $1 // make sure the ra is back to its original value
+
+call_c_pc:
+ // The registers used by the C code may change based on optimizations. To be
+ // safe, I'll save all registers. We're in the slow path case anyway.
+ dsubu sp, sp, 216
+ sd $1, 0(sp)
+ sd $2, 8(sp)
+ sd $3, 16(sp)
+ sd $4, 24(sp)
+ sd $5, 32(sp)
+ sd $6, 40(sp)
+ sd $7, 48(sp)
+ sd $8, 56(sp)
+ sd $9, 64(sp)
+ sd $10, 72(sp)
+ sd $11, 80(sp)
+ sd $12, 88(sp)
+ sd $13, 96(sp)
+ sd $14, 104(sp)
+ sd $15, 112(sp)
+ // s0, s1, s2, s3 are already saved
+ sd $20, 120(sp)
+ sd $21, 128(sp)
+ sd $22, 136(sp)
+ sd $23, 144(sp)
+ sd $24, 152(sp)
+ sd $25, 160(sp)
+ sd $26, 168(sp)
+ sd $27, 176(sp)
+ sd $28, 184(sp)
+ sd $29, 192(sp)
+ sd $30, 200(sp)
+ sd $31, 208(sp)
+
+ or a0, ra, ra
+ jal cvmx_log_pc
+ nop
+
+ ld $1, 0(sp)
+ ld $2, 8(sp)
+ ld $3, 16(sp)
+ ld $4, 24(sp)
+ ld $5, 32(sp)
+ ld $6, 40(sp)
+ ld $7, 48(sp)
+ ld $8, 56(sp)
+ ld $9, 64(sp)
+ ld $10, 72(sp)
+ ld $11, 80(sp)
+ ld $12, 88(sp)
+ ld $13, 96(sp)
+ ld $14, 104(sp)
+ ld $15, 112(sp)
+ // s0, s1, s2, s3 will be restored later
+ ld $20, 120(sp)
+ ld $21, 128(sp)
+ ld $22, 136(sp)
+ ld $23, 144(sp)
+ ld $24, 152(sp)
+ ld $25, 160(sp)
+ ld $26, 168(sp)
+ ld $27, 176(sp)
+ ld $28, 184(sp)
+ ld $29, 192(sp)
+ ld $30, 200(sp)
+ ld $31, 208(sp)
+ b return_c_pc
+ daddu sp, sp, 216
+
+END(_mcount)
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-log-arc.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-log.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-log.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-log.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,540 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * cvmx-log supplies a fast log buffer implementation. Each core writes
+ * log data to a different buffer to avoid synchronization overhead. Function
+ * call logging can be turned on with the GCC option "-pg".
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#include "cvmx.h"
+#include "cvmx-core.h"
+#include "cvmx-log.h"
+
+#define CVMX_LOG_BUFFER_SIZE (1<<15)
+#define CVMX_LOG_NUM_BUFFERS 4
+
+/**
+ * The possible types of log data that can be stored in the
+ * buffer.
+ */
+typedef enum
+{
+ CVMX_LOG_TYPE_PC = 0, /**< Log of the program counter location. Used for code profiling / tracing */
+ CVMX_LOG_TYPE_PRINTF, /**< Constant printf format string with two 64bit arguments */
+ CVMX_LOG_TYPE_DATA, /**< Arbitrary array of dwords. Max size is 31 dwords */
+ CVMX_LOG_TYPE_STRUCTURE,/**< Log a structured data element. Max size is 30 dwords */
+ CVMX_LOG_TYPE_PERF, /**< Mips performance counters control registers followed by the data */
+} cvmx_log_type_t;
+
+/**
+ * Header definition for each log entry.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ cvmx_log_type_t type : 3; /* Type of data in the log entry */
+ uint64_t size : 8; /* Data size in 64bit words */
+ uint64_t cycle :53; /* Low bits of the cycle counter as a timestamp */
+ } s;
+} cvmx_log_header_t;
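+
+/* Illustration (field values hypothetical) of how a header dword is built;
+ * the cycle count is silently truncated to the low 53 bits, which still
+ * spans about 100 days at a 1 GHz clock, so timestamp wrap is rarely a
+ * practical concern:
+ *
+ *     cvmx_log_header_t h;
+ *     h.u64 = 0;
+ *     h.s.type = CVMX_LOG_TYPE_DATA;
+ *     h.s.size = 4;                  // four payload dwords follow
+ *     h.s.cycle = cvmx_get_cycle();  // truncated to 53 bits
+ */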
+
+/**
+ * Circular log buffer. Each processor gets a private one to
+ * write to. Log entries are added at the current write
+ * location, then the write location is incremented. The
+ * buffer may wrap in the middle of a log entry.
+ */
+static uint64_t cvmx_log_buffers[CVMX_LOG_NUM_BUFFERS][CVMX_LOG_BUFFER_SIZE];
+
+/**
+ * Current locations in the log.
+ */
+uint64_t *cvmx_log_buffer_write_ptr = NULL; /* The next write will occur here */
+uint64_t *cvmx_log_buffer_end_ptr = NULL; /* Write must move to the next buffer when it equals this */
+uint64_t *cvmx_log_buffer_head_ptr = NULL; /* Pointer to begin extracting log data from */
+static uint64_t *cvmx_log_buffer_read_ptr = NULL; /* Location cvmx_display is reading from */
+static uint64_t *cvmx_log_buffer_read_end_ptr = NULL; /* Location where read will need the next buffer */
+uint64_t cvmx_log_mcd0_on_full = 0; /* If this is set, cvmx-log will assert MCD0 when the log
+ is full. This is set by the remote logging utility through
+ the debugger interface. */
+
+
+/**
+ * @INTERNAL
+ * Initialize the log for writing
+ */
+static void __cvmx_log_initialize(void) CVMX_LOG_DISABLE_PC_LOGGING;
+static void __cvmx_log_initialize(void)
+{
+ int buf_num;
+
+ /* Link the buffers together using the last element in each buffer */
+ for (buf_num=0; buf_num<CVMX_LOG_NUM_BUFFERS-1; buf_num++)
+ cvmx_log_buffers[buf_num][CVMX_LOG_BUFFER_SIZE-1] = CAST64(cvmx_log_buffers[buf_num+1]);
+ cvmx_log_buffers[CVMX_LOG_NUM_BUFFERS-1][CVMX_LOG_BUFFER_SIZE-1] = CAST64(NULL);
+
+ cvmx_log_buffer_head_ptr = &cvmx_log_buffers[0][0];
+ cvmx_log_buffer_write_ptr = &cvmx_log_buffers[0][0];
+ cvmx_log_buffer_end_ptr = cvmx_log_buffer_write_ptr + CVMX_LOG_BUFFER_SIZE-1;
+}
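+
+/* Illustration: the last dword of each buffer holds the address of the next
+ * buffer (or NULL), so the chain built above can be walked with a sketch
+ * like this (not used by the code below):
+ *
+ *     uint64_t *buf = cvmx_log_buffer_head_ptr;
+ *     while (buf)
+ *     {
+ *         uint64_t *end = buf + CVMX_LOG_BUFFER_SIZE - 1;
+ *         // ... consume log dwords in [buf, end) ...
+ *         buf = CASTPTR(uint64_t, *end);  // NULL terminates the chain
+ *     }
+ */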
+
+
+/**
+ * @INTERNAL
+ * Called when the log is full of data. This function must
+ * make room for more log data before returning.
+ */
+static void __cvmx_log_full_process(void) CVMX_LOG_DISABLE_PC_LOGGING;
+static void __cvmx_log_full_process(void)
+{
+ if (cvmx_log_mcd0_on_full)
+ {
+ register uint64_t tmp;
+ /* Pulse MCD0 signal so a remote utility can extract the data */
+ asm volatile (
+ "dmfc0 %0, $22\n"
+ "ori %0, %0, 0x1110\n"
+ "dmtc0 %0, $22\n"
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ : "=r" (tmp));
+ }
+ /* The write ptr may have been modified by the debugger, check it again */
+ if (!(volatile uint64_t)CAST64(cvmx_log_buffer_write_ptr))
+ {
+ #ifndef __KERNEL__
+ /* Disabled for the Linux kernel since printk is also profiled */
+ cvmx_dprintf("Log is full, reusing first buffer\n");
+ #endif
+ *cvmx_log_buffer_end_ptr = CAST64(cvmx_log_buffer_head_ptr);
+ cvmx_log_buffer_write_ptr = cvmx_log_buffer_head_ptr;
+ cvmx_log_buffer_end_ptr = cvmx_log_buffer_write_ptr + CVMX_LOG_BUFFER_SIZE-1;
+ cvmx_log_buffer_head_ptr = CASTPTR(uint64_t, *cvmx_log_buffer_end_ptr);
+ *cvmx_log_buffer_end_ptr = CAST64(NULL);
+ }
+}
+
+
+/**
+ * @INTERNAL
+ * Simple inline function to build a log header
+ *
+ * @param type Type of header to build
+ * @param size Amount of data that follows the header in dwords
+ * @return The header
+ */
+static inline uint64_t __cvmx_log_build_header(cvmx_log_type_t type, uint64_t size) CVMX_LOG_DISABLE_PC_LOGGING;
+static inline uint64_t __cvmx_log_build_header(cvmx_log_type_t type, uint64_t size)
+{
+ cvmx_log_header_t header;
+ header.u64 = 0;
+ header.s.type = type;
+ header.s.size = size;
+ header.s.cycle = cvmx_get_cycle();
+ return header.u64;
+}
+
+
+/**
+ * @INTERNAL
+ * Function to write and increment the position. It rotates
+ * to the next log buffer as necessary.
+ *
+ * @param data Data to write to the log
+ */
+static inline void __cvmx_log_write(uint64_t data) CVMX_LOG_DISABLE_PC_LOGGING;
+static inline void __cvmx_log_write(uint64_t data)
+{
+ /* Check and see if we need to rotate the log */
+ if (cvmx_likely(cvmx_log_buffer_write_ptr != cvmx_log_buffer_end_ptr))
+ {
+ /* No rotate is necessary, just write the data */
+ *cvmx_log_buffer_write_ptr++ = data;
+ }
+ else
+ {
+ /* Initialize the log if necessary */
+ if (cvmx_unlikely(cvmx_log_buffer_head_ptr == NULL))
+ __cvmx_log_initialize();
+ else
+ {
+ cvmx_log_buffer_write_ptr = CASTPTR(uint64_t, *cvmx_log_buffer_end_ptr);
+ if (cvmx_likely(cvmx_log_buffer_write_ptr))
+ {
+ /* Rotate the log. Might be a good time to send the old buffer
+ somewhere */
+ cvmx_log_buffer_end_ptr = cvmx_log_buffer_write_ptr + CVMX_LOG_BUFFER_SIZE-1;
+ }
+ else
+ __cvmx_log_full_process(); /* After this function returns, the log must be ready for updates */
+ }
+ *cvmx_log_buffer_write_ptr++ = data;
+ }
+}
+
+
+/**
+ * Log a program counter address to the log. This is called by
+ * the assembly function mcount when writing the PC value is more
+ * complicated than the simple case mcount handles itself.
+ *
+ * @param pc Program counter address to log
+ */
+void cvmx_log_pc(uint64_t pc) CVMX_LOG_DISABLE_PC_LOGGING;
+void cvmx_log_pc(uint64_t pc)
+{
+ __cvmx_log_write(__cvmx_log_build_header(CVMX_LOG_TYPE_PC, 1));
+ __cvmx_log_write(pc);
+}
+
+
+/**
+ * Log a constant printf style format string with 0 to 4
+ * arguments. The string must persist until the log is read,
+ * but the parameters are copied into the log.
+ *
+ * @param format Constant printf style format string.
+ */
+void cvmx_log_printf0(const char *format)
+{
+ __cvmx_log_write(__cvmx_log_build_header(CVMX_LOG_TYPE_PRINTF, 1));
+ __cvmx_log_write(CAST64(format));
+}
+
+
+/**
+ * Log a constant printf style format string with 0 to 4
+ * arguments. The string must persist until the log is read,
+ * but the parameters are copied into the log.
+ *
+ * @param format Constant printf style format string.
+ * @param number1 64bit argument to the printf format string
+ */
+void cvmx_log_printf1(const char *format, uint64_t number1)
+{
+ __cvmx_log_write(__cvmx_log_build_header(CVMX_LOG_TYPE_PRINTF, 2));
+ __cvmx_log_write(CAST64(format));
+ __cvmx_log_write(number1);
+}
+
+
+/**
+ * Log a constant printf style format string with 0 to 4
+ * arguments. The string must persist until the log is read,
+ * but the parameters are copied into the log.
+ *
+ * @param format Constant printf style format string.
+ * @param number1 64bit argument to the printf format string
+ * @param number2 64bit argument to the printf format string
+ */
+void cvmx_log_printf2(const char *format, uint64_t number1, uint64_t number2)
+{
+ __cvmx_log_write(__cvmx_log_build_header(CVMX_LOG_TYPE_PRINTF, 3));
+ __cvmx_log_write(CAST64(format));
+ __cvmx_log_write(number1);
+ __cvmx_log_write(number2);
+}
+
+
+/**
+ * Log a constant printf style format string with 0 to 4
+ * arguments. The string must persist until the log is read,
+ * but the parameters are copied into the log.
+ *
+ * @param format Constant printf style format string.
+ * @param number1 64bit argument to the printf format string
+ * @param number2 64bit argument to the printf format string
+ * @param number3 64bit argument to the printf format string
+ */
+void cvmx_log_printf3(const char *format, uint64_t number1, uint64_t number2, uint64_t number3)
+{
+ __cvmx_log_write(__cvmx_log_build_header(CVMX_LOG_TYPE_PRINTF, 4));
+ __cvmx_log_write(CAST64(format));
+ __cvmx_log_write(number1);
+ __cvmx_log_write(number2);
+ __cvmx_log_write(number3);
+}
+
+
+/**
+ * Log a constant printf style format string with 0 to 4
+ * arguments. The string must persist until the log is read,
+ * but the parameters are copied into the log.
+ *
+ * @param format Constant printf style format string.
+ * @param number1 64bit argument to the printf format string
+ * @param number2 64bit argument to the printf format string
+ * @param number3 64bit argument to the printf format string
+ * @param number4 64bit argument to the printf format string
+ */
+void cvmx_log_printf4(const char *format, uint64_t number1, uint64_t number2, uint64_t number3, uint64_t number4)
+{
+ __cvmx_log_write(__cvmx_log_build_header(CVMX_LOG_TYPE_PRINTF, 5));
+ __cvmx_log_write(CAST64(format));
+ __cvmx_log_write(number1);
+ __cvmx_log_write(number2);
+ __cvmx_log_write(number3);
+ __cvmx_log_write(number4);
+}
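+
+/* A sketch of a typical call site ("port" and "bytes" are hypothetical).
+ * Only the format string's pointer is stored, so it must be a constant
+ * that persists until cvmx_log_display() reads it; the numeric arguments
+ * are copied:
+ *
+ *     cvmx_log_printf2("port %llu rx %llu bytes\n",
+ *                      (uint64_t)port, (uint64_t)bytes);
+ *
+ * A stack or heap buffer would be wrong here, since the log reads the
+ * string through the stored pointer long after the call returns.
+ */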
+
+
+/**
+ * Log an arbitrary block of 64bit words. At most 255 64bit
+ * words can be logged. The words are copied into the log.
+ *
+ * @param size_in_dwords
+ * Number of 64bit dwords to copy into the log.
+ * @param data Array of 64bit dwords to copy
+ */
+void cvmx_log_data(uint64_t size_in_dwords, const uint64_t *data)
+{
+ if (size_in_dwords > 255)
+ size_in_dwords = 255;
+
+ __cvmx_log_write(__cvmx_log_build_header(CVMX_LOG_TYPE_DATA, size_in_dwords));
+ while (size_in_dwords--)
+ __cvmx_log_write(*data++);
+}
+
+
+/**
+ * Log a structured data object. Post processing will use the
+ * debugging information in the ELF file to determine how to
+ * display the structure. Max of 2032 bytes.
+ *
+ * Example:
+ * cvmx_log_structure("cvmx_wqe_t", work, sizeof(*work));
+ *
+ * @param type C typedef expressed as a string. This will be used to
+ * look up the structure in the debugging information.
+ * @param data Data to be written to the log.
+ * @param size_in_bytes
+ * Size of the data in bytes. Normally you'll use the
+ * sizeof() operator here.
+ */
+void cvmx_log_structure(const char *type, void *data, int size_in_bytes)
+{
+ uint64_t size_in_dwords = (size_in_bytes + 7) >> 3;
+ uint64_t *ptr = (uint64_t*)data;
+
+ if (size_in_dwords > 254)
+ size_in_dwords = 254;
+
+ __cvmx_log_write(__cvmx_log_build_header(CVMX_LOG_TYPE_STRUCTURE, size_in_dwords + 1));
+ __cvmx_log_write(CAST64(type));
+ while (size_in_dwords--)
+ __cvmx_log_write(*ptr++);
+}
+
+
+/**
+ * Setup the mips performance counters
+ *
+ * @param counter1 Event type for counter 1
+ * @param counter2 Event type for counter 2
+ */
+void cvmx_log_perf_setup(cvmx_core_perf_t counter1, cvmx_core_perf_t counter2)
+{
+ cvmx_core_perf_control_t control;
+
+ control.u32 = 0;
+ control.s.event = counter1;
+ control.s.u = 1;
+ control.s.s = 1;
+ control.s.k = 1;
+ control.s.ex = 1;
+ asm ("mtc0 %0, $25, 0\n" : : "r"(control.u32));
+ control.s.event = counter2;
+ asm ("mtc0 %0, $25, 2\n" : : "r"(control.u32));
+}
+
+
+/**
+ * Log the performance counters
+ */
+void cvmx_log_perf(void)
+{
+ uint64_t control1;
+ uint64_t control2;
+ uint64_t data1;
+ uint64_t data2;
+ asm ("dmfc0 %0, $25, 1\n" : "=r"(data1));
+ asm ("dmfc0 %0, $25, 3\n" : "=r"(data2));
+ asm ("mfc0 %0, $25, 0\n" : "=r"(control1));
+ asm ("mfc0 %0, $25, 2\n" : "=r"(control2));
+ __cvmx_log_write(__cvmx_log_build_header(CVMX_LOG_TYPE_PERF, 3));
+ __cvmx_log_write(((control1 & 0xffffffff) << 32) | (control2 & 0xffffffff));
+ __cvmx_log_write(data1);
+ __cvmx_log_write(data2);
+}
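+
+/* Sketch of pairing the two perf helpers around a region of interest; the
+ * event selectors come from cvmx_core_perf_t in cvmx-core.h and are left
+ * symbolic here, and do_work() is a hypothetical workload:
+ *
+ *     cvmx_log_perf_setup(event1, event2);  // program both counters
+ *     do_work();
+ *     cvmx_log_perf();                      // snapshot counters into the log
+ *
+ * Each resulting entry holds both control registers and both counter
+ * values, as decoded by cvmx_log_display().
+ */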
+
+
+/**
+ * @INTERNAL
+ * Read a dword from the log
+ *
+ * @return the dword
+ */
+static uint64_t __cvmx_log_read(void) CVMX_LOG_DISABLE_PC_LOGGING;
+static uint64_t __cvmx_log_read(void)
+{
+ uint64_t data;
+
+ /* Check and see if we need to rotate the log */
+ if (cvmx_likely(cvmx_log_buffer_read_ptr != cvmx_log_buffer_read_end_ptr))
+ {
+ /* No rotate is necessary, just read the data */
+ data = *cvmx_log_buffer_read_ptr++;
+ }
+ else
+ {
+ cvmx_log_buffer_read_ptr = CASTPTR(uint64_t, *cvmx_log_buffer_read_end_ptr);
+ if (cvmx_likely(cvmx_log_buffer_read_ptr))
+ {
+ /* Rotate to the next log buffer */
+ cvmx_log_buffer_read_end_ptr = cvmx_log_buffer_read_ptr + CVMX_LOG_BUFFER_SIZE-1;
+ data = *cvmx_log_buffer_read_ptr++;
+ }
+ else
+ {
+ /* No more log buffers, return 0 */
+ cvmx_log_buffer_read_end_ptr = NULL;
+ data = 0;
+ }
+ }
+
+ return data;
+}
+
+
+/**
+ * Display the current log in a human readable format.
+ */
+void cvmx_log_display(void)
+{
+ unsigned int i;
+ cvmx_log_header_t header;
+
+ cvmx_log_buffer_read_ptr = cvmx_log_buffer_head_ptr;
+ cvmx_log_buffer_read_end_ptr = cvmx_log_buffer_read_ptr + CVMX_LOG_BUFFER_SIZE-1;
+
+ while (cvmx_log_buffer_read_ptr && (cvmx_log_buffer_read_ptr != cvmx_log_buffer_write_ptr))
+ {
+ header.u64 = __cvmx_log_read();
+ if (header.s.cycle == 0)
+ continue;
+ printf("%llu: ", (unsigned long long)header.s.cycle);
+ switch (header.s.type)
+ {
+ case CVMX_LOG_TYPE_PC:
+ if (header.s.size == 1)
+ printf("pc 0x%016llx\n", (unsigned long long)__cvmx_log_read());
+ else
+ printf("Illegal size (%d) for log entry: pc\n", header.s.size);
+ break;
+        case CVMX_LOG_TYPE_PRINTF:
+        {
+            /* Read the format string and its arguments with sequenced
+               statements; passing several __cvmx_log_read() calls to one
+               printf would leave their evaluation order unspecified */
+            const char *fmt;
+            uint64_t arg[4];
+            if ((header.s.size < 1) || (header.s.size > 5))
+            {
+                printf("Illegal size (%d) for log entry: printf\n", (int)header.s.size);
+                break;
+            }
+            fmt = CASTPTR(const char, __cvmx_log_read());
+            for (i=0; i<header.s.size-1; i++)
+                arg[i] = __cvmx_log_read();
+            switch (header.s.size)
+            {
+                case 1:
+                    printf(fmt);
+                    break;
+                case 2:
+                    printf(fmt, arg[0]);
+                    break;
+                case 3:
+                    printf(fmt, arg[0], arg[1]);
+                    break;
+                case 4:
+                    printf(fmt, arg[0], arg[1], arg[2]);
+                    break;
+                case 5:
+                    printf(fmt, arg[0], arg[1], arg[2], arg[3]);
+                    break;
+            }
+            printf("\n");
+            break;
+        }
+ case CVMX_LOG_TYPE_DATA:
+ printf("data");
+ for (i=0; i<header.s.size; i++)
+ printf(" 0x%016llx", (unsigned long long)__cvmx_log_read());
+ printf("\n");
+ break;
+ case CVMX_LOG_TYPE_STRUCTURE:
+ printf("struct %s", CASTPTR(const char, __cvmx_log_read()));
+ for (i=1; i<header.s.size; i++)
+ printf(" 0x%016llx", (unsigned long long)__cvmx_log_read());
+ printf("\n");
+ break;
+ case CVMX_LOG_TYPE_PERF:
+ if (header.s.size == 3)
+ {
+ unsigned long long control = __cvmx_log_read();
+ unsigned long long data1 = __cvmx_log_read();
+ unsigned long long data2 = __cvmx_log_read();
+ printf("perf control=0x%016llx data1=0x%016llx data2=0x%016llx\n", control, data1, data2);
+ }
+ else
+ printf("Illegal size (%d) for log entry: perf\n", header.s.size);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-log.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-log.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-log.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-log.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,140 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+#ifndef __CVMX_LOG_H__
+#define __CVMX_LOG_H__
+
+/**
+ * @file
+ *
+ * cvmx-log supplies a fast log buffer implementation. Each core writes
+ * log data to a different buffer to avoid synchronization overhead. Function
+ * call logging can be turned on with the GCC option "-pg".
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx-core.h>
+#else
+#include "cvmx-core.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Add CVMX_LOG_DISABLE_PC_LOGGING as an attribute to any function prototype
+ * that you don't want logged when the gcc option "-pg" is supplied. We
+ * use it on the cvmx-log functions since it is pointless to log the
+ * calling of a function that itself writes to the log.
+ */
+#define CVMX_LOG_DISABLE_PC_LOGGING __attribute__((no_instrument_function))
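+
+/* A sketch of annotating your own hot function the same way, so that it
+ * stays out of the PC log when building with "-pg" (drain_queue is a
+ * hypothetical example):
+ *
+ *     static void drain_queue(void) CVMX_LOG_DISABLE_PC_LOGGING;
+ *     static void drain_queue(void)
+ *     {
+ *         // runs without emitting CVMX_LOG_TYPE_PC entries
+ *     }
+ */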
+
+/**
+ * Log a constant printf style format string with 0 to 4
+ * arguments. The string must persist until the log is read,
+ * but the parameters are copied into the log.
+ *
+ * @param format Constant printf style format string.
+ * @param numberx 64bit argument to the printf format string
+ */
+void cvmx_log_printf0(const char *format) CVMX_LOG_DISABLE_PC_LOGGING;
+void cvmx_log_printf1(const char *format, uint64_t number1) CVMX_LOG_DISABLE_PC_LOGGING;
+void cvmx_log_printf2(const char *format, uint64_t number1, uint64_t number2) CVMX_LOG_DISABLE_PC_LOGGING;
+void cvmx_log_printf3(const char *format, uint64_t number1, uint64_t number2, uint64_t number3) CVMX_LOG_DISABLE_PC_LOGGING;
+void cvmx_log_printf4(const char *format, uint64_t number1, uint64_t number2, uint64_t number3, uint64_t number4) CVMX_LOG_DISABLE_PC_LOGGING;
+
+/**
+ * Log an arbitrary block of 64bit words. At most 255 64bit
+ * words can be logged. The words are copied into the log.
+ *
+ * @param size_in_dwords
+ * Number of 64bit dwords to copy into the log.
+ * @param data Array of 64bit dwords to copy
+ */
+void cvmx_log_data(uint64_t size_in_dwords, const uint64_t *data) CVMX_LOG_DISABLE_PC_LOGGING;
+
+/**
+ * Log a structured data object. Post processing will use the
+ * debugging information in the ELF file to determine how to
+ * display the structure. Max of 2032 bytes.
+ *
+ * Example:
+ * cvmx_log_structure("cvmx_wqe_t", work, sizeof(*work));
+ *
+ * @param type C typedef expressed as a string. This will be used to
+ * look up the structure in the debugging information.
+ * @param data Data to be written to the log.
+ * @param size_in_bytes
+ * Size of the data in bytes. Normally you'll use the
+ * sizeof() operator here.
+ */
+void cvmx_log_structure(const char *type, void *data, int size_in_bytes) CVMX_LOG_DISABLE_PC_LOGGING;
+
+/**
+ * Setup the mips performance counters
+ *
+ * @param counter1 Event type for counter 1
+ * @param counter2 Event type for counter 2
+ */
+void cvmx_log_perf_setup(cvmx_core_perf_t counter1, cvmx_core_perf_t counter2);
+
+/**
+ * Log the performance counters
+ */
+void cvmx_log_perf(void) CVMX_LOG_DISABLE_PC_LOGGING;
+
+/**
+ * Display the current log in a human readable format.
+ */
+void cvmx_log_display(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
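+
+/* A minimal end-to-end sketch (values hypothetical): the log initializes
+ * itself lazily on the first write, so a session is just writes followed
+ * by a dump:
+ *
+ *     cvmx_log_printf1("booted core %llu\n", (uint64_t)core);
+ *     cvmx_log_data(2, samples);  // samples: array of two uint64_t
+ *     cvmx_log_display();         // human-readable dump of all entries
+ */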
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-log.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-malloc/README-malloc
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-malloc/README-malloc (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-malloc/README-malloc 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,12 @@
+Readme for Octeon shared memory malloc
+
+This malloc is based on ptmalloc2, which is the malloc
+implementation of glibc. Source code and more information
+on this can be found at http://www.malloc.de/en/index.html.
+Please see the individual files for licensing terms.
+
+The main change to the code modifies the way malloc
+gets memory from the system. Under Linux/Unix, malloc
+uses the brk or mmap system calls to request more memory.
+In this implementation, memory regions must be explicitly
+given to malloc by the application.
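+
+A minimal sketch of that flow (the buffer name and sizes are illustrative,
+and cvmx_arena_list_t is assumed to be the pointer type these entry points
+take):
+
+    static CVMX_SHARED cvmx_arena_list_t arena_list = NULL;
+    static uint8_t heap[1 << 20];    /* region donated to the allocator */
+
+    if (cvmx_add_arena(&arena_list, heap, sizeof(heap)) == 0)
+    {
+        void *p = cvmx_malloc(arena_list, 256);
+        cvmx_free(p);
+    }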
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-malloc/README-malloc
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-malloc/arena.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-malloc/arena.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-malloc/arena.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,294 @@
+/* $MidnightBSD$ */
+/*
+Copyright (c) 2001 Wolfram Gloger
+Copyright (c) 2006 Cavium networks
+
+Permission to use, copy, modify, distribute, and sell this software
+and its documentation for any purpose is hereby granted without fee,
+provided that (i) the above copyright notices and this permission
+notice appear in all copies of the software and related documentation,
+and (ii) the name of Wolfram Gloger may not be used in any advertising
+or publicity relating to the software.
+
+THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
+EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
+WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+
+IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
+INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
+OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/* $Id: arena.c 30481 2007-12-05 21:46:59Z rfranz $ */
+
+/* Compile-time constants. */
+
+#define HEAP_MIN_SIZE (4096) /* Must leave room for struct malloc_state, arena ptrs, etc.; these total about 2400 bytes */
+
+#ifndef THREAD_STATS
+#define THREAD_STATS 0
+#endif
+
+/* If THREAD_STATS is non-zero, some statistics on mutex locking are
+ computed. */
+
+/***************************************************************************/
+
+// made static to avoid conflicts with newlib
+static mstate _int_new_arena __MALLOC_P ((size_t __ini_size));
+
+/***************************************************************************/
+
+#define top(ar_ptr) ((ar_ptr)->top)
+
+/* A heap is a single contiguous memory region holding (coalesceable)
+ malloc_chunks. Not used unless compiling with
+ USE_ARENAS. */
+
+typedef struct _heap_info {
+ mstate ar_ptr; /* Arena for this heap. */
+ struct _heap_info *prev; /* Previous heap. */
+ size_t size; /* Current size in bytes. */
+ size_t pad; /* Make sure the following data is properly aligned. */
+} heap_info;
+
+/* Thread specific data */
+
+static tsd_key_t arena_key; // one per PP (thread)
+static CVMX_SHARED mutex_t list_lock; // shared...
+
+#if THREAD_STATS
+static int stat_n_heaps;
+#define THREAD_STAT(x) x
+#else
+#define THREAD_STAT(x) do ; while(0)
+#endif
+
+/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
+static unsigned long arena_mem;
+
+/* Already initialized? */
+int CVMX_SHARED cvmx__malloc_initialized = -1;
+
+/**************************************************************************/
+
+#if USE_ARENAS
+
+/* find the heap and corresponding arena for a given ptr */
+
+#define arena_for_chunk(ptr) ((ptr)->arena_ptr)
+#define set_arena_for_chunk(ptr, arena) (ptr)->arena_ptr = (arena)
+
+
+#endif /* USE_ARENAS */
+
+/**************************************************************************/
+
+#ifndef NO_THREADS
+
+/* atfork support. */
+
+static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size,
+ __const __malloc_ptr_t));
+static void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
+ __const __malloc_ptr_t));
+static Void_t* save_arena;
+
+/* Magic value for the thread-specific arena pointer when
+ malloc_atfork() is in use. */
+
+#define ATFORK_ARENA_PTR ((Void_t*)-1)
+
+/* The following hooks are used while the `atfork' handling mechanism
+ is active. */
+
+static Void_t*
+malloc_atfork(size_t sz, const Void_t *caller)
+{
+    return(NULL);
+}
+
+static void
+free_atfork(Void_t* mem, const Void_t *caller)
+{
+ Void_t *vptr = NULL;
+ mstate ar_ptr;
+ mchunkptr p; /* chunk corresponding to mem */
+
+ if (mem == 0) /* free(0) has no effect */
+ return;
+
+ p = mem2chunk(mem); /* do not bother to replicate free_check here */
+
+#if HAVE_MMAP
+ if (chunk_is_mmapped(p)) /* release mmapped memory. */
+ {
+ munmap_chunk(p);
+ return;
+ }
+#endif
+
+ ar_ptr = arena_for_chunk(p);
+ tsd_getspecific(arena_key, vptr);
+ if(vptr != ATFORK_ARENA_PTR)
+ (void)mutex_lock(&ar_ptr->mutex);
+ _int_free(ar_ptr, mem);
+ if(vptr != ATFORK_ARENA_PTR)
+ (void)mutex_unlock(&ar_ptr->mutex);
+}
+
+
+
+#ifdef __linux__
+#error __linux__ defined!
+#endif
+
+#endif /* !defined NO_THREADS */
+
+
+
+/* Initialization routine. */
+#ifdef _LIBC
+#error _LIBC is defined, and should not be
+#endif /* _LIBC */
+
+static CVMX_SHARED cvmx_spinlock_t malloc_init_spin_lock;
+
+
+
+
+/* Managing heaps and arenas (for concurrent threads) */
+
+#if USE_ARENAS
+
+#if MALLOC_DEBUG > 1
+
+/* Print the complete contents of a single heap to stderr. */
+
+static void
+#if __STD_C
+dump_heap(heap_info *heap)
+#else
+dump_heap(heap) heap_info *heap;
+#endif
+{
+ char *ptr;
+ mchunkptr p;
+
+ fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
+ ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
+ (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
+ p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
+ ~MALLOC_ALIGN_MASK);
+ for(;;) {
+ fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
+ if(p == top(heap->ar_ptr)) {
+ fprintf(stderr, " (top)\n");
+ break;
+ } else if(p->size == (0|PREV_INUSE)) {
+ fprintf(stderr, " (fence)\n");
+ break;
+ }
+ fprintf(stderr, "\n");
+ p = next_chunk(p);
+ }
+}
+
+#endif /* MALLOC_DEBUG > 1 */
+/* Delete a heap. */
+
+
+static mstate cvmx_new_arena(void *addr, size_t size)
+{
+ mstate a;
+ heap_info *h;
+ char *ptr;
+ unsigned long misalign;
+ int page_mask = malloc_getpagesize - 1;
+
+ debug_printf("cvmx_new_arena called, addr: %p, size %ld\n", addr, size);
+ debug_printf("heapinfo size: %ld, mstate size: %d\n", sizeof(heap_info), sizeof(struct malloc_state));
+
+ if (!addr || (size < HEAP_MIN_SIZE))
+ {
+ return(NULL);
+ }
+ /* We must zero out the arena as the malloc code assumes this. */
+ memset(addr, 0, size);
+
+ h = (heap_info *)addr;
+ h->size = size;
+
+ a = h->ar_ptr = (mstate)(h+1);
+ malloc_init_state(a);
+ /*a->next = NULL;*/
+ a->system_mem = a->max_system_mem = h->size;
+ arena_mem += h->size;
+ a->next = a;
+
+ /* Set up the top chunk, with proper alignment. */
+ ptr = (char *)(a + 1);
+ misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
+ if (misalign > 0)
+ ptr += MALLOC_ALIGNMENT - misalign;
+ top(a) = (mchunkptr)ptr;
+ set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
+
+ return a;
+}
+
+
+int cvmx_add_arena(cvmx_arena_list_t *arena_list, void *ptr, size_t size)
+{
+ mstate a;
+
+ /* Enforce required alignment, and adjust size */
+ int misaligned = ((size_t)ptr) & (MALLOC_ALIGNMENT - 1);
+ if (misaligned)
+ {
+ ptr = (char*)ptr + MALLOC_ALIGNMENT - misaligned;
+ size -= MALLOC_ALIGNMENT - misaligned;
+ }
+
+ debug_printf("Adding arena at addr: %p, size %d\n", ptr, size);
+
+ a = cvmx_new_arena(ptr, size); /* checks ptr and size */
+ if (!a)
+ {
+ return(-1);
+ }
+
+ debug_printf("cmvx_add_arena - arena_list: %p, *arena_list: %p\n", arena_list, *arena_list);
+ debug_printf("cmvx_add_arena - list: %p, new: %p\n", *arena_list, a);
+ mutex_init(&a->mutex);
+ mutex_lock(&a->mutex);
+
+
+ if (*arena_list)
+ {
+ mstate ar_ptr = *arena_list;
+ (void)mutex_lock(&ar_ptr->mutex);
+ a->next = ar_ptr->next; // lock held on a and ar_ptr
+ ar_ptr->next = a;
+ (void)mutex_unlock(&ar_ptr->mutex);
+ }
+ else
+ {
+ *arena_list = a;
+// a->next = a;
+ }
+
+ debug_printf("cvmx_add_arena - list: %p, list->next: %p\n", *arena_list, ((mstate)*arena_list)->next);
+
+ // unlock, since it is not going to be used immediately
+ (void)mutex_unlock(&a->mutex);
+
+ return(0);
+}
+
+
+
+#endif /* USE_ARENAS */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-malloc/arena.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-malloc/malloc.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-malloc/malloc.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-malloc/malloc.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,4107 @@
+/* $MidnightBSD$ */
+/*
+Copyright (c) 2001 Wolfram Gloger
+Copyright (c) 2006 Cavium networks
+
+Permission to use, copy, modify, distribute, and sell this software
+and its documentation for any purpose is hereby granted without fee,
+provided that (i) the above copyright notices and this permission
+notice appear in all copies of the software and related documentation,
+and (ii) the name of Wolfram Gloger may not be used in any advertising
+or publicity relating to the software.
+
+THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
+EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
+WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+
+IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
+INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
+OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ This is a version (aka ptmalloc2) of malloc/free/realloc written by
+ Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
+
+* Version ptmalloc2-20011215
+ $Id: malloc.c 30481 2007-12-05 21:46:59Z rfranz $
+ based on:
+ VERSION 2.7.1pre1 Sat May 12 07:41:21 2001 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+ http://www.malloc.de/malloc/ptmalloc2.tar.gz
+ Check before installing!
+
+* Quickstart
+
+ In order to compile this implementation, a Makefile is provided with
+ the ptmalloc2 distribution, which has pre-defined targets for some
+ popular systems (e.g. "make posix" for Posix threads). All that is
+ typically required with regard to compiler flags is the selection of
+ the thread package via defining one out of USE_PTHREADS, USE_THR or
+ USE_SPROC. Check the thread-m.h file for what effects this has.
+ Many/most systems will additionally require USE_TSD_DATA_HACK to be
+ defined, so this is the default for "make posix".
+
+* Why use this malloc?
+
+ This is not the fastest, most space-conserving, most portable, or
+ most tunable malloc ever written. However it is among the fastest
+ while also being among the most space-conserving, portable and tunable.
+ Consistent balance across these factors results in a good general-purpose
+ allocator for malloc-intensive programs.
+
+ The main properties of the algorithms are:
+ * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
+ with ties normally decided via FIFO (i.e. least recently used).
+ * For small (<= 64 bytes by default) requests, it is a caching
+ allocator, that maintains pools of quickly recycled chunks.
+ * In between, and for combinations of large and small requests, it does
+ the best it can trying to meet both goals at once.
+ * For very large requests (>= 128KB by default), it relies on system
+ memory mapping facilities, if supported.
+
+ For a longer but slightly out of date high-level description, see
+ http://gee.cs.oswego.edu/dl/html/malloc.html
+
+ You may already by default be using a C library containing a malloc
+ that is based on some version of this malloc (for example in
+ linux). You might still want to use the one in this file in order to
+ customize settings or to avoid overheads associated with library
+ versions.
+
+* Contents, described in more detail in "description of public routines" below.
+
+ Standard (ANSI/SVID/...) functions:
+ malloc(size_t n);
+ calloc(size_t n_elements, size_t element_size);
+ free(Void_t* p);
+ realloc(Void_t* p, size_t n);
+ memalign(size_t alignment, size_t n);
+ valloc(size_t n);
+ mallinfo()
+ mallopt(int parameter_number, int parameter_value)
+
+ Additional functions:
+ independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
+ independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
+ pvalloc(size_t n);
+ cfree(Void_t* p);
+ malloc_trim(size_t pad);
+ malloc_usable_size(Void_t* p);
+ malloc_stats();
+
+* Vital statistics:
+
+ Supported pointer representation: 4 or 8 bytes
+ Supported size_t representation: 4 or 8 bytes
+ Note that size_t is allowed to be 4 bytes even if pointers are 8.
+ You can adjust this by defining INTERNAL_SIZE_T
+
+ Alignment: 2 * sizeof(size_t) (default)
+ (i.e., 8 byte alignment with 4byte size_t). This suffices for
+ nearly all current machines and C compilers. However, you can
+ define MALLOC_ALIGNMENT to be wider than this if necessary.
+
+ Minimum overhead per allocated chunk: 4 or 8 bytes
+ Each malloced chunk has a hidden word of overhead holding size
+ and status information.
+
+ Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
+ 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
+
+ When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
+ ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
+ needed; 4 (8) for a trailing size field and 8 (16) bytes for
+ free list pointers. Thus, the minimum allocatable size is
+ 16/24/32 bytes.
+
+ Even a request for zero bytes (i.e., malloc(0)) returns a
+ pointer to something of the minimum allocatable size.
+
+ The maximum overhead wastage (i.e., number of extra bytes
+ allocated than were requested in malloc) is less than or equal
+ to the minimum size, except for requests >= mmap_threshold that
+ are serviced via mmap(), where the worst case wastage is 2 *
+ sizeof(size_t) bytes plus the remainder from a system page (the
+ minimal mmap unit); typically 4096 or 8192 bytes.
+
+ Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
+ 8-byte size_t: 2^64 minus about two pages
+
+ It is assumed that (possibly signed) size_t values suffice to
+ represent chunk sizes. `Possibly signed' is due to the fact
+ that `size_t' may be defined on a system as either a signed or
+ an unsigned type. The ISO C standard says that it must be
+ unsigned, but a few systems are known not to adhere to this.
+ Additionally, even when size_t is unsigned, sbrk (which is by
+ default used to obtain memory from system) accepts signed
+ arguments, and may not be able to handle size_t-wide arguments
+ with negative sign bit. Generally, values that would
+ appear as negative after accounting for overhead and alignment
+ are supported only via mmap(), which does not have this
+ limitation.
+
+ Requests for sizes outside the allowed range will perform an optional
+ failure action and then return null. (Requests may also
+ fail because a system is out of memory.)
+
+ Thread-safety: thread-safe unless NO_THREADS is defined
+
+ Compliance: I believe it is compliant with the 1997 Single Unix Specification
+ (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
+ others as well.
+
+* Synopsis of compile-time options:
+
+ People have reported using previous versions of this malloc on all
+ versions of Unix, sometimes by tweaking some of the defines
+ below. It has been tested most extensively on Solaris and
+ Linux. It is also reported to work on WIN32 platforms.
+ People also report using it in stand-alone embedded systems.
+
+ The implementation is in straight, hand-tuned ANSI C. It is not
+ at all modular. (Sorry!) It uses a lot of macros. To be at all
+ usable, this code should be compiled using an optimizing compiler
+ (for example gcc -O3) that can simplify expressions and control
+ paths. (FAQ: some macros import variables as arguments rather than
+ declare locals because people reported that some debuggers
+ otherwise get confused.)
+
+ OPTION DEFAULT VALUE
+
+ Compilation Environment options:
+
+ __STD_C derived from C compiler defines
+ WIN32 NOT defined
+ HAVE_MEMCPY defined
+ USE_MEMCPY 1 if HAVE_MEMCPY is defined
+ HAVE_MMAP defined as 1
+ MMAP_CLEARS 1
+ HAVE_MREMAP 0 unless linux defined
+ USE_ARENAS the same as HAVE_MMAP
+ malloc_getpagesize derived from system #includes, or 4096 if not
+ HAVE_USR_INCLUDE_MALLOC_H NOT defined
+ LACKS_UNISTD_H NOT defined unless WIN32
+ LACKS_SYS_PARAM_H NOT defined unless WIN32
+ LACKS_SYS_MMAN_H NOT defined unless WIN32
+
+ Changing default word sizes:
+
+ INTERNAL_SIZE_T size_t
+ MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T)
+
+ Configuration and functionality options:
+
+ USE_DL_PREFIX NOT defined
+ USE_PUBLIC_MALLOC_WRAPPERS NOT defined
+ USE_MALLOC_LOCK NOT defined
+ MALLOC_DEBUG NOT defined
+ REALLOC_ZERO_BYTES_FREES 1
+ MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
+ TRIM_FASTBINS 0
+ FIRST_SORTED_BIN_SIZE 512
+
+ Options for customizing MORECORE:
+
+ MORECORE sbrk
+ MORECORE_FAILURE -1
+ MORECORE_CONTIGUOUS 1
+ MORECORE_CANNOT_TRIM NOT defined
+ MORECORE_CLEARS 1
+ MMAP_AS_MORECORE_SIZE (1024 * 1024)
+
+ Tuning options that are also dynamically changeable via mallopt:
+
+ DEFAULT_MXFAST 64
+ DEFAULT_TRIM_THRESHOLD 128 * 1024
+ DEFAULT_TOP_PAD 0
+ DEFAULT_MMAP_THRESHOLD 128 * 1024
+ DEFAULT_MMAP_MAX 65536
+
+ There are several other #defined constants and macros that you
+ probably don't want to touch unless you are extending or adapting malloc. */
+
+/*
+ __STD_C should be nonzero if using ANSI-standard C compiler, a C++
+ compiler, or a C compiler sufficiently close to ANSI to get away
+ with it.
+*/
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-malloc.h"
+
+
+#ifndef __STD_C
+#if defined(__STDC__) || defined(__cplusplus)
+#define __STD_C 1
+#else
+#define __STD_C 0
+#endif
+#endif /*__STD_C*/
+
+
+/*
+ Void_t* is the pointer type that malloc should say it returns
+*/
+
+#ifndef Void_t
+#if 1
+#define Void_t void
+#else
+#define Void_t char
+#endif
+#endif /*Void_t*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
+
+/* #define LACKS_UNISTD_H */
+
+#ifndef LACKS_UNISTD_H
+#include <unistd.h>
+#endif
+
+/* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
+
+/* #define LACKS_SYS_PARAM_H */
+
+
+#include <stdio.h> /* needed for malloc_stats */
+#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
+
+
+/*
+ Debugging:
+
+ Because freed chunks may be overwritten with bookkeeping fields, this
+ malloc will often die when freed memory is overwritten by user
+ programs. This can be very effective (albeit in an annoying way)
+ in helping track down dangling pointers.
+
+ If you compile with -DMALLOC_DEBUG, a number of assertion checks are
+ enabled that will catch more memory errors. You probably won't be
+ able to make much sense of the actual assertion errors, but they
+ should help you locate incorrectly overwritten memory. The checking
+ is fairly extensive, and will slow down execution
+ noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
+ will attempt to check every non-mmapped allocated and free chunk in
+ the course of computing the summaries. (By nature, mmapped regions
+ cannot be checked very much automatically.)
+
+ Setting MALLOC_DEBUG may also be helpful if you are trying to modify
+ this code. The assertions in the check routines spell out in more
+ detail the assumptions and invariants underlying the algorithms.
+
+ Setting MALLOC_DEBUG does NOT provide an automated mechanism for
+ checking that all accesses to malloced memory stay within their
+ bounds. However, there are several add-ons and adaptations of this
+ or other mallocs available that do this.
+*/
+
+#define MALLOC_DEBUG 1
+#if MALLOC_DEBUG
+#include <assert.h>
+#else
+#define assert(x) ((void)0)
+#endif
+
+
+/*
+ INTERNAL_SIZE_T is the word-size used for internal bookkeeping
+ of chunk sizes.
+
+ The default version is the same as size_t.
+
+ While not strictly necessary, it is best to define this as an
+ unsigned type, even if size_t is a signed type. This may avoid some
+ artificial size limitations on some systems.
+
+ On a 64-bit machine, you may be able to reduce malloc overhead by
+ defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
+ expense of not being able to handle more than 2^32 of malloced
+ space. If this limitation is acceptable, you are encouraged to set
+ this unless you are on a platform requiring 16byte alignments. In
+ this case the alignment requirements turn out to negate any
+ potential advantages of decreasing size_t word size.
+
+ Implementors: Beware of the possible combinations of:
+ - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
+ and might be the same width as int or as long
+ - size_t might have different width and signedness as INTERNAL_SIZE_T
+ - int and long might be 32 or 64 bits, and might be the same width
+ To deal with this, most comparisons and difference computations
+ among INTERNAL_SIZE_Ts should cast them to unsigned long, being
+ aware of the fact that casting an unsigned int to a wider long does
+ not sign-extend. (This also makes checking for negative numbers
+ awkward.) Some of these casts result in harmless compiler warnings
+ on some systems.
+*/
+
+#ifndef INTERNAL_SIZE_T
+#define INTERNAL_SIZE_T size_t
+#endif
+
+/* The corresponding word size */
+#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
+
+
+/*
+ MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
+ It must be a power of two at least 2 * SIZE_SZ, even on machines
+ for which smaller alignments would suffice. It may be defined as
+ larger than this though. Note however that code and data structures
+ are optimized for the case of 8-byte alignment.
+*/
+
+
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
+#endif
+
+/* The corresponding bit mask value */
+#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
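+
+/* Sketch: the mask rounds an address up to MALLOC_ALIGNMENT, e.g.
+ *
+ *     ptr = (char *)(((unsigned long)ptr + MALLOC_ALIGN_MASK)
+ *                    & ~MALLOC_ALIGN_MASK);
+ *
+ * the same idiom cvmx_new_arena() in arena.c uses to align the top chunk.
+ */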
+
+
+
+/*
+ REALLOC_ZERO_BYTES_FREES should be set if a call to
+ realloc with zero bytes should be the same as a call to free.
+ This is required by the C standard. Otherwise, since this malloc
+ returns a unique pointer for malloc(0), so does realloc(p, 0).
+*/
+
+#ifndef REALLOC_ZERO_BYTES_FREES
+#define REALLOC_ZERO_BYTES_FREES 1
+#endif
+
+/*
+ TRIM_FASTBINS controls whether free() of a very small chunk can
+ immediately lead to trimming. Setting to true (1) can reduce memory
+ footprint, but will almost always slow down programs that use a lot
+ of small chunks.
+
+ Define this only if you are willing to give up some speed to more
+ aggressively reduce system-level memory footprint when releasing
+ memory in programs that use many small chunks. You can get
+ essentially the same effect by setting MXFAST to 0, but this can
+ lead to even greater slowdowns in programs using many small chunks.
+ TRIM_FASTBINS is an in-between compile-time option, that disables
+ only those chunks bordering topmost memory from being placed in
+ fastbins.
+*/
+
+#ifndef TRIM_FASTBINS
+#define TRIM_FASTBINS 0
+#endif
+
+
+/*
+ USE_DL_PREFIX will prefix all public routines with the string 'dl'.
+ This is necessary when you only want to use this malloc in one part
+ of a program, using your regular system malloc elsewhere.
+*/
+
+#define USE_DL_PREFIX
+
+
+/*
+ Two-phase name translation.
+ All of the actual routines are given mangled names.
+ When wrappers are used, they become the public callable versions.
+ When DL_PREFIX is used, the callable names are prefixed.
+*/
+
+#ifdef USE_DL_PREFIX
+#define public_cALLOc cvmx_calloc
+#define public_fREe cvmx_free
+#define public_cFREe dlcfree
+#define public_mALLOc cvmx_malloc
+#define public_mEMALIGn cvmx_memalign
+#define public_rEALLOc cvmx_realloc
+#define public_vALLOc dlvalloc
+#define public_pVALLOc dlpvalloc
+#define public_mALLINFo dlmallinfo
+#define public_mALLOPt dlmallopt
+#define public_mTRIm dlmalloc_trim
+#define public_mSTATs dlmalloc_stats
+#define public_mUSABLe dlmalloc_usable_size
+#define public_iCALLOc dlindependent_calloc
+#define public_iCOMALLOc dlindependent_comalloc
+#define public_gET_STATe dlget_state
+#define public_sET_STATe dlset_state
+#else /* USE_DL_PREFIX */
+#ifdef _LIBC
+#error _LIBC defined and should not be
+/* Special defines for the GNU C library. */
+#define public_cALLOc __libc_calloc
+#define public_fREe __libc_free
+#define public_cFREe __libc_cfree
+#define public_mALLOc __libc_malloc
+#define public_mEMALIGn __libc_memalign
+#define public_rEALLOc __libc_realloc
+#define public_vALLOc __libc_valloc
+#define public_pVALLOc __libc_pvalloc
+#define public_mALLINFo __libc_mallinfo
+#define public_mALLOPt __libc_mallopt
+#define public_mTRIm __malloc_trim
+#define public_mSTATs __malloc_stats
+#define public_mUSABLe __malloc_usable_size
+#define public_iCALLOc __libc_independent_calloc
+#define public_iCOMALLOc __libc_independent_comalloc
+#define public_gET_STATe __malloc_get_state
+#define public_sET_STATe __malloc_set_state
+#define malloc_getpagesize __getpagesize()
+#define open __open
+#define mmap __mmap
+#define munmap __munmap
+#define mremap __mremap
+#define mprotect __mprotect
+#define MORECORE (*__morecore)
+#define MORECORE_FAILURE 0
+
+Void_t * __default_morecore (ptrdiff_t);
+Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
+
+#else /* !_LIBC */
+#define public_cALLOc calloc
+#define public_fREe free
+#define public_cFREe cfree
+#define public_mALLOc malloc
+#define public_mEMALIGn memalign
+#define public_rEALLOc realloc
+#define public_vALLOc valloc
+#define public_pVALLOc pvalloc
+#define public_mALLINFo mallinfo
+#define public_mALLOPt mallopt
+#define public_mTRIm malloc_trim
+#define public_mSTATs malloc_stats
+#define public_mUSABLe malloc_usable_size
+#define public_iCALLOc independent_calloc
+#define public_iCOMALLOc independent_comalloc
+#define public_gET_STATe malloc_get_state
+#define public_sET_STATe malloc_set_state
+#endif /* _LIBC */
+#endif /* USE_DL_PREFIX */
+
+
+/*
+ HAVE_MEMCPY should be defined if you are not otherwise using
+ ANSI STD C, but still have memcpy and memset in your C library
+ and want to use them in calloc and realloc. Otherwise simple
+ macro versions are defined below.
+
+ USE_MEMCPY should be defined as 1 if you actually want to
+ have memset and memcpy called. People report that the macro
+ versions are faster than libc versions on some systems.
+
+ Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
+ (of <= 36 bytes) are manually unrolled in realloc and calloc.
+*/
+
+#define HAVE_MEMCPY
+
+#ifndef USE_MEMCPY
+#ifdef HAVE_MEMCPY
+#define USE_MEMCPY 1
+#else
+#define USE_MEMCPY 0
+#endif
+#endif
+
+
+#if (__STD_C || defined(HAVE_MEMCPY))
+
+#ifdef WIN32
+/* On Win32 memset and memcpy are already declared in windows.h */
+#else
+#if __STD_C
+void* memset(void*, int, size_t);
+void* memcpy(void*, const void*, size_t);
+#else
+Void_t* memset();
+Void_t* memcpy();
+#endif
+#endif
+#endif
+
+/*
+ MALLOC_FAILURE_ACTION is the action to take before "return 0" when
+ malloc fails to be able to return memory, either because memory is
+ exhausted or because of illegal arguments.
+
+ By default, sets errno if running on STD_C platform, else does nothing.
+*/
+
+#ifndef MALLOC_FAILURE_ACTION
+#if __STD_C
+#define MALLOC_FAILURE_ACTION \
+ errno = ENOMEM;
+
+#else
+#define MALLOC_FAILURE_ACTION
+#endif
+#endif
+
+/*
+ MORECORE-related declarations. By default, rely on sbrk
+*/
+
+
+#ifdef LACKS_UNISTD_H
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+#if __STD_C
+extern Void_t* sbrk(ptrdiff_t);
+#else
+extern Void_t* sbrk();
+#endif
+#endif
+#endif
+
+/*
+ MORECORE is the name of the routine to call to obtain more memory
+ from the system. See below for general guidance on writing
+ alternative MORECORE functions, as well as a version for WIN32 and a
+ sample version for pre-OSX macos.
+*/
+#undef MORECORE // not supported
+#ifndef MORECORE
+#define MORECORE notsupported
+#endif
+
+/*
+ MORECORE_FAILURE is the value returned upon failure of MORECORE
+ as well as mmap. Since it cannot be an otherwise valid memory address,
+ and must reflect values of standard sys calls, you probably ought not
+ try to redefine it.
+*/
+
+#ifndef MORECORE_FAILURE
+#define MORECORE_FAILURE (-1)
+#endif
+
+/*
+ If MORECORE_CONTIGUOUS is true, take advantage of fact that
+ consecutive calls to MORECORE with positive arguments always return
+ contiguous increasing addresses. This is true of unix sbrk. Even
+ if not defined, when regions happen to be contiguous, malloc will
+ permit allocations spanning regions obtained from different
+ calls. But defining this when applicable enables some stronger
+ consistency checks and space efficiencies.
+*/
+
+#ifndef MORECORE_CONTIGUOUS
+#define MORECORE_CONTIGUOUS 0
+#endif
+
+/*
+ Define MORECORE_CANNOT_TRIM if your version of MORECORE
+ cannot release space back to the system when given negative
+ arguments. This is generally necessary only if you are using
+ a hand-crafted MORECORE function that cannot handle negative arguments.
+*/
+
+#define MORECORE_CANNOT_TRIM 1
+
+/* MORECORE_CLEARS (default 1)
+ The degree to which the routine mapped to MORECORE zeroes out
+ memory: never (0), only for newly allocated space (1) or always
+ (2). The distinction between (1) and (2) is necessary because on
+ some systems, if the application first decrements and then
+ increments the break value, the contents of the reallocated space
+ are unspecified.
+*/
+
+#ifndef MORECORE_CLEARS
+#define MORECORE_CLEARS 0
+#endif
+
+
+/*
+ Define HAVE_MMAP as true to optionally make malloc() use mmap() to
+ allocate very large blocks. These will be returned to the
+ operating system immediately after a free(). Also, if mmap
+ is available, it is used as a backup strategy in cases where
+ MORECORE fails to provide space from system.
+
+ This malloc is best tuned to work with mmap for large requests.
+ If you do not have mmap, operations involving very large chunks (1MB
+ or so) may be slower than you'd like.
+*/
+
+#undef HAVE_MMAP
+#ifndef HAVE_MMAP
+#define HAVE_MMAP 0
+
+/*
+ Standard unix mmap using /dev/zero clears memory so calloc doesn't
+ need to.
+*/
+
+#ifndef MMAP_CLEARS
+#define MMAP_CLEARS 0
+#endif
+
+#else /* no mmap */
+#ifndef MMAP_CLEARS
+#define MMAP_CLEARS 0
+#endif
+#endif
+
+
+/*
+ MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
+ sbrk fails, and mmap is used as a backup (which is done only if
+ HAVE_MMAP). The value must be a multiple of page size. This
+ backup strategy generally applies only when systems have "holes" in
+ address space, so sbrk cannot perform contiguous expansion, but
+ there is still space available on system. On systems for which
+ this is known to be useful (i.e. most linux kernels), this occurs
+ only when programs allocate huge amounts of memory. Between this,
+ and the fact that mmap regions tend to be limited, the size should
+ be large, to avoid too many mmap calls and thus avoid running out
+ of kernel resources.
+*/
+
+#ifndef MMAP_AS_MORECORE_SIZE
+#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
+#endif
+
+/*
+ Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
+ large blocks. This is currently only possible on Linux with
+ kernel versions newer than 1.3.77.
+*/
+#undef linux
+#ifndef HAVE_MREMAP
+#ifdef linux
+#define HAVE_MREMAP 1
+#else
+#define HAVE_MREMAP 0
+#endif
+
+#endif /* HAVE_MREMAP */
+
+/* Define USE_ARENAS to enable support for multiple `arenas'. These
+ are allocated using mmap(), are necessary for threads and
+ occasionally useful to overcome address space limitations affecting
+ sbrk(). */
+
+#ifndef USE_ARENAS
+#define USE_ARENAS 1 // we 'manually' mmap the arenas.....
+#endif
+
+
+/*
+ The system page size. To the extent possible, this malloc manages
+ memory from the system in page-size units. Note that this value is
+ cached during initialization into a field of malloc_state. So even
+ if malloc_getpagesize is a function, it is only called once.
+
+ The following mechanics for getpagesize were adapted from bsd/gnu
+ getpagesize.h. If none of the system-probes here apply, a value of
+ 4096 is used, which should be OK: If they don't apply, then using
+ the actual value probably doesn't impact performance.
+*/
+
+
+#define malloc_getpagesize (4096)
+#ifndef malloc_getpagesize
+
+#ifndef LACKS_UNISTD_H
+# include <unistd.h>
+#endif
+
+# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
+# ifndef _SC_PAGE_SIZE
+# define _SC_PAGE_SIZE _SC_PAGESIZE
+# endif
+# endif
+
+# ifdef _SC_PAGE_SIZE
+# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
+# else
+# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+ extern size_t getpagesize();
+# define malloc_getpagesize getpagesize()
+# else
+# ifdef WIN32 /* use supplied emulation of getpagesize */
+# define malloc_getpagesize getpagesize()
+# else
+# ifndef LACKS_SYS_PARAM_H
+# include <sys/param.h>
+# endif
+# ifdef EXEC_PAGESIZE
+# define malloc_getpagesize EXEC_PAGESIZE
+# else
+# ifdef NBPG
+# ifndef CLSIZE
+# define malloc_getpagesize NBPG
+# else
+# define malloc_getpagesize (NBPG * CLSIZE)
+# endif
+# else
+# ifdef NBPC
+# define malloc_getpagesize NBPC
+# else
+# ifdef PAGESIZE
+# define malloc_getpagesize PAGESIZE
+# else /* just guess */
+# define malloc_getpagesize (4096)
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+#endif
+
+/*
+ This version of malloc supports the standard SVID/XPG mallinfo
+ routine that returns a struct containing usage properties and
+ statistics. It should work on any SVID/XPG compliant system that has
+ a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
+ install such a thing yourself, cut out the preliminary declarations
+ as described above and below and save them in a malloc.h file. But
+ there's no compelling reason to bother to do this.)
+
+ The main declaration needed is the mallinfo struct that is returned
+ (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
+ bunch of fields that are not even meaningful in this version of
+ malloc. These fields are instead filled by mallinfo() with
+ other numbers that might be of interest.
+
+ HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
+ /usr/include/malloc.h file that includes a declaration of struct
+ mallinfo. If so, it is included; else an SVID2/XPG2 compliant
+ version is declared below. These must be precisely the same for
+ mallinfo() to work. The original SVID version of this struct,
+ defined on most systems with mallinfo, declares all fields as
+ ints. But some others define them as unsigned long. If your system
+ defines the fields using a type of different width than listed here,
+ you must #include your system version and #define
+ HAVE_USR_INCLUDE_MALLOC_H.
+*/
+
+/* #define HAVE_USR_INCLUDE_MALLOC_H */
+
+#ifdef HAVE_USR_INCLUDE_MALLOC_H
+#include "/usr/include/malloc.h"
+#endif
+
+
+/* ---------- description of public routines ------------ */
+
+/*
+ malloc(size_t n)
+ Returns a pointer to a newly allocated chunk of at least n bytes, or null
+ if no space is available. Additionally, on failure, errno is
+ set to ENOMEM on ANSI C systems.
+
+ If n is zero, malloc returns a minimum-sized chunk. (The minimum
+ size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
+ systems.) On most systems, size_t is an unsigned type, so calls
+ with negative arguments are interpreted as requests for huge amounts
+ of space, which will often fail. The maximum supported value of n
+ differs across systems, but is in all cases less than the maximum
+ representable value of a size_t.
+*/
+#if __STD_C
+Void_t* public_mALLOc(cvmx_arena_list_t arena_list, size_t);
+#else
+Void_t* public_mALLOc();
+#endif
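+
+/*
+  Illustrative sketch (not part of the original sources): typical use of
+  malloc with the failure behavior described above. die() is a
+  hypothetical error handler, as in the examples further below.
+
+    char* buf = (char*) malloc(128);
+    if (buf == 0)
+      die();    // errno has been set to ENOMEM
+*/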
+
+/*
+ free(Void_t* p)
+ Releases the chunk of memory pointed to by p, that had been previously
+ allocated using malloc or a related routine such as realloc.
+ It has no effect if p is null. It can have arbitrary (i.e., bad!)
+ effects if p has already been freed.
+
+ Unless disabled (using mallopt), freeing very large spaces will,
+ when possible, automatically trigger operations that give
+ back unused memory to the system, thus reducing program footprint.
+*/
+#if __STD_C
+void public_fREe(Void_t*);
+#else
+void public_fREe();
+#endif
+
+/*
+ calloc(size_t n_elements, size_t element_size);
+ Returns a pointer to n_elements * element_size bytes, with all locations
+ set to zero.
+*/
+#if __STD_C
+Void_t* public_cALLOc(cvmx_arena_list_t arena_list, size_t, size_t);
+#else
+Void_t* public_cALLOc();
+#endif
+
+/*
+ realloc(Void_t* p, size_t n)
+ Returns a pointer to a chunk of size n that contains the same data
+ as does chunk p up to the minimum of (n, p's size) bytes, or null
+ if no space is available.
+
+ The returned pointer may or may not be the same as p. The algorithm
+ prefers extending p when possible, otherwise it employs the
+ equivalent of a malloc-copy-free sequence.
+
+ If p is null, realloc is equivalent to malloc.
+
+ If space is not available, realloc returns null, errno is set (if on
+ ANSI) and p is NOT freed.
+
+ If n is for fewer bytes than already held by p, the newly unused
+ space is lopped off and freed if possible. Unless the #define
+ REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
+ zero (re)allocates a minimum-sized chunk.
+
+ Large chunks that were internally obtained via mmap will always
+ be reallocated using malloc-copy-free sequences unless
+ the system supports MREMAP (currently only linux).
+
+ The old unix realloc convention of allowing the last-free'd chunk
+ to be used as an argument to realloc is not supported.
+*/
+#if __STD_C
+Void_t* public_rEALLOc(cvmx_arena_list_t arena_list, Void_t*, size_t);
+#else
+Void_t* public_rEALLOc();
+#endif
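+
+/*
+  Illustrative sketch (not part of the original sources): because
+  realloc does NOT free p on failure, assigning its result directly
+  to p would leak the old block. The safe idiom is:
+
+    Void_t* q = realloc(p, newsize);
+    if (q == 0)
+      die();    // p is still valid and still allocated here
+    else
+      p = q;
+*/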
+
+/*
+ memalign(size_t alignment, size_t n);
+ Returns a pointer to a newly allocated chunk of n bytes, aligned
+ in accord with the alignment argument.
+
+ The alignment argument should be a power of two. If the argument is
+ not a power of two, the nearest greater power is used.
+ 8-byte alignment is guaranteed by normal malloc calls, so don't
+ bother calling memalign with an argument of 8 or less.
+
+ Overreliance on memalign is a sure way to fragment space.
+*/
+#if __STD_C
+Void_t* public_mEMALIGn(cvmx_arena_list_t arena_list, size_t, size_t);
+#else
+Void_t* public_mEMALIGn();
+#endif
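+
+/*
+  Illustrative sketch (not part of the original sources): requesting a
+  buffer aligned to a 128-byte boundary, e.g. for a cache line:
+
+    void* p = memalign(128, 4096);
+    assert(((unsigned long)p & 127) == 0);
+*/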
+
+/*
+ valloc(size_t n);
+ Equivalent to memalign(pagesize, n), where pagesize is the page
+ size of the system. If the pagesize is unknown, 4096 is used.
+*/
+#if __STD_C
+Void_t* public_vALLOc(size_t);
+#else
+Void_t* public_vALLOc();
+#endif
+
+
+
+/*
+ mallopt(int parameter_number, int parameter_value)
+ Sets tunable parameters. The format is to provide a
+ (parameter-number, parameter-value) pair. mallopt then sets the
+ corresponding parameter to the argument value if it can (i.e., so
+ long as the value is meaningful), and returns 1 if successful else
+ 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
+ normally defined in malloc.h. Only one of these (M_MXFAST) is used
+ in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
+ so setting them has no effect. But this malloc also supports four
+ other options in mallopt. See below for details. Briefly, supported
+ parameters are as follows (listed defaults are for "typical"
+ configurations).
+
+ Symbol param # default allowed param values
+ M_MXFAST 1 64 0-80 (0 disables fastbins)
+ M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
+ M_TOP_PAD -2 0 any
+ M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
+ M_MMAP_MAX -4 65536 any (0 disables use of mmap)
+*/
+#if __STD_C
+int public_mALLOPt(int, int);
+#else
+int public_mALLOPt();
+#endif
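+
+/*
+  Illustrative sketch (not part of the original sources), using the
+  parameters tabulated above:
+
+    mallopt(M_MXFAST, 0);                  // disable fastbins
+    mallopt(M_TRIM_THRESHOLD, 256*1024);   // trim less eagerly
+    mallopt(M_MMAP_MAX, 0);                // never service requests via mmap
+*/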
+
+
+/*
+ mallinfo()
+ Returns (by copy) a struct containing various summary statistics:
+
+ arena: current total non-mmapped bytes allocated from system
+ ordblks: the number of free chunks
+ smblks: the number of fastbin blocks (i.e., small chunks that
+ have been freed but not yet reused or consolidated)
+ hblks: current number of mmapped regions
+ hblkhd: total bytes held in mmapped regions
+ usmblks: the maximum total allocated space. This will be greater
+ than current total if trimming has occurred.
+ fsmblks: total bytes held in fastbin blocks
+ uordblks: current total allocated space (normal or mmapped)
+ fordblks: total free space
+ keepcost: the maximum number of bytes that could ideally be released
+ back to system via malloc_trim. ("ideally" means that
+ it ignores page restrictions etc.)
+
+ Because these fields are ints, but internal bookkeeping may
+ be kept as longs, the reported values may wrap around zero and
+ thus be inaccurate.
+*/
+#if __STD_C
+struct mallinfo public_mALLINFo(void);
+#else
+struct mallinfo public_mALLINFo();
+#endif
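+
+/*
+  Illustrative sketch (not part of the original sources): reading a few
+  of the fields described above.
+
+    struct mallinfo mi = mallinfo();
+    printf("arena=%d inuse=%d free=%d\n",
+           mi.arena, mi.uordblks, mi.fordblks);
+*/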
+
+/*
+ independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
+
+ independent_calloc is similar to calloc, but instead of returning a
+ single cleared space, it returns an array of pointers to n_elements
+ independent elements that can hold contents of size elem_size, each
+ of which starts out cleared, and can be independently freed,
+ realloc'ed etc. The elements are guaranteed to be adjacently
+ allocated (this is not guaranteed to occur with multiple callocs or
+ mallocs), which may also improve cache locality in some
+ applications.
+
+ The "chunks" argument is optional (i.e., may be null, which is
+ probably the most typical usage). If it is null, the returned array
+ is itself dynamically allocated and should also be freed when it is
+ no longer needed. Otherwise, the chunks array must be of at least
+ n_elements in length. It is filled in with the pointers to the
+ chunks.
+
+ In either case, independent_calloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and "chunks"
+ is null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be individually freed when it is no longer
+ needed. If you'd like to instead be able to free all at once, you
+ should instead use regular calloc and assign pointers into this
+ space to represent elements. (In this case though, you cannot
+ independently free elements.)
+
+ independent_calloc simplifies and speeds up implementations of many
+ kinds of pools. It may also be useful when constructing large data
+ structures that initially have a fixed number of fixed-sized nodes,
+ but the number is not known at compile time, and some of the nodes
+ may later need to be freed. For example:
+
+ struct Node { int item; struct Node* next; };
+
+ struct Node* build_list() {
+ struct Node** pool;
+ int n = read_number_of_nodes_needed();
+ if (n <= 0) return 0;
+    pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
+ if (pool == 0) die();
+ // organize into a linked list...
+ struct Node* first = pool[0];
+    for (int i = 0; i < n-1; ++i)
+ pool[i]->next = pool[i+1];
+ free(pool); // Can now free the array (or not, if it is needed later)
+ return first;
+ }
+*/
+#if __STD_C
+Void_t** public_iCALLOc(size_t, size_t, Void_t**);
+#else
+Void_t** public_iCALLOc();
+#endif
+
+/*
+ independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
+
+ independent_comalloc allocates, all at once, a set of n_elements
+ chunks with sizes indicated in the "sizes" array. It returns
+ an array of pointers to these elements, each of which can be
+ independently freed, realloc'ed etc. The elements are guaranteed to
+ be adjacently allocated (this is not guaranteed to occur with
+ multiple callocs or mallocs), which may also improve cache locality
+ in some applications.
+
+ The "chunks" argument is optional (i.e., may be null). If it is null
+ the returned array is itself dynamically allocated and should also
+ be freed when it is no longer needed. Otherwise, the chunks array
+ must be of at least n_elements in length. It is filled in with the
+ pointers to the chunks.
+
+ In either case, independent_comalloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and chunks is
+ null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be individually freed when it is no longer
+ needed. If you'd like to instead be able to free all at once, you
+ should instead use a single regular malloc, and assign pointers at
+ particular offsets in the aggregate space. (In this case though, you
+ cannot independently free elements.)
+
+ independent_comalloc differs from independent_calloc in that each
+ element may have a different size, and also that it does not
+ automatically clear elements.
+
+ independent_comalloc can be used to speed up allocation in cases
+ where several structs or objects must always be allocated at the
+ same time. For example:
+
+    struct Head { ... };
+    struct Foot { ... };
+
+ void send_message(char* msg) {
+ int msglen = strlen(msg);
+ size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+ void* chunks[3];
+ if (independent_comalloc(3, sizes, chunks) == 0)
+ die();
+ struct Head* head = (struct Head*)(chunks[0]);
+ char* body = (char*)(chunks[1]);
+ struct Foot* foot = (struct Foot*)(chunks[2]);
+ // ...
+ }
+
+ In general though, independent_comalloc is worth using only for
+ larger values of n_elements. For small values, you probably won't
+ detect enough difference from series of malloc calls to bother.
+
+ Overuse of independent_comalloc can increase overall memory usage,
+ since it cannot reuse existing noncontiguous small chunks that
+ might be available for some of the elements.
+*/
+#if __STD_C
+Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
+#else
+Void_t** public_iCOMALLOc();
+#endif
+
+
+/*
+ pvalloc(size_t n);
+ Equivalent to valloc(minimum-page-that-holds(n)), that is,
+ round up n to nearest pagesize.
+ */
+#if __STD_C
+Void_t* public_pVALLOc(size_t);
+#else
+Void_t* public_pVALLOc();
+#endif
+
+/*
+ cfree(Void_t* p);
+ Equivalent to free(p).
+
+ cfree is needed/defined on some systems that pair it with calloc,
+ for odd historical reasons (such as: cfree is used in example
+ code in the first edition of K&R).
+*/
+#if __STD_C
+void public_cFREe(Void_t*);
+#else
+void public_cFREe();
+#endif
+
+/*
+ malloc_trim(size_t pad);
+
+ If possible, gives memory back to the system (via negative
+ arguments to sbrk) if there is unused memory at the `high' end of
+ the malloc pool. You can call this after freeing large blocks of
+ memory to potentially reduce the system-level memory requirements
+ of a program. However, it cannot guarantee to reduce memory. Under
+ some allocation patterns, some large free blocks of memory will be
+ locked between two used chunks, so they cannot be given back to
+ the system.
+
+ The `pad' argument to malloc_trim represents the amount of free
+ trailing space to leave untrimmed. If this argument is zero,
+ only the minimum amount of memory to maintain internal data
+ structures will be left (one page or less). Non-zero arguments
+ can be supplied to maintain enough trailing space to service
+ future expected allocations without having to re-obtain memory
+ from the system.
+
+ Malloc_trim returns 1 if it actually released any memory, else 0.
+ On systems that do not support "negative sbrks", it will always
+ return 0.
+*/
+#if __STD_C
+int public_mTRIm(size_t);
+#else
+int public_mTRIm();
+#endif
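+
+/*
+  Illustrative sketch (not part of the original sources): trimming after
+  a burst of large frees while keeping 64K of slack for future requests.
+  release_large_buffers() is a hypothetical application routine.
+
+    release_large_buffers();
+    if (malloc_trim(64*1024))
+      printf("memory was returned to the system\n");
+*/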
+
+/*
+ malloc_usable_size(Void_t* p);
+
+ Returns the number of bytes you can actually use in
+ an allocated chunk, which may be more than you requested (although
+ often not) due to alignment and minimum size constraints.
+ You can use this many bytes without worrying about
+ overwriting other allocated objects. This is not a particularly great
+ programming practice. malloc_usable_size can be more useful in
+ debugging and assertions, for example:
+
+ p = malloc(n);
+ assert(malloc_usable_size(p) >= 256);
+
+*/
+#if __STD_C
+size_t public_mUSABLe(Void_t*);
+#else
+size_t public_mUSABLe();
+#endif
+
+/*
+ malloc_stats();
+ Prints on stderr the amount of space obtained from the system (both
+ via sbrk and mmap), the maximum amount (which may be more than
+ current if malloc_trim and/or munmap got called), and the current
+ number of bytes allocated via malloc (or realloc, etc) but not yet
+ freed. Note that this is the number of bytes allocated, not the
+ number requested. It will be larger than the number requested
+ because of alignment and bookkeeping overhead. Because it includes
+ alignment wastage as being in use, this figure may be greater than
+ zero even when no user-level chunks are allocated.
+
+ The reported current and maximum system memory can be inaccurate if
+ a program makes other calls to system memory allocation functions
+ (normally sbrk) outside of malloc.
+
+ malloc_stats prints only the most commonly interesting statistics.
+ More information can be obtained by calling mallinfo.
+
+*/
+#if __STD_C
+void public_mSTATs(void);
+#else
+void public_mSTATs();
+#endif
+
+/*
+ malloc_get_state(void);
+
+ Returns the state of all malloc variables in an opaque data
+ structure.
+*/
+#if __STD_C
+Void_t* public_gET_STATe(void);
+#else
+Void_t* public_gET_STATe();
+#endif
+
+/*
+ malloc_set_state(Void_t* state);
+
+ Restore the state of all malloc variables from data obtained with
+ malloc_get_state().
+*/
+#if __STD_C
+int public_sET_STATe(Void_t*);
+#else
+int public_sET_STATe();
+#endif
+
+#ifdef _LIBC
+/*
+ posix_memalign(void **memptr, size_t alignment, size_t size);
+
+ POSIX wrapper like memalign(), checking for validity of size.
+*/
+int __posix_memalign(void **, size_t, size_t);
+#endif
+
+/* mallopt tuning options */
+
+/*
+ M_MXFAST is the maximum request size used for "fastbins", special bins
+ that hold returned chunks without consolidating their spaces. This
+ enables future requests for chunks of the same size to be handled
+ very quickly, but can increase fragmentation, and thus increase the
+ overall memory footprint of a program.
+
+ This malloc manages fastbins very conservatively yet still
+ efficiently, so fragmentation is rarely a problem for values less
+ than or equal to the default. The maximum supported value of MXFAST
+ is 80. You wouldn't want it any higher than this anyway. Fastbins
+ are designed especially for use with many small structs, objects or
+ strings -- the default handles structs/objects/arrays with sizes up
+ to 8 4byte fields, or small strings representing words, tokens,
+ etc. Using fastbins for larger objects normally worsens
+ fragmentation without improving speed.
+
+ M_MXFAST is set in REQUEST size units. It is internally used in
+ chunksize units, which adds padding and alignment. You can reduce
+ M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
+ algorithm to be a closer approximation of fifo-best-fit in all cases,
+ not just for larger requests, but will generally cause it to be
+ slower.
+*/
+
+
+/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
+#ifndef M_MXFAST
+#define M_MXFAST 1
+#endif
+
+#ifndef DEFAULT_MXFAST
+#define DEFAULT_MXFAST 64
+#endif
+
+
+/*
+ M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
+ to keep before releasing via malloc_trim in free().
+
+ Automatic trimming is mainly useful in long-lived programs.
+ Because trimming via sbrk can be slow on some systems, and can
+ sometimes be wasteful (in cases where programs immediately
+ afterward allocate more large chunks) the value should be high
+ enough so that your overall system performance would improve by
+ releasing this much memory.
+
+ The trim threshold and the mmap control parameters (see below)
+ can be traded off with one another. Trimming and mmapping are
+ two different ways of releasing unused memory back to the
+ system. Between these two, it is often possible to keep
+ system-level demands of a long-lived program down to a bare
+ minimum. For example, in one test suite of sessions measuring
+ the XF86 X server on Linux, using a trim threshold of 128K and a
+ mmap threshold of 192K led to near-minimal long term resource
+ consumption.
+
+ If you are using this malloc in a long-lived program, it should
+ pay to experiment with these values. As a rough guide, you
+ might set to a value close to the average size of a process
+ (program) running on your system. Releasing this much memory
+ would allow such a process to run in memory. Generally, it's
+ worth it to tune for trimming rather than memory mapping when a
+ program undergoes phases where several large chunks are
+ allocated and released in ways that can reuse each other's
+ storage, perhaps mixed with phases where there are no such
+ chunks at all. And in well-behaved long-lived programs,
+ controlling release of large blocks via trimming versus mapping
+ is usually faster.
+
+ However, in most programs, these parameters serve mainly as
+ protection against the system-level effects of carrying around
+ massive amounts of unneeded memory. Since frequent calls to
+ sbrk, mmap, and munmap otherwise degrade performance, the default
+ parameters are set to relatively high values that serve only as
+ safeguards.
+
+ The trim value must be greater than the page size to have any useful
+ effect. To disable trimming completely, you can set it to
+ (unsigned long)(-1).
+
+ Trim settings interact with fastbin (MXFAST) settings: Unless
+ TRIM_FASTBINS is defined, automatic trimming never takes place upon
+ freeing a chunk with size less than or equal to MXFAST. Trimming is
+ instead delayed until subsequent freeing of larger chunks. However,
+ you can still force an attempted trim by calling malloc_trim.
+
+ Also, trimming is not generally possible in cases where
+ the main arena is obtained via mmap.
+
+ Note that the trick some people use of mallocing a huge space and
+ then freeing it at program startup, in an attempt to reserve system
+ memory, doesn't have the intended effect under automatic trimming,
+ since that memory will immediately be returned to the system.
+*/
+
+#define M_TRIM_THRESHOLD -1
+
+#ifndef DEFAULT_TRIM_THRESHOLD
+#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
+#endif
+
+/*
+ M_TOP_PAD is the amount of extra `padding' space to allocate or
+ retain whenever sbrk is called. It is used in two ways internally:
+
+ * When sbrk is called to extend the top of the arena to satisfy
+ a new malloc request, this much padding is added to the sbrk
+ request.
+
+ * When malloc_trim is called automatically from free(),
+ it is used as the `pad' argument.
+
+ In both cases, the actual amount of padding is rounded
+ so that the end of the arena is always a system page boundary.
+
+ The main reason for using padding is to avoid calling sbrk so
+ often. Having even a small pad greatly reduces the likelihood
+ that nearly every malloc request during program start-up (or
+ after trimming) will invoke sbrk, which needlessly wastes
+ time.
+
+ Automatic rounding-up to page-size units is normally sufficient
+ to avoid measurable overhead, so the default is 0. However, in
+ systems where sbrk is relatively slow, it can pay to increase
+ this value, at the expense of carrying around more memory than
+ the program needs.
+*/
+
+#define M_TOP_PAD -2
+
+#ifndef DEFAULT_TOP_PAD
+#define DEFAULT_TOP_PAD (0)
+#endif
+
+/*
+ M_MMAP_THRESHOLD is the request size threshold for using mmap()
+ to service a request. Requests of at least this size that cannot
+ be allocated using already-existing space will be serviced via mmap.
+ (If enough normal freed space already exists it is used instead.)
+
+ Using mmap segregates relatively large chunks of memory so that
+ they can be individually obtained and released from the host
+ system. A request serviced through mmap is never reused by any
+ other request (at least not directly; the system may just so
+ happen to remap successive requests to the same locations).
+
+ Segregating space in this way has the benefits that:
+
+ 1. Mmapped space can ALWAYS be individually released back
+ to the system, which helps keep the system level memory
+ demands of a long-lived program low.
+ 2. Mapped memory can never become `locked' between
+ other chunks, as can happen with normally allocated chunks, which
+ means that even trimming via malloc_trim would not release them.
+ 3. On some systems with "holes" in address spaces, mmap can obtain
+ memory that sbrk cannot.
+
+ However, it has the disadvantages that:
+
+ 1. The space cannot be reclaimed, consolidated, and then
+ used to service later requests, as happens with normal chunks.
+ 2. It can lead to more wastage because of mmap page alignment
+ requirements.
+ 3. It causes malloc performance to be more dependent on host
+ system memory management support routines which may vary in
+ implementation quality and may impose arbitrary
+ limitations. Generally, servicing a request via normal
+ malloc steps is faster than going through a system's mmap.
+
+ The advantages of mmap nearly always outweigh disadvantages for
+ "large" chunks, but the value of "large" varies across systems. The
+ default is an empirically derived value that works well in most
+ systems.
+*/
+
+#define M_MMAP_THRESHOLD -3
+
+#ifndef DEFAULT_MMAP_THRESHOLD
+#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
+#endif
+
+/*
+ M_MMAP_MAX is the maximum number of requests to simultaneously
+ service using mmap. This parameter exists because
+ some systems have a limited number of internal tables for
+ use by mmap, and using more than a few of them may degrade
+ performance.
+
+ The default is set to a value that serves only as a safeguard.
+ Setting to 0 disables use of mmap for servicing large requests. If
+ HAVE_MMAP is not set, the default value is 0, and attempts to set it
+ to non-zero values in mallopt will fail.
+*/
+
+#define M_MMAP_MAX -4
+
+#ifndef DEFAULT_MMAP_MAX
+#if HAVE_MMAP
+#define DEFAULT_MMAP_MAX (65536)
+#else
+#define DEFAULT_MMAP_MAX (0)
+#endif
+#endif
+
+#ifdef __cplusplus
+}; /* end of extern "C" */
+#endif
+
+#include <cvmx-spinlock.h>
+#include "malloc.h"
+#include "thread-m.h"
+
+#ifdef DEBUG_PRINTS
+#define debug_printf printf
+#else
+#define debug_printf(format, args...)
+#endif
+
+#ifndef BOUNDED_N
+#define BOUNDED_N(ptr, sz) (ptr)
+#endif
+#ifndef RETURN_ADDRESS
+#define RETURN_ADDRESS(X_) (NULL)
+#endif
+
+/* On some platforms we can compile internal, not exported functions better.
+ Let the environment provide a macro and define it to be empty if it
+ is not available. */
+#ifndef internal_function
+# define internal_function
+#endif
+
+/* Forward declarations. */
+struct malloc_chunk;
+typedef struct malloc_chunk* mchunkptr;
+
+/* Internal routines. */
+
+#if __STD_C
+
+static Void_t* _int_malloc(mstate, size_t);
+static void _int_free(mstate, Void_t*);
+static Void_t* _int_realloc(mstate, Void_t*, size_t);
+static Void_t* _int_memalign(mstate, size_t, size_t);
+static Void_t* _int_valloc(mstate, size_t);
+static Void_t* _int_pvalloc(mstate, size_t);
+static Void_t* cALLOc(cvmx_arena_list_t arena_list, size_t, size_t);
+static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
+static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
+static int mTRIm(size_t);
+static size_t mUSABLe(Void_t*);
+static void mSTATs(void);
+static int mALLOPt(int, int);
+static struct mallinfo mALLINFo(mstate);
+
+static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
+static int internal_function top_check(void);
+static void internal_function munmap_chunk(mchunkptr p);
+#if HAVE_MREMAP
+static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
+#endif
+
+static Void_t* malloc_check(size_t sz, const Void_t *caller);
+static void free_check(Void_t* mem, const Void_t *caller);
+static Void_t* realloc_check(Void_t* oldmem, size_t bytes,
+ const Void_t *caller);
+static Void_t* memalign_check(size_t alignment, size_t bytes,
+ const Void_t *caller);
+#ifndef NO_THREADS
+static Void_t* malloc_starter(size_t sz, const Void_t *caller);
+static void free_starter(Void_t* mem, const Void_t *caller);
+static Void_t* malloc_atfork(size_t sz, const Void_t *caller);
+static void free_atfork(Void_t* mem, const Void_t *caller);
+#endif
+
+#else
+
+Void_t* _int_malloc();
+void _int_free();
+Void_t* _int_realloc();
+Void_t* _int_memalign();
+Void_t* _int_valloc();
+Void_t* _int_pvalloc();
+/*static Void_t* cALLOc();*/
+static Void_t** _int_icalloc();
+static Void_t** _int_icomalloc();
+static int mTRIm();
+static size_t mUSABLe();
+static void mSTATs();
+static int mALLOPt();
+static struct mallinfo mALLINFo();
+
+#endif
+
+
+
+
+/* ------------- Optional versions of memcopy ---------------- */
+
+
+#if USE_MEMCPY
+
+/*
+ Note: memcpy is ONLY invoked with non-overlapping regions,
+ so the (usually slower) memmove is not needed.
+*/
+
+#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
+#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
+
+#else /* !USE_MEMCPY */
+
+/* Use Duff's device for good zeroing/copying performance. */
+
+#define MALLOC_ZERO(charp, nbytes) \
+do { \
+ INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
+ unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
+ long mcn; \
+ if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
+ switch (mctmp) { \
+ case 0: for(;;) { *mzp++ = 0; \
+ case 7: *mzp++ = 0; \
+ case 6: *mzp++ = 0; \
+ case 5: *mzp++ = 0; \
+ case 4: *mzp++ = 0; \
+ case 3: *mzp++ = 0; \
+ case 2: *mzp++ = 0; \
+ case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
+ } \
+} while(0)
+
+#define MALLOC_COPY(dest,src,nbytes) \
+do { \
+ INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
+ INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
+ unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
+ long mcn; \
+ if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
+ switch (mctmp) { \
+ case 0: for(;;) { *mcdst++ = *mcsrc++; \
+ case 7: *mcdst++ = *mcsrc++; \
+ case 6: *mcdst++ = *mcsrc++; \
+ case 5: *mcdst++ = *mcsrc++; \
+ case 4: *mcdst++ = *mcsrc++; \
+ case 3: *mcdst++ = *mcsrc++; \
+ case 2: *mcdst++ = *mcsrc++; \
+ case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
+ } \
+} while(0)
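+
+/*
+  Note on the two macros above (explanatory, not part of the original
+  sources): the switch jumps into the middle of the unrolled loop to
+  dispose of the (nbytes/sizeof(INTERNAL_SIZE_T)) % 8 leftover words
+  first, then the for(;;) handles the remaining groups of 8. The net
+  effect is equivalent to the straightforward word-by-word loop
+
+    while (mctmp-- > 0) *mzp++ = 0;
+
+  but takes roughly one branch per 8 words instead of one per word.
+*/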
+
+#endif
+
+/* ------------------ MMAP support ------------------ */
+
+
+#if HAVE_MMAP
+
+#include <fcntl.h>
+#ifndef LACKS_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+# define MAP_ANONYMOUS MAP_ANON
+#endif
+#if !defined(MAP_FAILED)
+# define MAP_FAILED ((char*)-1)
+#endif
+
+#ifndef MAP_NORESERVE
+# ifdef MAP_AUTORESRV
+# define MAP_NORESERVE MAP_AUTORESRV
+# else
+# define MAP_NORESERVE 0
+# endif
+#endif
+
+/*
+ Nearly all versions of mmap support MAP_ANONYMOUS,
+ so the following is unlikely to be needed, but is
+ supplied just in case.
+*/
+
+#ifndef MAP_ANONYMOUS
+
+static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+
+#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
+ (dev_zero_fd = open("/dev/zero", O_RDWR), \
+ mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
+ mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
+
+#else
+
+#define MMAP(addr, size, prot, flags) \
+ (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
+
+#endif
+
+
+#endif /* HAVE_MMAP */
+
+
+/*
+ ----------------------- Chunk representations -----------------------
+*/
+
+
+/*
+ This struct declaration is misleading (but accurate and necessary).
+ It declares a "view" into memory allowing access to necessary
+ fields at known offsets from a given base. See explanation below.
+*/
+struct malloc_chunk {
+
+ INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
+ INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
+ mstate arena_ptr; /* ptr to arena chunk belongs to */
+
+ struct malloc_chunk* fd; /* double links -- used only if free. */
+ struct malloc_chunk* bk;
+};
+
+
+/*
+ malloc_chunk details:
+
+ (The following includes lightly edited explanations by Colin Plumb.)
+
+ Chunks of memory are maintained using a `boundary tag' method as
+ described in e.g., Knuth or Standish. (See the paper by Paul
+ Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
+ survey of such techniques.) Sizes of free chunks are stored both
+ in the front of each chunk and at the end. This makes
+ consolidating fragmented chunks into bigger chunks very fast. The
+ size fields also hold bits representing whether chunks are free or
+ in use.
+
+ An allocated chunk looks like this:
+
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk, if allocated | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of chunk, in bytes |P|
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | User data starts here... .
+ . .
+ . (malloc_usable_space() bytes) .
+ . |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+ Where "chunk" is the front of the chunk for the purpose of most of
+ the malloc code, but "mem" is the pointer that is returned to the
+ user. "Nextchunk" is the beginning of the next contiguous chunk.
+
+ Chunks always begin on even word boundaries, so the mem portion
+ (which is returned to the user) is also on an even word boundary, and
+ thus at least double-word aligned.
+
+ Free chunks are stored in circular doubly-linked lists, and look like this:
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `head:' | Size of chunk, in bytes |P|
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Forward pointer to next chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Back pointer to previous chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Unused space (may be 0 bytes long) .
+ . .
+ . |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `foot:' | Size of chunk, in bytes |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ The P (PREV_INUSE) bit, stored in the unused low-order bit of the
+ chunk size (which is always a multiple of two words), is an in-use
+ bit for the *previous* chunk. If that bit is *clear*, then the
+ word before the current chunk size contains the previous chunk
+ size, and can be used to find the front of the previous chunk.
+ The very first chunk allocated always has this bit set,
+ preventing access to non-existent (or non-owned) memory. If
+ prev_inuse is set for any given chunk, then you CANNOT determine
+ the size of the previous chunk, and might even get a memory
+ addressing fault when trying to do so.
+
+ Note that the `foot' of the current chunk is actually represented
+ as the prev_size of the NEXT chunk. This makes it easier to
+ deal with alignments etc but can be very confusing when trying
+ to extend or adapt this code.
+
+ The two exceptions to all this are
+
+ 1. The special chunk `top' doesn't bother using the
+ trailing size field since there is no next contiguous chunk
+ that would have to index off it. After initialization, `top'
+ is forced to always exist. If it would become less than
+ MINSIZE bytes long, it is replenished.
+
+ 2. Chunks allocated via mmap, which have the second-lowest-order
+ bit (IS_MMAPPED) set in their size fields. Because they are
+ allocated one-by-one, each must contain its own trailing size field.
+
+*/
+
+/*
+ ---------- Size and alignment checks and conversions ----------
+*/
+
+/* conversion from malloc headers to user pointers, and back */
+/* Added size for pointer to make room for arena_ptr */
+#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ + sizeof(void *)))
+#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ - sizeof(void *)))
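+
+/*
+  Worked example (illustrative; assumes a 32-bit build with
+  SIZE_SZ == 4 and 4-byte pointers): chunk2mem skips
+  2*4 + 4 = 12 bytes -- prev_size, size, and this port's extra
+  arena_ptr field. mem2chunk inverts it exactly, so
+
+    assert(mem2chunk(chunk2mem(p)) == p);
+*/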
+
+/* The smallest possible chunk */
+#define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+
+#define MINSIZE \
+ (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
+
+/* Check if m has acceptable alignment */
+
+#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
+
+
+/*
+ Check if a request is so large that it would wrap around zero when
+ padded and aligned. To simplify some other code, the bound is made
+ low enough so that adding MINSIZE will also not wrap around zero.
+*/
+
+#define REQUEST_OUT_OF_RANGE(req) \
+ ((unsigned long)(req) >= \
+ (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
+
+/* pad request bytes into a usable size -- internal version */
+
+
+/* prev_size field of next chunk is overwritten with data
+** when in use. NOTE - last SIZE_SZ of arena must be left
+** unused for last chunk to use
+*/
+/* Added sizeof(void *) to make room for arena_ptr */
+#define request2size(req) \
+ (((req) + sizeof(void *) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
+ MINSIZE : \
+ ((req) + sizeof(void *) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
+
+/* Same, except also perform argument check */
+
+#define checked_request2size(req, sz) \
+ if (REQUEST_OUT_OF_RANGE(req)) { \
+ MALLOC_FAILURE_ACTION; \
+ return 0; \
+ } \
+ (sz) = request2size(req);
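+
+/*
+  Worked example (illustrative; assumes SIZE_SZ == 4 and
+  MALLOC_ALIGN_MASK == 7): request2size(20) computes
+  (20 + 4 + 4 + 7) & ~7 == 32 -- the user bytes plus the arena_ptr
+  word plus the SIZE_SZ that overlaps the next chunk's prev_size,
+  rounded up to the 8-byte alignment unit.
+*/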
+
+/*
+ --------------- Physical chunk operations ---------------
+*/
+
+
+/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
+#define PREV_INUSE 0x1
+
+/* extract inuse bit of previous chunk */
+#define prev_inuse(p) ((p)->size & PREV_INUSE)
+
+
+/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
+#define IS_MMAPPED 0x2
+
+/* check for mmap()'ed chunk */
+#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
+
+
+
+/*
+ Bits to mask off when extracting size
+
+ Note: IS_MMAPPED is intentionally not masked off from size field in
+ macros for which mmapped chunks should never be seen. This should
+ cause helpful core dumps to occur if it is tried by accident by
+ people extending or adapting this malloc.
+*/
+#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
+
+/* Get size, ignoring use bits */
+#define chunksize(p) ((p)->size & ~(SIZE_BITS))
+
+
+/* Ptr to next physical malloc_chunk. */
+#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
+
+/* Ptr to previous physical malloc_chunk */
+#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
+
+/* Treat space at ptr + offset as a chunk */
+#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
+
+/* extract p's inuse bit */
+#define inuse(p)\
+((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
+
+/* set/clear chunk as being inuse without otherwise disturbing */
+#define set_inuse(p)\
+((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
+
+#define clear_inuse(p)\
+((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
+
+
+/* check/set/clear inuse bits in known places */
+#define inuse_bit_at_offset(p, s)\
+ (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
+
+#define set_inuse_bit_at_offset(p, s)\
+ (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
+
+#define clear_inuse_bit_at_offset(p, s)\
+ (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
+
+
+/* Set size at head, without disturbing its use bit */
+#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
+
+/* Set size/use field */
+#define set_head(p, s) ((p)->size = (s))
+
+/* Set size at footer (only when chunk is not in use) */
+#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
+
+
+/*
+ -------------------- Internal data structures --------------------
+
+ All internal state is held in an instance of malloc_state defined
+ below. There are no other static variables, except in two optional
+ cases:
+ * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
+ * If HAVE_MMAP is true, but mmap doesn't support
+ MAP_ANONYMOUS, a dummy file descriptor for mmap.
+
+ Beware of lots of tricks that minimize the total bookkeeping space
+ requirements. The result is a little over 1K bytes (for 4-byte
+ pointers and size_t).
+*/
+
+/*
+ Bins
+
+ An array of bin headers for free chunks. Each bin is doubly
+ linked. The bins are approximately proportionally (log) spaced.
+ There are a lot of these bins (128). This may look excessive, but
+ works very well in practice. Most bins hold sizes that are
+ unusual as malloc request sizes, but are more usual for fragments
+ and consolidated sets of chunks, which is what these bins hold, so
+ they can be found quickly. All procedures maintain the invariant
+ that no consolidated chunk physically borders another one, so each
+ chunk in a list is known to be preceded and followed by either
+ inuse chunks or the ends of memory.
+
+ Chunks in bins are kept in size order, with ties going to the
+ approximately least recently used chunk. Ordering isn't needed
+ for the small bins, which all contain the same-sized chunks, but
+ facilitates best-fit allocation for larger chunks. These lists
+ are just sequential. Keeping them in order almost never requires
+ enough traversal to warrant using fancier ordered data
+ structures.
+
+ Chunks of the same size are linked with the most
+ recently freed at the front, and allocations are taken from the
+ back. This results in LRU (FIFO) allocation order, which tends
+ to give each chunk an equal opportunity to be consolidated with
+ adjacent freed chunks, resulting in larger free chunks and less
+ fragmentation.
+
+ To simplify use in double-linked lists, each bin header acts
+ as a malloc_chunk. This avoids special-casing for headers.
+ But to conserve space and improve locality, we allocate
+ only the fd/bk pointers of bins, and then use repositioning tricks
+ to treat these as the fields of a malloc_chunk*.
+*/
+
+typedef struct malloc_chunk* mbinptr;
+
+/* addressing -- note that bin_at(0) does not exist */
+#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
+
+/* analog of ++bin */
+#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
+
+/* Reminders about list directionality within bins */
+#define first(b) ((b)->fd)
+#define last(b) ((b)->bk)
+
+/* Take a chunk off a bin list */
+#define unlink(P, BK, FD) { \
+ FD = P->fd; \
+ BK = P->bk; \
+ FD->bk = BK; \
+ BK->fd = FD; \
+}
+
+/*
+ Indexing
+
+ Bins for sizes < 512 bytes contain chunks of all the same size, spaced
+ 8 bytes apart. Larger bins are approximately logarithmically spaced:
+
+ 64 bins of size 8
+ 32 bins of size 64
+ 16 bins of size 512
+ 8 bins of size 4096
+ 4 bins of size 32768
+ 2 bins of size 262144
+ 1 bin of size what's left
+
+ There is actually a little bit of slop in the numbers in bin_index
+ for the sake of speed. This makes no difference elsewhere.
+
+ The bins top out around 1MB because we expect to service large
+ requests via mmap.
+*/
+
+#define NBINS 128
+#define NSMALLBINS 64
+#define SMALLBIN_WIDTH 8
+#define MIN_LARGE_SIZE 512
+
+#define in_smallbin_range(sz) \
+ ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
+
+#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
+
+#define largebin_index(sz) \
+(((((unsigned long)(sz)) >> 6) <= 32)? 56 + (((unsigned long)(sz)) >> 6): \
+ ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
+ ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
+ ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
+ ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
+ 126)
+
+#define bin_index(sz) \
+ ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
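+
+/*
+  Worked examples (illustrative), applying the definitions above:
+
+    bin_index(40)   == smallbin_index(40)   == 40 >> 3        == 5
+    bin_index(512)  == largebin_index(512)  == 56 + (512>>6)  == 64
+    bin_index(4096) == largebin_index(4096) == 91 + (4096>>9) == 99
+*/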
+
+/*
+ FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
+ first bin that is maintained in sorted order. This must
+ be the smallest size corresponding to a given bin.
+
+ Normally, this should be MIN_LARGE_SIZE. But you can weaken
+ best fit guarantees to sometimes speed up malloc by increasing value.
+ Doing this means that malloc may choose a chunk that is
+ non-best-fitting by up to the width of the bin.
+
+ Some useful cutoff values:
+ 512 - all bins sorted
+ 2560 - leaves bins <= 64 bytes wide unsorted
+ 12288 - leaves bins <= 512 bytes wide unsorted
+ 65536 - leaves bins <= 4096 bytes wide unsorted
+ 262144 - leaves bins <= 32768 bytes wide unsorted
+ -1 - no bins sorted (not recommended!)
+*/
+
+#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
+/* #define FIRST_SORTED_BIN_SIZE 65536 */
+
+/*
+ Unsorted chunks
+
+ All remainders from chunk splits, as well as all returned chunks,
+ are first placed in the "unsorted" bin. They are then placed
+ in regular bins after malloc gives them ONE chance to be used before
+ binning. So, basically, the unsorted_chunks list acts as a queue,
+ with chunks being placed on it in free (and malloc_consolidate),
+ and taken off (to be either used or placed in bins) in malloc.
+
+ The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
+ does not have to be taken into account in size comparisons.
+*/
+
+/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
+#define unsorted_chunks(M) (bin_at(M, 1))
+
+/*
+ Top
+
+ The top-most available chunk (i.e., the one bordering the end of
+ available memory) is treated specially. It is never included in
+ any bin, is used only if no other chunk is available, and is
+ released back to the system if it is very large (see
+ M_TRIM_THRESHOLD). Because top initially
+ points to its own bin with initial zero size, thus forcing
+ extension on the first malloc request, we avoid having any special
+ code in malloc to check whether it even exists yet. But we still
+ need to do so when getting memory from system, so we make
+ initial_top treat the bin as a legal but unusable chunk during the
+ interval between initialization and the first call to
+ sYSMALLOc. (This is somewhat delicate, since it relies on
+ the 2 preceding words to be zero during this interval as well.)
+*/
+
+/* Conveniently, the unsorted bin can be used as dummy top on first call */
+#define initial_top(M) (unsorted_chunks(M))
+
+/*
+ Binmap
+
+ To help compensate for the large number of bins, a one-level index
+ structure is used for bin-by-bin searching. `binmap' is a
+ bitvector recording whether bins are definitely empty so they can
+ be skipped over during traversals. The bits are NOT always
+ cleared as soon as bins are empty, but instead only
+ when they are noticed to be empty during traversal in malloc.
+*/
+
+/* Conservatively use 32 bits per map word, even if on 64bit system */
+#define BINMAPSHIFT 5
+#define BITSPERMAP (1U << BINMAPSHIFT)
+#define BINMAPSIZE (NBINS / BITSPERMAP)
+
+#define idx2block(i) ((i) >> BINMAPSHIFT)
+#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
+
+#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
+#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
+#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
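+
+/*
+  Worked example (illustrative): bin 70 lives in map word
+  idx2block(70) == 70 >> 5 == 2, at bit idx2bit(70) == 1U << (70 & 31)
+  == 1U << 6. mark_bin(m,70) sets that bit and get_binmap(m,70) tests it.
+*/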
+
+/*
+ Fastbins
+
+ An array of lists holding recently freed small chunks. Fastbins
+ are not doubly linked. It is faster to single-link them, and
+ since chunks are never removed from the middles of these lists,
+ double linking is not necessary. Also, unlike regular bins, they
+ are not even processed in FIFO order (they use faster LIFO) since
+ ordering doesn't much matter in the transient contexts in which
+ fastbins are normally used.
+
+ Chunks in fastbins keep their inuse bit set, so they cannot
+ be consolidated with other free chunks. malloc_consolidate
+ releases all chunks in fastbins and consolidates them with
+ other free chunks.
+*/
+
+typedef struct malloc_chunk* mfastbinptr;
+
+/* offset 2 to use otherwise unindexable first 2 bins */
+#define fastbin_index(sz) ((int)((((unsigned int)(sz)) >> 3) - 2))
+
+/* The maximum fastbin request size we support */
+#define MAX_FAST_SIZE 80
+
+#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
+
+/*
+ FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
+ that triggers automatic consolidation of possibly-surrounding
+ fastbin chunks. This is a heuristic, so the exact value should not
+ matter too much. It is defined at half the default trim threshold as a
+ compromise heuristic to only attempt consolidation if it is likely
+ to lead to trimming. However, it is not dynamically tunable, since
+ consolidation reduces fragmentation surrounding large chunks even
+ if trimming is not used.
+*/
+
+#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
+
+/*
+ Since the lowest 2 bits in max_fast don't matter in size comparisons,
+ they are used as flags.
+*/
+
+/*
+ FASTCHUNKS_BIT held in max_fast indicates that there are probably
+ some fastbin chunks. It is set true on entering a chunk into any
+ fastbin, and cleared only in malloc_consolidate.
+
+ The truth value is inverted so that have_fastchunks will be true
+ upon startup (since statics are zero-filled), simplifying
+ initialization checks.
+*/
+
+#define FASTCHUNKS_BIT (1U)
+
+#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT) == 0)
+#define clear_fastchunks(M) ((M)->max_fast |= FASTCHUNKS_BIT)
+#define set_fastchunks(M) ((M)->max_fast &= ~FASTCHUNKS_BIT)
+
+/*
+ NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
+ regions. Otherwise, contiguity is exploited in merging together,
+ when possible, results from consecutive MORECORE calls.
+
+ The initial value comes from MORECORE_CONTIGUOUS, but is
+ changed dynamically if mmap is ever used as an sbrk substitute.
+*/
+
+#define NONCONTIGUOUS_BIT (2U)
+
+#define contiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) == 0)
+#define noncontiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) != 0)
+#define set_noncontiguous(M) ((M)->max_fast |= NONCONTIGUOUS_BIT)
+#define set_contiguous(M) ((M)->max_fast &= ~NONCONTIGUOUS_BIT)
+
+/*
+ Set value of max_fast.
+ Use impossibly small value if 0.
+ Precondition: there are no existing fastbin chunks.
+ Setting the value clears fastchunk bit but preserves noncontiguous bit.
+*/
+
+#define set_max_fast(M, s) \
+ (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
+ FASTCHUNKS_BIT | \
+ ((M)->max_fast & NONCONTIGUOUS_BIT)
+
+
+/*
+ ----------- Internal state representation and initialization -----------
+*/
+
+struct malloc_state {
+ /* Serialize access. */
+ mutex_t mutex;
+
+ /* Statistics for locking. Only used if THREAD_STATS is defined. */
+ long stat_lock_direct, stat_lock_loop, stat_lock_wait;
+ long pad0_[1]; /* try to give the mutex its own cacheline */
+
+ /* The maximum chunk size to be eligible for fastbin */
+ INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
+
+ /* Fastbins */
+ mfastbinptr fastbins[NFASTBINS];
+
+ /* Base of the topmost chunk -- not otherwise kept in a bin */
+ mchunkptr top;
+
+ /* The remainder from the most recent split of a small request */
+ mchunkptr last_remainder;
+
+ /* Normal bins packed as described above */
+ mchunkptr bins[NBINS * 2];
+
+ /* Bitmap of bins */
+ unsigned int binmap[BINMAPSIZE];
+
+ /* Linked list */
+ struct malloc_state *next;
+
+ /* Memory allocated from the system in this arena. */
+ INTERNAL_SIZE_T system_mem;
+ INTERNAL_SIZE_T max_system_mem;
+};
+
+struct malloc_par {
+ /* Tunable parameters */
+ unsigned long trim_threshold;
+ INTERNAL_SIZE_T top_pad;
+ INTERNAL_SIZE_T mmap_threshold;
+
+ /* Memory map support */
+ int n_mmaps;
+ int n_mmaps_max;
+ int max_n_mmaps;
+
+ /* Cache malloc_getpagesize */
+ unsigned int pagesize;
+
+ /* Statistics */
+ INTERNAL_SIZE_T mmapped_mem;
+ /*INTERNAL_SIZE_T sbrked_mem;*/
+ /*INTERNAL_SIZE_T max_sbrked_mem;*/
+ INTERNAL_SIZE_T max_mmapped_mem;
+ INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
+
+ /* First address handed out by MORECORE/sbrk. */
+ char* sbrk_base;
+};
+
+/* There are several instances of this struct ("arenas") in this
+ malloc. If you are adapting this malloc in a way that does NOT use
+ a static or mmapped malloc_state, you MUST explicitly zero-fill it
+ before using. This malloc relies on the property that malloc_state
+ is initialized to all zeroes (as is true of C statics). */
+
+
+
+/*
+ Initialize a malloc_state struct.
+
+ This is called only from within malloc_consolidate, which needs to
+ be called in the same contexts anyway. It is never called directly
+ outside of malloc_consolidate because some optimizing compilers try
+ to inline it at all call points, which turns out not to be an
+ optimization at all. (Inlining it in malloc_consolidate is fine though.)
+*/
+
+#if __STD_C
+static void malloc_init_state(mstate av)
+#else
+static void malloc_init_state(av) mstate av;
+#endif
+{
+ int i;
+ mbinptr bin;
+
+ /* Establish circular links for normal bins */
+ for (i = 1; i < NBINS; ++i) {
+ bin = bin_at(av,i);
+ bin->fd = bin->bk = bin;
+ }
+
+ set_noncontiguous(av);
+
+ set_max_fast(av, DEFAULT_MXFAST);
+
+ av->top = initial_top(av);
+}
+
+/*
+ Other internal utilities operating on mstates
+*/
+
+#if __STD_C
+static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
+static void malloc_consolidate(mstate);
+//static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
+#else
+static Void_t* sYSMALLOc();
+static void malloc_consolidate();
+static Void_t** iALLOc();
+#endif
+
+/* ------------------- Support for multiple arenas -------------------- */
+#include "arena.c"
+
+/*
+ Debugging support
+
+ These routines make a number of assertions about the states
+ of data structures that should be true at all times. If any
+ are not true, it's very likely that a user program has somehow
+ trashed memory. (It's also possible that there is a coding error
+ in malloc. In which case, please report it!)
+*/
+
+#if ! MALLOC_DEBUG
+
+#define check_chunk(A,P)
+#define check_free_chunk(A,P)
+#define check_inuse_chunk(A,P)
+#define check_remalloced_chunk(A,P,N)
+#define check_malloced_chunk(A,P,N)
+#define check_malloc_state(A)
+
+#else
+
+#define check_chunk(A,P) do_check_chunk(A,P)
+#define check_free_chunk(A,P) do_check_free_chunk(A,P)
+#define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
+#define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
+#define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
+#define check_malloc_state(A) do_check_malloc_state(A)
+
+/*
+ Properties of all chunks
+*/
+
+#if __STD_C
+static void do_check_chunk(mstate av, mchunkptr p)
+#else
+static void do_check_chunk(av, p) mstate av; mchunkptr p;
+#endif
+{
+ unsigned long sz = chunksize(p);
+ /* min and max possible addresses assuming contiguous allocation */
+ char* max_address = (char*)(av->top) + chunksize(av->top);
+ char* min_address = max_address - av->system_mem;
+
+ if (!chunk_is_mmapped(p)) {
+
+ /* Has legal address ... */
+ if (p != av->top) {
+ if (contiguous(av)) {
+ assert(((char*)p) >= min_address);
+ assert(((char*)p + sz) <= ((char*)(av->top)));
+ }
+ }
+ else {
+ /* top size is always at least MINSIZE */
+ assert((unsigned long)(sz) >= MINSIZE);
+ /* top predecessor always marked inuse */
+ assert(prev_inuse(p));
+ }
+
+ }
+ else {
+#if HAVE_MMAP
+ /* address is outside main heap */
+ if (contiguous(av) && av->top != initial_top(av)) {
+ assert(((char*)p) < min_address || ((char*)p) > max_address);
+ }
+ /* chunk is page-aligned */
+ assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
+ /* mem is aligned */
+ assert(aligned_OK(chunk2mem(p)));
+#else
+ /* force an appropriate assert violation if debug set */
+ assert(!chunk_is_mmapped(p));
+#endif
+ }
+}
+
+/*
+ Properties of free chunks
+*/
+
+#if __STD_C
+static void do_check_free_chunk(mstate av, mchunkptr p)
+#else
+static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
+#endif
+{
+ INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE);
+ mchunkptr next = chunk_at_offset(p, sz);
+
+ do_check_chunk(av, p);
+
+ /* Chunk must claim to be free ... */
+ assert(!inuse(p));
+ assert (!chunk_is_mmapped(p));
+
+ /* Unless a special marker, must have OK fields */
+ if ((unsigned long)(sz) >= MINSIZE)
+ {
+ assert((sz & MALLOC_ALIGN_MASK) == 0);
+ assert(aligned_OK(chunk2mem(p)));
+ /* ... matching footer field */
+ assert(next->prev_size == sz);
+ /* ... and is fully consolidated */
+ assert(prev_inuse(p));
+ assert (next == av->top || inuse(next));
+
+ /* ... and has minimally sane links */
+ assert(p->fd->bk == p);
+ assert(p->bk->fd == p);
+ }
+ else /* markers are always of size SIZE_SZ */
+ assert(sz == SIZE_SZ);
+}
+
+/*
+ Properties of inuse chunks
+*/
+
+#if __STD_C
+static void do_check_inuse_chunk(mstate av, mchunkptr p)
+#else
+static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
+#endif
+{
+ mchunkptr next;
+
+ do_check_chunk(av, p);
+
+ assert(av == arena_for_chunk(p));
+ if (chunk_is_mmapped(p))
+ return; /* mmapped chunks have no next/prev */
+
+ /* Check whether it claims to be in use ... */
+ assert(inuse(p));
+
+ next = next_chunk(p);
+
+ /* ... and is surrounded by OK chunks.
+ Since more things can be checked with free chunks than inuse ones,
+ if an inuse chunk borders them and debug is on, it's worth doing them.
+ */
+ if (!prev_inuse(p)) {
+ /* Note that we cannot even look at prev unless it is not inuse */
+ mchunkptr prv = prev_chunk(p);
+ assert(next_chunk(prv) == p);
+ do_check_free_chunk(av, prv);
+ }
+
+ if (next == av->top) {
+ assert(prev_inuse(next));
+ assert(chunksize(next) >= MINSIZE);
+ }
+ else if (!inuse(next))
+ do_check_free_chunk(av, next);
+}
+
+/*
+ Properties of chunks recycled from fastbins
+*/
+
+#if __STD_C
+static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
+#else
+static void do_check_remalloced_chunk(av, p, s)
+mstate av; mchunkptr p; INTERNAL_SIZE_T s;
+#endif
+{
+ INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE);
+
+ if (!chunk_is_mmapped(p)) {
+ assert(av == arena_for_chunk(p));
+ }
+
+ do_check_inuse_chunk(av, p);
+
+ /* Legal size ... */
+ assert((sz & MALLOC_ALIGN_MASK) == 0);
+ assert((unsigned long)(sz) >= MINSIZE);
+ /* ... and alignment */
+ assert(aligned_OK(chunk2mem(p)));
+ /* chunk is less than MINSIZE more than request */
+ assert((long)(sz) - (long)(s) >= 0);
+ assert((long)(sz) - (long)(s + MINSIZE) < 0);
+}
+
+/*
+ Properties of nonrecycled chunks at the point they are malloced
+*/
+
+#if __STD_C
+static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
+#else
+static void do_check_malloced_chunk(av, p, s)
+mstate av; mchunkptr p; INTERNAL_SIZE_T s;
+#endif
+{
+ /* same as recycled case ... */
+ do_check_remalloced_chunk(av, p, s);
+
+ /*
+ ... plus, must obey implementation invariant that prev_inuse is
+ always true of any allocated chunk; i.e., that each allocated
+ chunk borders either a previously allocated and still in-use
+ chunk, or the base of its memory arena. This is ensured
+    by making all allocations from the `lowest' part of any found
+ chunk. This does not necessarily hold however for chunks
+ recycled via fastbins.
+ */
+
+ assert(prev_inuse(p));
+}
+
+
+/*
+ Properties of malloc_state.
+
+ This may be useful for debugging malloc, as well as detecting user
+  programming errors that somehow write into malloc_state.
+
+ If you are extending or experimenting with this malloc, you can
+ probably figure out how to hack this routine to print out or
+ display chunk addresses, sizes, bins, and other instrumentation.
+*/
+
+static void do_check_malloc_state(mstate av)
+{
+ int i;
+ mchunkptr p;
+ mchunkptr q;
+ mbinptr b;
+ unsigned int binbit;
+ int empty;
+ unsigned int idx;
+ INTERNAL_SIZE_T size;
+ unsigned long total = 0;
+ int max_fast_bin;
+
+ /* internal size_t must be no wider than pointer type */
+ assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
+
+ /* alignment is a power of 2 */
+ assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
+
+ /* cannot run remaining checks until fully initialized */
+ if (av->top == 0 || av->top == initial_top(av))
+ return;
+
+
+ /* properties of fastbins */
+
+ /* max_fast is in allowed range */
+ assert((av->max_fast & ~1) <= request2size(MAX_FAST_SIZE));
+
+ max_fast_bin = fastbin_index(av->max_fast);
+
+ for (i = 0; i < NFASTBINS; ++i) {
+ p = av->fastbins[i];
+
+ /* all bins past max_fast are empty */
+ if (i > max_fast_bin)
+ assert(p == 0);
+
+ while (p != 0) {
+ /* each chunk claims to be inuse */
+ do_check_inuse_chunk(av, p);
+ total += chunksize(p);
+ /* chunk belongs in this bin */
+ assert(fastbin_index(chunksize(p)) == i);
+ p = p->fd;
+ }
+ }
+
+  /* if any chunks were found in fastbins, the fastchunks flag must be
+     set (the converse need not hold: malloc can empty the fastbins
+     without clearing the flag) */
+  if (total != 0)
+    assert(have_fastchunks(av));
+
+ /* check normal bins */
+ for (i = 1; i < NBINS; ++i) {
+ b = bin_at(av,i);
+
+ /* binmap is accurate (except for bin 1 == unsorted_chunks) */
+ if (i >= 2) {
+ binbit = get_binmap(av,i);
+ empty = last(b) == b;
+ if (!binbit)
+ assert(empty);
+ else if (!empty)
+ assert(binbit);
+ }
+
+ for (p = last(b); p != b; p = p->bk) {
+ /* each chunk claims to be free */
+ do_check_free_chunk(av, p);
+ size = chunksize(p);
+ total += size;
+ if (i >= 2) {
+ /* chunk belongs in bin */
+ idx = bin_index(size);
+ assert(idx == (unsigned int)i);
+ /* lists are sorted */
+ if ((unsigned long) size >= (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
+ assert(p->bk == b ||
+ (unsigned long)chunksize(p->bk) >=
+ (unsigned long)chunksize(p));
+ }
+ }
+ /* chunk is followed by a legal chain of inuse chunks */
+ for (q = next_chunk(p);
+ (q != av->top && inuse(q) &&
+ (unsigned long)(chunksize(q)) >= MINSIZE);
+ q = next_chunk(q))
+ do_check_inuse_chunk(av, q);
+ }
+ }
+
+ /* top chunk is OK */
+ check_chunk(av, av->top);
+
+ /* sanity checks for statistics */
+
+
+ assert((unsigned long)(av->system_mem) <=
+ (unsigned long)(av->max_system_mem));
+
+
+}
+#endif
+
+
+
+/* ----------- Routines dealing with system allocation -------------- */
+
+/* No system allocation routines supported */
+
+
+/*------------------------ Public wrappers. --------------------------------*/
+
+
+
+#undef DEBUG_MALLOC
+Void_t*
+public_mALLOc(cvmx_arena_list_t arena_list, size_t bytes)
+{
+ mstate ar_ptr, orig_ar_ptr;
+ Void_t *victim = NULL;
+ static mstate debug_prev_ar; // debug only!
+#ifdef DEBUG_MALLOC
+ int arena_cnt=0;
+#endif
+
+ ar_ptr = arena_list;
+
+ if (!ar_ptr)
+ {
+ return(NULL);
+ }
+
+ if (debug_prev_ar != ar_ptr)
+ {
+ debug_printf("New arena: %p\n", ar_ptr);
+#ifdef CVMX_SPINLOCK_DEBUG
+ cvmx_dprintf("lock wait count for arena: %p is %ld\n", ar_ptr, ar_ptr->mutex.wait_cnt);
+#endif
+ debug_prev_ar = ar_ptr;
+ }
+ orig_ar_ptr = ar_ptr;
+
+ // try to get an arena without contention
+ do
+ {
+#ifdef DEBUG_MALLOC
+ arena_cnt++;
+#endif
+ if (!mutex_trylock(&ar_ptr->mutex))
+ {
+ // we locked it
+ victim = _int_malloc(ar_ptr, bytes);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ if(victim)
+ {
+ break;
+ }
+ }
+ ar_ptr = ar_ptr->next;
+ } while (ar_ptr != orig_ar_ptr);
+
+ // we couldn't get the memory without contention, so try all
+ // arenas. SLOW!
+ if (!victim)
+ {
+ ar_ptr = orig_ar_ptr;
+ do
+ {
+#ifdef DEBUG_MALLOC
+ arena_cnt++;
+#endif
+ mutex_lock(&ar_ptr->mutex);
+ victim = _int_malloc(ar_ptr, bytes);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ if(victim)
+ {
+ break;
+ }
+ ar_ptr = ar_ptr->next;
+ } while (ar_ptr != orig_ar_ptr);
+ }
+
+
+ assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
+ ar_ptr == arena_for_chunk(mem2chunk(victim)));
+
+#ifdef DEBUG_MALLOC
+ if (!victim)
+ {
+ cvmx_dprintf("Malloc failed: size: %ld, arena_cnt: %d\n", bytes, arena_cnt);
+ }
+#endif
+
+ debug_printf("cvmx_malloc(%ld) = %p\n", bytes, victim);
+
+ // remember which arena we last used.....
+ tsd_setspecific(arena_key, (Void_t *)ar_ptr);
+ return victim;
+}
+
+
+
+void
+public_fREe(Void_t* mem)
+{
+ mstate ar_ptr;
+ mchunkptr p; /* chunk corresponding to mem */
+
+ debug_printf("cvmx_free(%p)\n", mem);
+
+
+ if (mem == 0) /* free(0) has no effect */
+ return;
+
+ p = mem2chunk(mem);
+
+
+ ar_ptr = arena_for_chunk(p);
+ assert(ar_ptr);
+#if THREAD_STATS
+ if(!mutex_trylock(&ar_ptr->mutex))
+ ++(ar_ptr->stat_lock_direct);
+ else {
+ (void)mutex_lock(&ar_ptr->mutex);
+ ++(ar_ptr->stat_lock_wait);
+ }
+#else
+ (void)mutex_lock(&ar_ptr->mutex);
+#endif
+ _int_free(ar_ptr, mem);
+ (void)mutex_unlock(&ar_ptr->mutex);
+}
+
+Void_t*
+public_rEALLOc(cvmx_arena_list_t arena_list, Void_t* oldmem, size_t bytes)
+{
+ mstate ar_ptr;
+ INTERNAL_SIZE_T nb; /* padded request size */
+
+ mchunkptr oldp; /* chunk corresponding to oldmem */
+ INTERNAL_SIZE_T oldsize; /* its size */
+
+ Void_t* newp; /* chunk to return */
+
+
+#if REALLOC_ZERO_BYTES_FREES
+ if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
+#endif
+
+ /* realloc of null is supposed to be same as malloc */
+ if (oldmem == 0) return public_mALLOc(arena_list, bytes);
+
+ oldp = mem2chunk(oldmem);
+ oldsize = chunksize(oldp);
+
+ checked_request2size(bytes, nb);
+
+
+ ar_ptr = arena_for_chunk(oldp);
+ (void)mutex_lock(&ar_ptr->mutex);
+
+
+ newp = _int_realloc(ar_ptr, oldmem, bytes);
+
+ (void)mutex_unlock(&ar_ptr->mutex);
+ assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
+ ar_ptr == arena_for_chunk(mem2chunk(newp)));
+ return newp;
+}
+
+#undef DEBUG_MEMALIGN
+Void_t*
+public_mEMALIGn(cvmx_arena_list_t arena_list, size_t alignment, size_t bytes)
+{
+ mstate ar_ptr, orig_ar_ptr;
+ Void_t *p = NULL;
+#ifdef DEBUG_MEMALIGN
+ int arena_cnt=0;
+#endif
+
+
+ /* If need less alignment than we give anyway, just relay to malloc */
+ if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(arena_list, bytes);
+
+ /* Otherwise, ensure that it is at least a minimum chunk size */
+ if (alignment < MINSIZE) alignment = MINSIZE;
+
+
+ ar_ptr = arena_list;
+
+ if (!ar_ptr)
+ {
+ return(NULL);
+ }
+
+ orig_ar_ptr = ar_ptr;
+
+
+ // try to get an arena without contention
+ do
+ {
+
+#ifdef DEBUG_MEMALIGN
+ arena_cnt++;
+#endif
+ if (!mutex_trylock(&ar_ptr->mutex))
+ {
+ // we locked it
+ p = _int_memalign(ar_ptr, alignment, bytes);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ if(p)
+ {
+ break;
+ }
+ }
+ ar_ptr = ar_ptr->next;
+ } while (ar_ptr != orig_ar_ptr);
+
+
+ // we couldn't get the memory without contention, so try all
+ // arenas. SLOW!
+ if (!p)
+ {
+#ifdef DEBUG_MEMALIGN
+ arena_cnt++;
+#endif
+ ar_ptr = orig_ar_ptr;
+ do
+ {
+ mutex_lock(&ar_ptr->mutex);
+ p = _int_memalign(ar_ptr, alignment, bytes);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ if(p)
+ {
+ break;
+ }
+ ar_ptr = ar_ptr->next;
+ } while (ar_ptr != orig_ar_ptr);
+ }
+
+
+ if (p)
+ {
+ assert(ar_ptr == arena_for_chunk(mem2chunk(p)));
+ }
+ else
+ {
+#ifdef DEBUG_MEMALIGN
+ cvmx_dprintf("Memalign failed: align: 0x%x, size: %ld, arena_cnt: %ld\n", alignment, bytes, arena_cnt);
+#endif
+ }
+
+ assert(!p || ar_ptr == arena_for_chunk(mem2chunk(p)));
+ return p;
+}
+
+
+
+Void_t*
+public_cALLOc(cvmx_arena_list_t arena_list, size_t n, size_t elem_size)
+{
+  /* only the request size and result are needed here; zeroing is done
+     with a plain memset below */
+  INTERNAL_SIZE_T sz;
+  Void_t* mem;
+
+
+  /* check for overflow on the multiplication */
+  if (elem_size != 0 && n > ((size_t)-1) / elem_size)
+    return NULL;
+  sz = n * elem_size;
+
+ mem = public_mALLOc(arena_list, sz);
+ if (mem)
+ {
+ memset(mem, 0, sz);
+ }
+
+ return mem;
+}
+
+
+#ifndef _LIBC
+
+void
+public_cFREe(Void_t* m)
+{
+ public_fREe(m);
+}
+
+#endif /* _LIBC */
+
+/*
+ ------------------------------ malloc ------------------------------
+*/
+
+static Void_t*
+_int_malloc(mstate av, size_t bytes)
+{
+ INTERNAL_SIZE_T nb; /* normalized request size */
+ unsigned int idx; /* associated bin index */
+ mbinptr bin; /* associated bin */
+ mfastbinptr* fb; /* associated fastbin */
+
+ mchunkptr victim; /* inspected/selected chunk */
+ INTERNAL_SIZE_T size; /* its size */
+ int victim_index; /* its bin index */
+
+ mchunkptr remainder; /* remainder from a split */
+ unsigned long remainder_size; /* its size */
+
+ unsigned int block; /* bit map traverser */
+ unsigned int bit; /* bit map traverser */
+ unsigned int map; /* current word of binmap */
+
+ mchunkptr fwd; /* misc temp for linking */
+ mchunkptr bck; /* misc temp for linking */
+
+ /*
+ Convert request size to internal form by adding SIZE_SZ bytes
+ overhead plus possibly more to obtain necessary alignment and/or
+ to obtain a size of at least MINSIZE, the smallest allocatable
+ size. Also, checked_request2size traps (returning 0) request sizes
+ that are so large that they wrap around zero when padded and
+ aligned.
+ */
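+
+  /*
+    Worked example (a sketch, assuming SIZE_SZ == 8 and
+    MALLOC_ALIGNMENT == 16, as on a typical 64-bit build): a request
+    of 20 bytes is padded to 20 + 8 = 28 and rounded up to 32, while
+    a request of 1 byte is bumped up to MINSIZE.
+  */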
+
+
+ checked_request2size(bytes, nb);
+
+ /*
+ If the size qualifies as a fastbin, first check corresponding bin.
+ This code is safe to execute even if av is not yet initialized, so we
+ can try it without checking, which saves some time on this fast path.
+ */
+
+ if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
+ fb = &(av->fastbins[(fastbin_index(nb))]);
+ if ( (victim = *fb) != 0) {
+ *fb = victim->fd;
+ check_remalloced_chunk(av, victim, nb);
+ set_arena_for_chunk(victim, av);
+ return chunk2mem(victim);
+ }
+ }
+
+ /*
+ If a small request, check regular bin. Since these "smallbins"
+ hold one size each, no searching within bins is necessary.
+ (For a large request, we need to wait until unsorted chunks are
+ processed to find best fit. But for small ones, fits are exact
+ anyway, so we can check now, which is faster.)
+ */
+
+ if (in_smallbin_range(nb)) {
+ idx = smallbin_index(nb);
+ bin = bin_at(av,idx);
+
+ if ( (victim = last(bin)) != bin) {
+ if (victim == 0) /* initialization check */
+ malloc_consolidate(av);
+ else {
+ bck = victim->bk;
+ set_inuse_bit_at_offset(victim, nb);
+ bin->bk = bck;
+ bck->fd = bin;
+
+ set_arena_for_chunk(victim, av);
+ check_malloced_chunk(av, victim, nb);
+ return chunk2mem(victim);
+ }
+ }
+ }
+
+ /*
+ If this is a large request, consolidate fastbins before continuing.
+ While it might look excessive to kill all fastbins before
+ even seeing if there is space available, this avoids
+ fragmentation problems normally associated with fastbins.
+ Also, in practice, programs tend to have runs of either small or
+ large requests, but less often mixtures, so consolidation is not
+    invoked all that often in most programs. And the programs in which
+    it is called frequently would otherwise tend to fragment.
+ */
+
+ else {
+ idx = largebin_index(nb);
+ if (have_fastchunks(av))
+ malloc_consolidate(av);
+ }
+
+ /*
+    Process recently freed or remaindered chunks, taking one only if
+    it is an exact fit or, for a small request, if it is the remainder from
+ the most recent non-exact fit. Place other traversed chunks in
+ bins. Note that this step is the only place in any routine where
+ chunks are placed in bins.
+
+ The outer loop here is needed because we might not realize until
+ near the end of malloc that we should have consolidated, so must
+ do so and retry. This happens at most once, and only when we would
+ otherwise need to expand memory to service a "small" request.
+ */
+
+ for(;;) {
+
+ while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
+ bck = victim->bk;
+ size = chunksize(victim);
+
+ /*
+ If a small request, try to use last remainder if it is the
+ only chunk in unsorted bin. This helps promote locality for
+ runs of consecutive small requests. This is the only
+ exception to best-fit, and applies only when there is
+ no exact fit for a small chunk.
+ */
+
+ if (in_smallbin_range(nb) &&
+ bck == unsorted_chunks(av) &&
+ victim == av->last_remainder &&
+ (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
+
+ /* split and reattach remainder */
+ remainder_size = size - nb;
+ remainder = chunk_at_offset(victim, nb);
+ unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
+ av->last_remainder = remainder;
+ remainder->bk = remainder->fd = unsorted_chunks(av);
+
+ set_head(victim, nb | PREV_INUSE);
+ set_head(remainder, remainder_size | PREV_INUSE);
+ set_foot(remainder, remainder_size);
+
+ set_arena_for_chunk(victim, av);
+ check_malloced_chunk(av, victim, nb);
+ return chunk2mem(victim);
+ }
+
+ /* remove from unsorted list */
+ unsorted_chunks(av)->bk = bck;
+ bck->fd = unsorted_chunks(av);
+
+ /* Take now instead of binning if exact fit */
+
+ if (size == nb) {
+ set_inuse_bit_at_offset(victim, size);
+ set_arena_for_chunk(victim, av);
+ check_malloced_chunk(av, victim, nb);
+ return chunk2mem(victim);
+ }
+
+ /* place chunk in bin */
+
+ if (in_smallbin_range(size)) {
+ victim_index = smallbin_index(size);
+ bck = bin_at(av, victim_index);
+ fwd = bck->fd;
+ }
+ else {
+ victim_index = largebin_index(size);
+ bck = bin_at(av, victim_index);
+ fwd = bck->fd;
+
+ if (fwd != bck) {
+ /* if smaller than smallest, place first */
+ if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
+ fwd = bck;
+ bck = bck->bk;
+ }
+ else if ((unsigned long)(size) >=
+ (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
+
+ /* maintain large bins in sorted order */
+ size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
+ while ((unsigned long)(size) < (unsigned long)(fwd->size)) {
+ fwd = fwd->fd;
+ }
+ bck = fwd->bk;
+ }
+ }
+ }
+
+ mark_bin(av, victim_index);
+ victim->bk = bck;
+ victim->fd = fwd;
+ fwd->bk = victim;
+ bck->fd = victim;
+ }
+
+ /*
+ If a large request, scan through the chunks of current bin in
+ sorted order to find smallest that fits. This is the only step
+ where an unbounded number of chunks might be scanned without doing
+ anything useful with them. However the lists tend to be short.
+ */
+
+ if (!in_smallbin_range(nb)) {
+ bin = bin_at(av, idx);
+
+ for (victim = last(bin); victim != bin; victim = victim->bk) {
+ size = chunksize(victim);
+
+ if ((unsigned long)(size) >= (unsigned long)(nb)) {
+ remainder_size = size - nb;
+ unlink(victim, bck, fwd);
+
+ /* Exhaust */
+ if (remainder_size < MINSIZE) {
+ set_inuse_bit_at_offset(victim, size);
+ set_arena_for_chunk(victim, av);
+ check_malloced_chunk(av, victim, nb);
+ return chunk2mem(victim);
+ }
+ /* Split */
+ else {
+ remainder = chunk_at_offset(victim, nb);
+ unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
+ remainder->bk = remainder->fd = unsorted_chunks(av);
+ set_head(victim, nb | PREV_INUSE);
+ set_head(remainder, remainder_size | PREV_INUSE);
+ set_foot(remainder, remainder_size);
+ set_arena_for_chunk(victim, av);
+ check_malloced_chunk(av, victim, nb);
+ return chunk2mem(victim);
+ }
+ }
+ }
+ }
+
+ /*
+ Search for a chunk by scanning bins, starting with next largest
+ bin. This search is strictly by best-fit; i.e., the smallest
+ (with ties going to approximately the least recently used) chunk
+ that fits is selected.
+
+ The bitmap avoids needing to check that most blocks are nonempty.
+ The particular case of skipping all bins during warm-up phases
+ when no chunks have been returned yet is faster than it might look.
+ */
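+
+    /*
+      Index arithmetic sketch (assuming BINMAPSHIFT == 5, i.e. 32 bins
+      per binmap word): bin index 70 lives in block 70 >> 5 == 2 and is
+      tested with bit 1 << (70 & 31) == 1 << 6 within av->binmap[2].
+    */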
+
+ ++idx;
+ bin = bin_at(av,idx);
+ block = idx2block(idx);
+ map = av->binmap[block];
+ bit = idx2bit(idx);
+
+ for (;;) {
+
+ /* Skip rest of block if there are no more set bits in this block. */
+ if (bit > map || bit == 0) {
+ do {
+ if (++block >= BINMAPSIZE) /* out of bins */
+ goto use_top;
+ } while ( (map = av->binmap[block]) == 0);
+
+ bin = bin_at(av, (block << BINMAPSHIFT));
+ bit = 1;
+ }
+
+ /* Advance to bin with set bit. There must be one. */
+ while ((bit & map) == 0) {
+ bin = next_bin(bin);
+ bit <<= 1;
+ assert(bit != 0);
+ }
+
+ /* Inspect the bin. It is likely to be non-empty */
+ victim = last(bin);
+
+ /* If a false alarm (empty bin), clear the bit. */
+ if (victim == bin) {
+ av->binmap[block] = map &= ~bit; /* Write through */
+ bin = next_bin(bin);
+ bit <<= 1;
+ }
+
+ else {
+ size = chunksize(victim);
+
+ /* We know the first chunk in this bin is big enough to use. */
+ assert((unsigned long)(size) >= (unsigned long)(nb));
+
+ remainder_size = size - nb;
+
+ /* unlink */
+ bck = victim->bk;
+ bin->bk = bck;
+ bck->fd = bin;
+
+ /* Exhaust */
+ if (remainder_size < MINSIZE) {
+ set_inuse_bit_at_offset(victim, size);
+ set_arena_for_chunk(victim, av);
+ check_malloced_chunk(av, victim, nb);
+ return chunk2mem(victim);
+ }
+
+ /* Split */
+ else {
+ remainder = chunk_at_offset(victim, nb);
+
+ unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
+ remainder->bk = remainder->fd = unsorted_chunks(av);
+ /* advertise as last remainder */
+ if (in_smallbin_range(nb))
+ av->last_remainder = remainder;
+
+ set_head(victim, nb | PREV_INUSE);
+ set_head(remainder, remainder_size | PREV_INUSE);
+ set_foot(remainder, remainder_size);
+ set_arena_for_chunk(victim, av);
+ check_malloced_chunk(av, victim, nb);
+ return chunk2mem(victim);
+ }
+ }
+ }
+
+ use_top:
+ /*
+ If large enough, split off the chunk bordering the end of memory
+ (held in av->top). Note that this is in accord with the best-fit
+ search rule. In effect, av->top is treated as larger (and thus
+ less well fitting) than any other available chunk since it can
+ be extended to be as large as necessary (up to system
+ limitations).
+
+ We require that av->top always exists (i.e., has size >=
+ MINSIZE) after initialization, so if it would otherwise be
+    exhausted by the current request, it is replenished. (The main
+ reason for ensuring it exists is that we may need MINSIZE space
+ to put in fenceposts in sysmalloc.)
+ */
+
+ victim = av->top;
+ size = chunksize(victim);
+
+ if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
+ remainder_size = size - nb;
+ remainder = chunk_at_offset(victim, nb);
+ av->top = remainder;
+ set_head(victim, nb | PREV_INUSE);
+ set_head(remainder, remainder_size | PREV_INUSE);
+
+ set_arena_for_chunk(victim, av);
+ check_malloced_chunk(av, victim, nb);
+ return chunk2mem(victim);
+ }
+
+ /*
+ If there is space available in fastbins, consolidate and retry,
+ to possibly avoid expanding memory. This can occur only if nb is
+ in smallbin range so we didn't consolidate upon entry.
+ */
+
+ else if (have_fastchunks(av)) {
+ assert(in_smallbin_range(nb));
+ malloc_consolidate(av);
+ idx = smallbin_index(nb); /* restore original bin index */
+ }
+
+ /*
+ Otherwise, relay to handle system-dependent cases
+ */
+ else
+ return(NULL); // sysmalloc not supported
+ }
+}
+
+/*
+ ------------------------------ free ------------------------------
+*/
+
+static void
+_int_free(mstate av, Void_t* mem)
+{
+ mchunkptr p; /* chunk corresponding to mem */
+ INTERNAL_SIZE_T size; /* its size */
+ mfastbinptr* fb; /* associated fastbin */
+ mchunkptr nextchunk; /* next contiguous chunk */
+ INTERNAL_SIZE_T nextsize; /* its size */
+ int nextinuse; /* true if nextchunk is used */
+ INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
+ mchunkptr bck; /* misc temp for linking */
+ mchunkptr fwd; /* misc temp for linking */
+
+
+ /* free(0) has no effect */
+ if (mem != 0) {
+ p = mem2chunk(mem);
+ size = chunksize(p);
+
+ check_inuse_chunk(av, p);
+
+ /*
+ If eligible, place chunk on a fastbin so it can be found
+ and used quickly in malloc.
+ */
+
+ if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
+
+#if TRIM_FASTBINS
+ /*
+ If TRIM_FASTBINS set, don't place chunks
+ bordering top into fastbins
+ */
+ && (chunk_at_offset(p, size) != av->top)
+#endif
+ ) {
+
+ set_fastchunks(av);
+ fb = &(av->fastbins[fastbin_index(size)]);
+ p->fd = *fb;
+ *fb = p;
+ }
+
+ /*
+ Consolidate other non-mmapped chunks as they arrive.
+ */
+
+ else if (!chunk_is_mmapped(p)) {
+ nextchunk = chunk_at_offset(p, size);
+ nextsize = chunksize(nextchunk);
+ assert(nextsize > 0);
+
+ /* consolidate backward */
+ if (!prev_inuse(p)) {
+ prevsize = p->prev_size;
+ size += prevsize;
+ p = chunk_at_offset(p, -((long) prevsize));
+ unlink(p, bck, fwd);
+ }
+
+ if (nextchunk != av->top) {
+ /* get and clear inuse bit */
+ nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+
+ /* consolidate forward */
+ if (!nextinuse) {
+ unlink(nextchunk, bck, fwd);
+ size += nextsize;
+ } else
+ clear_inuse_bit_at_offset(nextchunk, 0);
+
+ /*
+ Place the chunk in unsorted chunk list. Chunks are
+ not placed into regular bins until after they have
+ been given one chance to be used in malloc.
+ */
+
+ bck = unsorted_chunks(av);
+ fwd = bck->fd;
+ p->bk = bck;
+ p->fd = fwd;
+ bck->fd = p;
+ fwd->bk = p;
+
+ set_head(p, size | PREV_INUSE);
+ set_foot(p, size);
+
+ check_free_chunk(av, p);
+ }
+
+ /*
+ If the chunk borders the current high end of memory,
+ consolidate into top
+ */
+
+ else {
+ size += nextsize;
+ set_head(p, size | PREV_INUSE);
+ av->top = p;
+ check_chunk(av, p);
+ }
+
+ /*
+ If freeing a large space, consolidate possibly-surrounding
+ chunks. Then, if the total unused topmost memory exceeds trim
+ threshold, ask malloc_trim to reduce top.
+
+ Unless max_fast is 0, we don't know if there are fastbins
+ bordering top, so we cannot tell for sure whether threshold
+ has been reached unless fastbins are consolidated. But we
+ don't want to consolidate on each free. As a compromise,
+ consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
+ is reached.
+ */
+
+ if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
+ if (have_fastchunks(av))
+ malloc_consolidate(av);
+ }
+ }
+ }
+}
+
+/*
+ ------------------------- malloc_consolidate -------------------------
+
+ malloc_consolidate is a specialized version of free() that tears
+ down chunks held in fastbins. Free itself cannot be used for this
+ purpose since, among other things, it might place chunks back onto
+ fastbins. So, instead, we need to use a minor variant of the same
+ code.
+
+ Also, because this routine needs to be called the first time through
+ malloc anyway, it turns out to be the perfect place to trigger
+ initialization code.
+*/
+
+#if __STD_C
+static void malloc_consolidate(mstate av)
+#else
+static void malloc_consolidate(av) mstate av;
+#endif
+{
+ mfastbinptr* fb; /* current fastbin being consolidated */
+ mfastbinptr* maxfb; /* last fastbin (for loop control) */
+ mchunkptr p; /* current chunk being consolidated */
+ mchunkptr nextp; /* next chunk to consolidate */
+ mchunkptr unsorted_bin; /* bin header */
+ mchunkptr first_unsorted; /* chunk to link to */
+
+ /* These have same use as in free() */
+ mchunkptr nextchunk;
+ INTERNAL_SIZE_T size;
+ INTERNAL_SIZE_T nextsize;
+ INTERNAL_SIZE_T prevsize;
+ int nextinuse;
+ mchunkptr bck;
+ mchunkptr fwd;
+
+ /*
+ If max_fast is 0, we know that av hasn't
+ yet been initialized, in which case do so below
+ */
+
+ if (av->max_fast != 0) {
+ clear_fastchunks(av);
+
+ unsorted_bin = unsorted_chunks(av);
+
+ /*
+ Remove each chunk from fast bin and consolidate it, placing it
+ then in unsorted bin. Among other reasons for doing this,
+ placing in unsorted bin avoids needing to calculate actual bins
+ until malloc is sure that chunks aren't immediately going to be
+ reused anyway.
+ */
+
+ maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
+ fb = &(av->fastbins[0]);
+ do {
+ if ( (p = *fb) != 0) {
+ *fb = 0;
+
+ do {
+ check_inuse_chunk(av, p);
+ nextp = p->fd;
+
+ /* Slightly streamlined version of consolidation code in free() */
+ size = p->size & ~(PREV_INUSE);
+ nextchunk = chunk_at_offset(p, size);
+ nextsize = chunksize(nextchunk);
+
+ if (!prev_inuse(p)) {
+ prevsize = p->prev_size;
+ size += prevsize;
+ p = chunk_at_offset(p, -((long) prevsize));
+ unlink(p, bck, fwd);
+ }
+
+ if (nextchunk != av->top) {
+ nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+
+ if (!nextinuse) {
+ size += nextsize;
+ unlink(nextchunk, bck, fwd);
+ } else
+ clear_inuse_bit_at_offset(nextchunk, 0);
+
+ first_unsorted = unsorted_bin->fd;
+ unsorted_bin->fd = p;
+ first_unsorted->bk = p;
+
+ set_head(p, size | PREV_INUSE);
+ p->bk = unsorted_bin;
+ p->fd = first_unsorted;
+ set_foot(p, size);
+ }
+
+ else {
+ size += nextsize;
+ set_head(p, size | PREV_INUSE);
+ av->top = p;
+ }
+
+ } while ( (p = nextp) != 0);
+
+ }
+ } while (fb++ != maxfb);
+ }
+ else {
+ malloc_init_state(av);
+ check_malloc_state(av);
+ }
+}
+
+/*
+ ------------------------------ realloc ------------------------------
+*/
+
+static Void_t*
+_int_realloc(mstate av, Void_t* oldmem, size_t bytes)
+{
+ INTERNAL_SIZE_T nb; /* padded request size */
+
+ mchunkptr oldp; /* chunk corresponding to oldmem */
+ INTERNAL_SIZE_T oldsize; /* its size */
+
+ mchunkptr newp; /* chunk to return */
+ INTERNAL_SIZE_T newsize; /* its size */
+ Void_t* newmem; /* corresponding user mem */
+
+ mchunkptr next; /* next contiguous chunk after oldp */
+
+ mchunkptr remainder; /* extra space at end of newp */
+ unsigned long remainder_size; /* its size */
+
+ mchunkptr bck; /* misc temp for linking */
+ mchunkptr fwd; /* misc temp for linking */
+
+ unsigned long copysize; /* bytes to copy */
+ unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
+ INTERNAL_SIZE_T* s; /* copy source */
+ INTERNAL_SIZE_T* d; /* copy destination */
+
+
+#if REALLOC_ZERO_BYTES_FREES
+ if (bytes == 0) {
+ _int_free(av, oldmem);
+ return 0;
+ }
+#endif
+
+ /* realloc of null is supposed to be same as malloc */
+ if (oldmem == 0) return _int_malloc(av, bytes);
+
+ checked_request2size(bytes, nb);
+
+ oldp = mem2chunk(oldmem);
+ oldsize = chunksize(oldp);
+
+ check_inuse_chunk(av, oldp);
+
+  // this port never creates mmapped chunks, so always take the
+  // non-mmapped path
+  if (1) {
+
+ if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
+ /* already big enough; split below */
+ newp = oldp;
+ newsize = oldsize;
+ }
+
+ else {
+ next = chunk_at_offset(oldp, oldsize);
+
+ /* Try to expand forward into top */
+ if (next == av->top &&
+ (unsigned long)(newsize = oldsize + chunksize(next)) >=
+ (unsigned long)(nb + MINSIZE)) {
+ set_head_size(oldp, nb );
+ av->top = chunk_at_offset(oldp, nb);
+ set_head(av->top, (newsize - nb) | PREV_INUSE);
+ check_inuse_chunk(av, oldp);
+ set_arena_for_chunk(oldp, av);
+ return chunk2mem(oldp);
+ }
+
+ /* Try to expand forward into next chunk; split off remainder below */
+ else if (next != av->top &&
+ !inuse(next) &&
+ (unsigned long)(newsize = oldsize + chunksize(next)) >=
+ (unsigned long)(nb)) {
+ newp = oldp;
+ unlink(next, bck, fwd);
+ }
+
+ /* allocate, copy, free */
+ else {
+ newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
+ if (newmem == 0)
+ return 0; /* propagate failure */
+
+ newp = mem2chunk(newmem);
+ newsize = chunksize(newp);
+
+ /*
+ Avoid copy if newp is next chunk after oldp.
+ */
+ if (newp == next) {
+ newsize += oldsize;
+ newp = oldp;
+ }
+ else {
+ /*
+ Unroll copy of <= 36 bytes (72 if 8byte sizes)
+ We know that contents have an odd number of
+ INTERNAL_SIZE_T-sized words; minimally 3.
+ */
+
+ copysize = oldsize - SIZE_SZ;
+ s = (INTERNAL_SIZE_T*)(oldmem);
+ d = (INTERNAL_SIZE_T*)(newmem);
+ ncopies = copysize / sizeof(INTERNAL_SIZE_T);
+ assert(ncopies >= 3);
+
+ if (ncopies > 9)
+ MALLOC_COPY(d, s, copysize);
+
+ else {
+ *(d+0) = *(s+0);
+ *(d+1) = *(s+1);
+ *(d+2) = *(s+2);
+ if (ncopies > 4) {
+ *(d+3) = *(s+3);
+ *(d+4) = *(s+4);
+ if (ncopies > 6) {
+ *(d+5) = *(s+5);
+ *(d+6) = *(s+6);
+ if (ncopies > 8) {
+ *(d+7) = *(s+7);
+ *(d+8) = *(s+8);
+ }
+ }
+ }
+ }
+
+ _int_free(av, oldmem);
+ set_arena_for_chunk(newp, av);
+ check_inuse_chunk(av, newp);
+ return chunk2mem(newp);
+ }
+ }
+ }
+
+ /* If possible, free extra space in old or extended chunk */
+
+ assert((unsigned long)(newsize) >= (unsigned long)(nb));
+
+ remainder_size = newsize - nb;
+
+ if (remainder_size < MINSIZE) { /* not enough extra to split off */
+ set_head_size(newp, newsize);
+ set_inuse_bit_at_offset(newp, newsize);
+ }
+ else { /* split remainder */
+ remainder = chunk_at_offset(newp, nb);
+ set_head_size(newp, nb );
+ set_head(remainder, remainder_size | PREV_INUSE );
+ /* Mark remainder as inuse so free() won't complain */
+ set_inuse_bit_at_offset(remainder, remainder_size);
+ set_arena_for_chunk(remainder, av);
+ _int_free(av, chunk2mem(remainder));
+ }
+
+ set_arena_for_chunk(newp, av);
+ check_inuse_chunk(av, newp);
+ return chunk2mem(newp);
+ }
+
+ /*
+ Handle mmap cases
+ */
+
+ else {
+ /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
+ check_malloc_state(av);
+ MALLOC_FAILURE_ACTION;
+ return 0;
+ }
+}
+
+/*
+ ------------------------------ memalign ------------------------------
+*/
+
+static Void_t*
+_int_memalign(mstate av, size_t alignment, size_t bytes)
+{
+ INTERNAL_SIZE_T nb; /* padded request size */
+ char* m; /* memory returned by malloc call */
+ mchunkptr p; /* corresponding chunk */
+ char* brk; /* alignment point within p */
+ mchunkptr newp; /* chunk to return */
+ INTERNAL_SIZE_T newsize; /* its size */
+ INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
+ mchunkptr remainder; /* spare room at end to split off */
+ unsigned long remainder_size; /* its size */
+ INTERNAL_SIZE_T size;
+
+ /* If need less alignment than we give anyway, just relay to malloc */
+
+ if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
+
+ /* Otherwise, ensure that it is at least a minimum chunk size */
+
+ if (alignment < MINSIZE) alignment = MINSIZE;
+
+ /* Make sure alignment is power of 2 (in case MINSIZE is not). */
+ if ((alignment & (alignment - 1)) != 0) {
+ size_t a = MALLOC_ALIGNMENT * 2;
+ while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
+ alignment = a;
+ }
+
+ checked_request2size(bytes, nb);
+
+ /*
+ Strategy: find a spot within that chunk that meets the alignment
+ request, and then possibly free the leading and trailing space.
+ */
+
+
+ /* Call malloc with worst case padding to hit alignment. */
+
+ m = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
+
+ if (m == 0) return 0; /* propagate failure */
+
+ p = mem2chunk(m);
+
+ if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
+
+ /*
+ Find an aligned spot inside chunk. Since we need to give back
+ leading space in a chunk of at least MINSIZE, if the first
+ calculation places us at a spot with less than MINSIZE leader,
+ we can move to the next aligned spot -- we've allocated enough
+ total room so that this is always possible.
+ */
+
+ brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
+ -((signed long) alignment));
+ if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
+ brk += alignment;
+
+ newp = (mchunkptr)brk;
+ leadsize = brk - (char*)(p);
+ newsize = chunksize(p) - leadsize;
+
+ /* For mmapped chunks, just adjust offset */
+ if (chunk_is_mmapped(p)) {
+ newp->prev_size = p->prev_size + leadsize;
+ set_head(newp, newsize|IS_MMAPPED);
+ set_arena_for_chunk(newp, av);
+ return chunk2mem(newp);
+ }
+
+ /* Otherwise, give back leader, use the rest */
+ set_head(newp, newsize | PREV_INUSE );
+ set_inuse_bit_at_offset(newp, newsize);
+ set_head_size(p, leadsize);
+ set_arena_for_chunk(p, av);
+ _int_free(av, chunk2mem(p));
+ p = newp;
+
+ assert (newsize >= nb &&
+ (((unsigned long)(chunk2mem(p))) % alignment) == 0);
+ }
+
+ /* Also give back spare room at the end */
+ if (!chunk_is_mmapped(p)) {
+ size = chunksize(p);
+ if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
+ remainder_size = size - nb;
+ remainder = chunk_at_offset(p, nb);
+ set_head(remainder, remainder_size | PREV_INUSE );
+ set_head_size(p, nb);
+ set_arena_for_chunk(remainder, av);
+ _int_free(av, chunk2mem(remainder));
+ }
+ }
+
+ set_arena_for_chunk(p, av);
+ check_inuse_chunk(av, p);
+ return chunk2mem(p);
+}
+
+#if 1
+/*
+ ------------------------------ calloc ------------------------------
+*/
+
+#if __STD_C
+Void_t* cALLOc(cvmx_arena_list_t arena_list, size_t n_elements, size_t elem_size)
+#else
+Void_t* cALLOc(arena_list, n_elements, elem_size)
+cvmx_arena_list_t arena_list; size_t n_elements; size_t elem_size;
+#endif
+{
+ mchunkptr p;
+ unsigned long clearsize;
+ unsigned long nclears;
+ INTERNAL_SIZE_T* d;
+
+  Void_t* mem;
+
+  /* guard against overflow on the multiplication, as in public_cALLOc */
+  if (elem_size != 0 && n_elements > ((size_t)-1) / elem_size)
+    return 0;
+
+  mem = public_mALLOc(arena_list, n_elements * elem_size);
+
+ if (mem != 0) {
+ p = mem2chunk(mem);
+
+ {
+ /*
+ Unroll clear of <= 36 bytes (72 if 8byte sizes)
+ We know that contents have an odd number of
+ INTERNAL_SIZE_T-sized words; minimally 3.
+ */
+
+ d = (INTERNAL_SIZE_T*)mem;
+ clearsize = chunksize(p) - SIZE_SZ;
+ nclears = clearsize / sizeof(INTERNAL_SIZE_T);
+ assert(nclears >= 3);
+
+ if (nclears > 9)
+ MALLOC_ZERO(d, clearsize);
+
+ else {
+ *(d+0) = 0;
+ *(d+1) = 0;
+ *(d+2) = 0;
+ if (nclears > 4) {
+ *(d+3) = 0;
+ *(d+4) = 0;
+ if (nclears > 6) {
+ *(d+5) = 0;
+ *(d+6) = 0;
+ if (nclears > 8) {
+ *(d+7) = 0;
+ *(d+8) = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ return mem;
+}
+#endif
+
+
+/*
+ ------------------------- malloc_usable_size -------------------------
+*/
+
+#if __STD_C
+size_t mUSABLe(Void_t* mem)
+#else
+size_t mUSABLe(mem) Void_t* mem;
+#endif
+{
+ mchunkptr p;
+ if (mem != 0) {
+ p = mem2chunk(mem);
+ if (chunk_is_mmapped(p))
+ return chunksize(p) - 3*SIZE_SZ; /* updated size for adding arena_ptr */
+ else if (inuse(p))
+ return chunksize(p) - 2*SIZE_SZ; /* updated size for adding arena_ptr */
+ }
+ return 0;
+}
+
+/*
+ ------------------------------ mallinfo ------------------------------
+*/
+
+struct mallinfo mALLINFo(mstate av)
+{
+ struct mallinfo mi;
+ int i;
+ mbinptr b;
+ mchunkptr p;
+ INTERNAL_SIZE_T avail;
+ INTERNAL_SIZE_T fastavail;
+ int nblocks;
+ int nfastblocks;
+
+ /* Ensure initialization */
+ if (av->top == 0) malloc_consolidate(av);
+
+ check_malloc_state(av);
+
+ /* Account for top */
+ avail = chunksize(av->top);
+ nblocks = 1; /* top always exists */
+
+ /* traverse fastbins */
+ nfastblocks = 0;
+ fastavail = 0;
+
+ for (i = 0; i < NFASTBINS; ++i) {
+ for (p = av->fastbins[i]; p != 0; p = p->fd) {
+ ++nfastblocks;
+ fastavail += chunksize(p);
+ }
+ }
+
+ avail += fastavail;
+
+ /* traverse regular bins */
+ for (i = 1; i < NBINS; ++i) {
+ b = bin_at(av, i);
+ for (p = last(b); p != b; p = p->bk) {
+ ++nblocks;
+ avail += chunksize(p);
+ }
+ }
+
+ mi.smblks = nfastblocks;
+ mi.ordblks = nblocks;
+ mi.fordblks = avail;
+ mi.uordblks = av->system_mem - avail;
+ mi.arena = av->system_mem;
+ mi.fsmblks = fastavail;
+ mi.keepcost = chunksize(av->top);
+ return mi;
+}
+
+/*
+ ------------------------------ malloc_stats ------------------------------
+*/
+
+void mSTATs()
+{
+    /* statistics reporting is not supported in this port */
+}
+
+
+/*
+ ------------------------------ mallopt ------------------------------
+*/
+
+#if 0
+#if __STD_C
+int mALLOPt(int param_number, int value)
+#else
+int mALLOPt(param_number, value) int param_number; int value;
+#endif
+{
+}
+#endif
+
+
+/*
+ -------------------- Alternative MORECORE functions --------------------
+*/
+
+
+/*
+ General Requirements for MORECORE.
+
+ The MORECORE function must have the following properties:
+
+ If MORECORE_CONTIGUOUS is false:
+
+ * MORECORE must allocate in multiples of pagesize. It will
+ only be called with arguments that are multiples of pagesize.
+
+ * MORECORE(0) must return an address that is at least
+ MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
+
+ else (i.e. If MORECORE_CONTIGUOUS is true):
+
+ * Consecutive calls to MORECORE with positive arguments
+ return increasing addresses, indicating that space has been
+ contiguously extended.
+
+ * MORECORE need not allocate in multiples of pagesize.
+ Calls to MORECORE need not have args of multiples of pagesize.
+
+ * MORECORE need not page-align.
+
+ In either case:
+
+ * MORECORE may allocate more memory than requested. (Or even less,
+ but this will generally result in a malloc failure.)
+
+ * MORECORE must not allocate memory when given argument zero, but
+ instead return one past the end address of memory from previous
+ nonzero call. This malloc does NOT call MORECORE(0)
+ until at least one call with positive arguments is made, so
+ the initial value returned is not important.
+
+ * Even though consecutive calls to MORECORE need not return contiguous
+ addresses, it must be OK for malloc'ed chunks to span multiple
+ regions in those cases where they do happen to be contiguous.
+
+ * MORECORE need not handle negative arguments -- it may instead
+ just return MORECORE_FAILURE when given negative arguments.
+ Negative arguments are always multiples of pagesize. MORECORE
+ must not misinterpret negative args as large positive unsigned
+ args. You can suppress all such calls from even occurring by defining
+  MORECORE_CANNOT_TRIM.
+
+ There is some variation across systems about the type of the
+ argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
+ actually be size_t, because sbrk supports negative args, so it is
+ normally the signed type of the same width as size_t (sometimes
+ declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
+ matter though. Internally, we use "long" as arguments, which should
+ work across all reasonable possibilities.
+
+ Additionally, if MORECORE ever returns failure for a positive
+ request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
+ system allocator. This is a useful backup strategy for systems with
+ holes in address spaces -- in this case sbrk cannot contiguously
+ expand the heap, but mmap may be able to map noncontiguous space.
+
+ If you'd like mmap to ALWAYS be used, you can define MORECORE to be
+ a function that always returns MORECORE_FAILURE.
+
+ If you are using this malloc with something other than sbrk (or its
+ emulation) to supply memory regions, you probably want to set
+ MORECORE_CONTIGUOUS as false. As an example, here is a custom
+ allocator kindly contributed for pre-OSX macOS. It uses virtually
+ but not necessarily physically contiguous non-paged memory (locked
+ in, present and won't get swapped out). You can use it by
+ uncommenting this section, adding some #includes, and setting up the
+ appropriate defines above:
+
+ #define MORECORE osMoreCore
+ #define MORECORE_CONTIGUOUS 0
+
+ There is also a shutdown routine that should somehow be called for
+ cleanup upon program exit.
+
+ #define MAX_POOL_ENTRIES 100
+ #define MINIMUM_MORECORE_SIZE (64 * 1024)
+ static int next_os_pool;
+ void *our_os_pools[MAX_POOL_ENTRIES];
+
+ void *osMoreCore(int size)
+ {
+ void *ptr = 0;
+ static void *sbrk_top = 0;
+
+ if (size > 0)
+ {
+ if (size < MINIMUM_MORECORE_SIZE)
+ size = MINIMUM_MORECORE_SIZE;
+ if (CurrentExecutionLevel() == kTaskLevel)
+ ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
+ if (ptr == 0)
+ {
+ return (void *) MORECORE_FAILURE;
+ }
+ // save ptrs so they can be freed during cleanup
+ our_os_pools[next_os_pool] = ptr;
+ next_os_pool++;
+ ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
+ sbrk_top = (char *) ptr + size;
+ return ptr;
+ }
+ else if (size < 0)
+ {
+ // we don't currently support shrink behavior
+ return (void *) MORECORE_FAILURE;
+ }
+ else
+ {
+ return sbrk_top;
+ }
+ }
+
+ // cleanup any allocated memory pools
+ // called as last thing before shutting down driver
+
+ void osCleanupMem(void)
+ {
+ void **ptr;
+
+ for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
+ if (*ptr)
+ {
+ PoolDeallocate(*ptr);
+ *ptr = 0;
+ }
+ }
+
+*/
+
+
+
+/* ------------------------------------------------------------
+History:
+
+[see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
+
+*/
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-malloc/malloc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-malloc/malloc.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-malloc/malloc.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-malloc/malloc.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,214 @@
+/* $MidnightBSD$ */
+/*
+Copyright (c) 2001 Wolfram Gloger
+Copyright (c) 2006 Cavium networks
+
+Permission to use, copy, modify, distribute, and sell this software
+and its documentation for any purpose is hereby granted without fee,
+provided that (i) the above copyright notices and this permission
+notice appear in all copies of the software and related documentation,
+and (ii) the name of Wolfram Gloger may not be used in any advertising
+or publicity relating to the software.
+
+THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
+EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
+WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+
+IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
+INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
+OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _MALLOC_H
+#define _MALLOC_H 1
+
+#undef _LIBC
+#ifdef _LIBC
+#include <features.h>
+#endif
+
+/*
+ $Id: malloc.h 30481 2007-12-05 21:46:59Z rfranz $
+ `ptmalloc2', a malloc implementation for multiple threads without
+ lock contention, by Wolfram Gloger <wg at malloc.de>.
+
+ VERSION 2.7.0
+
+ This work is mainly derived from malloc-2.7.0 by Doug Lea
+ <dl at cs.oswego.edu>, which is available from:
+
+ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+
+ This trimmed-down header file only provides function prototypes and
+ the exported data structures. For more detailed function
+ descriptions and compile-time options, see the source file
+ `malloc.c'.
+*/
+
+#if 0
+# include <stddef.h>
+# define __malloc_ptr_t void *
+# undef size_t
+# define size_t unsigned long
+# undef ptrdiff_t
+# define ptrdiff_t long
+#else
+# undef Void_t
+# define Void_t void
+# define __malloc_ptr_t char *
+#endif
+
+#ifdef _LIBC
+/* Used by GNU libc internals. */
+# define __malloc_size_t size_t
+# define __malloc_ptrdiff_t ptrdiff_t
+#elif !defined __attribute_malloc__
+# define __attribute_malloc__
+#endif
+
+#ifdef __GNUC__
+
+/* GCC can always grok prototypes. For C++ programs we add throw()
+ to help it optimize the function calls. But this works only with
+ gcc 2.8.x and egcs. */
+# if defined __cplusplus && (__GNUC__ >= 3 || __GNUC_MINOR__ >= 8)
+# define __THROW throw ()
+# else
+# define __THROW
+# endif
+# define __MALLOC_P(args) args __THROW
+/* This macro will be used for functions which might take C++ callback
+ functions. */
+# define __MALLOC_PMT(args) args
+
+#else /* Not GCC. */
+
+# define __THROW
+
+# if (defined __STDC__ && __STDC__) || defined __cplusplus
+
+# define __MALLOC_P(args) args
+# define __MALLOC_PMT(args) args
+
+# else /* Not ANSI C or C++. */
+
+# define __MALLOC_P(args) () /* No prototypes. */
+# define __MALLOC_PMT(args) ()
+
+# endif /* ANSI C or C++. */
+
+#endif /* GCC. */
+
+#ifndef NULL
+# ifdef __cplusplus
+# define NULL 0
+# else
+# define NULL ((__malloc_ptr_t) 0)
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Nonzero if the malloc is already initialized. */
+#ifdef _LIBC
+/* In the GNU libc we rename the global variable
+ `__malloc_initialized' to `__libc_malloc_initialized'. */
+# define __malloc_initialized __libc_malloc_initialized
+#endif
+extern int cvmx__malloc_initialized;
+
+
+/* SVID2/XPG mallinfo structure */
+
+struct mallinfo {
+ int arena; /* non-mmapped space allocated from system */
+ int ordblks; /* number of free chunks */
+ int smblks; /* number of fastbin blocks */
+ int hblks; /* number of mmapped regions */
+ int hblkhd; /* space in mmapped regions */
+ int usmblks; /* maximum total allocated space */
+ int fsmblks; /* space available in freed fastbin blocks */
+ int uordblks; /* total allocated space */
+ int fordblks; /* total free space */
+ int keepcost; /* top-most, releasable (via malloc_trim) space */
+};
+
+/* Returns a copy of the updated current mallinfo. */
+extern struct mallinfo mallinfo __MALLOC_P ((void));
+
+/* SVID2/XPG mallopt options */
+#ifndef M_MXFAST
+# define M_MXFAST 1 /* maximum request size for "fastbins" */
+#endif
+#ifndef M_NLBLKS
+# define M_NLBLKS 2 /* UNUSED in this malloc */
+#endif
+#ifndef M_GRAIN
+# define M_GRAIN 3 /* UNUSED in this malloc */
+#endif
+#ifndef M_KEEP
+# define M_KEEP 4 /* UNUSED in this malloc */
+#endif
+
+/* mallopt options that actually do something */
+#define M_TRIM_THRESHOLD -1
+#define M_TOP_PAD -2
+#define M_MMAP_THRESHOLD -3
+#define M_MMAP_MAX -4
+#define M_CHECK_ACTION -5
+
+/* General SVID/XPG interface to tunable parameters. */
+extern int mallopt __MALLOC_P ((int __param, int __val));
+
+/* Release all but __pad bytes of freed top-most memory back to the
+ system. Return 1 if successful, else 0. */
+extern int malloc_trim __MALLOC_P ((size_t __pad));
+
+/* Report the number of usable allocated bytes associated with allocated
+ chunk __ptr. */
+extern size_t malloc_usable_size __MALLOC_P ((__malloc_ptr_t __ptr));
+
+/* Prints brief summary statistics on stderr. */
+extern void malloc_stats __MALLOC_P ((void));
+
+/* Record the state of all malloc variables in an opaque data structure. */
+extern __malloc_ptr_t malloc_get_state __MALLOC_P ((void));
+
+/* Restore the state of all malloc variables from data obtained with
+ malloc_get_state(). */
+extern int malloc_set_state __MALLOC_P ((__malloc_ptr_t __ptr));
+
+/* Called once when malloc is initialized; redefining this variable in
+ the application provides the preferred way to set up the hook
+ pointers. */
+extern void (*cvmx__malloc_initialize_hook) __MALLOC_PMT ((void));
+/* Hooks for debugging and user-defined versions. */
+extern void (*cvmx__free_hook) __MALLOC_PMT ((__malloc_ptr_t __ptr,
+ __const __malloc_ptr_t));
+extern __malloc_ptr_t (*cvmx__malloc_hook) __MALLOC_PMT ((size_t __size,
+ __const __malloc_ptr_t));
+extern __malloc_ptr_t (*cvmx__realloc_hook) __MALLOC_PMT ((__malloc_ptr_t __ptr,
+ size_t __size,
+ __const __malloc_ptr_t));
+extern __malloc_ptr_t (*cvmx__memalign_hook) __MALLOC_PMT ((size_t __alignment,
+ size_t __size,
+ __const __malloc_ptr_t));
+extern void (*__after_morecore_hook) __MALLOC_PMT ((void));
+
+/* Activate a standard set of debugging hooks. */
+extern void cvmx__malloc_check_init __MALLOC_P ((void));
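+
+/* Example (illustrative only; my_free_hook is a hypothetical name
+   matching the declared hook signature):
+
+     static void my_free_hook(__malloc_ptr_t ptr,
+                              __const __malloc_ptr_t caller)
+     {
+         // record or validate ptr here
+     }
+
+     cvmx__free_hook = my_free_hook;
+*/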
+
+/* Internal routines, operating on "arenas". */
+struct malloc_state;
+typedef struct malloc_state *mstate;
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+
+#endif /* malloc.h */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-malloc/malloc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-malloc/thread-m.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-malloc/thread-m.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-malloc/thread-m.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,74 @@
+/* $MidnightBSD$ */
+/*
+Copyright (c) 2001 Wolfram Gloger
+Copyright (c) 2006 Cavium networks
+
+Permission to use, copy, modify, distribute, and sell this software
+and its documentation for any purpose is hereby granted without fee,
+provided that (i) the above copyright notices and this permission
+notice appear in all copies of the software and related documentation,
+and (ii) the name of Wolfram Gloger may not be used in any advertising
+or publicity relating to the software.
+
+THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
+EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
+WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+
+IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
+INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
+OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/* $Id: thread-m.h 30481 2007-12-05 21:46:59Z rfranz $
+ One out of _LIBC, USE_PTHREADS, USE_THR or USE_SPROC should be
+ defined, otherwise the token NO_THREADS and dummy implementations
+ of the macros will be defined. */
+
+#ifndef _THREAD_M_H
+#define _THREAD_M_H
+
+#undef thread_atfork_static
+
+
+#undef NO_THREADS /* No threads, provide dummy macros */
+
+typedef int thread_id;
+
+/* The mutex functions used to do absolutely nothing, i.e. lock,
+ trylock and unlock would always just return 0. However, even
+ without any concurrently active threads, a mutex can be used
+ legitimately as an `in use' flag. To make the code that is
+ protected by a mutex async-signal safe, these macros would have to
+ be based on atomic test-and-set operations, for example. */
+#ifdef __OCTEON__
+typedef cvmx_spinlock_t mutex_t;
+#define MUTEX_INITIALIZER CVMX_SPINLOCK_UNLOCKED_VAL
+#define mutex_init(m) cvmx_spinlock_init(m)
+#define mutex_lock(m) cvmx_spinlock_lock(m)
+#define mutex_trylock(m) (cvmx_spinlock_trylock(m))
+#define mutex_unlock(m) cvmx_spinlock_unlock(m)
+#else
+
+typedef int mutex_t;
+
+#define MUTEX_INITIALIZER 0
+#define mutex_init(m) (*(m) = 0)
+#define mutex_lock(m) ((*(m) = 1), 0)
+#define mutex_trylock(m) (*(m) ? 1 : ((*(m) = 1), 0))
+#define mutex_unlock(m) (*(m) = 0)
+#endif
+
+
+
+typedef void *tsd_key_t;
+#define tsd_key_create(key, destr) do {} while(0)
+#define tsd_setspecific(key, data) ((key) = (data))
+#define tsd_getspecific(key, vptr) (vptr = (key))
+
+#define thread_atfork(prepare, parent, child) do {} while(0)
+
+
+#endif /* !defined(_THREAD_M_H) */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-malloc/thread-m.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-malloc.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-malloc.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-malloc.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,226 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file provides prototypes for the memory management library functions.
+ * Two different allocators are provided: an arena-based allocator derived from a
+ * modified version of ptmalloc2 (as used in glibc), and a zone allocator for
+ * allocating fixed-size memory blocks.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_MALLOC_H__
+#define __CVMX_MALLOC_H__
+
+#include "cvmx-spinlock.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct malloc_state; /* forward declaration */
+typedef struct malloc_state *cvmx_arena_list_t;
+
+
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+/**
+ * Creates an arena from the memory region specified and adds it
+ * to the supplied arena list.
+ *
+ * @param arena_list Pointer to an arena list to add new arena to.
+ * If NULL, new list is created.
+ * @param ptr pointer to memory region to create arena from
+ *
+ * @param size Size of memory region available at ptr in bytes.
+ *
+ * @return -1 on Failure
+ * 0 on success
+ */
+int cvmx_add_arena(cvmx_arena_list_t *arena_list, void *ptr, size_t size);
+
+/**
+ * Allocate a buffer from an arena list
+ *
+ * @param arena_list arena list to allocate buffer from
+ * @param size size of buffer to allocate (in bytes)
+ *
+ * @return pointer to buffer or NULL if allocation failed
+ */
+void *cvmx_malloc(cvmx_arena_list_t arena_list, size_t size);
+/**
+ * Allocate a zero-initialized buffer
+ *
+ * @param arena_list arena list to allocate from
+ * @param n number of elements
+ * @param elem_size size of elements
+ *
+ * @return pointer to (n*elem_size) byte zero-initialized buffer or NULL
+ * on allocation failure
+ */
+void *cvmx_calloc(cvmx_arena_list_t arena_list, size_t n, size_t elem_size);
+/**
+ * Attempt to increase the size of an already allocated buffer.
+ * This function may allocate a new buffer and copy
+ * the data if the current buffer can't be extended.
+ *
+ * @param arena_list arena list to allocate from
+ * @param ptr pointer to buffer to extend
+ * @param size new buffer size
+ *
+ * @return pointer to expanded buffer (may differ from ptr)
+ * or NULL on failure
+ */
+void *cvmx_realloc(cvmx_arena_list_t arena_list, void *ptr, size_t size);
+/**
+ * Allocate a buffer with a specified alignment
+ *
+ * @param arena_list arena list to allocate from
+ * @param alignment alignment of buffer. Must be a power of 2
+ * @param bytes size of buffer in bytes
+ *
+ * @return pointer to buffer on success
+ * NULL on failure
+ */
+void *cvmx_memalign(cvmx_arena_list_t arena_list, size_t alignment, size_t bytes);
+/**
+ * Free a previously allocated buffer
+ *
+ * @param ptr pointer of buffer to deallocate
+ */
+void cvmx_free(void *ptr);
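+
+/*
+ * Usage sketch (illustrative only; region_ptr and region_size are
+ * placeholders for a memory region obtained elsewhere):
+ *
+ *   cvmx_arena_list_t arenas = NULL;
+ *   if (cvmx_add_arena(&arenas, region_ptr, region_size) == 0) {
+ *       char *buf = cvmx_malloc(arenas, 128);
+ *       if (buf != NULL)
+ *           cvmx_free(buf);
+ *   }
+ */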
+#endif
+
+
+
+
+#define CVMX_ZONE_OVERHEAD (64)
+/** Zone allocator definitions
+ *
+ */
+struct cvmx_zone
+{
+ cvmx_spinlock_t lock;
+ char *baseptr;
+ char *name;
+ void *freelist;
+ uint32_t num_elem;
+ uint32_t elem_size;
+ uint32_t align;
+};
+typedef struct cvmx_zone * cvmx_zone_t;
+
+static inline uint32_t cvmx_zone_size(cvmx_zone_t zone)
+{
+ return(zone->elem_size);
+}
+static inline char *cvmx_zone_name(cvmx_zone_t zone)
+{
+ return(zone->name);
+}
+
+
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+/**
+ * Creates a memory zone for efficient allocation/deallocation of
+ * fixed size memory blocks from a specified memory region.
+ *
+ * @param name name of zone.
+ * @param elem_size size of blocks that will be requested from zone
+ * @param num_elem number of elements to allocate
+ * @param mem_ptr pointer to memory to allocate zone from
+ * @param mem_size size of memory region available
+ * (must be at least elem_size * num_elem + CVMX_ZONE_OVERHEAD bytes)
+ * @param flags flags for zone. Currently unused.
+ *
+ * @return pointer to zone on success or
+ * NULL on failure
+ */
+cvmx_zone_t cvmx_zone_create_from_addr(char *name, uint32_t elem_size, uint32_t num_elem,
+ void* mem_ptr, uint64_t mem_size, uint32_t flags);
+/**
+ * Creates a memory zone for efficient allocation/deallocation of
+ * fixed size memory blocks from a previously initialized arena list.
+ *
+ * @param name name of zone.
+ * @param elem_size size of blocks that will be requested from zone
+ * @param num_elem number of elements to allocate
+ * @param align alignment of buffers (must be power of 2)
+ * Elements are allocated contiguously, so the buffer size
+ * must be a multiple of the requested alignment for all
+ * buffers to have the requested alignment.
+ * @param arena_list arena list to allocate memory from
+ * @param flags flags for zone. Currently unused.
+ *
+ * @return pointer to zone on success or
+ * NULL on failure
+ */
+cvmx_zone_t cvmx_zone_create_from_arena(char *name, uint32_t elem_size, uint32_t num_elem, uint32_t align,
+ cvmx_arena_list_t arena_list, uint32_t flags);
+#endif
+/**
+ * Allocate a buffer from a memory zone
+ *
+ * @param zone zone to allocate buffer from
+ * @param flags flags (currently unused)
+ *
+ * @return pointer to buffer or NULL on failure
+ */
+void * cvmx_zone_alloc(cvmx_zone_t zone, uint32_t flags);
+/**
+ * Free a previously allocated buffer
+ *
+ * @param zone zone that buffer was allocated from
+ * @param ptr pointer to buffer to be freed
+ */
+void cvmx_zone_free(cvmx_zone_t zone, void *ptr);
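
A sketch of the zone API for a pool of fixed-size blocks; the region is sized
per the CVMX_ZONE_OVERHEAD requirement above (names and sizes are hypothetical):

    #define EX_ELEM_SIZE 64
    #define EX_NUM_ELEM  32
    static char example_zone_mem[EX_ELEM_SIZE * EX_NUM_ELEM + CVMX_ZONE_OVERHEAD];

    static void zone_example(void)
    {
        void *block;
        cvmx_zone_t zone = cvmx_zone_create_from_addr("example", EX_ELEM_SIZE,
                                                      EX_NUM_ELEM, example_zone_mem,
                                                      sizeof(example_zone_mem), 0);
        if (zone == NULL)
            return;                       /* region too small or init failed */
        block = cvmx_zone_alloc(zone, 0);
        if (block != NULL)
            cvmx_zone_free(zone, block);  /* block returns to the free list */
    }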
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // __CVMX_MALLOC_H__
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-malloc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-mdio.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-mdio.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-mdio.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,600 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
+ * clause 22 and clause 45 operations.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_MIO_H__
+#define __CVMX_MIO_H__
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-clock.h>
+#else
+#include "cvmx-clock.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * PHY register 0 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_CONTROL 0
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t reset : 1;
+ uint16_t loopback : 1;
+ uint16_t speed_lsb : 1;
+ uint16_t autoneg_enable : 1;
+ uint16_t power_down : 1;
+ uint16_t isolate : 1;
+ uint16_t restart_autoneg : 1;
+ uint16_t duplex : 1;
+ uint16_t collision_test : 1;
+ uint16_t speed_msb : 1;
+ uint16_t unidirectional_enable : 1;
+ uint16_t reserved_0_4 : 5;
+ } s;
+} cvmx_mdio_phy_reg_control_t;
+
+/**
+ * PHY register 1 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_STATUS 1
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t capable_100base_t4 : 1;
+ uint16_t capable_100base_x_full : 1;
+ uint16_t capable_100base_x_half : 1;
+ uint16_t capable_10_full : 1;
+ uint16_t capable_10_half : 1;
+ uint16_t capable_100base_t2_full : 1;
+ uint16_t capable_100base_t2_half : 1;
+ uint16_t capable_extended_status : 1;
+ uint16_t capable_unidirectional : 1;
+ uint16_t capable_mf_preamble_suppression : 1;
+ uint16_t autoneg_complete : 1;
+ uint16_t remote_fault : 1;
+ uint16_t capable_autoneg : 1;
+ uint16_t link_status : 1;
+ uint16_t jabber_detect : 1;
+ uint16_t capable_extended_registers : 1;
+
+ } s;
+} cvmx_mdio_phy_reg_status_t;
+
+/**
+ * PHY register 2 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_ID1 2
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t oui_bits_3_18;
+ } s;
+} cvmx_mdio_phy_reg_id1_t;
+
+/**
+ * PHY register 3 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_ID2 3
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t oui_bits_19_24 : 6;
+ uint16_t model : 6;
+ uint16_t revision : 4;
+ } s;
+} cvmx_mdio_phy_reg_id2_t;
+
+/**
+ * PHY register 4 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t next_page : 1;
+ uint16_t reserved_14 : 1;
+ uint16_t remote_fault : 1;
+ uint16_t reserved_12 : 1;
+ uint16_t asymmetric_pause : 1;
+ uint16_t pause : 1;
+ uint16_t advert_100base_t4 : 1;
+ uint16_t advert_100base_tx_full : 1;
+ uint16_t advert_100base_tx_half : 1;
+ uint16_t advert_10base_tx_full : 1;
+ uint16_t advert_10base_tx_half : 1;
+ uint16_t selector : 5;
+ } s;
+} cvmx_mdio_phy_reg_autoneg_adver_t;
+
+/**
+ * PHY register 5 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t next_page : 1;
+ uint16_t ack : 1;
+ uint16_t remote_fault : 1;
+ uint16_t reserved_12 : 1;
+ uint16_t asymmetric_pause : 1;
+ uint16_t pause : 1;
+ uint16_t advert_100base_t4 : 1;
+ uint16_t advert_100base_tx_full : 1;
+ uint16_t advert_100base_tx_half : 1;
+ uint16_t advert_10base_tx_full : 1;
+ uint16_t advert_10base_tx_half : 1;
+ uint16_t selector : 5;
+ } s;
+} cvmx_mdio_phy_reg_link_partner_ability_t;
+
+/**
+ * PHY register 6 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t reserved_5_15 : 11;
+ uint16_t parallel_detection_fault : 1;
+ uint16_t link_partner_next_page_capable : 1;
+ uint16_t local_next_page_capable : 1;
+ uint16_t page_received : 1;
+ uint16_t link_partner_autoneg_capable : 1;
+
+ } s;
+} cvmx_mdio_phy_reg_autoneg_expansion_t;
+
+/**
+ * PHY register 9 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t test_mode : 3;
+ uint16_t manual_master_slave : 1;
+ uint16_t master : 1;
+ uint16_t port_type : 1;
+ uint16_t advert_1000base_t_full : 1;
+ uint16_t advert_1000base_t_half : 1;
+ uint16_t reserved_0_7 : 8;
+ } s;
+} cvmx_mdio_phy_reg_control_1000_t;
+
+/**
+ * PHY register 10 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_STATUS_1000 10
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t master_slave_fault : 1;
+ uint16_t is_master : 1;
+ uint16_t local_receiver_ok : 1;
+ uint16_t remote_receiver_ok : 1;
+ uint16_t remote_capable_1000base_t_full : 1;
+ uint16_t remote_capable_1000base_t_half : 1;
+ uint16_t reserved_8_9 : 2;
+ uint16_t idle_error_count : 8;
+ } s;
+} cvmx_mdio_phy_reg_status_1000_t;
+
+/**
+ * PHY register 15 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t capable_1000base_x_full : 1;
+ uint16_t capable_1000base_x_half : 1;
+ uint16_t capable_1000base_t_full : 1;
+ uint16_t capable_1000base_t_half : 1;
+ uint16_t reserved_0_11 : 12;
+ } s;
+} cvmx_mdio_phy_reg_extended_status_t;
+
+
+/**
+ * PHY register 13 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t function : 2;
+ uint16_t reserved_5_13 : 9;
+ uint16_t devad : 5;
+ } s;
+} cvmx_mdio_phy_reg_mmd_control_t;
+
+/**
+ * PHY register 14 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t address_data : 16;
+ } s;
+} cvmx_mdio_phy_reg_mmd_address_data_t;
+
+/* Operating request encodings. */
+#define MDIO_CLAUSE_22_WRITE 0
+#define MDIO_CLAUSE_22_READ 1
+
+#define MDIO_CLAUSE_45_ADDRESS 0
+#define MDIO_CLAUSE_45_WRITE 1
+#define MDIO_CLAUSE_45_READ_INC 2
+#define MDIO_CLAUSE_45_READ 3
+
+/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
+#define CVMX_MMD_DEVICE_PMA_PMD 1
+#define CVMX_MMD_DEVICE_WIS 2
+#define CVMX_MMD_DEVICE_PCS 3
+#define CVMX_MMD_DEVICE_PHY_XS 4
+#define CVMX_MMD_DEVICE_DTS_XS 5
+#define CVMX_MMD_DEVICE_TC 6
+#define CVMX_MMD_DEVICE_CL22_EXT 29
+#define CVMX_MMD_DEVICE_VENDOR_1 30
+#define CVMX_MMD_DEVICE_VENDOR_2 31
+
+#define CVMX_MDIO_TIMEOUT 100000 /* 100 millisec */
+
+/* Helper function to put MDIO interface into clause 45 mode */
+static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
+{
+ cvmx_smix_clk_t smi_clk;
+ /* Put bus into clause 45 mode */
+ smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
+ smi_clk.s.mode = 1;
+ smi_clk.s.preamble = 1;
+ cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
+}
+/* Helper function to put MDIO interface into clause 22 mode */
+static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
+{
+ cvmx_smix_clk_t smi_clk;
+ /* Put bus into clause 22 mode */
+ smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
+ smi_clk.s.mode = 0;
+ cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
+}
+
+/**
+ * @INTERNAL
+ * Function to read SMIX_RD_DAT and check for timeouts. This
+ * code sequence is done fairly often, so put it in one spot.
+ *
+ * @param bus_id SMI/MDIO bus to read
+ *
+ * @return Value of SMIX_RD_DAT. pending will be set on
+ * a timeout.
+ */
+static inline cvmx_smix_rd_dat_t __cvmx_mdio_read_rd_dat(int bus_id)
+{
+ cvmx_smix_rd_dat_t smi_rd;
+ uint64_t done = cvmx_get_cycle() + (uint64_t)CVMX_MDIO_TIMEOUT *
+ cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000;
+ do
+ {
+ cvmx_wait(1000);
+ smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
+ } while (smi_rd.s.pending && (cvmx_get_cycle() < done));
+ return smi_rd;
+}
+
+
+/**
+ * Perform an MII read. This function is used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id   MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ *                 support multiple buses.
+ * @param phy_id The MII phy id
+ * @param location Register location to read
+ *
+ * @return Result from the read or -1 on failure
+ */
+static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
+{
+#if defined(CVMX_BUILD_FOR_LINUX_KERNEL) && defined(CONFIG_PHYLIB)
+ struct mii_bus *bus;
+ int rv;
+
+ BUG_ON(bus_id > 3 || bus_id < 0);
+
+ bus = octeon_mdiobuses[bus_id];
+ if (bus == NULL)
+ return -1;
+
+ rv = mdiobus_read(bus, phy_id, location);
+
+ if (rv < 0)
+ return -1;
+ return rv;
+#else
+ cvmx_smix_cmd_t smi_cmd;
+ cvmx_smix_rd_dat_t smi_rd;
+
+ if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+ __cvmx_mdio_set_clause22_mode(bus_id);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = location;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ smi_rd = __cvmx_mdio_read_rd_dat(bus_id);
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else
+ return -1;
+#endif
+}
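
As a sketch of how the register unions above pair with this call, a link-status
poll might look like the following (bus and PHY numbers are placeholders):

    /* Returns 1 if the PHY reports link up, 0 if down, -1 on read failure. */
    static int example_link_up(int bus_id, int phy_id)
    {
        cvmx_mdio_phy_reg_status_t status;
        int val = cvmx_mdio_read(bus_id, phy_id, CVMX_MDIO_PHY_REG_STATUS);
        if (val < 0)
            return -1;                  /* SMI timeout or invalid read */
        status.u16 = (uint16_t)val;
        return status.s.link_status;
    }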
+
+
+/**
+ * Perform an MII write. This function is used to write PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id   MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ *                 support multiple buses.
+ * @param phy_id The MII phy id
+ * @param location Register location to write
+ * @param val Value to write
+ *
+ * @return -1 on error
+ * 0 on success
+ */
+static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
+{
+#if defined(CVMX_BUILD_FOR_LINUX_KERNEL) && defined(CONFIG_PHYLIB)
+ struct mii_bus *bus;
+ int rv;
+
+ BUG_ON(bus_id > 3 || bus_id < 0);
+
+ bus = octeon_mdiobuses[bus_id];
+ if (bus == NULL)
+ return -1;
+
+ rv = mdiobus_write(bus, phy_id, location, (u16)val);
+
+ if (rv < 0)
+ return -1;
+ return 0;
+#else
+ cvmx_smix_cmd_t smi_cmd;
+ cvmx_smix_wr_dat_t smi_wr;
+
+ if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+ __cvmx_mdio_set_clause22_mode(bus_id);
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = val;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = location;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SMIX_WR_DAT(bus_id),
+ cvmx_smix_wr_dat_t, pending, ==, 0, CVMX_MDIO_TIMEOUT))
+ return -1;
+
+ return 0;
+#endif
+}
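
A write-side sketch: restarting auto negotiation is a read-modify-write of
register 0 using the control union defined earlier (bus and PHY numbers are
placeholders):

    static int example_restart_autoneg(int bus_id, int phy_id)
    {
        cvmx_mdio_phy_reg_control_t control;
        int val = cvmx_mdio_read(bus_id, phy_id, CVMX_MDIO_PHY_REG_CONTROL);
        if (val < 0)
            return -1;                  /* read failed */
        control.u16 = (uint16_t)val;
        control.s.autoneg_enable = 1;
        control.s.restart_autoneg = 1;
        return cvmx_mdio_write(bus_id, phy_id, CVMX_MDIO_PHY_REG_CONTROL,
                               control.u16);
    }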
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+/**
+ * Perform an IEEE 802.3 clause 45 MII read. This function is used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id   MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ *                 support multiple buses.
+ * @param phy_id The MII phy id
+ * @param device   MDIO Manageable Device (MMD) id
+ * @param location Register location to read
+ *
+ * @return Result from the read or -1 on failure
+ */
+
+static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device, int location)
+{
+ cvmx_smix_cmd_t smi_cmd;
+ cvmx_smix_rd_dat_t smi_rd;
+ cvmx_smix_wr_dat_t smi_wr;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+ return -1;
+
+ __cvmx_mdio_set_clause45_mode(bus_id);
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = location;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SMIX_WR_DAT(bus_id),
+ cvmx_smix_wr_dat_t, pending, ==, 0, CVMX_MDIO_TIMEOUT))
+ {
+ cvmx_dprintf ("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d TIME OUT(address)\n", bus_id, phy_id, device, location);
+ return -1;
+ }
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ smi_rd = __cvmx_mdio_read_rd_dat(bus_id);
+ if (smi_rd.s.pending)
+ {
+ cvmx_dprintf ("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d TIME OUT(data)\n", bus_id, phy_id, device, location);
+ return -1;
+ }
+
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else
+ {
+ cvmx_dprintf ("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d INVALID READ\n", bus_id, phy_id, device, location);
+ return -1;
+ }
+}
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII write. This function is used to write PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id   MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ *                 support multiple buses.
+ * @param phy_id The MII phy id
+ * @param device   MDIO Manageable Device (MMD) id
+ * @param location Register location to write
+ * @param val Value to write
+ *
+ * @return -1 on error
+ * 0 on success
+ */
+static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device, int location,
+ int val)
+{
+ cvmx_smix_cmd_t smi_cmd;
+ cvmx_smix_wr_dat_t smi_wr;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+ return -1;
+
+ __cvmx_mdio_set_clause45_mode(bus_id);
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = location;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SMIX_WR_DAT(bus_id),
+ cvmx_smix_wr_dat_t, pending, ==, 0, CVMX_MDIO_TIMEOUT))
+ return -1;
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = val;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SMIX_WR_DAT(bus_id),
+ cvmx_smix_wr_dat_t, pending, ==, 0, CVMX_MDIO_TIMEOUT))
+ return -1;
+
+ return 0;
+}
+#endif
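
For clause 45 devices the MMD id selects a sub-device behind one PHY address; a
minimal sketch reading register 0 of the PMA/PMD device (bus, PHY, and register
numbers are illustrative):

    static void example_clause45_dump(void)
    {
        int val = cvmx_mdio_45_read(0 /* bus */, 0 /* phy */,
                                    CVMX_MMD_DEVICE_PMA_PMD, 0 /* register */);
        if (val >= 0)
            cvmx_dprintf("PMA/PMD register 0 = 0x%04x\n", val);
    }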
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-mdio.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-mgmt-port.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-mgmt-port.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-mgmt-port.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1030 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support functions for managing the MII management port
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#include "cvmx.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-mdio.h"
+#include "cvmx-mgmt-port.h"
+#include "cvmx-sysinfo.h"
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#include "cvmx-error.h"
+#endif
+
+/**
+ * Enum of MIX interface modes
+ */
+typedef enum
+{
+ CVMX_MGMT_PORT_NONE = 0,
+ CVMX_MGMT_PORT_MII_MODE,
+ CVMX_MGMT_PORT_RGMII_MODE,
+} cvmx_mgmt_port_mode_t;
+
+/**
+ * Format of the TX/RX ring buffer entries
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t reserved_62_63 : 2;
+ uint64_t len : 14; /* Length of the buffer/packet in bytes */
+ uint64_t tstamp : 1; /* For TX, signals that the packet should be timestamped */
+ uint64_t code : 7; /* The RX error code */
+ uint64_t addr : 40; /* Physical address of the buffer */
+ } s;
+} cvmx_mgmt_port_ring_entry_t;
+
+/**
+ * Per port state required for each mgmt port
+ */
+typedef struct
+{
+ cvmx_spinlock_t lock; /* Used for exclusive access to this structure */
+ int tx_write_index; /* Where the next TX will write in the tx_ring and tx_buffers */
+ int rx_read_index; /* Where the next RX will be in the rx_ring and rx_buffers */
+ int port; /* Port to use. (This is the 'fake' IPD port number) */
+ uint64_t mac; /* Our MAC address */
+ cvmx_mgmt_port_ring_entry_t tx_ring[CVMX_MGMT_PORT_NUM_TX_BUFFERS];
+ cvmx_mgmt_port_ring_entry_t rx_ring[CVMX_MGMT_PORT_NUM_RX_BUFFERS];
+ char tx_buffers[CVMX_MGMT_PORT_NUM_TX_BUFFERS][CVMX_MGMT_PORT_TX_BUFFER_SIZE];
+ char rx_buffers[CVMX_MGMT_PORT_NUM_RX_BUFFERS][CVMX_MGMT_PORT_RX_BUFFER_SIZE];
+ cvmx_mgmt_port_mode_t mode; /* Mode of the interface */
+} cvmx_mgmt_port_state_t;
+
+/**
+ * Pointers to each mgmt port's state
+ */
+CVMX_SHARED cvmx_mgmt_port_state_t *cvmx_mgmt_port_state_ptr = NULL;
+
+
+/**
+ * Return the number of management ports supported by this chip
+ *
+ * @return Number of ports
+ */
+static int __cvmx_mgmt_port_num_ports(void)
+{
+#if defined(OCTEON_VENDOR_GEFES)
+ return 0; /* none of the GEFES boards have mgmt ports */
+#else
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ return 2;
+ else
+ return 0;
+#endif
+}
+
+
+/**
+ * Return the number of management ports supported on this board.
+ *
+ * @return Number of ports
+ */
+int cvmx_mgmt_port_num_ports(void)
+{
+ return __cvmx_mgmt_port_num_ports();
+}
+
+
+/**
+ * Called to initialize a management port for use. Multiple calls
+ * to this function across applications is safe.
+ *
+ * @param port Port to initialize
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+cvmx_mgmt_port_result_t cvmx_mgmt_port_initialize(int port)
+{
+ char *alloc_name = "cvmx_mgmt_port";
+ cvmx_mixx_oring1_t oring1;
+ cvmx_mixx_ctl_t mix_ctl;
+
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ cvmx_mgmt_port_state_ptr = cvmx_bootmem_alloc_named_flags(CVMX_MGMT_PORT_NUM_PORTS * sizeof(cvmx_mgmt_port_state_t), 128, alloc_name, CVMX_BOOTMEM_FLAG_END_ALLOC);
+ if (cvmx_mgmt_port_state_ptr)
+ {
+ memset(cvmx_mgmt_port_state_ptr, 0, CVMX_MGMT_PORT_NUM_PORTS * sizeof(cvmx_mgmt_port_state_t));
+ }
+ else
+ {
+ const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(alloc_name);
+ if (block_desc)
+ cvmx_mgmt_port_state_ptr = cvmx_phys_to_ptr(block_desc->base_addr);
+ else
+ {
+ cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Unable to get named block %s on MIX%d.\n", alloc_name, port);
+ return CVMX_MGMT_PORT_NO_MEMORY;
+ }
+ }
+
+ /* Reset the MIX block if the previous user had a different TX ring size, or if
+ ** we allocated a new (and blank) state structure. */
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+ if (!mix_ctl.s.reset)
+ {
+ oring1.u64 = cvmx_read_csr(CVMX_MIXX_ORING1(port));
+ if (oring1.s.osize != CVMX_MGMT_PORT_NUM_TX_BUFFERS || cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
+ {
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+ mix_ctl.s.en = 0;
+ cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+ do
+ {
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+ } while (mix_ctl.s.busy);
+ mix_ctl.s.reset = 1;
+ cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+ cvmx_read_csr(CVMX_MIXX_CTL(port));
+ memset(cvmx_mgmt_port_state_ptr + port, 0, sizeof(cvmx_mgmt_port_state_t));
+ }
+ }
+
+ if (cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
+ {
+ cvmx_mgmt_port_state_t *state = cvmx_mgmt_port_state_ptr + port;
+ int i;
+ cvmx_mixx_bist_t mix_bist;
+ cvmx_agl_gmx_bist_t agl_gmx_bist;
+ cvmx_mixx_oring1_t oring1;
+ cvmx_mixx_iring1_t iring1;
+ cvmx_mixx_ctl_t mix_ctl;
+ cvmx_agl_prtx_ctl_t agl_prtx_ctl;
+
+ /* Make sure BIST passed */
+ mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(port));
+ if (mix_bist.u64)
+ cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: Managment port MIX failed BIST (0x%016llx) on MIX%d\n", CAST64(mix_bist.u64), port);
+
+ agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
+ if (agl_gmx_bist.u64)
+ cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: Managment port AGL failed BIST (0x%016llx) on MIX%d\n", CAST64(agl_gmx_bist.u64), port);
+
+ /* Clear all state information */
+ memset(state, 0, sizeof(*state));
+
+ /* Take the control logic out of reset */
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+ mix_ctl.s.reset = 0;
+ cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+
+ /* Read until reset == 0. Timeout should never happen... */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_MIXX_CTL(port), cvmx_mixx_ctl_t, reset, ==, 0, 300000000))
+ {
+ cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Timeout waiting for MIX(%d) reset.\n", port);
+ return CVMX_MGMT_PORT_INIT_ERROR;
+ }
+
+ /* Set the PHY address and mode of the interface (RGMII/MII mode). */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ {
+ state->port = -1;
+ state->mode = CVMX_MGMT_PORT_MII_MODE;
+ }
+ else
+ {
+ int port_num = CVMX_HELPER_BOARD_MGMT_IPD_PORT + port;
+ int phy_addr = cvmx_helper_board_get_mii_address(port_num);
+ if (phy_addr != -1)
+ {
+ cvmx_mdio_phy_reg_status_t phy_status;
+ /* Read PHY status register to find the mode of the interface. */
+ phy_status.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_STATUS);
+ if (phy_status.s.capable_extended_status == 0) // MII mode
+ state->mode = CVMX_MGMT_PORT_MII_MODE;
+ else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)
+ && phy_status.s.capable_extended_status) // RGMII mode
+ state->mode = CVMX_MGMT_PORT_RGMII_MODE;
+ else
+ state->mode = CVMX_MGMT_PORT_NONE;
+ }
+ else
+ {
+ cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Not able to read the PHY on MIX%d\n", port);
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+ }
+ state->port = port_num;
+ }
+
+ /* All interfaces should be configured in same mode */
+ for (i = 0; i < __cvmx_mgmt_port_num_ports(); i++)
+ {
+ if (i != port
+ && cvmx_mgmt_port_state_ptr[i].mode != CVMX_MGMT_PORT_NONE
+ && cvmx_mgmt_port_state_ptr[i].mode != state->mode)
+ {
+ cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: All ports in MIX interface are not configured in same mode.\n \
+ Port %d is configured as %d\n \
+ And Port %d is configured as %d\n", port, state->mode, i, cvmx_mgmt_port_state_ptr[i].mode);
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+ }
+ }
+
+ /* Create a default MAC address */
+ state->mac = 0x000000dead000000ull;
+ state->mac += 0xffffff & CAST64(state);
+
+ /* Setup the TX ring */
+ for (i=0; i<CVMX_MGMT_PORT_NUM_TX_BUFFERS; i++)
+ {
+ state->tx_ring[i].s.len = CVMX_MGMT_PORT_TX_BUFFER_SIZE;
+ state->tx_ring[i].s.addr = cvmx_ptr_to_phys(state->tx_buffers[i]);
+ }
+
+ /* Tell the HW where the TX ring is */
+ oring1.u64 = 0;
+ oring1.s.obase = cvmx_ptr_to_phys(state->tx_ring)>>3;
+ oring1.s.osize = CVMX_MGMT_PORT_NUM_TX_BUFFERS;
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
+
+ /* Setup the RX ring */
+ for (i=0; i<CVMX_MGMT_PORT_NUM_RX_BUFFERS; i++)
+ {
+ /* This size is -8 due to an errata for CN56XX pass 1 */
+ state->rx_ring[i].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
+ state->rx_ring[i].s.addr = cvmx_ptr_to_phys(state->rx_buffers[i]);
+ }
+
+ /* Tell the HW where the RX ring is */
+ iring1.u64 = 0;
+ iring1.s.ibase = cvmx_ptr_to_phys(state->rx_ring)>>3;
+ iring1.s.isize = CVMX_MGMT_PORT_NUM_RX_BUFFERS;
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
+ cvmx_write_csr(CVMX_MIXX_IRING2(port), CVMX_MGMT_PORT_NUM_RX_BUFFERS);
+
+ /* Disable the external input/output */
+ cvmx_mgmt_port_disable(port);
+
+ /* Set the MAC address filtering up */
+ cvmx_mgmt_port_set_mac(port, state->mac);
+
+ /* Set the default max size to an MTU of 1500 with L2 and VLAN */
+ cvmx_mgmt_port_set_max_packet_size(port, 1518);
+
+ /* Enable the port HW. Packets are not allowed until cvmx_mgmt_port_enable() is called */
+ mix_ctl.u64 = 0;
+ mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
+ mix_ctl.s.en = 1; /* Enable the port */
+ mix_ctl.s.nbtarb = 0; /* Arbitration mode */
+ mix_ctl.s.mrq_hwm = 1; /* MII CB-request FIFO programmable high watermark */
+ cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+
+ /* Select the mode of operation for the interface. */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
+
+ if (state->mode == CVMX_MGMT_PORT_RGMII_MODE)
+ agl_prtx_ctl.s.mode = 0;
+ else if (state->mode == CVMX_MGMT_PORT_MII_MODE)
+ agl_prtx_ctl.s.mode = 1;
+ else
+ {
+ cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Invalid mode for MIX(%d)\n", port);
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+ }
+
+ cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+ }
+
+ /* Initialize the physical layer. */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ /* MII clock counts are based on the 125 MHz reference, so our
+ delays need to be scaled to match the core clock rate. The
+ "+1" is to make sure rounding always waits a little too
+ long. */
+ uint64_t clock_scale = cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 125000000 + 1;
+
+ /* Take the DLL and clock tree out of reset */
+ agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
+ agl_prtx_ctl.s.clkrst = 0;
+ if (state->mode == CVMX_MGMT_PORT_RGMII_MODE) // RGMII Initialization
+ {
+ agl_prtx_ctl.s.dllrst = 0;
+ agl_prtx_ctl.s.clktx_byp = 0;
+ }
+ cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+ cvmx_read_csr(CVMX_AGL_PRTX_CTL(port)); /* Force write out before wait */
+
+ /* Wait for the DLL to lock. External 125 MHz reference clock must be stable at this point. */
+ cvmx_wait(256 * clock_scale);
+
+ /* The rest of the config is common between RGMII/MII */
+
+ /* Enable the interface */
+ agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
+ agl_prtx_ctl.s.enable = 1;
+ cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+
+ /* Read the value back to force the previous write */
+ agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
+
+ /* Enable the compensation controller */
+ agl_prtx_ctl.s.comp = 1;
+ agl_prtx_ctl.s.drv_byp = 0;
+ cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+ cvmx_read_csr(CVMX_AGL_PRTX_CTL(port)); /* Force write out before wait */
+ cvmx_wait(1024 * clock_scale); // wait for compensation state to lock.
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
+ {
+ /* Force compensation values, as they are not determined properly by HW */
+ cvmx_agl_gmx_drv_ctl_t drv_ctl;
+
+ drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
+ if (port)
+ {
+ drv_ctl.s.byp_en1 = 1;
+ drv_ctl.s.nctl1 = 6;
+ drv_ctl.s.pctl1 = 6;
+ }
+ else
+ {
+ drv_ctl.s.byp_en = 1;
+ drv_ctl.s.nctl = 6;
+ drv_ctl.s.pctl = 6;
+ }
+ cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+ }
+ }
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+ cvmx_error_enable_group(CVMX_ERROR_GROUP_MGMT_PORT, port);
+#endif
+ return CVMX_MGMT_PORT_SUCCESS;
+}
+
+
+/**
+ * Shutdown a management port. This currently disables packet IO
+ * but leaves all hardware and buffers. Another application can then
+ * call initialize() without redoing the hardware setup.
+ *
+ * @param port Management port
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+cvmx_mgmt_port_result_t cvmx_mgmt_port_shutdown(int port)
+{
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+ cvmx_error_disable_group(CVMX_ERROR_GROUP_MGMT_PORT, port);
+#endif
+
+ /* Stop packets from coming in */
+ cvmx_mgmt_port_disable(port);
+
+ /* We don't free any memory so the next initialize can reuse the HW setup */
+ return CVMX_MGMT_PORT_SUCCESS;
+}
+
+
+/**
+ * Enable packet IO on a management port
+ *
+ * @param port Management port
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+cvmx_mgmt_port_result_t cvmx_mgmt_port_enable(int port)
+{
+ cvmx_mgmt_port_state_t *state;
+ cvmx_agl_gmx_inf_mode_t agl_gmx_inf_mode;
+ cvmx_agl_gmx_rxx_frm_ctl_t rxx_frm_ctl;
+
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ state = cvmx_mgmt_port_state_ptr + port;
+
+ cvmx_spinlock_lock(&state->lock);
+
+ rxx_frm_ctl.u64 = 0;
+ rxx_frm_ctl.s.pre_align = 1;
+ rxx_frm_ctl.s.pad_len = 1; /* When set, disables the length check for non-min sized pkts with padding in the client data */
+ rxx_frm_ctl.s.vlan_len = 1; /* When set, disables the length check for VLAN pkts */
+ rxx_frm_ctl.s.pre_free = 1; /* When set, PREAMBLE checking is less strict */
+ rxx_frm_ctl.s.ctl_smac = 0; /* Control Pause Frames can match station SMAC */
+ rxx_frm_ctl.s.ctl_mcst = 1; /* Control Pause Frames can match globally assign Multicast address */
+ rxx_frm_ctl.s.ctl_bck = 1; /* Forward pause information to TX block */
+ rxx_frm_ctl.s.ctl_drp = 1; /* Drop Control Pause Frames */
+ rxx_frm_ctl.s.pre_strp = 1; /* Strip off the preamble */
+ rxx_frm_ctl.s.pre_chk = 1; /* This port is configured to send PREAMBLE+SFD to begin every frame. GMX checks that the PREAMBLE is sent correctly */
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
+
+ /* Enable the AGL block */
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ {
+ agl_gmx_inf_mode.u64 = 0;
+ agl_gmx_inf_mode.s.en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+ }
+
+ /* Configure the port duplex and enables */
+ cvmx_mgmt_port_link_set(port, cvmx_mgmt_port_link_get(port));
+
+ cvmx_spinlock_unlock(&state->lock);
+ return CVMX_MGMT_PORT_SUCCESS;
+}
+
+
+/**
+ * Disable packet IO on a management port
+ *
+ * @param port Management port
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+cvmx_mgmt_port_result_t cvmx_mgmt_port_disable(int port)
+{
+ cvmx_mgmt_port_state_t *state;
+ cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;
+
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ state = cvmx_mgmt_port_state_ptr + port;
+
+ cvmx_spinlock_lock(&state->lock);
+
+ agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ agl_gmx_prtx.s.en = 0;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ cvmx_spinlock_unlock(&state->lock);
+ return CVMX_MGMT_PORT_SUCCESS;
+}
+
+
+/**
+ * Send a packet out the management port. The packet is copied so
+ * the input buffer isn't used after this call.
+ *
+ * @param port Management port
+ * @param packet_len Length of the packet to send. It does not include the final CRC
+ * @param buffer Packet data
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+cvmx_mgmt_port_result_t cvmx_mgmt_port_send(int port, int packet_len, void *buffer)
+{
+ cvmx_mgmt_port_state_t *state;
+ cvmx_mixx_oring2_t mix_oring2;
+
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ /* Make sure the packet size is valid */
+ if ((packet_len < 1) || (packet_len > CVMX_MGMT_PORT_TX_BUFFER_SIZE))
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ if (buffer == NULL)
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ state = cvmx_mgmt_port_state_ptr + port;
+
+ cvmx_spinlock_lock(&state->lock);
+
+ mix_oring2.u64 = cvmx_read_csr(CVMX_MIXX_ORING2(port));
+ if (mix_oring2.s.odbell >= CVMX_MGMT_PORT_NUM_TX_BUFFERS - 1)
+ {
+ /* No room for another packet */
+ cvmx_spinlock_unlock(&state->lock);
+ return CVMX_MGMT_PORT_NO_MEMORY;
+ }
+ else
+ {
+ /* Copy the packet into the output buffer */
+ memcpy(state->tx_buffers[state->tx_write_index], buffer, packet_len);
+ /* Insert the source MAC */
+ memcpy(state->tx_buffers[state->tx_write_index] + 6, ((char*)&state->mac) + 2, 6);
+ /* Update the TX ring buffer entry size */
+ state->tx_ring[state->tx_write_index].s.len = packet_len;
+ /* This code doesn't support TX timestamps */
+ state->tx_ring[state->tx_write_index].s.tstamp = 0;
+ /* Increment our TX index */
+ state->tx_write_index = (state->tx_write_index + 1) % CVMX_MGMT_PORT_NUM_TX_BUFFERS;
+ /* Ring the doorbell, sending the packet */
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
+ if (cvmx_read_csr(CVMX_MIXX_ORCNT(port)))
+ cvmx_write_csr(CVMX_MIXX_ORCNT(port), cvmx_read_csr(CVMX_MIXX_ORCNT(port)));
+
+ cvmx_spinlock_unlock(&state->lock);
+ return CVMX_MGMT_PORT_SUCCESS;
+ }
+}
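
A sketch of the expected call order around send and receive (port number and
frame contents are placeholders; error handling is abbreviated):

    static void mgmt_port_example(void)
    {
        uint8_t frame[64] = {0};        /* dummy packet, final CRC excluded */
        uint8_t rx[CVMX_MGMT_PORT_RX_BUFFER_SIZE];
        int len;

        if (cvmx_mgmt_port_initialize(0) != CVMX_MGMT_PORT_SUCCESS)
            return;
        cvmx_mgmt_port_enable(0);
        cvmx_mgmt_port_send(0, sizeof(frame), frame);
        len = cvmx_mgmt_port_receive(0, sizeof(rx), rx);
        if (len > 0)                    /* 0 means no packet was pending */
            cvmx_dprintf("received %d bytes\n", len);
        cvmx_mgmt_port_disable(0);
    }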
+
+
+#if defined(__FreeBSD__)
+/**
+ * Send a packet out the management port. The packet is copied so
+ * the input mbuf isn't used after this call.
+ *
+ * @param port Management port
+ * @param m Packet mbuf (with pkthdr)
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+cvmx_mgmt_port_result_t cvmx_mgmt_port_sendm(int port, const struct mbuf *m)
+{
+ cvmx_mgmt_port_state_t *state;
+ cvmx_mixx_oring2_t mix_oring2;
+
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ /* Make sure the packet size is valid */
+ if ((m->m_pkthdr.len < 1) || (m->m_pkthdr.len > CVMX_MGMT_PORT_TX_BUFFER_SIZE))
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ state = cvmx_mgmt_port_state_ptr + port;
+
+ cvmx_spinlock_lock(&state->lock);
+
+ mix_oring2.u64 = cvmx_read_csr(CVMX_MIXX_ORING2(port));
+ if (mix_oring2.s.odbell >= CVMX_MGMT_PORT_NUM_TX_BUFFERS - 1)
+ {
+ /* No room for another packet */
+ cvmx_spinlock_unlock(&state->lock);
+ return CVMX_MGMT_PORT_NO_MEMORY;
+ }
+ else
+ {
+ /* Copy the packet into the output buffer */
+ m_copydata(m, 0, m->m_pkthdr.len, state->tx_buffers[state->tx_write_index]);
+ /* Update the TX ring buffer entry size */
+ state->tx_ring[state->tx_write_index].s.len = m->m_pkthdr.len;
+ /* This code doesn't support TX timestamps */
+ state->tx_ring[state->tx_write_index].s.tstamp = 0;
+ /* Increment our TX index */
+ state->tx_write_index = (state->tx_write_index + 1) % CVMX_MGMT_PORT_NUM_TX_BUFFERS;
+ /* Ring the doorbell, sending the packet */
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
+ if (cvmx_read_csr(CVMX_MIXX_ORCNT(port)))
+ cvmx_write_csr(CVMX_MIXX_ORCNT(port), cvmx_read_csr(CVMX_MIXX_ORCNT(port)));
+
+ cvmx_spinlock_unlock(&state->lock);
+ return CVMX_MGMT_PORT_SUCCESS;
+ }
+}
+#endif
+
+
+/**
+ * Receive a packet from the management port.
+ *
+ * @param port Management port
+ * @param buffer_len Size of the buffer to receive the packet into
+ * @param buffer Buffer to receive the packet into
+ *
+ * @return The size of the packet, or a negative error code on failure. Zero
+ * means that no packets were available.
+ */
+int cvmx_mgmt_port_receive(int port, int buffer_len, uint8_t *buffer)
+{
+ cvmx_mixx_ircnt_t mix_ircnt;
+ cvmx_mgmt_port_state_t *state;
+ int result;
+
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ /* Make sure the buffer size is valid */
+ if (buffer_len < 1)
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ if (buffer == NULL)
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ state = cvmx_mgmt_port_state_ptr + port;
+
+ cvmx_spinlock_lock(&state->lock);
+
+ /* Find out how many RX packets are pending */
+ mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
+ if (mix_ircnt.s.ircnt)
+ {
+ uint64_t *source = (void *)state->rx_buffers[state->rx_read_index];
+ uint64_t *zero_check = source;
+ /* CN56XX pass 1 has an errata where packets might start 8 bytes
+ into the buffer instead of at their correct location. If the
+ first 8 bytes are zero, we assume this has happened */
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && (*zero_check == 0))
+ source++;
+ /* Start off with zero bytes received */
+ result = 0;
+ /* While the completion code signals more data, copy the buffers
+ into the user's data */
+ while (state->rx_ring[state->rx_read_index].s.code == 16)
+ {
+ /* Only copy what will fit in the user's buffer */
+ int length = state->rx_ring[state->rx_read_index].s.len;
+ if (length > buffer_len)
+ length = buffer_len;
+ memcpy(buffer, source, length);
+ /* Reduce the size of the buffer to the remaining space. If we run
+ out we will signal an error when the code 15 buffer doesn't fit */
+ buffer += length;
+ buffer_len -= length;
+ result += length;
+ /* Update this buffer for reuse in future receives. This size is
+ -8 due to an errata for CN56XX pass 1 */
+ state->rx_ring[state->rx_read_index].s.code = 0;
+ state->rx_ring[state->rx_read_index].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
+ state->rx_read_index = (state->rx_read_index + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
+ /* Zero the beginning of the buffer for use by the errata check */
+ *zero_check = 0;
+ CVMX_SYNCWS;
+ /* Increment the number of RX buffers */
+ cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
+ source = (void *)state->rx_buffers[state->rx_read_index];
+ zero_check = source;
+ }
+
+ /* Check for the final good completion code */
+ if (state->rx_ring[state->rx_read_index].s.code == 15)
+ {
+ if (buffer_len >= state->rx_ring[state->rx_read_index].s.len)
+ {
+ int length = state->rx_ring[state->rx_read_index].s.len;
+ memcpy(buffer, source, length);
+ result += length;
+ }
+ else
+ {
+ /* Not enough room for the packet */
+ cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: Packet (%d) larger than supplied buffer (%d)\n", state->rx_ring[state->rx_read_index].s.len, buffer_len);
+ result = CVMX_MGMT_PORT_NO_MEMORY;
+ }
+ }
+ else
+ {
+ cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: Receive error code %d. Packet dropped(Len %d), \n",
+ state->rx_ring[state->rx_read_index].s.code, state->rx_ring[state->rx_read_index].s.len + result);
+ result = -state->rx_ring[state->rx_read_index].s.code;
+
+
+ /* Check to see if we need to change the duplex. */
+ cvmx_mgmt_port_link_set(port, cvmx_mgmt_port_link_get(port));
+ }
+
+ /* Clean out the ring buffer entry. This size is -8 due to an errata
+ for CN56XX pass 1 */
+ state->rx_ring[state->rx_read_index].s.code = 0;
+ state->rx_ring[state->rx_read_index].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
+ state->rx_read_index = (state->rx_read_index + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
+ /* Zero the beginning of the buffer for use by the errata check */
+ *zero_check = 0;
+ CVMX_SYNCWS;
+ /* Increment the number of RX buffers */
+ cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
+ /* Decrement the pending RX count */
+ cvmx_write_csr(CVMX_MIXX_IRCNT(port), 1);
+ }
+ else
+ {
+ /* No packets available */
+ result = 0;
+ }
+ cvmx_spinlock_unlock(&state->lock);
+ return result;
+}
+
+/**
+ * Set the MAC address for a management port
+ *
+ * @param port Management port
+ * @param mac New MAC address. The lower 6 bytes are used.
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+cvmx_mgmt_port_result_t cvmx_mgmt_port_set_mac(int port, uint64_t mac)
+{
+ cvmx_mgmt_port_state_t *state;
+ cvmx_agl_gmx_rxx_adr_ctl_t agl_gmx_rxx_adr_ctl;
+
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ state = cvmx_mgmt_port_state_ptr + port;
+
+ cvmx_spinlock_lock(&state->lock);
+
+ agl_gmx_rxx_adr_ctl.u64 = 0;
+ agl_gmx_rxx_adr_ctl.s.cam_mode = 1; /* Only accept matching MAC addresses */
+ agl_gmx_rxx_adr_ctl.s.mcst = 0; /* Drop multicast */
+ agl_gmx_rxx_adr_ctl.s.bcst = 1; /* Allow broadcast */
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);
+
+ /* Only using one of the CAMs */
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), (mac >> 40) & 0xff);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), (mac >> 32) & 0xff);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), (mac >> 24) & 0xff);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), (mac >> 16) & 0xff);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), (mac >> 8) & 0xff);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), (mac >> 0) & 0xff);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);
+ state->mac = mac;
+
+ cvmx_spinlock_unlock(&state->lock);
+ return CVMX_MGMT_PORT_SUCCESS;
+}
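
Since only the low 6 bytes of the argument are used, a MAC address such as
02:00:de:ad:00:01 (a hypothetical, locally administered address) is passed as:

    cvmx_mgmt_port_set_mac(0, 0x0200dead0001ull);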
+
+
+/**
+ * Get the MAC address for a management port
+ *
+ * @param port Management port
+ *
+ * @return MAC address
+ */
+uint64_t cvmx_mgmt_port_get_mac(int port)
+{
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ return CVMX_MGMT_PORT_INVALID_PARAM;
+
+ return cvmx_mgmt_port_state_ptr[port].mac;
+}
+
+/**
+ * Set the multicast list.
+ *
+ * @param port Management port
+ * @param flags Interface flags
+ */
+void cvmx_mgmt_port_set_multicast_list(int port, int flags)
+{
+ cvmx_mgmt_port_state_t *state;
+ cvmx_agl_gmx_rxx_adr_ctl_t agl_gmx_rxx_adr_ctl;
+
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ return;
+
+ state = cvmx_mgmt_port_state_ptr + port;
+
+ cvmx_spinlock_lock(&state->lock);
+
+ agl_gmx_rxx_adr_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port));
+
+ /* Allow broadcast MAC addresses */
+ if (!agl_gmx_rxx_adr_ctl.s.bcst)
+ agl_gmx_rxx_adr_ctl.s.bcst = 1;
+
+ if ((flags & CVMX_IFF_ALLMULTI) || (flags & CVMX_IFF_PROMISC))
+ agl_gmx_rxx_adr_ctl.s.mcst = 2; /* Force accept multicast packets */
+ else
+ agl_gmx_rxx_adr_ctl.s.mcst = 1; /* Force reject multicast packets */
+
+ if (flags & CVMX_IFF_PROMISC)
+ agl_gmx_rxx_adr_ctl.s.cam_mode = 0; /* Reject matches if promisc. Since CAM is shut off, should accept everything */
+ else
+ agl_gmx_rxx_adr_ctl.s.cam_mode = 1; /* Filter packets based on the CAM */
+
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);
+
+ if (flags & CVMX_IFF_PROMISC)
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
+ else
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);
+
+ cvmx_spinlock_unlock(&state->lock);
+}
+
+
+/**
+ * Set the maximum packet allowed in. Size is specified
+ * including L2 but without FCS. A normal MTU would correspond
+ * to 1514 assuming the standard 14 byte L2 header.
+ *
+ * @param port Management port
+ * @param size_without_fcs
+ * Size in bytes without FCS
+ */
+void cvmx_mgmt_port_set_max_packet_size(int port, int size_without_fcs)
+{
+ cvmx_mgmt_port_state_t *state;
+
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ return;
+
+ state = cvmx_mgmt_port_state_ptr + port;
+
+ cvmx_spinlock_lock(&state->lock);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), (size_without_fcs+7) & 0xfff8);
+ cvmx_spinlock_unlock(&state->lock);
+}
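
For a standard 1500-byte MTU with a 14-byte L2 header and no VLAN tag, that
works out to:

    cvmx_mgmt_port_set_max_packet_size(0, 1514);  /* 1500 + 14, FCS excluded */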
+
+/**
+ * Return the link state of an RGMII/MII port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_mgmt_port_link_set().
+ *
+ * @param port The RGMII/MII interface port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t cvmx_mgmt_port_link_get(int port)
+{
+ cvmx_mgmt_port_state_t *state;
+ cvmx_helper_link_info_t result;
+
+ state = cvmx_mgmt_port_state_ptr + port;
+ result.u64 = 0;
+
+ if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
+ {
+ cvmx_dprintf("WARNING: Invalid port %d\n", port);
+ return result;
+ }
+
+ if (state->port != -1)
+ return __cvmx_helper_board_link_get(state->port);
+ else // Simulator does not have PHY, use some defaults.
+ {
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ result.s.speed = 100;
+ return result;
+ }
+}
+
+/**
+ * Configure RGMII/MII port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ *
+ * @param port RGMII/MII interface port
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_mgmt_port_link_set(int port, cvmx_helper_link_info_t link_info)
+{
+ cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;
+
+ /* Disable GMX before we make any changes. */
+ agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ agl_gmx_prtx.s.en = 0;
+ agl_gmx_prtx.s.tx_en = 0;
+ agl_gmx_prtx.s.rx_en = 0;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ uint64_t one_second = cvmx_clock_get_rate(CVMX_CLOCK_CORE);
+ /* Wait for GMX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port), cvmx_agl_gmx_prtx_cfg_t, rx_idle, ==, 1, one_second)
+ || CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port), cvmx_agl_gmx_prtx_cfg_t, tx_idle, ==, 1, one_second))
+ {
+ cvmx_dprintf("MIX%d: Timeout waiting for GMX to be idle\n", port);
+ return -1;
+ }
+ }
+
+ agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+
+ /* Set duplex mode */
+ if (!link_info.s.link_up)
+ agl_gmx_prtx.s.duplex = 1; /* Force full duplex on down links */
+ else
+ agl_gmx_prtx.s.duplex = link_info.s.full_duplex;
+
+ switch(link_info.s.speed)
+ {
+ case 10:
+ agl_gmx_prtx.s.speed = 0;
+ agl_gmx_prtx.s.slottime = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ agl_gmx_prtx.s.speed_msb = 1;
+ agl_gmx_prtx.s.burst = 1;
+ }
+ break;
+
+ case 100:
+ agl_gmx_prtx.s.speed = 0;
+ agl_gmx_prtx.s.slottime = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ agl_gmx_prtx.s.speed_msb = 0;
+ agl_gmx_prtx.s.burst = 1;
+ }
+ break;
+
+ case 1000:
+ /* 1000 MBits is only supported on 6XXX chips */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ agl_gmx_prtx.s.speed_msb = 0;
+ agl_gmx_prtx.s.speed = 1;
+ agl_gmx_prtx.s.slottime = 1; /* Only matters for half-duplex */
+ agl_gmx_prtx.s.burst = agl_gmx_prtx.s.duplex;
+ }
+ break;
+
+ /* No link */
+ case 0:
+ default:
+ break;
+ }
+
+ /* Write the new GMX setting with the port still disabled. */
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ /* Read GMX CFG again to make sure the config is completed. */
+ agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ cvmx_mgmt_port_state_t *state = cvmx_mgmt_port_state_ptr + port;
+ cvmx_agl_gmx_txx_clk_t agl_clk;
+ agl_clk.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_CLK(port));
+ agl_clk.s.clk_cnt = 1; /* MII (both speeds) and RGMII 1000 setting */
+ if (state->mode == CVMX_MGMT_PORT_RGMII_MODE)
+ {
+ if (link_info.s.speed == 10)
+ agl_clk.s.clk_cnt = 50;
+ else if (link_info.s.speed == 100)
+ agl_clk.s.clk_cnt = 5;
+ }
+ cvmx_write_csr(CVMX_AGL_GMX_TXX_CLK(port), agl_clk.u64);
+ }
+
+ /* Enable transmit and receive ports */
+ agl_gmx_prtx.s.tx_en = 1;
+ agl_gmx_prtx.s.rx_en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ /* Enable the link. */
+ agl_gmx_prtx.s.en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+ return 0;
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-mgmt-port.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-mgmt-port.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-mgmt-port.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-mgmt-port.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,238 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support functions for managing the MII management port
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_MGMT_PORT_H__
+#define __CVMX_MGMT_PORT_H__
+
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_MGMT_PORT_NUM_PORTS 2 /* Maximum number of mgmt ports; chips with MIX support have at most two */
+#define CVMX_MGMT_PORT_NUM_TX_BUFFERS 16 /* Number of TX ring buffer entries and buffers */
+#define CVMX_MGMT_PORT_NUM_RX_BUFFERS 128 /* Number of RX ring buffer entries and buffers */
+#define CVMX_MGMT_PORT_TX_BUFFER_SIZE 12288 /* Size of each TX buffer */
+#define CVMX_MGMT_PORT_RX_BUFFER_SIZE 1536 /* Size of each RX buffer */
+
+typedef enum
+{
+ CVMX_MGMT_PORT_SUCCESS = 0,
+ CVMX_MGMT_PORT_NO_MEMORY = -1,
+ CVMX_MGMT_PORT_INVALID_PARAM = -2,
+ CVMX_MGMT_PORT_INIT_ERROR = -3,
+} cvmx_mgmt_port_result_t;
+
+
+/* Enumeration of Net Device interface flags. */
+typedef enum
+{
+ CVMX_IFF_PROMISC = 0x100, /* receive all packets */
+ CVMX_IFF_ALLMULTI = 0x200, /* receive all multicast packets */
+} cvmx_mgmt_port_netdevice_flags_t;
+
+/**
+ * Called to initialize a management port for use. Multiple calls
+ * to this function across applications is safe.
+ *
+ * @param port Port to initialize
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+extern cvmx_mgmt_port_result_t cvmx_mgmt_port_initialize(int port);
+
+/**
+ * Shutdown a management port. This currently disables packet IO
+ * but leaves all hardware and buffers. Another application can then
+ * call initialize() without redoing the hardware setup.
+ *
+ * @param port Management port
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+extern cvmx_mgmt_port_result_t cvmx_mgmt_port_shutdown(int port);
+
+/**
+ * Enable packet IO on a management port
+ *
+ * @param port Management port
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+extern cvmx_mgmt_port_result_t cvmx_mgmt_port_enable(int port);
+
+/**
+ * Disable packet IO on a management port
+ *
+ * @param port Management port
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+extern cvmx_mgmt_port_result_t cvmx_mgmt_port_disable(int port);
+
+/**
+ * Send a packet out the management port. The packet is copied so
+ * the input buffer isn't used after this call.
+ *
+ * @param port Management port
+ * @param packet_len Length of the packet to send. It does not include the final CRC
+ * @param buffer Packet data
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+extern cvmx_mgmt_port_result_t cvmx_mgmt_port_send(int port, int packet_len, void *buffer);
+
+#if defined(__FreeBSD__)
+/**
+ * Send a packet out the management port. The packet is copied so
+ * the input mbuf isn't used after this call.
+ *
+ * @param port Management port
+ * @param m Packet mbuf (with pkthdr)
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+extern cvmx_mgmt_port_result_t cvmx_mgmt_port_sendm(int port, const struct mbuf *m);
+#endif
+
+/**
+ * Receive a packet from the management port.
+ *
+ * @param port Management port
+ * @param buffer_len Size of the buffer to receive the packet into
+ * @param buffer Buffer to receive the packet into
+ *
+ * @return The size of the packet, or a negative error code on failure. Zero
+ * means that no packets were available.
+ */
+extern int cvmx_mgmt_port_receive(int port, int buffer_len, uint8_t *buffer);
+
+/**
+ * Set the MAC address for a management port
+ *
+ * @param port Management port
+ * @param mac New MAC address. The lower 6 bytes are used.
+ *
+ * @return CVMX_MGMT_PORT_SUCCESS or an error code
+ */
+extern cvmx_mgmt_port_result_t cvmx_mgmt_port_set_mac(int port, uint64_t mac);
+
+/**
+ * Get the MAC address for a management port
+ *
+ * @param port Management port
+ *
+ * @return MAC address
+ */
+extern uint64_t cvmx_mgmt_port_get_mac(int port);
+#define CVMX_MGMT_PORT_GET_MAC_ERROR ((unsigned long long)-2LL)
+
+/**
+ * Set the multicast list.
+ *
+ * @param port Management port
+ * @param flags Interface flags
+ */
+extern void cvmx_mgmt_port_set_multicast_list(int port, int flags);
+
+/**
+ * Set the maximum packet size allowed in. The size is specified
+ * including L2 but without FCS. A normal MTU would correspond
+ * to 1514 assuming the standard 14 byte L2 header.
+ *
+ * @param port Management port
+ * @param size_without_fcs
+ * Size in bytes without FCS
+ */
+extern void cvmx_mgmt_port_set_max_packet_size(int port, int size_without_fcs);
+
+/**
+ * Return the link state of an RGMII/MII port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to __cvmx_mgmt_port_link_set().
+ *
+ * @param port The RGMII/MII interface port to query
+ *
+ * @return Link state
+ */
+extern cvmx_helper_link_info_t cvmx_mgmt_port_link_get(int port);
+
+/**
+ * Configure RGMII/MII port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ *
+ * @param port RGMII/MII interface port
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int cvmx_mgmt_port_link_set(int port, cvmx_helper_link_info_t link_info);
+
+/**
+ * Return the number of management ports supported on this board.
+ *
+ * @return Number of ports
+ */
+extern int cvmx_mgmt_port_num_ports(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_MGMT_PORT_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-mgmt-port.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
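
For reference, a minimal sketch of how the management-port API declared in
cvmx-mgmt-port.h above might be driven from a simple polling loop. This
assumes an Octeon SDK build environment; the port number, MAC address, and
echo behavior are illustrative only and not part of the header.

    #include "cvmx.h"
    #include "cvmx-mgmt-port.h"

    /* Bring up a management port and echo received frames back out. */
    static void mgmt_port_echo(int port)
    {
        uint8_t buffer[CVMX_MGMT_PORT_RX_BUFFER_SIZE];

        if (cvmx_mgmt_port_initialize(port) != CVMX_MGMT_PORT_SUCCESS)
            return;

        /* Locally administered MAC, illustrative only; lower 6 bytes used */
        cvmx_mgmt_port_set_mac(port, 0x020000000001ull);
        cvmx_mgmt_port_enable(port);

        for (;;) {
            /* Positive return is a packet length, zero means no packet
               was pending, negative is an error */
            int len = cvmx_mgmt_port_receive(port, sizeof(buffer), buffer);
            if (len > 0)
                cvmx_mgmt_port_send(port, len, buffer); /* buffer is copied */
            else if (len < 0)
                break;
        }

        cvmx_mgmt_port_disable(port);
        cvmx_mgmt_port_shutdown(port);
    }
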
Added: trunk/sys/contrib/octeon-sdk/cvmx-mio-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-mio-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-mio-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,9526 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-mio-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon mio.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_MIO_DEFS_H__
+#define __CVMX_MIO_DEFS_H__
+
+#define CVMX_MIO_BOOT_BIST_STAT (CVMX_ADD_IO_SEG(0x00011800000000F8ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_BOOT_COMP CVMX_MIO_BOOT_COMP_FUNC()
+static inline uint64_t CVMX_MIO_BOOT_COMP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_BOOT_COMP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800000000B8ull);
+}
+#else
+#define CVMX_MIO_BOOT_COMP (CVMX_ADD_IO_SEG(0x00011800000000B8ull))
+#endif
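
Every CSR accessor in this file follows the pattern just shown: with
CVMX_ENABLE_CSR_ADDRESS_CHECKING set, the name expands to an inline function
that calls cvmx_warn() when the register is not present on the running Octeon
model before returning the address; otherwise it compiles down to a bare
address constant. A caller reads it the same way in either build. A brief
sketch (cvmx_read_csr is the SDK's standard 64-bit CSR read; the surrounding
code is illustrative):

    /* With address checking enabled this warns at run time if the
       current Octeon model lacks MIO_BOOT_COMP; the returned address
       is identical in both configurations. */
    uint64_t boot_comp = cvmx_read_csr(CVMX_MIO_BOOT_COMP);
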
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_BOOT_DMA_CFGX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 2))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_BOOT_DMA_CFGX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000100ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_MIO_BOOT_DMA_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000100ull) + ((offset) & 3) * 8)
+#endif
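
Indexed CSRs such as this one compute the address as base + stride * offset;
the `(offset) & 3` mask only bounds the arithmetic, it does not validate the
index, which is why the checked variant warns rather than clamps. Worked out
from the definition above (a sketch, using cvmx_read_csr as before):

    /* CVMX_MIO_BOOT_DMA_CFGX(n) = 0x0001180000000100 + (n & 3) * 8:
       engine 0 is at ...0100 and engine 1 at ...0108. An out-of-range
       n such as 5 silently aliases to ...0108 in the unchecked build. */
    uint64_t dma_cfg0 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(0));
    uint64_t dma_cfg1 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(1));
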
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_BOOT_DMA_INTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 2))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_BOOT_DMA_INTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000138ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_MIO_BOOT_DMA_INTX(offset) (CVMX_ADD_IO_SEG(0x0001180000000138ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_BOOT_DMA_INT_ENX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 2))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_BOOT_DMA_INT_ENX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000150ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_MIO_BOOT_DMA_INT_ENX(offset) (CVMX_ADD_IO_SEG(0x0001180000000150ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_BOOT_DMA_TIMX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 2))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_BOOT_DMA_TIMX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000120ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_MIO_BOOT_DMA_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001180000000120ull) + ((offset) & 3) * 8)
+#endif
+#define CVMX_MIO_BOOT_ERR (CVMX_ADD_IO_SEG(0x00011800000000A0ull))
+#define CVMX_MIO_BOOT_INT (CVMX_ADD_IO_SEG(0x00011800000000A8ull))
+#define CVMX_MIO_BOOT_LOC_ADR (CVMX_ADD_IO_SEG(0x0001180000000090ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_BOOT_LOC_CFGX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_BOOT_LOC_CFGX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000080ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_MIO_BOOT_LOC_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000080ull) + ((offset) & 1) * 8)
+#endif
+#define CVMX_MIO_BOOT_LOC_DAT (CVMX_ADD_IO_SEG(0x0001180000000098ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_BOOT_PIN_DEFS CVMX_MIO_BOOT_PIN_DEFS_FUNC()
+static inline uint64_t CVMX_MIO_BOOT_PIN_DEFS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_BOOT_PIN_DEFS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800000000C0ull);
+}
+#else
+#define CVMX_MIO_BOOT_PIN_DEFS (CVMX_ADD_IO_SEG(0x00011800000000C0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_BOOT_REG_CFGX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_MIO_BOOT_REG_CFGX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000000ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_MIO_BOOT_REG_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000000ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_BOOT_REG_TIMX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_MIO_BOOT_REG_TIMX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000040ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_MIO_BOOT_REG_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001180000000040ull) + ((offset) & 7) * 8)
+#endif
+#define CVMX_MIO_BOOT_THR (CVMX_ADD_IO_SEG(0x00011800000000B0ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_BUF_DAT CVMX_MIO_EMM_BUF_DAT_FUNC()
+static inline uint64_t CVMX_MIO_EMM_BUF_DAT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_BUF_DAT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800000020E8ull);
+}
+#else
+#define CVMX_MIO_EMM_BUF_DAT (CVMX_ADD_IO_SEG(0x00011800000020E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_BUF_IDX CVMX_MIO_EMM_BUF_IDX_FUNC()
+static inline uint64_t CVMX_MIO_EMM_BUF_IDX_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_BUF_IDX not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800000020E0ull);
+}
+#else
+#define CVMX_MIO_EMM_BUF_IDX (CVMX_ADD_IO_SEG(0x00011800000020E0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_CFG CVMX_MIO_EMM_CFG_FUNC()
+static inline uint64_t CVMX_MIO_EMM_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002000ull);
+}
+#else
+#define CVMX_MIO_EMM_CFG (CVMX_ADD_IO_SEG(0x0001180000002000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_CMD CVMX_MIO_EMM_CMD_FUNC()
+static inline uint64_t CVMX_MIO_EMM_CMD_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_CMD not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002058ull);
+}
+#else
+#define CVMX_MIO_EMM_CMD (CVMX_ADD_IO_SEG(0x0001180000002058ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_DMA CVMX_MIO_EMM_DMA_FUNC()
+static inline uint64_t CVMX_MIO_EMM_DMA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_DMA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002050ull);
+}
+#else
+#define CVMX_MIO_EMM_DMA (CVMX_ADD_IO_SEG(0x0001180000002050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_INT CVMX_MIO_EMM_INT_FUNC()
+static inline uint64_t CVMX_MIO_EMM_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002078ull);
+}
+#else
+#define CVMX_MIO_EMM_INT (CVMX_ADD_IO_SEG(0x0001180000002078ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_INT_EN CVMX_MIO_EMM_INT_EN_FUNC()
+static inline uint64_t CVMX_MIO_EMM_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002080ull);
+}
+#else
+#define CVMX_MIO_EMM_INT_EN (CVMX_ADD_IO_SEG(0x0001180000002080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_EMM_MODEX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_MIO_EMM_MODEX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000002008ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_MIO_EMM_MODEX(offset) (CVMX_ADD_IO_SEG(0x0001180000002008ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_RCA CVMX_MIO_EMM_RCA_FUNC()
+static inline uint64_t CVMX_MIO_EMM_RCA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_RCA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800000020A0ull);
+}
+#else
+#define CVMX_MIO_EMM_RCA (CVMX_ADD_IO_SEG(0x00011800000020A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_RSP_HI CVMX_MIO_EMM_RSP_HI_FUNC()
+static inline uint64_t CVMX_MIO_EMM_RSP_HI_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_RSP_HI not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002070ull);
+}
+#else
+#define CVMX_MIO_EMM_RSP_HI (CVMX_ADD_IO_SEG(0x0001180000002070ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_RSP_LO CVMX_MIO_EMM_RSP_LO_FUNC()
+static inline uint64_t CVMX_MIO_EMM_RSP_LO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_RSP_LO not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002068ull);
+}
+#else
+#define CVMX_MIO_EMM_RSP_LO (CVMX_ADD_IO_SEG(0x0001180000002068ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_RSP_STS CVMX_MIO_EMM_RSP_STS_FUNC()
+static inline uint64_t CVMX_MIO_EMM_RSP_STS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_RSP_STS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002060ull);
+}
+#else
+#define CVMX_MIO_EMM_RSP_STS (CVMX_ADD_IO_SEG(0x0001180000002060ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_SAMPLE CVMX_MIO_EMM_SAMPLE_FUNC()
+static inline uint64_t CVMX_MIO_EMM_SAMPLE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_SAMPLE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002090ull);
+}
+#else
+#define CVMX_MIO_EMM_SAMPLE (CVMX_ADD_IO_SEG(0x0001180000002090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_STS_MASK CVMX_MIO_EMM_STS_MASK_FUNC()
+static inline uint64_t CVMX_MIO_EMM_STS_MASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_STS_MASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002098ull);
+}
+#else
+#define CVMX_MIO_EMM_STS_MASK (CVMX_ADD_IO_SEG(0x0001180000002098ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_SWITCH CVMX_MIO_EMM_SWITCH_FUNC()
+static inline uint64_t CVMX_MIO_EMM_SWITCH_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_SWITCH not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002048ull);
+}
+#else
+#define CVMX_MIO_EMM_SWITCH (CVMX_ADD_IO_SEG(0x0001180000002048ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_EMM_WDOG CVMX_MIO_EMM_WDOG_FUNC()
+static inline uint64_t CVMX_MIO_EMM_WDOG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_EMM_WDOG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000002088ull);
+}
+#else
+#define CVMX_MIO_EMM_WDOG (CVMX_ADD_IO_SEG(0x0001180000002088ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_FUS_BNK_DATX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_FUS_BNK_DATX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001520ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_MIO_FUS_BNK_DATX(offset) (CVMX_ADD_IO_SEG(0x0001180000001520ull) + ((offset) & 3) * 8)
+#endif
+#define CVMX_MIO_FUS_DAT0 (CVMX_ADD_IO_SEG(0x0001180000001400ull))
+#define CVMX_MIO_FUS_DAT1 (CVMX_ADD_IO_SEG(0x0001180000001408ull))
+#define CVMX_MIO_FUS_DAT2 (CVMX_ADD_IO_SEG(0x0001180000001410ull))
+#define CVMX_MIO_FUS_DAT3 (CVMX_ADD_IO_SEG(0x0001180000001418ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_FUS_EMA CVMX_MIO_FUS_EMA_FUNC()
+static inline uint64_t CVMX_MIO_FUS_EMA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_FUS_EMA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001550ull);
+}
+#else
+#define CVMX_MIO_FUS_EMA (CVMX_ADD_IO_SEG(0x0001180000001550ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_FUS_PDF CVMX_MIO_FUS_PDF_FUNC()
+static inline uint64_t CVMX_MIO_FUS_PDF_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_FUS_PDF not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001420ull);
+}
+#else
+#define CVMX_MIO_FUS_PDF (CVMX_ADD_IO_SEG(0x0001180000001420ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_FUS_PLL CVMX_MIO_FUS_PLL_FUNC()
+static inline uint64_t CVMX_MIO_FUS_PLL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_FUS_PLL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001580ull);
+}
+#else
+#define CVMX_MIO_FUS_PLL (CVMX_ADD_IO_SEG(0x0001180000001580ull))
+#endif
+#define CVMX_MIO_FUS_PROG (CVMX_ADD_IO_SEG(0x0001180000001510ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_FUS_PROG_TIMES CVMX_MIO_FUS_PROG_TIMES_FUNC()
+static inline uint64_t CVMX_MIO_FUS_PROG_TIMES_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_FUS_PROG_TIMES not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001518ull);
+}
+#else
+#define CVMX_MIO_FUS_PROG_TIMES (CVMX_ADD_IO_SEG(0x0001180000001518ull))
+#endif
+#define CVMX_MIO_FUS_RCMD (CVMX_ADD_IO_SEG(0x0001180000001500ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_FUS_READ_TIMES CVMX_MIO_FUS_READ_TIMES_FUNC()
+static inline uint64_t CVMX_MIO_FUS_READ_TIMES_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_FUS_READ_TIMES not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001570ull);
+}
+#else
+#define CVMX_MIO_FUS_READ_TIMES (CVMX_ADD_IO_SEG(0x0001180000001570ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_FUS_REPAIR_RES0 CVMX_MIO_FUS_REPAIR_RES0_FUNC()
+static inline uint64_t CVMX_MIO_FUS_REPAIR_RES0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_FUS_REPAIR_RES0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001558ull);
+}
+#else
+#define CVMX_MIO_FUS_REPAIR_RES0 (CVMX_ADD_IO_SEG(0x0001180000001558ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_FUS_REPAIR_RES1 CVMX_MIO_FUS_REPAIR_RES1_FUNC()
+static inline uint64_t CVMX_MIO_FUS_REPAIR_RES1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_FUS_REPAIR_RES1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001560ull);
+}
+#else
+#define CVMX_MIO_FUS_REPAIR_RES1 (CVMX_ADD_IO_SEG(0x0001180000001560ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_FUS_REPAIR_RES2 CVMX_MIO_FUS_REPAIR_RES2_FUNC()
+static inline uint64_t CVMX_MIO_FUS_REPAIR_RES2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_FUS_REPAIR_RES2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001568ull);
+}
+#else
+#define CVMX_MIO_FUS_REPAIR_RES2 (CVMX_ADD_IO_SEG(0x0001180000001568ull))
+#endif
+#define CVMX_MIO_FUS_SPR_REPAIR_RES (CVMX_ADD_IO_SEG(0x0001180000001548ull))
+#define CVMX_MIO_FUS_SPR_REPAIR_SUM (CVMX_ADD_IO_SEG(0x0001180000001540ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_FUS_TGG CVMX_MIO_FUS_TGG_FUNC()
+static inline uint64_t CVMX_MIO_FUS_TGG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_FUS_TGG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001428ull);
+}
+#else
+#define CVMX_MIO_FUS_TGG (CVMX_ADD_IO_SEG(0x0001180000001428ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_FUS_UNLOCK CVMX_MIO_FUS_UNLOCK_FUNC()
+static inline uint64_t CVMX_MIO_FUS_UNLOCK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_MIO_FUS_UNLOCK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001578ull);
+}
+#else
+#define CVMX_MIO_FUS_UNLOCK (CVMX_ADD_IO_SEG(0x0001180000001578ull))
+#endif
+#define CVMX_MIO_FUS_WADR (CVMX_ADD_IO_SEG(0x0001180000001508ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_GPIO_COMP CVMX_MIO_GPIO_COMP_FUNC()
+static inline uint64_t CVMX_MIO_GPIO_COMP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_GPIO_COMP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800000000C8ull);
+}
+#else
+#define CVMX_MIO_GPIO_COMP (CVMX_ADD_IO_SEG(0x00011800000000C8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_NDF_DMA_CFG CVMX_MIO_NDF_DMA_CFG_FUNC()
+static inline uint64_t CVMX_MIO_NDF_DMA_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_NDF_DMA_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000168ull);
+}
+#else
+#define CVMX_MIO_NDF_DMA_CFG (CVMX_ADD_IO_SEG(0x0001180000000168ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_NDF_DMA_INT CVMX_MIO_NDF_DMA_INT_FUNC()
+static inline uint64_t CVMX_MIO_NDF_DMA_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_NDF_DMA_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000170ull);
+}
+#else
+#define CVMX_MIO_NDF_DMA_INT (CVMX_ADD_IO_SEG(0x0001180000000170ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_NDF_DMA_INT_EN CVMX_MIO_NDF_DMA_INT_EN_FUNC()
+static inline uint64_t CVMX_MIO_NDF_DMA_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_NDF_DMA_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000178ull);
+}
+#else
+#define CVMX_MIO_NDF_DMA_INT_EN (CVMX_ADD_IO_SEG(0x0001180000000178ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PLL_CTL CVMX_MIO_PLL_CTL_FUNC()
+static inline uint64_t CVMX_MIO_PLL_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_MIO_PLL_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001448ull);
+}
+#else
+#define CVMX_MIO_PLL_CTL (CVMX_ADD_IO_SEG(0x0001180000001448ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PLL_SETTING CVMX_MIO_PLL_SETTING_FUNC()
+static inline uint64_t CVMX_MIO_PLL_SETTING_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_MIO_PLL_SETTING not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001440ull);
+}
+#else
+#define CVMX_MIO_PLL_SETTING (CVMX_ADD_IO_SEG(0x0001180000001440ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_CKOUT_HI_INCR CVMX_MIO_PTP_CKOUT_HI_INCR_FUNC()
+static inline uint64_t CVMX_MIO_PTP_CKOUT_HI_INCR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_CKOUT_HI_INCR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F40ull);
+}
+#else
+#define CVMX_MIO_PTP_CKOUT_HI_INCR (CVMX_ADD_IO_SEG(0x0001070000000F40ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_CKOUT_LO_INCR CVMX_MIO_PTP_CKOUT_LO_INCR_FUNC()
+static inline uint64_t CVMX_MIO_PTP_CKOUT_LO_INCR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_CKOUT_LO_INCR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F48ull);
+}
+#else
+#define CVMX_MIO_PTP_CKOUT_LO_INCR (CVMX_ADD_IO_SEG(0x0001070000000F48ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_CKOUT_THRESH_HI CVMX_MIO_PTP_CKOUT_THRESH_HI_FUNC()
+static inline uint64_t CVMX_MIO_PTP_CKOUT_THRESH_HI_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_CKOUT_THRESH_HI not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F38ull);
+}
+#else
+#define CVMX_MIO_PTP_CKOUT_THRESH_HI (CVMX_ADD_IO_SEG(0x0001070000000F38ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_CKOUT_THRESH_LO CVMX_MIO_PTP_CKOUT_THRESH_LO_FUNC()
+static inline uint64_t CVMX_MIO_PTP_CKOUT_THRESH_LO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_CKOUT_THRESH_LO not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F30ull);
+}
+#else
+#define CVMX_MIO_PTP_CKOUT_THRESH_LO (CVMX_ADD_IO_SEG(0x0001070000000F30ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_CLOCK_CFG CVMX_MIO_PTP_CLOCK_CFG_FUNC()
+static inline uint64_t CVMX_MIO_PTP_CLOCK_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_CLOCK_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F00ull);
+}
+#else
+#define CVMX_MIO_PTP_CLOCK_CFG (CVMX_ADD_IO_SEG(0x0001070000000F00ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_CLOCK_COMP CVMX_MIO_PTP_CLOCK_COMP_FUNC()
+static inline uint64_t CVMX_MIO_PTP_CLOCK_COMP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_CLOCK_COMP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F18ull);
+}
+#else
+#define CVMX_MIO_PTP_CLOCK_COMP (CVMX_ADD_IO_SEG(0x0001070000000F18ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_CLOCK_HI CVMX_MIO_PTP_CLOCK_HI_FUNC()
+static inline uint64_t CVMX_MIO_PTP_CLOCK_HI_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_CLOCK_HI not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F10ull);
+}
+#else
+#define CVMX_MIO_PTP_CLOCK_HI (CVMX_ADD_IO_SEG(0x0001070000000F10ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_CLOCK_LO CVMX_MIO_PTP_CLOCK_LO_FUNC()
+static inline uint64_t CVMX_MIO_PTP_CLOCK_LO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_CLOCK_LO not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F08ull);
+}
+#else
+#define CVMX_MIO_PTP_CLOCK_LO (CVMX_ADD_IO_SEG(0x0001070000000F08ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_EVT_CNT CVMX_MIO_PTP_EVT_CNT_FUNC()
+static inline uint64_t CVMX_MIO_PTP_EVT_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_EVT_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F28ull);
+}
+#else
+#define CVMX_MIO_PTP_EVT_CNT (CVMX_ADD_IO_SEG(0x0001070000000F28ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_PHY_1PPS_IN CVMX_MIO_PTP_PHY_1PPS_IN_FUNC()
+static inline uint64_t CVMX_MIO_PTP_PHY_1PPS_IN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_PHY_1PPS_IN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F70ull);
+}
+#else
+#define CVMX_MIO_PTP_PHY_1PPS_IN (CVMX_ADD_IO_SEG(0x0001070000000F70ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_PPS_HI_INCR CVMX_MIO_PTP_PPS_HI_INCR_FUNC()
+static inline uint64_t CVMX_MIO_PTP_PPS_HI_INCR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_PPS_HI_INCR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F60ull);
+}
+#else
+#define CVMX_MIO_PTP_PPS_HI_INCR (CVMX_ADD_IO_SEG(0x0001070000000F60ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_PPS_LO_INCR CVMX_MIO_PTP_PPS_LO_INCR_FUNC()
+static inline uint64_t CVMX_MIO_PTP_PPS_LO_INCR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_PPS_LO_INCR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F68ull);
+}
+#else
+#define CVMX_MIO_PTP_PPS_LO_INCR (CVMX_ADD_IO_SEG(0x0001070000000F68ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_PPS_THRESH_HI CVMX_MIO_PTP_PPS_THRESH_HI_FUNC()
+static inline uint64_t CVMX_MIO_PTP_PPS_THRESH_HI_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_PPS_THRESH_HI not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F58ull);
+}
+#else
+#define CVMX_MIO_PTP_PPS_THRESH_HI (CVMX_ADD_IO_SEG(0x0001070000000F58ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_PPS_THRESH_LO CVMX_MIO_PTP_PPS_THRESH_LO_FUNC()
+static inline uint64_t CVMX_MIO_PTP_PPS_THRESH_LO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_PPS_THRESH_LO not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F50ull);
+}
+#else
+#define CVMX_MIO_PTP_PPS_THRESH_LO (CVMX_ADD_IO_SEG(0x0001070000000F50ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_PTP_TIMESTAMP CVMX_MIO_PTP_TIMESTAMP_FUNC()
+static inline uint64_t CVMX_MIO_PTP_TIMESTAMP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_PTP_TIMESTAMP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000F20ull);
+}
+#else
+#define CVMX_MIO_PTP_TIMESTAMP (CVMX_ADD_IO_SEG(0x0001070000000F20ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_QLMX_CFG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 2))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 2))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_QLMX_CFG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001590ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_MIO_QLMX_CFG(offset) (CVMX_ADD_IO_SEG(0x0001180000001590ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_RST_BOOT CVMX_MIO_RST_BOOT_FUNC()
+static inline uint64_t CVMX_MIO_RST_BOOT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_RST_BOOT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001600ull);
+}
+#else
+#define CVMX_MIO_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180000001600ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_RST_CFG CVMX_MIO_RST_CFG_FUNC()
+static inline uint64_t CVMX_MIO_RST_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_RST_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001610ull);
+}
+#else
+#define CVMX_MIO_RST_CFG (CVMX_ADD_IO_SEG(0x0001180000001610ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_RST_CKILL CVMX_MIO_RST_CKILL_FUNC()
+static inline uint64_t CVMX_MIO_RST_CKILL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_RST_CKILL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001638ull);
+}
+#else
+#define CVMX_MIO_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180000001638ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_RST_CNTLX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_RST_CNTLX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001648ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_MIO_RST_CNTLX(offset) (CVMX_ADD_IO_SEG(0x0001180000001648ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_RST_CTLX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_RST_CTLX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001618ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_MIO_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180000001618ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_RST_DELAY CVMX_MIO_RST_DELAY_FUNC()
+static inline uint64_t CVMX_MIO_RST_DELAY_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_RST_DELAY not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001608ull);
+}
+#else
+#define CVMX_MIO_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180000001608ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_RST_INT CVMX_MIO_RST_INT_FUNC()
+static inline uint64_t CVMX_MIO_RST_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_RST_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001628ull);
+}
+#else
+#define CVMX_MIO_RST_INT (CVMX_ADD_IO_SEG(0x0001180000001628ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_RST_INT_EN CVMX_MIO_RST_INT_EN_FUNC()
+static inline uint64_t CVMX_MIO_RST_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MIO_RST_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001630ull);
+}
+#else
+#define CVMX_MIO_RST_INT_EN (CVMX_ADD_IO_SEG(0x0001180000001630ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_TWSX_INT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_TWSX_INT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001010ull) + ((offset) & 1) * 512;
+}
+#else
+#define CVMX_MIO_TWSX_INT(offset) (CVMX_ADD_IO_SEG(0x0001180000001010ull) + ((offset) & 1) * 512)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_TWSX_SW_TWSI(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_TWSX_SW_TWSI(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001000ull) + ((offset) & 1) * 512;
+}
+#else
+#define CVMX_MIO_TWSX_SW_TWSI(offset) (CVMX_ADD_IO_SEG(0x0001180000001000ull) + ((offset) & 1) * 512)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_TWSX_SW_TWSI_EXT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_TWSX_SW_TWSI_EXT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001018ull) + ((offset) & 1) * 512;
+}
+#else
+#define CVMX_MIO_TWSX_SW_TWSI_EXT(offset) (CVMX_ADD_IO_SEG(0x0001180000001018ull) + ((offset) & 1) * 512)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_TWSX_TWSI_SW(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_TWSX_TWSI_SW(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001008ull) + ((offset) & 1) * 512;
+}
+#else
+#define CVMX_MIO_TWSX_TWSI_SW(offset) (CVMX_ADD_IO_SEG(0x0001180000001008ull) + ((offset) & 1) * 512)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_DLH CVMX_MIO_UART2_DLH_FUNC()
+static inline uint64_t CVMX_MIO_UART2_DLH_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_DLH not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000488ull);
+}
+#else
+#define CVMX_MIO_UART2_DLH (CVMX_ADD_IO_SEG(0x0001180000000488ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_DLL CVMX_MIO_UART2_DLL_FUNC()
+static inline uint64_t CVMX_MIO_UART2_DLL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_DLL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000480ull);
+}
+#else
+#define CVMX_MIO_UART2_DLL (CVMX_ADD_IO_SEG(0x0001180000000480ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_FAR CVMX_MIO_UART2_FAR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_FAR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_FAR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000520ull);
+}
+#else
+#define CVMX_MIO_UART2_FAR (CVMX_ADD_IO_SEG(0x0001180000000520ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_FCR CVMX_MIO_UART2_FCR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_FCR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_FCR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000450ull);
+}
+#else
+#define CVMX_MIO_UART2_FCR (CVMX_ADD_IO_SEG(0x0001180000000450ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_HTX CVMX_MIO_UART2_HTX_FUNC()
+static inline uint64_t CVMX_MIO_UART2_HTX_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_HTX not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000708ull);
+}
+#else
+#define CVMX_MIO_UART2_HTX (CVMX_ADD_IO_SEG(0x0001180000000708ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_IER CVMX_MIO_UART2_IER_FUNC()
+static inline uint64_t CVMX_MIO_UART2_IER_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_IER not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000408ull);
+}
+#else
+#define CVMX_MIO_UART2_IER (CVMX_ADD_IO_SEG(0x0001180000000408ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_IIR CVMX_MIO_UART2_IIR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_IIR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_IIR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000410ull);
+}
+#else
+#define CVMX_MIO_UART2_IIR (CVMX_ADD_IO_SEG(0x0001180000000410ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_LCR CVMX_MIO_UART2_LCR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_LCR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_LCR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000418ull);
+}
+#else
+#define CVMX_MIO_UART2_LCR (CVMX_ADD_IO_SEG(0x0001180000000418ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_LSR CVMX_MIO_UART2_LSR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_LSR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_LSR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000428ull);
+}
+#else
+#define CVMX_MIO_UART2_LSR (CVMX_ADD_IO_SEG(0x0001180000000428ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_MCR CVMX_MIO_UART2_MCR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_MCR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_MCR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000420ull);
+}
+#else
+#define CVMX_MIO_UART2_MCR (CVMX_ADD_IO_SEG(0x0001180000000420ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_MSR CVMX_MIO_UART2_MSR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_MSR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_MSR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000430ull);
+}
+#else
+#define CVMX_MIO_UART2_MSR (CVMX_ADD_IO_SEG(0x0001180000000430ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_RBR CVMX_MIO_UART2_RBR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_RBR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_RBR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000400ull);
+}
+#else
+#define CVMX_MIO_UART2_RBR (CVMX_ADD_IO_SEG(0x0001180000000400ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_RFL CVMX_MIO_UART2_RFL_FUNC()
+static inline uint64_t CVMX_MIO_UART2_RFL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_RFL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000608ull);
+}
+#else
+#define CVMX_MIO_UART2_RFL (CVMX_ADD_IO_SEG(0x0001180000000608ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_RFW CVMX_MIO_UART2_RFW_FUNC()
+static inline uint64_t CVMX_MIO_UART2_RFW_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_RFW not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000530ull);
+}
+#else
+#define CVMX_MIO_UART2_RFW (CVMX_ADD_IO_SEG(0x0001180000000530ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_SBCR CVMX_MIO_UART2_SBCR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SBCR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SBCR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000620ull);
+}
+#else
+#define CVMX_MIO_UART2_SBCR (CVMX_ADD_IO_SEG(0x0001180000000620ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_SCR CVMX_MIO_UART2_SCR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SCR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SCR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000438ull);
+}
+#else
+#define CVMX_MIO_UART2_SCR (CVMX_ADD_IO_SEG(0x0001180000000438ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_SFE CVMX_MIO_UART2_SFE_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SFE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SFE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000630ull);
+}
+#else
+#define CVMX_MIO_UART2_SFE (CVMX_ADD_IO_SEG(0x0001180000000630ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_SRR CVMX_MIO_UART2_SRR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SRR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SRR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000610ull);
+}
+#else
+#define CVMX_MIO_UART2_SRR (CVMX_ADD_IO_SEG(0x0001180000000610ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_SRT CVMX_MIO_UART2_SRT_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SRT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SRT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000638ull);
+}
+#else
+#define CVMX_MIO_UART2_SRT (CVMX_ADD_IO_SEG(0x0001180000000638ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_SRTS CVMX_MIO_UART2_SRTS_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SRTS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SRTS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000618ull);
+}
+#else
+#define CVMX_MIO_UART2_SRTS (CVMX_ADD_IO_SEG(0x0001180000000618ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_STT CVMX_MIO_UART2_STT_FUNC()
+static inline uint64_t CVMX_MIO_UART2_STT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_STT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000700ull);
+}
+#else
+#define CVMX_MIO_UART2_STT (CVMX_ADD_IO_SEG(0x0001180000000700ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_TFL CVMX_MIO_UART2_TFL_FUNC()
+static inline uint64_t CVMX_MIO_UART2_TFL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_TFL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000600ull);
+}
+#else
+#define CVMX_MIO_UART2_TFL (CVMX_ADD_IO_SEG(0x0001180000000600ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_TFR CVMX_MIO_UART2_TFR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_TFR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_TFR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000528ull);
+}
+#else
+#define CVMX_MIO_UART2_TFR (CVMX_ADD_IO_SEG(0x0001180000000528ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_THR CVMX_MIO_UART2_THR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_THR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_THR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000440ull);
+}
+#else
+#define CVMX_MIO_UART2_THR (CVMX_ADD_IO_SEG(0x0001180000000440ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MIO_UART2_USR CVMX_MIO_UART2_USR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_USR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_USR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000000538ull);
+}
+#else
+#define CVMX_MIO_UART2_USR (CVMX_ADD_IO_SEG(0x0001180000000538ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_DLH(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_DLH(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000888ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_DLH(offset) (CVMX_ADD_IO_SEG(0x0001180000000888ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_DLL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_DLL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000880ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_DLL(offset) (CVMX_ADD_IO_SEG(0x0001180000000880ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_FAR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_FAR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000920ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_FAR(offset) (CVMX_ADD_IO_SEG(0x0001180000000920ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_FCR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_FCR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000850ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_FCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000850ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_HTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_HTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000B08ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_HTX(offset) (CVMX_ADD_IO_SEG(0x0001180000000B08ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_IER(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_IER(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000808ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_IER(offset) (CVMX_ADD_IO_SEG(0x0001180000000808ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_IIR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_IIR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000810ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_IIR(offset) (CVMX_ADD_IO_SEG(0x0001180000000810ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_LCR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_LCR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000818ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_LCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000818ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_LSR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_LSR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000828ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_LSR(offset) (CVMX_ADD_IO_SEG(0x0001180000000828ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_MCR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_MCR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000820ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_MCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000820ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_MSR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_MSR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000830ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_MSR(offset) (CVMX_ADD_IO_SEG(0x0001180000000830ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_RBR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_RBR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000800ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_RBR(offset) (CVMX_ADD_IO_SEG(0x0001180000000800ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_RFL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_RFL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000A08ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_RFL(offset) (CVMX_ADD_IO_SEG(0x0001180000000A08ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_RFW(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_RFW(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000930ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_RFW(offset) (CVMX_ADD_IO_SEG(0x0001180000000930ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_SBCR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SBCR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000A20ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_SBCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000A20ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_SCR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SCR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000838ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_SCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000838ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_SFE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SFE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000A30ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_SFE(offset) (CVMX_ADD_IO_SEG(0x0001180000000A30ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_SRR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SRR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000A10ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_SRR(offset) (CVMX_ADD_IO_SEG(0x0001180000000A10ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_SRT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SRT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000A38ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_SRT(offset) (CVMX_ADD_IO_SEG(0x0001180000000A38ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_SRTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SRTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000A18ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_SRTS(offset) (CVMX_ADD_IO_SEG(0x0001180000000A18ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_STT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_STT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000B00ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_STT(offset) (CVMX_ADD_IO_SEG(0x0001180000000B00ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_TFL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_TFL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000A00ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_TFL(offset) (CVMX_ADD_IO_SEG(0x0001180000000A00ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_TFR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_TFR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000928ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_TFR(offset) (CVMX_ADD_IO_SEG(0x0001180000000928ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_THR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_THR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000840ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_THR(offset) (CVMX_ADD_IO_SEG(0x0001180000000840ull) + ((offset) & 1) * 1024)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIO_UARTX_USR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_USR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000000938ull) + ((offset) & 1) * 1024;
+}
+#else
+#define CVMX_MIO_UARTX_USR(offset) (CVMX_ADD_IO_SEG(0x0001180000000938ull) + ((offset) & 1) * 1024)
+#endif
+
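+/* Example (illustrative sketch, not part of the generated register
+ * definitions): the CVMX_MIO_UARTX_* macros above compute a CSR address
+ * from a UART number, with the two UARTs spaced 1024 bytes apart. A
+ * caller reads the register through the usual cvmx_read_csr() accessor.
+ * The THRE test below assumes the standard 16550 LSR layout (bit 5 =
+ * transmitter holding register empty).
+ *
+ *   #include "cvmx.h"
+ *
+ *   static int uart_tx_ready(int uart) // uart is 0 or 1
+ *   {
+ *       uint64_t lsr = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
+ *       return (lsr >> 5) & 1; // LSR bit 5: THR empty
+ *   }
+ */
+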
+/**
+ * cvmx_mio_boot_bist_stat
+ *
+ * MIO_BOOT_BIST_STAT = MIO Boot BIST Status Register
+ *
+ * Contains the BIST status for the MIO boot memories. '0' = pass, '1' = fail.
+ */
+union cvmx_mio_boot_bist_stat {
+ uint64_t u64;
+ struct cvmx_mio_boot_bist_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_mio_boot_bist_stat_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t ncbo_1 : 1; /**< NCB output FIFO 1 BIST status */
+ uint64_t ncbo_0 : 1; /**< NCB output FIFO 0 BIST status */
+ uint64_t loc : 1; /**< Local memory BIST status */
+ uint64_t ncbi : 1; /**< NCB input FIFO BIST status */
+#else
+ uint64_t ncbi : 1;
+ uint64_t loc : 1;
+ uint64_t ncbo_0 : 1;
+ uint64_t ncbo_1 : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn30xx;
+ struct cvmx_mio_boot_bist_stat_cn30xx cn31xx;
+ struct cvmx_mio_boot_bist_stat_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t ncbo_0 : 1; /**< NCB output FIFO BIST status */
+ uint64_t loc : 1; /**< Local memory BIST status */
+ uint64_t ncbi : 1; /**< NCB input FIFO BIST status */
+#else
+ uint64_t ncbi : 1;
+ uint64_t loc : 1;
+ uint64_t ncbo_0 : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn38xx;
+ struct cvmx_mio_boot_bist_stat_cn38xx cn38xxp2;
+ struct cvmx_mio_boot_bist_stat_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t pcm_1 : 1; /**< PCM memory 1 BIST status */
+ uint64_t pcm_0 : 1; /**< PCM memory 0 BIST status */
+ uint64_t ncbo_1 : 1; /**< NCB output FIFO 1 BIST status */
+ uint64_t ncbo_0 : 1; /**< NCB output FIFO 0 BIST status */
+ uint64_t loc : 1; /**< Local memory region BIST status */
+ uint64_t ncbi : 1; /**< NCB input FIFO BIST status */
+#else
+ uint64_t ncbi : 1;
+ uint64_t loc : 1;
+ uint64_t ncbo_0 : 1;
+ uint64_t ncbo_1 : 1;
+ uint64_t pcm_0 : 1;
+ uint64_t pcm_1 : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn50xx;
+ struct cvmx_mio_boot_bist_stat_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t ndf : 2; /**< NAND flash BIST status */
+ uint64_t ncbo_0 : 1; /**< NCB output FIFO BIST status */
+ uint64_t dma : 1; /**< DMA memory BIST status */
+ uint64_t loc : 1; /**< Local memory BIST status */
+ uint64_t ncbi : 1; /**< NCB input FIFO BIST status */
+#else
+ uint64_t ncbi : 1;
+ uint64_t loc : 1;
+ uint64_t dma : 1;
+ uint64_t ncbo_0 : 1;
+ uint64_t ndf : 2;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn52xx;
+ struct cvmx_mio_boot_bist_stat_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t ncbo_0 : 1; /**< NCB output FIFO BIST status */
+ uint64_t dma : 1; /**< DMA memory BIST status */
+ uint64_t loc : 1; /**< Local memory region BIST status */
+ uint64_t ncbi : 1; /**< NCB input FIFO BIST status */
+#else
+ uint64_t ncbi : 1;
+ uint64_t loc : 1;
+ uint64_t dma : 1;
+ uint64_t ncbo_0 : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xxp1;
+ struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xx;
+ struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xxp1;
+ struct cvmx_mio_boot_bist_stat_cn38xx cn58xx;
+ struct cvmx_mio_boot_bist_stat_cn38xx cn58xxp1;
+ struct cvmx_mio_boot_bist_stat_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t stat : 12; /**< BIST status */
+#else
+ uint64_t stat : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn61xx;
+ struct cvmx_mio_boot_bist_stat_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t stat : 9; /**< BIST status */
+#else
+ uint64_t stat : 9;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn63xx;
+ struct cvmx_mio_boot_bist_stat_cn63xx cn63xxp1;
+ struct cvmx_mio_boot_bist_stat_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t stat : 10; /**< BIST status */
+#else
+ uint64_t stat : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn66xx;
+ struct cvmx_mio_boot_bist_stat_cn66xx cn68xx;
+ struct cvmx_mio_boot_bist_stat_cn66xx cn68xxp1;
+ struct cvmx_mio_boot_bist_stat_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_boot_bist_stat cvmx_mio_boot_bist_stat_t;
+
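+/* Example (illustrative sketch): decoding MIO_BOOT_BIST_STAT on a CN52XX
+ * using the model-specific view above, where '0' = pass and '1' = fail.
+ * CVMX_MIO_BOOT_BIST_STAT is assumed to be the address macro defined
+ * earlier in this header.
+ *
+ *   cvmx_mio_boot_bist_stat_t bist;
+ *   bist.u64 = cvmx_read_csr(CVMX_MIO_BOOT_BIST_STAT);
+ *   if (bist.cn52xx.ncbi || bist.cn52xx.loc || bist.cn52xx.dma ||
+ *       bist.cn52xx.ncbo_0 || bist.cn52xx.ndf)
+ *       cvmx_warn("MIO boot BIST failure\n");
+ */
+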
+/**
+ * cvmx_mio_boot_comp
+ *
+ * MIO_BOOT_COMP = MIO Boot Compensation Register
+ *
+ * The reset value depends on the boot_ad[10:9] pullups, as follows:
+ *
+ * no pullups, PCTL=38, NCTL=30 (25 ohm termination)
+ * pullup on boot_ad[9], PCTL=19, NCTL=15 (50 ohm termination)
+ * pullup on boot_ad[10], PCTL=15, NCTL=12 (65 ohm termination)
+ * pullups on boot_ad[10:9], PCTL=15, NCTL=12 (65 ohm termination)
+ */
+union cvmx_mio_boot_comp {
+ uint64_t u64;
+ struct cvmx_mio_boot_comp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_mio_boot_comp_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t pctl : 5; /**< Boot bus PCTL */
+ uint64_t nctl : 5; /**< Boot bus NCTL */
+#else
+ uint64_t nctl : 5;
+ uint64_t pctl : 5;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn50xx;
+ struct cvmx_mio_boot_comp_cn50xx cn52xx;
+ struct cvmx_mio_boot_comp_cn50xx cn52xxp1;
+ struct cvmx_mio_boot_comp_cn50xx cn56xx;
+ struct cvmx_mio_boot_comp_cn50xx cn56xxp1;
+ struct cvmx_mio_boot_comp_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t pctl : 6; /**< Boot bus PCTL */
+ uint64_t nctl : 6; /**< Boot bus NCTL */
+#else
+ uint64_t nctl : 6;
+ uint64_t pctl : 6;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn61xx;
+ struct cvmx_mio_boot_comp_cn61xx cn63xx;
+ struct cvmx_mio_boot_comp_cn61xx cn63xxp1;
+ struct cvmx_mio_boot_comp_cn61xx cn66xx;
+ struct cvmx_mio_boot_comp_cn61xx cn68xx;
+ struct cvmx_mio_boot_comp_cn61xx cn68xxp1;
+ struct cvmx_mio_boot_comp_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_boot_comp cvmx_mio_boot_comp_t;
+
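+/* Example (illustrative sketch): reading back the boot bus drive strength
+ * compensation on a CN63XX. Note that PCTL/NCTL are 5 bits wide on the
+ * CN5XXX parts but 6 bits wide on CN6XXX, so the model-specific view is
+ * used instead of the generic 's' struct. CVMX_MIO_BOOT_COMP is assumed
+ * to be the address macro defined earlier in this header.
+ *
+ *   cvmx_mio_boot_comp_t comp;
+ *   comp.u64 = cvmx_read_csr(CVMX_MIO_BOOT_COMP);
+ *   cvmx_dprintf("boot bus comp: pctl=%d nctl=%d\n",
+ *                (int)comp.cn63xx.pctl, (int)comp.cn63xx.nctl);
+ */
+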
+/**
+ * cvmx_mio_boot_dma_cfg#
+ *
+ * MIO_BOOT_DMA_CFG = MIO Boot DMA Config Register (1 per engine * 2 engines)
+ *
+ * SIZE is specified in number of bus transfers, where one transfer is equal to the following number
+ * of bytes, depending on MIO_BOOT_DMA_TIMn[WIDTH] and MIO_BOOT_DMA_TIMn[DDR]:
+ *
+ * WIDTH DDR Transfer Size (bytes)
+ * ----------------------------------------
+ * 0 0 2
+ * 0 1 4
+ * 1 0 4
+ * 1 1 8
+ *
+ * Note: ADR must be aligned to the bus width (i.e., 16-bit aligned if WIDTH=0, 32-bit aligned if WIDTH=1).
+ */
+union cvmx_mio_boot_dma_cfgx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t en : 1; /**< DMA Engine X enable */
+ uint64_t rw : 1; /**< DMA Engine X R/W bit (0 = read, 1 = write) */
+ uint64_t clr : 1; /**< DMA Engine X clear EN on device terminated burst */
+ uint64_t reserved_60_60 : 1;
+ uint64_t swap32 : 1; /**< DMA Engine X 32 bit swap */
+ uint64_t swap16 : 1; /**< DMA Engine X 16 bit swap */
+ uint64_t swap8 : 1; /**< DMA Engine X 8 bit swap */
+ uint64_t endian : 1; /**< DMA Engine X NCB endian mode (0 = big, 1 = little) */
+ uint64_t size : 20; /**< DMA Engine X size */
+ uint64_t adr : 36; /**< DMA Engine X address */
+#else
+ uint64_t adr : 36;
+ uint64_t size : 20;
+ uint64_t endian : 1;
+ uint64_t swap8 : 1;
+ uint64_t swap16 : 1;
+ uint64_t swap32 : 1;
+ uint64_t reserved_60_60 : 1;
+ uint64_t clr : 1;
+ uint64_t rw : 1;
+ uint64_t en : 1;
+#endif
+ } s;
+ struct cvmx_mio_boot_dma_cfgx_s cn52xx;
+ struct cvmx_mio_boot_dma_cfgx_s cn52xxp1;
+ struct cvmx_mio_boot_dma_cfgx_s cn56xx;
+ struct cvmx_mio_boot_dma_cfgx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_cfgx_s cn61xx;
+ struct cvmx_mio_boot_dma_cfgx_s cn63xx;
+ struct cvmx_mio_boot_dma_cfgx_s cn63xxp1;
+ struct cvmx_mio_boot_dma_cfgx_s cn66xx;
+ struct cvmx_mio_boot_dma_cfgx_s cn68xx;
+ struct cvmx_mio_boot_dma_cfgx_s cn68xxp1;
+ struct cvmx_mio_boot_dma_cfgx_s cnf71xx;
+};
+typedef union cvmx_mio_boot_dma_cfgx cvmx_mio_boot_dma_cfgx_t;
+
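+/* Example (illustrative sketch): the WIDTH/DDR table above means one bus
+ * transfer moves 2 << (WIDTH + DDR) bytes. A helper converting a byte
+ * length into the SIZE field might look like the following; whether the
+ * hardware wants the count as-is or minus one is not stated here, so
+ * verify against the HRM before use.
+ *
+ *   static uint64_t dma_size_field(uint64_t len, int width, int ddr)
+ *   {
+ *       uint64_t xfer_bytes = 2ull << (width + ddr); // 2, 4, or 8
+ *       return len / xfer_bytes; // len must be a multiple of xfer_bytes
+ *   }
+ */
+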
+/**
+ * cvmx_mio_boot_dma_int#
+ *
+ * MIO_BOOT_DMA_INT = MIO Boot DMA Interrupt Register (1 per engine * 2 engines)
+ *
+ */
+union cvmx_mio_boot_dma_intx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_intx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t dmarq : 1; /**< DMA Engine X DMARQ asserted interrupt */
+ uint64_t done : 1; /**< DMA Engine X request completion interrupt */
+#else
+ uint64_t done : 1;
+ uint64_t dmarq : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_mio_boot_dma_intx_s cn52xx;
+ struct cvmx_mio_boot_dma_intx_s cn52xxp1;
+ struct cvmx_mio_boot_dma_intx_s cn56xx;
+ struct cvmx_mio_boot_dma_intx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_intx_s cn61xx;
+ struct cvmx_mio_boot_dma_intx_s cn63xx;
+ struct cvmx_mio_boot_dma_intx_s cn63xxp1;
+ struct cvmx_mio_boot_dma_intx_s cn66xx;
+ struct cvmx_mio_boot_dma_intx_s cn68xx;
+ struct cvmx_mio_boot_dma_intx_s cn68xxp1;
+ struct cvmx_mio_boot_dma_intx_s cnf71xx;
+};
+typedef union cvmx_mio_boot_dma_intx cvmx_mio_boot_dma_intx_t;
+
+/**
+ * cvmx_mio_boot_dma_int_en#
+ *
+ * MIO_BOOT_DMA_INT_EN = MIO Boot DMA Interrupt Enable Register (1 per engine * 2 engines)
+ *
+ */
+union cvmx_mio_boot_dma_int_enx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_int_enx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t dmarq : 1; /**< DMA Engine X DMARQ asserted interrupt enable */
+ uint64_t done : 1; /**< DMA Engine X request completion interrupt enable */
+#else
+ uint64_t done : 1;
+ uint64_t dmarq : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_mio_boot_dma_int_enx_s cn52xx;
+ struct cvmx_mio_boot_dma_int_enx_s cn52xxp1;
+ struct cvmx_mio_boot_dma_int_enx_s cn56xx;
+ struct cvmx_mio_boot_dma_int_enx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_int_enx_s cn61xx;
+ struct cvmx_mio_boot_dma_int_enx_s cn63xx;
+ struct cvmx_mio_boot_dma_int_enx_s cn63xxp1;
+ struct cvmx_mio_boot_dma_int_enx_s cn66xx;
+ struct cvmx_mio_boot_dma_int_enx_s cn68xx;
+ struct cvmx_mio_boot_dma_int_enx_s cn68xxp1;
+ struct cvmx_mio_boot_dma_int_enx_s cnf71xx;
+};
+typedef union cvmx_mio_boot_dma_int_enx cvmx_mio_boot_dma_int_enx_t;
+
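+/* Example (illustrative sketch): enabling and acknowledging the DMA
+ * completion interrupt for engine n. The CVMX_MIO_BOOT_DMA_INTX()/
+ * CVMX_MIO_BOOT_DMA_INT_ENX() address macros are assumed to be defined
+ * earlier in this header, and MIO_BOOT_DMA_INT is assumed to be
+ * write-1-to-clear, as is typical for Octeon interrupt status registers;
+ * verify against the HRM.
+ *
+ *   int n = 0;
+ *   cvmx_mio_boot_dma_int_enx_t en;
+ *   en.u64 = 0;
+ *   en.s.done = 1;
+ *   cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(n), en.u64);
+ *
+ *   cvmx_mio_boot_dma_intx_t isr;
+ *   isr.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(n));
+ *   if (isr.s.done)
+ *       cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(n), isr.u64); // acknowledge
+ */
+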
+/**
+ * cvmx_mio_boot_dma_tim#
+ *
+ * MIO_BOOT_DMA_TIM = MIO Boot DMA Timing Register (1 per engine * 2 engines)
+ *
+ * DMACK_PI inverts the assertion level of boot_dmack[n]. The default polarity of boot_dmack[1:0] is
+ * selected on the first de-assertion of reset by the values on boot_ad[12:11], where 0 is active high
+ * and 1 is active low (see MIO_BOOT_PIN_DEFS for a read-only copy of the default polarity).
+ * boot_ad[12:11] have internal pulldowns, so place a pullup on boot_ad[n+11] for active low default
+ * polarity on engine n. To interface with CF cards in True IDE Mode, either a pullup should be placed
+ * on boot_ad[n+11] OR the corresponding DMACK_PI[n] should be set.
+ *
+ * DMARQ_PI inverts the assertion level of boot_dmarq[n]. The default polarity of boot_dmarq[1:0] is
+ * active high, thus setting the polarity inversion bits changes the polarity to active low. To
+ * interface with CF cards in True IDE Mode, the corresponding DMARQ_PI[n] should be clear.
+ *
+ * TIM_MULT specifies the timing multiplier for an engine. The timing multiplier applies to all timing
+ * parameters, except for DMARQ and RD_DLY, which simply count eclks. TIM_MULT is encoded as follows:
+ * 0 = 4x, 1 = 1x, 2 = 2x, 3 = 8x.
+ *
+ * RD_DLY specifies the read sample delay in eclk cycles for an engine. For reads, the data bus is
+ * normally sampled on the same eclk edge that drives boot_oe_n high (and also low in DDR mode).
+ * This parameter can delay that sampling edge by up to 7 eclks. Note: the number of eclk cycles
+ * counted by the OE_A and DMACK_H + PAUSE timing parameters must be greater than RD_DLY.
+ *
+ * If DDR is set, then WE_N must be less than WE_A.
+ */
+union cvmx_mio_boot_dma_timx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_timx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dmack_pi : 1; /**< DMA Engine X DMA ack polarity inversion */
+ uint64_t dmarq_pi : 1; /**< DMA Engine X DMA request polarity inversion */
+ uint64_t tim_mult : 2; /**< DMA Engine X timing multiplier */
+ uint64_t rd_dly : 3; /**< DMA Engine X read sample delay */
+ uint64_t ddr : 1; /**< DMA Engine X DDR mode */
+ uint64_t width : 1; /**< DMA Engine X bus width (0 = 16 bits, 1 = 32 bits) */
+ uint64_t reserved_48_54 : 7;
+ uint64_t pause : 6; /**< DMA Engine X pause count */
+ uint64_t dmack_h : 6; /**< DMA Engine X DMA ack hold count */
+ uint64_t we_n : 6; /**< DMA Engine X write enable negated count */
+ uint64_t we_a : 6; /**< DMA Engine X write enable asserted count */
+ uint64_t oe_n : 6; /**< DMA Engine X output enable negated count */
+ uint64_t oe_a : 6; /**< DMA Engine X output enable asserted count */
+ uint64_t dmack_s : 6; /**< DMA Engine X DMA ack setup count */
+ uint64_t dmarq : 6; /**< DMA Engine X DMA request count (must be non-zero) */
+#else
+ uint64_t dmarq : 6;
+ uint64_t dmack_s : 6;
+ uint64_t oe_a : 6;
+ uint64_t oe_n : 6;
+ uint64_t we_a : 6;
+ uint64_t we_n : 6;
+ uint64_t dmack_h : 6;
+ uint64_t pause : 6;
+ uint64_t reserved_48_54 : 7;
+ uint64_t width : 1;
+ uint64_t ddr : 1;
+ uint64_t rd_dly : 3;
+ uint64_t tim_mult : 2;
+ uint64_t dmarq_pi : 1;
+ uint64_t dmack_pi : 1;
+#endif
+ } s;
+ struct cvmx_mio_boot_dma_timx_s cn52xx;
+ struct cvmx_mio_boot_dma_timx_s cn52xxp1;
+ struct cvmx_mio_boot_dma_timx_s cn56xx;
+ struct cvmx_mio_boot_dma_timx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_timx_s cn61xx;
+ struct cvmx_mio_boot_dma_timx_s cn63xx;
+ struct cvmx_mio_boot_dma_timx_s cn63xxp1;
+ struct cvmx_mio_boot_dma_timx_s cn66xx;
+ struct cvmx_mio_boot_dma_timx_s cn68xx;
+ struct cvmx_mio_boot_dma_timx_s cn68xxp1;
+ struct cvmx_mio_boot_dma_timx_s cnf71xx;
+};
+typedef union cvmx_mio_boot_dma_timx cvmx_mio_boot_dma_timx_t;
+
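+/* Example (illustrative sketch): the TIM_MULT encoding above is
+ * non-monotonic (0 = 4x, 1 = 1x, 2 = 2x, 3 = 8x), so a small lookup
+ * table is the clearest way to map between field value and multiplier:
+ *
+ *   static const int tim_mult_decode[4] = { 4, 1, 2, 8 };
+ *
+ *   static int tim_mult_encode(int mult) // returns the field encoding
+ *   {
+ *       int i;
+ *       for (i = 0; i < 4; i++)
+ *           if (tim_mult_decode[i] == mult)
+ *               return i;
+ *       return 3; // no exact match: fall back to the largest (8x)
+ *   }
+ */
+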
+/**
+ * cvmx_mio_boot_err
+ *
+ * MIO_BOOT_ERR = MIO Boot Error Register
+ *
+ * Contains the address decode error and wait mode error bits. Address decode error is set when a
+ * boot bus access does not hit in any of the 8 remote regions or 2 local memory regions. Wait mode error is
+ * set when wait mode is enabled and the external wait signal is not de-asserted after 32k eclk cycles.
+ */
+union cvmx_mio_boot_err {
+ uint64_t u64;
+ struct cvmx_mio_boot_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t wait_err : 1; /**< Wait mode error */
+ uint64_t adr_err : 1; /**< Address decode error */
+#else
+ uint64_t adr_err : 1;
+ uint64_t wait_err : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_mio_boot_err_s cn30xx;
+ struct cvmx_mio_boot_err_s cn31xx;
+ struct cvmx_mio_boot_err_s cn38xx;
+ struct cvmx_mio_boot_err_s cn38xxp2;
+ struct cvmx_mio_boot_err_s cn50xx;
+ struct cvmx_mio_boot_err_s cn52xx;
+ struct cvmx_mio_boot_err_s cn52xxp1;
+ struct cvmx_mio_boot_err_s cn56xx;
+ struct cvmx_mio_boot_err_s cn56xxp1;
+ struct cvmx_mio_boot_err_s cn58xx;
+ struct cvmx_mio_boot_err_s cn58xxp1;
+ struct cvmx_mio_boot_err_s cn61xx;
+ struct cvmx_mio_boot_err_s cn63xx;
+ struct cvmx_mio_boot_err_s cn63xxp1;
+ struct cvmx_mio_boot_err_s cn66xx;
+ struct cvmx_mio_boot_err_s cn68xx;
+ struct cvmx_mio_boot_err_s cn68xxp1;
+ struct cvmx_mio_boot_err_s cnf71xx;
+};
+typedef union cvmx_mio_boot_err cvmx_mio_boot_err_t;
+
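+/* Example (illustrative sketch): polling the two error bits described
+ * above. CVMX_MIO_BOOT_ERR is assumed to be the address macro defined
+ * earlier in this header; whether the bits are write-1-to-clear is not
+ * stated here, so this sketch only reads and reports them.
+ *
+ *   cvmx_mio_boot_err_t err;
+ *   err.u64 = cvmx_read_csr(CVMX_MIO_BOOT_ERR);
+ *   if (err.s.adr_err)
+ *       cvmx_warn("boot bus address decode error\n");
+ *   if (err.s.wait_err)
+ *       cvmx_warn("boot bus wait mode timeout\n");
+ */
+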
+/**
+ * cvmx_mio_boot_int
+ *
+ * MIO_BOOT_INT = MIO Boot Interrupt Register
+ *
+ * Contains the interrupt enable bits for address decode error and wait mode error.
+ */
+union cvmx_mio_boot_int {
+ uint64_t u64;
+ struct cvmx_mio_boot_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t wait_int : 1; /**< Wait mode error interrupt enable */
+ uint64_t adr_int : 1; /**< Address decode error interrupt enable */
+#else
+ uint64_t adr_int : 1;
+ uint64_t wait_int : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_mio_boot_int_s cn30xx;
+ struct cvmx_mio_boot_int_s cn31xx;
+ struct cvmx_mio_boot_int_s cn38xx;
+ struct cvmx_mio_boot_int_s cn38xxp2;
+ struct cvmx_mio_boot_int_s cn50xx;
+ struct cvmx_mio_boot_int_s cn52xx;
+ struct cvmx_mio_boot_int_s cn52xxp1;
+ struct cvmx_mio_boot_int_s cn56xx;
+ struct cvmx_mio_boot_int_s cn56xxp1;
+ struct cvmx_mio_boot_int_s cn58xx;
+ struct cvmx_mio_boot_int_s cn58xxp1;
+ struct cvmx_mio_boot_int_s cn61xx;
+ struct cvmx_mio_boot_int_s cn63xx;
+ struct cvmx_mio_boot_int_s cn63xxp1;
+ struct cvmx_mio_boot_int_s cn66xx;
+ struct cvmx_mio_boot_int_s cn68xx;
+ struct cvmx_mio_boot_int_s cn68xxp1;
+ struct cvmx_mio_boot_int_s cnf71xx;
+};
+typedef union cvmx_mio_boot_int cvmx_mio_boot_int_t;
+
+/**
+ * cvmx_mio_boot_loc_adr
+ *
+ * MIO_BOOT_LOC_ADR = MIO Boot Local Memory Region Address Register
+ *
+ * Specifies the address for reading or writing the local memory region. This address will post-increment
+ * following an access to the MIO Boot Local Memory Region Data Register (MIO_BOOT_LOC_DAT).
+ *
+ * Local memory region 0 exists from addresses 0x00 - 0x78.
+ * Local memory region 1 exists from addresses 0x80 - 0xf8.
+ */
+union cvmx_mio_boot_loc_adr {
+ uint64_t u64;
+ struct cvmx_mio_boot_loc_adr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t adr : 5; /**< Local memory region address */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t adr : 5;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_boot_loc_adr_s cn30xx;
+ struct cvmx_mio_boot_loc_adr_s cn31xx;
+ struct cvmx_mio_boot_loc_adr_s cn38xx;
+ struct cvmx_mio_boot_loc_adr_s cn38xxp2;
+ struct cvmx_mio_boot_loc_adr_s cn50xx;
+ struct cvmx_mio_boot_loc_adr_s cn52xx;
+ struct cvmx_mio_boot_loc_adr_s cn52xxp1;
+ struct cvmx_mio_boot_loc_adr_s cn56xx;
+ struct cvmx_mio_boot_loc_adr_s cn56xxp1;
+ struct cvmx_mio_boot_loc_adr_s cn58xx;
+ struct cvmx_mio_boot_loc_adr_s cn58xxp1;
+ struct cvmx_mio_boot_loc_adr_s cn61xx;
+ struct cvmx_mio_boot_loc_adr_s cn63xx;
+ struct cvmx_mio_boot_loc_adr_s cn63xxp1;
+ struct cvmx_mio_boot_loc_adr_s cn66xx;
+ struct cvmx_mio_boot_loc_adr_s cn68xx;
+ struct cvmx_mio_boot_loc_adr_s cn68xxp1;
+ struct cvmx_mio_boot_loc_adr_s cnf71xx;
+};
+typedef union cvmx_mio_boot_loc_adr cvmx_mio_boot_loc_adr_t;
+
+/**
+ * cvmx_mio_boot_loc_cfg#
+ *
+ * MIO_BOOT_LOC_CFG = MIO Boot Local Memory Region Config Register (1 per region * 2 regions)
+ *
+ * Contains local memory region enable and local memory region base address parameters. Each local memory region is 128
+ * bytes organized as 16 entries x 8 bytes.
+ *
+ * Base address specifies address bits [31:7] of the region.
+ */
+union cvmx_mio_boot_loc_cfgx {
+ uint64_t u64;
+ struct cvmx_mio_boot_loc_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t en : 1; /**< Local memory region X enable */
+ uint64_t reserved_28_30 : 3;
+ uint64_t base : 25; /**< Local memory region X base address */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t base : 25;
+ uint64_t reserved_28_30 : 3;
+ uint64_t en : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_mio_boot_loc_cfgx_s cn30xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn31xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn38xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn38xxp2;
+ struct cvmx_mio_boot_loc_cfgx_s cn50xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn52xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn52xxp1;
+ struct cvmx_mio_boot_loc_cfgx_s cn56xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn56xxp1;
+ struct cvmx_mio_boot_loc_cfgx_s cn58xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn58xxp1;
+ struct cvmx_mio_boot_loc_cfgx_s cn61xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn63xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn63xxp1;
+ struct cvmx_mio_boot_loc_cfgx_s cn66xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn68xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn68xxp1;
+ struct cvmx_mio_boot_loc_cfgx_s cnf71xx;
+};
+typedef union cvmx_mio_boot_loc_cfgx cvmx_mio_boot_loc_cfgx_t;
+
+/**
+ * cvmx_mio_boot_loc_dat
+ *
+ * MIO_BOOT_LOC_DAT = MIO Boot Local Memory Region Data Register
+ *
+ * This is a pseudo-register that will read/write the local memory region at the address specified by the MIO
+ * Boot Local Memory Region Address Register (MIO_BOOT_LOC_ADR) when accessed.
+ */
+union cvmx_mio_boot_loc_dat {
+ uint64_t u64;
+ struct cvmx_mio_boot_loc_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Local memory region data */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_mio_boot_loc_dat_s cn30xx;
+ struct cvmx_mio_boot_loc_dat_s cn31xx;
+ struct cvmx_mio_boot_loc_dat_s cn38xx;
+ struct cvmx_mio_boot_loc_dat_s cn38xxp2;
+ struct cvmx_mio_boot_loc_dat_s cn50xx;
+ struct cvmx_mio_boot_loc_dat_s cn52xx;
+ struct cvmx_mio_boot_loc_dat_s cn52xxp1;
+ struct cvmx_mio_boot_loc_dat_s cn56xx;
+ struct cvmx_mio_boot_loc_dat_s cn56xxp1;
+ struct cvmx_mio_boot_loc_dat_s cn58xx;
+ struct cvmx_mio_boot_loc_dat_s cn58xxp1;
+ struct cvmx_mio_boot_loc_dat_s cn61xx;
+ struct cvmx_mio_boot_loc_dat_s cn63xx;
+ struct cvmx_mio_boot_loc_dat_s cn63xxp1;
+ struct cvmx_mio_boot_loc_dat_s cn66xx;
+ struct cvmx_mio_boot_loc_dat_s cn68xx;
+ struct cvmx_mio_boot_loc_dat_s cn68xxp1;
+ struct cvmx_mio_boot_loc_dat_s cnf71xx;
+};
+typedef union cvmx_mio_boot_loc_dat cvmx_mio_boot_loc_dat_t;
+
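+/* Example (illustrative sketch): dumping local memory region 0 (addresses
+ * 0x00 - 0x78) using the post-increment behavior described above: write
+ * the start address to MIO_BOOT_LOC_ADR once, then each read of
+ * MIO_BOOT_LOC_DAT returns the next 8-byte entry. The CVMX_MIO_BOOT_LOC_ADR
+ * and CVMX_MIO_BOOT_LOC_DAT address macros are assumed to be defined
+ * earlier in this header.
+ *
+ *   int i;
+ *   cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, 0x00);
+ *   for (i = 0; i < 16; i++)
+ *       cvmx_dprintf("loc[0x%02x] = 0x%016llx\n", i * 8,
+ *                    (unsigned long long)cvmx_read_csr(CVMX_MIO_BOOT_LOC_DAT));
+ */
+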
+/**
+ * cvmx_mio_boot_pin_defs
+ *
+ * MIO_BOOT_PIN_DEFS = MIO Boot Pin Defaults Register
+ *
+ */
+union cvmx_mio_boot_pin_defs {
+ uint64_t u64;
+ struct cvmx_mio_boot_pin_defs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t user1 : 16; /**< BOOT_AD [31:16] latched during power up */
+ uint64_t ale : 1; /**< Region 0 default ALE mode */
+ uint64_t width : 1; /**< Region 0 default bus width */
+ uint64_t dmack_p2 : 1; /**< boot_dmack[2] default polarity */
+ uint64_t dmack_p1 : 1; /**< boot_dmack[1] default polarity */
+ uint64_t dmack_p0 : 1; /**< boot_dmack[0] default polarity */
+ uint64_t term : 2; /**< Selects default driver termination */
+ uint64_t nand : 1; /**< Region 0 is NAND flash */
+ uint64_t user0 : 8; /**< BOOT_AD [7:0] latched during power up */
+#else
+ uint64_t user0 : 8;
+ uint64_t nand : 1;
+ uint64_t term : 2;
+ uint64_t dmack_p0 : 1;
+ uint64_t dmack_p1 : 1;
+ uint64_t dmack_p2 : 1;
+ uint64_t width : 1;
+ uint64_t ale : 1;
+ uint64_t user1 : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_mio_boot_pin_defs_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t ale : 1; /**< Region 0 default ALE mode */
+ uint64_t width : 1; /**< Region 0 default bus width */
+ uint64_t reserved_13_13 : 1;
+ uint64_t dmack_p1 : 1; /**< boot_dmack[1] default polarity */
+ uint64_t dmack_p0 : 1; /**< boot_dmack[0] default polarity */
+ uint64_t term : 2; /**< Selects default driver termination */
+ uint64_t nand : 1; /**< Region 0 is NAND flash */
+ uint64_t reserved_0_7 : 8;
+#else
+ uint64_t reserved_0_7 : 8;
+ uint64_t nand : 1;
+ uint64_t term : 2;
+ uint64_t dmack_p0 : 1;
+ uint64_t dmack_p1 : 1;
+ uint64_t reserved_13_13 : 1;
+ uint64_t width : 1;
+ uint64_t ale : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn52xx;
+ struct cvmx_mio_boot_pin_defs_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t ale : 1; /**< Region 0 default ALE mode */
+ uint64_t width : 1; /**< Region 0 default bus width */
+ uint64_t dmack_p2 : 1; /**< boot_dmack[2] default polarity */
+ uint64_t dmack_p1 : 1; /**< boot_dmack[1] default polarity */
+ uint64_t dmack_p0 : 1; /**< boot_dmack[0] default polarity */
+ uint64_t term : 2; /**< Selects default driver termination */
+ uint64_t reserved_0_8 : 9;
+#else
+ uint64_t reserved_0_8 : 9;
+ uint64_t term : 2;
+ uint64_t dmack_p0 : 1;
+ uint64_t dmack_p1 : 1;
+ uint64_t dmack_p2 : 1;
+ uint64_t width : 1;
+ uint64_t ale : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn56xx;
+ struct cvmx_mio_boot_pin_defs_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t user1 : 16; /**< BOOT_AD [31:16] latched during power up */
+ uint64_t ale : 1; /**< Region 0 default ALE mode */
+ uint64_t width : 1; /**< Region 0 default bus width */
+ uint64_t reserved_13_13 : 1;
+ uint64_t dmack_p1 : 1; /**< boot_dmack[1] default polarity */
+ uint64_t dmack_p0 : 1; /**< boot_dmack[0] default polarity */
+ uint64_t term : 2; /**< Selects default driver termination */
+ uint64_t nand : 1; /**< Region 0 is NAND flash */
+ uint64_t user0 : 8; /**< BOOT_AD [7:0] latched during power up */
+#else
+ uint64_t user0 : 8;
+ uint64_t nand : 1;
+ uint64_t term : 2;
+ uint64_t dmack_p0 : 1;
+ uint64_t dmack_p1 : 1;
+ uint64_t reserved_13_13 : 1;
+ uint64_t width : 1;
+ uint64_t ale : 1;
+ uint64_t user1 : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn61xx;
+ struct cvmx_mio_boot_pin_defs_cn52xx cn63xx;
+ struct cvmx_mio_boot_pin_defs_cn52xx cn63xxp1;
+ struct cvmx_mio_boot_pin_defs_cn52xx cn66xx;
+ struct cvmx_mio_boot_pin_defs_cn52xx cn68xx;
+ struct cvmx_mio_boot_pin_defs_cn52xx cn68xxp1;
+ struct cvmx_mio_boot_pin_defs_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_boot_pin_defs cvmx_mio_boot_pin_defs_t;
+
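+/* Example (illustrative sketch): reading back the default boot_dmack[0]
+ * polarity latched at reset, which the MIO_BOOT_DMA_TIM comment above
+ * says is mirrored read-only in this register (0 = active high,
+ * 1 = active low). CVMX_MIO_BOOT_PIN_DEFS is assumed to be the address
+ * macro defined earlier in this header.
+ *
+ *   cvmx_mio_boot_pin_defs_t pins;
+ *   pins.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS);
+ *   cvmx_dprintf("boot_dmack[0] default: %s\n",
+ *                pins.s.dmack_p0 ? "active low" : "active high");
+ */
+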
+/**
+ * cvmx_mio_boot_reg_cfg#
+ */
+union cvmx_mio_boot_reg_cfgx {
+ uint64_t u64;
+ struct cvmx_mio_boot_reg_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t dmack : 2; /**< Region X DMACK */
+ uint64_t tim_mult : 2; /**< Region X timing multiplier */
+ uint64_t rd_dly : 3; /**< Region X read sample delay */
+ uint64_t sam : 1; /**< Region X SAM mode */
+ uint64_t we_ext : 2; /**< Region X write enable count extension */
+ uint64_t oe_ext : 2; /**< Region X output enable count extension */
+ uint64_t en : 1; /**< Region X enable */
+ uint64_t orbit : 1; /**< Region X or bit */
+ uint64_t ale : 1; /**< Region X ALE mode */
+ uint64_t width : 1; /**< Region X bus width */
+ uint64_t size : 12; /**< Region X size */
+ uint64_t base : 16; /**< Region X base address */
+#else
+ uint64_t base : 16;
+ uint64_t size : 12;
+ uint64_t width : 1;
+ uint64_t ale : 1;
+ uint64_t orbit : 1;
+ uint64_t en : 1;
+ uint64_t oe_ext : 2;
+ uint64_t we_ext : 2;
+ uint64_t sam : 1;
+ uint64_t rd_dly : 3;
+ uint64_t tim_mult : 2;
+ uint64_t dmack : 2;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_mio_boot_reg_cfgx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t sam : 1; /**< Region X SAM mode */
+ uint64_t we_ext : 2; /**< Region X write enable count extension */
+ uint64_t oe_ext : 2; /**< Region X output enable count extension */
+ uint64_t en : 1; /**< Region X enable */
+ uint64_t orbit : 1; /**< Region X or bit */
+ uint64_t ale : 1; /**< Region X ALE mode */
+ uint64_t width : 1; /**< Region X bus width */
+ uint64_t size : 12; /**< Region X size */
+ uint64_t base : 16; /**< Region X base address */
+#else
+ uint64_t base : 16;
+ uint64_t size : 12;
+ uint64_t width : 1;
+ uint64_t ale : 1;
+ uint64_t orbit : 1;
+ uint64_t en : 1;
+ uint64_t oe_ext : 2;
+ uint64_t we_ext : 2;
+ uint64_t sam : 1;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } cn30xx;
+ struct cvmx_mio_boot_reg_cfgx_cn30xx cn31xx;
+ struct cvmx_mio_boot_reg_cfgx_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t en : 1; /**< Region X enable */
+ uint64_t orbit : 1; /**< Region X or bit */
+ uint64_t reserved_28_29 : 2;
+ uint64_t size : 12; /**< Region X size */
+ uint64_t base : 16; /**< Region X base address */
+#else
+ uint64_t base : 16;
+ uint64_t size : 12;
+ uint64_t reserved_28_29 : 2;
+ uint64_t orbit : 1;
+ uint64_t en : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn38xx;
+ struct cvmx_mio_boot_reg_cfgx_cn38xx cn38xxp2;
+ struct cvmx_mio_boot_reg_cfgx_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63 : 22;
+ uint64_t tim_mult : 2; /**< Region X timing multiplier */
+ uint64_t rd_dly : 3; /**< Region X read sample delay */
+ uint64_t sam : 1; /**< Region X SAM mode */
+ uint64_t we_ext : 2; /**< Region X write enable count extension */
+ uint64_t oe_ext : 2; /**< Region X output enable count extension */
+ uint64_t en : 1; /**< Region X enable */
+ uint64_t orbit : 1; /**< Region X or bit */
+ uint64_t ale : 1; /**< Region X ALE mode */
+ uint64_t width : 1; /**< Region X bus width */
+ uint64_t size : 12; /**< Region X size */
+ uint64_t base : 16; /**< Region X base address */
+#else
+ uint64_t base : 16;
+ uint64_t size : 12;
+ uint64_t width : 1;
+ uint64_t ale : 1;
+ uint64_t orbit : 1;
+ uint64_t en : 1;
+ uint64_t oe_ext : 2;
+ uint64_t we_ext : 2;
+ uint64_t sam : 1;
+ uint64_t rd_dly : 3;
+ uint64_t tim_mult : 2;
+ uint64_t reserved_42_63 : 22;
+#endif
+ } cn50xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn52xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn52xxp1;
+ struct cvmx_mio_boot_reg_cfgx_s cn56xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn56xxp1;
+ struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xx;
+ struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xxp1;
+ struct cvmx_mio_boot_reg_cfgx_s cn61xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn63xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn63xxp1;
+ struct cvmx_mio_boot_reg_cfgx_s cn66xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn68xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn68xxp1;
+ struct cvmx_mio_boot_reg_cfgx_s cnf71xx;
+};
+typedef union cvmx_mio_boot_reg_cfgx cvmx_mio_boot_reg_cfgx_t;
+
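+/* Example (illustrative sketch): checking whether boot bus region r is
+ * enabled and printing its raw configuration fields. CVMX_MIO_BOOT_REG_CFGX()
+ * is assumed to be the address macro defined earlier in this header; the
+ * scaling of BASE and SIZE into byte addresses is chip-specific, so only
+ * the raw field values are shown.
+ *
+ *   int r = 0;
+ *   cvmx_mio_boot_reg_cfgx_t cfg;
+ *   cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(r));
+ *   if (cfg.s.en)
+ *       cvmx_dprintf("region %d: base=0x%x size=0x%x width=%d\n",
+ *                    r, (int)cfg.s.base, (int)cfg.s.size, (int)cfg.s.width);
+ */
+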
+/**
+ * cvmx_mio_boot_reg_tim#
+ */
+union cvmx_mio_boot_reg_timx {
+ uint64_t u64;
+ struct cvmx_mio_boot_reg_timx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pagem : 1; /**< Region X page mode */
+ uint64_t waitm : 1; /**< Region X wait mode */
+ uint64_t pages : 2; /**< Region X page size */
+ uint64_t ale : 6; /**< Region X ALE count */
+ uint64_t page : 6; /**< Region X page count */
+ uint64_t wait : 6; /**< Region X wait count */
+ uint64_t pause : 6; /**< Region X pause count */
+ uint64_t wr_hld : 6; /**< Region X write hold count */
+ uint64_t rd_hld : 6; /**< Region X read hold count */
+ uint64_t we : 6; /**< Region X write enable count */
+ uint64_t oe : 6; /**< Region X output enable count */
+ uint64_t ce : 6; /**< Region X chip enable count */
+ uint64_t adr : 6; /**< Region X address count */
+#else
+ uint64_t adr : 6;
+ uint64_t ce : 6;
+ uint64_t oe : 6;
+ uint64_t we : 6;
+ uint64_t rd_hld : 6;
+ uint64_t wr_hld : 6;
+ uint64_t pause : 6;
+ uint64_t wait : 6;
+ uint64_t page : 6;
+ uint64_t ale : 6;
+ uint64_t pages : 2;
+ uint64_t waitm : 1;
+ uint64_t pagem : 1;
+#endif
+ } s;
+ struct cvmx_mio_boot_reg_timx_s cn30xx;
+ struct cvmx_mio_boot_reg_timx_s cn31xx;
+ struct cvmx_mio_boot_reg_timx_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pagem : 1; /**< Region X page mode */
+ uint64_t waitm : 1; /**< Region X wait mode */
+ uint64_t pages : 2; /**< Region X page size (NOT IN PASS 1) */
+ uint64_t reserved_54_59 : 6;
+ uint64_t page : 6; /**< Region X page count */
+ uint64_t wait : 6; /**< Region X wait count */
+ uint64_t pause : 6; /**< Region X pause count */
+ uint64_t wr_hld : 6; /**< Region X write hold count */
+ uint64_t rd_hld : 6; /**< Region X read hold count */
+ uint64_t we : 6; /**< Region X write enable count */
+ uint64_t oe : 6; /**< Region X output enable count */
+ uint64_t ce : 6; /**< Region X chip enable count */
+ uint64_t adr : 6; /**< Region X address count */
+#else
+ uint64_t adr : 6;
+ uint64_t ce : 6;
+ uint64_t oe : 6;
+ uint64_t we : 6;
+ uint64_t rd_hld : 6;
+ uint64_t wr_hld : 6;
+ uint64_t pause : 6;
+ uint64_t wait : 6;
+ uint64_t page : 6;
+ uint64_t reserved_54_59 : 6;
+ uint64_t pages : 2;
+ uint64_t waitm : 1;
+ uint64_t pagem : 1;
+#endif
+ } cn38xx;
+ struct cvmx_mio_boot_reg_timx_cn38xx cn38xxp2;
+ struct cvmx_mio_boot_reg_timx_s cn50xx;
+ struct cvmx_mio_boot_reg_timx_s cn52xx;
+ struct cvmx_mio_boot_reg_timx_s cn52xxp1;
+ struct cvmx_mio_boot_reg_timx_s cn56xx;
+ struct cvmx_mio_boot_reg_timx_s cn56xxp1;
+ struct cvmx_mio_boot_reg_timx_s cn58xx;
+ struct cvmx_mio_boot_reg_timx_s cn58xxp1;
+ struct cvmx_mio_boot_reg_timx_s cn61xx;
+ struct cvmx_mio_boot_reg_timx_s cn63xx;
+ struct cvmx_mio_boot_reg_timx_s cn63xxp1;
+ struct cvmx_mio_boot_reg_timx_s cn66xx;
+ struct cvmx_mio_boot_reg_timx_s cn68xx;
+ struct cvmx_mio_boot_reg_timx_s cn68xxp1;
+ struct cvmx_mio_boot_reg_timx_s cnf71xx;
+};
+typedef union cvmx_mio_boot_reg_timx cvmx_mio_boot_reg_timx_t;
+
+/**
+ * cvmx_mio_boot_thr
+ *
+ * MIO_BOOT_THR = MIO Boot Threshold Register
+ *
+ * Contains MIO Boot threshold values:
+ *
+ * FIF_THR = Assert ncb__busy when the Boot NCB input FIFO reaches this level (not typically for
+ * customer use).
+ *
+ * DMA_THR = When non-DMA accesses are pending, perform a DMA access after this number of non-DMA
+ * accesses have completed. If set to zero, only perform a DMA access when non-DMA
+ * accesses are not pending.
+ */
+union cvmx_mio_boot_thr {
+ uint64_t u64;
+ struct cvmx_mio_boot_thr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t dma_thr : 6; /**< DMA threshold */
+ uint64_t reserved_14_15 : 2;
+ uint64_t fif_cnt : 6; /**< Current NCB FIFO count */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fif_thr : 6; /**< NCB busy threshold */
+#else
+ uint64_t fif_thr : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t fif_cnt : 6;
+ uint64_t reserved_14_15 : 2;
+ uint64_t dma_thr : 6;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_mio_boot_thr_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t fif_cnt : 6; /**< Current NCB FIFO count */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fif_thr : 6; /**< NCB busy threshold */
+#else
+ uint64_t fif_thr : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t fif_cnt : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn30xx;
+ struct cvmx_mio_boot_thr_cn30xx cn31xx;
+ struct cvmx_mio_boot_thr_cn30xx cn38xx;
+ struct cvmx_mio_boot_thr_cn30xx cn38xxp2;
+ struct cvmx_mio_boot_thr_cn30xx cn50xx;
+ struct cvmx_mio_boot_thr_s cn52xx;
+ struct cvmx_mio_boot_thr_s cn52xxp1;
+ struct cvmx_mio_boot_thr_s cn56xx;
+ struct cvmx_mio_boot_thr_s cn56xxp1;
+ struct cvmx_mio_boot_thr_cn30xx cn58xx;
+ struct cvmx_mio_boot_thr_cn30xx cn58xxp1;
+ struct cvmx_mio_boot_thr_s cn61xx;
+ struct cvmx_mio_boot_thr_s cn63xx;
+ struct cvmx_mio_boot_thr_s cn63xxp1;
+ struct cvmx_mio_boot_thr_s cn66xx;
+ struct cvmx_mio_boot_thr_s cn68xx;
+ struct cvmx_mio_boot_thr_s cn68xxp1;
+ struct cvmx_mio_boot_thr_s cnf71xx;
+};
+typedef union cvmx_mio_boot_thr cvmx_mio_boot_thr_t;
+
+/**
+ * cvmx_mio_emm_buf_dat
+ *
+ * MIO_EMM_BUF_DAT = MIO EMMC Data buffer access Register
+ *
+ */
+union cvmx_mio_emm_buf_dat {
+ uint64_t u64;
+ struct cvmx_mio_emm_buf_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat : 64; /**< Direct access to the 1KB data buffer memory. Address
+ specified by MIO_EMM_BUF_IDX */
+#else
+ uint64_t dat : 64;
+#endif
+ } s;
+ struct cvmx_mio_emm_buf_dat_s cn61xx;
+ struct cvmx_mio_emm_buf_dat_s cnf71xx;
+};
+typedef union cvmx_mio_emm_buf_dat cvmx_mio_emm_buf_dat_t;
+
+/**
+ * cvmx_mio_emm_buf_idx
+ *
+ * MIO_EMM_BUF_IDX = MIO EMMC Data buffer address Register
+ *
+ */
+union cvmx_mio_emm_buf_idx {
+ uint64_t u64;
+ struct cvmx_mio_emm_buf_idx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t inc : 1; /**< Automatically advance BUF_NUM/OFFSET after each access to
+ MIO_EMM_BUF_DAT. Wraps after last offset of last data buffer. */
+ uint64_t reserved_7_15 : 9;
+ uint64_t buf_num : 1; /**< Specify the data buffer for the next access to MIO_EMM_BUF_DAT */
+ uint64_t offset : 6; /**< Specify the 8B data buffer offset for the next access to
+ MIO_EMM_BUF_DAT */
+#else
+ uint64_t offset : 6;
+ uint64_t buf_num : 1;
+ uint64_t reserved_7_15 : 9;
+ uint64_t inc : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_mio_emm_buf_idx_s cn61xx;
+ struct cvmx_mio_emm_buf_idx_s cnf71xx;
+};
+typedef union cvmx_mio_emm_buf_idx cvmx_mio_emm_buf_idx_t;
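+
+/* Usage sketch (illustrative; not part of the original SDK header): copying
+ * out one 512B data buffer through MIO_EMM_BUF_IDX/MIO_EMM_BUF_DAT, using the
+ * INC field to auto-advance the offset after each MIO_EMM_BUF_DAT access.
+ * Assumes the CVMX_MIO_EMM_BUF_* address macros from this header and
+ * cvmx_read_csr()/cvmx_write_csr() from cvmx.h:
+ *
+ * cvmx_mio_emm_buf_idx_t idx;
+ * uint64_t data[64];                       // 512B = 64 x 8B words
+ * int i;
+ *
+ * idx.u64 = 0;
+ * idx.s.buf_num = 0;                       // select data buffer 0
+ * idx.s.offset = 0;                        // start at the first 8B word
+ * idx.s.inc = 1;                           // auto-advance after each access
+ * cvmx_write_csr(CVMX_MIO_EMM_BUF_IDX, idx.u64);
+ * for (i = 0; i < 64; i++)
+ *     data[i] = cvmx_read_csr(CVMX_MIO_EMM_BUF_DAT);
+ */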
+
+/**
+ * cvmx_mio_emm_cfg
+ *
+ * MIO_EMM_CFG = MIO EMMC Configuration Register
+ *
+ */
+union cvmx_mio_emm_cfg {
+ uint64_t u64;
+ struct cvmx_mio_emm_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t boot_fail : 1; /**< SW should set BOOT_FAIL when an unrecoverable error occurs
+ while attempting to boot from eMMC or NOR Flash. When set, the
+ following pattern will be output:
+ BOOT_AD[7:0] pulled up to 1
+ BOOT_CE_N[7:0] driven to 1
+ BOOT_ALE driven to 0
+ BOOT_OE_L driven to 1
+ BOOT_WE_L driven to 1 */
+ uint64_t reserved_4_15 : 12;
+ uint64_t bus_ena : 4; /**< eMMC bus enable mask.
+
+ Setting bit0 of BUS_ENA causes BOOT_CE[1] to become dedicated
+ eMMC bus 0 command (i.e., disabling any NOR use).
+
+ Setting bit1 of BUS_ENA causes BOOT_CE[2] to become dedicated
+ eMMC bus 1 command (i.e., disabling any NOR use).
+
+ Setting bit2 of BUS_ENA causes BOOT_CE[3] to become dedicated
+ eMMC bus 2 command (i.e., disabling any NOR use).
+
+ Setting bit3 of BUS_ENA causes BOOT_CE[4] to become dedicated
+ eMMC bus 3 command (i.e., disabling any NOR use).
+
+ Setting any bit of BUS_ENA causes BOOT_CE[5] to become the eMMC
+ clock for both bus0 and bus1. */
+#else
+ uint64_t bus_ena : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t boot_fail : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_mio_emm_cfg_s cn61xx;
+ struct cvmx_mio_emm_cfg_s cnf71xx;
+};
+typedef union cvmx_mio_emm_cfg cvmx_mio_emm_cfg_t;
+
+/**
+ * cvmx_mio_emm_cmd
+ *
+ * MIO_EMM_CMD = MIO EMMC Command Register
+ *
+ */
+union cvmx_mio_emm_cmd {
+ uint64_t u64;
+ struct cvmx_mio_emm_cmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t bus_id : 2; /**< Specify the eMMC bus */
+ uint64_t cmd_val : 1; /**< Request valid. SW writes this bit to a 1. HW clears it when
+ the operation completes. */
+ uint64_t reserved_56_58 : 3;
+ uint64_t dbuf : 1; /**< Specify the data buffer to be used for a block transfer. */
+ uint64_t offset : 6; /**< Debug only. Specify the number of 8-byte transfers
+ used in the command. Value is 64-OFFSET. The block transfer
+ will still start at the first byte in the 512B data buffer.
+ SW must ensure CMD16 has updated the card block length. */
+ uint64_t reserved_43_48 : 6;
+ uint64_t ctype_xor : 2; /**< Reserved. Must be zero */
+ uint64_t rtype_xor : 3; /**< Reserved. Must be zero */
+ uint64_t cmd_idx : 6; /**< eMMC command */
+ uint64_t arg : 32; /**< eMMC command argument */
+#else
+ uint64_t arg : 32;
+ uint64_t cmd_idx : 6;
+ uint64_t rtype_xor : 3;
+ uint64_t ctype_xor : 2;
+ uint64_t reserved_43_48 : 6;
+ uint64_t offset : 6;
+ uint64_t dbuf : 1;
+ uint64_t reserved_56_58 : 3;
+ uint64_t cmd_val : 1;
+ uint64_t bus_id : 2;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_mio_emm_cmd_s cn61xx;
+ struct cvmx_mio_emm_cmd_s cnf71xx;
+};
+typedef union cvmx_mio_emm_cmd cvmx_mio_emm_cmd_t;
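+
+/* Usage sketch (illustrative; not part of the original SDK header): issuing a
+ * direct eMMC command and waiting for completion by polling CMD_VAL, which HW
+ * clears when the operation completes (per the field description above).
+ * Assumes CVMX_MIO_EMM_CMD from this header and cvmx_read_csr()/
+ * cvmx_write_csr() from cvmx.h:
+ *
+ * cvmx_mio_emm_cmd_t cmd;
+ *
+ * cmd.u64 = 0;
+ * cmd.s.bus_id = bus;                      // target eMMC bus
+ * cmd.s.cmd_idx = 13;                      // e.g. CMD13 (SEND_STATUS)
+ * cmd.s.arg = rca << 16;                   // card RCA in the upper halfword
+ * cmd.s.cmd_val = 1;                       // start the operation
+ * cvmx_write_csr(CVMX_MIO_EMM_CMD, cmd.u64);
+ * do {
+ *     cmd.u64 = cvmx_read_csr(CVMX_MIO_EMM_CMD);
+ * } while (cmd.s.cmd_val);                 // then check MIO_EMM_RSP_STS
+ */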
+
+/**
+ * cvmx_mio_emm_dma
+ *
+ * MIO_EMM_DMA = MIO EMMC DMA config Register
+ *
+ */
+union cvmx_mio_emm_dma {
+ uint64_t u64;
+ struct cvmx_mio_emm_dma_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t bus_id : 2; /**< Specify the eMMC bus */
+ uint64_t dma_val : 1; /**< SW writes this bit to a 1 to indicate that HW should perform
+ the DMA transfer. HW clears when DMA operation completes or
+ is terminated. */
+ uint64_t sector : 1; /**< Specify CARD_ADDR and eMMC are using sector (512B) addressing. */
+ uint64_t dat_null : 1; /**< Do not perform any eMMC commands. A DMA read will return all
+ 0s. A DMA write tosses the data. In the case of a failure,
+ this can be used to unwind the DMA engine. */
+ uint64_t thres : 6; /**< Number of 8B blocks of data that must exist in the DBUF before
+ starting the 512B block transfer. 0 indicates to wait for
+ the entire block. */
+ uint64_t rel_wr : 1; /**< Set the reliable write parameter when performing CMD23
+ (SET_BLOCK_COUNT) for a multiple block transfer. */
+ uint64_t rw : 1; /**< R/W bit (0 = read, 1 = write) */
+ uint64_t multi : 1; /**< Perform operation using a multiple block command instead of a
+ series of single block commands. */
+ uint64_t block_cnt : 16; /**< Number of blocks to read/write. Hardware decrements the block
+ count after each successful block transfer. */
+ uint64_t card_addr : 32; /**< Data address for media <= 2GB is a 32-bit byte address and data
+ address for media > 2GB is a 32-bit sector (512B) address.
+ Hardware advances the card address after each successful block
+ transfer by 512 for byte addressing and by 1 for sector
+ addressing. */
+#else
+ uint64_t card_addr : 32;
+ uint64_t block_cnt : 16;
+ uint64_t multi : 1;
+ uint64_t rw : 1;
+ uint64_t rel_wr : 1;
+ uint64_t thres : 6;
+ uint64_t dat_null : 1;
+ uint64_t sector : 1;
+ uint64_t dma_val : 1;
+ uint64_t bus_id : 2;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_mio_emm_dma_s cn61xx;
+ struct cvmx_mio_emm_dma_s cnf71xx;
+};
+typedef union cvmx_mio_emm_dma cvmx_mio_emm_dma_t;
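+
+/* Usage sketch (illustrative; not part of the original SDK header): starting
+ * a multi-block DMA read through this register. Setup of the NCB-side DMA
+ * engine that moves the data is separate and not shown. Assumes
+ * CVMX_MIO_EMM_DMA from this header and cvmx_write_csr() from cvmx.h:
+ *
+ * cvmx_mio_emm_dma_t dma;
+ *
+ * dma.u64 = 0;
+ * dma.s.bus_id = bus;
+ * dma.s.sector = 1;                        // card uses sector addressing
+ * dma.s.rw = 0;                            // 0 = read
+ * dma.s.multi = 1;                         // use a multiple-block command
+ * dma.s.block_cnt = nblocks;               // HW decrements per block
+ * dma.s.card_addr = sector;                // HW advances per block
+ * dma.s.dma_val = 1;                       // kick off; HW clears when done
+ * cvmx_write_csr(CVMX_MIO_EMM_DMA, dma.u64);
+ */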
+
+/**
+ * cvmx_mio_emm_int
+ *
+ * MIO_EMM_INT = MIO EMMC Interrupt Register
+ *
+ */
+union cvmx_mio_emm_int {
+ uint64_t u64;
+ struct cvmx_mio_emm_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t switch_err : 1; /**< Switch operation encountered an error. */
+ uint64_t switch_done : 1; /**< Switch operation completed successfully */
+ uint64_t dma_err : 1; /**< DMA transfer encountered an error. See MIO_EMM_RSP. */
+ uint64_t cmd_err : 1; /**< Operation specified by MIO_EMM_CMD encountered an error. See
+ MIO_EMM_RSP. */
+ uint64_t dma_done : 1; /**< DMA transfer completed successfully */
+ uint64_t cmd_done : 1; /**< Operation specified by MIO_EMM_CMD completed successfully */
+ uint64_t buf_done : 1; /**< The next 512B block transfer of a multi-block transfer has
+ completed. */
+#else
+ uint64_t buf_done : 1;
+ uint64_t cmd_done : 1;
+ uint64_t dma_done : 1;
+ uint64_t cmd_err : 1;
+ uint64_t dma_err : 1;
+ uint64_t switch_done : 1;
+ uint64_t switch_err : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_mio_emm_int_s cn61xx;
+ struct cvmx_mio_emm_int_s cnf71xx;
+};
+typedef union cvmx_mio_emm_int cvmx_mio_emm_int_t;
+
+/**
+ * cvmx_mio_emm_int_en
+ *
+ * MIO_EMM_INT_EN = MIO EMMC Interrupt enable Register
+ *
+ */
+union cvmx_mio_emm_int_en {
+ uint64_t u64;
+ struct cvmx_mio_emm_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t switch_err : 1; /**< Switch operation encountered an error. */
+ uint64_t switch_done : 1; /**< Switch operation completed. */
+ uint64_t dma_err : 1; /**< DMA transfer encountered an error. See MIO_EMM_RSP. */
+ uint64_t cmd_err : 1; /**< Operation specified by MIO_EMM_CMD encountered an error. See
+ MIO_EMM_RSP. */
+ uint64_t dma_done : 1; /**< DMA transfer completed */
+ uint64_t cmd_done : 1; /**< Operation specified by MIO_EMM_CMD completed */
+ uint64_t buf_done : 1; /**< The next 512B block transfer of a multi-block transfer has
+ completed. */
+#else
+ uint64_t buf_done : 1;
+ uint64_t cmd_done : 1;
+ uint64_t dma_done : 1;
+ uint64_t cmd_err : 1;
+ uint64_t dma_err : 1;
+ uint64_t switch_done : 1;
+ uint64_t switch_err : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_mio_emm_int_en_s cn61xx;
+ struct cvmx_mio_emm_int_en_s cnf71xx;
+};
+typedef union cvmx_mio_emm_int_en cvmx_mio_emm_int_en_t;
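+
+/* Usage sketch (illustrative; not part of the original SDK header): enabling
+ * the completion and error interrupts for direct commands and DMA transfers.
+ * Assumes CVMX_MIO_EMM_INT_EN from this header and cvmx_write_csr() from
+ * cvmx.h:
+ *
+ * cvmx_mio_emm_int_en_t en;
+ *
+ * en.u64 = 0;
+ * en.s.cmd_done = 1;                       // direct command completed
+ * en.s.cmd_err = 1;                        // direct command failed
+ * en.s.dma_done = 1;                       // DMA transfer completed
+ * en.s.dma_err = 1;                        // DMA transfer failed
+ * cvmx_write_csr(CVMX_MIO_EMM_INT_EN, en.u64);
+ */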
+
+/**
+ * cvmx_mio_emm_mode#
+ *
+ * MIO_EMM_MODE = MIO EMMC Operating mode Register
+ *
+ */
+union cvmx_mio_emm_modex {
+ uint64_t u64;
+ struct cvmx_mio_emm_modex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t hs_timing : 1; /**< Current high speed timing mode. Required when the CLK
+ frequency is higher than 20 MHz. */
+ uint64_t reserved_43_47 : 5;
+ uint64_t bus_width : 3; /**< Current card bus mode. Out of reset, the card is in 1 bit data
+ bus mode. Select bus width.
+
+ 0 - 1 bit data bus (power on)
+ 1 - 4 bit data bus
+ 2 - 8 bit data bus
+ 5 - 4 bit data bus (dual data rate)
+ 6 - 8 bit data bus (dual data rate) */
+ uint64_t reserved_36_39 : 4;
+ uint64_t power_class : 4; /**< Out of reset, the card power class is 0, which is the minimum
+ current consumption class for the card. EXT_CSD bytes
+ [203:200] and [239:238] contain the power class for different
+ BUS_WIDTH and CLK frequencies. Software should write this
+ field with the 4-bit field from the EXT_CSD bytes
+ corresponding to the selected operating mode. */
+ uint64_t clk_hi : 16; /**< Current number of sclk cycles to hold the eMMC CLK pin high */
+ uint64_t clk_lo : 16; /**< Current number of sclk cycles to hold the eMMC CLK pin low. */
+#else
+ uint64_t clk_lo : 16;
+ uint64_t clk_hi : 16;
+ uint64_t power_class : 4;
+ uint64_t reserved_36_39 : 4;
+ uint64_t bus_width : 3;
+ uint64_t reserved_43_47 : 5;
+ uint64_t hs_timing : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } s;
+ struct cvmx_mio_emm_modex_s cn61xx;
+ struct cvmx_mio_emm_modex_s cnf71xx;
+};
+typedef union cvmx_mio_emm_modex cvmx_mio_emm_modex_t;
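+
+/* Worked example (illustrative; not part of the original SDK header): since
+ * CLK_HI and CLK_LO count the sclk cycles the eMMC CLK pin is held high and
+ * low, the resulting eMMC clock rate is sclk / (CLK_HI + CLK_LO). A symmetric
+ * divider for a target rate hz can therefore be derived as below, assuming
+ * cvmx_clock_get_rate(CVMX_CLOCK_SCLK) from cvmx-clock.h:
+ *
+ * uint64_t sclk = cvmx_clock_get_rate(CVMX_CLOCK_SCLK);
+ * uint64_t half = (sclk + 2 * hz - 1) / (2 * hz);  // round up, never overclock
+ * mode.s.clk_hi = half;
+ * mode.s.clk_lo = half;
+ */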
+
+/**
+ * cvmx_mio_emm_rca
+ */
+union cvmx_mio_emm_rca {
+ uint64_t u64;
+ struct cvmx_mio_emm_rca_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t card_rca : 16; /**< Whenever SW performs CMD7, HW will update CARD_RCA with the
+ relative card address from MIO_EMM_CMD[ARG] unless the
+ operation encounters an error. */
+#else
+ uint64_t card_rca : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_mio_emm_rca_s cn61xx;
+ struct cvmx_mio_emm_rca_s cnf71xx;
+};
+typedef union cvmx_mio_emm_rca cvmx_mio_emm_rca_t;
+
+/**
+ * cvmx_mio_emm_rsp_hi
+ *
+ * MIO_EMM_RSP_HI = MIO EMMC Response data high Register
+ *
+ */
+union cvmx_mio_emm_rsp_hi {
+ uint64_t u64;
+ struct cvmx_mio_emm_rsp_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat : 64; /**< Command response (as per JEDEC eMMC spec)
+
+ RSP_TYPE=1 - DAT[63:0] - 0x0
+ RSP_TYPE=2 - DAT[63:0] - CID[127:64] or CSD[127:64]
+ RSP_TYPE=3 - DAT[63:0] - 0x0
+ RSP_TYPE=4 - DAT[63:0] - 0x0
+ RSP_TYPE=5 - DAT[63:0] - 0x0 */
+#else
+ uint64_t dat : 64;
+#endif
+ } s;
+ struct cvmx_mio_emm_rsp_hi_s cn61xx;
+ struct cvmx_mio_emm_rsp_hi_s cnf71xx;
+};
+typedef union cvmx_mio_emm_rsp_hi cvmx_mio_emm_rsp_hi_t;
+
+/**
+ * cvmx_mio_emm_rsp_lo
+ *
+ * MIO_EMM_RSP_LO = MIO EMMC Response data low Register
+ *
+ */
+union cvmx_mio_emm_rsp_lo {
+ uint64_t u64;
+ struct cvmx_mio_emm_rsp_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat : 64; /**< Command response (as per JEDEC eMMC spec)
+
+ RSP_TYPE = 1
+ DAT[63:46] - 0x0
+ DAT[45:40] - Command index
+ DAT[39: 8] - Card status
+ DAT[ 7: 1] - CRC7
+ DAT[ 0] - End bit
+
+ RSP_TYPE = 2
+ DAT[63: 1] - CID[63:1] or CSD[63:1] including CRC
+ DAT[ 0] - End bit
+
+ RSP_TYPE = 3
+ DAT[63:46] - 0x0
+ DAT[45:40] - Check bits (0x3f)
+ DAT[39: 8] - OCR register
+ DAT[ 7: 1] - Check bits (0x7f)
+ DAT[ 0] - End bit
+
+ RSP_TYPE = 4
+ DAT[63:46] - 0x0
+ DAT[45:40] - CMD39 ('100111')
+ DAT[39:24] - RCA[31:16]
+ DAT[ 23] - Status
+ DAT[22:16] - Register address
+ DAT[15: 8] - Register contents
+ DAT[ 7: 1] - CRC7
+ DAT[ 0] - End bit
+
+ RSP_TYPE = 5
+ DAT[63:46] - 0x0
+ DAT[45:40] - CMD40 ('101000')
+ DAT[39:24] - RCA[31:16]
+ DAT[ 23] - Status
+ DAT[22:16] - Register address
+ DAT[15: 8] - Not defined. May be used for IRQ data
+ DAT[ 7: 1] - CRC7
+ DAT[ 0] - End bit */
+#else
+ uint64_t dat : 64;
+#endif
+ } s;
+ struct cvmx_mio_emm_rsp_lo_s cn61xx;
+ struct cvmx_mio_emm_rsp_lo_s cnf71xx;
+};
+typedef union cvmx_mio_emm_rsp_lo cvmx_mio_emm_rsp_lo_t;
+
+/**
+ * cvmx_mio_emm_rsp_sts
+ *
+ * MIO_EMM_RSP_STS = MIO EMMC Response status Register
+ *
+ */
+union cvmx_mio_emm_rsp_sts {
+ uint64_t u64;
+ struct cvmx_mio_emm_rsp_sts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t bus_id : 2; /**< eMMC bus id to which the response status corresponds. */
+ uint64_t cmd_val : 1; /**< Read-only copy of MIO_EMM_CMD[CMD_VAL]. CMD_VAL=1 indicates a
+ direct operation is in progress. */
+ uint64_t switch_val : 1; /**< Read-only copy of MIO_EMM_SWITCH[SWITCH_EXE]. SWITCH_VAL=1
+ indicates a switch operation is in progress. */
+ uint64_t dma_val : 1; /**< Read-only copy of MIO_EMM_DMA[DMA_VAL]. DMA_VAL=1 indicates a
+ DMA operation is in progress. */
+ uint64_t dma_pend : 1; /**< The DMA engine has a pending transfer resulting from an error.
+ SW can resume the transfer by writing MIO_EMM_DMA[DMA_VAL]=1.
+ SW can terminate the transfer by writing MIO_EMM_DMA[DMA_VAL]=1
+ and MIO_EMM_DMA[DAT_NULL]=1. HW will clear DMA_PEND and perform
+ the DMA operation */
+ uint64_t reserved_29_55 : 27;
+ uint64_t dbuf_err : 1; /**< For CMD_TYPE=1, indicates DMA read data arrived from the
+ card without a free DBUF.
+
+ For CMD_TYPE=2, indicates a DBUF underflow occurred during a
+ DMA write. See MIO_EMM_DMA[THRES]. */
+ uint64_t reserved_24_27 : 4;
+ uint64_t dbuf : 1; /**< DBUF corresponding to the most recently attempted block
+ transfer. */
+ uint64_t blk_timeout : 1; /**< Timeout waiting for read data or 3-bit CRC token */
+ uint64_t blk_crc_err : 1; /**< For CMD_TYPE=1, indicates a card read data CRC mismatch.
+ MIO_EMM_RSP_STS[DBUF] indicates the failing data buffer.
+
+ For CMD_TYPE=2, indicates the card returned a 3-bit CRC status
+ token indicating the card encountered a write data CRC check
+ mismatch. MIO_EMM_RSP_STS[DBUF] indicates the failing data
+ buffer. */
+ uint64_t rsp_busybit : 1; /**< Debug only. eMMC protocol utilizes DAT0 as a busy signal
+ during block writes and R1b responses. */
+ uint64_t stp_timeout : 1; /**< Stop transmission response timeout. */
+ uint64_t stp_crc_err : 1; /**< Stop transmission response had a CRC error */
+ uint64_t stp_bad_sts : 1; /**< Stop transmission response had bad status. */
+ uint64_t stp_val : 1; /**< Stop transmission response valid. */
+ uint64_t rsp_timeout : 1; /**< Response timeout */
+ uint64_t rsp_crc_err : 1; /**< Response CRC error */
+ uint64_t rsp_bad_sts : 1; /**< Response bad status */
+ uint64_t rsp_val : 1; /**< Response valid. See MIO_EMM_RSP_HI/LO */
+ uint64_t rsp_type : 3; /**< Indicates the response type. See MIO_EMM_RSP_HI/LO */
+ uint64_t cmd_type : 2; /**< eMMC command type (0=no data, 1=read, 2=write) */
+ uint64_t cmd_idx : 6; /**< eMMC command index most recently attempted */
+ uint64_t cmd_done : 1; /**< eMMC command completed. Once the command has complete, the
+ status is final and can be examined by SW. */
+#else
+ uint64_t cmd_done : 1;
+ uint64_t cmd_idx : 6;
+ uint64_t cmd_type : 2;
+ uint64_t rsp_type : 3;
+ uint64_t rsp_val : 1;
+ uint64_t rsp_bad_sts : 1;
+ uint64_t rsp_crc_err : 1;
+ uint64_t rsp_timeout : 1;
+ uint64_t stp_val : 1;
+ uint64_t stp_bad_sts : 1;
+ uint64_t stp_crc_err : 1;
+ uint64_t stp_timeout : 1;
+ uint64_t rsp_busybit : 1;
+ uint64_t blk_crc_err : 1;
+ uint64_t blk_timeout : 1;
+ uint64_t dbuf : 1;
+ uint64_t reserved_24_27 : 4;
+ uint64_t dbuf_err : 1;
+ uint64_t reserved_29_55 : 27;
+ uint64_t dma_pend : 1;
+ uint64_t dma_val : 1;
+ uint64_t switch_val : 1;
+ uint64_t cmd_val : 1;
+ uint64_t bus_id : 2;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_mio_emm_rsp_sts_s cn61xx;
+ struct cvmx_mio_emm_rsp_sts_s cnf71xx;
+};
+typedef union cvmx_mio_emm_rsp_sts cvmx_mio_emm_rsp_sts_t;
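+
+/* Usage sketch (illustrative; not part of the original SDK header): once
+ * CMD_DONE is set the status is final (per the field description above) and
+ * can be decoded through the union; handle_error() is a hypothetical handler:
+ *
+ * cvmx_mio_emm_rsp_sts_t sts;
+ *
+ * sts.u64 = cvmx_read_csr(CVMX_MIO_EMM_RSP_STS);
+ * if (sts.s.cmd_done) {
+ *     if (sts.s.rsp_timeout || sts.s.rsp_crc_err || sts.s.rsp_bad_sts)
+ *         handle_error(sts);               // hypothetical error path
+ *     else if (sts.s.rsp_val)
+ *         response = cvmx_read_csr(CVMX_MIO_EMM_RSP_LO);
+ * }
+ */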
+
+/**
+ * cvmx_mio_emm_sample
+ */
+union cvmx_mio_emm_sample {
+ uint64_t u64;
+ struct cvmx_mio_emm_sample_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63 : 38;
+ uint64_t cmd_cnt : 10; /**< Number of SCLK cycles before the eMMC clock edge to sample the
+ command pin. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t dat_cnt : 10; /**< Number of SCLK cycles before the eMMC clock rising edge to
+ sample the data pin. */
+#else
+ uint64_t dat_cnt : 10;
+ uint64_t reserved_10_15 : 6;
+ uint64_t cmd_cnt : 10;
+ uint64_t reserved_26_63 : 38;
+#endif
+ } s;
+ struct cvmx_mio_emm_sample_s cn61xx;
+ struct cvmx_mio_emm_sample_s cnf71xx;
+};
+typedef union cvmx_mio_emm_sample cvmx_mio_emm_sample_t;
+
+/**
+ * cvmx_mio_emm_sts_mask
+ */
+union cvmx_mio_emm_sts_mask {
+ uint64_t u64;
+ struct cvmx_mio_emm_sts_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t sts_msk : 32; /**< Any bit set in STS_MSK causes the corresponding bit in the card
+ status to be considered when computing response bad status. */
+#else
+ uint64_t sts_msk : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_mio_emm_sts_mask_s cn61xx;
+ struct cvmx_mio_emm_sts_mask_s cnf71xx;
+};
+typedef union cvmx_mio_emm_sts_mask cvmx_mio_emm_sts_mask_t;
+
+/**
+ * cvmx_mio_emm_switch
+ *
+ * MIO_EMM_SWITCH = MIO EMMC Operating mode switch Register
+ *
+ */
+union cvmx_mio_emm_switch {
+ uint64_t u64;
+ struct cvmx_mio_emm_switch_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t bus_id : 2; /**< Specify the eMMC bus */
+ uint64_t switch_exe : 1; /**< When SWITCH_EXE is 0, the operating modes will be updated
+ directly without performing any SWITCH operations. This
+ allows SW to perform the SWITCH operations manually, then
+ update the HW.
+
+ SW writes this bit to a 1 to indicate that HW should perform
+ the necessary SWITCH operations. First, the POWER_CLASS
+ switch will be performed. If it fails, SWITCH_ERR0 will be
+ set and the remaining SWITCH operations will not be performed.
+ If it succeeds, the POWER_CLASS field will be updated and the
+ HS_TIMING switch will be performed. If it fails, SWITCH_ERR1
+ will be set and the remaining SWITCH operations will not be
+ performed. If it succeeds, the HS_TIMING field will be
+ updated and the BUS_WIDTH switch operation will be performed.
+ If it fails, SWITCH_ERR2 will be set. If it succeeds, the
+ BUS_WIDTH will be updated.
+
+ Changes to CLK_HI and CLK_LO are discarded if any switch error
+ occurs. */
+ uint64_t switch_err0 : 1; /**< Error encountered while performing the POWER_CLASS switch.
+ See MIO_EMM_RSP_STS */
+ uint64_t switch_err1 : 1; /**< Error encountered while performing the HS_TIMING switch.
+ See MIO_EMM_RSP_STS */
+ uint64_t switch_err2 : 1; /**< Error encountered while performing the BUS_WIDTH switch.
+ See MIO_EMM_RSP_STS */
+ uint64_t reserved_49_55 : 7;
+ uint64_t hs_timing : 1; /**< Requested update to HS_TIMING */
+ uint64_t reserved_43_47 : 5;
+ uint64_t bus_width : 3; /**< Requested update to BUS_WIDTH */
+ uint64_t reserved_36_39 : 4;
+ uint64_t power_class : 4; /**< Requested update to POWER_CLASS */
+ uint64_t clk_hi : 16; /**< Requested update to CLK_HI */
+ uint64_t clk_lo : 16; /**< Requested update to CLK_LO */
+#else
+ uint64_t clk_lo : 16;
+ uint64_t clk_hi : 16;
+ uint64_t power_class : 4;
+ uint64_t reserved_36_39 : 4;
+ uint64_t bus_width : 3;
+ uint64_t reserved_43_47 : 5;
+ uint64_t hs_timing : 1;
+ uint64_t reserved_49_55 : 7;
+ uint64_t switch_err2 : 1;
+ uint64_t switch_err1 : 1;
+ uint64_t switch_err0 : 1;
+ uint64_t switch_exe : 1;
+ uint64_t bus_id : 2;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_mio_emm_switch_s cn61xx;
+ struct cvmx_mio_emm_switch_s cnf71xx;
+};
+typedef union cvmx_mio_emm_switch cvmx_mio_emm_switch_t;
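+
+/* Usage sketch (illustrative; not part of the original SDK header):
+ * requesting a HW-managed switch to a 4-bit bus with high-speed timing, per
+ * the SWITCH_EXE description above. Progress can be polled through
+ * MIO_EMM_RSP_STS[SWITCH_VAL]; SWITCH_ERR0..2 report which step failed:
+ *
+ * cvmx_mio_emm_switch_t sw;
+ *
+ * sw.u64 = 0;
+ * sw.s.bus_id = bus;
+ * sw.s.hs_timing = 1;                      // requested HS_TIMING
+ * sw.s.bus_width = 1;                      // 1 = 4-bit bus (see MIO_EMM_MODE)
+ * sw.s.power_class = pclass;               // from EXT_CSD, per MODE comment
+ * sw.s.clk_hi = clk_hi;                    // discarded on any switch error
+ * sw.s.clk_lo = clk_lo;
+ * sw.s.switch_exe = 1;                     // let HW run the SWITCH sequence
+ * cvmx_write_csr(CVMX_MIO_EMM_SWITCH, sw.u64);
+ */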
+
+/**
+ * cvmx_mio_emm_wdog
+ *
+ * MIO_EMM_WDOG = MIO EMMC Watchdog Register
+ *
+ */
+union cvmx_mio_emm_wdog {
+ uint64_t u64;
+ struct cvmx_mio_emm_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63 : 38;
+ uint64_t clk_cnt : 26; /**< Number of CLK_CNT cycles to wait for the card to return a
+ response, read data, or the 3-bit CRC status token. */
+#else
+ uint64_t clk_cnt : 26;
+ uint64_t reserved_26_63 : 38;
+#endif
+ } s;
+ struct cvmx_mio_emm_wdog_s cn61xx;
+ struct cvmx_mio_emm_wdog_s cnf71xx;
+};
+typedef union cvmx_mio_emm_wdog cvmx_mio_emm_wdog_t;
+
+/**
+ * cvmx_mio_fus_bnk_dat#
+ *
+ * Notes:
+ * The initial state of MIO_FUS_BNK_DAT* is as if bank 6 was just read, i.e. DAT* = fus[895:768]
+ *
+ */
+union cvmx_mio_fus_bnk_datx {
+ uint64_t u64;
+ struct cvmx_mio_fus_bnk_datx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat : 64; /**< Efuse bank store
+ For reads, DAT gets the fuse bank last read.
+ For writes, DAT determines which fuses to blow. */
+#else
+ uint64_t dat : 64;
+#endif
+ } s;
+ struct cvmx_mio_fus_bnk_datx_s cn50xx;
+ struct cvmx_mio_fus_bnk_datx_s cn52xx;
+ struct cvmx_mio_fus_bnk_datx_s cn52xxp1;
+ struct cvmx_mio_fus_bnk_datx_s cn56xx;
+ struct cvmx_mio_fus_bnk_datx_s cn56xxp1;
+ struct cvmx_mio_fus_bnk_datx_s cn58xx;
+ struct cvmx_mio_fus_bnk_datx_s cn58xxp1;
+ struct cvmx_mio_fus_bnk_datx_s cn61xx;
+ struct cvmx_mio_fus_bnk_datx_s cn63xx;
+ struct cvmx_mio_fus_bnk_datx_s cn63xxp1;
+ struct cvmx_mio_fus_bnk_datx_s cn66xx;
+ struct cvmx_mio_fus_bnk_datx_s cn68xx;
+ struct cvmx_mio_fus_bnk_datx_s cn68xxp1;
+ struct cvmx_mio_fus_bnk_datx_s cnf71xx;
+};
+typedef union cvmx_mio_fus_bnk_datx cvmx_mio_fus_bnk_datx_t;
+
+/**
+ * cvmx_mio_fus_dat0
+ */
+union cvmx_mio_fus_dat0 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t man_info : 32; /**< Fuse information - manufacturing info [31:0] */
+#else
+ uint64_t man_info : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_mio_fus_dat0_s cn30xx;
+ struct cvmx_mio_fus_dat0_s cn31xx;
+ struct cvmx_mio_fus_dat0_s cn38xx;
+ struct cvmx_mio_fus_dat0_s cn38xxp2;
+ struct cvmx_mio_fus_dat0_s cn50xx;
+ struct cvmx_mio_fus_dat0_s cn52xx;
+ struct cvmx_mio_fus_dat0_s cn52xxp1;
+ struct cvmx_mio_fus_dat0_s cn56xx;
+ struct cvmx_mio_fus_dat0_s cn56xxp1;
+ struct cvmx_mio_fus_dat0_s cn58xx;
+ struct cvmx_mio_fus_dat0_s cn58xxp1;
+ struct cvmx_mio_fus_dat0_s cn61xx;
+ struct cvmx_mio_fus_dat0_s cn63xx;
+ struct cvmx_mio_fus_dat0_s cn63xxp1;
+ struct cvmx_mio_fus_dat0_s cn66xx;
+ struct cvmx_mio_fus_dat0_s cn68xx;
+ struct cvmx_mio_fus_dat0_s cn68xxp1;
+ struct cvmx_mio_fus_dat0_s cnf71xx;
+};
+typedef union cvmx_mio_fus_dat0 cvmx_mio_fus_dat0_t;
+
+/**
+ * cvmx_mio_fus_dat1
+ */
+union cvmx_mio_fus_dat1 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t man_info : 32; /**< Fuse information - manufacturing info [63:32] */
+#else
+ uint64_t man_info : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_mio_fus_dat1_s cn30xx;
+ struct cvmx_mio_fus_dat1_s cn31xx;
+ struct cvmx_mio_fus_dat1_s cn38xx;
+ struct cvmx_mio_fus_dat1_s cn38xxp2;
+ struct cvmx_mio_fus_dat1_s cn50xx;
+ struct cvmx_mio_fus_dat1_s cn52xx;
+ struct cvmx_mio_fus_dat1_s cn52xxp1;
+ struct cvmx_mio_fus_dat1_s cn56xx;
+ struct cvmx_mio_fus_dat1_s cn56xxp1;
+ struct cvmx_mio_fus_dat1_s cn58xx;
+ struct cvmx_mio_fus_dat1_s cn58xxp1;
+ struct cvmx_mio_fus_dat1_s cn61xx;
+ struct cvmx_mio_fus_dat1_s cn63xx;
+ struct cvmx_mio_fus_dat1_s cn63xxp1;
+ struct cvmx_mio_fus_dat1_s cn66xx;
+ struct cvmx_mio_fus_dat1_s cn68xx;
+ struct cvmx_mio_fus_dat1_s cn68xxp1;
+ struct cvmx_mio_fus_dat1_s cnf71xx;
+};
+typedef union cvmx_mio_fus_dat1 cvmx_mio_fus_dat1_t;
+
+/**
+ * cvmx_mio_fus_dat2
+ *
+ * Notes:
+ * CHIP_ID is consumed in several places within Octeon.
+ *
+ * * Core COP0 ProcessorIdentification[Revision]
+ * * Core EJTAG DeviceIdentification[Version]
+ * * PCI_CFG02[RID]
+ * * JTAG controller
+ *
+ * Note: The JTAG controller gets CHIP_ID[3:0] solely from the laser fuses.
+ * Modification to the efuses will not change what the JTAG controller reports
+ * for CHIP_ID.
+ */
+union cvmx_mio_fus_dat2 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t fus118 : 1; /**< Ignore Authentik disable */
+ uint64_t rom_info : 10; /**< Fuse information - ROM info */
+ uint64_t power_limit : 2; /**< Fuse information - Power limit */
+ uint64_t dorm_crypto : 1; /**< Fuse information - See NOCRYPTO */
+ uint64_t fus318 : 1; /**< Reserved */
+ uint64_t raid_en : 1; /**< Fuse information - RAID enabled */
+ uint64_t reserved_30_31 : 2;
+ uint64_t nokasu : 1; /**< Fuse information - Disable Kasumi */
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable */
+ uint64_t nocrypto : 1; /**< Fuse information - DORM_CRYPTO and NOCRYPTO
+ together select 1 of 4 mutually-exclusive
+ modes:
+
+ DORM_CRYPTO=0,NOCRYPTO=0 AES/DES/HASH enabled
+ DORM_CRYPTO=0,NOCRYPTO=1 AES/DES/HASH disabled
+ DORM_CRYPTO=1,NOCRYPTO=0 Dormant Encryption enabled
+ DORM_CRYPTO=1,NOCRYPTO=1 Authentik mode */
+ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */
+ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t reserved_0_15 : 16;
+#else
+ uint64_t reserved_0_15 : 16;
+ uint64_t chip_id : 8;
+ uint64_t bist_dis : 1;
+ uint64_t rst_sht : 1;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t nokasu : 1;
+ uint64_t reserved_30_31 : 2;
+ uint64_t raid_en : 1;
+ uint64_t fus318 : 1;
+ uint64_t dorm_crypto : 1;
+ uint64_t power_limit : 2;
+ uint64_t rom_info : 10;
+ uint64_t fus118 : 1;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_mio_fus_dat2_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable */
+ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */
+ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */
+ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t pll_off : 4; /**< Fuse information - core pll offset
+ Used to compute the base offset for the core pll.
+ The offset will be (PLL_OFF ^ 8).
+ Note: these fuses can only be set from laser fuses. */
+ uint64_t reserved_1_11 : 11;
+ uint64_t pp_dis : 1; /**< Fuse information - PP_DISABLES */
+#else
+ uint64_t pp_dis : 1;
+ uint64_t reserved_1_11 : 11;
+ uint64_t pll_off : 4;
+ uint64_t chip_id : 8;
+ uint64_t bist_dis : 1;
+ uint64_t rst_sht : 1;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn30xx;
+ struct cvmx_mio_fus_dat2_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable */
+ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */
+ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */
+ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t pll_off : 4; /**< Fuse information - core pll offset
+ Used to compute the base offset for the core pll.
+ The offset will be (PLL_OFF ^ 8).
+ Note: these fuses can only be set from laser fuses. */
+ uint64_t reserved_2_11 : 10;
+ uint64_t pp_dis : 2; /**< Fuse information - PP_DISABLES */
+#else
+ uint64_t pp_dis : 2;
+ uint64_t reserved_2_11 : 10;
+ uint64_t pll_off : 4;
+ uint64_t chip_id : 8;
+ uint64_t bist_dis : 1;
+ uint64_t rst_sht : 1;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn31xx;
+ struct cvmx_mio_fus_dat2_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2)
+ (PASS2 Only) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable
+ (PASS2 Only) */
+ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable
+ (PASS2 Only) */
+ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */
+ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t pp_dis : 16; /**< Fuse information - PP_DISABLES */
+#else
+ uint64_t pp_dis : 16;
+ uint64_t chip_id : 8;
+ uint64_t bist_dis : 1;
+ uint64_t rst_sht : 1;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn38xx;
+ struct cvmx_mio_fus_dat2_cn38xx cn38xxp2;
+ struct cvmx_mio_fus_dat2_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t fus318 : 1; /**< Fuse information - a copy of fuse318 */
+ uint64_t raid_en : 1; /**< Fuse information - RAID enabled
+ (5020 does not have RAID co-processor) */
+ uint64_t reserved_30_31 : 2;
+ uint64_t nokasu : 1; /**< Fuse information - Disable Kasumi */
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2)
+ (5020 does not have DFA co-processor) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable */
+ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */
+ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */
+ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t reserved_2_15 : 14;
+ uint64_t pp_dis : 2; /**< Fuse information - PP_DISABLES */
+#else
+ uint64_t pp_dis : 2;
+ uint64_t reserved_2_15 : 14;
+ uint64_t chip_id : 8;
+ uint64_t bist_dis : 1;
+ uint64_t rst_sht : 1;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t nokasu : 1;
+ uint64_t reserved_30_31 : 2;
+ uint64_t raid_en : 1;
+ uint64_t fus318 : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } cn50xx;
+ struct cvmx_mio_fus_dat2_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t fus318 : 1; /**< Fuse information - a copy of fuse318 */
+ uint64_t raid_en : 1; /**< Fuse information - RAID enabled */
+ uint64_t reserved_30_31 : 2;
+ uint64_t nokasu : 1; /**< Fuse information - Disable Kasumi */
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable */
+ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */
+ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */
+ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t reserved_4_15 : 12;
+ uint64_t pp_dis : 4; /**< Fuse information - PP_DISABLES */
+#else
+ uint64_t pp_dis : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t chip_id : 8;
+ uint64_t bist_dis : 1;
+ uint64_t rst_sht : 1;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t nokasu : 1;
+ uint64_t reserved_30_31 : 2;
+ uint64_t raid_en : 1;
+ uint64_t fus318 : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } cn52xx;
+ struct cvmx_mio_fus_dat2_cn52xx cn52xxp1;
+ struct cvmx_mio_fus_dat2_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t fus318 : 1; /**< Fuse information - a copy of fuse318 */
+ uint64_t raid_en : 1; /**< Fuse information - RAID enabled */
+ uint64_t reserved_30_31 : 2;
+ uint64_t nokasu : 1; /**< Fuse information - Disable Kasumi */
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable */
+ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */
+ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */
+ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t reserved_12_15 : 4;
+ uint64_t pp_dis : 12; /**< Fuse information - PP_DISABLES */
+#else
+ uint64_t pp_dis : 12;
+ uint64_t reserved_12_15 : 4;
+ uint64_t chip_id : 8;
+ uint64_t bist_dis : 1;
+ uint64_t rst_sht : 1;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t nokasu : 1;
+ uint64_t reserved_30_31 : 2;
+ uint64_t raid_en : 1;
+ uint64_t fus318 : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } cn56xx;
+ struct cvmx_mio_fus_dat2_cn56xx cn56xxp1;
+ struct cvmx_mio_fus_dat2_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_30_63 : 34;
+ uint64_t nokasu : 1; /**< Fuse information - Disable Kasumi */
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable */
+ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */
+ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */
+ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t pp_dis : 16; /**< Fuse information - PP_DISABLES */
+#else
+ uint64_t pp_dis : 16;
+ uint64_t chip_id : 8;
+ uint64_t bist_dis : 1;
+ uint64_t rst_sht : 1;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t nokasu : 1;
+ uint64_t reserved_30_63 : 34;
+#endif
+ } cn58xx;
+ struct cvmx_mio_fus_dat2_cn58xx cn58xxp1;
+ struct cvmx_mio_fus_dat2_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t fus118 : 1; /**< Ignore Authentik disable */
+ uint64_t rom_info : 10; /**< Fuse information - ROM info */
+ uint64_t power_limit : 2; /**< Fuse information - Power limit */
+ uint64_t dorm_crypto : 1; /**< Fuse information - See NOCRYPTO */
+ uint64_t fus318 : 1; /**< Reserved */
+ uint64_t raid_en : 1; /**< Fuse information - RAID enabled */
+ uint64_t reserved_29_31 : 3;
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable */
+ uint64_t nocrypto : 1; /**< Fuse information - DORM_CRYPTO and NOCRYPTO
+ together select 1 of 4 mutually-exclusive
+ modes:
+
+ DORM_CRYPTO=0,NOCRYPTO=0 AES/DES/HASH enabled
+ DORM_CRYPTO=0,NOCRYPTO=1 AES/DES/HASH disabled
+ DORM_CRYPTO=1,NOCRYPTO=0 Dormant Encryption enabled
+ DORM_CRYPTO=1,NOCRYPTO=1 Authentik mode */
+ uint64_t reserved_24_25 : 2;
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t reserved_4_15 : 12;
+ uint64_t pp_dis : 4; /**< Fuse information - PP_DISABLES */
+#else
+ uint64_t pp_dis : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t chip_id : 8;
+ uint64_t reserved_24_25 : 2;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t reserved_29_31 : 3;
+ uint64_t raid_en : 1;
+ uint64_t fus318 : 1;
+ uint64_t dorm_crypto : 1;
+ uint64_t power_limit : 2;
+ uint64_t rom_info : 10;
+ uint64_t fus118 : 1;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } cn61xx;
+ struct cvmx_mio_fus_dat2_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63 : 29;
+ uint64_t dorm_crypto : 1; /**< Fuse information - Dormant Encryption enable */
+ uint64_t fus318 : 1; /**< Reserved */
+ uint64_t raid_en : 1; /**< Fuse information - RAID enabled */
+ uint64_t reserved_29_31 : 3;
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable */
+ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */
+ uint64_t reserved_24_25 : 2;
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t reserved_6_15 : 10;
+ uint64_t pp_dis : 6; /**< Fuse information - PP_DISABLES */
+#else
+ uint64_t pp_dis : 6;
+ uint64_t reserved_6_15 : 10;
+ uint64_t chip_id : 8;
+ uint64_t reserved_24_25 : 2;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t reserved_29_31 : 3;
+ uint64_t raid_en : 1;
+ uint64_t fus318 : 1;
+ uint64_t dorm_crypto : 1;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } cn63xx;
+ struct cvmx_mio_fus_dat2_cn63xx cn63xxp1;
+ struct cvmx_mio_fus_dat2_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t fus118 : 1; /**< Ignore Authentik disable */
+ uint64_t rom_info : 10; /**< Fuse information - ROM info */
+ uint64_t power_limit : 2; /**< Fuse information - Power limit */
+ uint64_t dorm_crypto : 1; /**< Fuse information - See NOCRYPTO */
+ uint64_t fus318 : 1; /**< Reserved */
+ uint64_t raid_en : 1; /**< Fuse information - RAID enabled */
+ uint64_t reserved_29_31 : 3;
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable */
+ uint64_t nocrypto : 1; /**< Fuse information - DORM_CRYPTO and NOCRYPTO
+ together select 1 of 4 mutually-exclusive
+ modes:
+
+ DORM_CRYPTO=0,NOCRYPTO=0 AES/DES/HASH enabled
+ DORM_CRYPTO=0,NOCRYPTO=1 AES/DES/HASH disabled
+ DORM_CRYPTO=1,NOCRYPTO=0 Dormant Encryption enabled
+ DORM_CRYPTO=1,NOCRYPTO=1 Authentik mode */
+ uint64_t reserved_24_25 : 2;
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t reserved_10_15 : 6;
+ uint64_t pp_dis : 10; /**< Fuse information - PP_DISABLES */
+#else
+ uint64_t pp_dis : 10;
+ uint64_t reserved_10_15 : 6;
+ uint64_t chip_id : 8;
+ uint64_t reserved_24_25 : 2;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t reserved_29_31 : 3;
+ uint64_t raid_en : 1;
+ uint64_t fus318 : 1;
+ uint64_t dorm_crypto : 1;
+ uint64_t power_limit : 2;
+ uint64_t rom_info : 10;
+ uint64_t fus118 : 1;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } cn66xx;
+ struct cvmx_mio_fus_dat2_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t power_limit : 2; /**< Fuse information - Power limit */
+ uint64_t dorm_crypto : 1; /**< Fuse information - Dormant Encryption enable */
+ uint64_t fus318 : 1; /**< Reserved */
+ uint64_t raid_en : 1; /**< Fuse information - RAID enabled */
+ uint64_t reserved_29_31 : 3;
+ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */
+ uint64_t nomul : 1; /**< Fuse information - VMUL disable */
+ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */
+ uint64_t reserved_24_25 : 2;
+ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */
+ uint64_t reserved_0_15 : 16;
+#else
+ uint64_t reserved_0_15 : 16;
+ uint64_t chip_id : 8;
+ uint64_t reserved_24_25 : 2;
+ uint64_t nocrypto : 1;
+ uint64_t nomul : 1;
+ uint64_t nodfa_cp2 : 1;
+ uint64_t reserved_29_31 : 3;
+ uint64_t raid_en : 1;
+ uint64_t fus318 : 1;
+ uint64_t dorm_crypto : 1;
+ uint64_t power_limit : 2;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } cn68xx;
+ struct cvmx_mio_fus_dat2_cn68xx cn68xxp1;
+ struct cvmx_mio_fus_dat2_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_fus_dat2 cvmx_mio_fus_dat2_t;
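+
+/* Usage sketch (illustrative; not part of the original SDK header): fuse data
+ * is decoded with a plain read through the union, e.g. fetching the CHIP_ID
+ * revision that the Notes above list as consumed by COP0/EJTAG/PCI:
+ *
+ * cvmx_mio_fus_dat2_t dat2;
+ *
+ * dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
+ * chip_id = dat2.s.chip_id;
+ */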
+
+/**
+ * cvmx_mio_fus_dat3
+ */
+union cvmx_mio_fus_dat3 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t pll_ctl : 10; /**< Fuse information - PLL control */
+ uint64_t dfa_info_dte : 3; /**< Fuse information - DFA information (DTE) */
+ uint64_t dfa_info_clm : 4; /**< Fuse information - DFA information (Cluster mask) */
+ uint64_t reserved_40_40 : 1;
+ uint64_t ema : 2; /**< Fuse information - EMA */
+ uint64_t efus_lck_rsv : 1; /**< Fuse information - efuse lockdown */
+ uint64_t efus_lck_man : 1; /**< Fuse information - efuse lockdown */
+ uint64_t pll_half_dis : 1; /**< Fuse information - RCLK PLL control */
+ uint64_t l2c_crip : 3; /**< Fuse information - L2C Cripple (1/8, 1/4, 1/2) */
+ uint64_t pll_div4 : 1; /**< Fuse information - PLL DIV4 mode
+ (laser fuse only) */
+ uint64_t reserved_29_30 : 2;
+ uint64_t bar2_en : 1; /**< Fuse information - BAR2 Present (when blown '1') */
+ uint64_t efus_lck : 1; /**< Fuse information - efuse lockdown */
+ uint64_t efus_ign : 1; /**< Fuse information - efuse ignore */
+ uint64_t nozip : 1; /**< Fuse information - ZIP disable */
+ uint64_t nodfa_dte : 1; /**< Fuse information - DFA Disable (DTE) */
+ uint64_t icache : 24; /**< Fuse information - ICACHE Hard Repair Data */
+#else
+ uint64_t icache : 24;
+ uint64_t nodfa_dte : 1;
+ uint64_t nozip : 1;
+ uint64_t efus_ign : 1;
+ uint64_t efus_lck : 1;
+ uint64_t bar2_en : 1;
+ uint64_t reserved_29_30 : 2;
+ uint64_t pll_div4 : 1;
+ uint64_t l2c_crip : 3;
+ uint64_t pll_half_dis : 1;
+ uint64_t efus_lck_man : 1;
+ uint64_t efus_lck_rsv : 1;
+ uint64_t ema : 2;
+ uint64_t reserved_40_40 : 1;
+ uint64_t dfa_info_clm : 4;
+ uint64_t dfa_info_dte : 3;
+ uint64_t pll_ctl : 10;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } s;
+ struct cvmx_mio_fus_dat3_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pll_div4 : 1; /**< Fuse information - PLL DIV4 mode
+ (laser fuse only) */
+ uint64_t reserved_29_30 : 2;
+ uint64_t bar2_en : 1; /**< Fuse information - BAR2 Enable (when blown '1') */
+ uint64_t efus_lck : 1; /**< Fuse information - efuse lockdown */
+ uint64_t efus_ign : 1; /**< Fuse information - efuse ignore
+ This bit only has side effects when blown in
+ the laser fuses. It is ignored if only set in the
+ efuse store. */
+ uint64_t nozip : 1; /**< Fuse information - ZIP disable */
+ uint64_t nodfa_dte : 1; /**< Fuse information - DFA Disable (DTE) */
+ uint64_t icache : 24; /**< Fuse information - ICACHE Hard Repair Data */
+#else
+ uint64_t icache : 24;
+ uint64_t nodfa_dte : 1;
+ uint64_t nozip : 1;
+ uint64_t efus_ign : 1;
+ uint64_t efus_lck : 1;
+ uint64_t bar2_en : 1;
+ uint64_t reserved_29_30 : 2;
+ uint64_t pll_div4 : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn30xx;
+ struct cvmx_mio_fus_dat3_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pll_div4 : 1; /**< Fuse information - PLL DIV4 mode
+ (laser fuse only) */
+ uint64_t zip_crip : 2; /**< Fuse information - Zip Cripple
+ (O2P Only) */
+ uint64_t bar2_en : 1; /**< Fuse information - BAR2 Enable (when blown '1') */
+ uint64_t efus_lck : 1; /**< Fuse information - efuse lockdown */
+ uint64_t efus_ign : 1; /**< Fuse information - efuse ignore
+ This bit only has side effects when blown in
+ the laser fuses. It is ignored if only set in the
+ efuse store. */
+ uint64_t nozip : 1; /**< Fuse information - ZIP disable */
+ uint64_t nodfa_dte : 1; /**< Fuse information - DFA Disable (DTE) */
+ uint64_t icache : 24; /**< Fuse information - ICACHE Hard Repair Data */
+#else
+ uint64_t icache : 24;
+ uint64_t nodfa_dte : 1;
+ uint64_t nozip : 1;
+ uint64_t efus_ign : 1;
+ uint64_t efus_lck : 1;
+ uint64_t bar2_en : 1;
+ uint64_t zip_crip : 2;
+ uint64_t pll_div4 : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn31xx;
+ struct cvmx_mio_fus_dat3_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t zip_crip : 2; /**< Fuse information - Zip Cripple
+ (PASS3 Only) */
+ uint64_t bar2_en : 1; /**< Fuse information - BAR2 Enable (when blown '1')
+ (PASS2 Only) */
+ uint64_t efus_lck : 1; /**< Fuse information - efuse lockdown
+ (PASS2 Only) */
+ uint64_t efus_ign : 1; /**< Fuse information - efuse ignore
+ This bit only has side effects when blown in
+ the laser fuses. It is ignored if only set in the
+ efuse store.
+ (PASS2 Only) */
+ uint64_t nozip : 1; /**< Fuse information - ZIP disable
+ (PASS2 Only) */
+ uint64_t nodfa_dte : 1; /**< Fuse information - DFA Disable (DTE)
+ (PASS2 Only) */
+ uint64_t icache : 24; /**< Fuse information - ICACHE Hard Repair Data */
+#else
+ uint64_t icache : 24;
+ uint64_t nodfa_dte : 1;
+ uint64_t nozip : 1;
+ uint64_t efus_ign : 1;
+ uint64_t efus_lck : 1;
+ uint64_t bar2_en : 1;
+ uint64_t zip_crip : 2;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn38xx;
+ struct cvmx_mio_fus_dat3_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t bar2_en : 1; /**< Fuse information - BAR2 Enable (when blown '1')
+ (PASS2 Only) */
+ uint64_t efus_lck : 1; /**< Fuse information - efuse lockdown
+ (PASS2 Only) */
+ uint64_t efus_ign : 1; /**< Fuse information - efuse ignore
+ This bit only has side effects when blown in
+ the laser fuses. It is ignored if only set in the
+ efuse store.
+ (PASS2 Only) */
+ uint64_t nozip : 1; /**< Fuse information - ZIP disable
+ (PASS2 Only) */
+ uint64_t nodfa_dte : 1; /**< Fuse information - DFA Disable (DTE)
+ (PASS2 Only) */
+ uint64_t icache : 24; /**< Fuse information - ICACHE Hard Repair Data */
+#else
+ uint64_t icache : 24;
+ uint64_t nodfa_dte : 1;
+ uint64_t nozip : 1;
+ uint64_t efus_ign : 1;
+ uint64_t efus_lck : 1;
+ uint64_t bar2_en : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn38xxp2;
+ struct cvmx_mio_fus_dat3_cn38xx cn50xx;
+ struct cvmx_mio_fus_dat3_cn38xx cn52xx;
+ struct cvmx_mio_fus_dat3_cn38xx cn52xxp1;
+ struct cvmx_mio_fus_dat3_cn38xx cn56xx;
+ struct cvmx_mio_fus_dat3_cn38xx cn56xxp1;
+ struct cvmx_mio_fus_dat3_cn38xx cn58xx;
+ struct cvmx_mio_fus_dat3_cn38xx cn58xxp1;
+ struct cvmx_mio_fus_dat3_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t pll_ctl : 10; /**< Fuse information - PLL control */
+ uint64_t dfa_info_dte : 3; /**< Fuse information - DFA information (DTE) */
+ uint64_t dfa_info_clm : 4; /**< Fuse information - DFA information (Cluster mask) */
+ uint64_t reserved_40_40 : 1;
+ uint64_t ema : 2; /**< Fuse information - EMA */
+ uint64_t efus_lck_rsv : 1; /**< Fuse information - efuse lockdown */
+ uint64_t efus_lck_man : 1; /**< Fuse information - efuse lockdown */
+ uint64_t pll_half_dis : 1; /**< Fuse information - RCLK PLL control */
+ uint64_t l2c_crip : 3; /**< Fuse information - L2C Cripple (1/8, 1/4, 1/2) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t zip_info : 2; /**< Fuse information - Zip information */
+ uint64_t bar2_en : 1; /**< Fuse information - BAR2 Present (when blown '1') */
+ uint64_t efus_lck : 1; /**< Fuse information - efuse lockdown */
+ uint64_t efus_ign : 1; /**< Fuse information - efuse ignore */
+ uint64_t nozip : 1; /**< Fuse information - ZIP disable */
+ uint64_t nodfa_dte : 1; /**< Fuse information - DFA Disable (DTE) */
+ uint64_t reserved_0_23 : 24;
+#else
+ uint64_t reserved_0_23 : 24;
+ uint64_t nodfa_dte : 1;
+ uint64_t nozip : 1;
+ uint64_t efus_ign : 1;
+ uint64_t efus_lck : 1;
+ uint64_t bar2_en : 1;
+ uint64_t zip_info : 2;
+ uint64_t reserved_31_31 : 1;
+ uint64_t l2c_crip : 3;
+ uint64_t pll_half_dis : 1;
+ uint64_t efus_lck_man : 1;
+ uint64_t efus_lck_rsv : 1;
+ uint64_t ema : 2;
+ uint64_t reserved_40_40 : 1;
+ uint64_t dfa_info_clm : 4;
+ uint64_t dfa_info_dte : 3;
+ uint64_t pll_ctl : 10;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } cn61xx;
+ struct cvmx_mio_fus_dat3_cn61xx cn63xx;
+ struct cvmx_mio_fus_dat3_cn61xx cn63xxp1;
+ struct cvmx_mio_fus_dat3_cn61xx cn66xx;
+ struct cvmx_mio_fus_dat3_cn61xx cn68xx;
+ struct cvmx_mio_fus_dat3_cn61xx cn68xxp1;
+ struct cvmx_mio_fus_dat3_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_fus_dat3 cvmx_mio_fus_dat3_t;
+
+/**
+ * cvmx_mio_fus_ema
+ *
+ * DON'T PUT IN HRM*
+ *
+ */
+union cvmx_mio_fus_ema {
+ uint64_t u64;
+ struct cvmx_mio_fus_ema_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t eff_ema : 3; /**< Reserved */
+ uint64_t reserved_3_3 : 1;
+ uint64_t ema : 3; /**< Reserved */
+#else
+ uint64_t ema : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t eff_ema : 3;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_mio_fus_ema_s cn50xx;
+ struct cvmx_mio_fus_ema_s cn52xx;
+ struct cvmx_mio_fus_ema_s cn52xxp1;
+ struct cvmx_mio_fus_ema_s cn56xx;
+ struct cvmx_mio_fus_ema_s cn56xxp1;
+ struct cvmx_mio_fus_ema_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t ema : 2; /**< EMA Settings */
+#else
+ uint64_t ema : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn58xx;
+ struct cvmx_mio_fus_ema_cn58xx cn58xxp1;
+ struct cvmx_mio_fus_ema_s cn61xx;
+ struct cvmx_mio_fus_ema_s cn63xx;
+ struct cvmx_mio_fus_ema_s cn63xxp1;
+ struct cvmx_mio_fus_ema_s cn66xx;
+ struct cvmx_mio_fus_ema_s cn68xx;
+ struct cvmx_mio_fus_ema_s cn68xxp1;
+ struct cvmx_mio_fus_ema_s cnf71xx;
+};
+typedef union cvmx_mio_fus_ema cvmx_mio_fus_ema_t;
+
+/**
+ * cvmx_mio_fus_pdf
+ */
+union cvmx_mio_fus_pdf {
+ uint64_t u64;
+ struct cvmx_mio_fus_pdf_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pdf : 64; /**< Fuse information - Product Definition Field */
+#else
+ uint64_t pdf : 64;
+#endif
+ } s;
+ struct cvmx_mio_fus_pdf_s cn50xx;
+ struct cvmx_mio_fus_pdf_s cn52xx;
+ struct cvmx_mio_fus_pdf_s cn52xxp1;
+ struct cvmx_mio_fus_pdf_s cn56xx;
+ struct cvmx_mio_fus_pdf_s cn56xxp1;
+ struct cvmx_mio_fus_pdf_s cn58xx;
+ struct cvmx_mio_fus_pdf_s cn61xx;
+ struct cvmx_mio_fus_pdf_s cn63xx;
+ struct cvmx_mio_fus_pdf_s cn63xxp1;
+ struct cvmx_mio_fus_pdf_s cn66xx;
+ struct cvmx_mio_fus_pdf_s cn68xx;
+ struct cvmx_mio_fus_pdf_s cn68xxp1;
+ struct cvmx_mio_fus_pdf_s cnf71xx;
+};
+typedef union cvmx_mio_fus_pdf cvmx_mio_fus_pdf_t;
+
+/**
+ * cvmx_mio_fus_pll
+ *
+ * Notes:
+ * The core clkout postscaler should be placed in reset at least 10 ref clocks prior to changing
+ * the core clkout select. The core clkout postscaler should remain under reset for at least 10
+ * ref clocks after the core clkout select changes.
+ *
+ * The pnr clkout postscaler should be placed in reset at least 10 ref clocks prior to changing
+ * the pnr clkout select. The pnr clkout postscaler should remain under reset for at least 10
+ * ref clocks after the pnr clkout select changes.
+ */
+union cvmx_mio_fus_pll {
+ uint64_t u64;
+ struct cvmx_mio_fus_pll_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t rclk_align_r : 8; /**< RCLK right alignment settings */
+ uint64_t rclk_align_l : 8; /**< RCLK left alignment settings */
+ uint64_t reserved_8_31 : 24;
+ uint64_t c_cout_rst : 1; /**< Core clkout postscaler reset */
+ uint64_t c_cout_sel : 2; /**< Core clkout select
+ 0=RCLK,1=PS output,2=PLL output,3=undivided RCLK | $PR
+ (***Pass 1.x: 3=GND) */
+ uint64_t pnr_cout_rst : 1; /**< PNR clkout postscaler reset */
+ uint64_t pnr_cout_sel : 2; /**< PNR clkout select
+ 0=SCLK,1=PS output,2=PLL output,3=undivided RCLK | $PR
+ (***Pass 1.x: 3=GND) */
+ uint64_t rfslip : 1; /**< Reserved */
+ uint64_t fbslip : 1; /**< Reserved */
+#else
+ uint64_t fbslip : 1;
+ uint64_t rfslip : 1;
+ uint64_t pnr_cout_sel : 2;
+ uint64_t pnr_cout_rst : 1;
+ uint64_t c_cout_sel : 2;
+ uint64_t c_cout_rst : 1;
+ uint64_t reserved_8_31 : 24;
+ uint64_t rclk_align_l : 8;
+ uint64_t rclk_align_r : 8;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_mio_fus_pll_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t rfslip : 1; /**< PLL reference clock slip */
+ uint64_t fbslip : 1; /**< PLL feedback clock slip */
+#else
+ uint64_t fbslip : 1;
+ uint64_t rfslip : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn50xx;
+ struct cvmx_mio_fus_pll_cn50xx cn52xx;
+ struct cvmx_mio_fus_pll_cn50xx cn52xxp1;
+ struct cvmx_mio_fus_pll_cn50xx cn56xx;
+ struct cvmx_mio_fus_pll_cn50xx cn56xxp1;
+ struct cvmx_mio_fus_pll_cn50xx cn58xx;
+ struct cvmx_mio_fus_pll_cn50xx cn58xxp1;
+ struct cvmx_mio_fus_pll_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t c_cout_rst : 1; /**< Core clkout postscaler reset */
+ uint64_t c_cout_sel : 2; /**< Core clkout select
+ 0=RCLK,1=PS output,2=PLL output,3=undivided RCLK | $PR
+ (***Pass 1.x: 3=GND) */
+ uint64_t pnr_cout_rst : 1; /**< PNR clkout postscaler reset */
+ uint64_t pnr_cout_sel : 2; /**< PNR clkout select
+ 0=SCLK,1=PS output,2=PLL output,3=undivided RCLK | $PR
+ (***Pass 1.x: 3=GND) */
+ uint64_t rfslip : 1; /**< Reserved */
+ uint64_t fbslip : 1; /**< Reserved */
+#else
+ uint64_t fbslip : 1;
+ uint64_t rfslip : 1;
+ uint64_t pnr_cout_sel : 2;
+ uint64_t pnr_cout_rst : 1;
+ uint64_t c_cout_sel : 2;
+ uint64_t c_cout_rst : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn61xx;
+ struct cvmx_mio_fus_pll_cn61xx cn63xx;
+ struct cvmx_mio_fus_pll_cn61xx cn63xxp1;
+ struct cvmx_mio_fus_pll_cn61xx cn66xx;
+ struct cvmx_mio_fus_pll_s cn68xx;
+ struct cvmx_mio_fus_pll_s cn68xxp1;
+ struct cvmx_mio_fus_pll_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_fus_pll cvmx_mio_fus_pll_t;
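+
+/* Illustrative sketch, not part of the SDK: honoring the 10-ref-clock reset
+ * window around a core clkout select change described in the notes above.
+ * CVMX_MIO_FUS_PLL is assumed to be the address macro defined elsewhere in
+ * this file; cvmx_read_csr()/cvmx_write_csr() and cvmx_wait() are assumed
+ * available from cvmx.h. */
+static inline void example_set_core_clkout(unsigned int sel)
+{
+    cvmx_mio_fus_pll_t pll;
+    pll.u64 = cvmx_read_csr(CVMX_MIO_FUS_PLL);
+    pll.s.c_cout_rst = 1;         /* hold the postscaler in reset */
+    cvmx_write_csr(CVMX_MIO_FUS_PLL, pll.u64);
+    cvmx_wait(1000);              /* conservative delay >= 10 ref clocks */
+    pll.s.c_cout_sel = sel & 0x3; /* change the clkout select */
+    cvmx_write_csr(CVMX_MIO_FUS_PLL, pll.u64);
+    cvmx_wait(1000);              /* >= 10 ref clocks after the change */
+    pll.s.c_cout_rst = 0;         /* release the postscaler */
+    cvmx_write_csr(CVMX_MIO_FUS_PLL, pll.u64);
+}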
+
+/**
+ * cvmx_mio_fus_prog
+ *
+ * DON'T PUT IN HRM*
+ *
+ *
+ * Notes:
+ * This CSR is not present in the HRM.
+ *
+ * To write a bank of fuses, SW must set MIO_FUS_WADR[ADDR] to the bank to be
+ * programmed and then set each bit within MIO_FUS_BNK_DATX to indicate which
+ * fuses to blow. Once ADDR and DAT are set up, SW can write to
+ * MIO_FUS_PROG[PROG] to start the bank write and poll on PROG. Once PROG is
+ * clear, the bank write is complete.
+ *
+ * A soft blow is still subject to lockdown fuses. After a soft/warm reset, the
+ * chip will behave as though the fuses were actually blown. A cold reset restores
+ * the actual fuse values.
+ */
+union cvmx_mio_fus_prog {
+ uint64_t u64;
+ struct cvmx_mio_fus_prog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t soft : 1; /**< When set with PROG, causes only the local storage
+ to change. Will not really blow any fuses. HW
+ will clear when the program operation is complete */
+ uint64_t prog : 1; /**< Blow the fuse bank
+ SW will set PROG, and then the HW will clear
+ when the program operation is complete */
+#else
+ uint64_t prog : 1;
+ uint64_t soft : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_mio_fus_prog_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t prog : 1; /**< Blow the fuse
+ SW will set PROG, hold it for 10us, then clear it */
+#else
+ uint64_t prog : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_mio_fus_prog_cn30xx cn31xx;
+ struct cvmx_mio_fus_prog_cn30xx cn38xx;
+ struct cvmx_mio_fus_prog_cn30xx cn38xxp2;
+ struct cvmx_mio_fus_prog_cn30xx cn50xx;
+ struct cvmx_mio_fus_prog_cn30xx cn52xx;
+ struct cvmx_mio_fus_prog_cn30xx cn52xxp1;
+ struct cvmx_mio_fus_prog_cn30xx cn56xx;
+ struct cvmx_mio_fus_prog_cn30xx cn56xxp1;
+ struct cvmx_mio_fus_prog_cn30xx cn58xx;
+ struct cvmx_mio_fus_prog_cn30xx cn58xxp1;
+ struct cvmx_mio_fus_prog_s cn61xx;
+ struct cvmx_mio_fus_prog_s cn63xx;
+ struct cvmx_mio_fus_prog_s cn63xxp1;
+ struct cvmx_mio_fus_prog_s cn66xx;
+ struct cvmx_mio_fus_prog_s cn68xx;
+ struct cvmx_mio_fus_prog_s cn68xxp1;
+ struct cvmx_mio_fus_prog_s cnf71xx;
+};
+typedef union cvmx_mio_fus_prog cvmx_mio_fus_prog_t;
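+
+/* Illustrative sketch, not part of the SDK: the bank-programming sequence from
+ * the notes above (WADR selects the bank, BNK_DATX selects the fuses, PROG
+ * starts the write and is polled until HW clears it). CVMX_MIO_FUS_WADR,
+ * CVMX_MIO_FUS_BNK_DATX(i) and CVMX_MIO_FUS_PROG are assumed to be the
+ * address macros defined elsewhere in this file. */
+static inline void example_blow_fuse_bank(uint64_t bank, uint64_t dat0, uint64_t dat1)
+{
+    cvmx_mio_fus_prog_t prog;
+    cvmx_write_csr(CVMX_MIO_FUS_WADR, bank);        /* bank to program */
+    cvmx_write_csr(CVMX_MIO_FUS_BNK_DATX(0), dat0); /* fuses 63:0 of the bank */
+    cvmx_write_csr(CVMX_MIO_FUS_BNK_DATX(1), dat1); /* fuses 127:64 of the bank */
+    prog.u64 = 0;
+    prog.s.prog = 1;                                /* start the bank write */
+    cvmx_write_csr(CVMX_MIO_FUS_PROG, prog.u64);
+    do {                                            /* write complete once PROG clears */
+        prog.u64 = cvmx_read_csr(CVMX_MIO_FUS_PROG);
+    } while (prog.s.prog);
+}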
+
+/**
+ * cvmx_mio_fus_prog_times
+ *
+ * DON'T PUT IN HRM*
+ *
+ *
+ * Notes:
+ * This CSR is not present in the HRM.
+ *
+ * All values must be > 0 for correct electrical operation.
+ *
+ * IFB fuses are 0..1791
+ * L6G fuses are 1792 to 2047
+ *
+ * The reset values are for IFB fuses with a ref_clk of 100 MHz.
+ */
+union cvmx_mio_fus_prog_times {
+ uint64_t u64;
+ struct cvmx_mio_fus_prog_times_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63 : 29;
+ uint64_t vgate_pin : 1; /**< efuse vgate pin (L6G) */
+ uint64_t fsrc_pin : 1; /**< efuse fsource pin (L6G) */
+ uint64_t prog_pin : 1; /**< efuse program pin (IFB) */
+ uint64_t reserved_6_31 : 26;
+ uint64_t setup : 6; /**< efuse timing param
+
+ SETUP = (tWRS/refclk period)-1
+
+ For IFB: tWRS = 20ns
+ For L6G: tWRS = 20ns */
+#else
+ uint64_t setup : 6;
+ uint64_t reserved_6_31 : 26;
+ uint64_t prog_pin : 1;
+ uint64_t fsrc_pin : 1;
+ uint64_t vgate_pin : 1;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } s;
+ struct cvmx_mio_fus_prog_times_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t prog_pin : 1; /**< efuse program pin */
+ uint64_t out : 8; /**< efuse timing param (ref_clks to delay 10ns) */
+ uint64_t sclk_lo : 4; /**< efuse timing param (ref_clks to delay 5ns) */
+ uint64_t sclk_hi : 12; /**< efuse timing param (ref_clks to delay 1000ns) */
+ uint64_t setup : 8; /**< efuse timing param (ref_clks to delay 10ns) */
+#else
+ uint64_t setup : 8;
+ uint64_t sclk_hi : 12;
+ uint64_t sclk_lo : 4;
+ uint64_t out : 8;
+ uint64_t prog_pin : 1;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } cn50xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn52xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn52xxp1;
+ struct cvmx_mio_fus_prog_times_cn50xx cn56xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn56xxp1;
+ struct cvmx_mio_fus_prog_times_cn50xx cn58xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn58xxp1;
+ struct cvmx_mio_fus_prog_times_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63 : 29;
+ uint64_t vgate_pin : 1; /**< efuse vgate pin (L6G) */
+ uint64_t fsrc_pin : 1; /**< efuse fsource pin (L6G) */
+ uint64_t prog_pin : 1; /**< efuse program pin (IFB) */
+ uint64_t out : 7; /**< efuse timing param
+
+ OUT = (tOUT/refclk period)-1
+
+ For IFB: tOUT = 20ns
+ For L6G: tOUT = 20ns */
+ uint64_t sclk_lo : 4; /**< efuse timing param
+
+ SCLK_LO=(tSLO/refclk period)-1
+
+ For IFB: tSLO = 20ns
+ For L6G: tSLO = 20ns */
+ uint64_t sclk_hi : 15; /**< efuse timing param
+ ***NOTE: Pass 1.x reset value is 20000
+
+ SCLK_HI=(tSHI/refclk period)-1
+
+ For IFB: tSHI = 200us
+ For L6G: tSHI = 25us */
+ uint64_t setup : 6; /**< efuse timing param
+
+ SETUP = (tWRS/refclk period)-1
+
+ For IFB: tWRS = 20ns
+ For L6G: tWRS = 20ns */
+#else
+ uint64_t setup : 6;
+ uint64_t sclk_hi : 15;
+ uint64_t sclk_lo : 4;
+ uint64_t out : 7;
+ uint64_t prog_pin : 1;
+ uint64_t fsrc_pin : 1;
+ uint64_t vgate_pin : 1;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } cn61xx;
+ struct cvmx_mio_fus_prog_times_cn61xx cn63xx;
+ struct cvmx_mio_fus_prog_times_cn61xx cn63xxp1;
+ struct cvmx_mio_fus_prog_times_cn61xx cn66xx;
+ struct cvmx_mio_fus_prog_times_cn61xx cn68xx;
+ struct cvmx_mio_fus_prog_times_cn61xx cn68xxp1;
+ struct cvmx_mio_fus_prog_times_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_fus_prog_times cvmx_mio_fus_prog_times_t;
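+
+/* Illustrative sketch, not part of the SDK: evaluating the (t/refclk period)-1
+ * encoding used by the SETUP field above. For a 100 MHz refclk (10 ns period)
+ * and tWRS = 20 ns this returns 1. Rounding up is an assumption made so the
+ * programmed time never falls short of the target. */
+static inline uint64_t example_fus_prog_setup(uint64_t twrs_ns, uint64_t refclk_hz)
+{
+    uint64_t period_ps = 1000000000000ull / refclk_hz;              /* refclk period in ps */
+    uint64_t cycles = (twrs_ns * 1000 + period_ps - 1) / period_ps; /* round up */
+    return cycles ? cycles - 1 : 0;                                 /* -1 notation, clamped */
+}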
+
+/**
+ * cvmx_mio_fus_rcmd
+ *
+ * Notes:
+ * To read an efuse, SW writes MIO_FUS_RCMD[ADDR,PEND] with the byte address of
+ * the fuse in question, then SW can poll MIO_FUS_RCMD[PEND]. When PEND is
+ * clear, then MIO_FUS_RCMD[DAT] is valid. In addition, if the efuse read went
+ * to the efuse banks (eg. ((ADDR/16) not [0,1,7]) || EFUSE) SW can read
+ * MIO_FUS_BNK_DATX, which contains all 128 fuses in the bank associated with
+ * ADDR.
+ */
+union cvmx_mio_fus_rcmd {
+ uint64_t u64;
+ struct cvmx_mio_fus_rcmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t dat : 8; /**< 8bits of fuse data */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pend : 1; /**< SW sets this bit on a write to start FUSE read
+ operation. HW clears when read is complete and
+ the DAT is valid */
+ uint64_t reserved_9_11 : 3;
+ uint64_t efuse : 1; /**< When set, return data from the efuse storage
+ rather than the local storage */
+ uint64_t addr : 8; /**< The byte address of the fuse to read */
+#else
+ uint64_t addr : 8;
+ uint64_t efuse : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t pend : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t dat : 8;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_mio_fus_rcmd_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t dat : 8; /**< 8bits of fuse data */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pend : 1; /**< SW sets this bit on a write to start FUSE read
+ operation. HW clears when read is complete and
+ the DAT is valid */
+ uint64_t reserved_9_11 : 3;
+ uint64_t efuse : 1; /**< When set, return data from the efuse storage
+ rather than the local storage for the 320 HW fuses */
+ uint64_t reserved_7_7 : 1;
+ uint64_t addr : 7; /**< The byte address of the fuse to read */
+#else
+ uint64_t addr : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t efuse : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t pend : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t dat : 8;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } cn30xx;
+ struct cvmx_mio_fus_rcmd_cn30xx cn31xx;
+ struct cvmx_mio_fus_rcmd_cn30xx cn38xx;
+ struct cvmx_mio_fus_rcmd_cn30xx cn38xxp2;
+ struct cvmx_mio_fus_rcmd_cn30xx cn50xx;
+ struct cvmx_mio_fus_rcmd_s cn52xx;
+ struct cvmx_mio_fus_rcmd_s cn52xxp1;
+ struct cvmx_mio_fus_rcmd_s cn56xx;
+ struct cvmx_mio_fus_rcmd_s cn56xxp1;
+ struct cvmx_mio_fus_rcmd_cn30xx cn58xx;
+ struct cvmx_mio_fus_rcmd_cn30xx cn58xxp1;
+ struct cvmx_mio_fus_rcmd_s cn61xx;
+ struct cvmx_mio_fus_rcmd_s cn63xx;
+ struct cvmx_mio_fus_rcmd_s cn63xxp1;
+ struct cvmx_mio_fus_rcmd_s cn66xx;
+ struct cvmx_mio_fus_rcmd_s cn68xx;
+ struct cvmx_mio_fus_rcmd_s cn68xxp1;
+ struct cvmx_mio_fus_rcmd_s cnf71xx;
+};
+typedef union cvmx_mio_fus_rcmd cvmx_mio_fus_rcmd_t;
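+
+/* Illustrative sketch, not part of the SDK: the read handshake from the notes
+ * above (write ADDR with PEND set, poll PEND, then DAT is valid).
+ * CVMX_MIO_FUS_RCMD is assumed to be the address macro defined elsewhere in
+ * this file. */
+static inline uint8_t example_read_fuse_byte(uint8_t byte_addr)
+{
+    cvmx_mio_fus_rcmd_t rcmd;
+    rcmd.u64 = 0;
+    rcmd.s.addr = byte_addr; /* byte address of the fuse to read */
+    rcmd.s.efuse = 1;        /* read the efuse banks, not local storage */
+    rcmd.s.pend = 1;         /* start the read */
+    cvmx_write_csr(CVMX_MIO_FUS_RCMD, rcmd.u64);
+    do {                     /* DAT is valid once HW clears PEND */
+        rcmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD);
+    } while (rcmd.s.pend);
+    return rcmd.s.dat;
+}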
+
+/**
+ * cvmx_mio_fus_read_times
+ *
+ * Notes:
+ * IFB fuses are 0..1791
+ * L6G fuses are 1792 to 2047
+ *
+ * The reset values are for IFB fuses with a refclk of up to 100 MHz when the core PLL is engaged.
+ *
+ * If any of the formulas above results in a value less than zero, the corresponding
+ * timing parameter should be set to zero.
+ *
+ * Prior to issuing a read to the fuse banks (via. MIO_FUS_RCMD), this register
+ * should be written with the timing parameters which correspond to the fuse bank type (IFB vs L6G)
+ * that will be read.
+ *
+ * This register should not be written while MIO_FUS_RCMD[PEND]=1.
+ */
+union cvmx_mio_fus_read_times {
+ uint64_t u64;
+ struct cvmx_mio_fus_read_times_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63 : 38;
+ uint64_t sch : 4; /**< Hold CS for (SCH+1) refclks after FSET deasserts
+
+ SCH = (tSCH/refclk period)-1
+
+ For IFB: tSCH = 160ns
+ For L6G: tSCH = 10ns */
+ uint64_t fsh : 4; /**< Hold FSET for (FSH+1) refclks after PRCHG deasserts
+
+ FSH = (tFSH/refclk period)-1
+
+ For IFB: tFSH = 160ns
+ For L6G: tFSH = 10ns */
+ uint64_t prh : 4; /**< Assert PRCHG (PRH+1) refclks after SIGDEV deasserts
+
+ PRH = (tPRH/refclk period)-1
+
+ For IFB: tPRH = 70ns
+ For L6G: tPRH = 10ns */
+ uint64_t sdh : 4; /**< Hold SIGDEV for (SDH+1) refclks after FSET asserts
+
+ SDH = (tSDH/refclk period)-1
+
+ For IFB: tSDH = 10ns
+ For L6G: tSDH = 10ns */
+ uint64_t setup : 10; /**< Assert CS for (SETUP+1) refclks before asserting
+ SIGDEV, FSET, or PRCHG
+
+ SETUP=(tRDS/refclk period)-1
+
+ For IFB: tRDS = 10000ns
+ For L6G: tRDS = max(tSCS,tSDS,tPRS)
+ where tSCS = 10ns
+ tSDS = 10ns
+ tPRS = 10ns */
+#else
+ uint64_t setup : 10;
+ uint64_t sdh : 4;
+ uint64_t prh : 4;
+ uint64_t fsh : 4;
+ uint64_t sch : 4;
+ uint64_t reserved_26_63 : 38;
+#endif
+ } s;
+ struct cvmx_mio_fus_read_times_s cn61xx;
+ struct cvmx_mio_fus_read_times_s cn63xx;
+ struct cvmx_mio_fus_read_times_s cn63xxp1;
+ struct cvmx_mio_fus_read_times_s cn66xx;
+ struct cvmx_mio_fus_read_times_s cn68xx;
+ struct cvmx_mio_fus_read_times_s cn68xxp1;
+ struct cvmx_mio_fus_read_times_s cnf71xx;
+};
+typedef union cvmx_mio_fus_read_times cvmx_mio_fus_read_times_t;
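+
+/* Illustrative sketch, not part of the SDK: programming the IFB read timings
+ * for a 100 MHz refclk (10 ns period) before issuing MIO_FUS_RCMD reads, as
+ * the notes above require. Each field uses the (t/refclk period)-1 encoding
+ * with the IFB times quoted in the field comments. CVMX_MIO_FUS_READ_TIMES is
+ * assumed to be the address macro defined elsewhere in this file. */
+static inline void example_set_ifb_read_times(void)
+{
+    cvmx_mio_fus_read_times_t rt;
+    rt.u64 = 0;
+    rt.s.sch   = 160 / 10 - 1;   /* tSCH = 160 ns */
+    rt.s.fsh   = 160 / 10 - 1;   /* tFSH = 160 ns */
+    rt.s.prh   = 70 / 10 - 1;    /* tPRH = 70 ns */
+    rt.s.sdh   = 10 / 10 - 1;    /* tSDH = 10 ns */
+    rt.s.setup = 10000 / 10 - 1; /* tRDS = 10000 ns */
+    cvmx_write_csr(CVMX_MIO_FUS_READ_TIMES, rt.u64);
+}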
+
+/**
+ * cvmx_mio_fus_repair_res0
+ */
+union cvmx_mio_fus_repair_res0 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63 : 9;
+ uint64_t too_many : 1; /**< Too many defects */
+ uint64_t repair2 : 18; /**< BISR Results */
+ uint64_t repair1 : 18; /**< BISR Results */
+ uint64_t repair0 : 18; /**< BISR Results */
+#else
+ uint64_t repair0 : 18;
+ uint64_t repair1 : 18;
+ uint64_t repair2 : 18;
+ uint64_t too_many : 1;
+ uint64_t reserved_55_63 : 9;
+#endif
+ } s;
+ struct cvmx_mio_fus_repair_res0_s cn61xx;
+ struct cvmx_mio_fus_repair_res0_s cn63xx;
+ struct cvmx_mio_fus_repair_res0_s cn63xxp1;
+ struct cvmx_mio_fus_repair_res0_s cn66xx;
+ struct cvmx_mio_fus_repair_res0_s cn68xx;
+ struct cvmx_mio_fus_repair_res0_s cn68xxp1;
+ struct cvmx_mio_fus_repair_res0_s cnf71xx;
+};
+typedef union cvmx_mio_fus_repair_res0 cvmx_mio_fus_repair_res0_t;
+
+/**
+ * cvmx_mio_fus_repair_res1
+ */
+union cvmx_mio_fus_repair_res1 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t repair5 : 18; /**< BISR Results */
+ uint64_t repair4 : 18; /**< BISR Results */
+ uint64_t repair3 : 18; /**< BISR Results */
+#else
+ uint64_t repair3 : 18;
+ uint64_t repair4 : 18;
+ uint64_t repair5 : 18;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_mio_fus_repair_res1_s cn61xx;
+ struct cvmx_mio_fus_repair_res1_s cn63xx;
+ struct cvmx_mio_fus_repair_res1_s cn63xxp1;
+ struct cvmx_mio_fus_repair_res1_s cn66xx;
+ struct cvmx_mio_fus_repair_res1_s cn68xx;
+ struct cvmx_mio_fus_repair_res1_s cn68xxp1;
+ struct cvmx_mio_fus_repair_res1_s cnf71xx;
+};
+typedef union cvmx_mio_fus_repair_res1 cvmx_mio_fus_repair_res1_t;
+
+/**
+ * cvmx_mio_fus_repair_res2
+ */
+union cvmx_mio_fus_repair_res2 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t repair6 : 18; /**< BISR Results */
+#else
+ uint64_t repair6 : 18;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_mio_fus_repair_res2_s cn61xx;
+ struct cvmx_mio_fus_repair_res2_s cn63xx;
+ struct cvmx_mio_fus_repair_res2_s cn63xxp1;
+ struct cvmx_mio_fus_repair_res2_s cn66xx;
+ struct cvmx_mio_fus_repair_res2_s cn68xx;
+ struct cvmx_mio_fus_repair_res2_s cn68xxp1;
+ struct cvmx_mio_fus_repair_res2_s cnf71xx;
+};
+typedef union cvmx_mio_fus_repair_res2 cvmx_mio_fus_repair_res2_t;
+
+/**
+ * cvmx_mio_fus_spr_repair_res
+ *
+ * DON'T PUT IN HRM*
+ *
+ */
+union cvmx_mio_fus_spr_repair_res {
+ uint64_t u64;
+ struct cvmx_mio_fus_spr_repair_res_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63 : 22;
+ uint64_t repair2 : 14; /**< Reserved (see MIO_FUS_REPAIR_RES*) */
+ uint64_t repair1 : 14; /**< Reserved (see MIO_FUS_REPAIR_RES*) */
+ uint64_t repair0 : 14; /**< Reserved (see MIO_FUS_REPAIR_RES*) */
+#else
+ uint64_t repair0 : 14;
+ uint64_t repair1 : 14;
+ uint64_t repair2 : 14;
+ uint64_t reserved_42_63 : 22;
+#endif
+ } s;
+ struct cvmx_mio_fus_spr_repair_res_s cn30xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn31xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn38xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn50xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn52xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn52xxp1;
+ struct cvmx_mio_fus_spr_repair_res_s cn56xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn56xxp1;
+ struct cvmx_mio_fus_spr_repair_res_s cn58xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn58xxp1;
+ struct cvmx_mio_fus_spr_repair_res_s cn61xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn63xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn63xxp1;
+ struct cvmx_mio_fus_spr_repair_res_s cn66xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn68xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn68xxp1;
+ struct cvmx_mio_fus_spr_repair_res_s cnf71xx;
+};
+typedef union cvmx_mio_fus_spr_repair_res cvmx_mio_fus_spr_repair_res_t;
+
+/**
+ * cvmx_mio_fus_spr_repair_sum
+ *
+ * DON'T PUT IN HRM*
+ *
+ */
+union cvmx_mio_fus_spr_repair_sum {
+ uint64_t u64;
+ struct cvmx_mio_fus_spr_repair_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t too_many : 1; /**< Reserved (see MIO_FUS_REPAIR_RES*) */
+#else
+ uint64_t too_many : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_fus_spr_repair_sum_s cn30xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn31xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn38xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn50xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn52xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn52xxp1;
+ struct cvmx_mio_fus_spr_repair_sum_s cn56xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn56xxp1;
+ struct cvmx_mio_fus_spr_repair_sum_s cn58xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn58xxp1;
+ struct cvmx_mio_fus_spr_repair_sum_s cn61xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn63xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn63xxp1;
+ struct cvmx_mio_fus_spr_repair_sum_s cn66xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn68xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn68xxp1;
+ struct cvmx_mio_fus_spr_repair_sum_s cnf71xx;
+};
+typedef union cvmx_mio_fus_spr_repair_sum cvmx_mio_fus_spr_repair_sum_t;
+
+/**
+ * cvmx_mio_fus_tgg
+ *
+ * Notes:
+ * The TGG fuses are fuses[831:768]. The valid bit (TGG[63]) is fuse[831].
+ *
+ */
+union cvmx_mio_fus_tgg {
+ uint64_t u64;
+ struct cvmx_mio_fus_tgg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t val : 1; /**< Out of reset, VAL will return the TGG[63] fuse.
+ Software may write this CSR bit to zero (to hide
+ the value of the TGG fuses). Software cannot write
+ the valid bit to a one, so it is not possible to
+ read the value of the TGG fuses after the valid
+ bit is clear.
+
+ It is never possible to read the value of the TGG
+ fuses directly (ie. the only way to read the value
+ of the TGG fuses is via the MIO_FUS_TGG CSR.)
+
+ Whenever the fuse corresponding to the valid bit
+ (ie. TGG[63]) is blown, it is not possible to blow
+ the other 63 TGG fuses. (ie. only when the TGG[63]
+ fuse is not blown can the other 63 TGG fuses be
+ blown.) The TGG[63] fuse is the one and only fuse
+ lockdown bit for the other 63 TGG fuses. No other
+ fuse lockdown bits can prevent blowing those 63
+ fuses. */
+ uint64_t dat : 63; /**< Whenever VAL is clear, DAT will always read as
+ zero, regardless of the value of the TGG[62:0]
+ fuses.
+
+ Whenever VAL is set, DAT will match the value of
+ other 63 TGG fuses (ie. TGG[62:0]) */
+#else
+ uint64_t dat : 63;
+ uint64_t val : 1;
+#endif
+ } s;
+ struct cvmx_mio_fus_tgg_s cn61xx;
+ struct cvmx_mio_fus_tgg_s cn66xx;
+ struct cvmx_mio_fus_tgg_s cnf71xx;
+};
+typedef union cvmx_mio_fus_tgg cvmx_mio_fus_tgg_t;
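+
+/* Illustrative sketch, not part of the SDK: reading the TGG fuses through this
+ * CSR and then clearing VAL to hide them, which the field description above
+ * says is a one-way operation until the next reset. CVMX_MIO_FUS_TGG is
+ * assumed to be the address macro defined elsewhere in this file. */
+static inline uint64_t example_read_and_hide_tgg(void)
+{
+    cvmx_mio_fus_tgg_t tgg;
+    uint64_t dat;
+    tgg.u64 = cvmx_read_csr(CVMX_MIO_FUS_TGG);
+    dat = tgg.s.val ? tgg.s.dat : 0; /* DAT reads as zero once VAL is clear */
+    tgg.u64 = 0;                     /* VAL can be written to 0 but never back to 1 */
+    cvmx_write_csr(CVMX_MIO_FUS_TGG, tgg.u64);
+    return dat;
+}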
+
+/**
+ * cvmx_mio_fus_unlock
+ */
+union cvmx_mio_fus_unlock {
+ uint64_t u64;
+ struct cvmx_mio_fus_unlock_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t key : 24; /**< When set to the typical value, allows SW to
+ program the efuses */
+#else
+ uint64_t key : 24;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_mio_fus_unlock_s cn30xx;
+ struct cvmx_mio_fus_unlock_s cn31xx;
+};
+typedef union cvmx_mio_fus_unlock cvmx_mio_fus_unlock_t;
+
+/**
+ * cvmx_mio_fus_wadr
+ */
+union cvmx_mio_fus_wadr {
+ uint64_t u64;
+ struct cvmx_mio_fus_wadr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t addr : 10; /**< Which of the banks of 128 fuses to blow */
+#else
+ uint64_t addr : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_mio_fus_wadr_s cn30xx;
+ struct cvmx_mio_fus_wadr_s cn31xx;
+ struct cvmx_mio_fus_wadr_s cn38xx;
+ struct cvmx_mio_fus_wadr_s cn38xxp2;
+ struct cvmx_mio_fus_wadr_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t addr : 2; /**< Which of the four banks of 256 fuses to blow */
+#else
+ uint64_t addr : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn50xx;
+ struct cvmx_mio_fus_wadr_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t addr : 3; /**< Which of the four banks of 256 fuses to blow */
+#else
+ uint64_t addr : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn52xx;
+ struct cvmx_mio_fus_wadr_cn52xx cn52xxp1;
+ struct cvmx_mio_fus_wadr_cn52xx cn56xx;
+ struct cvmx_mio_fus_wadr_cn52xx cn56xxp1;
+ struct cvmx_mio_fus_wadr_cn50xx cn58xx;
+ struct cvmx_mio_fus_wadr_cn50xx cn58xxp1;
+ struct cvmx_mio_fus_wadr_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t addr : 4; /**< Which of the banks of 128 fuses to blow */
+#else
+ uint64_t addr : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn61xx;
+ struct cvmx_mio_fus_wadr_cn61xx cn63xx;
+ struct cvmx_mio_fus_wadr_cn61xx cn63xxp1;
+ struct cvmx_mio_fus_wadr_cn61xx cn66xx;
+ struct cvmx_mio_fus_wadr_cn61xx cn68xx;
+ struct cvmx_mio_fus_wadr_cn61xx cn68xxp1;
+ struct cvmx_mio_fus_wadr_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_fus_wadr cvmx_mio_fus_wadr_t;
+
+/**
+ * cvmx_mio_gpio_comp
+ *
+ * MIO_GPIO_COMP = MIO GPIO Compensation Register
+ *
+ */
+union cvmx_mio_gpio_comp {
+ uint64_t u64;
+ struct cvmx_mio_gpio_comp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t pctl : 6; /**< GPIO bus PCTL */
+ uint64_t nctl : 6; /**< GPIO bus NCTL */
+#else
+ uint64_t nctl : 6;
+ uint64_t pctl : 6;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_mio_gpio_comp_s cn61xx;
+ struct cvmx_mio_gpio_comp_s cn63xx;
+ struct cvmx_mio_gpio_comp_s cn63xxp1;
+ struct cvmx_mio_gpio_comp_s cn66xx;
+ struct cvmx_mio_gpio_comp_s cn68xx;
+ struct cvmx_mio_gpio_comp_s cn68xxp1;
+ struct cvmx_mio_gpio_comp_s cnf71xx;
+};
+typedef union cvmx_mio_gpio_comp cvmx_mio_gpio_comp_t;
+
+/**
+ * cvmx_mio_ndf_dma_cfg
+ *
+ * MIO_NDF_DMA_CFG = MIO NAND Flash DMA Config Register
+ *
+ * SIZE is specified in number of 64 bit transfers (encoded in -1 notation).
+ *
+ * ADR must be 64 bit aligned.
+ */
+union cvmx_mio_ndf_dma_cfg {
+ uint64_t u64;
+ struct cvmx_mio_ndf_dma_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t en : 1; /**< DMA Engine enable */
+ uint64_t rw : 1; /**< DMA Engine R/W bit (0 = read, 1 = write) */
+ uint64_t clr : 1; /**< DMA Engine clear EN on device terminated burst */
+ uint64_t reserved_60_60 : 1;
+ uint64_t swap32 : 1; /**< DMA Engine 32 bit swap */
+ uint64_t swap16 : 1; /**< DMA Engine 16 bit swap */
+ uint64_t swap8 : 1; /**< DMA Engine 8 bit swap */
+ uint64_t endian : 1; /**< DMA Engine NCB endian mode (0 = big, 1 = little) */
+ uint64_t size : 20; /**< DMA Engine size */
+ uint64_t adr : 36; /**< DMA Engine address */
+#else
+ uint64_t adr : 36;
+ uint64_t size : 20;
+ uint64_t endian : 1;
+ uint64_t swap8 : 1;
+ uint64_t swap16 : 1;
+ uint64_t swap32 : 1;
+ uint64_t reserved_60_60 : 1;
+ uint64_t clr : 1;
+ uint64_t rw : 1;
+ uint64_t en : 1;
+#endif
+ } s;
+ struct cvmx_mio_ndf_dma_cfg_s cn52xx;
+ struct cvmx_mio_ndf_dma_cfg_s cn61xx;
+ struct cvmx_mio_ndf_dma_cfg_s cn63xx;
+ struct cvmx_mio_ndf_dma_cfg_s cn63xxp1;
+ struct cvmx_mio_ndf_dma_cfg_s cn66xx;
+ struct cvmx_mio_ndf_dma_cfg_s cn68xx;
+ struct cvmx_mio_ndf_dma_cfg_s cn68xxp1;
+ struct cvmx_mio_ndf_dma_cfg_s cnf71xx;
+};
+typedef union cvmx_mio_ndf_dma_cfg cvmx_mio_ndf_dma_cfg_t;
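+
+/* Illustrative sketch, not part of the SDK: filling MIO_NDF_DMA_CFG per the
+ * notes above, with SIZE in -1 encoded 64-bit transfers and a 64-bit aligned
+ * address. CVMX_MIO_NDF_DMA_CFG is assumed to be the address macro defined
+ * elsewhere in this file. */
+static inline void example_start_ndf_dma(uint64_t phys_addr, uint64_t bytes, int is_write)
+{
+    cvmx_mio_ndf_dma_cfg_t cfg;
+    cfg.u64 = 0;
+    cfg.s.rw = is_write ? 1 : 0;      /* 0 = read, 1 = write */
+    cfg.s.size = (bytes + 7) / 8 - 1; /* 64-bit transfers, -1 notation */
+    cfg.s.adr = phys_addr;            /* low 36 bits; must be 64-bit aligned */
+    cfg.s.en = 1;                     /* start the engine */
+    cvmx_write_csr(CVMX_MIO_NDF_DMA_CFG, cfg.u64);
+}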
+
+/**
+ * cvmx_mio_ndf_dma_int
+ *
+ * MIO_NDF_DMA_INT = MIO NAND Flash DMA Interrupt Register
+ *
+ */
+union cvmx_mio_ndf_dma_int {
+ uint64_t u64;
+ struct cvmx_mio_ndf_dma_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t done : 1; /**< DMA Engine request completion interrupt */
+#else
+ uint64_t done : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_ndf_dma_int_s cn52xx;
+ struct cvmx_mio_ndf_dma_int_s cn61xx;
+ struct cvmx_mio_ndf_dma_int_s cn63xx;
+ struct cvmx_mio_ndf_dma_int_s cn63xxp1;
+ struct cvmx_mio_ndf_dma_int_s cn66xx;
+ struct cvmx_mio_ndf_dma_int_s cn68xx;
+ struct cvmx_mio_ndf_dma_int_s cn68xxp1;
+ struct cvmx_mio_ndf_dma_int_s cnf71xx;
+};
+typedef union cvmx_mio_ndf_dma_int cvmx_mio_ndf_dma_int_t;
+
+/**
+ * cvmx_mio_ndf_dma_int_en
+ *
+ * MIO_NDF_DMA_INT_EN = MIO NAND Flash DMA Interrupt Enable Register
+ *
+ */
+union cvmx_mio_ndf_dma_int_en {
+ uint64_t u64;
+ struct cvmx_mio_ndf_dma_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t done : 1; /**< DMA Engine request completion interrupt enable */
+#else
+ uint64_t done : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_ndf_dma_int_en_s cn52xx;
+ struct cvmx_mio_ndf_dma_int_en_s cn61xx;
+ struct cvmx_mio_ndf_dma_int_en_s cn63xx;
+ struct cvmx_mio_ndf_dma_int_en_s cn63xxp1;
+ struct cvmx_mio_ndf_dma_int_en_s cn66xx;
+ struct cvmx_mio_ndf_dma_int_en_s cn68xx;
+ struct cvmx_mio_ndf_dma_int_en_s cn68xxp1;
+ struct cvmx_mio_ndf_dma_int_en_s cnf71xx;
+};
+typedef union cvmx_mio_ndf_dma_int_en cvmx_mio_ndf_dma_int_en_t;
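+
+/* Illustrative sketch, not part of the SDK: unmasking the DONE interrupt and
+ * later acknowledging it. Treating MIO_NDF_DMA_INT as write-1-to-clear is an
+ * assumption; the comments above do not state the clear semantics.
+ * CVMX_MIO_NDF_DMA_INT and CVMX_MIO_NDF_DMA_INT_EN are assumed to be the
+ * address macros defined elsewhere in this file. */
+static inline void example_ndf_dma_irq(void)
+{
+    cvmx_mio_ndf_dma_int_en_t en;
+    cvmx_mio_ndf_dma_int_t isr;
+    en.u64 = 0;
+    en.s.done = 1; /* unmask the completion interrupt */
+    cvmx_write_csr(CVMX_MIO_NDF_DMA_INT_EN, en.u64);
+    isr.u64 = cvmx_read_csr(CVMX_MIO_NDF_DMA_INT);
+    if (isr.s.done)
+        cvmx_write_csr(CVMX_MIO_NDF_DMA_INT, isr.u64); /* assumed W1C acknowledge */
+}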
+
+/**
+ * cvmx_mio_pll_ctl
+ */
+union cvmx_mio_pll_ctl {
+ uint64_t u64;
+ struct cvmx_mio_pll_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t bw_ctl : 5; /**< Core PLL bandwidth control */
+#else
+ uint64_t bw_ctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_mio_pll_ctl_s cn30xx;
+ struct cvmx_mio_pll_ctl_s cn31xx;
+};
+typedef union cvmx_mio_pll_ctl cvmx_mio_pll_ctl_t;
+
+/**
+ * cvmx_mio_pll_setting
+ */
+union cvmx_mio_pll_setting {
+ uint64_t u64;
+ struct cvmx_mio_pll_setting_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t setting : 17; /**< Core PLL setting */
+#else
+ uint64_t setting : 17;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_mio_pll_setting_s cn30xx;
+ struct cvmx_mio_pll_setting_s cn31xx;
+};
+typedef union cvmx_mio_pll_setting cvmx_mio_pll_setting_t;
+
+/**
+ * cvmx_mio_ptp_ckout_hi_incr
+ *
+ * MIO_PTP_CKOUT_HI_INCR = PTP Clock Out Hi Increment
+ *
+ */
+union cvmx_mio_ptp_ckout_hi_incr {
+ uint64_t u64;
+ struct cvmx_mio_ptp_ckout_hi_incr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec : 32; /**< Nanoseconds */
+ uint64_t frnanosec : 32; /**< Fractions of Nanoseconds */
+#else
+ uint64_t frnanosec : 32;
+ uint64_t nanosec : 32;
+#endif
+ } s;
+ struct cvmx_mio_ptp_ckout_hi_incr_s cn61xx;
+ struct cvmx_mio_ptp_ckout_hi_incr_s cn66xx;
+ struct cvmx_mio_ptp_ckout_hi_incr_s cn68xx;
+ struct cvmx_mio_ptp_ckout_hi_incr_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_ckout_hi_incr cvmx_mio_ptp_ckout_hi_incr_t;
+
+/**
+ * cvmx_mio_ptp_ckout_lo_incr
+ *
+ * MIO_PTP_CKOUT_LO_INCR = PTP Clock Out Lo Increment
+ *
+ */
+union cvmx_mio_ptp_ckout_lo_incr {
+ uint64_t u64;
+ struct cvmx_mio_ptp_ckout_lo_incr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec : 32; /**< Nanoseconds */
+ uint64_t frnanosec : 32; /**< Fractions of Nanoseconds */
+#else
+ uint64_t frnanosec : 32;
+ uint64_t nanosec : 32;
+#endif
+ } s;
+ struct cvmx_mio_ptp_ckout_lo_incr_s cn61xx;
+ struct cvmx_mio_ptp_ckout_lo_incr_s cn66xx;
+ struct cvmx_mio_ptp_ckout_lo_incr_s cn68xx;
+ struct cvmx_mio_ptp_ckout_lo_incr_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_ckout_lo_incr cvmx_mio_ptp_ckout_lo_incr_t;
+
+/**
+ * cvmx_mio_ptp_ckout_thresh_hi
+ *
+ * MIO_PTP_CKOUT_THRESH_HI = Hi bytes of PTP Clock Out
+ *
+ * Writes to MIO_PTP_CKOUT_THRESH_HI also clear MIO_PTP_CKOUT_THRESH_LO. To update all 96 bits, write MIO_PTP_CKOUT_THRESH_HI followed
+ * by MIO_PTP_CKOUT_THRESH_LO
+ */
+union cvmx_mio_ptp_ckout_thresh_hi {
+ uint64_t u64;
+ struct cvmx_mio_ptp_ckout_thresh_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec : 64; /**< Nanoseconds */
+#else
+ uint64_t nanosec : 64;
+#endif
+ } s;
+ struct cvmx_mio_ptp_ckout_thresh_hi_s cn61xx;
+ struct cvmx_mio_ptp_ckout_thresh_hi_s cn66xx;
+ struct cvmx_mio_ptp_ckout_thresh_hi_s cn68xx;
+ struct cvmx_mio_ptp_ckout_thresh_hi_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_ckout_thresh_hi cvmx_mio_ptp_ckout_thresh_hi_t;
+
+/**
+ * cvmx_mio_ptp_ckout_thresh_lo
+ *
+ * MIO_PTP_CKOUT_THRESH_LO = Lo bytes of PTP Clock Out
+ *
+ */
+union cvmx_mio_ptp_ckout_thresh_lo {
+ uint64_t u64;
+ struct cvmx_mio_ptp_ckout_thresh_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t frnanosec : 32; /**< Fractions of Nanoseconds */
+#else
+ uint64_t frnanosec : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_mio_ptp_ckout_thresh_lo_s cn61xx;
+ struct cvmx_mio_ptp_ckout_thresh_lo_s cn66xx;
+ struct cvmx_mio_ptp_ckout_thresh_lo_s cn68xx;
+ struct cvmx_mio_ptp_ckout_thresh_lo_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_ckout_thresh_lo cvmx_mio_ptp_ckout_thresh_lo_t;
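+
+/* Illustrative sketch, not part of the SDK: updating all 96 threshold bits in
+ * the required order, HI first (which also clears LO) and then LO, per the
+ * note above. CVMX_MIO_PTP_CKOUT_THRESH_HI/_LO are assumed to be the address
+ * macros defined elsewhere in this file. */
+static inline void example_set_ckout_thresh(uint64_t nanosec, uint32_t frnanosec)
+{
+    cvmx_mio_ptp_ckout_thresh_lo_t lo;
+    cvmx_write_csr(CVMX_MIO_PTP_CKOUT_THRESH_HI, nanosec); /* also clears LO */
+    lo.u64 = 0;
+    lo.s.frnanosec = frnanosec;
+    cvmx_write_csr(CVMX_MIO_PTP_CKOUT_THRESH_LO, lo.u64);
+}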
+
+/**
+ * cvmx_mio_ptp_clock_cfg
+ *
+ * MIO_PTP_CLOCK_CFG = Configuration
+ *
+ */
+union cvmx_mio_ptp_clock_cfg {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63 : 22;
+ uint64_t pps : 1; /**< PTP PPS Output
+ reflects ptp__pps after PPS_INV inverter */
+ uint64_t ckout : 1; /**< PTP Clock Output
+ reflects ptp__ckout after CKOUT_INV inverter */
+ uint64_t ext_clk_edge : 2; /**< External Clock input edge
+ 00 = rising edge
+ 01 = falling edge
+ 10 = both rising & falling edge
+ 11 = reserved */
+ uint64_t ckout_out4 : 1; /**< Destination for PTP Clock Out output
+ See CKOUT_OUT */
+ uint64_t pps_out : 5; /**< Destination for PTP PPS output to GPIO
+ 0-19 : GPIO[PPS_OUT[4:0]]
+ 20-30 : Reserved
+ 31 : Disabled
+ This should be different from CKOUT_OUT */
+ uint64_t pps_inv : 1; /**< Invert PTP PPS
+ 0 = don't invert
+ 1 = invert */
+ uint64_t pps_en : 1; /**< Enable PTP PPS */
+ uint64_t ckout_out : 4; /**< Destination for PTP Clock Out output to GPIO
+ 0-19 : GPIO[[CKOUT_OUT4,CKOUT_OUT[3:0]]]
+ 20-30 : Reserved
+ 31 : Disabled
+ This should be different from PPS_OUT */
+ uint64_t ckout_inv : 1; /**< Invert PTP Clock Out
+ 0 = don't invert
+ 1 = invert */
+ uint64_t ckout_en : 1; /**< Enable PTP Clock Out */
+ uint64_t evcnt_in : 6; /**< Source for event counter input
+ 0x00-0x0f : GPIO[EVCNT_IN[3:0]]
+ 0x20 : GPIO[16]
+ 0x21 : GPIO[17]
+ 0x22 : GPIO[18]
+ 0x23 : GPIO[19]
+ 0x10 : QLM0_REF_CLK
+ 0x11 : QLM1_REF_CLK
+ 0x18 : RF_MCLK (PHY pin)
+ 0x12-0x17 : Reserved
+ 0x19-0x1f : Reserved
+ 0x24-0x3f : Reserved */
+ uint64_t evcnt_edge : 1; /**< Event counter input edge
+ 0 = falling edge
+ 1 = rising edge */
+ uint64_t evcnt_en : 1; /**< Enable event counter */
+ uint64_t tstmp_in : 6; /**< Source for timestamp input
+ 0x00-0x0f : GPIO[TSTMP_IN[3:0]]
+ 0x20 : GPIO[16]
+ 0x21 : GPIO[17]
+ 0x22 : GPIO[18]
+ 0x23 : GPIO[19]
+ 0x10 : QLM0_REF_CLK
+ 0x11 : QLM1_REF_CLK
+ 0x18 : RF_MCLK (PHY pin)
+ 0x12-0x17 : Reserved
+ 0x19-0x1f : Reserved
+ 0x24-0x3f : Reserved */
+ uint64_t tstmp_edge : 1; /**< External timestamp input edge
+ 0 = falling edge
+ 1 = rising edge */
+ uint64_t tstmp_en : 1; /**< Enable external timestamp */
+ uint64_t ext_clk_in : 6; /**< Source for external clock
+ 0x00-0x0f : GPIO[EXT_CLK_IN[3:0]]
+ 0x20 : GPIO[16]
+ 0x21 : GPIO[17]
+ 0x22 : GPIO[18]
+ 0x23 : GPIO[19]
+ 0x10 : QLM0_REF_CLK
+ 0x11 : QLM1_REF_CLK
+ 0x18 : RF_MCLK (PHY pin)
+ 0x12-0x17 : Reserved
+ 0x19-0x1f : Reserved
+ 0x24-0x3f : Reserved */
+ uint64_t ext_clk_en : 1; /**< Use external clock */
+ uint64_t ptp_en : 1; /**< Enable PTP Module */
+#else
+ uint64_t ptp_en : 1;
+ uint64_t ext_clk_en : 1;
+ uint64_t ext_clk_in : 6;
+ uint64_t tstmp_en : 1;
+ uint64_t tstmp_edge : 1;
+ uint64_t tstmp_in : 6;
+ uint64_t evcnt_en : 1;
+ uint64_t evcnt_edge : 1;
+ uint64_t evcnt_in : 6;
+ uint64_t ckout_en : 1;
+ uint64_t ckout_inv : 1;
+ uint64_t ckout_out : 4;
+ uint64_t pps_en : 1;
+ uint64_t pps_inv : 1;
+ uint64_t pps_out : 5;
+ uint64_t ckout_out4 : 1;
+ uint64_t ext_clk_edge : 2;
+ uint64_t ckout : 1;
+ uint64_t pps : 1;
+ uint64_t reserved_42_63 : 22;
+#endif
+ } s;
+ struct cvmx_mio_ptp_clock_cfg_s cn61xx;
+ struct cvmx_mio_ptp_clock_cfg_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t evcnt_in : 6; /**< Source for event counter input
+ 0x00-0x0f : GPIO[EVCNT_IN[3:0]]
+ 0x10 : QLM0_REF_CLK
+ 0x11 : QLM1_REF_CLK
+ 0x12 : QLM2_REF_CLK
+ 0x13-0x3f : Reserved */
+ uint64_t evcnt_edge : 1; /**< Event counter input edge
+ 0 = falling edge
+ 1 = rising edge */
+ uint64_t evcnt_en : 1; /**< Enable event counter */
+ uint64_t tstmp_in : 6; /**< Source for timestamp input
+ 0x00-0x0f : GPIO[TSTMP_IN[3:0]]
+ 0x10 : QLM0_REF_CLK
+ 0x11 : QLM1_REF_CLK
+ 0x12 : QLM2_REF_CLK
+ 0x13-0x3f : Reserved */
+ uint64_t tstmp_edge : 1; /**< External timestamp input edge
+ 0 = falling edge
+ 1 = rising edge */
+ uint64_t tstmp_en : 1; /**< Enable external timestamp */
+ uint64_t ext_clk_in : 6; /**< Source for external clock
+ 0x00-0x0f : GPIO[EXT_CLK_IN[3:0]]
+ 0x10 : QLM0_REF_CLK
+ 0x11 : QLM1_REF_CLK
+ 0x12 : QLM2_REF_CLK
+ 0x13-0x3f : Reserved */
+ uint64_t ext_clk_en : 1; /**< Use positive edge of external clock */
+ uint64_t ptp_en : 1; /**< Enable PTP Module */
+#else
+ uint64_t ptp_en : 1;
+ uint64_t ext_clk_en : 1;
+ uint64_t ext_clk_in : 6;
+ uint64_t tstmp_en : 1;
+ uint64_t tstmp_edge : 1;
+ uint64_t tstmp_in : 6;
+ uint64_t evcnt_en : 1;
+ uint64_t evcnt_edge : 1;
+ uint64_t evcnt_in : 6;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } cn63xx;
+ struct cvmx_mio_ptp_clock_cfg_cn63xx cn63xxp1;
+ struct cvmx_mio_ptp_clock_cfg_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t ext_clk_edge : 2; /**< External Clock input edge
+ 00 = rising edge
+ 01 = falling edge
+ 10 = both rising & falling edge
+ 11 = reserved */
+ uint64_t ckout_out4 : 1; /**< Destination for PTP Clock Out output
+ 0-19 : GPIO[[CKOUT_OUT4,CKOUT_OUT[3:0]]]
+ This should be different from PPS_OUT */
+ uint64_t pps_out : 5; /**< Destination for PTP PPS output
+ 0-19 : GPIO[PPS_OUT[4:0]]
+ This should be different from CKOUT_OUT */
+ uint64_t pps_inv : 1; /**< Invert PTP PPS
+ 0 = don't invert
+ 1 = invert */
+ uint64_t pps_en : 1; /**< Enable PTP PPS */
+ uint64_t ckout_out : 4; /**< Destination for PTP Clock Out output
+ 0-19 : GPIO[[CKOUT_OUT4,CKOUT_OUT[3:0]]]
+ This should be different from PPS_OUT */
+ uint64_t ckout_inv : 1; /**< Invert PTP Clock Out
+ 0 = don't invert
+ 1 = invert */
+ uint64_t ckout_en : 1; /**< Enable PTP Clock Out */
+ uint64_t evcnt_in : 6; /**< Source for event counter input
+ 0x00-0x0f : GPIO[EVCNT_IN[3:0]]
+ 0x20 : GPIO[16]
+ 0x21 : GPIO[17]
+ 0x22 : GPIO[18]
+ 0x23 : GPIO[19]
+ 0x10 : QLM0_REF_CLK
+ 0x11 : QLM1_REF_CLK
+ 0x12 : QLM2_REF_CLK
+ 0x13-0x1f : Reserved
+ 0x24-0x3f : Reserved */
+ uint64_t evcnt_edge : 1; /**< Event counter input edge
+ 0 = falling edge
+ 1 = rising edge */
+ uint64_t evcnt_en : 1; /**< Enable event counter */
+ uint64_t tstmp_in : 6; /**< Source for timestamp input
+ 0x00-0x0f : GPIO[TSTMP_IN[3:0]]
+ 0x20 : GPIO[16]
+ 0x21 : GPIO[17]
+ 0x22 : GPIO[18]
+ 0x23 : GPIO[19]
+ 0x10 : QLM0_REF_CLK
+ 0x11 : QLM1_REF_CLK
+ 0x12 : QLM2_REF_CLK
+ 0x13-0x1f : Reserved
+ 0x24-0x3f : Reserved */
+ uint64_t tstmp_edge : 1; /**< External timestamp input edge
+ 0 = falling edge
+ 1 = rising edge */
+ uint64_t tstmp_en : 1; /**< Enable external timestamp */
+ uint64_t ext_clk_in : 6; /**< Source for external clock
+ 0x00-0x0f : GPIO[EXT_CLK_IN[3:0]]
+ 0x20 : GPIO[16]
+ 0x21 : GPIO[17]
+ 0x22 : GPIO[18]
+ 0x23 : GPIO[19]
+ 0x10 : QLM0_REF_CLK
+ 0x11 : QLM1_REF_CLK
+ 0x12 : QLM2_REF_CLK
+ 0x13-0x1f : Reserved
+ 0x24-0x3f : Reserved */
+ uint64_t ext_clk_en : 1; /**< Use external clock */
+ uint64_t ptp_en : 1; /**< Enable PTP Module */
+#else
+ uint64_t ptp_en : 1;
+ uint64_t ext_clk_en : 1;
+ uint64_t ext_clk_in : 6;
+ uint64_t tstmp_en : 1;
+ uint64_t tstmp_edge : 1;
+ uint64_t tstmp_in : 6;
+ uint64_t evcnt_en : 1;
+ uint64_t evcnt_edge : 1;
+ uint64_t evcnt_in : 6;
+ uint64_t ckout_en : 1;
+ uint64_t ckout_inv : 1;
+ uint64_t ckout_out : 4;
+ uint64_t pps_en : 1;
+ uint64_t pps_inv : 1;
+ uint64_t pps_out : 5;
+ uint64_t ckout_out4 : 1;
+ uint64_t ext_clk_edge : 2;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } cn66xx;
+ struct cvmx_mio_ptp_clock_cfg_s cn68xx;
+ struct cvmx_mio_ptp_clock_cfg_cn63xx cn68xxp1;
+ struct cvmx_mio_ptp_clock_cfg_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_clock_cfg cvmx_mio_ptp_clock_cfg_t;
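+
+/* Illustrative sketch, not part of the SDK: enabling the PTP module and
+ * routing GPIO[4] to the rising-edge external timestamp input, using the
+ * field encodings listed above (0x00-0x0f selects GPIO[TSTMP_IN[3:0]]).
+ * CVMX_MIO_PTP_CLOCK_CFG is assumed to be the address macro defined elsewhere
+ * in this file. */
+static inline void example_enable_ptp_timestamp(void)
+{
+    cvmx_mio_ptp_clock_cfg_t cfg;
+    cfg.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
+    cfg.s.ptp_en = 1;     /* enable the PTP module */
+    cfg.s.tstmp_in = 0x4; /* timestamp source = GPIO[4] */
+    cfg.s.tstmp_edge = 1; /* 1 = rising edge */
+    cfg.s.tstmp_en = 1;   /* enable external timestamp capture */
+    cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, cfg.u64);
+}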
+
+/**
+ * cvmx_mio_ptp_clock_comp
+ *
+ * MIO_PTP_CLOCK_COMP = Compensator
+ *
+ */
+union cvmx_mio_ptp_clock_comp {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_comp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec : 32; /**< Nanoseconds */
+ uint64_t frnanosec : 32; /**< Fractions of Nanoseconds */
+#else
+ uint64_t frnanosec : 32;
+ uint64_t nanosec : 32;
+#endif
+ } s;
+ struct cvmx_mio_ptp_clock_comp_s cn61xx;
+ struct cvmx_mio_ptp_clock_comp_s cn63xx;
+ struct cvmx_mio_ptp_clock_comp_s cn63xxp1;
+ struct cvmx_mio_ptp_clock_comp_s cn66xx;
+ struct cvmx_mio_ptp_clock_comp_s cn68xx;
+ struct cvmx_mio_ptp_clock_comp_s cn68xxp1;
+ struct cvmx_mio_ptp_clock_comp_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_clock_comp cvmx_mio_ptp_clock_comp_t;
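+
+/* Illustrative sketch, not part of the SDK: reading NANOSEC/FRNANOSEC as a
+ * 32.32 fixed-point nanoseconds-per-tick increment is an assumption based on
+ * the field layout above; under it, a reference clock of freq_hz is
+ * compensated with (10^9 / freq_hz) scaled by 2^32. */
+static inline uint64_t example_ptp_comp_value(uint64_t freq_hz)
+{
+    /* (ns per input clock) << 32, i.e. (10^9 << 32) / freq_hz */
+    return (1000000000ull << 32) / freq_hz;
+}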
+
+/**
+ * cvmx_mio_ptp_clock_hi
+ *
+ * MIO_PTP_CLOCK_HI = Hi bytes of CLOCK
+ *
+ * Writes to MIO_PTP_CLOCK_HI also clear MIO_PTP_CLOCK_LO. To update all 96 bits, write MIO_PTP_CLOCK_HI followed
+ * by MIO_PTP_CLOCK_LO
+ */
+union cvmx_mio_ptp_clock_hi {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec : 64; /**< Nanoseconds */
+#else
+ uint64_t nanosec : 64;
+#endif
+ } s;
+ struct cvmx_mio_ptp_clock_hi_s cn61xx;
+ struct cvmx_mio_ptp_clock_hi_s cn63xx;
+ struct cvmx_mio_ptp_clock_hi_s cn63xxp1;
+ struct cvmx_mio_ptp_clock_hi_s cn66xx;
+ struct cvmx_mio_ptp_clock_hi_s cn68xx;
+ struct cvmx_mio_ptp_clock_hi_s cn68xxp1;
+ struct cvmx_mio_ptp_clock_hi_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_clock_hi cvmx_mio_ptp_clock_hi_t;
+
+/**
+ * cvmx_mio_ptp_clock_lo
+ *
+ * MIO_PTP_CLOCK_LO = Lo bytes of CLOCK
+ *
+ */
+union cvmx_mio_ptp_clock_lo {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t frnanosec : 32; /**< Fractions of Nanoseconds */
+#else
+ uint64_t frnanosec : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_mio_ptp_clock_lo_s cn61xx;
+ struct cvmx_mio_ptp_clock_lo_s cn63xx;
+ struct cvmx_mio_ptp_clock_lo_s cn63xxp1;
+ struct cvmx_mio_ptp_clock_lo_s cn66xx;
+ struct cvmx_mio_ptp_clock_lo_s cn68xx;
+ struct cvmx_mio_ptp_clock_lo_s cn68xxp1;
+ struct cvmx_mio_ptp_clock_lo_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_clock_lo cvmx_mio_ptp_clock_lo_t;
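+
+/* Illustrative sketch, not part of the SDK: setting the 96-bit PTP clock with
+ * the same ordering rule as above, HI (whole nanoseconds) before LO
+ * (fraction), since writing HI clears LO. CVMX_MIO_PTP_CLOCK_HI/_LO are
+ * assumed to be the address macros defined elsewhere in this file. */
+static inline void example_set_ptp_clock(uint64_t nanosec, uint32_t frnanosec)
+{
+    cvmx_mio_ptp_clock_lo_t lo;
+    cvmx_write_csr(CVMX_MIO_PTP_CLOCK_HI, nanosec); /* also clears CLOCK_LO */
+    lo.u64 = 0;
+    lo.s.frnanosec = frnanosec;
+    cvmx_write_csr(CVMX_MIO_PTP_CLOCK_LO, lo.u64);
+}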
+
+/**
+ * cvmx_mio_ptp_evt_cnt
+ *
+ * MIO_PTP_EVT_CNT = Event Counter
+ *
+ * Writes to MIO_PTP_EVT_CNT increment this register by the written data. The register counts down by
+ * 1 for every MIO_PTP_CLOCK_CFG[EVCNT_EDGE] edge of MIO_PTP_CLOCK_CFG[EVCNT_IN]. When the register reaches
+ * 0, an interrupt is generated.
+ */
+union cvmx_mio_ptp_evt_cnt {
+ uint64_t u64;
+ struct cvmx_mio_ptp_evt_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cntr : 64; /**< Nanoseconds */
+#else
+ uint64_t cntr : 64;
+#endif
+ } s;
+ struct cvmx_mio_ptp_evt_cnt_s cn61xx;
+ struct cvmx_mio_ptp_evt_cnt_s cn63xx;
+ struct cvmx_mio_ptp_evt_cnt_s cn63xxp1;
+ struct cvmx_mio_ptp_evt_cnt_s cn66xx;
+ struct cvmx_mio_ptp_evt_cnt_s cn68xx;
+ struct cvmx_mio_ptp_evt_cnt_s cn68xxp1;
+ struct cvmx_mio_ptp_evt_cnt_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_evt_cnt cvmx_mio_ptp_evt_cnt_t;
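+
+/* Illustrative sketch, not part of the SDK: arming the event counter for n
+ * edges. Per the note above, a write adds to the register and it counts down
+ * to zero, where the interrupt fires, so this assumes the counter currently
+ * reads zero. CVMX_MIO_PTP_EVT_CNT is assumed to be the address macro defined
+ * elsewhere in this file. */
+static inline void example_arm_evt_cnt(uint64_t edges)
+{
+    cvmx_write_csr(CVMX_MIO_PTP_EVT_CNT, edges); /* increments by the written data */
+}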
+
+/**
+ * cvmx_mio_ptp_phy_1pps_in
+ *
+ * MIO_PTP_PHY_1PPS_IN = PHY 1PPS input mux selection
+ *
+ */
+union cvmx_mio_ptp_phy_1pps_in {
+ uint64_t u64;
+ struct cvmx_mio_ptp_phy_1pps_in_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t sel : 5; /**< Source for PHY 1pps input signal
+ 0-19 : GPIO[SEL[4:0]], for AGPS_1PPS
+ 24 : PPS_OUT (Enabled by PPS_EN and PPS_INV,
+ reflects ptp_pps after PPS_INV inverter)
+ 20-23 : Reserved
+ 25-30 : Reserved
+ 31 : Disabled */
+#else
+ uint64_t sel : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_mio_ptp_phy_1pps_in_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_phy_1pps_in cvmx_mio_ptp_phy_1pps_in_t;
+
+/**
+ * cvmx_mio_ptp_pps_hi_incr
+ *
+ * MIO_PTP_PPS_HI_INCR = PTP PPS Hi Increment
+ *
+ */
+union cvmx_mio_ptp_pps_hi_incr {
+ uint64_t u64;
+ struct cvmx_mio_ptp_pps_hi_incr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec : 32; /**< Nanoseconds */
+ uint64_t frnanosec : 32; /**< Fractions of Nanoseconds */
+#else
+ uint64_t frnanosec : 32;
+ uint64_t nanosec : 32;
+#endif
+ } s;
+ struct cvmx_mio_ptp_pps_hi_incr_s cn61xx;
+ struct cvmx_mio_ptp_pps_hi_incr_s cn66xx;
+ struct cvmx_mio_ptp_pps_hi_incr_s cn68xx;
+ struct cvmx_mio_ptp_pps_hi_incr_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_pps_hi_incr cvmx_mio_ptp_pps_hi_incr_t;
+
+/**
+ * cvmx_mio_ptp_pps_lo_incr
+ *
+ * MIO_PTP_PPS_LO_INCR = PTP PPS Lo Increment
+ *
+ */
+union cvmx_mio_ptp_pps_lo_incr {
+ uint64_t u64;
+ struct cvmx_mio_ptp_pps_lo_incr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec : 32; /**< Nanoseconds */
+ uint64_t frnanosec : 32; /**< Fractions of Nanoseconds */
+#else
+ uint64_t frnanosec : 32;
+ uint64_t nanosec : 32;
+#endif
+ } s;
+ struct cvmx_mio_ptp_pps_lo_incr_s cn61xx;
+ struct cvmx_mio_ptp_pps_lo_incr_s cn66xx;
+ struct cvmx_mio_ptp_pps_lo_incr_s cn68xx;
+ struct cvmx_mio_ptp_pps_lo_incr_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_pps_lo_incr cvmx_mio_ptp_pps_lo_incr_t;
+
+/**
+ * cvmx_mio_ptp_pps_thresh_hi
+ *
+ * MIO_PTP_PPS_THRESH_HI = Hi bytes of PTP PPS
+ *
+ * Writes to MIO_PTP_PPS_THRESH_HI also clear MIO_PTP_PPS_THRESH_LO. To update all 96 bits, write MIO_PTP_PPS_THRESH_HI followed
+ * by MIO_PTP_PPS_THRESH_LO
+ */
+union cvmx_mio_ptp_pps_thresh_hi {
+ uint64_t u64;
+ struct cvmx_mio_ptp_pps_thresh_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec : 64; /**< Nanoseconds */
+#else
+ uint64_t nanosec : 64;
+#endif
+ } s;
+ struct cvmx_mio_ptp_pps_thresh_hi_s cn61xx;
+ struct cvmx_mio_ptp_pps_thresh_hi_s cn66xx;
+ struct cvmx_mio_ptp_pps_thresh_hi_s cn68xx;
+ struct cvmx_mio_ptp_pps_thresh_hi_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_pps_thresh_hi cvmx_mio_ptp_pps_thresh_hi_t;
+
+/**
+ * cvmx_mio_ptp_pps_thresh_lo
+ *
+ * MIO_PTP_PPS_THRESH_LO = Lo bytes of PTP PPS
+ *
+ */
+union cvmx_mio_ptp_pps_thresh_lo {
+ uint64_t u64;
+ struct cvmx_mio_ptp_pps_thresh_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t frnanosec : 32; /**< Fractions of Nanoseconds */
+#else
+ uint64_t frnanosec : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_mio_ptp_pps_thresh_lo_s cn61xx;
+ struct cvmx_mio_ptp_pps_thresh_lo_s cn66xx;
+ struct cvmx_mio_ptp_pps_thresh_lo_s cn68xx;
+ struct cvmx_mio_ptp_pps_thresh_lo_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_pps_thresh_lo cvmx_mio_ptp_pps_thresh_lo_t;
+
+/**
+ * cvmx_mio_ptp_timestamp
+ *
+ * MIO_PTP_TIMESTAMP = Timestamp latched on MIO_PTP_CLOCK_CFG[TSTMP_EDGE] edge of MIO_PTP_CLOCK_CFG[TSTMP_IN]
+ *
+ */
+union cvmx_mio_ptp_timestamp {
+ uint64_t u64;
+ struct cvmx_mio_ptp_timestamp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec : 64; /**< Nanoseconds */
+#else
+ uint64_t nanosec : 64;
+#endif
+ } s;
+ struct cvmx_mio_ptp_timestamp_s cn61xx;
+ struct cvmx_mio_ptp_timestamp_s cn63xx;
+ struct cvmx_mio_ptp_timestamp_s cn63xxp1;
+ struct cvmx_mio_ptp_timestamp_s cn66xx;
+ struct cvmx_mio_ptp_timestamp_s cn68xx;
+ struct cvmx_mio_ptp_timestamp_s cn68xxp1;
+ struct cvmx_mio_ptp_timestamp_s cnf71xx;
+};
+typedef union cvmx_mio_ptp_timestamp cvmx_mio_ptp_timestamp_t;
+
+/**
+ * cvmx_mio_qlm#_cfg
+ *
+ * Notes:
+ * Certain QLM_SPD values are valid only for certain QLM_CFG configurations; refer to the HRM for
+ * valid combinations. These CSRs are reset only on COLD_RESET. The reset values for QLM_SPD and
+ * QLM_CFG are as follows:
+ *     MIO_QLM0_CFG  SPD=F, CFG=2 SGMII (AGX0)
+ *     MIO_QLM1_CFG  SPD=0, CFG=1 PCIE 2x1 (PEM0/PEM1)
+ */
+union cvmx_mio_qlmx_cfg {
+ uint64_t u64;
+ struct cvmx_mio_qlmx_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t prtmode : 1; /**< Port Mode, value of MIO_RST_CNTLX.PRTMODE[0]
+ 0 = port is EP mode
+ 1 = port is RC mode */
+ uint64_t reserved_12_13 : 2;
+ uint64_t qlm_spd : 4; /**< QLM0 speed for SGMII
+ 0 = 5 Gbaud 100.00 MHz Ref
+ 1 = 2.5 Gbaud 100.00 MHz Ref
+ 2 = 2.5 Gbaud 100.00 MHz Ref
+ 3 = 1.25 Gbaud 100.00 MHz Ref
+ 4 = 1.25 Gbaud 156.25 MHz Ref
+ 5 = 6.25 Gbaud 125.00 MHz Ref
+ 6 = 5 Gbaud 125.00 MHz Ref
+ 7 = 2.5 Gbaud 156.25 MHz Ref
+ 8 = 3.125 Gbaud 125.00 MHz Ref
+ 9 = 2.5 Gbaud 125.00 MHz Ref
+ 10 = 1.25 Gbaud 125.00 MHz Ref
+ 11 = 5 Gbaud 156.25 MHz Ref
+ 12 = 6.25 Gbaud 156.25 MHz Ref
+ 13 = 3.75 Gbaud 156.25 MHz Ref
+ 14 = 3.125 Gbaud 156.25 MHz Ref
+ 15 = QLM Disabled
+
+ QLM1 speed PEM0 PEM1
+ 0 = 2.5/5 2.5/5 Gbaud 100.00 MHz Ref
+ 1 = 2.5 2.5/5 Gbaud 100.00 MHz Ref
+ 2 = 2.5/5 2.5 Gbaud 100.00 MHz Ref
+ 3 = 2.5 2.5 Gbaud 100.00 MHz Ref
+ 4 = 2.5/5 2.5/5 Gbaud 125.00 MHz Ref
+ 6 = 2.5/5 2.5 Gbaud 125.00 MHz Ref
+ 7 = 2.5 2.5 Gbaud 125.00 MHz Ref
+ 9 = 2.5 2.5/5 Gbaud 125.00 MHz Ref
+ 15 = QLM Disabled
+ 5,8,10-14 are reserved */
+ uint64_t reserved_4_7 : 4;
+ uint64_t qlm_cfg : 4; /**< QLM configuration mode
+ For Interface 0:
+ 00 Reserved
+ 01 Reserved
+ 10 SGMII (AGX0)
+ 11 Reserved
+ For Interface 1:
+ 00 PCIE 1x2 (PEM1)
+ 01 PCIE 2x1 (PEM0/PEM1)
+ 1x Reserved */
+#else
+ uint64_t qlm_cfg : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t qlm_spd : 4;
+ uint64_t reserved_12_13 : 2;
+ uint64_t prtmode : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_mio_qlmx_cfg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t prtmode : 1; /**< Port Mode, value of MIO_RST_CNTLX.PRTMODE[0]
+ 0 = port is EP mode
+ 1 = port is RC mode
+ For QLM2, HOST_MODE is always '0' because PCIe
+ is not supported. */
+ uint64_t reserved_12_13 : 2;
+ uint64_t qlm_spd : 4; /**< QLM speed for SGMII/XAUI
+ 0 = 5 Gbaud 100.00 MHz Ref
+ 1 = 2.5 Gbaud 100.00 MHz Ref
+ 2 = 2.5 Gbaud 100.00 MHz Ref
+ 3 = 1.25 Gbaud 100.00 MHz Ref
+ 4 = 1.25 Gbaud 156.25 MHz Ref
+ 5 = 6.25 Gbaud 125.00 MHz Ref
+ 6 = 5 Gbaud 125.00 MHz Ref
+ 7 = 2.5 Gbaud 156.25 MHz Ref
+ 8 = 3.125 Gbaud 125.00 MHz Ref
+ 9 = 2.5 Gbaud 125.00 MHz Ref
+ 10 = 1.25 Gbaud 125.00 MHz Ref
+ 11 = 5 Gbaud 156.25 MHz Ref
+ 12 = 6.25 Gbaud 156.25 MHz Ref
+ 13 = 3.75 Gbaud 156.25 MHz Ref
+ 14 = 3.125 Gbaud 156.25 MHz Ref
+ 15 = QLM Disabled
+
+ QLM speed PEM0 PEM1
+ 0 = 2.5/5 2.5/5 Gbaud 100.00 MHz Ref
+ 1 = 2.5 2.5/5 Gbaud 100.00 MHz Ref
+ 2 = 2.5/5 2.5 Gbaud 100.00 MHz Ref
+ 3 = 2.5 2.5 Gbaud 100.00 MHz Ref
+ 4 = 2.5/5 2.5/5 Gbaud 125.00 MHz Ref
+ 6 = 2.5/5 2.5 Gbaud 125.00 MHz Ref
+ 7 = 2.5 2.5 Gbaud 125.00 MHz Ref
+ 9 = 2.5 2.5/5 Gbaud 125.00 MHz Ref
+ 15 = QLM Disabled
+ 5,8,10-14 are reserved */
+ uint64_t reserved_2_7 : 6;
+ uint64_t qlm_cfg : 2; /**< QLM configuration mode
+ For Interface 0:
+ 00 PCIE 1x4 (PEM0)
+ 01 Reserved
+ 10 SGMII (AGX1)
+ 11 XAUI (AGX1)
+ For Interface 1:
+ 00 PCIE 1x2 (PEM1)
+ 01 PCIE 2x1 (PEM0/PEM1)
+ 10 Reserved
+ 11 Reserved
+ For Interface 2:
+ 00 Reserved
+ 01 Reserved
+ 10 SGMII (AGX0)
+ 11 XAUI (AGX0) */
+#else
+ uint64_t qlm_cfg : 2;
+ uint64_t reserved_2_7 : 6;
+ uint64_t qlm_spd : 4;
+ uint64_t reserved_12_13 : 2;
+ uint64_t prtmode : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } cn61xx;
+ struct cvmx_mio_qlmx_cfg_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t qlm_spd : 4; /**< QLM speed
+ 0 = 5 Gbaud
+ 1 = 2.5 Gbaud
+ 2 = 2.5 Gbaud
+ 3 = 1.25 Gbaud
+ 4 = 1.25 Gbaud
+ 5 = 6.25 Gbaud
+ 6 = 5 Gbaud
+ 7 = 2.5 Gbaud
+ 8 = 3.125 Gbaud
+ 9 = 2.5 Gbaud
+ 10 = 1.25 Gbaud
+ 11 = 5 Gbaud
+ 12 = 6.25 Gbaud
+ 13 = 3.75 Gbaud
+ 14 = 3.125 Gbaud
+ 15 = QLM Disabled */
+ uint64_t reserved_4_7 : 4;
+ uint64_t qlm_cfg : 4; /**< QLM configuration mode
+ 0000 PCIE gen2
+ 0001 SRIO 1x4 short
+ 0010 PCIE gen1 only
+ 0011 SRIO 1x4 long
+ 0100 SRIO 2x2 short
+ 0101 SRIO 4x1 short
+ 0110 SRIO 2x2 long
+ 0111 SRIO 4x1 long
+ 1000 PCIE gen2 (alias)
+ 1001 SGMII
+ 1010 PCIE gen1 only (alias)
+ 1011 XAUI
+ 1100 RESERVED
+ 1101 RESERVED
+ 1110 RESERVED
+ 1111 RESERVED
+ NOTE: Internal encodings differ from QLM_MODE
+ pins encodings */
+#else
+ uint64_t qlm_cfg : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t qlm_spd : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn66xx;
+ struct cvmx_mio_qlmx_cfg_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t qlm_spd : 4; /**< QLM speed
+ 0 = 5 Gbaud 100.00 MHz Ref
+ 1 = 2.5 Gbaud 100.00 MHz Ref
+ 2 = 2.5 Gbaud 100.00 MHz Ref
+ 3 = 1.25 Gbaud 100.00 MHz Ref
+ 4 = 1.25 Gbaud 156.25 MHz Ref
+ 5 = 6.25 Gbaud 125.00 MHz Ref
+ 6 = 5 Gbaud 125.00 MHz Ref
+ 7 = 2.5 Gbaud 156.25 MHz Ref
+ 8 = 3.125 Gbaud 125.00 MHz Ref
+ 9 = 2.5 Gbaud 125.00 MHz Ref
+ 10 = 1.25 Gbaud 125.00 MHz Ref
+ 11 = 5 Gbaud 156.25 MHz Ref
+ 12 = 6.25 Gbaud 156.25 MHz Ref
+ 13 = 3.75 Gbaud 156.25 MHz Ref
+ 14 = 3.125 Gbaud 156.25 MHz Ref
+ 15 = QLM Disabled */
+ uint64_t reserved_3_7 : 5;
+ uint64_t qlm_cfg : 3; /**< QLM configuration mode
+ 000 = PCIE
+ 001 = ILK
+ 010 = SGMII
+ 011 = XAUI
+ 100 = RESERVED
+ 101 = RESERVED
+ 110 = RESERVED
+ 111 = RXAUI
+ NOTE: Internal encodings differ from QLM_MODE
+ pins encodings */
+#else
+ uint64_t qlm_cfg : 3;
+ uint64_t reserved_3_7 : 5;
+ uint64_t qlm_spd : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn68xx;
+ struct cvmx_mio_qlmx_cfg_cn68xx cn68xxp1;
+ struct cvmx_mio_qlmx_cfg_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_qlmx_cfg cvmx_mio_qlmx_cfg_t;
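+
+/* Illustrative sketch, not part of the SDK: checking whether a QLM is in use
+ * via the "15 = QLM Disabled" speed encoding above, assuming the
+ * CVMX_MIO_QLMX_CFG(qlm) address macro defined elsewhere in this file. */
+static inline int example_qlm_enabled(int qlm)
+{
+    cvmx_mio_qlmx_cfg_t cfg;
+    cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
+    return cfg.s.qlm_spd != 0xf; /* 15 = QLM Disabled */
+}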
+
+/**
+ * cvmx_mio_rst_boot
+ *
+ * Notes:
+ * JTCSRDIS, EJTAGDIS, ROMEN reset to 1 in Authentik mode; in all other modes they reset to 0.
+ *
+ */
+union cvmx_mio_rst_boot {
+ uint64_t u64;
+ struct cvmx_mio_rst_boot_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t chipkill : 1; /**< A 0->1 transition of CHIPKILL starts the CHIPKILL
+ timer. When CHIPKILL=1 and the timer expires,
+ internal chip reset is asserted forever until the
+ next chip reset. The CHIPKILL timer can be
+ stopped only by a chip (cold, warm, soft) reset.
+ The length of the CHIPKILL timer is specified by
+ MIO_RST_CKILL[TIMER]. */
+ uint64_t jtcsrdis : 1; /**< If JTCSRDIS=1, internal CSR access via JTAG TAP
+ controller is disabled */
+ uint64_t ejtagdis : 1; /**< If EJTAGDIS=1, external EJTAG access is disabled */
+ uint64_t romen : 1; /**< If ROMEN=1, Authentik/eMMC boot ROM is visible
+ in the boot bus address space. */
+ uint64_t ckill_ppdis : 1; /**< If CK_PPDIS=1, PPs other than 0 are disabled
+ during a CHIPKILL. Writes have no effect when
+ MIO_RST_BOOT[CHIPKILL]=1. */
+ uint64_t jt_tstmode : 1; /**< JTAG test mode */
+ uint64_t reserved_50_57 : 8;
+ uint64_t lboot_ext : 2; /**< Reserved */
+ uint64_t reserved_44_47 : 4;
+ uint64_t qlm4_spd : 4; /**< QLM4_SPD pins sampled at DCOK assertion */
+ uint64_t qlm3_spd : 4; /**< QLM3_SPD pins sampled at DCOK assertion */
+ uint64_t c_mul : 6; /**< Core clock multiplier:
+ C_MUL = (core clk speed) / (ref clock speed)
+ "ref clock speed" should always be 50MHz.
+ If PLL_QLM_REF_CLK_EN=0, "ref clock" comes
+ from PLL_REF_CLK pin.
+ If PLL_QLM_REF_CLK_EN=1, "ref clock" is
+ 1/2 speed of QLMC_REF_CLK_* pins. */
+ uint64_t pnr_mul : 6; /**< Coprocessor clock multiplier:
+ PNR_MUL = (coprocessor clk speed) /
+ (ref clock speed)
+ See C_MUL comments about ref clock. */
+ uint64_t qlm2_spd : 4; /**< QLM2_SPD, report MIO_QLM2_CFG[SPD] */
+ uint64_t qlm1_spd : 4; /**< QLM1_SPD, report MIO_QLM1_CFG[SPD] */
+ uint64_t qlm0_spd : 4; /**< QLM0_SPD, report MIO_QLM0_CFG[SPD] */
+ uint64_t lboot : 10; /**< Last boot cause mask, resets only with dcok.
+
+ bit9 - Soft reset due to watchdog
+ bit8 - Soft reset due to CIU_SOFT_RST write
+ bit7 - Warm reset due to cntl0 link-down or
+ hot-reset
+ bit6 - Warm reset due to cntl1 link-down or
+ hot-reset
+ bit5 - Cntl1 reset due to PERST1_L pin
+ bit4 - Cntl0 reset due to PERST0_L pin
+ bit3 - Warm reset due to PERST1_L pin
+ bit2 - Warm reset due to PERST0_L pin
+ bit1 - Warm reset due to CHIP_RESET_L pin
+ bit0 - Cold reset due to DCOK pin */
+ uint64_t rboot : 1; /**< Determines whether core 0 remains in reset after
+ chip cold/warm/soft reset. */
+ uint64_t rboot_pin : 1; /**< Read-only access to REMOTE_BOOT pin */
+#else
+ uint64_t rboot_pin : 1;
+ uint64_t rboot : 1;
+ uint64_t lboot : 10;
+ uint64_t qlm0_spd : 4;
+ uint64_t qlm1_spd : 4;
+ uint64_t qlm2_spd : 4;
+ uint64_t pnr_mul : 6;
+ uint64_t c_mul : 6;
+ uint64_t qlm3_spd : 4;
+ uint64_t qlm4_spd : 4;
+ uint64_t reserved_44_47 : 4;
+ uint64_t lboot_ext : 2;
+ uint64_t reserved_50_57 : 8;
+ uint64_t jt_tstmode : 1;
+ uint64_t ckill_ppdis : 1;
+ uint64_t romen : 1;
+ uint64_t ejtagdis : 1;
+ uint64_t jtcsrdis : 1;
+ uint64_t chipkill : 1;
+#endif
+ } s;
+ struct cvmx_mio_rst_boot_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t chipkill : 1; /**< A 0->1 transition of CHIPKILL starts the CHIPKILL
+ timer. When CHIPKILL=1 and the timer expires,
+ internal chip reset is asserted forever until the
+ next chip reset. The CHIPKILL timer can be
+ stopped only by a chip (cold, warm, soft) reset.
+ The length of the CHIPKILL timer is specified by
+ MIO_RST_CKILL[TIMER]. */
+ uint64_t jtcsrdis : 1; /**< If JTCSRDIS=1, internal CSR access via JTAG TAP
+ controller is disabled */
+ uint64_t ejtagdis : 1; /**< If EJTAGDIS=1, external EJTAG access is disabled */
+ uint64_t romen : 1; /**< If ROMEN=1, Authentik/eMMC boot ROM is visible
+ in the boot bus address space. */
+ uint64_t ckill_ppdis : 1; /**< If CK_PPDIS=1, PPs other than 0 are disabled
+ during a CHIPKILL. Writes have no effect when
+ MIO_RST_BOOT[CHIPKILL]=1. */
+ uint64_t jt_tstmode : 1; /**< JTAG test mode */
+ uint64_t reserved_50_57 : 8;
+ uint64_t lboot_ext : 2; /**< Reserved */
+ uint64_t reserved_36_47 : 12;
+ uint64_t c_mul : 6; /**< Core clock multiplier:
+ C_MUL = (core clk speed) / (ref clock speed)
+ "ref clock speed" should always be 50MHz.
+ If PLL_QLM_REF_CLK_EN=0, "ref clock" comes
+ from PLL_REF_CLK pin.
+ If PLL_QLM_REF_CLK_EN=1, "ref clock" is
+ 1/2 speed of QLMC_REF_CLK_* pins. */
+ uint64_t pnr_mul : 6; /**< Coprocessor clock multiplier:
+ PNR_MUL = (coprocessor clk speed) /
+ (ref clock speed)
+ See C_MUL comments about ref clock. */
+ uint64_t qlm2_spd : 4; /**< QLM2_SPD, report MIO_QLM2_CFG[SPD] */
+ uint64_t qlm1_spd : 4; /**< QLM1_SPD, report MIO_QLM1_CFG[SPD] */
+ uint64_t qlm0_spd : 4; /**< QLM0_SPD, report MIO_QLM0_CFG[SPD] */
+ uint64_t lboot : 10; /**< Last boot cause mask, resets only with dcok.
+
+ bit9 - Soft reset due to watchdog
+ bit8 - Soft reset due to CIU_SOFT_RST write
+ bit7 - Warm reset due to cntl0 link-down or
+ hot-reset
+ bit6 - Warm reset due to cntl1 link-down or
+ hot-reset
+ bit5 - Cntl1 reset due to PERST1_L pin
+ bit4 - Cntl0 reset due to PERST0_L pin
+ bit3 - Warm reset due to PERST1_L pin
+ bit2 - Warm reset due to PERST0_L pin
+ bit1 - Warm reset due to CHIP_RESET_L pin
+ bit0 - Cold reset due to DCOK pin */
+ uint64_t rboot : 1; /**< Determines whether core 0 remains in reset after
+ chip cold/warm/soft reset. */
+ uint64_t rboot_pin : 1; /**< Read-only access to REMOTE_BOOT pin */
+#else
+ uint64_t rboot_pin : 1;
+ uint64_t rboot : 1;
+ uint64_t lboot : 10;
+ uint64_t qlm0_spd : 4;
+ uint64_t qlm1_spd : 4;
+ uint64_t qlm2_spd : 4;
+ uint64_t pnr_mul : 6;
+ uint64_t c_mul : 6;
+ uint64_t reserved_36_47 : 12;
+ uint64_t lboot_ext : 2;
+ uint64_t reserved_50_57 : 8;
+ uint64_t jt_tstmode : 1;
+ uint64_t ckill_ppdis : 1;
+ uint64_t romen : 1;
+ uint64_t ejtagdis : 1;
+ uint64_t jtcsrdis : 1;
+ uint64_t chipkill : 1;
+#endif
+ } cn61xx;
+ struct cvmx_mio_rst_boot_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t c_mul : 6; /**< Core clock multiplier:
+ C_MUL = (core clk speed) / (ref clock speed)
+ "ref clock speed" should always be 50MHz.
+ If PLL_QLM_REF_CLK_EN=0, "ref clock" comes
+ from PLL_REF_CLK pin.
+ If PLL_QLM_REF_CLK_EN=1, "ref clock" is
+ 1/2 speed of QLMC_REF_CLK_* pins. */
+ uint64_t pnr_mul : 6; /**< Coprocessor clock multiplier:
+ PNR_MUL = (coprocessor clk speed) /
+ (ref clock speed)
+ See C_MUL comments about ref clock. */
+ uint64_t qlm2_spd : 4; /**< QLM2_SPD pins sampled at DCOK assertion */
+ uint64_t qlm1_spd : 4; /**< QLM1_SPD pins sampled at DCOK assertion */
+ uint64_t qlm0_spd : 4; /**< QLM0_SPD pins sampled at DCOK assertion */
+ uint64_t lboot : 10; /**< Last boot cause mask, resets only with dcok.
+
+ bit9 - Soft reset due to watchdog
+ bit8 - Soft reset due to CIU_SOFT_RST write
+ bit7 - Warm reset due to cntl0 link-down or
+ hot-reset
+ bit6 - Warm reset due to cntl1 link-down or
+ hot-reset
+ bit5 - Cntl1 reset due to PERST1_L pin
+ bit4 - Cntl0 reset due to PERST0_L pin
+ bit3 - Warm reset due to PERST1_L pin
+ bit2 - Warm reset due to PERST0_L pin
+ bit1 - Warm reset due to CHIP_RESET_L pin
+ bit0 - Cold reset due to DCOK pin */
+ uint64_t rboot : 1; /**< Determines whether core 0 remains in reset after
+ chip cold/warm/soft reset. */
+ uint64_t rboot_pin : 1; /**< Read-only access to REMOTE_BOOT pin */
+#else
+ uint64_t rboot_pin : 1;
+ uint64_t rboot : 1;
+ uint64_t lboot : 10;
+ uint64_t qlm0_spd : 4;
+ uint64_t qlm1_spd : 4;
+ uint64_t qlm2_spd : 4;
+ uint64_t pnr_mul : 6;
+ uint64_t c_mul : 6;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn63xx;
+ struct cvmx_mio_rst_boot_cn63xx cn63xxp1;
+ struct cvmx_mio_rst_boot_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t chipkill : 1; /**< A 0->1 transition of CHIPKILL starts the CHIPKILL
+ timer. When CHIPKILL=1 and the timer expires,
+ internal chip reset is asserted forever until the
+ next chip reset. The CHIPKILL timer can be
+ stopped only by a chip (cold, warm, soft) reset.
+ The length of the CHIPKILL timer is specified by
+ MIO_RST_CKILL[TIMER]. */
+ uint64_t jtcsrdis : 1; /**< If JTCSRDIS=1, internal CSR access via JTAG TAP
+ controller is disabled */
+ uint64_t ejtagdis : 1; /**< If EJTAGDIS=1, external EJTAG access is disabled */
+ uint64_t romen : 1; /**< If ROMEN=1, Authentik ROM is visible in the boot
+ bus address space. */
+ uint64_t ckill_ppdis : 1; /**< If CK_PPDIS=1, PPs other than 0 are disabled
+ during a CHIPKILL. Writes have no effect when
+ MIO_RST_BOOT[CHIPKILL]=1. */
+ uint64_t reserved_50_58 : 9;
+ uint64_t lboot_ext : 2; /**< Extended Last boot cause mask, resets only with
+ dcok.
+
+ bit1 - Warm reset due to cntl3 link-down or
+ hot-reset
+ bit0 - Warm reset due to cntl2 link-down or
+ hot-reset */
+ uint64_t reserved_36_47 : 12;
+ uint64_t c_mul : 6; /**< Core clock multiplier:
+ C_MUL = (core clk speed) / (ref clock speed)
+ "ref clock speed" should always be 50MHz.
+ If PLL_QLM_REF_CLK_EN=0, "ref clock" comes
+ from PLL_REF_CLK pin.
+ If PLL_QLM_REF_CLK_EN=1, "ref clock" is
+ 1/2 speed of QLMC_REF_CLK_* pins. */
+ uint64_t pnr_mul : 6; /**< Coprocessor clock multiplier:
+ PNR_MUL = (coprocessor clk speed) /
+ (ref clock speed)
+ See C_MUL comments about ref clock. */
+ uint64_t qlm2_spd : 4; /**< QLM2_SPD pins sampled at DCOK assertion */
+ uint64_t qlm1_spd : 4; /**< QLM1_SPD pins sampled at DCOK assertion */
+ uint64_t qlm0_spd : 4; /**< QLM0_SPD pins sampled at DCOK assertion */
+ uint64_t lboot : 10; /**< Last boot cause mask, resets only with dcok.
+
+ bit9 - Soft reset due to watchdog
+ bit8 - Soft reset due to CIU_SOFT_RST write
+ bit7 - Warm reset due to cntl0 link-down or
+ hot-reset
+ bit6 - Warm reset due to cntl1 link-down or
+ hot-reset
+ bit5 - Cntl1 reset due to PERST1_L pin
+ bit4 - Cntl0 reset due to PERST0_L pin
+ bit3 - Warm reset due to PERST1_L pin
+ bit2 - Warm reset due to PERST0_L pin
+ bit1 - Warm reset due to CHIP_RESET_L pin
+ bit0 - Cold reset due to DCOK pin */
+ uint64_t rboot : 1; /**< Determines whether core 0 remains in reset
+ after chip cold/warm/soft reset. */
+ uint64_t rboot_pin : 1; /**< Read-only access to REMOTE_BOOT pin */
+#else
+ uint64_t rboot_pin : 1;
+ uint64_t rboot : 1;
+ uint64_t lboot : 10;
+ uint64_t qlm0_spd : 4;
+ uint64_t qlm1_spd : 4;
+ uint64_t qlm2_spd : 4;
+ uint64_t pnr_mul : 6;
+ uint64_t c_mul : 6;
+ uint64_t reserved_36_47 : 12;
+ uint64_t lboot_ext : 2;
+ uint64_t reserved_50_58 : 9;
+ uint64_t ckill_ppdis : 1;
+ uint64_t romen : 1;
+ uint64_t ejtagdis : 1;
+ uint64_t jtcsrdis : 1;
+ uint64_t chipkill : 1;
+#endif
+ } cn66xx;
+ struct cvmx_mio_rst_boot_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t jt_tstmode : 1; /**< JTAG test mode */
+ uint64_t reserved_44_57 : 14;
+ uint64_t qlm4_spd : 4; /**< QLM4_SPD pins sampled at DCOK assertion */
+ uint64_t qlm3_spd : 4; /**< QLM3_SPD pins sampled at DCOK assertion */
+ uint64_t c_mul : 6; /**< Core clock multiplier:
+ C_MUL = (core clk speed) / (ref clock speed)
+ "ref clock" is PLL_REF_CLK pin, which should
+ always be 50 MHz. */
+ uint64_t pnr_mul : 6; /**< Coprocessor clock multiplier:
+ PNR_MUL = (coprocessor clk speed) /
+ (ref clock speed)
+ See C_MUL comments about ref clock. */
+ uint64_t qlm2_spd : 4; /**< QLM2_SPD pins sampled at DCOK assertion */
+ uint64_t qlm1_spd : 4; /**< QLM1_SPD pins sampled at DCOK assertion */
+ uint64_t qlm0_spd : 4; /**< QLM0_SPD pins sampled at DCOK assertion */
+ uint64_t lboot : 10; /**< Last boot cause mask, resets only with dcok.
+
+ bit9 - Soft reset due to watchdog
+ bit8 - Soft reset due to CIU_SOFT_RST write
+ bit7 - Warm reset due to cntl0 link-down or
+ hot-reset
+ bit6 - Warm reset due to cntl1 link-down or
+ hot-reset
+ bit5 - Cntl1 reset due to PERST1_L pin
+ bit4 - Cntl0 reset due to PERST0_L pin
+ bit3 - Warm reset due to PERST1_L pin
+ bit2 - Warm reset due to PERST0_L pin
+ bit1 - Warm reset due to CHIP_RESET_L pin
+ bit0 - Cold reset due to DCOK pin */
+ uint64_t rboot : 1; /**< Determines whether core 0 remains in reset
+ after chip cold/warm/soft reset. */
+ uint64_t rboot_pin : 1; /**< Read-only access to REMOTE_BOOT pin */
+#else
+ uint64_t rboot_pin : 1;
+ uint64_t rboot : 1;
+ uint64_t lboot : 10;
+ uint64_t qlm0_spd : 4;
+ uint64_t qlm1_spd : 4;
+ uint64_t qlm2_spd : 4;
+ uint64_t pnr_mul : 6;
+ uint64_t c_mul : 6;
+ uint64_t qlm3_spd : 4;
+ uint64_t qlm4_spd : 4;
+ uint64_t reserved_44_57 : 14;
+ uint64_t jt_tstmode : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn68xx;
+ struct cvmx_mio_rst_boot_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t qlm4_spd : 4; /**< QLM4_SPD pins sampled at DCOK assertion */
+ uint64_t qlm3_spd : 4; /**< QLM3_SPD pins sampled at DCOK assertion */
+ uint64_t c_mul : 6; /**< Core clock multiplier:
+ C_MUL = (core clk speed) / (ref clock speed)
+ "ref clock" is PLL_REF_CLK pin, which should
+ always be 50 MHz. */
+ uint64_t pnr_mul : 6; /**< Coprocessor clock multiplier:
+ PNR_MUL = (coprocessor clk speed) /
+ (ref clock speed)
+ See C_MUL comments about ref clock. */
+ uint64_t qlm2_spd : 4; /**< QLM2_SPD pins sampled at DCOK assertion */
+ uint64_t qlm1_spd : 4; /**< QLM1_SPD pins sampled at DCOK assertion */
+ uint64_t qlm0_spd : 4; /**< QLM0_SPD pins sampled at DCOK assertion */
+ uint64_t lboot : 10; /**< Last boot cause mask, resets only with dcok.
+
+ bit9 - Soft reset due to watchdog
+ bit8 - Soft reset due to CIU_SOFT_RST write
+ bit7 - Warm reset due to cntl0 link-down or
+ hot-reset
+ bit6 - Warm reset due to cntl1 link-down or
+ hot-reset
+ bit5 - Cntl1 reset due to PERST1_L pin
+ bit4 - Cntl0 reset due to PERST0_L pin
+ bit3 - Warm reset due to PERST1_L pin
+ bit2 - Warm reset due to PERST0_L pin
+ bit1 - Warm reset due to CHIP_RESET_L pin
+ bit0 - Cold reset due to DCOK pin */
+ uint64_t rboot : 1; /**< Determines whether core 0 remains in reset
+ after chip cold/warm/soft reset. */
+ uint64_t rboot_pin : 1; /**< Read-only access to REMOTE_BOOT pin */
+#else
+ uint64_t rboot_pin : 1;
+ uint64_t rboot : 1;
+ uint64_t lboot : 10;
+ uint64_t qlm0_spd : 4;
+ uint64_t qlm1_spd : 4;
+ uint64_t qlm2_spd : 4;
+ uint64_t pnr_mul : 6;
+ uint64_t c_mul : 6;
+ uint64_t qlm3_spd : 4;
+ uint64_t qlm4_spd : 4;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } cn68xxp1;
+ struct cvmx_mio_rst_boot_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_rst_boot cvmx_mio_rst_boot_t;
+
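+/* Usage sketch (editor's illustration, not part of the imported SDK file):
+ * recover the core clock from MIO_RST_BOOT via the common "s" view of the
+ * union. Assumes the cvmx_read_csr() accessor and the CVMX_MIO_RST_BOOT
+ * address macro from elsewhere in the SDK, and the nominal 50 MHz
+ * reference clock described in the C_MUL field comments above. */
+static inline uint64_t example_core_clock_hz(void)
+{
+    cvmx_mio_rst_boot_t rst_boot;
+    rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+    /* C_MUL multiplies the (nominally 50 MHz) reference clock; LBOOT in
+     * the same register holds the last-boot-cause mask decoded above. */
+    return (uint64_t)rst_boot.s.c_mul * 50000000ull;
+}
+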
+/**
+ * cvmx_mio_rst_cfg
+ *
+ * Notes:
+ * Cold reset always performs a full bist.
+ *
+ */
+union cvmx_mio_rst_cfg {
+ uint64_t u64;
+ struct cvmx_mio_rst_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t cntl_clr_bist : 1; /**< Perform clear bist during cntl only reset,
+ instead of a full bist. A warm/soft reset will
+ not change this field. */
+ uint64_t warm_clr_bist : 1; /**< Perform clear bist during warm reset, instead
+ of a full bist. A warm/soft reset will not
+ change this field. */
+ uint64_t soft_clr_bist : 1; /**< Perform clear bist during soft reset, instead
+ of a full bist. A warm/soft reset will not
+ change this field. */
+#else
+ uint64_t soft_clr_bist : 1;
+ uint64_t warm_clr_bist : 1;
+ uint64_t cntl_clr_bist : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_mio_rst_cfg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bist_delay : 58; /**< Reserved */
+ uint64_t reserved_3_5 : 3;
+ uint64_t cntl_clr_bist : 1; /**< Perform clear bist during cntl only reset,
+ instead of a full bist. A warm/soft reset will
+ not change this field. */
+ uint64_t warm_clr_bist : 1; /**< Perform clear bist during warm reset, instead
+ of a full bist. A warm/soft reset will not
+ change this field. */
+ uint64_t soft_clr_bist : 1; /**< Perform clear bist during soft reset, instead
+ of a full bist. A warm/soft reset will not
+ change this field. */
+#else
+ uint64_t soft_clr_bist : 1;
+ uint64_t warm_clr_bist : 1;
+ uint64_t cntl_clr_bist : 1;
+ uint64_t reserved_3_5 : 3;
+ uint64_t bist_delay : 58;
+#endif
+ } cn61xx;
+ struct cvmx_mio_rst_cfg_cn61xx cn63xx;
+ struct cvmx_mio_rst_cfg_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bist_delay : 58; /**< Reserved */
+ uint64_t reserved_2_5 : 4;
+ uint64_t warm_clr_bist : 1; /**< Perform clear bist during warm reset, instead
+ of a full bist. A warm/soft reset will not
+ change this field. */
+ uint64_t soft_clr_bist : 1; /**< Perform clear bist during soft reset, instead
+ of a full bist. A warm/soft reset will not
+ change this field. */
+#else
+ uint64_t soft_clr_bist : 1;
+ uint64_t warm_clr_bist : 1;
+ uint64_t reserved_2_5 : 4;
+ uint64_t bist_delay : 58;
+#endif
+ } cn63xxp1;
+ struct cvmx_mio_rst_cfg_cn61xx cn66xx;
+ struct cvmx_mio_rst_cfg_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bist_delay : 56; /**< Reserved */
+ uint64_t reserved_3_7 : 5;
+ uint64_t cntl_clr_bist : 1; /**< Perform clear bist during cntl only reset,
+ instead of a full bist. A warm/soft reset will
+ not change this field. */
+ uint64_t warm_clr_bist : 1; /**< Perform clear bist during warm reset, instead
+ of a full bist. A warm/soft reset will not
+ change this field. */
+ uint64_t soft_clr_bist : 1; /**< Perform clear bist during soft reset, instead
+ of a full bist. A warm/soft reset will not
+ change this field. */
+#else
+ uint64_t soft_clr_bist : 1;
+ uint64_t warm_clr_bist : 1;
+ uint64_t cntl_clr_bist : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t bist_delay : 56;
+#endif
+ } cn68xx;
+ struct cvmx_mio_rst_cfg_cn68xx cn68xxp1;
+ struct cvmx_mio_rst_cfg_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_rst_cfg cvmx_mio_rst_cfg_t;
+
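+/* Usage sketch (editor's illustration, not part of the imported SDK file):
+ * request the abbreviated "clear" bist instead of a full bist for warm and
+ * soft resets, using the usual read-modify-write idiom on the union.
+ * Assumes cvmx_read_csr()/cvmx_write_csr() and the CVMX_MIO_RST_CFG
+ * address macro. Per the note above, cold reset still runs a full bist. */
+static inline void example_select_clear_bist(void)
+{
+    cvmx_mio_rst_cfg_t rst_cfg;
+    rst_cfg.u64 = cvmx_read_csr(CVMX_MIO_RST_CFG);
+    rst_cfg.s.warm_clr_bist = 1; /* clear bist on warm reset */
+    rst_cfg.s.soft_clr_bist = 1; /* clear bist on soft reset */
+    cvmx_write_csr(CVMX_MIO_RST_CFG, rst_cfg.u64);
+}
+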
+/**
+ * cvmx_mio_rst_ckill
+ *
+ * MIO_RST_CKILL = MIO Chipkill Timer Register
+ *
+ */
+union cvmx_mio_rst_ckill {
+ uint64_t u64;
+ struct cvmx_mio_rst_ckill_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t timer : 47; /**< CHIPKILL timer measured in SCLKs. Reads return
+ the current CHIPKILL timer. Writes have no
+ effect when MIO_RST_BOOT[CHIPKILL]=1. */
+#else
+ uint64_t timer : 47;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_mio_rst_ckill_s cn61xx;
+ struct cvmx_mio_rst_ckill_s cn66xx;
+ struct cvmx_mio_rst_ckill_s cnf71xx;
+};
+typedef union cvmx_mio_rst_ckill cvmx_mio_rst_ckill_t;
+
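+/* Usage sketch (editor's illustration, not part of the imported SDK file):
+ * load the CHIPKILL timer. TIMER counts SCLK cycles and, per the field
+ * note, ignores writes once MIO_RST_BOOT[CHIPKILL]=1, so the timer must
+ * be programmed before CHIPKILL is set. The helper name is hypothetical. */
+static inline void example_load_chipkill_timer(uint64_t sclk_cycles)
+{
+    cvmx_mio_rst_ckill_t ckill;
+    ckill.u64 = 0;
+    ckill.s.timer = sclk_cycles; /* bitfield truncates to 47 bits */
+    cvmx_write_csr(CVMX_MIO_RST_CKILL, ckill.u64);
+}
+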
+/**
+ * cvmx_mio_rst_cntl#
+ *
+ * Notes:
+ * GEN1_Only mode is enabled for PEM0 when QLM1_SPD[0] is set or when sclk < 550 MHz.
+ * GEN1_Only mode is enabled for PEM1 when QLM1_SPD[1] is set or when sclk < 550 MHz.
+ */
+union cvmx_mio_rst_cntlx {
+ uint64_t u64;
+ struct cvmx_mio_rst_cntlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t in_rev_ln : 1; /**< RO access to corresponding pin PCIE*_REV_LANES
+ which provides the initial value for REV_LANES
+ For INT0/CNTL0: pin PCIE0_REV_LANES
+ For INT1/CNTL1: always zero as no PCIE1 pin */
+ uint64_t rev_lanes : 1; /**< Reverse the lanes for INT*.
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized to
+ the IN_REV_LN value.
+ When QLM1_CFG=1, the INT0(PEM0) REV_LANES internal
+ setting is always forced to '0' and INT1(PEM1)
+ is forced to '1', regardless of the CSR value. */
+ uint64_t gen1_only : 1; /**< Disable PCIE GEN2 Capability. This bit is
+ always unpredictable whenever the controller
+ is not attached to any SerDes lanes, and is
+ otherwise always set when SCLK is slower than
+ 550 MHz.
+ The MIO_RST_CNTL*[GEN1_ONLY] value is based on
+ the MIO_QLM1_CFG[QLM_SPD] value. */
+ uint64_t prst_link : 1; /**< Controls whether corresponding controller
+ link-down or hot-reset causes the assertion of
+ CIU_SOFT_PRST*[SOFT_PRST]
+
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized to 0 */
+ uint64_t rst_done : 1; /**< Read-only access to controller reset status
+
+ RESET_DONE is always zero (i.e. the controller
+ is held in reset) when:
+ - CIU_SOFT_PRST*[SOFT_PRST]=1, or
+ - RST_RCV==1 and PERST*_L pin is asserted */
+ uint64_t rst_link : 1; /**< Controls whether corresponding controller
+ link-down or hot-reset causes a warm chip reset
+ On cold reset, this field is initialized as
+ follows:
+ 0 = when corresponding HOST_MODE=1
+ 1 = when corresponding HOST_MODE=0
+
+ Note that a link-down or hot-reset event can
+ never cause a warm chip reset when the
+ controller is in reset (i.e. can never cause a
+ warm reset when RST_DONE==0). */
+ uint64_t host_mode : 1; /**< RO access to corresponding strap PCIE*_HOST_MODE
+ For CNTL1/INT1, HOST_MODE is always '1' because
+ there is no PCIE1_HOST_MODE pin. */
+ uint64_t prtmode : 2; /**< Port mode
+ 0 = port is EP mode
+ 1 = port is RC mode
+ 2,3 = Reserved
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized as
+ HOST_MODE (corresponding strap PCIE*_HOST_MODE) */
+ uint64_t rst_drv : 1; /**< Controls whether corresponding PERST*_L chip pin
+ is driven by the OCTEON. A warm/soft reset
+ will not change this field. On cold reset,
+ this field is initialized as follows:
+ 0 = when corresponding HOST_MODE=0
+ 1 = when corresponding HOST_MODE=1
+
+ When set, OCTEON drives the corresponding
+ PERST*_L pin. Otherwise, OCTEON does not drive
+ the corresponding PERST*_L pin. */
+ uint64_t rst_rcv : 1; /**< Controls whether corresponding PERST*_L chip pin
+ is received by OCTEON. A warm/soft reset
+ will not change this field. On cold reset,
+ this field is initialized as follows:
+ 0 = when corresponding HOST_MODE=1
+ 1 = when corresponding HOST_MODE=0
+
+ When RST_RCV==1, the PERST*_L value is
+ received and may be used to reset the
+ controller and (optionally, based on RST_CHIP)
+ warm reset the chip.
+
+ When RST_RCV==1 (and RST_CHIP=0),
+ MIO_RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert SW
+ whenever the external reset pin initiates a
+ controller reset sequence.)
+
+ RST_VAL gives the PERST*_L pin value when
+ RST_RCV==1.
+
+ When RST_RCV==0, the PERST*_L pin value is
+ ignored. */
+ uint64_t rst_chip : 1; /**< Controls whether corresponding PERST*_L chip
+ pin causes a chip warm reset like CHIP_RESET_L.
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized to 0.
+
+ RST_CHIP is ignored when RST_RCV==0.
+
+ When RST_RCV==1, RST_CHIP==1, and PERST*_L
+ asserts, a chip warm reset will be generated. */
+ uint64_t rst_val : 1; /**< Read-only access to corresponding PERST*_L pin
+ Unpredictable when RST_RCV==0. Reads as 1 when
+ RST_RCV==1 and the PERST*_L pin is asserted.
+ Reads as 0 when RST_RCV==1 and the PERST*_L
+ pin is not asserted. */
+#else
+ uint64_t rst_val : 1;
+ uint64_t rst_chip : 1;
+ uint64_t rst_rcv : 1;
+ uint64_t rst_drv : 1;
+ uint64_t prtmode : 2;
+ uint64_t host_mode : 1;
+ uint64_t rst_link : 1;
+ uint64_t rst_done : 1;
+ uint64_t prst_link : 1;
+ uint64_t gen1_only : 1;
+ uint64_t rev_lanes : 1;
+ uint64_t in_rev_ln : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_mio_rst_cntlx_s cn61xx;
+ struct cvmx_mio_rst_cntlx_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t prst_link : 1; /**< Controls whether corresponding controller
+ link-down or hot-reset causes the assertion of
+ CIU_SOFT_PRST*[SOFT_PRST]
+
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized to 0 */
+ uint64_t rst_done : 1; /**< Read-only access to controller reset status
+
+ RESET_DONE is always zero (i.e. the controller
+ is held in reset) when:
+ - CIU_SOFT_PRST*[SOFT_PRST]=1, or
+ - RST_RCV==1 and PERST*_L pin is asserted */
+ uint64_t rst_link : 1; /**< Controls whether corresponding controller
+ link-down or hot-reset causes a warm chip reset
+ On cold reset, this field is initialized as
+ follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=1
+ 1 = when corresponding strap QLM*_HOST_MODE=0
+
+ For MIO_RST_CNTL2 and MIO_RST_CNTL3, this field
+ is initialized to 1 on cold reset.
+
+ Note that a link-down or hot-reset event can
+ never cause a warm chip reset when the
+ controller is in reset (i.e. can never cause a
+ warm reset when RST_DONE==0). */
+ uint64_t host_mode : 1; /**< RO access to corresponding strap QLM*_HOST_MODE
+
+ For MIO_RST_CNTL2 and MIO_RST_CNTL3, this field
+ is reserved/RAZ.
+
+ QLM0_HOST_MODE corresponds to PCIe0/sRIO0
+ QLM1_HOST_MODE corresponds to PCIe1/sRIO1 */
+ uint64_t prtmode : 2; /**< Port mode
+ 0 = port is EP mode
+ 1 = port is RC mode
+ 2,3 = Reserved
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized as
+ follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=0
+ 1 = when corresponding strap QLM*_HOST_MODE=1
+
+ For MIO_RST_CNTL2 and MIO_RST_CNTL3, this field
+ is initialized to 0 on cold reset. */
+ uint64_t rst_drv : 1; /**< Controls whether corresponding PERST*_L chip pin
+ is driven by the OCTEON. A warm/soft reset
+ will not change this field. On cold reset,
+ this field is initialized as follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=0
+ 1 = when corresponding strap QLM*_HOST_MODE=1
+
+ When set, OCTEON drives the corresponding
+ PERST*_L pin. Otherwise, OCTEON does not drive
+ the corresponding PERST*_L pin.
+
+ For MIO_RST_CNTL2 and MIO_RST_CNTL3, this field
+ is reserved/RAZ. */
+ uint64_t rst_rcv : 1; /**< Controls whether corresponding PERST*_L chip pin
+ is received by OCTEON. A warm/soft reset
+ will not change this field. On cold reset,
+ this field is initialized as follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=1
+ 1 = when corresponding strap QLM*_HOST_MODE=0
+
+ When RST_RCV==1, the PERST*_L value is
+ received and may be used to reset the
+ controller and (optionally, based on RST_CHIP)
+ warm reset the chip.
+
+ When RST_RCV==1 (and RST_CHIP=0),
+ MIO_RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert SW
+ whenever the external reset pin initiates a
+ controller reset sequence.)
+
+ RST_VAL gives the PERST*_L pin value when
+ RST_RCV==1.
+
+ When RST_RCV==0, the PERST*_L pin value is
+ ignored.
+
+ For MIO_RST_CNTL2 and MIO_RST_CNTL3, this field
+ is reserved/RAZ. */
+ uint64_t rst_chip : 1; /**< Controls whether corresponding PERST*_L chip
+ pin causes a chip warm reset like CHIP_RESET_L.
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized to 0.
+
+ RST_CHIP is ignored when RST_RCV==0.
+
+ When RST_RCV==1, RST_CHIP==1, and PERST*_L
+ asserts, a chip warm reset will be generated.
+
+ For MIO_RST_CNTL2 and MIO_RST_CNTL3, this field
+ is reserved/RAZ. */
+ uint64_t rst_val : 1; /**< Read-only access to corresponding PERST*_L pin
+ Unpredictable when RST_RCV==0. Reads as 1 when
+ RST_RCV==1 and the PERST*_L pin is asserted.
+ Reads as 0 when RST_RCV==1 and the PERST*_L
+ pin is not asserted.
+
+ For MIO_RST_CNTL2 and MIO_RST_CNTL3, this field
+ is reserved/RAZ. */
+#else
+ uint64_t rst_val : 1;
+ uint64_t rst_chip : 1;
+ uint64_t rst_rcv : 1;
+ uint64_t rst_drv : 1;
+ uint64_t prtmode : 2;
+ uint64_t host_mode : 1;
+ uint64_t rst_link : 1;
+ uint64_t rst_done : 1;
+ uint64_t prst_link : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn66xx;
+ struct cvmx_mio_rst_cntlx_cn66xx cn68xx;
+ struct cvmx_mio_rst_cntlx_s cnf71xx;
+};
+typedef union cvmx_mio_rst_cntlx cvmx_mio_rst_cntlx_t;
+
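+/* Usage sketch (editor's illustration, not part of the imported SDK file):
+ * check that a PCIe controller is out of reset before touching its CSRs.
+ * RST_DONE reads 0 while CIU_SOFT_PRST*[SOFT_PRST]=1 or while a received
+ * PERST*_L is asserted. Assumes a CVMX_MIO_RST_CNTLX(index) address macro
+ * in the style of the other MIO address macros in this header. */
+static inline int example_controller_out_of_reset(int pcie_port)
+{
+    cvmx_mio_rst_cntlx_t cntl;
+    cntl.u64 = cvmx_read_csr(CVMX_MIO_RST_CNTLX(pcie_port));
+    return cntl.s.rst_done; /* 1 = controller reset has completed */
+}
+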
+/**
+ * cvmx_mio_rst_ctl#
+ *
+ * Notes:
+ * GEN1_Only mode is enabled for PEM0 when QLM1_SPD[0] is set or when sclk < 550 MHz.
+ * GEN1_Only mode is enabled for PEM1 when QLM1_SPD[1] is set or when sclk < 550 MHz.
+ */
+union cvmx_mio_rst_ctlx {
+ uint64_t u64;
+ struct cvmx_mio_rst_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t in_rev_ln : 1; /**< RO access to corresponding pin PCIE*_REV_LANES
+ which provides the initial value for REV_LANES
+ For INT0/CNTL0: pin PCIE0_REV_LANES
+ For INT1/CNTL1: always zero as no PCIE1 pin */
+ uint64_t rev_lanes : 1; /**< Reverse the lanes for INT*.
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized to
+ the IN_REV_LN value.
+ When QLM1_CFG=1, the INT0(PEM0) REV_LANES internal
+ setting is always forced to '0' and INT1(PEM1)
+ is forced to '1', regardless of the CSR value. */
+ uint64_t gen1_only : 1; /**< Disable PCIE GEN2 Capability. This bit is
+ always unpredictable whenever the controller
+ is not attached to any SerDes lanes, and is
+ otherwise always set when SCLK is slower than
+ 550 MHz.
+ The MIO_RST_CNTL*[GEN1_ONLY] value is based on
+ the MIO_QLM1_CFG[QLM_SPD] value. */
+ uint64_t prst_link : 1; /**< Controls whether corresponding controller
+ link-down or hot-reset causes the assertion of
+ CIU_SOFT_PRST*[SOFT_PRST]
+
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized to 0 */
+ uint64_t rst_done : 1; /**< Read-only access to controller reset status
+
+ RESET_DONE is always zero (i.e. the controller
+ is held in reset) when:
+ - CIU_SOFT_PRST*[SOFT_PRST]=1, or
+ - RST_RCV==1 and PERST*_L pin is asserted */
+ uint64_t rst_link : 1; /**< Controls whether corresponding controller
+ link-down or hot-reset causes a warm chip reset
+ On cold reset, this field is initialized as
+ follows:
+ 0 = when corresponding HOST_MODE=1
+ 1 = when corresponding HOST_MODE=0
+
+ Note that a link-down or hot-reset event can
+ never cause a warm chip reset when the
+ controller is in reset (i.e. can never cause a
+ warm reset when RST_DONE==0). */
+ uint64_t host_mode : 1; /**< RO access to corresponding strap PCIE*_HOST_MODE
+ For CNTL1/INT1, HOST_MODE is always '1' because
+ there is no PCIE1_HOST_MODE pin. */
+ uint64_t prtmode : 2; /**< Port mode
+ 0 = port is EP mode
+ 1 = port is RC mode
+ 2,3 = Reserved
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized as
+ HOST_MODE (corresponding strap PCIE*_HOST_MODE) */
+ uint64_t rst_drv : 1; /**< Controls whether corresponding PERST*_L chip pin
+ is driven by the OCTEON. A warm/soft reset
+ will not change this field. On cold reset,
+ this field is initialized as follows:
+ 0 = when corresponding HOST_MODE=0
+ 1 = when corresponding HOST_MODE=1
+
+ When set, OCTEON drives the corresponding
+ PERST*_L pin. Otherwise, OCTEON does not drive
+ the corresponding PERST*_L pin. */
+ uint64_t rst_rcv : 1; /**< Controls whether corresponding PERST*_L chip pin
+ is received by OCTEON. A warm/soft reset
+ will not change this field. On cold reset,
+ this field is initialized as follows:
+ 0 = when corresponding HOST_MODE=1
+ 1 = when corresponding HOST_MODE=0
+
+ When RST_RCV==1, the PERST*_L value is
+ received and may be used to reset the
+ controller and (optionally, based on RST_CHIP)
+ warm reset the chip.
+
+ When RST_RCV==1 (and RST_CHIP=0),
+ MIO_RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert SW
+ whenever the external reset pin initiates a
+ controller reset sequence.)
+
+ RST_VAL gives the PERST*_L pin value when
+ RST_RCV==1.
+
+ When RST_RCV==0, the PERST*_L pin value is
+ ignored. */
+ uint64_t rst_chip : 1; /**< Controls whether corresponding PERST*_L chip
+ pin causes a chip warm reset like CHIP_RESET_L.
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized to 0.
+
+ RST_CHIP is ignored when RST_RCV==0.
+
+ When RST_RCV==1, RST_CHIP==1, and PERST*_L
+ asserts, a chip warm reset will be generated. */
+ uint64_t rst_val : 1; /**< Read-only access to corresponding PERST*_L pin
+ Unpredictable when RST_RCV==0. Reads as 1 when
+ RST_RCV==1 and the PERST*_L pin is asserted.
+ Reads as 0 when RST_RCV==1 and the PERST*_L
+ pin is not asserted. */
+#else
+ uint64_t rst_val : 1;
+ uint64_t rst_chip : 1;
+ uint64_t rst_rcv : 1;
+ uint64_t rst_drv : 1;
+ uint64_t prtmode : 2;
+ uint64_t host_mode : 1;
+ uint64_t rst_link : 1;
+ uint64_t rst_done : 1;
+ uint64_t prst_link : 1;
+ uint64_t gen1_only : 1;
+ uint64_t rev_lanes : 1;
+ uint64_t in_rev_ln : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_mio_rst_ctlx_s cn61xx;
+ struct cvmx_mio_rst_ctlx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t prst_link : 1; /**< Controls whether corresponding controller
+ link-down or hot-reset causes the assertion of
+ CIU_SOFT_PRST*[SOFT_PRST]
+
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized to 0
+
+ ***NOTE: Added in pass 2.0 */
+ uint64_t rst_done : 1; /**< Read-only access to controller reset status
+
+ RESET_DONE is always zero (i.e. the controller
+ is held in reset) when:
+ - CIU_SOFT_PRST*[SOFT_PRST]=1, or
+ - RST_RCV==1 and PERST*_L pin is asserted */
+ uint64_t rst_link : 1; /**< Controls whether corresponding controller
+ link-down or hot-reset causes a warm chip reset
+ On cold reset, this field is initialized as
+ follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=1
+ 1 = when corresponding strap QLM*_HOST_MODE=0
+
+ Note that a link-down or hot-reset event can
+ never cause a warm chip reset when the
+ controller is in reset (i.e. can never cause a
+ warm reset when RST_DONE==0). */
+ uint64_t host_mode : 1; /**< RO access to corresponding strap QLM*_HOST_MODE */
+ uint64_t prtmode : 2; /**< Port mode
+ 0 = port is EP mode
+ 1 = port is RC mode
+ 2,3 = Reserved
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized as
+ follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=0
+ 1 = when corresponding strap QLM*_HOST_MODE=1 */
+ uint64_t rst_drv : 1; /**< Controls whether corresponding PERST*_L chip pin
+ is driven by the OCTEON. A warm/soft reset
+ will not change this field. On cold reset,
+ this field is initialized as follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=0
+ 1 = when corresponding strap QLM*_HOST_MODE=1
+
+ When set, OCTEON drives the corresponding
+ PERST*_L pin. Otherwise, OCTEON does not drive
+ the corresponding PERST*_L pin. */
+ uint64_t rst_rcv : 1; /**< Controls whether corresponding PERST*_L chip pin
+ is received by OCTEON. A warm/soft reset
+ will not change this field. On cold reset,
+ this field is initialized as follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=1
+ 1 = when corresponding strap QLM*_HOST_MODE=0
+
+ When RST_RCV==1, the PERST*_L value is
+ received and may be used to reset the
+ controller and (optionally, based on RST_CHIP)
+ warm reset the chip.
+
+ When RST_RCV==1 (and RST_CHIP=0),
+ MIO_RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert SW
+ whenever the external reset pin initiates a
+ controller reset sequence.)
+
+ RST_VAL gives the PERST*_L pin value when
+ RST_RCV==1.
+
+ When RST_RCV==0, the PERST*_L pin value is
+ ignored. */
+ uint64_t rst_chip : 1; /**< Controls whether corresponding PERST*_L chip
+ pin causes a chip warm reset like CHIP_RESET_L.
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized to 0.
+
+ RST_CHIP is ignored when RST_RCV==0.
+
+ When RST_RCV==1, RST_CHIP==1, and PERST*_L
+ asserts, a chip warm reset will be generated. */
+ uint64_t rst_val : 1; /**< Read-only access to corresponding PERST*_L pin
+ Unpredictable when RST_RCV==0. Reads as 1 when
+ RST_RCV==1 and the PERST*_L pin is asserted.
+ Reads as 0 when RST_RCV==1 and the PERST*_L
+ pin is not asserted. */
+#else
+ uint64_t rst_val : 1;
+ uint64_t rst_chip : 1;
+ uint64_t rst_rcv : 1;
+ uint64_t rst_drv : 1;
+ uint64_t prtmode : 2;
+ uint64_t host_mode : 1;
+ uint64_t rst_link : 1;
+ uint64_t rst_done : 1;
+ uint64_t prst_link : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn63xx;
+ struct cvmx_mio_rst_ctlx_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t rst_done : 1; /**< Read-only access to controller reset status
+
+ RESET_DONE is always zero (i.e. the controller
+ is held in reset) when:
+ - CIU_SOFT_PRST*[SOFT_PRST]=1, or
+ - RST_RCV==1 and PERST*_L pin is asserted */
+ uint64_t rst_link : 1; /**< Controls whether corresponding controller
+ link-down or hot-reset causes a warm chip reset
+ On cold reset, this field is initialized as
+ follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=1
+ 1 = when corresponding strap QLM*_HOST_MODE=0
+
+ Note that a link-down or hot-reset event can
+ never cause a warm chip reset when the
+ controller is in reset (i.e. can never cause a
+ warm reset when RST_DONE==0). */
+ uint64_t host_mode : 1; /**< RO access to corresponding strap QLM*_HOST_MODE */
+ uint64_t prtmode : 2; /**< Port mode
+ 0 = port is EP mode
+ 1 = port is RC mode
+ 2,3 = Reserved
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized as
+ follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=0
+ 1 = when corresponding strap QLM*_HOST_MODE=1 */
+ uint64_t rst_drv : 1; /**< Controls whether corresponding PERST*_L chip pin
+ is driven by the OCTEON. A warm/soft reset
+ will not change this field. On cold reset,
+ this field is initialized as follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=0
+ 1 = when corresponding strap QLM*_HOST_MODE=1
+
+ When set, OCTEON drives the corresponding
+ PERST*_L pin. Otherwise, OCTEON does not drive
+ the corresponding PERST*_L pin. */
+ uint64_t rst_rcv : 1; /**< Controls whether corresponding PERST*_L chip pin
+ is received by OCTEON. A warm/soft reset
+ will not change this field. On cold reset,
+ this field is initialized as follows:
+ 0 = when corresponding strap QLM*_HOST_MODE=1
+ 1 = when corresponding strap QLM*_HOST_MODE=0
+
+ When RST_RCV==1, the PERST*_L value is
+ received and may be used to reset the
+ controller and (optionally, based on RST_CHIP)
+ warm reset the chip.
+
+ When RST_RCV==1 (and RST_CHIP=0),
+ MIO_RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert SW
+ whenever the external reset pin initiates a
+ controller reset sequence.)
+
+ RST_VAL gives the PERST*_L pin value when
+ RST_RCV==1.
+
+ When RST_RCV==0, the PERST*_L pin value is
+ ignored. */
+ uint64_t rst_chip : 1; /**< Controls whether corresponding PERST*_L chip
+ pin causes a chip warm reset like CHIP_RESET_L.
+ A warm/soft reset will not change this field.
+ On cold reset, this field is initialized to 0.
+
+ RST_CHIP is ignored when RST_RCV==0.
+
+ When RST_RCV==1, RST_CHIP==1, and PERST*_L
+ asserts, a chip warm reset will be generated. */
+ uint64_t rst_val : 1; /**< Read-only access to corresponding PERST*_L pin
+ Unpredictable when RST_RCV==0. Reads as 1 when
+ RST_RCV==1 and the PERST*_L pin is asserted.
+ Reads as 0 when RST_RCV==1 and the PERST*_L
+ pin is not asserted. */
+#else
+ uint64_t rst_val : 1;
+ uint64_t rst_chip : 1;
+ uint64_t rst_rcv : 1;
+ uint64_t rst_drv : 1;
+ uint64_t prtmode : 2;
+ uint64_t host_mode : 1;
+ uint64_t rst_link : 1;
+ uint64_t rst_done : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn63xxp1;
+ struct cvmx_mio_rst_ctlx_cn63xx cn66xx;
+ struct cvmx_mio_rst_ctlx_cn63xx cn68xx;
+ struct cvmx_mio_rst_ctlx_cn63xx cn68xxp1;
+ struct cvmx_mio_rst_ctlx_s cnf71xx;
+};
+typedef union cvmx_mio_rst_ctlx cvmx_mio_rst_ctlx_t;
+
+/**
+ * cvmx_mio_rst_delay
+ */
+union cvmx_mio_rst_delay {
+ uint64_t u64;
+ struct cvmx_mio_rst_delay_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t warm_rst_dly : 16; /**< A warm reset immediately causes an early warm
+ reset notification. However, the assertion of
+ warm reset will be delayed this many sclks.
+ A warm/soft reset will not change this field.
+ NOTE: This must be at least 500 dclks */
+ uint64_t soft_rst_dly : 16; /**< A soft reset immediately causes an early soft
+ reset notification. However, the assertion of
+ soft reset will be delayed this many sclks.
+ A warm/soft reset will not change this field.
+ NOTE: This must be at least 500 dclks */
+#else
+ uint64_t soft_rst_dly : 16;
+ uint64_t warm_rst_dly : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_mio_rst_delay_s cn61xx;
+ struct cvmx_mio_rst_delay_s cn63xx;
+ struct cvmx_mio_rst_delay_s cn63xxp1;
+ struct cvmx_mio_rst_delay_s cn66xx;
+ struct cvmx_mio_rst_delay_s cn68xx;
+ struct cvmx_mio_rst_delay_s cn68xxp1;
+ struct cvmx_mio_rst_delay_s cnf71xx;
+};
+typedef union cvmx_mio_rst_delay cvmx_mio_rst_delay_t;
+
+/**
+ * cvmx_mio_rst_int
+ *
+ * MIO_RST_INT = MIO Reset Interrupt Register
+ *
+ */
+union cvmx_mio_rst_int {
+ uint64_t u64;
+ struct cvmx_mio_rst_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t perst1 : 1; /**< PERST1_L asserted while MIO_RST_CTL1[RST_RCV]=1
+ and MIO_RST_CTL1[RST_CHIP]=0 */
+ uint64_t perst0 : 1; /**< PERST0_L asserted while MIO_RST_CTL0[RST_RCV]=1
+ and MIO_RST_CTL0[RST_CHIP]=0 */
+ uint64_t reserved_4_7 : 4;
+ uint64_t rst_link3 : 1; /**< A controller3 link-down/hot-reset occurred while
+ MIO_RST_CNTL3[RST_LINK]=0. Software must assert
+ then de-assert CIU_SOFT_PRST3[SOFT_PRST] */
+ uint64_t rst_link2 : 1; /**< A controller2 link-down/hot-reset occurred while
+ MIO_RST_CNTL2[RST_LINK]=0. Software must assert
+ then de-assert CIU_SOFT_PRST2[SOFT_PRST] */
+ uint64_t rst_link1 : 1; /**< A controller1 link-down/hot-reset occurred while
+ MIO_RST_CTL1[RST_LINK]=0. Software must assert
+ then de-assert CIU_SOFT_PRST1[SOFT_PRST] */
+ uint64_t rst_link0 : 1; /**< A controller0 link-down/hot-reset occurred while
+ MIO_RST_CTL0[RST_LINK]=0. Software must assert
+ then de-assert CIU_SOFT_PRST[SOFT_PRST] */
+#else
+ uint64_t rst_link0 : 1;
+ uint64_t rst_link1 : 1;
+ uint64_t rst_link2 : 1;
+ uint64_t rst_link3 : 1;
+ uint64_t reserved_4_7 : 4;
+ uint64_t perst0 : 1;
+ uint64_t perst1 : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_mio_rst_int_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t perst1 : 1; /**< PERST1_L asserted while MIO_RST_CTL1[RST_RCV]=1
+ and MIO_RST_CTL1[RST_CHIP]=0 */
+ uint64_t perst0 : 1; /**< PERST0_L asserted while MIO_RST_CTL0[RST_RCV]=1
+ and MIO_RST_CTL0[RST_CHIP]=0 */
+ uint64_t reserved_2_7 : 6;
+ uint64_t rst_link1 : 1; /**< A controller1 link-down/hot-reset occurred while
+ MIO_RST_CTL1[RST_LINK]=0. Software must assert
+ then de-assert CIU_SOFT_PRST1[SOFT_PRST] */
+ uint64_t rst_link0 : 1; /**< A controller0 link-down/hot-reset occurred while
+ MIO_RST_CTL0[RST_LINK]=0. Software must assert
+ then de-assert CIU_SOFT_PRST[SOFT_PRST] */
+#else
+ uint64_t rst_link0 : 1;
+ uint64_t rst_link1 : 1;
+ uint64_t reserved_2_7 : 6;
+ uint64_t perst0 : 1;
+ uint64_t perst1 : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_mio_rst_int_cn61xx cn63xx;
+ struct cvmx_mio_rst_int_cn61xx cn63xxp1;
+ struct cvmx_mio_rst_int_s cn66xx;
+ struct cvmx_mio_rst_int_cn61xx cn68xx;
+ struct cvmx_mio_rst_int_cn61xx cn68xxp1;
+ struct cvmx_mio_rst_int_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_rst_int cvmx_mio_rst_int_t;
+
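+/* Usage sketch (editor's illustration, not part of the imported SDK file):
+ * acknowledge pending reset interrupts. The RST_LINKn and PERSTn source
+ * bits are assumed write-one-to-clear (the usual convention for MIO
+ * interrupt-source registers), so writing back the value just read acks
+ * exactly the events that were observed. Per the field notes, a RST_LINKn
+ * event additionally requires software to pulse the matching
+ * CIU_SOFT_PRSTn[SOFT_PRST] (not shown here). */
+static inline void example_ack_rst_int(void)
+{
+    cvmx_mio_rst_int_t rst_int;
+    rst_int.u64 = cvmx_read_csr(CVMX_MIO_RST_INT);
+    cvmx_write_csr(CVMX_MIO_RST_INT, rst_int.u64); /* W1C acknowledge */
+}
+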
+/**
+ * cvmx_mio_rst_int_en
+ *
+ * MIO_RST_INT_EN = MIO Reset Interrupt Enable Register
+ *
+ */
+union cvmx_mio_rst_int_en {
+ uint64_t u64;
+ struct cvmx_mio_rst_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t perst1 : 1; /**< Controller1 PERST reset interrupt enable */
+ uint64_t perst0 : 1; /**< Controller0 PERST reset interrupt enable */
+ uint64_t reserved_4_7 : 4;
+ uint64_t rst_link3 : 1; /**< Controller3 link-down/hot reset interrupt enable */
+ uint64_t rst_link2 : 1; /**< Controller2 link-down/hot reset interrupt enable */
+ uint64_t rst_link1 : 1; /**< Controller1 link-down/hot reset interrupt enable */
+ uint64_t rst_link0 : 1; /**< Controller0 link-down/hot reset interrupt enable */
+#else
+ uint64_t rst_link0 : 1;
+ uint64_t rst_link1 : 1;
+ uint64_t rst_link2 : 1;
+ uint64_t rst_link3 : 1;
+ uint64_t reserved_4_7 : 4;
+ uint64_t perst0 : 1;
+ uint64_t perst1 : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_mio_rst_int_en_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t perst1 : 1; /**< Controller1 PERST reset interrupt enable */
+ uint64_t perst0 : 1; /**< Controller0 PERST reset interrupt enable */
+ uint64_t reserved_2_7 : 6;
+ uint64_t rst_link1 : 1; /**< Controller1 link-down/hot reset interrupt enable */
+ uint64_t rst_link0 : 1; /**< Controller0 link-down/hot reset interrupt enable */
+#else
+ uint64_t rst_link0 : 1;
+ uint64_t rst_link1 : 1;
+ uint64_t reserved_2_7 : 6;
+ uint64_t perst0 : 1;
+ uint64_t perst1 : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn61xx;
+ struct cvmx_mio_rst_int_en_cn61xx cn63xx;
+ struct cvmx_mio_rst_int_en_cn61xx cn63xxp1;
+ struct cvmx_mio_rst_int_en_s cn66xx;
+ struct cvmx_mio_rst_int_en_cn61xx cn68xx;
+ struct cvmx_mio_rst_int_en_cn61xx cn68xxp1;
+ struct cvmx_mio_rst_int_en_cn61xx cnf71xx;
+};
+typedef union cvmx_mio_rst_int_en cvmx_mio_rst_int_en_t;
+
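+/* Usage sketch (editor's illustration, not part of the imported SDK file):
+ * unmask the controller 0/1 link-down interrupts. The enable bits line up
+ * one-for-one with the source bits in MIO_RST_INT above. */
+static inline void example_enable_link_irqs(void)
+{
+    cvmx_mio_rst_int_en_t en;
+    en.u64 = cvmx_read_csr(CVMX_MIO_RST_INT_EN);
+    en.s.rst_link0 = 1;
+    en.s.rst_link1 = 1;
+    cvmx_write_csr(CVMX_MIO_RST_INT_EN, en.u64);
+}
+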
+/**
+ * cvmx_mio_tws#_int
+ *
+ * MIO_TWSX_INT = TWSX Interrupt Register
+ *
+ * This register contains the TWSI interrupt enable mask and the interrupt source bits. Note: the
+ * interrupt source bit for the TWSI core interrupt (CORE_INT) is read-only, the appropriate sequence
+ * must be written to the TWSI core to clear this interrupt. The other interrupt source bits are write-
+ * one-to-clear. TS_INT is set on the update of the MIO_TWS_TWSI_SW register (i.e. when it is written
+ * by a TWSI device). ST_INT is set whenever the valid bit of the MIO_TWS_SW_TWSI is cleared (see above
+ * for reasons).
+ *
+ * Note: When using the high-level controller, CORE_EN should be clear and CORE_INT should be ignored.
+ * Conversely, when the high-level controller is disabled, ST_EN / TS_EN should be clear and ST_INT /
+ * TS_INT should be ignored.
+ *
+ * This register also contains a read-only copy of the TWSI bus (SCL and SDA) as well as control bits to
+ * override the current state of the TWSI bus (SCL_OVR and SDA_OVR). Setting an override bit high will
+ * result in the open drain driver being activated, thus driving the corresponding signal low.
+ */
+union cvmx_mio_twsx_int {
+ uint64_t u64;
+ struct cvmx_mio_twsx_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t scl : 1; /**< SCL */
+ uint64_t sda : 1; /**< SDA */
+ uint64_t scl_ovr : 1; /**< SCL override */
+ uint64_t sda_ovr : 1; /**< SDA override */
+ uint64_t reserved_7_7 : 1;
+ uint64_t core_en : 1; /**< TWSI core interrupt enable */
+ uint64_t ts_en : 1; /**< MIO_TWS_TWSI_SW register update interrupt enable */
+ uint64_t st_en : 1; /**< MIO_TWS_SW_TWSI register update interrupt enable */
+ uint64_t reserved_3_3 : 1;
+ uint64_t core_int : 1; /**< TWSI core interrupt */
+ uint64_t ts_int : 1; /**< MIO_TWS_TWSI_SW register update interrupt */
+ uint64_t st_int : 1; /**< MIO_TWS_SW_TWSI register update interrupt */
+#else
+ uint64_t st_int : 1;
+ uint64_t ts_int : 1;
+ uint64_t core_int : 1;
+ uint64_t reserved_3_3 : 1;
+ uint64_t st_en : 1;
+ uint64_t ts_en : 1;
+ uint64_t core_en : 1;
+ uint64_t reserved_7_7 : 1;
+ uint64_t sda_ovr : 1;
+ uint64_t scl_ovr : 1;
+ uint64_t sda : 1;
+ uint64_t scl : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_mio_twsx_int_s cn30xx;
+ struct cvmx_mio_twsx_int_s cn31xx;
+ struct cvmx_mio_twsx_int_s cn38xx;
+ struct cvmx_mio_twsx_int_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t core_en : 1; /**< TWSI core interrupt enable */
+ uint64_t ts_en : 1; /**< MIO_TWS_TWSI_SW register update interrupt enable */
+ uint64_t st_en : 1; /**< MIO_TWS_SW_TWSI register update interrupt enable */
+ uint64_t reserved_3_3 : 1;
+ uint64_t core_int : 1; /**< TWSI core interrupt */
+ uint64_t ts_int : 1; /**< MIO_TWS_TWSI_SW register update interrupt */
+ uint64_t st_int : 1; /**< MIO_TWS_SW_TWSI register update interrupt */
+#else
+ uint64_t st_int : 1;
+ uint64_t ts_int : 1;
+ uint64_t core_int : 1;
+ uint64_t reserved_3_3 : 1;
+ uint64_t st_en : 1;
+ uint64_t ts_en : 1;
+ uint64_t core_en : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } cn38xxp2;
+ struct cvmx_mio_twsx_int_s cn50xx;
+ struct cvmx_mio_twsx_int_s cn52xx;
+ struct cvmx_mio_twsx_int_s cn52xxp1;
+ struct cvmx_mio_twsx_int_s cn56xx;
+ struct cvmx_mio_twsx_int_s cn56xxp1;
+ struct cvmx_mio_twsx_int_s cn58xx;
+ struct cvmx_mio_twsx_int_s cn58xxp1;
+ struct cvmx_mio_twsx_int_s cn61xx;
+ struct cvmx_mio_twsx_int_s cn63xx;
+ struct cvmx_mio_twsx_int_s cn63xxp1;
+ struct cvmx_mio_twsx_int_s cn66xx;
+ struct cvmx_mio_twsx_int_s cn68xx;
+ struct cvmx_mio_twsx_int_s cn68xxp1;
+ struct cvmx_mio_twsx_int_s cnf71xx;
+};
+typedef union cvmx_mio_twsx_int cvmx_mio_twsx_int_t;
+
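+/* Usage sketch (editor's illustration, not part of the imported SDK file):
+ * release a wedged TWSI bus by pulsing SCL through the override bit
+ * described above (setting an _OVR bit activates the open-drain driver,
+ * pulling the line low). The pulse count and delay are illustrative, and
+ * note that writing back the read value also write-one-clears any pending
+ * ST_INT/TS_INT bits. Assumes CVMX_MIO_TWSX_INT(bus) and cvmx_wait(). */
+static inline void example_twsi_unwedge(int bus)
+{
+    cvmx_mio_twsx_int_t twsi_int;
+    int i;
+
+    for (i = 0; i < 9; i++) {      /* at most 9 SCL pulses */
+        twsi_int.u64 = cvmx_read_csr(CVMX_MIO_TWSX_INT(bus));
+        if (twsi_int.s.sda)        /* slave released SDA: done */
+            break;
+        twsi_int.s.scl_ovr = 1;    /* drive SCL low */
+        cvmx_write_csr(CVMX_MIO_TWSX_INT(bus), twsi_int.u64);
+        cvmx_wait(10000);
+        twsi_int.s.scl_ovr = 0;    /* release SCL */
+        cvmx_write_csr(CVMX_MIO_TWSX_INT(bus), twsi_int.u64);
+        cvmx_wait(10000);
+    }
+}
+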
+/**
+ * cvmx_mio_tws#_sw_twsi
+ *
+ * MIO_TWSX_SW_TWSI = TWSX Software to TWSI Register
+ *
+ * This register allows software to
+ * - initiate TWSI interface master-mode operations with a write and read the result with a read
+ * - load four bytes for later retrieval (slave mode) with a write and check validity with a read
+ * - launch a TWSI controller configuration read/write with a write and read the result with a read
+ *
+ * This register should be read or written by software, and read by the TWSI device. The TWSI device can
+ * use either two-byte or five-byte reads to reference this register.
+ *
+ * The TWSI device considers this register valid when V==1 and SLONLY==1.
+ */
+union cvmx_mio_twsx_sw_twsi {
+ uint64_t u64;
+ struct cvmx_mio_twsx_sw_twsi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t v : 1; /**< Valid bit
+ - Set on a write (should always be written with
+ a 1)
+ - Cleared when a TWSI master mode op completes
+ - Cleared when a TWSI configuration register
+ access completes
+ - Cleared when the TWSI device reads the
+ register if SLONLY==1 */
+ uint64_t slonly : 1; /**< Slave Only Mode
+ - No operation is initiated with a write when
+ this bit is set - only D field is updated in
+ this case
+ - When clear, a write initiates either a TWSI
+ master-mode operation or a TWSI configuration
+ register access */
+ uint64_t eia : 1; /**< Extended Internal Address - send additional
+ internal address byte (MSB of IA is from IA field
+ of MIO_TWS_SW_TWSI_EXT) */
+ uint64_t op : 4; /**< Opcode field - When the register is written with
+ SLONLY==0, initiate a read or write:
+ 0000 => 7-bit Byte Master Mode TWSI Op
+ 0001 => 7-bit Byte Combined Read Master Mode Op
+ 7-bit Byte Write w/ IA Master Mode Op
+ 0010 => 10-bit Byte Master Mode TWSI Op
+ 0011 => 10-bit Byte Combined Read Master Mode Op
+ 10-bit Byte Write w/ IA Master Mode Op
+ 0100 => TWSI Master Clock Register
+ 0110 => See EOP field
+ 1000 => 7-bit 4-byte Master Mode TWSI Op
+ 1001 => 7-bit 4-byte Comb. Read Master Mode Op
+ 7-bit 4-byte Write w/ IA Master Mode Op
+ 1010 => 10-bit 4-byte Master Mode TWSI Op
+ 1011 => 10-bit 4-byte Comb. Read Master Mode Op
+ 10-bit 4-byte Write w/ IA Master Mode Op */
+ uint64_t r : 1; /**< Read bit or result
+ - If set on a write when SLONLY==0, the
+ operation is a read
+ - On a read, this bit returns the result
+ indication for the most recent master mode
+ operation (1 = success, 0 = fail) */
+ uint64_t sovr : 1; /**< Size Override - if set, use the SIZE field to
+ determine Master Mode Op size rather than what
+ the Opcode field specifies. For operations
+ greater than 4 bytes, the additional data will be
+ contained in the D field of MIO_TWS_SW_TWSI_EXT */
+ uint64_t size : 3; /**< Size in bytes of Master Mode Op if the Size
+ Override bit is set. Specified in -1 notation
+ (i.e. 0 = 1 byte, 1 = 2 bytes ... 7 = 8 bytes) */
+ uint64_t scr : 2; /**< Scratch - unused, but retain state */
+ uint64_t a : 10; /**< Address field
+ - the address of the remote device for a master
+ mode operation
+ - A<9:7> are only used for 10-bit addressing
+ Note that when mastering a 7-bit OP, A<6:0> should
+ not take any of the values 0x78, 0x79, 0x7A nor
+ 0x7B (these 7-bit addresses are reserved to
+ extend to 10-bit addressing). */
+ uint64_t ia : 5; /**< Internal Address - Used when launching a master
+ mode combined read / write with internal address
+ (lower 3 bits are contained in the EOP_IA field) */
+ uint64_t eop_ia : 3; /**< Extra opcode (when OP<3:0> == 0110 and SLONLY==0):
+ 000 => TWSI Slave Address Register
+ 001 => TWSI Data Register
+ 010 => TWSI Control Register
+ 011 => TWSI Clock Control Register (when R == 0)
+ 011 => TWSI Status Register (when R == 1)
+ 100 => TWSI Extended Slave Register
+ 111 => TWSI Soft Reset Register
+ Also the lower 3 bits of Internal Address when
+ launching a master mode combined read / write
+ with internal address */
+ uint64_t d : 32; /**< Data Field
+ Used on a write when
+ - initiating a master-mode write (SLONLY==0)
+ - writing a TWSI config register (SLONLY==0)
+ - a slave mode write (SLONLY==1)
+ The read value is updated by
+ - a write to this register
+ - master mode completion (contains result or
+ error code)
+ - TWSI config register read (contains result) */
+#else
+ uint64_t d : 32;
+ uint64_t eop_ia : 3;
+ uint64_t ia : 5;
+ uint64_t a : 10;
+ uint64_t scr : 2;
+ uint64_t size : 3;
+ uint64_t sovr : 1;
+ uint64_t r : 1;
+ uint64_t op : 4;
+ uint64_t eia : 1;
+ uint64_t slonly : 1;
+ uint64_t v : 1;
+#endif
+ } s;
+ struct cvmx_mio_twsx_sw_twsi_s cn30xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn31xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn38xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn38xxp2;
+ struct cvmx_mio_twsx_sw_twsi_s cn50xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn52xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn52xxp1;
+ struct cvmx_mio_twsx_sw_twsi_s cn56xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn56xxp1;
+ struct cvmx_mio_twsx_sw_twsi_s cn58xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn58xxp1;
+ struct cvmx_mio_twsx_sw_twsi_s cn61xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn63xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn63xxp1;
+ struct cvmx_mio_twsx_sw_twsi_s cn66xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn68xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn68xxp1;
+ struct cvmx_mio_twsx_sw_twsi_s cnf71xx;
+};
+typedef union cvmx_mio_twsx_sw_twsi cvmx_mio_twsx_sw_twsi_t;
+
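+/* Usage sketch (editor's illustration, not part of the imported SDK file):
+ * a one-byte master-mode read through MIO_TWSX_SW_TWSI, following the
+ * field descriptions above: launch with V=1, SLONLY=0, OP=0 (7-bit byte
+ * op), R=1, then poll until V clears and test R for success. Timeouts and
+ * error decoding are omitted; CVMX_MIO_TWSX_SW_TWSI(bus) is assumed. */
+static inline int example_twsi_read8(int bus, uint8_t dev_addr, uint8_t *data)
+{
+    cvmx_mio_twsx_sw_twsi_t sw_twsi;
+
+    sw_twsi.u64 = 0;
+    sw_twsi.s.v = 1;        /* valid: launches the operation */
+    sw_twsi.s.op = 0;       /* 7-bit byte master-mode op */
+    sw_twsi.s.r = 1;        /* read */
+    sw_twsi.s.a = dev_addr; /* 7-bit slave address */
+    cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI(bus), sw_twsi.u64);
+
+    do
+        sw_twsi.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI(bus));
+    while (sw_twsi.s.v);    /* V clears when the op completes */
+
+    if (!sw_twsi.s.r)       /* on read-back, R=0 means failure */
+        return -1;
+    *data = sw_twsi.s.d & 0xff;
+    return 0;
+}
+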
+/**
+ * cvmx_mio_tws#_sw_twsi_ext
+ *
+ * MIO_TWSX_SW_TWSI_EXT = TWSX Software to TWSI Extension Register
+ *
+ * This register contains an additional byte of internal address and 4 additional bytes of data to be
+ * used with TWSI master mode operations. IA will be sent as the first byte of internal address when
+ * performing master mode combined read / write with internal address operations and the EIA bit of
+ * MIO_TWS_SW_TWSI is set. D extends the data field of MIO_TWS_SW_TWSI for a total of 8 bytes (SOVR
+ * must be set to perform operations greater than 4 bytes).
+ */
+union cvmx_mio_twsx_sw_twsi_ext {
+ uint64_t u64;
+ struct cvmx_mio_twsx_sw_twsi_ext_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t ia : 8; /**< Extended Internal Address */
+ uint64_t d : 32; /**< Extended Data Field */
+#else
+ uint64_t d : 32;
+ uint64_t ia : 8;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn30xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn31xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn38xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn38xxp2;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn50xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn52xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn52xxp1;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn56xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn56xxp1;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn58xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn58xxp1;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn61xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn63xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn63xxp1;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn66xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn68xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn68xxp1;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cnf71xx;
+};
+typedef union cvmx_mio_twsx_sw_twsi_ext cvmx_mio_twsx_sw_twsi_ext_t;
+
+/**
+ * cvmx_mio_tws#_twsi_sw
+ *
+ * MIO_TWSX_TWSI_SW = TWSX TWSI to Software Register
+ *
+ * This register allows the TWSI device to transfer data to software and later check that software has
+ * received the information.
+ *
+ * This register should be read or written by the TWSI device, and read by software. The TWSI device can
+ * use one-byte or four-byte payload writes, and two-byte payload reads.
+ *
+ * The TWSI device considers this register valid when V==1.
+ */
+union cvmx_mio_twsx_twsi_sw {
+ uint64_t u64;
+ struct cvmx_mio_twsx_twsi_sw_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t v : 2; /**< Valid Bits
+ - Not directly writable
+ - Set to 1 on any write by the TWSI device
+ - Cleared on any read by software */
+ uint64_t reserved_32_61 : 30;
+ uint64_t d : 32; /**< Data Field - updated on a write by the TWSI device */
+#else
+ uint64_t d : 32;
+ uint64_t reserved_32_61 : 30;
+ uint64_t v : 2;
+#endif
+ } s;
+ struct cvmx_mio_twsx_twsi_sw_s cn30xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn31xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn38xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn38xxp2;
+ struct cvmx_mio_twsx_twsi_sw_s cn50xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn52xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn52xxp1;
+ struct cvmx_mio_twsx_twsi_sw_s cn56xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn56xxp1;
+ struct cvmx_mio_twsx_twsi_sw_s cn58xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn58xxp1;
+ struct cvmx_mio_twsx_twsi_sw_s cn61xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn63xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn63xxp1;
+ struct cvmx_mio_twsx_twsi_sw_s cn66xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn68xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn68xxp1;
+ struct cvmx_mio_twsx_twsi_sw_s cnf71xx;
+};
+typedef union cvmx_mio_twsx_twsi_sw cvmx_mio_twsx_twsi_sw_t;
+
+/**
+ * cvmx_mio_uart#_dlh
+ *
+ * MIO_UARTX_DLH = MIO UARTX Divisor Latch High Register
+ *
+ * The DLH (Divisor Latch High) register in conjunction with DLL (Divisor Latch Low) register form a
+ * 16-bit, read/write, Divisor Latch register that contains the baud rate divisor for the UART. It is
+ * accessed by first setting the DLAB bit (bit 7) in the Line Control Register (LCR). The output baud
+ * rate is equal to eclk frequency divided by sixteen times the value of the baud rate divisor, as
+ * follows: baud rate = eclk / (16 * divisor).
+ *
+ * Note that the BUSY bit (bit 0) of the UART Status Register (USR) must be clear before writing this
+ * register. BUSY bit is always clear in PASS3.
+ *
+ * Note that with the Divisor Latch Registers (DLL and DLH) set to zero, the baud clock is disabled
+ * and no serial communications will occur. Also, once the DLL or DLH is set, at least 8 clock cycles
+ * of eclk should be allowed to pass before transmitting or receiving data.
+ *
+ * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the
+ * IER and DLH registers are the same.
+ */
+union cvmx_mio_uartx_dlh {
+ uint64_t u64;
+ struct cvmx_mio_uartx_dlh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t dlh : 8; /**< Divisor Latch High Register */
+#else
+ uint64_t dlh : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_dlh_s cn30xx;
+ struct cvmx_mio_uartx_dlh_s cn31xx;
+ struct cvmx_mio_uartx_dlh_s cn38xx;
+ struct cvmx_mio_uartx_dlh_s cn38xxp2;
+ struct cvmx_mio_uartx_dlh_s cn50xx;
+ struct cvmx_mio_uartx_dlh_s cn52xx;
+ struct cvmx_mio_uartx_dlh_s cn52xxp1;
+ struct cvmx_mio_uartx_dlh_s cn56xx;
+ struct cvmx_mio_uartx_dlh_s cn56xxp1;
+ struct cvmx_mio_uartx_dlh_s cn58xx;
+ struct cvmx_mio_uartx_dlh_s cn58xxp1;
+ struct cvmx_mio_uartx_dlh_s cn61xx;
+ struct cvmx_mio_uartx_dlh_s cn63xx;
+ struct cvmx_mio_uartx_dlh_s cn63xxp1;
+ struct cvmx_mio_uartx_dlh_s cn66xx;
+ struct cvmx_mio_uartx_dlh_s cn68xx;
+ struct cvmx_mio_uartx_dlh_s cn68xxp1;
+ struct cvmx_mio_uartx_dlh_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_dlh cvmx_mio_uartx_dlh_t;
+typedef cvmx_mio_uartx_dlh_t cvmx_uart_dlh_t;
+
+/**
+ * cvmx_mio_uart#_dll
+ *
+ * MIO_UARTX_DLL = MIO UARTX Divisor Latch Low Register
+ *
+ * The DLH (Divisor Latch High) register in conjunction with DLL (Divisor Latch Low) register form a
+ * 16-bit, read/write, Divisor Latch register that contains the baud rate divisor for the UART. It is
+ * accessed by first setting the DLAB bit (bit 7) in the Line Control Register (LCR). The output baud
+ * rate is equal to eclk frequency divided by sixteen times the value of the baud rate divisor, as
+ * follows: baud rate = eclk / (16 * divisor).
+ *
+ * Note that the BUSY bit (bit 0) of the UART Status Register (USR) must be clear before writing this
+ * register. BUSY bit is always clear in PASS3.
+ *
+ * Note that with the Divisor Latch Registers (DLL and DLH) set to zero, the baud clock is disabled
+ * and no serial communications will occur. Also, once the DLL or DLH is set, at least 8 clock cycles
+ * of eclk should be allowed to pass before transmitting or receiving data.
+ *
+ * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the
+ * RBR, THR, and DLL registers are the same.
+ */
+union cvmx_mio_uartx_dll {
+ uint64_t u64;
+ struct cvmx_mio_uartx_dll_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t dll : 8; /**< Divisor Latch Low Register */
+#else
+ uint64_t dll : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_dll_s cn30xx;
+ struct cvmx_mio_uartx_dll_s cn31xx;
+ struct cvmx_mio_uartx_dll_s cn38xx;
+ struct cvmx_mio_uartx_dll_s cn38xxp2;
+ struct cvmx_mio_uartx_dll_s cn50xx;
+ struct cvmx_mio_uartx_dll_s cn52xx;
+ struct cvmx_mio_uartx_dll_s cn52xxp1;
+ struct cvmx_mio_uartx_dll_s cn56xx;
+ struct cvmx_mio_uartx_dll_s cn56xxp1;
+ struct cvmx_mio_uartx_dll_s cn58xx;
+ struct cvmx_mio_uartx_dll_s cn58xxp1;
+ struct cvmx_mio_uartx_dll_s cn61xx;
+ struct cvmx_mio_uartx_dll_s cn63xx;
+ struct cvmx_mio_uartx_dll_s cn63xxp1;
+ struct cvmx_mio_uartx_dll_s cn66xx;
+ struct cvmx_mio_uartx_dll_s cn68xx;
+ struct cvmx_mio_uartx_dll_s cn68xxp1;
+ struct cvmx_mio_uartx_dll_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_dll cvmx_mio_uartx_dll_t;
+typedef cvmx_mio_uartx_dll_t cvmx_uart_dll_t;
+
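+/* Usage sketch (editor's illustration, not part of the imported SDK file):
+ * program the 16-bit divisor from the formula above,
+ * divisor = eclk / (16 * baud). DLAB (LCR bit 7) must be set to reach
+ * DLL/DLH; the CVMX_MIO_UARTX_* address macros and the LCR union with its
+ * dlab field are assumed from elsewhere in the SDK. */
+static inline void example_uart_set_baud(int uart, uint64_t eclk_hz,
+                                         uint64_t baud)
+{
+    uint16_t divisor = (uint16_t)(eclk_hz / (16 * baud));
+    cvmx_mio_uartx_lcr_t lcr;
+
+    lcr.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LCR(uart));
+    lcr.s.dlab = 1; /* expose the divisor latch */
+    cvmx_write_csr(CVMX_MIO_UARTX_LCR(uart), lcr.u64);
+
+    cvmx_write_csr(CVMX_MIO_UARTX_DLL(uart), divisor & 0xff);
+    cvmx_write_csr(CVMX_MIO_UARTX_DLH(uart), divisor >> 8);
+
+    lcr.s.dlab = 0; /* back to RBR/THR/IER access */
+    cvmx_write_csr(CVMX_MIO_UARTX_LCR(uart), lcr.u64);
+}
+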
+/**
+ * cvmx_mio_uart#_far
+ *
+ * MIO_UARTX_FAR = MIO UARTX FIFO Access Register
+ *
+ * The FIFO Access Register (FAR) is used to enable a FIFO access mode for testing, so that the receive
+ * FIFO can be written by software and the transmit FIFO can be read by software when the FIFOs are
+ * enabled. When FIFOs are not enabled it allows the RBR to be written by software and the THR to be read
+ * by software. Note that when the FIFO access mode is enabled/disabled, the control portion of the
+ * receive FIFO and transmit FIFO is reset and the FIFOs are treated as empty.
+ */
+union cvmx_mio_uartx_far {
+ uint64_t u64;
+ struct cvmx_mio_uartx_far_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t far : 1; /**< FIFO Access Register */
+#else
+ uint64_t far : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_uartx_far_s cn30xx;
+ struct cvmx_mio_uartx_far_s cn31xx;
+ struct cvmx_mio_uartx_far_s cn38xx;
+ struct cvmx_mio_uartx_far_s cn38xxp2;
+ struct cvmx_mio_uartx_far_s cn50xx;
+ struct cvmx_mio_uartx_far_s cn52xx;
+ struct cvmx_mio_uartx_far_s cn52xxp1;
+ struct cvmx_mio_uartx_far_s cn56xx;
+ struct cvmx_mio_uartx_far_s cn56xxp1;
+ struct cvmx_mio_uartx_far_s cn58xx;
+ struct cvmx_mio_uartx_far_s cn58xxp1;
+ struct cvmx_mio_uartx_far_s cn61xx;
+ struct cvmx_mio_uartx_far_s cn63xx;
+ struct cvmx_mio_uartx_far_s cn63xxp1;
+ struct cvmx_mio_uartx_far_s cn66xx;
+ struct cvmx_mio_uartx_far_s cn68xx;
+ struct cvmx_mio_uartx_far_s cn68xxp1;
+ struct cvmx_mio_uartx_far_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_far cvmx_mio_uartx_far_t;
+typedef cvmx_mio_uartx_far_t cvmx_uart_far_t;
+
+/**
+ * cvmx_mio_uart#_fcr
+ *
+ * MIO_UARTX_FCR = MIO UARTX FIFO Control Register
+ *
+ * The FIFO Control Register (FCR) is a write-only register that controls the read and write data FIFO
+ * operation. When FIFOs and Programmable THRE Interrupt mode are enabled, this register also controls
+ * the THRE Interrupt empty threshold level.
+ *
+ * Setting bit 0 of the FCR enables the transmit and receive FIFOs. Whenever the value of this bit is
+ * changed both the TX and RX FIFOs will be reset.
+ *
+ * Writing a '1' to bit 1 of the FCR resets and flushes data in the receive FIFO. Note that this bit is
+ * self-clearing and it is not necessary to clear this bit.
+ *
+ * Writing a '1' to bit 2 of the FCR resets and flushes data in the transmit FIFO. Note that this bit is
+ * self-clearing and it is not necessary to clear this bit.
+ *
+ * If the FIFOs and Programmable THRE Interrupt mode are enabled, bits 4 and 5 control the empty
+ * threshold level at which THRE Interrupts are generated when the mode is active. See the following
+ * table for encodings:
+ *
+ * TX Trigger
+ * ----------
+ * 00 = empty FIFO
+ * 01 = 2 chars in FIFO
+ * 10 = FIFO 1/4 full
+ * 11 = FIFO 1/2 full
+ *
+ * If the FIFO mode is enabled (bit 0 of the FCR is set to '1') bits 6 and 7 are active. Bit 6 and bit 7
+ * set the trigger level in the receiver FIFO for the Enable Received Data Available Interrupt (ERBFI).
+ * In auto flow control mode the trigger is used to determine when the rts_n signal will be deasserted.
+ * See the following table for encodings:
+ *
+ * RX Trigger
+ * ----------
+ * 00 = 1 char in FIFO
+ * 01 = FIFO 1/4 full
+ * 10 = FIFO 1/2 full
+ * 11 = FIFO 2 chars less than full
+ *
+ * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the
+ * IIR and FCR registers are the same.
+ */
+union cvmx_mio_uartx_fcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_fcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t rxtrig : 2; /**< RX Trigger */
+ uint64_t txtrig : 2; /**< TX Trigger */
+ uint64_t reserved_3_3 : 1;
+ uint64_t txfr : 1; /**< TX FIFO reset */
+ uint64_t rxfr : 1; /**< RX FIFO reset */
+ uint64_t en : 1; /**< FIFO enable */
+#else
+ uint64_t en : 1;
+ uint64_t rxfr : 1;
+ uint64_t txfr : 1;
+ uint64_t reserved_3_3 : 1;
+ uint64_t txtrig : 2;
+ uint64_t rxtrig : 2;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_fcr_s cn30xx;
+ struct cvmx_mio_uartx_fcr_s cn31xx;
+ struct cvmx_mio_uartx_fcr_s cn38xx;
+ struct cvmx_mio_uartx_fcr_s cn38xxp2;
+ struct cvmx_mio_uartx_fcr_s cn50xx;
+ struct cvmx_mio_uartx_fcr_s cn52xx;
+ struct cvmx_mio_uartx_fcr_s cn52xxp1;
+ struct cvmx_mio_uartx_fcr_s cn56xx;
+ struct cvmx_mio_uartx_fcr_s cn56xxp1;
+ struct cvmx_mio_uartx_fcr_s cn58xx;
+ struct cvmx_mio_uartx_fcr_s cn58xxp1;
+ struct cvmx_mio_uartx_fcr_s cn61xx;
+ struct cvmx_mio_uartx_fcr_s cn63xx;
+ struct cvmx_mio_uartx_fcr_s cn63xxp1;
+ struct cvmx_mio_uartx_fcr_s cn66xx;
+ struct cvmx_mio_uartx_fcr_s cn68xx;
+ struct cvmx_mio_uartx_fcr_s cn68xxp1;
+ struct cvmx_mio_uartx_fcr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_fcr cvmx_mio_uartx_fcr_t;
+typedef cvmx_mio_uartx_fcr_t cvmx_uart_fcr_t;
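+
+/*
+ * Illustrative sketch (not part of the original SDK sources): enable both
+ * FIFOs with an RX trigger of 1/2 full and a TX empty threshold of 2
+ * characters. cvmx_write_csr() (cvmx-access.h) and a CVMX_MIO_UARTX_FCR()
+ * address macro are assumed to be available; because the FCR is write-only,
+ * the value is built from zero rather than read back.
+ *
+ *   cvmx_mio_uartx_fcr_t fcr;
+ *   fcr.u64 = 0;
+ *   fcr.s.en = 1;       // enable TX and RX FIFOs (resets both)
+ *   fcr.s.rxtrig = 2;   // 10 = RX FIFO 1/2 full
+ *   fcr.s.txtrig = 1;   // 01 = 2 chars in TX FIFO
+ *   cvmx_write_csr(CVMX_MIO_UARTX_FCR(uart), fcr.u64);
+ */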
+
+/**
+ * cvmx_mio_uart#_htx
+ *
+ * MIO_UARTX_HTX = MIO UARTX Halt TX Register
+ *
+ * The Halt TX Register (HTX) is used to halt transmissions for testing, so that the transmit FIFO can be
+ * filled by software when FIFOs are enabled. If FIFOs are not enabled, setting the HTX register will
+ * have no effect.
+ */
+union cvmx_mio_uartx_htx {
+ uint64_t u64;
+ struct cvmx_mio_uartx_htx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t htx : 1; /**< Halt TX */
+#else
+ uint64_t htx : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_uartx_htx_s cn30xx;
+ struct cvmx_mio_uartx_htx_s cn31xx;
+ struct cvmx_mio_uartx_htx_s cn38xx;
+ struct cvmx_mio_uartx_htx_s cn38xxp2;
+ struct cvmx_mio_uartx_htx_s cn50xx;
+ struct cvmx_mio_uartx_htx_s cn52xx;
+ struct cvmx_mio_uartx_htx_s cn52xxp1;
+ struct cvmx_mio_uartx_htx_s cn56xx;
+ struct cvmx_mio_uartx_htx_s cn56xxp1;
+ struct cvmx_mio_uartx_htx_s cn58xx;
+ struct cvmx_mio_uartx_htx_s cn58xxp1;
+ struct cvmx_mio_uartx_htx_s cn61xx;
+ struct cvmx_mio_uartx_htx_s cn63xx;
+ struct cvmx_mio_uartx_htx_s cn63xxp1;
+ struct cvmx_mio_uartx_htx_s cn66xx;
+ struct cvmx_mio_uartx_htx_s cn68xx;
+ struct cvmx_mio_uartx_htx_s cn68xxp1;
+ struct cvmx_mio_uartx_htx_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_htx cvmx_mio_uartx_htx_t;
+typedef cvmx_mio_uartx_htx_t cvmx_uart_htx_t;
+
+/**
+ * cvmx_mio_uart#_ier
+ *
+ * MIO_UARTX_IER = MIO UARTX Interrupt Enable Register
+ *
+ * The Interrupt Enable Register (IER) is a read/write register that contains four bits that enable
+ * the generation of interrupts. These four bits are the Enable Received Data Available Interrupt
+ * (ERBFI), the Enable Transmitter Holding Register Empty Interrupt (ETBEI), the Enable Receiver Line
+ * Status Interrupt (ELSI), and the Enable Modem Status Interrupt (EDSSI).
+ *
+ * The IER also contains an enable bit (PTIME) for the Programmable THRE Interrupt mode.
+ *
+ * Note: The Divisor Latch Address Bit (DLAB) of the Line Control Register (LCR) must be clear to access
+ * this register.
+ *
+ * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the
+ * IER and DLH registers are the same.
+ */
+union cvmx_mio_uartx_ier {
+ uint64_t u64;
+ struct cvmx_mio_uartx_ier_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t ptime : 1; /**< Programmable THRE Interrupt mode enable */
+ uint64_t reserved_4_6 : 3;
+ uint64_t edssi : 1; /**< Enable Modem Status Interrupt */
+ uint64_t elsi : 1; /**< Enable Receiver Line Status Interrupt */
+ uint64_t etbei : 1; /**< Enable Transmitter Holding Register Empty Interrupt */
+ uint64_t erbfi : 1; /**< Enable Received Data Available Interrupt */
+#else
+ uint64_t erbfi : 1;
+ uint64_t etbei : 1;
+ uint64_t elsi : 1;
+ uint64_t edssi : 1;
+ uint64_t reserved_4_6 : 3;
+ uint64_t ptime : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_ier_s cn30xx;
+ struct cvmx_mio_uartx_ier_s cn31xx;
+ struct cvmx_mio_uartx_ier_s cn38xx;
+ struct cvmx_mio_uartx_ier_s cn38xxp2;
+ struct cvmx_mio_uartx_ier_s cn50xx;
+ struct cvmx_mio_uartx_ier_s cn52xx;
+ struct cvmx_mio_uartx_ier_s cn52xxp1;
+ struct cvmx_mio_uartx_ier_s cn56xx;
+ struct cvmx_mio_uartx_ier_s cn56xxp1;
+ struct cvmx_mio_uartx_ier_s cn58xx;
+ struct cvmx_mio_uartx_ier_s cn58xxp1;
+ struct cvmx_mio_uartx_ier_s cn61xx;
+ struct cvmx_mio_uartx_ier_s cn63xx;
+ struct cvmx_mio_uartx_ier_s cn63xxp1;
+ struct cvmx_mio_uartx_ier_s cn66xx;
+ struct cvmx_mio_uartx_ier_s cn68xx;
+ struct cvmx_mio_uartx_ier_s cn68xxp1;
+ struct cvmx_mio_uartx_ier_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_ier cvmx_mio_uartx_ier_t;
+typedef cvmx_mio_uartx_ier_t cvmx_uart_ier_t;
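+
+/*
+ * Illustrative sketch (not part of the original SDK sources): enable the
+ * received-data and line-status interrupts. LCR[DLAB] must already be clear,
+ * since the IER shares its address with the DLH; cvmx_write_csr() and a
+ * CVMX_MIO_UARTX_IER() address macro are assumed.
+ *
+ *   cvmx_mio_uartx_ier_t ier;
+ *   ier.u64 = 0;
+ *   ier.s.erbfi = 1;    // interrupt on received data available
+ *   ier.s.elsi = 1;     // interrupt on receiver line status errors
+ *   cvmx_write_csr(CVMX_MIO_UARTX_IER(uart), ier.u64);
+ */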
+
+/**
+ * cvmx_mio_uart#_iir
+ *
+ * MIO_UARTX_IIR = MIO UARTX Interrupt Identity Register
+ *
+ * The Interrupt Identity Register (IIR) is a read-only register that identifies the source of an
+ * interrupt. The upper two bits of the register are FIFO-enabled bits. These bits are '00' if the FIFOs
+ * are disabled, and '11' if they are enabled. The lower four bits identify the highest priority pending
+ * interrupt. The following list defines interrupt source decoding, interrupt priority, and interrupt
+ * reset control:
+ *
+ * 0001: no interrupt pending.
+ *
+ * 0110 (highest priority): Receiver Line Status interrupt. Source: overrun,
+ * parity, or framing errors, or a break. Reset by reading the Line Status
+ * Register.
+ *
+ * 0100 (second priority): Received Data Available interrupt. Source: receiver
+ * data available (FIFOs disabled) or RX FIFO trigger level reached (FIFOs
+ * enabled). Reset by reading the Receiver Buffer Register (FIFOs disabled) or
+ * by the FIFO dropping below the trigger level (FIFOs enabled).
+ *
+ * 1100 (second priority): Character Timeout Indication. Source: no characters
+ * in or out of the RX FIFO during the last 4 character times while there is
+ * at least 1 character in it during this time. Reset by reading the Receiver
+ * Buffer Register.
+ *
+ * 0010 (third priority): Transmitter Holding Register Empty interrupt.
+ * Source: THR empty (Programmable THRE mode disabled) or TX FIFO at or below
+ * threshold (Programmable THRE mode enabled). Reset by reading the Interrupt
+ * Identity Register (if it is the source of the interrupt), by writing into
+ * the THR (FIFOs or THRE mode disabled), or by the TX FIFO rising above the
+ * threshold (FIFOs and THRE mode enabled).
+ *
+ * 0000 (fourth priority): Modem Status Changed interrupt. Source: Clear To
+ * Send (CTS), Data Set Ready (DSR), Ring Indicator (RI), or Data Carrier
+ * Detect (DCD) changed (note: if auto flow control mode is enabled, a change
+ * in CTS will not cause an interrupt). Reset by reading the Modem Status
+ * Register.
+ *
+ * 0111 (fifth priority): Busy Detect Indication. Source: software has tried
+ * to write to the Line Control Register while the BUSY bit of the UART
+ * Status Register was set. Reset by reading the UART Status Register.
+ *
+ * Note: The Busy Detect Indication interrupt has been removed from PASS3 and will never assert.
+ *
+ * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the
+ * IIR and FCR registers are the same.
+ */
+union cvmx_mio_uartx_iir {
+ uint64_t u64;
+ struct cvmx_mio_uartx_iir_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t fen : 2; /**< FIFO-enabled bits */
+ uint64_t reserved_4_5 : 2;
+ cvmx_uart_iid_t iid : 4; /**< Interrupt ID */
+#else
+ cvmx_uart_iid_t iid : 4;
+ uint64_t reserved_4_5 : 2;
+ uint64_t fen : 2;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_iir_s cn30xx;
+ struct cvmx_mio_uartx_iir_s cn31xx;
+ struct cvmx_mio_uartx_iir_s cn38xx;
+ struct cvmx_mio_uartx_iir_s cn38xxp2;
+ struct cvmx_mio_uartx_iir_s cn50xx;
+ struct cvmx_mio_uartx_iir_s cn52xx;
+ struct cvmx_mio_uartx_iir_s cn52xxp1;
+ struct cvmx_mio_uartx_iir_s cn56xx;
+ struct cvmx_mio_uartx_iir_s cn56xxp1;
+ struct cvmx_mio_uartx_iir_s cn58xx;
+ struct cvmx_mio_uartx_iir_s cn58xxp1;
+ struct cvmx_mio_uartx_iir_s cn61xx;
+ struct cvmx_mio_uartx_iir_s cn63xx;
+ struct cvmx_mio_uartx_iir_s cn63xxp1;
+ struct cvmx_mio_uartx_iir_s cn66xx;
+ struct cvmx_mio_uartx_iir_s cn68xx;
+ struct cvmx_mio_uartx_iir_s cn68xxp1;
+ struct cvmx_mio_uartx_iir_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_iir cvmx_mio_uartx_iir_t;
+typedef cvmx_mio_uartx_iir_t cvmx_uart_iir_t;
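+
+/*
+ * Illustrative sketch (not part of the original SDK sources): dispatch on
+ * the pending interrupt source using the encodings listed above. The handler
+ * functions are hypothetical; cvmx_read_csr() and a CVMX_MIO_UARTX_IIR()
+ * address macro are assumed.
+ *
+ *   cvmx_mio_uartx_iir_t iir;
+ *   iir.u64 = cvmx_read_csr(CVMX_MIO_UARTX_IIR(uart));
+ *   switch (iir.s.iid) {
+ *   case 0x6: handle_line_status();  break;  // cleared by reading the LSR
+ *   case 0x4:                                // RX data available
+ *   case 0xc: drain_rx_fifo();       break;  // cleared by reading the RBR
+ *   case 0x2: refill_tx_fifo();      break;  // THRE
+ *   case 0x0: handle_modem_status(); break;  // cleared by reading the MSR
+ *   default:                         break;  // 0x1 = none pending
+ *   }
+ */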
+
+/**
+ * cvmx_mio_uart#_lcr
+ *
+ * MIO_UARTX_LCR = MIO UARTX Line Control Register
+ *
+ * The Line Control Register (LCR) controls the format of the data that is transmitted and received by
+ * the UART.
+ *
+ * LCR bits 0 and 1 are the Character Length Select field. This field is used to select the number of
+ * data bits per character that are transmitted and received. See the following table for encodings:
+ *
+ * CLS
+ * ---
+ * 00 = 5 bits (bits 0-4 sent)
+ * 01 = 6 bits (bits 0-5 sent)
+ * 10 = 7 bits (bits 0-6 sent)
+ * 11 = 8 bits (all bits sent)
+ *
+ * LCR bit 2 controls the number of stop bits transmitted. If bit 2 is a '0', one stop bit is transmitted
+ * in the serial data. If bit 2 is a '1' and the data bits are set to '00', one and a half stop bits are
+ * generated. Otherwise, two stop bits are generated and transmitted in the serial data out. Note that
+ * regardless of the number of stop bits selected, the receiver only checks the first stop bit.
+ *
+ * LCR bit 3 is the Parity Enable bit. This bit is used to enable and disable parity generation and
+ * detection in the transmitted and received serial characters, respectively.
+ *
+ * LCR bit 4 is the Even Parity Select bit. If parity is enabled, bit 4 selects between even and odd
+ * parity. If bit 4 is a '1', an even number of ones is transmitted or checked. If bit 4 is a '0', an odd
+ * number of ones is transmitted or checked.
+ *
+ * LCR bit 6 is the Break Control bit. Setting the Break bit sends a break signal by holding the sout
+ * line low (when not in Loopback mode, as determined by Modem Control Register bit 4). When in Loopback
+ * mode, the break condition is internally looped back to the receiver.
+ *
+ * LCR bit 7 is the Divisor Latch Address bit. Setting this bit enables reading and writing of the
+ * Divisor Latch register (DLL and DLH) to set the baud rate of the UART. This bit must be cleared after
+ * initial baud rate setup in order to access other registers.
+ *
+ * Note: The LCR is writeable only when the UART is not busy (when the BUSY bit (bit 0) of the UART
+ * Status Register (USR) is clear). The LCR is always readable. In PASS3, the LCR is always writable
+ * because the BUSY bit is always clear.
+ */
+union cvmx_mio_uartx_lcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_lcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t dlab : 1; /**< Divisor Latch Address bit */
+ uint64_t brk : 1; /**< Break Control bit */
+ uint64_t reserved_5_5 : 1;
+ uint64_t eps : 1; /**< Even Parity Select bit */
+ uint64_t pen : 1; /**< Parity Enable bit */
+ uint64_t stop : 1; /**< Stop Control bit */
+ cvmx_uart_bits_t cls : 2; /**< Character Length Select */
+#else
+ cvmx_uart_bits_t cls : 2;
+ uint64_t stop : 1;
+ uint64_t pen : 1;
+ uint64_t eps : 1;
+ uint64_t reserved_5_5 : 1;
+ uint64_t brk : 1;
+ uint64_t dlab : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_lcr_s cn30xx;
+ struct cvmx_mio_uartx_lcr_s cn31xx;
+ struct cvmx_mio_uartx_lcr_s cn38xx;
+ struct cvmx_mio_uartx_lcr_s cn38xxp2;
+ struct cvmx_mio_uartx_lcr_s cn50xx;
+ struct cvmx_mio_uartx_lcr_s cn52xx;
+ struct cvmx_mio_uartx_lcr_s cn52xxp1;
+ struct cvmx_mio_uartx_lcr_s cn56xx;
+ struct cvmx_mio_uartx_lcr_s cn56xxp1;
+ struct cvmx_mio_uartx_lcr_s cn58xx;
+ struct cvmx_mio_uartx_lcr_s cn58xxp1;
+ struct cvmx_mio_uartx_lcr_s cn61xx;
+ struct cvmx_mio_uartx_lcr_s cn63xx;
+ struct cvmx_mio_uartx_lcr_s cn63xxp1;
+ struct cvmx_mio_uartx_lcr_s cn66xx;
+ struct cvmx_mio_uartx_lcr_s cn68xx;
+ struct cvmx_mio_uartx_lcr_s cn68xxp1;
+ struct cvmx_mio_uartx_lcr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_lcr cvmx_mio_uartx_lcr_t;
+typedef cvmx_mio_uartx_lcr_t cvmx_uart_lcr_t;
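+
+/*
+ * Illustrative sketch (not part of the original SDK sources): the usual
+ * 16550-style baud setup, assuming the UART is clocked at sclk Hz with 16x
+ * oversampling. cvmx_read_csr()/cvmx_write_csr() and the CVMX_MIO_UARTX_LCR/
+ * _DLL/_DLH address macros are assumed.
+ *
+ *   uint64_t divisor = sclk / (16 * baud);
+ *   cvmx_mio_uartx_lcr_t lcr;
+ *   lcr.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LCR(uart));
+ *   lcr.s.dlab = 1;                           // expose DLL/DLH
+ *   cvmx_write_csr(CVMX_MIO_UARTX_LCR(uart), lcr.u64);
+ *   cvmx_write_csr(CVMX_MIO_UARTX_DLL(uart), divisor & 0xff);
+ *   cvmx_write_csr(CVMX_MIO_UARTX_DLH(uart), (divisor >> 8) & 0xff);
+ *   lcr.s.dlab = 0;                           // back to RBR/THR/IER access
+ *   lcr.s.cls = 3;                            // 11 = 8 data bits
+ *   lcr.s.stop = 0;                           // 1 stop bit
+ *   lcr.s.pen = 0;                            // no parity (8N1)
+ *   cvmx_write_csr(CVMX_MIO_UARTX_LCR(uart), lcr.u64);
+ */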
+
+/**
+ * cvmx_mio_uart#_lsr
+ *
+ * MIO_UARTX_LSR = MIO UARTX Line Status Register
+ *
+ * The Line Status Register (LSR) contains status of the receiver and transmitter data transfers. This
+ * status can be read by the user at any time.
+ *
+ * LSR bit 0 is the Data Ready (DR) bit. When set, this bit indicates the receiver contains at least one
+ * character in the RBR or the receiver FIFO. This bit is cleared when the RBR is read in the non-FIFO
+ * mode, or when the receiver FIFO is empty, in FIFO mode.
+ *
+ * LSR bit 1 is the Overrun Error (OE) bit. When set, this bit indicates an overrun error has occurred
+ * because a new data character was received before the previous data was read. In the non-FIFO mode, the
+ * OE bit is set when a new character arrives in the receiver before the previous character was read from
+ * the RBR. When this happens, the data in the RBR is overwritten. In the FIFO mode, an overrun error
+ * occurs when the FIFO is full and a new character arrives at the receiver. The data in the FIFO is
+ * retained and the data in the receive shift register is lost.
+ *
+ * LSR bit 2 is the Parity Error (PE) bit. This bit is set whenever there is a parity error in the
+ * receiver if the Parity Enable (PEN) bit in the LCR is set. In the FIFO mode, since the parity error is
+ * associated with a character received, it is revealed when the character with the parity error arrives
+ * at the top of the FIFO. It should be noted that the Parity Error (PE) bit will be set if a break
+ * interrupt has occurred, as indicated by the Break Interrupt (BI) bit.
+ *
+ * LSR bit 3 is the Framing Error (FE) bit. This bit is set whenever there is a framing error in the
+ * receiver. A framing error occurs when the receiver does not detect a valid STOP bit in the received
+ * data. In the FIFO mode, since the framing error is associated with a character received, it is
+ * revealed when the character with the framing error is at the top of the FIFO. When a framing error
+ * occurs, the UART tries to resynchronize. It does this by assuming that the error was due to the start
+ * bit of the next character and then continues receiving the other bits (i.e. data and/or parity and
+ * stop). It should be noted that the Framing Error (FE) bit will be set if a break interrupt has
+ * occurred, as indicated by the Break Interrupt (BI) bit.
+ *
+ * Note: The OE, PE, and FE bits are reset when a read of the LSR is performed.
+ *
+ * LSR bit 4 is the Break Interrupt (BI) bit. This bit is set whenever the serial input (sin) is held in
+ * a 0 state for longer than the sum of start time + data bits + parity + stop bits. A break condition on
+ * sin causes one and only one character, consisting of all zeros, to be received by the UART. In the
+ * FIFO mode, the character associated with the break condition is carried through the FIFO and is
+ * revealed when the character is at the top of the FIFO. Reading the LSR clears the BI bit. In the non-
+ * FIFO mode, the BI indication occurs immediately and persists until the LSR is read.
+ *
+ * LSR bit 5 is the Transmitter Holding Register Empty (THRE) bit. When Programmable THRE Interrupt mode
+ * is disabled, this bit indicates that the UART can accept a new character for transmission. This bit is
+ * set whenever data is transferred from the THR (or TX FIFO) to the transmitter shift register and no
+ * new data has been written to the THR (or TX FIFO). This also causes a THRE Interrupt to occur, if the
+ * THRE Interrupt is enabled. When FIFOs and Programmable THRE Interrupt mode are enabled, LSR bit 5
+ * functionality is switched to indicate the transmitter FIFO is full, and no longer controls THRE
+ * Interrupts, which are then controlled by the FCR[5:4] threshold setting.
+ *
+ * LSR bit 6 is the Transmitter Empty (TEMT) bit. In the FIFO mode, this bit is set whenever the
+ * Transmitter Shift Register and the FIFO are both empty. In the non-FIFO mode, this bit is set whenever
+ * the Transmitter Holding Register and the Transmitter Shift Register are both empty. This bit is
+ * typically used to make sure it is safe to change control registers. Changing control registers while
+ * the transmitter is busy can result in corrupt data being transmitted.
+ *
+ * LSR bit 7 is the Error in Receiver FIFO (FERR) bit. This bit is active only when FIFOs are enabled. It
+ * is set when there is at least one parity error, framing error, or break indication in the FIFO. This
+ * bit is cleared when the LSR is read and the character with the error is at the top of the receiver
+ * FIFO and there are no subsequent errors in the FIFO.
+ */
+union cvmx_mio_uartx_lsr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_lsr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t ferr : 1; /**< Error in Receiver FIFO bit */
+ uint64_t temt : 1; /**< Transmitter Empty bit */
+ uint64_t thre : 1; /**< Transmitter Holding Register Empty bit */
+ uint64_t bi : 1; /**< Break Interrupt bit */
+ uint64_t fe : 1; /**< Framing Error bit */
+ uint64_t pe : 1; /**< Parity Error bit */
+ uint64_t oe : 1; /**< Overrun Error bit */
+ uint64_t dr : 1; /**< Data Ready bit */
+#else
+ uint64_t dr : 1;
+ uint64_t oe : 1;
+ uint64_t pe : 1;
+ uint64_t fe : 1;
+ uint64_t bi : 1;
+ uint64_t thre : 1;
+ uint64_t temt : 1;
+ uint64_t ferr : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_lsr_s cn30xx;
+ struct cvmx_mio_uartx_lsr_s cn31xx;
+ struct cvmx_mio_uartx_lsr_s cn38xx;
+ struct cvmx_mio_uartx_lsr_s cn38xxp2;
+ struct cvmx_mio_uartx_lsr_s cn50xx;
+ struct cvmx_mio_uartx_lsr_s cn52xx;
+ struct cvmx_mio_uartx_lsr_s cn52xxp1;
+ struct cvmx_mio_uartx_lsr_s cn56xx;
+ struct cvmx_mio_uartx_lsr_s cn56xxp1;
+ struct cvmx_mio_uartx_lsr_s cn58xx;
+ struct cvmx_mio_uartx_lsr_s cn58xxp1;
+ struct cvmx_mio_uartx_lsr_s cn61xx;
+ struct cvmx_mio_uartx_lsr_s cn63xx;
+ struct cvmx_mio_uartx_lsr_s cn63xxp1;
+ struct cvmx_mio_uartx_lsr_s cn66xx;
+ struct cvmx_mio_uartx_lsr_s cn68xx;
+ struct cvmx_mio_uartx_lsr_s cn68xxp1;
+ struct cvmx_mio_uartx_lsr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_lsr cvmx_mio_uartx_lsr_t;
+typedef cvmx_mio_uartx_lsr_t cvmx_uart_lsr_t;
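+
+/*
+ * Illustrative sketch (not part of the original SDK sources): polled receive
+ * with error checking. Reading the LSR clears OE/PE/FE/BI, so the status is
+ * captured once before the RBR is read. handle_rx_byte() is a hypothetical
+ * consumer; the CSR accessors and address macros are assumed.
+ *
+ *   cvmx_mio_uartx_lsr_t lsr;
+ *   lsr.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
+ *   if (lsr.s.dr) {
+ *       uint8_t byte = cvmx_read_csr(CVMX_MIO_UARTX_RBR(uart)) & 0xff;
+ *       if (!(lsr.s.oe || lsr.s.pe || lsr.s.fe || lsr.s.bi))
+ *           handle_rx_byte(byte);   // drop characters flagged with errors
+ *   }
+ */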
+
+/**
+ * cvmx_mio_uart#_mcr
+ *
+ * MIO_UARTX_MCR = MIO UARTX Modem Control Register
+ *
+ * The lower four bits of the Modem Control Register (MCR) directly manipulate the outputs of the UART.
+ * The DTR (bit 0), RTS (bit 1), OUT1 (bit 2), and OUT2 (bit 3) bits are inverted and then drive the
+ * corresponding UART outputs, dtr_n, rts_n, out1_n, and out2_n. In loopback mode, these outputs are
+ * driven inactive high while the values in these locations are internally looped back to the inputs.
+ *
+ * Note: When Auto RTS is enabled, the rts_n output is controlled in the same way, but is also gated
+ * with the receiver FIFO threshold trigger (rts_n is inactive high when above the threshold). The
+ * rts_n output will be de-asserted whenever RTS (bit 1) is set low.
+ *
+ * Note: The UART0 out1_n and out2_n outputs are not present on the pins of the chip, but the UART0 OUT1
+ * and OUT2 bits still function in Loopback mode. The UART1 dtr_n, out1_n, and out2_n outputs are not
+ * present on the pins of the chip, but the UART1 DTR, OUT1, and OUT2 bits still function in Loopback
+ * mode.
+ *
+ * MCR bit 4 is the Loopback bit. When set, data on the sout line is held high, while serial data output
+ * is looped back to the sin line, internally. In this mode all the interrupts are fully functional. This
+ * feature is used for diagnostic purposes. Also, in loopback mode, the modem control inputs (dsr_n,
+ * cts_n, ri_n, dcd_n) are disconnected and the four modem control outputs (dtr_n, rts_n, out1_n, out2_n)
+ * are looped back to the inputs, internally.
+ *
+ * MCR bit 5 is the Auto Flow Control Enable (AFCE) bit. When FIFOs are enabled and this bit is set,
+ * 16750-compatible Auto RTS and Auto CTS serial data flow control features are enabled.
+ *
+ * Auto RTS becomes active when the following occurs:
+ * 1. MCR bit 1 is set
+ * 2. FIFOs are enabled by setting FIFO Control Register (FCR) bit 0
+ * 3. MCR bit 5 is set (must be set after FCR bit 0)
+ *
+ * When active, the rts_n output is forced inactive-high when the receiver FIFO level reaches the
+ * threshold set by FCR[7:6]. When rts_n is connected to the cts_n input of another UART device, the
+ * other UART stops sending serial data until the receiver FIFO has available space.
+ *
+ * The selectable receiver FIFO threshold values are: 1, 1/4, 1/2, and 2 less than full. Since one
+ * additional character may be transmitted to the UART after rts_n has become inactive (due to data
+ * already having entered the transmitter block in the other UART), setting the threshold to 2 less
+ * than full allows maximum use of the FIFO with a safety zone of one character.
+ *
+ * Once the receiver FIFO becomes completely empty by reading the Receiver Buffer Register (RBR), rts_n
+ * again becomes active-low, signalling the other UART to continue sending data. Note that even if the
+ * correct MCR bits are set, Auto Flow Control is disabled whenever the FIFOs are disabled through
+ * FCR[0]. When Auto RTS is disabled or inactive,
+ * rts_n is controlled solely by MCR[1].
+ *
+ * Auto CTS becomes active when the following occurs:
+ * 1. FIFOs are enabled by setting FIFO Control Register (FCR) bit 0
+ * 2. MCR bit 5 is set (must be set after FCR bit 0)
+ *
+ * When active, the UART transmitter is disabled whenever the cts_n input becomes inactive-high. This
+ * prevents overflowing the FIFO of the receiving UART.
+ *
+ * Note that, if the cts_n input is not inactivated before the middle of the last stop bit, another
+ * character is transmitted before the transmitter is disabled. While the transmitter is disabled, the
+ * transmitter FIFO can still be written to, and even overflowed. Therefore, when using this mode, either
+ * the true FIFO depth (64 characters) must be known to software, or the Programmable THRE Interrupt mode
+ * must be enabled to access the FIFO full status through the Line Status Register. When using the FIFO
+ * full status, software can poll this before each write to the Transmitter FIFO.
+ *
+ * Note: FIFO full status is also available in the UART Status Register (USR) or the actual level of the
+ * FIFO may be read through the Transmit FIFO Level (TFL) register.
+ *
+ * When the cts_n input becomes active-low again, transmission resumes. Note that even with all other
+ * enables set, Auto Flow Control is disabled if the FIFOs are disabled through FCR[0]. When Auto CTS
+ * is disabled or inactive, the transmitter is unaffected by
+ * cts_n.
+ */
+union cvmx_mio_uartx_mcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_mcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t afce : 1; /**< Auto Flow Control Enable bit */
+ uint64_t loop : 1; /**< Loopback bit */
+ uint64_t out2 : 1; /**< OUT2 output bit */
+ uint64_t out1 : 1; /**< OUT1 output bit */
+ uint64_t rts : 1; /**< Request To Send output bit */
+ uint64_t dtr : 1; /**< Data Terminal Ready output bit */
+#else
+ uint64_t dtr : 1;
+ uint64_t rts : 1;
+ uint64_t out1 : 1;
+ uint64_t out2 : 1;
+ uint64_t loop : 1;
+ uint64_t afce : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_mio_uartx_mcr_s cn30xx;
+ struct cvmx_mio_uartx_mcr_s cn31xx;
+ struct cvmx_mio_uartx_mcr_s cn38xx;
+ struct cvmx_mio_uartx_mcr_s cn38xxp2;
+ struct cvmx_mio_uartx_mcr_s cn50xx;
+ struct cvmx_mio_uartx_mcr_s cn52xx;
+ struct cvmx_mio_uartx_mcr_s cn52xxp1;
+ struct cvmx_mio_uartx_mcr_s cn56xx;
+ struct cvmx_mio_uartx_mcr_s cn56xxp1;
+ struct cvmx_mio_uartx_mcr_s cn58xx;
+ struct cvmx_mio_uartx_mcr_s cn58xxp1;
+ struct cvmx_mio_uartx_mcr_s cn61xx;
+ struct cvmx_mio_uartx_mcr_s cn63xx;
+ struct cvmx_mio_uartx_mcr_s cn63xxp1;
+ struct cvmx_mio_uartx_mcr_s cn66xx;
+ struct cvmx_mio_uartx_mcr_s cn68xx;
+ struct cvmx_mio_uartx_mcr_s cn68xxp1;
+ struct cvmx_mio_uartx_mcr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_mcr cvmx_mio_uartx_mcr_t;
+typedef cvmx_mio_uartx_mcr_t cvmx_uart_mcr_t;
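+
+/*
+ * Illustrative sketch (not part of the original SDK sources): enable Auto
+ * RTS/CTS flow control in the order required above (RTS and FIFOs before
+ * AFCE). The CSR accessors and address macros are assumed.
+ *
+ *   cvmx_mio_uartx_fcr_t fcr;
+ *   fcr.u64 = 0;
+ *   fcr.s.en = 1;        // FIFOs must be on before AFCE
+ *   fcr.s.rxtrig = 3;    // 11 = trigger at 2 chars less than full
+ *   cvmx_write_csr(CVMX_MIO_UARTX_FCR(uart), fcr.u64);
+ *   cvmx_mio_uartx_mcr_t mcr;
+ *   mcr.u64 = cvmx_read_csr(CVMX_MIO_UARTX_MCR(uart));
+ *   mcr.s.rts = 1;       // MCR bit 1
+ *   mcr.s.afce = 1;      // MCR bit 5, set after FCR bit 0
+ *   cvmx_write_csr(CVMX_MIO_UARTX_MCR(uart), mcr.u64);
+ */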
+
+/**
+ * cvmx_mio_uart#_msr
+ *
+ * MIO_UARTX_MSR = MIO UARTX Modem Status Register
+ *
+ * The Modem Status Register (MSR) contains the current status of the modem control input lines and
+ * whether they have changed.
+ *
+ * DCTS (bit 0), DDSR (bit 1), and DDCD (bit 3) bits record whether the modem control lines (cts_n,
+ * dsr_n, and dcd_n) have changed since the last time the user read the MSR. TERI (bit 2) indicates ri_n
+ * has changed from an active-low, to an inactive-high state since the last time the MSR was read. In
+ * Loopback mode, DCTS reflects changes on MCR bit 1 (RTS), DDSR reflects changes on MCR bit 0 (DTR), and
+ * DDCD reflects changes on MCR bit 3 (Out2), while TERI reflects when MCR bit 2 (Out1) has changed state
+ * from a high to a low.
+ *
+ * Note: if the DCTS bit is clear, the cts_n signal is asserted (low), and a reset occurs (software or
+ * otherwise), then the DCTS bit will be set when the reset is removed, provided cts_n remains asserted.
+ *
+ * The CTS, DSR, RI, and DCD Modem Status bits contain information on the current state of the modem
+ * control lines. CTS (bit 4) is the complement of cts_n, DSR (bit 5) is the complement of dsr_n, RI
+ * (bit 6) is the complement of ri_n, and DCD (bit 7) is the complement of dcd_n. In Loopback mode, CTS
+ * is the same as MCR bit 1 (RTS), DSR is the same as MCR bit 0 (DTR), RI is the same as MCR bit 2
+ * (Out1), and DCD is the same as MCR bit 3 (Out2).
+ *
+ * Note: The UART0 dsr_n and ri_n inputs are internally tied to power and are not present on the pins of
+ * the chip; the UART0 DSR and RI bits will therefore be '0' when not in Loopback mode. The UART1 dsr_n,
+ * ri_n, and dcd_n inputs are likewise tied to power and not present on the pins of the chip, so the
+ * UART1 DSR, RI, and DCD bits will be '0' when not in Loopback mode.
+ */
+union cvmx_mio_uartx_msr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_msr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t dcd : 1; /**< Data Carrier Detect input bit */
+ uint64_t ri : 1; /**< Ring Indicator input bit */
+ uint64_t dsr : 1; /**< Data Set Ready input bit */
+ uint64_t cts : 1; /**< Clear To Send input bit */
+ uint64_t ddcd : 1; /**< Delta Data Carrier Detect bit */
+ uint64_t teri : 1; /**< Trailing Edge of Ring Indicator bit */
+ uint64_t ddsr : 1; /**< Delta Data Set Ready bit */
+ uint64_t dcts : 1; /**< Delta Clear To Send bit */
+#else
+ uint64_t dcts : 1;
+ uint64_t ddsr : 1;
+ uint64_t teri : 1;
+ uint64_t ddcd : 1;
+ uint64_t cts : 1;
+ uint64_t dsr : 1;
+ uint64_t ri : 1;
+ uint64_t dcd : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_msr_s cn30xx;
+ struct cvmx_mio_uartx_msr_s cn31xx;
+ struct cvmx_mio_uartx_msr_s cn38xx;
+ struct cvmx_mio_uartx_msr_s cn38xxp2;
+ struct cvmx_mio_uartx_msr_s cn50xx;
+ struct cvmx_mio_uartx_msr_s cn52xx;
+ struct cvmx_mio_uartx_msr_s cn52xxp1;
+ struct cvmx_mio_uartx_msr_s cn56xx;
+ struct cvmx_mio_uartx_msr_s cn56xxp1;
+ struct cvmx_mio_uartx_msr_s cn58xx;
+ struct cvmx_mio_uartx_msr_s cn58xxp1;
+ struct cvmx_mio_uartx_msr_s cn61xx;
+ struct cvmx_mio_uartx_msr_s cn63xx;
+ struct cvmx_mio_uartx_msr_s cn63xxp1;
+ struct cvmx_mio_uartx_msr_s cn66xx;
+ struct cvmx_mio_uartx_msr_s cn68xx;
+ struct cvmx_mio_uartx_msr_s cn68xxp1;
+ struct cvmx_mio_uartx_msr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_msr cvmx_mio_uartx_msr_t;
+typedef cvmx_mio_uartx_msr_t cvmx_uart_msr_t;
+
+/**
+ * cvmx_mio_uart#_rbr
+ *
+ * MIO_UARTX_RBR = MIO UARTX Receive Buffer Register
+ *
+ * The Receive Buffer Register (RBR) is a read-only register that contains the data byte received on the
+ * serial input port (sin). The data in this register is valid only if the Data Ready (DR) bit in the
+ * Line Status Register (LSR) is set. When the FIFOs are programmed OFF, the data in the RBR must be
+ * read before the next data arrives; otherwise it is overwritten, resulting in an overrun error. When
+ * the FIFOs are programmed ON, this register accesses the head of the receive FIFO. If the receive FIFO
+ * is full (64 characters) and this register is not read before the next data character arrives, then the
+ * data already in the FIFO is preserved, but any incoming data is lost. An overrun error also occurs.
+ *
+ * Note: The Divisor Latch Address Bit (DLAB) of the Line Control Register (LCR) must be clear to access
+ * this register.
+ *
+ * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the
+ * RBR, THR, and DLL registers are the same.
+ */
+union cvmx_mio_uartx_rbr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_rbr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t rbr : 8; /**< Receive Buffer Register */
+#else
+ uint64_t rbr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_rbr_s cn30xx;
+ struct cvmx_mio_uartx_rbr_s cn31xx;
+ struct cvmx_mio_uartx_rbr_s cn38xx;
+ struct cvmx_mio_uartx_rbr_s cn38xxp2;
+ struct cvmx_mio_uartx_rbr_s cn50xx;
+ struct cvmx_mio_uartx_rbr_s cn52xx;
+ struct cvmx_mio_uartx_rbr_s cn52xxp1;
+ struct cvmx_mio_uartx_rbr_s cn56xx;
+ struct cvmx_mio_uartx_rbr_s cn56xxp1;
+ struct cvmx_mio_uartx_rbr_s cn58xx;
+ struct cvmx_mio_uartx_rbr_s cn58xxp1;
+ struct cvmx_mio_uartx_rbr_s cn61xx;
+ struct cvmx_mio_uartx_rbr_s cn63xx;
+ struct cvmx_mio_uartx_rbr_s cn63xxp1;
+ struct cvmx_mio_uartx_rbr_s cn66xx;
+ struct cvmx_mio_uartx_rbr_s cn68xx;
+ struct cvmx_mio_uartx_rbr_s cn68xxp1;
+ struct cvmx_mio_uartx_rbr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_rbr cvmx_mio_uartx_rbr_t;
+typedef cvmx_mio_uartx_rbr_t cvmx_uart_rbr_t;
+
+/**
+ * cvmx_mio_uart#_rfl
+ *
+ * MIO_UARTX_RFL = MIO UARTX Receive FIFO Level Register
+ *
+ * The Receive FIFO Level Register (RFL) indicates the number of data entries in the receive FIFO.
+ */
+union cvmx_mio_uartx_rfl {
+ uint64_t u64;
+ struct cvmx_mio_uartx_rfl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t rfl : 7; /**< Receive FIFO Level Register */
+#else
+ uint64_t rfl : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_mio_uartx_rfl_s cn30xx;
+ struct cvmx_mio_uartx_rfl_s cn31xx;
+ struct cvmx_mio_uartx_rfl_s cn38xx;
+ struct cvmx_mio_uartx_rfl_s cn38xxp2;
+ struct cvmx_mio_uartx_rfl_s cn50xx;
+ struct cvmx_mio_uartx_rfl_s cn52xx;
+ struct cvmx_mio_uartx_rfl_s cn52xxp1;
+ struct cvmx_mio_uartx_rfl_s cn56xx;
+ struct cvmx_mio_uartx_rfl_s cn56xxp1;
+ struct cvmx_mio_uartx_rfl_s cn58xx;
+ struct cvmx_mio_uartx_rfl_s cn58xxp1;
+ struct cvmx_mio_uartx_rfl_s cn61xx;
+ struct cvmx_mio_uartx_rfl_s cn63xx;
+ struct cvmx_mio_uartx_rfl_s cn63xxp1;
+ struct cvmx_mio_uartx_rfl_s cn66xx;
+ struct cvmx_mio_uartx_rfl_s cn68xx;
+ struct cvmx_mio_uartx_rfl_s cn68xxp1;
+ struct cvmx_mio_uartx_rfl_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_rfl cvmx_mio_uartx_rfl_t;
+typedef cvmx_mio_uartx_rfl_t cvmx_uart_rfl_t;
+
+/**
+ * cvmx_mio_uart#_rfw
+ *
+ * MIO_UARTX_RFW = MIO UARTX Receive FIFO Write Register
+ *
+ * The Receive FIFO Write Register (RFW) is only valid when FIFO access mode is enabled (FAR bit 0 is
+ * set). When FIFOs are enabled, this register is used to write data to the receive FIFO. Each
+ * consecutive write pushes the new data to the next write location in the receive FIFO. When FIFOs are
+ * not enabled, this register is used to write data to the RBR.
+ */
+union cvmx_mio_uartx_rfw {
+ uint64_t u64;
+ struct cvmx_mio_uartx_rfw_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t rffe : 1; /**< Receive FIFO Framing Error */
+ uint64_t rfpe : 1; /**< Receive FIFO Parity Error */
+ uint64_t rfwd : 8; /**< Receive FIFO Write Data */
+#else
+ uint64_t rfwd : 8;
+ uint64_t rfpe : 1;
+ uint64_t rffe : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_mio_uartx_rfw_s cn30xx;
+ struct cvmx_mio_uartx_rfw_s cn31xx;
+ struct cvmx_mio_uartx_rfw_s cn38xx;
+ struct cvmx_mio_uartx_rfw_s cn38xxp2;
+ struct cvmx_mio_uartx_rfw_s cn50xx;
+ struct cvmx_mio_uartx_rfw_s cn52xx;
+ struct cvmx_mio_uartx_rfw_s cn52xxp1;
+ struct cvmx_mio_uartx_rfw_s cn56xx;
+ struct cvmx_mio_uartx_rfw_s cn56xxp1;
+ struct cvmx_mio_uartx_rfw_s cn58xx;
+ struct cvmx_mio_uartx_rfw_s cn58xxp1;
+ struct cvmx_mio_uartx_rfw_s cn61xx;
+ struct cvmx_mio_uartx_rfw_s cn63xx;
+ struct cvmx_mio_uartx_rfw_s cn63xxp1;
+ struct cvmx_mio_uartx_rfw_s cn66xx;
+ struct cvmx_mio_uartx_rfw_s cn68xx;
+ struct cvmx_mio_uartx_rfw_s cn68xxp1;
+ struct cvmx_mio_uartx_rfw_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_rfw cvmx_mio_uartx_rfw_t;
+typedef cvmx_mio_uartx_rfw_t cvmx_uart_rfw_t;
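+
+/*
+ * Illustrative sketch (not part of the original SDK sources): use FIFO
+ * access mode to inject one byte into the receive FIFO for self-test, then
+ * read it back through the RBR. The CSR accessors and address macros are
+ * assumed.
+ *
+ *   cvmx_write_csr(CVMX_MIO_UARTX_FAR(uart), 1);  // enter FIFO access mode
+ *   cvmx_mio_uartx_rfw_t rfw;
+ *   rfw.u64 = 0;
+ *   rfw.s.rfwd = 0x5a;                            // data, no PE/FE flags
+ *   cvmx_write_csr(CVMX_MIO_UARTX_RFW(uart), rfw.u64);
+ *   uint8_t byte = cvmx_read_csr(CVMX_MIO_UARTX_RBR(uart)) & 0xff;
+ *   cvmx_write_csr(CVMX_MIO_UARTX_FAR(uart), 0);  // leave FIFO access mode
+ */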
+
+/**
+ * cvmx_mio_uart#_sbcr
+ *
+ * MIO_UARTX_SBCR = MIO UARTX Shadow Break Control Register
+ *
+ * The Shadow Break Control Register (SBCR) is a shadow register for the BREAK bit (LCR bit 6) that can
+ * be used to remove the burden of having to perform a read-modify-write on the LCR.
+ */
+union cvmx_mio_uartx_sbcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_sbcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t sbcr : 1; /**< Shadow Break Control */
+#else
+ uint64_t sbcr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_uartx_sbcr_s cn30xx;
+ struct cvmx_mio_uartx_sbcr_s cn31xx;
+ struct cvmx_mio_uartx_sbcr_s cn38xx;
+ struct cvmx_mio_uartx_sbcr_s cn38xxp2;
+ struct cvmx_mio_uartx_sbcr_s cn50xx;
+ struct cvmx_mio_uartx_sbcr_s cn52xx;
+ struct cvmx_mio_uartx_sbcr_s cn52xxp1;
+ struct cvmx_mio_uartx_sbcr_s cn56xx;
+ struct cvmx_mio_uartx_sbcr_s cn56xxp1;
+ struct cvmx_mio_uartx_sbcr_s cn58xx;
+ struct cvmx_mio_uartx_sbcr_s cn58xxp1;
+ struct cvmx_mio_uartx_sbcr_s cn61xx;
+ struct cvmx_mio_uartx_sbcr_s cn63xx;
+ struct cvmx_mio_uartx_sbcr_s cn63xxp1;
+ struct cvmx_mio_uartx_sbcr_s cn66xx;
+ struct cvmx_mio_uartx_sbcr_s cn68xx;
+ struct cvmx_mio_uartx_sbcr_s cn68xxp1;
+ struct cvmx_mio_uartx_sbcr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_sbcr cvmx_mio_uartx_sbcr_t;
+typedef cvmx_mio_uartx_sbcr_t cvmx_uart_sbcr_t;
+
+/**
+ * cvmx_mio_uart#_scr
+ *
+ * MIO_UARTX_SCR = MIO UARTX Scratchpad Register
+ *
+ * The Scratchpad Register (SCR) is an 8-bit read/write register for programmers to use as a temporary
+ * storage space.
+ */
+union cvmx_mio_uartx_scr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_scr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t scr : 8; /**< Scratchpad Register */
+#else
+ uint64_t scr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_scr_s cn30xx;
+ struct cvmx_mio_uartx_scr_s cn31xx;
+ struct cvmx_mio_uartx_scr_s cn38xx;
+ struct cvmx_mio_uartx_scr_s cn38xxp2;
+ struct cvmx_mio_uartx_scr_s cn50xx;
+ struct cvmx_mio_uartx_scr_s cn52xx;
+ struct cvmx_mio_uartx_scr_s cn52xxp1;
+ struct cvmx_mio_uartx_scr_s cn56xx;
+ struct cvmx_mio_uartx_scr_s cn56xxp1;
+ struct cvmx_mio_uartx_scr_s cn58xx;
+ struct cvmx_mio_uartx_scr_s cn58xxp1;
+ struct cvmx_mio_uartx_scr_s cn61xx;
+ struct cvmx_mio_uartx_scr_s cn63xx;
+ struct cvmx_mio_uartx_scr_s cn63xxp1;
+ struct cvmx_mio_uartx_scr_s cn66xx;
+ struct cvmx_mio_uartx_scr_s cn68xx;
+ struct cvmx_mio_uartx_scr_s cn68xxp1;
+ struct cvmx_mio_uartx_scr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_scr cvmx_mio_uartx_scr_t;
+typedef cvmx_mio_uartx_scr_t cvmx_uart_scr_t;
+
+/**
+ * cvmx_mio_uart#_sfe
+ *
+ * MIO_UARTX_SFE = MIO UARTX Shadow FIFO Enable Register
+ *
+ * The Shadow FIFO Enable Register (SFE) is a shadow register for the FIFO enable bit (FCR bit 0) that
+ * can be used to remove the burden of having to store the previously written value to the FCR in memory
+ * and having to mask this value so that only the FIFO enable bit gets updated.
+ */
+union cvmx_mio_uartx_sfe {
+ uint64_t u64;
+ struct cvmx_mio_uartx_sfe_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t sfe : 1; /**< Shadow FIFO Enable */
+#else
+ uint64_t sfe : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_uartx_sfe_s cn30xx;
+ struct cvmx_mio_uartx_sfe_s cn31xx;
+ struct cvmx_mio_uartx_sfe_s cn38xx;
+ struct cvmx_mio_uartx_sfe_s cn38xxp2;
+ struct cvmx_mio_uartx_sfe_s cn50xx;
+ struct cvmx_mio_uartx_sfe_s cn52xx;
+ struct cvmx_mio_uartx_sfe_s cn52xxp1;
+ struct cvmx_mio_uartx_sfe_s cn56xx;
+ struct cvmx_mio_uartx_sfe_s cn56xxp1;
+ struct cvmx_mio_uartx_sfe_s cn58xx;
+ struct cvmx_mio_uartx_sfe_s cn58xxp1;
+ struct cvmx_mio_uartx_sfe_s cn61xx;
+ struct cvmx_mio_uartx_sfe_s cn63xx;
+ struct cvmx_mio_uartx_sfe_s cn63xxp1;
+ struct cvmx_mio_uartx_sfe_s cn66xx;
+ struct cvmx_mio_uartx_sfe_s cn68xx;
+ struct cvmx_mio_uartx_sfe_s cn68xxp1;
+ struct cvmx_mio_uartx_sfe_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_sfe cvmx_mio_uartx_sfe_t;
+typedef cvmx_mio_uartx_sfe_t cvmx_uart_sfe_t;
+
+/**
+ * cvmx_mio_uart#_srr
+ *
+ * MIO_UARTX_SRR = MIO UARTX Software Reset Register
+ *
+ * The Software Reset Register (SRR) is a write-only register that resets the UART and/or the receive
+ * FIFO and/or the transmit FIFO.
+ *
+ * Bit 0 of the SRR is the UART Soft Reset (USR) bit. Setting this bit resets the UART.
+ *
+ * Bit 1 of the SRR is a shadow copy of the RX FIFO Reset bit (FCR bit 1). This can be used to remove
+ * the burden on software of having to store previously written FCR values (which are pretty static) just
+ * to reset the receive FIFO.
+ *
+ * Bit 2 of the SRR is a shadow copy of the TX FIFO Reset bit (FCR bit 2). This can be used to remove
+ * the burden on software of having to store previously written FCR values (which are pretty static) just
+ * to reset the transmit FIFO.
+ */
+union cvmx_mio_uartx_srr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_srr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t stfr : 1; /**< Shadow TX FIFO Reset */
+ uint64_t srfr : 1; /**< Shadow RX FIFO Reset */
+ uint64_t usr : 1; /**< UART Soft Reset */
+#else
+ uint64_t usr : 1;
+ uint64_t srfr : 1;
+ uint64_t stfr : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_mio_uartx_srr_s cn30xx;
+ struct cvmx_mio_uartx_srr_s cn31xx;
+ struct cvmx_mio_uartx_srr_s cn38xx;
+ struct cvmx_mio_uartx_srr_s cn38xxp2;
+ struct cvmx_mio_uartx_srr_s cn50xx;
+ struct cvmx_mio_uartx_srr_s cn52xx;
+ struct cvmx_mio_uartx_srr_s cn52xxp1;
+ struct cvmx_mio_uartx_srr_s cn56xx;
+ struct cvmx_mio_uartx_srr_s cn56xxp1;
+ struct cvmx_mio_uartx_srr_s cn58xx;
+ struct cvmx_mio_uartx_srr_s cn58xxp1;
+ struct cvmx_mio_uartx_srr_s cn61xx;
+ struct cvmx_mio_uartx_srr_s cn63xx;
+ struct cvmx_mio_uartx_srr_s cn63xxp1;
+ struct cvmx_mio_uartx_srr_s cn66xx;
+ struct cvmx_mio_uartx_srr_s cn68xx;
+ struct cvmx_mio_uartx_srr_s cn68xxp1;
+ struct cvmx_mio_uartx_srr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_srr cvmx_mio_uartx_srr_t;
+typedef cvmx_mio_uartx_srr_t cvmx_uart_srr_t;
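+
+/*
+ * Illustrative sketch (not part of the original SDK sources): flush both
+ * FIFOs without a full UART reset by writing the shadow FIFO-reset bits.
+ * The SRR is write-only; the CSR accessor and address macro are assumed.
+ *
+ *   cvmx_mio_uartx_srr_t srr;
+ *   srr.u64 = 0;
+ *   srr.s.srfr = 1;     // reset the RX FIFO
+ *   srr.s.stfr = 1;     // reset the TX FIFO (usr = 0: no soft reset)
+ *   cvmx_write_csr(CVMX_MIO_UARTX_SRR(uart), srr.u64);
+ */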
+
+/**
+ * cvmx_mio_uart#_srt
+ *
+ * MIO_UARTX_SRT = MIO UARTX Shadow RX Trigger Register
+ *
+ * The Shadow RX Trigger Register (SRT) is a shadow register for the RX Trigger bits (FCR bits 7:6) that
+ * can be used to remove the burden of having to store the previously written value to the FCR in memory
+ * and having to mask this value so that only the RX Trigger bits get updated.
+ */
+union cvmx_mio_uartx_srt {
+ uint64_t u64;
+ struct cvmx_mio_uartx_srt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t srt : 2; /**< Shadow RX Trigger */
+#else
+ uint64_t srt : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_mio_uartx_srt_s cn30xx;
+ struct cvmx_mio_uartx_srt_s cn31xx;
+ struct cvmx_mio_uartx_srt_s cn38xx;
+ struct cvmx_mio_uartx_srt_s cn38xxp2;
+ struct cvmx_mio_uartx_srt_s cn50xx;
+ struct cvmx_mio_uartx_srt_s cn52xx;
+ struct cvmx_mio_uartx_srt_s cn52xxp1;
+ struct cvmx_mio_uartx_srt_s cn56xx;
+ struct cvmx_mio_uartx_srt_s cn56xxp1;
+ struct cvmx_mio_uartx_srt_s cn58xx;
+ struct cvmx_mio_uartx_srt_s cn58xxp1;
+ struct cvmx_mio_uartx_srt_s cn61xx;
+ struct cvmx_mio_uartx_srt_s cn63xx;
+ struct cvmx_mio_uartx_srt_s cn63xxp1;
+ struct cvmx_mio_uartx_srt_s cn66xx;
+ struct cvmx_mio_uartx_srt_s cn68xx;
+ struct cvmx_mio_uartx_srt_s cn68xxp1;
+ struct cvmx_mio_uartx_srt_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_srt cvmx_mio_uartx_srt_t;
+typedef cvmx_mio_uartx_srt_t cvmx_uart_srt_t;
+
+/**
+ * cvmx_mio_uart#_srts
+ *
+ * MIO_UARTX_SRTS = MIO UARTX Shadow Request To Send Register
+ *
+ * The Shadow Request To Send Register (SRTS) is a shadow register for the RTS bit (MCR bit 1) that can
+ * be used to remove the burden of having to perform a read-modify-write on the MCR.
+ */
+union cvmx_mio_uartx_srts {
+ uint64_t u64;
+ struct cvmx_mio_uartx_srts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t srts : 1; /**< Shadow Request To Send */
+#else
+ uint64_t srts : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_uartx_srts_s cn30xx;
+ struct cvmx_mio_uartx_srts_s cn31xx;
+ struct cvmx_mio_uartx_srts_s cn38xx;
+ struct cvmx_mio_uartx_srts_s cn38xxp2;
+ struct cvmx_mio_uartx_srts_s cn50xx;
+ struct cvmx_mio_uartx_srts_s cn52xx;
+ struct cvmx_mio_uartx_srts_s cn52xxp1;
+ struct cvmx_mio_uartx_srts_s cn56xx;
+ struct cvmx_mio_uartx_srts_s cn56xxp1;
+ struct cvmx_mio_uartx_srts_s cn58xx;
+ struct cvmx_mio_uartx_srts_s cn58xxp1;
+ struct cvmx_mio_uartx_srts_s cn61xx;
+ struct cvmx_mio_uartx_srts_s cn63xx;
+ struct cvmx_mio_uartx_srts_s cn63xxp1;
+ struct cvmx_mio_uartx_srts_s cn66xx;
+ struct cvmx_mio_uartx_srts_s cn68xx;
+ struct cvmx_mio_uartx_srts_s cn68xxp1;
+ struct cvmx_mio_uartx_srts_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_srts cvmx_mio_uartx_srts_t;
+typedef cvmx_mio_uartx_srts_t cvmx_uart_srts_t;
+
+/**
+ * cvmx_mio_uart#_stt
+ *
+ * MIO_UARTX_STT = MIO UARTX Shadow TX Trigger Register
+ *
+ * The Shadow TX Trigger Register (STT) is a shadow register for the TX Trigger bits (FCR bits 5:4) that
+ * can be used to remove the burden of having to store the previously written value to the FCR in memory
+ * and having to mask this value so that only the TX Trigger bits get updated.
+ */
+union cvmx_mio_uartx_stt {
+ uint64_t u64;
+ struct cvmx_mio_uartx_stt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t stt : 2; /**< Shadow TX Trigger */
+#else
+ uint64_t stt : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_mio_uartx_stt_s cn30xx;
+ struct cvmx_mio_uartx_stt_s cn31xx;
+ struct cvmx_mio_uartx_stt_s cn38xx;
+ struct cvmx_mio_uartx_stt_s cn38xxp2;
+ struct cvmx_mio_uartx_stt_s cn50xx;
+ struct cvmx_mio_uartx_stt_s cn52xx;
+ struct cvmx_mio_uartx_stt_s cn52xxp1;
+ struct cvmx_mio_uartx_stt_s cn56xx;
+ struct cvmx_mio_uartx_stt_s cn56xxp1;
+ struct cvmx_mio_uartx_stt_s cn58xx;
+ struct cvmx_mio_uartx_stt_s cn58xxp1;
+ struct cvmx_mio_uartx_stt_s cn61xx;
+ struct cvmx_mio_uartx_stt_s cn63xx;
+ struct cvmx_mio_uartx_stt_s cn63xxp1;
+ struct cvmx_mio_uartx_stt_s cn66xx;
+ struct cvmx_mio_uartx_stt_s cn68xx;
+ struct cvmx_mio_uartx_stt_s cn68xxp1;
+ struct cvmx_mio_uartx_stt_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_stt cvmx_mio_uartx_stt_t;
+typedef cvmx_mio_uartx_stt_t cvmx_uart_stt_t;
+
+/**
+ * cvmx_mio_uart#_tfl
+ *
+ * MIO_UARTX_TFL = MIO UARTX Transmit FIFO Level Register
+ *
+ * The Transmit FIFO Level Register (TFL) indicates the number of data entries in the transmit FIFO.
+ */
+union cvmx_mio_uartx_tfl {
+ uint64_t u64;
+ struct cvmx_mio_uartx_tfl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t tfl : 7; /**< Transmit FIFO Level Register */
+#else
+ uint64_t tfl : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_mio_uartx_tfl_s cn30xx;
+ struct cvmx_mio_uartx_tfl_s cn31xx;
+ struct cvmx_mio_uartx_tfl_s cn38xx;
+ struct cvmx_mio_uartx_tfl_s cn38xxp2;
+ struct cvmx_mio_uartx_tfl_s cn50xx;
+ struct cvmx_mio_uartx_tfl_s cn52xx;
+ struct cvmx_mio_uartx_tfl_s cn52xxp1;
+ struct cvmx_mio_uartx_tfl_s cn56xx;
+ struct cvmx_mio_uartx_tfl_s cn56xxp1;
+ struct cvmx_mio_uartx_tfl_s cn58xx;
+ struct cvmx_mio_uartx_tfl_s cn58xxp1;
+ struct cvmx_mio_uartx_tfl_s cn61xx;
+ struct cvmx_mio_uartx_tfl_s cn63xx;
+ struct cvmx_mio_uartx_tfl_s cn63xxp1;
+ struct cvmx_mio_uartx_tfl_s cn66xx;
+ struct cvmx_mio_uartx_tfl_s cn68xx;
+ struct cvmx_mio_uartx_tfl_s cn68xxp1;
+ struct cvmx_mio_uartx_tfl_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_tfl cvmx_mio_uartx_tfl_t;
+typedef cvmx_mio_uartx_tfl_t cvmx_uart_tfl_t;
+
+/**
+ * cvmx_mio_uart#_tfr
+ *
+ * MIO_UARTX_TFR = MIO UARTX Transmit FIFO Read Register
+ *
+ * The Transmit FIFO Read Register (TFR) is only valid when FIFO access mode is enabled (FAR bit 0 is
+ * set). When FIFOs are enabled, reading this register gives the data at the top of the transmit FIFO.
+ * Each consecutive read pops the transmit FIFO and gives the next data value that is currently at the
+ * top of the FIFO. When FIFOs are not enabled, reading this register gives the data in the THR.
+ */
+union cvmx_mio_uartx_tfr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_tfr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t tfr : 8; /**< Transmit FIFO Read Register */
+#else
+ uint64_t tfr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_tfr_s cn30xx;
+ struct cvmx_mio_uartx_tfr_s cn31xx;
+ struct cvmx_mio_uartx_tfr_s cn38xx;
+ struct cvmx_mio_uartx_tfr_s cn38xxp2;
+ struct cvmx_mio_uartx_tfr_s cn50xx;
+ struct cvmx_mio_uartx_tfr_s cn52xx;
+ struct cvmx_mio_uartx_tfr_s cn52xxp1;
+ struct cvmx_mio_uartx_tfr_s cn56xx;
+ struct cvmx_mio_uartx_tfr_s cn56xxp1;
+ struct cvmx_mio_uartx_tfr_s cn58xx;
+ struct cvmx_mio_uartx_tfr_s cn58xxp1;
+ struct cvmx_mio_uartx_tfr_s cn61xx;
+ struct cvmx_mio_uartx_tfr_s cn63xx;
+ struct cvmx_mio_uartx_tfr_s cn63xxp1;
+ struct cvmx_mio_uartx_tfr_s cn66xx;
+ struct cvmx_mio_uartx_tfr_s cn68xx;
+ struct cvmx_mio_uartx_tfr_s cn68xxp1;
+ struct cvmx_mio_uartx_tfr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_tfr cvmx_mio_uartx_tfr_t;
+typedef cvmx_mio_uartx_tfr_t cvmx_uart_tfr_t;
+
+/**
+ * cvmx_mio_uart#_thr
+ *
+ * MIO_UARTX_THR = MIO UARTX Transmit Holding Register
+ *
+ * The Transmit Holding Register (THR) is a write-only register that contains data to be transmitted on
+ * serial output port (sout). Data can be written to the THR any time that the THR Empty (THRE) bit of
+ * the Line Status Register (LSR) is set.
+ *
+ * If FIFOs are not enabled and THRE is set, writing a single character to the THR clears the THRE. Any
+ * additional writes to the THR before the THRE is set again cause the THR data to be overwritten.
+ *
+ * If FIFOs are enabled and THRE is set (and Programmable THRE mode disabled), 64 characters of data may
+ * be written to the THR before the FIFO is full. Any attempt to write data when the FIFO is full results
+ * in the write data being lost.
+ *
+ * Note: The Divisor Latch Address Bit (DLAB) of the Line Control Register (LCR) must be clear to access
+ * this register.
+ *
+ * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the
+ * RBR, THR, and DLL registers are the same.
+ */
+union cvmx_mio_uartx_thr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_thr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t thr : 8; /**< Transmit Holding Register */
+#else
+ uint64_t thr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uartx_thr_s cn30xx;
+ struct cvmx_mio_uartx_thr_s cn31xx;
+ struct cvmx_mio_uartx_thr_s cn38xx;
+ struct cvmx_mio_uartx_thr_s cn38xxp2;
+ struct cvmx_mio_uartx_thr_s cn50xx;
+ struct cvmx_mio_uartx_thr_s cn52xx;
+ struct cvmx_mio_uartx_thr_s cn52xxp1;
+ struct cvmx_mio_uartx_thr_s cn56xx;
+ struct cvmx_mio_uartx_thr_s cn56xxp1;
+ struct cvmx_mio_uartx_thr_s cn58xx;
+ struct cvmx_mio_uartx_thr_s cn58xxp1;
+ struct cvmx_mio_uartx_thr_s cn61xx;
+ struct cvmx_mio_uartx_thr_s cn63xx;
+ struct cvmx_mio_uartx_thr_s cn63xxp1;
+ struct cvmx_mio_uartx_thr_s cn66xx;
+ struct cvmx_mio_uartx_thr_s cn68xx;
+ struct cvmx_mio_uartx_thr_s cn68xxp1;
+ struct cvmx_mio_uartx_thr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_thr cvmx_mio_uartx_thr_t;
+typedef cvmx_mio_uartx_thr_t cvmx_uart_thr_t;
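+
+/*
+ * Illustrative sketch (not part of the original SDK sources): polled
+ * transmit. With Programmable THRE Interrupt mode disabled, LSR[THRE] set
+ * means the THR (or TX FIFO) can accept another character. The CSR
+ * accessors and address macros are assumed.
+ *
+ *   cvmx_mio_uartx_lsr_t lsr;
+ *   do {
+ *       lsr.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
+ *   } while (!lsr.s.thre);
+ *   cvmx_write_csr(CVMX_MIO_UARTX_THR(uart), byte);
+ */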
+
+/**
+ * cvmx_mio_uart#_usr
+ *
+ * MIO_UARTX_USR = MIO UARTX UART Status Register
+ *
+ * The UART Status Register (USR) contains UART status information.
+ *
+ * USR bit 0 is the BUSY bit. When set, this bit indicates that a serial transfer is in progress; when
+ * clear, it indicates that the UART is idle or inactive.
+ *
+ * Note: In PASS3, the BUSY bit will always be clear.
+ *
+ * USR bits 1-4 indicate the following FIFO status: TX FIFO Not Full (TFNF), TX FIFO Empty (TFE), RX
+ * FIFO Not Empty (RFNE), and RX FIFO Full (RFF).
+ */
+union cvmx_mio_uartx_usr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_usr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t rff : 1; /**< RX FIFO Full */
+ uint64_t rfne : 1; /**< RX FIFO Not Empty */
+ uint64_t tfe : 1; /**< TX FIFO Empty */
+ uint64_t tfnf : 1; /**< TX FIFO Not Full */
+ uint64_t busy : 1; /**< Busy bit (always 0 in PASS3) */
+#else
+ uint64_t busy : 1;
+ uint64_t tfnf : 1;
+ uint64_t tfe : 1;
+ uint64_t rfne : 1;
+ uint64_t rff : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_mio_uartx_usr_s cn30xx;
+ struct cvmx_mio_uartx_usr_s cn31xx;
+ struct cvmx_mio_uartx_usr_s cn38xx;
+ struct cvmx_mio_uartx_usr_s cn38xxp2;
+ struct cvmx_mio_uartx_usr_s cn50xx;
+ struct cvmx_mio_uartx_usr_s cn52xx;
+ struct cvmx_mio_uartx_usr_s cn52xxp1;
+ struct cvmx_mio_uartx_usr_s cn56xx;
+ struct cvmx_mio_uartx_usr_s cn56xxp1;
+ struct cvmx_mio_uartx_usr_s cn58xx;
+ struct cvmx_mio_uartx_usr_s cn58xxp1;
+ struct cvmx_mio_uartx_usr_s cn61xx;
+ struct cvmx_mio_uartx_usr_s cn63xx;
+ struct cvmx_mio_uartx_usr_s cn63xxp1;
+ struct cvmx_mio_uartx_usr_s cn66xx;
+ struct cvmx_mio_uartx_usr_s cn68xx;
+ struct cvmx_mio_uartx_usr_s cn68xxp1;
+ struct cvmx_mio_uartx_usr_s cnf71xx;
+};
+typedef union cvmx_mio_uartx_usr cvmx_mio_uartx_usr_t;
+typedef cvmx_mio_uartx_usr_t cvmx_uart_usr_t;
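+
+/*
+ * Illustrative sketch (not part of the original SDK sources): on pre-PASS3
+ * parts the LCR may only be written while the UART is idle, so poll
+ * USR[BUSY] first (on PASS3 the bit is always clear and the loop falls
+ * through). The CSR accessors and address macros are assumed.
+ *
+ *   cvmx_mio_uartx_usr_t usr;
+ *   do {
+ *       usr.u64 = cvmx_read_csr(CVMX_MIO_UARTX_USR(uart));
+ *   } while (usr.s.busy);
+ *   cvmx_write_csr(CVMX_MIO_UARTX_LCR(uart), lcr.u64);
+ */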
+
+/**
+ * cvmx_mio_uart2_dlh
+ */
+union cvmx_mio_uart2_dlh {
+ uint64_t u64;
+ struct cvmx_mio_uart2_dlh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t dlh : 8; /**< Divisor Latch High Register */
+#else
+ uint64_t dlh : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_dlh_s cn52xx;
+ struct cvmx_mio_uart2_dlh_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_dlh cvmx_mio_uart2_dlh_t;
+
+/**
+ * cvmx_mio_uart2_dll
+ */
+union cvmx_mio_uart2_dll {
+ uint64_t u64;
+ struct cvmx_mio_uart2_dll_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t dll : 8; /**< Divisor Latch Low Register */
+#else
+ uint64_t dll : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_dll_s cn52xx;
+ struct cvmx_mio_uart2_dll_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_dll cvmx_mio_uart2_dll_t;
+
+/**
+ * cvmx_mio_uart2_far
+ */
+union cvmx_mio_uart2_far {
+ uint64_t u64;
+ struct cvmx_mio_uart2_far_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t far : 1; /**< FIFO Access Register */
+#else
+ uint64_t far : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_uart2_far_s cn52xx;
+ struct cvmx_mio_uart2_far_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_far cvmx_mio_uart2_far_t;
+
+/**
+ * cvmx_mio_uart2_fcr
+ */
+union cvmx_mio_uart2_fcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_fcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t rxtrig : 2; /**< RX Trigger */
+ uint64_t txtrig : 2; /**< TX Trigger */
+ uint64_t reserved_3_3 : 1;
+ uint64_t txfr : 1; /**< TX FIFO reset */
+ uint64_t rxfr : 1; /**< RX FIFO reset */
+ uint64_t en : 1; /**< FIFO enable */
+#else
+ uint64_t en : 1;
+ uint64_t rxfr : 1;
+ uint64_t txfr : 1;
+ uint64_t reserved_3_3 : 1;
+ uint64_t txtrig : 2;
+ uint64_t rxtrig : 2;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_fcr_s cn52xx;
+ struct cvmx_mio_uart2_fcr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_fcr cvmx_mio_uart2_fcr_t;
+
+/**
+ * cvmx_mio_uart2_htx
+ */
+union cvmx_mio_uart2_htx {
+ uint64_t u64;
+ struct cvmx_mio_uart2_htx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t htx : 1; /**< Halt TX */
+#else
+ uint64_t htx : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_uart2_htx_s cn52xx;
+ struct cvmx_mio_uart2_htx_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_htx cvmx_mio_uart2_htx_t;
+
+/**
+ * cvmx_mio_uart2_ier
+ */
+union cvmx_mio_uart2_ier {
+ uint64_t u64;
+ struct cvmx_mio_uart2_ier_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t ptime : 1; /**< Programmable THRE Interrupt mode enable */
+ uint64_t reserved_4_6 : 3;
+ uint64_t edssi : 1; /**< Enable Modem Status Interrupt */
+ uint64_t elsi : 1; /**< Enable Receiver Line Status Interrupt */
+ uint64_t etbei : 1; /**< Enable Transmitter Holding Register Empty Interrupt */
+ uint64_t erbfi : 1; /**< Enable Received Data Available Interrupt */
+#else
+ uint64_t erbfi : 1;
+ uint64_t etbei : 1;
+ uint64_t elsi : 1;
+ uint64_t edssi : 1;
+ uint64_t reserved_4_6 : 3;
+ uint64_t ptime : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_ier_s cn52xx;
+ struct cvmx_mio_uart2_ier_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_ier cvmx_mio_uart2_ier_t;
+
+/**
+ * cvmx_mio_uart2_iir
+ */
+union cvmx_mio_uart2_iir {
+ uint64_t u64;
+ struct cvmx_mio_uart2_iir_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t fen : 2; /**< FIFO-enabled bits */
+ uint64_t reserved_4_5 : 2;
+ uint64_t iid : 4; /**< Interrupt ID */
+#else
+ uint64_t iid : 4;
+ uint64_t reserved_4_5 : 2;
+ uint64_t fen : 2;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_iir_s cn52xx;
+ struct cvmx_mio_uart2_iir_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_iir cvmx_mio_uart2_iir_t;
+
+/**
+ * cvmx_mio_uart2_lcr
+ */
+union cvmx_mio_uart2_lcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_lcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t dlab : 1; /**< Divisor Latch Address bit */
+ uint64_t brk : 1; /**< Break Control bit */
+ uint64_t reserved_5_5 : 1;
+ uint64_t eps : 1; /**< Even Parity Select bit */
+ uint64_t pen : 1; /**< Parity Enable bit */
+ uint64_t stop : 1; /**< Stop Control bit */
+ uint64_t cls : 2; /**< Character Length Select */
+#else
+ uint64_t cls : 2;
+ uint64_t stop : 1;
+ uint64_t pen : 1;
+ uint64_t eps : 1;
+ uint64_t reserved_5_5 : 1;
+ uint64_t brk : 1;
+ uint64_t dlab : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_lcr_s cn52xx;
+ struct cvmx_mio_uart2_lcr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_lcr cvmx_mio_uart2_lcr_t;
+
+/**
+ * cvmx_mio_uart2_lsr
+ */
+union cvmx_mio_uart2_lsr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_lsr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t ferr : 1; /**< Error in Receiver FIFO bit */
+ uint64_t temt : 1; /**< Transmitter Empty bit */
+ uint64_t thre : 1; /**< Transmitter Holding Register Empty bit */
+ uint64_t bi : 1; /**< Break Interrupt bit */
+ uint64_t fe : 1; /**< Framing Error bit */
+ uint64_t pe : 1; /**< Parity Error bit */
+ uint64_t oe : 1; /**< Overrun Error bit */
+ uint64_t dr : 1; /**< Data Ready bit */
+#else
+ uint64_t dr : 1;
+ uint64_t oe : 1;
+ uint64_t pe : 1;
+ uint64_t fe : 1;
+ uint64_t bi : 1;
+ uint64_t thre : 1;
+ uint64_t temt : 1;
+ uint64_t ferr : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_lsr_s cn52xx;
+ struct cvmx_mio_uart2_lsr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_lsr cvmx_mio_uart2_lsr_t;
+
+/**
+ * cvmx_mio_uart2_mcr
+ */
+union cvmx_mio_uart2_mcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_mcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t afce : 1; /**< Auto Flow Control Enable bit */
+ uint64_t loop : 1; /**< Loopback bit */
+ uint64_t out2 : 1; /**< OUT2 output bit */
+ uint64_t out1 : 1; /**< OUT1 output bit */
+ uint64_t rts : 1; /**< Request To Send output bit */
+ uint64_t dtr : 1; /**< Data Terminal Ready output bit */
+#else
+ uint64_t dtr : 1;
+ uint64_t rts : 1;
+ uint64_t out1 : 1;
+ uint64_t out2 : 1;
+ uint64_t loop : 1;
+ uint64_t afce : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_mio_uart2_mcr_s cn52xx;
+ struct cvmx_mio_uart2_mcr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_mcr cvmx_mio_uart2_mcr_t;
+
+/**
+ * cvmx_mio_uart2_msr
+ */
+union cvmx_mio_uart2_msr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_msr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t dcd : 1; /**< Data Carrier Detect input bit */
+ uint64_t ri : 1; /**< Ring Indicator input bit */
+ uint64_t dsr : 1; /**< Data Set Ready input bit */
+ uint64_t cts : 1; /**< Clear To Send input bit */
+ uint64_t ddcd : 1; /**< Delta Data Carrier Detect bit */
+ uint64_t teri : 1; /**< Trailing Edge of Ring Indicator bit */
+ uint64_t ddsr : 1; /**< Delta Data Set Ready bit */
+ uint64_t dcts : 1; /**< Delta Clear To Send bit */
+#else
+ uint64_t dcts : 1;
+ uint64_t ddsr : 1;
+ uint64_t teri : 1;
+ uint64_t ddcd : 1;
+ uint64_t cts : 1;
+ uint64_t dsr : 1;
+ uint64_t ri : 1;
+ uint64_t dcd : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_msr_s cn52xx;
+ struct cvmx_mio_uart2_msr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_msr cvmx_mio_uart2_msr_t;
+
+/**
+ * cvmx_mio_uart2_rbr
+ */
+union cvmx_mio_uart2_rbr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_rbr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t rbr : 8; /**< Receive Buffer Register */
+#else
+ uint64_t rbr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_rbr_s cn52xx;
+ struct cvmx_mio_uart2_rbr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_rbr cvmx_mio_uart2_rbr_t;
+
+/**
+ * cvmx_mio_uart2_rfl
+ */
+union cvmx_mio_uart2_rfl {
+ uint64_t u64;
+ struct cvmx_mio_uart2_rfl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t rfl : 7; /**< Receive FIFO Level Register */
+#else
+ uint64_t rfl : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_mio_uart2_rfl_s cn52xx;
+ struct cvmx_mio_uart2_rfl_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_rfl cvmx_mio_uart2_rfl_t;
+
+/**
+ * cvmx_mio_uart2_rfw
+ */
+union cvmx_mio_uart2_rfw {
+ uint64_t u64;
+ struct cvmx_mio_uart2_rfw_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t rffe : 1; /**< Receive FIFO Framing Error */
+ uint64_t rfpe : 1; /**< Receive FIFO Parity Error */
+ uint64_t rfwd : 8; /**< Receive FIFO Write Data */
+#else
+ uint64_t rfwd : 8;
+ uint64_t rfpe : 1;
+ uint64_t rffe : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_mio_uart2_rfw_s cn52xx;
+ struct cvmx_mio_uart2_rfw_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_rfw cvmx_mio_uart2_rfw_t;
+
+/**
+ * cvmx_mio_uart2_sbcr
+ */
+union cvmx_mio_uart2_sbcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_sbcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t sbcr : 1; /**< Shadow Break Control */
+#else
+ uint64_t sbcr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_uart2_sbcr_s cn52xx;
+ struct cvmx_mio_uart2_sbcr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_sbcr cvmx_mio_uart2_sbcr_t;
+
+/**
+ * cvmx_mio_uart2_scr
+ */
+union cvmx_mio_uart2_scr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_scr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t scr : 8; /**< Scratchpad Register */
+#else
+ uint64_t scr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_scr_s cn52xx;
+ struct cvmx_mio_uart2_scr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_scr cvmx_mio_uart2_scr_t;
+
+/**
+ * cvmx_mio_uart2_sfe
+ */
+union cvmx_mio_uart2_sfe {
+ uint64_t u64;
+ struct cvmx_mio_uart2_sfe_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t sfe : 1; /**< Shadow FIFO Enable */
+#else
+ uint64_t sfe : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_uart2_sfe_s cn52xx;
+ struct cvmx_mio_uart2_sfe_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_sfe cvmx_mio_uart2_sfe_t;
+
+/**
+ * cvmx_mio_uart2_srr
+ */
+union cvmx_mio_uart2_srr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_srr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t stfr : 1; /**< Shadow TX FIFO Reset */
+ uint64_t srfr : 1; /**< Shadow RX FIFO Reset */
+ uint64_t usr : 1; /**< UART Soft Reset */
+#else
+ uint64_t usr : 1;
+ uint64_t srfr : 1;
+ uint64_t stfr : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_mio_uart2_srr_s cn52xx;
+ struct cvmx_mio_uart2_srr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_srr cvmx_mio_uart2_srr_t;
+
+/**
+ * cvmx_mio_uart2_srt
+ */
+union cvmx_mio_uart2_srt {
+ uint64_t u64;
+ struct cvmx_mio_uart2_srt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t srt : 2; /**< Shadow RX Trigger */
+#else
+ uint64_t srt : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_mio_uart2_srt_s cn52xx;
+ struct cvmx_mio_uart2_srt_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_srt cvmx_mio_uart2_srt_t;
+
+/**
+ * cvmx_mio_uart2_srts
+ */
+union cvmx_mio_uart2_srts {
+ uint64_t u64;
+ struct cvmx_mio_uart2_srts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t srts : 1; /**< Shadow Request To Send */
+#else
+ uint64_t srts : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_mio_uart2_srts_s cn52xx;
+ struct cvmx_mio_uart2_srts_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_srts cvmx_mio_uart2_srts_t;
+
+/**
+ * cvmx_mio_uart2_stt
+ */
+union cvmx_mio_uart2_stt {
+ uint64_t u64;
+ struct cvmx_mio_uart2_stt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t stt : 2; /**< Shadow TX Trigger */
+#else
+ uint64_t stt : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_mio_uart2_stt_s cn52xx;
+ struct cvmx_mio_uart2_stt_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_stt cvmx_mio_uart2_stt_t;
+
+/**
+ * cvmx_mio_uart2_tfl
+ */
+union cvmx_mio_uart2_tfl {
+ uint64_t u64;
+ struct cvmx_mio_uart2_tfl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t tfl : 7; /**< Transmit FIFO Level Register */
+#else
+ uint64_t tfl : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_mio_uart2_tfl_s cn52xx;
+ struct cvmx_mio_uart2_tfl_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_tfl cvmx_mio_uart2_tfl_t;
+
+/**
+ * cvmx_mio_uart2_tfr
+ */
+union cvmx_mio_uart2_tfr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_tfr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t tfr : 8; /**< Transmit FIFO Read Register */
+#else
+ uint64_t tfr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_tfr_s cn52xx;
+ struct cvmx_mio_uart2_tfr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_tfr cvmx_mio_uart2_tfr_t;
+
+/**
+ * cvmx_mio_uart2_thr
+ */
+union cvmx_mio_uart2_thr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_thr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t thr : 8; /**< Transmit Holding Register */
+#else
+ uint64_t thr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mio_uart2_thr_s cn52xx;
+ struct cvmx_mio_uart2_thr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_thr cvmx_mio_uart2_thr_t;
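
LSR and THR together support the usual polled-transmit loop; a sketch,
assuming the CVMX_MIO_UART2_LSR/CVMX_MIO_UART2_THR address macros from
earlier in this header:

    #include <stdint.h>
    #include "cvmx.h"
    #include "cvmx-mio-defs.h"

    /* Busy-wait for THRE, then place one byte in the holding register. */
    static void uart2_putc(uint8_t c)
    {
        cvmx_mio_uart2_lsr_t lsr;
        cvmx_mio_uart2_thr_t thr;

        do {
            lsr.u64 = cvmx_read_csr(CVMX_MIO_UART2_LSR);
        } while (!lsr.s.thre);

        thr.u64 = 0;
        thr.s.thr = c;
        cvmx_write_csr(CVMX_MIO_UART2_THR, thr.u64);
    }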
+
+/**
+ * cvmx_mio_uart2_usr
+ */
+union cvmx_mio_uart2_usr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_usr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t rff : 1; /**< RX FIFO Full */
+ uint64_t rfne : 1; /**< RX FIFO Not Empty */
+ uint64_t tfe : 1; /**< TX FIFO Empty */
+ uint64_t tfnf : 1; /**< TX FIFO Not Full */
+ uint64_t busy : 1; /**< Busy bit (always 0 in PASS3) */
+#else
+ uint64_t busy : 1;
+ uint64_t tfnf : 1;
+ uint64_t tfe : 1;
+ uint64_t rfne : 1;
+ uint64_t rff : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_mio_uart2_usr_s cn52xx;
+ struct cvmx_mio_uart2_usr_s cn52xxp1;
+};
+typedef union cvmx_mio_uart2_usr cvmx_mio_uart2_usr_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-mio-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-mixx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-mixx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-mixx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1525 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-mixx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon mixx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_MIXX_DEFS_H__
+#define __CVMX_MIXX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_BIST(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_BIST(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100078ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_BIST(offset) (CVMX_ADD_IO_SEG(0x0001070000100078ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100020ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_CTL(offset) (CVMX_ADD_IO_SEG(0x0001070000100020ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_INTENA(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_INTENA(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100050ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_INTENA(offset) (CVMX_ADD_IO_SEG(0x0001070000100050ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_IRCNT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_IRCNT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100030ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_IRCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100030ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_IRHWM(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_IRHWM(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100028ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_IRHWM(offset) (CVMX_ADD_IO_SEG(0x0001070000100028ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_IRING1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_IRING1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100010ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_IRING1(offset) (CVMX_ADD_IO_SEG(0x0001070000100010ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_IRING2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_IRING2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100018ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_IRING2(offset) (CVMX_ADD_IO_SEG(0x0001070000100018ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_ISR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_ISR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100048ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_ISR(offset) (CVMX_ADD_IO_SEG(0x0001070000100048ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_ORCNT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_ORCNT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100040ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_ORCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100040ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_ORHWM(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_ORHWM(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100038ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_ORHWM(offset) (CVMX_ADD_IO_SEG(0x0001070000100038ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_ORING1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_ORING1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100000ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_ORING1(offset) (CVMX_ADD_IO_SEG(0x0001070000100000ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_ORING2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_ORING2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100008ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_ORING2(offset) (CVMX_ADD_IO_SEG(0x0001070000100008ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_REMCNT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_REMCNT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100058ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_REMCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100058ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_TSCTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_TSCTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100068ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_TSCTL(offset) (CVMX_ADD_IO_SEG(0x0001070000100068ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MIXX_TSTAMP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_MIXX_TSTAMP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000100060ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_MIXX_TSTAMP(offset) (CVMX_ADD_IO_SEG(0x0001070000100060ull) + ((offset) & 1) * 2048)
+#endif
+
+/**
+ * cvmx_mix#_bist
+ *
+ * MIX_BIST = MIX BIST Register
+ *
+ * Description:
+ * NOTE: To read the MIX_BIST register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_bist {
+ uint64_t u64;
+ struct cvmx_mixx_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t opfdat : 1; /**< Bist Results for AGO OPF Buffer RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t mrgdat : 1; /**< Bist Results for AGI MRG Buffer RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t mrqdat : 1; /**< Bist Results for NBR CSR RdReq RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ipfdat : 1; /**< Bist Results for MIX Inbound Packet RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t irfdat : 1; /**< Bist Results for MIX I-Ring Entry RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t orfdat : 1; /**< Bist Results for MIX O-Ring Entry RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t orfdat : 1;
+ uint64_t irfdat : 1;
+ uint64_t ipfdat : 1;
+ uint64_t mrqdat : 1;
+ uint64_t mrgdat : 1;
+ uint64_t opfdat : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_mixx_bist_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mrqdat : 1; /**< Bist Results for NBR CSR RdReq RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ipfdat : 1; /**< Bist Results for MIX Inbound Packet RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t irfdat : 1; /**< Bist Results for MIX I-Ring Entry RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t orfdat : 1; /**< Bist Results for MIX O-Ring Entry RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t orfdat : 1;
+ uint64_t irfdat : 1;
+ uint64_t ipfdat : 1;
+ uint64_t mrqdat : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_mixx_bist_cn52xx cn52xxp1;
+ struct cvmx_mixx_bist_cn52xx cn56xx;
+ struct cvmx_mixx_bist_cn52xx cn56xxp1;
+ struct cvmx_mixx_bist_s cn61xx;
+ struct cvmx_mixx_bist_s cn63xx;
+ struct cvmx_mixx_bist_s cn63xxp1;
+ struct cvmx_mixx_bist_s cn66xx;
+ struct cvmx_mixx_bist_s cn68xx;
+ struct cvmx_mixx_bist_s cn68xxp1;
+};
+typedef union cvmx_mixx_bist cvmx_mixx_bist_t;
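
Since every result bit encodes "0: GOOD (or BIST not run), 1: BAD", a
health check only has to test for any set bit; a sketch using the
CVMX_MIXX_BIST() address function defined above:

    #include "cvmx.h"
    #include "cvmx-mixx-defs.h"

    /* Return non-zero if any MIX RAM reported a BIST failure. */
    static int mix_bist_failed(int port)
    {
        cvmx_mixx_bist_t bist;

        bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(port));
        return bist.u64 != 0;
    }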
+
+/**
+ * cvmx_mix#_ctl
+ *
+ * MIX_CTL = MIX Control Register
+ *
+ * Description:
+ * NOTE: To write to the MIX_CTL register, a device would issue an IOBST directed at the MIO.
+ * To read the MIX_CTL register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_ctl {
+ uint64_t u64;
+ struct cvmx_mixx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t ts_thresh : 4; /**< TimeStamp Interrupt Threshold
+ When the \#of pending Timestamp interrupts (MIX_TSCTL[TSCNT])
+ is greater than MIX_CTL[TS_THRESH], then a programmable
+ TimeStamp Interrupt is issued (see MIX_ISR[TS],
+ MIX_INTENA[TSENA]).
+ SWNOTE: For o63, since the implementation only supports
+ 4 outstanding timestamp interrupts, this field should
+ only be programmed from [0..3]. */
+ uint64_t crc_strip : 1; /**< HW CRC Strip Enable
+ When enabled, the last 4 bytes(CRC) of the ingress packet
+ are not included in cumulative packet byte length.
+ In other words, the cumulative LEN field for all
+ I-Ring Buffer Entries associated with a given ingress
+ packet will be 4 bytes less (so that the final 4B HW CRC
+ packet data is not processed by software). */
+ uint64_t busy : 1; /**< MIX Busy Status bit
+ MIX will assert busy status any time there are:
+ 1) L2/DRAM reads in-flight (NCB-arb to read
+ response)
+ 2) L2/DRAM writes in-flight (NCB-arb to write
+ data is sent).
+ 3) L2/DRAM write commits in-flight (NCB-arb to write
+ commit response).
+ NOTE: After MIX_CTL[EN]=0, the MIX will eventually
+ complete any "inflight" transactions, at which point the
+ BUSY will de-assert. */
+ uint64_t en : 1; /**< MIX Enable bit
+ When EN=0, MIX will no longer arbitrate for
+ any new L2/DRAM read/write requests on the NCB Bus.
+ MIX will complete any requests that are currently
+ pended for the NCB Bus. */
+ uint64_t reset : 1; /**< MIX Soft Reset
+ When SW writes a '1' to MIX_CTL[RESET], the
+ MII-MIX/AGL logic will execute a soft reset.
+ NOTE: During a soft reset, CSR accesses are not affected.
+ However, the values of the CSR fields will be affected by
+ soft reset (except MIX_CTL[RESET] itself).
+ NOTE: After power-on, the MII-AGL/MIX are held in reset
+ until the MIX_CTL[RESET] is written to zero. SW MUST also
+ perform a MIX_CTL CSR read after this write to ensure the
+ soft reset de-assertion has had sufficient time to propagate
+ to all MIO-MIX internal logic before any subsequent MIX CSR
+ accesses are issued.
+ The intended "soft reset" sequence is: (please also
+ refer to HRM Section 12.6.2 on MIX/AGL Block Reset).
+ 1) Write MIX_CTL[EN]=0
+ [To prevent any NEW transactions from being started]
+ 2) Wait for MIX_CTL[BUSY]=0
+ [To indicate that all inflight transactions have
+ completed]
+ 3) Write MIX_CTL[RESET]=1, followed by a MIX_CTL CSR read
+ and wait for the result.
+ 4) Re-Initialize the MIX/AGL just as would be done
+ for a hard reset.
+ NOTE: Once the MII has been soft-reset, please refer to HRM Section
+ 12.6.1 MIX/AGL BringUp Sequence to complete the MIX/AGL
+ re-initialization sequence. */
+ uint64_t lendian : 1; /**< Packet Little Endian Mode
+ (0: Big Endian Mode/1: Little Endian Mode)
+ When the mode is set, MIX will byte-swap packet data
+ loads/stores at the MIX/NCB boundary. */
+ uint64_t nbtarb : 1; /**< MIX CB-Request Arbitration Mode.
+ When set to zero, the arbiter is fixed priority with
+ the following priority scheme:
+ Highest Priority: I-Ring Packet Write Request
+ O-Ring Packet Read Request
+ I-Ring Entry Write Request
+ I-Ring Entry Read Request
+ O-Ring Entry Read Request
+ When set to one, the arbiter is round robin. */
+ uint64_t mrq_hwm : 2; /**< MIX CB-Request FIFO Programmable High Water Mark.
+ The MRQ contains 16 CB-Requests which are CSR Rd/Wr
+ Requests. If the MRQ backs up with "HWM" entries,
+ then new CB-Requests are 'stalled'.
+ [0]: HWM = 11
+ [1]: HWM = 10
+ [2]: HWM = 9
+ [3]: HWM = 8
+ NOTE: This must only be written at power-on/boot time. */
+#else
+ uint64_t mrq_hwm : 2;
+ uint64_t nbtarb : 1;
+ uint64_t lendian : 1;
+ uint64_t reset : 1;
+ uint64_t en : 1;
+ uint64_t busy : 1;
+ uint64_t crc_strip : 1;
+ uint64_t ts_thresh : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_mixx_ctl_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t crc_strip : 1; /**< HW CRC Strip Enable
+ When enabled, the last 4 bytes(CRC) of the ingress packet
+ are not included in cumulative packet byte length.
+ In other words, the cumulative LEN field for all
+ I-Ring Buffer Entries associated with a given ingress
+ packet will be 4 bytes less (so that the final 4B HW CRC
+ packet data is not processed by software). */
+ uint64_t busy : 1; /**< MIX Busy Status bit
+ MIX will assert busy status any time there are:
+ 1) L2/DRAM reads in-flight (NCB-arb to read
+ response)
+ 2) L2/DRAM writes in-flight (NCB-arb to write
+ data is sent).
+ 3) L2/DRAM write commits in-flight (NCB-arb to write
+ commit response).
+ NOTE: After MIX_CTL[EN]=0, the MIX will eventually
+ complete any "inflight" transactions, at which point the
+ BUSY will de-assert. */
+ uint64_t en : 1; /**< MIX Enable bit
+ When EN=0, MIX will no longer arbitrate for
+ any new L2/DRAM read/write requests on the NCB Bus.
+ MIX will complete any requests that are currently
+ pended for the NCB Bus. */
+ uint64_t reset : 1; /**< MIX Soft Reset
+ When SW writes a '1' to MIX_CTL[RESET], the
+ MII-MIX/AGL logic will execute a soft reset.
+ NOTE: During a soft reset, CSR accesses are not affected.
+ However, the values of the CSR fields will be affected by
+ soft reset (except MIX_CTL[RESET] itself).
+ NOTE: After power-on, the MII-AGL/MIX are held in reset
+ until the MIX_CTL[RESET] is written to zero. SW MUST also
+ perform a MIX_CTL CSR read after this write to ensure the
+ soft reset de-assertion has had sufficient time to propagate
+ to all MIO-MIX internal logic before any subsequent MIX CSR
+ accesses are issued.
+ The intended "soft reset" sequence is: (please also
+ refer to HRM Section 12.6.2 on MIX/AGL Block Reset).
+ 1) Write MIX_CTL[EN]=0
+ [To prevent any NEW transactions from being started]
+ 2) Wait for MIX_CTL[BUSY]=0
+ [To indicate that all inflight transactions have
+ completed]
+ 3) Write MIX_CTL[RESET]=1, followed by a MIX_CTL CSR read
+ and wait for the result.
+ 4) Re-Initialize the MIX/AGL just as would be done
+ for a hard reset.
+ NOTE: Once the MII has been soft-reset, please refer to HRM Section
+ 12.6.1 MIX/AGL BringUp Sequence to complete the MIX/AGL
+ re-initialization sequence. */
+ uint64_t lendian : 1; /**< Packet Little Endian Mode
+ (0: Big Endian Mode/1: Little Endian Mode)
+ When the mode is set, MIX will byte-swap packet data
+ loads/stores at the MIX/NCB boundary. */
+ uint64_t nbtarb : 1; /**< MIX CB-Request Arbitration Mode.
+ When set to zero, the arbiter is fixed priority with
+ the following priority scheme:
+ Highest Priority: I-Ring Packet Write Request
+ O-Ring Packet Read Request
+ I-Ring Entry Write Request
+ I-Ring Entry Read Request
+ O-Ring Entry Read Request
+ When set to one, the arbiter is round robin. */
+ uint64_t mrq_hwm : 2; /**< MIX CB-Request FIFO Programmable High Water Mark.
+ The MRQ contains 16 CB-Requests which are CSR Rd/Wr
+ Requests. If the MRQ backs up with "HWM" entries,
+ then new CB-Requests are 'stalled'.
+ [0]: HWM = 11
+ [1]: HWM = 10
+ [2]: HWM = 9
+ [3]: HWM = 8
+ NOTE: This must only be written at power-on/boot time. */
+#else
+ uint64_t mrq_hwm : 2;
+ uint64_t nbtarb : 1;
+ uint64_t lendian : 1;
+ uint64_t reset : 1;
+ uint64_t en : 1;
+ uint64_t busy : 1;
+ uint64_t crc_strip : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn52xx;
+ struct cvmx_mixx_ctl_cn52xx cn52xxp1;
+ struct cvmx_mixx_ctl_cn52xx cn56xx;
+ struct cvmx_mixx_ctl_cn52xx cn56xxp1;
+ struct cvmx_mixx_ctl_s cn61xx;
+ struct cvmx_mixx_ctl_s cn63xx;
+ struct cvmx_mixx_ctl_s cn63xxp1;
+ struct cvmx_mixx_ctl_s cn66xx;
+ struct cvmx_mixx_ctl_s cn68xx;
+ struct cvmx_mixx_ctl_s cn68xxp1;
+};
+typedef union cvmx_mixx_ctl cvmx_mixx_ctl_t;
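
The RESET description above spells out the intended ordering; written
out as code, using only the four documented steps and the
CVMX_MIXX_CTL() address function (a sketch, not a drop-in replacement
for the SDK's own init code):

    #include "cvmx.h"
    #include "cvmx-mixx-defs.h"

    /* MIX/AGL soft reset per the MIX_CTL[RESET] sequence above. */
    static void mix_soft_reset(int port)
    {
        cvmx_mixx_ctl_t ctl;

        /* 1) EN=0: stop arbitrating for new NCB transactions. */
        ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
        ctl.s.en = 0;
        cvmx_write_csr(CVMX_MIXX_CTL(port), ctl.u64);

        /* 2) Wait for BUSY=0: all in-flight transactions complete. */
        do {
            ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
        } while (ctl.s.busy);

        /* 3) RESET=1, then a read-back so the reset de-assertion
         * propagates before any further MIX CSR accesses. */
        ctl.s.reset = 1;
        cvmx_write_csr(CVMX_MIXX_CTL(port), ctl.u64);
        (void)cvmx_read_csr(CVMX_MIXX_CTL(port));

        /* 4) Caller re-initializes MIX/AGL as after a hard reset. */
    }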
+
+/**
+ * cvmx_mix#_intena
+ *
+ * MIX_INTENA = MIX Local Interrupt Enable Mask Register
+ *
+ * Description:
+ * NOTE: To write to the MIX_INTENA register, a device would issue an IOBST directed at the MIO.
+ * To read the MIX_INTENA register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_intena {
+ uint64_t u64;
+ struct cvmx_mixx_intena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t tsena : 1; /**< TimeStamp Interrupt Enable
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an Outbound Ring with Timestamp
+ event (see: MIX_ISR[TS]). */
+ uint64_t orunena : 1; /**< ORCNT UnderFlow Detected Enable
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an ORCNT underflow condition
+ MIX_ISR[ORUN]. */
+ uint64_t irunena : 1; /**< IRCNT UnderFlow Interrupt Enable
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an IRCNT underflow condition
+ MIX_ISR[IRUN]. */
+ uint64_t data_drpena : 1; /**< Data was dropped due to RX FIFO full Interrupt
+ enable. If both the global interrupt mask bits
+ (CIU2_EN_xx_yy_PKT[MII]) and the local interrupt mask
+ bit (DATA_DRPENA) are set, then an interrupt is
+ reported for this event. */
+ uint64_t ithena : 1; /**< Inbound Ring Threshold Exceeded Interrupt Enable
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an Inbound Ring Threshold
+ Exceeded event (IRTHRESH). */
+ uint64_t othena : 1; /**< Outbound Ring Threshold Exceeded Interrupt Enable
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an Outbound Ring Threshold
+ Exceeded event (ORTHRESH). */
+ uint64_t ivfena : 1; /**< Inbound DoorBell(IDBELL) Overflow Interrupt Enable
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an Inbound Doorbell Overflow
+ event (IDBOVF). */
+ uint64_t ovfena : 1; /**< Outbound DoorBell(ODBELL) Overflow Interrupt Enable
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an Outbound Doorbell Overflow
+ event (ODBOVF). */
+#else
+ uint64_t ovfena : 1;
+ uint64_t ivfena : 1;
+ uint64_t othena : 1;
+ uint64_t ithena : 1;
+ uint64_t data_drpena : 1;
+ uint64_t irunena : 1;
+ uint64_t orunena : 1;
+ uint64_t tsena : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mixx_intena_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t orunena : 1; /**< ORCNT UnderFlow Detected Enable
+ If both the global interrupt mask bits (CIU_INTx_EN*[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an ORCNT underflow condition
+ MIX_ISR[ORUN]. */
+ uint64_t irunena : 1; /**< IRCNT UnderFlow Interrupt Enable
+ If both the global interrupt mask bits (CIU_INTx_EN*[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an IRCNT underflow condition
+ MIX_ISR[IRUN]. */
+ uint64_t data_drpena : 1; /**< Data was dropped due to RX FIFO full Interrupt
+ enable. If both the global interrupt mask bits
+ (CIU_INTx_EN*[MII]) and the local interrupt mask
+ bit (DATA_DRPENA) are set, then an interrupt is
+ reported for this event. */
+ uint64_t ithena : 1; /**< Inbound Ring Threshold Exceeded Interrupt Enable
+ If both the global interrupt mask bits (CIU_INTx_EN*[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an Inbound Ring Threshold
+ Exceeded event (IRTHRESH). */
+ uint64_t othena : 1; /**< Outbound Ring Threshold Exceeded Interrupt Enable
+ If both the global interrupt mask bits (CIU_INTx_EN*[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an Outbound Ring Threshold
+ Exceeded event (ORTHRESH). */
+ uint64_t ivfena : 1; /**< Inbound DoorBell(IDBELL) Overflow Interrupt Enable
+ If both the global interrupt mask bits (CIU_INTx_EN*[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an Inbound Doorbell Overflow
+ event (IDBOVF). */
+ uint64_t ovfena : 1; /**< Outbound DoorBell(ODBELL) Overflow Interrupt Enable
+ If both the global interrupt mask bits (CIU_INTx_EN*[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an Outbound Doorbell Overflow
+ event (ODBOVF). */
+#else
+ uint64_t ovfena : 1;
+ uint64_t ivfena : 1;
+ uint64_t othena : 1;
+ uint64_t ithena : 1;
+ uint64_t data_drpena : 1;
+ uint64_t irunena : 1;
+ uint64_t orunena : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } cn52xx;
+ struct cvmx_mixx_intena_cn52xx cn52xxp1;
+ struct cvmx_mixx_intena_cn52xx cn56xx;
+ struct cvmx_mixx_intena_cn52xx cn56xxp1;
+ struct cvmx_mixx_intena_s cn61xx;
+ struct cvmx_mixx_intena_s cn63xx;
+ struct cvmx_mixx_intena_s cn63xxp1;
+ struct cvmx_mixx_intena_s cn66xx;
+ struct cvmx_mixx_intena_s cn68xx;
+ struct cvmx_mixx_intena_s cn68xxp1;
+};
+typedef union cvmx_mixx_intena cvmx_mixx_intena_t;
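
Unmasking a MIX event is then a matter of setting the matching local
bit (the global CIU enable must be programmed separately, as the
comments above note); a sketch:

    #include "cvmx.h"
    #include "cvmx-mixx-defs.h"

    /* Unmask the inbound-ring threshold interrupt for one MIX port. */
    static void mix_enable_irthresh_irq(int port)
    {
        cvmx_mixx_intena_t intena;

        intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
        intena.s.ithena = 1;
        cvmx_write_csr(CVMX_MIXX_INTENA(port), intena.u64);
    }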
+
+/**
+ * cvmx_mix#_ircnt
+ *
+ * MIX_IRCNT = MIX I-Ring Pending Packet Counter
+ *
+ * Description:
+ * NOTE: To write to the MIX_IRCNT register, a device would issue an IOBST directed at the MIO.
+ * To read the MIX_IRCNT register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_ircnt {
+ uint64_t u64;
+ struct cvmx_mixx_ircnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t ircnt : 20; /**< Pending \# of I-Ring Packets.
+ Whenever HW writes a completion code of Done, Trunc,
+ CRCErr or Err, it increments the IRCNT (to indicate
+ to SW the \# of pending Input packets in system memory).
+ NOTE: The HW guarantees that the completion code write
+ is always visible in system memory BEFORE it increments
+ the IRCNT.
+ Reads of IRCNT return the current inbound packet count.
+ Writes of IRCNT decrement the count by the value
+ written.
+ This register is used to generate interrupts to alert
+ SW of pending inbound MIX packets in system memory.
+ NOTE: In the case of inbound packets that span multiple
+ I-Ring entries, SW must keep track of the \# of I-Ring Entries
+ associated with a given inbound packet to reclaim the
+ proper \# of I-Ring Entries for re-use. */
+#else
+ uint64_t ircnt : 20;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_mixx_ircnt_s cn52xx;
+ struct cvmx_mixx_ircnt_s cn52xxp1;
+ struct cvmx_mixx_ircnt_s cn56xx;
+ struct cvmx_mixx_ircnt_s cn56xxp1;
+ struct cvmx_mixx_ircnt_s cn61xx;
+ struct cvmx_mixx_ircnt_s cn63xx;
+ struct cvmx_mixx_ircnt_s cn63xxp1;
+ struct cvmx_mixx_ircnt_s cn66xx;
+ struct cvmx_mixx_ircnt_s cn68xx;
+ struct cvmx_mixx_ircnt_s cn68xxp1;
+};
+typedef union cvmx_mixx_ircnt cvmx_mixx_ircnt_t;
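
Because a write decrements the counter by the value written, software
acknowledges inbound packets by writing back the number it has
consumed (never more than the current count, or MIX_ISR[IRUN] is
raised); a sketch:

    #include "cvmx.h"
    #include "cvmx-mixx-defs.h"

    /* Retire 'processed' inbound packets from IRCNT. */
    static void mix_ack_inbound(int port, uint64_t processed)
    {
        cvmx_mixx_ircnt_t ircnt;

        ircnt.u64 = 0;
        ircnt.s.ircnt = processed; /* write decrements by this amount */
        cvmx_write_csr(CVMX_MIXX_IRCNT(port), ircnt.u64);
    }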
+
+/**
+ * cvmx_mix#_irhwm
+ *
+ * MIX_IRHWM = MIX I-Ring High-Water Mark Threshold Register
+ *
+ * Description:
+ * NOTE: To write to the MIX_IRHWM register, a device would issue an IOBST directed at the MIO.
+ * To read the MIX_IRHWM register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_irhwm {
+ uint64_t u64;
+ struct cvmx_mixx_irhwm_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t ibplwm : 20; /**< I-Ring BackPressure Low Water Mark Threshold.
+ When the \#of available I-Ring Entries (IDBELL)
+ is less than IBPLWM, the AGL-MAC will:
+ a) In full-duplex mode: send periodic PAUSE packets.
+ b) In half-duplex mode: Force collisions.
+ This programmable mechanism is provided as a means
+ to backpressure input traffic 'early' enough (so
+ that packets are not 'dropped' by OCTEON). */
+ uint64_t irhwm : 20; /**< I-Ring Entry High Water Mark Threshold.
+ Used to determine when the \# of Inbound packets
+ in system memory(MIX_IRCNT[IRCNT]) exceeds this IRHWM
+ threshold.
+ NOTE: The power-on value of the CIU2_EN_xx_yy_PKT[MII]
+ interrupt enable bits is zero and must be enabled
+ to allow interrupts to be reported. */
+#else
+ uint64_t irhwm : 20;
+ uint64_t ibplwm : 20;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_mixx_irhwm_s cn52xx;
+ struct cvmx_mixx_irhwm_s cn52xxp1;
+ struct cvmx_mixx_irhwm_s cn56xx;
+ struct cvmx_mixx_irhwm_s cn56xxp1;
+ struct cvmx_mixx_irhwm_s cn61xx;
+ struct cvmx_mixx_irhwm_s cn63xx;
+ struct cvmx_mixx_irhwm_s cn63xxp1;
+ struct cvmx_mixx_irhwm_s cn66xx;
+ struct cvmx_mixx_irhwm_s cn68xx;
+ struct cvmx_mixx_irhwm_s cn68xxp1;
+};
+typedef union cvmx_mixx_irhwm cvmx_mixx_irhwm_t;
+
+/**
+ * cvmx_mix#_iring1
+ *
+ * MIX_IRING1 = MIX Inbound Ring Register \#1
+ *
+ * Description:
+ * NOTE: To write to the MIX_IRING1 register, a device would issue an IOBST directed at the MIO.
+ * To read the MIX_IRING1 register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_iring1 {
+ uint64_t u64;
+ struct cvmx_mixx_iring1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t isize : 20; /**< Represents the Inbound Ring Buffer's Size(in 8B
+ words). The ring can be as large as 1M entries.
+ NOTE: This CSR MUST be set up (written) by SW at power-on
+ (when IDBELL/IRCNT=0). */
+ uint64_t ibase : 37; /**< Represents the 8B-aligned base address of the first
+ Inbound Ring entry in system memory.
+ NOTE: SW MUST ONLY write to this register during
+ power-on/boot code. */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t ibase : 37;
+ uint64_t isize : 20;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } s;
+ struct cvmx_mixx_iring1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t isize : 20; /**< Represents the Inbound Ring Buffer's Size(in 8B
+ words). The ring can be as large as 1M entries.
+ NOTE: This CSR MUST be set up (written) by SW at power-on
+ (when IDBELL/IRCNT=0). */
+ uint64_t reserved_36_39 : 4;
+ uint64_t ibase : 33; /**< Represents the 8B-aligned base address of the first
+ Inbound Ring entry in system memory.
+ NOTE: SW MUST ONLY write to this register during
+ power-on/boot code. */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t ibase : 33;
+ uint64_t reserved_36_39 : 4;
+ uint64_t isize : 20;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } cn52xx;
+ struct cvmx_mixx_iring1_cn52xx cn52xxp1;
+ struct cvmx_mixx_iring1_cn52xx cn56xx;
+ struct cvmx_mixx_iring1_cn52xx cn56xxp1;
+ struct cvmx_mixx_iring1_s cn61xx;
+ struct cvmx_mixx_iring1_s cn63xx;
+ struct cvmx_mixx_iring1_s cn63xxp1;
+ struct cvmx_mixx_iring1_s cn66xx;
+ struct cvmx_mixx_iring1_s cn68xx;
+ struct cvmx_mixx_iring1_s cn68xxp1;
+};
+typedef union cvmx_mixx_iring1 cvmx_mixx_iring1_t;
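
A boot-time setup sketch under the constraints stated above (8B-aligned
base, written only while IDBELL/IRCNT are zero); ring_phys and
num_words are illustrative parameter names, and encoding IBASE as
physical address bits [39:3] is inferred from the field layout above:

    #include "cvmx.h"
    #include "cvmx-mixx-defs.h"

    /* Program the inbound ring base and size once at power-on. */
    static void mix_setup_iring(int port, uint64_t ring_phys,
                                uint64_t num_words)
    {
        cvmx_mixx_iring1_t iring1;

        iring1.u64 = 0;
        iring1.s.ibase = ring_phys >> 3; /* 8B-aligned base address */
        iring1.s.isize = num_words;      /* ring size in 8B words */
        cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
    }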
+
+/**
+ * cvmx_mix#_iring2
+ *
+ * MIX_IRING2 = MIX Inbound Ring Register \#2
+ *
+ * Description:
+ * NOTE: To write to the MIX_IRING2 register, a device would issue an IOBST directed at the MIO.
+ * To read the MIX_IRING2 register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_iring2 {
+ uint64_t u64;
+ struct cvmx_mixx_iring2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t itlptr : 20; /**< The Inbound Ring Tail Pointer selects the I-Ring
+ Entry that the HW will process next. After the HW
+ completes receiving an inbound packet, it increments
+ the I-Ring Tail Pointer. [NOTE: The I-Ring Tail
+ Pointer HW increment is always modulo ISIZE.]
+ NOTE: This field is 'read-only' to SW. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t idbell : 20; /**< Represents the cumulative total of pending
+ Inbound Ring Buffer Entries. Each I-Ring
+ Buffer Entry contains 1) an L2/DRAM byte pointer
+ along with 2) a Byte Length.
+ After SW inserts a new entry into the I-Ring Buffer,
+ it "rings the doorbell for the inbound ring". When
+ the MIX HW receives the doorbell ring, it advances
+ the doorbell count for the I-Ring.
+ SW must never cause the doorbell count for the
+ I-Ring to exceed the size of the I-ring(ISIZE).
+ A read of the CSR indicates the current doorbell
+ count. */
+#else
+ uint64_t idbell : 20;
+ uint64_t reserved_20_31 : 12;
+ uint64_t itlptr : 20;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+ struct cvmx_mixx_iring2_s cn52xx;
+ struct cvmx_mixx_iring2_s cn52xxp1;
+ struct cvmx_mixx_iring2_s cn56xx;
+ struct cvmx_mixx_iring2_s cn56xxp1;
+ struct cvmx_mixx_iring2_s cn61xx;
+ struct cvmx_mixx_iring2_s cn63xx;
+ struct cvmx_mixx_iring2_s cn63xxp1;
+ struct cvmx_mixx_iring2_s cn66xx;
+ struct cvmx_mixx_iring2_s cn68xx;
+ struct cvmx_mixx_iring2_s cn68xxp1;
+};
+typedef union cvmx_mixx_iring2 cvmx_mixx_iring2_t;
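
Ringing the inbound doorbell is then a plain write of the number of
newly inserted I-Ring entries, which the hardware adds to its doorbell
count; a sketch:

    #include "cvmx.h"
    #include "cvmx-mixx-defs.h"

    /* Advertise 'new_entries' freshly written I-Ring entries to HW. */
    static void mix_ring_idbell(int port, uint64_t new_entries)
    {
        cvmx_mixx_iring2_t iring2;

        iring2.u64 = 0;
        iring2.s.idbell = new_entries;
        cvmx_write_csr(CVMX_MIXX_IRING2(port), iring2.u64);
    }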
+
+/**
+ * cvmx_mix#_isr
+ *
+ * MIX_ISR = MIX Interrupt/Status Register
+ *
+ * Description:
+ * NOTE: To write to the MIX_ISR register, a device would issue an IOBST directed at the MIO.
+ * To read the MIX_ISR register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_isr {
+ uint64_t u64;
+ struct cvmx_mixx_isr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t ts : 1; /**< TimeStamp Interrupt
+ When the \#of pending Timestamp Interrupts (MIX_TSCTL[TSCNT])
+ is greater than the TimeStamp Interrupt Threshold
+ (MIX_CTL[TS_THRESH]) value this interrupt bit is set.
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and this local interrupt mask bit are set, then an
+ interrupt is reported for an Outbound Ring with Timestamp
+ event (see: MIX_INTENA[TSENA]). */
+ uint64_t orun : 1; /**< ORCNT UnderFlow Detected
+ If SW writes a larger value than what is currently
+ in the MIX_ORCNT[ORCNT], then HW will report the
+ underflow condition.
+ NOTE: The MIX_ORCNT[ORCNT] will clamp to zero.
+ NOTE: If an ORUN underflow condition is detected,
+ the integrity of the MIX/AGL HW state has
+ been compromised. To recover, SW must issue a
+ software reset sequence (see: MIX_CTL[RESET]). */
+ uint64_t irun : 1; /**< IRCNT UnderFlow Detected
+ If SW writes a larger value than what is currently
+ in the MIX_IRCNT[IRCNT], then HW will report the
+ underflow condition.
+ NOTE: The MIX_IRCNT[IRCNT] will clamp to zero.
+ NOTE: If an IRUN underflow condition is detected,
+ the integrity of the MIX/AGL HW state has
+ been compromised. To recover, SW must issue a
+ software reset sequence (see: MIX_CTL[RESET]). */
+ uint64_t data_drp : 1; /**< Data was dropped due to RX FIFO full
+ If this does occur, the DATA_DRP is set and the
+ CIU2_RAW_PKT[MII] bit is set.
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and the local interrupt mask bit (DATA_DRPENA) are set, then an
+ interrupt is reported for this event. */
+ uint64_t irthresh : 1; /**< Inbound Ring Packet Threshold Exceeded
+ When the pending \#inbound packets in system
+ memory(IRCNT) has exceeded a programmable threshold
+ (IRHWM), then this bit is set. If this does occur,
+ the IRTHRESH is set and the CIU2_RAW_PKT[MII] bit
+ is set if ((MIX_ISR & MIX_INTENA) != 0).
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and the local interrupt mask bit (ITHENA) are set, then an
+ interrupt is reported for this event. */
+ uint64_t orthresh : 1; /**< Outbound Ring Packet Threshold Exceeded
+ When the pending \#outbound packets in system
+ memory(ORCNT) has exceeded a programmable threshold
+ (ORHWM), then this bit is set. If this does occur,
+ the ORTHRESH is set and the CIU2_RAW_PKT[MII] bit
+ is set if ((MIX_ISR & MIX_INTENA) != 0).
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and the local interrupt mask bit (OTHENA) are set, then an
+ interrupt is reported for this event. */
+ uint64_t idblovf : 1; /**< Inbound DoorBell(IDBELL) Overflow Detected
+ If SW attempts to write to the MIX_IRING2[IDBELL]
+ with a value greater than the remaining \#of
+ I-Ring Buffer Entries (MIX_REMCNT[IREMCNT]), then
+ the following occurs:
+ 1) The MIX_IRING2[IDBELL] write is IGNORED
+ 2) The IDBLOVF is set and the CIU2_RAW_PKT[MII]
+ bit is set if ((MIX_ISR & MIX_INTENA) != 0).
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and the local interrupt mask bit (IVFENA) are set, then an
+ interrupt is reported for this event.
+ SW should keep track of the \#I-Ring Entries in use
+ (ie: cumulative \# of IDBELL writes), and ensure that
+ future IDBELL writes don't exceed the size of the
+ I-Ring Buffer (MIX_IRING1[ISIZE]).
+ SW must reclaim I-Ring Entries by keeping track of the
+ \#IRing-Entries, and writing to the MIX_IRCNT[IRCNT].
+ NOTE: The MIX_IRCNT[IRCNT] register represents the
+ total \#packets(not IRing Entries) and SW must further
+ keep track of the \# of I-Ring Entries associated with
+ each packet as they are processed.
+ NOTE: There is no recovery from an IDBLOVF Interrupt.
+ If it occurs, it's an indication that SW has
+ overwritten the I-Ring buffer, and the only recourse
+ is a HW reset. */
+ uint64_t odblovf : 1; /**< Outbound DoorBell(ODBELL) Overflow Detected
+ If SW attempts to write to the MIX_ORING2[ODBELL]
+ with a value greater than the remaining \#of
+ O-Ring Buffer Entries (MIX_REMCNT[OREMCNT]), then
+ the following occurs:
+ 1) The MIX_ORING2[ODBELL] write is IGNORED
+ 2) The ODBLOVF is set and the CIU2_RAW_PKT[MII]
+ bit is set if ((MIX_ISR & MIX_INTENA) != 0).
+ If both the global interrupt mask bits (CIU2_EN_xx_yy_PKT[MII])
+ and the local interrupt mask bit (OVFENA) are set, then an
+ interrupt is reported for this event.
+ SW should keep track of the \#O-Ring Entries in use
+ (ie: cumulative \# of ODBELL writes), and ensure that
+ future ODBELL writes don't exceed the size of the
+ O-Ring Buffer (MIX_ORING1[OSIZE]).
+ SW must reclaim O-Ring Entries by writing to the
+ MIX_ORCNT[ORCNT].
+ NOTE: There is no recovery from an ODBLOVF Interrupt.
+ If it occurs, it's an indication that SW has
+ overwritten the O-Ring buffer, and the only recourse
+ is a HW reset. */
+#else
+ uint64_t odblovf : 1;
+ uint64_t idblovf : 1;
+ uint64_t orthresh : 1;
+ uint64_t irthresh : 1;
+ uint64_t data_drp : 1;
+ uint64_t irun : 1;
+ uint64_t orun : 1;
+ uint64_t ts : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mixx_isr_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t orun : 1; /**< ORCNT UnderFlow Detected
+ If SW writes a larger value than what is currently
+ in the MIX_ORCNT[ORCNT], then HW will report the
+ underflow condition.
+ NOTE: The MIX_ORCNT[ORCNT] will clamp to zero.
+ NOTE: If an ORUN underflow condition is detected,
+ the integrity of the MIX/AGL HW state has
+ been compromised. To recover, SW must issue a
+ software reset sequence (see: MIX_CTL[RESET]). */
+ uint64_t irun : 1; /**< IRCNT UnderFlow Detected
+ If SW writes a larger value than what is currently
+ in the MIX_IRCNT[IRCNT], then HW will report the
+ underflow condition.
+ NOTE: The MIX_IRCNT[IRCNT] will clamp to zero.
+ NOTE: If an IRUN underflow condition is detected,
+ the integrity of the MIX/AGL HW state has
+ been compromised. To recover, SW must issue a
+ software reset sequence (see: MIX_CTL[RESET]). */
+ uint64_t data_drp : 1; /**< Data was dropped due to RX FIFO full
+ If this does occur, the DATA_DRP is set and the
+ CIU_INTx_SUM0,4[MII] bits are set.
+ If both the global interrupt mask bits (CIU_INTx_EN*[MII])
+ and the local interrupt mask bit (DATA_DRPENA) are set, then an
+ interrupt is reported for this event. */
+ uint64_t irthresh : 1; /**< Inbound Ring Packet Threshold Exceeded
+ When the pending \#inbound packets in system
+ memory(IRCNT) has exceeded a programmable threshold
+ (IRHWM), then this bit is set. If this does occur,
+ the IRTHRESH is set and the CIU_INTx_SUM0,4[MII] bits
+ are set if ((MIX_ISR & MIX_INTENA) != 0).
+ If both the global interrupt mask bits (CIU_INTx_EN*[MII])
+ and the local interrupt mask bit (ITHENA) are set, then an
+ interrupt is reported for this event. */
+ uint64_t orthresh : 1; /**< Outbound Ring Packet Threshold Exceeded
+ When the pending \#outbound packets in system
+ memory(ORCNT) has exceeded a programmable threshold
+ (ORHWM), then this bit is set. If this does occur,
+ the ORTHRESH is set and the CIU_INTx_SUM0,4[MII] bits
+ are set if ((MIX_ISR & MIX_INTENA) != 0).
+ If both the global interrupt mask bits (CIU_INTx_EN*[MII])
+ and the local interrupt mask bit (OTHENA) are set, then an
+ interrupt is reported for this event. */
+ uint64_t idblovf : 1; /**< Inbound DoorBell(IDBELL) Overflow Detected
+ If SW attempts to write to the MIX_IRING2[IDBELL]
+ with a value greater than the remaining \#of
+ I-Ring Buffer Entries (MIX_REMCNT[IREMCNT]), then
+ the following occurs:
+ 1) The MIX_IRING2[IDBELL] write is IGNORED
+ 2) The IDBLOVF is set and the CIU_INTx_SUM0,4[MII]
+ bits are set if ((MIX_ISR & MIX_INTENA) != 0).
+ If both the global interrupt mask bits (CIU_INTx_EN*[MII])
+ and the local interrupt mask bit (IVFENA) are set, then an
+ interrupt is reported for this event.
+ SW should keep track of the \#I-Ring Entries in use
+ (ie: cumulative \# of IDBELL writes), and ensure that
+ future IDBELL writes don't exceed the size of the
+ I-Ring Buffer (MIX_IRING1[ISIZE]).
+ SW must reclaim I-Ring Entries by keeping track of the
+ \#IRing-Entries, and writing to the MIX_IRCNT[IRCNT].
+ NOTE: The MIX_IRCNT[IRCNT] register represents the
+ total \#packets(not IRing Entries) and SW must further
+ keep track of the \# of I-Ring Entries associated with
+ each packet as they are processed.
+ NOTE: There is no recovery from an IDBLOVF Interrupt.
+ If it occurs, it's an indication that SW has
+ overwritten the I-Ring buffer, and the only recourse
+ is a HW reset. */
+ uint64_t odblovf : 1; /**< Outbound DoorBell(ODBELL) Overflow Detected
+ If SW attempts to write to the MIX_ORING2[ODBELL]
+ with a value greater than the remaining \#of
+ O-Ring Buffer Entries (MIX_REMCNT[OREMCNT]), then
+ the following occurs:
+ 1) The MIX_ORING2[ODBELL] write is IGNORED
+ 2) The ODBLOVF is set and the CIU_INTx_SUM0,4[MII]
+ bits are set if ((MIX_ISR & MIX_INTENA) != 0).
+ If both the global interrupt mask bits (CIU_INTx_EN*[MII])
+ and the local interrupt mask bit (OVFENA) are set, then an
+ interrupt is reported for this event.
+ SW should keep track of the \#O-Ring Entries in use
+ (ie: cumulative \# of ODBELL writes), and ensure that
+ future ODBELL writes don't exceed the size of the
+ O-Ring Buffer (MIX_ORING1[OSIZE]).
+ SW must reclaim O-Ring Entries by writing to the
+ MIX_ORCNT[ORCNT].
+ NOTE: There is no recovery from an ODBLOVF Interrupt.
+ If it occurs, it's an indication that SW has
+ overwritten the O-Ring buffer, and the only recourse
+ is a HW reset. */
+#else
+ uint64_t odblovf : 1;
+ uint64_t idblovf : 1;
+ uint64_t orthresh : 1;
+ uint64_t irthresh : 1;
+ uint64_t data_drp : 1;
+ uint64_t irun : 1;
+ uint64_t orun : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } cn52xx;
+ struct cvmx_mixx_isr_cn52xx cn52xxp1;
+ struct cvmx_mixx_isr_cn52xx cn56xx;
+ struct cvmx_mixx_isr_cn52xx cn56xxp1;
+ struct cvmx_mixx_isr_s cn61xx;
+ struct cvmx_mixx_isr_s cn63xx;
+ struct cvmx_mixx_isr_s cn63xxp1;
+ struct cvmx_mixx_isr_s cn66xx;
+ struct cvmx_mixx_isr_s cn68xx;
+ struct cvmx_mixx_isr_s cn68xxp1;
+};
+typedef union cvmx_mixx_isr cvmx_mixx_isr_t;
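
A typical handler reads ISR, masks it against INTENA, and writes the
taken bits back to acknowledge them; the write-one-to-clear behaviour
used here is an assumption (it matches how existing MIX drivers
acknowledge these bits) rather than something this header states:

    #include "cvmx.h"
    #include "cvmx-mixx-defs.h"

    /* Fetch and acknowledge the currently enabled MIX events. */
    static uint64_t mix_claim_events(int port)
    {
        cvmx_mixx_isr_t isr;
        uint64_t pending;

        isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
        pending = isr.u64 & cvmx_read_csr(CVMX_MIXX_INTENA(port));
        cvmx_write_csr(CVMX_MIXX_ISR(port), pending); /* assumed W1C ack */
        return pending;
    }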
+
+/**
+ * cvmx_mix#_orcnt
+ *
+ * MIX_ORCNT = MIX O-Ring Packets Sent Counter
+ *
+ * Description:
+ * NOTE: To write to the MIX_ORCNT register, a device would issue an IOBST directed at the MIO.
+ * To read the MIX_ORCNT register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_orcnt {
+ uint64_t u64;
+ struct cvmx_mixx_orcnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t orcnt : 20; /**< Pending \# of O-Ring Packets.
+ Whenever HW removes a packet from the O-Ring, it
+ increments the ORCNT (to indicate to SW the \# of
+ Output packets in system memory that can be reclaimed).
+ Reads of ORCNT return the current count.
+ Writes of ORCNT decrement the count by the value
+ written.
+ This register is used to generate interrupts to alert
+ SW of pending outbound MIX packets that have been
+ removed from system memory. (see MIX_ISR[ORTHRESH]
+ description for more details).
+ NOTE: For outbound packets, the \# of O-Ring Packets
+ is equal to the \# of O-Ring Entries. */
+#else
+ uint64_t orcnt : 20;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_mixx_orcnt_s cn52xx;
+ struct cvmx_mixx_orcnt_s cn52xxp1;
+ struct cvmx_mixx_orcnt_s cn56xx;
+ struct cvmx_mixx_orcnt_s cn56xxp1;
+ struct cvmx_mixx_orcnt_s cn61xx;
+ struct cvmx_mixx_orcnt_s cn63xx;
+ struct cvmx_mixx_orcnt_s cn63xxp1;
+ struct cvmx_mixx_orcnt_s cn66xx;
+ struct cvmx_mixx_orcnt_s cn68xx;
+ struct cvmx_mixx_orcnt_s cn68xxp1;
+};
+typedef union cvmx_mixx_orcnt cvmx_mixx_orcnt_t;
+
+/**
+ * cvmx_mix#_orhwm
+ *
+ * MIX_ORHWM = MIX O-Ring High-Water Mark Threshold Register
+ *
+ * Description:
+ * NOTE: To write to the MIX_ORHWM register, a device would issue an IOBST directed at the MIO.
+ * To read the MIX_ORHWM register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_orhwm {
+ uint64_t u64;
+ struct cvmx_mixx_orhwm_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t orhwm : 20; /**< O-Ring Entry High Water Mark Threshold.
+ Used to determine when the \# of Outbound packets
+ in system memory that can be reclaimed
+ (MIX_ORCNT[ORCNT]) exceeds this ORHWM threshold.
+ NOTE: The power-on value of the CIU2_EN_xx_yy_PKT[MII]
+ interrupt enable bits is zero; these bits must be set
+ to allow interrupts to be reported. */
+#else
+ uint64_t orhwm : 20;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_mixx_orhwm_s cn52xx;
+ struct cvmx_mixx_orhwm_s cn52xxp1;
+ struct cvmx_mixx_orhwm_s cn56xx;
+ struct cvmx_mixx_orhwm_s cn56xxp1;
+ struct cvmx_mixx_orhwm_s cn61xx;
+ struct cvmx_mixx_orhwm_s cn63xx;
+ struct cvmx_mixx_orhwm_s cn63xxp1;
+ struct cvmx_mixx_orhwm_s cn66xx;
+ struct cvmx_mixx_orhwm_s cn68xx;
+ struct cvmx_mixx_orhwm_s cn68xxp1;
+};
+typedef union cvmx_mixx_orhwm cvmx_mixx_orhwm_t;
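+
+/* Usage sketch (editorial addition): arming the MIX_ISR[ORTHRESH] interrupt by
+ * setting the high-water mark described above. The value 16 is a hypothetical
+ * threshold; assumes the CVMX_MIXX_ORHWM(block) address macro defined earlier
+ * in this file.
+ */
+static inline void example_mix_set_orhwm(int interface)
+{
+ cvmx_mixx_orhwm_t orhwm;
+ orhwm.u64 = 0;
+ orhwm.s.orhwm = 16; /* interrupt once more than 16 sent packets await reclaim */
+ cvmx_write_csr(CVMX_MIXX_ORHWM(interface), orhwm.u64);
+}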
+
+/**
+ * cvmx_mix#_oring1
+ *
+ * MIX_ORING1 = MIX Outbound Ring Register \#1
+ *
+ * Description:
+ * NOTE: To write to the MIX_ORING1 register, a device would issue an IOBST directed at the MIO.
+ * To read the MIX_ORING1 register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_oring1 {
+ uint64_t u64;
+ struct cvmx_mixx_oring1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t osize : 20; /**< Represents the Outbound Ring Buffer's Size (in 8B
+ words). The ring can be as large as 1M entries.
+ NOTE: This CSR MUST BE written by SW at power-on
+ (when ODBELL/ORCNT=0). */
+ uint64_t obase : 37; /**< Represents the 8B-aligned base address of the first
+ Outbound Ring(O-Ring) Entry in system memory.
+ NOTE: SW MUST ONLY write to this register during
+ power-on/boot code. */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t obase : 37;
+ uint64_t osize : 20;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } s;
+ struct cvmx_mixx_oring1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t osize : 20; /**< Represents the Outbound Ring Buffer's Size (in 8B
+ words). The ring can be as large as 1M entries.
+ NOTE: This CSR MUST BE written by SW at power-on
+ (when ODBELL/ORCNT=0). */
+ uint64_t reserved_36_39 : 4;
+ uint64_t obase : 33; /**< Represents the 8B-aligned base address of the first
+ Outbound Ring(O-Ring) Entry in system memory.
+ NOTE: SW MUST ONLY write to this register during
+ power-on/boot code. */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t obase : 33;
+ uint64_t reserved_36_39 : 4;
+ uint64_t osize : 20;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } cn52xx;
+ struct cvmx_mixx_oring1_cn52xx cn52xxp1;
+ struct cvmx_mixx_oring1_cn52xx cn56xx;
+ struct cvmx_mixx_oring1_cn52xx cn56xxp1;
+ struct cvmx_mixx_oring1_s cn61xx;
+ struct cvmx_mixx_oring1_s cn63xx;
+ struct cvmx_mixx_oring1_s cn63xxp1;
+ struct cvmx_mixx_oring1_s cn66xx;
+ struct cvmx_mixx_oring1_s cn68xx;
+ struct cvmx_mixx_oring1_s cn68xxp1;
+};
+typedef union cvmx_mixx_oring1 cvmx_mixx_oring1_t;
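+
+/* Usage sketch (editorial addition): one-time O-Ring setup at power-on, per the
+ * notes above (OBASE/OSIZE must only be written while ODBELL/ORCNT are zero).
+ * The ring memory and entry count are caller-supplied; assumes cvmx_ptr_to_phys()
+ * and the CVMX_MIXX_ORING1(block) address macro defined earlier in this file.
+ */
+static inline void example_mix_oring1_setup(int interface, void *ring, int entries)
+{
+ cvmx_mixx_oring1_t oring1;
+ oring1.u64 = 0;
+ oring1.s.obase = cvmx_ptr_to_phys(ring) >> 3; /* 8B-aligned base, in 8B words */
+ oring1.s.osize = entries; /* ring size in entries */
+ cvmx_write_csr(CVMX_MIXX_ORING1(interface), oring1.u64);
+}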
+
+/**
+ * cvmx_mix#_oring2
+ *
+ * MIX_ORING2 = MIX Outbound Ring Register \#2
+ *
+ * Description:
+ * NOTE: To write to the MIX_ORING2 register, a device would issue an IOBST directed at the MIO.
+ * To read the MIX_ORING2 register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_oring2 {
+ uint64_t u64;
+ struct cvmx_mixx_oring2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t otlptr : 20; /**< The Outbound Ring Tail Pointer selects the O-Ring
+ Entry that the HW will process next. After the HW
+ completes sending an outbound packet, it increments
+ the O-Ring Tail Pointer. NOTE: The O-Ring Tail
+ Pointer HW increment is always modulo
+ MIX_ORING1[OSIZE].
+ NOTE: This field is 'read-only' to SW. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t odbell : 20; /**< Represents the cumulative total of pending
+ Outbound Ring(O-Ring) Buffer Entries. Each O-Ring
+ Buffer Entry contains 1) an L2/DRAM byte pointer
+ and 2) a Byte Length.
+ After SW inserts new entries into the O-Ring Buffer,
+ it "rings the doorbell with the count of the newly
+ inserted entries". When the MIX HW receives the
+ doorbell ring, it increments the current doorbell
+ count by the CSR write value.
+ SW must never cause the doorbell count for the
+ O-Ring to exceed the size of the ring(OSIZE).
+ A read of the CSR indicates the current doorbell
+ count. */
+#else
+ uint64_t odbell : 20;
+ uint64_t reserved_20_31 : 12;
+ uint64_t otlptr : 20;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+ struct cvmx_mixx_oring2_s cn52xx;
+ struct cvmx_mixx_oring2_s cn52xxp1;
+ struct cvmx_mixx_oring2_s cn56xx;
+ struct cvmx_mixx_oring2_s cn56xxp1;
+ struct cvmx_mixx_oring2_s cn61xx;
+ struct cvmx_mixx_oring2_s cn63xx;
+ struct cvmx_mixx_oring2_s cn63xxp1;
+ struct cvmx_mixx_oring2_s cn66xx;
+ struct cvmx_mixx_oring2_s cn68xx;
+ struct cvmx_mixx_oring2_s cn68xxp1;
+};
+typedef union cvmx_mixx_oring2 cvmx_mixx_oring2_t;
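+
+/* Usage sketch (editorial addition): ringing the outbound doorbell after
+ * inserting new entries, exactly as the ODBELL description above lays out.
+ * Assumes the CVMX_MIXX_ORING2(block) address macro defined earlier in this file.
+ */
+static inline void example_mix_ring_odbell(int interface, int added)
+{
+ cvmx_mixx_oring2_t oring2;
+ oring2.u64 = 0;
+ oring2.s.odbell = added; /* HW adds this to the current doorbell count */
+ cvmx_write_csr(CVMX_MIXX_ORING2(interface), oring2.u64);
+}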
+
+/**
+ * cvmx_mix#_remcnt
+ *
+ * MIX_REMCNT = MIX Ring Buffer Remainder Counts (useful for HW debug only)
+ *
+ * Description:
+ * NOTE: To read the MIX_REMCNT register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_remcnt {
+ uint64_t u64;
+ struct cvmx_mixx_remcnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t iremcnt : 20; /**< Remaining I-Ring Buffer Count
+ Reflects the \# of unused/remaining I-Ring Entries
+ that HW currently detects in the I-Ring Buffer.
+ HW uses this value to detect I-Ring Doorbell overflows.
+ (see: MIX_ISR[IDBLOVF])
+ When SW writes MIX_IRING1[ISIZE], the IREMCNT
+ is loaded with the MIX_IRING1[ISIZE] value. (NOTE: ISIZE should only
+ be written at power-on, when it's known that there are
+ no I-Ring Entries currently in use by HW).
+ When SW writes to the IDBELL register, the IREMCNT
+ is decremented by the CSR write value.
+ When HW issues an IRing Write Request(onto NCB Bus),
+ the IREMCNT is incremented by 1. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t oremcnt : 20; /**< Remaining O-Ring Buffer Count
+ Reflects the \# of unused/remaining O-Ring Entries
+ that HW currently detects in the O-Ring Buffer.
+ HW uses this value to detect O-Ring Doorbell overflows.
+ (see: MIX_ISR[ODBLOVF])
+ When SW writes MIX_ORING1[OSIZE], the OREMCNT
+ is loaded with the MIX_ORING1[OSIZE] value. (NOTE: OSIZE should only
+ be written at power-on, when it's known that there are
+ no O-Ring Entries currently in use by HW).
+ When SW writes to the ODBELL register, the OREMCNT
+ is decremented by the CSR write value.
+ When SW writes to MIX_REMCNT[OREMCNT], the OREMCNT is
+ decremented by the CSR write value. */
+#else
+ uint64_t oremcnt : 20;
+ uint64_t reserved_20_31 : 12;
+ uint64_t iremcnt : 20;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+ struct cvmx_mixx_remcnt_s cn52xx;
+ struct cvmx_mixx_remcnt_s cn52xxp1;
+ struct cvmx_mixx_remcnt_s cn56xx;
+ struct cvmx_mixx_remcnt_s cn56xxp1;
+ struct cvmx_mixx_remcnt_s cn61xx;
+ struct cvmx_mixx_remcnt_s cn63xx;
+ struct cvmx_mixx_remcnt_s cn63xxp1;
+ struct cvmx_mixx_remcnt_s cn66xx;
+ struct cvmx_mixx_remcnt_s cn68xx;
+ struct cvmx_mixx_remcnt_s cn68xxp1;
+};
+typedef union cvmx_mixx_remcnt cvmx_mixx_remcnt_t;
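+
+/* Usage sketch (editorial addition): guarding a doorbell write with MIX_REMCNT
+ * so SW never triggers the unrecoverable MIX_ISR[ODBLOVF] condition described
+ * above. Assumes the CVMX_MIXX_REMCNT(block) address macro defined earlier in
+ * this file.
+ */
+static inline int example_mix_odbell_safe(int interface, int added)
+{
+ cvmx_mixx_remcnt_t remcnt;
+ cvmx_mixx_oring2_t oring2;
+ remcnt.u64 = cvmx_read_csr(CVMX_MIXX_REMCNT(interface));
+ if ((int)remcnt.s.oremcnt < added)
+ return -1; /* not enough free O-Ring entries; ringing now would overflow */
+ oring2.u64 = 0;
+ oring2.s.odbell = added;
+ cvmx_write_csr(CVMX_MIXX_ORING2(interface), oring2.u64);
+ return 0;
+}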
+
+/**
+ * cvmx_mix#_tsctl
+ *
+ * MIX_TSCTL = MIX TimeStamp Control Register
+ *
+ * Description:
+ * NOTE: To read the MIX_TSCTL register, a device would issue an IOBLD64 directed at the MIO.
+ *
+ * Notes:
+ * SW can read the MIX_TSCTL register to determine the \# of pending timestamp interrupts (TSCNT),
+ * the \# of outstanding timestamp requests in flight (TSTOT), and the \# of available
+ * timestamp entries (TSAVL) in the timestamp fifo.
+ * A write to the MIX_TSCTL register advances the MIX*_TSTAMP fifo head ptr by 1, and
+ * decrements the MIX*_TSCTL[TSCNT] and MIX*_TSCTL[TSTOT] pending count(s) by 1.
+ * For example, if SW reads MIX*_TSCTL[TSCNT]=2 (2 pending timestamp interrupts), it would immediately
+ * issue this sequence:
+ * 1) MIX*_TSTAMP[TSTAMP] read followed by MIX*_TSCTL write
+ * [gets timestamp value/pops timestamp fifo and decrements pending count(s) by 1]
+ * 2) MIX*_TSTAMP[TSTAMP] read followed by MIX*_TSCTL write
+ * [gets timestamp value/pops timestamp fifo and decrements pending count(s) by 1]
+ *
+ * SWNOTE: A MIX_TSCTL write when MIX_TSCTL[TSCNT]=0 (ie: TimeStamp Fifo empty) is ignored.
+ */
+union cvmx_mixx_tsctl {
+ uint64_t u64;
+ struct cvmx_mixx_tsctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t tsavl : 5; /**< # of MIX TimeStamp Entries Available for use
+ For o63: TSAVL MAX=4 (implementation
+ depth of timestamp fifo)
+ TSAVL = [IMPLEMENTATION_DEPTH=4(MAX) - TSCNT] */
+ uint64_t reserved_13_15 : 3;
+ uint64_t tstot : 5; /**< # of pending MIX TimeStamp Requests in-flight
+ For o63: TSTOT must never exceed MAX=4 (implementation
+ depth of timestamp fifo) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t tscnt : 5; /**< # of pending MIX TimeStamp Interrupts
+ For o63: TSCNT must never exceed MAX=4 (implementation
+ depth of timestamp fifo) */
+#else
+ uint64_t tscnt : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t tstot : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t tsavl : 5;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } s;
+ struct cvmx_mixx_tsctl_s cn61xx;
+ struct cvmx_mixx_tsctl_s cn63xx;
+ struct cvmx_mixx_tsctl_s cn63xxp1;
+ struct cvmx_mixx_tsctl_s cn66xx;
+ struct cvmx_mixx_tsctl_s cn68xx;
+ struct cvmx_mixx_tsctl_s cn68xxp1;
+};
+typedef union cvmx_mixx_tsctl cvmx_mixx_tsctl_t;
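+
+/* Usage sketch (editorial addition): draining the timestamp fifo using the
+ * read-TSTAMP-then-write-TSCTL sequence from the notes above. Assumes the
+ * CVMX_MIXX_TSCTL(block)/CVMX_MIXX_TSTAMP(block) address macros defined
+ * earlier in this file; the handler callback is hypothetical.
+ */
+static inline void example_mix_drain_tstamps(int interface, void (*handle)(uint64_t))
+{
+ cvmx_mixx_tsctl_t tsctl;
+ uint64_t stamp;
+ tsctl.u64 = cvmx_read_csr(CVMX_MIXX_TSCTL(interface));
+ while (tsctl.s.tscnt)
+ {
+ stamp = cvmx_read_csr(CVMX_MIXX_TSTAMP(interface)); /* get timestamp value */
+ cvmx_write_csr(CVMX_MIXX_TSCTL(interface), 0); /* pop fifo, decrement TSCNT/TSTOT */
+ handle(stamp);
+ tsctl.u64 = cvmx_read_csr(CVMX_MIXX_TSCTL(interface));
+ }
+}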
+
+/**
+ * cvmx_mix#_tstamp
+ *
+ * MIX_TSTAMP = MIX TimeStamp Register
+ *
+ * Description:
+ * NOTE: To read the MIX_TSTAMP register, a device would issue an IOBLD64 directed at the MIO.
+ */
+union cvmx_mixx_tstamp {
+ uint64_t u64;
+ struct cvmx_mixx_tstamp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t tstamp : 64; /**< MIX TimeStamp Value
+ When SW sets up an ORING Entry with [47]=1(TSTAMP),
+ The packet is tagged with a special SOP w/TSTAMP flag
+ as it is sent to the AGL.
+ Later the AGL will send "sample" strobe(s) to capture
+ a global 64bit timestamp value followed by a "commit"
+ strobe which writes the last sampled value into the
+ outbound Timestamp fifo (max depth=4) and increments
+ the MIX_TSCTL[TSCNT] register to indicate the total
+ \#of pending Timestamp interrupts.
+ If the \#pending Timestamp interrupts (MIX_TSCTL[TSCNT])
+ is greater than the MIX_CTL[TS_THRESH] value, then
+ a programmable interrupt is also triggered (see:
+ MIX_ISR[TS] MIX_INTENA[TSENA]).
+ SW will then read the MIX*_TSTAMP[TSTAMP]
+ register value, and MUST THEN write the MIX_TSCTL
+ register, which will decrement MIX_TSCTL[TSCNT] register,
+ to indicate that a single timestamp interrupt has
+ been serviced.
+ NOTE: The MIO-MIX HW tracks up to MAX=4 outstanding
+ timestamped outbound packets at a time. All subsequent
+ ORING Entries w/SOP-TSTAMP will be stalled until
+ SW can service the 4 outstanding interrupts.
+ SW can read the MIX_TSCTL register to determine the
+ \#pending timestamp interrupts(TSCNT) as well as the
+ \#outstanding timestamp requests in flight(TSTOT), as
+ well as the \#of available timestamp entries (TSAVL).
+ SW NOTE: A MIX_TSTAMP read when MIX_TSCTL[TSCNT]=0 will
+ result in a return value of all zeroes. SW should only
+ read this register when MIX_ISR[TS]=1 (or when
+ MIX_TSCTL[TSCNT] != 0) to retrieve the timestamp value
+ recorded by HW. If SW reads the TSTAMP when HW has not
+ recorded a valid timestamp, then an all zeroes value is
+ returned. */
+#else
+ uint64_t tstamp : 64;
+#endif
+ } s;
+ struct cvmx_mixx_tstamp_s cn61xx;
+ struct cvmx_mixx_tstamp_s cn63xx;
+ struct cvmx_mixx_tstamp_s cn63xxp1;
+ struct cvmx_mixx_tstamp_s cn66xx;
+ struct cvmx_mixx_tstamp_s cn68xx;
+ struct cvmx_mixx_tstamp_s cn68xxp1;
+};
+typedef union cvmx_mixx_tstamp cvmx_mixx_tstamp_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-mixx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-mpi-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-mpi-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-mpi-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,561 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-mpi-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon mpi.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_MPI_DEFS_H__
+#define __CVMX_MPI_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MPI_CFG CVMX_MPI_CFG_FUNC()
+static inline uint64_t CVMX_MPI_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MPI_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000001000ull);
+}
+#else
+#define CVMX_MPI_CFG (CVMX_ADD_IO_SEG(0x0001070000001000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_MPI_DATX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 8))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 8))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 8))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 8))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 8))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 8)))))
+ cvmx_warn("CVMX_MPI_DATX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000001080ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_MPI_DATX(offset) (CVMX_ADD_IO_SEG(0x0001070000001080ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MPI_STS CVMX_MPI_STS_FUNC()
+static inline uint64_t CVMX_MPI_STS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MPI_STS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000001008ull);
+}
+#else
+#define CVMX_MPI_STS (CVMX_ADD_IO_SEG(0x0001070000001008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_MPI_TX CVMX_MPI_TX_FUNC()
+static inline uint64_t CVMX_MPI_TX_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_MPI_TX not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000001010ull);
+}
+#else
+#define CVMX_MPI_TX (CVMX_ADD_IO_SEG(0x0001070000001010ull))
+#endif
+
+/**
+ * cvmx_mpi_cfg
+ *
+ * SPI_MPI interface
+ *
+ *
+ * Notes:
+ * Some of the SPI/MPI pins are muxed with UART pins.
+ * SPI_CLK : spi clock, dedicated pin
+ * SPI_DI : spi input, shared with UART0_DCD_N/SPI_DI, enabled when MPI_CFG[ENABLE]=1
+ * SPI_DO : spi output, mux to UART0_DTR_N/SPI_DO, enabled when MPI_CFG[ENABLE]=1
+ * SPI_CS0_L : chip select 0, mux to BOOT_CE_N<6>/SPI_CS0_L pin, enabled when MPI_CFG[CSENA0]=1 and MPI_CFG[ENABLE]=1
+ * SPI_CS1_L : chip select 1, mux to BOOT_CE_N<7>/SPI_CS1_L pin, enabled when MPI_CFG[CSENA1]=1 and MPI_CFG[ENABLE]=1
+ */
+union cvmx_mpi_cfg {
+ uint64_t u64;
+ struct cvmx_mpi_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t clkdiv : 13; /**< Fspi_clk = Fsclk / (2 * CLKDIV) | NS
+ CLKDIV = Fsclk / (2 * Fspi_clk) */
+ uint64_t csena3 : 1; /**< If 0, UART1_RTS_L/SPI_CS3_L pin is UART pin | NS
+ 1, UART1_RTS_L/SPI_CS3_L pin is SPI pin
+ SPI_CS3_L drives UART1_RTS_L/SPI_CS3_L */
+ uint64_t csena2 : 1; /**< If 0, UART0_RTS_L/SPI_CS2_L pin is UART pin | NS
+ 1, UART0_RTS_L/SPI_CS2_L pin is SPI pin
+ SPI_CS2_L drives UART0_RTS_L/SPI_CS2_L */
+ uint64_t csena1 : 1; /**< If 0, BOOT_CE_N<7>/SPI_CS1_L pin is BOOT pin | NS
+ 1, BOOT_CE_N<7>/SPI_CS1_L pin is SPI pin
+ SPI_CS1_L drives BOOT_CE_N<7>/SPI_CS1_L */
+ uint64_t csena0 : 1; /**< If 0, BOOT_CE_N<6>/SPI_CS0_L pin is BOOT pin | NS
+ 1, BOOT_CE_N<6>/SPI_CS0_L pin is SPI pin
+ SPI_CS0_L drives BOOT_CE_N<6>/SPI_CS0_L */
+ uint64_t cslate : 1; /**< If 0, SPI_CS asserts 1/2 SCLK before transaction | NS
+ 1, SPI_CS asserts coincident with transaction
+ NOTE: This control applies to 2 CSs */
+ uint64_t tritx : 1; /**< If 0, SPI_DO pin is driven when slave is not | NS
+ expected to be driving
+ 1, SPI_DO pin is tristated when not transmitting
+ NOTE: only used when WIREOR==1 */
+ uint64_t idleclks : 2; /**< Guarantee IDLECLKS idle sclk cycles between | NS
+ commands. */
+ uint64_t cshi : 1; /**< If 0, CS is low asserted | NS
+ 1, CS is high asserted */
+ uint64_t csena : 1; /**< If 0, the MPI_CS is a GPIO, not used by MPI_TX
+ 1, CS is driven per MPI_TX instruction */
+ uint64_t int_ena : 1; /**< If 0, polling is required | NS
+ 1, MPI engine interrupts at end of transaction */
+ uint64_t lsbfirst : 1; /**< If 0, shift MSB first | NS
+ 1, shift LSB first */
+ uint64_t wireor : 1; /**< If 0, SPI_DO and SPI_DI are separate wires (SPI) | NS
+ SPI_DO pin is always driven
+ 1, SPI_DO/DI is all from SPI_DO pin (MPI)
+ SPI_DO pin is tristated when not transmitting
+ NOTE: if WIREOR==1, SPI_DI pin is not used by the
+ MPI engine */
+ uint64_t clk_cont : 1; /**< If 0, clock idles to value given by IDLELO after | NS
+ completion of MPI transaction
+ 1, clock never idles, requires CS deassertion/
+ assertion between commands */
+ uint64_t idlelo : 1; /**< If 0, SPI_CLK idles high, 1st transition is hi->lo | NS
+ 1, SPI_CLK idles low, 1st transition is lo->hi */
+ uint64_t enable : 1; /**< If 0, UART0_DTR_L/SPI_DO, UART0_DCD_L/SPI_DI | NS
+ BOOT_CE_N<7:6>/SPI_CSx_L
+ pins are UART/BOOT pins
+ 1, UART0_DTR_L/SPI_DO and UART0_DCD_L/SPI_DI
+ pins are SPI/MPI pins.
+ BOOT_CE_N<6>/SPI_CS0_L is SPI pin if CSENA0=1
+ BOOT_CE_N<7>/SPI_CS1_L is SPI pin if CSENA1=1 */
+#else
+ uint64_t enable : 1;
+ uint64_t idlelo : 1;
+ uint64_t clk_cont : 1;
+ uint64_t wireor : 1;
+ uint64_t lsbfirst : 1;
+ uint64_t int_ena : 1;
+ uint64_t csena : 1;
+ uint64_t cshi : 1;
+ uint64_t idleclks : 2;
+ uint64_t tritx : 1;
+ uint64_t cslate : 1;
+ uint64_t csena0 : 1;
+ uint64_t csena1 : 1;
+ uint64_t csena2 : 1;
+ uint64_t csena3 : 1;
+ uint64_t clkdiv : 13;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_mpi_cfg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t clkdiv : 13; /**< Fsclk = Feclk / (2 * CLKDIV)
+ CLKDIV = Feclk / (2 * Fsclk) */
+ uint64_t reserved_12_15 : 4;
+ uint64_t cslate : 1; /**< If 0, MPI_CS asserts 1/2 SCLK before transaction
+ 1, MPI_CS asserts coincident with transaction
+ NOTE: only used if CSENA == 1 */
+ uint64_t tritx : 1; /**< If 0, MPI_TX pin is driven when slave is not
+ expected to be driving
+ 1, MPI_TX pin is tristated when not transmitting
+ NOTE: only used when WIREOR==1 */
+ uint64_t idleclks : 2; /**< Guarantee IDLECLKS idle sclk cycles between
+ commands. */
+ uint64_t cshi : 1; /**< If 0, CS is low asserted
+ 1, CS is high asserted */
+ uint64_t csena : 1; /**< If 0, the MPI_CS is a GPIO, not used by MPI_TX
+ 1, CS is driven per MPI_TX instruction */
+ uint64_t int_ena : 1; /**< If 0, polling is required
+ 1, MPI engine interrupts at end of transaction */
+ uint64_t lsbfirst : 1; /**< If 0, shift MSB first
+ 1, shift LSB first */
+ uint64_t wireor : 1; /**< If 0, MPI_TX and MPI_RX are separate wires (SPI)
+ MPI_TX pin is always driven
+ 1, MPI_TX/RX is all from MPI_TX pin (MPI)
+ MPI_TX pin is tristated when not transmitting
+ NOTE: if WIREOR==1, MPI_RX pin is not used by the
+ MPI engine */
+ uint64_t clk_cont : 1; /**< If 0, clock idles to value given by IDLELO after
+ completion of MPI transaction
+ 1, clock never idles, requires CS deassertion/
+ assertion between commands */
+ uint64_t idlelo : 1; /**< If 0, MPI_CLK idles high, 1st transition is hi->lo
+ 1, MPI_CLK idles low, 1st transition is lo->hi */
+ uint64_t enable : 1; /**< If 0, all MPI pins are GPIOs
+ 1, MPI_CLK, MPI_CS, and MPI_TX are driven */
+#else
+ uint64_t enable : 1;
+ uint64_t idlelo : 1;
+ uint64_t clk_cont : 1;
+ uint64_t wireor : 1;
+ uint64_t lsbfirst : 1;
+ uint64_t int_ena : 1;
+ uint64_t csena : 1;
+ uint64_t cshi : 1;
+ uint64_t idleclks : 2;
+ uint64_t tritx : 1;
+ uint64_t cslate : 1;
+ uint64_t reserved_12_15 : 4;
+ uint64_t clkdiv : 13;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn30xx;
+ struct cvmx_mpi_cfg_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t clkdiv : 13; /**< Fsclk = Feclk / (2 * CLKDIV)
+ CLKDIV = Feclk / (2 * Fsclk) */
+ uint64_t reserved_11_15 : 5;
+ uint64_t tritx : 1; /**< If 0, MPI_TX pin is driven when slave is not
+ expected to be driving
+ 1, MPI_TX pin is tristated when not transmitting
+ NOTE: only used when WIREOR==1 */
+ uint64_t idleclks : 2; /**< Guarantee IDLECLKS idle sclk cycles between
+ commands. */
+ uint64_t cshi : 1; /**< If 0, CS is low asserted
+ 1, CS is high asserted */
+ uint64_t csena : 1; /**< If 0, the MPI_CS is a GPIO, not used by MPI_TX
+ 1, CS is driven per MPI_TX instruction */
+ uint64_t int_ena : 1; /**< If 0, polling is required
+ 1, MPI engine interrupts at end of transaction */
+ uint64_t lsbfirst : 1; /**< If 0, shift MSB first
+ 1, shift LSB first */
+ uint64_t wireor : 1; /**< If 0, MPI_TX and MPI_RX are separate wires (SPI)
+ MPI_TX pin is always driven
+ 1, MPI_TX/RX is all from MPI_TX pin (MPI)
+ MPI_TX pin is tristated when not transmitting
+ NOTE: if WIREOR==1, MPI_RX pin is not used by the
+ MPI engine */
+ uint64_t clk_cont : 1; /**< If 0, clock idles to value given by IDLELO after
+ completion of MPI transaction
+ 1, clock never idles, requires CS deassertion/
+ assertion between commands */
+ uint64_t idlelo : 1; /**< If 0, MPI_CLK idles high, 1st transition is hi->lo
+ 1, MPI_CLK idles low, 1st transition is lo->hi */
+ uint64_t enable : 1; /**< If 0, all MPI pins are GPIOs
+ 1, MPI_CLK, MPI_CS, and MPI_TX are driven */
+#else
+ uint64_t enable : 1;
+ uint64_t idlelo : 1;
+ uint64_t clk_cont : 1;
+ uint64_t wireor : 1;
+ uint64_t lsbfirst : 1;
+ uint64_t int_ena : 1;
+ uint64_t csena : 1;
+ uint64_t cshi : 1;
+ uint64_t idleclks : 2;
+ uint64_t tritx : 1;
+ uint64_t reserved_11_15 : 5;
+ uint64_t clkdiv : 13;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn31xx;
+ struct cvmx_mpi_cfg_cn30xx cn50xx;
+ struct cvmx_mpi_cfg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t clkdiv : 13; /**< Fspi_clk = Fsclk / (2 * CLKDIV) | NS
+ CLKDIV = Fsclk / (2 * Fspi_clk) */
+ uint64_t reserved_14_15 : 2;
+ uint64_t csena1 : 1; /**< If 0, BOOT_CE_N<7>/SPI_CS1_L pin is BOOT pin | NS
+ 1, BOOT_CE_N<7>/SPI_CS1_L pin is SPI pin
+ SPI_CS1_L drives BOOT_CE_N<7>/SPI_CS1_L */
+ uint64_t csena0 : 1; /**< If 0, BOOT_CE_N<6>/SPI_CS0_L pin is BOOT pin | NS
+ 1, BOOT_CE_N<6>/SPI_CS0_L pin is SPI pin
+ SPI_CS0_L drives BOOT_CE_N<6>/SPI_CS0_L */
+ uint64_t cslate : 1; /**< If 0, SPI_CS asserts 1/2 SCLK before transaction | NS
+ 1, SPI_CS asserts coincident with transaction
+ NOTE: This control applies to 2 CSs */
+ uint64_t tritx : 1; /**< If 0, SPI_DO pin is driven when slave is not | NS
+ expected to be driving
+ 1, SPI_DO pin is tristated when not transmitting
+ NOTE: only used when WIREOR==1 */
+ uint64_t idleclks : 2; /**< Guarantee IDLECLKS idle sclk cycles between | NS
+ commands. */
+ uint64_t cshi : 1; /**< If 0, CS is low asserted | NS
+ 1, CS is high asserted */
+ uint64_t reserved_6_6 : 1;
+ uint64_t int_ena : 1; /**< If 0, polling is required | NS
+ 1, MPI engine interrupts at end of transaction */
+ uint64_t lsbfirst : 1; /**< If 0, shift MSB first | NS
+ 1, shift LSB first */
+ uint64_t wireor : 1; /**< If 0, SPI_DO and SPI_DI are separate wires (SPI) | NS
+ SPI_DO pin is always driven
+ 1, SPI_DO/DI is all from SPI_DO pin (MPI)
+ SPI_DO pin is tristated when not transmitting
+ NOTE: if WIREOR==1, SPI_DI pin is not used by the
+ MPI engine */
+ uint64_t clk_cont : 1; /**< If 0, clock idles to value given by IDLELO after | NS
+ completion of MPI transaction
+ 1, clock never idles, requires CS deassertion/
+ assertion between commands */
+ uint64_t idlelo : 1; /**< If 0, SPI_CLK idles high, 1st transition is hi->lo | NS
+ 1, SPI_CLK idles low, 1st transition is lo->hi */
+ uint64_t enable : 1; /**< If 0, UART0_DTR_L/SPI_DO, UART0_DCD_L/SPI_DI | NS
+ BOOT_CE_N<7:6>/SPI_CSx_L
+ pins are UART/BOOT pins
+ 1, UART0_DTR_L/SPI_DO and UART0_DCD_L/SPI_DI
+ pins are SPI/MPI pins.
+ BOOT_CE_N<6>/SPI_CS0_L is SPI pin if CSENA0=1
+ BOOT_CE_N<7>/SPI_CS1_L is SPI pin if CSENA1=1 */
+#else
+ uint64_t enable : 1;
+ uint64_t idlelo : 1;
+ uint64_t clk_cont : 1;
+ uint64_t wireor : 1;
+ uint64_t lsbfirst : 1;
+ uint64_t int_ena : 1;
+ uint64_t reserved_6_6 : 1;
+ uint64_t cshi : 1;
+ uint64_t idleclks : 2;
+ uint64_t tritx : 1;
+ uint64_t cslate : 1;
+ uint64_t csena0 : 1;
+ uint64_t csena1 : 1;
+ uint64_t reserved_14_15 : 2;
+ uint64_t clkdiv : 13;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn61xx;
+ struct cvmx_mpi_cfg_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t clkdiv : 13; /**< Fspi_clk = Fsclk / (2 * CLKDIV) | NS
+ CLKDIV = Fsclk / (2 * Fspi_clk) */
+ uint64_t csena3 : 1; /**< If 0, UART1_RTS_L/SPI_CS3_L pin is UART pin | NS
+ 1, UART1_RTS_L/SPI_CS3_L pin is SPI pin
+ SPI_CS3_L drives UART1_RTS_L/SPI_CS3_L */
+ uint64_t csena2 : 1; /**< If 0, UART0_RTS_L/SPI_CS2_L pin is UART pin | NS
+ 1, UART0_RTS_L/SPI_CS2_L pin is SPI pin
+ SPI_CS2_L drives UART0_RTS_L/SPI_CS2_L */
+ uint64_t reserved_12_13 : 2;
+ uint64_t cslate : 1; /**< If 0, SPI_CS asserts 1/2 SCLK before transaction | NS
+ 1, SPI_CS asserts coincident with transaction
+ NOTE: This control applies to 4 CSs */
+ uint64_t tritx : 1; /**< If 0, SPI_DO pin is driven when slave is not | NS
+ expected to be driving
+ 1, SPI_DO pin is tristated when not transmitting
+ NOTE: only used when WIREOR==1 */
+ uint64_t idleclks : 2; /**< Guarantee IDLECLKS idle sclk cycles between | NS
+ commands. */
+ uint64_t cshi : 1; /**< If 0, CS is low asserted | NS
+ 1, CS is high asserted */
+ uint64_t reserved_6_6 : 1;
+ uint64_t int_ena : 1; /**< If 0, polling is required | NS
+ 1, MPI engine interrupts at end of transaction */
+ uint64_t lsbfirst : 1; /**< If 0, shift MSB first | NS
+ 1, shift LSB first */
+ uint64_t wireor : 1; /**< If 0, SPI_DO and SPI_DI are separate wires (SPI) | NS
+ SPI_DO pin is always driven
+ 1, SPI_DO/DI is all from SPI_DO pin (MPI)
+ SPI_DO pin is tristated when not transmitting
+ NOTE: if WIREOR==1, SPI_DI pin is not used by the
+ MPI engine */
+ uint64_t clk_cont : 1; /**< If 0, clock idles to value given by IDLELO after | NS
+ completion of MPI transaction
+ 1, clock never idles, requires CS deassertion/
+ assertion between commands */
+ uint64_t idlelo : 1; /**< If 0, SPI_CLK idles high, 1st transition is hi->lo | NS
+ 1, SPI_CLK idles low, 1st transition is lo->hi */
+ uint64_t enable : 1; /**< If 0, UART0_DTR_L/SPI_DO, UART0_DCD_L/SPI_DI | NS
+ UART0_RTS_L/SPI_CS2_L, UART1_RTS_L/SPI_CS3_L
+ pins are UART pins
+ 1, UART0_DTR_L/SPI_DO and UART0_DCD_L/SPI_DI
+ pins are SPI/MPI pins.
+ UART0_RTS_L/SPI_CS2_L is SPI pin if CSENA2=1
+ UART1_RTS_L/SPI_CS3_L is SPI pin if CSENA3=1 */
+#else
+ uint64_t enable : 1;
+ uint64_t idlelo : 1;
+ uint64_t clk_cont : 1;
+ uint64_t wireor : 1;
+ uint64_t lsbfirst : 1;
+ uint64_t int_ena : 1;
+ uint64_t reserved_6_6 : 1;
+ uint64_t cshi : 1;
+ uint64_t idleclks : 2;
+ uint64_t tritx : 1;
+ uint64_t cslate : 1;
+ uint64_t reserved_12_13 : 2;
+ uint64_t csena2 : 1;
+ uint64_t csena3 : 1;
+ uint64_t clkdiv : 13;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn66xx;
+ struct cvmx_mpi_cfg_cn61xx cnf71xx;
+};
+typedef union cvmx_mpi_cfg cvmx_mpi_cfg_t;
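+
+/* Usage sketch (editorial addition): enabling the engine in plain 4-wire SPI
+ * mode using the CLKDIV relation documented above (CLKDIV = Fsclk / (2 * Fspi_clk)).
+ * Assumes cvmx_clock_get_rate() from cvmx-clock.h; the 10 MHz target SPI clock
+ * is a hypothetical choice.
+ */
+static inline void example_mpi_setup(void)
+{
+ cvmx_mpi_cfg_t cfg;
+ cfg.u64 = 0;
+ cfg.s.clkdiv = cvmx_clock_get_rate(CVMX_CLOCK_SCLK) / (2 * 10000000);
+ cfg.s.csena0 = 1; /* BOOT_CE_N<6>/SPI_CS0_L becomes a SPI chip select */
+ cfg.s.csena = 1; /* drive CS per the MPI_TX instruction */
+ cfg.s.enable = 1; /* claim the shared UART/BOOT pins for SPI/MPI */
+ cvmx_write_csr(CVMX_MPI_CFG, cfg.u64);
+}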
+
+/**
+ * cvmx_mpi_dat#
+ */
+union cvmx_mpi_datx {
+ uint64_t u64;
+ struct cvmx_mpi_datx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t data : 8; /**< Data to transmit/receive | NS */
+#else
+ uint64_t data : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_mpi_datx_s cn30xx;
+ struct cvmx_mpi_datx_s cn31xx;
+ struct cvmx_mpi_datx_s cn50xx;
+ struct cvmx_mpi_datx_s cn61xx;
+ struct cvmx_mpi_datx_s cn66xx;
+ struct cvmx_mpi_datx_s cnf71xx;
+};
+typedef union cvmx_mpi_datx cvmx_mpi_datx_t;
+
+/**
+ * cvmx_mpi_sts
+ */
+union cvmx_mpi_sts {
+ uint64_t u64;
+ struct cvmx_mpi_sts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t rxnum : 5; /**< Number of bytes written for transaction | NS */
+ uint64_t reserved_1_7 : 7;
+ uint64_t busy : 1; /**< If 0, no MPI transaction in progress | NS
+ 1, MPI engine is processing a transaction */
+#else
+ uint64_t busy : 1;
+ uint64_t reserved_1_7 : 7;
+ uint64_t rxnum : 5;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_mpi_sts_s cn30xx;
+ struct cvmx_mpi_sts_s cn31xx;
+ struct cvmx_mpi_sts_s cn50xx;
+ struct cvmx_mpi_sts_s cn61xx;
+ struct cvmx_mpi_sts_s cn66xx;
+ struct cvmx_mpi_sts_s cnf71xx;
+};
+typedef union cvmx_mpi_sts cvmx_mpi_sts_t;
+
+/**
+ * cvmx_mpi_tx
+ */
+union cvmx_mpi_tx {
+ uint64_t u64;
+ struct cvmx_mpi_tx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t csid : 2; /**< Which CS to assert for this transaction | NS */
+ uint64_t reserved_17_19 : 3;
+ uint64_t leavecs : 1; /**< If 0, deassert CS after transaction is done | NS
+ 1, leave CS asserted after transaction is done */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txnum : 5; /**< Number of bytes to transmit | NS */
+ uint64_t reserved_5_7 : 3;
+ uint64_t totnum : 5; /**< Number of bytes to shift (transmit + receive) | NS */
+#else
+ uint64_t totnum : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t txnum : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t leavecs : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t csid : 2;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_mpi_tx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t leavecs : 1; /**< If 0, deassert CS after transaction is done
+ 1, leave CS asserted after transaction is done */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txnum : 5; /**< Number of bytes to transmit */
+ uint64_t reserved_5_7 : 3;
+ uint64_t totnum : 5; /**< Number of bytes to shift (transmit + receive) */
+#else
+ uint64_t totnum : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t txnum : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t leavecs : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn30xx;
+ struct cvmx_mpi_tx_cn30xx cn31xx;
+ struct cvmx_mpi_tx_cn30xx cn50xx;
+ struct cvmx_mpi_tx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t csid : 1; /**< Which CS to assert for this transaction | NS */
+ uint64_t reserved_17_19 : 3;
+ uint64_t leavecs : 1; /**< If 0, deassert CS after transaction is done | NS
+ 1, leave CS asserted after transaction is done */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txnum : 5; /**< Number of bytes to transmit | NS */
+ uint64_t reserved_5_7 : 3;
+ uint64_t totnum : 5; /**< Number of bytes to shift (transmit + receive) | NS */
+#else
+ uint64_t totnum : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t txnum : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t leavecs : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t csid : 1;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } cn61xx;
+ struct cvmx_mpi_tx_s cn66xx;
+ struct cvmx_mpi_tx_cn61xx cnf71xx;
+};
+typedef union cvmx_mpi_tx cvmx_mpi_tx_t;
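+
+/* Usage sketch (editorial addition): a polled transfer built from the MPI_DAT,
+ * MPI_TX and MPI_STS registers above: preload the transmit bytes, start the
+ * shift, poll BUSY, then read back the received bytes. The placement of the
+ * received bytes at DAT[txlen..total-1] follows the common half-duplex
+ * convention and should be verified against the HRM; buffers are hypothetical.
+ */
+static inline void example_mpi_transfer(const uint8_t *tx, int txlen, uint8_t *rx, int total)
+{
+ cvmx_mpi_tx_t mpi_tx;
+ cvmx_mpi_sts_t sts;
+ int i;
+ for (i = 0; i < txlen; i++)
+ cvmx_write_csr(CVMX_MPI_DATX(i), tx[i]);
+ mpi_tx.u64 = 0;
+ mpi_tx.s.csid = 0; /* use SPI_CS0 */
+ mpi_tx.s.leavecs = 0; /* deassert CS when the transaction completes */
+ mpi_tx.s.txnum = txlen; /* bytes to transmit */
+ mpi_tx.s.totnum = total; /* total bytes to shift (transmit + receive) */
+ cvmx_write_csr(CVMX_MPI_TX, mpi_tx.u64);
+ do
+ sts.u64 = cvmx_read_csr(CVMX_MPI_STS);
+ while (sts.s.busy);
+ for (i = txlen; i < total; i++)
+ rx[i - txlen] = cvmx_read_csr(CVMX_MPI_DATX(i)) & 0xff;
+}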
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-mpi-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-nand.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-nand.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-nand.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1993 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+/**
+ * @file
+ *
+ * Interface to the NAND flash controller.
+ * See cvmx-nand.h for usage documentation and notes.
+ *
+ * <hr>$Revision: 35726 $<hr>
+ */
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-nand.h>
+#include <asm/octeon/cvmx-ndf-defs.h>
+#include <asm/octeon/cvmx-swap.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#else
+#include "cvmx.h"
+#include "cvmx-nand.h"
+#include "cvmx-swap.h"
+#include "cvmx-bootmem.h"
+#endif
+#if defined(__U_BOOT__) && defined(CONFIG_HW_WATCHDOG)
+# include <watchdog.h>
+#else
+# define WATCHDOG_RESET()
+#endif
+
+#define NAND_COMMAND_READ_ID 0x90
+#define NAND_COMMAND_READ_PARAM_PAGE 0xec
+#define NAND_COMMAND_RESET 0xff
+#define NAND_COMMAND_STATUS 0x70
+#define NAND_COMMAND_READ 0x00
+#define NAND_COMMAND_READ_FIN 0x30
+#define NAND_COMMAND_ERASE 0x60
+#define NAND_COMMAND_ERASE_FIN 0xd0
+#define NAND_COMMAND_PROGRAM 0x80
+#define NAND_COMMAND_PROGRAM_FIN 0x10
+#define NAND_TIMEOUT_USECS_READ 100000
+#define NAND_TIMEOUT_USECS_WRITE 1000000
+#define NAND_TIMEOUT_USECS_BLOCK_ERASE 1000000
+
+#define CVMX_NAND_ROUNDUP(_Dividend, _Divisor) (((_Dividend)+((_Divisor)-1))/(_Divisor))
+#undef min
+#define min(X, Y) \
+ ({ typeof (X) __x = (X); \
+ typeof (Y) __y = (Y); \
+ (__x < __y) ? __x : __y; })
+
+#undef max
+#define max(X, Y) \
+ ({ typeof (X) __x = (X); \
+ typeof (Y) __y = (Y); \
+ (__x > __y) ? __x : __y; })
+
+
+/* Structure to store the parameters that we care about that
+** describe the ONFI speed modes. This is used to configure
+** the flash timing to match what is reported in the
+** parameter page of the ONFI flash chip. */
+typedef struct
+{
+ int twp;
+ int twh;
+ int twc;
+ int tclh;
+ int tals;
+} onfi_speed_mode_desc_t;
+static const onfi_speed_mode_desc_t onfi_speed_modes[] =
+{
+
+ {50,30,100,20,50}, /* Mode 0 */
+ {25,15, 45,10,25}, /* Mode 1 */
+ {17,15, 35,10,15}, /* Mode 2 */
+ {15,10, 30, 5,10}, /* Mode 3 */
+ {12,10, 25, 5,10}, /* Mode 4, requires EDO timings */
+ {10, 7, 20, 5,10}, /* Mode 5, requires EDO timings */
+ {10,10, 25, 5,12}, /* Mode 6, requires EDO timings */
+};
+
+
+
+typedef enum
+{
+ CVMX_NAND_STATE_16BIT = 1<<0,
+} cvmx_nand_state_flags_t;
+
+/**
+ * Structure used to store data about the NAND devices hooked
+ * to the bootbus.
+ */
+typedef struct
+{
+ int page_size;
+ int oob_size;
+ int pages_per_block;
+ int blocks;
+ int tim_mult;
+ int tim_par[8];
+ int clen[4];
+ int alen[4];
+ int rdn[4];
+ int wrn[2];
+ int onfi_timing;
+ cvmx_nand_state_flags_t flags;
+} cvmx_nand_state_t;
+
+/**
+ * Array indexed by bootbus chip select with information
+ * about NAND devices.
+ */
+#if defined(__U_BOOT__)
+/* For u-boot nand boot we need to play some tricks to be able
+** to use this early in boot. We put them in a special section that is merged
+** with the text segment. (Using the text segment directly results in an assembler warning.)
+*/
+/*#define USE_DATA_IN_TEXT*/
+#endif
+
+#ifdef USE_DATA_IN_TEXT
+static uint8_t cvmx_nand_buffer[CVMX_NAND_MAX_PAGE_AND_OOB_SIZE] __attribute__((aligned(8))) __attribute__ ((section (".data_in_text")));
+static cvmx_nand_state_t cvmx_nand_state[8] __attribute__ ((section (".data_in_text")));
+static cvmx_nand_state_t cvmx_nand_default __attribute__ ((section (".data_in_text")));
+static cvmx_nand_initialize_flags_t cvmx_nand_flags __attribute__ ((section (".data_in_text")));
+static int debug_indent __attribute__ ((section (".data_in_text")));
+#else
+static CVMX_SHARED cvmx_nand_state_t cvmx_nand_state[8];
+static CVMX_SHARED cvmx_nand_state_t cvmx_nand_default;
+static CVMX_SHARED cvmx_nand_initialize_flags_t cvmx_nand_flags;
+static CVMX_SHARED uint8_t *cvmx_nand_buffer = NULL;
+static int debug_indent = 0;
+#endif
+
+static CVMX_SHARED const char *cvmx_nand_opcode_labels[] =
+{
+ "NOP", /* 0 */
+ "Timing", /* 1 */
+ "Wait", /* 2 */
+ "Chip Enable / Disable", /* 3 */
+ "CLE", /* 4 */
+ "ALE", /* 5 */
+ "6 - Unknown", /* 6 */
+ "7 - Unknown", /* 7 */
+ "Write", /* 8 */
+ "Read", /* 9 */
+ "Read EDO", /* 10 */
+ "Wait Status", /* 11 */
+ "12 - Unknown", /* 12 */
+ "13 - Unknown", /* 13 */
+ "14 - Unknown", /* 14 */
+ "Bus Aquire / Release" /* 15 */
+};
+
+#define ULL unsigned long long
+/* This macro logs out whenever a function is called if debugging is on */
+#define CVMX_NAND_LOG_CALLED() \
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
+ cvmx_dprintf("%*s%s: called\n", 2*debug_indent++, "", __FUNCTION__);
+
+/* This macro logs out each function parameter if debugging is on */
+#define CVMX_NAND_LOG_PARAM(format, param) \
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
+ cvmx_dprintf("%*s%s: param %s = " format "\n", 2*debug_indent, "", __FUNCTION__, #param, param);
+
+/* This macro logs out when a function returns a value */
+#define CVMX_NAND_RETURN(v) \
+ do { \
+ typeof(v) r = v; \
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
+ cvmx_dprintf("%*s%s: returned %s(%d)\n", 2*--debug_indent, "", __FUNCTION__, #v, r); \
+ return r; \
+ } while (0);
+
+/* This macro logs out when a function doesn't return a value */
+#define CVMX_NAND_RETURN_NOTHING() \
+ do { \
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
+ cvmx_dprintf("%*s%s: returned\n", 2*--debug_indent, "", __FUNCTION__); \
+ return; \
+ } while (0);
+
+
+
+
+
+
+/* Compute the CRC for the ONFI parameter page. Adapted from sample code
+** in the specification.
+*/
+static uint16_t __onfi_parameter_crc_compute(uint8_t *data)
+{
+ const int order = 16; // Order of the CRC-16
+ unsigned long i, j, c, bit;
+ unsigned long crc = 0x4F4E; // Initialize the shift register with 0x4F4E
+ unsigned long crcmask = ((((unsigned long)1<<(order-1))-1)<<1)|1;
+ unsigned long crchighbit = (unsigned long)1<<(order-1);
+
+ for (i = 0; i < 254; i++)
+ {
+ c = (unsigned long)data[i];
+ for (j = 0x80; j; j >>= 1) {
+ bit = crc & crchighbit;
+ crc <<= 1;
+ if (c & j)
+ bit ^= crchighbit;
+ if (bit)
+ crc ^= 0x8005;
+ }
+ crc &= crcmask;
+ }
+ return(crc);
+}
+
+
+/**
+ * Validate the ONFI parameter page and return a pointer to
+ * the config values.
+ *
+ * @param param_page Pointer to the raw NAND data returned after a parameter page read. It will
+ * contain at least 4 copies of the parameter structure.
+ *
+ * @return Pointer to a validated parameter page, or NULL if one couldn't be found.
+ */
+static cvmx_nand_onfi_param_page_t *__cvmx_nand_onfi_process(cvmx_nand_onfi_param_page_t param_page[4])
+{
+ int index;
+
+ for (index=0; index<4; index++)
+ {
+ uint16_t crc = __onfi_parameter_crc_compute((void *)¶m_page[index]);
+ if (crc == cvmx_le16_to_cpu(param_page[index].crc))
+ break;
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Paramter page %d is corrupt. (Expected CRC: 0x%04x, computed: 0x%04x)\n",
+ __FUNCTION__, index, cvmx_le16_to_cpu(param_page[index].crc), crc);
+ }
+
+ if (index == 4)
+ {
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: All parameter pages fail CRC check. Checking to see if any look sane.\n", __FUNCTION__);
+
+ if (!memcmp(param_page, param_page + 1, 256))
+ {
+ /* First and second copies match, now check some values */
+ if (param_page[0].pages_per_block != 0 && param_page[0].pages_per_block != 0xFFFFFFFF
+ && param_page[0].page_data_bytes != 0 && param_page[0].page_data_bytes != 0xFFFFFFFF
+ && param_page[0].page_spare_bytes != 0 && param_page[0].page_spare_bytes != 0xFFFF
+ && param_page[0].blocks_per_lun != 0 && param_page[0].blocks_per_lun != 0xFFFFFFFF
+ && param_page[0].timing_mode != 0 && param_page[0].timing_mode != 0xFFFF)
+ {
+ /* Looks like we have enough values to use */
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Page 0 looks sane, using even though CRC fails.\n", __FUNCTION__);
+ index = 0;
+ }
+ }
+ }
+
+ if (index == 4)
+ {
+ cvmx_dprintf("%s: WARNING: ONFI part but no valid ONFI parameter pages found.\n", __FUNCTION__);
+ return NULL;
+ }
+
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ {
+ cvmx_dprintf("%*sONFI Information (from copy %d in param page)\n", 2*debug_indent, "", index);
+ debug_indent++;
+ cvmx_dprintf("%*sonfi = %c%c%c%c\n", 2*debug_indent, "", param_page[index].onfi[0], param_page[index].onfi[1],
+ param_page[index].onfi[2], param_page[index].onfi[3]);
+ cvmx_dprintf("%*srevision_number = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].revision_number));
+ cvmx_dprintf("%*sfeatures = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].features));
+ cvmx_dprintf("%*soptional_commands = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].optional_commands));
+
+ cvmx_dprintf("%*smanufacturer = %12.12s\n", 2*debug_indent, "", param_page[index].manufacturer);
+ cvmx_dprintf("%*smodel = %20.20s\n", 2*debug_indent, "", param_page[index].model);
+ cvmx_dprintf("%*sjedec_id = 0x%x\n", 2*debug_indent, "", param_page[index].jedec_id);
+ cvmx_dprintf("%*sdate_code = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].date_code));
+
+ cvmx_dprintf("%*spage_data_bytes = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].page_data_bytes));
+ cvmx_dprintf("%*spage_spare_bytes = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].page_spare_bytes));
+ cvmx_dprintf("%*spartial_page_data_bytes = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].partial_page_data_bytes));
+ cvmx_dprintf("%*spartial_page_spare_bytes = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].partial_page_spare_bytes));
+ cvmx_dprintf("%*spages_per_block = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].pages_per_block));
+ cvmx_dprintf("%*sblocks_per_lun = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].blocks_per_lun));
+ cvmx_dprintf("%*snumber_lun = %u\n", 2*debug_indent, "", param_page[index].number_lun);
+ cvmx_dprintf("%*saddress_cycles = 0x%x\n", 2*debug_indent, "", param_page[index].address_cycles);
+ cvmx_dprintf("%*sbits_per_cell = %u\n", 2*debug_indent, "", param_page[index].bits_per_cell);
+ cvmx_dprintf("%*sbad_block_per_lun = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].bad_block_per_lun));
+ cvmx_dprintf("%*sblock_endurance = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].block_endurance));
+ cvmx_dprintf("%*sgood_blocks = %u\n", 2*debug_indent, "", param_page[index].good_blocks);
+ cvmx_dprintf("%*sgood_block_endurance = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].good_block_endurance));
+ cvmx_dprintf("%*sprograms_per_page = %u\n", 2*debug_indent, "", param_page[index].programs_per_page);
+ cvmx_dprintf("%*spartial_program_attrib = 0x%x\n", 2*debug_indent, "", param_page[index].partial_program_attrib);
+ cvmx_dprintf("%*sbits_ecc = %u\n", 2*debug_indent, "", param_page[index].bits_ecc);
+ cvmx_dprintf("%*sinterleaved_address_bits = 0x%x\n", 2*debug_indent, "", param_page[index].interleaved_address_bits);
+ cvmx_dprintf("%*sinterleaved_attrib = 0x%x\n", 2*debug_indent, "", param_page[index].interleaved_attrib);
+
+ cvmx_dprintf("%*spin_capacitance = %u\n", 2*debug_indent, "", param_page[index].pin_capacitance);
+ cvmx_dprintf("%*stiming_mode = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].timing_mode));
+ cvmx_dprintf("%*scache_timing_mode = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].cache_timing_mode));
+ cvmx_dprintf("%*st_prog = %d us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_prog));
+ cvmx_dprintf("%*st_bers = %u us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_bers));
+ cvmx_dprintf("%*st_r = %u us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_r));
+ cvmx_dprintf("%*st_ccs = %u ns\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_ccs));
+ cvmx_dprintf("%*svendor_revision = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].vendor_revision));
+ //uint8_t vendor_specific[88]; /**< Byte 166-253: Vendor specific */
+ cvmx_dprintf("%*scrc = 0x%x\n", 2*debug_indent, "", param_page[index].crc);
+ debug_indent--;
+ }
+ return param_page + index;
+}
+
+void __set_onfi_timing_mode(int *tim_par, int clocks_us, int mode)
+{
+ const onfi_speed_mode_desc_t *mp = &onfi_speed_modes[mode]; /* use shorter name to fill in timing array */
+ int margin;
+ int pulse_adjust;
+
+ if (mode > 6)
+ {
+ cvmx_dprintf("%s: invalid ONFI timing mode: %d\n", __FUNCTION__, mode);
+ return;
+ }
+
+ /* Adjust the read/write pulse duty cycle to make it more even. The cycle time
+ ** requirement is longer than the sum of the high and low times, so we extend both the high
+ ** and low times to meet the cycle time requirement.
+ */
+ pulse_adjust = ((mp->twc - mp->twh - mp->twp)/2 + 1) * clocks_us;
+
+ /* Add a small margin to all timings. */
+ margin = 2 * clocks_us;
+ /* Update timing parameters based on supported mode */
+ tim_par[1] = CVMX_NAND_ROUNDUP(mp->twp * clocks_us + margin + pulse_adjust, 1000); /* Twp, WE# pulse width */
+ tim_par[2] = CVMX_NAND_ROUNDUP(max(mp->twh, mp->twc - mp->twp) * clocks_us + margin + pulse_adjust, 1000); /* Twh, WE# pulse width high */
+ tim_par[3] = CVMX_NAND_ROUNDUP(mp->tclh * clocks_us + margin, 1000); /* Tclh, CLE hold time */
+ tim_par[4] = CVMX_NAND_ROUNDUP(mp->tals * clocks_us + margin, 1000); /* Tals, ALE setup time */
+ tim_par[5] = tim_par[3]; /* Talh, ALE hold time */
+ tim_par[6] = tim_par[1]; /* Trp, RE# pulse width*/
+ tim_par[7] = tim_par[2]; /* Treh, RE# high hold time */
+
+}
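+
+/* Worked example of the arithmetic above (illustrative numbers, not from the
+** SDK): with an 800 MHz SCLK, clocks_us = 800. For ONFI mode 0 (twp = 50 ns,
+** twh = 30 ns, twc = 100 ns), pulse_adjust = ((100 - 30 - 50)/2 + 1) * 800 = 8800
+** and margin = 2 * 800 = 1600, both in 1/1000ths of a cycle. tim_par[1] then
+** becomes CVMX_NAND_ROUNDUP(50*800 + 1600 + 8800, 1000) = 51 SCLK cycles; the
+** nanosecond figures are scaled by clocks-per-microsecond and rounded up to
+** whole cycles.
+*/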
+
+
+/* Internal helper function to set chip configuration to use default values */
+static void __set_chip_defaults(int chip, int clocks_us)
+{
+ if (!cvmx_nand_default.page_size)
+ return;
+ cvmx_nand_state[chip].page_size = cvmx_nand_default.page_size; /* NAND page size in bytes */
+ cvmx_nand_state[chip].oob_size = cvmx_nand_default.oob_size; /* NAND OOB (spare) size in bytes (per page) */
+ cvmx_nand_state[chip].pages_per_block = cvmx_nand_default.pages_per_block;
+ cvmx_nand_state[chip].blocks = cvmx_nand_default.blocks;
+ cvmx_nand_state[chip].onfi_timing = cvmx_nand_default.onfi_timing;
+ __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ {
+
+ cvmx_dprintf("%s: Using default NAND parameters.\n", __FUNCTION__);
+ cvmx_dprintf("%s: Defaults: page size: %d, OOB size: %d, pages per block %d, blocks: %d, timing mode: %d\n",
+ __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, cvmx_nand_state[chip].pages_per_block,
+ cvmx_nand_state[chip].blocks, cvmx_nand_state[chip].onfi_timing);
+ }
+}
+/* Do the proper wait for the ready/busy signal. First wait
+** for busy to be valid, then wait for busy to de-assert.
+*/
+static int __wait_for_busy_done(int chip)
+{
+ cvmx_nand_cmd_t cmd;
+
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.wait.two = 2;
+ cmd.wait.r_b=0;
+ cmd.wait.n = 2;
+
+ /* Wait for RB to be valid (tWB).
+ ** Use 5 * tWC as proxy. In some modes this is
+ ** much longer than required, but does not affect performance
+ ** since we will wait much longer for busy to de-assert.
+ */
+ if (cvmx_nand_submit(cmd))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+ if (cvmx_nand_submit(cmd))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+ if (cvmx_nand_submit(cmd))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+ if (cvmx_nand_submit(cmd))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+ cmd.wait.r_b=1; /* Now wait for busy to be de-asserted */
+ if (cvmx_nand_submit(cmd))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+}
+/**
+ * Called to initialize the NAND controller for use. Note that
+ * you must be running out of L2 or memory and not NAND before
+ * calling this function.
+ * When probing for NAND chips, this function attempts to autoconfigure based on the NAND parts detected.
+ * It currently supports autodetection for ONFI parts (with valid parameter pages), and some Samsung NAND
+ * parts (decoding ID bits). If autoconfiguration fails, the defaults set with cvmx_nand_set_defaults()
+ * prior to calling cvmx_nand_initialize() are used.
+ * If defaults are set and the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is provided, the defaults are used
+ * for all chips in the active_chips mask.
+ *
+ * @param flags Optional initialization flags
+ * If the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is passed, chips are not probed,
+ * and the default parameters (if set with cvmx_nand_set_defaults) are used for all chips
+ * in the active_chips mask.
+ * @param active_chips
+ * Each bit in this parameter represents a chip select that might
+ * contain NAND flash. Any chip select present in this bitmask may
+ * be connected to NAND. It is normally safe to pass 0xff here and
+ * let the API probe all 8 chip selects.
+ *
+ * @return Zero on success, a negative cvmx_nand_status error code on failure
+ */
+cvmx_nand_status_t cvmx_nand_initialize(cvmx_nand_initialize_flags_t flags, int active_chips)
+{
+ int chip;
+ int start_chip;
+ int stop_chip;
+ uint64_t clocks_us;
+ union cvmx_ndf_misc ndf_misc;
+ uint8_t nand_id_buffer[16];
+
+ if (!octeon_has_feature(OCTEON_FEATURE_NAND))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_DEVICE);
+
+ cvmx_nand_flags = flags;
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("0x%x", flags);
+
+ memset(&cvmx_nand_state, 0, sizeof(cvmx_nand_state));
+
+#ifndef USE_DATA_IN_TEXT
+ /* cvmx_nand_buffer is statically allocated in the USE_DATA_IN_TEXT case */
+ if (!cvmx_nand_buffer)
+ {
+ cvmx_nand_buffer = cvmx_bootmem_alloc_named_flags(CVMX_NAND_MAX_PAGE_AND_OOB_SIZE, 128, "__nand_buffer", CVMX_BOOTMEM_FLAG_END_ALLOC);
+ }
+ if (!cvmx_nand_buffer) {
+ const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block("__nand_buffer");
+ if (block_desc)
+ cvmx_nand_buffer = cvmx_phys_to_ptr(block_desc->base_addr);
+ }
+
+ if (!cvmx_nand_buffer)
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+#endif
+
+ /* Disable boot mode and reset the fifo */
+ ndf_misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
+ ndf_misc.s.rd_cmd = 0;
+ ndf_misc.s.bt_dma = 0;
+ ndf_misc.s.bt_dis = 1;
+ ndf_misc.s.ex_dis = 0;
+ ndf_misc.s.rst_ff = 1;
+ cvmx_write_csr(CVMX_NDF_MISC, ndf_misc.u64);
+ cvmx_read_csr(CVMX_NDF_MISC);
+
+ /* Bring the fifo out of reset */
+ cvmx_wait_usec(1);
+ ndf_misc.s.rst_ff = 0;
+ cvmx_write_csr(CVMX_NDF_MISC, ndf_misc.u64);
+ cvmx_read_csr(CVMX_NDF_MISC);
+ cvmx_wait_usec(1);
+
+ /* Clear the ECC counter */
+ //cvmx_write_csr(CVMX_NDF_ECC_CNT, cvmx_read_csr(CVMX_NDF_ECC_CNT));
+
+ /* Clear the interrupt state */
+ cvmx_write_csr(CVMX_NDF_INT, cvmx_read_csr(CVMX_NDF_INT));
+ cvmx_write_csr(CVMX_NDF_INT_EN, 0);
+ cvmx_write_csr(CVMX_MIO_NDF_DMA_INT, cvmx_read_csr(CVMX_MIO_NDF_DMA_INT));
+ cvmx_write_csr(CVMX_MIO_NDF_DMA_INT_EN, 0);
+
+
+ /* The simulator crashes if you access non-existent devices. Assume
+ only chip select 1 is connected to NAND */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ {
+ start_chip = 1;
+ stop_chip = 2;
+ }
+ else
+ {
+ start_chip = 0;
+ stop_chip = 8;
+ }
+
+ /* Figure out how many clocks are in one microsecond, rounding up */
+ clocks_us = CVMX_NAND_ROUNDUP(cvmx_clock_get_rate(CVMX_CLOCK_SCLK), 1000000);
+
+ /* If the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is set, then
+ ** use the supplied default values to configure the chips in the
+ ** active_chips mask */
+ if (cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE)
+ {
+ if (cvmx_nand_default.page_size)
+ {
+ for (chip=start_chip; chip<stop_chip; chip++)
+ {
+ /* Skip chip selects that the caller didn't supply in the active chip bits */
+ if (((1<<chip) & active_chips) == 0)
+ continue;
+ __set_chip_defaults(chip, clocks_us);
+ }
+ }
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+ }
+
+ /* Probe and see what NAND flash we can find */
+ for (chip=start_chip; chip<stop_chip; chip++)
+ {
+ union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
+ cvmx_nand_onfi_param_page_t *onfi_param_page;
+ int probe_failed;
+ int width_16;
+
+ /* Skip chip selects that the caller didn't supply in the active chip bits */
+ if (((1<<chip) & active_chips) == 0)
+ continue;
+
+ mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(chip));
+ /* Enabled regions can't be connected to NAND flash */
+ if (mio_boot_reg_cfg.s.en)
+ continue;
+
+ /* Start out with some sane, but slow, defaults */
+ cvmx_nand_state[chip].page_size = 0;
+ cvmx_nand_state[chip].oob_size = 64;
+ cvmx_nand_state[chip].pages_per_block = 64;
+ cvmx_nand_state[chip].blocks = 100;
+
+
+ /* Set timing mode to ONFI mode 0 for initial accesses */
+ __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, 0);
+
+ /* Set the index of which timing parameter to use for each phase. The
+ ** indexes select entries in tim_par, matching the 8 timing parameters
+ ** that the hardware supports. Index 0 is not software controlled and is
+ ** fixed by the hardware. */
+ cvmx_nand_state[chip].clen[0] = 0; /* Command doesn't need to be held before WE */
+ cvmx_nand_state[chip].clen[1] = 1; /* Twp, WE# pulse width */
+ cvmx_nand_state[chip].clen[2] = 3; /* Tclh, CLE hold time */
+ cvmx_nand_state[chip].clen[3] = 1;
+
+ cvmx_nand_state[chip].alen[0] = 4; /* Tals, ALE setup time */
+ cvmx_nand_state[chip].alen[1] = 1; /* Twp, WE# pulse width */
+ cvmx_nand_state[chip].alen[2] = 2; /* Twh, WE# pulse width high */
+ cvmx_nand_state[chip].alen[3] = 5; /* Talh, ALE hold time */
+
+ cvmx_nand_state[chip].rdn[0] = 0;
+ cvmx_nand_state[chip].rdn[1] = 6; /* Trp, RE# pulse width*/
+ cvmx_nand_state[chip].rdn[2] = 7; /* Treh, RE# high hold time */
+ cvmx_nand_state[chip].rdn[3] = 0;
+
+ cvmx_nand_state[chip].wrn[0] = 1; /* Twp, WE# pulse width */
+ cvmx_nand_state[chip].wrn[1] = 2; /* Twh, WE# pulse width high */
+
+ /* Probe and see if we get an answer. Read more than required, since in
+ ** 16 bit mode only every other byte is valid.
+ ** We probe twice, first in 8 bit mode and then in 16 bit mode, to
+ ** autodetect the bus width.
+ */
+ probe_failed = 1;
+ for (width_16 = 0; width_16 <= 1 && probe_failed; width_16++)
+ {
+ probe_failed = 0;
+
+ if (width_16)
+ cvmx_nand_state[chip].flags |= CVMX_NAND_STATE_16BIT;
+ memset(cvmx_nand_buffer, 0xff, 16);
+ if (cvmx_nand_read_id(chip, 0x0, cvmx_ptr_to_phys(cvmx_nand_buffer), 16) < 16)
+ {
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Failed to probe chip %d\n", __FUNCTION__, chip);
+ probe_failed = 1;
+
+ }
+ if (*(uint32_t*)cvmx_nand_buffer == 0xffffffff || *(uint32_t*)cvmx_nand_buffer == 0x0)
+ {
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Probe returned nothing for chip %d\n", __FUNCTION__, chip);
+ probe_failed = 1;
+ }
+ }
+ /* Neither 8 nor 16 bit mode worked, so go on to the next chip select */
+ if (probe_failed)
+ continue;
+
+ /* Save copy of ID for later use */
+ memcpy(nand_id_buffer, cvmx_nand_buffer, sizeof(nand_id_buffer));
+
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: NAND chip %d has ID 0x%08llx\n", __FUNCTION__, chip, (unsigned long long int)*(uint64_t*)cvmx_nand_buffer);
+ /* Read more than required, as in 16 bit mode only every other byte is valid. */
+ if (cvmx_nand_read_id(chip, 0x20, cvmx_ptr_to_phys(cvmx_nand_buffer), 8) < 8)
+ {
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Failed to probe chip %d\n", __FUNCTION__, chip);
+ continue;
+ }
+
+ if (((cvmx_nand_buffer[0] == 'O') && (cvmx_nand_buffer[1] == 'N') &&
+ (cvmx_nand_buffer[2] == 'F') && (cvmx_nand_buffer[3] == 'I')))
+ {
+ /* We have an ONFI part, so read the parameter page */
+
+ cvmx_nand_read_param_page(chip, cvmx_ptr_to_phys(cvmx_nand_buffer), 2048);
+ onfi_param_page = __cvmx_nand_onfi_process((cvmx_nand_onfi_param_page_t *)cvmx_nand_buffer);
+ if (onfi_param_page)
+ {
+ /* ONFI NAND parts are described by a parameter page. Here we extract the configuration values
+ ** from the parameter page that we need to access the chip. */
+ cvmx_nand_state[chip].page_size = cvmx_le32_to_cpu(onfi_param_page->page_data_bytes);
+ cvmx_nand_state[chip].oob_size = cvmx_le16_to_cpu(onfi_param_page->page_spare_bytes);
+ cvmx_nand_state[chip].pages_per_block = cvmx_le32_to_cpu(onfi_param_page->pages_per_block);
+ cvmx_nand_state[chip].blocks = cvmx_le32_to_cpu(onfi_param_page->blocks_per_lun) * onfi_param_page->number_lun;
+
+ if (cvmx_le16_to_cpu(onfi_param_page->timing_mode) <= 0x3f)
+ {
+ int mode_mask = cvmx_le16_to_cpu(onfi_param_page->timing_mode);
+ int mode = 0;
+ int i;
+ for (i = 0; i < 6;i++)
+ {
+ if (mode_mask & (1 << i))
+ mode = i;
+ }
+ cvmx_nand_state[chip].onfi_timing = mode;
+ }
+ else
+ {
+ cvmx_dprintf("%s: Invalid timing mode (%d) in ONFI parameter page, ignoring\n", __FUNCTION__, cvmx_nand_state[chip].onfi_timing);
+ cvmx_nand_state[chip].onfi_timing = 0;
+
+ }
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Using ONFI timing mode: %d\n", __FUNCTION__, cvmx_nand_state[chip].onfi_timing);
+ __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
+ if (cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size > CVMX_NAND_MAX_PAGE_AND_OOB_SIZE)
+ {
+ cvmx_dprintf("%s: ERROR: Page size (%d) + OOB size (%d) is greater than max size (%d)\n",
+ __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, CVMX_NAND_MAX_PAGE_AND_OOB_SIZE);
+ return(CVMX_NAND_ERROR);
+ }
+ /* We have completed setup for this ONFI chip, so go on to next chip. */
+ continue;
+ }
+ else
+ {
+ /* Parameter page is not valid */
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: ONFI paramater page missing or invalid.\n", __FUNCTION__);
+
+ }
+
+
+ }
+ else
+ {
+ /* We have a non-ONFI part. */
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Chip %d doesn't support ONFI.\n", __FUNCTION__, chip);
+
+
+ if (nand_id_buffer[0] == 0xEC)
+ {
+ /* We have a Samsung part, so decode part info from ID bytes */
+ uint64_t nand_size_bits = (64*1024*1024ULL) << ((nand_id_buffer[4] & 0x70) >> 4); /* Plane size */
+ cvmx_nand_state[chip].page_size = 1024 << (nand_id_buffer[3] & 0x3); /* NAND page size in bytes */
+ /* NAND OOB (spare) size in bytes (per page) */
+ cvmx_nand_state[chip].oob_size = (cvmx_nand_state[chip].page_size / 512) * ((nand_id_buffer[3] & 4) ? 16 : 8);
+ cvmx_nand_state[chip].pages_per_block = (0x10000 << ((nand_id_buffer[3] & 0x30) >> 4))/cvmx_nand_state[chip].page_size;
+
+ nand_size_bits *= 1 << ((nand_id_buffer[4] & 0xc) >> 2);
+
+ cvmx_nand_state[chip].blocks = nand_size_bits/(8ULL*cvmx_nand_state[chip].page_size*cvmx_nand_state[chip].pages_per_block);
+ switch (nand_id_buffer[1]) {
+ case 0xD3: /* K9F8G08U0M */
+ case 0xDC: /* K9F4G08U0B */
+ cvmx_nand_state[chip].onfi_timing = 6;
+ break;
+ default:
+ cvmx_nand_state[chip].onfi_timing = 2;
+ break;
+ }
+
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ {
+ cvmx_dprintf("%s: Samsung NAND chip detected, using parameters decoded from ID bytes.\n", __FUNCTION__);
+ cvmx_dprintf("%s: Defaults: page size: %d, OOB size: %d, pages per block %d, part size: %d MBytes, timing mode: %d\n",
+ __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, cvmx_nand_state[chip].pages_per_block,
+ (int)(nand_size_bits/(8*1024*1024)), cvmx_nand_state[chip].onfi_timing);
+ }
+
+ __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
+ if (cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size > CVMX_NAND_MAX_PAGE_AND_OOB_SIZE)
+ {
+ cvmx_dprintf("%s: ERROR: Page size (%d) + OOB size (%d) is greater than max size (%d)\n",
+ __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, CVMX_NAND_MAX_PAGE_AND_OOB_SIZE);
+ return(CVMX_NAND_ERROR);
+ }
+
+ /* We have completed setup for this Samsung chip, so go on to next chip. */
+ continue;
+
+
+ }
+
+ }
+
+
+
+ /* We were not able to automatically identify the NAND chip parameters. If default values were configured,
+ ** use them. */
+ if (cvmx_nand_default.page_size)
+ {
+ __set_chip_defaults(chip, clocks_us);
+ }
+ else
+ {
+
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Unable to determine NAND parameters, and no defaults supplied.\n", __FUNCTION__);
+ }
+ }
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_nand_initialize);
+#endif
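+
+/* A minimal usage sketch of the probe path above, assuming a bare-metal
+** application where probing all eight chip selects is safe; the function
+** name example_nand_bringup is illustrative, not part of the SDK.
+*/
+#if 0 /* illustration only */
+void example_nand_bringup(void)
+{
+    /* 0xff probes all 8 chip selects, the "normally safe" mask noted above */
+    if (cvmx_nand_initialize((cvmx_nand_initialize_flags_t)0, 0xff) != CVMX_NAND_SUCCESS)
+        cvmx_dprintf("NAND: no controller found or buffer allocation failed\n");
+}
+#endif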
+
+
+/**
+ * Call to shut down the NAND controller after all transactions
+ * are done. In most setups this will never be called.
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+cvmx_nand_status_t cvmx_nand_shutdown(void)
+{
+ CVMX_NAND_LOG_CALLED();
+ memset(&cvmx_nand_state, 0, sizeof(cvmx_nand_state));
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+}
+
+
+/**
+ * Returns a bitmask representing the chip selects that are
+ * connected to NAND chips. This can be called after
+ * initialization to determine the actual number of NAND chips
+ * found. Each bit in the result corresponds to a chip select.
+ *
+ * @return Zero if no NAND chips were found. Otherwise a bit is set for
+ * each chip select (1<<chip).
+ */
+int cvmx_nand_get_active_chips(void)
+{
+ int chip;
+ int result = 0;
+ for (chip=0; chip<8; chip++)
+ {
+ if (cvmx_nand_state[chip].page_size)
+ result |= 1<<chip;
+ }
+ return result;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_nand_get_active_chips);
+#endif
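+
+/* A short sketch of consuming the bitmask above, assuming initialization has
+** already succeeded; example_list_nand_chips is an illustrative name.
+*/
+#if 0 /* illustration only */
+void example_list_nand_chips(void)
+{
+    int mask = cvmx_nand_get_active_chips();
+    int chip;
+    for (chip = 0; chip < 8; chip++)
+        if (mask & (1 << chip))
+            cvmx_dprintf("NAND on CS%d: page %d bytes, OOB %d bytes\n", chip,
+                cvmx_nand_get_page_size(chip), cvmx_nand_get_oob_size(chip));
+}
+#endif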
+
+
+/**
+ * Override the timing parameters for a NAND chip
+ *
+ * @param chip Chip select to override
+ * @param tim_mult Timing multiplier
+ * @param tim_par Array of 8 timing parameters (tim_par[0] is unused)
+ * @param clen Array of 4 indexes into tim_par for the CLE phases
+ * @param alen Array of 4 indexes into tim_par for the ALE phases
+ * @param rdn Array of 4 indexes into tim_par for the read phases
+ * @param wrn Array of 2 indexes into tim_par for the write phases
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+cvmx_nand_status_t cvmx_nand_set_timing(int chip, int tim_mult, int tim_par[8], int clen[4], int alen[4], int rdn[4], int wrn[2])
+{
+ int i;
+ CVMX_NAND_LOG_CALLED();
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!cvmx_nand_state[chip].page_size)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ cvmx_nand_state[chip].tim_mult = tim_mult;
+ for (i=0;i<8;i++)
+ cvmx_nand_state[chip].tim_par[i] = tim_par[i];
+ for (i=0;i<4;i++)
+ cvmx_nand_state[chip].clen[i] = clen[i];
+ for (i=0;i<4;i++)
+ cvmx_nand_state[chip].alen[i] = alen[i];
+ for (i=0;i<4;i++)
+ cvmx_nand_state[chip].rdn[i] = rdn[i];
+ for (i=0;i<2;i++)
+ cvmx_nand_state[chip].wrn[i] = wrn[i];
+
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+}
+
+
+/**
+ * @INTERNAL
+ * Get the number of free bytes in the NAND command queue
+ *
+ * @return Number of free bytes in the command queue
+ */
+static inline int __cvmx_nand_get_free_cmd_bytes(void)
+{
+ union cvmx_ndf_misc ndf_misc;
+ CVMX_NAND_LOG_CALLED();
+ ndf_misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
+ CVMX_NAND_RETURN((int)ndf_misc.s.fr_byt);
+}
+
+
+/**
+ * Submit a command to the NAND command queue. Generally this
+ * will not be used directly. Instead most programs will use the other
+ * higher level NAND functions.
+ *
+ * @param cmd Command to submit
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+cvmx_nand_status_t cvmx_nand_submit(cvmx_nand_cmd_t cmd)
+{
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)cmd.u64[0]);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)cmd.u64[1]);
+ CVMX_NAND_LOG_PARAM("%s", cvmx_nand_opcode_labels[cmd.s.op_code]);
+ switch (cmd.s.op_code)
+ {
+ /* All these commands fit in one 64bit word */
+ case 0: /* NOP */
+ case 1: /* Timing */
+ case 2: /* WAIT */
+ case 3: /* Chip Enable/Disable */
+ case 4: /* CLE */
+ case 8: /* Write */
+ case 9: /* Read */
+ case 10: /* Read EDO */
+ case 15: /* Bus Acquire/Release */
+ if (__cvmx_nand_get_free_cmd_bytes() < 8)
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+ cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+
+ case 5: /* ALE commands take either one or two 64bit words */
+ if (cmd.ale.adr_byte_num < 5)
+ {
+ if (__cvmx_nand_get_free_cmd_bytes() < 8)
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+ cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+ }
+ else
+ {
+ if (__cvmx_nand_get_free_cmd_bytes() < 16)
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+ cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
+ cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[0]);
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+ }
+
+ case 11: /* Wait status commands take two 64bit words */
+ if (__cvmx_nand_get_free_cmd_bytes() < 16)
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+ cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
+ cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[0]);
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+
+ default:
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ }
+}
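+
+/* A sketch of queueing a single one-word command through cvmx_nand_submit();
+** a NOP (op_code 0) is used since it has no side effects. Illustrative only.
+*/
+#if 0 /* illustration only */
+static cvmx_nand_status_t example_submit_nop(void)
+{
+    cvmx_nand_cmd_t cmd;
+    memset(&cmd, 0, sizeof(cmd)); /* an all-zero word is a NOP, a one-word command */
+    return cvmx_nand_submit(cmd);
+}
+#endif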
+
+
+/**
+ * @INTERNAL
+ * Get the number of bits required to encode the column address. This
+ * does not include padding to align on a byte boundary.
+ *
+ * @param chip NAND chip to get data for
+ *
+ * @return Number of column bits
+ */
+static inline int __cvmx_nand_get_column_bits(int chip)
+{
+ return cvmx_pop(cvmx_nand_state[chip].page_size - 1);
+}
+
+
+/**
+ * @INTERNAL
+ * Get the number of bits required to encode the row address. This
+ * does not include padding to align on a byte boundary.
+ *
+ * @param chip NAND chip to get data for
+ *
+ * @return Number of row bits
+ */
+static inline int __cvmx_nand_get_row_bits(int chip)
+{
+ return cvmx_pop(cvmx_nand_state[chip].blocks-1) + cvmx_pop(cvmx_nand_state[chip].pages_per_block-1);
+}
+
+
+/**
+ * @INTERNAL
+ * Get the number of address cycles required for this NAND part.
+ * This includes column bits, padding, page bits, and block bits.
+ *
+ * @param chip NAND chip to get data for
+ *
+ * @return Number of address cycles on the bus
+ */
+static inline int __cvmx_nand_get_address_cycles(int chip)
+{
+ int address_bits = ((__cvmx_nand_get_column_bits(chip) + 7) >> 3) << 3;
+ address_bits += ((__cvmx_nand_get_row_bits(chip) + 7) >> 3) << 3;
+ return (address_bits + 7) >> 3;
+}
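+
+/* Worked example of the cycle math above for an assumed geometry: a 2048 byte
+** page has 11 column bits, padded to 16 bits = 2 address cycles; 64 pages per
+** block and 2048 blocks need 6 + 11 = 17 row bits, padded to 24 bits = 3
+** cycles, giving 5 address cycles total. */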
+
+
+/**
+ * @INTERNAL
+ * Build the set of commands common to most transactions
+ * @param chip NAND chip to program
+ * @param cmd_data NAND command for CLE cycle 1
+ * @param num_address_cycles
+ * Number of address cycles to put on the bus
+ * @param nand_address
+ * Address to be put on the bus. It is translated according to
+ * the rules described in the file comment.
+ *
+ * @param cmd_data2 If non-zero, adds a second CLE cycle used by a number of NAND
+ * transactions.
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+static inline cvmx_nand_status_t __cvmx_nand_build_pre_cmd(int chip, int cmd_data, int num_address_cycles, uint64_t nand_address, int cmd_data2)
+{
+ cvmx_nand_status_t result;
+ cvmx_nand_cmd_t cmd;
+
+ CVMX_NAND_LOG_CALLED();
+
+ /* Send timing parameters */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.set_tm_par.one = 1;
+ cmd.set_tm_par.tim_mult = cvmx_nand_state[chip].tim_mult;
+ /* tim_par[0] unused */
+ cmd.set_tm_par.tim_par1 = cvmx_nand_state[chip].tim_par[1];
+ cmd.set_tm_par.tim_par2 = cvmx_nand_state[chip].tim_par[2];
+ cmd.set_tm_par.tim_par3 = cvmx_nand_state[chip].tim_par[3];
+ cmd.set_tm_par.tim_par4 = cvmx_nand_state[chip].tim_par[4];
+ cmd.set_tm_par.tim_par5 = cvmx_nand_state[chip].tim_par[5];
+ cmd.set_tm_par.tim_par6 = cvmx_nand_state[chip].tim_par[6];
+ cmd.set_tm_par.tim_par7 = cvmx_nand_state[chip].tim_par[7];
+ result = cvmx_nand_submit(cmd);
+ if (result)
+ CVMX_NAND_RETURN(result);
+
+ /* Send bus select */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.bus_acq.fifteen = 15;
+ cmd.bus_acq.one = 1;
+ result = cvmx_nand_submit(cmd);
+ if (result)
+ CVMX_NAND_RETURN(result);
+
+ /* Send chip select */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.chip_en.chip = chip;
+ cmd.chip_en.one = 1;
+ cmd.chip_en.three = 3;
+ cmd.chip_en.width = (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT) ? 2 : 1;
+ result = cvmx_nand_submit(cmd);
+ if (result)
+ CVMX_NAND_RETURN(result);
+
+ /* Send wait, fixed time
+ ** This meets chip enable to command latch enable timing.
+ ** This is tCS - tCLS from the ONFI spec.
+ ** Use tWP as a proxy, as this is adequate for
+ ** all ONFI 1.0 timing modes. */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.wait.two = 2;
+ cmd.wait.n = 1;
+ if (cvmx_nand_submit(cmd))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ /* Send CLE */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cle.cmd_data = cmd_data;
+ cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
+ cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
+ cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
+ cmd.cle.four = 4;
+ result = cvmx_nand_submit(cmd);
+ if (result)
+ CVMX_NAND_RETURN(result);
+
+ /* Send ALE */
+ if (num_address_cycles)
+ {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.ale.adr_byte_num = num_address_cycles;
+ if (num_address_cycles < __cvmx_nand_get_address_cycles(chip))
+ {
+ cmd.ale.adr_bytes_l = nand_address;
+ cmd.ale.adr_bytes_h = nand_address >> 32;
+ }
+ else
+ {
+ int column_bits = __cvmx_nand_get_column_bits(chip);
+ int column_shift = ((column_bits + 7) >> 3) << 3;
+ int column = nand_address & (cvmx_nand_state[chip].page_size-1);
+ int row = nand_address >> column_bits;
+ cmd.ale.adr_bytes_l = column + (row << column_shift);
+ cmd.ale.adr_bytes_h = row >> (32 - column_shift);
+ }
+ cmd.ale.alen1 = cvmx_nand_state[chip].alen[0];
+ cmd.ale.alen2 = cvmx_nand_state[chip].alen[1];
+ cmd.ale.alen3 = cvmx_nand_state[chip].alen[2];
+ cmd.ale.alen4 = cvmx_nand_state[chip].alen[3];
+ cmd.ale.five = 5;
+ result = cvmx_nand_submit(cmd);
+ if (result)
+ CVMX_NAND_RETURN(result);
+ }
+
+ /* Send CLE 2 */
+ if (cmd_data2)
+ {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cle.cmd_data = cmd_data2;
+ cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
+ cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
+ cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
+ cmd.cle.four = 4;
+ result = cvmx_nand_submit(cmd);
+ if (result)
+ CVMX_NAND_RETURN(result);
+ }
+
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+}
+
+
+/**
+ * @INTERNAL
+ * Build the set of commands that completes most transactions
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+static inline cvmx_nand_status_t __cvmx_nand_build_post_cmd(void)
+{
+ cvmx_nand_status_t result;
+ cvmx_nand_cmd_t cmd;
+
+ CVMX_NAND_LOG_CALLED();
+
+ /* Send chip deselect */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.chip_dis.three = 3;
+ result = cvmx_nand_submit(cmd);
+ if (result)
+ CVMX_NAND_RETURN(result);
+
+ /* Send bus release */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.bus_rel.fifteen = 15;
+ result = cvmx_nand_submit(cmd);
+ if (result)
+ CVMX_NAND_RETURN(result);
+
+ /* Ring the doorbell */
+ cvmx_write_csr(CVMX_NDF_DRBELL, 1);
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+}
+
+
+/**
+ * @INTERNAL
+ * Setup the NAND DMA engine for a transfer
+ *
+ * @param chip Chip select for NAND flash
+ * @param is_write Non zero if this is a write
+ * @param buffer_address
+ * Physical memory address to DMA to/from
+ * @param buffer_length
+ * Length of the DMA in bytes
+ */
+static inline void __cvmx_nand_setup_dma(int chip, int is_write, uint64_t buffer_address, int buffer_length)
+{
+ union cvmx_mio_ndf_dma_cfg ndf_dma_cfg;
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+ CVMX_NAND_LOG_PARAM("%d", is_write);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
+ CVMX_NAND_LOG_PARAM("%d", buffer_length);
+ ndf_dma_cfg.u64 = 0;
+ ndf_dma_cfg.s.en = 1;
+ ndf_dma_cfg.s.rw = is_write; /* One means DMA reads from memory and writes to flash */
+ ndf_dma_cfg.s.clr = 0;
+ ndf_dma_cfg.s.size = ((buffer_length + 7) >> 3) - 1;
+ ndf_dma_cfg.s.adr = buffer_address;
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_MIO_NDF_DMA_CFG, ndf_dma_cfg.u64);
+ CVMX_NAND_RETURN_NOTHING();
+}
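+
+/* Worked example of the size encoding above (assumed transfer length): a 2112
+** byte transfer programs size = 2112/8 - 1 = 263, since the DMA engine counts
+** in 8 byte units. */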
+
+
+/**
+ * Dump a buffer out in hex for debug
+ *
+ * @param buffer_address
+ * Starting physical address
+ * @param buffer_length
+ * Number of bytes to display
+ */
+static void __cvmx_nand_hex_dump(uint64_t buffer_address, int buffer_length)
+{
+ uint8_t *buffer = cvmx_phys_to_ptr(buffer_address);
+ int offset = 0;
+ while (offset < buffer_length)
+ {
+ int i;
+ cvmx_dprintf("%*s%04x:", 2*debug_indent, "", offset);
+ for (i=0; i<32; i++)
+ {
+ if ((i&3) == 0)
+ cvmx_dprintf(" ");
+ if (offset+i < buffer_length)
+ cvmx_dprintf("%02x", 0xff & buffer[offset+i]);
+ else
+ cvmx_dprintf(" ");
+ }
+ cvmx_dprintf("\n");
+ offset += 32;
+ }
+}
+
+/**
+ * @INTERNAL
+ * Perform a low level NAND read command
+ *
+ * @param chip Chip to read from
+ * @param nand_command1
+ * First command cycle value
+ * @param address_cycles
+ * Number of address cycles after command 1
+ * @param nand_address
+ * NAND address to use for address cycles
+ * @param nand_command2
+ * NAND command cycle 2 if not zero
+ * @param buffer_address
+ * Physical address to DMA into
+ * @param buffer_length
+ * Length of the transfer in bytes
+ *
+ * @return Number of bytes transferred or a negative error code
+ */
+static inline int __cvmx_nand_low_level_read(int chip, int nand_command1, int address_cycles, uint64_t nand_address, int nand_command2, uint64_t buffer_address, int buffer_length)
+{
+ cvmx_nand_cmd_t cmd;
+ union cvmx_mio_ndf_dma_cfg ndf_dma_cfg;
+ int bytes;
+
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+ CVMX_NAND_LOG_PARAM("0x%x", nand_command1);
+ CVMX_NAND_LOG_PARAM("%d", address_cycles);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
+ CVMX_NAND_LOG_PARAM("0x%x", nand_command2);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
+ CVMX_NAND_LOG_PARAM("%d", buffer_length);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!buffer_address)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (buffer_address & 7)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (buffer_length & 7)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!buffer_length)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ /* Build the command and address cycles */
+ if (__cvmx_nand_build_pre_cmd(chip, nand_command1, address_cycles, nand_address, nand_command2))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ /* Send WAIT. This waits for some time, then
+ ** waits for busy to be de-asserted. */
+ if (__wait_for_busy_done(chip))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ /* Wait for tRR after busy de-asserts.
+ ** Use 2* tALS as proxy. This is overkill in
+ ** the slow modes, but not bad in the faster ones. */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.wait.two = 2;
+ cmd.wait.n=4;
+ if (cvmx_nand_submit(cmd))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+ if (cvmx_nand_submit(cmd))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ /* Send READ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.rd.data_bytes = buffer_length;
+ if (cvmx_nand_state[chip].onfi_timing >= 4)
+ cmd.rd.nine = 10; /* READ_EDO command is required for ONFI timing modes 4 and 5 */
+ else
+ cmd.rd.nine = 9;
+ cmd.rd.rdn1 = cvmx_nand_state[chip].rdn[0];
+ cmd.rd.rdn2 = cvmx_nand_state[chip].rdn[1];
+ cmd.rd.rdn3 = cvmx_nand_state[chip].rdn[2];
+ cmd.rd.rdn4 = cvmx_nand_state[chip].rdn[3];
+ if (cvmx_nand_submit(cmd))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ __cvmx_nand_setup_dma(chip, 0, buffer_address, buffer_length);
+
+ if (__cvmx_nand_build_post_cmd())
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+ WATCHDOG_RESET();
+ /* Wait for the DMA to complete */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_NDF_DMA_CFG, cvmx_mio_ndf_dma_cfg_t, en, ==, 0, NAND_TIMEOUT_USECS_READ))
+ {
+ WATCHDOG_RESET();
+ CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
+ }
+ /* Return the number of bytes transferred */
+ ndf_dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_NDF_DMA_CFG);
+ bytes = ndf_dma_cfg.s.adr - buffer_address;
+
+ if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
+ __cvmx_nand_hex_dump(buffer_address, bytes);
+
+ CVMX_NAND_RETURN(bytes);
+}
+
+
+/**
+ * Read a page from NAND. If the buffer has room, the out of band
+ * data will be included.
+ *
+ * @param chip Chip select for NAND flash
+ * @param nand_address
+ * Location in NAND to read. See description in file comment
+ * @param buffer_address
+ * Physical address to store the result at
+ * @param buffer_length
+ * Number of bytes to read
+ *
+ * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
+ */
+int cvmx_nand_page_read(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length)
+{
+ int bytes;
+
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
+ CVMX_NAND_LOG_PARAM("%d", buffer_length);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!cvmx_nand_state[chip].page_size)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!buffer_address)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (buffer_address & 7)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (buffer_length & 7)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!buffer_length)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ /* For 16 bit mode, addresses within a page are word addresses, rather than byte addresses */
+ if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
+ nand_address = (nand_address & ~(cvmx_nand_state[chip].page_size - 1)) | ((nand_address & (cvmx_nand_state[chip].page_size - 1)) >> 1);
+
+ bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ, __cvmx_nand_get_address_cycles(chip), nand_address, NAND_COMMAND_READ_FIN, buffer_address, buffer_length);
+ CVMX_NAND_RETURN(bytes);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_nand_page_read);
+#endif
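+
+/* A hedged sketch of reading one page plus its OOB; the helper name and the
+** assumption that buf is 8 byte aligned, physically contiguous memory are
+** illustrative, not part of the SDK.
+*/
+#if 0 /* illustration only */
+static int example_read_page(int chip, int block, int page, uint8_t *buf)
+{
+    int page_size = cvmx_nand_get_page_size(chip);
+    int oob_size = cvmx_nand_get_oob_size(chip);
+    int pages_per_block = cvmx_nand_get_pages_per_block(chip);
+    /* The row (block and page) sits directly above the column bits, so
+       multiplying by the power-of-two page size builds the nand_address */
+    uint64_t address = (uint64_t)(block * pages_per_block + page) * page_size;
+    return cvmx_nand_page_read(chip, address, cvmx_ptr_to_phys(buf),
+        page_size + oob_size);
+}
+#endif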
+
+
+/**
+ * Write a page to NAND. The buffer must contain the entire page
+ * including the out of band data.
+ *
+ * @param chip Chip select for NAND flash
+ * @param nand_address
+ * Location in NAND to write. See description in file comment
+ * @param buffer_address
+ * Physical address to read the data from
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+cvmx_nand_status_t cvmx_nand_page_write(int chip, uint64_t nand_address, uint64_t buffer_address)
+{
+ cvmx_nand_cmd_t cmd;
+ int buffer_length;
+
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!cvmx_nand_state[chip].page_size)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!buffer_address)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (buffer_address & 7)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ /* For 16 bit mode, addresses within a page are word addresses, rather than byte addresses */
+ if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
+ nand_address = (nand_address & ~(cvmx_nand_state[chip].page_size - 1)) | ((nand_address & (cvmx_nand_state[chip].page_size - 1)) >> 1);
+
+ buffer_length = cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size;
+
+ /* The NAND DMA engine always does transfers in 8 byte blocks, so round the buffer size down
+ ** to a multiple of 8, otherwise we will transfer too much data to the NAND chip.
+ ** Note this prevents the last few bytes of the OOB from being written. If these bytes
+ ** need to be written, then this check needs to be removed, but this will result in
+ ** extra write cycles beyond the end of the OOB. */
+ buffer_length &= ~0x7;
+
+ /* Build the command and address cycles */
+ if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_PROGRAM, __cvmx_nand_get_address_cycles(chip), nand_address, 0))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ /* Send WRITE */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.wr.data_bytes = buffer_length;
+ cmd.wr.eight = 8;
+ cmd.wr.wrn1 = cvmx_nand_state[chip].wrn[0];
+ cmd.wr.wrn2 = cvmx_nand_state[chip].wrn[1];
+ if (cvmx_nand_submit(cmd))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ /* Send WRITE command */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cle.cmd_data = NAND_COMMAND_PROGRAM_FIN;
+ cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
+ cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
+ cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
+ cmd.cle.four = 4;
+ if (cvmx_nand_submit(cmd))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ __cvmx_nand_setup_dma(chip, 1, buffer_address, buffer_length);
+
+ /* WAIT for R_B to signal program is complete */
+ if (__wait_for_busy_done(chip))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ if (__cvmx_nand_build_post_cmd())
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ /* Wait for the DMA to complete */
+ WATCHDOG_RESET();
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_NDF_DMA_CFG, cvmx_mio_ndf_dma_cfg_t, en, ==, 0, NAND_TIMEOUT_USECS_WRITE))
+ {
+ WATCHDOG_RESET();
+ CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
+ }
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_nand_page_write);
+#endif
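+
+/* A hedged sketch pairing erase and write, assuming buf holds page_size +
+** oob_size bytes in 8 byte aligned memory; the helper name is illustrative.
+*/
+#if 0 /* illustration only */
+static cvmx_nand_status_t example_rewrite_first_page(int chip, int block, uint8_t *buf)
+{
+    int page_size = cvmx_nand_get_page_size(chip);
+    int pages_per_block = cvmx_nand_get_pages_per_block(chip);
+    uint64_t address = (uint64_t)block * pages_per_block * page_size;
+    cvmx_nand_status_t status = cvmx_nand_block_erase(chip, address);
+    if (status != CVMX_NAND_SUCCESS)
+        return status;
+    return cvmx_nand_page_write(chip, address, cvmx_ptr_to_phys(buf));
+}
+#endif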
+
+
+/**
+ * Erase a NAND block. A single block contains multiple pages.
+ *
+ * @param chip Chip select for NAND flash
+ * @param nand_address
+ * Location in NAND to erase. See description in file comment
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+cvmx_nand_status_t cvmx_nand_block_erase(int chip, uint64_t nand_address)
+{
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!cvmx_nand_state[chip].page_size)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ /* Build the command and address cycles */
+ if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_ERASE,
+ (__cvmx_nand_get_row_bits(chip)+7) >> 3,
+ nand_address >> __cvmx_nand_get_column_bits(chip),
+ NAND_COMMAND_ERASE_FIN))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ /* WAIT for R_B to signal erase is complete */
+ if (__wait_for_busy_done(chip))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ if (__cvmx_nand_build_post_cmd())
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ /* Wait for the command queue to be idle, which means the wait is done */
+ WATCHDOG_RESET();
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_NDF_ST_REG, cvmx_ndf_st_reg_t, exe_idle, ==, 1, NAND_TIMEOUT_USECS_BLOCK_ERASE))
+ {
+ WATCHDOG_RESET();
+ CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
+ }
+
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_nand_block_erase);
+#endif
+
+
+/* Some reads (read ID, read parameter page) only use the low 8 bits of the bus
+** in 16 bit mode. We remove the unused bytes so that the data we present to the
+** caller is as expected (same as 8 bit mode.)
+*/
+static void __cvmx_nand_fixup_16bit_id_reads(uint8_t *buf, int buffer_length)
+{
+ /* Decimate data, taking only every other byte. */
+ int i;
+ for (i = 0; i < buffer_length/2; i++)
+ buf[i] = buf[2*i + 1];
+}
+
+/**
+ * Read the NAND ID information
+ *
+ * @param chip Chip select for NAND flash
+ * @param nand_address
+ * NAND address to read ID from. Usually this is either 0x0 or 0x20.
+ * @param buffer_address
+ * Physical address to store data in
+ * @param buffer_length
+ * Length of the buffer. Usually this is 4-8 bytes. For 16 bit mode, this must be twice
+ * as large as the actual expected data.
+ *
+ * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
+ */
+int cvmx_nand_read_id(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length)
+{
+ int bytes;
+
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
+ CVMX_NAND_LOG_PARAM("%d", buffer_length);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!buffer_address)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (buffer_address & 7)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!buffer_length)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ_ID, 1, nand_address, 0, buffer_address, buffer_length);
+ if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
+ __cvmx_nand_fixup_16bit_id_reads(cvmx_phys_to_ptr(buffer_address), buffer_length);
+
+ CVMX_NAND_RETURN(bytes);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_nand_read_id);
+#endif
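+
+/* A hedged sketch of checking for the ONFI signature at ID address 0x20, as
+** the probe code in cvmx_nand_initialize() does; the static buffer guarantees
+** the 8 byte alignment the API checks for. Illustrative only.
+*/
+#if 0 /* illustration only */
+static int example_is_onfi(int chip)
+{
+    static uint64_t id[2]; /* uint64_t storage gives 8 byte alignment */
+    uint8_t *bytes = (uint8_t *)id;
+    if (cvmx_nand_read_id(chip, 0x20, cvmx_ptr_to_phys(id), 8) < 8)
+        return 0;
+    return (bytes[0] == 'O') && (bytes[1] == 'N') &&
+           (bytes[2] == 'F') && (bytes[3] == 'I');
+}
+#endif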
+
+
+/**
+ * Read the NAND parameter page
+ *
+ * @param chip Chip select for NAND flash
+ * @param buffer_address
+ * Physical address to store data in
+ * @param buffer_length
+ * Length of the buffer. Usually 1024 bytes for 8 bit, 2048 for 16 bit mode.
+ *
+ * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
+ */
+int cvmx_nand_read_param_page(int chip, uint64_t buffer_address, int buffer_length)
+{
+ int bytes;
+
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+ CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
+ CVMX_NAND_LOG_PARAM("%d", buffer_length);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!buffer_address)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (buffer_address & 7)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (buffer_length & 7)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!buffer_length)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ_PARAM_PAGE, 1, 0x0, 0, buffer_address, buffer_length);
+ if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
+ __cvmx_nand_fixup_16bit_id_reads(cvmx_phys_to_ptr(buffer_address), buffer_length);
+ CVMX_NAND_RETURN(bytes);
+}
+
+
+/**
+ * Get the status of the NAND flash
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return NAND status or a negative cvmx_nand_status_t error code on failure
+ */
+int cvmx_nand_get_status(int chip)
+{
+ int status;
+ int offset = !!(cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT); /* Normalize flag to 0/1 */
+
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ *((uint8_t*)cvmx_nand_buffer + offset) = 0xff;
+ status = __cvmx_nand_low_level_read(chip, NAND_COMMAND_STATUS, 0, 0, 0, cvmx_ptr_to_phys(cvmx_nand_buffer), 8);
+ if (status > 0)
+ status = *((uint8_t*)cvmx_nand_buffer + offset);
+
+ CVMX_NAND_RETURN(status);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_nand_get_status);
+#endif
+
+
+/**
+ * Get the page size, excluding out of band data. This function
+ * will return zero for chip selects not connected to NAND.
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return Page size in bytes or a negative cvmx_nand_status_t error code on failure
+ */
+int cvmx_nand_get_page_size(int chip)
+{
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ CVMX_NAND_RETURN(cvmx_nand_state[chip].page_size);
+}
+
+
+/**
+ * Get the OOB size.
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return OOB size in bytes or a negative cvmx_nand_status_t error code on failure
+ */
+int cvmx_nand_get_oob_size(int chip)
+{
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ CVMX_NAND_RETURN(cvmx_nand_state[chip].oob_size);
+}
+
+
+/**
+ * Get the number of pages per NAND block
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return Number of pages in each block or a negative cvmx_nand_status_t error
+ * code on failure
+ */
+int cvmx_nand_get_pages_per_block(int chip)
+{
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ CVMX_NAND_RETURN(cvmx_nand_state[chip].pages_per_block);
+}
+
+
+/**
+ * Get the number of blocks in the NAND flash
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return Number of blocks or a negative cvmx_nand_status_t error code on failure
+ */
+int cvmx_nand_get_blocks(int chip)
+{
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ CVMX_NAND_RETURN(cvmx_nand_state[chip].blocks);
+}
+
+
+/**
+ * Reset the NAND flash
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+cvmx_nand_status_t cvmx_nand_reset(int chip)
+{
+ CVMX_NAND_LOG_CALLED();
+ CVMX_NAND_LOG_PARAM("%d", chip);
+
+ if ((chip < 0) || (chip > 7))
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+ if (!cvmx_nand_state[chip].page_size)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_RESET, 0, 0, 0))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ /* WAIT for R_B to signal reset is complete */
+ if (__wait_for_busy_done(chip))
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ if (__cvmx_nand_build_post_cmd())
+ CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
+
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_nand_reset);
+#endif
+
+
+
+
+/**
+ * This function computes the Octeon specific ECC data used by the NAND boot
+ * feature.
+ *
+ * @param block pointer to 256 bytes of data
+ * @param eccp pointer to where the ECC data will be stored (3 bytes are written)
+ */
+void cvmx_nand_compute_boot_ecc(unsigned char *block, unsigned char *eccp)
+{
+ unsigned char pd0, pd1, pd2;
+ int i, j;
+
+ pd0 = pd1 = pd2 = 0;
+
+ for (i = 0; i < 256; i++) /* PD0<0> */
+ pd0 ^= (block[i] ^ (block[i] >> 2) ^ (block[i] >> 4) ^ (block[i] >> 6)) & 1;
+ for (i = 0; i < 256; i++) /* PD0<1> */
+ pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 4) ^ (block[i] >> 5)) & 1) << 1;
+ for (i = 0; i < 256; i++) /* PD0<2> */
+ pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^ (block[i] >> 3)) & 1) << 2;
+ for (i = 0; i < 128; i++) /* PD0<3> */
+ pd0 ^= ((block[2*i] ^ (block[2*i] >> 1) ^ (block[2*i] >> 2) ^
+ (block[2*i] >> 3) ^ (block[2*i] >> 4) ^ (block[2*i] >> 5) ^
+ (block[2*i] >> 6) ^ (block[2*i] >> 7)) & 1) << 3;
+ for (i = 0; i < 64; i++) /* PD0<4> */
+ for (j = 0; j < 2; j++)
+ pd0 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
+ (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
+ (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 4;
+ for (i = 0; i < 32; i++) /* PD0<5> */
+ for (j = 0; j < 4; j++)
+ pd0 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
+ (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
+ (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 5;
+ for (i = 0; i < 16; i++) /* PD0<6> */
+ for (j = 0; j < 8; j++)
+ pd0 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
+ (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
+ (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 6;
+ for (i = 0; i < 8; i++) /* PD0<7> */
+ for (j = 0; j < 16; j++)
+ pd0 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
+ (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
+ (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 7;
+ for (i = 0; i < 4; i++) /* PD1<0> */
+ for (j = 0; j < 32; j++)
+ pd1 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
+ (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
+ (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 0;
+ for (i = 0; i < 2; i++) /* PD1<1> */
+ for (j = 0; j < 64; j++)
+ pd1 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
+ (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
+ (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 1;
+ for (i = 0; i < 128; i++) /* PD1<2> */
+ pd1 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
+ (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
+ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 2;
+ /* PD1<3> */
+ /* PD1<4> */
+ for (i = 0; i < 256; i++) /* PD1<5> */
+ pd1 ^= (((block[i] >> 1) ^ (block[i] >> 3) ^ (block[i] >> 5) ^ (block[i] >> 7)) & 1) << 5;
+ for (i = 0; i < 256; i++) /* PD1<6> */
+ pd1 ^= (((block[i] >> 2) ^ (block[i] >> 3) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 6;
+ for (i = 0; i < 256; i++) /* PD1<7> */
+ pd1 ^= (((block[i] >> 4) ^ (block[i] >> 5) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
+ for (i = 0; i < 128; i++) /* PD2<0> */
+ pd2 ^= ((block[2*i+1] ^ (block[2*i+1] >> 1) ^ (block[2*i+1] >> 2) ^
+ (block[2*i+1] >> 3) ^ (block[2*i+1] >> 4) ^ (block[2*i+1] >> 5) ^
+ (block[2*i+1] >> 6) ^ (block[2*i+1] >> 7)) & 1) << 0;
+ for (i = 0; i < 64; i++) /* PD2<1> */
+ for (j = 2; j < 4; j++)
+ pd2 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
+ (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
+ (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 1;
+ for (i = 0; i < 32; i++) /* PD2<2> */
+ for (j = 4; j < 8; j++)
+ pd2 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
+ (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
+ (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 2;
+ for (i = 0; i < 16; i++) /* PD2<3> */
+ for (j = 8; j < 16; j++)
+ pd2 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
+ (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
+ (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 3;
+ for (i = 0; i < 8; i++) /* PD2<4> */
+ for (j = 16; j < 32; j++)
+ pd2 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
+ (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
+ (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 4;
+ for (i = 0; i < 4; i++) /* PD2<5> */
+ for (j = 32; j < 64; j++)
+ pd2 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
+ (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
+ (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 5;
+ for (i = 0; i < 2; i++) /* PD2<6> */
+ for (j = 64; j < 128; j++)
+ pd2 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
+ (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
+ (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 6;
+ for (i = 128; i < 256; i++) /* PD2<7> */
+ pd2 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
+ (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
+ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
+
+ eccp[0] = pd0;
+ eccp[1] = pd1;
+ eccp[2] = pd2;
+}
+
+/**
+ * Check an Octeon ECC block, fixing errors if possible
+ *
+ * @param block Pointer to block to check
+ *
+ * @return Zero if the block has no errors, one if a single bit error was
+ * corrected, two if the errors could not be corrected.
+ */
+int cvmx_nand_correct_boot_ecc(uint8_t *block)
+{
+ unsigned char pd0, pd1, pd2;
+ int i, j;
+ unsigned char xorpd0, xorpd1, xorpd2;
+ int xor_num;
+ unsigned int check;
+
+ asm volatile ("pref 0,0(%0);pref 0,128(%0);pref 0,256(%0)\n" :: "r" (block));
+
+ pd0 = pd1 = pd2 = 0;
+
+ for (i = 0; i < 256; i++) /* PD0<0> */
+ pd0 ^= (block[i] ^ (block[i] >> 2) ^ (block[i] >> 4) ^ (block[i] >> 6)) & 1;
+ for (i = 0; i < 256; i++) /* PD0<1> */
+ pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 4) ^ (block[i] >> 5)) & 1) << 1;
+ for (i = 0; i < 256; i++) /* PD0<2> */
+ pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^ (block[i] >> 3)) & 1) << 2;
+ for (i = 0; i < 128; i++) /* PD0<3> */
+ pd0 ^= ((block[2*i] ^ (block[2*i] >> 1) ^ (block[2*i] >> 2) ^
+ (block[2*i] >> 3) ^ (block[2*i] >> 4) ^ (block[2*i] >> 5) ^
+ (block[2*i] >> 6) ^ (block[2*i] >> 7)) & 1) << 3;
+ for (i = 0; i < 64; i++) /* PD0<4> */
+ for (j = 0; j < 2; j++)
+ pd0 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
+ (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
+ (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 4;
+ for (i = 0; i < 32; i++) /* PD0<5> */
+ for (j = 0; j < 4; j++)
+ pd0 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
+ (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
+ (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 5;
+ for (i = 0; i < 16; i++) /* PD0<6> */
+ for (j = 0; j < 8; j++)
+ pd0 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
+ (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
+ (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 6;
+ for (i = 0; i < 8; i++) /* PD0<7> */
+ for (j = 0; j < 16; j++)
+ pd0 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
+ (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
+ (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 7;
+ for (i = 0; i < 4; i++) /* PD1<0> */
+ for (j = 0; j < 32; j++)
+ pd1 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
+ (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
+ (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 0;
+ for (i = 0; i < 2; i++) /* PD1<1> */
+ for (j = 0; j < 64; j++)
+ pd1 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
+ (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
+ (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 1;
+ for (i = 0; i < 128; i++) /* PD1<2> */
+ pd1 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
+ (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
+ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 2;
+ /* PD1<3> */
+ /* PD1<4> */
+ for (i = 0; i < 256; i++) /* PD1<5> */
+ pd1 ^= (((block[i] >> 1) ^ (block[i] >> 3) ^ (block[i] >> 5) ^ (block[i] >> 7)) & 1) << 5;
+ for (i = 0; i < 256; i++) /* PD1<6> */
+ pd1 ^= (((block[i] >> 2) ^ (block[i] >> 3) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 6;
+ for (i = 0; i < 256; i++) /* PD1<7> */
+ pd1 ^= (((block[i] >> 4) ^ (block[i] >> 5) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
+ for (i = 0; i < 128; i++) /* PD2<0> */
+ pd2 ^= ((block[2*i+1] ^ (block[2*i+1] >> 1) ^ (block[2*i+1] >> 2) ^
+ (block[2*i+1] >> 3) ^ (block[2*i+1] >> 4) ^ (block[2*i+1] >> 5) ^
+ (block[2*i+1] >> 6) ^ (block[2*i+1] >> 7)) & 1) << 0;
+ for (i = 0; i < 64; i++) /* PD2<1> */
+ for (j = 2; j < 4; j++)
+ pd2 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
+ (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
+ (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 1;
+ for (i = 0; i < 32; i++) /* PD2<2> */
+ for (j = 4; j < 8; j++)
+ pd2 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
+ (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
+ (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 2;
+ for (i = 0; i < 16; i++) /* PD2<3> */
+ for (j = 8; j < 16; j++)
+ pd2 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
+ (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
+ (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 3;
+ for (i = 0; i < 8; i++) /* PD2<4> */
+ for (j = 16; j < 32; j++)
+ pd2 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
+ (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
+ (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 4;
+ for (i = 0; i < 4; i++) /* PD2<5> */
+ for (j = 32; j < 64; j++)
+ pd2 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
+ (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
+ (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 5;
+ for (i = 0; i < 2; i++) /* PD2<6> */
+ for (j = 64; j < 128; j++)
+ pd2 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
+ (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
+ (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 6;
+ for (i = 128; i < 256; i++) /* PD2<7> */
+ pd2 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
+ (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
+ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
+
+ xorpd0 = pd0 ^ block[256];
+ xorpd1 = pd1 ^ block[257];
+ xorpd2 = pd2 ^ block[258];
+
+ xor_num = __builtin_popcount((xorpd0 << 16) | (xorpd1 << 8) | xorpd2);
+ check = (((xorpd1 & 7) << 8) | xorpd0) ^ ((xorpd2 << 3) | (xorpd1 >> 5));
+
+ if (xor_num == 0)
+ return 0;
+ else if ((xor_num > 1) && (check != 0x7FF))
+ return 2;
+
+ if (check == 0x7FF)
+ {
+ /* Correct the error */
+ block[xorpd2] ^= 1 << (xorpd1 >> 5);
+ }
+
+ return 1;
+}
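+
+/* A hedged round-trip sketch of the two boot ECC routines, assuming block[]
+** holds 256 data bytes followed by the ECC bytes at offsets 256-258, the
+** layout cvmx_nand_correct_boot_ecc() reads. Illustrative only.
+*/
+#if 0 /* illustration only */
+static int example_boot_ecc_roundtrip(uint8_t *block)
+{
+    cvmx_nand_compute_boot_ecc(block, block + 256); /* append ECC after data */
+    block[10] ^= 0x04;                              /* inject a single bit error */
+    return cvmx_nand_correct_boot_ecc(block);       /* expected to return 1 */
+}
+#endif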
+
+cvmx_nand_status_t cvmx_nand_set_defaults(int page_size, int oob_size, int pages_per_block, int blocks, int onfi_timing_mode)
+{
+ if (!page_size || !oob_size || !pages_per_block || !blocks || onfi_timing_mode > 5)
+ CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
+
+ cvmx_nand_default.page_size = page_size;
+ cvmx_nand_default.oob_size = oob_size;
+ cvmx_nand_default.pages_per_block = pages_per_block;
+ cvmx_nand_default.blocks = blocks;
+ cvmx_nand_default.onfi_timing = onfi_timing_mode;
+
+ CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
+}
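+
+/* A hedged sketch of pairing cvmx_nand_set_defaults() with the DONT_PROBE
+** initialization path; the geometry (2KB page, 64 byte OOB, 64 pages/block,
+** 2048 blocks, ONFI timing mode 2) and the assumption that only chip select 1
+** is wired to NAND are illustrative.
+*/
+#if 0 /* illustration only */
+static cvmx_nand_status_t example_init_without_probe(void)
+{
+    cvmx_nand_status_t status = cvmx_nand_set_defaults(2048, 64, 64, 2048, 2);
+    if (status != CVMX_NAND_SUCCESS)
+        return status;
+    return cvmx_nand_initialize(CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE, 1 << 1);
+}
+#endif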
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-nand.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-nand.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-nand.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-nand.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,717 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This header defines the CVMX interface to the NAND flash controller. The
+ * basic operations common to all NAND devices are supported by this API, but
+ * many more advanced functions are not supported. The low level hardware supports
+ * all types of transactions, but this API only implements the most commonly
+ * used operations. This API performs no locking, so it is the responsibility of
+ * the caller to make sure only one thread of execution is accessing the NAND
+ * controller at a time. Most applications should not use this API directly but
+ * instead use a flash logical layer supplied through a secondary system. For
+ * example, the Linux MTD layer provides a driver for running JFFS2 on top of
+ * NAND flash.
+ *
+ * <h2>Selecting the NAND Chip</h2>
+ *
+ * Octeon's NAND controller assumes a single NAND chip is connected to a boot
+ * bus chip select. Throughout this API, NAND chips are referred to by the chip
+ * select they are connected to (0-7). Chip select 0 will only be a NAND chip
+ * when you are booting from NAND flash.
+ *
+ * <h2>NAND Addressing</h2>
+ *
+ * Various functions in cvmx-nand use addresses to index into NAND flash. All
+ * functions use a uniform address translation scheme to map the passed address
+ * into a NAND block, page, and column. In NAND flash a page represents the
+ * basic unit of reads and writes. Each page contains a power of two number of
+ * bytes and some number of extra out of band (OOB) bytes. A fixed number of
+ * pages fit into each NAND block. Here is the mapping of bits in the cvmx-nand
+ * address to the NAND hardware:
+ * <pre>
+ * 63      56      48      40      32      24      16      8      0
+ * +-------+-------+-------+-------+-------+-------+-------+------+
+ * |                 64 bit cvmx-nand nand_address                |
+ * +------------------------------------------------+----+--------+
+ * |                      block                     |page| column |
+ * +-------+-------+-------+-------+-------+--------+----+--------+
+ * 63      56      48      40      32      24      16      8      0
+ * </pre>
+ * Basically the block, page, and column addresses are packed together. Before
+ * being sent out the NAND pins for addressing, the column is padded out to an
+ * even number of bytes. This means that column addresses are 2 bytes, or 2
+ * address cycles, for page sizes between 512 and 65536 bytes. Page sizes
+ * between 128KB and 16MB would use 3 column address cycles. NAND devices
+ * normally have either 32 or 64 pages per block, needing either 5 or 6 address
+ * bits respectively. This means you have 10 bits for block address using 4
+ * address cycles, or 18 for 5 address cycles. Using the cvmx-nand addressing
+ * scheme, it is not possible to directly index the OOB data. Instead you can
+ * access it by reading or writing more data than the normal page size would
+ * allow. Logically the OOB data is appended onto the page data. For
+ * example, this means that a read of 65 bytes from a column address of 0x7ff
+ * would yield byte 2047 of the page and then 64 bytes of OOB data.
+ *
+ * <hr>$Revision: 35726 $<hr>
+ */
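+
+/* Worked example of the packing above for an assumed geometry (2048 byte
+** pages, 64 pages per block): page P of block B at column C maps to
+** nand_address = ((B * 64) + P) * 2048 + C, since the row (block and page)
+** sits directly above the column bits. */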
+
+#ifndef __CVMX_NAND_H__
+#define __CVMX_NAND_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Maximum PAGE + OOB size supported. This is used to size
+** buffers, some of which must be statically allocated. */
+#define CVMX_NAND_MAX_PAGE_AND_OOB_SIZE (4096 + 256)
+
+
+/* Block size for boot ECC */
+#define CVMX_NAND_BOOT_ECC_BLOCK_SIZE (256)
+/* ECC bytes for each block */
+#define CVMX_NAND_BOOT_ECC_ECC_SIZE (8)
+
+/**
+ * Flags to be passed to the initialize function
+ */
+typedef enum
+{
+ CVMX_NAND_INITIALIZE_FLAGS_16BIT = 1<<0,
+ CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE = 1<<1,
+ CVMX_NAND_INITIALIZE_FLAGS_DEBUG = 1<<15,
+} cvmx_nand_initialize_flags_t;
+
+/**
+ * Return codes from NAND functions
+ */
+typedef enum
+{
+ CVMX_NAND_SUCCESS = 0,
+ CVMX_NAND_NO_MEMORY = -1,
+ CVMX_NAND_BUSY = -2,
+ CVMX_NAND_INVALID_PARAM = -3,
+ CVMX_NAND_TIMEOUT = -4,
+ CVMX_NAND_ERROR = -5,
+ CVMX_NAND_NO_DEVICE = -6,
+} cvmx_nand_status_t;
+
+/**
+ * NAND NOP command definition
+ */
+typedef struct
+{
+ uint64_t reserved_64_127 : 64;
+ uint64_t reserved_4_63 : 60;
+ uint64_t zero : 4;
+} cvmx_nand_cmd_nop_t;
+
+/**
+ * NAND SET_TM_PAR command definition
+ */
+typedef struct
+{
+ uint64_t reserved_64_127 : 64;
+ uint64_t tim_par7 : 8;
+ uint64_t tim_par6 : 8;
+ uint64_t tim_par5 : 8;
+ uint64_t tim_par4 : 8;
+ uint64_t tim_par3 : 8;
+ uint64_t tim_par2 : 8;
+ uint64_t tim_par1 : 8;
+ uint64_t tim_mult : 4;
+ uint64_t one : 4;
+} cvmx_nand_cmd_set_tm_par_t;
+
+/**
+ * NAND WAIT command definition
+ */
+typedef struct
+{
+ uint64_t reserved_64_127 : 64;
+ uint64_t reserved_11_63 : 53;
+ uint64_t n : 3;
+ uint64_t reserved_5_7 : 3;
+ uint64_t r_b : 1;
+ uint64_t two : 4;
+} cvmx_nand_cmd_wait_t;
+
+/**
+ * NAND CHIP_EN command definition
+ */
+typedef struct
+{
+ uint64_t reserved_64_127 : 64;
+ uint64_t reserved_10_63 : 54;
+ uint64_t width : 2;
+ uint64_t one : 1;
+ uint64_t chip : 3;
+ uint64_t three : 4;
+} cvmx_nand_cmd_chip_en_t;
+
+/**
+ * NAND CHIP_DIS command definition
+ */
+typedef struct
+{
+ uint64_t reserved_64_127 : 64;
+ uint64_t reserved_4_63 : 60;
+ uint64_t three : 4;
+} cvmx_nand_cmd_chip_dis_t;
+
+/**
+ * NAND CLE command definition
+ */
+typedef struct
+{
+ uint64_t reserved_64_127 : 64;
+ uint64_t reserved_25_63 : 39;
+ uint64_t clen3 : 3;
+ uint64_t clen2 : 3;
+ uint64_t clen1 : 3;
+ uint64_t cmd_data : 8;
+ uint64_t reserved_4_7 : 4;
+ uint64_t four : 4;
+} cvmx_nand_cmd_cle_t;
+
+/**
+ * NAND ALE command definition
+ */
+typedef struct
+{
+ uint64_t reserved_96_127 : 32;
+ uint64_t adr_bytes_h : 32;
+ uint64_t adr_bytes_l : 32;
+ uint64_t reserved_28_31 : 4;
+ uint64_t alen4 : 3;
+ uint64_t alen3 : 3;
+ uint64_t alen2 : 3;
+ uint64_t alen1 : 3;
+ uint64_t reserved_12_15 : 4;
+ uint64_t adr_byte_num : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t five : 4;
+} cvmx_nand_cmd_ale_t;
+
+/**
+ * NAND WR command definition
+ */
+typedef struct
+{
+ uint64_t reserved_64_127 : 64;
+ uint64_t reserved_31_63 : 34;
+ uint64_t wrn2 : 3;
+ uint64_t wrn1 : 3;
+ uint64_t reserved_20_24 : 4;
+ uint64_t data_bytes : 16;
+ uint64_t eight : 4;
+} cvmx_nand_cmd_wr_t;
+
+/**
+ * NAND RD command definition
+ */
+typedef struct
+{
+ uint64_t reserved_64_127 : 64;
+ uint64_t reserved_32_63 : 32;
+ uint64_t rdn4 : 3;
+ uint64_t rdn3 : 3;
+ uint64_t rdn2 : 3;
+ uint64_t rdn1 : 3;
+ uint64_t data_bytes : 16;
+ uint64_t nine : 4;
+} cvmx_nand_cmd_rd_t;
+
+/**
+ * NAND RD_EDO command definition
+ */
+typedef struct
+{
+ uint64_t reserved_64_127 : 64;
+ uint64_t reserved_32_63 : 32;
+ uint64_t rdn4 : 3;
+ uint64_t rdn3 : 3;
+ uint64_t rdn2 : 3;
+ uint64_t rdn1 : 3;
+ uint64_t data_bytes : 16;
+ uint64_t ten : 4;
+} cvmx_nand_cmd_rd_edo_t;
+
+/**
+ * NAND WAIT_STATUS command definition
+ */
+typedef struct
+{
+ uint64_t rdn4 : 3;
+ uint64_t rdn3 : 3;
+ uint64_t rdn2 : 3;
+ uint64_t rdn1 : 3;
+ uint64_t comp_byte : 8;
+ uint64_t and_mask : 8;
+ uint64_t nine : 4;
+ uint64_t reserved_28_95 : 64;
+ uint64_t clen4 : 3;
+ uint64_t clen3 : 3;
+ uint64_t clen2 : 3;
+ uint64_t clen1 : 3;
+ uint64_t data : 8;
+ uint64_t reserved_4_7 : 4;
+ uint64_t eleven : 4;
+} cvmx_nand_cmd_wait_status_t;
+
+/**
+ * NAND WAIT_STATUS_ALE command definition
+ */
+typedef struct
+{
+ uint64_t rdn4 : 3;
+ uint64_t rdn3 : 3;
+ uint64_t rdn2 : 3;
+ uint64_t rdn1 : 3;
+ uint64_t comp_byte : 8;
+ uint64_t and_mask : 8;
+ uint64_t nine : 4;
+ uint64_t adr_bytes : 32;
+ uint64_t reserved_60_63 : 4;
+ uint64_t alen4 : 3;
+ uint64_t alen3 : 3;
+ uint64_t alen2 : 3;
+ uint64_t alen1 : 3;
+ uint64_t reserved_44_47 : 4;
+ uint64_t adr_byte_num : 4;
+ uint64_t five : 4;
+ uint64_t reserved_25_31 : 7;
+ uint64_t clen3 : 3;
+ uint64_t clen2 : 3;
+ uint64_t clen1 : 3;
+ uint64_t data : 8;
+ uint64_t reserved_4_7 : 4;
+ uint64_t eleven : 4;
+} cvmx_nand_cmd_wait_status_ale_t;
+
+/**
+ * NAND BUS_ACQ command definition
+ */
+typedef struct
+{
+ uint64_t reserved_64_127 : 64;
+ uint64_t reserved_8_63 : 56;
+ uint64_t one : 4;
+ uint64_t fifteen : 4;
+} cvmx_nand_cmd_bus_acq_t;
+
+/**
+ * NAND BUS_REL command definition
+ */
+typedef struct
+{
+ uint64_t reserved_64_127 : 64;
+ uint64_t reserved_8_63 : 56;
+ uint64_t zero : 4;
+ uint64_t fifteen : 4;
+} cvmx_nand_cmd_bus_rel_t;
+
+/**
+ * NAND command union of all possible commands
+ */
+typedef union
+{
+ uint64_t u64[2];
+ cvmx_nand_cmd_nop_t nop;
+ cvmx_nand_cmd_set_tm_par_t set_tm_par;
+ cvmx_nand_cmd_wait_t wait;
+ cvmx_nand_cmd_chip_en_t chip_en;
+ cvmx_nand_cmd_chip_dis_t chip_dis;
+ cvmx_nand_cmd_cle_t cle;
+ cvmx_nand_cmd_ale_t ale;
+ cvmx_nand_cmd_rd_t rd;
+ cvmx_nand_cmd_rd_edo_t rd_edo;
+ cvmx_nand_cmd_wr_t wr;
+ cvmx_nand_cmd_wait_status_t wait_status;
+ cvmx_nand_cmd_wait_status_ale_t wait_status_ale;
+ cvmx_nand_cmd_bus_acq_t bus_acq;
+ cvmx_nand_cmd_bus_rel_t bus_rel;
+ struct
+ {
+ uint64_t reserved_64_127: 64;
+ uint64_t reserved_4_63 : 60;
+ uint64_t op_code : 4;
+ } s;
+} cvmx_nand_cmd_t;
+
+
+typedef struct __attribute__ ((packed))
+{
+ char onfi[4]; /**< Bytes 0-3: The ASCII characters 'O', 'N', 'F', 'I' */
+ uint16_t revision_number; /**< Bytes 4-5: ONFI revision number
+ - 2-15 Reserved (0)
+ - 1 1 = supports ONFI version 1.0
+ - 0 Reserved (0) */
+ uint16_t features; /**< Bytes 6-7: Features supported
+ - 5-15 Reserved (0)
+ - 4 1 = supports odd to even page Copyback
+ - 3 1 = supports interleaved operations
+ - 2 1 = supports non-sequential page programming
+ - 1 1 = supports multiple LUN operations
+ - 0 1 = supports 16-bit data bus width */
+ uint16_t optional_commands; /**< Bytes 8-9: Optional commands supported
+ - 6-15 Reserved (0)
+ - 5 1 = supports Read Unique ID
+ - 4 1 = supports Copyback
+ - 3 1 = supports Read Status Enhanced
+ - 2 1 = supports Get Features and Set Features
+ - 1 1 = supports Read Cache commands
+ - 0 1 = supports Page Cache Program command */
+ uint8_t reserved_10_31[22]; /**< Bytes 10-31: Reserved */
+
+ char manufacturer[12]; /**< Bytes 32-43: Device manufacturer (12 ASCII characters) */
+ char model[20]; /**< Bytes 44-63: Device model (20 ASCII characters) */
+ uint8_t jedec_id; /**< Byte 64: JEDEC manufacturer ID */
+ uint16_t date_code; /**< Byte 65-66: Date code */
+ uint8_t reserved_67_79[13]; /**< Bytes 67-79: Reserved */
+
+ uint32_t page_data_bytes; /**< Bytes 80-83: Number of data bytes per page */
+ uint16_t page_spare_bytes; /**< Bytes 84-85: Number of spare bytes per page */
+ uint32_t partial_page_data_bytes; /**< Bytes 86-89: Number of data bytes per partial page */
+ uint16_t partial_page_spare_bytes; /**< Bytes 90-91: Number of spare bytes per partial page */
+ uint32_t pages_per_block; /**< Bytes 92-95: Number of pages per block */
+ uint32_t blocks_per_lun; /**< Bytes 96-99: Number of blocks per logical unit (LUN) */
+ uint8_t number_lun; /**< Byte 100: Number of logical units (LUNs) */
+ uint8_t address_cycles; /**< Byte 101: Number of address cycles
+ - 4-7 Column address cycles
+ - 0-3 Row address cycles */
+ uint8_t bits_per_cell; /**< Byte 102: Number of bits per cell */
+ uint16_t bad_block_per_lun; /**< Bytes 103-104: Bad blocks maximum per LUN */
+ uint16_t block_endurance; /**< Bytes 105-106: Block endurance */
+ uint8_t good_blocks; /**< Byte 107: Guaranteed valid blocks at beginning of target */
+ uint16_t good_block_endurance; /**< Bytes 108-109: Block endurance for guaranteed valid blocks */
+ uint8_t programs_per_page; /**< Byte 110: Number of programs per page */
+ uint8_t partial_program_attrib; /**< Byte 111: Partial programming attributes
+ - 5-7 Reserved
+ - 4 1 = partial page layout is partial page data followed by partial page spare
+ - 1-3 Reserved
+ - 0 1 = partial page programming has constraints */
+ uint8_t bits_ecc; /**< Byte 112: Number of bits ECC correctability */
+ uint8_t interleaved_address_bits; /**< Byte 113: Number of interleaved address bits
+ - 4-7 Reserved (0)
+ - 0-3 Number of interleaved address bits */
+ uint8_t interleaved_attrib; /**< Byte 114: Interleaved operation attributes
+ - 4-7 Reserved (0)
+ - 3 Address restrictions for program cache
+ - 2 1 = program cache supported
+ - 1 1 = no block address restrictions
+ - 0 Overlapped / concurrent interleaving support */
+ uint8_t reserved_115_127[13]; /**< Bytes 115-127: Reserved (0) */
+
+ uint8_t pin_capacitance; /**< Byte 128: I/O pin capacitance */
+ uint16_t timing_mode; /**< Byte 129-130: Timing mode support
+ - 6-15 Reserved (0)
+ - 5 1 = supports timing mode 5
+ - 4 1 = supports timing mode 4
+ - 3 1 = supports timing mode 3
+ - 2 1 = supports timing mode 2
+ - 1 1 = supports timing mode 1
+ - 0 1 = supports timing mode 0, shall be 1 */
+ uint16_t cache_timing_mode; /**< Byte 131-132: Program cache timing mode support
+ - 6-15 Reserved (0)
+ - 5 1 = supports timing mode 5
+ - 4 1 = supports timing mode 4
+ - 3 1 = supports timing mode 3
+ - 2 1 = supports timing mode 2
+ - 1 1 = supports timing mode 1
+ - 0 1 = supports timing mode 0 */
+ uint16_t t_prog; /**< Byte 133-134: Maximum page program time (us) */
+ uint16_t t_bers; /**< Byte 135-136: Maximum block erase time (us) */
+ uint16_t t_r; /**< Byte 137-138: Maximum page read time (us) */
+ uint16_t t_ccs; /**< Byte 139-140: Minimum change column setup time (ns) */
+ uint8_t reserved_141_163[23]; /**< Byte 141-163: Reserved (0) */
+
+ uint16_t vendor_revision; /**< Byte 164-165: Vendor specific Revision number */
+ uint8_t vendor_specific[88]; /**< Byte 166-253: Vendor specific */
+ uint16_t crc; /**< Byte 254-255: Integrity CRC */
+} cvmx_nand_onfi_param_page_t;
+
+
+/**
+ * Called to initialize the NAND controller for use. Note that
+ * you must be running out of L2 or memory and not NAND before
+ * calling this function.
+ * When probing for NAND chips, this function attempts to autoconfigure based on the NAND parts detected.
+ * It currently supports autodetection for ONFI parts (with valid parameter pages), and some Samsung NAND
+ * parts (decoding ID bits). If autoconfiguration fails, the defaults set with cvmx_nand_set_defaults()
+ * prior to calling cvmx_nand_initialize() are used.
+ * If defaults are set and the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is provided, the defaults are used
+ * for all chips in the active_chips mask.
+ *
+ * @param flags Optional initialization flags
+ * If the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is passed, chips are not probed,
+ * and the default parameters (if set with cvmx_nand_set_defaults) are used for all chips
+ * in the active_chips mask.
+ * @param active_chips
+ * Each bit in this parameter represents a chip select that might
+ * contain NAND flash. Any chip select present in this bitmask may
+ * be connected to NAND. It is normally safe to pass 0xff here and
+ * let the API probe all 8 chip selects.
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+extern cvmx_nand_status_t cvmx_nand_initialize(cvmx_nand_initialize_flags_t flags, int active_chips);
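A minimal usage sketch for the common case (no flags, probe all 8 chip selects); the example_nand_init() wrapper is hypothetical:

    static int example_nand_init(void)
    {
        /* probe all 8 chip selects with no special flags */
        cvmx_nand_status_t status =
            cvmx_nand_initialize((cvmx_nand_initialize_flags_t)0, 0xff);
        return (status == CVMX_NAND_SUCCESS) ? 0 : -1;
    }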
+
+
+
+/**
+ * This function may be called before cvmx_nand_initialize to set default values that will be used
+ * for NAND chips that do not identify themselves in a way that allows autoconfiguration. (ONFI chip with
+ * missing parameter page, for example.)
+ * The parameters set by this function will be used by _all_ non-autoconfigured NAND chips.
+ *
+ *
+ * NOTE: This function signature is _NOT_ stable, and will change in the future as required to support
+ * various NAND chips.
+ *
+ * @param page_size page size in bytes
+ * @param oob_size Out of band size in bytes (per page)
+ * @param pages_per_block
+ * number of pages per block
+ * @param blocks Total number of blocks in device
+ * @param onfi_timing_mode
+ * ONFI timing mode
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+extern cvmx_nand_status_t cvmx_nand_set_defaults(int page_size, int oob_size, int pages_per_block, int blocks, int onfi_timing_mode);
+
+
+/**
+ * Call to shutdown the NAND controller after all transactions
+ * are done. In most setups this will never be called.
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+extern cvmx_nand_status_t cvmx_nand_shutdown(void);
+
+
+/**
+ * Returns a bitmask representing the chip selects that are
+ * connected to NAND chips. This can be called after the
+ * initialize to determine the actual number of NAND chips
+ * found. Each bit in the response corresponds to a chip select.
+ *
+ * @return Zero if no NAND chips were found. Otherwise a bit is set for
+ * each chip select (1<<chip).
+ */
+extern int cvmx_nand_get_active_chips(void);
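A short sketch of walking the returned bitmask; example_list_chips() is illustrative only:

    static void example_list_chips(void)
    {
        int mask = cvmx_nand_get_active_chips();
        int chip;
        for (chip = 0; chip < 8; chip++)
        {
            if (mask & (1 << chip))
            {
                /* chip select 'chip' has NAND attached */
                int page_size = cvmx_nand_get_page_size(chip);
                (void)page_size;
            }
        }
    }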
+
+
+/**
+ * Override the timing parameters for a NAND chip
+ *
+ * @param chip Chip select to override
+ * @param tim_mult
+ * @param tim_par
+ * @param clen
+ * @param alen
+ * @param rdn
+ * @param wrn
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+extern cvmx_nand_status_t cvmx_nand_set_timing(int chip, int tim_mult, int tim_par[7], int clen[4], int alen[4], int rdn[4], int wrn[2]);
+
+
+/**
+ * Submit a command to the NAND command queue. Generally this
+ * will not be used directly. Instead most programs will use the other
+ * higher level NAND functions.
+ *
+ * @param cmd Command to submit
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+extern cvmx_nand_status_t cvmx_nand_submit(cvmx_nand_cmd_t cmd);
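A hedged sketch of building one command from the structures above and submitting it. The WAIT opcode nibble is 2 per the `two` field of cvmx_nand_cmd_wait_t; the value written to `n` is purely illustrative:

    static cvmx_nand_status_t example_submit_wait(void)
    {
        cvmx_nand_cmd_t cmd;
        cmd.u64[0] = 0;
        cmd.u64[1] = 0;
        cmd.wait.two = 2;  /* WAIT opcode nibble, per cvmx_nand_cmd_wait_t */
        cmd.wait.n = 5;    /* wait-time selector; value is illustrative */
        return cvmx_nand_submit(cmd);
    }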
+
+/**
+ * Read a page from NAND. If the buffer has room, the out of band
+ * data will be included.
+ *
+ * @param chip Chip select for NAND flash
+ * @param nand_address
+ * Location in NAND to read. See description in file comment
+ * @param buffer_address
+ * Physical address to store the result at
+ * @param buffer_length
+ * Number of bytes to read
+ *
+ * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
+ */
+extern int cvmx_nand_page_read(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length);
+
+/**
+ * Write a page to NAND. The buffer must contain the entire page
+ * including the out of band data.
+ *
+ * @param chip Chip select for NAND flash
+ * @param nand_address
+ * Location in NAND to write. See description in file comment
+ * @param buffer_address
+ * Physical address to read the data from
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+extern cvmx_nand_status_t cvmx_nand_page_write(int chip, uint64_t nand_address, uint64_t buffer_address);
+
+/**
+ * Erase a NAND block. A single block contains multiple pages.
+ *
+ * @param chip Chip select for NAND flash
+ * @param nand_address
+ * Location in NAND to erase. See description in file comment
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+extern cvmx_nand_status_t cvmx_nand_block_erase(int chip, uint64_t nand_address);
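A sketch tying the erase/write/read calls together. It assumes cvmx_ptr_to_phys() from cvmx.h is available to convert a buffer pointer to the physical address these functions take; example_rewrite_page() is hypothetical:

    static int example_rewrite_page(int chip, uint64_t block_addr,
                                    uint64_t page_addr)
    {
        static uint8_t buf[CVMX_NAND_MAX_PAGE_AND_OOB_SIZE];
        int page_size = cvmx_nand_get_page_size(chip);
        int oob_size = cvmx_nand_get_oob_size(chip);

        if (cvmx_nand_block_erase(chip, block_addr) != CVMX_NAND_SUCCESS)
            return -1;
        /* ... fill buf with page_size data bytes plus oob_size OOB bytes ... */
        if (cvmx_nand_page_write(chip, page_addr, cvmx_ptr_to_phys(buf))
            != CVMX_NAND_SUCCESS)
            return -1;
        /* read the page back, including the OOB data */
        return cvmx_nand_page_read(chip, page_addr, cvmx_ptr_to_phys(buf),
                                   page_size + oob_size);
    }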
+
+/**
+ * Read the NAND ID information
+ *
+ * @param chip Chip select for NAND flash
+ * @param nand_address
+ * NAND address to read ID from. Usually this is either 0x0 or 0x20.
+ * @param buffer_address
+ * Physical address to store data in
+ * @param buffer_length
+ * Length of the buffer. Usually this is 4 bytes
+ *
+ * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
+ */
+extern int cvmx_nand_read_id(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length);
+
+/**
+ * Read the NAND parameter page
+ *
+ * @param chip Chip select for NAND flash
+ * @param buffer_address
+ * Physical address to store data in
+ * @param buffer_length
+ * Length of the buffer. Usually this is 256 bytes, the size of cvmx_nand_onfi_param_page_t
+ *
+ * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
+ */
+extern int cvmx_nand_read_param_page(int chip, uint64_t buffer_address, int buffer_length);
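A sketch of reading the parameter page into the cvmx_nand_onfi_param_page_t structure above and checking the ONFI signature; again cvmx_ptr_to_phys() is assumed from cvmx.h and example_check_onfi() is hypothetical:

    static int example_check_onfi(int chip)
    {
        static cvmx_nand_onfi_param_page_t param;
        int bytes = cvmx_nand_read_param_page(chip, cvmx_ptr_to_phys(&param),
                                              sizeof(param));
        if (bytes < (int)sizeof(param))
            return -1;
        /* an ONFI part reports the ASCII signature 'O','N','F','I' */
        return (param.onfi[0] == 'O' && param.onfi[1] == 'N' &&
                param.onfi[2] == 'F' && param.onfi[3] == 'I') ? 0 : -1;
    }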
+
+/**
+ * Get the status of the NAND flash
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return NAND status or a negative cvmx_nand_status_t error code on failure
+ */
+extern int cvmx_nand_get_status(int chip);
+
+/**
+ * Get the page size, excluding out of band data. This function
+ * will return zero for chip selects not connected to NAND.
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return Page size in bytes or a negative cvmx_nand_status_t error code on failure
+ */
+extern int cvmx_nand_get_page_size(int chip);
+
+/**
+ * Get the OOB size.
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return OOB in bytes or a negative cvmx_nand_status_t error code on failure
+ */
+extern int cvmx_nand_get_oob_size(int chip);
+
+/**
+ * Get the number of pages per NAND block
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return Number of pages in each block or a negative cvmx_nand_status_t error code on failure
+ */
+extern int cvmx_nand_get_pages_per_block(int chip);
+
+/**
+ * Get the number of blocks in the NAND flash
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return Number of blocks or a negative cvmx_nand_status_t error code on failure
+ */
+extern int cvmx_nand_get_blocks(int chip);
+
+/**
+ * Reset the NAND flash
+ *
+ * @param chip Chip select for NAND flash
+ *
+ * @return Zero on success, a negative cvmx_nand_status_t error code on failure
+ */
+extern cvmx_nand_status_t cvmx_nand_reset(int chip);
+
+/**
+ * This function computes the Octeon specific ECC data used by the NAND boot
+ * feature.
+ *
+ * @param block pointer to 256 bytes of data
+ * @param eccp pointer to where 8 bytes of ECC data will be stored
+ */
+extern void cvmx_nand_compute_boot_ecc(unsigned char *block, unsigned char *eccp);
+
+
+extern int cvmx_nand_correct_boot_ecc(uint8_t *block);
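A sketch pairing the two boot-ECC helpers over one 256-byte block. The layout of data versus stored ECC inside the block passed to the correction routine is an assumption, not spelled out here:

    static int example_boot_ecc(unsigned char *block256, unsigned char *ecc8)
    {
        /* generate the 8 ECC bytes covering a 256 byte block */
        cvmx_nand_compute_boot_ecc(block256, ecc8);
        /* after a later read, attempt single-bit correction */
        return cvmx_nand_correct_boot_ecc(block256);
    }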
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_NAND_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-nand.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-ndf-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-ndf-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-ndf-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,542 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-ndf-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon ndf.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_NDF_DEFS_H__
+#define __CVMX_NDF_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NDF_BT_PG_INFO CVMX_NDF_BT_PG_INFO_FUNC()
+static inline uint64_t CVMX_NDF_BT_PG_INFO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_NDF_BT_PG_INFO not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070001000018ull);
+}
+#else
+#define CVMX_NDF_BT_PG_INFO (CVMX_ADD_IO_SEG(0x0001070001000018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NDF_CMD CVMX_NDF_CMD_FUNC()
+static inline uint64_t CVMX_NDF_CMD_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_NDF_CMD not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070001000000ull);
+}
+#else
+#define CVMX_NDF_CMD (CVMX_ADD_IO_SEG(0x0001070001000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NDF_DRBELL CVMX_NDF_DRBELL_FUNC()
+static inline uint64_t CVMX_NDF_DRBELL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_NDF_DRBELL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070001000030ull);
+}
+#else
+#define CVMX_NDF_DRBELL (CVMX_ADD_IO_SEG(0x0001070001000030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NDF_ECC_CNT CVMX_NDF_ECC_CNT_FUNC()
+static inline uint64_t CVMX_NDF_ECC_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_NDF_ECC_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070001000010ull);
+}
+#else
+#define CVMX_NDF_ECC_CNT (CVMX_ADD_IO_SEG(0x0001070001000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NDF_INT CVMX_NDF_INT_FUNC()
+static inline uint64_t CVMX_NDF_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_NDF_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070001000020ull);
+}
+#else
+#define CVMX_NDF_INT (CVMX_ADD_IO_SEG(0x0001070001000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NDF_INT_EN CVMX_NDF_INT_EN_FUNC()
+static inline uint64_t CVMX_NDF_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_NDF_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070001000028ull);
+}
+#else
+#define CVMX_NDF_INT_EN (CVMX_ADD_IO_SEG(0x0001070001000028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NDF_MISC CVMX_NDF_MISC_FUNC()
+static inline uint64_t CVMX_NDF_MISC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_NDF_MISC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070001000008ull);
+}
+#else
+#define CVMX_NDF_MISC (CVMX_ADD_IO_SEG(0x0001070001000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NDF_ST_REG CVMX_NDF_ST_REG_FUNC()
+static inline uint64_t CVMX_NDF_ST_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_NDF_ST_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070001000038ull);
+}
+#else
+#define CVMX_NDF_ST_REG (CVMX_ADD_IO_SEG(0x0001070001000038ull))
+#endif
+
+/**
+ * cvmx_ndf_bt_pg_info
+ *
+ * Notes:
+ * NDF_BT_PG_INFO provides page size and number of column plus row address cycles information. SW writes to this CSR
+ * during boot from Nand Flash. Additionally SW also writes the multiplier value for timing parameters. This value is
+ * used during boot, in the SET_TM_PARAM command. This information is used only by the boot load state machine and is
+ * otherwise a don't care once boot is disabled. Also, boot DMAs do not use this value.
+ *
+ * Bytes per Nand Flash page = 2 ** (SIZE + 1) times 256 bytes.
+ * 512, 1k, 2k, 4k, 8k, 16k, 32k and 64k are legal bytes per page values
+ *
+ * Legal values for ADR_CYC field are 3 through 8. SW CSR writes with a value less than 3 will write a 3 to this
+ * field, and a SW CSR write with a value greater than 8, will write an 8 to this field.
+ *
+ * Like all NDF_... registers, 64-bit operations must be used to access this register
+ */
+union cvmx_ndf_bt_pg_info {
+ uint64_t u64;
+ struct cvmx_ndf_bt_pg_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t t_mult : 4; /**< Boot time TIM_MULT[3:0] field of SET_TM_PAR[63:0]
+ command */
+ uint64_t adr_cyc : 4; /**< # of column plus row address cycles */
+ uint64_t size : 3; /**< bytes per page in the nand device */
+#else
+ uint64_t size : 3;
+ uint64_t adr_cyc : 4;
+ uint64_t t_mult : 4;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_ndf_bt_pg_info_s cn52xx;
+ struct cvmx_ndf_bt_pg_info_s cn63xx;
+ struct cvmx_ndf_bt_pg_info_s cn63xxp1;
+ struct cvmx_ndf_bt_pg_info_s cn66xx;
+ struct cvmx_ndf_bt_pg_info_s cn68xx;
+ struct cvmx_ndf_bt_pg_info_s cn68xxp1;
+};
+typedef union cvmx_ndf_bt_pg_info cvmx_ndf_bt_pg_info_t;
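A worked example of the SIZE encoding from the notes (bytes per page = 2 ** (SIZE + 1) * 256, so a 2048-byte page encodes as SIZE = 2). The helper below is a hypothetical sketch, not SDK code:

    static uint64_t example_bt_pg_info(int page_bytes, int adr_cyc, int t_mult)
    {
        cvmx_ndf_bt_pg_info_t info;
        int size = 0;

        /* find SIZE such that 2^(SIZE+1) * 256 == page_bytes */
        while ((256 << (size + 1)) < page_bytes)
            size++;
        info.u64 = 0;
        info.s.size = size;
        info.s.adr_cyc = adr_cyc;  /* legal values are 3 through 8 */
        info.s.t_mult = t_mult;
        return info.u64;
    }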
+
+/**
+ * cvmx_ndf_cmd
+ *
+ * Notes:
+ * When SW reads this csr, RD_VAL bit in NDF_MISC csr is cleared to 0. SW must always write all 8 bytes whenever it writes
+ * this csr. If there are fewer than 8 bytes left in the command sequence that SW wants the NAND flash controller to execute, it
+ * must insert Idle (WAIT) commands to make up 8 bytes. SW also must ensure there is enough vacancy in the command fifo to accept these
+ * 8 bytes, by first reading the FR_BYT field in the NDF_MISC csr.
+ *
+ * Like all NDF_... registers, 64-bit operations must be used to access this register
+ */
+union cvmx_ndf_cmd {
+ uint64_t u64;
+ struct cvmx_ndf_cmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nf_cmd : 64; /**< 8 Command Bytes */
+#else
+ uint64_t nf_cmd : 64;
+#endif
+ } s;
+ struct cvmx_ndf_cmd_s cn52xx;
+ struct cvmx_ndf_cmd_s cn63xx;
+ struct cvmx_ndf_cmd_s cn63xxp1;
+ struct cvmx_ndf_cmd_s cn66xx;
+ struct cvmx_ndf_cmd_s cn68xx;
+ struct cvmx_ndf_cmd_s cn68xxp1;
+};
+typedef union cvmx_ndf_cmd cvmx_ndf_cmd_t;
+
+/**
+ * cvmx_ndf_drbell
+ *
+ * Notes:
+ * SW csr writes will increment CNT by the signed 8 bit value being written. SW csr reads return the current CNT value.
+ * HW will also modify the value of the CNT field. Every time HW executes a BUS_ACQ[15:0] command, to arbitrate and win the
+ * flash bus, it decrements the CNT field by 1. If the CNT field is already 0 or negative, HW command execution unit will
+ * stall when it fetches the new BUS_ACQ[15:0] command, from the command fifo. Only when the SW writes to this CSR with a
+ * non-zero data value, can the execution unit come out of the stalled condition, and resume execution.
+ *
+ * The intended use of this doorbell CSR is to control execution of the Nand Flash commands. The NDF execution unit
+ * has to arbitrate for the flash bus, before it can enable a Nand Flash device connected to the Octeon chip, by
+ * asserting the device's chip enable. Therefore SW should first load the command fifo, with a full sequence of
+ * commands to perform a Nand Flash device task. This command sequence will start with a bus acquire command and
+ * the last command in the sequence will be a bus release command. The execution unit will start execution of
+ * the sequence only if the [CNT] field is non-zero when it fetches the bus acquire command, which is the first
+ * command in this sequence. SW can also load multiple such sequences, each starting with a chip enable command
+ * and ending with a chip disable command, and then write a non-zero data value to this csr to increment the
+ * CNT field by the number of command sequences loaded into the command fifo.
+ *
+ * Like all NDF_... registers, 64-bit operations must be used to access this register
+ */
+union cvmx_ndf_drbell {
+ uint64_t u64;
+ struct cvmx_ndf_drbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t cnt : 8; /**< Doorbell count register, 2's complement 8 bit value */
+#else
+ uint64_t cnt : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_ndf_drbell_s cn52xx;
+ struct cvmx_ndf_drbell_s cn63xx;
+ struct cvmx_ndf_drbell_s cn63xxp1;
+ struct cvmx_ndf_drbell_s cn66xx;
+ struct cvmx_ndf_drbell_s cn68xx;
+ struct cvmx_ndf_drbell_s cn68xxp1;
+};
+typedef union cvmx_ndf_drbell cvmx_ndf_drbell_t;
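Per the notes above, the doorbell is credited once per complete bus-acquire ... bus-release sequence. A minimal sketch, assuming cvmx_write_csr() from cvmx.h:

    static void example_ring_doorbell(int sequences_loaded)
    {
        /* credit the execution unit with one count per complete
           command sequence already loaded into the command fifo */
        cvmx_write_csr(CVMX_NDF_DRBELL, (uint64_t)(sequences_loaded & 0xff));
    }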
+
+/**
+ * cvmx_ndf_ecc_cnt
+ *
+ * Notes:
+ * XOR_ECC[31:8] = [ecc_gen_byt258, ecc_gen_byt257, ecc_gen_byt256] xor [ecc_258, ecc_257, ecc_256]
+ * ecc_258, ecc_257 and ecc_256 are bytes stored in Nand Flash and read out during boot
+ * ecc_gen_byt258, ecc_gen_byt257, ecc_gen_byt256 are generated from data read out from Nand Flash
+ *
+ * Like all NDF_... registers, 64-bit operations must be used to access this register
+ */
+union cvmx_ndf_ecc_cnt {
+ uint64_t u64;
+ struct cvmx_ndf_ecc_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t xor_ecc : 24; /**< result of XOR of ecc read bytes and ecc generated
+ bytes. The value pertains to the last 1 bit ecc err */
+ uint64_t ecc_err : 8; /**< Count = \# of 1 bit errors fixed during boot
+ This count saturates instead of wrapping around. */
+#else
+ uint64_t ecc_err : 8;
+ uint64_t xor_ecc : 24;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ndf_ecc_cnt_s cn52xx;
+ struct cvmx_ndf_ecc_cnt_s cn63xx;
+ struct cvmx_ndf_ecc_cnt_s cn63xxp1;
+ struct cvmx_ndf_ecc_cnt_s cn66xx;
+ struct cvmx_ndf_ecc_cnt_s cn68xx;
+ struct cvmx_ndf_ecc_cnt_s cn68xxp1;
+};
+typedef union cvmx_ndf_ecc_cnt cvmx_ndf_ecc_cnt_t;
+
+/**
+ * cvmx_ndf_int
+ *
+ * Notes:
+ * FULL status is updated when the command fifo becomes full as a result of SW writing a new command to it.
+ *
+ * EMPTY status is updated when the command fifo becomes empty as a result of command execution unit fetching the
+ * last instruction out of the command fifo.
+ *
+ * Like all NDF_... registers, 64-bit operations must be used to access this register
+ */
+union cvmx_ndf_int {
+ uint64_t u64;
+ struct cvmx_ndf_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t ovrf : 1; /**< NDF_CMD write when fifo is full. Generally a
+ fatal error. */
+ uint64_t ecc_mult : 1; /**< Multi bit ECC error detected during boot */
+ uint64_t ecc_1bit : 1; /**< Single bit ECC error detected and fixed during boot */
+ uint64_t sm_bad : 1; /**< One of the state machines in a bad state */
+ uint64_t wdog : 1; /**< Watch Dog timer expired during command execution */
+ uint64_t full : 1; /**< Command fifo is full */
+ uint64_t empty : 1; /**< Command fifo is empty */
+#else
+ uint64_t empty : 1;
+ uint64_t full : 1;
+ uint64_t wdog : 1;
+ uint64_t sm_bad : 1;
+ uint64_t ecc_1bit : 1;
+ uint64_t ecc_mult : 1;
+ uint64_t ovrf : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_ndf_int_s cn52xx;
+ struct cvmx_ndf_int_s cn63xx;
+ struct cvmx_ndf_int_s cn63xxp1;
+ struct cvmx_ndf_int_s cn66xx;
+ struct cvmx_ndf_int_s cn68xx;
+ struct cvmx_ndf_int_s cn68xxp1;
+};
+typedef union cvmx_ndf_int cvmx_ndf_int_t;
+
+/**
+ * cvmx_ndf_int_en
+ *
+ * Notes:
+ * Like all NDF_... registers, 64-bit operations must be used to access this register
+ *
+ */
+union cvmx_ndf_int_en {
+ uint64_t u64;
+ struct cvmx_ndf_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t ovrf : 1; /**< Wrote to a full command fifo */
+ uint64_t ecc_mult : 1; /**< Multi bit ECC error detected during boot */
+ uint64_t ecc_1bit : 1; /**< Single bit ECC error detected and fixed during boot */
+ uint64_t sm_bad : 1; /**< One of the state machines in a bad state */
+ uint64_t wdog : 1; /**< Watch Dog timer expired during command execution */
+ uint64_t full : 1; /**< Command fifo is full */
+ uint64_t empty : 1; /**< Command fifo is empty */
+#else
+ uint64_t empty : 1;
+ uint64_t full : 1;
+ uint64_t wdog : 1;
+ uint64_t sm_bad : 1;
+ uint64_t ecc_1bit : 1;
+ uint64_t ecc_mult : 1;
+ uint64_t ovrf : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_ndf_int_en_s cn52xx;
+ struct cvmx_ndf_int_en_s cn63xx;
+ struct cvmx_ndf_int_en_s cn63xxp1;
+ struct cvmx_ndf_int_en_s cn66xx;
+ struct cvmx_ndf_int_en_s cn68xx;
+ struct cvmx_ndf_int_en_s cn68xxp1;
+};
+typedef union cvmx_ndf_int_en cvmx_ndf_int_en_t;
+
+/**
+ * cvmx_ndf_misc
+ *
+ * Notes:
+ * NBR_HWM this field specifies the high water mark for the NCB outbound load/store commands receive fifo.
+ * The fifo size is 16 entries.
+ *
+ * WAIT_CNT this field allows glitch filtering of the WAIT_n input to octeon, from Flash Memory. The count
+ * represents number of eclk cycles.
+ *
+ * FR_BYT this field specifies \# of unfilled bytes in the command fifo. Bytes become unfilled as commands
+ * complete execution and exit. (fifo is 256 bytes when BT_DIS=0, and 1536 bytes when BT_DIS=1)
+ *
+ * RD_DONE this W1C bit is set to 1 by HW when it reads the last 8 bytes out of the command fifo,
+ * in response to RD_CMD bit being set to 1 by SW.
+ *
+ * RD_VAL this read only bit is set to 1 by HW when it reads next 8 bytes from command fifo in response
+ * to RD_CMD bit being set to 1. A SW read of NDF_CMD csr clears this bit to 0.
+ *
+ * RD_CMD this R/W bit starts read out from the command fifo, 8 bytes at a time. SW should first read the
+ * RD_VAL bit in this csr to see if next 8 bytes from the command fifo are available in the
+ * NDF_CMD csr. All command fifo reads start and end on an 8 byte boundary. A RD_CMD in the
+ * middle of command execution will cause the execution to freeze until RD_DONE is set to 1. RD_CMD
+ * bit will be cleared on any NDF_CMD csr write by SW.
+ *
+ * BT_DMA this indicates to the NAND flash boot control state machine that boot dma read can begin.
+ * SW should set this bit to 1 after SW has loaded the command fifo. HW sets the bit to 0
+ * when boot dma command execution is complete. If chip enable 0 is not nand flash, this bit is
+ * permanently 1'b0 with SW writes ignored. Whenever BT_DIS=1, this bit will be 0.
+ *
+ * BT_DIS this R/W bit indicates to NAND flash boot control state machine that boot operation has ended.
+ * Whenever this bit changes from 0 to 1, the command fifo is emptied as a side effect. This bit must
+ * never be set when booting from nand flash and region zero is enabled.
+ *
+ * EX_DIS When 1, command execution stops after completing execution of all commands currently in the command
+ * fifo. Once command execution has stopped, and then new commands are loaded into the command fifo, execution
+ * will not resume as long as this bit is 1. When this bit is 0, command execution will resume if command fifo
+ * is not empty. EX_DIS should be set to 1 during boot, i.e. when BT_DIS = 0.
+ *
+ * RST_FF reset command fifo to make it empty; any command in flight is not aborted before resetting
+ * the fifo. The fifo comes up empty at the end of power on reset.
+ *
+ * Like all NDF_... registers, 64-bit operations must be used to access this register
+ */
+union cvmx_ndf_misc {
+ uint64_t u64;
+ struct cvmx_ndf_misc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t mb_dis : 1; /**< Disable multibit error hangs and allow boot loads
+ or boot DMAs to proceed as if no multi bit errors
+ occurred. HW will fix single bit errors as usual */
+ uint64_t nbr_hwm : 3; /**< Hi Water mark for NBR fifo or load/stores */
+ uint64_t wait_cnt : 6; /**< WAIT input filter count */
+ uint64_t fr_byt : 11; /**< Number of unfilled Command fifo bytes */
+ uint64_t rd_done : 1; /**< This W1C bit is set to 1 by HW when it completes
+ command fifo read out, in response to RD_CMD */
+ uint64_t rd_val : 1; /**< This RO bit is set to 1 by HW when it reads next 8
+ bytes from Command fifo into the NDF_CMD csr
+ SW reads NDF_CMD csr, HW clears this bit to 0 */
+ uint64_t rd_cmd : 1; /**< When 1, HW reads out contents of the Command fifo 8
+ bytes at a time into the NDF_CMD csr */
+ uint64_t bt_dma : 1; /**< When set to 1, boot time dma is enabled */
+ uint64_t bt_dis : 1; /**< When the boot operation is over, SW must set this to 1;
+ causes boot state machines to sleep */
+ uint64_t ex_dis : 1; /**< When set to 1, suspends execution of commands at
+ next command in the fifo. */
+ uint64_t rst_ff : 1; /**< 1=reset command fifo to make it empty,
+ 0=normal operation */
+#else
+ uint64_t rst_ff : 1;
+ uint64_t ex_dis : 1;
+ uint64_t bt_dis : 1;
+ uint64_t bt_dma : 1;
+ uint64_t rd_cmd : 1;
+ uint64_t rd_val : 1;
+ uint64_t rd_done : 1;
+ uint64_t fr_byt : 11;
+ uint64_t wait_cnt : 6;
+ uint64_t nbr_hwm : 3;
+ uint64_t mb_dis : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_ndf_misc_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t nbr_hwm : 3; /**< Hi Water mark for NBR fifo or load/stores */
+ uint64_t wait_cnt : 6; /**< WAIT input filter count */
+ uint64_t fr_byt : 11; /**< Number of unfilled Command fifo bytes */
+ uint64_t rd_done : 1; /**< This W1C bit is set to 1 by HW when it completes
+ command fifo read out, in response to RD_CMD */
+ uint64_t rd_val : 1; /**< This RO bit is set to 1 by HW when it reads next 8
+ bytes from Command fifo into the NDF_CMD csr
+ SW reads NDF_CMD csr, HW clears this bit to 0 */
+ uint64_t rd_cmd : 1; /**< When 1, HW reads out contents of the Command fifo 8
+ bytes at a time into the NDF_CMD csr */
+ uint64_t bt_dma : 1; /**< When set to 1, boot time dma is enabled */
+ uint64_t bt_dis : 1; /**< When the boot operation is over, SW must set this to 1;
+ causes boot state machines to sleep */
+ uint64_t ex_dis : 1; /**< When set to 1, suspends execution of commands at
+ next command in the fifo. */
+ uint64_t rst_ff : 1; /**< 1=reset command fifo to make it empty,
+ 0=normal operation */
+#else
+ uint64_t rst_ff : 1;
+ uint64_t ex_dis : 1;
+ uint64_t bt_dis : 1;
+ uint64_t bt_dma : 1;
+ uint64_t rd_cmd : 1;
+ uint64_t rd_val : 1;
+ uint64_t rd_done : 1;
+ uint64_t fr_byt : 11;
+ uint64_t wait_cnt : 6;
+ uint64_t nbr_hwm : 3;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn52xx;
+ struct cvmx_ndf_misc_s cn63xx;
+ struct cvmx_ndf_misc_s cn63xxp1;
+ struct cvmx_ndf_misc_s cn66xx;
+ struct cvmx_ndf_misc_s cn68xx;
+ struct cvmx_ndf_misc_s cn68xxp1;
+};
+typedef union cvmx_ndf_misc cvmx_ndf_misc_t;
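A sketch of the RD_CMD / RD_VAL read-back handshake described in the notes above, assuming cvmx_read_csr()/cvmx_write_csr() from cvmx.h; example_read_back_fifo_word() is hypothetical:

    static uint64_t example_read_back_fifo_word(void)
    {
        cvmx_ndf_misc_t misc;

        /* request read-back of the next 8 command fifo bytes */
        misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
        misc.s.rd_cmd = 1;
        cvmx_write_csr(CVMX_NDF_MISC, misc.u64);
        /* poll RD_VAL until the bytes land in NDF_CMD */
        do
            misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
        while (!misc.s.rd_val);
        /* reading NDF_CMD clears RD_VAL */
        return cvmx_read_csr(CVMX_NDF_CMD);
    }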
+
+/**
+ * cvmx_ndf_st_reg
+ *
+ * Notes:
+ * This CSR aggregates all state machines used in nand flash controller for debug.
+ * Like all NDF_... registers, 64-bit operations must be used to access this register
+ */
+union cvmx_ndf_st_reg {
+ uint64_t u64;
+ struct cvmx_ndf_st_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t exe_idle : 1; /**< Command Execution status 1=IDLE, 0=Busy
+ 1 means execution of command sequence is complete
+ and command fifo is empty */
+ uint64_t exe_sm : 4; /**< Command Execution State machine states */
+ uint64_t bt_sm : 4; /**< Boot load and Boot dma State machine states */
+ uint64_t rd_ff_bad : 1; /**< CMD fifo read back State machine in bad state */
+ uint64_t rd_ff : 2; /**< CMD fifo read back State machine states */
+ uint64_t main_bad : 1; /**< Main State machine in bad state */
+ uint64_t main_sm : 3; /**< Main State machine states */
+#else
+ uint64_t main_sm : 3;
+ uint64_t main_bad : 1;
+ uint64_t rd_ff : 2;
+ uint64_t rd_ff_bad : 1;
+ uint64_t bt_sm : 4;
+ uint64_t exe_sm : 4;
+ uint64_t exe_idle : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ndf_st_reg_s cn52xx;
+ struct cvmx_ndf_st_reg_s cn63xx;
+ struct cvmx_ndf_st_reg_s cn63xxp1;
+ struct cvmx_ndf_st_reg_s cn66xx;
+ struct cvmx_ndf_st_reg_s cn68xx;
+ struct cvmx_ndf_st_reg_s cn68xxp1;
+};
+typedef union cvmx_ndf_st_reg cvmx_ndf_st_reg_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-ndf-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-npei-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-npei-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-npei-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,7440 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-npei-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon npei.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_NPEI_DEFS_H__
+#define __CVMX_NPEI_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_BAR1_INDEXX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_BAR1_INDEXX(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000000ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_NPEI_BAR1_INDEXX(offset) (0x0000000000000000ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_BIST_STATUS CVMX_NPEI_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_NPEI_BIST_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_BIST_STATUS not supported on this chip\n");
+ return 0x0000000000000580ull;
+}
+#else
+#define CVMX_NPEI_BIST_STATUS (0x0000000000000580ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_BIST_STATUS2 CVMX_NPEI_BIST_STATUS2_FUNC()
+static inline uint64_t CVMX_NPEI_BIST_STATUS2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_BIST_STATUS2 not supported on this chip\n");
+ return 0x0000000000000680ull;
+}
+#else
+#define CVMX_NPEI_BIST_STATUS2 (0x0000000000000680ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_CTL_PORT0 CVMX_NPEI_CTL_PORT0_FUNC()
+static inline uint64_t CVMX_NPEI_CTL_PORT0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_CTL_PORT0 not supported on this chip\n");
+ return 0x0000000000000250ull;
+}
+#else
+#define CVMX_NPEI_CTL_PORT0 (0x0000000000000250ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_CTL_PORT1 CVMX_NPEI_CTL_PORT1_FUNC()
+static inline uint64_t CVMX_NPEI_CTL_PORT1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_CTL_PORT1 not supported on this chip\n");
+ return 0x0000000000000260ull;
+}
+#else
+#define CVMX_NPEI_CTL_PORT1 (0x0000000000000260ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_CTL_STATUS CVMX_NPEI_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_NPEI_CTL_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_CTL_STATUS not supported on this chip\n");
+ return 0x0000000000000570ull;
+}
+#else
+#define CVMX_NPEI_CTL_STATUS (0x0000000000000570ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_CTL_STATUS2 CVMX_NPEI_CTL_STATUS2_FUNC()
+static inline uint64_t CVMX_NPEI_CTL_STATUS2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_CTL_STATUS2 not supported on this chip\n");
+ return 0x0000000000003C00ull;
+}
+#else
+#define CVMX_NPEI_CTL_STATUS2 (0x0000000000003C00ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_DATA_OUT_CNT CVMX_NPEI_DATA_OUT_CNT_FUNC()
+static inline uint64_t CVMX_NPEI_DATA_OUT_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DATA_OUT_CNT not supported on this chip\n");
+ return 0x00000000000005F0ull;
+}
+#else
+#define CVMX_NPEI_DATA_OUT_CNT (0x00000000000005F0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_DBG_DATA CVMX_NPEI_DBG_DATA_FUNC()
+static inline uint64_t CVMX_NPEI_DBG_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DBG_DATA not supported on this chip\n");
+ return 0x0000000000000510ull;
+}
+#else
+#define CVMX_NPEI_DBG_DATA (0x0000000000000510ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_DBG_SELECT CVMX_NPEI_DBG_SELECT_FUNC()
+static inline uint64_t CVMX_NPEI_DBG_SELECT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DBG_SELECT not supported on this chip\n");
+ return 0x0000000000000500ull;
+}
+#else
+#define CVMX_NPEI_DBG_SELECT (0x0000000000000500ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_DMA0_INT_LEVEL CVMX_NPEI_DMA0_INT_LEVEL_FUNC()
+static inline uint64_t CVMX_NPEI_DMA0_INT_LEVEL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DMA0_INT_LEVEL not supported on this chip\n");
+ return 0x00000000000005C0ull;
+}
+#else
+#define CVMX_NPEI_DMA0_INT_LEVEL (0x00000000000005C0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_DMA1_INT_LEVEL CVMX_NPEI_DMA1_INT_LEVEL_FUNC()
+static inline uint64_t CVMX_NPEI_DMA1_INT_LEVEL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DMA1_INT_LEVEL not supported on this chip\n");
+ return 0x00000000000005D0ull;
+}
+#else
+#define CVMX_NPEI_DMA1_INT_LEVEL (0x00000000000005D0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_DMAX_COUNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_NPEI_DMAX_COUNTS(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000450ull + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_NPEI_DMAX_COUNTS(offset) (0x0000000000000450ull + ((offset) & 7) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_DMAX_DBELL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_NPEI_DMAX_DBELL(%lu) is invalid on this chip\n", offset);
+ return 0x00000000000003B0ull + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_NPEI_DMAX_DBELL(offset) (0x00000000000003B0ull + ((offset) & 7) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_DMAX_IBUFF_SADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_NPEI_DMAX_IBUFF_SADDR(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000400ull + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_NPEI_DMAX_IBUFF_SADDR(offset) (0x0000000000000400ull + ((offset) & 7) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_DMAX_NADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_NPEI_DMAX_NADDR(%lu) is invalid on this chip\n", offset);
+ return 0x00000000000004A0ull + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_NPEI_DMAX_NADDR(offset) (0x00000000000004A0ull + ((offset) & 7) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_DMA_CNTS CVMX_NPEI_DMA_CNTS_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_CNTS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DMA_CNTS not supported on this chip\n");
+ return 0x00000000000005E0ull;
+}
+#else
+#define CVMX_NPEI_DMA_CNTS (0x00000000000005E0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_DMA_CONTROL CVMX_NPEI_DMA_CONTROL_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DMA_CONTROL not supported on this chip\n");
+ return 0x00000000000003A0ull;
+}
+#else
+#define CVMX_NPEI_DMA_CONTROL (0x00000000000003A0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_DMA_PCIE_REQ_NUM CVMX_NPEI_DMA_PCIE_REQ_NUM_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_PCIE_REQ_NUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DMA_PCIE_REQ_NUM not supported on this chip\n");
+ return 0x00000000000005B0ull;
+}
+#else
+#define CVMX_NPEI_DMA_PCIE_REQ_NUM (0x00000000000005B0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_DMA_STATE1 CVMX_NPEI_DMA_STATE1_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_STATE1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NPEI_DMA_STATE1 not supported on this chip\n");
+ return 0x00000000000006C0ull;
+}
+#else
+#define CVMX_NPEI_DMA_STATE1 (0x00000000000006C0ull)
+#endif
+#define CVMX_NPEI_DMA_STATE1_P1 (0x0000000000000680ull)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_DMA_STATE2 CVMX_NPEI_DMA_STATE2_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_STATE2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NPEI_DMA_STATE2 not supported on this chip\n");
+ return 0x00000000000006D0ull;
+}
+#else
+#define CVMX_NPEI_DMA_STATE2 (0x00000000000006D0ull)
+#endif
+#define CVMX_NPEI_DMA_STATE2_P1 (0x0000000000000690ull)
+#define CVMX_NPEI_DMA_STATE3_P1 (0x00000000000006A0ull)
+#define CVMX_NPEI_DMA_STATE4_P1 (0x00000000000006B0ull)
+#define CVMX_NPEI_DMA_STATE5_P1 (0x00000000000006C0ull)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_INT_A_ENB CVMX_NPEI_INT_A_ENB_FUNC()
+static inline uint64_t CVMX_NPEI_INT_A_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_A_ENB not supported on this chip\n");
+ return 0x0000000000000560ull;
+}
+#else
+#define CVMX_NPEI_INT_A_ENB (0x0000000000000560ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_INT_A_ENB2 CVMX_NPEI_INT_A_ENB2_FUNC()
+static inline uint64_t CVMX_NPEI_INT_A_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_A_ENB2 not supported on this chip\n");
+ return 0x0000000000003CE0ull;
+}
+#else
+#define CVMX_NPEI_INT_A_ENB2 (0x0000000000003CE0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_INT_A_SUM CVMX_NPEI_INT_A_SUM_FUNC()
+static inline uint64_t CVMX_NPEI_INT_A_SUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_A_SUM not supported on this chip\n");
+ return 0x0000000000000550ull;
+}
+#else
+#define CVMX_NPEI_INT_A_SUM (0x0000000000000550ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_INT_ENB CVMX_NPEI_INT_ENB_FUNC()
+static inline uint64_t CVMX_NPEI_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_ENB not supported on this chip\n");
+ return 0x0000000000000540ull;
+}
+#else
+#define CVMX_NPEI_INT_ENB (0x0000000000000540ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_INT_ENB2 CVMX_NPEI_INT_ENB2_FUNC()
+static inline uint64_t CVMX_NPEI_INT_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_ENB2 not supported on this chip\n");
+ return 0x0000000000003CD0ull;
+}
+#else
+#define CVMX_NPEI_INT_ENB2 (0x0000000000003CD0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_INT_INFO CVMX_NPEI_INT_INFO_FUNC()
+static inline uint64_t CVMX_NPEI_INT_INFO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_INFO not supported on this chip\n");
+ return 0x0000000000000590ull;
+}
+#else
+#define CVMX_NPEI_INT_INFO (0x0000000000000590ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_INT_SUM CVMX_NPEI_INT_SUM_FUNC()
+static inline uint64_t CVMX_NPEI_INT_SUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_SUM not supported on this chip\n");
+ return 0x0000000000000530ull;
+}
+#else
+#define CVMX_NPEI_INT_SUM (0x0000000000000530ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_INT_SUM2 CVMX_NPEI_INT_SUM2_FUNC()
+static inline uint64_t CVMX_NPEI_INT_SUM2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_SUM2 not supported on this chip\n");
+ return 0x0000000000003CC0ull;
+}
+#else
+#define CVMX_NPEI_INT_SUM2 (0x0000000000003CC0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_LAST_WIN_RDATA0 CVMX_NPEI_LAST_WIN_RDATA0_FUNC()
+static inline uint64_t CVMX_NPEI_LAST_WIN_RDATA0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_LAST_WIN_RDATA0 not supported on this chip\n");
+ return 0x0000000000000600ull;
+}
+#else
+#define CVMX_NPEI_LAST_WIN_RDATA0 (0x0000000000000600ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_LAST_WIN_RDATA1 CVMX_NPEI_LAST_WIN_RDATA1_FUNC()
+static inline uint64_t CVMX_NPEI_LAST_WIN_RDATA1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_LAST_WIN_RDATA1 not supported on this chip\n");
+ return 0x0000000000000610ull;
+}
+#else
+#define CVMX_NPEI_LAST_WIN_RDATA1 (0x0000000000000610ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MEM_ACCESS_CTL CVMX_NPEI_MEM_ACCESS_CTL_FUNC()
+static inline uint64_t CVMX_NPEI_MEM_ACCESS_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MEM_ACCESS_CTL not supported on this chip\n");
+ return 0x00000000000004F0ull;
+}
+#else
+#define CVMX_NPEI_MEM_ACCESS_CTL (0x00000000000004F0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_MEM_ACCESS_SUBIDX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset >= 12) && (offset <= 27)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 12) && (offset <= 27))))))
+ cvmx_warn("CVMX_NPEI_MEM_ACCESS_SUBIDX(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000280ull + ((offset) & 31) * 16 - 16*12;
+}
+#else
+#define CVMX_NPEI_MEM_ACCESS_SUBIDX(offset) (0x0000000000000280ull + ((offset) & 31) * 16 - 16*12)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_ENB0 CVMX_NPEI_MSI_ENB0_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_ENB0 not supported on this chip\n");
+ return 0x0000000000003C50ull;
+}
+#else
+#define CVMX_NPEI_MSI_ENB0 (0x0000000000003C50ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_ENB1 CVMX_NPEI_MSI_ENB1_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_ENB1 not supported on this chip\n");
+ return 0x0000000000003C60ull;
+}
+#else
+#define CVMX_NPEI_MSI_ENB1 (0x0000000000003C60ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_ENB2 CVMX_NPEI_MSI_ENB2_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_ENB2 not supported on this chip\n");
+ return 0x0000000000003C70ull;
+}
+#else
+#define CVMX_NPEI_MSI_ENB2 (0x0000000000003C70ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_ENB3 CVMX_NPEI_MSI_ENB3_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_ENB3 not supported on this chip\n");
+ return 0x0000000000003C80ull;
+}
+#else
+#define CVMX_NPEI_MSI_ENB3 (0x0000000000003C80ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_RCV0 CVMX_NPEI_MSI_RCV0_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_RCV0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_RCV0 not supported on this chip\n");
+ return 0x0000000000003C10ull;
+}
+#else
+#define CVMX_NPEI_MSI_RCV0 (0x0000000000003C10ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_RCV1 CVMX_NPEI_MSI_RCV1_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_RCV1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_RCV1 not supported on this chip\n");
+ return 0x0000000000003C20ull;
+}
+#else
+#define CVMX_NPEI_MSI_RCV1 (0x0000000000003C20ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_RCV2 CVMX_NPEI_MSI_RCV2_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_RCV2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_RCV2 not supported on this chip\n");
+ return 0x0000000000003C30ull;
+}
+#else
+#define CVMX_NPEI_MSI_RCV2 (0x0000000000003C30ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_RCV3 CVMX_NPEI_MSI_RCV3_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_RCV3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_RCV3 not supported on this chip\n");
+ return 0x0000000000003C40ull;
+}
+#else
+#define CVMX_NPEI_MSI_RCV3 (0x0000000000003C40ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_RD_MAP CVMX_NPEI_MSI_RD_MAP_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_RD_MAP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_RD_MAP not supported on this chip\n");
+ return 0x0000000000003CA0ull;
+}
+#else
+#define CVMX_NPEI_MSI_RD_MAP (0x0000000000003CA0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_W1C_ENB0 CVMX_NPEI_MSI_W1C_ENB0_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1C_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1C_ENB0 not supported on this chip\n");
+ return 0x0000000000003CF0ull;
+}
+#else
+#define CVMX_NPEI_MSI_W1C_ENB0 (0x0000000000003CF0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_W1C_ENB1 CVMX_NPEI_MSI_W1C_ENB1_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1C_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1C_ENB1 not supported on this chip\n");
+ return 0x0000000000003D00ull;
+}
+#else
+#define CVMX_NPEI_MSI_W1C_ENB1 (0x0000000000003D00ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_W1C_ENB2 CVMX_NPEI_MSI_W1C_ENB2_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1C_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1C_ENB2 not supported on this chip\n");
+ return 0x0000000000003D10ull;
+}
+#else
+#define CVMX_NPEI_MSI_W1C_ENB2 (0x0000000000003D10ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_W1C_ENB3 CVMX_NPEI_MSI_W1C_ENB3_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1C_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1C_ENB3 not supported on this chip\n");
+ return 0x0000000000003D20ull;
+}
+#else
+#define CVMX_NPEI_MSI_W1C_ENB3 (0x0000000000003D20ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_W1S_ENB0 CVMX_NPEI_MSI_W1S_ENB0_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1S_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1S_ENB0 not supported on this chip\n");
+ return 0x0000000000003D30ull;
+}
+#else
+#define CVMX_NPEI_MSI_W1S_ENB0 (0x0000000000003D30ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_W1S_ENB1 CVMX_NPEI_MSI_W1S_ENB1_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1S_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1S_ENB1 not supported on this chip\n");
+ return 0x0000000000003D40ull;
+}
+#else
+#define CVMX_NPEI_MSI_W1S_ENB1 (0x0000000000003D40ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_W1S_ENB2 CVMX_NPEI_MSI_W1S_ENB2_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1S_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1S_ENB2 not supported on this chip\n");
+ return 0x0000000000003D50ull;
+}
+#else
+#define CVMX_NPEI_MSI_W1S_ENB2 (0x0000000000003D50ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_W1S_ENB3 CVMX_NPEI_MSI_W1S_ENB3_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1S_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1S_ENB3 not supported on this chip\n");
+ return 0x0000000000003D60ull;
+}
+#else
+#define CVMX_NPEI_MSI_W1S_ENB3 (0x0000000000003D60ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_MSI_WR_MAP CVMX_NPEI_MSI_WR_MAP_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_WR_MAP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_WR_MAP not supported on this chip\n");
+ return 0x0000000000003C90ull;
+}
+#else
+#define CVMX_NPEI_MSI_WR_MAP (0x0000000000003C90ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PCIE_CREDIT_CNT CVMX_NPEI_PCIE_CREDIT_CNT_FUNC()
+static inline uint64_t CVMX_NPEI_PCIE_CREDIT_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PCIE_CREDIT_CNT not supported on this chip\n");
+ return 0x0000000000003D70ull;
+}
+#else
+#define CVMX_NPEI_PCIE_CREDIT_CNT (0x0000000000003D70ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PCIE_MSI_RCV CVMX_NPEI_PCIE_MSI_RCV_FUNC()
+static inline uint64_t CVMX_NPEI_PCIE_MSI_RCV_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PCIE_MSI_RCV not supported on this chip\n");
+ return 0x0000000000003CB0ull;
+}
+#else
+#define CVMX_NPEI_PCIE_MSI_RCV (0x0000000000003CB0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PCIE_MSI_RCV_B1 CVMX_NPEI_PCIE_MSI_RCV_B1_FUNC()
+static inline uint64_t CVMX_NPEI_PCIE_MSI_RCV_B1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PCIE_MSI_RCV_B1 not supported on this chip\n");
+ return 0x0000000000000650ull;
+}
+#else
+#define CVMX_NPEI_PCIE_MSI_RCV_B1 (0x0000000000000650ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PCIE_MSI_RCV_B2 CVMX_NPEI_PCIE_MSI_RCV_B2_FUNC()
+static inline uint64_t CVMX_NPEI_PCIE_MSI_RCV_B2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PCIE_MSI_RCV_B2 not supported on this chip\n");
+ return 0x0000000000000660ull;
+}
+#else
+#define CVMX_NPEI_PCIE_MSI_RCV_B2 (0x0000000000000660ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PCIE_MSI_RCV_B3 CVMX_NPEI_PCIE_MSI_RCV_B3_FUNC()
+static inline uint64_t CVMX_NPEI_PCIE_MSI_RCV_B3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PCIE_MSI_RCV_B3 not supported on this chip\n");
+ return 0x0000000000000670ull;
+}
+#else
+#define CVMX_NPEI_PCIE_MSI_RCV_B3 (0x0000000000000670ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_PKTX_CNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_CNTS(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000002400ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_NPEI_PKTX_CNTS(offset) (0x0000000000002400ull + ((offset) & 31) * 16)
+#endif
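+
+/* Editorial sketch (not part of the imported SDK header), assuming only the
+ * accessors defined above: the per-ring PKT registers take an index 0..31,
+ * and the address math masks the index with (offset & 31), so even with
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING an out-of-range index only triggers a
+ * cvmx_warn() while still producing an in-range address. */
+#if 0 /* illustration only */
+static void npei_example_dump_pkt_cnt_offsets(void)
+{
+ unsigned long ring;
+ for (ring = 0; ring < 32; ring++)
+ {
+ /* 0x2400, 0x2410, ..., 0x25F0: one 16-byte stride per ring */
+ uint64_t offset = CVMX_NPEI_PKTX_CNTS(ring);
+ (void)offset;
+ }
+}
+#endif
+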
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_PKTX_INSTR_BADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_INSTR_BADDR(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000002800ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_NPEI_PKTX_INSTR_BADDR(offset) (0x0000000000002800ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000002C00ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) (0x0000000000002C00ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000003000ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) (0x0000000000003000ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_PKTX_INSTR_HEADER(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_INSTR_HEADER(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000003400ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_NPEI_PKTX_INSTR_HEADER(offset) (0x0000000000003400ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_PKTX_IN_BP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_IN_BP(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000003800ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_NPEI_PKTX_IN_BP(offset) (0x0000000000003800ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_PKTX_SLIST_BADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_SLIST_BADDR(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000001400ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_NPEI_PKTX_SLIST_BADDR(offset) (0x0000000000001400ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000001800ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) (0x0000000000001800ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000001C00ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) (0x0000000000001C00ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_CNT_INT CVMX_NPEI_PKT_CNT_INT_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_CNT_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_CNT_INT not supported on this chip\n");
+ return 0x0000000000001110ull;
+}
+#else
+#define CVMX_NPEI_PKT_CNT_INT (0x0000000000001110ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_CNT_INT_ENB CVMX_NPEI_PKT_CNT_INT_ENB_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_CNT_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_CNT_INT_ENB not supported on this chip\n");
+ return 0x0000000000001130ull;
+}
+#else
+#define CVMX_NPEI_PKT_CNT_INT_ENB (0x0000000000001130ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_DATA_OUT_ES CVMX_NPEI_PKT_DATA_OUT_ES_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_DATA_OUT_ES_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_DATA_OUT_ES not supported on this chip\n");
+ return 0x00000000000010B0ull;
+}
+#else
+#define CVMX_NPEI_PKT_DATA_OUT_ES (0x00000000000010B0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_DATA_OUT_NS CVMX_NPEI_PKT_DATA_OUT_NS_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_DATA_OUT_NS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_DATA_OUT_NS not supported on this chip\n");
+ return 0x00000000000010A0ull;
+}
+#else
+#define CVMX_NPEI_PKT_DATA_OUT_NS (0x00000000000010A0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_DATA_OUT_ROR CVMX_NPEI_PKT_DATA_OUT_ROR_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_DATA_OUT_ROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_DATA_OUT_ROR not supported on this chip\n");
+ return 0x0000000000001090ull;
+}
+#else
+#define CVMX_NPEI_PKT_DATA_OUT_ROR (0x0000000000001090ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_DPADDR CVMX_NPEI_PKT_DPADDR_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_DPADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_DPADDR not supported on this chip\n");
+ return 0x0000000000001080ull;
+}
+#else
+#define CVMX_NPEI_PKT_DPADDR (0x0000000000001080ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_INPUT_CONTROL CVMX_NPEI_PKT_INPUT_CONTROL_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_INPUT_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_INPUT_CONTROL not supported on this chip\n");
+ return 0x0000000000001150ull;
+}
+#else
+#define CVMX_NPEI_PKT_INPUT_CONTROL (0x0000000000001150ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_INSTR_ENB CVMX_NPEI_PKT_INSTR_ENB_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_INSTR_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_INSTR_ENB not supported on this chip\n");
+ return 0x0000000000001000ull;
+}
+#else
+#define CVMX_NPEI_PKT_INSTR_ENB (0x0000000000001000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_INSTR_RD_SIZE CVMX_NPEI_PKT_INSTR_RD_SIZE_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_INSTR_RD_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_INSTR_RD_SIZE not supported on this chip\n");
+ return 0x0000000000001190ull;
+}
+#else
+#define CVMX_NPEI_PKT_INSTR_RD_SIZE (0x0000000000001190ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_INSTR_SIZE CVMX_NPEI_PKT_INSTR_SIZE_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_INSTR_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_INSTR_SIZE not supported on this chip\n");
+ return 0x0000000000001020ull;
+}
+#else
+#define CVMX_NPEI_PKT_INSTR_SIZE (0x0000000000001020ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_INT_LEVELS CVMX_NPEI_PKT_INT_LEVELS_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_INT_LEVELS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_INT_LEVELS not supported on this chip\n");
+ return 0x0000000000001100ull;
+}
+#else
+#define CVMX_NPEI_PKT_INT_LEVELS (0x0000000000001100ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_IN_BP CVMX_NPEI_PKT_IN_BP_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_IN_BP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_IN_BP not supported on this chip\n");
+ return 0x00000000000006B0ull;
+}
+#else
+#define CVMX_NPEI_PKT_IN_BP (0x00000000000006B0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPEI_PKT_IN_DONEX_CNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKT_IN_DONEX_CNTS(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000002000ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_NPEI_PKT_IN_DONEX_CNTS(offset) (0x0000000000002000ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_IN_INSTR_COUNTS CVMX_NPEI_PKT_IN_INSTR_COUNTS_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_IN_INSTR_COUNTS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_IN_INSTR_COUNTS not supported on this chip\n");
+ return 0x00000000000006A0ull;
+}
+#else
+#define CVMX_NPEI_PKT_IN_INSTR_COUNTS (0x00000000000006A0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_IN_PCIE_PORT CVMX_NPEI_PKT_IN_PCIE_PORT_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_IN_PCIE_PORT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_IN_PCIE_PORT not supported on this chip\n");
+ return 0x00000000000011A0ull;
+}
+#else
+#define CVMX_NPEI_PKT_IN_PCIE_PORT (0x00000000000011A0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_IPTR CVMX_NPEI_PKT_IPTR_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_IPTR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_IPTR not supported on this chip\n");
+ return 0x0000000000001070ull;
+}
+#else
+#define CVMX_NPEI_PKT_IPTR (0x0000000000001070ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_OUTPUT_WMARK CVMX_NPEI_PKT_OUTPUT_WMARK_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_OUTPUT_WMARK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_OUTPUT_WMARK not supported on this chip\n");
+ return 0x0000000000001160ull;
+}
+#else
+#define CVMX_NPEI_PKT_OUTPUT_WMARK (0x0000000000001160ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_OUT_BMODE CVMX_NPEI_PKT_OUT_BMODE_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_OUT_BMODE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_OUT_BMODE not supported on this chip\n");
+ return 0x00000000000010D0ull;
+}
+#else
+#define CVMX_NPEI_PKT_OUT_BMODE (0x00000000000010D0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_OUT_ENB CVMX_NPEI_PKT_OUT_ENB_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_OUT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_OUT_ENB not supported on this chip\n");
+ return 0x0000000000001010ull;
+}
+#else
+#define CVMX_NPEI_PKT_OUT_ENB (0x0000000000001010ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_PCIE_PORT CVMX_NPEI_PKT_PCIE_PORT_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_PCIE_PORT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_PCIE_PORT not supported on this chip\n");
+ return 0x00000000000010E0ull;
+}
+#else
+#define CVMX_NPEI_PKT_PCIE_PORT (0x00000000000010E0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_PORT_IN_RST CVMX_NPEI_PKT_PORT_IN_RST_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_PORT_IN_RST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_PORT_IN_RST not supported on this chip\n");
+ return 0x0000000000000690ull;
+}
+#else
+#define CVMX_NPEI_PKT_PORT_IN_RST (0x0000000000000690ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_SLIST_ES CVMX_NPEI_PKT_SLIST_ES_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_SLIST_ES_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_SLIST_ES not supported on this chip\n");
+ return 0x0000000000001050ull;
+}
+#else
+#define CVMX_NPEI_PKT_SLIST_ES (0x0000000000001050ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_SLIST_ID_SIZE CVMX_NPEI_PKT_SLIST_ID_SIZE_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_SLIST_ID_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_SLIST_ID_SIZE not supported on this chip\n");
+ return 0x0000000000001180ull;
+}
+#else
+#define CVMX_NPEI_PKT_SLIST_ID_SIZE (0x0000000000001180ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_SLIST_NS CVMX_NPEI_PKT_SLIST_NS_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_SLIST_NS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_SLIST_NS not supported on this chip\n");
+ return 0x0000000000001040ull;
+}
+#else
+#define CVMX_NPEI_PKT_SLIST_NS (0x0000000000001040ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_SLIST_ROR CVMX_NPEI_PKT_SLIST_ROR_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_SLIST_ROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_SLIST_ROR not supported on this chip\n");
+ return 0x0000000000001030ull;
+}
+#else
+#define CVMX_NPEI_PKT_SLIST_ROR (0x0000000000001030ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_TIME_INT CVMX_NPEI_PKT_TIME_INT_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_TIME_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_TIME_INT not supported on this chip\n");
+ return 0x0000000000001120ull;
+}
+#else
+#define CVMX_NPEI_PKT_TIME_INT (0x0000000000001120ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_PKT_TIME_INT_ENB CVMX_NPEI_PKT_TIME_INT_ENB_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_TIME_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_TIME_INT_ENB not supported on this chip\n");
+ return 0x0000000000001140ull;
+}
+#else
+#define CVMX_NPEI_PKT_TIME_INT_ENB (0x0000000000001140ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_RSL_INT_BLOCKS CVMX_NPEI_RSL_INT_BLOCKS_FUNC()
+static inline uint64_t CVMX_NPEI_RSL_INT_BLOCKS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_RSL_INT_BLOCKS not supported on this chip\n");
+ return 0x0000000000000520ull;
+}
+#else
+#define CVMX_NPEI_RSL_INT_BLOCKS (0x0000000000000520ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_SCRATCH_1 CVMX_NPEI_SCRATCH_1_FUNC()
+static inline uint64_t CVMX_NPEI_SCRATCH_1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_SCRATCH_1 not supported on this chip\n");
+ return 0x0000000000000270ull;
+}
+#else
+#define CVMX_NPEI_SCRATCH_1 (0x0000000000000270ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_STATE1 CVMX_NPEI_STATE1_FUNC()
+static inline uint64_t CVMX_NPEI_STATE1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_STATE1 not supported on this chip\n");
+ return 0x0000000000000620ull;
+}
+#else
+#define CVMX_NPEI_STATE1 (0x0000000000000620ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_STATE2 CVMX_NPEI_STATE2_FUNC()
+static inline uint64_t CVMX_NPEI_STATE2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_STATE2 not supported on this chip\n");
+ return 0x0000000000000630ull;
+}
+#else
+#define CVMX_NPEI_STATE2 (0x0000000000000630ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_STATE3 CVMX_NPEI_STATE3_FUNC()
+static inline uint64_t CVMX_NPEI_STATE3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_STATE3 not supported on this chip\n");
+ return 0x0000000000000640ull;
+}
+#else
+#define CVMX_NPEI_STATE3 (0x0000000000000640ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_WINDOW_CTL CVMX_NPEI_WINDOW_CTL_FUNC()
+static inline uint64_t CVMX_NPEI_WINDOW_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_WINDOW_CTL not supported on this chip\n");
+ return 0x0000000000000380ull;
+}
+#else
+#define CVMX_NPEI_WINDOW_CTL (0x0000000000000380ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_WIN_RD_ADDR CVMX_NPEI_WIN_RD_ADDR_FUNC()
+static inline uint64_t CVMX_NPEI_WIN_RD_ADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_WIN_RD_ADDR not supported on this chip\n");
+ return 0x0000000000000210ull;
+}
+#else
+#define CVMX_NPEI_WIN_RD_ADDR (0x0000000000000210ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_WIN_RD_DATA CVMX_NPEI_WIN_RD_DATA_FUNC()
+static inline uint64_t CVMX_NPEI_WIN_RD_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_WIN_RD_DATA not supported on this chip\n");
+ return 0x0000000000000240ull;
+}
+#else
+#define CVMX_NPEI_WIN_RD_DATA (0x0000000000000240ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_WIN_WR_ADDR CVMX_NPEI_WIN_WR_ADDR_FUNC()
+static inline uint64_t CVMX_NPEI_WIN_WR_ADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_WIN_WR_ADDR not supported on this chip\n");
+ return 0x0000000000000200ull;
+}
+#else
+#define CVMX_NPEI_WIN_WR_ADDR (0x0000000000000200ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_WIN_WR_DATA CVMX_NPEI_WIN_WR_DATA_FUNC()
+static inline uint64_t CVMX_NPEI_WIN_WR_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_WIN_WR_DATA not supported on this chip\n");
+ return 0x0000000000000220ull;
+}
+#else
+#define CVMX_NPEI_WIN_WR_DATA (0x0000000000000220ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPEI_WIN_WR_MASK CVMX_NPEI_WIN_WR_MASK_FUNC()
+static inline uint64_t CVMX_NPEI_WIN_WR_MASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_WIN_WR_MASK not supported on this chip\n");
+ return 0x0000000000000230ull;
+}
+#else
+#define CVMX_NPEI_WIN_WR_MASK (0x0000000000000230ull)
+#endif
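+
+/* Editorial sketch (not part of the imported SDK header): the pattern used
+ * for every NPEI address above. With CVMX_ENABLE_CSR_ADDRESS_CHECKING set,
+ * each name expands to an inline function that calls cvmx_warn() when the
+ * register (or its index argument) is invalid for the running model, then
+ * returns the same constant the plain #define would give. */
+#if 0 /* illustration only */
+static uint64_t npei_example_checked_offsets(void)
+{
+ /* Warns via cvmx_warn() unless running on CN52XX/CN56XX. */
+ uint64_t win_ctl = CVMX_NPEI_WINDOW_CTL;
+ /* The index argument is also validated: only 12..27 are accepted here. */
+ uint64_t subidx = CVMX_NPEI_MEM_ACCESS_SUBIDX(12);
+ return win_ctl + subidx; /* register offsets only, not full CSR addresses */
+}
+#endif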
+
+/**
+ * cvmx_npei_bar1_index#
+ *
+ * Total address space is 16KB; 0x0000 - 0x3fff, 0x000 - 0x7fe (Reg, every other 8B)
+ *
+ * General 5KB; 0x0000 - 0x13ff, 0x000 - 0x27e (Reg-General)
+ * PktMem 10KB; 0x1400 - 0x3bff, 0x280 - 0x77e (Reg-General-Packet)
+ * Rsvd 1KB; 0x3c00 - 0x3fff, 0x780 - 0x7fe (Reg-NCB Only Mode)
+ * == NPEI_PKT_CNT_INT_ENB[PORT]
+ * == NPEI_PKT_TIME_INT_ENB[PORT]
+ * == NPEI_PKT_CNT_INT[PORT]
+ * == NPEI_PKT_TIME_INT[PORT]
+ * == NPEI_PKT_PCIE_PORT[PP]
+ * == NPEI_PKT_SLIST_ROR[ROR]
+ * == NPEI_PKT_SLIST_ROR[NSR] ?
+ * == NPEI_PKT_SLIST_ES[ES]
+ * == NPEI_PKTn_SLIST_BAOFF_DBELL[AOFF]
+ * == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL]
+ * == NPEI_PKTn_CNTS[CNT]
+ * NPEI_CTL_STATUS[OUTn_ENB] == NPEI_PKT_OUT_ENB[ENB]
+ * NPEI_BASE_ADDRESS_OUTPUTn[BADDR] == NPEI_PKTn_SLIST_BADDR[ADDR]
+ * NPEI_DESC_OUTPUTn[SIZE] == NPEI_PKTn_SLIST_FIFO_RSIZE[RSIZE]
+ * NPEI_Pn_DBPAIR_ADDR[NADDR] == NPEI_PKTn_SLIST_BADDR[ADDR] + NPEI_PKTn_SLIST_BAOFF_DBELL[AOFF]
+ * NPEI_PKT_CREDITSn[PTR_CNT] == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL]
+ * NPEI_P0_PAIR_CNTS[AVAIL] == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL]
+ * NPEI_P0_PAIR_CNTS[FCNT] ==
+ * NPEI_PKTS_SENTn[PKT_CNT] == NPEI_PKTn_CNTS[CNT]
+ * NPEI_OUTPUT_CONTROL[Pn_BMODE] == NPEI_PKT_OUT_BMODE[BMODE]
+ * NPEI_PKT_CREDITSn[PKT_CNT] == NPEI_PKTn_CNTS[CNT]
+ * NPEI_BUFF_SIZE_OUTPUTn[BSIZE] == NPEI_PKT_SLIST_ID_SIZE[BSIZE]
+ * NPEI_BUFF_SIZE_OUTPUTn[ISIZE] == NPEI_PKT_SLIST_ID_SIZE[ISIZE]
+ * NPEI_OUTPUT_CONTROL[On_CSRM] == NPEI_PKT_DPADDR[DPTR] & NPEI_PKT_OUT_USE_IPTR[PORT]
+ * NPEI_OUTPUT_CONTROL[On_ES] == NPEI_PKT_DATA_OUT_ES[ES]
+ * NPEI_OUTPUT_CONTROL[On_NS] == NPEI_PKT_DATA_OUT_NS[NSR] ?
+ * NPEI_OUTPUT_CONTROL[On_RO] == NPEI_PKT_DATA_OUT_ROR[ROR]
+ * NPEI_PKTS_SENT_INT_LEVn[PKT_CNT] == NPEI_PKT_INT_LEVELS[CNT]
+ * NPEI_PKTS_SENT_TIMEn[PKT_TIME] == NPEI_PKT_INT_LEVELS[TIME]
+ * NPEI_OUTPUT_CONTROL[IPTR_On] == NPEI_PKT_IPTR[IPTR]
+ * NPEI_PCIE_PORT_OUTPUT[] == NPEI_PKT_PCIE_PORT[PP]
+ *
+ * NPEI_BAR1_INDEXX = NPEI BAR1 IndexX Register
+ *
+ * Contains the address index and control bits for access to memory ranges of BAR-1. The index is built from the supplied address bits [25:22].
+ * NPEI_BAR1_INDEX0 through NPEI_BAR1_INDEX15 are used for transactions originating with PCIE-PORT0, and NPEI_BAR1_INDEX16
+ * through NPEI_BAR1_INDEX31 are used for transactions originating with PCIE-PORT1.
+ */
+union cvmx_npei_bar1_indexx {
+ uint32_t u32;
+ struct cvmx_npei_bar1_indexx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_18_31 : 14;
+ uint32_t addr_idx : 14; /**< Address bits [35:22] sent to L2C */
+ uint32_t ca : 1; /**< Set '1' when access is not to be cached in L2. */
+ uint32_t end_swp : 2; /**< Endian Swap Mode */
+ uint32_t addr_v : 1; /**< Set '1' when the selected address range is valid. */
+#else
+ uint32_t addr_v : 1;
+ uint32_t end_swp : 2;
+ uint32_t ca : 1;
+ uint32_t addr_idx : 14;
+ uint32_t reserved_18_31 : 14;
+#endif
+ } s;
+ struct cvmx_npei_bar1_indexx_s cn52xx;
+ struct cvmx_npei_bar1_indexx_s cn52xxp1;
+ struct cvmx_npei_bar1_indexx_s cn56xx;
+ struct cvmx_npei_bar1_indexx_s cn56xxp1;
+};
+typedef union cvmx_npei_bar1_indexx cvmx_npei_bar1_indexx_t;
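+
+/* Editorial sketch (not part of the imported SDK header), assuming only the
+ * union above: packing one BAR1 index entry. Field meanings follow the
+ * bit-field comments; the end_swp value chosen below is an assumption for
+ * illustration, and the actual register write is omitted because the access
+ * path is chip-specific. */
+#if 0 /* illustration only */
+static uint32_t npei_example_bar1_entry(uint64_t l2c_addr)
+{
+ cvmx_npei_bar1_indexx_t idx;
+ idx.u32 = 0;
+ idx.s.addr_idx = (l2c_addr >> 22) & 0x3fff; /* address bits [35:22] */
+ idx.s.ca = 0; /* allow caching in L2 */
+ idx.s.end_swp = 0; /* endian swap mode (assumed: no swap) */
+ idx.s.addr_v = 1; /* mark this address range valid */
+ return idx.u32;
+}
+#endif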
+
+/**
+ * cvmx_npei_bist_status
+ *
+ * NPEI_BIST_STATUS = NPEI's BIST Status Register
+ *
+ * Results from BIST runs of NPEI's memories.
+ */
+union cvmx_npei_bist_status {
+ uint64_t u64;
+ struct cvmx_npei_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pkt_rdf : 1; /**< BIST Status for PKT Read FIFO */
+ uint64_t reserved_60_62 : 3;
+ uint64_t pcr_gim : 1; /**< BIST Status for PKT Gather Instr MEM */
+ uint64_t pkt_pif : 1; /**< BIST Status for PKT INB FIFO */
+ uint64_t pcsr_int : 1; /**< BIST Status for PKT pout_int_bstatus */
+ uint64_t pcsr_im : 1; /**< BIST Status for PKT pcsr_instr_mem_bstatus */
+ uint64_t pcsr_cnt : 1; /**< BIST Status for PKT pin_cnt_bstatus */
+ uint64_t pcsr_id : 1; /**< BIST Status for PKT pcsr_in_done_bstatus */
+ uint64_t pcsr_sl : 1; /**< BIST Status for PKT pcsr_slist_bstatus */
+ uint64_t reserved_50_52 : 3;
+ uint64_t pkt_ind : 1; /**< BIST Status for PKT Instruction Done MEM */
+ uint64_t pkt_slm : 1; /**< BIST Status for PKT SList MEM */
+ uint64_t reserved_36_47 : 12;
+ uint64_t d0_pst : 1; /**< BIST Status for DMA0 Pcie Store */
+ uint64_t d1_pst : 1; /**< BIST Status for DMA1 Pcie Store */
+ uint64_t d2_pst : 1; /**< BIST Status for DMA2 Pcie Store */
+ uint64_t d3_pst : 1; /**< BIST Status for DMA3 Pcie Store */
+ uint64_t reserved_31_31 : 1;
+ uint64_t n2p0_c : 1; /**< BIST Status for N2P Port0 Cmd */
+ uint64_t n2p0_o : 1; /**< BIST Status for N2P Port0 Data */
+ uint64_t n2p1_c : 1; /**< BIST Status for N2P Port1 Cmd */
+ uint64_t n2p1_o : 1; /**< BIST Status for N2P Port1 Data */
+ uint64_t cpl_p0 : 1; /**< BIST Status for CPL Port 0 */
+ uint64_t cpl_p1 : 1; /**< BIST Status for CPL Port 1 */
+ uint64_t p2n1_po : 1; /**< BIST Status for P2N Port1 P Order */
+ uint64_t p2n1_no : 1; /**< BIST Status for P2N Port1 N Order */
+ uint64_t p2n1_co : 1; /**< BIST Status for P2N Port1 C Order */
+ uint64_t p2n0_po : 1; /**< BIST Status for P2N Port0 P Order */
+ uint64_t p2n0_no : 1; /**< BIST Status for P2N Port0 N Order */
+ uint64_t p2n0_co : 1; /**< BIST Status for P2N Port0 C Order */
+ uint64_t p2n0_c0 : 1; /**< BIST Status for P2N Port0 C0 */
+ uint64_t p2n0_c1 : 1; /**< BIST Status for P2N Port0 C1 */
+ uint64_t p2n0_n : 1; /**< BIST Status for P2N Port0 N */
+ uint64_t p2n0_p0 : 1; /**< BIST Status for P2N Port0 P0 */
+ uint64_t p2n0_p1 : 1; /**< BIST Status for P2N Port0 P1 */
+ uint64_t p2n1_c0 : 1; /**< BIST Status for P2N Port1 C0 */
+ uint64_t p2n1_c1 : 1; /**< BIST Status for P2N Port1 C1 */
+ uint64_t p2n1_n : 1; /**< BIST Status for P2N Port1 N */
+ uint64_t p2n1_p0 : 1; /**< BIST Status for P2N Port1 P0 */
+ uint64_t p2n1_p1 : 1; /**< BIST Status for P2N Port1 P1 */
+ uint64_t csm0 : 1; /**< BIST Status for CSM0 */
+ uint64_t csm1 : 1; /**< BIST Status for CSM1 */
+ uint64_t dif0 : 1; /**< BIST Status for DMA Instr0 */
+ uint64_t dif1 : 1; /**< BIST Status for DMA Instr1 */
+ uint64_t dif2 : 1; /**< BIST Status for DMA Instr2 */
+ uint64_t dif3 : 1; /**< BIST Status for DMA Instr3 */
+ uint64_t reserved_2_2 : 1;
+ uint64_t msi : 1; /**< BIST Status for MSI Memory Map */
+ uint64_t ncb_cmd : 1; /**< BIST Status for NCB Outbound Commands */
+#else
+ uint64_t ncb_cmd : 1;
+ uint64_t msi : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t dif3 : 1;
+ uint64_t dif2 : 1;
+ uint64_t dif1 : 1;
+ uint64_t dif0 : 1;
+ uint64_t csm1 : 1;
+ uint64_t csm0 : 1;
+ uint64_t p2n1_p1 : 1;
+ uint64_t p2n1_p0 : 1;
+ uint64_t p2n1_n : 1;
+ uint64_t p2n1_c1 : 1;
+ uint64_t p2n1_c0 : 1;
+ uint64_t p2n0_p1 : 1;
+ uint64_t p2n0_p0 : 1;
+ uint64_t p2n0_n : 1;
+ uint64_t p2n0_c1 : 1;
+ uint64_t p2n0_c0 : 1;
+ uint64_t p2n0_co : 1;
+ uint64_t p2n0_no : 1;
+ uint64_t p2n0_po : 1;
+ uint64_t p2n1_co : 1;
+ uint64_t p2n1_no : 1;
+ uint64_t p2n1_po : 1;
+ uint64_t cpl_p1 : 1;
+ uint64_t cpl_p0 : 1;
+ uint64_t n2p1_o : 1;
+ uint64_t n2p1_c : 1;
+ uint64_t n2p0_o : 1;
+ uint64_t n2p0_c : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t d3_pst : 1;
+ uint64_t d2_pst : 1;
+ uint64_t d1_pst : 1;
+ uint64_t d0_pst : 1;
+ uint64_t reserved_36_47 : 12;
+ uint64_t pkt_slm : 1;
+ uint64_t pkt_ind : 1;
+ uint64_t reserved_50_52 : 3;
+ uint64_t pcsr_sl : 1;
+ uint64_t pcsr_id : 1;
+ uint64_t pcsr_cnt : 1;
+ uint64_t pcsr_im : 1;
+ uint64_t pcsr_int : 1;
+ uint64_t pkt_pif : 1;
+ uint64_t pcr_gim : 1;
+ uint64_t reserved_60_62 : 3;
+ uint64_t pkt_rdf : 1;
+#endif
+ } s;
+ struct cvmx_npei_bist_status_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pkt_rdf : 1; /**< BIST Status for PKT Read FIFO */
+ uint64_t reserved_60_62 : 3;
+ uint64_t pcr_gim : 1; /**< BIST Status for PKT Gather Instr MEM */
+ uint64_t pkt_pif : 1; /**< BIST Status for PKT INB FIFO */
+ uint64_t pcsr_int : 1; /**< BIST Status for PKT OUTB Interrupt MEM */
+ uint64_t pcsr_im : 1; /**< BIST Status for PKT CSR Instr MEM */
+ uint64_t pcsr_cnt : 1; /**< BIST Status for PKT INB Count MEM */
+ uint64_t pcsr_id : 1; /**< BIST Status for PKT INB Instr Done MEM */
+ uint64_t pcsr_sl : 1; /**< BIST Status for PKT OUTB SLIST MEM */
+ uint64_t pkt_imem : 1; /**< BIST Status for PKT OUTB IFIFO */
+ uint64_t pkt_pfm : 1; /**< BIST Status for PKT Front MEM */
+ uint64_t pkt_pof : 1; /**< BIST Status for PKT OUTB FIFO */
+ uint64_t reserved_48_49 : 2;
+ uint64_t pkt_pop0 : 1; /**< BIST Status for PKT OUTB Slist0 */
+ uint64_t pkt_pop1 : 1; /**< BIST Status for PKT OUTB Slist1 */
+ uint64_t d0_mem : 1; /**< BIST Status for DMA MEM 0 */
+ uint64_t d1_mem : 1; /**< BIST Status for DMA MEM 1 */
+ uint64_t d2_mem : 1; /**< BIST Status for DMA MEM 2 */
+ uint64_t d3_mem : 1; /**< BIST Status for DMA MEM 3 */
+ uint64_t d4_mem : 1; /**< BIST Status for DMA MEM 4 */
+ uint64_t ds_mem : 1; /**< BIST Status for DMA Memory */
+ uint64_t reserved_36_39 : 4;
+ uint64_t d0_pst : 1; /**< BIST Status for DMA0 Pcie Store */
+ uint64_t d1_pst : 1; /**< BIST Status for DMA1 Pcie Store */
+ uint64_t d2_pst : 1; /**< BIST Status for DMA2 Pcie Store */
+ uint64_t d3_pst : 1; /**< BIST Status for DMA3 Pcie Store */
+ uint64_t d4_pst : 1; /**< BIST Status for DMA4 Pcie Store */
+ uint64_t n2p0_c : 1; /**< BIST Status for N2P Port0 Cmd */
+ uint64_t n2p0_o : 1; /**< BIST Status for N2P Port0 Data */
+ uint64_t n2p1_c : 1; /**< BIST Status for N2P Port1 Cmd */
+ uint64_t n2p1_o : 1; /**< BIST Status for N2P Port1 Data */
+ uint64_t cpl_p0 : 1; /**< BIST Status for CPL Port 0 */
+ uint64_t cpl_p1 : 1; /**< BIST Status for CPL Port 1 */
+ uint64_t p2n1_po : 1; /**< BIST Status for P2N Port1 P Order */
+ uint64_t p2n1_no : 1; /**< BIST Status for P2N Port1 N Order */
+ uint64_t p2n1_co : 1; /**< BIST Status for P2N Port1 C Order */
+ uint64_t p2n0_po : 1; /**< BIST Status for P2N Port0 P Order */
+ uint64_t p2n0_no : 1; /**< BIST Status for P2N Port0 N Order */
+ uint64_t p2n0_co : 1; /**< BIST Status for P2N Port0 C Order */
+ uint64_t p2n0_c0 : 1; /**< BIST Status for P2N Port0 C0 */
+ uint64_t p2n0_c1 : 1; /**< BIST Status for P2N Port0 C1 */
+ uint64_t p2n0_n : 1; /**< BIST Status for P2N Port0 N */
+ uint64_t p2n0_p0 : 1; /**< BIST Status for P2N Port0 P0 */
+ uint64_t p2n0_p1 : 1; /**< BIST Status for P2N Port0 P1 */
+ uint64_t p2n1_c0 : 1; /**< BIST Status for P2N Port1 C0 */
+ uint64_t p2n1_c1 : 1; /**< BIST Status for P2N Port1 C1 */
+ uint64_t p2n1_n : 1; /**< BIST Status for P2N Port1 N */
+ uint64_t p2n1_p0 : 1; /**< BIST Status for P2N Port1 P0 */
+ uint64_t p2n1_p1 : 1; /**< BIST Status for P2N Port1 P1 */
+ uint64_t csm0 : 1; /**< BIST Status for CSM0 */
+ uint64_t csm1 : 1; /**< BIST Status for CSM1 */
+ uint64_t dif0 : 1; /**< BIST Status for DMA Instr0 */
+ uint64_t dif1 : 1; /**< BIST Status for DMA Instr1 */
+ uint64_t dif2 : 1; /**< BIST Status for DMA Instr2 */
+ uint64_t dif3 : 1; /**< BIST Status for DMA Instr3 */
+ uint64_t dif4 : 1; /**< BIST Status for DMA Instr4 */
+ uint64_t msi : 1; /**< BIST Status for MSI Memory Map */
+ uint64_t ncb_cmd : 1; /**< BIST Status for NCB Outbound Commands */
+#else
+ uint64_t ncb_cmd : 1;
+ uint64_t msi : 1;
+ uint64_t dif4 : 1;
+ uint64_t dif3 : 1;
+ uint64_t dif2 : 1;
+ uint64_t dif1 : 1;
+ uint64_t dif0 : 1;
+ uint64_t csm1 : 1;
+ uint64_t csm0 : 1;
+ uint64_t p2n1_p1 : 1;
+ uint64_t p2n1_p0 : 1;
+ uint64_t p2n1_n : 1;
+ uint64_t p2n1_c1 : 1;
+ uint64_t p2n1_c0 : 1;
+ uint64_t p2n0_p1 : 1;
+ uint64_t p2n0_p0 : 1;
+ uint64_t p2n0_n : 1;
+ uint64_t p2n0_c1 : 1;
+ uint64_t p2n0_c0 : 1;
+ uint64_t p2n0_co : 1;
+ uint64_t p2n0_no : 1;
+ uint64_t p2n0_po : 1;
+ uint64_t p2n1_co : 1;
+ uint64_t p2n1_no : 1;
+ uint64_t p2n1_po : 1;
+ uint64_t cpl_p1 : 1;
+ uint64_t cpl_p0 : 1;
+ uint64_t n2p1_o : 1;
+ uint64_t n2p1_c : 1;
+ uint64_t n2p0_o : 1;
+ uint64_t n2p0_c : 1;
+ uint64_t d4_pst : 1;
+ uint64_t d3_pst : 1;
+ uint64_t d2_pst : 1;
+ uint64_t d1_pst : 1;
+ uint64_t d0_pst : 1;
+ uint64_t reserved_36_39 : 4;
+ uint64_t ds_mem : 1;
+ uint64_t d4_mem : 1;
+ uint64_t d3_mem : 1;
+ uint64_t d2_mem : 1;
+ uint64_t d1_mem : 1;
+ uint64_t d0_mem : 1;
+ uint64_t pkt_pop1 : 1;
+ uint64_t pkt_pop0 : 1;
+ uint64_t reserved_48_49 : 2;
+ uint64_t pkt_pof : 1;
+ uint64_t pkt_pfm : 1;
+ uint64_t pkt_imem : 1;
+ uint64_t pcsr_sl : 1;
+ uint64_t pcsr_id : 1;
+ uint64_t pcsr_cnt : 1;
+ uint64_t pcsr_im : 1;
+ uint64_t pcsr_int : 1;
+ uint64_t pkt_pif : 1;
+ uint64_t pcr_gim : 1;
+ uint64_t reserved_60_62 : 3;
+ uint64_t pkt_rdf : 1;
+#endif
+ } cn52xx;
+ struct cvmx_npei_bist_status_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t d0_mem0 : 1; /**< BIST Status for DMA0 Memory */
+ uint64_t d1_mem1 : 1; /**< BIST Status for DMA1 Memory */
+ uint64_t d2_mem2 : 1; /**< BIST Status for DMA2 Memory */
+ uint64_t d3_mem3 : 1; /**< BIST Status for DMA3 Memory */
+ uint64_t dr0_mem : 1; /**< BIST Status for DMA0 Store */
+ uint64_t d0_mem : 1; /**< BIST Status for DMA0 Memory */
+ uint64_t d1_mem : 1; /**< BIST Status for DMA1 Memory */
+ uint64_t d2_mem : 1; /**< BIST Status for DMA2 Memory */
+ uint64_t d3_mem : 1; /**< BIST Status for DMA3 Memory */
+ uint64_t dr1_mem : 1; /**< BIST Status for DMA1 Store */
+ uint64_t d0_pst : 1; /**< BIST Status for DMA0 Pcie Store */
+ uint64_t d1_pst : 1; /**< BIST Status for DMA1 Pcie Store */
+ uint64_t d2_pst : 1; /**< BIST Status for DMA2 Pcie Store */
+ uint64_t d3_pst : 1; /**< BIST Status for DMA3 Pcie Store */
+ uint64_t dr2_mem : 1; /**< BIST Status for DMA2 Store */
+ uint64_t n2p0_c : 1; /**< BIST Status for N2P Port0 Cmd */
+ uint64_t n2p0_o : 1; /**< BIST Status for N2P Port0 Data */
+ uint64_t n2p1_c : 1; /**< BIST Status for N2P Port1 Cmd */
+ uint64_t n2p1_o : 1; /**< BIST Status for N2P Port1 Data */
+ uint64_t cpl_p0 : 1; /**< BIST Status for CPL Port 0 */
+ uint64_t cpl_p1 : 1; /**< BIST Status for CPL Port 1 */
+ uint64_t p2n1_po : 1; /**< BIST Status for P2N Port1 P Order */
+ uint64_t p2n1_no : 1; /**< BIST Status for P2N Port1 N Order */
+ uint64_t p2n1_co : 1; /**< BIST Status for P2N Port1 C Order */
+ uint64_t p2n0_po : 1; /**< BIST Status for P2N Port0 P Order */
+ uint64_t p2n0_no : 1; /**< BIST Status for P2N Port0 N Order */
+ uint64_t p2n0_co : 1; /**< BIST Status for P2N Port0 C Order */
+ uint64_t p2n0_c0 : 1; /**< BIST Status for P2N Port0 C0 */
+ uint64_t p2n0_c1 : 1; /**< BIST Status for P2N Port0 C1 */
+ uint64_t p2n0_n : 1; /**< BIST Status for P2N Port0 N */
+ uint64_t p2n0_p0 : 1; /**< BIST Status for P2N Port0 P0 */
+ uint64_t p2n0_p1 : 1; /**< BIST Status for P2N Port0 P1 */
+ uint64_t p2n1_c0 : 1; /**< BIST Status for P2N Port1 C0 */
+ uint64_t p2n1_c1 : 1; /**< BIST Status for P2N Port1 C1 */
+ uint64_t p2n1_n : 1; /**< BIST Status for P2N Port1 N */
+ uint64_t p2n1_p0 : 1; /**< BIST Status for P2N Port1 P0 */
+ uint64_t p2n1_p1 : 1; /**< BIST Status for P2N Port1 P1 */
+ uint64_t csm0 : 1; /**< BIST Status for CSM0 */
+ uint64_t csm1 : 1; /**< BIST Status for CSM1 */
+ uint64_t dif0 : 1; /**< BIST Status for DMA Instr0 */
+ uint64_t dif1 : 1; /**< BIST Status for DMA Instr1 */
+ uint64_t dif2 : 1; /**< BIST Status for DMA Instr2 */
+ uint64_t dif3 : 1; /**< BIST Status for DMA Instr3 */
+ uint64_t dr3_mem : 1; /**< BIST Status for DMA3 Store */
+ uint64_t msi : 1; /**< BIST Status for MSI Memory Map */
+ uint64_t ncb_cmd : 1; /**< BIST Status for NCB Outbound Commands */
+#else
+ uint64_t ncb_cmd : 1;
+ uint64_t msi : 1;
+ uint64_t dr3_mem : 1;
+ uint64_t dif3 : 1;
+ uint64_t dif2 : 1;
+ uint64_t dif1 : 1;
+ uint64_t dif0 : 1;
+ uint64_t csm1 : 1;
+ uint64_t csm0 : 1;
+ uint64_t p2n1_p1 : 1;
+ uint64_t p2n1_p0 : 1;
+ uint64_t p2n1_n : 1;
+ uint64_t p2n1_c1 : 1;
+ uint64_t p2n1_c0 : 1;
+ uint64_t p2n0_p1 : 1;
+ uint64_t p2n0_p0 : 1;
+ uint64_t p2n0_n : 1;
+ uint64_t p2n0_c1 : 1;
+ uint64_t p2n0_c0 : 1;
+ uint64_t p2n0_co : 1;
+ uint64_t p2n0_no : 1;
+ uint64_t p2n0_po : 1;
+ uint64_t p2n1_co : 1;
+ uint64_t p2n1_no : 1;
+ uint64_t p2n1_po : 1;
+ uint64_t cpl_p1 : 1;
+ uint64_t cpl_p0 : 1;
+ uint64_t n2p1_o : 1;
+ uint64_t n2p1_c : 1;
+ uint64_t n2p0_o : 1;
+ uint64_t n2p0_c : 1;
+ uint64_t dr2_mem : 1;
+ uint64_t d3_pst : 1;
+ uint64_t d2_pst : 1;
+ uint64_t d1_pst : 1;
+ uint64_t d0_pst : 1;
+ uint64_t dr1_mem : 1;
+ uint64_t d3_mem : 1;
+ uint64_t d2_mem : 1;
+ uint64_t d1_mem : 1;
+ uint64_t d0_mem : 1;
+ uint64_t dr0_mem : 1;
+ uint64_t d3_mem3 : 1;
+ uint64_t d2_mem2 : 1;
+ uint64_t d1_mem1 : 1;
+ uint64_t d0_mem0 : 1;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_bist_status_cn52xx cn56xx;
+ struct cvmx_npei_bist_status_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t pcsr_int : 1; /**< BIST Status for PKT pout_int_bstatus */
+ uint64_t pcsr_im : 1; /**< BIST Status for PKT pcsr_instr_mem_bstatus */
+ uint64_t pcsr_cnt : 1; /**< BIST Status for PKT pin_cnt_bstatus */
+ uint64_t pcsr_id : 1; /**< BIST Status for PKT pcsr_in_done_bstatus */
+ uint64_t pcsr_sl : 1; /**< BIST Status for PKT pcsr_slist_bstatus */
+ uint64_t pkt_pout : 1; /**< BIST Status for PKT OUT Count MEM */
+ uint64_t pkt_imem : 1; /**< BIST Status for PKT Instruction MEM */
+ uint64_t pkt_cntm : 1; /**< BIST Status for PKT Count MEM */
+ uint64_t pkt_ind : 1; /**< BIST Status for PKT Instruction Done MEM */
+ uint64_t pkt_slm : 1; /**< BIST Status for PKT SList MEM */
+ uint64_t pkt_odf : 1; /**< BIST Status for PKT Output Data FIFO */
+ uint64_t pkt_oif : 1; /**< BIST Status for PKT Output INFO FIFO */
+ uint64_t pkt_out : 1; /**< BIST Status for PKT Output FIFO */
+ uint64_t pkt_i0 : 1; /**< BIST Status for PKT Instr0 */
+ uint64_t pkt_i1 : 1; /**< BIST Status for PKT Instr1 */
+ uint64_t pkt_s0 : 1; /**< BIST Status for PKT Slist0 */
+ uint64_t pkt_s1 : 1; /**< BIST Status for PKT Slist1 */
+ uint64_t d0_mem : 1; /**< BIST Status for DMA0 Memory */
+ uint64_t d1_mem : 1; /**< BIST Status for DMA1 Memory */
+ uint64_t d2_mem : 1; /**< BIST Status for DMA2 Memory */
+ uint64_t d3_mem : 1; /**< BIST Status for DMA3 Memory */
+ uint64_t d4_mem : 1; /**< BIST Status for DMA4 Memory */
+ uint64_t d0_pst : 1; /**< BIST Status for DMA0 Pcie Store */
+ uint64_t d1_pst : 1; /**< BIST Status for DMA1 Pcie Store */
+ uint64_t d2_pst : 1; /**< BIST Status for DMA2 Pcie Store */
+ uint64_t d3_pst : 1; /**< BIST Status for DMA3 Pcie Store */
+ uint64_t d4_pst : 1; /**< BIST Status for DMA4 Pcie Store */
+ uint64_t n2p0_c : 1; /**< BIST Status for N2P Port0 Cmd */
+ uint64_t n2p0_o : 1; /**< BIST Status for N2P Port0 Data */
+ uint64_t n2p1_c : 1; /**< BIST Status for N2P Port1 Cmd */
+ uint64_t n2p1_o : 1; /**< BIST Status for N2P Port1 Data */
+ uint64_t cpl_p0 : 1; /**< BIST Status for CPL Port 0 */
+ uint64_t cpl_p1 : 1; /**< BIST Status for CPL Port 1 */
+ uint64_t p2n1_po : 1; /**< BIST Status for P2N Port1 P Order */
+ uint64_t p2n1_no : 1; /**< BIST Status for P2N Port1 N Order */
+ uint64_t p2n1_co : 1; /**< BIST Status for P2N Port1 C Order */
+ uint64_t p2n0_po : 1; /**< BIST Status for P2N Port0 P Order */
+ uint64_t p2n0_no : 1; /**< BIST Status for P2N Port0 N Order */
+ uint64_t p2n0_co : 1; /**< BIST Status for P2N Port0 C Order */
+ uint64_t p2n0_c0 : 1; /**< BIST Status for P2N Port0 C0 */
+ uint64_t p2n0_c1 : 1; /**< BIST Status for P2N Port0 C1 */
+ uint64_t p2n0_n : 1; /**< BIST Status for P2N Port0 N */
+ uint64_t p2n0_p0 : 1; /**< BIST Status for P2N Port0 P0 */
+ uint64_t p2n0_p1 : 1; /**< BIST Status for P2N Port0 P1 */
+ uint64_t p2n1_c0 : 1; /**< BIST Status for P2N Port1 C0 */
+ uint64_t p2n1_c1 : 1; /**< BIST Status for P2N Port1 C1 */
+ uint64_t p2n1_n : 1; /**< BIST Status for P2N Port1 N */
+ uint64_t p2n1_p0 : 1; /**< BIST Status for P2N Port1 P0 */
+ uint64_t p2n1_p1 : 1; /**< BIST Status for P2N Port1 P1 */
+ uint64_t csm0 : 1; /**< BIST Status for CSM0 */
+ uint64_t csm1 : 1; /**< BIST Status for CSM1 */
+ uint64_t dif0 : 1; /**< BIST Status for DMA Instr0 */
+ uint64_t dif1 : 1; /**< BIST Status for DMA Instr1 */
+ uint64_t dif2 : 1; /**< BIST Status for DMA Instr2 */
+ uint64_t dif3 : 1; /**< BIST Status for DMA Instr3 */
+ uint64_t dif4 : 1; /**< BIST Status for DMA Instr4 */
+ uint64_t msi : 1; /**< BIST Status for MSI Memory Map */
+ uint64_t ncb_cmd : 1; /**< BIST Status for NCB Outbound Commands */
+#else
+ uint64_t ncb_cmd : 1;
+ uint64_t msi : 1;
+ uint64_t dif4 : 1;
+ uint64_t dif3 : 1;
+ uint64_t dif2 : 1;
+ uint64_t dif1 : 1;
+ uint64_t dif0 : 1;
+ uint64_t csm1 : 1;
+ uint64_t csm0 : 1;
+ uint64_t p2n1_p1 : 1;
+ uint64_t p2n1_p0 : 1;
+ uint64_t p2n1_n : 1;
+ uint64_t p2n1_c1 : 1;
+ uint64_t p2n1_c0 : 1;
+ uint64_t p2n0_p1 : 1;
+ uint64_t p2n0_p0 : 1;
+ uint64_t p2n0_n : 1;
+ uint64_t p2n0_c1 : 1;
+ uint64_t p2n0_c0 : 1;
+ uint64_t p2n0_co : 1;
+ uint64_t p2n0_no : 1;
+ uint64_t p2n0_po : 1;
+ uint64_t p2n1_co : 1;
+ uint64_t p2n1_no : 1;
+ uint64_t p2n1_po : 1;
+ uint64_t cpl_p1 : 1;
+ uint64_t cpl_p0 : 1;
+ uint64_t n2p1_o : 1;
+ uint64_t n2p1_c : 1;
+ uint64_t n2p0_o : 1;
+ uint64_t n2p0_c : 1;
+ uint64_t d4_pst : 1;
+ uint64_t d3_pst : 1;
+ uint64_t d2_pst : 1;
+ uint64_t d1_pst : 1;
+ uint64_t d0_pst : 1;
+ uint64_t d4_mem : 1;
+ uint64_t d3_mem : 1;
+ uint64_t d2_mem : 1;
+ uint64_t d1_mem : 1;
+ uint64_t d0_mem : 1;
+ uint64_t pkt_s1 : 1;
+ uint64_t pkt_s0 : 1;
+ uint64_t pkt_i1 : 1;
+ uint64_t pkt_i0 : 1;
+ uint64_t pkt_out : 1;
+ uint64_t pkt_oif : 1;
+ uint64_t pkt_odf : 1;
+ uint64_t pkt_slm : 1;
+ uint64_t pkt_ind : 1;
+ uint64_t pkt_cntm : 1;
+ uint64_t pkt_imem : 1;
+ uint64_t pkt_pout : 1;
+ uint64_t pcsr_sl : 1;
+ uint64_t pcsr_id : 1;
+ uint64_t pcsr_cnt : 1;
+ uint64_t pcsr_im : 1;
+ uint64_t pcsr_int : 1;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } cn56xxp1;
+};
+typedef union cvmx_npei_bist_status cvmx_npei_bist_status_t;
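+
+/* Editorial sketch (not part of the imported SDK header): decoding a BIST
+ * result with the union above. The raw value is assumed to come from a CSR
+ * read elsewhere in the SDK; a set bit reports a memory that failed BIST,
+ * so a fully passing part is expected to read as all zeros. */
+#if 0 /* illustration only */
+static int npei_example_bist_failed(uint64_t raw)
+{
+ cvmx_npei_bist_status_t bist;
+ bist.u64 = raw;
+ if (bist.s.msi)
+ cvmx_warn("NPEI MSI memory map failed BIST\n");
+ if (bist.s.ncb_cmd)
+ cvmx_warn("NPEI NCB outbound command memory failed BIST\n");
+ return raw != 0; /* any set bit indicates a failure */
+}
+#endif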
+
+/**
+ * cvmx_npei_bist_status2
+ *
+ * NPEI_BIST_STATUS2 = NPEI's BIST Status Register 2
+ *
+ * Results from BIST runs of NPEI's memories.
+ */
+union cvmx_npei_bist_status2 {
+ uint64_t u64;
+ struct cvmx_npei_bist_status2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t prd_tag : 1; /**< BIST Status for DMA PCIE RD Tag MEM */
+ uint64_t prd_st0 : 1; /**< BIST Status for DMA PCIE RD state MEM 0 */
+ uint64_t prd_st1 : 1; /**< BIST Status for DMA PCIE RD state MEM 1 */
+ uint64_t prd_err : 1; /**< BIST Status for DMA PCIE RD ERR state MEM */
+ uint64_t nrd_st : 1; /**< BIST Status for DMA L2C RD state MEM */
+ uint64_t nwe_st : 1; /**< BIST Status for DMA L2C WR state MEM */
+ uint64_t nwe_wr0 : 1; /**< BIST Status for DMA L2C WR MEM 0 */
+ uint64_t nwe_wr1 : 1; /**< BIST Status for DMA L2C WR MEM 1 */
+ uint64_t pkt_rd : 1; /**< BIST Status for Inbound PKT MEM */
+ uint64_t psc_p0 : 1; /**< BIST Status for PSC TLP 0 MEM */
+ uint64_t psc_p1 : 1; /**< BIST Status for PSC TLP 1 MEM */
+ uint64_t pkt_gd : 1; /**< BIST Status for PKT OUTB Gather Data FIFO */
+ uint64_t pkt_gl : 1; /**< BIST Status for PKT_OUTB Gather List FIFO */
+ uint64_t pkt_blk : 1; /**< BIST Status for PKT OUTB Blocked FIFO */
+#else
+ uint64_t pkt_blk : 1;
+ uint64_t pkt_gl : 1;
+ uint64_t pkt_gd : 1;
+ uint64_t psc_p1 : 1;
+ uint64_t psc_p0 : 1;
+ uint64_t pkt_rd : 1;
+ uint64_t nwe_wr1 : 1;
+ uint64_t nwe_wr0 : 1;
+ uint64_t nwe_st : 1;
+ uint64_t nrd_st : 1;
+ uint64_t prd_err : 1;
+ uint64_t prd_st1 : 1;
+ uint64_t prd_st0 : 1;
+ uint64_t prd_tag : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_npei_bist_status2_s cn52xx;
+ struct cvmx_npei_bist_status2_s cn56xx;
+};
+typedef union cvmx_npei_bist_status2 cvmx_npei_bist_status2_t;
+
+/**
+ * cvmx_npei_ctl_port0
+ *
+ * NPEI_CTL_PORT0 = NPEI's Control Port 0
+ *
+ * Contains control for access to Port0
+ */
+union cvmx_npei_ctl_port0 {
+ uint64_t u64;
+ struct cvmx_npei_ctl_port0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t waitl_com : 1; /**< When set to '1', causes the NPI to wait for a commit
+ from the L2C before sending additional completions
+ to the L2C from the PCIe.
+ Set this for more conservative behavior. Clear
+ this for more aggressive, higher-performance
+ behavior */
+ uint64_t intd : 1; /**< When '0' Intd wire asserted. Before mapping. */
+ uint64_t intc : 1; /**< When '0' Intc wire asserted. Before mapping. */
+ uint64_t intb : 1; /**< When '0' Intb wire asserted. Before mapping. */
+ uint64_t inta : 1; /**< When '0' Inta wire asserted. Before mapping. */
+ uint64_t intd_map : 2; /**< Maps INTD to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t intc_map : 2; /**< Maps INTC to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t intb_map : 2; /**< Maps INTB to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t inta_map : 2; /**< Maps INTA to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t ctlp_ro : 1; /**< Relaxed ordering enable for Completion TLPS. */
+ uint64_t reserved_6_6 : 1;
+ uint64_t ptlp_ro : 1; /**< Relaxed ordering enable for Posted TLPS. */
+ uint64_t bar2_enb : 1; /**< When set to '1', BAR2 is enabled and will respond; when
+ clear '0', BAR2 accesses will cause UR responses. */
+ uint64_t bar2_esx : 2; /**< Value will be XORed with pci-address[37:36] to
+ determine the endian swap mode. */
+ uint64_t bar2_cax : 1; /**< Value will be XORed with pcie-address[38] to
+ determine the L2 cache attribute.
+ Not cached in L2 if XOR result is 1 */
+ uint64_t wait_com : 1; /**< When set to '1', causes the NPI to wait for a commit
+ from the L2C before sending additional stores to
+ the L2C from the PCIe.
+ Most applications will not notice a difference, so
+ they should not set this bit. Setting the bit is more
+ conservative on ordering but lowers performance */
+#else
+ uint64_t wait_com : 1;
+ uint64_t bar2_cax : 1;
+ uint64_t bar2_esx : 2;
+ uint64_t bar2_enb : 1;
+ uint64_t ptlp_ro : 1;
+ uint64_t reserved_6_6 : 1;
+ uint64_t ctlp_ro : 1;
+ uint64_t inta_map : 2;
+ uint64_t intb_map : 2;
+ uint64_t intc_map : 2;
+ uint64_t intd_map : 2;
+ uint64_t inta : 1;
+ uint64_t intb : 1;
+ uint64_t intc : 1;
+ uint64_t intd : 1;
+ uint64_t waitl_com : 1;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } s;
+ struct cvmx_npei_ctl_port0_s cn52xx;
+ struct cvmx_npei_ctl_port0_s cn52xxp1;
+ struct cvmx_npei_ctl_port0_s cn56xx;
+ struct cvmx_npei_ctl_port0_s cn56xxp1;
+};
+typedef union cvmx_npei_ctl_port0 cvmx_npei_ctl_port0_t;
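+
+/* Editorial sketch (not part of the imported SDK header), assuming only the
+ * union above: encoding a Port 0 control value that routes all four INTx
+ * wires to INTA (map value 00 per the field comments) and enables BAR2.
+ * A real caller would read-modify-write the live register instead. */
+#if 0 /* illustration only */
+static uint64_t npei_example_ctl_port0(uint64_t current)
+{
+ cvmx_npei_ctl_port0_t ctl;
+ ctl.u64 = current;
+ ctl.s.inta_map = 0; /* INTA -> INTA */
+ ctl.s.intb_map = 0; /* INTB -> INTA */
+ ctl.s.intc_map = 0; /* INTC -> INTA */
+ ctl.s.intd_map = 0; /* INTD -> INTA */
+ ctl.s.bar2_enb = 1; /* respond to BAR2 instead of returning UR */
+ return ctl.u64;
+}
+#endif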
+
+/**
+ * cvmx_npei_ctl_port1
+ *
+ * NPEI_CTL_PORT1 = NPEI's Control Port1
+ *
+ * Contains control for access to Port1
+ */
+union cvmx_npei_ctl_port1 {
+ uint64_t u64;
+ struct cvmx_npei_ctl_port1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t waitl_com : 1; /**< When set to '1', causes the NPI to wait for a commit
+ from the L2C before sending additional completions
+ to the L2C from the PCIe.
+ Set this for more conservative behavior. Clear
+ this for more aggressive, higher-performance behavior */
+ uint64_t intd : 1; /**< When '0' Intd wire asserted. Before mapping. */
+ uint64_t intc : 1; /**< When '0' Intc wire asserted. Before mapping. */
+ uint64_t intb : 1; /**< When '0' Intb wire asserted. Before mapping. */
+ uint64_t inta : 1; /**< When '0' Inta wire asserted. Before mapping. */
+ uint64_t intd_map : 2; /**< Maps INTD to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t intc_map : 2; /**< Maps INTC to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t intb_map : 2; /**< Maps INTB to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t inta_map : 2; /**< Maps INTA to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t ctlp_ro : 1; /**< Relaxed ordering enable for Completion TLPS. */
+ uint64_t reserved_6_6 : 1;
+ uint64_t ptlp_ro : 1; /**< Relaxed ordering enable for Posted TLPS. */
+ uint64_t bar2_enb : 1; /**< When set to '1', BAR2 is enabled and will respond; when
+ clear '0', BAR2 accesses will cause UR responses. */
+ uint64_t bar2_esx : 2; /**< Value will be XORed with pci-address[37:36] to
+ determine the endian swap mode. */
+ uint64_t bar2_cax : 1; /**< Value will be XORed with pcie-address[38] to
+ determine the L2 cache attribute.
+ Not cached in L2 if XOR result is 1 */
+ uint64_t wait_com : 1; /**< When set to '1', causes the NPI to wait for a commit
+ from the L2C before sending additional stores to
+ the L2C from the PCIe.
+ Most applications will not notice a difference, so
+ they should not set this bit. Setting the bit is more
+ conservative on ordering but lowers performance */
+#else
+ uint64_t wait_com : 1;
+ uint64_t bar2_cax : 1;
+ uint64_t bar2_esx : 2;
+ uint64_t bar2_enb : 1;
+ uint64_t ptlp_ro : 1;
+ uint64_t reserved_6_6 : 1;
+ uint64_t ctlp_ro : 1;
+ uint64_t inta_map : 2;
+ uint64_t intb_map : 2;
+ uint64_t intc_map : 2;
+ uint64_t intd_map : 2;
+ uint64_t inta : 1;
+ uint64_t intb : 1;
+ uint64_t intc : 1;
+ uint64_t intd : 1;
+ uint64_t waitl_com : 1;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } s;
+ struct cvmx_npei_ctl_port1_s cn52xx;
+ struct cvmx_npei_ctl_port1_s cn52xxp1;
+ struct cvmx_npei_ctl_port1_s cn56xx;
+ struct cvmx_npei_ctl_port1_s cn56xxp1;
+};
+typedef union cvmx_npei_ctl_port1 cvmx_npei_ctl_port1_t;
+
+/**
+ * cvmx_npei_ctl_status
+ *
+ * NPEI_CTL_STATUS = NPEI Control Status Register
+ *
+ * Contains control and status for NPEI. Writes to this register are not ordered with writes/reads to the PCIe Memory space.
+ * To ensure that a write has completed, the user must read the register before making an access (i.e. to PCIe memory space)
+ * that requires the value of this register to be updated.
+ */
+union cvmx_npei_ctl_status {
+ uint64_t u64;
+ struct cvmx_npei_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t p1_ntags : 6; /**< Number of tags available for PCIe Port1.
+ In RC mode 1 tag is needed for each outbound TLP
+ that requires a CPL TLP. In Endpoint mode the
+ number of tags required for a TLP request is
+ 1 per 64-bytes of CPL data + 1.
+ This field should only be written as part of the
+ reset sequence, before issuing any reads, CFGs, or
+ IO transactions from the core(s). */
+ uint64_t p0_ntags : 6; /**< Number of tags available for PCIe Port0.
+ In RC mode 1 tag is needed for each outbound TLP
+ that requires a CPL TLP. In Endpoint mode the
+ number of tags required for a TLP request is
+ 1 per 64-bytes of CPL data + 1.
+ This field should only be written as part of the
+ reset sequence, before issuing any reads, CFGs, or
+ IO transactions from the core(s). */
+ uint64_t cfg_rtry : 16; /**< The time x 0x10000 in core clocks to wait for a
+ CPL to a CFG RD that does not carry a Retry Status.
+ Until such time that the timeout occurs and Retry
+ Status is received for a CFG RD, the CFG Read
+ will be resent. A value of 0 disables retries and
+ treats a CPL Retry as a CPL UR. */
+ uint64_t ring_en : 1; /**< When '0' forces "relative Q position" received
+ from PKO to be zero, and replicates the back-
+ pressure indication for the first ring attached
+ to a PKO port across all the rings attached to a
+ PKO port. When '1' backpressure is on a per
+ port/ring basis. */
+ uint64_t lnk_rst : 1; /**< Set when PCIe Core 0 requests a link reset due to
+ a link down state. This bit is only reset on raw
+ reset so it can be read to determine whether
+ a reset occurred. The bit is cleared when a '1' is
+ written to this field. */
+ uint64_t arb : 1; /**< PCIe switch arbitration mode. '0' == fixed priority
+ NPEI, PCIe0, then PCIe1. '1' == round robin. */
+ uint64_t pkt_bp : 4; /**< Unused */
+ uint64_t host_mode : 1; /**< Host mode */
+ uint64_t chip_rev : 8; /**< The chip revision. */
+#else
+ uint64_t chip_rev : 8;
+ uint64_t host_mode : 1;
+ uint64_t pkt_bp : 4;
+ uint64_t arb : 1;
+ uint64_t lnk_rst : 1;
+ uint64_t ring_en : 1;
+ uint64_t cfg_rtry : 16;
+ uint64_t p0_ntags : 6;
+ uint64_t p1_ntags : 6;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_npei_ctl_status_s cn52xx;
+ struct cvmx_npei_ctl_status_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t p1_ntags : 6; /**< Number of tags available for PCIe Port1.
+ In RC mode 1 tag is needed for each outbound TLP
+ that requires a CPL TLP. In Endpoint mode the
+ number of tags required for a TLP request is
+ 1 per 64-bytes of CPL data + 1.
+ This field should only be written as part of the
+ reset sequence, before issuing any reads, CFGs, or
+ IO transactions from the core(s). */
+ uint64_t p0_ntags : 6; /**< Number of tags available for PCIe Port0.
+ In RC mode 1 tag is needed for each outbound TLP
+ that requires a CPL TLP. In Endpoint mode the
+ number of tags required for a TLP request is
+ 1 per 64-bytes of CPL data + 1.
+ This field should only be written as part of the
+ reset sequence, before issuing any reads, CFGs, or
+ IO transactions from the core(s). */
+ uint64_t cfg_rtry : 16; /**< The time x 0x10000 in core clocks to wait for a
+ CPL to a CFG RD that does not carry a Retry Status.
+ Until such time that the timeout occurs and Retry
+ Status is received for a CFG RD, the CFG Read
+ will be resent. A value of 0 disables retries and
+ treats a CPL Retry as a CPL UR. */
+ uint64_t reserved_15_15 : 1;
+ uint64_t lnk_rst : 1; /**< Set when PCIe Core 0 requests a link reset due to
+ a link down state. This bit is only reset on raw
+ reset so it can be read to determine whether
+ a reset occurred. The bit is cleared when a '1' is
+ written to this field. */
+ uint64_t arb : 1; /**< PCIe switch arbitration mode. '0' == fixed priority
+ NPEI, PCIe0, then PCIe1. '1' == round robin. */
+ uint64_t reserved_9_12 : 4;
+ uint64_t host_mode : 1; /**< Host mode */
+ uint64_t chip_rev : 8; /**< The chip revision. */
+#else
+ uint64_t chip_rev : 8;
+ uint64_t host_mode : 1;
+ uint64_t reserved_9_12 : 4;
+ uint64_t arb : 1;
+ uint64_t lnk_rst : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t cfg_rtry : 16;
+ uint64_t p0_ntags : 6;
+ uint64_t p1_ntags : 6;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_ctl_status_s cn56xx;
+ struct cvmx_npei_ctl_status_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t lnk_rst : 1; /**< Set when PCIe Core 0 requests a link reset due to
+ a link down state. This bit is only reset on raw
+ reset so it can be read to determine whether
+ a reset occurred. The bit is cleared when a '1' is
+ written to this field. */
+ uint64_t arb : 1; /**< PCIe switch arbitration mode. '0' == fixed priority
+ NPEI, PCIe0, then PCIe1. '1' == round robin. */
+ uint64_t pkt_bp : 4; /**< Unused */
+ uint64_t host_mode : 1; /**< Host mode */
+ uint64_t chip_rev : 8; /**< The chip revision. */
+#else
+ uint64_t chip_rev : 8;
+ uint64_t host_mode : 1;
+ uint64_t pkt_bp : 4;
+ uint64_t arb : 1;
+ uint64_t lnk_rst : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } cn56xxp1;
+};
+typedef union cvmx_npei_ctl_status cvmx_npei_ctl_status_t;
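+
+/* Editor's example (sketch, not vendor text): the NPEI_CTL_STATUS note above
+ * says writes to this register are not ordered with PCIe memory-space
+ * accesses, so software must read the register back before issuing an access
+ * that depends on the new value.  Assuming the SDK's cvmx_read_csr()/
+ * cvmx_write_csr() helpers and a CVMX_PEXP_NPEI_CTL_STATUS address macro
+ * (both defined elsewhere in this SDK):
+ *
+ *     cvmx_npei_ctl_status_t ctl;
+ *     ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
+ *     ctl.s.cfg_rtry = 0x20;                          // new CFG retry timeout
+ *     cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, ctl.u64);
+ *     (void)cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS); // read back to flush
+ *     // PCIe memory-space accesses may now rely on the new value.
+ */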
+
+/**
+ * cvmx_npei_ctl_status2
+ *
+ * NPEI_CTL_STATUS2 = NPEI's Control Status2 Register
+ *
+ * Contains control and status for NPEI.
+ * Writes to this register are not ordered with writes/reads to the PCI Memory space.
+ * To ensure that a write has completed the user must read the register before
+ * making an access(i.e. PCI memory space) that requires the value of this register to be updated.
+ */
+union cvmx_npei_ctl_status2 {
+ uint64_t u64;
+ struct cvmx_npei_ctl_status2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mps : 1; /**< Max Payload Size
+ 0 = 128B
+ 1 = 256B
+ Note: PCIE*_CFG030[MPS] must be set to the same
+ value for proper function. */
+ uint64_t mrrs : 3; /**< Max Read Request Size
+ 0 = 128B
+ 1 = 256B
+ 2 = 512B
+ 3 = 1024B
+ 4 = 2048B
+ 5 = 4096B
+ Note: This field must not exceed the desired
+ max read request size. This means this field
+ should not exceed PCIE*_CFG030[MRRS]. */
+ uint64_t c1_w_flt : 1; /**< When '1' enables the window filter for reads and
+ writes using the window registers.
+ PCIE-Port1.
+ Unfiltered writes are:
+ MIO, SubId0
+ MIO, SubId7
+ NPEI, SubId0
+ NPEI, SubId7
+ POW, SubId7
+ IPD, SubId7
+ USBN0, SubId7
+ Unfiltered Reads are:
+ MIO, SubId0
+ MIO, SubId7
+ NPEI, SubId0
+ NPEI, SubId7
+ POW, SubId1
+ POW, SubId2
+ POW, SubId3
+ POW, SubId7
+ IPD, SubId7
+ USBN0, SubId7 */
+ uint64_t c0_w_flt : 1; /**< When '1' enables the window filter for reads and
+ writes using the window registers.
+ PCIE-Port0.
+ Unfiltered writes are:
+ MIO, SubId0
+ MIO, SubId7
+ NPEI, SubId0
+ NPEI, SubId7
+ POW, SubId7
+ IPD, SubId7
+ USBN0, SubId7
+ Unfiltered Reads are:
+ MIO, SubId0
+ MIO, SubId7
+ NPEI, SubId0
+ NPEI, SubId7
+ POW, SubId1
+ POW, SubId2
+ POW, SubId3
+ POW, SubId7
+ IPD, SubId7
+ USBN0, SubId7 */
+ uint64_t c1_b1_s : 3; /**< PCIe-Port1, Bar1 Size. 1 == 64MB, 2 == 128MB,
+ 3 == 256MB, 4 == 512MB, 5 == 1024MB, 6 == 2048MB,
+ 0 and 7 are reserved. */
+ uint64_t c0_b1_s : 3; /**< PCIe-Port0, Bar1 Size. 1 == 64MB, 2 == 128MB,
+ 3 == 256MB, 4 == 512MB, 5 == 1024MB, 6 == 2048MB,
+ 0 and 7 are reserved. */
+ uint64_t c1_wi_d : 1; /**< When set '1' disables access to the Window
+ Registers from the PCIe-Port1. */
+ uint64_t c1_b0_d : 1; /**< When set '1' disables access from PCIe-Port1 to
+ BAR-0 address offsets: Less Than 0x270,
+ Greater than 0x270 AND less than 0x0520, 0x3BC0,
+ 0x3CD0. */
+ uint64_t c0_wi_d : 1; /**< When set '1' disables access to the Window
+ Registers from the PCIe-Port0. */
+ uint64_t c0_b0_d : 1; /**< When set '1' disables access from PCIe-Port0 to
+ BAR-0 address offsets: Less Than 0x270,
+ Greater than 0x270 AND less than 0x0520, 0x3BC0,
+ 0x3CD0. */
+#else
+ uint64_t c0_b0_d : 1;
+ uint64_t c0_wi_d : 1;
+ uint64_t c1_b0_d : 1;
+ uint64_t c1_wi_d : 1;
+ uint64_t c0_b1_s : 3;
+ uint64_t c1_b1_s : 3;
+ uint64_t c0_w_flt : 1;
+ uint64_t c1_w_flt : 1;
+ uint64_t mrrs : 3;
+ uint64_t mps : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_npei_ctl_status2_s cn52xx;
+ struct cvmx_npei_ctl_status2_s cn52xxp1;
+ struct cvmx_npei_ctl_status2_s cn56xx;
+ struct cvmx_npei_ctl_status2_s cn56xxp1;
+};
+typedef union cvmx_npei_ctl_status2 cvmx_npei_ctl_status2_t;
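+
+/* Editor's example (sketch, not vendor text): MPS and MRRS here must agree
+ * with the PCIe core configuration (PCIE*_CFG030) for proper operation.
+ * Assuming cvmx_read_csr()/cvmx_write_csr() and a CVMX_PEXP_NPEI_CTL_STATUS2
+ * address macro:
+ *
+ *     cvmx_npei_ctl_status2_t cs2;
+ *     cs2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
+ *     cs2.s.mps  = 0;  // 128B max payload; must match PCIE*_CFG030[MPS]
+ *     cs2.s.mrrs = 1;  // 256B max read request; must not exceed CFG030[MRRS]
+ *     cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, cs2.u64);
+ */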
+
+/**
+ * cvmx_npei_data_out_cnt
+ *
+ * NPEI_DATA_OUT_CNT = NPEI DATA OUT COUNT
+ *
+ * The EXEC data-out FIFO count and the data unload counter.
+ */
+union cvmx_npei_data_out_cnt {
+ uint64_t u64;
+ struct cvmx_npei_data_out_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t p1_ucnt : 16; /**< PCIE-Port1 Fifo Unload Count. This counter is
+ incremented by '1' every time a word is removed
+ from the Data Out FIFO, whose count is shown in
+ P1_FCNT. */
+ uint64_t p1_fcnt : 6; /**< PCIE-Port1 Data Out Fifo Count. Number of address
+ data words to be sent out the PCIe port presently
+ buffered in the FIFO. */
+ uint64_t p0_ucnt : 16; /**< PCIE-Port0 Fifo Unload Count. This counter is
+ incremented by '1' every time a word is removed
+ from the Data Out FIFO, whose count is shown in
+ P0_FCNT. */
+ uint64_t p0_fcnt : 6; /**< PCIE-Port0 Data Out Fifo Count. Number of address
+ data words to be sent out the PCIe port presently
+ buffered in the FIFO. */
+#else
+ uint64_t p0_fcnt : 6;
+ uint64_t p0_ucnt : 16;
+ uint64_t p1_fcnt : 6;
+ uint64_t p1_ucnt : 16;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_npei_data_out_cnt_s cn52xx;
+ struct cvmx_npei_data_out_cnt_s cn52xxp1;
+ struct cvmx_npei_data_out_cnt_s cn56xx;
+ struct cvmx_npei_data_out_cnt_s cn56xxp1;
+};
+typedef union cvmx_npei_data_out_cnt cvmx_npei_data_out_cnt_t;
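+
+/* Editor's example (sketch, not vendor text): P0_FCNT is the number of words
+ * currently buffered for port 0 and P0_UCNT counts words unloaded, so
+ * sampling UCNT twice gives the words drained over an interval (modulo 2^16).
+ * Assuming a CVMX_PEXP_NPEI_DATA_OUT_CNT address macro:
+ *
+ *     cvmx_npei_data_out_cnt_t a, b;
+ *     a.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
+ *     // ... wait, or poll until traffic quiesces ...
+ *     b.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
+ *     uint16_t drained = (uint16_t)(b.s.p0_ucnt - a.s.p0_ucnt);
+ */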
+
+/**
+ * cvmx_npei_dbg_data
+ *
+ * NPEI_DBG_DATA = NPEI Debug Data Register
+ *
+ * Value returned on the debug-data lines from the RSLs
+ */
+union cvmx_npei_dbg_data {
+ uint64_t u64;
+ struct cvmx_npei_dbg_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t qlm0_rev_lanes : 1; /**< Lane reversal for PCIe port 0 */
+ uint64_t reserved_25_26 : 2;
+ uint64_t qlm1_spd : 2; /**< Sets the QLM1 frequency
+ 0=1.25 Gbaud
+ 1=2.5 Gbaud
+ 2=3.125 Gbaud
+ 3=3.75 Gbaud */
+ uint64_t c_mul : 5; /**< PLL_MUL pins sampled at DCOK assertion
+ Core frequency = 50MHz*C_MUL */
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t c_mul : 5;
+ uint64_t qlm1_spd : 2;
+ uint64_t reserved_25_26 : 2;
+ uint64_t qlm0_rev_lanes : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_npei_dbg_data_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t qlm0_link_width : 1; /**< Link width of PCIe port 0
+ 0 = PCIe port 0 is 2 lanes,
+ 2 lane PCIe port 1 exists
+ 1 = PCIe port 0 is 4 lanes,
+ PCIe port 1 does not exist */
+ uint64_t qlm0_rev_lanes : 1; /**< Lane reversal for PCIe port 0 */
+ uint64_t qlm1_mode : 2; /**< Sets the QLM1 Mode
+ 0=Reserved
+ 1=XAUI
+ 2=SGMII
+ 3=PICMG */
+ uint64_t qlm1_spd : 2; /**< Sets the QLM1 frequency
+ 0=1.25 Gbaud
+ 1=2.5 Gbaud
+ 2=3.125 Gbaud
+ 3=3.75 Gbaud */
+ uint64_t c_mul : 5; /**< PLL_MUL pins sampled at DCOK assertion
+ Core frequency = 50MHz*C_MUL */
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t c_mul : 5;
+ uint64_t qlm1_spd : 2;
+ uint64_t qlm1_mode : 2;
+ uint64_t qlm0_rev_lanes : 1;
+ uint64_t qlm0_link_width : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn52xx;
+ struct cvmx_npei_dbg_data_cn52xx cn52xxp1;
+ struct cvmx_npei_dbg_data_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t qlm2_rev_lanes : 1; /**< Lane reversal for PCIe port 1 */
+ uint64_t qlm0_rev_lanes : 1; /**< Lane reversal for PCIe port 0 */
+ uint64_t qlm3_spd : 2; /**< Sets the QLM3 frequency
+ 0=1.25 Gbaud
+ 1=2.5 Gbaud
+ 2=3.125 Gbaud
+ 3=3.75 Gbaud */
+ uint64_t qlm1_spd : 2; /**< Sets the QLM1 frequency
+ 0=1.25 Gbaud
+ 1=2.5 Gbaud
+ 2=3.125 Gbaud
+ 3=3.75 Gbaud */
+ uint64_t c_mul : 5; /**< PLL_MUL pins sampled at DCOK assertion
+ Core frequency = 50MHz*C_MUL */
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t c_mul : 5;
+ uint64_t qlm1_spd : 2;
+ uint64_t qlm3_spd : 2;
+ uint64_t qlm0_rev_lanes : 1;
+ uint64_t qlm2_rev_lanes : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn56xx;
+ struct cvmx_npei_dbg_data_cn56xx cn56xxp1;
+};
+typedef union cvmx_npei_dbg_data cvmx_npei_dbg_data_t;
+
+/**
+ * cvmx_npei_dbg_select
+ *
+ * NPEI_DBG_SELECT = Debug Select Register
+ *
+ * Contains the debug select value last written to the RSLs.
+ */
+union cvmx_npei_dbg_select {
+ uint64_t u64;
+ struct cvmx_npei_dbg_select_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t dbg_sel : 16; /**< When this register is written its value is sent to
+ all RSLs. */
+#else
+ uint64_t dbg_sel : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_npei_dbg_select_s cn52xx;
+ struct cvmx_npei_dbg_select_s cn52xxp1;
+ struct cvmx_npei_dbg_select_s cn56xx;
+ struct cvmx_npei_dbg_select_s cn56xxp1;
+};
+typedef union cvmx_npei_dbg_select cvmx_npei_dbg_select_t;
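+
+/* Editor's example (sketch, not vendor text): NPEI_DBG_SELECT and
+ * NPEI_DBG_DATA are used as a pair: write the select value, then read the
+ * data returned on the RSL debug lines.  Assuming CVMX_PEXP_NPEI_DBG_SELECT
+ * and CVMX_PEXP_NPEI_DBG_DATA address macros (the select value 0x1234 below
+ * is a placeholder):
+ *
+ *     cvmx_npei_dbg_select_t sel;
+ *     cvmx_npei_dbg_data_t dbg;
+ *     sel.u64 = 0;
+ *     sel.s.dbg_sel = 0x1234;            // broadcast to all RSLs on write
+ *     cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, sel.u64);
+ *     dbg.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ *     // dbg.s.data now holds the 17 debug-data bits.
+ */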
+
+/**
+ * cvmx_npei_dma#_counts
+ *
+ * NPEI_DMA[0..4]_COUNTS = DMA Instruction Counts
+ *
+ * Values for determining the number of instructions for DMA[0..4] in the NPEI.
+ */
+union cvmx_npei_dmax_counts {
+ uint64_t u64;
+ struct cvmx_npei_dmax_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t fcnt : 7; /**< Number of words in the Instruction FIFO. */
+ uint64_t dbell : 32; /**< Number of available words of Instructions to read. */
+#else
+ uint64_t dbell : 32;
+ uint64_t fcnt : 7;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } s;
+ struct cvmx_npei_dmax_counts_s cn52xx;
+ struct cvmx_npei_dmax_counts_s cn52xxp1;
+ struct cvmx_npei_dmax_counts_s cn56xx;
+ struct cvmx_npei_dmax_counts_s cn56xxp1;
+};
+typedef union cvmx_npei_dmax_counts cvmx_npei_dmax_counts_t;
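+
+/* Editor's example (sketch, not vendor text): DBELL reflects how many
+ * instruction words the engine has not yet consumed, so software can poll it
+ * to wait for a queue to drain.  Assuming a CVMX_PEXP_NPEI_DMAX_COUNTS(engine)
+ * address macro:
+ *
+ *     cvmx_npei_dmax_counts_t c;
+ *     do {
+ *         c.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS(0));
+ *     } while (c.s.dbell != 0);   // engine 0 still has words outstanding
+ */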
+
+/**
+ * cvmx_npei_dma#_dbell
+ *
+ * NPEI_DMA_DBELL[0..4] = DMA Door Bell
+ *
+ * The door bell register for DMA[0..4] queue.
+ */
+union cvmx_npei_dmax_dbell {
+ uint32_t u32;
+ struct cvmx_npei_dmax_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t dbell : 16; /**< The value written to this register is added to the
+ number of 8-byte words to be read and processed for
+ the low-priority DMA queue. */
+#else
+ uint32_t dbell : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_npei_dmax_dbell_s cn52xx;
+ struct cvmx_npei_dmax_dbell_s cn52xxp1;
+ struct cvmx_npei_dmax_dbell_s cn56xx;
+ struct cvmx_npei_dmax_dbell_s cn56xxp1;
+};
+typedef union cvmx_npei_dmax_dbell cvmx_npei_dmax_dbell_t;
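+
+/* Editor's example (sketch, not vendor text): after appending instructions to
+ * a DMA queue, software rings the doorbell with the number of 8-byte words
+ * just added.  The register is 32 bits wide; a 32-bit CSR write helper such
+ * as the SDK's cvmx_write64_uint32() is assumed, along with a
+ * CVMX_PEXP_NPEI_DMAX_DBELL(engine) address macro:
+ *
+ *     cvmx_npei_dmax_dbell_t db;
+ *     db.u32 = 0;
+ *     db.s.dbell = words_added;  // 8-byte words appended to engine 0's queue
+ *     cvmx_write64_uint32(CVMX_PEXP_NPEI_DMAX_DBELL(0), db.u32);
+ */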
+
+/**
+ * cvmx_npei_dma#_ibuff_saddr
+ *
+ * NPEI_DMA[0..4]_IBUFF_SADDR = DMA Instruction Buffer Starting Address
+ *
+ * The address to start reading Instructions from for DMA[0..4].
+ */
+union cvmx_npei_dmax_ibuff_saddr {
+ uint64_t u64;
+ struct cvmx_npei_dmax_ibuff_saddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t idle : 1; /**< DMA Engine IDLE state */
+ uint64_t saddr : 29; /**< The 128-byte-aligned starting address to read the
+ first instruction. SADDR is address bits 35:7 of
+ the first instruction's address. */
+ uint64_t reserved_0_6 : 7;
+#else
+ uint64_t reserved_0_6 : 7;
+ uint64_t saddr : 29;
+ uint64_t idle : 1;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } s;
+ struct cvmx_npei_dmax_ibuff_saddr_s cn52xx;
+ struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t saddr : 29; /**< The 128-byte-aligned starting address to read the
+ first instruction. SADDR is address bits 35:7 of
+ the first instruction's address. */
+ uint64_t reserved_0_6 : 7;
+#else
+ uint64_t reserved_0_6 : 7;
+ uint64_t saddr : 29;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_dmax_ibuff_saddr_s cn56xx;
+ struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 cn56xxp1;
+};
+typedef union cvmx_npei_dmax_ibuff_saddr cvmx_npei_dmax_ibuff_saddr_t;
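+
+/* Editor's example (sketch, not vendor text): SADDR holds address bits 35:7,
+ * so a 128-byte-aligned physical instruction-buffer address is programmed by
+ * shifting it right by 7.  Assuming a CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(engine)
+ * address macro and a caller-supplied ibuf_phys:
+ *
+ *     cvmx_npei_dmax_ibuff_saddr_t sa;
+ *     sa.u64 = 0;
+ *     sa.s.saddr = (ibuf_phys >> 7) & 0x1fffffffull;  // bits 35:7
+ *     cvmx_write_csr(CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(0), sa.u64);
+ */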
+
+/**
+ * cvmx_npei_dma#_naddr
+ *
+ * NPEI_DMA[0..4]_NADDR = DMA Next Ichunk Address
+ *
+ * The place the NPEI will read the next Ichunk data from. This is valid when the state is 0.
+ */
+union cvmx_npei_dmax_naddr {
+ uint64_t u64;
+ struct cvmx_npei_dmax_naddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< The next L2C address to read DMA# instructions
+ from. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_npei_dmax_naddr_s cn52xx;
+ struct cvmx_npei_dmax_naddr_s cn52xxp1;
+ struct cvmx_npei_dmax_naddr_s cn56xx;
+ struct cvmx_npei_dmax_naddr_s cn56xxp1;
+};
+typedef union cvmx_npei_dmax_naddr cvmx_npei_dmax_naddr_t;
+
+/**
+ * cvmx_npei_dma0_int_level
+ *
+ * NPEI_DMA0_INT_LEVEL = NPEI DMA0 Interrupt Level
+ *
+ * Thresholds for DMA count and timer interrupts for DMA0.
+ */
+union cvmx_npei_dma0_int_level {
+ uint64_t u64;
+ struct cvmx_npei_dma0_int_level_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t time : 32; /**< Whenever the DMA_CNT0 timer exceeds
+ this value, NPEI_INT_SUM[DTIME0] is set.
+ The DMA_CNT0 timer increments every core clock
+ whenever NPEI_DMA_CNTS[DMA0]!=0, and is cleared
+ when NPEI_INT_SUM[DTIME0] is written with one. */
+ uint64_t cnt : 32; /**< Whenever NPEI_DMA_CNTS[DMA0] exceeds this value,
+ NPEI_INT_SUM[DCNT0] is set. */
+#else
+ uint64_t cnt : 32;
+ uint64_t time : 32;
+#endif
+ } s;
+ struct cvmx_npei_dma0_int_level_s cn52xx;
+ struct cvmx_npei_dma0_int_level_s cn52xxp1;
+ struct cvmx_npei_dma0_int_level_s cn56xx;
+ struct cvmx_npei_dma0_int_level_s cn56xxp1;
+};
+typedef union cvmx_npei_dma0_int_level cvmx_npei_dma0_int_level_t;
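+
+/* Editor's example (sketch, not vendor text): programming the thresholds that
+ * drive NPEI_INT_SUM[DCNT0] and NPEI_INT_SUM[DTIME0].  Assuming a
+ * CVMX_PEXP_NPEI_DMA0_INT_LEVEL address macro; the threshold values are
+ * placeholders:
+ *
+ *     cvmx_npei_dma0_int_level_t lvl;
+ *     lvl.s.cnt  = 32;       // interrupt once 32 counts accumulate
+ *     lvl.s.time = 1000000;  // or once counts sit unserviced ~1M core clocks
+ *     cvmx_write_csr(CVMX_PEXP_NPEI_DMA0_INT_LEVEL, lvl.u64);
+ */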
+
+/**
+ * cvmx_npei_dma1_int_level
+ *
+ * NPEI_DMA1_INT_LEVEL = NPEI DMA1 Interrupt Level
+ *
+ * Thresholds for DMA count and timer interrupts for DMA1.
+ */
+union cvmx_npei_dma1_int_level {
+ uint64_t u64;
+ struct cvmx_npei_dma1_int_level_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t time : 32; /**< Whenever the DMA_CNT1 timer exceeds
+ this value, NPEI_INT_SUM[DTIME1] is set.
+ The DMA_CNT1 timer increments every core clock
+ whenever NPEI_DMA_CNTS[DMA1]!=0, and is cleared
+ when NPEI_INT_SUM[DTIME1] is written with one. */
+ uint64_t cnt : 32; /**< Whenever NPEI_DMA_CNTS[DMA1] exceeds this value,
+ NPEI_INT_SUM[DCNT1] is set. */
+#else
+ uint64_t cnt : 32;
+ uint64_t time : 32;
+#endif
+ } s;
+ struct cvmx_npei_dma1_int_level_s cn52xx;
+ struct cvmx_npei_dma1_int_level_s cn52xxp1;
+ struct cvmx_npei_dma1_int_level_s cn56xx;
+ struct cvmx_npei_dma1_int_level_s cn56xxp1;
+};
+typedef union cvmx_npei_dma1_int_level cvmx_npei_dma1_int_level_t;
+
+/**
+ * cvmx_npei_dma_cnts
+ *
+ * NPEI_DMA_CNTS = NPEI DMA Count
+ *
+ * The DMA Count values for DMA0 and DMA1.
+ */
+union cvmx_npei_dma_cnts {
+ uint64_t u64;
+ struct cvmx_npei_dma_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dma1 : 32; /**< The DMA counter 1.
+ Writing this field will cause the written value to
+ be subtracted from DMA1. SW should use a 4-byte
+ write to access this field so as not to change the
+ value of other fields in this register.
+ HW will optionally increment this field after
+ it completes an OUTBOUND or EXTERNAL-ONLY DMA
+ instruction. These increments may cause interrupts.
+ Refer to NPEI_DMA1_INT_LEVEL and
+ NPEI_INT_SUM[DCNT1,DTIME1]. */
+ uint64_t dma0 : 32; /**< The DMA counter 0.
+ Writing this field will cause the written value to
+ be subtracted from DMA0. SW should use a 4-byte
+ write to access this field so as not to change the
+ value of other fields in this register.
+ HW will optionally increment this field after
+ it completes an OUTBOUND or EXTERNAL-ONLY DMA
+ instruction. These increments may cause interrupts.
+ Refer to NPEI_DMA0_INT_LEVEL and
+ NPEI_INT_SUM[DCNT0,DTIME0]. */
+#else
+ uint64_t dma0 : 32;
+ uint64_t dma1 : 32;
+#endif
+ } s;
+ struct cvmx_npei_dma_cnts_s cn52xx;
+ struct cvmx_npei_dma_cnts_s cn52xxp1;
+ struct cvmx_npei_dma_cnts_s cn56xx;
+ struct cvmx_npei_dma_cnts_s cn56xxp1;
+};
+typedef union cvmx_npei_dma_cnts cvmx_npei_dma_cnts_t;
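+
+/* Editor's example (sketch, not vendor text): the description above says SW
+ * should use a 4-byte write so that decrementing one counter leaves the other
+ * untouched.  A sketch using the SDK's 32-bit write helper; which 4-byte
+ * offset maps to DMA0 depends on how the register is byte-addressed, so the
+ * base-address write below is an assumption:
+ *
+ *     cvmx_npei_dma_cnts_t cnts;
+ *     cnts.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMA_CNTS);
+ *     uint32_t handled = cnts.s.dma0;      // completions serviced this pass
+ *     cvmx_write64_uint32(CVMX_PEXP_NPEI_DMA_CNTS, handled); // subtracts
+ */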
+
+/**
+ * cvmx_npei_dma_control
+ *
+ * NPEI_DMA_CONTROL = DMA Control Register
+ *
+ * Controls operation of the DMA IN/OUT.
+ */
+union cvmx_npei_dma_control {
+ uint64_t u64;
+ struct cvmx_npei_dma_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t p_32b_m : 1; /**< DMA PCIe 32-bit word read disable bit.
+ When '0' the feature is enabled. */
+ uint64_t dma4_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t dma3_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t dma2_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t dma1_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t dma0_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t b0_lend : 1; /**< When set '1' and the NPEI is in the mode to write
+ 0 to L2C memory when a DMA is done, the address
+ to be written to will be treated as a Little
+ Endian address. */
+ uint64_t dwb_denb : 1; /**< When set '1' the NPEI will send a value in the DWB
+ field for a free page operation for the memory
+ that contained the data. */
+ uint64_t dwb_ichk : 9; /**< When Instruction Chunks for DMA operations are freed
+ this value is used for the DWB field of the
+ operation. */
+ uint64_t fpa_que : 3; /**< The FPA queue that the instruction-chunk page will
+ be returned to when used. */
+ uint64_t o_add1 : 1; /**< When set '1', 1 will be added to the DMA counters;
+ if '0', the number of bytes in the DMA transfer
+ will be added to the count register. */
+ uint64_t o_ro : 1; /**< Relaxed Ordering Mode for DMA. */
+ uint64_t o_ns : 1; /**< Nosnoop For DMA. */
+ uint64_t o_es : 2; /**< Endian Swap Mode for DMA. */
+ uint64_t o_mode : 1; /**< Select PCI_POINTER MODE to be used.
+ '1' use pointer values for address and register
+ values for RO, ES, and NS, '0' use register
+ values for address and pointer values for
+ RO, ES, and NS. */
+ uint64_t csize : 14; /**< The size in words of the DMA Instruction Chunk.
+ This value should only be written once. After
+ writing this value a new value will not be
+ recognized until the end of the DMA I-Chunk is
+ reached. */
+#else
+ uint64_t csize : 14;
+ uint64_t o_mode : 1;
+ uint64_t o_es : 2;
+ uint64_t o_ns : 1;
+ uint64_t o_ro : 1;
+ uint64_t o_add1 : 1;
+ uint64_t fpa_que : 3;
+ uint64_t dwb_ichk : 9;
+ uint64_t dwb_denb : 1;
+ uint64_t b0_lend : 1;
+ uint64_t dma0_enb : 1;
+ uint64_t dma1_enb : 1;
+ uint64_t dma2_enb : 1;
+ uint64_t dma3_enb : 1;
+ uint64_t dma4_enb : 1;
+ uint64_t p_32b_m : 1;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_npei_dma_control_s cn52xx;
+ struct cvmx_npei_dma_control_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t dma3_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t dma2_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t dma1_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t dma0_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t b0_lend : 1; /**< When set '1' and the NPEI is in the mode to write
+ 0 to L2C memory when a DMA is done, the address
+ to be written to will be treated as a Little
+ Endian address. */
+ uint64_t dwb_denb : 1; /**< When set '1' the NPEI will send a value in the DWB
+ field for a free page operation for the memory
+ that contained the data. */
+ uint64_t dwb_ichk : 9; /**< When Instruction Chunks for DMA operations are freed
+ this value is used for the DWB field of the
+ operation. */
+ uint64_t fpa_que : 3; /**< The FPA queue that the instruction-chunk page will
+ be returned to when used. */
+ uint64_t o_add1 : 1; /**< When set '1', 1 will be added to the DMA counters;
+ if '0', the number of bytes in the DMA transfer
+ will be added to the count register. */
+ uint64_t o_ro : 1; /**< Relaxed Ordering Mode for DMA. */
+ uint64_t o_ns : 1; /**< Nosnoop For DMA. */
+ uint64_t o_es : 2; /**< Endian Swap Mode for DMA. */
+ uint64_t o_mode : 1; /**< Select PCI_POINTER MODE to be used.
+ '1' use pointer values for address and register
+ values for RO, ES, and NS, '0' use register
+ values for address and pointer values for
+ RO, ES, and NS. */
+ uint64_t csize : 14; /**< The size in words of the DMA Instruction Chunk.
+ This value should only be written once. After
+ writing this value a new value will not be
+ recognized until the end of the DMA I-Chunk is
+ reached. */
+#else
+ uint64_t csize : 14;
+ uint64_t o_mode : 1;
+ uint64_t o_es : 2;
+ uint64_t o_ns : 1;
+ uint64_t o_ro : 1;
+ uint64_t o_add1 : 1;
+ uint64_t fpa_que : 3;
+ uint64_t dwb_ichk : 9;
+ uint64_t dwb_denb : 1;
+ uint64_t b0_lend : 1;
+ uint64_t dma0_enb : 1;
+ uint64_t dma1_enb : 1;
+ uint64_t dma2_enb : 1;
+ uint64_t dma3_enb : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_dma_control_s cn56xx;
+ struct cvmx_npei_dma_control_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t dma4_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t dma3_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t dma2_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t dma1_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t dma0_enb : 1; /**< DMA# enable. Enables the operation of the DMA
+ engine. After being enabled a DMA engine should not
+ be disabled while processing instructions. */
+ uint64_t b0_lend : 1; /**< When set '1' and the NPEI is in the mode to write
+ 0 to L2C memory when a DMA is done, the address
+ to be written to will be treated as a Little
+ Endian address. */
+ uint64_t dwb_denb : 1; /**< When set '1' the NPEI will send a value in the DWB
+ field for a free page operation for the memory
+ that contained the data. */
+ uint64_t dwb_ichk : 9; /**< When Instruction Chunks for DMA operations are freed
+ this value is used for the DWB field of the
+ operation. */
+ uint64_t fpa_que : 3; /**< The FPA queue that the instruction-chunk page will
+ be returned to when used. */
+ uint64_t o_add1 : 1; /**< When set '1', 1 will be added to the DMA counters;
+ if '0', the number of bytes in the DMA transfer
+ will be added to the count register. */
+ uint64_t o_ro : 1; /**< Relaxed Ordering Mode for DMA. */
+ uint64_t o_ns : 1; /**< Nosnoop For DMA. */
+ uint64_t o_es : 2; /**< Endian Swap Mode for DMA. */
+ uint64_t o_mode : 1; /**< Select PCI_POINTER MODE to be used.
+ '1' use pointer values for address and register
+ values for RO, ES, and NS, '0' use register
+ values for address and pointer values for
+ RO, ES, and NS. */
+ uint64_t csize : 14; /**< The size in words of the DMA Instruction Chunk.
+ This value should only be written once. After
+ writing this value a new value will not be
+ recognized until the end of the DMA I-Chunk is
+ reached. */
+#else
+ uint64_t csize : 14;
+ uint64_t o_mode : 1;
+ uint64_t o_es : 2;
+ uint64_t o_ns : 1;
+ uint64_t o_ro : 1;
+ uint64_t o_add1 : 1;
+ uint64_t fpa_que : 3;
+ uint64_t dwb_ichk : 9;
+ uint64_t dwb_denb : 1;
+ uint64_t b0_lend : 1;
+ uint64_t dma0_enb : 1;
+ uint64_t dma1_enb : 1;
+ uint64_t dma2_enb : 1;
+ uint64_t dma3_enb : 1;
+ uint64_t dma4_enb : 1;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } cn56xxp1;
+};
+typedef union cvmx_npei_dma_control cvmx_npei_dma_control_t;
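+
+/* Editor's example (sketch, not vendor text): a typical bring-up writes CSIZE
+ * once, selects the FPA pool for freed instruction chunks, and then enables
+ * the engines; per the field notes an engine should not be disabled while it
+ * is processing instructions.  Assuming a CVMX_PEXP_NPEI_DMA_CONTROL address
+ * macro; the CSIZE and FPA_QUE values are placeholders:
+ *
+ *     cvmx_npei_dma_control_t ctl;
+ *     ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMA_CONTROL);
+ *     ctl.s.csize    = 1024;   // I-chunk size in words; write only once
+ *     ctl.s.fpa_que  = 0;      // FPA queue that receives freed chunk pages
+ *     ctl.s.dma0_enb = 1;
+ *     ctl.s.dma1_enb = 1;
+ *     cvmx_write_csr(CVMX_PEXP_NPEI_DMA_CONTROL, ctl.u64);
+ */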
+
+/**
+ * cvmx_npei_dma_pcie_req_num
+ *
+ * NPEI_DMA_PCIE_REQ_NUM = NPEI DMA PCIE Outstanding Read Request Number
+ *
+ * Outstanding PCIE read request number for DMAs and Packet, maximum number is 16
+ */
+union cvmx_npei_dma_pcie_req_num {
+ uint64_t u64;
+ struct cvmx_npei_dma_pcie_req_num_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dma_arb : 1; /**< DMA_PKT Read Request Arbitration
+ - 1: DMA0-4 and PKT are round robin. i.e.
+ DMA0-DMA1-DMA2-DMA3-DMA4-PKT...
+ - 0: DMA0-4 are round robin, pkt gets selected
+ half the time. i.e.
+ DMA0-PKT-DMA1-PKT-DMA2-PKT-DMA3-PKT-DMA4-PKT... */
+ uint64_t reserved_53_62 : 10;
+ uint64_t pkt_cnt : 5; /**< PKT outstanding PCIE Read Request Number for each
+ PCIe port
+ When PKT_CNT=x, for each PCIe port, the number
+ of outstanding PCIe memory space reads by the PCIe
+ packet input/output will not exceed x.
+ Valid Number is between 1 and 16 */
+ uint64_t reserved_45_47 : 3;
+ uint64_t dma4_cnt : 5; /**< DMA4 outstanding PCIE Read Request Number
+ When DMA4_CNT=x, the number of outstanding PCIe
+ memory space reads by the PCIe DMA engine 4
+ will not exceed x.
+ Valid Number is between 1 and 16 */
+ uint64_t reserved_37_39 : 3;
+ uint64_t dma3_cnt : 5; /**< DMA3 outstanding PCIE Read Request Number
+ When DMA3_CNT=x, the number of outstanding PCIe
+ memory space reads by the PCIe DMA engine 3
+ will not exceed x.
+ Valid Number is between 1 and 16 */
+ uint64_t reserved_29_31 : 3;
+ uint64_t dma2_cnt : 5; /**< DMA2 outstanding PCIE Read Request Number
+ When DMA2_CNT=x, the number of outstanding PCIe
+ memory space reads by the PCIe DMA engine 2
+ will not exceed x.
+ Valid Number is between 1 and 16 */
+ uint64_t reserved_21_23 : 3;
+ uint64_t dma1_cnt : 5; /**< DMA1 outstanding PCIE Read Request Number
+ When DMA1_CNT=x, the number of outstanding PCIe
+ memory space reads by the PCIe DMA engine 1
+ will not exceed x.
+ Valid Number is between 1 and 16 */
+ uint64_t reserved_13_15 : 3;
+ uint64_t dma0_cnt : 5; /**< DMA0 outstanding PCIE Read Request Number
+ When DMA0_CNT=x, the number of outstanding PCIe
+ memory space reads by the PCIe DMA engine 0
+ will not exceed x.
+ Valid Number is between 1 and 16 */
+ uint64_t reserved_5_7 : 3;
+ uint64_t dma_cnt : 5; /**< Total outstanding PCIE Read Request Number for each
+ PCIe port
+ When DMA_CNT=x, for each PCIe port, the total
+ number of outstanding PCIe memory space reads
+ by the PCIe DMA engines and packet input/output
+ will not exceed x.
+ Valid Number is between 1 and 16 */
+#else
+ uint64_t dma_cnt : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t dma0_cnt : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t dma1_cnt : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t dma2_cnt : 5;
+ uint64_t reserved_29_31 : 3;
+ uint64_t dma3_cnt : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t dma4_cnt : 5;
+ uint64_t reserved_45_47 : 3;
+ uint64_t pkt_cnt : 5;
+ uint64_t reserved_53_62 : 10;
+ uint64_t dma_arb : 1;
+#endif
+ } s;
+ struct cvmx_npei_dma_pcie_req_num_s cn52xx;
+ struct cvmx_npei_dma_pcie_req_num_s cn56xx;
+};
+typedef union cvmx_npei_dma_pcie_req_num cvmx_npei_dma_pcie_req_num_t;
+
+/**
+ * cvmx_npei_dma_state1
+ *
+ * NPEI_DMA_STATE1 = NPI's DMA State 1
+ *
+ * Results from DMA state register 1
+ */
+union cvmx_npei_dma_state1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t d4_dwe : 8; /**< DMA4 PCIe Write State */
+ uint64_t d3_dwe : 8; /**< DMA3 PCIe Write State */
+ uint64_t d2_dwe : 8; /**< DMA2 PCIe Write State */
+ uint64_t d1_dwe : 8; /**< DMA1 PCIe Write State */
+ uint64_t d0_dwe : 8; /**< DMA0 PCIe Write State */
+#else
+ uint64_t d0_dwe : 8;
+ uint64_t d1_dwe : 8;
+ uint64_t d2_dwe : 8;
+ uint64_t d3_dwe : 8;
+ uint64_t d4_dwe : 8;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_npei_dma_state1_s cn52xx;
+};
+typedef union cvmx_npei_dma_state1 cvmx_npei_dma_state1_t;
+
+/**
+ * cvmx_npei_dma_state1_p1
+ *
+ * NPEI_DMA_STATE1_P1 = NPEI DMA Request and Instruction State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state1_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state1_p1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t d0_difst : 7; /**< DMA engine 0 dif instruction read state */
+ uint64_t d1_difst : 7; /**< DMA engine 1 dif instruction read state */
+ uint64_t d2_difst : 7; /**< DMA engine 2 dif instruction read state */
+ uint64_t d3_difst : 7; /**< DMA engine 3 dif instruction read state */
+ uint64_t d4_difst : 7; /**< DMA engine 4 dif instruction read state */
+ uint64_t d0_reqst : 5; /**< DMA engine 0 request data state */
+ uint64_t d1_reqst : 5; /**< DMA engine 1 request data state */
+ uint64_t d2_reqst : 5; /**< DMA engine 2 request data state */
+ uint64_t d3_reqst : 5; /**< DMA engine 3 request data state */
+ uint64_t d4_reqst : 5; /**< DMA engine 4 request data state */
+#else
+ uint64_t d4_reqst : 5;
+ uint64_t d3_reqst : 5;
+ uint64_t d2_reqst : 5;
+ uint64_t d1_reqst : 5;
+ uint64_t d0_reqst : 5;
+ uint64_t d4_difst : 7;
+ uint64_t d3_difst : 7;
+ uint64_t d2_difst : 7;
+ uint64_t d1_difst : 7;
+ uint64_t d0_difst : 7;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } s;
+ struct cvmx_npei_dma_state1_p1_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t d0_difst : 7; /**< DMA engine 0 dif instruction read state */
+ uint64_t d1_difst : 7; /**< DMA engine 1 dif instruction read state */
+ uint64_t d2_difst : 7; /**< DMA engine 2 dif instruction read state */
+ uint64_t d3_difst : 7; /**< DMA engine 3 dif instruction read state */
+ uint64_t reserved_25_31 : 7;
+ uint64_t d0_reqst : 5; /**< DMA engine 0 request data state */
+ uint64_t d1_reqst : 5; /**< DMA engine 1 request data state */
+ uint64_t d2_reqst : 5; /**< DMA engine 2 request data state */
+ uint64_t d3_reqst : 5; /**< DMA engine 3 request data state */
+ uint64_t reserved_0_4 : 5;
+#else
+ uint64_t reserved_0_4 : 5;
+ uint64_t d3_reqst : 5;
+ uint64_t d2_reqst : 5;
+ uint64_t d1_reqst : 5;
+ uint64_t d0_reqst : 5;
+ uint64_t reserved_25_31 : 7;
+ uint64_t d3_difst : 7;
+ uint64_t d2_difst : 7;
+ uint64_t d1_difst : 7;
+ uint64_t d0_difst : 7;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_dma_state1_p1_s cn56xxp1;
+};
+typedef union cvmx_npei_dma_state1_p1 cvmx_npei_dma_state1_p1_t;
+
+/**
+ * cvmx_npei_dma_state2
+ *
+ * NPEI_DMA_STATE2 = NPI's DMA State 2
+ *
+ * Results from DMA state register 2
+ */
+union cvmx_npei_dma_state2 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t ndwe : 4; /**< DMA L2C Write State */
+ uint64_t reserved_21_23 : 3;
+ uint64_t ndre : 5; /**< DMA L2C Read State */
+ uint64_t reserved_10_15 : 6;
+ uint64_t prd : 10; /**< DMA PCIe Read State */
+#else
+ uint64_t prd : 10;
+ uint64_t reserved_10_15 : 6;
+ uint64_t ndre : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t ndwe : 4;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_npei_dma_state2_s cn52xx;
+};
+typedef union cvmx_npei_dma_state2 cvmx_npei_dma_state2_t;
+
+/**
+ * cvmx_npei_dma_state2_p1
+ *
+ * NPEI_DMA_STATE2_P1 = NPEI DMA Instruction Fetch State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state2_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state2_p1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63 : 19;
+ uint64_t d0_dffst : 9; /**< DMA engine 0 dif instruction fetch state */
+ uint64_t d1_dffst : 9; /**< DMA engine 1 dif instruction fetch state */
+ uint64_t d2_dffst : 9; /**< DMA engine 2 dif instruction fetch state */
+ uint64_t d3_dffst : 9; /**< DMA engine 3 dif instruction fetch state */
+ uint64_t d4_dffst : 9; /**< DMA engine 4 dif instruction fetch state */
+#else
+ uint64_t d4_dffst : 9;
+ uint64_t d3_dffst : 9;
+ uint64_t d2_dffst : 9;
+ uint64_t d1_dffst : 9;
+ uint64_t d0_dffst : 9;
+ uint64_t reserved_45_63 : 19;
+#endif
+ } s;
+ struct cvmx_npei_dma_state2_p1_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63 : 19;
+ uint64_t d0_dffst : 9; /**< DMA engine 0 dif instruction fetch state */
+ uint64_t d1_dffst : 9; /**< DMA engine 1 dif instruction fetch state */
+ uint64_t d2_dffst : 9; /**< DMA engine 2 dif instruction fetch state */
+ uint64_t d3_dffst : 9; /**< DMA engine 3 dif instruction fetch state */
+ uint64_t reserved_0_8 : 9;
+#else
+ uint64_t reserved_0_8 : 9;
+ uint64_t d3_dffst : 9;
+ uint64_t d2_dffst : 9;
+ uint64_t d1_dffst : 9;
+ uint64_t d0_dffst : 9;
+ uint64_t reserved_45_63 : 19;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_dma_state2_p1_s cn56xxp1;
+};
+typedef union cvmx_npei_dma_state2_p1 cvmx_npei_dma_state2_p1_t;
+
+/**
+ * cvmx_npei_dma_state3_p1
+ *
+ * NPEI_DMA_STATE3_P1 = NPEI DMA DRE State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state3_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state3_p1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t d0_drest : 15; /**< DMA engine 0 dre state */
+ uint64_t d1_drest : 15; /**< DMA engine 1 dre state */
+ uint64_t d2_drest : 15; /**< DMA engine 2 dre state */
+ uint64_t d3_drest : 15; /**< DMA engine 3 dre state */
+#else
+ uint64_t d3_drest : 15;
+ uint64_t d2_drest : 15;
+ uint64_t d1_drest : 15;
+ uint64_t d0_drest : 15;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } s;
+ struct cvmx_npei_dma_state3_p1_s cn52xxp1;
+ struct cvmx_npei_dma_state3_p1_s cn56xxp1;
+};
+typedef union cvmx_npei_dma_state3_p1 cvmx_npei_dma_state3_p1_t;
+
+/**
+ * cvmx_npei_dma_state4_p1
+ *
+ * NPEI_DMA_STATE4_P1 = NPEI DMA DWE State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state4_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state4_p1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t d0_dwest : 13; /**< DMA engine 0 dwe state */
+ uint64_t d1_dwest : 13; /**< DMA engine 1 dwe state */
+ uint64_t d2_dwest : 13; /**< DMA engine 2 dwe state */
+ uint64_t d3_dwest : 13; /**< DMA engine 3 dwe state */
+#else
+ uint64_t d3_dwest : 13;
+ uint64_t d2_dwest : 13;
+ uint64_t d1_dwest : 13;
+ uint64_t d0_dwest : 13;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+ struct cvmx_npei_dma_state4_p1_s cn52xxp1;
+ struct cvmx_npei_dma_state4_p1_s cn56xxp1;
+};
+typedef union cvmx_npei_dma_state4_p1 cvmx_npei_dma_state4_p1_t;
+
+/**
+ * cvmx_npei_dma_state5_p1
+ *
+ * NPEI_DMA_STATE5_P1 = NPEI DMA DWE and DRE State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state5_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state5_p1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t d4_drest : 15; /**< DMA engine 4 dre state */
+ uint64_t d4_dwest : 13; /**< DMA engine 4 dwe state */
+#else
+ uint64_t d4_dwest : 13;
+ uint64_t d4_drest : 15;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_npei_dma_state5_p1_s cn56xxp1;
+};
+typedef union cvmx_npei_dma_state5_p1 cvmx_npei_dma_state5_p1_t;
+
+/**
+ * cvmx_npei_int_a_enb
+ *
+ * NPEI_INTERRUPT_A_ENB = NPI's Interrupt A Enable Register
+ *
+ * Used to allow the generation of interrupts (MSI/INTA) to the PCIe Cores. Used to enable the various interrupting conditions of NPEI.
+ */
+union cvmx_npei_int_a_enb {
+ uint64_t u64;
+ struct cvmx_npei_int_a_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t pout_err : 1; /**< Enables NPEI_INT_A_SUM[9] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pin_bp : 1; /**< Enables NPEI_INT_A_SUM[8] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t p1_rdlk : 1; /**< Enables NPEI_INT_A_SUM[7] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t p0_rdlk : 1; /**< Enables NPEI_INT_A_SUM[6] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pgl_err : 1; /**< Enables NPEI_INT_A_SUM[5] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pdi_err : 1; /**< Enables NPEI_INT_A_SUM[4] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pop_err : 1; /**< Enables NPEI_INT_A_SUM[3] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pins_err : 1; /**< Enables NPEI_INT_A_SUM[2] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma1_cpl : 1; /**< Enables NPEI_INT_A_SUM[1] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma0_cpl : 1; /**< Enables NPEI_INT_A_SUM[0] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+#else
+ uint64_t dma0_cpl : 1;
+ uint64_t dma1_cpl : 1;
+ uint64_t pins_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pgl_err : 1;
+ uint64_t p0_rdlk : 1;
+ uint64_t p1_rdlk : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pout_err : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_npei_int_a_enb_s cn52xx;
+ struct cvmx_npei_int_a_enb_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t dma1_cpl : 1; /**< Enables NPEI_INT_A_SUM[1] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma0_cpl : 1; /**< Enables NPEI_INT_A_SUM[0] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+#else
+ uint64_t dma0_cpl : 1;
+ uint64_t dma1_cpl : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_int_a_enb_s cn56xx;
+};
+typedef union cvmx_npei_int_a_enb cvmx_npei_int_a_enb_t;
+
+/**
+ * cvmx_npei_int_a_enb2
+ *
+ * NPEI_INTERRUPT_A_ENB2 = NPEI's Interrupt A Enable2 Register
+ *
+ * Used to enable the various interrupting conditions of NPEI
+ */
+union cvmx_npei_int_a_enb2 {
+ uint64_t u64;
+ struct cvmx_npei_int_a_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t pout_err : 1; /**< Enables NPEI_INT_A_SUM[9] to generate an
+ interrupt on the RSL. */
+ uint64_t pin_bp : 1; /**< Enables NPEI_INT_A_SUM[8] to generate an
+ interrupt on the RSL. */
+ uint64_t p1_rdlk : 1; /**< Enables NPEI_INT_A_SUM[7] to generate an
+ interrupt on the RSL. */
+ uint64_t p0_rdlk : 1; /**< Enables NPEI_INT_A_SUM[6] to generate an
+ interrupt on the RSL. */
+ uint64_t pgl_err : 1; /**< Enables NPEI_INT_A_SUM[5] to generate an
+ interrupt on the RSL. */
+ uint64_t pdi_err : 1; /**< Enables NPEI_INT_A_SUM[4] to generate an
+ interrupt on the RSL. */
+ uint64_t pop_err : 1; /**< Enables NPEI_INT_A_SUM[3] to generate an
+ interrupt on the RSL. */
+ uint64_t pins_err : 1; /**< Enables NPEI_INT_A_SUM[2] to generate an
+ interrupt on the RSL. */
+ uint64_t dma1_cpl : 1; /**< Enables NPEI_INT_A_SUM[1] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma0_cpl : 1; /**< Enables NPEI_INT_A_SUM[0] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+#else
+ uint64_t dma0_cpl : 1;
+ uint64_t dma1_cpl : 1;
+ uint64_t pins_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pgl_err : 1;
+ uint64_t p0_rdlk : 1;
+ uint64_t p1_rdlk : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pout_err : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_npei_int_a_enb2_s cn52xx;
+ struct cvmx_npei_int_a_enb2_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t dma1_cpl : 1; /**< Enables NPEI_INT_A_SUM[1] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma0_cpl : 1; /**< Enables NPEI_INT_A_SUM[0] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+#else
+ uint64_t dma0_cpl : 1;
+ uint64_t dma1_cpl : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_int_a_enb2_s cn56xx;
+};
+typedef union cvmx_npei_int_a_enb2 cvmx_npei_int_a_enb2_t;
+
+/**
+ * cvmx_npei_int_a_sum
+ *
+ * NPEI_INTERRUPT_A_SUM = NPI Interrupt A Summary Register
+ *
+ * Set when an interrupt condition occurs; write '1' to clear. When an interrupt bit in this register is set and
+ * the corresponding bit in the NPEI_INT_A_ENB register is set, then NPEI_INT_SUM[61] will be set.
+ */
+union cvmx_npei_int_a_sum {
+ uint64_t u64;
+ struct cvmx_npei_int_a_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t pout_err : 1; /**< Set when PKO sends packet data with the error bit
+ set. */
+ uint64_t pin_bp : 1; /**< Packet input count has exceeded the WMARK.
+ See NPEI_PKT_IN_BP */
+ uint64_t p1_rdlk : 1; /**< PCIe port 1 received a read lock. */
+ uint64_t p0_rdlk : 1; /**< PCIe port 0 received a read lock. */
+ uint64_t pgl_err : 1; /**< When a read error occurs on a packet gather list
+ read this bit is set. */
+ uint64_t pdi_err : 1; /**< When a read error occurs on a packet data read
+ this bit is set. */
+ uint64_t pop_err : 1; /**< When a read error occurs on a packet scatter
+ pointer pair this bit is set. */
+ uint64_t pins_err : 1; /**< When a read error occurs on a packet instruction
+ this bit is set. */
+ uint64_t dma1_cpl : 1; /**< Set each time any PCIe DMA engine receives a UR/CA
+ response from PCIe Port 1 */
+ uint64_t dma0_cpl : 1; /**< Set each time any PCIe DMA engine receives a UR/CA
+ response from PCIe Port 0 */
+#else
+ uint64_t dma0_cpl : 1;
+ uint64_t dma1_cpl : 1;
+ uint64_t pins_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pgl_err : 1;
+ uint64_t p0_rdlk : 1;
+ uint64_t p1_rdlk : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pout_err : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_npei_int_a_sum_s cn52xx;
+ struct cvmx_npei_int_a_sum_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t dma1_cpl : 1; /**< Set each time any PCIe DMA engine receives a UR/CA
+ response from PCIe Port 1 */
+ uint64_t dma0_cpl : 1; /**< Set each time any PCIe DMA engine receives a UR/CA
+ response from PCIe Port 0 */
+#else
+ uint64_t dma0_cpl : 1;
+ uint64_t dma1_cpl : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_int_a_sum_s cn56xx;
+};
+typedef union cvmx_npei_int_a_sum cvmx_npei_int_a_sum_t;
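+
+/* Editor's example (sketch, not vendor text): NPEI_INT_A_SUM bits are
+ * write-one-to-clear, so a handler typically reads the summary, services the
+ * indicated causes, and writes the same value back to acknowledge them.
+ * Assuming a CVMX_PEXP_NPEI_INT_A_SUM address macro:
+ *
+ *     cvmx_npei_int_a_sum_t sum;
+ *     sum.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_INT_A_SUM);
+ *     if (sum.s.dma0_cpl) {
+ *         // a PCIe DMA engine received a UR/CA completion from port 0
+ *     }
+ *     cvmx_write_csr(CVMX_PEXP_NPEI_INT_A_SUM, sum.u64);  // W1C acknowledge
+ */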
+
+/**
+ * cvmx_npei_int_enb
+ *
+ * NPEI_INTERRUPT_ENB = NPI's Interrupt Enable Register
+ *
+ * Used to allow the generation of interrupts (MSI/INTA) to the PCIe Cores. Used to enable the various interrupting conditions of NPI.
+ */
+union cvmx_npei_int_enb {
+ uint64_t u64;
+ struct cvmx_npei_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta : 1; /**< Enables NPEI_INT_SUM[63] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_62_62 : 1;
+ uint64_t int_a : 1; /**< Enables NPEI_INT_SUM[61] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM[60] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM[59] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM[58] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM[57] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM[56] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM[55] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM[54] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM[53] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM[52] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM[51] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM[50] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM[49] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM[48] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM[47] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM[46] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM[45] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM[44] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM[43] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM[42] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM[41] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM[40] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM[39] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM[38] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM[37] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM[36] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM[35] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM[34] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM[33] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM[32] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM[31] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM[30] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t crs1_dr : 1; /**< Enables NPEI_INT_SUM[29] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM[28] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t crs1_er : 1; /**< Enables NPEI_INT_SUM[27] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM[26] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM[25] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM[24] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM[23] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t crs0_dr : 1; /**< Enables NPEI_INT_SUM[22] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM[21] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t crs0_er : 1; /**< Enables NPEI_INT_SUM[20] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM[19] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t ptime : 1; /**< Enables NPEI_INT_SUM[18] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM[17] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM[16] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM[15] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM[14] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM[13] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM[12] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM[11] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM[10] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM[9] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma4dbo : 1; /**< Enables NPEI_INT_SUM[8] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM[7] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM[6] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM[5] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM[4] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM[3] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM[2] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM[1] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM[0] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t dma0dbo : 1;
+ uint64_t dma1dbo : 1;
+ uint64_t dma2dbo : 1;
+ uint64_t dma3dbo : 1;
+ uint64_t dma4dbo : 1;
+ uint64_t dma0fi : 1;
+ uint64_t dma1fi : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t psldbof : 1;
+ uint64_t pidbof : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t c0_aeri : 1;
+ uint64_t crs0_er : 1;
+ uint64_t c0_se : 1;
+ uint64_t crs0_dr : 1;
+ uint64_t c0_wake : 1;
+ uint64_t c0_pmei : 1;
+ uint64_t c0_hpint : 1;
+ uint64_t c1_aeri : 1;
+ uint64_t crs1_er : 1;
+ uint64_t c1_se : 1;
+ uint64_t crs1_dr : 1;
+ uint64_t c1_wake : 1;
+ uint64_t c1_pmei : 1;
+ uint64_t c1_hpint : 1;
+ uint64_t c0_up_b0 : 1;
+ uint64_t c0_up_b1 : 1;
+ uint64_t c0_up_b2 : 1;
+ uint64_t c0_up_wi : 1;
+ uint64_t c0_up_bx : 1;
+ uint64_t c0_un_b0 : 1;
+ uint64_t c0_un_b1 : 1;
+ uint64_t c0_un_b2 : 1;
+ uint64_t c0_un_wi : 1;
+ uint64_t c0_un_bx : 1;
+ uint64_t c1_up_b0 : 1;
+ uint64_t c1_up_b1 : 1;
+ uint64_t c1_up_b2 : 1;
+ uint64_t c1_up_wi : 1;
+ uint64_t c1_up_bx : 1;
+ uint64_t c1_un_b0 : 1;
+ uint64_t c1_un_b1 : 1;
+ uint64_t c1_un_b2 : 1;
+ uint64_t c1_un_wi : 1;
+ uint64_t c1_un_bx : 1;
+ uint64_t c0_un_wf : 1;
+ uint64_t c1_un_wf : 1;
+ uint64_t c0_up_wf : 1;
+ uint64_t c1_up_wf : 1;
+ uint64_t c0_exc : 1;
+ uint64_t c1_exc : 1;
+ uint64_t c0_ldwn : 1;
+ uint64_t c1_ldwn : 1;
+ uint64_t int_a : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t mio_inta : 1;
+#endif
+ } s;
+ struct cvmx_npei_int_enb_s cn52xx;
+ struct cvmx_npei_int_enb_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta : 1; /**< Enables NPEI_INT_SUM[63] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_62_62 : 1;
+ uint64_t int_a : 1; /**< Enables NPEI_INT_SUM[61] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM[60] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM[59] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM[58] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM[57] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM[56] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM[55] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM[54] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM[53] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM[52] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM[51] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM[50] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM[49] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM[48] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM[47] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM[46] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM[45] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM[44] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM[43] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM[42] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM[41] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM[40] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM[39] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM[38] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM[37] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM[36] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM[35] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM[34] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM[33] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM[32] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM[31] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM[30] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t crs1_dr : 1; /**< Enables NPEI_INT_SUM[29] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM[28] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t crs1_er : 1; /**< Enables NPEI_INT_SUM[27] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM[26] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM[25] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM[24] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM[23] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t crs0_dr : 1; /**< Enables NPEI_INT_SUM[22] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM[21] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t crs0_er : 1; /**< Enables NPEI_INT_SUM[20] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM[19] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t ptime : 1; /**< Enables NPEI_INT_SUM[18] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM[17] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM[16] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM[15] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM[14] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM[13] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM[12] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM[11] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM[10] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM[9] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_8_8 : 1;
+ uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM[7] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM[6] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM[5] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM[4] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM[3] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM[2] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM[1] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM[0] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t dma0dbo : 1;
+ uint64_t dma1dbo : 1;
+ uint64_t dma2dbo : 1;
+ uint64_t dma3dbo : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t dma0fi : 1;
+ uint64_t dma1fi : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t psldbof : 1;
+ uint64_t pidbof : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t c0_aeri : 1;
+ uint64_t crs0_er : 1;
+ uint64_t c0_se : 1;
+ uint64_t crs0_dr : 1;
+ uint64_t c0_wake : 1;
+ uint64_t c0_pmei : 1;
+ uint64_t c0_hpint : 1;
+ uint64_t c1_aeri : 1;
+ uint64_t crs1_er : 1;
+ uint64_t c1_se : 1;
+ uint64_t crs1_dr : 1;
+ uint64_t c1_wake : 1;
+ uint64_t c1_pmei : 1;
+ uint64_t c1_hpint : 1;
+ uint64_t c0_up_b0 : 1;
+ uint64_t c0_up_b1 : 1;
+ uint64_t c0_up_b2 : 1;
+ uint64_t c0_up_wi : 1;
+ uint64_t c0_up_bx : 1;
+ uint64_t c0_un_b0 : 1;
+ uint64_t c0_un_b1 : 1;
+ uint64_t c0_un_b2 : 1;
+ uint64_t c0_un_wi : 1;
+ uint64_t c0_un_bx : 1;
+ uint64_t c1_up_b0 : 1;
+ uint64_t c1_up_b1 : 1;
+ uint64_t c1_up_b2 : 1;
+ uint64_t c1_up_wi : 1;
+ uint64_t c1_up_bx : 1;
+ uint64_t c1_un_b0 : 1;
+ uint64_t c1_un_b1 : 1;
+ uint64_t c1_un_b2 : 1;
+ uint64_t c1_un_wi : 1;
+ uint64_t c1_un_bx : 1;
+ uint64_t c0_un_wf : 1;
+ uint64_t c1_un_wf : 1;
+ uint64_t c0_up_wf : 1;
+ uint64_t c1_up_wf : 1;
+ uint64_t c0_exc : 1;
+ uint64_t c1_exc : 1;
+ uint64_t c0_ldwn : 1;
+ uint64_t c1_ldwn : 1;
+ uint64_t int_a : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t mio_inta : 1;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_int_enb_s cn56xx;
+ struct cvmx_npei_int_enb_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta : 1; /**< Enables NPEI_INT_SUM[63] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_61_62 : 2;
+ uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM[60] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM[59] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM[58] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM[57] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM[56] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM[55] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM[54] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM[53] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM[52] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM[51] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM[50] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM[49] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM[48] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM[47] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM[46] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM[45] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM[44] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM[43] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM[42] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM[41] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM[40] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM[39] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM[38] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM[37] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM[36] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM[35] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM[34] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM[33] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM[32] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM[31] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM[30] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_29_29 : 1;
+ uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM[28] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_27_27 : 1;
+ uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM[26] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM[25] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM[24] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM[23] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_22_22 : 1;
+ uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM[21] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_20_20 : 1;
+ uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM[19] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t ptime : 1; /**< Enables NPEI_INT_SUM[18] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM[17] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM[16] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM[15] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM[14] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM[13] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM[12] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM[11] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM[10] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM[9] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma4dbo : 1; /**< Enables NPEI_INT_SUM[8] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM[7] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM[6] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM[5] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM[4] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM[3] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM[2] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM[1] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM[0] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t dma0dbo : 1;
+ uint64_t dma1dbo : 1;
+ uint64_t dma2dbo : 1;
+ uint64_t dma3dbo : 1;
+ uint64_t dma4dbo : 1;
+ uint64_t dma0fi : 1;
+ uint64_t dma1fi : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t psldbof : 1;
+ uint64_t pidbof : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t c0_aeri : 1;
+ uint64_t reserved_20_20 : 1;
+ uint64_t c0_se : 1;
+ uint64_t reserved_22_22 : 1;
+ uint64_t c0_wake : 1;
+ uint64_t c0_pmei : 1;
+ uint64_t c0_hpint : 1;
+ uint64_t c1_aeri : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t c1_se : 1;
+ uint64_t reserved_29_29 : 1;
+ uint64_t c1_wake : 1;
+ uint64_t c1_pmei : 1;
+ uint64_t c1_hpint : 1;
+ uint64_t c0_up_b0 : 1;
+ uint64_t c0_up_b1 : 1;
+ uint64_t c0_up_b2 : 1;
+ uint64_t c0_up_wi : 1;
+ uint64_t c0_up_bx : 1;
+ uint64_t c0_un_b0 : 1;
+ uint64_t c0_un_b1 : 1;
+ uint64_t c0_un_b2 : 1;
+ uint64_t c0_un_wi : 1;
+ uint64_t c0_un_bx : 1;
+ uint64_t c1_up_b0 : 1;
+ uint64_t c1_up_b1 : 1;
+ uint64_t c1_up_b2 : 1;
+ uint64_t c1_up_wi : 1;
+ uint64_t c1_up_bx : 1;
+ uint64_t c1_un_b0 : 1;
+ uint64_t c1_un_b1 : 1;
+ uint64_t c1_un_b2 : 1;
+ uint64_t c1_un_wi : 1;
+ uint64_t c1_un_bx : 1;
+ uint64_t c0_un_wf : 1;
+ uint64_t c1_un_wf : 1;
+ uint64_t c0_up_wf : 1;
+ uint64_t c1_up_wf : 1;
+ uint64_t c0_exc : 1;
+ uint64_t c1_exc : 1;
+ uint64_t c0_ldwn : 1;
+ uint64_t c1_ldwn : 1;
+ uint64_t reserved_61_62 : 2;
+ uint64_t mio_inta : 1;
+#endif
+ } cn56xxp1;
+};
+typedef union cvmx_npei_int_enb cvmx_npei_int_enb_t;
+
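+/* Illustrative usage sketch -- not part of the original SDK file. The union
+ * exposes the register both as a raw 64-bit value (u64) and as named
+ * bitfields (s), so enable bits can be set without manual shifting and
+ * masking. The cvmx_read_csr()/cvmx_write_csr() accessors and the
+ * CVMX_PEXP_NPEI_INT_ENB address macro are assumed to come from elsewhere
+ * in this SDK:
+ *
+ *   cvmx_npei_int_enb_t enb;
+ *   enb.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_INT_ENB);
+ *   enb.s.dma0dbo = 1;    // report DMA0 doorbell overflow via MSI/inta
+ *   enb.s.rml_rto = 1;    // report RML read timeouts as well
+ *   cvmx_write_csr(CVMX_PEXP_NPEI_INT_ENB, enb.u64);
+ */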
+/**
+ * cvmx_npei_int_enb2
+ *
+ * NPEI_INTERRUPT_ENB2 = NPI's Interrupt Enable2 Register
+ *
+ * Used to enable the various interrupting conditions of NPI
+ */
+union cvmx_npei_int_enb2 {
+ uint64_t u64;
+ struct cvmx_npei_int_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t int_a : 1; /**< Enables NPEI_INT_SUM2[61] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM[60] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM[59] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM[58] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM[57] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM[56] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM[55] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM[54] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM[53] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM[52] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM[51] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM[50] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM[49] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM[48] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM[47] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM[46] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM[45] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM[44] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM[43] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM[42] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM[41] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM[40] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM[39] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM[38] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM[37] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM[36] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM[35] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM[34] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM[33] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM[32] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM[31] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM[30] to generate an
+ interrupt on the RSL. */
+ uint64_t crs1_dr : 1; /**< Enables NPEI_INT_SUM2[29] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM[28] to generate an
+ interrupt on the RSL. */
+ uint64_t crs1_er : 1; /**< Enables NPEI_INT_SUM2[27] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM[26] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM[25] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM[24] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM[23] to generate an
+ interrupt on the RSL. */
+ uint64_t crs0_dr : 1; /**< Enables NPEI_INT_SUM2[22] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM[21] to generate an
+ interrupt on the RSL. */
+ uint64_t crs0_er : 1; /**< Enables NPEI_INT_SUM2[20] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM[19] to generate an
+ interrupt on the RSL. */
+ uint64_t ptime : 1; /**< Enables NPEI_INT_SUM[18] to generate an
+ interrupt on the RSL. */
+ uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM[17] to generate an
+ interrupt on the RSL. */
+ uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM[16] to generate an
+ interrupt on the RSL. */
+ uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM[15] to generate an
+ interrupt on the RSL. */
+ uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM[14] to generate an
+ interrupt on the RSL. */
+ uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM[13] to generate an
+ interrupt on the RSL. */
+ uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM[12] to generate an
+ interrupt on the RSL. */
+ uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM[11] to generate an
+ interrupt on the RSL. */
+ uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM[10] to generate an
+ interrupt on the RSL. */
+ uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM[9] to generate an
+ interrupt on the RSL. */
+ uint64_t dma4dbo : 1; /**< Enables NPEI_INT_SUM[8] to generate an
+ interrupt on the RSL. */
+ uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM[7] to generate an
+ interrupt on the RSL. */
+ uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM[6] to generate an
+ interrupt on the RSL. */
+ uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM[5] to generate an
+ interrupt on the RSL. */
+ uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM[4] to generate an
+ interrupt on the RSL. */
+ uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM[3] to generate an
+ interrupt on the RSL. */
+ uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM[2] to generate an
+ interrupt on the RSL. */
+ uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM[1] to generate an
+ interrupt on the RSL. */
+ uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM[0] to generate an
+ interrupt on the RSL. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t dma0dbo : 1;
+ uint64_t dma1dbo : 1;
+ uint64_t dma2dbo : 1;
+ uint64_t dma3dbo : 1;
+ uint64_t dma4dbo : 1;
+ uint64_t dma0fi : 1;
+ uint64_t dma1fi : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t psldbof : 1;
+ uint64_t pidbof : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t c0_aeri : 1;
+ uint64_t crs0_er : 1;
+ uint64_t c0_se : 1;
+ uint64_t crs0_dr : 1;
+ uint64_t c0_wake : 1;
+ uint64_t c0_pmei : 1;
+ uint64_t c0_hpint : 1;
+ uint64_t c1_aeri : 1;
+ uint64_t crs1_er : 1;
+ uint64_t c1_se : 1;
+ uint64_t crs1_dr : 1;
+ uint64_t c1_wake : 1;
+ uint64_t c1_pmei : 1;
+ uint64_t c1_hpint : 1;
+ uint64_t c0_up_b0 : 1;
+ uint64_t c0_up_b1 : 1;
+ uint64_t c0_up_b2 : 1;
+ uint64_t c0_up_wi : 1;
+ uint64_t c0_up_bx : 1;
+ uint64_t c0_un_b0 : 1;
+ uint64_t c0_un_b1 : 1;
+ uint64_t c0_un_b2 : 1;
+ uint64_t c0_un_wi : 1;
+ uint64_t c0_un_bx : 1;
+ uint64_t c1_up_b0 : 1;
+ uint64_t c1_up_b1 : 1;
+ uint64_t c1_up_b2 : 1;
+ uint64_t c1_up_wi : 1;
+ uint64_t c1_up_bx : 1;
+ uint64_t c1_un_b0 : 1;
+ uint64_t c1_un_b1 : 1;
+ uint64_t c1_un_b2 : 1;
+ uint64_t c1_un_wi : 1;
+ uint64_t c1_un_bx : 1;
+ uint64_t c0_un_wf : 1;
+ uint64_t c1_un_wf : 1;
+ uint64_t c0_up_wf : 1;
+ uint64_t c1_up_wf : 1;
+ uint64_t c0_exc : 1;
+ uint64_t c1_exc : 1;
+ uint64_t c0_ldwn : 1;
+ uint64_t c1_ldwn : 1;
+ uint64_t int_a : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_npei_int_enb2_s cn52xx;
+ struct cvmx_npei_int_enb2_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t int_a : 1; /**< Enables NPEI_INT_SUM2[61] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM2[60] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM2[59] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM2[58] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM2[57] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM2[56] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM2[55] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM2[54] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM2[53] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM2[52] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM2[51] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM2[50] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM2[49] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM2[48] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM2[47] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM2[46] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM2[45] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM2[44] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM2[43] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM2[42] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM2[41] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM2[40] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM2[39] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM2[38] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM2[37] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM2[36] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM2[35] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM2[34] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM2[33] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM2[32] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM2[31] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM2[30] to generate an
+ interrupt on the RSL. */
+ uint64_t crs1_dr : 1; /**< Enables NPEI_INT_SUM2[29] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM2[28] to generate an
+ interrupt on the RSL. */
+ uint64_t crs1_er : 1; /**< Enables NPEI_INT_SUM2[27] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM2[26] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM2[25] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM2[24] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM2[23] to generate an
+ interrupt on the RSL. */
+ uint64_t crs0_dr : 1; /**< Enables NPEI_INT_SUM2[22] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM2[21] to generate an
+ interrupt on the RSL. */
+ uint64_t crs0_er : 1; /**< Enables NPEI_INT_SUM2[20] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM2[19] to generate an
+ interrupt on the RSL. */
+ uint64_t ptime : 1; /**< Enables NPEI_INT_SUM2[18] to generate an
+ interrupt on the RSL. */
+ uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM2[17] to generate an
+ interrupt on the RSL. */
+ uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM2[16] to generate an
+ interrupt on the RSL. */
+ uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM2[15] to generate an
+ interrupt on the RSL. */
+ uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM2[14] to generate an
+ interrupt on the RSL. */
+ uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM2[13] to generate an
+ interrupt on the RSL. */
+ uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM2[12] to generate an
+ interrupt on the RSL. */
+ uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM2[11] to generate an
+ interrupt on the RSL. */
+ uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM2[10] to generate an
+ interrupt on the RSL. */
+ uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM2[9] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_8_8 : 1;
+ uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM2[7] to generate an
+ interrupt on the RSL. */
+ uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM2[6] to generate an
+ interrupt on the RSL. */
+ uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM2[5] to generate an
+ interrupt on the RSL. */
+ uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM2[4] to generate an
+ interrupt on the RSL. */
+ uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM2[3] to generate an
+ interrupt on the RSL. */
+ uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM2[2] to generate an
+ interrupt on the RSL. */
+ uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM2[1] to generate an
+ interrupt on the RSL. */
+ uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM2[0] to generate an
+ interrupt on the RSL. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t dma0dbo : 1;
+ uint64_t dma1dbo : 1;
+ uint64_t dma2dbo : 1;
+ uint64_t dma3dbo : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t dma0fi : 1;
+ uint64_t dma1fi : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t psldbof : 1;
+ uint64_t pidbof : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t c0_aeri : 1;
+ uint64_t crs0_er : 1;
+ uint64_t c0_se : 1;
+ uint64_t crs0_dr : 1;
+ uint64_t c0_wake : 1;
+ uint64_t c0_pmei : 1;
+ uint64_t c0_hpint : 1;
+ uint64_t c1_aeri : 1;
+ uint64_t crs1_er : 1;
+ uint64_t c1_se : 1;
+ uint64_t crs1_dr : 1;
+ uint64_t c1_wake : 1;
+ uint64_t c1_pmei : 1;
+ uint64_t c1_hpint : 1;
+ uint64_t c0_up_b0 : 1;
+ uint64_t c0_up_b1 : 1;
+ uint64_t c0_up_b2 : 1;
+ uint64_t c0_up_wi : 1;
+ uint64_t c0_up_bx : 1;
+ uint64_t c0_un_b0 : 1;
+ uint64_t c0_un_b1 : 1;
+ uint64_t c0_un_b2 : 1;
+ uint64_t c0_un_wi : 1;
+ uint64_t c0_un_bx : 1;
+ uint64_t c1_up_b0 : 1;
+ uint64_t c1_up_b1 : 1;
+ uint64_t c1_up_b2 : 1;
+ uint64_t c1_up_wi : 1;
+ uint64_t c1_up_bx : 1;
+ uint64_t c1_un_b0 : 1;
+ uint64_t c1_un_b1 : 1;
+ uint64_t c1_un_b2 : 1;
+ uint64_t c1_un_wi : 1;
+ uint64_t c1_un_bx : 1;
+ uint64_t c0_un_wf : 1;
+ uint64_t c1_un_wf : 1;
+ uint64_t c0_up_wf : 1;
+ uint64_t c1_up_wf : 1;
+ uint64_t c0_exc : 1;
+ uint64_t c1_exc : 1;
+ uint64_t c0_ldwn : 1;
+ uint64_t c1_ldwn : 1;
+ uint64_t int_a : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_int_enb2_s cn56xx;
+ struct cvmx_npei_int_enb2_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM[60] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM[59] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM[58] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM[57] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM[56] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM[55] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM[54] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM[53] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM[52] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM[51] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM[50] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM[49] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM[48] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM[47] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM[46] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM[45] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM[44] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM[43] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM[42] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM[41] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM[40] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM[39] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM[38] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM[37] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM[36] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM[35] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM[34] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM[33] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM[32] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM[31] to generate an
+ interrupt on the RSL. */
+ uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM[30] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_29_29 : 1;
+ uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM[28] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_27_27 : 1;
+ uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM[26] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM[25] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM[24] to generate an
+ interrupt on the RSL. */
+ uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM[23] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_22_22 : 1;
+ uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM[21] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_20_20 : 1;
+ uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM[19] to generate an
+ interrupt on the RSL. */
+ uint64_t ptime : 1; /**< Enables NPEI_INT_SUM[18] to generate an
+ interrupt on the RSL. */
+ uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM[17] to generate an
+ interrupt on the RSL. */
+ uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM[16] to generate an
+ interrupt on the RSL. */
+ uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM[15] to generate an
+ interrupt on the RSL. */
+ uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM[14] to generate an
+ interrupt on the RSL. */
+ uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM[13] to generate an
+ interrupt on the RSL. */
+ uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM[12] to generate an
+ interrupt on the RSL. */
+ uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM[11] to generate an
+ interrupt on the RSL. */
+ uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM[10] to generate an
+ interrupt on the RSL. */
+ uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM[9] to generate an
+ interrupt on the RSL. */
+ uint64_t dma4dbo : 1; /**< Enables NPEI_INT_SUM[8] to generate an
+ interrupt on the RSL. */
+ uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM[7] to generate an
+ interrupt on the RSL. */
+ uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM[6] to generate an
+ interrupt on the RSL. */
+ uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM[5] to generate an
+ interrupt on the RSL. */
+ uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM[4] to generate an
+ interrupt on the RSL. */
+ uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM[3] to generate an
+ interrupt on the RSL. */
+ uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM[2] to generate an
+ interrupt on the RSL. */
+ uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM[1] to generate an
+ interrupt on the RSL. */
+ uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM[0] to generate an
+ interrupt on the RSL. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t dma0dbo : 1;
+ uint64_t dma1dbo : 1;
+ uint64_t dma2dbo : 1;
+ uint64_t dma3dbo : 1;
+ uint64_t dma4dbo : 1;
+ uint64_t dma0fi : 1;
+ uint64_t dma1fi : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t psldbof : 1;
+ uint64_t pidbof : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t c0_aeri : 1;
+ uint64_t reserved_20_20 : 1;
+ uint64_t c0_se : 1;
+ uint64_t reserved_22_22 : 1;
+ uint64_t c0_wake : 1;
+ uint64_t c0_pmei : 1;
+ uint64_t c0_hpint : 1;
+ uint64_t c1_aeri : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t c1_se : 1;
+ uint64_t reserved_29_29 : 1;
+ uint64_t c1_wake : 1;
+ uint64_t c1_pmei : 1;
+ uint64_t c1_hpint : 1;
+ uint64_t c0_up_b0 : 1;
+ uint64_t c0_up_b1 : 1;
+ uint64_t c0_up_b2 : 1;
+ uint64_t c0_up_wi : 1;
+ uint64_t c0_up_bx : 1;
+ uint64_t c0_un_b0 : 1;
+ uint64_t c0_un_b1 : 1;
+ uint64_t c0_un_b2 : 1;
+ uint64_t c0_un_wi : 1;
+ uint64_t c0_un_bx : 1;
+ uint64_t c1_up_b0 : 1;
+ uint64_t c1_up_b1 : 1;
+ uint64_t c1_up_b2 : 1;
+ uint64_t c1_up_wi : 1;
+ uint64_t c1_up_bx : 1;
+ uint64_t c1_un_b0 : 1;
+ uint64_t c1_un_b1 : 1;
+ uint64_t c1_un_b2 : 1;
+ uint64_t c1_un_wi : 1;
+ uint64_t c1_un_bx : 1;
+ uint64_t c0_un_wf : 1;
+ uint64_t c1_un_wf : 1;
+ uint64_t c0_up_wf : 1;
+ uint64_t c1_up_wf : 1;
+ uint64_t c0_exc : 1;
+ uint64_t c1_exc : 1;
+ uint64_t c0_ldwn : 1;
+ uint64_t c1_ldwn : 1;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } cn56xxp1;
+};
+typedef union cvmx_npei_int_enb2 cvmx_npei_int_enb2_t;
+
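+/* Illustrative sketch -- not part of the original SDK file. NPEI_INT_ENB2
+ * gates the same summary conditions onto the RSL instead of the PCIe
+ * MSI/inta path, so a handler typically enables a condition on exactly one
+ * of the two paths. The CVMX_PEXP_NPEI_INT_ENB2 address macro is an
+ * assumption here (the accessors are as above):
+ *
+ *   cvmx_npei_int_enb2_t enb2;
+ *   enb2.u64 = 0;            // start with every condition masked
+ *   enb2.s.bar0_to = 1;      // surface BAR0/NCB timeouts on the RSL
+ *   cvmx_write_csr(CVMX_PEXP_NPEI_INT_ENB2, enb2.u64);
+ */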
+/**
+ * cvmx_npei_int_info
+ *
+ * NPEI_INT_INFO = NPI Interrupt Information
+ *
+ * Contains information about some of the interrupt conditions that can occur in the NPEI_INTERRUPT_SUM register.
+ */
+union cvmx_npei_int_info {
+ uint64_t u64;
+ struct cvmx_npei_int_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t pidbof : 6; /**< Field set when the NPEI_INTERRUPT_SUM[PIDBOF] bit
+ is set. This field when set will not change again
+ until NPEI_INTERRUPT_SUM[PIDBOF] is cleared. */
+ uint64_t psldbof : 6; /**< Field set when the NPEI_INTERRUPT_SUM[PSLDBOF] bit
+ is set. This field when set will not change again
+ until NPEI_INTERRUPT_SUM[PSLDBOF] is cleared. */
+#else
+ uint64_t psldbof : 6;
+ uint64_t pidbof : 6;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_npei_int_info_s cn52xx;
+ struct cvmx_npei_int_info_s cn56xx;
+ struct cvmx_npei_int_info_s cn56xxp1;
+};
+typedef union cvmx_npei_int_info cvmx_npei_int_info_t;
+
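+/* Illustrative sketch -- not part of the original SDK file. Once
+ * NPEI_INTERRUPT_SUM reports PIDBOF or PSLDBOF, this register identifies
+ * which doorbell overflowed; the fields are latched until the summary bit
+ * is cleared. The CVMX_PEXP_NPEI_INT_INFO address macro is assumed from
+ * elsewhere in this SDK:
+ *
+ *   cvmx_npei_int_info_t info;
+ *   info.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_INT_INFO);
+ *   int pidbof_db  = info.s.pidbof;   // instruction doorbell that overflowed
+ *   int psldbof_db = info.s.psldbof;  // scatterlist doorbell that overflowed
+ */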
+/**
+ * cvmx_npei_int_sum
+ *
+ * NPEI_INTERRUPT_SUM = NPI Interrupt Summary Register
+ *
+ * Set when an interrupt condition occurs; write '1' to clear.
+ *
+ * HACK: These used to exist, how are TO handled?
+ * <3> PO0_2SML R/W1C 0x0 0 The packet being sent out on Port0 is smaller $R NS
+ * than the NPI_BUFF_SIZE_OUTPUT0[ISIZE] field.
+ * <7> I0_RTOUT R/W1C 0x0 0 Port-0 had a read timeout while attempting to $R NS
+ * read instructions.
+ * <15> P0_RTOUT R/W1C 0x0 0 Port-0 had a read timeout while attempting to $R NS
+ * read packet data.
+ * <23> G0_RTOUT R/W1C 0x0 0 Port-0 had a read timeout while attempting to $R NS
+ * read a gather list.
+ * <31> P0_PTOUT R/W1C 0x0 0 Port-0 output had a read timeout on a DATA/INFO $R NS
+ * pair.
+ */
+union cvmx_npei_int_sum {
+ uint64_t u64;
+ struct cvmx_npei_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta : 1; /**< Interrupt from MIO. */
+ uint64_t reserved_62_62 : 1;
+ uint64_t int_a : 1; /**< Set when a bit in the NPEI_INT_A_SUM register and
+ the corresponding bit in the NPEI_INT_A_ENB
+ register are both set. */
+ uint64_t c1_ldwn : 1; /**< Reset request due to link1 down status. */
+ uint64_t c0_ldwn : 1; /**< Reset request due to link0 down status. */
+ uint64_t c1_exc : 1; /**< Set when the PESC1_DBG_INFO register has a bit
+ set and its corresponding PESC1_DBG_INFO_EN bit
+ is set. */
+ uint64_t c0_exc : 1; /**< Set when the PESC0_DBG_INFO register has a bit
+ set and its corresponding PESC0_DBG_INFO_EN bit
+ is set. */
+ uint64_t c1_up_wf : 1; /**< Received Unsupported P-TLP for filtered window
+ register. Core1. */
+ uint64_t c0_up_wf : 1; /**< Received Unsupported P-TLP for filtered window
+ register. Core0. */
+ uint64_t c1_un_wf : 1; /**< Received Unsupported N-TLP for filtered window
+ register. Core1. */
+ uint64_t c0_un_wf : 1; /**< Received Unsupported N-TLP for filtered window
+ register. Core0. */
+ uint64_t c1_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar.
+ Core 1. */
+ uint64_t c1_un_wi : 1; /**< Received Unsupported N-TLP for Window Register.
+ Core 1. */
+ uint64_t c1_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2.
+ Core 1. */
+ uint64_t c1_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1.
+ Core 1. */
+ uint64_t c1_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0.
+ Core 1. */
+ uint64_t c1_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar.
+ Core 1. */
+ uint64_t c1_up_wi : 1; /**< Received Unsupported P-TLP for Window Register.
+ Core 1. */
+ uint64_t c1_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2.
+ Core 1. */
+ uint64_t c1_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1.
+ Core 1. */
+ uint64_t c1_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0.
+ Core 1. */
+ uint64_t c0_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar.
+ Core 0. */
+ uint64_t c0_un_wi : 1; /**< Received Unsupported N-TLP for Window Register.
+ Core 0. */
+ uint64_t c0_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2.
+ Core 0. */
+ uint64_t c0_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1.
+ Core 0. */
+ uint64_t c0_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0.
+ Core 0. */
+ uint64_t c0_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar.
+ Core 0. */
+ uint64_t c0_up_wi : 1; /**< Received Unsupported P-TLP for Window Register.
+ Core 0. */
+ uint64_t c0_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2.
+ Core 0. */
+ uint64_t c0_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1.
+ Core 0. */
+ uint64_t c0_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0.
+ Core 0. */
+ uint64_t c1_hpint : 1; /**< Hot-Plug Interrupt.
+ Pcie Core 1 (hp_int).
+ This interrupt will only be generated when
+ PCIERC1_CFG034[DLLS_C] is set. Hot plug is
+ not supported. */
+ uint64_t c1_pmei : 1; /**< PME Interrupt.
+ Pcie Core 1. (cfg_pme_int) */
+ uint64_t c1_wake : 1; /**< Wake up from Power Management Unit.
+ Pcie Core 1. (wake_n)
+ Octeon will never generate this interrupt. */
+ uint64_t crs1_dr : 1; /**< Had a CRS when Retries were disabled. */
+ uint64_t c1_se : 1; /**< System Error, RC Mode Only.
+ Pcie Core 1. (cfg_sys_err_rc) */
+ uint64_t crs1_er : 1; /**< Had a CRS Timeout when Retries were enabled. */
+ uint64_t c1_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only.
+ Pcie Core 1. */
+ uint64_t c0_hpint : 1; /**< Hot-Plug Interrupt.
+ Pcie Core 0 (hp_int).
+ This interrupt will only be generated when
+ PCIERC0_CFG034[DLLS_C] is set. Hot plug is
+ not supported. */
+ uint64_t c0_pmei : 1; /**< PME Interrupt.
+ Pcie Core 0. (cfg_pme_int) */
+ uint64_t c0_wake : 1; /**< Wake up from Power Management Unit.
+ Pcie Core 0. (wake_n)
+ Octeon will never generate this interrupt. */
+ uint64_t crs0_dr : 1; /**< Had a CRS when Retries were disabled. */
+ uint64_t c0_se : 1; /**< System Error, RC Mode Only.
+ Pcie Core 0. (cfg_sys_err_rc) */
+ uint64_t crs0_er : 1; /**< Had a CRS Timeout when Retries were enabled. */
+ uint64_t c0_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only.
+ Pcie Core 0 (cfg_aer_rc_err_int). */
+ uint64_t ptime : 1; /**< Packet Timer has an interrupt. The rings involved
+ can be found in NPEI_PKT_TIME_INT. */
+ uint64_t pcnt : 1; /**< Packet Counter has an interrupt. The rings involved
+ can be found in NPEI_PKT_CNT_INT. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell count overflowed. The
+ doorbell involved can be found in NPEI_INT_INFO[PIDBOF]. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell count overflowed. The
+ doorbell involved can be found in NPEI_INT_INFO[PSLDBOF]. */
+ uint64_t dtime1 : 1; /**< Whenever NPEI_DMA_CNTS[DMA1] is not 0, the
+ DMA_CNT1 timer increments every core clock. When
+ DMA_CNT1 timer exceeds NPEI_DMA1_INT_LEVEL[TIME],
+ this bit is set. Writing a '1' to this bit also
+ clears the DMA_CNT1 timer. */
+ uint64_t dtime0 : 1; /**< Whenever NPEI_DMA_CNTS[DMA0] is not 0, the
+ DMA_CNT0 timer increments every core clock. When
+ DMA_CNT0 timer exceeds NPEI_DMA0_INT_LEVEL[TIME],
+ this bit is set. Writing a '1' to this bit also
+ clears the DMA_CNT0 timer. */
+ uint64_t dcnt1 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA1] was/is
+ greater than NPEI_DMA1_INT_LEVEL[CNT]. */
+ uint64_t dcnt0 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA0] was/is
+ greater than NPEI_DMA0_INT_LEVEL[CNT]. */
+ uint64_t dma1fi : 1; /**< DMA1 set Forced Interrupt. */
+ uint64_t dma0fi : 1; /**< DMA0 set Forced Interrupt. */
+ uint64_t dma4dbo : 1; /**< DMA4 doorbell overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t dma3dbo : 1; /**< DMA3 doorbell overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t dma2dbo : 1; /**< DMA2 doorbell overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t dma1dbo : 1; /**< DMA1 doorbell overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t dma0dbo : 1; /**< DMA0 doorbell overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t iob2big : 1; /**< A requested IOBDMA is too large. */
+ uint64_t bar0_to : 1; /**< BAR0 R/W to an NCB device did not receive
+ read-data/commit in 0xffff core clocks. */
+ uint64_t rml_wto : 1; /**< RML write did not get a commit in 0xffff core clocks. */
+ uint64_t rml_rto : 1; /**< RML read did not return data in 0xffff core clocks. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t dma0dbo : 1;
+ uint64_t dma1dbo : 1;
+ uint64_t dma2dbo : 1;
+ uint64_t dma3dbo : 1;
+ uint64_t dma4dbo : 1;
+ uint64_t dma0fi : 1;
+ uint64_t dma1fi : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t psldbof : 1;
+ uint64_t pidbof : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t c0_aeri : 1;
+ uint64_t crs0_er : 1;
+ uint64_t c0_se : 1;
+ uint64_t crs0_dr : 1;
+ uint64_t c0_wake : 1;
+ uint64_t c0_pmei : 1;
+ uint64_t c0_hpint : 1;
+ uint64_t c1_aeri : 1;
+ uint64_t crs1_er : 1;
+ uint64_t c1_se : 1;
+ uint64_t crs1_dr : 1;
+ uint64_t c1_wake : 1;
+ uint64_t c1_pmei : 1;
+ uint64_t c1_hpint : 1;
+ uint64_t c0_up_b0 : 1;
+ uint64_t c0_up_b1 : 1;
+ uint64_t c0_up_b2 : 1;
+ uint64_t c0_up_wi : 1;
+ uint64_t c0_up_bx : 1;
+ uint64_t c0_un_b0 : 1;
+ uint64_t c0_un_b1 : 1;
+ uint64_t c0_un_b2 : 1;
+ uint64_t c0_un_wi : 1;
+ uint64_t c0_un_bx : 1;
+ uint64_t c1_up_b0 : 1;
+ uint64_t c1_up_b1 : 1;
+ uint64_t c1_up_b2 : 1;
+ uint64_t c1_up_wi : 1;
+ uint64_t c1_up_bx : 1;
+ uint64_t c1_un_b0 : 1;
+ uint64_t c1_un_b1 : 1;
+ uint64_t c1_un_b2 : 1;
+ uint64_t c1_un_wi : 1;
+ uint64_t c1_un_bx : 1;
+ uint64_t c0_un_wf : 1;
+ uint64_t c1_un_wf : 1;
+ uint64_t c0_up_wf : 1;
+ uint64_t c1_up_wf : 1;
+ uint64_t c0_exc : 1;
+ uint64_t c1_exc : 1;
+ uint64_t c0_ldwn : 1;
+ uint64_t c1_ldwn : 1;
+ uint64_t int_a : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t mio_inta : 1;
+#endif
+ } s;
+ struct cvmx_npei_int_sum_s cn52xx;
+ struct cvmx_npei_int_sum_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta : 1; /**< Interrupt from MIO. */
+ uint64_t reserved_62_62 : 1;
+ uint64_t int_a : 1; /**< Set when a bit in the NPEI_INT_A_SUM register and
+ the corresponding bit in the NPEI_INT_A_ENB
+ register are both set. */
+ uint64_t c1_ldwn : 1; /**< Reset request due to link1 down status. */
+ uint64_t c0_ldwn : 1; /**< Reset request due to link0 down status. */
+ uint64_t c1_exc : 1; /**< Set when the PESC1_DBG_INFO register has a bit
+ set and its corresponding PESC1_DBG_INFO_EN bit
+ is set. */
+ uint64_t c0_exc : 1; /**< Set when the PESC0_DBG_INFO register has a bit
+ set and its corresponding PESC0_DBG_INFO_EN bit
+ is set. */
+ uint64_t c1_up_wf : 1; /**< Received Unsupported P-TLP for filtered window
+ register. Core1. */
+ uint64_t c0_up_wf : 1; /**< Received Unsupported P-TLP for filtered window
+ register. Core0. */
+ uint64_t c1_un_wf : 1; /**< Received Unsupported N-TLP for filtered window
+ register. Core1. */
+ uint64_t c0_un_wf : 1; /**< Received Unsupported N-TLP for filtered window
+ register. Core0. */
+ uint64_t c1_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar.
+ Core 1. */
+ uint64_t c1_un_wi : 1; /**< Received Unsupported N-TLP for Window Register.
+ Core 1. */
+ uint64_t c1_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2.
+ Core 1. */
+ uint64_t c1_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1.
+ Core 1. */
+ uint64_t c1_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0.
+ Core 1. */
+ uint64_t c1_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar.
+ Core 1. */
+ uint64_t c1_up_wi : 1; /**< Received Unsupported P-TLP for Window Register.
+ Core 1. */
+ uint64_t c1_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2.
+ Core 1. */
+ uint64_t c1_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1.
+ Core 1. */
+ uint64_t c1_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0.
+ Core 1. */
+ uint64_t c0_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar.
+ Core 0. */
+ uint64_t c0_un_wi : 1; /**< Received Unsupported N-TLP for Window Register.
+ Core 0. */
+ uint64_t c0_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2.
+ Core 0. */
+ uint64_t c0_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1.
+ Core 0. */
+ uint64_t c0_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0.
+ Core 0. */
+ uint64_t c0_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar.
+ Core 0. */
+ uint64_t c0_up_wi : 1; /**< Received Unsupported P-TLP for Window Register.
+ Core 0. */
+ uint64_t c0_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2.
+ Core 0. */
+ uint64_t c0_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1.
+ Core 0. */
+ uint64_t c0_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0.
+ Core 0. */
+ uint64_t c1_hpint : 1; /**< Hot-Plug Interrupt.
+ Pcie Core 1 (hp_int).
+ This interrupt will only be generated when
+ PCIERC1_CFG034[DLLS_C] is set. Hot plug is
+ not supported. */
+ uint64_t c1_pmei : 1; /**< PME Interrupt.
+ Pcie Core 1. (cfg_pme_int) */
+ uint64_t c1_wake : 1; /**< Wake up from Power Management Unit.
+ Pcie Core 1. (wake_n)
+ Octeon will never generate this interrupt. */
+ uint64_t crs1_dr : 1; /**< Had a CRS when Retries were disabled. */
+ uint64_t c1_se : 1; /**< System Error, RC Mode Only.
+ Pcie Core 1. (cfg_sys_err_rc) */
+ uint64_t crs1_er : 1; /**< Had a CRS Timeout when Retries were enabled. */
+ uint64_t c1_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only.
+ Pcie Core 1. */
+ uint64_t c0_hpint : 1; /**< Hot-Plug Interrupt.
+ Pcie Core 0 (hp_int).
+ This interrupt will only be generated when
+ PCIERC0_CFG034[DLLS_C] is set. Hot plug is
+ not supported. */
+ uint64_t c0_pmei : 1; /**< PME Interrupt.
+ Pcie Core 0. (cfg_pme_int) */
+ uint64_t c0_wake : 1; /**< Wake up from Power Management Unit.
+ Pcie Core 0. (wake_n)
+ Octeon will never generate this interrupt. */
+ uint64_t crs0_dr : 1; /**< Had a CRS when Retries were disabled. */
+ uint64_t c0_se : 1; /**< System Error, RC Mode Only.
+ Pcie Core 0. (cfg_sys_err_rc) */
+ uint64_t crs0_er : 1; /**< Had a CRS Timeout when Retries were enabled. */
+ uint64_t c0_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only.
+ Pcie Core 0 (cfg_aer_rc_err_int). */
+ uint64_t reserved_15_18 : 4;
+ uint64_t dtime1 : 1; /**< Whenever NPEI_DMA_CNTS[DMA1] is not 0, the
+ DMA_CNT1 timer increments every core clock. When
+ DMA_CNT1 timer exceeds NPEI_DMA1_INT_LEVEL[TIME],
+ this bit is set. Writing a '1' to this bit also
+ clears the DMA_CNT1 timer. */
+ uint64_t dtime0 : 1; /**< Whenever NPEI_DMA_CNTS[DMA0] is not 0, the
+ DMA_CNT0 timer increments every core clock. When
+ DMA_CNT0 timer exceeds NPEI_DMA0_INT_LEVEL[TIME],
+ this bit is set. Writing a '1' to this bit also
+ clears the DMA_CNT0 timer. */
+ uint64_t dcnt1 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA1] was/is
+ greater than NPEI_DMA1_INT_LEVEL[CNT]. */
+ uint64_t dcnt0 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA0] was/is
+ greater than NPEI_DMA0_INT_LEVEL[CNT]. */
+ uint64_t dma1fi : 1; /**< DMA1 set Forced Interrupt. */
+ uint64_t dma0fi : 1; /**< DMA0 set Forced Interrupt. */
+ uint64_t reserved_8_8 : 1;
+ uint64_t dma3dbo : 1; /**< DMA3 doorbell count overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t dma2dbo : 1; /**< DMA2 doorbell count overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t dma1dbo : 1; /**< DMA1 doorbell count overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t dma0dbo : 1; /**< DMA0 doorbell count overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t iob2big : 1; /**< A requested IOBDMA is too large. */
+ uint64_t bar0_to : 1; /**< BAR0 R/W to an NCB device did not receive
+ read-data/commit in 0xffff core clocks. */
+ uint64_t rml_wto : 1; /**< RML write did not get a commit in 0xffff core clocks. */
+ uint64_t rml_rto : 1; /**< RML read did not return data in 0xffff core clocks. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t dma0dbo : 1;
+ uint64_t dma1dbo : 1;
+ uint64_t dma2dbo : 1;
+ uint64_t dma3dbo : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t dma0fi : 1;
+ uint64_t dma1fi : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t reserved_15_18 : 4;
+ uint64_t c0_aeri : 1;
+ uint64_t crs0_er : 1;
+ uint64_t c0_se : 1;
+ uint64_t crs0_dr : 1;
+ uint64_t c0_wake : 1;
+ uint64_t c0_pmei : 1;
+ uint64_t c0_hpint : 1;
+ uint64_t c1_aeri : 1;
+ uint64_t crs1_er : 1;
+ uint64_t c1_se : 1;
+ uint64_t crs1_dr : 1;
+ uint64_t c1_wake : 1;
+ uint64_t c1_pmei : 1;
+ uint64_t c1_hpint : 1;
+ uint64_t c0_up_b0 : 1;
+ uint64_t c0_up_b1 : 1;
+ uint64_t c0_up_b2 : 1;
+ uint64_t c0_up_wi : 1;
+ uint64_t c0_up_bx : 1;
+ uint64_t c0_un_b0 : 1;
+ uint64_t c0_un_b1 : 1;
+ uint64_t c0_un_b2 : 1;
+ uint64_t c0_un_wi : 1;
+ uint64_t c0_un_bx : 1;
+ uint64_t c1_up_b0 : 1;
+ uint64_t c1_up_b1 : 1;
+ uint64_t c1_up_b2 : 1;
+ uint64_t c1_up_wi : 1;
+ uint64_t c1_up_bx : 1;
+ uint64_t c1_un_b0 : 1;
+ uint64_t c1_un_b1 : 1;
+ uint64_t c1_un_b2 : 1;
+ uint64_t c1_un_wi : 1;
+ uint64_t c1_un_bx : 1;
+ uint64_t c0_un_wf : 1;
+ uint64_t c1_un_wf : 1;
+ uint64_t c0_up_wf : 1;
+ uint64_t c1_up_wf : 1;
+ uint64_t c0_exc : 1;
+ uint64_t c1_exc : 1;
+ uint64_t c0_ldwn : 1;
+ uint64_t c1_ldwn : 1;
+ uint64_t int_a : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t mio_inta : 1;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_int_sum_s cn56xx;
+ struct cvmx_npei_int_sum_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta : 1; /**< Interrupt from MIO. */
+ uint64_t reserved_61_62 : 2;
+ uint64_t c1_ldwn : 1; /**< Reset request due to link1 down status. */
+ uint64_t c0_ldwn : 1; /**< Reset request due to link0 down status. */
+ uint64_t c1_exc : 1; /**< Set when the PESC1_DBG_INFO register has a bit
+ set and its corresponding PESC1_DBG_INFO_EN bit
+ is set. */
+ uint64_t c0_exc : 1; /**< Set when the PESC0_DBG_INFO register has a bit
+ set and its corresponding PESC0_DBG_INFO_EN bit
+ is set. */
+ uint64_t c1_up_wf : 1; /**< Received Unsupported P-TLP for filtered window
+ register. Core1. */
+ uint64_t c0_up_wf : 1; /**< Received Unsupported P-TLP for filtered window
+ register. Core0. */
+ uint64_t c1_un_wf : 1; /**< Received Unsupported N-TLP for filtered window
+ register. Core1. */
+ uint64_t c0_un_wf : 1; /**< Received Unsupported N-TLP for filtered window
+ register. Core0. */
+ uint64_t c1_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar.
+ Core 1. */
+ uint64_t c1_un_wi : 1; /**< Received Unsupported N-TLP for Window Register.
+ Core 1. */
+ uint64_t c1_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2.
+ Core 1. */
+ uint64_t c1_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1.
+ Core 1. */
+ uint64_t c1_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0.
+ Core 1. */
+ uint64_t c1_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar.
+ Core 1. */
+ uint64_t c1_up_wi : 1; /**< Received Unsupported P-TLP for Window Register.
+ Core 1. */
+ uint64_t c1_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2.
+ Core 1. */
+ uint64_t c1_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1.
+ Core 1. */
+ uint64_t c1_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0.
+ Core 1. */
+ uint64_t c0_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar.
+ Core 0. */
+ uint64_t c0_un_wi : 1; /**< Received Unsupported N-TLP for Window Register.
+ Core 0. */
+ uint64_t c0_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2.
+ Core 0. */
+ uint64_t c0_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1.
+ Core 0. */
+ uint64_t c0_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0.
+ Core 0. */
+ uint64_t c0_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar.
+ Core 0. */
+ uint64_t c0_up_wi : 1; /**< Received Unsupported P-TLP for Window Register.
+ Core 0. */
+ uint64_t c0_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2.
+ Core 0. */
+ uint64_t c0_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1.
+ Core 0. */
+ uint64_t c0_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0.
+ Core 0. */
+ uint64_t c1_hpint : 1; /**< Hot-Plug Interrupt.
+ Pcie Core 1 (hp_int).
+ This interrupt will only be generated when
+ PCIERC1_CFG034[DLLS_C] is generated. Hot plug is
+ not supported. */
+ uint64_t c1_pmei : 1; /**< PME Interrupt.
+ Pcie Core 1. (cfg_pme_int) */
+ uint64_t c1_wake : 1; /**< Wake up from Power Management Unit.
+ Pcie Core 1. (wake_n)
+ Octeon will never generate this interrupt. */
+ uint64_t reserved_29_29 : 1;
+ uint64_t c1_se : 1; /**< System Error, RC Mode Only.
+ Pcie Core 1. (cfg_sys_err_rc) */
+ uint64_t reserved_27_27 : 1;
+ uint64_t c1_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only.
+ Pcie Core 1. */
+ uint64_t c0_hpint : 1; /**< Hot-Plug Interrupt.
+ Pcie Core 0 (hp_int).
+ This interrupt will only be generated when
+ PCIERC0_CFG034[DLLS_C] is generated. Hot plug is
+ not supported. */
+ uint64_t c0_pmei : 1; /**< PME Interrupt.
+ Pcie Core 0. (cfg_pme_int) */
+ uint64_t c0_wake : 1; /**< Wake up from Power Management Unit.
+ Pcie Core 0. (wake_n)
+ Octeon will never generate this interrupt. */
+ uint64_t reserved_22_22 : 1;
+ uint64_t c0_se : 1; /**< System Error, RC Mode Only.
+ Pcie Core 0. (cfg_sys_err_rc) */
+ uint64_t reserved_20_20 : 1;
+ uint64_t c0_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only.
+ Pcie Core 0 (cfg_aer_rc_err_int). */
+ uint64_t reserved_15_18 : 4;
+ uint64_t dtime1 : 1; /**< Whenever NPEI_DMA_CNTS[DMA1] is not 0, the
+ DMA_CNT1 timer increments every core clock. When
+ DMA_CNT1 timer exceeds NPEI_DMA1_INT_LEVEL[TIME],
+ this bit is set. Writing a '1' to this bit also
+ clears the DMA_CNT1 timer. */
+ uint64_t dtime0 : 1; /**< Whenever NPEI_DMA_CNTS[DMA0] is not 0, the
+ DMA_CNT0 timer increments every core clock. When
+ DMA_CNT0 timer exceeds NPEI_DMA0_INT_LEVEL[TIME],
+ this bit is set. Writing a '1' to this bit also
+ clears the DMA_CNT0 timer. */
+ uint64_t dcnt1 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA1] was/is
+ greater than NPEI_DMA1_INT_LEVEL[CNT]. */
+ uint64_t dcnt0 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA0] was/is
+ greater than NPEI_DMA0_INT_LEVEL[CNT]. */
+ uint64_t dma1fi : 1; /**< DMA1 set Forced Interrupt. */
+ uint64_t dma0fi : 1; /**< DMA0 set Forced Interrupt. */
+ uint64_t dma4dbo : 1; /**< DMA4 doorbell overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t dma3dbo : 1; /**< DMA3 doorbell overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t dma2dbo : 1; /**< DMA2 doorbell overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t dma1dbo : 1; /**< DMA1 doorbell overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t dma0dbo : 1; /**< DMA0 doorbell overflow.
+ Bit[32] of the doorbell count was set. */
+ uint64_t iob2big : 1; /**< A requested IOBDMA is too large. */
+ uint64_t bar0_to : 1; /**< BAR0 R/W to a NCB device did not receive
+ read-data/commit in 0xffff core clocks. */
+ uint64_t rml_wto : 1; /**< RML write did not get commit in 0xffff core clocks. */
+ uint64_t rml_rto : 1; /**< RML read did not return data in 0xffff core clocks. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t dma0dbo : 1;
+ uint64_t dma1dbo : 1;
+ uint64_t dma2dbo : 1;
+ uint64_t dma3dbo : 1;
+ uint64_t dma4dbo : 1;
+ uint64_t dma0fi : 1;
+ uint64_t dma1fi : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t reserved_15_18 : 4;
+ uint64_t c0_aeri : 1;
+ uint64_t reserved_20_20 : 1;
+ uint64_t c0_se : 1;
+ uint64_t reserved_22_22 : 1;
+ uint64_t c0_wake : 1;
+ uint64_t c0_pmei : 1;
+ uint64_t c0_hpint : 1;
+ uint64_t c1_aeri : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t c1_se : 1;
+ uint64_t reserved_29_29 : 1;
+ uint64_t c1_wake : 1;
+ uint64_t c1_pmei : 1;
+ uint64_t c1_hpint : 1;
+ uint64_t c0_up_b0 : 1;
+ uint64_t c0_up_b1 : 1;
+ uint64_t c0_up_b2 : 1;
+ uint64_t c0_up_wi : 1;
+ uint64_t c0_up_bx : 1;
+ uint64_t c0_un_b0 : 1;
+ uint64_t c0_un_b1 : 1;
+ uint64_t c0_un_b2 : 1;
+ uint64_t c0_un_wi : 1;
+ uint64_t c0_un_bx : 1;
+ uint64_t c1_up_b0 : 1;
+ uint64_t c1_up_b1 : 1;
+ uint64_t c1_up_b2 : 1;
+ uint64_t c1_up_wi : 1;
+ uint64_t c1_up_bx : 1;
+ uint64_t c1_un_b0 : 1;
+ uint64_t c1_un_b1 : 1;
+ uint64_t c1_un_b2 : 1;
+ uint64_t c1_un_wi : 1;
+ uint64_t c1_un_bx : 1;
+ uint64_t c0_un_wf : 1;
+ uint64_t c1_un_wf : 1;
+ uint64_t c0_up_wf : 1;
+ uint64_t c1_up_wf : 1;
+ uint64_t c0_exc : 1;
+ uint64_t c1_exc : 1;
+ uint64_t c0_ldwn : 1;
+ uint64_t c1_ldwn : 1;
+ uint64_t reserved_61_62 : 2;
+ uint64_t mio_inta : 1;
+#endif
+ } cn56xxp1;
+};
+typedef union cvmx_npei_int_sum cvmx_npei_int_sum_t;
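+
+/*
+ * Example (editor's sketch, not part of the original header): acknowledging
+ * the DMA timer bits of NPEI_INT_SUM through this union. DTIME0/DTIME1 are
+ * write-1-to-clear, and the write also zeroes the corresponding DMA_CNT
+ * timer. The CVMX_PEXP_NPEI_INT_SUM address macro and the cvmx_read_csr()/
+ * cvmx_write_csr() accessors from cvmx.h are assumed to be in scope.
+ */
+static inline void example_npei_ack_dma_timers(void)
+{
+    cvmx_npei_int_sum_t sum, ack;
+
+    sum.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM);
+    ack.u64 = 0;
+    ack.s.dtime0 = sum.s.dtime0;   /* writing '1' clears the bit and */
+    ack.s.dtime1 = sum.s.dtime1;   /* clears the DMA_CNT timer */
+    if (ack.u64)
+        cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, ack.u64);
+}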
+
+/**
+ * cvmx_npei_int_sum2
+ *
+ * NPEI_INTERRUPT_SUM2 = NPEI Interrupt Summary2 Register
+ *
+ * This is a read-only copy of the NPEI_INTERRUPT_SUM register with bit variances.
+ */
+union cvmx_npei_int_sum2 {
+ uint64_t u64;
+ struct cvmx_npei_int_sum2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t reserved_62_62 : 1;
+ uint64_t int_a : 1; /**< Set when a bit in the NPEI_INT_A_SUM register and
+ the corresponding bit in the NPEI_INT_A_ENB2
+ register is set. */
+ uint64_t c1_ldwn : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_ldwn : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_exc : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_exc : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_up_wf : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_up_wf : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_un_wf : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_un_wf : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_un_bx : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_un_wi : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_un_b2 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_un_b1 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_un_b0 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_up_bx : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_up_wi : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_up_b2 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_up_b1 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_up_b0 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_un_bx : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_un_wi : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_un_b2 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_un_b1 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_un_b0 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_up_bx : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_up_wi : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_up_b2 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_up_b1 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_up_b0 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_hpint : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_pmei : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_wake : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t crs1_dr : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_se : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t crs1_er : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c1_aeri : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_hpint : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_pmei : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_wake : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t crs0_dr : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_se : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t crs0_er : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t c0_aeri : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t reserved_15_18 : 4;
+ uint64_t dtime1 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t dtime0 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t dcnt1 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t dcnt0 : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t dma1fi : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t dma0fi : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t reserved_8_8 : 1;
+ uint64_t dma3dbo : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t dma2dbo : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t dma1dbo : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t dma0dbo : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t iob2big : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t bar0_to : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t rml_wto : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+ uint64_t rml_rto : 1; /**< Equal to the corresponding bit in the
+ NPEI_INT_SUM register. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t dma0dbo : 1;
+ uint64_t dma1dbo : 1;
+ uint64_t dma2dbo : 1;
+ uint64_t dma3dbo : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t dma0fi : 1;
+ uint64_t dma1fi : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t reserved_15_18 : 4;
+ uint64_t c0_aeri : 1;
+ uint64_t crs0_er : 1;
+ uint64_t c0_se : 1;
+ uint64_t crs0_dr : 1;
+ uint64_t c0_wake : 1;
+ uint64_t c0_pmei : 1;
+ uint64_t c0_hpint : 1;
+ uint64_t c1_aeri : 1;
+ uint64_t crs1_er : 1;
+ uint64_t c1_se : 1;
+ uint64_t crs1_dr : 1;
+ uint64_t c1_wake : 1;
+ uint64_t c1_pmei : 1;
+ uint64_t c1_hpint : 1;
+ uint64_t c0_up_b0 : 1;
+ uint64_t c0_up_b1 : 1;
+ uint64_t c0_up_b2 : 1;
+ uint64_t c0_up_wi : 1;
+ uint64_t c0_up_bx : 1;
+ uint64_t c0_un_b0 : 1;
+ uint64_t c0_un_b1 : 1;
+ uint64_t c0_un_b2 : 1;
+ uint64_t c0_un_wi : 1;
+ uint64_t c0_un_bx : 1;
+ uint64_t c1_up_b0 : 1;
+ uint64_t c1_up_b1 : 1;
+ uint64_t c1_up_b2 : 1;
+ uint64_t c1_up_wi : 1;
+ uint64_t c1_up_bx : 1;
+ uint64_t c1_un_b0 : 1;
+ uint64_t c1_un_b1 : 1;
+ uint64_t c1_un_b2 : 1;
+ uint64_t c1_un_wi : 1;
+ uint64_t c1_un_bx : 1;
+ uint64_t c0_un_wf : 1;
+ uint64_t c1_un_wf : 1;
+ uint64_t c0_up_wf : 1;
+ uint64_t c1_up_wf : 1;
+ uint64_t c0_exc : 1;
+ uint64_t c1_exc : 1;
+ uint64_t c0_ldwn : 1;
+ uint64_t c1_ldwn : 1;
+ uint64_t int_a : 1;
+ uint64_t reserved_62_62 : 1;
+ uint64_t mio_inta : 1;
+#endif
+ } s;
+ struct cvmx_npei_int_sum2_s cn52xx;
+ struct cvmx_npei_int_sum2_s cn52xxp1;
+ struct cvmx_npei_int_sum2_s cn56xx;
+};
+typedef union cvmx_npei_int_sum2 cvmx_npei_int_sum2_t;
+
+/**
+ * cvmx_npei_last_win_rdata0
+ *
+ * NPEI_LAST_WIN_RDATA0 = NPEI Last Window Read Data Port0
+ *
+ * The data from the last initiated window read.
+ */
+union cvmx_npei_last_win_rdata0 {
+ uint64_t u64;
+ struct cvmx_npei_last_win_rdata0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Last window read data. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_npei_last_win_rdata0_s cn52xx;
+ struct cvmx_npei_last_win_rdata0_s cn52xxp1;
+ struct cvmx_npei_last_win_rdata0_s cn56xx;
+ struct cvmx_npei_last_win_rdata0_s cn56xxp1;
+};
+typedef union cvmx_npei_last_win_rdata0 cvmx_npei_last_win_rdata0_t;
+
+/**
+ * cvmx_npei_last_win_rdata1
+ *
+ * NPEI_LAST_WIN_RDATA1 = NPEI Last Window Read Data Port1
+ *
+ * The data from the last initiated window read.
+ */
+union cvmx_npei_last_win_rdata1 {
+ uint64_t u64;
+ struct cvmx_npei_last_win_rdata1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Last window read data. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_npei_last_win_rdata1_s cn52xx;
+ struct cvmx_npei_last_win_rdata1_s cn52xxp1;
+ struct cvmx_npei_last_win_rdata1_s cn56xx;
+ struct cvmx_npei_last_win_rdata1_s cn56xxp1;
+};
+typedef union cvmx_npei_last_win_rdata1 cvmx_npei_last_win_rdata1_t;
+
+/**
+ * cvmx_npei_mem_access_ctl
+ *
+ * NPEI_MEM_ACCESS_CTL = NPEI's Memory Access Control
+ *
+ * Contains control for access to the PCIe address space.
+ */
+union cvmx_npei_mem_access_ctl {
+ uint64_t u64;
+ struct cvmx_npei_mem_access_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t max_word : 4; /**< The maximum number of words to merge into a single
+ write operation from the PPs to the PCIe. Legal
+ values are 1 to 16, where a '0' is treated as 16. */
+ uint64_t timer : 10; /**< When the NPEI starts a PP to PCIe write it waits
+ no longer than the value of TIMER in eclks to
+ merge additional writes from the PPs into 1
+ large write. Legal values are 1 to
+ 1024, where a value of '0' is treated as 1024. */
+#else
+ uint64_t timer : 10;
+ uint64_t max_word : 4;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_npei_mem_access_ctl_s cn52xx;
+ struct cvmx_npei_mem_access_ctl_s cn52xxp1;
+ struct cvmx_npei_mem_access_ctl_s cn56xx;
+ struct cvmx_npei_mem_access_ctl_s cn56xxp1;
+};
+typedef union cvmx_npei_mem_access_ctl cvmx_npei_mem_access_ctl_t;
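+
+/*
+ * Example (editor's sketch, not part of the original header): programming
+ * the write-merge limits described above. Note the encodings: a MAX_WORD of
+ * 0 is treated as 16 words and a TIMER of 0 as 1024 eclks, so an all-zero
+ * write selects the largest merge window. CVMX_PEXP_NPEI_MEM_ACCESS_CTL and
+ * cvmx_write_csr() are assumed from elsewhere in the SDK.
+ */
+static inline void example_npei_set_merge_limits(unsigned words, unsigned eclks)
+{
+    cvmx_npei_mem_access_ctl_t ctl;
+
+    ctl.u64 = 0;
+    ctl.s.max_word = words & 0xf;   /* 1-16 words; 0 encodes 16 */
+    ctl.s.timer = eclks & 0x3ff;    /* 1-1024 eclks; 0 encodes 1024 */
+    cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, ctl.u64);
+}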
+
+/**
+ * cvmx_npei_mem_access_subid#
+ *
+ * NPEI_MEM_ACCESS_SUBIDX = NPEI Memory Access SubidX Register
+ *
+ * Contains address index and control bits for access to memory from Core PPs.
+ */
+union cvmx_npei_mem_access_subidx {
+ uint64_t u64;
+ struct cvmx_npei_mem_access_subidx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63 : 22;
+ uint64_t zero : 1; /**< Causes all byte reads to be zero length reads.
+ Returns to the EXEC a zero for all read data. */
+ uint64_t port : 2; /**< Port the request is sent to. */
+ uint64_t nmerge : 1; /**< No merging is allowed in this window. */
+ uint64_t esr : 2; /**< Endian-swap for Reads. */
+ uint64_t esw : 2; /**< Endian-swap for Writes. */
+ uint64_t nsr : 1; /**< No Snoop for Reads. */
+ uint64_t nsw : 1; /**< No Snoop for Writes. */
+ uint64_t ror : 1; /**< Relaxed Ordering for Reads. */
+ uint64_t row : 1; /**< Relaxed Ordering for Writes. */
+ uint64_t ba : 30; /**< PCIe Address Bits <63:34>. */
+#else
+ uint64_t ba : 30;
+ uint64_t row : 1;
+ uint64_t ror : 1;
+ uint64_t nsw : 1;
+ uint64_t nsr : 1;
+ uint64_t esw : 2;
+ uint64_t esr : 2;
+ uint64_t nmerge : 1;
+ uint64_t port : 2;
+ uint64_t zero : 1;
+ uint64_t reserved_42_63 : 22;
+#endif
+ } s;
+ struct cvmx_npei_mem_access_subidx_s cn52xx;
+ struct cvmx_npei_mem_access_subidx_s cn52xxp1;
+ struct cvmx_npei_mem_access_subidx_s cn56xx;
+ struct cvmx_npei_mem_access_subidx_s cn56xxp1;
+};
+typedef union cvmx_npei_mem_access_subidx cvmx_npei_mem_access_subidx_t;
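+
+/*
+ * Example (editor's sketch, not part of the original header): since BA only
+ * holds PCIe address bits <63:34>, a window is positioned by shifting the
+ * target bus address right by 34. The CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(offset)
+ * address macro is assumed.
+ */
+static inline void example_npei_map_subid(int subid, uint64_t pcie_addr, int port)
+{
+    cvmx_npei_mem_access_subidx_t sub;
+
+    sub.u64 = 0;
+    sub.s.port = port & 3;          /* destination PCIe port */
+    sub.s.ba = pcie_addr >> 34;     /* PCIe address bits <63:34> */
+    cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(subid), sub.u64);
+}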
+
+/**
+ * cvmx_npei_msi_enb0
+ *
+ * NPEI_MSI_ENB0 = NPEI MSI Enable0
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV0.
+ */
+union cvmx_npei_msi_enb0 {
+ uint64_t u64;
+ struct cvmx_npei_msi_enb0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb : 64; /**< Enables bit [63:0] of NPEI_MSI_RCV0. */
+#else
+ uint64_t enb : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_enb0_s cn52xx;
+ struct cvmx_npei_msi_enb0_s cn52xxp1;
+ struct cvmx_npei_msi_enb0_s cn56xx;
+ struct cvmx_npei_msi_enb0_s cn56xxp1;
+};
+typedef union cvmx_npei_msi_enb0 cvmx_npei_msi_enb0_t;
+
+/**
+ * cvmx_npei_msi_enb1
+ *
+ * NPEI_MSI_ENB1 = NPEI MSI Enable1
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV1.
+ */
+union cvmx_npei_msi_enb1 {
+ uint64_t u64;
+ struct cvmx_npei_msi_enb1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb : 64; /**< Enables bit [63:0] of NPEI_MSI_RCV1. */
+#else
+ uint64_t enb : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_enb1_s cn52xx;
+ struct cvmx_npei_msi_enb1_s cn52xxp1;
+ struct cvmx_npei_msi_enb1_s cn56xx;
+ struct cvmx_npei_msi_enb1_s cn56xxp1;
+};
+typedef union cvmx_npei_msi_enb1 cvmx_npei_msi_enb1_t;
+
+/**
+ * cvmx_npei_msi_enb2
+ *
+ * NPEI_MSI_ENB2 = NPEI MSI Enable2
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV2.
+ */
+union cvmx_npei_msi_enb2 {
+ uint64_t u64;
+ struct cvmx_npei_msi_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb : 64; /**< Enables bit [63:0] of NPEI_MSI_RCV2. */
+#else
+ uint64_t enb : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_enb2_s cn52xx;
+ struct cvmx_npei_msi_enb2_s cn52xxp1;
+ struct cvmx_npei_msi_enb2_s cn56xx;
+ struct cvmx_npei_msi_enb2_s cn56xxp1;
+};
+typedef union cvmx_npei_msi_enb2 cvmx_npei_msi_enb2_t;
+
+/**
+ * cvmx_npei_msi_enb3
+ *
+ * NPEI_MSI_ENB3 = NPEI MSI Enable3
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV3.
+ */
+union cvmx_npei_msi_enb3 {
+ uint64_t u64;
+ struct cvmx_npei_msi_enb3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb : 64; /**< Enables bit [63:0] of NPEI_MSI_RCV3. */
+#else
+ uint64_t enb : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_enb3_s cn52xx;
+ struct cvmx_npei_msi_enb3_s cn52xxp1;
+ struct cvmx_npei_msi_enb3_s cn56xx;
+ struct cvmx_npei_msi_enb3_s cn56xxp1;
+};
+typedef union cvmx_npei_msi_enb3 cvmx_npei_msi_enb3_t;
+
+/**
+ * cvmx_npei_msi_rcv0
+ *
+ * NPEI_MSI_RCV0 = NPEI MSI Receive0
+ *
+ * Contains bits [63:0] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv0 {
+ uint64_t u64;
+ struct cvmx_npei_msi_rcv0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr : 64; /**< Bits 63-0 of the 256 bits of MSI interrupt. */
+#else
+ uint64_t intr : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_rcv0_s cn52xx;
+ struct cvmx_npei_msi_rcv0_s cn52xxp1;
+ struct cvmx_npei_msi_rcv0_s cn56xx;
+ struct cvmx_npei_msi_rcv0_s cn56xxp1;
+};
+typedef union cvmx_npei_msi_rcv0 cvmx_npei_msi_rcv0_t;
+
+/**
+ * cvmx_npei_msi_rcv1
+ *
+ * NPEI_MSI_RCV1 = NPEI MSI Receive1
+ *
+ * Contains bits [127:64] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv1 {
+ uint64_t u64;
+ struct cvmx_npei_msi_rcv1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr : 64; /**< Bits 127-64 of the 256 bits of MSI interrupt. */
+#else
+ uint64_t intr : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_rcv1_s cn52xx;
+ struct cvmx_npei_msi_rcv1_s cn52xxp1;
+ struct cvmx_npei_msi_rcv1_s cn56xx;
+ struct cvmx_npei_msi_rcv1_s cn56xxp1;
+};
+typedef union cvmx_npei_msi_rcv1 cvmx_npei_msi_rcv1_t;
+
+/**
+ * cvmx_npei_msi_rcv2
+ *
+ * NPEI_MSI_RCV2 = NPEI MSI Receive2
+ *
+ * Contains bits [191:128] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv2 {
+ uint64_t u64;
+ struct cvmx_npei_msi_rcv2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr : 64; /**< Bits 191-128 of the 256 bits of MSI interrupt. */
+#else
+ uint64_t intr : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_rcv2_s cn52xx;
+ struct cvmx_npei_msi_rcv2_s cn52xxp1;
+ struct cvmx_npei_msi_rcv2_s cn56xx;
+ struct cvmx_npei_msi_rcv2_s cn56xxp1;
+};
+typedef union cvmx_npei_msi_rcv2 cvmx_npei_msi_rcv2_t;
+
+/**
+ * cvmx_npei_msi_rcv3
+ *
+ * NPEI_MSI_RCV3 = NPEI MSI Receive3
+ *
+ * Contains bits [255:192] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv3 {
+ uint64_t u64;
+ struct cvmx_npei_msi_rcv3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr : 64; /**< Bits 255-192 of the 256 bits of MSI interrupt. */
+#else
+ uint64_t intr : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_rcv3_s cn52xx;
+ struct cvmx_npei_msi_rcv3_s cn52xxp1;
+ struct cvmx_npei_msi_rcv3_s cn56xx;
+ struct cvmx_npei_msi_rcv3_s cn56xxp1;
+};
+typedef union cvmx_npei_msi_rcv3 cvmx_npei_msi_rcv3_t;
+
+/**
+ * cvmx_npei_msi_rd_map
+ *
+ * NPEI_MSI_RD_MAP = NPEI MSI Read MAP
+ *
+ * Used to read the mapping function of the NPEI_PCIE_MSI_RCV to NPEI_MSI_RCV registers.
+ */
+union cvmx_npei_msi_rd_map {
+ uint64_t u64;
+ struct cvmx_npei_msi_rd_map_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t rd_int : 8; /**< The value of the map at the location PREVIOUSLY
+ written to the MSI_INT field of this register. */
+ uint64_t msi_int : 8; /**< Selects the value that would be received when the
+ NPEI_PCIE_MSI_RCV register is written. */
+#else
+ uint64_t msi_int : 8;
+ uint64_t rd_int : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_npei_msi_rd_map_s cn52xx;
+ struct cvmx_npei_msi_rd_map_s cn52xxp1;
+ struct cvmx_npei_msi_rd_map_s cn56xx;
+ struct cvmx_npei_msi_rd_map_s cn56xxp1;
+};
+typedef union cvmx_npei_msi_rd_map cvmx_npei_msi_rd_map_t;
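+
+/*
+ * Example (editor's sketch, not part of the original header): reading back
+ * one map entry. A write with MSI_INT selects the entry; a following read
+ * returns that entry's mapping in RD_INT. CVMX_PEXP_NPEI_MSI_RD_MAP is
+ * assumed.
+ */
+static inline int example_npei_read_msi_map(int msi_vector)
+{
+    cvmx_npei_msi_rd_map_t map;
+
+    map.u64 = 0;
+    map.s.msi_int = msi_vector & 0xff;  /* select the entry to inspect */
+    cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RD_MAP, map.u64);
+    map.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_RD_MAP);
+    return map.s.rd_int;                /* mapping of the selected entry */
+}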
+
+/**
+ * cvmx_npei_msi_w1c_enb0
+ *
+ * NPEI_MSI_W1C_ENB0 = NPEI MSI Write 1 To Clear Enable0
+ *
+ * Used to clear bits in NPEI_MSI_ENB0. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb0 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1c_enb0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr : 64; /**< A write of '1' to a vector will clear the
+ corresponding bit in NPEI_MSI_ENB0.
+ A read to this address will return 0. */
+#else
+ uint64_t clr : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_w1c_enb0_s cn52xx;
+ struct cvmx_npei_msi_w1c_enb0_s cn56xx;
+};
+typedef union cvmx_npei_msi_w1c_enb0 cvmx_npei_msi_w1c_enb0_t;
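+
+/*
+ * Example (editor's sketch, not part of the original header): the W1C view
+ * lets one enable bit be cleared atomically, with no read-modify-write of
+ * NPEI_MSI_ENB0. CVMX_PEXP_NPEI_MSI_W1C_ENB0 is assumed.
+ */
+static inline void example_npei_msi_disable(int bit)  /* bit 0-63 */
+{
+    cvmx_npei_msi_w1c_enb0_t w1c;
+
+    w1c.u64 = 1ull << (bit & 63);  /* clears only this bit of NPEI_MSI_ENB0 */
+    cvmx_write_csr(CVMX_PEXP_NPEI_MSI_W1C_ENB0, w1c.u64);
+}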
+
+/**
+ * cvmx_npei_msi_w1c_enb1
+ *
+ * NPEI_MSI_W1C_ENB1 = NPEI MSI Write 1 To Clear Enable1
+ *
+ * Used to clear bits in NPEI_MSI_ENB1. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb1 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1c_enb1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr : 64; /**< A write of '1' to a vector will clear the
+ corresponding bit in NPEI_MSI_ENB1.
+ A read to this address will return 0. */
+#else
+ uint64_t clr : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_w1c_enb1_s cn52xx;
+ struct cvmx_npei_msi_w1c_enb1_s cn56xx;
+};
+typedef union cvmx_npei_msi_w1c_enb1 cvmx_npei_msi_w1c_enb1_t;
+
+/**
+ * cvmx_npei_msi_w1c_enb2
+ *
+ * NPEI_MSI_W1C_ENB2 = NPEI MSI Write 1 To Clear Enable2
+ *
+ * Used to clear bits in NPEI_MSI_ENB2. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb2 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1c_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr : 64; /**< A write of '1' to a vector will clear the
+ corresponding bit in NPEI_MSI_ENB2.
+ A read to this address will return 0. */
+#else
+ uint64_t clr : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_w1c_enb2_s cn52xx;
+ struct cvmx_npei_msi_w1c_enb2_s cn56xx;
+};
+typedef union cvmx_npei_msi_w1c_enb2 cvmx_npei_msi_w1c_enb2_t;
+
+/**
+ * cvmx_npei_msi_w1c_enb3
+ *
+ * NPEI_MSI_W1C_ENB3 = NPEI MSI Write 1 To Clear Enable3
+ *
+ * Used to clear bits in NPEI_MSI_ENB3. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb3 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1c_enb3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr : 64; /**< A write of '1' to a vector will clear the
+ corresponding bit in NPEI_MSI_ENB3.
+ A read to this address will return 0. */
+#else
+ uint64_t clr : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_w1c_enb3_s cn52xx;
+ struct cvmx_npei_msi_w1c_enb3_s cn56xx;
+};
+typedef union cvmx_npei_msi_w1c_enb3 cvmx_npei_msi_w1c_enb3_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb0
+ *
+ * NPEI_MSI_W1S_ENB0 = NPEI MSI Write 1 To Set Enable0
+ *
+ * Used to set bits in NPEI_MSI_ENB0. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb0 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1s_enb0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set : 64; /**< A write of '1' to a vector will set the
+ corresponding bit in NPEI_MSI_ENB0.
+ A read to this address will return 0. */
+#else
+ uint64_t set : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_w1s_enb0_s cn52xx;
+ struct cvmx_npei_msi_w1s_enb0_s cn56xx;
+};
+typedef union cvmx_npei_msi_w1s_enb0 cvmx_npei_msi_w1s_enb0_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb1
+ *
+ * NPEI_MSI_W1S_ENB1 = NPEI MSI Write 1 To Set Enable1
+ *
+ * Used to set bits in NPEI_MSI_ENB1. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb1 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1s_enb1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set : 64; /**< A write of '1' to a vector will set the
+ corresponding bit in NPEI_MSI_ENB1.
+ A read to this address will return 0. */
+#else
+ uint64_t set : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_w1s_enb1_s cn52xx;
+ struct cvmx_npei_msi_w1s_enb1_s cn56xx;
+};
+typedef union cvmx_npei_msi_w1s_enb1 cvmx_npei_msi_w1s_enb1_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb2
+ *
+ * NPEI_MSI_W1S_ENB2 = NPEI MSI Write 1 To Set Enable2
+ *
+ * Used to set bits in NPEI_MSI_ENB2. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb2 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1s_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set : 64; /**< A write of '1' to a vector will set the
+ corresponding bit in NPEI_MSI_ENB2.
+ A read to this address will return 0. */
+#else
+ uint64_t set : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_w1s_enb2_s cn52xx;
+ struct cvmx_npei_msi_w1s_enb2_s cn56xx;
+};
+typedef union cvmx_npei_msi_w1s_enb2 cvmx_npei_msi_w1s_enb2_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb3
+ *
+ * NPEI_MSI_W1S_ENB3 = NPEI MSI Write 1 To Set Enable3
+ *
+ * Used to set bits in NPEI_MSI_ENB3. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb3 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1s_enb3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set : 64; /**< A write of '1' to a vector will set the
+ corresponding bit in NPEI_MSI_ENB3.
+ A read to this address will return 0. */
+#else
+ uint64_t set : 64;
+#endif
+ } s;
+ struct cvmx_npei_msi_w1s_enb3_s cn52xx;
+ struct cvmx_npei_msi_w1s_enb3_s cn56xx;
+};
+typedef union cvmx_npei_msi_w1s_enb3 cvmx_npei_msi_w1s_enb3_t;
+
+/**
+ * cvmx_npei_msi_wr_map
+ *
+ * NPEI_MSI_WR_MAP = NPEI MSI Write MAP
+ *
+ * Used to write the mapping function of the NPEI_PCIE_MSI_RCV to NPEI_MSI_RCV registers.
+ */
+union cvmx_npei_msi_wr_map {
+ uint64_t u64;
+ struct cvmx_npei_msi_wr_map_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t ciu_int : 8; /**< Selects which bit in the NPEI_MSI_RCV# (0-255)
+ will be set when the value specified in the
+ MSI_INT of this register is received during a
+ write to the NPEI_PCIE_MSI_RCV register. */
+ uint64_t msi_int : 8; /**< Selects the value that would be received when the
+ NPEI_PCIE_MSI_RCV register is written. */
+#else
+ uint64_t msi_int : 8;
+ uint64_t ciu_int : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_npei_msi_wr_map_s cn52xx;
+ struct cvmx_npei_msi_wr_map_s cn52xxp1;
+ struct cvmx_npei_msi_wr_map_s cn56xx;
+ struct cvmx_npei_msi_wr_map_s cn56xxp1;
+};
+typedef union cvmx_npei_msi_wr_map cvmx_npei_msi_wr_map_t;
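+
+/*
+ * Example (editor's sketch, not part of the original header): programming
+ * one map entry so that an inbound MSI value steers to a chosen
+ * NPEI_MSI_RCV# bit. CVMX_PEXP_NPEI_MSI_WR_MAP is assumed.
+ */
+static inline void example_npei_map_msi(int msi_value, int rcv_bit)
+{
+    cvmx_npei_msi_wr_map_t map;
+
+    map.u64 = 0;
+    map.s.msi_int = msi_value & 0xff;  /* value written to NPEI_PCIE_MSI_RCV */
+    map.s.ciu_int = rcv_bit & 0xff;    /* NPEI_MSI_RCV# bit (0-255) to set */
+    cvmx_write_csr(CVMX_PEXP_NPEI_MSI_WR_MAP, map.u64);
+}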
+
+/**
+ * cvmx_npei_pcie_credit_cnt
+ *
+ * NPEI_PCIE_CREDIT_CNT = NPEI PCIE Credit Count
+ *
+ * Contains the number of credits for the pcie port FIFOs used by the NPEI. This value needs to be set BEFORE PCIe traffic
+ * flow from NPEI to PCIE Ports starts. A write to this register will cause the credit counts in the NPEI for the two
+ * PCIE ports to be reset to the value in this register.
+ */
+union cvmx_npei_pcie_credit_cnt {
+ uint64_t u64;
+ struct cvmx_npei_pcie_credit_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t p1_ccnt : 8; /**< Port1 C-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p1_ncnt : 8; /**< Port1 N-TLP FIFO Credits.
+ Legal values are 0x5 to 0x10. */
+ uint64_t p1_pcnt : 8; /**< Port1 P-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p0_ccnt : 8; /**< Port0 C-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p0_ncnt : 8; /**< Port0 N-TLP FIFO Credits.
+ Legal values are 0x5 to 0x10. */
+ uint64_t p0_pcnt : 8; /**< Port0 P-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+#else
+ uint64_t p0_pcnt : 8;
+ uint64_t p0_ncnt : 8;
+ uint64_t p0_ccnt : 8;
+ uint64_t p1_pcnt : 8;
+ uint64_t p1_ncnt : 8;
+ uint64_t p1_ccnt : 8;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_npei_pcie_credit_cnt_s cn52xx;
+ struct cvmx_npei_pcie_credit_cnt_s cn56xx;
+};
+typedef union cvmx_npei_pcie_credit_cnt cvmx_npei_pcie_credit_cnt_t;
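+
+/*
+ * Example (editor's sketch, not part of the original header): loading the
+ * documented maximum credits for both ports before any NPEI-to-PCIe traffic
+ * starts, per the note above. CVMX_PEXP_NPEI_PCIE_CREDIT_CNT is assumed.
+ */
+static inline void example_npei_set_max_credits(void)
+{
+    cvmx_npei_pcie_credit_cnt_t cnt;
+
+    cnt.u64 = 0;
+    cnt.s.p0_pcnt = cnt.s.p1_pcnt = 0x80;  /* P-TLP: legal 0x25-0x80 */
+    cnt.s.p0_ncnt = cnt.s.p1_ncnt = 0x10;  /* N-TLP: legal 0x5-0x10 */
+    cnt.s.p0_ccnt = cnt.s.p1_ccnt = 0x80;  /* C-TLP: legal 0x25-0x80 */
+    cvmx_write_csr(CVMX_PEXP_NPEI_PCIE_CREDIT_CNT, cnt.u64);
+}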
+
+/**
+ * cvmx_npei_pcie_msi_rcv
+ *
+ * NPEI_PCIE_MSI_RCV = NPEI PCIe MSI Receive
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv {
+ uint64_t u64;
+ struct cvmx_npei_pcie_msi_rcv_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t intr : 8; /**< A write to this register will result in a bit in
+ one of the NPEI_MSI_RCV# registers being set.
+ Which bit is set depends on the mapping previously
+ written using the NPEI_MSI_WR_MAP register, or on
+ the reset value of the map if never written. */
+#else
+ uint64_t intr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_npei_pcie_msi_rcv_s cn52xx;
+ struct cvmx_npei_pcie_msi_rcv_s cn52xxp1;
+ struct cvmx_npei_pcie_msi_rcv_s cn56xx;
+ struct cvmx_npei_pcie_msi_rcv_s cn56xxp1;
+};
+typedef union cvmx_npei_pcie_msi_rcv cvmx_npei_pcie_msi_rcv_t;
+
+/**
+ * cvmx_npei_pcie_msi_rcv_b1
+ *
+ * NPEI_PCIE_MSI_RCV_B1 = NPEI PCIe MSI Receive Byte 1
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv_b1 {
+ uint64_t u64;
+ struct cvmx_npei_pcie_msi_rcv_b1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t intr : 8; /**< A write to this register will result in a bit in
+ one of the NPEI_MSI_RCV# registers being set.
+ Which bit is set depends on the mapping previously
+ written using the NPEI_MSI_WR_MAP register, or on
+ the reset value of the map if never written. */
+ uint64_t reserved_0_7 : 8;
+#else
+ uint64_t reserved_0_7 : 8;
+ uint64_t intr : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_npei_pcie_msi_rcv_b1_s cn52xx;
+ struct cvmx_npei_pcie_msi_rcv_b1_s cn52xxp1;
+ struct cvmx_npei_pcie_msi_rcv_b1_s cn56xx;
+ struct cvmx_npei_pcie_msi_rcv_b1_s cn56xxp1;
+};
+typedef union cvmx_npei_pcie_msi_rcv_b1 cvmx_npei_pcie_msi_rcv_b1_t;
+
+/**
+ * cvmx_npei_pcie_msi_rcv_b2
+ *
+ * NPEI_PCIE_MSI_RCV_B2 = NPEI PCIe MSI Receive Byte 2
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv_b2 {
+ uint64_t u64;
+ struct cvmx_npei_pcie_msi_rcv_b2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t intr : 8; /**< A write to this register will result in a bit in
+ one of the NPEI_MSI_RCV# registers being set.
+ Which bit is set depends on the mapping previously
+ written using the NPEI_MSI_WR_MAP register, or on
+ the reset value of the map if never written. */
+ uint64_t reserved_0_15 : 16;
+#else
+ uint64_t reserved_0_15 : 16;
+ uint64_t intr : 8;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_npei_pcie_msi_rcv_b2_s cn52xx;
+ struct cvmx_npei_pcie_msi_rcv_b2_s cn52xxp1;
+ struct cvmx_npei_pcie_msi_rcv_b2_s cn56xx;
+ struct cvmx_npei_pcie_msi_rcv_b2_s cn56xxp1;
+};
+typedef union cvmx_npei_pcie_msi_rcv_b2 cvmx_npei_pcie_msi_rcv_b2_t;
+
+/**
+ * cvmx_npei_pcie_msi_rcv_b3
+ *
+ * NPEI_PCIE_MSI_RCV_B3 = NPEI PCIe MSI Receive Byte 3
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv_b3 {
+ uint64_t u64;
+ struct cvmx_npei_pcie_msi_rcv_b3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t intr : 8; /**< A write to this register will result in a bit in
+ one of the NPEI_MSI_RCV# registers being set.
+ Which bit is set depends on the mapping previously
+ written using the NPEI_MSI_WR_MAP register, or on
+ the reset value of the map if never written. */
+ uint64_t reserved_0_23 : 24;
+#else
+ uint64_t reserved_0_23 : 24;
+ uint64_t intr : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pcie_msi_rcv_b3_s cn52xx;
+ struct cvmx_npei_pcie_msi_rcv_b3_s cn52xxp1;
+ struct cvmx_npei_pcie_msi_rcv_b3_s cn56xx;
+ struct cvmx_npei_pcie_msi_rcv_b3_s cn56xxp1;
+};
+typedef union cvmx_npei_pcie_msi_rcv_b3 cvmx_npei_pcie_msi_rcv_b3_t;
+
+/**
+ * cvmx_npei_pkt#_cnts
+ *
+ * NPEI_PKT[0..31]_CNTS = NPEI Packet ring# Counts
+ *
+ * The counters for output rings.
+ */
+union cvmx_npei_pktx_cnts {
+ uint64_t u64;
+ struct cvmx_npei_pktx_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t timer : 22; /**< Timer incremented every 1024 core clocks
+ when NPEI_PKT#_CNTS[CNT] is non-zero. Field
+ cleared when NPEI_PKT#_CNTS[CNT] goes to 0.
+ Field is also cleared when NPEI_PKT_TIME_INT is
+ cleared.
+ The first increment of this count can occur
+ between 0 and 1023 core clocks. */
+ uint64_t cnt : 32; /**< Ring counter. This field is incremented as
+ packets are sent out and decremented in response to
+ writes to this field.
+ When NPEI_PKT_OUT_BMODE is '0' a value of 1 is
+ added to the register for each packet, when '1'
+ and the info-pointer is NOT used the length of the
+ packet plus 8 is added, when '1' and info-pointer
+ mode IS used the packet length is added to this
+ field. */
+#else
+ uint64_t cnt : 32;
+ uint64_t timer : 22;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_npei_pktx_cnts_s cn52xx;
+ struct cvmx_npei_pktx_cnts_s cn56xx;
+};
+typedef union cvmx_npei_pktx_cnts cvmx_npei_pktx_cnts_t;
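+
+/*
+ * Example (editor's sketch, not part of the original header): because a
+ * write to CNT subtracts from the ring counter, an interrupt handler can
+ * read the count and write the same value back to acknowledge every packet
+ * it is about to service. CVMX_PEXP_NPEI_PKTX_CNTS(ring) is assumed.
+ */
+static inline uint32_t example_npei_ack_ring(int ring)
+{
+    cvmx_npei_pktx_cnts_t cnts, ack;
+
+    cnts.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_PKTX_CNTS(ring));
+    ack.u64 = 0;
+    ack.s.cnt = cnts.s.cnt;     /* written value is subtracted from CNT */
+    cvmx_write_csr(CVMX_PEXP_NPEI_PKTX_CNTS(ring), ack.u64);
+    return cnts.s.cnt;          /* packets to service */
+}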
+
+/**
+ * cvmx_npei_pkt#_in_bp
+ *
+ * NPEI_PKT[0..31]_IN_BP = NPEI Packet ring# Input Backpressure
+ *
+ * The counters and thresholds for input packets to apply backpressure to processing of the packets.
+ */
+union cvmx_npei_pktx_in_bp {
+ uint64_t u64;
+ struct cvmx_npei_pktx_in_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wmark : 32; /**< When CNT is greater than this threshold no more
+ packets will be processed for this ring.
+ When writing this field of the NPEI_PKT#_IN_BP
+ register, use a 4-byte write so as to not write
+ any other field of this register. */
+ uint64_t cnt : 32; /**< Ring counter. This field is incremented by one
+ whenever OCTEON receives, buffers, and creates a
+ work queue entry for a packet that arrives by the
+ corresponding input ring. A write to this field
+ will be subtracted from the field value.
+ When writing this field of the NPEI_PKT#_IN_BP
+ register, use a 4-byte write so as to not write
+ any other field of this register. */
+#else
+ uint64_t cnt : 32;
+ uint64_t wmark : 32;
+#endif
+ } s;
+ struct cvmx_npei_pktx_in_bp_s cn52xx;
+ struct cvmx_npei_pktx_in_bp_s cn56xx;
+};
+typedef union cvmx_npei_pktx_in_bp cvmx_npei_pktx_in_bp_t;
+
+/**
+ * cvmx_npei_pkt#_instr_baddr
+ *
+ * NPEI_PKT[0..31]_INSTR_BADDR = NPEI Packet ring# Instruction Base Address
+ *
+ * Start of Instruction for input packets.
+ */
+union cvmx_npei_pktx_instr_baddr {
+ uint64_t u64;
+ struct cvmx_npei_pktx_instr_baddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 61; /**< Base address for Instructions. */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t addr : 61;
+#endif
+ } s;
+ struct cvmx_npei_pktx_instr_baddr_s cn52xx;
+ struct cvmx_npei_pktx_instr_baddr_s cn56xx;
+};
+typedef union cvmx_npei_pktx_instr_baddr cvmx_npei_pktx_instr_baddr_t;
+
+/**
+ * cvmx_npei_pkt#_instr_baoff_dbell
+ *
+ * NPEI_PKT[0..31]_INSTR_BAOFF_DBELL = NPEI Packet ring# Instruction Base Address Offset and Doorbell
+ *
+ * The doorbell and base address offset for next read.
+ */
+union cvmx_npei_pktx_instr_baoff_dbell {
+ uint64_t u64;
+ struct cvmx_npei_pktx_instr_baoff_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t aoff : 32; /**< The offset from the NPEI_PKT[0..31]_INSTR_BADDR
+ where the next instruction will be read. */
+ uint64_t dbell : 32; /**< Instruction doorbell count. Writes to this field
+ will increment the value here. Reads will return
+ present value. A write of 0xffffffff will set the
+ DBELL and AOFF fields to '0'. */
+#else
+ uint64_t dbell : 32;
+ uint64_t aoff : 32;
+#endif
+ } s;
+ struct cvmx_npei_pktx_instr_baoff_dbell_s cn52xx;
+ struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xx;
+};
+typedef union cvmx_npei_pktx_instr_baoff_dbell cvmx_npei_pktx_instr_baoff_dbell_t;
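+
+/*
+ * Example (editor's sketch, not part of the original header): ringing the
+ * instruction doorbell. Writes to DBELL add to the count, so software queues
+ * instructions in the ring first and then reports how many it added.
+ * CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(ring) is assumed.
+ */
+static inline void example_npei_ring_instr_doorbell(int ring, uint32_t added)
+{
+    cvmx_npei_pktx_instr_baoff_dbell_t db;
+
+    db.u64 = 0;
+    db.s.dbell = added;     /* added to the doorbell count */
+    cvmx_write_csr(CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(ring), db.u64);
+}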
+
+/**
+ * cvmx_npei_pkt#_instr_fifo_rsize
+ *
+ * NPEI_PKT[0..31]_INSTR_FIFO_RSIZE = NPEI Packet ring# Instruction FIFO and Ring Size.
+ *
+ * FIFO fields and ring size for instructions.
+ */
+union cvmx_npei_pktx_instr_fifo_rsize {
+ uint64_t u64;
+ struct cvmx_npei_pktx_instr_fifo_rsize_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t max : 9; /**< Max FIFO size. */
+ uint64_t rrp : 9; /**< FIFO read pointer. */
+ uint64_t wrp : 9; /**< FIFO write pointer. */
+ uint64_t fcnt : 5; /**< FIFO count. */
+ uint64_t rsize : 32; /**< Instruction ring size. */
+#else
+ uint64_t rsize : 32;
+ uint64_t fcnt : 5;
+ uint64_t wrp : 9;
+ uint64_t rrp : 9;
+ uint64_t max : 9;
+#endif
+ } s;
+ struct cvmx_npei_pktx_instr_fifo_rsize_s cn52xx;
+ struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xx;
+};
+typedef union cvmx_npei_pktx_instr_fifo_rsize cvmx_npei_pktx_instr_fifo_rsize_t;
+
+/**
+ * cvmx_npei_pkt#_instr_header
+ *
+ * NPEI_PKT[0..31]_INSTR_HEADER = NPEI Packet ring# Instruction Header.
+ *
+ * Values used to build the input packet header.
+ */
+union cvmx_npei_pktx_instr_header {
+ uint64_t u64;
+ struct cvmx_npei_pktx_instr_header_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t pbp : 1; /**< Enable Packet-by-packet mode. */
+ uint64_t reserved_38_42 : 5;
+ uint64_t rparmode : 2; /**< Parse Mode. Used when packet is raw and PBP==0. */
+ uint64_t reserved_35_35 : 1;
+ uint64_t rskp_len : 7; /**< Skip Length. Used when packet is raw and PBP==0. */
+ uint64_t reserved_22_27 : 6;
+ uint64_t use_ihdr : 1; /**< When set '1' the instruction header will be sent
+ as part of the packet data, regardless of the
+ value of bit [63] of the instruction header.
+ USE_IHDR must be set whenever PBP is set. */
+ uint64_t reserved_16_20 : 5;
+ uint64_t par_mode : 2; /**< Parse Mode. Used when USE_IHDR is set and packet
+ is not raw and PBP is not set. */
+ uint64_t reserved_13_13 : 1;
+ uint64_t skp_len : 7; /**< Skip Length. Used when USE_IHDR is set and packet
+ is not raw and PBP is not set. */
+ uint64_t reserved_0_5 : 6;
+#else
+ uint64_t reserved_0_5 : 6;
+ uint64_t skp_len : 7;
+ uint64_t reserved_13_13 : 1;
+ uint64_t par_mode : 2;
+ uint64_t reserved_16_20 : 5;
+ uint64_t use_ihdr : 1;
+ uint64_t reserved_22_27 : 6;
+ uint64_t rskp_len : 7;
+ uint64_t reserved_35_35 : 1;
+ uint64_t rparmode : 2;
+ uint64_t reserved_38_42 : 5;
+ uint64_t pbp : 1;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_npei_pktx_instr_header_s cn52xx;
+ struct cvmx_npei_pktx_instr_header_s cn56xx;
+};
+typedef union cvmx_npei_pktx_instr_header cvmx_npei_pktx_instr_header_t;
+
+/**
+ * cvmx_npei_pkt#_slist_baddr
+ *
+ * NPEI_PKT[0..31]_SLIST_BADDR = NPEI Packet ring# Scatter List Base Address
+ *
+ * Start of Scatter List for output packet pointers - MUST be 16-byte aligned.
+ */
+union cvmx_npei_pktx_slist_baddr {
+ uint64_t u64;
+ struct cvmx_npei_pktx_slist_baddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 60; /**< Base address for scatter list pointers. */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t addr : 60;
+#endif
+ } s;
+ struct cvmx_npei_pktx_slist_baddr_s cn52xx;
+ struct cvmx_npei_pktx_slist_baddr_s cn56xx;
+};
+typedef union cvmx_npei_pktx_slist_baddr cvmx_npei_pktx_slist_baddr_t;
+
+/**
+ * cvmx_npei_pkt#_slist_baoff_dbell
+ *
+ * NPEI_PKT[0..31]_SLIST_BAOFF_DBELL = NPEI Packet ring# Scatter List Base Address Offset and Doorbell
+ *
+ * The doorbell and base address offset for next read.
+ */
+union cvmx_npei_pktx_slist_baoff_dbell {
+ uint64_t u64;
+ struct cvmx_npei_pktx_slist_baoff_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t aoff : 32; /**< The offset from the NPEI_PKT[0..31]_SLIST_BADDR
+ where the next SList pointer will be read.
+ A write of 0xFFFFFFFF to the DBELL field will
+ clear DBELL and AOFF */
+ uint64_t dbell : 32; /**< Scatter list doorbell count. Writes to this field
+ will increment the value here. Reads will return
+ present value. The value of this field is
+ decremented as read operations are ISSUED for
+ scatter pointers.
+ A write of 0xFFFFFFFF will clear DBELL and AOFF */
+#else
+ uint64_t dbell : 32;
+ uint64_t aoff : 32;
+#endif
+ } s;
+ struct cvmx_npei_pktx_slist_baoff_dbell_s cn52xx;
+ struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xx;
+};
+typedef union cvmx_npei_pktx_slist_baoff_dbell cvmx_npei_pktx_slist_baoff_dbell_t;
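+
+/*
+ * Example (editor's sketch, not part of the original header): the special
+ * DBELL value 0xFFFFFFFF resets both DBELL and AOFF, which is how software
+ * re-initializes a scatter-list ring.
+ * CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(ring) is assumed.
+ */
+static inline void example_npei_reset_slist_ring(int ring)
+{
+    cvmx_npei_pktx_slist_baoff_dbell_t db;
+
+    db.u64 = 0;
+    db.s.dbell = 0xffffffff;   /* clears DBELL and AOFF */
+    cvmx_write_csr(CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(ring), db.u64);
+}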
+
+/**
+ * cvmx_npei_pkt#_slist_fifo_rsize
+ *
+ * NPEI_PKT[0..31]_SLIST_FIFO_RSIZE = NPEI Packet ring# Scatter List FIFO and Ring Size.
+ *
+ * The number of scatter pointer pairs in the scatter list.
+ */
+union cvmx_npei_pktx_slist_fifo_rsize {
+ uint64_t u64;
+ struct cvmx_npei_pktx_slist_fifo_rsize_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rsize : 32; /**< The number of scatter pointer pairs contained in
+ the scatter list ring. */
+#else
+ uint64_t rsize : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pktx_slist_fifo_rsize_s cn52xx;
+ struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xx;
+};
+typedef union cvmx_npei_pktx_slist_fifo_rsize cvmx_npei_pktx_slist_fifo_rsize_t;
+
+/**
+ * cvmx_npei_pkt_cnt_int
+ *
+ * NPEI_PKT_CNT_INT = NPEI Packet Counter Interrupt
+ *
+ * The packet rings that are interrupting because of Packet Counters.
+ */
+union cvmx_npei_pkt_cnt_int {
+ uint64_t u64;
+ struct cvmx_npei_pkt_cnt_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t port : 32; /**< Bit vector corresponding to ring number is set when
+ NPEI_PKT#_CNTS[CNT] is greater
+ than NPEI_PKT_INT_LEVELS[CNT]. */
+#else
+ uint64_t port : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_cnt_int_s cn52xx;
+ struct cvmx_npei_pkt_cnt_int_s cn56xx;
+};
+typedef union cvmx_npei_pkt_cnt_int cvmx_npei_pkt_cnt_int_t;
+
+/**
+ * cvmx_npei_pkt_cnt_int_enb
+ *
+ * NPEI_PKT_CNT_INT_ENB = NPEI Packet Counter Interrupt Enable
+ *
+ * Enable for the packet rings that are interrupting because of Packet Counters.
+ */
+union cvmx_npei_pkt_cnt_int_enb {
+ uint64_t u64;
+ struct cvmx_npei_pkt_cnt_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t port : 32; /**< Bit vector corresponding to ring number when set
+ allows NPEI_PKT_CNT_INT to generate an interrupt. */
+#else
+ uint64_t port : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_cnt_int_enb_s cn52xx;
+ struct cvmx_npei_pkt_cnt_int_enb_s cn56xx;
+};
+typedef union cvmx_npei_pkt_cnt_int_enb cvmx_npei_pkt_cnt_int_enb_t;
+
+/**
+ * cvmx_npei_pkt_data_out_es
+ *
+ * NPEI_PKT_DATA_OUT_ES = NPEI's Packet Data Out Endian Swap
+ *
+ * The Endian Swap for writing Data Out.
+ */
+union cvmx_npei_pkt_data_out_es {
+ uint64_t u64;
+ struct cvmx_npei_pkt_data_out_es_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t es : 64; /**< The endian swap mode for Packet rings 0 through 31.
+ Two bits are used per ring (i.e. ring 0 [1:0],
+ ring 1 [3:2], ....). */
+#else
+ uint64_t es : 64;
+#endif
+ } s;
+ struct cvmx_npei_pkt_data_out_es_s cn52xx;
+ struct cvmx_npei_pkt_data_out_es_s cn56xx;
+};
+typedef union cvmx_npei_pkt_data_out_es cvmx_npei_pkt_data_out_es_t;
+
+/**
+ * cvmx_npei_pkt_data_out_ns
+ *
+ * NPEI_PKT_DATA_OUT_NS = NPEI's Packet Data Out No Snoop
+ *
+ * The NS field for the TLP when writing packet data.
+ */
+union cvmx_npei_pkt_data_out_ns {
+ uint64_t u64;
+ struct cvmx_npei_pkt_data_out_ns_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t nsr : 32; /**< When asserted '1' the vector bit corresponding
+ to the Packet-ring will enable NS in TLP header. */
+#else
+ uint64_t nsr : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_data_out_ns_s cn52xx;
+ struct cvmx_npei_pkt_data_out_ns_s cn56xx;
+};
+typedef union cvmx_npei_pkt_data_out_ns cvmx_npei_pkt_data_out_ns_t;
+
+/**
+ * cvmx_npei_pkt_data_out_ror
+ *
+ * NPEI_PKT_DATA_OUT_ROR = NPEI's Packet Data Out Relaxed Ordering
+ *
+ * The ROR field for the TLP when writing Packet Data.
+ */
+union cvmx_npei_pkt_data_out_ror {
+ uint64_t u64;
+ struct cvmx_npei_pkt_data_out_ror_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ror : 32; /**< When asserted '1' the vector bit corresponding
+ to the Packet-ring will enable ROR in TLP header. */
+#else
+ uint64_t ror : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_data_out_ror_s cn52xx;
+ struct cvmx_npei_pkt_data_out_ror_s cn56xx;
+};
+typedef union cvmx_npei_pkt_data_out_ror cvmx_npei_pkt_data_out_ror_t;
+
+/**
+ * cvmx_npei_pkt_dpaddr
+ *
+ * NPEI_PKT_DPADDR = NPEI's Packet Data Pointer Addr
+ *
+ * Used to determine address and attributes for packet data writes.
+ */
+union cvmx_npei_pkt_dpaddr {
+ uint64_t u64;
+ struct cvmx_npei_pkt_dpaddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t dptr : 32; /**< When asserted '1' the vector bit corresponding
+ to the Packet-ring will use:
+ the address[63:60] to write packet data
+ comes from the DPTR[63:60] in the scatter-list
+ pair and the RO, NS, ES values come from the O0_ES,
+ O0_NS, O0_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O0_ES[1:0], O0_NS, O0_RO. */
+#else
+ uint64_t dptr : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_dpaddr_s cn52xx;
+ struct cvmx_npei_pkt_dpaddr_s cn56xx;
+};
+typedef union cvmx_npei_pkt_dpaddr cvmx_npei_pkt_dpaddr_t;
+
+/**
+ * cvmx_npei_pkt_in_bp
+ *
+ * NPEI_PKT_IN_BP = NPEI Packet Input Backpressure
+ *
+ * Which input rings have backpressure applied.
+ */
+union cvmx_npei_pkt_in_bp {
+ uint64_t u64;
+ struct cvmx_npei_pkt_in_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bp : 32; /**< A packet input ring that has its count greater
+ than its WMARK will have backpressure applied.
+ Each of the 32 bits corresponds to an input ring.
+ When '1' that ring has backpressure applied and
+ will fetch no more instructions, but will process
+ any previously fetched instructions. */
+#else
+ uint64_t bp : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_in_bp_s cn52xx;
+ struct cvmx_npei_pkt_in_bp_s cn56xx;
+};
+typedef union cvmx_npei_pkt_in_bp cvmx_npei_pkt_in_bp_t;
+
+/**
+ * cvmx_npei_pkt_in_done#_cnts
+ *
+ * NPEI_PKT_IN_DONE[0..31]_CNTS = NPEI Instruction Done ring# Counts
+ *
+ * Counters for instructions completed on Input rings.
+ */
+union cvmx_npei_pkt_in_donex_cnts {
+ uint64_t u64;
+ struct cvmx_npei_pkt_in_donex_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< This field is incremented by '1' when an instruction
+ is completed. This field is incremented as the
+ last of the data is read from the PCIe. */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_in_donex_cnts_s cn52xx;
+ struct cvmx_npei_pkt_in_donex_cnts_s cn56xx;
+};
+typedef union cvmx_npei_pkt_in_donex_cnts cvmx_npei_pkt_in_donex_cnts_t;
+
+/**
+ * cvmx_npei_pkt_in_instr_counts
+ *
+ * NPEI_PKT_IN_INSTR_COUNTS = NPEI Packet Input Instruction Counts
+ *
+ * Keeps track of the number of instructions read into the FIFO and Packets sent to IPD.
+ */
+union cvmx_npei_pkt_in_instr_counts {
+ uint64_t u64;
+ struct cvmx_npei_pkt_in_instr_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wr_cnt : 32; /**< Shows the number of packets sent to the IPD. */
+ uint64_t rd_cnt : 32; /**< Shows the number of instructions that have had
+ reads issued for them. */
+#else
+ uint64_t rd_cnt : 32;
+ uint64_t wr_cnt : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_in_instr_counts_s cn52xx;
+ struct cvmx_npei_pkt_in_instr_counts_s cn56xx;
+};
+typedef union cvmx_npei_pkt_in_instr_counts cvmx_npei_pkt_in_instr_counts_t;
+
+/**
+ * cvmx_npei_pkt_in_pcie_port
+ *
+ * NPEI_PKT_IN_PCIE_PORT = NPEI's Packet In To PCIe Port Assignment
+ *
+ * Assigns Packet Input rings to PCIe ports.
+ */
+union cvmx_npei_pkt_in_pcie_port {
+ uint64_t u64;
+ struct cvmx_npei_pkt_in_pcie_port_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pp : 64; /**< The PCIe port that the Packet ring is
+ assigned to. Two bits are used per ring (i.e. ring 0
+ [1:0], ring 1 [3:2], ....). A value of '0' means
+ that the Packet ring is assigned to PCIe Port 0, a
+ '1' PCIe Port 1; '2' and '3' are reserved. */
+#else
+ uint64_t pp : 64;
+#endif
+ } s;
+ struct cvmx_npei_pkt_in_pcie_port_s cn52xx;
+ struct cvmx_npei_pkt_in_pcie_port_s cn56xx;
+};
+typedef union cvmx_npei_pkt_in_pcie_port cvmx_npei_pkt_in_pcie_port_t;
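+
+/*
+ * Example (editor's sketch, not part of the original header): updating the
+ * 2-bit field for one ring while leaving the other rings' assignments
+ * intact. Only values 0 and 1 are legal; 2 and 3 are reserved.
+ * CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT is assumed.
+ */
+static inline void example_npei_assign_ring_port(int ring, int pcie_port)
+{
+    uint64_t pp = cvmx_read_csr(CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT);
+
+    pp &= ~(3ull << (ring * 2));                   /* clear ring's 2-bit field */
+    pp |= (uint64_t)(pcie_port & 1) << (ring * 2); /* assign port 0 or 1 */
+    cvmx_write_csr(CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT, pp);
+}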
+
+/**
+ * cvmx_npei_pkt_input_control
+ *
+ * NPEI_PKT_INPUT_CONTROL = NPEI's Packet Input Control
+ *
+ * Control for reads for gather list and instructions.
+ */
+union cvmx_npei_pkt_input_control {
+ uint64_t u64;
+ struct cvmx_npei_pkt_input_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t pkt_rr : 1; /**< When set '1' the input packet selection will be
+ made with a Round Robin arbitration. When '0'
+ the input packet ring is fixed in priority,
+ where the lower ring number has higher priority. */
+ uint64_t pbp_dhi : 13; /**< Field used in calculating a DPTR when [PBP]
+ is set. */
+ uint64_t d_nsr : 1; /**< Enables '1' NoSnoop for reading of
+ gather data. */
+ uint64_t d_esr : 2; /**< The Endian-Swap-Mode for reading of
+ gather data. */
+ uint64_t d_ror : 1; /**< Enables '1' Relaxed Ordering for reading of
+ gather data. */
+ uint64_t use_csr : 1; /**< When set '1' the csr value will be used for
+ ROR, ESR, and NSR. When clear '0' the value in
+ DPTR will be used. In turn the bits not used for
+ ROR, ESR, and NSR, will be used for bits [63:60]
+ of the address used to fetch packet data. */
+ uint64_t nsr : 1; /**< Enables '1' NoSnoop for reading of
+ gather list and gather instruction. */
+ uint64_t esr : 2; /**< The Endian-Swap-Mode for reading of
+ gather list and gather instruction. */
+ uint64_t ror : 1; /**< Enables '1' Relaxed Ordering for reading of
+ gather list and gather instruction. */
+#else
+ uint64_t ror : 1;
+ uint64_t esr : 2;
+ uint64_t nsr : 1;
+ uint64_t use_csr : 1;
+ uint64_t d_ror : 1;
+ uint64_t d_esr : 2;
+ uint64_t d_nsr : 1;
+ uint64_t pbp_dhi : 13;
+ uint64_t pkt_rr : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_npei_pkt_input_control_s cn52xx;
+ struct cvmx_npei_pkt_input_control_s cn56xx;
+};
+typedef union cvmx_npei_pkt_input_control cvmx_npei_pkt_input_control_t;
+
+/**
+ * cvmx_npei_pkt_instr_enb
+ *
+ * NPEI_PKT_INSTR_ENB = NPEI's Packet Instruction Enable
+ *
+ * Enables the instruction fetch for a Packet-ring.
+ */
+union cvmx_npei_pkt_instr_enb {
+ uint64_t u64;
+ struct cvmx_npei_pkt_instr_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t enb : 32; /**< When asserted '1' the vector bit corresponding
+ to the Packet-ring is enabled. */
+#else
+ uint64_t enb : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_instr_enb_s cn52xx;
+ struct cvmx_npei_pkt_instr_enb_s cn56xx;
+};
+typedef union cvmx_npei_pkt_instr_enb cvmx_npei_pkt_instr_enb_t;
+
+/**
+ * cvmx_npei_pkt_instr_rd_size
+ *
+ * NPEI_PKT_INSTR_RD_SIZE = NPEI Instruction Read Size
+ *
+ * The number of instruction allowed to be read at one time.
+ */
+union cvmx_npei_pkt_instr_rd_size {
+ uint64_t u64;
+ struct cvmx_npei_pkt_instr_rd_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rdsize : 64; /**< Number of instructions to be read in one PCIe read
+ request for the 4 PKO ports x 8 rings. Every two
+ bits (i.e. 1:0, 3:2, 5:4..) are assigned to the
+ port/ring combinations.
+ - 15:0 PKOPort0,Ring 7..0 31:16 PKOPort1,Ring 7..0
+ - 47:32 PKOPort2,Ring 7..0 63:48 PKOPort3,Ring 7..0
+ Two-bit values are:
+ 0 - 1 Instruction
+ 1 - 2 Instructions
+ 2 - 3 Instructions
+ 3 - 4 Instructions */
+#else
+ uint64_t rdsize : 64;
+#endif
+ } s;
+ struct cvmx_npei_pkt_instr_rd_size_s cn52xx;
+ struct cvmx_npei_pkt_instr_rd_size_s cn56xx;
+};
+typedef union cvmx_npei_pkt_instr_rd_size cvmx_npei_pkt_instr_rd_size_t;
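+
+/* Editor's illustrative sketch, not part of the auto-generated file: encoding
+ * the per-port/ring read size in NPEI_PKT_INSTR_RD_SIZE[RDSIZE]. Each PKO port
+ * owns 16 bits (8 rings x 2 bits), so the field for (port, ring) starts at bit
+ * port*16 + ring*2, and a two-bit value of N encodes N+1 instructions per
+ * PCIe read. */
+static inline uint64_t cvmx_npei_example_set_rd_size(uint64_t rdsize,
+                                                     unsigned port,
+                                                     unsigned ring,
+                                                     unsigned instrs)
+{
+    unsigned shift = port * 16 + ring * 2;             /* two bits per port/ring */
+    rdsize &= ~(3ull << shift);                        /* clear the old encoding */
+    rdsize |= ((uint64_t)((instrs - 1) & 3)) << shift; /* 1..4 -> codes 0..3 */
+    return rdsize;
+}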
+
+/**
+ * cvmx_npei_pkt_instr_size
+ *
+ * NPEI_PKT_INSTR_SIZE = NPEI's Packet Instruction Size
+ *
+ * Determines if instructions are 64 or 32 bytes in size for a Packet-ring.
+ */
+union cvmx_npei_pkt_instr_size {
+ uint64_t u64;
+ struct cvmx_npei_pkt_instr_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t is_64b : 32; /**< When asserted '1' the vector bit corresponding
+ to the Packet-ring is a 64-byte instruction. */
+#else
+ uint64_t is_64b : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_instr_size_s cn52xx;
+ struct cvmx_npei_pkt_instr_size_s cn56xx;
+};
+typedef union cvmx_npei_pkt_instr_size cvmx_npei_pkt_instr_size_t;
+
+/**
+ * cvmx_npei_pkt_int_levels
+ *
+ * 0x90F0 reserved NPEI_PKT_PCIE_PORT2
+ *
+ *
+ * NPEI_PKT_INT_LEVELS = NPEI's Packet Interrupt Levels
+ *
+ * Output packet interrupt levels.
+ */
+union cvmx_npei_pkt_int_levels {
+ uint64_t u64;
+ struct cvmx_npei_pkt_int_levels_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t time : 22; /**< When NPEI_PKT#_CNTS[TIMER] is greater than this
+ value an interrupt is generated. */
+ uint64_t cnt : 32; /**< When NPEI_PKT#_CNTS[CNT] becomes
+ greater than this value an interrupt is generated. */
+#else
+ uint64_t cnt : 32;
+ uint64_t time : 22;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_npei_pkt_int_levels_s cn52xx;
+ struct cvmx_npei_pkt_int_levels_s cn56xx;
+};
+typedef union cvmx_npei_pkt_int_levels cvmx_npei_pkt_int_levels_t;
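+
+/* Editor's illustrative sketch, not part of the auto-generated file:
+ * programming the output-packet interrupt thresholds. A ring interrupts when
+ * its NPEI_PKT#_CNTS[CNT] exceeds CNT or its [TIMER] exceeds TIME. Assumes
+ * cvmx_write_csr() from cvmx.h and the CVMX_PEXP_NPEI_PKT_INT_LEVELS address
+ * macro from cvmx-pexp-defs.h. */
+static inline void cvmx_npei_example_set_int_levels(uint32_t cnt, uint32_t time)
+{
+    cvmx_npei_pkt_int_levels_t lvl;
+
+    lvl.u64 = 0;
+    lvl.s.cnt = cnt;              /* 32-bit count threshold */
+    lvl.s.time = time & 0x3FFFFF; /* 22-bit timer threshold */
+    cvmx_write_csr(CVMX_PEXP_NPEI_PKT_INT_LEVELS, lvl.u64);
+}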
+
+/**
+ * cvmx_npei_pkt_iptr
+ *
+ * NPEI_PKT_IPTR = NPEI's Packet Info Pointer
+ *
+ * Controls using the Info-Pointer to store length and data.
+ */
+union cvmx_npei_pkt_iptr {
+ uint64_t u64;
+ struct cvmx_npei_pkt_iptr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t iptr : 32; /**< When asserted '1' the vector bit corresponding
+ to the Packet-ring will use the Info-Pointer to
+ store length and data. */
+#else
+ uint64_t iptr : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_iptr_s cn52xx;
+ struct cvmx_npei_pkt_iptr_s cn56xx;
+};
+typedef union cvmx_npei_pkt_iptr cvmx_npei_pkt_iptr_t;
+
+/**
+ * cvmx_npei_pkt_out_bmode
+ *
+ * NPEI_PKT_OUT_BMODE = NPEI's Packet Out Byte Mode
+ *
+ * Controls the updating of the NPEI_PKT#_CNT register.
+ */
+union cvmx_npei_pkt_out_bmode {
+ uint64_t u64;
+ struct cvmx_npei_pkt_out_bmode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bmode : 32; /**< When asserted '1' the vector bit corresponding
+ to the Packet-ring will have its NPEI_PKT#_CNT
+ register updated with the number of bytes in the
+ packet sent, when '0' the register will have a
+ value of '1' added. */
+#else
+ uint64_t bmode : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_out_bmode_s cn52xx;
+ struct cvmx_npei_pkt_out_bmode_s cn56xx;
+};
+typedef union cvmx_npei_pkt_out_bmode cvmx_npei_pkt_out_bmode_t;
+
+/**
+ * cvmx_npei_pkt_out_enb
+ *
+ * NPEI_PKT_OUT_ENB = NPEI's Packet Output Enable
+ *
+ * Enables the output packet engines.
+ */
+union cvmx_npei_pkt_out_enb {
+ uint64_t u64;
+ struct cvmx_npei_pkt_out_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t enb : 32; /**< When asserted '1' the vector bit corresponding
+ to the Packet-ring is enabled.
+ If an error occurs on reading pointers for an
+ output ring, the ring will be disabled by clearing
+ the bit associated with the ring to '0'. */
+#else
+ uint64_t enb : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_out_enb_s cn52xx;
+ struct cvmx_npei_pkt_out_enb_s cn56xx;
+};
+typedef union cvmx_npei_pkt_out_enb cvmx_npei_pkt_out_enb_t;
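+
+/* Editor's illustrative sketch, not part of the auto-generated file: enabling
+ * output rings 0-3. Note the comment above: hardware clears a ring's bit again
+ * if a pointer-read error occurs on that ring. Assumes cvmx_write_csr() from
+ * cvmx.h and the CVMX_PEXP_NPEI_PKT_OUT_ENB address macro from
+ * cvmx-pexp-defs.h. */
+static inline void cvmx_npei_example_enable_out_rings(void)
+{
+    cvmx_npei_pkt_out_enb_t enb;
+
+    enb.u64 = 0;
+    enb.s.enb = 0xF; /* one bit per Packet-ring: enable rings 0-3 */
+    cvmx_write_csr(CVMX_PEXP_NPEI_PKT_OUT_ENB, enb.u64);
+}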
+
+/**
+ * cvmx_npei_pkt_output_wmark
+ *
+ * NPEI_PKT_OUTPUT_WMARK = NPEI's Packet Output Water Mark
+ *
+ * When the NPEI_PKT#_SLIST_BAOFF_DBELL[DBELL] value is less than this value, backpressure for the rings will be applied.
+ */
+union cvmx_npei_pkt_output_wmark {
+ uint64_t u64;
+ struct cvmx_npei_pkt_output_wmark_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wmark : 32; /**< When the DBELL count drops below this value,
+ backpressure for the ring will be applied to the PKO. */
+#else
+ uint64_t wmark : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_output_wmark_s cn52xx;
+ struct cvmx_npei_pkt_output_wmark_s cn56xx;
+};
+typedef union cvmx_npei_pkt_output_wmark cvmx_npei_pkt_output_wmark_t;
+
+/**
+ * cvmx_npei_pkt_pcie_port
+ *
+ * NPEI_PKT_PCIE_PORT = NPEI's Packet To PCIe Port Assignment
+ *
+ * Assigns Packet Ports to PCIe ports.
+ */
+union cvmx_npei_pkt_pcie_port {
+ uint64_t u64;
+ struct cvmx_npei_pkt_pcie_port_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pp : 64; /**< The PCIe port that the Packet ring number is
+ assigned. Two bits are used per ring (i.e. ring 0
+ [1:0], ring 1 [3:2], ....). A value of '0' means
+ that the Packet-ring is assigned to PCIe Port 0, a '1'
+ to PCIe Port 1; '2' and '3' are reserved. */
+#else
+ uint64_t pp : 64;
+#endif
+ } s;
+ struct cvmx_npei_pkt_pcie_port_s cn52xx;
+ struct cvmx_npei_pkt_pcie_port_s cn56xx;
+};
+typedef union cvmx_npei_pkt_pcie_port cvmx_npei_pkt_pcie_port_t;
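+
+/* Editor's illustrative sketch, not part of the auto-generated file: updating
+ * one ring's two-bit PCIe-port assignment in NPEI_PKT_PCIE_PORT[PP]. A '0'
+ * selects PCIe Port 0 and a '1' PCIe Port 1; '2' and '3' are reserved. */
+static inline uint64_t cvmx_npei_example_assign_ring(uint64_t pp,
+                                                     unsigned ring,
+                                                     unsigned pcie_port)
+{
+    unsigned shift = ring * 2; /* ring 0 -> [1:0], ring 1 -> [3:2], ... */
+    pp &= ~(3ull << shift);    /* clear the ring's old assignment */
+    pp |= ((uint64_t)(pcie_port & 1)) << shift;
+    return pp;
+}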
+
+/**
+ * cvmx_npei_pkt_port_in_rst
+ *
+ * NPEI_PKT_PORT_IN_RST = NPEI Packet Port In Reset
+ *
+ * Vector bits, one per ring-port, indicating which are in reset.
+ */
+union cvmx_npei_pkt_port_in_rst {
+ uint64_t u64;
+ struct cvmx_npei_pkt_port_in_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t in_rst : 32; /**< When asserted '1' the vector bit corresponding
+ to the inbound Packet-ring is in reset. */
+ uint64_t out_rst : 32; /**< When asserted '1' the vector bit corresponding
+ to the outbound Packet-ring is in reset. */
+#else
+ uint64_t out_rst : 32;
+ uint64_t in_rst : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_port_in_rst_s cn52xx;
+ struct cvmx_npei_pkt_port_in_rst_s cn56xx;
+};
+typedef union cvmx_npei_pkt_port_in_rst cvmx_npei_pkt_port_in_rst_t;
+
+/**
+ * cvmx_npei_pkt_slist_es
+ *
+ * NPEI_PKT_SLIST_ES = NPEI's Packet Scatter List Endian Swap
+ *
+ * The Endian Swap for Scatter List Read.
+ */
+union cvmx_npei_pkt_slist_es {
+ uint64_t u64;
+ struct cvmx_npei_pkt_slist_es_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t es : 64; /**< The endian swap mode for Packet rings 0 through 31.
+ Two bits are used per ring (i.e. ring 0 [1:0],
+ ring 1 [3:2], ....). */
+#else
+ uint64_t es : 64;
+#endif
+ } s;
+ struct cvmx_npei_pkt_slist_es_s cn52xx;
+ struct cvmx_npei_pkt_slist_es_s cn56xx;
+};
+typedef union cvmx_npei_pkt_slist_es cvmx_npei_pkt_slist_es_t;
+
+/**
+ * cvmx_npei_pkt_slist_id_size
+ *
+ * NPEI_PKT_SLIST_ID_SIZE = NPEI Packet Scatter List Info and Data Size
+ *
+ * The Size of the information and data fields pointed to by Scatter List pointers.
+ */
+union cvmx_npei_pkt_slist_id_size {
+ uint64_t u64;
+ struct cvmx_npei_pkt_slist_id_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t isize : 7; /**< Information size. Legal sizes are 0 to 120. */
+ uint64_t bsize : 16; /**< Data size. */
+#else
+ uint64_t bsize : 16;
+ uint64_t isize : 7;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_npei_pkt_slist_id_size_s cn52xx;
+ struct cvmx_npei_pkt_slist_id_size_s cn56xx;
+};
+typedef union cvmx_npei_pkt_slist_id_size cvmx_npei_pkt_slist_id_size_t;
+
+/**
+ * cvmx_npei_pkt_slist_ns
+ *
+ * NPEI_PKT_SLIST_NS = NPEI's Packet Scatter List No Snoop
+ *
+ * The NS field for the TLP when fetching Scatter List.
+ */
+union cvmx_npei_pkt_slist_ns {
+ uint64_t u64;
+ struct cvmx_npei_pkt_slist_ns_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t nsr : 32; /**< When asserted '1' the vector bit corresponding
+ to the Packet-ring will enable NS in TLP header. */
+#else
+ uint64_t nsr : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_slist_ns_s cn52xx;
+ struct cvmx_npei_pkt_slist_ns_s cn56xx;
+};
+typedef union cvmx_npei_pkt_slist_ns cvmx_npei_pkt_slist_ns_t;
+
+/**
+ * cvmx_npei_pkt_slist_ror
+ *
+ * NPEI_PKT_SLIST_ROR = NPEI's Packet Scatter List Relaxed Ordering
+ *
+ * The ROR field for the TLP when fetching Scatter List.
+ */
+union cvmx_npei_pkt_slist_ror {
+ uint64_t u64;
+ struct cvmx_npei_pkt_slist_ror_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ror : 32; /**< When asserted '1' the vector bit corresponding
+ to the Packet-ring will enable ROR in TLP header. */
+#else
+ uint64_t ror : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_slist_ror_s cn52xx;
+ struct cvmx_npei_pkt_slist_ror_s cn56xx;
+};
+typedef union cvmx_npei_pkt_slist_ror cvmx_npei_pkt_slist_ror_t;
+
+/**
+ * cvmx_npei_pkt_time_int
+ *
+ * NPEI_PKT_TIME_INT = NPEI Packet Timer Interrupt
+ *
+ * The packet rings that are interrupting because of Packet Timers.
+ */
+union cvmx_npei_pkt_time_int {
+ uint64_t u64;
+ struct cvmx_npei_pkt_time_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t port : 32; /**< Bit vector corresponding to ring number is set when
+ NPEI_PKT#_CNTS[TIMER] is greater than
+ NPEI_PKT_INT_LEVELS[TIME]. */
+#else
+ uint64_t port : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_time_int_s cn52xx;
+ struct cvmx_npei_pkt_time_int_s cn56xx;
+};
+typedef union cvmx_npei_pkt_time_int cvmx_npei_pkt_time_int_t;
+
+/**
+ * cvmx_npei_pkt_time_int_enb
+ *
+ * NPEI_PKT_TIME_INT_ENB = NPEI Packet Timer Interrupt Enable
+ *
+ * The packet rings that are interrupting because of Packet Timers.
+ */
+union cvmx_npei_pkt_time_int_enb {
+ uint64_t u64;
+ struct cvmx_npei_pkt_time_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t port : 32; /**< Bit vector corresponding to ring number when set
+ allows NPEI_PKT_TIME_INT to generate an interrupt. */
+#else
+ uint64_t port : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_pkt_time_int_enb_s cn52xx;
+ struct cvmx_npei_pkt_time_int_enb_s cn56xx;
+};
+typedef union cvmx_npei_pkt_time_int_enb cvmx_npei_pkt_time_int_enb_t;
+
+/**
+ * cvmx_npei_rsl_int_blocks
+ *
+ * NPEI_RSL_INT_BLOCKS = NPEI RSL Interrupt Blocks Register
+ *
+ * Reading this register will return a vector with a bit set '1' for a corresponding RSL block
+ * that presently has an interrupt pending. The Field Description below supplies the name of the
+ * register that software should read to find out why that interrupt bit is set.
+ */
+union cvmx_npei_rsl_int_blocks {
+ uint64_t u64;
+ struct cvmx_npei_rsl_int_blocks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t iob : 1; /**< IOB_INT_SUM */
+ uint64_t lmc1 : 1; /**< LMC1_MEM_CFG0 */
+ uint64_t agl : 1; /**< AGL_GMX_RX0_INT_REG & AGL_GMX_TX_INT_REG */
+ uint64_t reserved_24_27 : 4;
+ uint64_t asxpcs1 : 1; /**< PCS1_INT*_REG */
+ uint64_t asxpcs0 : 1; /**< PCS0_INT*_REG */
+ uint64_t reserved_21_21 : 1;
+ uint64_t pip : 1; /**< PIP_INT_REG. */
+ uint64_t spx1 : 1; /**< Always reads as zero */
+ uint64_t spx0 : 1; /**< Always reads as zero */
+ uint64_t lmc0 : 1; /**< LMC0_MEM_CFG0 */
+ uint64_t l2c : 1; /**< L2C_INT_STAT */
+ uint64_t usb1 : 1; /**< Always reads as zero */
+ uint64_t rad : 1; /**< RAD_REG_ERROR */
+ uint64_t usb : 1; /**< USBN0_INT_SUM */
+ uint64_t pow : 1; /**< POW_ECC_ERR */
+ uint64_t tim : 1; /**< TIM_REG_ERROR */
+ uint64_t pko : 1; /**< PKO_REG_ERROR */
+ uint64_t ipd : 1; /**< IPD_INT_SUM */
+ uint64_t reserved_8_8 : 1;
+ uint64_t zip : 1; /**< ZIP_ERROR */
+ uint64_t dfa : 1; /**< Always reads as zero */
+ uint64_t fpa : 1; /**< FPA_INT_SUM */
+ uint64_t key : 1; /**< KEY_INT_SUM */
+ uint64_t npei : 1; /**< NPEI_INT_SUM */
+ uint64_t gmx1 : 1; /**< GMX1_RX*_INT_REG & GMX1_TX_INT_REG */
+ uint64_t gmx0 : 1; /**< GMX0_RX*_INT_REG & GMX0_TX_INT_REG */
+ uint64_t mio : 1; /**< MIO_BOOT_ERR */
+#else
+ uint64_t mio : 1;
+ uint64_t gmx0 : 1;
+ uint64_t gmx1 : 1;
+ uint64_t npei : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t tim : 1;
+ uint64_t pow : 1;
+ uint64_t usb : 1;
+ uint64_t rad : 1;
+ uint64_t usb1 : 1;
+ uint64_t l2c : 1;
+ uint64_t lmc0 : 1;
+ uint64_t spx0 : 1;
+ uint64_t spx1 : 1;
+ uint64_t pip : 1;
+ uint64_t reserved_21_21 : 1;
+ uint64_t asxpcs0 : 1;
+ uint64_t asxpcs1 : 1;
+ uint64_t reserved_24_27 : 4;
+ uint64_t agl : 1;
+ uint64_t lmc1 : 1;
+ uint64_t iob : 1;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_npei_rsl_int_blocks_s cn52xx;
+ struct cvmx_npei_rsl_int_blocks_s cn52xxp1;
+ struct cvmx_npei_rsl_int_blocks_s cn56xx;
+ struct cvmx_npei_rsl_int_blocks_s cn56xxp1;
+};
+typedef union cvmx_npei_rsl_int_blocks cvmx_npei_rsl_int_blocks_t;
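+
+/* Editor's illustrative sketch, not part of the auto-generated file: checking
+ * NPEI_RSL_INT_BLOCKS for a pending interrupt; each set bit names the
+ * per-block register to consult (see the field comments above). Assumes
+ * cvmx_read_csr() from cvmx.h and the CVMX_PEXP_NPEI_RSL_INT_BLOCKS address
+ * macro from cvmx-pexp-defs.h. */
+static inline int cvmx_npei_example_pko_error_pending(void)
+{
+    cvmx_npei_rsl_int_blocks_t blocks;
+
+    blocks.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_RSL_INT_BLOCKS);
+    return blocks.s.pko; /* if set, read PKO_REG_ERROR for the cause */
+}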
+
+/**
+ * cvmx_npei_scratch_1
+ *
+ * NPEI_SCRATCH_1 = NPEI's Scratch 1
+ *
+ * A general purpose 64 bit register for SW use.
+ */
+union cvmx_npei_scratch_1 {
+ uint64_t u64;
+ struct cvmx_npei_scratch_1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< The value in this register is totally SW dependent. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_npei_scratch_1_s cn52xx;
+ struct cvmx_npei_scratch_1_s cn52xxp1;
+ struct cvmx_npei_scratch_1_s cn56xx;
+ struct cvmx_npei_scratch_1_s cn56xxp1;
+};
+typedef union cvmx_npei_scratch_1 cvmx_npei_scratch_1_t;
+
+/**
+ * cvmx_npei_state1
+ *
+ * NPEI_STATE1 = NPEI State 1
+ *
+ * State machines in NPEI. For debug.
+ */
+union cvmx_npei_state1 {
+ uint64_t u64;
+ struct cvmx_npei_state1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cpl1 : 12; /**< CPL1 State */
+ uint64_t cpl0 : 12; /**< CPL0 State */
+ uint64_t arb : 1; /**< ARB State */
+ uint64_t csr : 39; /**< CSR State */
+#else
+ uint64_t csr : 39;
+ uint64_t arb : 1;
+ uint64_t cpl0 : 12;
+ uint64_t cpl1 : 12;
+#endif
+ } s;
+ struct cvmx_npei_state1_s cn52xx;
+ struct cvmx_npei_state1_s cn52xxp1;
+ struct cvmx_npei_state1_s cn56xx;
+ struct cvmx_npei_state1_s cn56xxp1;
+};
+typedef union cvmx_npei_state1 cvmx_npei_state1_t;
+
+/**
+ * cvmx_npei_state2
+ *
+ * NPEI_STATE2 = NPEI State 2
+ *
+ * State machines in NPEI. For debug.
+ */
+union cvmx_npei_state2 {
+ uint64_t u64;
+ struct cvmx_npei_state2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t npei : 1; /**< NPEI State */
+ uint64_t rac : 1; /**< RAC State */
+ uint64_t csm1 : 15; /**< CSM1 State */
+ uint64_t csm0 : 15; /**< CSM0 State */
+ uint64_t nnp0 : 8; /**< NNP0 State */
+ uint64_t nnd : 8; /**< NND State */
+#else
+ uint64_t nnd : 8;
+ uint64_t nnp0 : 8;
+ uint64_t csm0 : 15;
+ uint64_t csm1 : 15;
+ uint64_t rac : 1;
+ uint64_t npei : 1;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_npei_state2_s cn52xx;
+ struct cvmx_npei_state2_s cn52xxp1;
+ struct cvmx_npei_state2_s cn56xx;
+ struct cvmx_npei_state2_s cn56xxp1;
+};
+typedef union cvmx_npei_state2 cvmx_npei_state2_t;
+
+/**
+ * cvmx_npei_state3
+ *
+ * NPEI_STATE3 = NPEI State 3
+ *
+ * State machines in NPEI. For debug.
+ */
+union cvmx_npei_state3 {
+ uint64_t u64;
+ struct cvmx_npei_state3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t psm1 : 15; /**< PSM1 State */
+ uint64_t psm0 : 15; /**< PSM0 State */
+ uint64_t nsm1 : 13; /**< NSM1 State */
+ uint64_t nsm0 : 13; /**< NSM0 State */
+#else
+ uint64_t nsm0 : 13;
+ uint64_t nsm1 : 13;
+ uint64_t psm0 : 15;
+ uint64_t psm1 : 15;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_npei_state3_s cn52xx;
+ struct cvmx_npei_state3_s cn52xxp1;
+ struct cvmx_npei_state3_s cn56xx;
+ struct cvmx_npei_state3_s cn56xxp1;
+};
+typedef union cvmx_npei_state3 cvmx_npei_state3_t;
+
+/**
+ * cvmx_npei_win_rd_addr
+ *
+ * NPEI_WIN_RD_ADDR = NPEI Window Read Address Register
+ *
+ * The address to be read when the NPEI_WIN_RD_DATA register is read.
+ */
+union cvmx_npei_win_rd_addr {
+ uint64_t u64;
+ struct cvmx_npei_win_rd_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63 : 13;
+ uint64_t ld_cmd : 2; /**< The load command sent with the read.
+ 0x0 == Load 8 bytes, 0x1 == Load 4 bytes,
+ 0x2 == Load 2 bytes, 0x3 == Load 1 byte. */
+ uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always
+ read as '0'. */
+ uint64_t rd_addr : 48; /**< The address to be read from. Whenever the LSB of
+ this register is written, the Read Operation will
+ take place.
+ [47:40] = NCB_ID
+ [39:0] = Address
+ When [47:43] == NPI & [42:0] == 0 bits [39:0] are:
+ [39:32] == x, Not Used
+ [31:27] == RSL_ID
+ [12:0] == RSL Register Offset */
+#else
+ uint64_t rd_addr : 48;
+ uint64_t iobit : 1;
+ uint64_t ld_cmd : 2;
+ uint64_t reserved_51_63 : 13;
+#endif
+ } s;
+ struct cvmx_npei_win_rd_addr_s cn52xx;
+ struct cvmx_npei_win_rd_addr_s cn52xxp1;
+ struct cvmx_npei_win_rd_addr_s cn56xx;
+ struct cvmx_npei_win_rd_addr_s cn56xxp1;
+};
+typedef union cvmx_npei_win_rd_addr cvmx_npei_win_rd_addr_t;
+
+/**
+ * cvmx_npei_win_rd_data
+ *
+ * NPEI_WIN_RD_DATA = NPEI Window Read Data Register
+ *
+ * Reading this register causes a window read operation to take place. The address read is that contained in the NPEI_WIN_RD_ADDR
+ * register.
+ */
+union cvmx_npei_win_rd_data {
+ uint64_t u64;
+ struct cvmx_npei_win_rd_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rd_data : 64; /**< The read data. */
+#else
+ uint64_t rd_data : 64;
+#endif
+ } s;
+ struct cvmx_npei_win_rd_data_s cn52xx;
+ struct cvmx_npei_win_rd_data_s cn52xxp1;
+ struct cvmx_npei_win_rd_data_s cn56xx;
+ struct cvmx_npei_win_rd_data_s cn56xxp1;
+};
+typedef union cvmx_npei_win_rd_data cvmx_npei_win_rd_data_t;
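+
+/* Editor's illustrative sketch, not part of the auto-generated file: a full
+ * window read. Writing NPEI_WIN_RD_ADDR (its LSB in particular) latches the
+ * target address; reading NPEI_WIN_RD_DATA then performs the read and returns
+ * the data. Assumes cvmx_read_csr()/cvmx_write_csr() from cvmx.h and the
+ * CVMX_PEXP_NPEI_WIN_RD_ADDR / CVMX_PEXP_NPEI_WIN_RD_DATA address macros
+ * from cvmx-pexp-defs.h. */
+static inline uint64_t cvmx_npei_example_window_read(uint64_t addr)
+{
+    cvmx_npei_win_rd_addr_t rd_addr;
+
+    rd_addr.u64 = 0;
+    rd_addr.s.ld_cmd = 0;     /* 0x0 == load 8 bytes */
+    rd_addr.s.rd_addr = addr; /* [47:40] NCB_ID, [39:0] address */
+    cvmx_write_csr(CVMX_PEXP_NPEI_WIN_RD_ADDR, rd_addr.u64);
+    return cvmx_read_csr(CVMX_PEXP_NPEI_WIN_RD_DATA);
+}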
+
+/**
+ * cvmx_npei_win_wr_addr
+ *
+ * NPEI_WIN_WR_ADDR = NPEI Window Write Address Register
+ *
+ * Contains the address to be written to when a write operation is started by writing the
+ * NPEI_WIN_WR_DATA register (see below).
+ *
+ * Notes:
+ * Even though address bit [2] can be set, it should always be kept to '0'.
+ *
+ */
+union cvmx_npei_win_wr_addr {
+ uint64_t u64;
+ struct cvmx_npei_win_wr_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always
+ read as '0'. */
+ uint64_t wr_addr : 46; /**< The address that will be written to when the
+ NPEI_WIN_WR_DATA register is written.
+ [47:40] = NCB_ID
+ [39:3] = Address
+ When [47:43] == NPI & [42:0] == 0 bits [39:0] are:
+ [39:32] == x, Not Used
+ [31:27] == RSL_ID
+ [12:2] == RSL Register Offset
+ [1:0] == x, Not Used */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t wr_addr : 46;
+ uint64_t iobit : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } s;
+ struct cvmx_npei_win_wr_addr_s cn52xx;
+ struct cvmx_npei_win_wr_addr_s cn52xxp1;
+ struct cvmx_npei_win_wr_addr_s cn56xx;
+ struct cvmx_npei_win_wr_addr_s cn56xxp1;
+};
+typedef union cvmx_npei_win_wr_addr cvmx_npei_win_wr_addr_t;
+
+/**
+ * cvmx_npei_win_wr_data
+ *
+ * NPEI_WIN_WR_DATA = NPEI Window Write Data Register
+ *
+ * Contains the data to write to the address located in the NPEI_WIN_WR_ADDR Register.
+ * Writing the least-significant byte of this register will cause a write operation to take place.
+ */
+union cvmx_npei_win_wr_data {
+ uint64_t u64;
+ struct cvmx_npei_win_wr_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wr_data : 64; /**< The data to be written. Whenever the LSB of this
+ register is written, the Window Write will take
+ place. */
+#else
+ uint64_t wr_data : 64;
+#endif
+ } s;
+ struct cvmx_npei_win_wr_data_s cn52xx;
+ struct cvmx_npei_win_wr_data_s cn52xxp1;
+ struct cvmx_npei_win_wr_data_s cn56xx;
+ struct cvmx_npei_win_wr_data_s cn56xxp1;
+};
+typedef union cvmx_npei_win_wr_data cvmx_npei_win_wr_data_t;
+
+/**
+ * cvmx_npei_win_wr_mask
+ *
+ * NPEI_WIN_WR_MASK = NPEI Window Write Mask Register
+ *
+ * Contains the mask for the data in the NPEI_WIN_WR_DATA Register.
+ */
+union cvmx_npei_win_wr_mask {
+ uint64_t u64;
+ struct cvmx_npei_win_wr_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t wr_mask : 8; /**< The write mask. When a bit is '0'
+ the corresponding byte will be written. */
+#else
+ uint64_t wr_mask : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_npei_win_wr_mask_s cn52xx;
+ struct cvmx_npei_win_wr_mask_s cn52xxp1;
+ struct cvmx_npei_win_wr_mask_s cn56xx;
+ struct cvmx_npei_win_wr_mask_s cn56xxp1;
+};
+typedef union cvmx_npei_win_wr_mask cvmx_npei_win_wr_mask_t;
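+
+/* Editor's illustrative sketch, not part of the auto-generated file: a full
+ * window write. Program the address and byte mask first, then write
+ * NPEI_WIN_WR_DATA; writing its least-significant byte triggers the
+ * operation, and a mask bit of '0' writes the corresponding byte. Assumes
+ * cvmx_write_csr() from cvmx.h and the CVMX_PEXP_NPEI_WIN_WR_* address
+ * macros from cvmx-pexp-defs.h. */
+static inline void cvmx_npei_example_window_write(uint64_t addr, uint64_t data)
+{
+    cvmx_npei_win_wr_addr_t wr_addr;
+
+    wr_addr.u64 = 0;
+    wr_addr.s.wr_addr = addr >> 2; /* field holds address bits [47:2] */
+    cvmx_write_csr(CVMX_PEXP_NPEI_WIN_WR_ADDR, wr_addr.u64);
+    cvmx_write_csr(CVMX_PEXP_NPEI_WIN_WR_MASK, 0x00); /* write all 8 bytes */
+    cvmx_write_csr(CVMX_PEXP_NPEI_WIN_WR_DATA, data); /* triggers the write */
+}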
+
+/**
+ * cvmx_npei_window_ctl
+ *
+ * NPEI_WINDOW_CTL = NPEI's Window Control
+ *
+ * The name of this register is misleading. The timeout value is used for BAR0 access from PCIE0 and PCIE1.
+ * Any access to the registers on the RML will time out after 0xFFFF clock cycles. At the time of timeout the next
+ * RML access will start, an interrupt will be set, and in the case of reads no data will be returned.
+ *
+ * The value of this register should be set to a minimum of 0x200000 to ensure that a timeout to an RML register
+ * occurs on the RML 0xFFFF timer before the timeout for a BAR0 access from the PCIE#.
+ */
+union cvmx_npei_window_ctl {
+ uint64_t u64;
+ struct cvmx_npei_window_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t time : 32; /**< Time in core clocks to wait for a
+ BAR0 access to complete on the NCB
+ before timing out. A value of 0 will cause no
+ timeouts. A minimum value of 0x200000 should be
+ used when this register is not set to 0x0. */
+#else
+ uint64_t time : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npei_window_ctl_s cn52xx;
+ struct cvmx_npei_window_ctl_s cn52xxp1;
+ struct cvmx_npei_window_ctl_s cn56xx;
+ struct cvmx_npei_window_ctl_s cn56xxp1;
+};
+typedef union cvmx_npei_window_ctl cvmx_npei_window_ctl_t;
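+
+/* Editor's illustrative sketch, not part of the auto-generated file: setting
+ * the BAR0 access timeout. Per the note above, use at least 0x200000 core
+ * clocks so the RML's own 0xFFFF-cycle timer expires first. Assumes
+ * cvmx_write_csr() from cvmx.h and the CVMX_PEXP_NPEI_WINDOW_CTL address
+ * macro from cvmx-pexp-defs.h. */
+static inline void cvmx_npei_example_set_window_timeout(void)
+{
+    cvmx_npei_window_ctl_t ctl;
+
+    ctl.u64 = 0;
+    ctl.s.time = 0x200000; /* recommended minimum non-zero timeout */
+    cvmx_write_csr(CVMX_PEXP_NPEI_WINDOW_CTL, ctl.u64);
+}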
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-npei-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-npi-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-npi-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-npi-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,4651 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-npi-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon npi.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_NPI_DEFS_H__
+#define __CVMX_NPI_DEFS_H__
+
+#define CVMX_NPI_BASE_ADDR_INPUT0 CVMX_NPI_BASE_ADDR_INPUTX(0)
+#define CVMX_NPI_BASE_ADDR_INPUT1 CVMX_NPI_BASE_ADDR_INPUTX(1)
+#define CVMX_NPI_BASE_ADDR_INPUT2 CVMX_NPI_BASE_ADDR_INPUTX(2)
+#define CVMX_NPI_BASE_ADDR_INPUT3 CVMX_NPI_BASE_ADDR_INPUTX(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPI_BASE_ADDR_INPUTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_NPI_BASE_ADDR_INPUTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000000070ull) + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_NPI_BASE_ADDR_INPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000070ull) + ((offset) & 3) * 16)
+#endif
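+
+/* Editor's illustrative note, not part of the auto-generated file: the
+ * accessor pattern above repeats for every indexed NPI CSR in this header.
+ * With CVMX_ENABLE_CSR_ADDRESS_CHECKING, an offset that is out of range for
+ * the current chip model logs a cvmx_warn(); either way the macro yields the
+ * CSR's physical address, e.g. (cvmx_read_csr() assumed from cvmx.h):
+ *
+ *     uint64_t base = cvmx_read_csr(CVMX_NPI_BASE_ADDR_INPUTX(2));
+ */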
+#define CVMX_NPI_BASE_ADDR_OUTPUT0 CVMX_NPI_BASE_ADDR_OUTPUTX(0)
+#define CVMX_NPI_BASE_ADDR_OUTPUT1 CVMX_NPI_BASE_ADDR_OUTPUTX(1)
+#define CVMX_NPI_BASE_ADDR_OUTPUT2 CVMX_NPI_BASE_ADDR_OUTPUTX(2)
+#define CVMX_NPI_BASE_ADDR_OUTPUT3 CVMX_NPI_BASE_ADDR_OUTPUTX(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPI_BASE_ADDR_OUTPUTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_NPI_BASE_ADDR_OUTPUTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F00000000B8ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_NPI_BASE_ADDR_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F00000000B8ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_BIST_STATUS CVMX_NPI_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_NPI_BIST_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_BIST_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000003F8ull);
+}
+#else
+#define CVMX_NPI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F00000003F8ull))
+#endif
+#define CVMX_NPI_BUFF_SIZE_OUTPUT0 CVMX_NPI_BUFF_SIZE_OUTPUTX(0)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT1 CVMX_NPI_BUFF_SIZE_OUTPUTX(1)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT2 CVMX_NPI_BUFF_SIZE_OUTPUTX(2)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT3 CVMX_NPI_BUFF_SIZE_OUTPUTX(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPI_BUFF_SIZE_OUTPUTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_NPI_BUFF_SIZE_OUTPUTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F00000000E0ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_NPI_BUFF_SIZE_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F00000000E0ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_COMP_CTL CVMX_NPI_COMP_CTL_FUNC()
+static inline uint64_t CVMX_NPI_COMP_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_COMP_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000218ull);
+}
+#else
+#define CVMX_NPI_COMP_CTL (CVMX_ADD_IO_SEG(0x00011F0000000218ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_CTL_STATUS CVMX_NPI_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_NPI_CTL_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_CTL_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000010ull);
+}
+#else
+#define CVMX_NPI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_DBG_SELECT CVMX_NPI_DBG_SELECT_FUNC()
+static inline uint64_t CVMX_NPI_DBG_SELECT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_DBG_SELECT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000008ull);
+}
+#else
+#define CVMX_NPI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_DMA_CONTROL CVMX_NPI_DMA_CONTROL_FUNC()
+static inline uint64_t CVMX_NPI_DMA_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_DMA_CONTROL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000128ull);
+}
+#else
+#define CVMX_NPI_DMA_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000128ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_DMA_HIGHP_COUNTS CVMX_NPI_DMA_HIGHP_COUNTS_FUNC()
+static inline uint64_t CVMX_NPI_DMA_HIGHP_COUNTS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_DMA_HIGHP_COUNTS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000148ull);
+}
+#else
+#define CVMX_NPI_DMA_HIGHP_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000000148ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_DMA_HIGHP_NADDR CVMX_NPI_DMA_HIGHP_NADDR_FUNC()
+static inline uint64_t CVMX_NPI_DMA_HIGHP_NADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_DMA_HIGHP_NADDR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000158ull);
+}
+#else
+#define CVMX_NPI_DMA_HIGHP_NADDR (CVMX_ADD_IO_SEG(0x00011F0000000158ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_DMA_LOWP_COUNTS CVMX_NPI_DMA_LOWP_COUNTS_FUNC()
+static inline uint64_t CVMX_NPI_DMA_LOWP_COUNTS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_DMA_LOWP_COUNTS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000140ull);
+}
+#else
+#define CVMX_NPI_DMA_LOWP_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000000140ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_DMA_LOWP_NADDR CVMX_NPI_DMA_LOWP_NADDR_FUNC()
+static inline uint64_t CVMX_NPI_DMA_LOWP_NADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_DMA_LOWP_NADDR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000150ull);
+}
+#else
+#define CVMX_NPI_DMA_LOWP_NADDR (CVMX_ADD_IO_SEG(0x00011F0000000150ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_HIGHP_DBELL CVMX_NPI_HIGHP_DBELL_FUNC()
+static inline uint64_t CVMX_NPI_HIGHP_DBELL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_HIGHP_DBELL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000120ull);
+}
+#else
+#define CVMX_NPI_HIGHP_DBELL (CVMX_ADD_IO_SEG(0x00011F0000000120ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_HIGHP_IBUFF_SADDR CVMX_NPI_HIGHP_IBUFF_SADDR_FUNC()
+static inline uint64_t CVMX_NPI_HIGHP_IBUFF_SADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_HIGHP_IBUFF_SADDR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000110ull);
+}
+#else
+#define CVMX_NPI_HIGHP_IBUFF_SADDR (CVMX_ADD_IO_SEG(0x00011F0000000110ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_INPUT_CONTROL CVMX_NPI_INPUT_CONTROL_FUNC()
+static inline uint64_t CVMX_NPI_INPUT_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_INPUT_CONTROL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000138ull);
+}
+#else
+#define CVMX_NPI_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000138ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_INT_ENB CVMX_NPI_INT_ENB_FUNC()
+static inline uint64_t CVMX_NPI_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_INT_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000020ull);
+}
+#else
+#define CVMX_NPI_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_INT_SUM CVMX_NPI_INT_SUM_FUNC()
+static inline uint64_t CVMX_NPI_INT_SUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_INT_SUM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000018ull);
+}
+#else
+#define CVMX_NPI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000000018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_LOWP_DBELL CVMX_NPI_LOWP_DBELL_FUNC()
+static inline uint64_t CVMX_NPI_LOWP_DBELL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_LOWP_DBELL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000118ull);
+}
+#else
+#define CVMX_NPI_LOWP_DBELL (CVMX_ADD_IO_SEG(0x00011F0000000118ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_LOWP_IBUFF_SADDR CVMX_NPI_LOWP_IBUFF_SADDR_FUNC()
+static inline uint64_t CVMX_NPI_LOWP_IBUFF_SADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_LOWP_IBUFF_SADDR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000108ull);
+}
+#else
+#define CVMX_NPI_LOWP_IBUFF_SADDR (CVMX_ADD_IO_SEG(0x00011F0000000108ull))
+#endif
+#define CVMX_NPI_MEM_ACCESS_SUBID3 CVMX_NPI_MEM_ACCESS_SUBIDX(3)
+#define CVMX_NPI_MEM_ACCESS_SUBID4 CVMX_NPI_MEM_ACCESS_SUBIDX(4)
+#define CVMX_NPI_MEM_ACCESS_SUBID5 CVMX_NPI_MEM_ACCESS_SUBIDX(5)
+#define CVMX_NPI_MEM_ACCESS_SUBID6 CVMX_NPI_MEM_ACCESS_SUBIDX(6)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPI_MEM_ACCESS_SUBIDX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset >= 3) && (offset <= 6)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset >= 3) && (offset <= 6)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset >= 3) && (offset <= 6)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset >= 3) && (offset <= 6)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset >= 3) && (offset <= 6))))))
+ cvmx_warn("CVMX_NPI_MEM_ACCESS_SUBIDX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000000028ull) + ((offset) & 7) * 8 - 8*3;
+}
+#else
+#define CVMX_NPI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000028ull) + ((offset) & 7) * 8 - 8*3)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_MSI_RCV CVMX_NPI_MSI_RCV_FUNC()
+static inline uint64_t CVMX_NPI_MSI_RCV_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_MSI_RCV not supported on this chip\n");
+ return 0x0000000000000190ull;
+}
+#else
+#define CVMX_NPI_MSI_RCV (0x0000000000000190ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_NPI_MSI_RCV CVMX_NPI_NPI_MSI_RCV_FUNC()
+static inline uint64_t CVMX_NPI_NPI_MSI_RCV_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_NPI_MSI_RCV not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001190ull);
+}
+#else
+#define CVMX_NPI_NPI_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F0000001190ull))
+#endif
+#define CVMX_NPI_NUM_DESC_OUTPUT0 CVMX_NPI_NUM_DESC_OUTPUTX(0)
+#define CVMX_NPI_NUM_DESC_OUTPUT1 CVMX_NPI_NUM_DESC_OUTPUTX(1)
+#define CVMX_NPI_NUM_DESC_OUTPUT2 CVMX_NPI_NUM_DESC_OUTPUTX(2)
+#define CVMX_NPI_NUM_DESC_OUTPUT3 CVMX_NPI_NUM_DESC_OUTPUTX(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPI_NUM_DESC_OUTPUTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_NPI_NUM_DESC_OUTPUTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000000050ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_NPI_NUM_DESC_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000050ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_OUTPUT_CONTROL CVMX_NPI_OUTPUT_CONTROL_FUNC()
+static inline uint64_t CVMX_NPI_OUTPUT_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_OUTPUT_CONTROL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000100ull);
+}
+#else
+#define CVMX_NPI_OUTPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000100ull))
+#endif
+#define CVMX_NPI_P0_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(0)
+#define CVMX_NPI_P0_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(0)
+#define CVMX_NPI_P0_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(0)
+#define CVMX_NPI_P0_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(0)
+#define CVMX_NPI_P1_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(1)
+#define CVMX_NPI_P1_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(1)
+#define CVMX_NPI_P1_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(1)
+#define CVMX_NPI_P1_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(1)
+#define CVMX_NPI_P2_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(2)
+#define CVMX_NPI_P2_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(2)
+#define CVMX_NPI_P2_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(2)
+#define CVMX_NPI_P2_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(2)
+#define CVMX_NPI_P3_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(3)
+#define CVMX_NPI_P3_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(3)
+#define CVMX_NPI_P3_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(3)
+#define CVMX_NPI_P3_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPI_PCI_BAR1_INDEXX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPI_PCI_BAR1_INDEXX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000001100ull) + ((offset) & 31) * 4;
+}
+#else
+#define CVMX_NPI_PCI_BAR1_INDEXX(offset) (CVMX_ADD_IO_SEG(0x00011F0000001100ull) + ((offset) & 31) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_BIST_REG CVMX_NPI_PCI_BIST_REG_FUNC()
+static inline uint64_t CVMX_NPI_PCI_BIST_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN50XX)))
+ cvmx_warn("CVMX_NPI_PCI_BIST_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000011C0ull);
+}
+#else
+#define CVMX_NPI_PCI_BIST_REG (CVMX_ADD_IO_SEG(0x00011F00000011C0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_BURST_SIZE CVMX_NPI_PCI_BURST_SIZE_FUNC()
+static inline uint64_t CVMX_NPI_PCI_BURST_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_BURST_SIZE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000000D8ull);
+}
+#else
+#define CVMX_NPI_PCI_BURST_SIZE (CVMX_ADD_IO_SEG(0x00011F00000000D8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG00 CVMX_NPI_PCI_CFG00_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG00_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG00 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001800ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG00 (CVMX_ADD_IO_SEG(0x00011F0000001800ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG01 CVMX_NPI_PCI_CFG01_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG01_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG01 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001804ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG01 (CVMX_ADD_IO_SEG(0x00011F0000001804ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG02 CVMX_NPI_PCI_CFG02_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG02_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG02 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001808ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG02 (CVMX_ADD_IO_SEG(0x00011F0000001808ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG03 CVMX_NPI_PCI_CFG03_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG03_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG03 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000180Cull);
+}
+#else
+#define CVMX_NPI_PCI_CFG03 (CVMX_ADD_IO_SEG(0x00011F000000180Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG04 CVMX_NPI_PCI_CFG04_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG04_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG04 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001810ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG04 (CVMX_ADD_IO_SEG(0x00011F0000001810ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG05 CVMX_NPI_PCI_CFG05_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG05_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG05 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001814ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG05 (CVMX_ADD_IO_SEG(0x00011F0000001814ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG06 CVMX_NPI_PCI_CFG06_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG06_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG06 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001818ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG06 (CVMX_ADD_IO_SEG(0x00011F0000001818ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG07 CVMX_NPI_PCI_CFG07_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG07_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG07 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000181Cull);
+}
+#else
+#define CVMX_NPI_PCI_CFG07 (CVMX_ADD_IO_SEG(0x00011F000000181Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG08 CVMX_NPI_PCI_CFG08_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG08_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG08 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001820ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG08 (CVMX_ADD_IO_SEG(0x00011F0000001820ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG09 CVMX_NPI_PCI_CFG09_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG09_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG09 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001824ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG09 (CVMX_ADD_IO_SEG(0x00011F0000001824ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG10 CVMX_NPI_PCI_CFG10_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG10_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG10 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001828ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG10 (CVMX_ADD_IO_SEG(0x00011F0000001828ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG11 CVMX_NPI_PCI_CFG11_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG11_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG11 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000182Cull);
+}
+#else
+#define CVMX_NPI_PCI_CFG11 (CVMX_ADD_IO_SEG(0x00011F000000182Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG12 CVMX_NPI_PCI_CFG12_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG12_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG12 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001830ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG12 (CVMX_ADD_IO_SEG(0x00011F0000001830ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG13 CVMX_NPI_PCI_CFG13_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG13_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG13 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001834ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG13 (CVMX_ADD_IO_SEG(0x00011F0000001834ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG15 CVMX_NPI_PCI_CFG15_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG15_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG15 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000183Cull);
+}
+#else
+#define CVMX_NPI_PCI_CFG15 (CVMX_ADD_IO_SEG(0x00011F000000183Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG16 CVMX_NPI_PCI_CFG16_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG16_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG16 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001840ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG16 (CVMX_ADD_IO_SEG(0x00011F0000001840ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG17 CVMX_NPI_PCI_CFG17_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG17_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG17 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001844ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG17 (CVMX_ADD_IO_SEG(0x00011F0000001844ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG18 CVMX_NPI_PCI_CFG18_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG18_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG18 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001848ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG18 (CVMX_ADD_IO_SEG(0x00011F0000001848ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG19 CVMX_NPI_PCI_CFG19_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG19_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG19 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000184Cull);
+}
+#else
+#define CVMX_NPI_PCI_CFG19 (CVMX_ADD_IO_SEG(0x00011F000000184Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG20 CVMX_NPI_PCI_CFG20_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG20_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG20 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001850ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG20 (CVMX_ADD_IO_SEG(0x00011F0000001850ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG21 CVMX_NPI_PCI_CFG21_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG21_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG21 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001854ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG21 (CVMX_ADD_IO_SEG(0x00011F0000001854ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG22 CVMX_NPI_PCI_CFG22_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG22_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG22 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001858ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG22 (CVMX_ADD_IO_SEG(0x00011F0000001858ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG56 CVMX_NPI_PCI_CFG56_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG56_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG56 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000018E0ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG56 (CVMX_ADD_IO_SEG(0x00011F00000018E0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG57 CVMX_NPI_PCI_CFG57_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG57_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG57 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000018E4ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG57 (CVMX_ADD_IO_SEG(0x00011F00000018E4ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG58 CVMX_NPI_PCI_CFG58_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG58_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG58 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000018E8ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG58 (CVMX_ADD_IO_SEG(0x00011F00000018E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG59 CVMX_NPI_PCI_CFG59_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG59_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG59 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000018ECull);
+}
+#else
+#define CVMX_NPI_PCI_CFG59 (CVMX_ADD_IO_SEG(0x00011F00000018ECull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG60 CVMX_NPI_PCI_CFG60_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG60_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG60 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000018F0ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG60 (CVMX_ADD_IO_SEG(0x00011F00000018F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG61 CVMX_NPI_PCI_CFG61_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG61_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG61 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000018F4ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG61 (CVMX_ADD_IO_SEG(0x00011F00000018F4ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG62 CVMX_NPI_PCI_CFG62_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG62_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG62 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000018F8ull);
+}
+#else
+#define CVMX_NPI_PCI_CFG62 (CVMX_ADD_IO_SEG(0x00011F00000018F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CFG63 CVMX_NPI_PCI_CFG63_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CFG63_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CFG63 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000018FCull);
+}
+#else
+#define CVMX_NPI_PCI_CFG63 (CVMX_ADD_IO_SEG(0x00011F00000018FCull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CNT_REG CVMX_NPI_PCI_CNT_REG_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CNT_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CNT_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000011B8ull);
+}
+#else
+#define CVMX_NPI_PCI_CNT_REG (CVMX_ADD_IO_SEG(0x00011F00000011B8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_CTL_STATUS_2 CVMX_NPI_PCI_CTL_STATUS_2_FUNC()
+static inline uint64_t CVMX_NPI_PCI_CTL_STATUS_2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_CTL_STATUS_2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000118Cull);
+}
+#else
+#define CVMX_NPI_PCI_CTL_STATUS_2 (CVMX_ADD_IO_SEG(0x00011F000000118Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_INT_ARB_CFG CVMX_NPI_PCI_INT_ARB_CFG_FUNC()
+static inline uint64_t CVMX_NPI_PCI_INT_ARB_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_INT_ARB_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000130ull);
+}
+#else
+#define CVMX_NPI_PCI_INT_ARB_CFG (CVMX_ADD_IO_SEG(0x00011F0000000130ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_INT_ENB2 CVMX_NPI_PCI_INT_ENB2_FUNC()
+static inline uint64_t CVMX_NPI_PCI_INT_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_INT_ENB2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000011A0ull);
+}
+#else
+#define CVMX_NPI_PCI_INT_ENB2 (CVMX_ADD_IO_SEG(0x00011F00000011A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_INT_SUM2 CVMX_NPI_PCI_INT_SUM2_FUNC()
+static inline uint64_t CVMX_NPI_PCI_INT_SUM2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_INT_SUM2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001198ull);
+}
+#else
+#define CVMX_NPI_PCI_INT_SUM2 (CVMX_ADD_IO_SEG(0x00011F0000001198ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_READ_CMD CVMX_NPI_PCI_READ_CMD_FUNC()
+static inline uint64_t CVMX_NPI_PCI_READ_CMD_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_READ_CMD not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000048ull);
+}
+#else
+#define CVMX_NPI_PCI_READ_CMD (CVMX_ADD_IO_SEG(0x00011F0000000048ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_READ_CMD_6 CVMX_NPI_PCI_READ_CMD_6_FUNC()
+static inline uint64_t CVMX_NPI_PCI_READ_CMD_6_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_READ_CMD_6 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001180ull);
+}
+#else
+#define CVMX_NPI_PCI_READ_CMD_6 (CVMX_ADD_IO_SEG(0x00011F0000001180ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_READ_CMD_C CVMX_NPI_PCI_READ_CMD_C_FUNC()
+static inline uint64_t CVMX_NPI_PCI_READ_CMD_C_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_READ_CMD_C not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001184ull);
+}
+#else
+#define CVMX_NPI_PCI_READ_CMD_C (CVMX_ADD_IO_SEG(0x00011F0000001184ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_READ_CMD_E CVMX_NPI_PCI_READ_CMD_E_FUNC()
+static inline uint64_t CVMX_NPI_PCI_READ_CMD_E_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_READ_CMD_E not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000001188ull);
+}
+#else
+#define CVMX_NPI_PCI_READ_CMD_E (CVMX_ADD_IO_SEG(0x00011F0000001188ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_SCM_REG CVMX_NPI_PCI_SCM_REG_FUNC()
+static inline uint64_t CVMX_NPI_PCI_SCM_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_SCM_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000011A8ull);
+}
+#else
+#define CVMX_NPI_PCI_SCM_REG (CVMX_ADD_IO_SEG(0x00011F00000011A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PCI_TSR_REG CVMX_NPI_PCI_TSR_REG_FUNC()
+static inline uint64_t CVMX_NPI_PCI_TSR_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PCI_TSR_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000011B0ull);
+}
+#else
+#define CVMX_NPI_PCI_TSR_REG (CVMX_ADD_IO_SEG(0x00011F00000011B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PORT32_INSTR_HDR CVMX_NPI_PORT32_INSTR_HDR_FUNC()
+static inline uint64_t CVMX_NPI_PORT32_INSTR_HDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PORT32_INSTR_HDR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000001F8ull);
+}
+#else
+#define CVMX_NPI_PORT32_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F00000001F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PORT33_INSTR_HDR CVMX_NPI_PORT33_INSTR_HDR_FUNC()
+static inline uint64_t CVMX_NPI_PORT33_INSTR_HDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PORT33_INSTR_HDR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000200ull);
+}
+#else
+#define CVMX_NPI_PORT33_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000200ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PORT34_INSTR_HDR CVMX_NPI_PORT34_INSTR_HDR_FUNC()
+static inline uint64_t CVMX_NPI_PORT34_INSTR_HDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PORT34_INSTR_HDR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000208ull);
+}
+#else
+#define CVMX_NPI_PORT34_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000208ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PORT35_INSTR_HDR CVMX_NPI_PORT35_INSTR_HDR_FUNC()
+static inline uint64_t CVMX_NPI_PORT35_INSTR_HDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PORT35_INSTR_HDR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000210ull);
+}
+#else
+#define CVMX_NPI_PORT35_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000210ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_PORT_BP_CONTROL CVMX_NPI_PORT_BP_CONTROL_FUNC()
+static inline uint64_t CVMX_NPI_PORT_BP_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_PORT_BP_CONTROL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000001F0ull);
+}
+#else
+#define CVMX_NPI_PORT_BP_CONTROL (CVMX_ADD_IO_SEG(0x00011F00000001F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPI_PX_DBPAIR_ADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_NPI_PX_DBPAIR_ADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000000180ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_NPI_PX_DBPAIR_ADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000000180ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPI_PX_INSTR_ADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_NPI_PX_INSTR_ADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F00000001C0ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_NPI_PX_INSTR_ADDR(offset) (CVMX_ADD_IO_SEG(0x00011F00000001C0ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPI_PX_INSTR_CNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_NPI_PX_INSTR_CNTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F00000001A0ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_NPI_PX_INSTR_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F00000001A0ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPI_PX_PAIR_CNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_NPI_PX_PAIR_CNTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000000160ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_NPI_PX_PAIR_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000000160ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_RSL_INT_BLOCKS CVMX_NPI_RSL_INT_BLOCKS_FUNC()
+static inline uint64_t CVMX_NPI_RSL_INT_BLOCKS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_RSL_INT_BLOCKS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000000000ull);
+}
+#else
+#define CVMX_NPI_RSL_INT_BLOCKS (CVMX_ADD_IO_SEG(0x00011F0000000000ull))
+#endif
+#define CVMX_NPI_SIZE_INPUT0 CVMX_NPI_SIZE_INPUTX(0)
+#define CVMX_NPI_SIZE_INPUT1 CVMX_NPI_SIZE_INPUTX(1)
+#define CVMX_NPI_SIZE_INPUT2 CVMX_NPI_SIZE_INPUTX(2)
+#define CVMX_NPI_SIZE_INPUT3 CVMX_NPI_SIZE_INPUTX(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_NPI_SIZE_INPUTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_NPI_SIZE_INPUTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000000078ull) + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_NPI_SIZE_INPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000078ull) + ((offset) & 3) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_NPI_WIN_READ_TO CVMX_NPI_WIN_READ_TO_FUNC()
+static inline uint64_t CVMX_NPI_WIN_READ_TO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_NPI_WIN_READ_TO not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000001E0ull);
+}
+#else
+#define CVMX_NPI_WIN_READ_TO (CVMX_ADD_IO_SEG(0x00011F00000001E0ull))
+#endif
+
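+/* Usage sketch (illustrative only, not part of the original SDK header): with
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING set, each CVMX_NPI_* macro above expands
+ * to its _FUNC() helper, which emits a cvmx_warn() on models that lack the
+ * register but still returns the same address as the plain #define, so reads
+ * look identical either way. Assumes cvmx_read_csr() from cvmx-access.h. */
+static inline uint64_t cvmx_npi_read_rsl_int_blocks_example(void)
+{
+    /* Expands to CVMX_NPI_RSL_INT_BLOCKS_FUNC() when checking is enabled. */
+    return cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
+}
+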
+/**
+ * cvmx_npi_base_addr_input#
+ *
+ * NPI_BASE_ADDR_INPUT0 = NPI's Base Address Input 0 Register
+ *
+ * The address to start reading Instructions from for Input-0.
+ */
+union cvmx_npi_base_addr_inputx {
+ uint64_t u64;
+ struct cvmx_npi_base_addr_inputx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t baddr : 61; /**< The address to read Instructions from for input 0.
+ This address is 8-byte aligned; for this reason,
+ address bits [2:0] will always be zero. */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t baddr : 61;
+#endif
+ } s;
+ struct cvmx_npi_base_addr_inputx_s cn30xx;
+ struct cvmx_npi_base_addr_inputx_s cn31xx;
+ struct cvmx_npi_base_addr_inputx_s cn38xx;
+ struct cvmx_npi_base_addr_inputx_s cn38xxp2;
+ struct cvmx_npi_base_addr_inputx_s cn50xx;
+ struct cvmx_npi_base_addr_inputx_s cn58xx;
+ struct cvmx_npi_base_addr_inputx_s cn58xxp1;
+};
+typedef union cvmx_npi_base_addr_inputx cvmx_npi_base_addr_inputx_t;
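+
+/* Minimal sketch (an illustration, not part of the original header): program
+ * the instruction base address for input port 0. CVMX_NPI_BASE_ADDR_INPUT0 is
+ * assumed to be defined earlier in this header, following the same accessor
+ * pattern as above; cvmx_write_csr() comes from cvmx-access.h. */
+static inline void cvmx_npi_set_input0_base_example(uint64_t phys_addr)
+{
+    cvmx_npi_base_addr_inputx_t base;
+    base.u64 = 0;
+    base.s.baddr = phys_addr >> 3; /* BADDR holds an 8-byte aligned address */
+    cvmx_write_csr(CVMX_NPI_BASE_ADDR_INPUT0, base.u64);
+}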
+
+/**
+ * cvmx_npi_base_addr_output#
+ *
+ * NPI_BASE_ADDR_OUTPUT0 = NPI's Base Address Output 0 Register
+ *
+ * The address to start reading Instructions from for Output-0.
+ */
+union cvmx_npi_base_addr_outputx {
+ uint64_t u64;
+ struct cvmx_npi_base_addr_outputx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t baddr : 61; /**< The address to read Instructions from for output 0.
+ This address is 8-byte aligned; for this reason,
+ address bits [2:0] will always be zero. */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t baddr : 61;
+#endif
+ } s;
+ struct cvmx_npi_base_addr_outputx_s cn30xx;
+ struct cvmx_npi_base_addr_outputx_s cn31xx;
+ struct cvmx_npi_base_addr_outputx_s cn38xx;
+ struct cvmx_npi_base_addr_outputx_s cn38xxp2;
+ struct cvmx_npi_base_addr_outputx_s cn50xx;
+ struct cvmx_npi_base_addr_outputx_s cn58xx;
+ struct cvmx_npi_base_addr_outputx_s cn58xxp1;
+};
+typedef union cvmx_npi_base_addr_outputx cvmx_npi_base_addr_outputx_t;
+
+/**
+ * cvmx_npi_bist_status
+ *
+ * NPI_BIST_STATUS = NPI's BIST Status Register
+ *
+ * Results from BIST runs of NPI's memories.
+ */
+union cvmx_npi_bist_status {
+ uint64_t u64;
+ struct cvmx_npi_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t csr_bs : 1; /**< BIST Status for the csr_fifo */
+ uint64_t dif_bs : 1; /**< BIST Status for the dif_fifo */
+ uint64_t rdp_bs : 1; /**< BIST Status for the rdp_fifo */
+ uint64_t pcnc_bs : 1; /**< BIST Status for the pcn_cnt_fifo */
+ uint64_t pcn_bs : 1; /**< BIST Status for the pcn_fifo */
+ uint64_t rdn_bs : 1; /**< BIST Status for the rdn_fifo */
+ uint64_t pcac_bs : 1; /**< BIST Status for the pca_cmd_fifo */
+ uint64_t pcad_bs : 1; /**< BIST Status for the pca_data_fifo */
+ uint64_t rdnl_bs : 1; /**< BIST Status for the rdn_length_fifo */
+ uint64_t pgf_bs : 1; /**< BIST Status for the pgf_fifo */
+ uint64_t pig_bs : 1; /**< BIST Status for the pig_fifo */
+ uint64_t pof0_bs : 1; /**< BIST Status for the pof0_fifo */
+ uint64_t pof1_bs : 1; /**< BIST Status for the pof1_fifo */
+ uint64_t pof2_bs : 1; /**< BIST Status for the pof2_fifo */
+ uint64_t pof3_bs : 1; /**< BIST Status for the pof3_fifo */
+ uint64_t pos_bs : 1; /**< BIST Status for the pos_fifo */
+ uint64_t nus_bs : 1; /**< BIST Status for the nus_fifo */
+ uint64_t dob_bs : 1; /**< BIST Status for the dob_fifo */
+ uint64_t pdf_bs : 1; /**< BIST Status for the pdf_fifo */
+ uint64_t dpi_bs : 1; /**< BIST Status for the dpi_fifo */
+#else
+ uint64_t dpi_bs : 1;
+ uint64_t pdf_bs : 1;
+ uint64_t dob_bs : 1;
+ uint64_t nus_bs : 1;
+ uint64_t pos_bs : 1;
+ uint64_t pof3_bs : 1;
+ uint64_t pof2_bs : 1;
+ uint64_t pof1_bs : 1;
+ uint64_t pof0_bs : 1;
+ uint64_t pig_bs : 1;
+ uint64_t pgf_bs : 1;
+ uint64_t rdnl_bs : 1;
+ uint64_t pcad_bs : 1;
+ uint64_t pcac_bs : 1;
+ uint64_t rdn_bs : 1;
+ uint64_t pcn_bs : 1;
+ uint64_t pcnc_bs : 1;
+ uint64_t rdp_bs : 1;
+ uint64_t dif_bs : 1;
+ uint64_t csr_bs : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_npi_bist_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t csr_bs : 1; /**< BIST Status for the csr_fifo */
+ uint64_t dif_bs : 1; /**< BIST Status for the dif_fifo */
+ uint64_t rdp_bs : 1; /**< BIST Status for the rdp_fifo */
+ uint64_t pcnc_bs : 1; /**< BIST Status for the pcn_cnt_fifo */
+ uint64_t pcn_bs : 1; /**< BIST Status for the pcn_fifo */
+ uint64_t rdn_bs : 1; /**< BIST Status for the rdn_fifo */
+ uint64_t pcac_bs : 1; /**< BIST Status for the pca_cmd_fifo */
+ uint64_t pcad_bs : 1; /**< BIST Status for the pca_data_fifo */
+ uint64_t rdnl_bs : 1; /**< BIST Status for the rdn_length_fifo */
+ uint64_t pgf_bs : 1; /**< BIST Status for the pgf_fifo */
+ uint64_t pig_bs : 1; /**< BIST Status for the pig_fifo */
+ uint64_t pof0_bs : 1; /**< BIST Status for the pof0_fifo */
+ uint64_t reserved_5_7 : 3;
+ uint64_t pos_bs : 1; /**< BIST Status for the pos_fifo */
+ uint64_t nus_bs : 1; /**< BIST Status for the nus_fifo */
+ uint64_t dob_bs : 1; /**< BIST Status for the dob_fifo */
+ uint64_t pdf_bs : 1; /**< BIST Status for the pdf_fifo */
+ uint64_t dpi_bs : 1; /**< BIST Status for the dpi_fifo */
+#else
+ uint64_t dpi_bs : 1;
+ uint64_t pdf_bs : 1;
+ uint64_t dob_bs : 1;
+ uint64_t nus_bs : 1;
+ uint64_t pos_bs : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pof0_bs : 1;
+ uint64_t pig_bs : 1;
+ uint64_t pgf_bs : 1;
+ uint64_t rdnl_bs : 1;
+ uint64_t pcad_bs : 1;
+ uint64_t pcac_bs : 1;
+ uint64_t rdn_bs : 1;
+ uint64_t pcn_bs : 1;
+ uint64_t pcnc_bs : 1;
+ uint64_t rdp_bs : 1;
+ uint64_t dif_bs : 1;
+ uint64_t csr_bs : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn30xx;
+ struct cvmx_npi_bist_status_s cn31xx;
+ struct cvmx_npi_bist_status_s cn38xx;
+ struct cvmx_npi_bist_status_s cn38xxp2;
+ struct cvmx_npi_bist_status_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t csr_bs : 1; /**< BIST Status for the csr_fifo */
+ uint64_t dif_bs : 1; /**< BIST Status for the dif_fifo */
+ uint64_t rdp_bs : 1; /**< BIST Status for the rdp_fifo */
+ uint64_t pcnc_bs : 1; /**< BIST Status for the pcn_cnt_fifo */
+ uint64_t pcn_bs : 1; /**< BIST Status for the pcn_fifo */
+ uint64_t rdn_bs : 1; /**< BIST Status for the rdn_fifo */
+ uint64_t pcac_bs : 1; /**< BIST Status for the pca_cmd_fifo */
+ uint64_t pcad_bs : 1; /**< BIST Status for the pca_data_fifo */
+ uint64_t rdnl_bs : 1; /**< BIST Status for the rdn_length_fifo */
+ uint64_t pgf_bs : 1; /**< BIST Status for the pgf_fifo */
+ uint64_t pig_bs : 1; /**< BIST Status for the pig_fifo */
+ uint64_t pof0_bs : 1; /**< BIST Status for the pof0_fifo */
+ uint64_t pof1_bs : 1; /**< BIST Status for the pof1_fifo */
+ uint64_t reserved_5_6 : 2;
+ uint64_t pos_bs : 1; /**< BIST Status for the pos_fifo */
+ uint64_t nus_bs : 1; /**< BIST Status for the nus_fifo */
+ uint64_t dob_bs : 1; /**< BIST Status for the dob_fifo */
+ uint64_t pdf_bs : 1; /**< BIST Status for the pdf_fifo */
+ uint64_t dpi_bs : 1; /**< BIST Status for the dpi_fifo */
+#else
+ uint64_t dpi_bs : 1;
+ uint64_t pdf_bs : 1;
+ uint64_t dob_bs : 1;
+ uint64_t nus_bs : 1;
+ uint64_t pos_bs : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t pof1_bs : 1;
+ uint64_t pof0_bs : 1;
+ uint64_t pig_bs : 1;
+ uint64_t pgf_bs : 1;
+ uint64_t rdnl_bs : 1;
+ uint64_t pcad_bs : 1;
+ uint64_t pcac_bs : 1;
+ uint64_t rdn_bs : 1;
+ uint64_t pcn_bs : 1;
+ uint64_t pcnc_bs : 1;
+ uint64_t rdp_bs : 1;
+ uint64_t dif_bs : 1;
+ uint64_t csr_bs : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn50xx;
+ struct cvmx_npi_bist_status_s cn58xx;
+ struct cvmx_npi_bist_status_s cn58xxp1;
+};
+typedef union cvmx_npi_bist_status cvmx_npi_bist_status_t;
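+
+/* Illustrative sketch, not part of the original header: check NPI BIST
+ * results, treating any set bit as a failed memory (the usual convention for
+ * cvmx BIST status registers). CVMX_NPI_BIST_STATUS is assumed to be defined
+ * earlier in this header; cvmx_read_csr() comes from cvmx-access.h. */
+static inline int cvmx_npi_bist_failed_example(void)
+{
+    cvmx_npi_bist_status_t bist;
+    bist.u64 = cvmx_read_csr(CVMX_NPI_BIST_STATUS);
+    return bist.u64 != 0; /* nonzero => at least one FIFO failed BIST */
+}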
+
+/**
+ * cvmx_npi_buff_size_output#
+ *
+ * NPI_BUFF_SIZE_OUTPUT0 = NPI's D/I Buffer Sizes For Output 0
+ *
+ * The size in bytes of the Data Buffer and Information Buffer for output 0.
+ */
+union cvmx_npi_buff_size_outputx {
+ uint64_t u64;
+ struct cvmx_npi_buff_size_outputx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t isize : 7; /**< The number of bytes to move to the Info-Pointer
+ from the front of the packet.
+ Legal values are 0-120. */
+ uint64_t bsize : 16; /**< The size in bytes of the area pointed to by
+ buffer pointer for output packet data. */
+#else
+ uint64_t bsize : 16;
+ uint64_t isize : 7;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_npi_buff_size_outputx_s cn30xx;
+ struct cvmx_npi_buff_size_outputx_s cn31xx;
+ struct cvmx_npi_buff_size_outputx_s cn38xx;
+ struct cvmx_npi_buff_size_outputx_s cn38xxp2;
+ struct cvmx_npi_buff_size_outputx_s cn50xx;
+ struct cvmx_npi_buff_size_outputx_s cn58xx;
+ struct cvmx_npi_buff_size_outputx_s cn58xxp1;
+};
+typedef union cvmx_npi_buff_size_outputx cvmx_npi_buff_size_outputx_t;
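+
+/* Minimal sketch (illustrative): set the data/info buffer sizes for an output
+ * port. CVMX_NPI_BUFF_SIZE_OUTPUTX(port) is an assumption here, following the
+ * accessor pattern defined earlier in this header. */
+static inline void cvmx_npi_set_out_bufsize_example(int port, int bsize, int isize)
+{
+    cvmx_npi_buff_size_outputx_t buf;
+    buf.u64 = 0;
+    buf.s.bsize = bsize; /* bytes per output data buffer */
+    buf.s.isize = isize; /* 0-120 bytes copied to the info pointer */
+    cvmx_write_csr(CVMX_NPI_BUFF_SIZE_OUTPUTX(port), buf.u64);
+}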
+
+/**
+ * cvmx_npi_comp_ctl
+ *
+ * NPI_COMP_CTL = PCI Compensation Control
+ *
+ * PCI Compensation Control
+ */
+union cvmx_npi_comp_ctl {
+ uint64_t u64;
+ struct cvmx_npi_comp_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t pctl : 5; /**< Bypass value for PCTL */
+ uint64_t nctl : 5; /**< Bypass value for NCTL */
+#else
+ uint64_t nctl : 5;
+ uint64_t pctl : 5;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_npi_comp_ctl_s cn50xx;
+ struct cvmx_npi_comp_ctl_s cn58xx;
+ struct cvmx_npi_comp_ctl_s cn58xxp1;
+};
+typedef union cvmx_npi_comp_ctl cvmx_npi_comp_ctl_t;
+
+/**
+ * cvmx_npi_ctl_status
+ *
+ * NPI_CTL_STATUS = NPI's Control Status Register
+ *
+ * Contains control and status for NPI.
+ * Writes to this register are not ordered with writes/reads to the PCI Memory space.
+ * To ensure that a write has completed, the user must read the register before
+ * making an access (i.e. to PCI memory space) that requires the value of this register to be updated.
+ */
+union cvmx_npi_ctl_status {
+ uint64_t u64;
+ struct cvmx_npi_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63 : 1;
+ uint64_t chip_rev : 8; /**< The revision of the N3. */
+ uint64_t dis_pniw : 1; /**< When asserted '1', accesses from the PNI Window
+ Registers are disabled. */
+ uint64_t out3_enb : 1; /**< When asserted '1' the output3 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t out2_enb : 1; /**< When asserted '1' the output2 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t out1_enb : 1; /**< When asserted '1' the output1 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t out0_enb : 1; /**< When asserted '1' the output0 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t ins3_enb : 1; /**< When asserted '1' the gather3 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t ins2_enb : 1; /**< When asserted '1' the gather2 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t ins1_enb : 1; /**< When asserted '1' the gather1 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t ins0_enb : 1; /**< When asserted '1' the gather0 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t ins3_64b : 1; /**< When asserted '1' the instructions read by the
+ gather3 engine are 64-Byte instructions, when
+ de-asserted '0' instructions are 32-byte. */
+ uint64_t ins2_64b : 1; /**< When asserted '1' the instructions read by the
+ gather2 engine are 64-Byte instructions, when
+ de-asserted '0' instructions are 32-byte. */
+ uint64_t ins1_64b : 1; /**< When asserted '1' the instructions read by the
+ gather1 engine are 64-Byte instructions, when
+ de-asserted '0' instructions are 32-byte. */
+ uint64_t ins0_64b : 1; /**< When asserted '1' the instructions read by the
+ gather0 engine are 64-Byte instructions, when
+ de-asserted '0' instructions are 32-byte. */
+ uint64_t pci_wdis : 1; /**< When set '1' disables access to registers in
+ PNI address range 0x1000 - 0x17FF from the PCI. */
+ uint64_t wait_com : 1; /**< When set '1' causes the NPI to wait for a commit
+ from the L2C before sending additional access to
+ the L2C from the PCI. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t max_word : 5; /**< The maximum number of words to merge into a single
+ write operation from the PPs to the PCI. Legal
+ values are 1 to 32, where a '0' is treated as 32. */
+ uint64_t reserved_10_31 : 22;
+ uint64_t timer : 10; /**< When the NPI starts a PP to PCI write it will wait
+ no longer than the value of TIMER in eclks to
+ merge additional writes from the PPs into 1
+ large write. Legal values for this field are 1 to
+ 1024, where a value of '0' is treated as 1024. */
+#else
+ uint64_t timer : 10;
+ uint64_t reserved_10_31 : 22;
+ uint64_t max_word : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t wait_com : 1;
+ uint64_t pci_wdis : 1;
+ uint64_t ins0_64b : 1;
+ uint64_t ins1_64b : 1;
+ uint64_t ins2_64b : 1;
+ uint64_t ins3_64b : 1;
+ uint64_t ins0_enb : 1;
+ uint64_t ins1_enb : 1;
+ uint64_t ins2_enb : 1;
+ uint64_t ins3_enb : 1;
+ uint64_t out0_enb : 1;
+ uint64_t out1_enb : 1;
+ uint64_t out2_enb : 1;
+ uint64_t out3_enb : 1;
+ uint64_t dis_pniw : 1;
+ uint64_t chip_rev : 8;
+ uint64_t reserved_63_63 : 1;
+#endif
+ } s;
+ struct cvmx_npi_ctl_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63 : 1;
+ uint64_t chip_rev : 8; /**< The revision of the N3. */
+ uint64_t dis_pniw : 1; /**< When asserted '1', accesses from the PNI Window
+ Registers are disabled. */
+ uint64_t reserved_51_53 : 3;
+ uint64_t out0_enb : 1; /**< When asserted '1' the output0 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t reserved_47_49 : 3;
+ uint64_t ins0_enb : 1; /**< When asserted '1' the gather0 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t reserved_43_45 : 3;
+ uint64_t ins0_64b : 1; /**< When asserted '1' the instructions read by the
+ gather0 engine are 64-Byte instructions, when
+ de-asserted '0' instructions are 32-byte. */
+ uint64_t pci_wdis : 1; /**< When set '1' disables access to registers in
+ PNI address range 0x1000 - 0x17FF from the PCI. */
+ uint64_t wait_com : 1; /**< When set '1' causes the NPI to wait for a commit
+ from the L2C before sending additional access to
+ the L2C from the PCI. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t max_word : 5; /**< The maximum number of words to merge into a single
+ write operation from the PPs to the PCI. Legal
+ values are 1 to 32, where a '0' is treated as 32. */
+ uint64_t reserved_10_31 : 22;
+ uint64_t timer : 10; /**< When the NPI starts a PP to PCI write it will wait
+ no longer than the value of TIMER in eclks to
+ merge additional writes from the PPs into 1
+ large write. Legal values for this field are 1 to
+ 1024, where a value of '0' is treated as 1024. */
+#else
+ uint64_t timer : 10;
+ uint64_t reserved_10_31 : 22;
+ uint64_t max_word : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t wait_com : 1;
+ uint64_t pci_wdis : 1;
+ uint64_t ins0_64b : 1;
+ uint64_t reserved_43_45 : 3;
+ uint64_t ins0_enb : 1;
+ uint64_t reserved_47_49 : 3;
+ uint64_t out0_enb : 1;
+ uint64_t reserved_51_53 : 3;
+ uint64_t dis_pniw : 1;
+ uint64_t chip_rev : 8;
+ uint64_t reserved_63_63 : 1;
+#endif
+ } cn30xx;
+ struct cvmx_npi_ctl_status_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63 : 1;
+ uint64_t chip_rev : 8; /**< The revision of the N3.
+ 0 => pass1.x, 1 => 2.0 */
+ uint64_t dis_pniw : 1; /**< When asserted '1', accesses from the PNI Window
+ Registers are disabled. */
+ uint64_t reserved_52_53 : 2;
+ uint64_t out1_enb : 1; /**< When asserted '1' the output1 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t out0_enb : 1; /**< When asserted '1' the output0 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t reserved_48_49 : 2;
+ uint64_t ins1_enb : 1; /**< When asserted '1' the gather1 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t ins0_enb : 1; /**< When asserted '1' the gather0 engine is enabled.
+ After enabling the values of the associated
+ Address and Size Register should not be changed. */
+ uint64_t reserved_44_45 : 2;
+ uint64_t ins1_64b : 1; /**< When asserted '1' the instructions read by the
+ gather1 engine are 64-Byte instructions, when
+ de-asserted '0' instructions are 32-byte. */
+ uint64_t ins0_64b : 1; /**< When asserted '1' the instructions read by the
+ gather0 engine are 64-Byte instructions, when
+ de-asserted '0' instructions are 32-byte. */
+ uint64_t pci_wdis : 1; /**< When set '1' disables access to registers in
+ PNI address range 0x1000 - 0x17FF from the PCI. */
+ uint64_t wait_com : 1; /**< When set '1' causes the NPI to wait for a commit
+ from the L2C before sending additional access to
+ the L2C from the PCI. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t max_word : 5; /**< The maximum number of words to merge into a single
+ write operation from the PPs to the PCI. Legal
+ values are 1 to 32, where a '0' is treated as 32. */
+ uint64_t reserved_10_31 : 22;
+ uint64_t timer : 10; /**< When the NPI starts a PP to PCI write it will wait
+ no longer than the value of TIMER in eclks to
+ merge additional writes from the PPs into 1
+ large write. Legal values for this field are 1 to
+ 1024, where a value of '0' is treated as 1024. */
+#else
+ uint64_t timer : 10;
+ uint64_t reserved_10_31 : 22;
+ uint64_t max_word : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t wait_com : 1;
+ uint64_t pci_wdis : 1;
+ uint64_t ins0_64b : 1;
+ uint64_t ins1_64b : 1;
+ uint64_t reserved_44_45 : 2;
+ uint64_t ins0_enb : 1;
+ uint64_t ins1_enb : 1;
+ uint64_t reserved_48_49 : 2;
+ uint64_t out0_enb : 1;
+ uint64_t out1_enb : 1;
+ uint64_t reserved_52_53 : 2;
+ uint64_t dis_pniw : 1;
+ uint64_t chip_rev : 8;
+ uint64_t reserved_63_63 : 1;
+#endif
+ } cn31xx;
+ struct cvmx_npi_ctl_status_s cn38xx;
+ struct cvmx_npi_ctl_status_s cn38xxp2;
+ struct cvmx_npi_ctl_status_cn31xx cn50xx;
+ struct cvmx_npi_ctl_status_s cn58xx;
+ struct cvmx_npi_ctl_status_s cn58xxp1;
+};
+typedef union cvmx_npi_ctl_status cvmx_npi_ctl_status_t;
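+
+/* Illustrative sketch, not part of the original header: enable the gather0
+ * engine with a read-modify-write, then read the register back. Per the
+ * description above, writes to NPI_CTL_STATUS are not ordered with PCI memory
+ * accesses, so the readback ensures the write has completed before any access
+ * that depends on it. CVMX_NPI_CTL_STATUS is assumed defined earlier. */
+static inline void cvmx_npi_enable_gather0_example(void)
+{
+    cvmx_npi_ctl_status_t ctl;
+    ctl.u64 = cvmx_read_csr(CVMX_NPI_CTL_STATUS);
+    ctl.s.ins0_enb = 1;
+    cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl.u64);
+    (void)cvmx_read_csr(CVMX_NPI_CTL_STATUS); /* readback forces the write to commit */
+}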
+
+/**
+ * cvmx_npi_dbg_select
+ *
+ * NPI_DBG_SELECT = Debug Select Register
+ *
+ * Contains the debug select value last written to the RSLs.
+ */
+union cvmx_npi_dbg_select {
+ uint64_t u64;
+ struct cvmx_npi_dbg_select_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t dbg_sel : 16; /**< When this register is written its value is sent to
+ all RSLs. */
+#else
+ uint64_t dbg_sel : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_npi_dbg_select_s cn30xx;
+ struct cvmx_npi_dbg_select_s cn31xx;
+ struct cvmx_npi_dbg_select_s cn38xx;
+ struct cvmx_npi_dbg_select_s cn38xxp2;
+ struct cvmx_npi_dbg_select_s cn50xx;
+ struct cvmx_npi_dbg_select_s cn58xx;
+ struct cvmx_npi_dbg_select_s cn58xxp1;
+};
+typedef union cvmx_npi_dbg_select cvmx_npi_dbg_select_t;
+
+/**
+ * cvmx_npi_dma_control
+ *
+ * NPI_DMA_CONTROL = DMA Control Register
+ *
+ * Controls operation of the DMA IN/OUT of the NPI.
+ */
+union cvmx_npi_dma_control {
+ uint64_t u64;
+ struct cvmx_npi_dma_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t b0_lend : 1; /**< When set '1' and the NPI is in the mode to write
+ 0 to L2C memory when a DMA is done, the address
+ to be written to will be treated as a Little
+ Endian address. This field is new to PASS-2. */
+ uint64_t dwb_denb : 1; /**< When set '1' the NPI will send a value in the DWB
+ field for a free page operation for the memory
+ that contained the data in N3. */
+ uint64_t dwb_ichk : 9; /**< When Instruction Chunks for DMA operations are freed
+ this value is used for the DWB field of the
+ operation. */
+ uint64_t fpa_que : 3; /**< The FPA queue that the instruction-chunk page will
+ be returned to when used. */
+ uint64_t o_add1 : 1; /**< When set '1', 1 will be added to the DMA counters;
+ when '0', the number of bytes in the DMA transfer
+ will be added to the count register. */
+ uint64_t o_ro : 1; /**< Relaxed Ordering Mode for DMA. */
+ uint64_t o_ns : 1; /**< NoSnoop for DMA. */
+ uint64_t o_es : 2; /**< Endian Swap Mode for DMA. */
+ uint64_t o_mode : 1; /**< Select PCI_POINTER MODE to be used.
+ '1' use pointer values for address and register
+ values for RO, ES, and NS, '0' use register
+ values for address and pointer values for
+ RO, ES, and NS. */
+ uint64_t hp_enb : 1; /**< Enables the High Priority DMA.
+ While this bit is disabled ('0'), the value in
+ NPI_HIGHP_IBUFF_SADDR is re-loaded as the starting
+ address of the High Priority DMA engine. The CSIZE
+ field will be reloaded for the High Priority
+ DMA Engine. */
+ uint64_t lp_enb : 1; /**< Enables the Low Priority DMA.
+ While this bit is disabled ('0'), the value in
+ NPI_LOWP_IBUFF_SADDR is re-loaded as the starting
+ address of the Low Priority DMA engine.
+ PASS-2: when this bit is '0', the value in the
+ CSIZE field will be reloaded for the Low Priority
+ DMA Engine. */
+ uint64_t csize : 14; /**< The size in words of the DMA Instruction Chunk.
+ This value should only be written once. After
+ writing this value a new value will not be
+ recognized until the end of the DMA I-Chunk is
+ reached. */
+#else
+ uint64_t csize : 14;
+ uint64_t lp_enb : 1;
+ uint64_t hp_enb : 1;
+ uint64_t o_mode : 1;
+ uint64_t o_es : 2;
+ uint64_t o_ns : 1;
+ uint64_t o_ro : 1;
+ uint64_t o_add1 : 1;
+ uint64_t fpa_que : 3;
+ uint64_t dwb_ichk : 9;
+ uint64_t dwb_denb : 1;
+ uint64_t b0_lend : 1;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_npi_dma_control_s cn30xx;
+ struct cvmx_npi_dma_control_s cn31xx;
+ struct cvmx_npi_dma_control_s cn38xx;
+ struct cvmx_npi_dma_control_s cn38xxp2;
+ struct cvmx_npi_dma_control_s cn50xx;
+ struct cvmx_npi_dma_control_s cn58xx;
+ struct cvmx_npi_dma_control_s cn58xxp1;
+};
+typedef union cvmx_npi_dma_control cvmx_npi_dma_control_t;
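+
+/* Minimal sketch (illustrative): set the DMA instruction chunk size and then
+ * enable both DMA engines. Per the field descriptions above, SADDR/CSIZE are
+ * only reloaded while the enable bits are '0', so CSIZE is programmed before
+ * the engines are enabled. CVMX_NPI_DMA_CONTROL is assumed defined earlier. */
+static inline void cvmx_npi_dma_enable_example(int csize_words)
+{
+    cvmx_npi_dma_control_t dma;
+    dma.u64 = cvmx_read_csr(CVMX_NPI_DMA_CONTROL);
+    dma.s.csize = csize_words; /* I-chunk size in words; write once */
+    dma.s.lp_enb = 1;
+    dma.s.hp_enb = 1;
+    cvmx_write_csr(CVMX_NPI_DMA_CONTROL, dma.u64);
+}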
+
+/**
+ * cvmx_npi_dma_highp_counts
+ *
+ * NPI_DMA_HIGHP_COUNTS = NPI's High Priority DMA Counts
+ *
+ * Values for determining the number of instructions for High Priority DMA in the NPI.
+ */
+union cvmx_npi_dma_highp_counts {
+ uint64_t u64;
+ struct cvmx_npi_dma_highp_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t fcnt : 7; /**< Number of words in the Instruction FIFO. */
+ uint64_t dbell : 32; /**< Number of available words of Instructions to read. */
+#else
+ uint64_t dbell : 32;
+ uint64_t fcnt : 7;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } s;
+ struct cvmx_npi_dma_highp_counts_s cn30xx;
+ struct cvmx_npi_dma_highp_counts_s cn31xx;
+ struct cvmx_npi_dma_highp_counts_s cn38xx;
+ struct cvmx_npi_dma_highp_counts_s cn38xxp2;
+ struct cvmx_npi_dma_highp_counts_s cn50xx;
+ struct cvmx_npi_dma_highp_counts_s cn58xx;
+ struct cvmx_npi_dma_highp_counts_s cn58xxp1;
+};
+typedef union cvmx_npi_dma_highp_counts cvmx_npi_dma_highp_counts_t;
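+
+/* Illustrative sketch: poll the high priority DMA doorbell count to see how
+ * many instruction words remain to be read. CVMX_NPI_DMA_HIGHP_COUNTS is
+ * assumed to be defined earlier in this header. */
+static inline uint32_t cvmx_npi_highp_words_pending_example(void)
+{
+    cvmx_npi_dma_highp_counts_t cnt;
+    cnt.u64 = cvmx_read_csr(CVMX_NPI_DMA_HIGHP_COUNTS);
+    return cnt.s.dbell; /* available instruction words left to read */
+}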
+
+/**
+ * cvmx_npi_dma_highp_naddr
+ *
+ * NPI_DMA_HIGHP_NADDR = NPI's High Priority DMA Next Ichunk Address
+ *
+ * The place the NPI will read the next Ichunk data from. This is valid when STATE is 0.
+ */
+union cvmx_npi_dma_highp_naddr {
+ uint64_t u64;
+ struct cvmx_npi_dma_highp_naddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t state : 4; /**< The DMA instruction engine state vector.
+ Typical value is 0 (IDLE). */
+ uint64_t addr : 36; /**< The next L2C address to read DMA instructions
+ from for the High Priority DMA engine. */
+#else
+ uint64_t addr : 36;
+ uint64_t state : 4;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_npi_dma_highp_naddr_s cn30xx;
+ struct cvmx_npi_dma_highp_naddr_s cn31xx;
+ struct cvmx_npi_dma_highp_naddr_s cn38xx;
+ struct cvmx_npi_dma_highp_naddr_s cn38xxp2;
+ struct cvmx_npi_dma_highp_naddr_s cn50xx;
+ struct cvmx_npi_dma_highp_naddr_s cn58xx;
+ struct cvmx_npi_dma_highp_naddr_s cn58xxp1;
+};
+typedef union cvmx_npi_dma_highp_naddr cvmx_npi_dma_highp_naddr_t;
+
+/**
+ * cvmx_npi_dma_lowp_counts
+ *
+ * NPI_DMA_LOWP_COUNTS = NPI's Low Priority DMA Counts
+ *
+ * Values for determining the number of instructions for Low Priority DMA in the NPI.
+ */
+union cvmx_npi_dma_lowp_counts {
+ uint64_t u64;
+ struct cvmx_npi_dma_lowp_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t fcnt : 7; /**< Number of words in the Instruction FIFO. */
+ uint64_t dbell : 32; /**< Number of available words of Instructions to read. */
+#else
+ uint64_t dbell : 32;
+ uint64_t fcnt : 7;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } s;
+ struct cvmx_npi_dma_lowp_counts_s cn30xx;
+ struct cvmx_npi_dma_lowp_counts_s cn31xx;
+ struct cvmx_npi_dma_lowp_counts_s cn38xx;
+ struct cvmx_npi_dma_lowp_counts_s cn38xxp2;
+ struct cvmx_npi_dma_lowp_counts_s cn50xx;
+ struct cvmx_npi_dma_lowp_counts_s cn58xx;
+ struct cvmx_npi_dma_lowp_counts_s cn58xxp1;
+};
+typedef union cvmx_npi_dma_lowp_counts cvmx_npi_dma_lowp_counts_t;
+
+/**
+ * cvmx_npi_dma_lowp_naddr
+ *
+ * NPI_DMA_LOWP_NADDR = NPI's Low Priority DMA Next Ichunk Address
+ *
+ * The place the NPI will read the next Ichunk data from. This is valid when STATE is 0.
+ */
+union cvmx_npi_dma_lowp_naddr {
+ uint64_t u64;
+ struct cvmx_npi_dma_lowp_naddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t state : 4; /**< The DMA instruction engine state vector.
+ Typical value is 0 (IDLE). */
+ uint64_t addr : 36; /**< The next L2C address to read DMA instructions
+ from for the Low Priority DMA engine. */
+#else
+ uint64_t addr : 36;
+ uint64_t state : 4;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_npi_dma_lowp_naddr_s cn30xx;
+ struct cvmx_npi_dma_lowp_naddr_s cn31xx;
+ struct cvmx_npi_dma_lowp_naddr_s cn38xx;
+ struct cvmx_npi_dma_lowp_naddr_s cn38xxp2;
+ struct cvmx_npi_dma_lowp_naddr_s cn50xx;
+ struct cvmx_npi_dma_lowp_naddr_s cn58xx;
+ struct cvmx_npi_dma_lowp_naddr_s cn58xxp1;
+};
+typedef union cvmx_npi_dma_lowp_naddr cvmx_npi_dma_lowp_naddr_t;
+
+/**
+ * cvmx_npi_highp_dbell
+ *
+ * NPI_HIGHP_DBELL = High Priority Door Bell
+ *
+ * The door bell register for the high priority DMA queue.
+ */
+union cvmx_npi_highp_dbell {
+ uint64_t u64;
+ struct cvmx_npi_highp_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t dbell : 16; /**< The value written to this register is added to the
+ number of 8-byte words to be read and processed for
+ the high priority DMA queue. */
+#else
+ uint64_t dbell : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_npi_highp_dbell_s cn30xx;
+ struct cvmx_npi_highp_dbell_s cn31xx;
+ struct cvmx_npi_highp_dbell_s cn38xx;
+ struct cvmx_npi_highp_dbell_s cn38xxp2;
+ struct cvmx_npi_highp_dbell_s cn50xx;
+ struct cvmx_npi_highp_dbell_s cn58xx;
+ struct cvmx_npi_highp_dbell_s cn58xxp1;
+};
+typedef union cvmx_npi_highp_dbell cvmx_npi_highp_dbell_t;
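+
+/* Minimal sketch (illustrative): ring the high priority DMA doorbell after
+ * queueing new instructions. The written value is added to the engine's count
+ * of 8-byte instruction words to process. CVMX_NPI_HIGHP_DBELL is assumed
+ * defined earlier in this header. */
+static inline void cvmx_npi_highp_ring_example(unsigned words)
+{
+    cvmx_npi_highp_dbell_t dbell;
+    dbell.u64 = 0;
+    dbell.s.dbell = words; /* 8-byte instruction words just queued */
+    cvmx_write_csr(CVMX_NPI_HIGHP_DBELL, dbell.u64);
+}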
+
+/**
+ * cvmx_npi_highp_ibuff_saddr
+ *
+ * NPI_HIGHP_IBUFF_SADDR = DMA High Priority Instruction Buffer Starting Address
+ *
+ * The address to start reading Instructions from for HIGHP.
+ */
+union cvmx_npi_highp_ibuff_saddr {
+ uint64_t u64;
+ struct cvmx_npi_highp_ibuff_saddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t saddr : 36; /**< The starting address to read the first instruction. */
+#else
+ uint64_t saddr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_npi_highp_ibuff_saddr_s cn30xx;
+ struct cvmx_npi_highp_ibuff_saddr_s cn31xx;
+ struct cvmx_npi_highp_ibuff_saddr_s cn38xx;
+ struct cvmx_npi_highp_ibuff_saddr_s cn38xxp2;
+ struct cvmx_npi_highp_ibuff_saddr_s cn50xx;
+ struct cvmx_npi_highp_ibuff_saddr_s cn58xx;
+ struct cvmx_npi_highp_ibuff_saddr_s cn58xxp1;
+};
+typedef union cvmx_npi_highp_ibuff_saddr cvmx_npi_highp_ibuff_saddr_t;
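+
+/* Illustrative sketch: point the high priority engine at its first instruction
+ * chunk. Per NPI_DMA_CONTROL above, this register is only reloaded into the
+ * engine while HP_ENB is '0', so program it before setting the enable bit.
+ * CVMX_NPI_HIGHP_IBUFF_SADDR is assumed defined earlier in this header. */
+static inline void cvmx_npi_highp_set_saddr_example(uint64_t phys_addr)
+{
+    cvmx_npi_highp_ibuff_saddr_t saddr;
+    saddr.u64 = 0;
+    saddr.s.saddr = phys_addr; /* 36-bit L2C address of the first I-chunk */
+    cvmx_write_csr(CVMX_NPI_HIGHP_IBUFF_SADDR, saddr.u64);
+}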
+
+/**
+ * cvmx_npi_input_control
+ *
+ * NPI_INPUT_CONTROL = NPI's Input Control Register
+ *
+ * Control for reads for gather list and instructions.
+ */
+union cvmx_npi_input_control {
+ uint64_t u64;
+ struct cvmx_npi_input_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t pkt_rr : 1; /**< When set '1' the input packet selection will be
+ made with a Round Robin arbitration. When '0'
+ the input packet port is fixed in priority,
+ where the lower port number has higher priority.
+ PASS3 Field */
+ uint64_t pbp_dhi : 13; /**< Field used in calculating a DPTR
+ when [PBP] is set. */
+ uint64_t d_nsr : 1; /**< Enables '1' NoSnoop for reading of
+ gather data. */
+ uint64_t d_esr : 2; /**< The Endian-Swap-Mode for reading of
+ gather data. */
+ uint64_t d_ror : 1; /**< Enables '1' Relaxed Ordering for reading of
+ gather data. */
+ uint64_t use_csr : 1; /**< When set '1' the csr value will be used for
+ ROR, ESR, and NSR. When clear '0' the value in
+ DPTR will be used. In turn the bits not used for
+ ROR, ESR, and NSR, will be used for bits [63:60]
+ of the address used to fetch packet data. */
+ uint64_t nsr : 1; /**< Enables '1' NoSnoop for reading of
+ gather list and gather instruction. */
+ uint64_t esr : 2; /**< The Endian-Swap-Mode for reading of
+ gather list and gather instruction. */
+ uint64_t ror : 1; /**< Enables '1' Relaxed Ordering for reading of
+ gather list and gather instruction. */
+#else
+ uint64_t ror : 1;
+ uint64_t esr : 2;
+ uint64_t nsr : 1;
+ uint64_t use_csr : 1;
+ uint64_t d_ror : 1;
+ uint64_t d_esr : 2;
+ uint64_t d_nsr : 1;
+ uint64_t pbp_dhi : 13;
+ uint64_t pkt_rr : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_npi_input_control_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t pbp_dhi : 13; /**< Field used in calculating a DPTR
+ when [PBP] is set. */
+ uint64_t d_nsr : 1; /**< Enables '1' NoSnoop for reading of
+ gather data. */
+ uint64_t d_esr : 2; /**< The Endian-Swap-Mode for reading of
+ gather data. */
+ uint64_t d_ror : 1; /**< Enables '1' Relaxed Ordering for reading of
+ gather data. */
+ uint64_t use_csr : 1; /**< When set '1' the csr value will be used for
+ ROR, ESR, and NSR. When clear '0' the value in
+ DPTR will be used. In turn the bits not used for
+ ROR, ESR, and NSR, will be used for bits [63:60]
+ of the address used to fetch packet data. */
+ uint64_t nsr : 1; /**< Enables '1' NoSnoop for reading of
+ gather list and gather instruction. */
+ uint64_t esr : 2; /**< The Endian-Swap-Mode for reading of
+ gather list and gather instruction. */
+ uint64_t ror : 1; /**< Enables '1' Relaxed Ordering for reading of
+ gather list and gather instruction. */
+#else
+ uint64_t ror : 1;
+ uint64_t esr : 2;
+ uint64_t nsr : 1;
+ uint64_t use_csr : 1;
+ uint64_t d_ror : 1;
+ uint64_t d_esr : 2;
+ uint64_t d_nsr : 1;
+ uint64_t pbp_dhi : 13;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } cn30xx;
+ struct cvmx_npi_input_control_cn30xx cn31xx;
+ struct cvmx_npi_input_control_s cn38xx;
+ struct cvmx_npi_input_control_cn30xx cn38xxp2;
+ struct cvmx_npi_input_control_s cn50xx;
+ struct cvmx_npi_input_control_s cn58xx;
+ struct cvmx_npi_input_control_s cn58xxp1;
+};
+typedef union cvmx_npi_input_control cvmx_npi_input_control_t;
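+
+/* Minimal sketch (illustrative): use the CSR-supplied ordering attributes for
+ * packet data reads instead of the per-DPTR bits, and select round robin
+ * input port arbitration. CVMX_NPI_INPUT_CONTROL is assumed defined earlier. */
+static inline void cvmx_npi_input_csr_mode_example(void)
+{
+    cvmx_npi_input_control_t in_ctl;
+    in_ctl.u64 = cvmx_read_csr(CVMX_NPI_INPUT_CONTROL);
+    in_ctl.s.use_csr = 1; /* take ROR/ESR/NSR from this CSR */
+    in_ctl.s.pkt_rr = 1;  /* round robin between input ports (PASS3 field) */
+    cvmx_write_csr(CVMX_NPI_INPUT_CONTROL, in_ctl.u64);
+}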
+
+/**
+ * cvmx_npi_int_enb
+ *
+ * NPI_INTERRUPT_ENB = NPI's Interrupt Enable Register
+ *
+ * Used to enable the various interrupting conditions of NPI
+ */
+union cvmx_npi_int_enb {
+ uint64_t u64;
+ struct cvmx_npi_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t q1_a_f : 1; /**< Enables NPI_INT_SUM[Q1_A_F] to generate an
+ interrupt. */
+ uint64_t q1_s_e : 1; /**< Enables NPI_INT_SUM[Q1_S_E] to generate an
+ interrupt. */
+ uint64_t pdf_p_f : 1; /**< Enables NPI_INT_SUM[PDF_P_F] to generate an
+ interrupt. */
+ uint64_t pdf_p_e : 1; /**< Enables NPI_INT_SUM[PDF_P_E] to generate an
+ interrupt. */
+ uint64_t pcf_p_f : 1; /**< Enables NPI_INT_SUM[PCF_P_F] to generate an
+ interrupt. */
+ uint64_t pcf_p_e : 1; /**< Enables NPI_INT_SUM[PCF_P_E] to generate an
+ interrupt. */
+ uint64_t rdx_s_e : 1; /**< Enables NPI_INT_SUM[RDX_S_E] to generate an
+ interrupt. */
+ uint64_t rwx_s_e : 1; /**< Enables NPI_INT_SUM[RWX_S_E] to generate an
+ interrupt. */
+ uint64_t pnc_a_f : 1; /**< Enables NPI_INT_SUM[PNC_A_F] to generate an
+ interrupt. */
+ uint64_t pnc_s_e : 1; /**< Enables NPI_INT_SUM[PNC_S_E] to generate an
+ interrupt. */
+ uint64_t com_a_f : 1; /**< Enables NPI_INT_SUM[COM_A_F] to generate an
+ interrupt. */
+ uint64_t com_s_e : 1; /**< Enables NPI_INT_SUM[COM_S_E] to generate an
+ interrupt. */
+ uint64_t q3_a_f : 1; /**< Enables NPI_INT_SUM[Q3_A_F] to generate an
+ interrupt. */
+ uint64_t q3_s_e : 1; /**< Enables NPI_INT_SUM[Q3_S_E] to generate an
+ interrupt. */
+ uint64_t q2_a_f : 1; /**< Enables NPI_INT_SUM[Q2_A_F] to generate an
+ interrupt. */
+ uint64_t q2_s_e : 1; /**< Enables NPI_INT_SUM[Q2_S_E] to generate an
+ interrupt. */
+ uint64_t pcr_a_f : 1; /**< Enables NPI_INT_SUM[PCR_A_F] to generate an
+ interrupt. */
+ uint64_t pcr_s_e : 1; /**< Enables NPI_INT_SUM[PCR_S_E] to generate an
+ interrupt. */
+ uint64_t fcr_a_f : 1; /**< Enables NPI_INT_SUM[FCR_A_F] to generate an
+ interrupt. */
+ uint64_t fcr_s_e : 1; /**< Enables NPI_INT_SUM[FCR_S_E] to generate an
+ interrupt. */
+ uint64_t iobdma : 1; /**< Enables NPI_INT_SUM[IOBDMA] to generate an
+ interrupt. */
+ uint64_t p_dperr : 1; /**< Enables NPI_INT_SUM[P_DPERR] to generate an
+ interrupt. */
+ uint64_t win_rto : 1; /**< Enables NPI_INT_SUM[WIN_RTO] to generate an
+ interrupt. */
+ uint64_t i3_pperr : 1; /**< Enables NPI_INT_SUM[I3_PPERR] to generate an
+ interrupt. */
+ uint64_t i2_pperr : 1; /**< Enables NPI_INT_SUM[I2_PPERR] to generate an
+ interrupt. */
+ uint64_t i1_pperr : 1; /**< Enables NPI_INT_SUM[I1_PPERR] to generate an
+ interrupt. */
+ uint64_t i0_pperr : 1; /**< Enables NPI_INT_SUM[I0_PPERR] to generate an
+ interrupt. */
+ uint64_t p3_ptout : 1; /**< Enables NPI_INT_SUM[P3_PTOUT] to generate an
+ interrupt. */
+ uint64_t p2_ptout : 1; /**< Enables NPI_INT_SUM[P2_PTOUT] to generate an
+ interrupt. */
+ uint64_t p1_ptout : 1; /**< Enables NPI_INT_SUM[P1_PTOUT] to generate an
+ interrupt. */
+ uint64_t p0_ptout : 1; /**< Enables NPI_INT_SUM[P0_PTOUT] to generate an
+ interrupt. */
+ uint64_t p3_pperr : 1; /**< Enables NPI_INT_SUM[P3_PPERR] to generate an
+ interrupt. */
+ uint64_t p2_pperr : 1; /**< Enables NPI_INT_SUM[P2_PPERR] to generate an
+ interrupt. */
+ uint64_t p1_pperr : 1; /**< Enables NPI_INT_SUM[P1_PPERR] to generate an
+ interrupt. */
+ uint64_t p0_pperr : 1; /**< Enables NPI_INT_SUM[P0_PPERR] to generate an
+ interrupt. */
+ uint64_t g3_rtout : 1; /**< Enables NPI_INT_SUM[G3_RTOUT] to generate an
+ interrupt. */
+ uint64_t g2_rtout : 1; /**< Enables NPI_INT_SUM[G2_RTOUT] to generate an
+ interrupt. */
+ uint64_t g1_rtout : 1; /**< Enables NPI_INT_SUM[G1_RTOUT] to generate an
+ interrupt. */
+ uint64_t g0_rtout : 1; /**< Enables NPI_INT_SUM[G0_RTOUT] to generate an
+ interrupt. */
+ uint64_t p3_perr : 1; /**< Enables NPI_INT_SUM[P3_PERR] to generate an
+ interrupt. */
+ uint64_t p2_perr : 1; /**< Enables NPI_INT_SUM[P2_PERR] to generate an
+ interrupt. */
+ uint64_t p1_perr : 1; /**< Enables NPI_INT_SUM[P1_PERR] to generate an
+ interrupt. */
+ uint64_t p0_perr : 1; /**< Enables NPI_INT_SUM[P0_PERR] to generate an
+ interrupt. */
+ uint64_t p3_rtout : 1; /**< Enables NPI_INT_SUM[P3_RTOUT] to generate an
+ interrupt. */
+ uint64_t p2_rtout : 1; /**< Enables NPI_INT_SUM[P2_RTOUT] to generate an
+ interrupt. */
+ uint64_t p1_rtout : 1; /**< Enables NPI_INT_SUM[P1_RTOUT] to generate an
+ interrupt. */
+ uint64_t p0_rtout : 1; /**< Enables NPI_INT_SUM[P0_RTOUT] to generate an
+ interrupt. */
+ uint64_t i3_overf : 1; /**< Enables NPI_INT_SUM[I3_OVERF] to generate an
+ interrupt. */
+ uint64_t i2_overf : 1; /**< Enables NPI_INT_SUM[I2_OVERF] to generate an
+ interrupt. */
+ uint64_t i1_overf : 1; /**< Enables NPI_INT_SUM[I1_OVERF] to generate an
+ interrupt. */
+ uint64_t i0_overf : 1; /**< Enables NPI_INT_SUM[I0_OVERF] to generate an
+ interrupt. */
+ uint64_t i3_rtout : 1; /**< Enables NPI_INT_SUM[I3_RTOUT] to generate an
+ interrupt. */
+ uint64_t i2_rtout : 1; /**< Enables NPI_INT_SUM[I2_RTOUT] to generate an
+ interrupt. */
+ uint64_t i1_rtout : 1; /**< Enables NPI_INT_SUM[I1_RTOUT] to generate an
+ interrupt. */
+ uint64_t i0_rtout : 1; /**< Enables NPI_INT_SUM[I0_RTOUT] to generate an
+ interrupt. */
+ uint64_t po3_2sml : 1; /**< Enables NPI_INT_SUM[PO3_2SML] to generate an
+ interrupt. */
+ uint64_t po2_2sml : 1; /**< Enables NPI_INT_SUM[PO2_2SML] to generate an
+ interrupt. */
+ uint64_t po1_2sml : 1; /**< Enables NPI_INT_SUM[PO1_2SML] to generate an
+ interrupt. */
+ uint64_t po0_2sml : 1; /**< Enables NPI_INT_SUM[PO0_2SML] to generate an
+ interrupt. */
+ uint64_t pci_rsl : 1; /**< Enables NPI_INT_SUM[PCI_RSL] to generate an
+ interrupt. */
+ uint64_t rml_wto : 1; /**< Enables NPI_INT_SUM[RML_WTO] to generate an
+ interrupt. */
+ uint64_t rml_rto : 1; /**< Enables NPI_INT_SUM[RML_RTO] to generate an
+ interrupt. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t pci_rsl : 1;
+ uint64_t po0_2sml : 1;
+ uint64_t po1_2sml : 1;
+ uint64_t po2_2sml : 1;
+ uint64_t po3_2sml : 1;
+ uint64_t i0_rtout : 1;
+ uint64_t i1_rtout : 1;
+ uint64_t i2_rtout : 1;
+ uint64_t i3_rtout : 1;
+ uint64_t i0_overf : 1;
+ uint64_t i1_overf : 1;
+ uint64_t i2_overf : 1;
+ uint64_t i3_overf : 1;
+ uint64_t p0_rtout : 1;
+ uint64_t p1_rtout : 1;
+ uint64_t p2_rtout : 1;
+ uint64_t p3_rtout : 1;
+ uint64_t p0_perr : 1;
+ uint64_t p1_perr : 1;
+ uint64_t p2_perr : 1;
+ uint64_t p3_perr : 1;
+ uint64_t g0_rtout : 1;
+ uint64_t g1_rtout : 1;
+ uint64_t g2_rtout : 1;
+ uint64_t g3_rtout : 1;
+ uint64_t p0_pperr : 1;
+ uint64_t p1_pperr : 1;
+ uint64_t p2_pperr : 1;
+ uint64_t p3_pperr : 1;
+ uint64_t p0_ptout : 1;
+ uint64_t p1_ptout : 1;
+ uint64_t p2_ptout : 1;
+ uint64_t p3_ptout : 1;
+ uint64_t i0_pperr : 1;
+ uint64_t i1_pperr : 1;
+ uint64_t i2_pperr : 1;
+ uint64_t i3_pperr : 1;
+ uint64_t win_rto : 1;
+ uint64_t p_dperr : 1;
+ uint64_t iobdma : 1;
+ uint64_t fcr_s_e : 1;
+ uint64_t fcr_a_f : 1;
+ uint64_t pcr_s_e : 1;
+ uint64_t pcr_a_f : 1;
+ uint64_t q2_s_e : 1;
+ uint64_t q2_a_f : 1;
+ uint64_t q3_s_e : 1;
+ uint64_t q3_a_f : 1;
+ uint64_t com_s_e : 1;
+ uint64_t com_a_f : 1;
+ uint64_t pnc_s_e : 1;
+ uint64_t pnc_a_f : 1;
+ uint64_t rwx_s_e : 1;
+ uint64_t rdx_s_e : 1;
+ uint64_t pcf_p_e : 1;
+ uint64_t pcf_p_f : 1;
+ uint64_t pdf_p_e : 1;
+ uint64_t pdf_p_f : 1;
+ uint64_t q1_s_e : 1;
+ uint64_t q1_a_f : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_npi_int_enb_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t q1_a_f : 1; /**< Enables NPI_INT_SUM[Q1_A_F] to generate an
+ interrupt. */
+ uint64_t q1_s_e : 1; /**< Enables NPI_INT_SUM[Q1_S_E] to generate an
+ interrupt. */
+ uint64_t pdf_p_f : 1; /**< Enables NPI_INT_SUM[PDF_P_F] to generate an
+ interrupt. */
+ uint64_t pdf_p_e : 1; /**< Enables NPI_INT_SUM[PDF_P_E] to generate an
+ interrupt. */
+ uint64_t pcf_p_f : 1; /**< Enables NPI_INT_SUM[PCF_P_F] to generate an
+ interrupt. */
+ uint64_t pcf_p_e : 1; /**< Enables NPI_INT_SUM[PCF_P_E] to generate an
+ interrupt. */
+ uint64_t rdx_s_e : 1; /**< Enables NPI_INT_SUM[RDX_S_E] to generate an
+ interrupt. */
+ uint64_t rwx_s_e : 1; /**< Enables NPI_INT_SUM[RWX_S_E] to generate an
+ interrupt. */
+ uint64_t pnc_a_f : 1; /**< Enables NPI_INT_SUM[PNC_A_F] to generate an
+ interrupt. */
+ uint64_t pnc_s_e : 1; /**< Enables NPI_INT_SUM[PNC_S_E] to generate an
+ interrupt. */
+ uint64_t com_a_f : 1; /**< Enables NPI_INT_SUM[COM_A_F] to generate an
+ interrupt. */
+ uint64_t com_s_e : 1; /**< Enables NPI_INT_SUM[COM_S_E] to generate an
+ interrupt. */
+ uint64_t q3_a_f : 1; /**< Enables NPI_INT_SUM[Q3_A_F] to generate an
+ interrupt. */
+ uint64_t q3_s_e : 1; /**< Enables NPI_INT_SUM[Q3_S_E] to generate an
+ interrupt. */
+ uint64_t q2_a_f : 1; /**< Enables NPI_INT_SUM[Q2_A_F] to generate an
+ interrupt. */
+ uint64_t q2_s_e : 1; /**< Enables NPI_INT_SUM[Q2_S_E] to generate an
+ interrupt. */
+ uint64_t pcr_a_f : 1; /**< Enables NPI_INT_SUM[PCR_A_F] to generate an
+ interrupt. */
+ uint64_t pcr_s_e : 1; /**< Enables NPI_INT_SUM[PCR_S_E] to generate an
+ interrupt. */
+ uint64_t fcr_a_f : 1; /**< Enables NPI_INT_SUM[FCR_A_F] to generate an
+ interrupt. */
+ uint64_t fcr_s_e : 1; /**< Enables NPI_INT_SUM[FCR_S_E] to generate an
+ interrupt. */
+ uint64_t iobdma : 1; /**< Enables NPI_INT_SUM[IOBDMA] to generate an
+ interrupt. */
+ uint64_t p_dperr : 1; /**< Enables NPI_INT_SUM[P_DPERR] to generate an
+ interrupt. */
+ uint64_t win_rto : 1; /**< Enables NPI_INT_SUM[WIN_RTO] to generate an
+ interrupt. */
+ uint64_t reserved_36_38 : 3;
+ uint64_t i0_pperr : 1; /**< Enables NPI_INT_SUM[I0_PPERR] to generate an
+ interrupt. */
+ uint64_t reserved_32_34 : 3;
+ uint64_t p0_ptout : 1; /**< Enables NPI_INT_SUM[P0_PTOUT] to generate an
+ interrupt. */
+ uint64_t reserved_28_30 : 3;
+ uint64_t p0_pperr : 1; /**< Enables NPI_INT_SUM[P0_PPERR] to generate an
+ interrupt. */
+ uint64_t reserved_24_26 : 3;
+ uint64_t g0_rtout : 1; /**< Enables NPI_INT_SUM[G0_RTOUT] to generate an
+ interrupt. */
+ uint64_t reserved_20_22 : 3;
+ uint64_t p0_perr : 1; /**< Enables NPI_INT_SUM[P0_PERR] to generate an
+ interrupt. */
+ uint64_t reserved_16_18 : 3;
+ uint64_t p0_rtout : 1; /**< Enables NPI_INT_SUM[P0_RTOUT] to generate an
+ interrupt. */
+ uint64_t reserved_12_14 : 3;
+ uint64_t i0_overf : 1; /**< Enables NPI_INT_SUM[I0_OVERF] to generate an
+ interrupt. */
+ uint64_t reserved_8_10 : 3;
+ uint64_t i0_rtout : 1; /**< Enables NPI_INT_SUM[I0_RTOUT] to generate an
+ interrupt. */
+ uint64_t reserved_4_6 : 3;
+ uint64_t po0_2sml : 1; /**< Enables NPI_INT_SUM[PO0_2SML] to generate an
+ interrupt. */
+ uint64_t pci_rsl : 1; /**< Enables NPI_INT_SUM[PCI_RSL] to generate an
+ interrupt. */
+ uint64_t rml_wto : 1; /**< Enables NPI_INT_SUM[RML_WTO] to generate an
+ interrupt. */
+ uint64_t rml_rto : 1; /**< Enables NPI_INT_SUM[RML_RTO] to generate an
+ interrupt. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t pci_rsl : 1;
+ uint64_t po0_2sml : 1;
+ uint64_t reserved_4_6 : 3;
+ uint64_t i0_rtout : 1;
+ uint64_t reserved_8_10 : 3;
+ uint64_t i0_overf : 1;
+ uint64_t reserved_12_14 : 3;
+ uint64_t p0_rtout : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t p0_perr : 1;
+ uint64_t reserved_20_22 : 3;
+ uint64_t g0_rtout : 1;
+ uint64_t reserved_24_26 : 3;
+ uint64_t p0_pperr : 1;
+ uint64_t reserved_28_30 : 3;
+ uint64_t p0_ptout : 1;
+ uint64_t reserved_32_34 : 3;
+ uint64_t i0_pperr : 1;
+ uint64_t reserved_36_38 : 3;
+ uint64_t win_rto : 1;
+ uint64_t p_dperr : 1;
+ uint64_t iobdma : 1;
+ uint64_t fcr_s_e : 1;
+ uint64_t fcr_a_f : 1;
+ uint64_t pcr_s_e : 1;
+ uint64_t pcr_a_f : 1;
+ uint64_t q2_s_e : 1;
+ uint64_t q2_a_f : 1;
+ uint64_t q3_s_e : 1;
+ uint64_t q3_a_f : 1;
+ uint64_t com_s_e : 1;
+ uint64_t com_a_f : 1;
+ uint64_t pnc_s_e : 1;
+ uint64_t pnc_a_f : 1;
+ uint64_t rwx_s_e : 1;
+ uint64_t rdx_s_e : 1;
+ uint64_t pcf_p_e : 1;
+ uint64_t pcf_p_f : 1;
+ uint64_t pdf_p_e : 1;
+ uint64_t pdf_p_f : 1;
+ uint64_t q1_s_e : 1;
+ uint64_t q1_a_f : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } cn30xx;
+ struct cvmx_npi_int_enb_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t q1_a_f : 1; /**< Enables NPI_INT_SUM[Q1_A_F] to generate an
+ interrupt. */
+ uint64_t q1_s_e : 1; /**< Enables NPI_INT_SUM[Q1_S_E] to generate an
+ interrupt. */
+ uint64_t pdf_p_f : 1; /**< Enables NPI_INT_SUM[PDF_P_F] to generate an
+ interrupt. */
+ uint64_t pdf_p_e : 1; /**< Enables NPI_INT_SUM[PDF_P_E] to generate an
+ interrupt. */
+ uint64_t pcf_p_f : 1; /**< Enables NPI_INT_SUM[PCF_P_F] to generate an
+ interrupt. */
+ uint64_t pcf_p_e : 1; /**< Enables NPI_INT_SUM[PCF_P_E] to generate an
+ interrupt. */
+ uint64_t rdx_s_e : 1; /**< Enables NPI_INT_SUM[RDX_S_E] to generate an
+ interrupt. */
+ uint64_t rwx_s_e : 1; /**< Enables NPI_INT_SUM[RWX_S_E] to generate an
+ interrupt. */
+ uint64_t pnc_a_f : 1; /**< Enables NPI_INT_SUM[PNC_A_F] to generate an
+ interrupt. */
+ uint64_t pnc_s_e : 1; /**< Enables NPI_INT_SUM[PNC_S_E] to generate an
+ interrupt. */
+ uint64_t com_a_f : 1; /**< Enables NPI_INT_SUM[COM_A_F] to generate an
+ interrupt. */
+ uint64_t com_s_e : 1; /**< Enables NPI_INT_SUM[COM_S_E] to generate an
+ interrupt. */
+ uint64_t q3_a_f : 1; /**< Enables NPI_INT_SUM[Q3_A_F] to generate an
+ interrupt. */
+ uint64_t q3_s_e : 1; /**< Enables NPI_INT_SUM[Q3_S_E] to generate an
+ interrupt. */
+ uint64_t q2_a_f : 1; /**< Enables NPI_INT_SUM[Q2_A_F] to generate an
+ interrupt. */
+ uint64_t q2_s_e : 1; /**< Enables NPI_INT_SUM[Q2_S_E] to generate an
+ interrupt. */
+ uint64_t pcr_a_f : 1; /**< Enables NPI_INT_SUM[PCR_A_F] to generate an
+ interrupt. */
+ uint64_t pcr_s_e : 1; /**< Enables NPI_INT_SUM[PCR_S_E] to generate an
+ interrupt. */
+ uint64_t fcr_a_f : 1; /**< Enables NPI_INT_SUM[FCR_A_F] to generate an
+ interrupt. */
+ uint64_t fcr_s_e : 1; /**< Enables NPI_INT_SUM[FCR_S_E] to generate an
+ interrupt. */
+ uint64_t iobdma : 1; /**< Enables NPI_INT_SUM[IOBDMA] to generate an
+ interrupt. */
+ uint64_t p_dperr : 1; /**< Enables NPI_INT_SUM[P_DPERR] to generate an
+ interrupt. */
+ uint64_t win_rto : 1; /**< Enables NPI_INT_SUM[WIN_RTO] to generate an
+ interrupt. */
+ uint64_t reserved_37_38 : 2;
+ uint64_t i1_pperr : 1; /**< Enables NPI_INT_SUM[I1_PPERR] to generate an
+ interrupt. */
+ uint64_t i0_pperr : 1; /**< Enables NPI_INT_SUM[I0_PPERR] to generate an
+ interrupt. */
+ uint64_t reserved_33_34 : 2;
+ uint64_t p1_ptout : 1; /**< Enables NPI_INT_SUM[P1_PTOUT] to generate an
+ interrupt. */
+ uint64_t p0_ptout : 1; /**< Enables NPI_INT_SUM[P0_PTOUT] to generate an
+ interrupt. */
+ uint64_t reserved_29_30 : 2;
+ uint64_t p1_pperr : 1; /**< Enables NPI_INT_SUM[P1_PPERR] to generate an
+ interrupt. */
+ uint64_t p0_pperr : 1; /**< Enables NPI_INT_SUM[P0_PPERR] to generate an
+ interrupt. */
+ uint64_t reserved_25_26 : 2;
+ uint64_t g1_rtout : 1; /**< Enables NPI_INT_SUM[G1_RTOUT] to generate an
+ interrupt. */
+ uint64_t g0_rtout : 1; /**< Enables NPI_INT_SUM[G0_RTOUT] to generate an
+ interrupt. */
+ uint64_t reserved_21_22 : 2;
+ uint64_t p1_perr : 1; /**< Enables NPI_INT_SUM[P1_PERR] to generate an
+ interrupt. */
+ uint64_t p0_perr : 1; /**< Enables NPI_INT_SUM[P0_PERR] to generate an
+ interrupt. */
+ uint64_t reserved_17_18 : 2;
+ uint64_t p1_rtout : 1; /**< Enables NPI_INT_SUM[P1_RTOUT] to generate an
+ interrupt. */
+ uint64_t p0_rtout : 1; /**< Enables NPI_INT_SUM[P0_RTOUT] to generate an
+ interrupt. */
+ uint64_t reserved_13_14 : 2;
+ uint64_t i1_overf : 1; /**< Enables NPI_INT_SUM[I1_OVERF] to generate an
+ interrupt. */
+ uint64_t i0_overf : 1; /**< Enables NPI_INT_SUM[I0_OVERF] to generate an
+ interrupt. */
+ uint64_t reserved_9_10 : 2;
+ uint64_t i1_rtout : 1; /**< Enables NPI_INT_SUM[I1_RTOUT] to generate an
+ interrupt. */
+ uint64_t i0_rtout : 1; /**< Enables NPI_INT_SUM[I0_RTOUT] to generate an
+ interrupt. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t po1_2sml : 1; /**< Enables NPI_INT_SUM[PO1_2SML] to generate an
+ interrupt. */
+ uint64_t po0_2sml : 1; /**< Enables NPI_INT_SUM[PO0_2SML] to generate an
+ interrupt. */
+ uint64_t pci_rsl : 1; /**< Enables NPI_INT_SUM[PCI_RSL] to generate an
+ interrupt. */
+ uint64_t rml_wto : 1; /**< Enables NPI_INT_SUM[RML_WTO] to generate an
+ interrupt. */
+ uint64_t rml_rto : 1; /**< Enables NPI_INT_SUM[RML_RTO] to generate an
+ interrupt. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t pci_rsl : 1;
+ uint64_t po0_2sml : 1;
+ uint64_t po1_2sml : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t i0_rtout : 1;
+ uint64_t i1_rtout : 1;
+ uint64_t reserved_9_10 : 2;
+ uint64_t i0_overf : 1;
+ uint64_t i1_overf : 1;
+ uint64_t reserved_13_14 : 2;
+ uint64_t p0_rtout : 1;
+ uint64_t p1_rtout : 1;
+ uint64_t reserved_17_18 : 2;
+ uint64_t p0_perr : 1;
+ uint64_t p1_perr : 1;
+ uint64_t reserved_21_22 : 2;
+ uint64_t g0_rtout : 1;
+ uint64_t g1_rtout : 1;
+ uint64_t reserved_25_26 : 2;
+ uint64_t p0_pperr : 1;
+ uint64_t p1_pperr : 1;
+ uint64_t reserved_29_30 : 2;
+ uint64_t p0_ptout : 1;
+ uint64_t p1_ptout : 1;
+ uint64_t reserved_33_34 : 2;
+ uint64_t i0_pperr : 1;
+ uint64_t i1_pperr : 1;
+ uint64_t reserved_37_38 : 2;
+ uint64_t win_rto : 1;
+ uint64_t p_dperr : 1;
+ uint64_t iobdma : 1;
+ uint64_t fcr_s_e : 1;
+ uint64_t fcr_a_f : 1;
+ uint64_t pcr_s_e : 1;
+ uint64_t pcr_a_f : 1;
+ uint64_t q2_s_e : 1;
+ uint64_t q2_a_f : 1;
+ uint64_t q3_s_e : 1;
+ uint64_t q3_a_f : 1;
+ uint64_t com_s_e : 1;
+ uint64_t com_a_f : 1;
+ uint64_t pnc_s_e : 1;
+ uint64_t pnc_a_f : 1;
+ uint64_t rwx_s_e : 1;
+ uint64_t rdx_s_e : 1;
+ uint64_t pcf_p_e : 1;
+ uint64_t pcf_p_f : 1;
+ uint64_t pdf_p_e : 1;
+ uint64_t pdf_p_f : 1;
+ uint64_t q1_s_e : 1;
+ uint64_t q1_a_f : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } cn31xx;
+ struct cvmx_npi_int_enb_s cn38xx;
+ struct cvmx_npi_int_enb_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63 : 22;
+ uint64_t iobdma : 1; /**< Enables NPI_INT_SUM[IOBDMA] to generate an
+ interrupt. */
+ uint64_t p_dperr : 1; /**< Enables NPI_INT_SUM[P_DPERR] to generate an
+ interrupt. */
+ uint64_t win_rto : 1; /**< Enables NPI_INT_SUM[WIN_RTO] to generate an
+ interrupt. */
+ uint64_t i3_pperr : 1; /**< Enables NPI_INT_SUM[I3_PPERR] to generate an
+ interrupt. */
+ uint64_t i2_pperr : 1; /**< Enables NPI_INT_SUM[I2_PPERR] to generate an
+ interrupt. */
+ uint64_t i1_pperr : 1; /**< Enables NPI_INT_SUM[I1_PPERR] to generate an
+ interrupt. */
+ uint64_t i0_pperr : 1; /**< Enables NPI_INT_SUM[I0_PPERR] to generate an
+ interrupt. */
+ uint64_t p3_ptout : 1; /**< Enables NPI_INT_SUM[P3_PTOUT] to generate an
+ interrupt. */
+ uint64_t p2_ptout : 1; /**< Enables NPI_INT_SUM[P2_PTOUT] to generate an
+ interrupt. */
+ uint64_t p1_ptout : 1; /**< Enables NPI_INT_SUM[P1_PTOUT] to generate an
+ interrupt. */
+ uint64_t p0_ptout : 1; /**< Enables NPI_INT_SUM[P0_PTOUT] to generate an
+ interrupt. */
+ uint64_t p3_pperr : 1; /**< Enables NPI_INT_SUM[P3_PPERR] to generate an
+ interrupt. */
+ uint64_t p2_pperr : 1; /**< Enables NPI_INT_SUM[P2_PPERR] to generate an
+ interrupt. */
+ uint64_t p1_pperr : 1; /**< Enables NPI_INT_SUM[P1_PPERR] to generate an
+ interrupt. */
+ uint64_t p0_pperr : 1; /**< Enables NPI_INT_SUM[P0_PPERR] to generate an
+ interrupt. */
+ uint64_t g3_rtout : 1; /**< Enables NPI_INT_SUM[G3_RTOUT] to generate an
+ interrupt. */
+ uint64_t g2_rtout : 1; /**< Enables NPI_INT_SUM[G2_RTOUT] to generate an
+ interrupt. */
+ uint64_t g1_rtout : 1; /**< Enables NPI_INT_SUM[G1_RTOUT] to generate an
+ interrupt. */
+ uint64_t g0_rtout : 1; /**< Enables NPI_INT_SUM[G0_RTOUT] to generate an
+ interrupt. */
+ uint64_t p3_perr : 1; /**< Enables NPI_INT_SUM[P3_PERR] to generate an
+ interrupt. */
+ uint64_t p2_perr : 1; /**< Enables NPI_INT_SUM[P2_PERR] to generate an
+ interrupt. */
+ uint64_t p1_perr : 1; /**< Enables NPI_INT_SUM[P1_PERR] to generate an
+ interrupt. */
+ uint64_t p0_perr : 1; /**< Enables NPI_INT_SUM[P0_PERR] to generate an
+ interrupt. */
+ uint64_t p3_rtout : 1; /**< Enables NPI_INT_SUM[P3_RTOUT] to generate an
+ interrupt. */
+ uint64_t p2_rtout : 1; /**< Enables NPI_INT_SUM[P2_RTOUT] to generate an
+ interrupt. */
+ uint64_t p1_rtout : 1; /**< Enables NPI_INT_SUM[P1_RTOUT] to generate an
+ interrupt. */
+ uint64_t p0_rtout : 1; /**< Enables NPI_INT_SUM[P0_RTOUT] to generate an
+ interrupt. */
+ uint64_t i3_overf : 1; /**< Enables NPI_INT_SUM[I3_OVERF] to generate an
+ interrupt. */
+ uint64_t i2_overf : 1; /**< Enables NPI_INT_SUM[I2_OVERF] to generate an
+ interrupt. */
+ uint64_t i1_overf : 1; /**< Enables NPI_INT_SUM[I1_OVERF] to generate an
+ interrupt. */
+ uint64_t i0_overf : 1; /**< Enables NPI_INT_SUM[I0_OVERF] to generate an
+ interrupt. */
+ uint64_t i3_rtout : 1; /**< Enables NPI_INT_SUM[I3_RTOUT] to generate an
+ interrupt. */
+ uint64_t i2_rtout : 1; /**< Enables NPI_INT_SUM[I2_RTOUT] to generate an
+ interrupt. */
+ uint64_t i1_rtout : 1; /**< Enables NPI_INT_SUM[I1_RTOUT] to generate an
+ interrupt. */
+ uint64_t i0_rtout : 1; /**< Enables NPI_INT_SUM[I0_RTOUT] to generate an
+ interrupt. */
+ uint64_t po3_2sml : 1; /**< Enables NPI_INT_SUM[PO3_2SML] to generate an
+ interrupt. */
+ uint64_t po2_2sml : 1; /**< Enables NPI_INT_SUM[PO2_2SML] to generate an
+ interrupt. */
+ uint64_t po1_2sml : 1; /**< Enables NPI_INT_SUM[PO1_2SML] to generate an
+ interrupt. */
+ uint64_t po0_2sml : 1; /**< Enables NPI_INT_SUM[PO0_2SML] to generate an
+ interrupt. */
+ uint64_t pci_rsl : 1; /**< Enables NPI_INT_SUM[PCI_RSL] to generate an
+ interrupt. */
+ uint64_t rml_wto : 1; /**< Enables NPI_INT_SUM[RML_WTO] to generate an
+ interrupt. */
+ uint64_t rml_rto : 1; /**< Enables NPI_INT_SUM[RML_RTO] to generate an
+ interrupt. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t pci_rsl : 1;
+ uint64_t po0_2sml : 1;
+ uint64_t po1_2sml : 1;
+ uint64_t po2_2sml : 1;
+ uint64_t po3_2sml : 1;
+ uint64_t i0_rtout : 1;
+ uint64_t i1_rtout : 1;
+ uint64_t i2_rtout : 1;
+ uint64_t i3_rtout : 1;
+ uint64_t i0_overf : 1;
+ uint64_t i1_overf : 1;
+ uint64_t i2_overf : 1;
+ uint64_t i3_overf : 1;
+ uint64_t p0_rtout : 1;
+ uint64_t p1_rtout : 1;
+ uint64_t p2_rtout : 1;
+ uint64_t p3_rtout : 1;
+ uint64_t p0_perr : 1;
+ uint64_t p1_perr : 1;
+ uint64_t p2_perr : 1;
+ uint64_t p3_perr : 1;
+ uint64_t g0_rtout : 1;
+ uint64_t g1_rtout : 1;
+ uint64_t g2_rtout : 1;
+ uint64_t g3_rtout : 1;
+ uint64_t p0_pperr : 1;
+ uint64_t p1_pperr : 1;
+ uint64_t p2_pperr : 1;
+ uint64_t p3_pperr : 1;
+ uint64_t p0_ptout : 1;
+ uint64_t p1_ptout : 1;
+ uint64_t p2_ptout : 1;
+ uint64_t p3_ptout : 1;
+ uint64_t i0_pperr : 1;
+ uint64_t i1_pperr : 1;
+ uint64_t i2_pperr : 1;
+ uint64_t i3_pperr : 1;
+ uint64_t win_rto : 1;
+ uint64_t p_dperr : 1;
+ uint64_t iobdma : 1;
+ uint64_t reserved_42_63 : 22;
+#endif
+ } cn38xxp2;
+ struct cvmx_npi_int_enb_cn31xx cn50xx;
+ struct cvmx_npi_int_enb_s cn58xx;
+ struct cvmx_npi_int_enb_s cn58xxp1;
+};
+typedef union cvmx_npi_int_enb cvmx_npi_int_enb_t;
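+
+/*
+ * Usage sketch (illustrative, not part of the original SDK sources):
+ * enable a pair of NPI interrupt sources. Field names follow the common
+ * cvmx_npi_int_enb_s layout; the CVMX_NPI_INT_ENB address macro and the
+ * cvmx_read_csr()/cvmx_write_csr() accessors are assumed to be provided
+ * elsewhere in the SDK (cvmx-access.h).
+ */
+static inline void cvmx_npi_int_enb_rml_example(void)
+{
+ cvmx_npi_int_enb_t enb;
+ enb.u64 = cvmx_read_csr(CVMX_NPI_INT_ENB);
+ enb.s.rml_rto = 1; /* interrupt when an RML read times out */
+ enb.s.rml_wto = 1; /* interrupt when an RML write commit times out */
+ cvmx_write_csr(CVMX_NPI_INT_ENB, enb.u64);
+}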
+
+/**
+ * cvmx_npi_int_sum
+ *
+ * NPI_INTERRUPT_SUM = NPI Interrupt Summary Register
+ *
+ * Bits are set when an interrupt condition occurs; write '1' to clear.
+ */
+union cvmx_npi_int_sum {
+ uint64_t u64;
+ struct cvmx_npi_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t q1_a_f : 1; /**< Attempted to add when Queue-1 FIFO is full.
+ PASS3 Field. */
+ uint64_t q1_s_e : 1; /**< Attempted to subtract when Queue-1 FIFO is empty.
+ PASS3 Field. */
+ uint64_t pdf_p_f : 1; /**< Attempted to push a full PCN-DATA-FIFO.
+ PASS3 Field. */
+ uint64_t pdf_p_e : 1; /**< Attempted to pop an empty PCN-DATA-FIFO.
+ PASS3 Field. */
+ uint64_t pcf_p_f : 1; /**< Attempted to push a full PCN-CNT-FIFO.
+ PASS3 Field. */
+ uint64_t pcf_p_e : 1; /**< Attempted to pop an empty PCN-CNT-FIFO.
+ PASS3 Field. */
+ uint64_t rdx_s_e : 1; /**< Attempted to subtract when DPI-XFR-Wait count is 0.
+ PASS3 Field. */
+ uint64_t rwx_s_e : 1; /**< Attempted to subtract when RDN-XFR-Wait count is 0.
+ PASS3 Field. */
+ uint64_t pnc_a_f : 1; /**< Attempted to add when PNI-NPI Credits are max.
+ PASS3 Field. */
+ uint64_t pnc_s_e : 1; /**< Attempted to subtract when PNI-NPI Credits are 0.
+ PASS3 Field. */
+ uint64_t com_a_f : 1; /**< Attempted to add when PCN-Commit Counter is max.
+ PASS3 Field. */
+ uint64_t com_s_e : 1; /**< Attempted to subtract when PCN-Commit Counter is 0.
+ PASS3 Field. */
+ uint64_t q3_a_f : 1; /**< Attempted to add when Queue-3 FIFO is full.
+ PASS3 Field. */
+ uint64_t q3_s_e : 1; /**< Attempted to subtract when Queue-3 FIFO is empty.
+ PASS3 Field. */
+ uint64_t q2_a_f : 1; /**< Attempted to add when Queue-2 FIFO is full.
+ PASS3 Field. */
+ uint64_t q2_s_e : 1; /**< Attempted to subtract when Queue-2 FIFO is empty.
+ PASS3 Field. */
+ uint64_t pcr_a_f : 1; /**< Attempted to add when POW Credits is full.
+ PASS3 Field. */
+ uint64_t pcr_s_e : 1; /**< Attempted to subtract when POW Credits is empty.
+ PASS3 Field. */
+ uint64_t fcr_a_f : 1; /**< Attempted to add when FPA Credits is full.
+ PASS3 Field. */
+ uint64_t fcr_s_e : 1; /**< Attempted to subtract when FPA Credits is empty.
+ PASS3 Field. */
+ uint64_t iobdma : 1; /**< Requested IOBDMA read size exceeded 128 words. */
+ uint64_t p_dperr : 1; /**< If a parity error occurred on data written to L2C
+ from the PCI, this bit may be set. */
+ uint64_t win_rto : 1; /**< Windowed Load Timed Out. */
+ uint64_t i3_pperr : 1; /**< If a parity error occurred on the port's instruction,
+ this bit may be set. */
+ uint64_t i2_pperr : 1; /**< If a parity error occurred on the port's instruction,
+ this bit may be set. */
+ uint64_t i1_pperr : 1; /**< If a parity error occurred on the port's instruction,
+ this bit may be set. */
+ uint64_t i0_pperr : 1; /**< If a parity error occurred on the port's instruction,
+ this bit may be set. */
+ uint64_t p3_ptout : 1; /**< Port-3 output had a read timeout on a DATA/INFO
+ pair. */
+ uint64_t p2_ptout : 1; /**< Port-2 output had a read timeout on a DATA/INFO
+ pair. */
+ uint64_t p1_ptout : 1; /**< Port-1 output had a read timeout on a DATA/INFO
+ pair. */
+ uint64_t p0_ptout : 1; /**< Port-0 output had a read timeout on a DATA/INFO
+ pair. */
+ uint64_t p3_pperr : 1; /**< If a parity error occurred on the port DATA/INFO
+ pointer-pair, this bit may be set. */
+ uint64_t p2_pperr : 1; /**< If a parity error occurred on the port DATA/INFO
+ pointer-pair, this bit may be set. */
+ uint64_t p1_pperr : 1; /**< If a parity error occurred on the port DATA/INFO
+ pointer-pair, this bit may be set. */
+ uint64_t p0_pperr : 1; /**< If a parity error occurred on the port DATA/INFO
+ pointer-pair, this bit may be set. */
+ uint64_t g3_rtout : 1; /**< Port-3 had a read timeout while attempting to
+ read a gather list. */
+ uint64_t g2_rtout : 1; /**< Port-2 had a read timeout while attempting to
+ read a gather list. */
+ uint64_t g1_rtout : 1; /**< Port-1 had a read timeout while attempting to
+ read a gather list. */
+ uint64_t g0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read a gather list. */
+ uint64_t p3_perr : 1; /**< If a parity error occurred on the port's packet
+ data, this bit may be set. */
+ uint64_t p2_perr : 1; /**< If a parity error occurred on the port's packet
+ data, this bit may be set. */
+ uint64_t p1_perr : 1; /**< If a parity error occurred on the port's packet
+ data, this bit may be set. */
+ uint64_t p0_perr : 1; /**< If a parity error occurred on the port's packet
+ data, this bit may be set. */
+ uint64_t p3_rtout : 1; /**< Port-3 had a read timeout while attempting to
+ read packet data. */
+ uint64_t p2_rtout : 1; /**< Port-2 had a read timeout while attempting to
+ read packet data. */
+ uint64_t p1_rtout : 1; /**< Port-1 had a read timeout while attempting to
+ read packet data. */
+ uint64_t p0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read packet data. */
+ uint64_t i3_overf : 1; /**< Port-3 had a doorbell overflow. Bit[31] of the
+ doorbell count was set. */
+ uint64_t i2_overf : 1; /**< Port-2 had a doorbell overflow. Bit[31] of the
+ doorbell count was set. */
+ uint64_t i1_overf : 1; /**< Port-1 had a doorbell overflow. Bit[31] of the
+ doorbell count was set. */
+ uint64_t i0_overf : 1; /**< Port-0 had a doorbell overflow. Bit[31] of the
+ doorbell count was set. */
+ uint64_t i3_rtout : 1; /**< Port-3 had a read timeout while attempting to
+ read instructions. */
+ uint64_t i2_rtout : 1; /**< Port-2 had a read timeout while attempting to
+ read instructions. */
+ uint64_t i1_rtout : 1; /**< Port-1 had a read timeout while attempting to
+ read instructions. */
+ uint64_t i0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read instructions. */
+ uint64_t po3_2sml : 1; /**< The packet being sent out on Port3 is smaller
+ than the NPI_BUFF_SIZE_OUTPUT3[ISIZE] field. */
+ uint64_t po2_2sml : 1; /**< The packet being sent out on Port2 is smaller
+ than the NPI_BUFF_SIZE_OUTPUT2[ISIZE] field. */
+ uint64_t po1_2sml : 1; /**< The packet being sent out on Port1 is smaller
+ than the NPI_BUFF_SIZE_OUTPUT1[ISIZE] field. */
+ uint64_t po0_2sml : 1; /**< The packet being sent out on Port0 is smaller
+ than the NPI_BUFF_SIZE_OUTPUT0[ISIZE] field. */
+ uint64_t pci_rsl : 1; /**< Set to '1' when a bit in PCI_INT_SUM2 is SET and the
+ corresponding bit in PCI_INT_ENB2 is SET. */
+ uint64_t rml_wto : 1; /**< Set to '1' when the RML does not receive a commit
+ back from an RSL after sending a write command to
+ an RSL. */
+ uint64_t rml_rto : 1; /**< Set to '1' when the RML does not receive read data
+ back from an RSL after sending a read command to
+ an RSL. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t pci_rsl : 1;
+ uint64_t po0_2sml : 1;
+ uint64_t po1_2sml : 1;
+ uint64_t po2_2sml : 1;
+ uint64_t po3_2sml : 1;
+ uint64_t i0_rtout : 1;
+ uint64_t i1_rtout : 1;
+ uint64_t i2_rtout : 1;
+ uint64_t i3_rtout : 1;
+ uint64_t i0_overf : 1;
+ uint64_t i1_overf : 1;
+ uint64_t i2_overf : 1;
+ uint64_t i3_overf : 1;
+ uint64_t p0_rtout : 1;
+ uint64_t p1_rtout : 1;
+ uint64_t p2_rtout : 1;
+ uint64_t p3_rtout : 1;
+ uint64_t p0_perr : 1;
+ uint64_t p1_perr : 1;
+ uint64_t p2_perr : 1;
+ uint64_t p3_perr : 1;
+ uint64_t g0_rtout : 1;
+ uint64_t g1_rtout : 1;
+ uint64_t g2_rtout : 1;
+ uint64_t g3_rtout : 1;
+ uint64_t p0_pperr : 1;
+ uint64_t p1_pperr : 1;
+ uint64_t p2_pperr : 1;
+ uint64_t p3_pperr : 1;
+ uint64_t p0_ptout : 1;
+ uint64_t p1_ptout : 1;
+ uint64_t p2_ptout : 1;
+ uint64_t p3_ptout : 1;
+ uint64_t i0_pperr : 1;
+ uint64_t i1_pperr : 1;
+ uint64_t i2_pperr : 1;
+ uint64_t i3_pperr : 1;
+ uint64_t win_rto : 1;
+ uint64_t p_dperr : 1;
+ uint64_t iobdma : 1;
+ uint64_t fcr_s_e : 1;
+ uint64_t fcr_a_f : 1;
+ uint64_t pcr_s_e : 1;
+ uint64_t pcr_a_f : 1;
+ uint64_t q2_s_e : 1;
+ uint64_t q2_a_f : 1;
+ uint64_t q3_s_e : 1;
+ uint64_t q3_a_f : 1;
+ uint64_t com_s_e : 1;
+ uint64_t com_a_f : 1;
+ uint64_t pnc_s_e : 1;
+ uint64_t pnc_a_f : 1;
+ uint64_t rwx_s_e : 1;
+ uint64_t rdx_s_e : 1;
+ uint64_t pcf_p_e : 1;
+ uint64_t pcf_p_f : 1;
+ uint64_t pdf_p_e : 1;
+ uint64_t pdf_p_f : 1;
+ uint64_t q1_s_e : 1;
+ uint64_t q1_a_f : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_npi_int_sum_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t q1_a_f : 1; /**< Attempted to add when Queue-1 FIFO is full. */
+ uint64_t q1_s_e : 1; /**< Attempted to subtract when Queue-1 FIFO is empty. */
+ uint64_t pdf_p_f : 1; /**< Attempted to push a full PCN-DATA-FIFO. */
+ uint64_t pdf_p_e : 1; /**< Attempted to pop an empty PCN-DATA-FIFO. */
+ uint64_t pcf_p_f : 1; /**< Attempted to push a full PCN-CNT-FIFO. */
+ uint64_t pcf_p_e : 1; /**< Attempted to pop an empty PCN-CNT-FIFO. */
+ uint64_t rdx_s_e : 1; /**< Attempted to subtract when DPI-XFR-Wait count is 0. */
+ uint64_t rwx_s_e : 1; /**< Attempted to subtract when RDN-XFR-Wait count is 0. */
+ uint64_t pnc_a_f : 1; /**< Attempted to add when PNI-NPI Credits are max. */
+ uint64_t pnc_s_e : 1; /**< Attempted to subtract when PNI-NPI Credits are 0. */
+ uint64_t com_a_f : 1; /**< Attempted to add when PCN-Commit Counter is max. */
+ uint64_t com_s_e : 1; /**< Attempted to subtract when PCN-Commit Counter is 0. */
+ uint64_t q3_a_f : 1; /**< Attempted to add when Queue-3 FIFO is full. */
+ uint64_t q3_s_e : 1; /**< Attempted to subtract when Queue-3 FIFO is empty. */
+ uint64_t q2_a_f : 1; /**< Attempted to add when Queue-2 FIFO is full. */
+ uint64_t q2_s_e : 1; /**< Attempted to subtract when Queue-2 FIFO is empty. */
+ uint64_t pcr_a_f : 1; /**< Attempted to add when POW Credits is full. */
+ uint64_t pcr_s_e : 1; /**< Attempted to subtract when POW Credits is empty. */
+ uint64_t fcr_a_f : 1; /**< Attempted to add when FPA Credits is full. */
+ uint64_t fcr_s_e : 1; /**< Attempted to subtract when FPA Credits is empty. */
+ uint64_t iobdma : 1; /**< Requested IOBDMA read size exceeded 128 words. */
+ uint64_t p_dperr : 1; /**< If a parity error occurred on data written to L2C
+ from the PCI, this bit may be set. */
+ uint64_t win_rto : 1; /**< Windowed Load Timed Out. */
+ uint64_t reserved_36_38 : 3;
+ uint64_t i0_pperr : 1; /**< If a parity error occurred on the port's instruction,
+ this bit may be set. */
+ uint64_t reserved_32_34 : 3;
+ uint64_t p0_ptout : 1; /**< Port-0 output had a read timeout on a DATA/INFO
+ pair. */
+ uint64_t reserved_28_30 : 3;
+ uint64_t p0_pperr : 1; /**< If a parity error occurred on the port DATA/INFO
+ pointer-pair, this bit may be set. */
+ uint64_t reserved_24_26 : 3;
+ uint64_t g0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read a gather list. */
+ uint64_t reserved_20_22 : 3;
+ uint64_t p0_perr : 1; /**< If a parity error occurred on the port's packet
+ data, this bit may be set. */
+ uint64_t reserved_16_18 : 3;
+ uint64_t p0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read packet data. */
+ uint64_t reserved_12_14 : 3;
+ uint64_t i0_overf : 1; /**< Port-0 had a doorbell overflow. Bit[31] of the
+ doorbell count was set. */
+ uint64_t reserved_8_10 : 3;
+ uint64_t i0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read instructions. */
+ uint64_t reserved_4_6 : 3;
+ uint64_t po0_2sml : 1; /**< The packet being sent out on Port0 is smaller
+ than the NPI_BUFF_SIZE_OUTPUT0[ISIZE] field. */
+ uint64_t pci_rsl : 1; /**< Set to '1' when a bit in PCI_INT_SUM2 is SET and the
+ corresponding bit in PCI_INT_ENB2 is SET. */
+ uint64_t rml_wto : 1; /**< Set to '1' when the RML does not receive a commit
+ back from an RSL after sending a write command to
+ an RSL. */
+ uint64_t rml_rto : 1; /**< Set to '1' when the RML does not receive read data
+ back from an RSL after sending a read command to
+ an RSL. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t pci_rsl : 1;
+ uint64_t po0_2sml : 1;
+ uint64_t reserved_4_6 : 3;
+ uint64_t i0_rtout : 1;
+ uint64_t reserved_8_10 : 3;
+ uint64_t i0_overf : 1;
+ uint64_t reserved_12_14 : 3;
+ uint64_t p0_rtout : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t p0_perr : 1;
+ uint64_t reserved_20_22 : 3;
+ uint64_t g0_rtout : 1;
+ uint64_t reserved_24_26 : 3;
+ uint64_t p0_pperr : 1;
+ uint64_t reserved_28_30 : 3;
+ uint64_t p0_ptout : 1;
+ uint64_t reserved_32_34 : 3;
+ uint64_t i0_pperr : 1;
+ uint64_t reserved_36_38 : 3;
+ uint64_t win_rto : 1;
+ uint64_t p_dperr : 1;
+ uint64_t iobdma : 1;
+ uint64_t fcr_s_e : 1;
+ uint64_t fcr_a_f : 1;
+ uint64_t pcr_s_e : 1;
+ uint64_t pcr_a_f : 1;
+ uint64_t q2_s_e : 1;
+ uint64_t q2_a_f : 1;
+ uint64_t q3_s_e : 1;
+ uint64_t q3_a_f : 1;
+ uint64_t com_s_e : 1;
+ uint64_t com_a_f : 1;
+ uint64_t pnc_s_e : 1;
+ uint64_t pnc_a_f : 1;
+ uint64_t rwx_s_e : 1;
+ uint64_t rdx_s_e : 1;
+ uint64_t pcf_p_e : 1;
+ uint64_t pcf_p_f : 1;
+ uint64_t pdf_p_e : 1;
+ uint64_t pdf_p_f : 1;
+ uint64_t q1_s_e : 1;
+ uint64_t q1_a_f : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } cn30xx;
+ struct cvmx_npi_int_sum_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t q1_a_f : 1; /**< Attempted to add when Queue-1 FIFO is full. */
+ uint64_t q1_s_e : 1; /**< Attempted to subtract when Queue-1 FIFO is empty. */
+ uint64_t pdf_p_f : 1; /**< Attempted to push a full PCN-DATA-FIFO. */
+ uint64_t pdf_p_e : 1; /**< Attempted to pop an empty PCN-DATA-FIFO. */
+ uint64_t pcf_p_f : 1; /**< Attempted to push a full PCN-CNT-FIFO. */
+ uint64_t pcf_p_e : 1; /**< Attempted to pop an empty PCN-CNT-FIFO. */
+ uint64_t rdx_s_e : 1; /**< Attempted to subtract when DPI-XFR-Wait count is 0. */
+ uint64_t rwx_s_e : 1; /**< Attempted to subtract when RDN-XFR-Wait count is 0. */
+ uint64_t pnc_a_f : 1; /**< Attempted to add when PNI-NPI Credits are max. */
+ uint64_t pnc_s_e : 1; /**< Attempted to subtract when PNI-NPI Credits are 0. */
+ uint64_t com_a_f : 1; /**< Attempted to add when PCN-Commit Counter is max. */
+ uint64_t com_s_e : 1; /**< Attempted to subtract when PCN-Commit Counter is 0. */
+ uint64_t q3_a_f : 1; /**< Attempted to add when Queue-3 FIFO is full. */
+ uint64_t q3_s_e : 1; /**< Attempted to subtract when Queue-3 FIFO is empty. */
+ uint64_t q2_a_f : 1; /**< Attempted to add when Queue-2 FIFO is full. */
+ uint64_t q2_s_e : 1; /**< Attempted to subtract when Queue-2 FIFO is empty. */
+ uint64_t pcr_a_f : 1; /**< Attempted to add when POW Credits is full. */
+ uint64_t pcr_s_e : 1; /**< Attempted to subtract when POW Credits is empty. */
+ uint64_t fcr_a_f : 1; /**< Attempted to add when FPA Credits is full. */
+ uint64_t fcr_s_e : 1; /**< Attempted to subtract when FPA Credits is empty. */
+ uint64_t iobdma : 1; /**< Requested IOBDMA read size exceeded 128 words. */
+ uint64_t p_dperr : 1; /**< If a parity error occurred on data written to L2C
+ from the PCI, this bit may be set. */
+ uint64_t win_rto : 1; /**< Windowed Load Timed Out. */
+ uint64_t reserved_37_38 : 2;
+ uint64_t i1_pperr : 1; /**< If a parity error occurred on the port's instruction,
+ this bit may be set. */
+ uint64_t i0_pperr : 1; /**< If a parity error occurred on the port's instruction,
+ this bit may be set. */
+ uint64_t reserved_33_34 : 2;
+ uint64_t p1_ptout : 1; /**< Port-1 output had a read timeout on a DATA/INFO
+ pair. */
+ uint64_t p0_ptout : 1; /**< Port-0 output had a read timeout on a DATA/INFO
+ pair. */
+ uint64_t reserved_29_30 : 2;
+ uint64_t p1_pperr : 1; /**< If a parity error occurred on the port DATA/INFO
+ pointer-pair, this bit may be set. */
+ uint64_t p0_pperr : 1; /**< If a parity error occurred on the port DATA/INFO
+ pointer-pair, this bit may be set. */
+ uint64_t reserved_25_26 : 2;
+ uint64_t g1_rtout : 1; /**< Port-1 had a read timeout while attempting to
+ read a gather list. */
+ uint64_t g0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read a gather list. */
+ uint64_t reserved_21_22 : 2;
+ uint64_t p1_perr : 1; /**< If a parity error occurred on the port's packet
+ data, this bit may be set. */
+ uint64_t p0_perr : 1; /**< If a parity error occurred on the port's packet
+ data, this bit may be set. */
+ uint64_t reserved_17_18 : 2;
+ uint64_t p1_rtout : 1; /**< Port-1 had a read timeout while attempting to
+ read packet data. */
+ uint64_t p0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read packet data. */
+ uint64_t reserved_13_14 : 2;
+ uint64_t i1_overf : 1; /**< Port-1 had a doorbell overflow. Bit[31] of the
+ doorbell count was set. */
+ uint64_t i0_overf : 1; /**< Port-0 had a doorbell overflow. Bit[31] of the
+ doorbell count was set. */
+ uint64_t reserved_9_10 : 2;
+ uint64_t i1_rtout : 1; /**< Port-1 had a read timeout while attempting to
+ read instructions. */
+ uint64_t i0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read instructions. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t po1_2sml : 1; /**< The packet being sent out on Port1 is smaller
+ than the NPI_BUFF_SIZE_OUTPUT1[ISIZE] field. */
+ uint64_t po0_2sml : 1; /**< The packet being sent out on Port0 is smaller
+ than the NPI_BUFF_SIZE_OUTPUT0[ISIZE] field. */
+ uint64_t pci_rsl : 1; /**< Set to '1' when a bit in PCI_INT_SUM2 is SET and the
+ corresponding bit in PCI_INT_ENB2 is SET. */
+ uint64_t rml_wto : 1; /**< Set to '1' when the RML does not receive a commit
+ back from an RSL after sending a write command to
+ an RSL. */
+ uint64_t rml_rto : 1; /**< Set to '1' when the RML does not receive read data
+ back from an RSL after sending a read command to
+ an RSL. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t pci_rsl : 1;
+ uint64_t po0_2sml : 1;
+ uint64_t po1_2sml : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t i0_rtout : 1;
+ uint64_t i1_rtout : 1;
+ uint64_t reserved_9_10 : 2;
+ uint64_t i0_overf : 1;
+ uint64_t i1_overf : 1;
+ uint64_t reserved_13_14 : 2;
+ uint64_t p0_rtout : 1;
+ uint64_t p1_rtout : 1;
+ uint64_t reserved_17_18 : 2;
+ uint64_t p0_perr : 1;
+ uint64_t p1_perr : 1;
+ uint64_t reserved_21_22 : 2;
+ uint64_t g0_rtout : 1;
+ uint64_t g1_rtout : 1;
+ uint64_t reserved_25_26 : 2;
+ uint64_t p0_pperr : 1;
+ uint64_t p1_pperr : 1;
+ uint64_t reserved_29_30 : 2;
+ uint64_t p0_ptout : 1;
+ uint64_t p1_ptout : 1;
+ uint64_t reserved_33_34 : 2;
+ uint64_t i0_pperr : 1;
+ uint64_t i1_pperr : 1;
+ uint64_t reserved_37_38 : 2;
+ uint64_t win_rto : 1;
+ uint64_t p_dperr : 1;
+ uint64_t iobdma : 1;
+ uint64_t fcr_s_e : 1;
+ uint64_t fcr_a_f : 1;
+ uint64_t pcr_s_e : 1;
+ uint64_t pcr_a_f : 1;
+ uint64_t q2_s_e : 1;
+ uint64_t q2_a_f : 1;
+ uint64_t q3_s_e : 1;
+ uint64_t q3_a_f : 1;
+ uint64_t com_s_e : 1;
+ uint64_t com_a_f : 1;
+ uint64_t pnc_s_e : 1;
+ uint64_t pnc_a_f : 1;
+ uint64_t rwx_s_e : 1;
+ uint64_t rdx_s_e : 1;
+ uint64_t pcf_p_e : 1;
+ uint64_t pcf_p_f : 1;
+ uint64_t pdf_p_e : 1;
+ uint64_t pdf_p_f : 1;
+ uint64_t q1_s_e : 1;
+ uint64_t q1_a_f : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } cn31xx;
+ struct cvmx_npi_int_sum_s cn38xx;
+ struct cvmx_npi_int_sum_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63 : 22;
+ uint64_t iobdma : 1; /**< Requested IOBDMA read size exceeded 128 words. */
+ uint64_t p_dperr : 1; /**< If a parity error occurred on data written to L2C
+ from the PCI, this bit may be set. */
+ uint64_t win_rto : 1; /**< Windowed Load Timed Out. */
+ uint64_t i3_pperr : 1; /**< If a parity error occurred on the port's instruction,
+ this bit may be set. */
+ uint64_t i2_pperr : 1; /**< If a parity error occurred on the port's instruction,
+ this bit may be set. */
+ uint64_t i1_pperr : 1; /**< If a parity error occurred on the port's instruction,
+ this bit may be set. */
+ uint64_t i0_pperr : 1; /**< If a parity error occurred on the port's instruction,
+ this bit may be set. */
+ uint64_t p3_ptout : 1; /**< Port-3 output had a read timeout on a DATA/INFO
+ pair. */
+ uint64_t p2_ptout : 1; /**< Port-2 output had a read timeout on a DATA/INFO
+ pair. */
+ uint64_t p1_ptout : 1; /**< Port-1 output had a read timeout on a DATA/INFO
+ pair. */
+ uint64_t p0_ptout : 1; /**< Port-0 output had a read timeout on a DATA/INFO
+ pair. */
+ uint64_t p3_pperr : 1; /**< If a parity error occurred on the port DATA/INFO
+ pointer-pair, this bit may be set. */
+ uint64_t p2_pperr : 1; /**< If a parity error occurred on the port DATA/INFO
+ pointer-pair, this bit may be set. */
+ uint64_t p1_pperr : 1; /**< If a parity error occurred on the port DATA/INFO
+ pointer-pair, this bit may be set. */
+ uint64_t p0_pperr : 1; /**< If a parity error occurred on the port DATA/INFO
+ pointer-pair, this bit may be set. */
+ uint64_t g3_rtout : 1; /**< Port-3 had a read timeout while attempting to
+ read a gather list. */
+ uint64_t g2_rtout : 1; /**< Port-2 had a read timeout while attempting to
+ read a gather list. */
+ uint64_t g1_rtout : 1; /**< Port-1 had a read timeout while attempting to
+ read a gather list. */
+ uint64_t g0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read a gather list. */
+ uint64_t p3_perr : 1; /**< If a parity error occurred on the port's packet
+ data, this bit may be set. */
+ uint64_t p2_perr : 1; /**< If a parity error occurred on the port's packet
+ data, this bit may be set. */
+ uint64_t p1_perr : 1; /**< If a parity error occurred on the port's packet
+ data, this bit may be set. */
+ uint64_t p0_perr : 1; /**< If a parity error occurred on the port's packet
+ data, this bit may be set. */
+ uint64_t p3_rtout : 1; /**< Port-3 had a read timeout while attempting to
+ read packet data. */
+ uint64_t p2_rtout : 1; /**< Port-2 had a read timeout while attempting to
+ read packet data. */
+ uint64_t p1_rtout : 1; /**< Port-1 had a read timeout while attempting to
+ read packet data. */
+ uint64_t p0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read packet data. */
+ uint64_t i3_overf : 1; /**< Port-3 had a doorbell overflow. Bit[31] of the
+ doorbell count was set. */
+ uint64_t i2_overf : 1; /**< Port-2 had a doorbell overflow. Bit[31] of the
+ doorbell count was set. */
+ uint64_t i1_overf : 1; /**< Port-1 had a doorbell overflow. Bit[31] of the
+ doorbell count was set. */
+ uint64_t i0_overf : 1; /**< Port-0 had a doorbell overflow. Bit[31] of the
+ doorbell count was set. */
+ uint64_t i3_rtout : 1; /**< Port-3 had a read timeout while attempting to
+ read instructions. */
+ uint64_t i2_rtout : 1; /**< Port-2 had a read timeout while attempting to
+ read instructions. */
+ uint64_t i1_rtout : 1; /**< Port-1 had a read timeout while attempting to
+ read instructions. */
+ uint64_t i0_rtout : 1; /**< Port-0 had a read timeout while attempting to
+ read instructions. */
+ uint64_t po3_2sml : 1; /**< The packet being sent out on Port3 is smaller
+ than the NPI_BUFF_SIZE_OUTPUT3[ISIZE] field. */
+ uint64_t po2_2sml : 1; /**< The packet being sent out on Port2 is smaller
+ than the NPI_BUFF_SIZE_OUTPUT2[ISIZE] field. */
+ uint64_t po1_2sml : 1; /**< The packet being sent out on Port1 is smaller
+ than the NPI_BUFF_SIZE_OUTPUT1[ISIZE] field. */
+ uint64_t po0_2sml : 1; /**< The packet being sent out on Port0 is smaller
+ than the NPI_BUFF_SIZE_OUTPUT0[ISIZE] field. */
+ uint64_t pci_rsl : 1; /**< Set to '1' when a bit in PCI_INT_SUM2 is SET and the
+ corresponding bit in PCI_INT_ENB2 is SET. */
+ uint64_t rml_wto : 1; /**< Set to '1' when the RML does not receive a commit
+ back from an RSL after sending a write command to
+ an RSL. */
+ uint64_t rml_rto : 1; /**< Set to '1' when the RML does not receive read data
+ back from an RSL after sending a read command to
+ an RSL. */
+#else
+ uint64_t rml_rto : 1;
+ uint64_t rml_wto : 1;
+ uint64_t pci_rsl : 1;
+ uint64_t po0_2sml : 1;
+ uint64_t po1_2sml : 1;
+ uint64_t po2_2sml : 1;
+ uint64_t po3_2sml : 1;
+ uint64_t i0_rtout : 1;
+ uint64_t i1_rtout : 1;
+ uint64_t i2_rtout : 1;
+ uint64_t i3_rtout : 1;
+ uint64_t i0_overf : 1;
+ uint64_t i1_overf : 1;
+ uint64_t i2_overf : 1;
+ uint64_t i3_overf : 1;
+ uint64_t p0_rtout : 1;
+ uint64_t p1_rtout : 1;
+ uint64_t p2_rtout : 1;
+ uint64_t p3_rtout : 1;
+ uint64_t p0_perr : 1;
+ uint64_t p1_perr : 1;
+ uint64_t p2_perr : 1;
+ uint64_t p3_perr : 1;
+ uint64_t g0_rtout : 1;
+ uint64_t g1_rtout : 1;
+ uint64_t g2_rtout : 1;
+ uint64_t g3_rtout : 1;
+ uint64_t p0_pperr : 1;
+ uint64_t p1_pperr : 1;
+ uint64_t p2_pperr : 1;
+ uint64_t p3_pperr : 1;
+ uint64_t p0_ptout : 1;
+ uint64_t p1_ptout : 1;
+ uint64_t p2_ptout : 1;
+ uint64_t p3_ptout : 1;
+ uint64_t i0_pperr : 1;
+ uint64_t i1_pperr : 1;
+ uint64_t i2_pperr : 1;
+ uint64_t i3_pperr : 1;
+ uint64_t win_rto : 1;
+ uint64_t p_dperr : 1;
+ uint64_t iobdma : 1;
+ uint64_t reserved_42_63 : 22;
+#endif
+ } cn38xxp2;
+ struct cvmx_npi_int_sum_cn31xx cn50xx;
+ struct cvmx_npi_int_sum_s cn58xx;
+ struct cvmx_npi_int_sum_s cn58xxp1;
+};
+typedef union cvmx_npi_int_sum cvmx_npi_int_sum_t;
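+
+/*
+ * Usage sketch (illustrative, not part of the original SDK sources):
+ * acknowledge every pending NPI interrupt condition. NPI_INT_SUM bits
+ * are write-1-to-clear, so writing back the value just read clears
+ * exactly the conditions that were observed. CVMX_NPI_INT_SUM and the
+ * CSR accessors are assumed to be provided elsewhere in the SDK.
+ */
+static inline uint64_t cvmx_npi_int_sum_ack_example(void)
+{
+ cvmx_npi_int_sum_t sum;
+ sum.u64 = cvmx_read_csr(CVMX_NPI_INT_SUM); /* snapshot pending bits */
+ cvmx_write_csr(CVMX_NPI_INT_SUM, sum.u64); /* W1C: clear what we saw */
+ return sum.u64;
+}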
+
+/**
+ * cvmx_npi_lowp_dbell
+ *
+ * NPI_LOWP_DBELL = Low Priority Door Bell
+ *
+ * The door bell register for the low priority DMA queue.
+ */
+union cvmx_npi_lowp_dbell {
+ uint64_t u64;
+ struct cvmx_npi_lowp_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t dbell : 16; /**< The value written to this register is added to the
+ number of 8-byte words to be read and processed for
+ the low-priority DMA queue. */
+#else
+ uint64_t dbell : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_npi_lowp_dbell_s cn30xx;
+ struct cvmx_npi_lowp_dbell_s cn31xx;
+ struct cvmx_npi_lowp_dbell_s cn38xx;
+ struct cvmx_npi_lowp_dbell_s cn38xxp2;
+ struct cvmx_npi_lowp_dbell_s cn50xx;
+ struct cvmx_npi_lowp_dbell_s cn58xx;
+ struct cvmx_npi_lowp_dbell_s cn58xxp1;
+};
+typedef union cvmx_npi_lowp_dbell cvmx_npi_lowp_dbell_t;
+
+/**
+ * cvmx_npi_lowp_ibuff_saddr
+ *
+ * NPI_LOWP_IBUFF_SADDR = DMA Low Priority's Instruction Buffer Starting Address
+ *
+ * The address to start reading Instructions from for LOWP.
+ */
+union cvmx_npi_lowp_ibuff_saddr {
+ uint64_t u64;
+ struct cvmx_npi_lowp_ibuff_saddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t saddr : 36; /**< The starting address to read the first instruction. */
+#else
+ uint64_t saddr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_npi_lowp_ibuff_saddr_s cn30xx;
+ struct cvmx_npi_lowp_ibuff_saddr_s cn31xx;
+ struct cvmx_npi_lowp_ibuff_saddr_s cn38xx;
+ struct cvmx_npi_lowp_ibuff_saddr_s cn38xxp2;
+ struct cvmx_npi_lowp_ibuff_saddr_s cn50xx;
+ struct cvmx_npi_lowp_ibuff_saddr_s cn58xx;
+ struct cvmx_npi_lowp_ibuff_saddr_s cn58xxp1;
+};
+typedef union cvmx_npi_lowp_ibuff_saddr cvmx_npi_lowp_ibuff_saddr_t;
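+
+/*
+ * Usage sketch (illustrative, not part of the original SDK sources):
+ * kick off the low-priority DMA engine. The instruction buffer start
+ * address is programmed first; the doorbell write then tells the engine
+ * how many 8-byte words to fetch and process. The CVMX_NPI_LOWP_*
+ * address macros and cvmx_write_csr() are assumed from elsewhere in
+ * the SDK.
+ */
+static inline void cvmx_npi_lowp_start_example(uint64_t phys_addr, unsigned int words)
+{
+ cvmx_npi_lowp_ibuff_saddr_t saddr;
+ cvmx_npi_lowp_dbell_t dbell;
+ saddr.u64 = 0;
+ saddr.s.saddr = phys_addr; /* SADDR holds address bits [35:0] */
+ cvmx_write_csr(CVMX_NPI_LOWP_IBUFF_SADDR, saddr.u64);
+ dbell.u64 = 0;
+ dbell.s.dbell = words; /* number of 8-byte words queued */
+ cvmx_write_csr(CVMX_NPI_LOWP_DBELL, dbell.u64);
+}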
+
+/**
+ * cvmx_npi_mem_access_subid#
+ *
+ * NPI_MEM_ACCESS_SUBID3 = Memory Access SubId 3 Register
+ *
+ * Carries Read/Write parameters for PP accesses to PCI memory that use NPI SubId3.
+ * Writes to this register are not ordered with writes/reads to the PCI Memory space.
+ * To ensure that a write has completed, the user must read the register before
+ * making an access (i.e. to PCI memory space) that requires the value of this register to be updated.
+ */
+union cvmx_npi_mem_access_subidx {
+ uint64_t u64;
+ struct cvmx_npi_mem_access_subidx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t shortl : 1; /**< Generate CMD-6 on PCI(x) when '1'.
+ Loads from the cores to the corresponding subid
+ that are 32-bits or smaller:
+ - Will generate the PCI-X "Memory Read DWORD"
+ command in PCI-X mode. (Note that "Memory
+ Read DWORD" appears much like an IO read on
+ the PCI-X bus.)
+ - Will generate the PCI "Memory Read" command
+ in PCI mode, irrespective of the
+ NPI_PCI_READ_CMD[CMD_SIZE] value.
+ NOT IN PASS 1 NOR PASS 2 */
+ uint64_t nmerge : 1; /**< No Merge. (NOT IN PASS 1 NOR PASS 2) */
+ uint64_t esr : 2; /**< Endian-Swap on read. */
+ uint64_t esw : 2; /**< Endian-Swap on write. */
+ uint64_t nsr : 1; /**< No-Snoop on read. */
+ uint64_t nsw : 1; /**< No-Snoop on write. */
+ uint64_t ror : 1; /**< Relax Read on read. */
+ uint64_t row : 1; /**< Relax Order on write. */
+ uint64_t ba : 28; /**< PCI Address bits [63:36]. */
+#else
+ uint64_t ba : 28;
+ uint64_t row : 1;
+ uint64_t ror : 1;
+ uint64_t nsw : 1;
+ uint64_t nsr : 1;
+ uint64_t esw : 2;
+ uint64_t esr : 2;
+ uint64_t nmerge : 1;
+ uint64_t shortl : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_npi_mem_access_subidx_s cn30xx;
+ struct cvmx_npi_mem_access_subidx_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t esr : 2; /**< Endian-Swap on read. */
+ uint64_t esw : 2; /**< Endian-Swap on write. */
+ uint64_t nsr : 1; /**< No-Snoop on read. */
+ uint64_t nsw : 1; /**< No-Snoop on write. */
+ uint64_t ror : 1; /**< Relax Read on read. */
+ uint64_t row : 1; /**< Relax Order on write. */
+ uint64_t ba : 28; /**< PCI Address bits [63:36]. */
+#else
+ uint64_t ba : 28;
+ uint64_t row : 1;
+ uint64_t ror : 1;
+ uint64_t nsw : 1;
+ uint64_t nsr : 1;
+ uint64_t esw : 2;
+ uint64_t esr : 2;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn31xx;
+ struct cvmx_npi_mem_access_subidx_s cn38xx;
+ struct cvmx_npi_mem_access_subidx_cn31xx cn38xxp2;
+ struct cvmx_npi_mem_access_subidx_s cn50xx;
+ struct cvmx_npi_mem_access_subidx_s cn58xx;
+ struct cvmx_npi_mem_access_subidx_s cn58xxp1;
+};
+typedef union cvmx_npi_mem_access_subidx cvmx_npi_mem_access_subidx_t;
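+
+/*
+ * Usage sketch (illustrative, not part of the original SDK sources):
+ * program a memory-access SubId and read it back. As the description
+ * above notes, writes to this register are not ordered against PCI
+ * memory accesses, so the read-back guarantees the new settings are in
+ * effect before PCI memory space is touched. The subid argument and
+ * the CVMX_NPI_MEM_ACCESS_SUBIDX() address macro are assumptions based
+ * on the register's indexed naming.
+ */
+static inline void cvmx_npi_mem_access_subid_example(int subid, uint64_t pci_addr_63_36)
+{
+ cvmx_npi_mem_access_subidx_t acc;
+ acc.u64 = 0;
+ acc.s.ba = pci_addr_63_36; /* PCI address bits [63:36] */
+ cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBIDX(subid), acc.u64);
+ (void)cvmx_read_csr(CVMX_NPI_MEM_ACCESS_SUBIDX(subid)); /* flush the write */
+}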
+
+/**
+ * cvmx_npi_msi_rcv
+ *
+ * NPI_MSI_RCV = NPI MSI Receive Vector Register
+ *
+ * A bit is set in this register corresponding to the vector received during an MSI, and is cleared by writing a '1' to that bit.
+ */
+union cvmx_npi_msi_rcv {
+ uint64_t u64;
+ struct cvmx_npi_msi_rcv_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t int_vec : 64; /**< Refer to PCI_MSI_RCV */
+#else
+ uint64_t int_vec : 64;
+#endif
+ } s;
+ struct cvmx_npi_msi_rcv_s cn30xx;
+ struct cvmx_npi_msi_rcv_s cn31xx;
+ struct cvmx_npi_msi_rcv_s cn38xx;
+ struct cvmx_npi_msi_rcv_s cn38xxp2;
+ struct cvmx_npi_msi_rcv_s cn50xx;
+ struct cvmx_npi_msi_rcv_s cn58xx;
+ struct cvmx_npi_msi_rcv_s cn58xxp1;
+};
+typedef union cvmx_npi_msi_rcv cvmx_npi_msi_rcv_t;
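+
+/*
+ * Usage sketch (illustrative, not part of the original SDK sources):
+ * consume and acknowledge pending MSI vectors. Each set bit names a
+ * received vector; writing the same bits back clears them (W1C).
+ */
+static inline uint64_t cvmx_npi_msi_rcv_ack_example(void)
+{
+ uint64_t vec = cvmx_read_csr(CVMX_NPI_MSI_RCV); /* pending vectors */
+ if (vec)
+ cvmx_write_csr(CVMX_NPI_MSI_RCV, vec); /* W1 clears them */
+ return vec;
+}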
+
+/**
+ * cvmx_npi_num_desc_output#
+ *
+ * NUM_DESC_OUTPUT0 = Number Of Descriptors Available For Output 0
+ *
+ * The size of the Buffer/Info Pointer Pair ring for Output-0.
+ */
+union cvmx_npi_num_desc_outputx {
+ uint64_t u64;
+ struct cvmx_npi_num_desc_outputx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t size : 32; /**< The size of the Buffer/Info Pointer Pair ring. */
+#else
+ uint64_t size : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npi_num_desc_outputx_s cn30xx;
+ struct cvmx_npi_num_desc_outputx_s cn31xx;
+ struct cvmx_npi_num_desc_outputx_s cn38xx;
+ struct cvmx_npi_num_desc_outputx_s cn38xxp2;
+ struct cvmx_npi_num_desc_outputx_s cn50xx;
+ struct cvmx_npi_num_desc_outputx_s cn58xx;
+ struct cvmx_npi_num_desc_outputx_s cn58xxp1;
+};
+typedef union cvmx_npi_num_desc_outputx cvmx_npi_num_desc_outputx_t;
+
+/**
+ * cvmx_npi_output_control
+ *
+ * NPI_OUTPUT_CONTROL = NPI's Output Control Register
+ *
+ * Controls arbitration, byte-count mode, and the ordering/snoop/endian-swap attributes for the NPI packet output ports.
+ */
+union cvmx_npi_output_control {
+ uint64_t u64;
+ struct cvmx_npi_output_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t pkt_rr : 1; /**< When set '1' the output packet selection will be
+ made with a Round Robin arbitration. When '0'
+ the output packet port is fixed in priority,
+ where the lower port number has higher priority.
+ PASS3 Field */
+ uint64_t p3_bmode : 1; /**< When set '1' PCI_PKTS_SENT3 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t p2_bmode : 1; /**< When set '1' PCI_PKTS_SENT2 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t p1_bmode : 1; /**< When set '1' PCI_PKTS_SENT1 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t p0_bmode : 1; /**< When set '1' PCI_PKTS_SENT0 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t o3_es : 2; /**< Endian Swap for Output3 Data. */
+ uint64_t o3_ns : 1; /**< NoSnoop Enable for Output3 Data. */
+ uint64_t o3_ro : 1; /**< Relaxed Ordering Enable for Output3 Data. */
+ uint64_t o2_es : 2; /**< Endian Swap for Output2 Data. */
+ uint64_t o2_ns : 1; /**< NoSnoop Enable for Output2 Data. */
+ uint64_t o2_ro : 1; /**< Relaxed Ordering Enable for Output2 Data. */
+ uint64_t o1_es : 2; /**< Endian Swap for Output1 Data. */
+ uint64_t o1_ns : 1; /**< NoSnoop Enable for Output1 Data. */
+ uint64_t o1_ro : 1; /**< Relaxed Ordering Enable for Output1 Data. */
+ uint64_t o0_es : 2; /**< Endian Swap for Output0 Data. */
+ uint64_t o0_ns : 1; /**< NoSnoop Enable for Output0 Data. */
+ uint64_t o0_ro : 1; /**< Relaxed Ordering Enable for Output0 Data. */
+ uint64_t o3_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O3_ES,
+ O3_NS, O3_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O3_ES[1:0], O3_NS, O3_RO. For Output Port-3. */
+ uint64_t o2_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O2_ES,
+ O2_NS, O2_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O2_ES[1:0], O2_NS, O2_RO. For Output Port-2. */
+ uint64_t o1_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O1_ES,
+ O1_NS, O1_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O1_ES[1:0], O1_NS, O1_RO. For Output Port-1. */
+ uint64_t o0_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O0_ES,
+ O0_NS, O0_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O0_ES[1:0], O0_NS, O0_RO. For Output Port-0. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t iptr_o3 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-3. */
+ uint64_t iptr_o2 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-2. */
+ uint64_t iptr_o1 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-1. */
+ uint64_t iptr_o0 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-0. */
+ uint64_t esr_sl3 : 2; /**< The Endian-Swap-Mode for Slist3 reads. */
+ uint64_t nsr_sl3 : 1; /**< Enables '1' NoSnoop for Slist3 reads. */
+ uint64_t ror_sl3 : 1; /**< Enables '1' Relaxed Ordering for Slist3 reads. */
+ uint64_t esr_sl2 : 2; /**< The Endian-Swap-Mode for Slist2 reads. */
+ uint64_t nsr_sl2 : 1; /**< Enables '1' NoSnoop for Slist2 reads. */
+ uint64_t ror_sl2 : 1; /**< Enables '1' Relaxed Ordering for Slist2 reads. */
+ uint64_t esr_sl1 : 2; /**< The Endian-Swap-Mode for Slist1 reads. */
+ uint64_t nsr_sl1 : 1; /**< Enables '1' NoSnoop for Slist1 reads. */
+ uint64_t ror_sl1 : 1; /**< Enables '1' Relaxed Ordering for Slist1 reads. */
+ uint64_t esr_sl0 : 2; /**< The Endian-Swap-Mode for Slist0 reads. */
+ uint64_t nsr_sl0 : 1; /**< Enables '1' NoSnoop for Slist0 reads. */
+ uint64_t ror_sl0 : 1; /**< Enables '1' Relaxed Ordering for Slist0 reads. */
+#else
+ uint64_t ror_sl0 : 1;
+ uint64_t nsr_sl0 : 1;
+ uint64_t esr_sl0 : 2;
+ uint64_t ror_sl1 : 1;
+ uint64_t nsr_sl1 : 1;
+ uint64_t esr_sl1 : 2;
+ uint64_t ror_sl2 : 1;
+ uint64_t nsr_sl2 : 1;
+ uint64_t esr_sl2 : 2;
+ uint64_t ror_sl3 : 1;
+ uint64_t nsr_sl3 : 1;
+ uint64_t esr_sl3 : 2;
+ uint64_t iptr_o0 : 1;
+ uint64_t iptr_o1 : 1;
+ uint64_t iptr_o2 : 1;
+ uint64_t iptr_o3 : 1;
+ uint64_t reserved_20_23 : 4;
+ uint64_t o0_csrm : 1;
+ uint64_t o1_csrm : 1;
+ uint64_t o2_csrm : 1;
+ uint64_t o3_csrm : 1;
+ uint64_t o0_ro : 1;
+ uint64_t o0_ns : 1;
+ uint64_t o0_es : 2;
+ uint64_t o1_ro : 1;
+ uint64_t o1_ns : 1;
+ uint64_t o1_es : 2;
+ uint64_t o2_ro : 1;
+ uint64_t o2_ns : 1;
+ uint64_t o2_es : 2;
+ uint64_t o3_ro : 1;
+ uint64_t o3_ns : 1;
+ uint64_t o3_es : 2;
+ uint64_t p0_bmode : 1;
+ uint64_t p1_bmode : 1;
+ uint64_t p2_bmode : 1;
+ uint64_t p3_bmode : 1;
+ uint64_t pkt_rr : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } s;
+ struct cvmx_npi_output_control_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63 : 19;
+ uint64_t p0_bmode : 1; /**< When set '1' PCI_PKTS_SENT0 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t reserved_32_43 : 12;
+ uint64_t o0_es : 2; /**< Endian Swap for Output0 Data. */
+ uint64_t o0_ns : 1; /**< NoSnoop Enable for Output0 Data. */
+ uint64_t o0_ro : 1; /**< Relaxed Ordering Enable for Output0 Data. */
+ uint64_t reserved_25_27 : 3;
+ uint64_t o0_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O0_ES,
+ O0_NS, O0_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O0_ES[1:0], O0_NS, O0_RO. For Output Port-0. */
+ uint64_t reserved_17_23 : 7;
+ uint64_t iptr_o0 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-0. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t esr_sl0 : 2; /**< The Endian-Swap-Mode for Slist0 reads. */
+ uint64_t nsr_sl0 : 1; /**< Enables '1' NoSnoop for Slist0 reads. */
+ uint64_t ror_sl0 : 1; /**< Enables '1' Relaxed Ordering for Slist0 reads. */
+#else
+ uint64_t ror_sl0 : 1;
+ uint64_t nsr_sl0 : 1;
+ uint64_t esr_sl0 : 2;
+ uint64_t reserved_4_15 : 12;
+ uint64_t iptr_o0 : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t o0_csrm : 1;
+ uint64_t reserved_25_27 : 3;
+ uint64_t o0_ro : 1;
+ uint64_t o0_ns : 1;
+ uint64_t o0_es : 2;
+ uint64_t reserved_32_43 : 12;
+ uint64_t p0_bmode : 1;
+ uint64_t reserved_45_63 : 19;
+#endif
+ } cn30xx;
+ struct cvmx_npi_output_control_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t p1_bmode : 1; /**< When set '1' PCI_PKTS_SENT1 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t p0_bmode : 1; /**< When set '1' PCI_PKTS_SENT0 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t reserved_36_43 : 8;
+ uint64_t o1_es : 2; /**< Endian Swap for Output1 Data. */
+ uint64_t o1_ns : 1; /**< NoSnoop Enable for Output1 Data. */
+ uint64_t o1_ro : 1; /**< Relaxed Ordering Enable for Output1 Data. */
+ uint64_t o0_es : 2; /**< Endian Swap for Output0 Data. */
+ uint64_t o0_ns : 1; /**< NoSnoop Enable for Output0 Data. */
+ uint64_t o0_ro : 1; /**< Relaxed Ordering Enable for Output0 Data. */
+ uint64_t reserved_26_27 : 2;
+ uint64_t o1_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O1_ES,
+ O1_NS, O1_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O1_ES[1:0], O1_NS, O1_RO. For Output Port-1. */
+ uint64_t o0_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O0_ES,
+ O0_NS, O0_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O0_ES[1:0], O0_NS, O0_RO. For Output Port-0. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t iptr_o1 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-1. */
+ uint64_t iptr_o0 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-0. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t esr_sl1 : 2; /**< The Endian-Swap-Mode for Slist1 reads. */
+ uint64_t nsr_sl1 : 1; /**< Enables '1' NoSnoop for Slist1 reads. */
+ uint64_t ror_sl1 : 1; /**< Enables '1' Relaxed Ordering for Slist1 reads. */
+ uint64_t esr_sl0 : 2; /**< The Endian-Swap-Mode for Slist0 reads. */
+ uint64_t nsr_sl0 : 1; /**< Enables '1' NoSnoop for Slist0 reads. */
+ uint64_t ror_sl0 : 1; /**< Enables '1' Relaxed Ordering for Slist0 reads. */
+#else
+ uint64_t ror_sl0 : 1;
+ uint64_t nsr_sl0 : 1;
+ uint64_t esr_sl0 : 2;
+ uint64_t ror_sl1 : 1;
+ uint64_t nsr_sl1 : 1;
+ uint64_t esr_sl1 : 2;
+ uint64_t reserved_8_15 : 8;
+ uint64_t iptr_o0 : 1;
+ uint64_t iptr_o1 : 1;
+ uint64_t reserved_18_23 : 6;
+ uint64_t o0_csrm : 1;
+ uint64_t o1_csrm : 1;
+ uint64_t reserved_26_27 : 2;
+ uint64_t o0_ro : 1;
+ uint64_t o0_ns : 1;
+ uint64_t o0_es : 2;
+ uint64_t o1_ro : 1;
+ uint64_t o1_ns : 1;
+ uint64_t o1_es : 2;
+ uint64_t reserved_36_43 : 8;
+ uint64_t p0_bmode : 1;
+ uint64_t p1_bmode : 1;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } cn31xx;
+ struct cvmx_npi_output_control_s cn38xx;
+ struct cvmx_npi_output_control_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t p3_bmode : 1; /**< When set '1' PCI_PKTS_SENT3 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t p2_bmode : 1; /**< When set '1' PCI_PKTS_SENT2 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t p1_bmode : 1; /**< When set '1' PCI_PKTS_SENT1 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t p0_bmode : 1; /**< When set '1' PCI_PKTS_SENT0 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t o3_es : 2; /**< Endian Swap for Output3 Data. */
+ uint64_t o3_ns : 1; /**< NoSnoop Enable for Output3 Data. */
+ uint64_t o3_ro : 1; /**< Relaxed Ordering Enable for Output3 Data. */
+ uint64_t o2_es : 2; /**< Endian Swap for Output2 Data. */
+ uint64_t o2_ns : 1; /**< NoSnoop Enable for Output2 Data. */
+ uint64_t o2_ro : 1; /**< Relaxed Ordering Enable for Output2 Data. */
+ uint64_t o1_es : 2; /**< Endian Swap for Output1 Data. */
+ uint64_t o1_ns : 1; /**< NoSnoop Enable for Output1 Data. */
+ uint64_t o1_ro : 1; /**< Relaxed Ordering Enable for Output1 Data. */
+ uint64_t o0_es : 2; /**< Endian Swap for Output0 Data. */
+ uint64_t o0_ns : 1; /**< NoSnoop Enable for Output0 Data. */
+ uint64_t o0_ro : 1; /**< Relaxed Ordering Enable for Output0 Data. */
+ uint64_t o3_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O3_ES,
+ O3_NS, O3_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O3_ES[1:0], O3_NS, O3_RO. For Output Port-3. */
+ uint64_t o2_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O2_ES,
+ O2_NS, O2_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O2_ES[1:0], O2_NS, O2_RO. For Output Port-2. */
+ uint64_t o1_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O1_ES,
+ O1_NS, O1_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O1_ES[1:0], O1_NS, O1_RO. For Output Port-1. */
+ uint64_t o0_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O0_ES,
+ O0_NS, O0_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O0_ES[1:0], O0_NS, O0_RO. For Output Port-0. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t iptr_o3 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-3. */
+ uint64_t iptr_o2 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-2. */
+ uint64_t iptr_o1 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-1. */
+ uint64_t iptr_o0 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-0. */
+ uint64_t esr_sl3 : 2; /**< The Endian-Swap-Mode for Slist3 reads. */
+ uint64_t nsr_sl3 : 1; /**< Enables '1' NoSnoop for Slist3 reads. */
+ uint64_t ror_sl3 : 1; /**< Enables '1' Relaxed Ordering for Slist3 reads. */
+ uint64_t esr_sl2 : 2; /**< The Endian-Swap-Mode for Slist2 reads. */
+ uint64_t nsr_sl2 : 1; /**< Enables '1' NoSnoop for Slist2 reads. */
+ uint64_t ror_sl2 : 1; /**< Enables '1' Relaxed Ordering for Slist2 reads. */
+ uint64_t esr_sl1 : 2; /**< The Endian-Swap-Mode for Slist1 reads. */
+ uint64_t nsr_sl1 : 1; /**< Enables '1' NoSnoop for Slist1 reads. */
+ uint64_t ror_sl1 : 1; /**< Enables '1' Relaxed Ordering for Slist1 reads. */
+ uint64_t esr_sl0 : 2; /**< The Endian-Swap-Mode for Slist0 reads. */
+ uint64_t nsr_sl0 : 1; /**< Enables '1' NoSnoop for Slist0 reads. */
+ uint64_t ror_sl0 : 1; /**< Enables '1' Relaxed Ordering for Slist0 reads. */
+#else
+ uint64_t ror_sl0 : 1;
+ uint64_t nsr_sl0 : 1;
+ uint64_t esr_sl0 : 2;
+ uint64_t ror_sl1 : 1;
+ uint64_t nsr_sl1 : 1;
+ uint64_t esr_sl1 : 2;
+ uint64_t ror_sl2 : 1;
+ uint64_t nsr_sl2 : 1;
+ uint64_t esr_sl2 : 2;
+ uint64_t ror_sl3 : 1;
+ uint64_t nsr_sl3 : 1;
+ uint64_t esr_sl3 : 2;
+ uint64_t iptr_o0 : 1;
+ uint64_t iptr_o1 : 1;
+ uint64_t iptr_o2 : 1;
+ uint64_t iptr_o3 : 1;
+ uint64_t reserved_20_23 : 4;
+ uint64_t o0_csrm : 1;
+ uint64_t o1_csrm : 1;
+ uint64_t o2_csrm : 1;
+ uint64_t o3_csrm : 1;
+ uint64_t o0_ro : 1;
+ uint64_t o0_ns : 1;
+ uint64_t o0_es : 2;
+ uint64_t o1_ro : 1;
+ uint64_t o1_ns : 1;
+ uint64_t o1_es : 2;
+ uint64_t o2_ro : 1;
+ uint64_t o2_ns : 1;
+ uint64_t o2_es : 2;
+ uint64_t o3_ro : 1;
+ uint64_t o3_ns : 1;
+ uint64_t o3_es : 2;
+ uint64_t p0_bmode : 1;
+ uint64_t p1_bmode : 1;
+ uint64_t p2_bmode : 1;
+ uint64_t p3_bmode : 1;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } cn38xxp2;
+ struct cvmx_npi_output_control_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t pkt_rr : 1; /**< When set '1' the output packet selection will be
+ made with a Round Robin arbitration. When '0'
+ the output packet port is fixed in priority,
+ where the lower port number has higher priority.
+ PASS2 Field */
+ uint64_t reserved_46_47 : 2;
+ uint64_t p1_bmode : 1; /**< When set '1' PCI_PKTS_SENT1 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t p0_bmode : 1; /**< When set '1' PCI_PKTS_SENT0 register will be
+ updated with the number of bytes in the packet
+ sent, when '0' the register will have a value
+ of '1' added. */
+ uint64_t reserved_36_43 : 8;
+ uint64_t o1_es : 2; /**< Endian Swap for Output1 Data. */
+ uint64_t o1_ns : 1; /**< NoSnoop Enable for Output1 Data. */
+ uint64_t o1_ro : 1; /**< Relaxed Ordering Enable for Output1 Data. */
+ uint64_t o0_es : 2; /**< Endian Swap for Output0 Data. */
+ uint64_t o0_ns : 1; /**< NoSnoop Enable for Output0 Data. */
+ uint64_t o0_ro : 1; /**< Relaxed Ordering Enable for Output0 Data. */
+ uint64_t reserved_26_27 : 2;
+ uint64_t o1_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O1_ES,
+ O1_NS, O1_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O1_ES[1:0], O1_NS, O1_RO. For Output Port-1. */
+ uint64_t o0_csrm : 1; /**< When '1' the address[63:60] to write packet data,
+ comes from the DPTR[63:60] in the scatter-list pair,
+ and the RO, NS, ES values come from the O0_ES,
+ O0_NS, O0_RO. When '0' the RO == DPTR[60],
+ NS == DPTR[61], ES == DPTR[63:62], the address the
+ packet will be written to is ADDR[63:60] ==
+ O0_ES[1:0], O0_NS, O0_RO. For Output Port-0. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t iptr_o1 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-1. */
+ uint64_t iptr_o0 : 1; /**< Uses the Info-Pointer to store length and data
+ for output-0. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t esr_sl1 : 2; /**< The Endian-Swap-Mode for Slist1 reads. */
+ uint64_t nsr_sl1 : 1; /**< Enables '1' NoSnoop for Slist1 reads. */
+ uint64_t ror_sl1 : 1; /**< Enables '1' Relaxed Ordering for Slist1 reads. */
+ uint64_t esr_sl0 : 2; /**< The Endian-Swap-Mode for Slist0 reads. */
+ uint64_t nsr_sl0 : 1; /**< Enables '1' NoSnoop for Slist0 reads. */
+ uint64_t ror_sl0 : 1; /**< Enables '1' Relaxed Ordering for Slist0 reads. */
+#else
+ uint64_t ror_sl0 : 1;
+ uint64_t nsr_sl0 : 1;
+ uint64_t esr_sl0 : 2;
+ uint64_t ror_sl1 : 1;
+ uint64_t nsr_sl1 : 1;
+ uint64_t esr_sl1 : 2;
+ uint64_t reserved_8_15 : 8;
+ uint64_t iptr_o0 : 1;
+ uint64_t iptr_o1 : 1;
+ uint64_t reserved_18_23 : 6;
+ uint64_t o0_csrm : 1;
+ uint64_t o1_csrm : 1;
+ uint64_t reserved_26_27 : 2;
+ uint64_t o0_ro : 1;
+ uint64_t o0_ns : 1;
+ uint64_t o0_es : 2;
+ uint64_t o1_ro : 1;
+ uint64_t o1_ns : 1;
+ uint64_t o1_es : 2;
+ uint64_t reserved_36_43 : 8;
+ uint64_t p0_bmode : 1;
+ uint64_t p1_bmode : 1;
+ uint64_t reserved_46_47 : 2;
+ uint64_t pkt_rr : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn50xx;
+ struct cvmx_npi_output_control_s cn58xx;
+ struct cvmx_npi_output_control_s cn58xxp1;
+};
+typedef union cvmx_npi_output_control cvmx_npi_output_control_t;
+
+/**
+ * cvmx_npi_p#_dbpair_addr
+ *
+ * NPI_P0_DBPAIR_ADDR = NPI's Port-0 DATA-BUFFER Pair Next Read Address.
+ *
+ * Contains the next address to read for Port-0's Data/Buffer Pair.
+ */
+union cvmx_npi_px_dbpair_addr {
+ uint64_t u64;
+ struct cvmx_npi_px_dbpair_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63 : 1;
+ uint64_t state : 2; /**< POS state machine vector. Used to tell when NADDR
+ is valid (when STATE == 0). */
+ uint64_t naddr : 61; /**< Bits [63:3] of the next Data-Info Pair to read.
+ Value is only valid when STATE == 0. */
+#else
+ uint64_t naddr : 61;
+ uint64_t state : 2;
+ uint64_t reserved_63_63 : 1;
+#endif
+ } s;
+ struct cvmx_npi_px_dbpair_addr_s cn30xx;
+ struct cvmx_npi_px_dbpair_addr_s cn31xx;
+ struct cvmx_npi_px_dbpair_addr_s cn38xx;
+ struct cvmx_npi_px_dbpair_addr_s cn38xxp2;
+ struct cvmx_npi_px_dbpair_addr_s cn50xx;
+ struct cvmx_npi_px_dbpair_addr_s cn58xx;
+ struct cvmx_npi_px_dbpair_addr_s cn58xxp1;
+};
+typedef union cvmx_npi_px_dbpair_addr cvmx_npi_px_dbpair_addr_t;
+
+/**
+ * cvmx_npi_p#_instr_addr
+ *
+ * NPI_P0_INSTR_ADDR = NPI's Port-0 Instruction Next Read Address.
+ *
+ * Contains the next address to read for Port-0's Instructions.
+ */
+union cvmx_npi_px_instr_addr {
+ uint64_t u64;
+ struct cvmx_npi_px_instr_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t state : 3; /**< Gather engine state vector. Used to tell when
+ NADDR is valid (when STATE == 0). */
+ uint64_t naddr : 61; /**< Bits [63:3] of the next Instruction to read.
+ Value is only valid when STATE == 0. */
+#else
+ uint64_t naddr : 61;
+ uint64_t state : 3;
+#endif
+ } s;
+ struct cvmx_npi_px_instr_addr_s cn30xx;
+ struct cvmx_npi_px_instr_addr_s cn31xx;
+ struct cvmx_npi_px_instr_addr_s cn38xx;
+ struct cvmx_npi_px_instr_addr_s cn38xxp2;
+ struct cvmx_npi_px_instr_addr_s cn50xx;
+ struct cvmx_npi_px_instr_addr_s cn58xx;
+ struct cvmx_npi_px_instr_addr_s cn58xxp1;
+};
+typedef union cvmx_npi_px_instr_addr cvmx_npi_px_instr_addr_t;
+
+/**
+ * cvmx_npi_p#_instr_cnts
+ *
+ * NPI_P0_INSTR_CNTS = NPI's Port-0 Instruction Counts For Packets In.
+ *
+ * Used to determine the number of instructions in the NPI and still to be fetched for Input-Packets.
+ */
+union cvmx_npi_px_instr_cnts {
+ uint64_t u64;
+ struct cvmx_npi_px_instr_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t fcnt : 6; /**< Number of entries in the Instruction FIFO. */
+ uint64_t avail : 32; /**< Doorbell count to be read. */
+#else
+ uint64_t avail : 32;
+ uint64_t fcnt : 6;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_npi_px_instr_cnts_s cn30xx;
+ struct cvmx_npi_px_instr_cnts_s cn31xx;
+ struct cvmx_npi_px_instr_cnts_s cn38xx;
+ struct cvmx_npi_px_instr_cnts_s cn38xxp2;
+ struct cvmx_npi_px_instr_cnts_s cn50xx;
+ struct cvmx_npi_px_instr_cnts_s cn58xx;
+ struct cvmx_npi_px_instr_cnts_s cn58xxp1;
+};
+typedef union cvmx_npi_px_instr_cnts cvmx_npi_px_instr_cnts_t;
+
+/**
+ * cvmx_npi_p#_pair_cnts
+ *
+ * NPI_P0_PAIR_CNTS = NPI's Port-0 Instruction Counts For Packets Out.
+ *
+ * Used to determine the number of instructions in the NPI and still to be fetched for Output-Packets.
+ */
+union cvmx_npi_px_pair_cnts {
+ uint64_t u64;
+ struct cvmx_npi_px_pair_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t fcnt : 5; /**< 16 minus the number of entries in the D/I Pair FIFO. */
+ uint64_t avail : 32; /**< Doorbell count to be read. */
+#else
+ uint64_t avail : 32;
+ uint64_t fcnt : 5;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } s;
+ struct cvmx_npi_px_pair_cnts_s cn30xx;
+ struct cvmx_npi_px_pair_cnts_s cn31xx;
+ struct cvmx_npi_px_pair_cnts_s cn38xx;
+ struct cvmx_npi_px_pair_cnts_s cn38xxp2;
+ struct cvmx_npi_px_pair_cnts_s cn50xx;
+ struct cvmx_npi_px_pair_cnts_s cn58xx;
+ struct cvmx_npi_px_pair_cnts_s cn58xxp1;
+};
+typedef union cvmx_npi_px_pair_cnts cvmx_npi_px_pair_cnts_t;
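+
+/* Usage sketch for the counts above: FCNT holds 16 minus the number of D/I
+ * Pair FIFO entries, so occupancy is recovered as (16 - FCNT). A minimal
+ * illustration, assuming cvmx_read_csr() from cvmx.h; the caller passes the
+ * CSR address macro defined earlier in this file. */
+static inline int cvmx_npi_px_pair_fifo_used(uint64_t pair_cnts_addr)
+{
+    cvmx_npi_px_pair_cnts_t cnts;
+    cnts.u64 = cvmx_read_csr(pair_cnts_addr); /* snapshot AVAIL and FCNT */
+    return 16 - (int)cnts.s.fcnt;             /* entries currently in the FIFO */
+}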
+
+/**
+ * cvmx_npi_pci_burst_size
+ *
+ * NPI_PCI_BURST_SIZE = NPI PCI Burst Size Register
+ *
+ * Controls the number of words the NPI will attempt to read from / write to the PCI.
+ */
+union cvmx_npi_pci_burst_size {
+ uint64_t u64;
+ struct cvmx_npi_pci_burst_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t wr_brst : 7; /**< The number of 8B words to write to PCI in any one
+ write operation. A zero is equal to 128. This
+ value is used for packet reads and is clamped at
+ a max of 112 for DMA writes. */
+ uint64_t rd_brst : 7; /**< Number of 8B words to read from PCI in any one
+ read operation. Legal values are 1 to 127, where
+ a 0 will be treated as a 1.
+ "For reading of packet data value is limited to 64
+ in PASS-2."
+ This value does not control the size of a read
+ caused by an IOBDMA from a PP. */
+#else
+ uint64_t rd_brst : 7;
+ uint64_t wr_brst : 7;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_npi_pci_burst_size_s cn30xx;
+ struct cvmx_npi_pci_burst_size_s cn31xx;
+ struct cvmx_npi_pci_burst_size_s cn38xx;
+ struct cvmx_npi_pci_burst_size_s cn38xxp2;
+ struct cvmx_npi_pci_burst_size_s cn50xx;
+ struct cvmx_npi_pci_burst_size_s cn58xx;
+ struct cvmx_npi_pci_burst_size_s cn58xxp1;
+};
+typedef union cvmx_npi_pci_burst_size cvmx_npi_pci_burst_size_t;
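+
+/* Usage sketch: programming the burst sizes through the union above. A
+ * minimal illustration, assuming cvmx_read_csr()/cvmx_write_csr() from
+ * cvmx.h; the caller passes the CSR address macro defined earlier in this
+ * file. */
+static inline void cvmx_npi_set_pci_bursts(uint64_t csr_addr, unsigned wr_words, unsigned rd_words)
+{
+    cvmx_npi_pci_burst_size_t bs;
+    bs.u64 = cvmx_read_csr(csr_addr); /* read-modify-write the CSR */
+    bs.s.wr_brst = wr_words & 0x7f;   /* 0 encodes 128 */
+    bs.s.rd_brst = rd_words & 0x7f;   /* 0 is treated as 1 */
+    cvmx_write_csr(csr_addr, bs.u64);
+}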
+
+/**
+ * cvmx_npi_pci_int_arb_cfg
+ *
+ * NPI_PCI_INT_ARB_CFG = Configuration For PCI Arbiter
+ *
+ * Controls operation of the Internal PCI Arbiter. This register should
+ * only be written when PRST# is asserted. NPI_PCI_INT_ARB_CFG[EN] should
+ * only be set when OCTEON is the host.
+ */
+union cvmx_npi_pci_int_arb_cfg {
+ uint64_t u64;
+ struct cvmx_npi_pci_int_arb_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t hostmode : 1; /**< PCI Host Mode Pin (sampled for use by software).
+ This bit reflects the sampled PCI_HOSTMODE pin.
+ In HOST Mode, OCTEON drives the PCI_CLK_OUT and
+ PCI initialization pattern during PCI_RST_N deassertion. */
+ uint64_t pci_ovr : 4; /**< PCI Host Mode Bus Speed/Type Override
+ When in Host Mode(PCI_HOSTMODE pin =1), OCTEON acting
+ as the PCI Central Agent, samples the PCI_PCI100,
+ PCI_M66EN and PCI_PCIXCAP pins to determine the
+ 'sampled' PCI Bus speed and Bus Type (PCI or PCIX).
+ (see: PCI_CNT_REG[HM_SPEED,HM_PCIX])
+ However, in some cases, SW may want to override
+ the 'sampled' PCI Bus Type/Speed, and use some
+ SLOWER Bus frequency.
+ The PCI_OVR field encoding represents the 'override'
+ PCI Bus Type/Speed which will be used to generate the
+ PCI_CLK_OUT and determines the PCI initialization pattern
+ driven during PCI_RST_N deassertion.
+ PCI_OVR[3]: OVERRIDE (0:DISABLE/1:ENABLE)
+ PCI_OVR[2]: BUS TYPE(0:PCI/1:PCIX)
+ PCI_OVR[1:0]: BUS SPEED(0:33/1:66/2:100/3:133)
+ OVERRIDE TYPE SPEED | Override Configuration
+ [3] [2] [1:0] | TYPE SPEED
+ ------------------+-------------------------------
+ 0 x xx | No override(uses 'sampled'
+ | Bus Speed(HM_SPEED) and Bus Type(HM_PCIX)
+ 1 0 00 | PCI Mode 33MHz
+ 1 0 01 | PCI Mode 66MHz
+ 1 0 10 | RESERVED (DO NOT USE)
+ 1 0 11 | RESERVED (DO NOT USE)
+ 1 1 00 | RESERVED (DO NOT USE)
+ 1 1 01 | PCIX Mode 66MHz
+ 1 1 10 | PCIX Mode 100MHz
+ 1 1 11 | PCIX Mode 133MHz
+ NOTES:
+ - NPI_PCI_INT_ARB_CFG[PCI_OVR] has NO EFFECT on
+ PCI_CNT_REG[HM_SPEED,HM_PCIX] (ie: the sampled PCI Bus
+ Type/Speed), but WILL AFFECT PCI_CTL_STATUS_2[AP_PCIX]
+ which reflects the actual PCI Bus Type(0:PCI/1:PCIX).
+ - Software should never 'up' configure the recommended values.
+ In other words, if the 'sampled' Bus Type=PCI(HM_PCIX=0),
+ then SW should NOT attempt to set TYPE[2]=1 for PCIX Mode.
+ Likewise, if the sampled Bus Speed=66MHz(HM_SPEED=01),
+ then SW should NOT attempt to 'speed up' the bus [ie:
+ SPEED[1:0]=10(100MHz)].
+ - If PCI_OVR<3> is set prior to PCI reset de-assertion
+ in host mode, NPI_PCI_INT_ARB_CFG[PCI_OVR]
+ indicates the Bus Type/Speed that OCTEON drove on the
+ DEVSEL/STOP/TRDY pins during reset de-assertion. (user
+ should then ignore the 'sampled' Bus Type/Speed
+ contained in the PCI_CNT_REG[HM_PCIX, HM_SPEED] fields.)
+ - If PCI_OVR<3> is clear prior to PCI reset de-assertion
+ in host mode, PCI_CNT_REG[HM_PCIX,HM_SPEED]
+ indicates the Bus Type/Speed that OCTEON drove on the
+ DEVSEL/STOP/TRDY pins during reset de-assertion. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t en : 1; /**< Internal arbiter enable. */
+ uint64_t park_mod : 1; /**< Bus park mode. 0=park on last, 1=park on device. */
+ uint64_t park_dev : 3; /**< Bus park device. 0-3 = External device, 4 = OCTEON. */
+#else
+ uint64_t park_dev : 3;
+ uint64_t park_mod : 1;
+ uint64_t en : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pci_ovr : 4;
+ uint64_t hostmode : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_npi_pci_int_arb_cfg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t en : 1; /**< Internal arbiter enable. */
+ uint64_t park_mod : 1; /**< Bus park mode. 0=park on last, 1=park on device. */
+ uint64_t park_dev : 3; /**< Bus park device. 0-3 = External device, 4 = OCTEON. */
+#else
+ uint64_t park_dev : 3;
+ uint64_t park_mod : 1;
+ uint64_t en : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn30xx;
+ struct cvmx_npi_pci_int_arb_cfg_cn30xx cn31xx;
+ struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xx;
+ struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xxp2;
+ struct cvmx_npi_pci_int_arb_cfg_s cn50xx;
+ struct cvmx_npi_pci_int_arb_cfg_s cn58xx;
+ struct cvmx_npi_pci_int_arb_cfg_s cn58xxp1;
+};
+typedef union cvmx_npi_pci_int_arb_cfg cvmx_npi_pci_int_arb_cfg_t;
+
+/**
+ * cvmx_npi_pci_read_cmd
+ *
+ * NPI_PCI_READ_CMD = NPI PCI Read Command Register
+ *
+ * Controls the type of read command sent.
+ * Writes to this register are not ordered with respect to reads/writes to PCI Memory space.
+ * To ensure that a write has completed, the user must read the register back before
+ * making an access (e.g. to PCI Memory space) that requires the updated value of this register.
+ * Also, any previously issued reads/writes to PCI Memory space that are still stored in the
+ * outbound FIFO will use the value of this register after it has been updated.
+ */
+union cvmx_npi_pci_read_cmd {
+ uint64_t u64;
+ struct cvmx_npi_pci_read_cmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t cmd_size : 11; /**< When the number of bytes to be read equals or
+ exceeds this size, the PCI (in PCI mode) will use a
+ Memory-Read-Multiple. This register holds a value
+ from 8 to 2048; a value of 0-7 will be treated as
+ 2048. */
+#else
+ uint64_t cmd_size : 11;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_npi_pci_read_cmd_s cn30xx;
+ struct cvmx_npi_pci_read_cmd_s cn31xx;
+ struct cvmx_npi_pci_read_cmd_s cn38xx;
+ struct cvmx_npi_pci_read_cmd_s cn38xxp2;
+ struct cvmx_npi_pci_read_cmd_s cn50xx;
+ struct cvmx_npi_pci_read_cmd_s cn58xx;
+ struct cvmx_npi_pci_read_cmd_s cn58xxp1;
+};
+typedef union cvmx_npi_pci_read_cmd cvmx_npi_pci_read_cmd_t;
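+
+/* Usage sketch of the ordering rule described above: after changing
+ * CMD_SIZE, read the register back before issuing any PCI memory access
+ * that depends on the new value. A minimal illustration, assuming
+ * cvmx_read_csr()/cvmx_write_csr() from cvmx.h. */
+static inline void cvmx_npi_set_read_cmd_size(uint64_t csr_addr, unsigned bytes)
+{
+    cvmx_npi_pci_read_cmd_t rc;
+    rc.u64 = 0;
+    rc.s.cmd_size = bytes & 0x7ff;  /* values 0-7 are treated as 2048 */
+    cvmx_write_csr(csr_addr, rc.u64);
+    (void)cvmx_read_csr(csr_addr);  /* read back to flush the write */
+}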
+
+/**
+ * cvmx_npi_port32_instr_hdr
+ *
+ * NPI_PORT32_INSTR_HDR = NPI Port 32 Instruction Header
+ *
+ * Contains bits [62:42] of the Instruction Header for port 32.
+ */
+union cvmx_npi_port32_instr_hdr {
+ uint64_t u64;
+ struct cvmx_npi_port32_instr_hdr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t pbp : 1; /**< Enable Packet-by-packet mode. */
+ uint64_t rsv_f : 5; /**< Reserved */
+ uint64_t rparmode : 2; /**< Parse Mode. Used when packet is raw and PBP==0. */
+ uint64_t rsv_e : 1; /**< Reserved */
+ uint64_t rskp_len : 7; /**< Skip Length. Used when packet is raw and PBP==0. */
+ uint64_t rsv_d : 6; /**< Reserved */
+ uint64_t use_ihdr : 1; /**< When set '1' the instruction header will be sent
+ as part of the packet data, regardless of the
+ value of bit [63] of the instruction header.
+ USE_IHDR must be set whenever PBP is set. */
+ uint64_t rsv_c : 5; /**< Reserved */
+ uint64_t par_mode : 2; /**< Parse Mode. Used when USE_IHDR is set and packet
+ is not raw and PBP is not set. */
+ uint64_t rsv_b : 1; /**< Reserved bit in the instruction header sent to IPD. */
+ uint64_t skp_len : 7; /**< Skip Length. Used when USE_IHDR is set and packet
+ is not raw and PBP is not set. */
+ uint64_t rsv_a : 6; /**< Reserved */
+#else
+ uint64_t rsv_a : 6;
+ uint64_t skp_len : 7;
+ uint64_t rsv_b : 1;
+ uint64_t par_mode : 2;
+ uint64_t rsv_c : 5;
+ uint64_t use_ihdr : 1;
+ uint64_t rsv_d : 6;
+ uint64_t rskp_len : 7;
+ uint64_t rsv_e : 1;
+ uint64_t rparmode : 2;
+ uint64_t rsv_f : 5;
+ uint64_t pbp : 1;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_npi_port32_instr_hdr_s cn30xx;
+ struct cvmx_npi_port32_instr_hdr_s cn31xx;
+ struct cvmx_npi_port32_instr_hdr_s cn38xx;
+ struct cvmx_npi_port32_instr_hdr_s cn38xxp2;
+ struct cvmx_npi_port32_instr_hdr_s cn50xx;
+ struct cvmx_npi_port32_instr_hdr_s cn58xx;
+ struct cvmx_npi_port32_instr_hdr_s cn58xxp1;
+};
+typedef union cvmx_npi_port32_instr_hdr cvmx_npi_port32_instr_hdr_t;
+
+/**
+ * cvmx_npi_port33_instr_hdr
+ *
+ * NPI_PORT33_INSTR_HDR = NPI Port 33 Instruction Header
+ *
+ * Contains bits [62:42] of the Instruction Header for port 33.
+ */
+union cvmx_npi_port33_instr_hdr {
+ uint64_t u64;
+ struct cvmx_npi_port33_instr_hdr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t pbp : 1; /**< Enable Packet-by-packet mode. */
+ uint64_t rsv_f : 5; /**< Reserved */
+ uint64_t rparmode : 2; /**< Parse Mode. Used when packet is raw and PBP==0. */
+ uint64_t rsv_e : 1; /**< Reserved */
+ uint64_t rskp_len : 7; /**< Skip Length. Used when packet is raw and PBP==0. */
+ uint64_t rsv_d : 6; /**< Reserved */
+ uint64_t use_ihdr : 1; /**< When set '1' the instruction header will be sent
+ as part of the packet data, regardless of the
+ value of bit [63] of the instruction header.
+ USE_IHDR must be set whenever PBP is set. */
+ uint64_t rsv_c : 5; /**< Reserved */
+ uint64_t par_mode : 2; /**< Parse Mode. Used when USE_IHDR is set and packet
+ is not raw and PBP is not set. */
+ uint64_t rsv_b : 1; /**< Reserved bit in the instruction header sent to IPD. */
+ uint64_t skp_len : 7; /**< Skip Length. Used when USE_IHDR is set and packet
+ is not raw and PBP is not set. */
+ uint64_t rsv_a : 6; /**< Reserved */
+#else
+ uint64_t rsv_a : 6;
+ uint64_t skp_len : 7;
+ uint64_t rsv_b : 1;
+ uint64_t par_mode : 2;
+ uint64_t rsv_c : 5;
+ uint64_t use_ihdr : 1;
+ uint64_t rsv_d : 6;
+ uint64_t rskp_len : 7;
+ uint64_t rsv_e : 1;
+ uint64_t rparmode : 2;
+ uint64_t rsv_f : 5;
+ uint64_t pbp : 1;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_npi_port33_instr_hdr_s cn31xx;
+ struct cvmx_npi_port33_instr_hdr_s cn38xx;
+ struct cvmx_npi_port33_instr_hdr_s cn38xxp2;
+ struct cvmx_npi_port33_instr_hdr_s cn50xx;
+ struct cvmx_npi_port33_instr_hdr_s cn58xx;
+ struct cvmx_npi_port33_instr_hdr_s cn58xxp1;
+};
+typedef union cvmx_npi_port33_instr_hdr cvmx_npi_port33_instr_hdr_t;
+
+/**
+ * cvmx_npi_port34_instr_hdr
+ *
+ * NPI_PORT34_INSTR_HDR = NPI Port 34 Instruction Header
+ *
+ * Contains bits [62:42] of the Instruction Header for port 34. Added for PASS-2.
+ */
+union cvmx_npi_port34_instr_hdr {
+ uint64_t u64;
+ struct cvmx_npi_port34_instr_hdr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t pbp : 1; /**< Enable Packet-by-packet mode. */
+ uint64_t rsv_f : 5; /**< Reserved */
+ uint64_t rparmode : 2; /**< Parse Mode. Used when packet is raw and PBP==0. */
+ uint64_t rsv_e : 1; /**< Reserved */
+ uint64_t rskp_len : 7; /**< Skip Length. Used when packet is raw and PBP==0. */
+ uint64_t rsv_d : 6; /**< Reserved */
+ uint64_t use_ihdr : 1; /**< When set '1' the instruction header will be sent
+ as part of the packet data, regardless of the
+ value of bit [63] of the instruction header.
+ USE_IHDR must be set whenever PBP is set. */
+ uint64_t rsv_c : 5; /**< Reserved */
+ uint64_t par_mode : 2; /**< Parse Mode. Used when USE_IHDR is set and packet
+ is not raw and PBP is not set. */
+ uint64_t rsv_b : 1; /**< Reserved bit in the instruction header sent to IPD. */
+ uint64_t skp_len : 7; /**< Skip Length. Used when USE_IHDR is set and packet
+ is not raw and PBP is not set. */
+ uint64_t rsv_a : 6; /**< Reserved */
+#else
+ uint64_t rsv_a : 6;
+ uint64_t skp_len : 7;
+ uint64_t rsv_b : 1;
+ uint64_t par_mode : 2;
+ uint64_t rsv_c : 5;
+ uint64_t use_ihdr : 1;
+ uint64_t rsv_d : 6;
+ uint64_t rskp_len : 7;
+ uint64_t rsv_e : 1;
+ uint64_t rparmode : 2;
+ uint64_t rsv_f : 5;
+ uint64_t pbp : 1;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_npi_port34_instr_hdr_s cn38xx;
+ struct cvmx_npi_port34_instr_hdr_s cn38xxp2;
+ struct cvmx_npi_port34_instr_hdr_s cn58xx;
+ struct cvmx_npi_port34_instr_hdr_s cn58xxp1;
+};
+typedef union cvmx_npi_port34_instr_hdr cvmx_npi_port34_instr_hdr_t;
+
+/**
+ * cvmx_npi_port35_instr_hdr
+ *
+ * NPI_PORT35_INSTR_HDR = NPI Port 35 Instruction Header
+ *
+ * Contains bits [62:42] of the Instruction Header for port 35. Added for PASS-2.
+ */
+union cvmx_npi_port35_instr_hdr {
+ uint64_t u64;
+ struct cvmx_npi_port35_instr_hdr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t pbp : 1; /**< Enable Packet-by-packet mode. */
+ uint64_t rsv_f : 5; /**< Reserved */
+ uint64_t rparmode : 2; /**< Parse Mode. Used when packet is raw and PBP==0. */
+ uint64_t rsv_e : 1; /**< Reserved */
+ uint64_t rskp_len : 7; /**< Skip Length. Used when packet is raw and PBP==0. */
+ uint64_t rsv_d : 6; /**< Reserved */
+ uint64_t use_ihdr : 1; /**< When set '1' the instruction header will be sent
+ as part of the packet data, regardless of the
+ value of bit [63] of the instruction header.
+ USE_IHDR must be set whenever PBP is set. */
+ uint64_t rsv_c : 5; /**< Reserved */
+ uint64_t par_mode : 2; /**< Parse Mode. Used when USE_IHDR is set and packet
+ is not raw and PBP is not set. */
+ uint64_t rsv_b : 1; /**< Reserved bit in the instruction header sent to IPD. */
+ uint64_t skp_len : 7; /**< Skip Length. Used when USE_IHDR is set and packet
+ is not raw and PBP is not set. */
+ uint64_t rsv_a : 6; /**< Reserved */
+#else
+ uint64_t rsv_a : 6;
+ uint64_t skp_len : 7;
+ uint64_t rsv_b : 1;
+ uint64_t par_mode : 2;
+ uint64_t rsv_c : 5;
+ uint64_t use_ihdr : 1;
+ uint64_t rsv_d : 6;
+ uint64_t rskp_len : 7;
+ uint64_t rsv_e : 1;
+ uint64_t rparmode : 2;
+ uint64_t rsv_f : 5;
+ uint64_t pbp : 1;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_npi_port35_instr_hdr_s cn38xx;
+ struct cvmx_npi_port35_instr_hdr_s cn38xxp2;
+ struct cvmx_npi_port35_instr_hdr_s cn58xx;
+ struct cvmx_npi_port35_instr_hdr_s cn58xxp1;
+};
+typedef union cvmx_npi_port35_instr_hdr cvmx_npi_port35_instr_hdr_t;
+
+/**
+ * cvmx_npi_port_bp_control
+ *
+ * NPI_PORT_BP_CONTROL = Port Backpressure Control
+ *
+ * Enables Port Level Backpressure
+ */
+union cvmx_npi_port_bp_control {
+ uint64_t u64;
+ struct cvmx_npi_port_bp_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t bp_on : 4; /**< Port 35-32 port level backpressure applied. */
+ uint64_t enb : 4; /**< Enables port level backpressure from the IPD. */
+#else
+ uint64_t enb : 4;
+ uint64_t bp_on : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_npi_port_bp_control_s cn30xx;
+ struct cvmx_npi_port_bp_control_s cn31xx;
+ struct cvmx_npi_port_bp_control_s cn38xx;
+ struct cvmx_npi_port_bp_control_s cn38xxp2;
+ struct cvmx_npi_port_bp_control_s cn50xx;
+ struct cvmx_npi_port_bp_control_s cn58xx;
+ struct cvmx_npi_port_bp_control_s cn58xxp1;
+};
+typedef union cvmx_npi_port_bp_control cvmx_npi_port_bp_control_t;
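+
+/* Usage sketch: enabling IPD port-level backpressure for ports 32-35 by
+ * setting the corresponding ENB bits. A minimal illustration, assuming
+ * cvmx_write_csr() from cvmx.h and bit n of the mask mapping to port 32+n. */
+static inline void cvmx_npi_enable_port_bp(uint64_t csr_addr, unsigned port_mask)
+{
+    cvmx_npi_port_bp_control_t bp;
+    bp.u64 = 0;
+    bp.s.enb = port_mask & 0xf;  /* one enable bit per port 32..35 */
+    cvmx_write_csr(csr_addr, bp.u64);
+}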
+
+/**
+ * cvmx_npi_rsl_int_blocks
+ *
+ * RSL_INT_BLOCKS = RSL Interrupt Blocks Register
+ *
+ * Reading this register will return a vector with a bit set '1' for a corresponding RSL block
+ * that presently has an interrupt pending. The Field Description below supplies the name of the
+ * register that software should read to find out why that interrupt bit is set.
+ */
+union cvmx_npi_rsl_int_blocks {
+ uint64_t u64;
+ struct cvmx_npi_rsl_int_blocks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rint_31 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t iob : 1; /**< IOB_INT_SUM */
+ uint64_t reserved_28_29 : 2;
+ uint64_t rint_27 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_26 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_25 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_24 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t asx1 : 1; /**< ASX1_INT_REG */
+ uint64_t asx0 : 1; /**< ASX0_INT_REG */
+ uint64_t rint_21 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t pip : 1; /**< PIP_INT_REG. */
+ uint64_t spx1 : 1; /**< SPX1_INT_REG & STX1_INT_REG */
+ uint64_t spx0 : 1; /**< SPX0_INT_REG & STX0_INT_REG */
+ uint64_t lmc : 1; /**< LMC_MEM_CFG0 */
+ uint64_t l2c : 1; /**< L2T_ERR & L2D_ERR */
+ uint64_t rint_15 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t reserved_13_14 : 2;
+ uint64_t pow : 1; /**< POW_ECC_ERR */
+ uint64_t tim : 1; /**< TIM_REG_ERROR */
+ uint64_t pko : 1; /**< PKO_REG_ERROR */
+ uint64_t ipd : 1; /**< IPD_INT_SUM */
+ uint64_t rint_8 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t zip : 1; /**< ZIP_ERROR */
+ uint64_t dfa : 1; /**< DFA_ERR */
+ uint64_t fpa : 1; /**< FPA_INT_SUM */
+ uint64_t key : 1; /**< KEY_INT_SUM */
+ uint64_t npi : 1; /**< NPI_INT_SUM */
+ uint64_t gmx1 : 1; /**< GMX1_RX*_INT_REG & GMX1_TX_INT_REG */
+ uint64_t gmx0 : 1; /**< GMX0_RX*_INT_REG & GMX0_TX_INT_REG */
+ uint64_t mio : 1; /**< MIO_BOOT_ERR */
+#else
+ uint64_t mio : 1;
+ uint64_t gmx0 : 1;
+ uint64_t gmx1 : 1;
+ uint64_t npi : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rint_8 : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t tim : 1;
+ uint64_t pow : 1;
+ uint64_t reserved_13_14 : 2;
+ uint64_t rint_15 : 1;
+ uint64_t l2c : 1;
+ uint64_t lmc : 1;
+ uint64_t spx0 : 1;
+ uint64_t spx1 : 1;
+ uint64_t pip : 1;
+ uint64_t rint_21 : 1;
+ uint64_t asx0 : 1;
+ uint64_t asx1 : 1;
+ uint64_t rint_24 : 1;
+ uint64_t rint_25 : 1;
+ uint64_t rint_26 : 1;
+ uint64_t rint_27 : 1;
+ uint64_t reserved_28_29 : 2;
+ uint64_t iob : 1;
+ uint64_t rint_31 : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npi_rsl_int_blocks_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rint_31 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t iob : 1; /**< IOB_INT_SUM */
+ uint64_t rint_29 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_28 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_27 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_26 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_25 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_24 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t asx1 : 1; /**< ASX1_INT_REG */
+ uint64_t asx0 : 1; /**< ASX0_INT_REG */
+ uint64_t rint_21 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t pip : 1; /**< PIP_INT_REG. */
+ uint64_t spx1 : 1; /**< SPX1_INT_REG & STX1_INT_REG */
+ uint64_t spx0 : 1; /**< SPX0_INT_REG & STX0_INT_REG */
+ uint64_t lmc : 1; /**< LMC_MEM_CFG0 */
+ uint64_t l2c : 1; /**< L2T_ERR & L2D_ERR */
+ uint64_t rint_15 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_14 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t usb : 1; /**< USBN_INT_SUM */
+ uint64_t pow : 1; /**< POW_ECC_ERR */
+ uint64_t tim : 1; /**< TIM_REG_ERROR */
+ uint64_t pko : 1; /**< PKO_REG_ERROR */
+ uint64_t ipd : 1; /**< IPD_INT_SUM */
+ uint64_t rint_8 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t zip : 1; /**< ZIP_ERROR */
+ uint64_t dfa : 1; /**< DFA_ERR */
+ uint64_t fpa : 1; /**< FPA_INT_SUM */
+ uint64_t key : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t npi : 1; /**< NPI_INT_SUM */
+ uint64_t gmx1 : 1; /**< GMX1_RX*_INT_REG & GMX1_TX_INT_REG */
+ uint64_t gmx0 : 1; /**< GMX0_RX*_INT_REG & GMX0_TX_INT_REG */
+ uint64_t mio : 1; /**< MIO_BOOT_ERR */
+#else
+ uint64_t mio : 1;
+ uint64_t gmx0 : 1;
+ uint64_t gmx1 : 1;
+ uint64_t npi : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rint_8 : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t tim : 1;
+ uint64_t pow : 1;
+ uint64_t usb : 1;
+ uint64_t rint_14 : 1;
+ uint64_t rint_15 : 1;
+ uint64_t l2c : 1;
+ uint64_t lmc : 1;
+ uint64_t spx0 : 1;
+ uint64_t spx1 : 1;
+ uint64_t pip : 1;
+ uint64_t rint_21 : 1;
+ uint64_t asx0 : 1;
+ uint64_t asx1 : 1;
+ uint64_t rint_24 : 1;
+ uint64_t rint_25 : 1;
+ uint64_t rint_26 : 1;
+ uint64_t rint_27 : 1;
+ uint64_t rint_28 : 1;
+ uint64_t rint_29 : 1;
+ uint64_t iob : 1;
+ uint64_t rint_31 : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn30xx;
+ struct cvmx_npi_rsl_int_blocks_cn30xx cn31xx;
+ struct cvmx_npi_rsl_int_blocks_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rint_31 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t iob : 1; /**< IOB_INT_SUM */
+ uint64_t rint_29 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_28 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_27 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_26 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_25 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_24 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t asx1 : 1; /**< ASX1_INT_REG */
+ uint64_t asx0 : 1; /**< ASX0_INT_REG */
+ uint64_t rint_21 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t pip : 1; /**< PIP_INT_REG. */
+ uint64_t spx1 : 1; /**< SPX1_INT_REG & STX1_INT_REG */
+ uint64_t spx0 : 1; /**< SPX0_INT_REG & STX0_INT_REG */
+ uint64_t lmc : 1; /**< LMC_MEM_CFG0 */
+ uint64_t l2c : 1; /**< L2T_ERR & L2D_ERR */
+ uint64_t rint_15 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_14 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t rint_13 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t pow : 1; /**< POW_ECC_ERR */
+ uint64_t tim : 1; /**< TIM_REG_ERROR */
+ uint64_t pko : 1; /**< PKO_REG_ERROR */
+ uint64_t ipd : 1; /**< IPD_INT_SUM */
+ uint64_t rint_8 : 1; /**< Set '1' when RSL block has an interrupt. */
+ uint64_t zip : 1; /**< ZIP_ERROR */
+ uint64_t dfa : 1; /**< DFA_ERR */
+ uint64_t fpa : 1; /**< FPA_INT_SUM */
+ uint64_t key : 1; /**< KEY_INT_SUM */
+ uint64_t npi : 1; /**< NPI_INT_SUM */
+ uint64_t gmx1 : 1; /**< GMX1_RX*_INT_REG & GMX1_TX_INT_REG */
+ uint64_t gmx0 : 1; /**< GMX0_RX*_INT_REG & GMX0_TX_INT_REG */
+ uint64_t mio : 1; /**< MIO_BOOT_ERR */
+#else
+ uint64_t mio : 1;
+ uint64_t gmx0 : 1;
+ uint64_t gmx1 : 1;
+ uint64_t npi : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rint_8 : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t tim : 1;
+ uint64_t pow : 1;
+ uint64_t rint_13 : 1;
+ uint64_t rint_14 : 1;
+ uint64_t rint_15 : 1;
+ uint64_t l2c : 1;
+ uint64_t lmc : 1;
+ uint64_t spx0 : 1;
+ uint64_t spx1 : 1;
+ uint64_t pip : 1;
+ uint64_t rint_21 : 1;
+ uint64_t asx0 : 1;
+ uint64_t asx1 : 1;
+ uint64_t rint_24 : 1;
+ uint64_t rint_25 : 1;
+ uint64_t rint_26 : 1;
+ uint64_t rint_27 : 1;
+ uint64_t rint_28 : 1;
+ uint64_t rint_29 : 1;
+ uint64_t iob : 1;
+ uint64_t rint_31 : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn38xx;
+ struct cvmx_npi_rsl_int_blocks_cn38xx cn38xxp2;
+ struct cvmx_npi_rsl_int_blocks_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t iob : 1; /**< IOB_INT_SUM */
+ uint64_t lmc1 : 1; /**< Always reads as zero */
+ uint64_t agl : 1; /**< Always reads as zero */
+ uint64_t reserved_24_27 : 4;
+ uint64_t asx1 : 1; /**< Always reads as zero */
+ uint64_t asx0 : 1; /**< ASX0_INT_REG */
+ uint64_t reserved_21_21 : 1;
+ uint64_t pip : 1; /**< PIP_INT_REG. */
+ uint64_t spx1 : 1; /**< Always reads as zero */
+ uint64_t spx0 : 1; /**< Always reads as zero */
+ uint64_t lmc : 1; /**< LMC_MEM_CFG0 */
+ uint64_t l2c : 1; /**< L2T_ERR & L2D_ERR */
+ uint64_t reserved_15_15 : 1;
+ uint64_t rad : 1; /**< Always reads as zero */
+ uint64_t usb : 1; /**< USBN_INT_SUM */
+ uint64_t pow : 1; /**< POW_ECC_ERR */
+ uint64_t tim : 1; /**< TIM_REG_ERROR */
+ uint64_t pko : 1; /**< PKO_REG_ERROR */
+ uint64_t ipd : 1; /**< IPD_INT_SUM */
+ uint64_t reserved_8_8 : 1;
+ uint64_t zip : 1; /**< Always reads as zero */
+ uint64_t dfa : 1; /**< Always reads as zero */
+ uint64_t fpa : 1; /**< FPA_INT_SUM */
+ uint64_t key : 1; /**< Always reads as zero */
+ uint64_t npi : 1; /**< NPI_INT_SUM */
+ uint64_t gmx1 : 1; /**< Always reads as zero */
+ uint64_t gmx0 : 1; /**< GMX0_RX*_INT_REG & GMX0_TX_INT_REG */
+ uint64_t mio : 1; /**< MIO_BOOT_ERR */
+#else
+ uint64_t mio : 1;
+ uint64_t gmx0 : 1;
+ uint64_t gmx1 : 1;
+ uint64_t npi : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t tim : 1;
+ uint64_t pow : 1;
+ uint64_t usb : 1;
+ uint64_t rad : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t l2c : 1;
+ uint64_t lmc : 1;
+ uint64_t spx0 : 1;
+ uint64_t spx1 : 1;
+ uint64_t pip : 1;
+ uint64_t reserved_21_21 : 1;
+ uint64_t asx0 : 1;
+ uint64_t asx1 : 1;
+ uint64_t reserved_24_27 : 4;
+ uint64_t agl : 1;
+ uint64_t lmc1 : 1;
+ uint64_t iob : 1;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn50xx;
+ struct cvmx_npi_rsl_int_blocks_cn38xx cn58xx;
+ struct cvmx_npi_rsl_int_blocks_cn38xx cn58xxp1;
+};
+typedef union cvmx_npi_rsl_int_blocks cvmx_npi_rsl_int_blocks_t;
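+
+/* Usage sketch: polling the RSL interrupt summary and dispatching on the
+ * per-block bits, as described above. A minimal illustration, assuming
+ * cvmx_read_csr() from cvmx.h; the handlers are placeholders. */
+static inline void cvmx_npi_poll_rsl_blocks(uint64_t csr_addr)
+{
+    cvmx_npi_rsl_int_blocks_t blocks;
+    blocks.u64 = cvmx_read_csr(csr_addr);
+    if (blocks.s.npi) { /* read NPI_INT_SUM to find the cause */ }
+    if (blocks.s.ipd) { /* read IPD_INT_SUM */ }
+    if (blocks.s.pko) { /* read PKO_REG_ERROR */ }
+}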
+
+/**
+ * cvmx_npi_size_input#
+ *
+ * NPI_SIZE_INPUT0 = NPI's Size for Input 0 Register
+ *
+ * The size (in instructions) of Instruction Queue-0.
+ */
+union cvmx_npi_size_inputx {
+ uint64_t u64;
+ struct cvmx_npi_size_inputx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t size : 32; /**< The size of the Instruction Queue used by OCTEON.
+ The value [SIZE] is in Instructions.
+ A value of 0 in this field is illegal. */
+#else
+ uint64_t size : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npi_size_inputx_s cn30xx;
+ struct cvmx_npi_size_inputx_s cn31xx;
+ struct cvmx_npi_size_inputx_s cn38xx;
+ struct cvmx_npi_size_inputx_s cn38xxp2;
+ struct cvmx_npi_size_inputx_s cn50xx;
+ struct cvmx_npi_size_inputx_s cn58xx;
+ struct cvmx_npi_size_inputx_s cn58xxp1;
+};
+typedef union cvmx_npi_size_inputx cvmx_npi_size_inputx_t;
+
+/**
+ * cvmx_npi_win_read_to
+ *
+ * NPI_WIN_READ_TO = NPI WINDOW READ Timeout Register
+ *
+ * Number of core clocks to wait before timing out on a WINDOW-READ to the NCB.
+ */
+union cvmx_npi_win_read_to {
+ uint64_t u64;
+ struct cvmx_npi_win_read_to_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t time : 32; /**< Time to wait in core clocks. A value of 0 will
+ cause no timeouts. */
+#else
+ uint64_t time : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_npi_win_read_to_s cn30xx;
+ struct cvmx_npi_win_read_to_s cn31xx;
+ struct cvmx_npi_win_read_to_s cn38xx;
+ struct cvmx_npi_win_read_to_s cn38xxp2;
+ struct cvmx_npi_win_read_to_s cn50xx;
+ struct cvmx_npi_win_read_to_s cn58xx;
+ struct cvmx_npi_win_read_to_s cn58xxp1;
+};
+typedef union cvmx_npi_win_read_to cvmx_npi_win_read_to_t;
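+
+/* Usage sketch: arming the window-read timeout. TIME is in core clocks and
+ * 0 disables timeouts entirely. A minimal illustration, assuming
+ * cvmx_write_csr() from cvmx.h. */
+static inline void cvmx_npi_set_win_read_timeout(uint64_t csr_addr, uint32_t core_clocks)
+{
+    cvmx_npi_win_read_to_t to;
+    to.u64 = 0;
+    to.s.time = core_clocks;  /* 0 means never time out */
+    cvmx_write_csr(csr_addr, to.u64);
+}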
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-npi-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-npi.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-npi.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-npi.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,150 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * PCI / PCIe packet engine related structures.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_NPI_H__
+#define __CVMX_NPI_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * PCI / PCIe packet instruction header format
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t r : 1; /**< Packet is RAW */
+ uint64_t g : 1; /**< Gather list is used */
+ uint64_t dlengsz : 14; /**< Data length / Gather list size */
+ uint64_t fsz : 6; /**< Front data size */
+ uint64_t qos : 3; /**< POW QoS queue */
+ uint64_t grp : 4; /**< POW Group */
+ uint64_t rs : 1; /**< Real short */
+ cvmx_pow_tag_type_t tt : 2; /**< POW Tag type */
+ uint64_t tag : 32; /**< POW 32 bit tag */
+#else
+ uint64_t tag : 32;
+ cvmx_pow_tag_type_t tt : 2;
+ uint64_t rs : 1;
+ uint64_t grp : 4;
+ uint64_t qos : 3;
+ uint64_t fsz : 6;
+ uint64_t dlengsz : 14;
+ uint64_t g : 1;
+ uint64_t r : 1;
+#endif
+ } s;
+} cvmx_npi_inst_hdr_t;
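+
+/* Usage sketch: composing an instruction header for a gather-list send. A
+ * minimal illustration; CVMX_POW_TAG_TYPE_ORDERED comes from the POW enums
+ * elsewhere in this SDK, and the field values are arbitrary examples. */
+static inline uint64_t cvmx_npi_build_inst_hdr(uint32_t tag, int grp, int num_bufs)
+{
+    cvmx_npi_inst_hdr_t hdr;
+    hdr.u64 = 0;
+    hdr.s.g = 1;              /* DPTR points at a gather list */
+    hdr.s.dlengsz = num_bufs; /* gather list size when G is set */
+    hdr.s.grp = grp;          /* POW group for the resulting work */
+    hdr.s.tt = CVMX_POW_TAG_TYPE_ORDERED;
+    hdr.s.tag = tag;          /* 32-bit POW tag */
+    return hdr.u64;
+}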
+
+/**
+ * PCI / PCIe packet data pointer formats 0-3
+ */
+typedef union
+{
+ uint64_t dptr0;
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t es : 2; /**< Endian swap mode */
+ uint64_t ns : 1; /**< No snoop */
+ uint64_t ro : 1; /**< Relaxed ordering */
+ uint64_t addr : 60; /**< PCI/PCIe address */
+#else
+ uint64_t addr : 60;
+ uint64_t ro : 1;
+ uint64_t ns : 1;
+ uint64_t es : 2;
+#endif
+ } dptr1;
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pm : 2; /**< Parse mode */
+ uint64_t sl : 7; /**< Skip length */
+ uint64_t addr : 55; /**< PCI/PCIe address */
+#else
+ uint64_t addr : 55;
+ uint64_t sl : 7;
+ uint64_t pm : 2;
+#endif
+ } dptr2;
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t es : 2; /**< Endian swap mode */
+ uint64_t ns : 1; /**< No snoop */
+ uint64_t ro : 1; /**< Relaxed ordering */
+ uint64_t pm : 2; /**< Parse mode */
+ uint64_t sl : 7; /**< Skip length */
+ uint64_t addr : 51; /**< PCI/PCIe address */
+#else
+ uint64_t addr : 51;
+ uint64_t sl : 7;
+ uint64_t pm : 2;
+ uint64_t ro : 1;
+ uint64_t ns : 1;
+ uint64_t es : 2;
+#endif
+ } dptr3;
+} cvmx_npi_dptr_t;
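+
+/* Usage sketch: composing a format-1 data pointer. A minimal illustration;
+ * the attribute values are arbitrary examples and ADDR is truncated to its
+ * 60-bit field. */
+static inline uint64_t cvmx_npi_build_dptr1(uint64_t pci_addr)
+{
+    cvmx_npi_dptr_t dptr;
+    dptr.dptr0 = 0;
+    dptr.dptr1.addr = pci_addr; /* low 60 bits of the PCI/PCIe address */
+    dptr.dptr1.es = 0;          /* no endian swap */
+    dptr.dptr1.ns = 0;          /* snooped */
+    dptr.dptr1.ro = 0;          /* strict ordering */
+    return dptr.dptr0;
+}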
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_NPI_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-npi.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-packet.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-packet.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-packet.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,82 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Packet buffer defines.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifndef __CVMX_PACKET_H__
+#define __CVMX_PACKET_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * This structure defines a buffer pointer on Octeon
+ */
+union cvmx_buf_ptr {
+ void* ptr;
+ uint64_t u64;
+ struct
+ {
+ uint64_t i : 1; /**< if set, invert the "free" pick of the overall packet. HW always sets this bit to 0 on inbound packet */
+ uint64_t back : 4; /**< Indicates the amount to back up to get to the buffer start in cache lines. In most cases
+ this is less than one complete cache line, so the value is zero */
+ uint64_t pool : 3; /**< The pool that the buffer came from / goes to */
+ uint64_t size :16; /**< The size of the segment pointed to by addr (in bytes) */
+ uint64_t addr :40; /**< Pointer to the first byte of the data, NOT buffer */
+ } s;
+};
+
+typedef union cvmx_buf_ptr cvmx_buf_ptr_t;
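+
+/* Usage sketch: recovering the data pointer and the start of the underlying
+ * buffer. BACK counts 128-byte cache lines from the aligned address down to
+ * the buffer start. A minimal illustration, assuming cvmx_phys_to_ptr()
+ * from cvmx-access.h. */
+static inline void *cvmx_buf_ptr_data(cvmx_buf_ptr_t buf)
+{
+    return cvmx_phys_to_ptr(buf.s.addr); /* first byte of packet data */
+}
+static inline void *cvmx_buf_ptr_buffer_start(cvmx_buf_ptr_t buf)
+{
+    /* Round down to a cache line, then back up BACK cache lines */
+    return cvmx_phys_to_ptr(((buf.s.addr >> 7) - buf.s.back) << 7);
+}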
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_PACKET_H__ */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-packet.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pci-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pci-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pci-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,4586 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pci-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pci.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_PCI_DEFS_H__
+#define __CVMX_PCI_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCI_BAR1_INDEXX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PCI_BAR1_INDEXX(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000100ull + ((offset) & 31) * 4;
+}
+#else
+#define CVMX_PCI_BAR1_INDEXX(offset) (0x0000000000000100ull + ((offset) & 31) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_BIST_REG CVMX_PCI_BIST_REG_FUNC()
+static inline uint64_t CVMX_PCI_BIST_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN50XX)))
+ cvmx_warn("CVMX_PCI_BIST_REG not supported on this chip\n");
+ return 0x00000000000001C0ull;
+}
+#else
+#define CVMX_PCI_BIST_REG (0x00000000000001C0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG00 CVMX_PCI_CFG00_FUNC()
+static inline uint64_t CVMX_PCI_CFG00_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG00 not supported on this chip\n");
+ return 0x0000000000000000ull;
+}
+#else
+#define CVMX_PCI_CFG00 (0x0000000000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG01 CVMX_PCI_CFG01_FUNC()
+static inline uint64_t CVMX_PCI_CFG01_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG01 not supported on this chip\n");
+ return 0x0000000000000004ull;
+}
+#else
+#define CVMX_PCI_CFG01 (0x0000000000000004ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG02 CVMX_PCI_CFG02_FUNC()
+static inline uint64_t CVMX_PCI_CFG02_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG02 not supported on this chip\n");
+ return 0x0000000000000008ull;
+}
+#else
+#define CVMX_PCI_CFG02 (0x0000000000000008ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG03 CVMX_PCI_CFG03_FUNC()
+static inline uint64_t CVMX_PCI_CFG03_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG03 not supported on this chip\n");
+ return 0x000000000000000Cull;
+}
+#else
+#define CVMX_PCI_CFG03 (0x000000000000000Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG04 CVMX_PCI_CFG04_FUNC()
+static inline uint64_t CVMX_PCI_CFG04_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG04 not supported on this chip\n");
+ return 0x0000000000000010ull;
+}
+#else
+#define CVMX_PCI_CFG04 (0x0000000000000010ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG05 CVMX_PCI_CFG05_FUNC()
+static inline uint64_t CVMX_PCI_CFG05_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG05 not supported on this chip\n");
+ return 0x0000000000000014ull;
+}
+#else
+#define CVMX_PCI_CFG05 (0x0000000000000014ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG06 CVMX_PCI_CFG06_FUNC()
+static inline uint64_t CVMX_PCI_CFG06_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG06 not supported on this chip\n");
+ return 0x0000000000000018ull;
+}
+#else
+#define CVMX_PCI_CFG06 (0x0000000000000018ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG07 CVMX_PCI_CFG07_FUNC()
+static inline uint64_t CVMX_PCI_CFG07_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG07 not supported on this chip\n");
+ return 0x000000000000001Cull;
+}
+#else
+#define CVMX_PCI_CFG07 (0x000000000000001Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG08 CVMX_PCI_CFG08_FUNC()
+static inline uint64_t CVMX_PCI_CFG08_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG08 not supported on this chip\n");
+ return 0x0000000000000020ull;
+}
+#else
+#define CVMX_PCI_CFG08 (0x0000000000000020ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG09 CVMX_PCI_CFG09_FUNC()
+static inline uint64_t CVMX_PCI_CFG09_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG09 not supported on this chip\n");
+ return 0x0000000000000024ull;
+}
+#else
+#define CVMX_PCI_CFG09 (0x0000000000000024ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG10 CVMX_PCI_CFG10_FUNC()
+static inline uint64_t CVMX_PCI_CFG10_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG10 not supported on this chip\n");
+ return 0x0000000000000028ull;
+}
+#else
+#define CVMX_PCI_CFG10 (0x0000000000000028ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG11 CVMX_PCI_CFG11_FUNC()
+static inline uint64_t CVMX_PCI_CFG11_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG11 not supported on this chip\n");
+ return 0x000000000000002Cull;
+}
+#else
+#define CVMX_PCI_CFG11 (0x000000000000002Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG12 CVMX_PCI_CFG12_FUNC()
+static inline uint64_t CVMX_PCI_CFG12_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG12 not supported on this chip\n");
+ return 0x0000000000000030ull;
+}
+#else
+#define CVMX_PCI_CFG12 (0x0000000000000030ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG13 CVMX_PCI_CFG13_FUNC()
+static inline uint64_t CVMX_PCI_CFG13_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG13 not supported on this chip\n");
+ return 0x0000000000000034ull;
+}
+#else
+#define CVMX_PCI_CFG13 (0x0000000000000034ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG15 CVMX_PCI_CFG15_FUNC()
+static inline uint64_t CVMX_PCI_CFG15_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG15 not supported on this chip\n");
+ return 0x000000000000003Cull;
+}
+#else
+#define CVMX_PCI_CFG15 (0x000000000000003Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG16 CVMX_PCI_CFG16_FUNC()
+static inline uint64_t CVMX_PCI_CFG16_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG16 not supported on this chip\n");
+ return 0x0000000000000040ull;
+}
+#else
+#define CVMX_PCI_CFG16 (0x0000000000000040ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG17 CVMX_PCI_CFG17_FUNC()
+static inline uint64_t CVMX_PCI_CFG17_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG17 not supported on this chip\n");
+ return 0x0000000000000044ull;
+}
+#else
+#define CVMX_PCI_CFG17 (0x0000000000000044ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG18 CVMX_PCI_CFG18_FUNC()
+static inline uint64_t CVMX_PCI_CFG18_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG18 not supported on this chip\n");
+ return 0x0000000000000048ull;
+}
+#else
+#define CVMX_PCI_CFG18 (0x0000000000000048ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG19 CVMX_PCI_CFG19_FUNC()
+static inline uint64_t CVMX_PCI_CFG19_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG19 not supported on this chip\n");
+ return 0x000000000000004Cull;
+}
+#else
+#define CVMX_PCI_CFG19 (0x000000000000004Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG20 CVMX_PCI_CFG20_FUNC()
+static inline uint64_t CVMX_PCI_CFG20_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG20 not supported on this chip\n");
+ return 0x0000000000000050ull;
+}
+#else
+#define CVMX_PCI_CFG20 (0x0000000000000050ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG21 CVMX_PCI_CFG21_FUNC()
+static inline uint64_t CVMX_PCI_CFG21_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG21 not supported on this chip\n");
+ return 0x0000000000000054ull;
+}
+#else
+#define CVMX_PCI_CFG21 (0x0000000000000054ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG22 CVMX_PCI_CFG22_FUNC()
+static inline uint64_t CVMX_PCI_CFG22_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG22 not supported on this chip\n");
+ return 0x0000000000000058ull;
+}
+#else
+#define CVMX_PCI_CFG22 (0x0000000000000058ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG56 CVMX_PCI_CFG56_FUNC()
+static inline uint64_t CVMX_PCI_CFG56_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG56 not supported on this chip\n");
+ return 0x00000000000000E0ull;
+}
+#else
+#define CVMX_PCI_CFG56 (0x00000000000000E0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG57 CVMX_PCI_CFG57_FUNC()
+static inline uint64_t CVMX_PCI_CFG57_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG57 not supported on this chip\n");
+ return 0x00000000000000E4ull;
+}
+#else
+#define CVMX_PCI_CFG57 (0x00000000000000E4ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG58 CVMX_PCI_CFG58_FUNC()
+static inline uint64_t CVMX_PCI_CFG58_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG58 not supported on this chip\n");
+ return 0x00000000000000E8ull;
+}
+#else
+#define CVMX_PCI_CFG58 (0x00000000000000E8ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG59 CVMX_PCI_CFG59_FUNC()
+static inline uint64_t CVMX_PCI_CFG59_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG59 not supported on this chip\n");
+ return 0x00000000000000ECull;
+}
+#else
+#define CVMX_PCI_CFG59 (0x00000000000000ECull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG60 CVMX_PCI_CFG60_FUNC()
+static inline uint64_t CVMX_PCI_CFG60_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG60 not supported on this chip\n");
+ return 0x00000000000000F0ull;
+}
+#else
+#define CVMX_PCI_CFG60 (0x00000000000000F0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG61 CVMX_PCI_CFG61_FUNC()
+static inline uint64_t CVMX_PCI_CFG61_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG61 not supported on this chip\n");
+ return 0x00000000000000F4ull;
+}
+#else
+#define CVMX_PCI_CFG61 (0x00000000000000F4ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG62 CVMX_PCI_CFG62_FUNC()
+static inline uint64_t CVMX_PCI_CFG62_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG62 not supported on this chip\n");
+ return 0x00000000000000F8ull;
+}
+#else
+#define CVMX_PCI_CFG62 (0x00000000000000F8ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CFG63 CVMX_PCI_CFG63_FUNC()
+static inline uint64_t CVMX_PCI_CFG63_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CFG63 not supported on this chip\n");
+ return 0x00000000000000FCull;
+}
+#else
+#define CVMX_PCI_CFG63 (0x00000000000000FCull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CNT_REG CVMX_PCI_CNT_REG_FUNC()
+static inline uint64_t CVMX_PCI_CNT_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CNT_REG not supported on this chip\n");
+ return 0x00000000000001B8ull;
+}
+#else
+#define CVMX_PCI_CNT_REG (0x00000000000001B8ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_CTL_STATUS_2 CVMX_PCI_CTL_STATUS_2_FUNC()
+static inline uint64_t CVMX_PCI_CTL_STATUS_2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_CTL_STATUS_2 not supported on this chip\n");
+ return 0x000000000000018Cull;
+}
+#else
+#define CVMX_PCI_CTL_STATUS_2 (0x000000000000018Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCI_DBELL_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCI_DBELL_X(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000080ull + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_PCI_DBELL_X(offset) (0x0000000000000080ull + ((offset) & 3) * 8)
+#endif
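+/* Illustrative sketch (not part of the original SDK): the doorbell CSRs sit
+ * 8 bytes apart starting at offset 0x80, so ring N's offset is a simple
+ * strided address; the `& 3` mask mirrors the macro above, and the checked
+ * variant additionally warns via cvmx_warn() on unsupported rings.
+ */
+static inline uint64_t cvmx_pci_dbell_example_offset(unsigned int ring)
+{
+    /* same arithmetic CVMX_PCI_DBELL_X performs */
+    return 0x0000000000000080ull + ((uint64_t)ring & 3) * 8;
+}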
+#define CVMX_PCI_DMA_CNT0 CVMX_PCI_DMA_CNTX(0)
+#define CVMX_PCI_DMA_CNT1 CVMX_PCI_DMA_CNTX(1)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCI_DMA_CNTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PCI_DMA_CNTX(%lu) is invalid on this chip\n", offset);
+ return 0x00000000000000A0ull + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_PCI_DMA_CNTX(offset) (0x00000000000000A0ull + ((offset) & 1) * 8)
+#endif
+#define CVMX_PCI_DMA_INT_LEV0 CVMX_PCI_DMA_INT_LEVX(0)
+#define CVMX_PCI_DMA_INT_LEV1 CVMX_PCI_DMA_INT_LEVX(1)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCI_DMA_INT_LEVX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PCI_DMA_INT_LEVX(%lu) is invalid on this chip\n", offset);
+ return 0x00000000000000A4ull + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_PCI_DMA_INT_LEVX(offset) (0x00000000000000A4ull + ((offset) & 1) * 8)
+#endif
+#define CVMX_PCI_DMA_TIME0 CVMX_PCI_DMA_TIMEX(0)
+#define CVMX_PCI_DMA_TIME1 CVMX_PCI_DMA_TIMEX(1)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCI_DMA_TIMEX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PCI_DMA_TIMEX(%lu) is invalid on this chip\n", offset);
+ return 0x00000000000000B0ull + ((offset) & 1) * 4;
+}
+#else
+#define CVMX_PCI_DMA_TIMEX(offset) (0x00000000000000B0ull + ((offset) & 1) * 4)
+#endif
+#define CVMX_PCI_INSTR_COUNT0 CVMX_PCI_INSTR_COUNTX(0)
+#define CVMX_PCI_INSTR_COUNT1 CVMX_PCI_INSTR_COUNTX(1)
+#define CVMX_PCI_INSTR_COUNT2 CVMX_PCI_INSTR_COUNTX(2)
+#define CVMX_PCI_INSTR_COUNT3 CVMX_PCI_INSTR_COUNTX(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCI_INSTR_COUNTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCI_INSTR_COUNTX(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000084ull + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_PCI_INSTR_COUNTX(offset) (0x0000000000000084ull + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_INT_ENB CVMX_PCI_INT_ENB_FUNC()
+static inline uint64_t CVMX_PCI_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_INT_ENB not supported on this chip\n");
+ return 0x0000000000000038ull;
+}
+#else
+#define CVMX_PCI_INT_ENB (0x0000000000000038ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_INT_ENB2 CVMX_PCI_INT_ENB2_FUNC()
+static inline uint64_t CVMX_PCI_INT_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_INT_ENB2 not supported on this chip\n");
+ return 0x00000000000001A0ull;
+}
+#else
+#define CVMX_PCI_INT_ENB2 (0x00000000000001A0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_INT_SUM CVMX_PCI_INT_SUM_FUNC()
+static inline uint64_t CVMX_PCI_INT_SUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_INT_SUM not supported on this chip\n");
+ return 0x0000000000000030ull;
+}
+#else
+#define CVMX_PCI_INT_SUM (0x0000000000000030ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_INT_SUM2 CVMX_PCI_INT_SUM2_FUNC()
+static inline uint64_t CVMX_PCI_INT_SUM2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_INT_SUM2 not supported on this chip\n");
+ return 0x0000000000000198ull;
+}
+#else
+#define CVMX_PCI_INT_SUM2 (0x0000000000000198ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_MSI_RCV CVMX_PCI_MSI_RCV_FUNC()
+static inline uint64_t CVMX_PCI_MSI_RCV_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_MSI_RCV not supported on this chip\n");
+ return 0x00000000000000F0ull;
+}
+#else
+#define CVMX_PCI_MSI_RCV (0x00000000000000F0ull)
+#endif
+#define CVMX_PCI_PKTS_SENT0 CVMX_PCI_PKTS_SENTX(0)
+#define CVMX_PCI_PKTS_SENT1 CVMX_PCI_PKTS_SENTX(1)
+#define CVMX_PCI_PKTS_SENT2 CVMX_PCI_PKTS_SENTX(2)
+#define CVMX_PCI_PKTS_SENT3 CVMX_PCI_PKTS_SENTX(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCI_PKTS_SENTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCI_PKTS_SENTX(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000040ull + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_PCI_PKTS_SENTX(offset) (0x0000000000000040ull + ((offset) & 3) * 16)
+#endif
+#define CVMX_PCI_PKTS_SENT_INT_LEV0 CVMX_PCI_PKTS_SENT_INT_LEVX(0)
+#define CVMX_PCI_PKTS_SENT_INT_LEV1 CVMX_PCI_PKTS_SENT_INT_LEVX(1)
+#define CVMX_PCI_PKTS_SENT_INT_LEV2 CVMX_PCI_PKTS_SENT_INT_LEVX(2)
+#define CVMX_PCI_PKTS_SENT_INT_LEV3 CVMX_PCI_PKTS_SENT_INT_LEVX(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCI_PKTS_SENT_INT_LEVX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCI_PKTS_SENT_INT_LEVX(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000048ull + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_PCI_PKTS_SENT_INT_LEVX(offset) (0x0000000000000048ull + ((offset) & 3) * 16)
+#endif
+#define CVMX_PCI_PKTS_SENT_TIME0 CVMX_PCI_PKTS_SENT_TIMEX(0)
+#define CVMX_PCI_PKTS_SENT_TIME1 CVMX_PCI_PKTS_SENT_TIMEX(1)
+#define CVMX_PCI_PKTS_SENT_TIME2 CVMX_PCI_PKTS_SENT_TIMEX(2)
+#define CVMX_PCI_PKTS_SENT_TIME3 CVMX_PCI_PKTS_SENT_TIMEX(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCI_PKTS_SENT_TIMEX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCI_PKTS_SENT_TIMEX(%lu) is invalid on this chip\n", offset);
+ return 0x000000000000004Cull + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_PCI_PKTS_SENT_TIMEX(offset) (0x000000000000004Cull + ((offset) & 3) * 16)
+#endif
+#define CVMX_PCI_PKT_CREDITS0 CVMX_PCI_PKT_CREDITSX(0)
+#define CVMX_PCI_PKT_CREDITS1 CVMX_PCI_PKT_CREDITSX(1)
+#define CVMX_PCI_PKT_CREDITS2 CVMX_PCI_PKT_CREDITSX(2)
+#define CVMX_PCI_PKT_CREDITS3 CVMX_PCI_PKT_CREDITSX(3)
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCI_PKT_CREDITSX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCI_PKT_CREDITSX(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000044ull + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_PCI_PKT_CREDITSX(offset) (0x0000000000000044ull + ((offset) & 3) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_READ_CMD_6 CVMX_PCI_READ_CMD_6_FUNC()
+static inline uint64_t CVMX_PCI_READ_CMD_6_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_READ_CMD_6 not supported on this chip\n");
+ return 0x0000000000000180ull;
+}
+#else
+#define CVMX_PCI_READ_CMD_6 (0x0000000000000180ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_READ_CMD_C CVMX_PCI_READ_CMD_C_FUNC()
+static inline uint64_t CVMX_PCI_READ_CMD_C_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_READ_CMD_C not supported on this chip\n");
+ return 0x0000000000000184ull;
+}
+#else
+#define CVMX_PCI_READ_CMD_C (0x0000000000000184ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_READ_CMD_E CVMX_PCI_READ_CMD_E_FUNC()
+static inline uint64_t CVMX_PCI_READ_CMD_E_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_READ_CMD_E not supported on this chip\n");
+ return 0x0000000000000188ull;
+}
+#else
+#define CVMX_PCI_READ_CMD_E (0x0000000000000188ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_READ_TIMEOUT CVMX_PCI_READ_TIMEOUT_FUNC()
+static inline uint64_t CVMX_PCI_READ_TIMEOUT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_READ_TIMEOUT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000000B0ull);
+}
+#else
+#define CVMX_PCI_READ_TIMEOUT (CVMX_ADD_IO_SEG(0x00011F00000000B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_SCM_REG CVMX_PCI_SCM_REG_FUNC()
+static inline uint64_t CVMX_PCI_SCM_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_SCM_REG not supported on this chip\n");
+ return 0x00000000000001A8ull;
+}
+#else
+#define CVMX_PCI_SCM_REG (0x00000000000001A8ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_TSR_REG CVMX_PCI_TSR_REG_FUNC()
+static inline uint64_t CVMX_PCI_TSR_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_TSR_REG not supported on this chip\n");
+ return 0x00000000000001B0ull;
+}
+#else
+#define CVMX_PCI_TSR_REG (0x00000000000001B0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_WIN_RD_ADDR CVMX_PCI_WIN_RD_ADDR_FUNC()
+static inline uint64_t CVMX_PCI_WIN_RD_ADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_WIN_RD_ADDR not supported on this chip\n");
+ return 0x0000000000000008ull;
+}
+#else
+#define CVMX_PCI_WIN_RD_ADDR (0x0000000000000008ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_WIN_RD_DATA CVMX_PCI_WIN_RD_DATA_FUNC()
+static inline uint64_t CVMX_PCI_WIN_RD_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_WIN_RD_DATA not supported on this chip\n");
+ return 0x0000000000000020ull;
+}
+#else
+#define CVMX_PCI_WIN_RD_DATA (0x0000000000000020ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_WIN_WR_ADDR CVMX_PCI_WIN_WR_ADDR_FUNC()
+static inline uint64_t CVMX_PCI_WIN_WR_ADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_WIN_WR_ADDR not supported on this chip\n");
+ return 0x0000000000000000ull;
+}
+#else
+#define CVMX_PCI_WIN_WR_ADDR (0x0000000000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_WIN_WR_DATA CVMX_PCI_WIN_WR_DATA_FUNC()
+static inline uint64_t CVMX_PCI_WIN_WR_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_WIN_WR_DATA not supported on this chip\n");
+ return 0x0000000000000010ull;
+}
+#else
+#define CVMX_PCI_WIN_WR_DATA (0x0000000000000010ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PCI_WIN_WR_MASK CVMX_PCI_WIN_WR_MASK_FUNC()
+static inline uint64_t CVMX_PCI_WIN_WR_MASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PCI_WIN_WR_MASK not supported on this chip\n");
+ return 0x0000000000000018ull;
+}
+#else
+#define CVMX_PCI_WIN_WR_MASK (0x0000000000000018ull)
+#endif
+
+/**
+ * cvmx_pci_bar1_index#
+ *
+ * PCI_BAR1_INDEXX = PCI IndexX Register
+ *
+ * Contains address index and control bits for access to memory ranges of Bar-1,
+ * when PCI supplied address-bits [26:22] == X.
+ */
+union cvmx_pci_bar1_indexx {
+ uint32_t u32;
+ struct cvmx_pci_bar1_indexx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_18_31 : 14;
+ uint32_t addr_idx : 14; /**< Address bits [35:22] sent to L2C */
+ uint32_t ca : 1; /**< Set '1' when access is not to be cached in L2. */
+ uint32_t end_swp : 2; /**< Endian Swap Mode */
+ uint32_t addr_v : 1; /**< Set '1' when the selected address range is valid. */
+#else
+ uint32_t addr_v : 1;
+ uint32_t end_swp : 2;
+ uint32_t ca : 1;
+ uint32_t addr_idx : 14;
+ uint32_t reserved_18_31 : 14;
+#endif
+ } s;
+ struct cvmx_pci_bar1_indexx_s cn30xx;
+ struct cvmx_pci_bar1_indexx_s cn31xx;
+ struct cvmx_pci_bar1_indexx_s cn38xx;
+ struct cvmx_pci_bar1_indexx_s cn38xxp2;
+ struct cvmx_pci_bar1_indexx_s cn50xx;
+ struct cvmx_pci_bar1_indexx_s cn58xx;
+ struct cvmx_pci_bar1_indexx_s cn58xxp1;
+};
+typedef union cvmx_pci_bar1_indexx cvmx_pci_bar1_indexx_t;
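+
+/* Illustrative sketch (not part of the original SDK): composing a BAR1 index
+ * entry through the named bitfields instead of hand-shifting bits. How the
+ * resulting u32 reaches the CSR is platform-specific and left out here.
+ */
+static inline uint32_t cvmx_pci_bar1_index_example(uint64_t phys_addr)
+{
+    cvmx_pci_bar1_indexx_t idx;
+    idx.u32 = 0;
+    idx.s.addr_idx = (phys_addr >> 22) & 0x3fff; /* address bits [35:22] */
+    idx.s.ca = 1;       /* do not cache this window in L2 */
+    idx.s.end_swp = 0;  /* no endian swap */
+    idx.s.addr_v = 1;   /* mark the mapping valid */
+    return idx.u32;
+}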
+
+/**
+ * cvmx_pci_bist_reg
+ *
+ * PCI_BIST_REG = PCI PNI BIST Status Register
+ *
+ * Contains the bist results for the PNI memories.
+ */
+union cvmx_pci_bist_reg {
+ uint64_t u64;
+ struct cvmx_pci_bist_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t rsp_bs : 1; /**< Bist Status For b12_rsp_fifo_bist
+ The value of this register is available 100,000
+ core clocks + 21,000 pclks after:
+ Host Mode - deassertion of pci_rst_n
+ Non Host Mode - deassertion of pci_rst_n */
+ uint64_t dma0_bs : 1; /**< Bist Status For dmao_count
+ The value of this register is available 100,000
+ core clocks + 21,000 pclks after:
+ Host Mode - deassertion of pci_rst_n
+ Non Host Mode - deassertion of pci_rst_n */
+ uint64_t cmd0_bs : 1; /**< Bist Status For npi_cmd0_pni_am0
+ The value of this register is available 100,000
+ core clocks + 21,000 pclks after:
+ Host Mode - deassertion of pci_rst_n
+ Non Host Mode - deassertion of pci_rst_n */
+ uint64_t cmd_bs : 1; /**< Bist Status For npi_cmd_pni_am1
+ The value of this register is available 100,000
+ core clocks + 21,000 pclks after:
+ Host Mode - deassertion of pci_rst_n
+ Non Host Mode - deassertion of pci_rst_n */
+ uint64_t csr2p_bs : 1; /**< Bist Status For npi_csr_2_pni_am
+ The value of this register is available 100,000
+ core clocks + 21,000 pclks after:
+ Host Mode - deassertion of pci_rst_n
+ Non Host Mode - deassertion of pci_rst_n */
+ uint64_t csrr_bs : 1; /**< Bist Status For npi_csr_rsp_2_pni_am
+ The value of this register is available 100,000
+ core clocks + 21,000 pclks after:
+ Host Mode - deassertion of pci_rst_n
+ Non Host Mode - deassertion of pci_rst_n */
+ uint64_t rsp2p_bs : 1; /**< Bist Status For npi_rsp_2_pni_am
+ The value of this register is available 100,000
+ core clocks + 21,000 pclks after:
+ Host Mode - deassertion of pci_rst_n
+ Non Host Mode - deassertion of pci_rst_n */
+ uint64_t csr2n_bs : 1; /**< Bist Status For pni_csr_2_npi_am
+ The value of this register is available 100,000
+ core clocks + 21,000 pclks after:
+ Host Mode - deassertion of pci_rst_n
+ Non Host Mode - deassertion of pci_rst_n */
+ uint64_t dat2n_bs : 1; /**< Bist Status For pni_data_2_npi_am
+ The value of this register is available 100,000
+ core clocks + 21,000 pclks after:
+ Host Mode - deassertion of pci_rst_n
+ Non Host Mode - deassertion of pci_rst_n */
+ uint64_t dbg2n_bs : 1; /**< Bist Status For pni_dbg_data_2_npi_am
+ The value of this register is available 100,000
+ core clocks + 21,000 pclks after:
+ Host Mode - deassertion of pci_rst_n
+ Non Host Mode - deassertion of pci_rst_n */
+#else
+ uint64_t dbg2n_bs : 1;
+ uint64_t dat2n_bs : 1;
+ uint64_t csr2n_bs : 1;
+ uint64_t rsp2p_bs : 1;
+ uint64_t csrr_bs : 1;
+ uint64_t csr2p_bs : 1;
+ uint64_t cmd_bs : 1;
+ uint64_t cmd0_bs : 1;
+ uint64_t dma0_bs : 1;
+ uint64_t rsp_bs : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_pci_bist_reg_s cn50xx;
+};
+typedef union cvmx_pci_bist_reg cvmx_pci_bist_reg_t;
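+
+/* Illustrative sketch (not part of the original SDK): a raw PCI_BIST_REG
+ * value decodes through the union; any set bit in [9:0] marks a PNI memory
+ * that failed BIST.
+ */
+static inline int cvmx_pci_bist_example_any_failure(uint64_t raw)
+{
+    cvmx_pci_bist_reg_t bist;
+    bist.u64 = raw;
+    return (bist.u64 & 0x3ffull) != 0; /* bits [9:0] are the status flags */
+}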
+
+/**
+ * cvmx_pci_cfg00
+ *
+ * Registers at addresses 0x1000 -> 0x17FF are PNI registers; they start
+ * 0x100 into the range, and each offset is shifted left by 2 to form the
+ * address. Registers at addresses 0x1800 -> 0x18FF are CFG registers,
+ * with offsets likewise shifted left by 2.
+ *
+ * PCI_CFG00 = First 32-bits of PCI config space (PCI Vendor + Device)
+ *
+ * This register contains the first 32-bits of the PCI config space registers
+ */
+union cvmx_pci_cfg00 {
+ uint32_t u32;
+ struct cvmx_pci_cfg00_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t devid : 16; /**< This is the device ID for OCTEON (90nm shrink) */
+ uint32_t vendid : 16; /**< This is Cavium's vendor ID */
+#else
+ uint32_t vendid : 16;
+ uint32_t devid : 16;
+#endif
+ } s;
+ struct cvmx_pci_cfg00_s cn30xx;
+ struct cvmx_pci_cfg00_s cn31xx;
+ struct cvmx_pci_cfg00_s cn38xx;
+ struct cvmx_pci_cfg00_s cn38xxp2;
+ struct cvmx_pci_cfg00_s cn50xx;
+ struct cvmx_pci_cfg00_s cn58xx;
+ struct cvmx_pci_cfg00_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg00 cvmx_pci_cfg00_t;
+
+/**
+ * cvmx_pci_cfg01
+ *
+ * PCI_CFG01 = Second 32-bits of PCI config space (Command/Status Register)
+ *
+ */
+union cvmx_pci_cfg01 {
+ uint32_t u32;
+ struct cvmx_pci_cfg01_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dpe : 1; /**< Detected Parity Error */
+ uint32_t sse : 1; /**< Signaled System Error */
+ uint32_t rma : 1; /**< Received Master Abort */
+ uint32_t rta : 1; /**< Received Target Abort */
+ uint32_t sta : 1; /**< Signaled Target Abort */
+ uint32_t devt : 2; /**< DEVSEL# timing (for PCI only/for PCIX = don't care) */
+ uint32_t mdpe : 1; /**< Master Data Parity Error */
+ uint32_t fbb : 1; /**< Fast Back-to-Back Transactions Capable
+ Mode Dependent (1 = PCI Mode / 0 = PCIX Mode) */
+ uint32_t reserved_22_22 : 1;
+ uint32_t m66 : 1; /**< 66MHz Capable */
+ uint32_t cle : 1; /**< Capabilities List Enable */
+ uint32_t i_stat : 1; /**< When INTx# is asserted by OCTEON this bit will be set.
+ When deasserted by OCTEON this bit will be cleared. */
+ uint32_t reserved_11_18 : 8;
+ uint32_t i_dis : 1; /**< When asserted '1' disables the generation of INTx#
+ by OCTEON. When disabled '0' allows assertion of INTx#
+ by OCTEON. */
+ uint32_t fbbe : 1; /**< Fast Back to Back Transaction Enable */
+ uint32_t see : 1; /**< System Error Enable */
+ uint32_t ads : 1; /**< Address/Data Stepping
+ NOTE: Octeon does NOT support address/data stepping. */
+ uint32_t pee : 1; /**< PERR# Enable */
+ uint32_t vps : 1; /**< VGA Palette Snooping */
+ uint32_t mwice : 1; /**< Memory Write & Invalidate Command Enable */
+ uint32_t scse : 1; /**< Special Cycle Snooping Enable */
+ uint32_t me : 1; /**< Master Enable
+ Must be set for OCTEON to master a PCI/PCI-X
+ transaction. This should always be set any time
+ that OCTEON is connected to a PCI/PCI-X bus. */
+ uint32_t msae : 1; /**< Memory Space Access Enable
+ Must be set to receive a PCI/PCI-X memory space
+ transaction. This must always be set any time that
+ OCTEON is connected to a PCI/PCI-X bus. */
+ uint32_t isae : 1; /**< I/O Space Access Enable
+ NOTE: For OCTEON, this bit MUST NEVER be set
+ (it is read-only and OCTEON does not respond to I/O
+ Space accesses). */
+#else
+ uint32_t isae : 1;
+ uint32_t msae : 1;
+ uint32_t me : 1;
+ uint32_t scse : 1;
+ uint32_t mwice : 1;
+ uint32_t vps : 1;
+ uint32_t pee : 1;
+ uint32_t ads : 1;
+ uint32_t see : 1;
+ uint32_t fbbe : 1;
+ uint32_t i_dis : 1;
+ uint32_t reserved_11_18 : 8;
+ uint32_t i_stat : 1;
+ uint32_t cle : 1;
+ uint32_t m66 : 1;
+ uint32_t reserved_22_22 : 1;
+ uint32_t fbb : 1;
+ uint32_t mdpe : 1;
+ uint32_t devt : 2;
+ uint32_t sta : 1;
+ uint32_t rta : 1;
+ uint32_t rma : 1;
+ uint32_t sse : 1;
+ uint32_t dpe : 1;
+#endif
+ } s;
+ struct cvmx_pci_cfg01_s cn30xx;
+ struct cvmx_pci_cfg01_s cn31xx;
+ struct cvmx_pci_cfg01_s cn38xx;
+ struct cvmx_pci_cfg01_s cn38xxp2;
+ struct cvmx_pci_cfg01_s cn50xx;
+ struct cvmx_pci_cfg01_s cn58xx;
+ struct cvmx_pci_cfg01_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg01 cvmx_pci_cfg01_t;
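+
+/* Illustrative sketch (not part of the original SDK): decoding the
+ * command/status word. Per the field notes above, ME and MSAE must both be
+ * set whenever OCTEON is connected to a PCI/PCI-X bus.
+ */
+static inline int cvmx_pci_cfg01_example_bus_ready(uint32_t raw)
+{
+    cvmx_pci_cfg01_t cfg01;
+    cfg01.u32 = raw;
+    return cfg01.s.me && cfg01.s.msae; /* master + memory space enabled */
+}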
+
+/**
+ * cvmx_pci_cfg02
+ *
+ * PCI_CFG02 = Third 32-bits of PCI config space (Class Code / Revision ID)
+ *
+ */
+union cvmx_pci_cfg02 {
+ uint32_t u32;
+ struct cvmx_pci_cfg02_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cc : 24; /**< Class Code (Processor/MIPS)
+ (was 0x100000 in pass 1 and pass 2) */
+ uint32_t rid : 8; /**< Revision ID
+ (0 in pass 1, 1 in pass 1.1, 8 in pass 2.0) */
+#else
+ uint32_t rid : 8;
+ uint32_t cc : 24;
+#endif
+ } s;
+ struct cvmx_pci_cfg02_s cn30xx;
+ struct cvmx_pci_cfg02_s cn31xx;
+ struct cvmx_pci_cfg02_s cn38xx;
+ struct cvmx_pci_cfg02_s cn38xxp2;
+ struct cvmx_pci_cfg02_s cn50xx;
+ struct cvmx_pci_cfg02_s cn58xx;
+ struct cvmx_pci_cfg02_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg02 cvmx_pci_cfg02_t;
+
+/**
+ * cvmx_pci_cfg03
+ *
+ * PCI_CFG03 = Fourth 32-bits of PCI config space (BIST, HEADER Type, Latency timer, line size)
+ *
+ */
+union cvmx_pci_cfg03 {
+ uint32_t u32;
+ struct cvmx_pci_cfg03_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bcap : 1; /**< BIST Capable */
+ uint32_t brb : 1; /**< BIST Request/busy bit
+ Note: OCTEON does not support PCI BIST, therefore
+ this bit should remain zero. */
+ uint32_t reserved_28_29 : 2;
+ uint32_t bcod : 4; /**< BIST Code */
+ uint32_t ht : 8; /**< Header Type (Type 0) */
+ uint32_t lt : 8; /**< Latency Timer
+ (0=PCI)
+ (0x40=PCIX) */
+ uint32_t cls : 8; /**< Cache Line Size */
+#else
+ uint32_t cls : 8;
+ uint32_t lt : 8;
+ uint32_t ht : 8;
+ uint32_t bcod : 4;
+ uint32_t reserved_28_29 : 2;
+ uint32_t brb : 1;
+ uint32_t bcap : 1;
+#endif
+ } s;
+ struct cvmx_pci_cfg03_s cn30xx;
+ struct cvmx_pci_cfg03_s cn31xx;
+ struct cvmx_pci_cfg03_s cn38xx;
+ struct cvmx_pci_cfg03_s cn38xxp2;
+ struct cvmx_pci_cfg03_s cn50xx;
+ struct cvmx_pci_cfg03_s cn58xx;
+ struct cvmx_pci_cfg03_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg03 cvmx_pci_cfg03_t;
+
+/**
+ * cvmx_pci_cfg04
+ *
+ * PCI_CFG04 = Fifth 32-bits of PCI config space (Base Address Register 0 - Low)
+ *
+ * Description: BAR0: 4KB 64-bit Prefetchable Memory Space
+ * [0]: 0 (Memory Space)
+ * [2:1]: 2 (64bit memory decoder)
+ * [3]: 1 (Prefetchable)
+ * [11:4]: RAZ (to imply 4KB space)
+ * [31:12]: RW (User may define base address)
+ */
+union cvmx_pci_cfg04 {
+ uint32_t u32;
+ struct cvmx_pci_cfg04_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lbase : 20; /**< Base Address[31:12]
+ Base Address[30:12] read as zero if
+ PCI_CTL_STATUS_2[BB0] is set (in pass 3+) */
+ uint32_t lbasez : 8; /**< Base Address[11:4] (Read as Zero) */
+ uint32_t pf : 1; /**< Prefetchable Space */
+ uint32_t typ : 2; /**< Type (00=32b/01=below 1MB/10=64b/11=RSV) */
+ uint32_t mspc : 1; /**< Memory Space Indicator */
+#else
+ uint32_t mspc : 1;
+ uint32_t typ : 2;
+ uint32_t pf : 1;
+ uint32_t lbasez : 8;
+ uint32_t lbase : 20;
+#endif
+ } s;
+ struct cvmx_pci_cfg04_s cn30xx;
+ struct cvmx_pci_cfg04_s cn31xx;
+ struct cvmx_pci_cfg04_s cn38xx;
+ struct cvmx_pci_cfg04_s cn38xxp2;
+ struct cvmx_pci_cfg04_s cn50xx;
+ struct cvmx_pci_cfg04_s cn58xx;
+ struct cvmx_pci_cfg04_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg04 cvmx_pci_cfg04_t;
+
+/**
+ * cvmx_pci_cfg05
+ *
+ * PCI_CFG05 = Sixth 32-bits of PCI config space (Base Address Register 0 - High)
+ *
+ */
+union cvmx_pci_cfg05 {
+ uint32_t u32;
+ struct cvmx_pci_cfg05_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hbase : 32; /**< Base Address[63:32] */
+#else
+ uint32_t hbase : 32;
+#endif
+ } s;
+ struct cvmx_pci_cfg05_s cn30xx;
+ struct cvmx_pci_cfg05_s cn31xx;
+ struct cvmx_pci_cfg05_s cn38xx;
+ struct cvmx_pci_cfg05_s cn38xxp2;
+ struct cvmx_pci_cfg05_s cn50xx;
+ struct cvmx_pci_cfg05_s cn58xx;
+ struct cvmx_pci_cfg05_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg05 cvmx_pci_cfg05_t;
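+
+/* Illustrative sketch (not part of the original SDK): BAR0 is split across
+ * CFG04 (low) and CFG05 (high), so the 64-bit base reassembles from LBASE
+ * (address bits [31:12]) and HBASE (address bits [63:32]).
+ */
+static inline uint64_t cvmx_pci_bar0_example_base(uint32_t cfg04_raw, uint32_t cfg05_raw)
+{
+    cvmx_pci_cfg04_t lo;
+    cvmx_pci_cfg05_t hi;
+    lo.u32 = cfg04_raw;
+    hi.u32 = cfg05_raw;
+    return ((uint64_t)hi.s.hbase << 32) | ((uint64_t)lo.s.lbase << 12);
+}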
+
+/**
+ * cvmx_pci_cfg06
+ *
+ * PCI_CFG06 = Seventh 32-bits of PCI config space (Base Address Register 1 - Low)
+ *
+ * Description: BAR1: 128MB 64-bit Prefetchable Memory Space
+ * [0]: 0 (Memory Space)
+ * [2:1]: 2 (64bit memory decoder)
+ * [3]: 1 (Prefetchable)
+ * [26:4]: RAZ (to imply 128MB space)
+ * [31:27]: RW (User may define base address)
+ */
+union cvmx_pci_cfg06 {
+ uint32_t u32;
+ struct cvmx_pci_cfg06_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lbase : 5; /**< Base Address[31:27]
+ In pass 3+:
+ Base Address[29:27] read as zero if
+ PCI_CTL_STATUS_2[BB1] is set
+ Base Address[30] reads as zero if
+ PCI_CTL_STATUS_2[BB1] is set and
+ PCI_CTL_STATUS_2[BB1_SIZE] is set */
+ uint32_t lbasez : 23; /**< Base Address[26:4] (Read as Zero) */
+ uint32_t pf : 1; /**< Prefetchable Space */
+ uint32_t typ : 2; /**< Type (00=32b/01=below 1MB/10=64b/11=RSV) */
+ uint32_t mspc : 1; /**< Memory Space Indicator */
+#else
+ uint32_t mspc : 1;
+ uint32_t typ : 2;
+ uint32_t pf : 1;
+ uint32_t lbasez : 23;
+ uint32_t lbase : 5;
+#endif
+ } s;
+ struct cvmx_pci_cfg06_s cn30xx;
+ struct cvmx_pci_cfg06_s cn31xx;
+ struct cvmx_pci_cfg06_s cn38xx;
+ struct cvmx_pci_cfg06_s cn38xxp2;
+ struct cvmx_pci_cfg06_s cn50xx;
+ struct cvmx_pci_cfg06_s cn58xx;
+ struct cvmx_pci_cfg06_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg06 cvmx_pci_cfg06_t;
+
+/**
+ * cvmx_pci_cfg07
+ *
+ * PCI_CFG07 = Eighth 32-bits of PCI config space (Base Address Register 1 - High)
+ *
+ */
+union cvmx_pci_cfg07 {
+ uint32_t u32;
+ struct cvmx_pci_cfg07_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hbase : 32; /**< Base Address[63:32] */
+#else
+ uint32_t hbase : 32;
+#endif
+ } s;
+ struct cvmx_pci_cfg07_s cn30xx;
+ struct cvmx_pci_cfg07_s cn31xx;
+ struct cvmx_pci_cfg07_s cn38xx;
+ struct cvmx_pci_cfg07_s cn38xxp2;
+ struct cvmx_pci_cfg07_s cn50xx;
+ struct cvmx_pci_cfg07_s cn58xx;
+ struct cvmx_pci_cfg07_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg07 cvmx_pci_cfg07_t;
+
+/**
+ * cvmx_pci_cfg08
+ *
+ * PCI_CFG08 = Ninth 32-bits of PCI config space (Base Address Register 2 - Low)
+ *
+ * Description: BAR2: 2^39 (512GB) 64-bit Prefetchable Memory Space
+ * [0]: 0 (Memory Space)
+ * [2:1]: 2 (64bit memory decoder)
+ * [3]: 1 (Prefetchable)
+ * [31:4]: RAZ
+ */
+union cvmx_pci_cfg08 {
+ uint32_t u32;
+ struct cvmx_pci_cfg08_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lbasez : 28; /**< Base Address[31:4] (Read as Zero) */
+ uint32_t pf : 1; /**< Prefetchable Space */
+ uint32_t typ : 2; /**< Type (00=32b/01=below 1MB/10=64b/11=RSV) */
+ uint32_t mspc : 1; /**< Memory Space Indicator */
+#else
+ uint32_t mspc : 1;
+ uint32_t typ : 2;
+ uint32_t pf : 1;
+ uint32_t lbasez : 28;
+#endif
+ } s;
+ struct cvmx_pci_cfg08_s cn30xx;
+ struct cvmx_pci_cfg08_s cn31xx;
+ struct cvmx_pci_cfg08_s cn38xx;
+ struct cvmx_pci_cfg08_s cn38xxp2;
+ struct cvmx_pci_cfg08_s cn50xx;
+ struct cvmx_pci_cfg08_s cn58xx;
+ struct cvmx_pci_cfg08_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg08 cvmx_pci_cfg08_t;
+
+/**
+ * cvmx_pci_cfg09
+ *
+ * PCI_CFG09 = Tenth 32-bits of PCI config space (Base Address Register 2 - High)
+ *
+ */
+union cvmx_pci_cfg09 {
+ uint32_t u32;
+ struct cvmx_pci_cfg09_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hbase : 25; /**< Base Address[63:39] */
+ uint32_t hbasez : 7; /**< Base Address[38:32] (Read as Zero) */
+#else
+ uint32_t hbasez : 7;
+ uint32_t hbase : 25;
+#endif
+ } s;
+ struct cvmx_pci_cfg09_s cn30xx;
+ struct cvmx_pci_cfg09_s cn31xx;
+ struct cvmx_pci_cfg09_s cn38xx;
+ struct cvmx_pci_cfg09_s cn38xxp2;
+ struct cvmx_pci_cfg09_s cn50xx;
+ struct cvmx_pci_cfg09_s cn58xx;
+ struct cvmx_pci_cfg09_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg09 cvmx_pci_cfg09_t;
+
+/**
+ * cvmx_pci_cfg10
+ *
+ * PCI_CFG10 = Eleventh 32-bits of PCI config space (Card Bus CIS Pointer)
+ *
+ */
+union cvmx_pci_cfg10 {
+ uint32_t u32;
+ struct cvmx_pci_cfg10_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cisp : 32; /**< CardBus CIS Pointer (UNUSED) */
+#else
+ uint32_t cisp : 32;
+#endif
+ } s;
+ struct cvmx_pci_cfg10_s cn30xx;
+ struct cvmx_pci_cfg10_s cn31xx;
+ struct cvmx_pci_cfg10_s cn38xx;
+ struct cvmx_pci_cfg10_s cn38xxp2;
+ struct cvmx_pci_cfg10_s cn50xx;
+ struct cvmx_pci_cfg10_s cn58xx;
+ struct cvmx_pci_cfg10_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg10 cvmx_pci_cfg10_t;
+
+/**
+ * cvmx_pci_cfg11
+ *
+ * PCI_CFG11 = Twelfth 32-bits of PCI config space (SubSystem ID/Subsystem Vendor ID Register)
+ *
+ */
+union cvmx_pci_cfg11 {
+ uint32_t u32;
+ struct cvmx_pci_cfg11_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ssid : 16; /**< SubSystem ID */
+ uint32_t ssvid : 16; /**< Subsystem Vendor ID */
+#else
+ uint32_t ssvid : 16;
+ uint32_t ssid : 16;
+#endif
+ } s;
+ struct cvmx_pci_cfg11_s cn30xx;
+ struct cvmx_pci_cfg11_s cn31xx;
+ struct cvmx_pci_cfg11_s cn38xx;
+ struct cvmx_pci_cfg11_s cn38xxp2;
+ struct cvmx_pci_cfg11_s cn50xx;
+ struct cvmx_pci_cfg11_s cn58xx;
+ struct cvmx_pci_cfg11_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg11 cvmx_pci_cfg11_t;
+
+/**
+ * cvmx_pci_cfg12
+ *
+ * PCI_CFG12 = Thirteenth 32-bits of PCI config space (Expansion ROM Base Address Register)
+ *
+ */
+union cvmx_pci_cfg12 {
+ uint32_t u32;
+ struct cvmx_pci_cfg12_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t erbar : 16; /**< Expansion ROM Base Address[31:16] 64KB in size */
+ uint32_t erbarz : 5; /**< Expansion ROM Base Address (Read as Zero) */
+ uint32_t reserved_1_10 : 10;
+ uint32_t erbar_en : 1; /**< Expansion ROM Address Decode Enable */
+#else
+ uint32_t erbar_en : 1;
+ uint32_t reserved_1_10 : 10;
+ uint32_t erbarz : 5;
+ uint32_t erbar : 16;
+#endif
+ } s;
+ struct cvmx_pci_cfg12_s cn30xx;
+ struct cvmx_pci_cfg12_s cn31xx;
+ struct cvmx_pci_cfg12_s cn38xx;
+ struct cvmx_pci_cfg12_s cn38xxp2;
+ struct cvmx_pci_cfg12_s cn50xx;
+ struct cvmx_pci_cfg12_s cn58xx;
+ struct cvmx_pci_cfg12_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg12 cvmx_pci_cfg12_t;
+
+/**
+ * cvmx_pci_cfg13
+ *
+ * PCI_CFG13 = Fourteenth 32-bits of PCI config space (Capabilities Pointer Register)
+ *
+ */
+union cvmx_pci_cfg13 {
+ uint32_t u32;
+ struct cvmx_pci_cfg13_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t cp : 8; /**< Capabilities Pointer */
+#else
+ uint32_t cp : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_pci_cfg13_s cn30xx;
+ struct cvmx_pci_cfg13_s cn31xx;
+ struct cvmx_pci_cfg13_s cn38xx;
+ struct cvmx_pci_cfg13_s cn38xxp2;
+ struct cvmx_pci_cfg13_s cn50xx;
+ struct cvmx_pci_cfg13_s cn58xx;
+ struct cvmx_pci_cfg13_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg13 cvmx_pci_cfg13_t;
+
+/**
+ * cvmx_pci_cfg15
+ *
+ * PCI_CFG15 = Sixteenth 32-bits of PCI config space (INT/ARB/LATENCY Register)
+ *
+ */
+union cvmx_pci_cfg15 {
+ uint32_t u32;
+ struct cvmx_pci_cfg15_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ml : 8; /**< Maximum Latency */
+ uint32_t mg : 8; /**< Minimum Grant */
+ uint32_t inta : 8; /**< Interrupt Pin (INTA#) */
+ uint32_t il : 8; /**< Interrupt Line */
+#else
+ uint32_t il : 8;
+ uint32_t inta : 8;
+ uint32_t mg : 8;
+ uint32_t ml : 8;
+#endif
+ } s;
+ struct cvmx_pci_cfg15_s cn30xx;
+ struct cvmx_pci_cfg15_s cn31xx;
+ struct cvmx_pci_cfg15_s cn38xx;
+ struct cvmx_pci_cfg15_s cn38xxp2;
+ struct cvmx_pci_cfg15_s cn50xx;
+ struct cvmx_pci_cfg15_s cn58xx;
+ struct cvmx_pci_cfg15_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg15 cvmx_pci_cfg15_t;
+
+/**
+ * cvmx_pci_cfg16
+ *
+ * PCI_CFG16 = Seventeenth 32-bits of PCI config space (Target Implementation Register)
+ *
+ */
+union cvmx_pci_cfg16 {
+ uint32_t u32;
+ struct cvmx_pci_cfg16_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t trdnpr : 1; /**< Target Read Delayed Transaction for I/O and
+ non-prefetchable regions discarded. */
+ uint32_t trdard : 1; /**< Target Read Delayed Transaction for all regions
+ discarded. */
+ uint32_t rdsati : 1; /**< Target(I/O and Memory) Read Delayed/Split at
+ timeout/immediately (default timeout).
+ Note: OCTEON requires that this bit MBZ(must be zero). */
+ uint32_t trdrs : 1; /**< Target(I/O and Memory) Read Delayed/Split or Retry
+ select (if the application interface is not ready)
+ 0 = Delayed Split Transaction
+ 1 = Retry Transaction (always Immediate Retry, no
+ AT_REQ to application). */
+ uint32_t trtae : 1; /**< Target(I/O and Memory) Read Target Abort Enable
+ (if application interface is not ready at the
+ latency timeout).
+ Note: OCTEON as target will never target-abort,
+ therefore this bit should never be set. */
+ uint32_t twsei : 1; /**< Target(I/O) Write Split Enable (at timeout /
+ immediately; default timeout) */
+ uint32_t twsen : 1; /**< T(I/O) write split Enable (if the application
+ interface is not ready) */
+ uint32_t twtae : 1; /**< Target(I/O and Memory) Write Target Abort Enable
+ (if the application interface is not ready at the
+ start of the cycle).
+ Note: OCTEON as target will never target-abort,
+ therefore this bit should never be set. */
+ uint32_t tmae : 1; /**< Target(Read/Write) Master Abort Enable; check
+ at the start of each transaction.
+ Note: This bit can be used to force a Master
+ Abort when OCTEON is acting as the intended target
+ device. */
+ uint32_t tslte : 3; /**< Target Subsequent(2nd-last) Latency Timeout Enable
+ Valid range: [1..7] and 0=8. */
+ uint32_t tilt : 4; /**< Target Initial(1st data) Latency Timeout in PCI
+ Mode. Valid range: [8..15] and 0=16. */
+ uint32_t pbe : 12; /**< Programmable Boundary Enable to disconnect/prefetch
+ for target burst read cycles to prefetchable
+ region in PCI. A value of 1 indicates end of
+ boundary (64 KB down to 16 Bytes). */
+ uint32_t dppmr : 1; /**< Disconnect/Prefetch to prefetchable memory
+ regions Enable. Prefetchable memory regions
+ are always disconnected on a region boundary.
+ Non-prefetchable regions for PCI are always
+ disconnected on the first transfer.
+ Note: OCTEON as target will never target-disconnect,
+ therefore this bit should never be set. */
+ uint32_t reserved_2_2 : 1;
+ uint32_t tswc : 1; /**< Target Split Write Control
+ 0 = Blocks all requests except PMW
+ 1 = Blocks all requests including PMW until
+ split completion occurs. */
+ uint32_t mltd : 1; /**< Master Latency Timer Disable
+ Note: For OCTEON, it is recommended that this bit
+ be set(to disable the Master Latency timer). */
+#else
+ uint32_t mltd : 1;
+ uint32_t tswc : 1;
+ uint32_t reserved_2_2 : 1;
+ uint32_t dppmr : 1;
+ uint32_t pbe : 12;
+ uint32_t tilt : 4;
+ uint32_t tslte : 3;
+ uint32_t tmae : 1;
+ uint32_t twtae : 1;
+ uint32_t twsen : 1;
+ uint32_t twsei : 1;
+ uint32_t trtae : 1;
+ uint32_t trdrs : 1;
+ uint32_t rdsati : 1;
+ uint32_t trdard : 1;
+ uint32_t trdnpr : 1;
+#endif
+ } s;
+ struct cvmx_pci_cfg16_s cn30xx;
+ struct cvmx_pci_cfg16_s cn31xx;
+ struct cvmx_pci_cfg16_s cn38xx;
+ struct cvmx_pci_cfg16_s cn38xxp2;
+ struct cvmx_pci_cfg16_s cn50xx;
+ struct cvmx_pci_cfg16_s cn58xx;
+ struct cvmx_pci_cfg16_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg16 cvmx_pci_cfg16_t;
+
+/**
+ * cvmx_pci_cfg17
+ *
+ * PCI_CFG17 = Eighteenth 32-bits of PCI config space (Target Split Completion Message
+ * Enable Register)
+ */
+union cvmx_pci_cfg17 {
+ uint32_t u32;
+ struct cvmx_pci_cfg17_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t tscme : 32; /**< Target Split Completion Message Enable
+ [31:30]: 00
+ [29]: Split Completion Error Indication
+ [28]: 0
+ [27:20]: Split Completion Message Index
+ [19:0]: 0x00000
+ For OCTEON, this register is intended for debug use
+ only. (as such, it is recommended NOT to be written
+ with anything other than ZEROES). */
+#else
+ uint32_t tscme : 32;
+#endif
+ } s;
+ struct cvmx_pci_cfg17_s cn30xx;
+ struct cvmx_pci_cfg17_s cn31xx;
+ struct cvmx_pci_cfg17_s cn38xx;
+ struct cvmx_pci_cfg17_s cn38xxp2;
+ struct cvmx_pci_cfg17_s cn50xx;
+ struct cvmx_pci_cfg17_s cn58xx;
+ struct cvmx_pci_cfg17_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg17 cvmx_pci_cfg17_t;
+
+/**
+ * cvmx_pci_cfg18
+ *
+ * PCI_CFG18 = Nineteenth 32-bits of PCI config space (Target Delayed/Split Request
+ * Pending Sequences)
+ */
+union cvmx_pci_cfg18 {
+ uint32_t u32;
+ struct cvmx_pci_cfg18_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t tdsrps : 32; /**< Target Delayed/Split Request Pending Sequences
+ The application uses this address to remove a
+ pending split sequence from the target queue by
+ clearing the appropriate bit. Example: Clearing [14]
+ clears the pending sequence \#14. An application
+ or configuration write to this address can clear this
+ register.
+ For OCTEON, this register is intended for debug use
+ only and MUST NEVER be written with anything other
+ than ZEROES. */
+#else
+ uint32_t tdsrps : 32;
+#endif
+ } s;
+ struct cvmx_pci_cfg18_s cn30xx;
+ struct cvmx_pci_cfg18_s cn31xx;
+ struct cvmx_pci_cfg18_s cn38xx;
+ struct cvmx_pci_cfg18_s cn38xxp2;
+ struct cvmx_pci_cfg18_s cn50xx;
+ struct cvmx_pci_cfg18_s cn58xx;
+ struct cvmx_pci_cfg18_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg18 cvmx_pci_cfg18_t;
+
+/**
+ * cvmx_pci_cfg19
+ *
+ * PCI_CFG19 = Twentieth 32-bits of PCI config space (Master/Target Implementation Register)
+ *
+ */
+union cvmx_pci_cfg19 {
+ uint32_t u32;
+ struct cvmx_pci_cfg19_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t mrbcm : 1; /**< Master Request (Memory Read) Byte Count/Byte
+ Enable select.
+ 0 = Byte Enables valid. In PCI mode, a burst
+ transaction cannot be performed using
+ Memory Read command=4'h6.
+ 1 = DWORD Byte Count valid (default). In PCI
+ Mode, the memory read byte enables are
+ automatically generated by the core.
+ NOTE: For OCTEON, this bit must always be one
+ for proper operation. */
+ uint32_t mrbci : 1; /**< Master Request (I/O and CR cycles) byte count/byte
+ enable select.
+ 0 = Byte Enables valid (default)
+ 1 = DWORD byte count valid
+ NOTE: For OCTEON, this bit must always be zero
+ for proper operation (in support of
+ Type0/1 Cfg Space accesses which require byte
+ enable generation directly from a read mask). */
+ uint32_t mdwe : 1; /**< Master (Retry) Deferred Write Enable (allow
+ read requests to pass).
+ NOTE: Applicable to PCI Mode I/O and memory
+ transactions only.
+ 0 = New read requests are NOT accepted until
+ the current write cycle completes. [Reads
+ cannot pass writes]
+ 1 = New read requests are accepted, even when
+ there is a write cycle pending [Reads can
+ pass writes].
+ NOTE: For OCTEON, this bit must always be zero
+ for proper operation. */
+ uint32_t mdre : 1; /**< Master (Retry) Deferred Read Enable (Allows
+ read/write requests to pass).
+ NOTE: Applicable to PCI mode I/O and memory
+ transactions only.
+ 0 = New read/write requests are NOT accepted
+ until the current read cycle completes.
+ [Read/write requests CANNOT pass reads]
+ 1 = New read/write requests are accepted, even
+ when there is a read cycle pending.
+ [Read/write requests CAN pass reads]
+ NOTE: For OCTEON, this bit must always be zero
+ for proper operation. */
+ uint32_t mdrimc : 1; /**< Master I/O Deferred/Split Request Outstanding
+ Maximum Count
+ 0 = MDRRMC[26:24]
+ 1 = 1 */
+ uint32_t mdrrmc : 3; /**< Master Deferred Read Request Outstanding Max
+ Count (PCI only).
+ CR4C[26:24] Max SAC cycles MAX DAC cycles
+ 000 8 4
+ 001 1 0
+ 010 2 1
+ 011 3 1
+ 100 4 2
+ 101 5 2
+ 110 6 3
+ 111 7 3
+ For example, if these bits are programmed to
+ 100, the core can support 2 DAC cycles, 4 SAC
+ cycles or a combination of 1 DAC and 2 SAC cycles.
+ NOTE: For the PCI-X maximum outstanding split
+ transactions, refer to CRE0[22:20] */
+ uint32_t tmes : 8; /**< Target/Master Error Sequence \# */
+ uint32_t teci : 1; /**< Target Error Command Indication
+ 0 = Delayed/Split
+ 1 = Others */
+ uint32_t tmei : 1; /**< Target/Master Error Indication
+ 0 = Target
+ 1 = Master */
+ uint32_t tmse : 1; /**< Target/Master System Error. This bit is set
+ whenever ATM_SERR_O is active. */
+ uint32_t tmdpes : 1; /**< Target/Master Data PERR# error status. This
+ bit is set whenever ATM_DATA_PERR_O is active. */
+ uint32_t tmapes : 1; /**< Target/Master Address PERR# error status. This
+ bit is set whenever ATM_ADDR_PERR_O is active. */
+ uint32_t reserved_9_10 : 2;
+ uint32_t tibcd : 1; /**< Target Illegal I/O DWORD byte combinations detected. */
+ uint32_t tibde : 1; /**< Target Illegal I/O DWORD byte detection enable */
+ uint32_t reserved_6_6 : 1;
+ uint32_t tidomc : 1; /**< Target I/O Delayed/Split request outstanding
+ maximum count.
+ 0 = TDOMC[4:0]
+ 1 = 1 */
+ uint32_t tdomc : 5; /**< Target Delayed/Split request outstanding maximum
+ count. [1..31] and 0=32.
+ NOTE: If the user programs these bits beyond the
+ Designed Maximum outstanding count, then the
+ designed maximum table depth will be used instead.
+ No additional Deferred/Split transactions will be
+ accepted if this outstanding maximum count
+ is reached. Furthermore, no additional
+ deferred/split transactions will be accepted if
+ the I/O delay/ I/O Split Request outstanding
+ maximum is reached.
+ NOTE: For OCTEON in PCI Mode, this field MUST BE
+ programmed to 1. (OCTEON can only handle 1 delayed
+ read at a time).
+ For OCTEON in PCIX Mode, this field can range from
+ 1-4. (The designed maximum table depth is 4
+ for PCIX mode splits). */
+#else
+ uint32_t tdomc : 5;
+ uint32_t tidomc : 1;
+ uint32_t reserved_6_6 : 1;
+ uint32_t tibde : 1;
+ uint32_t tibcd : 1;
+ uint32_t reserved_9_10 : 2;
+ uint32_t tmapes : 1;
+ uint32_t tmdpes : 1;
+ uint32_t tmse : 1;
+ uint32_t tmei : 1;
+ uint32_t teci : 1;
+ uint32_t tmes : 8;
+ uint32_t mdrrmc : 3;
+ uint32_t mdrimc : 1;
+ uint32_t mdre : 1;
+ uint32_t mdwe : 1;
+ uint32_t mrbci : 1;
+ uint32_t mrbcm : 1;
+#endif
+ } s;
+ struct cvmx_pci_cfg19_s cn30xx;
+ struct cvmx_pci_cfg19_s cn31xx;
+ struct cvmx_pci_cfg19_s cn38xx;
+ struct cvmx_pci_cfg19_s cn38xxp2;
+ struct cvmx_pci_cfg19_s cn50xx;
+ struct cvmx_pci_cfg19_s cn58xx;
+ struct cvmx_pci_cfg19_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg19 cvmx_pci_cfg19_t;
+
+/**
+ * cvmx_pci_cfg20
+ *
+ * PCI_CFG20 = Twenty-first 32-bits of PCI config space (Master Deferred/Split Sequence Pending)
+ *
+ */
+union cvmx_pci_cfg20 {
+ uint32_t u32;
+ struct cvmx_pci_cfg20_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t mdsp : 32; /**< Master Deferred/Split sequence Pending
+ For OCTEON, this register is intended for debug use
+ only and MUST NEVER be written with anything other
+ than ZEROES. */
+#else
+ uint32_t mdsp : 32;
+#endif
+ } s;
+ struct cvmx_pci_cfg20_s cn30xx;
+ struct cvmx_pci_cfg20_s cn31xx;
+ struct cvmx_pci_cfg20_s cn38xx;
+ struct cvmx_pci_cfg20_s cn38xxp2;
+ struct cvmx_pci_cfg20_s cn50xx;
+ struct cvmx_pci_cfg20_s cn58xx;
+ struct cvmx_pci_cfg20_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg20 cvmx_pci_cfg20_t;
+
+/**
+ * cvmx_pci_cfg21
+ *
+ * PCI_CFG21 = Twenty-second 32-bits of PCI config space (Master Split Completion Message Register)
+ *
+ */
+union cvmx_pci_cfg21 {
+ uint32_t u32;
+ struct cvmx_pci_cfg21_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t scmre : 32; /**< Master Split Completion message received with
+ error message.
+ For OCTEON, this register is intended for debug use
+ only and MUST NEVER be written with anything other
+ than ZEROES. */
+#else
+ uint32_t scmre : 32;
+#endif
+ } s;
+ struct cvmx_pci_cfg21_s cn30xx;
+ struct cvmx_pci_cfg21_s cn31xx;
+ struct cvmx_pci_cfg21_s cn38xx;
+ struct cvmx_pci_cfg21_s cn38xxp2;
+ struct cvmx_pci_cfg21_s cn50xx;
+ struct cvmx_pci_cfg21_s cn58xx;
+ struct cvmx_pci_cfg21_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg21 cvmx_pci_cfg21_t;
+
+/**
+ * cvmx_pci_cfg22
+ *
+ * PCI_CFG22 = Twenty-third 32-bits of PCI config space (Master Arbiter Control Register)
+ *
+ */
+union cvmx_pci_cfg22 {
+ uint32_t u32;
+ struct cvmx_pci_cfg22_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t mac : 7; /**< Master Arbiter Control
+ [31:26]: Used only in Fixed Priority mode
+ (when [25]=1)
+ [31:30]: MSI Request
+ 00 = Highest Priority
+ 01 = Medium Priority
+ 10 = Lowest Priority
+ 11 = RESERVED
+ [29:28]: Target Split Completion
+ 00 = Highest Priority
+ 01 = Medium Priority
+ 10 = Lowest Priority
+ 11 = RESERVED
+ [27:26]: New Request; Deferred Read,Deferred Write
+ 00 = Highest Priority
+ 01 = Medium Priority
+ 10 = Lowest Priority
+ 11 = RESERVED
+ [25]: Fixed/Round Robin Priority Selector
+ 0 = Round Robin
+ 1 = Fixed
+ NOTE: When [25]=1(fixed priority), the three levels
+ [31:26] MUST BE programmed to have mutually exclusive
+ priority levels for proper operation. (Failure to do
+ so may result in PCI hangs). */
+ uint32_t reserved_19_24 : 6;
+ uint32_t flush : 1; /**< AM_DO_FLUSH_I control
+ NOTE: This bit MUST BE ONE for proper OCTEON operation */
+ uint32_t mra : 1; /**< Master Retry Aborted */
+ uint32_t mtta : 1; /**< Master TRDY timeout aborted */
+ uint32_t mrv : 8; /**< Master Retry Value [1..255] and 0=infinite */
+ uint32_t mttv : 8; /**< Master TRDY timeout value [1..255] and 0=disabled
+ NOTE: For OCTEON, this bit must always be zero
+ for proper operation. (OCTEON does not support
+ master TRDY timeout - target is expected to be
+ well behaved). */
+#else
+ uint32_t mttv : 8;
+ uint32_t mrv : 8;
+ uint32_t mtta : 1;
+ uint32_t mra : 1;
+ uint32_t flush : 1;
+ uint32_t reserved_19_24 : 6;
+ uint32_t mac : 7;
+#endif
+ } s;
+ struct cvmx_pci_cfg22_s cn30xx;
+ struct cvmx_pci_cfg22_s cn31xx;
+ struct cvmx_pci_cfg22_s cn38xx;
+ struct cvmx_pci_cfg22_s cn38xxp2;
+ struct cvmx_pci_cfg22_s cn50xx;
+ struct cvmx_pci_cfg22_s cn58xx;
+ struct cvmx_pci_cfg22_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg22 cvmx_pci_cfg22_t;
+
+/**
+ * cvmx_pci_cfg56
+ *
+ * PCI_CFG56 = Fifty-seventh 32-bits of PCI config space (PCIX Capabilities Register)
+ *
+ */
+union cvmx_pci_cfg56 {
+ uint32_t u32;
+ struct cvmx_pci_cfg56_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t most : 3; /**< Maximum outstanding Split transactions
+ Encoded Value \#Max outstanding splits
+ 000 1
+ 001 2
+ 010 3
+ 011 4
+ 100 8
+ 101 8(clamped)
+ 110 8(clamped)
+ 111 8(clamped)
+ NOTE: OCTEON only supports up to a MAXIMUM of 8
+ outstanding master split transactions. */
+ uint32_t mmbc : 2; /**< Maximum Memory Byte Count
+ [0=512B,1=1024B,2=2048B,3=4096B]
+ NOTE: OCTEON does not support this field and has
+ no effect on limiting the maximum memory byte count. */
+ uint32_t roe : 1; /**< Relaxed Ordering Enable */
+ uint32_t dpere : 1; /**< Data Parity Error Recovery Enable */
+ uint32_t ncp : 8; /**< Next Capability Pointer */
+ uint32_t pxcid : 8; /**< PCI-X Capability ID */
+#else
+ uint32_t pxcid : 8;
+ uint32_t ncp : 8;
+ uint32_t dpere : 1;
+ uint32_t roe : 1;
+ uint32_t mmbc : 2;
+ uint32_t most : 3;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_pci_cfg56_s cn30xx;
+ struct cvmx_pci_cfg56_s cn31xx;
+ struct cvmx_pci_cfg56_s cn38xx;
+ struct cvmx_pci_cfg56_s cn38xxp2;
+ struct cvmx_pci_cfg56_s cn50xx;
+ struct cvmx_pci_cfg56_s cn58xx;
+ struct cvmx_pci_cfg56_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg56 cvmx_pci_cfg56_t;
+
+/**
+ * cvmx_pci_cfg57
+ *
+ * PCI_CFG57 = Fifty-eighth 32-bits of PCI config space (PCIX Status Register)
+ *
+ */
+union cvmx_pci_cfg57 {
+ uint32_t u32;
+ struct cvmx_pci_cfg57_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_30_31 : 2;
+ uint32_t scemr : 1; /**< Split Completion Error Message Received */
+ uint32_t mcrsd : 3; /**< Maximum Cumulative Read Size designed */
+ uint32_t mostd : 3; /**< Maximum Outstanding Split transaction designed */
+ uint32_t mmrbcd : 2; /**< Maximum Memory Read byte count designed */
+ uint32_t dc : 1; /**< Device Complexity
+ 0 = Simple Device
+ 1 = Bridge Device */
+ uint32_t usc : 1; /**< Unexpected Split Completion */
+ uint32_t scd : 1; /**< Split Completion Discarded */
+ uint32_t m133 : 1; /**< 133MHz Capable */
+ uint32_t w64 : 1; /**< Indicates a 32b(=0) or 64b(=1) device */
+ uint32_t bn : 8; /**< Bus Number. Updated on all configuration write
+ cycles. Its value is dependent upon the PCI/X
+ mode (0x11=PCI, 0xFF=PCIX). */
+ uint32_t dn : 5; /**< Device Number. Updated on all configuration
+ write cycles. */
+ uint32_t fn : 3; /**< Function Number */
+#else
+ uint32_t fn : 3;
+ uint32_t dn : 5;
+ uint32_t bn : 8;
+ uint32_t w64 : 1;
+ uint32_t m133 : 1;
+ uint32_t scd : 1;
+ uint32_t usc : 1;
+ uint32_t dc : 1;
+ uint32_t mmrbcd : 2;
+ uint32_t mostd : 3;
+ uint32_t mcrsd : 3;
+ uint32_t scemr : 1;
+ uint32_t reserved_30_31 : 2;
+#endif
+ } s;
+ struct cvmx_pci_cfg57_s cn30xx;
+ struct cvmx_pci_cfg57_s cn31xx;
+ struct cvmx_pci_cfg57_s cn38xx;
+ struct cvmx_pci_cfg57_s cn38xxp2;
+ struct cvmx_pci_cfg57_s cn50xx;
+ struct cvmx_pci_cfg57_s cn58xx;
+ struct cvmx_pci_cfg57_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg57 cvmx_pci_cfg57_t;
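+
+/* Illustrative sketch (not part of the original SDK): PCI_CFG57 captures the
+ * bus/device/function on configuration writes; unpacking is just a matter of
+ * reading the named fields.
+ */
+static inline void cvmx_pci_cfg57_example_bdf(uint32_t raw, unsigned int *bus,
+                                              unsigned int *dev, unsigned int *fn)
+{
+    cvmx_pci_cfg57_t cfg57;
+    cfg57.u32 = raw;
+    *bus = cfg57.s.bn;
+    *dev = cfg57.s.dn;
+    *fn = cfg57.s.fn;
+}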
+
+/**
+ * cvmx_pci_cfg58
+ *
+ * PCI_CFG58 = Fifty-ninth 32-bits of PCI config space (Power Management Capabilities Register)
+ *
+ */
+union cvmx_pci_cfg58 {
+ uint32_t u32;
+ struct cvmx_pci_cfg58_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pmes : 5; /**< PME Support (D0 to D3cold) */
+ uint32_t d2s : 1; /**< D2_Support */
+ uint32_t d1s : 1; /**< D1_Support */
+ uint32_t auxc : 3; /**< AUX_Current (0..375mA) */
+ uint32_t dsi : 1; /**< Device Specific Initialization */
+ uint32_t reserved_20_20 : 1;
+ uint32_t pmec : 1; /**< PME Clock */
+ uint32_t pcimiv : 3; /**< Indicates the version of the PCI Management
+ Interface Specification with which the core
+ complies.
+ 010b = Complies with PCI Management Interface
+ Specification Revision 1.1 */
+ uint32_t ncp : 8; /**< Next Capability Pointer */
+ uint32_t pmcid : 8; /**< Power Management Capability ID */
+#else
+ uint32_t pmcid : 8;
+ uint32_t ncp : 8;
+ uint32_t pcimiv : 3;
+ uint32_t pmec : 1;
+ uint32_t reserved_20_20 : 1;
+ uint32_t dsi : 1;
+ uint32_t auxc : 3;
+ uint32_t d1s : 1;
+ uint32_t d2s : 1;
+ uint32_t pmes : 5;
+#endif
+ } s;
+ struct cvmx_pci_cfg58_s cn30xx;
+ struct cvmx_pci_cfg58_s cn31xx;
+ struct cvmx_pci_cfg58_s cn38xx;
+ struct cvmx_pci_cfg58_s cn38xxp2;
+ struct cvmx_pci_cfg58_s cn50xx;
+ struct cvmx_pci_cfg58_s cn58xx;
+ struct cvmx_pci_cfg58_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg58 cvmx_pci_cfg58_t;
+
+/**
+ * cvmx_pci_cfg59
+ *
+ * PCI_CFG59 = Sixtieth 32-bits of PCI config space (Power Management Data/PMCSR Register(s))
+ *
+ */
+union cvmx_pci_cfg59 {
+ uint32_t u32;
+ struct cvmx_pci_cfg59_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pmdia : 8; /**< Power Management data input from application
+ (PME_DATA) */
+ uint32_t bpccen : 1; /**< BPCC_En (bus power/clock control) enable */
+ uint32_t bd3h : 1; /**< B2_B3\#, B2/B3 Support for D3hot */
+ uint32_t reserved_16_21 : 6;
+ uint32_t pmess : 1; /**< PME_Status sticky bit */
+ uint32_t pmedsia : 2; /**< PME_Data_Scale input from application
+ (PME_DATA_SCALE[1:0]) - device specific */
+ uint32_t pmds : 4; /**< Power Management Data_select */
+ uint32_t pmeens : 1; /**< PME_En sticky bit */
+ uint32_t reserved_2_7 : 6;
+ uint32_t ps : 2; /**< Power State (D0 to D3)
+ The N2 DOES NOT support D1/D2 Power Management
+ states, therefore writing to this register has
+ no effect (please refer to the PCI Power
+ Management Specification v1.1 for further
+ details about its R/W nature). This is not a
+ conventional R/W style register. */
+#else
+ uint32_t ps : 2;
+ uint32_t reserved_2_7 : 6;
+ uint32_t pmeens : 1;
+ uint32_t pmds : 4;
+ uint32_t pmedsia : 2;
+ uint32_t pmess : 1;
+ uint32_t reserved_16_21 : 6;
+ uint32_t bd3h : 1;
+ uint32_t bpccen : 1;
+ uint32_t pmdia : 8;
+#endif
+ } s;
+ struct cvmx_pci_cfg59_s cn30xx;
+ struct cvmx_pci_cfg59_s cn31xx;
+ struct cvmx_pci_cfg59_s cn38xx;
+ struct cvmx_pci_cfg59_s cn38xxp2;
+ struct cvmx_pci_cfg59_s cn50xx;
+ struct cvmx_pci_cfg59_s cn58xx;
+ struct cvmx_pci_cfg59_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg59 cvmx_pci_cfg59_t;
+
+/**
+ * cvmx_pci_cfg60
+ *
+ * PCI_CFG60 = Sixty-first 32-bits of PCI config space (MSI Capabilities Register)
+ *
+ */
+union cvmx_pci_cfg60 {
+ uint32_t u32;
+ struct cvmx_pci_cfg60_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t m64 : 1; /**< 32/64 b message */
+ uint32_t mme : 3; /**< Multiple Message Enable(1,2,4,8,16,32) */
+ uint32_t mmc : 3; /**< Multiple Message Capable(0=1,1=2,2=4,3=8,4=16,5=32) */
+ uint32_t msien : 1; /**< MSI Enable */
+ uint32_t ncp : 8; /**< Next Capability Pointer */
+ uint32_t msicid : 8; /**< MSI Capability ID */
+#else
+ uint32_t msicid : 8;
+ uint32_t ncp : 8;
+ uint32_t msien : 1;
+ uint32_t mmc : 3;
+ uint32_t mme : 3;
+ uint32_t m64 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_pci_cfg60_s cn30xx;
+ struct cvmx_pci_cfg60_s cn31xx;
+ struct cvmx_pci_cfg60_s cn38xx;
+ struct cvmx_pci_cfg60_s cn38xxp2;
+ struct cvmx_pci_cfg60_s cn50xx;
+ struct cvmx_pci_cfg60_s cn58xx;
+ struct cvmx_pci_cfg60_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg60 cvmx_pci_cfg60_t;
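+
+/* Illustrative sketch (not part of the original SDK): MMC encodes the MSI
+ * message count as a power of two (0=1 ... 5=32), so the number of vectors
+ * the device can advertise is 1 << MMC.
+ */
+static inline unsigned int cvmx_pci_cfg60_example_msi_vectors(uint32_t raw)
+{
+    cvmx_pci_cfg60_t cfg60;
+    cfg60.u32 = raw;
+    return 1u << cfg60.s.mmc; /* 0->1, 1->2, ..., 5->32 */
+}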
+
+/**
+ * cvmx_pci_cfg61
+ *
+ * PCI_CFG61 = Sixty-second 32-bits of PCI config space (MSI Lower Address Register)
+ *
+ */
+union cvmx_pci_cfg61 {
+ uint32_t u32;
+ struct cvmx_pci_cfg61_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t msi31t2 : 30; /**< App Specific MSI Address [31:2] */
+ uint32_t reserved_0_1 : 2;
+#else
+ uint32_t reserved_0_1 : 2;
+ uint32_t msi31t2 : 30;
+#endif
+ } s;
+ struct cvmx_pci_cfg61_s cn30xx;
+ struct cvmx_pci_cfg61_s cn31xx;
+ struct cvmx_pci_cfg61_s cn38xx;
+ struct cvmx_pci_cfg61_s cn38xxp2;
+ struct cvmx_pci_cfg61_s cn50xx;
+ struct cvmx_pci_cfg61_s cn58xx;
+ struct cvmx_pci_cfg61_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg61 cvmx_pci_cfg61_t;
+
+/**
+ * cvmx_pci_cfg62
+ *
+ * PCI_CFG62 = Sixty-third 32-bits of PCI config space (MSI Upper Address Register)
+ *
+ */
+union cvmx_pci_cfg62 {
+ uint32_t u32;
+ struct cvmx_pci_cfg62_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t msi : 32; /**< MSI Address [63:32] */
+#else
+ uint32_t msi : 32;
+#endif
+ } s;
+ struct cvmx_pci_cfg62_s cn30xx;
+ struct cvmx_pci_cfg62_s cn31xx;
+ struct cvmx_pci_cfg62_s cn38xx;
+ struct cvmx_pci_cfg62_s cn38xxp2;
+ struct cvmx_pci_cfg62_s cn50xx;
+ struct cvmx_pci_cfg62_s cn58xx;
+ struct cvmx_pci_cfg62_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg62 cvmx_pci_cfg62_t;
+
+/**
+ * cvmx_pci_cfg63
+ *
+ * PCI_CFG63 = Sixty-fourth 32-bits of PCI config space (MSI Message Data Register)
+ *
+ */
+union cvmx_pci_cfg63 {
+ uint32_t u32;
+ struct cvmx_pci_cfg63_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t msimd : 16; /**< MSI Message Data */
+#else
+ uint32_t msimd : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_pci_cfg63_s cn30xx;
+ struct cvmx_pci_cfg63_s cn31xx;
+ struct cvmx_pci_cfg63_s cn38xx;
+ struct cvmx_pci_cfg63_s cn38xxp2;
+ struct cvmx_pci_cfg63_s cn50xx;
+ struct cvmx_pci_cfg63_s cn58xx;
+ struct cvmx_pci_cfg63_s cn58xxp1;
+};
+typedef union cvmx_pci_cfg63 cvmx_pci_cfg63_t;
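+
+/* Example (not part of the original header): a minimal sketch of programming
+ * the MSI capability described by PCI_CFG60..PCI_CFG63 above. The accessors
+ * pci_cfg_read32()/pci_cfg_write32() are hypothetical placeholders for
+ * whatever PCI config-space access routine the platform provides.
+ */
+extern uint32_t pci_cfg_read32(unsigned reg);             /* hypothetical */
+extern void pci_cfg_write32(unsigned reg, uint32_t val);  /* hypothetical */
+
+static void example_enable_msi(uint64_t msi_addr, uint16_t msi_data)
+{
+    cvmx_pci_cfg60_t cfg60;
+    cvmx_pci_cfg61_t cfg61;
+    cvmx_pci_cfg62_t cfg62;
+    cvmx_pci_cfg63_t cfg63;
+
+    /* Program the 64-bit message address; bits [1:0] of the low word are
+       reserved, so only address bits [31:2] are written. */
+    cfg61.u32 = 0;
+    cfg61.s.msi31t2 = (uint32_t)(msi_addr >> 2) & 0x3fffffff;
+    pci_cfg_write32(61, cfg61.u32);
+    cfg62.u32 = 0;
+    cfg62.s.msi = (uint32_t)(msi_addr >> 32);
+    pci_cfg_write32(62, cfg62.u32);
+
+    /* Program the 16-bit message data. */
+    cfg63.u32 = 0;
+    cfg63.s.msimd = msi_data;
+    pci_cfg_write32(63, cfg63.u32);
+
+    /* Finally set MSI Enable, preserving the other capability fields. */
+    cfg60.u32 = pci_cfg_read32(60);
+    cfg60.s.msien = 1;
+    pci_cfg_write32(60, cfg60.u32);
+}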
+
+/**
+ * cvmx_pci_cnt_reg
+ *
+ * PCI_CNT_REG = PCI Clock Count Register
+ *
+ * This register is provided to software as a means to determine PCI Bus Type/Speed.
+ */
+union cvmx_pci_cnt_reg {
+ uint64_t u64;
+ struct cvmx_pci_cnt_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t hm_pcix : 1; /**< PCI Host Mode Sampled Bus Type (0:PCI/1:PCIX)
+ This field represents what OCTEON(in Host mode)
+ sampled as the 'intended' PCI Bus Type based on
+ the PCI_PCIXCAP pin. (see HM_SPEED Bus Type/Speed
+ encoding table). */
+ uint64_t hm_speed : 2; /**< PCI Host Mode Sampled Bus Speed
+ This field represents what OCTEON(in Host mode)
+ sampled as the 'intended' PCI Bus Speed based on
+ the PCI100, PCI_M66EN and PCI_PCIXCAP pins.
+ NOTE: This DOES NOT reflect what the actual PCI
+ Bus Type/Speed values are. They only indicate what
+ OCTEON sampled as the 'intended' values.
+ PCI Host Mode Sampled Bus Type/Speed Table:
+ M66EN | PCIXCAP | PCI100 | HM_PCIX | HM_SPEED[1:0]
+ ---------+---------+---------+----------+-------------
+ 0 | 0 | 0 | 0=PCI | 00=33 MHz
+ 0 | 0 | 1 | 0=PCI | 00=33 MHz
+ 0 | Z | 0 | 0=PCI | 01=66 MHz
+ 0 | Z | 1 | 0=PCI | 01=66 MHz
+ 1 | 0 | 0 | 0=PCI | 01=66 MHz
+ 1 | 0 | 1 | 0=PCI | 01=66 MHz
+ 1 | Z | 0 | 0=PCI | 01=66 MHz
+ 1 | Z | 1 | 0=PCI | 01=66 MHz
+ 0 | 1 | 1 | 1=PCIX | 10=100 MHz
+ 1 | 1 | 1 | 1=PCIX | 10=100 MHz
+ 0 | 1 | 0 | 1=PCIX | 11=133 MHz
+ 1 | 1 | 0 | 1=PCIX | 11=133 MHz
+ NOTE: PCIXCAP has tri-level value (0,1,Z). See PCI specification
+ for more details on board level hookup to achieve these
+ values.
+ NOTE: Software can use the NPI_PCI_INT_ARB_CFG[PCI_OVR]
+ to override the 'sampled' PCI Bus Type/Speed.
+ NOTE: Software can also use the PCI_CNT_REG[PCICNT] to determine
+ the exact PCI(X) Bus speed.
+ Example: PCI_REF_CLKIN=133MHz
+ PCI_HOST_MODE=1
+ PCI_M66EN=0
+ PCI_PCIXCAP=1
+ PCI_PCI100=1
+ For this example, OCTEON will generate
+ PCI_CLK_OUT=100MHz and drive the proper PCI
+ Initialization sequence (DEVSEL#=Deasserted,
+ STOP#=Asserted, TRDY#=Asserted) during PCI_RST_N
+ deassertion.
+ NOTE: The HM_SPEED field is only valid after
+ PLL_REF_CLK is active and PLL_DCOK is asserted.
+ (see HRM description for power-on/reset sequence).
+ NOTE: PCI_REF_CLKIN input must be 133MHz (and is used
+ to generate the PCI_CLK_OUT pin in Host Mode). */
+ uint64_t ap_pcix : 1; /**< PCI(X) Bus Type (0:PCI/1:PCIX)
+ At PCI_RST_N de-assertion, the PCI Initialization
+ pattern(PCI_DEVSEL_N, PCI_STOP_N, PCI_TRDY_N) is
+ captured to provide information to software regarding
+ the PCI Bus Type(PCI/PCIX) and PCI Bus Speed Range. */
+ uint64_t ap_speed : 2; /**< PCI(X) Bus Speed (0:33/1:66/2:100/3:133)
+ At PCI_RST_N de-assertion, the PCI Initialization
+ pattern(PCI_DEVSEL_N, PCI_STOP_N, PCI_TRDY_N) is
+ captured to provide information to software regarding
+ the PCI Bus Type(PCI/PCIX) and PCI Bus Speed Range.
+ PCI-X Initialization Pattern(see PCIX Spec):
+ PCI_DEVSEL_N PCI_STOP_N PCI_TRDY_N Mode MaxClk(ns) MinClk(ns) MinClk(MHz) MaxClk(MHz)
+ -------------+----------+----------+-------+---------+----------+----------+------------------
+ Deasserted Deasserted Deasserted PCI 33 -- 30 0 33
+ PCI 66 30 15 33 66
+ Deasserted Deasserted Asserted PCI-X 20 15 50 66
+ Deasserted Asserted Deasserted PCI-X 15 10 66 100
+ Deasserted Asserted Asserted PCI-X 10 7.5 100 133
+ Asserted Deasserted Deasserted PCI-X Reserved Reserved Reserved Reserved
+ Asserted Deasserted Asserted PCI-X Reserved Reserved Reserved Reserved
+ Asserted Asserted Deasserted PCI-X Reserved Reserved Reserved Reserved
+ Asserted Asserted Asserted PCI-X Reserved Reserved Reserved Reserved
+ NOTE: The PCI Bus speed 'assumed' from the initialization
+ pattern is really intended for an operational range.
+ For example: If PINIT=100, this indicates PCI-X in the
+ 100-133MHz range. The PCI_CNT field can be used to further
+ determine a more exacting PCI Bus frequency value if
+ required. */
+ uint64_t pcicnt : 32; /**< Free Running PCI Clock counter.
+ At PCI Reset, the PCICNT=0, and is auto-incremented
+ on every PCI clock and will auto-wrap back to zero
+ when saturated.
+ NOTE: Writes override the auto-increment to allow
+ software to preload any initial value.
+ The PCICNT field is provided to software as a means
+ to determine the PCI Bus Speed.
+ Assuming software has knowledge of the core frequency
+ (eclk), this register can be written with a value X,
+ wait 'n' core clocks(eclk) and then read later(Y) to
+ determine \#PCI clocks(Y-X) have elapsed within 'n' core
+ clocks to determine the PCI input Clock frequency. */
+#else
+ uint64_t pcicnt : 32;
+ uint64_t ap_speed : 2;
+ uint64_t ap_pcix : 1;
+ uint64_t hm_speed : 2;
+ uint64_t hm_pcix : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_pci_cnt_reg_s cn50xx;
+ struct cvmx_pci_cnt_reg_s cn58xx;
+ struct cvmx_pci_cnt_reg_s cn58xxp1;
+};
+typedef union cvmx_pci_cnt_reg cvmx_pci_cnt_reg_t;
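+
+/* Example (illustration only): estimating the PCI input clock frequency with
+ * PCI_CNT_REG[PCICNT], following the write-X/wait/read-Y procedure described
+ * above. cnt_reg_read64()/cnt_reg_write64() and wait_core_clocks() are
+ * assumed platform helpers (a CSR accessor and an eclk-accurate delay), not
+ * SDK functions.
+ */
+extern uint64_t cnt_reg_read64(void);       /* hypothetical */
+extern void cnt_reg_write64(uint64_t val);  /* hypothetical */
+extern void wait_core_clocks(uint64_t n);   /* hypothetical */
+
+static uint64_t example_pci_clock_hz(uint64_t core_clock_hz)
+{
+    const uint64_t n = core_clock_hz / 100;  /* sample over ~10 ms of eclk */
+    cvmx_pci_cnt_reg_t cnt;
+
+    /* Preload PCICNT with X = 0; writes override the auto-increment. */
+    cnt.u64 = cnt_reg_read64();
+    cnt.s.pcicnt = 0;
+    cnt_reg_write64(cnt.u64);
+
+    wait_core_clocks(n);                     /* wait 'n' core clocks */
+
+    /* (Y - X) PCI clocks elapsed within n core clocks. */
+    cnt.u64 = cnt_reg_read64();
+    return (cnt.s.pcicnt * core_clock_hz) / n;
+}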
+
+/**
+ * cvmx_pci_ctl_status_2
+ *
+ * PCI_CTL_STATUS_2 = PCI Control Status 2 Register
+ *
+ * Control status register accessible from both PCI and NCB.
+ */
+union cvmx_pci_ctl_status_2 {
+ uint32_t u32;
+ struct cvmx_pci_ctl_status_2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_29_31 : 3;
+ uint32_t bb1_hole : 3; /**< Big BAR 1 Hole
+ NOT IN PASS 1 NOR PASS 2
+ When PCI_CTL_STATUS_2[BB1]=1, this field defines
+ an encoded size of the upper BAR1 region which
+ OCTEON will mask out (ie: not respond to).
+ (see definition of BB1_HOLE and BB1_SIZ encodings
+ in the PCI_CTL_STATUS_2[BB1] definition below). */
+ uint32_t bb1_siz : 1; /**< Big BAR 1 Size
+ NOT IN PASS 1 NOR PASS 2
+ When PCI_CTL_STATUS_2[BB1]=1, this field defines
+ the programmable SIZE of BAR 1.
+ - 0: 1GB / 1: 2GB */
+ uint32_t bb_ca : 1; /**< Set to '1' for Big Bar Mode to do STT/LDT L2C
+ operations.
+ NOT IN PASS 1 NOR PASS 2 */
+ uint32_t bb_es : 2; /**< Big Bar Node Endian Swap Mode
+ - 0: No Swizzle
+ - 1: Byte Swizzle (per-QW)
+ - 2: Byte Swizzle (per-LW)
+ - 3: LongWord Swizzle
+ NOT IN PASS 1 NOR PASS 2 */
+ uint32_t bb1 : 1; /**< Big Bar 1 Enable
+ NOT IN PASS 1 NOR PASS 2
+ When PCI_CTL_STATUS_2[BB1] is set, the following differences
+ occur:
+ - OCTEON's BAR1 becomes somewhere in the range 512-2048 MB rather
+ than the default 128MB.
+ - The following table indicates the effective size of
+ BAR1 when BB1 is set:
+ BB1_SIZ BB1_HOLE Effective size Comment
+ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 0 0 1024 MB Normal 1GB BAR
+ 0 1 1008 MB 1 GB, 16 MB hole
+ 0 2 992 MB 1 GB, 32 MB hole
+ 0 3 960 MB 1 GB, 64 MB hole
+ 0 4 896 MB 1 GB,128 MB hole
+ 0 5 768 MB 1 GB,256 MB hole
+ 0 6 512 MB 1 GB,512 MB hole
+ 0 7 Illegal
+ 1 0 2048 MB Normal 2GB BAR
+ 1 1 2032 MB 2 GB, 16 MB hole
+ 1 2 2016 MB 2 GB, 32 MB hole
+ 1 3 1984 MB 2 GB, 64 MB hole
+ 1 4 1920 MB 2 GB,128 MB hole
+ 1 5 1792 MB 2 GB,256 MB hole
+ 1 6 1536 MB 2 GB,512 MB hole
+ 1 7 Illegal
+ - When BB1_SIZ is 0: PCI_CFG06[LBASE<2:0>] read as zero
+ and are ignored on write. BAR1 is an entirely ordinary
+ 1 GB (power-of-two) BAR in all aspects when BB1_HOLE is 0.
+ When BB1_HOLE is not zero, BAR1 addresses are programmed
+ as if the BAR were 1GB, but, OCTEON does not respond
+ to addresses in the programmed holes.
+ - When BB1_SIZ is 1: PCI_CFG06[LBASE<3:0>] read as zero
+ and are ignored on write. BAR1 is an entirely ordinary
+ 2 GB (power-of-two) BAR in all aspects when BB1_HOLE is 0.
+ When BB1_HOLE is not zero, BAR1 addresses are programmed
+ as if the BAR were 2GB, but, OCTEON does not respond
+ to addresses in the programmed holes.
+ - Note that the BB1_HOLE value has no effect on the
+ PCI_CFG06[LBASE] behavior. BB1_HOLE only affects whether
+ OCTEON accepts an address. BB1_SIZ does affect PCI_CFG06[LBASE]
+ behavior, however.
+ - The first 128MB, i.e. addresses on the PCI bus in the range
+ BAR1+0 .. BAR1+0x07FFFFFF
+ access OCTEON's DRAM addresses with PCI_BAR1_INDEX CSR's
+ as before
+ - The remaining address space, i.e. addresses
+ on the PCI bus in the range
+ BAR1+0x08000000 .. BAR1+size-1,
+ where size is the size of BAR1 as selected by the above
+ table (based on the BB1_SIZ and BB1_HOLE values), are mapped to
+ OCTEON physical DRAM addresses as follows:
+ PCI Address Range OCTEON Physical Address Range
+ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ BAR1+0x08000000 .. BAR1+size-1 | 0x88000000 .. 0x7FFFFFFF+size
+ and PCI_CTL_STATUS_2[BB_ES] is the endian-swap and
+ PCI_CTL_STATUS_2[BB_CA] is the L2 cache allocation bit
+ for these references.
+ The consequences of any burst that crosses the end of the PCI
+ Address Range for BAR1 are unpredictable.
+ - The consequences of any burst access that crosses the boundary
+ between BAR1+0x07FFFFFF and BAR1+0x08000000 are unpredictable in PCI-X
+ mode. OCTEON may disconnect PCI references at this boundary. */
+ uint32_t bb0 : 1; /**< Big Bar 0 Enable
+ NOT IN PASS 1 NOR PASS 2
+ When PCI_CTL_STATUS_2[BB0] is set, the following
+ differences occur:
+ - OCTEON's BAR0 becomes 2GB rather than the default 4KB.
+ PCI_CFG04[LBASE<18:0>] reads as zero and is ignored on write.
+ - OCTEON's BAR0 becomes burstable. (When BB0 is clear, OCTEON
+ single-phase disconnects PCI BAR0 reads and PCI/PCI-X BAR0
+ writes, and splits (burstably) PCI-X BAR0 reads.)
+ - The first 4KB, i.e. addresses on the PCI bus in the range
+ BAR0+0 .. BAR0+0xFFF
+ access OCTEON's PCI-type CSR's as when BB0 is clear.
+ - The remaining address space, i.e. addresses on the PCI bus
+ in the range
+ BAR0+0x1000 .. BAR0+0x7FFFFFFF
+ are mapped to OCTEON physical DRAM addresses as follows:
+ PCI Address Range OCTEON Physical Address Range
+ ------------------------------------+------------------------------
+ BAR0+0x00001000 .. BAR0+0x0FFFFFFF | 0x000001000 .. 0x00FFFFFFF
+ BAR0+0x10000000 .. BAR0+0x1FFFFFFF | 0x410000000 .. 0x41FFFFFFF
+ BAR0+0x20000000 .. BAR0+0x7FFFFFFF | 0x020000000 .. 0x07FFFFFFF
+ and PCI_CTL_STATUS_2[BB_ES] is the endian-swap and
+ PCI_CTL_STATUS_2[BB_CA] is the L2 cache allocation bit
+ for these references.
+ The consequences of any burst that crosses the end of the PCI
+ Address Range for BAR0 are unpredictable.
+ - The consequences of any burst access that crosses the boundary
+ between BAR0+0xFFF and BAR0+0x1000 are unpredictable in PCI-X
+ mode. OCTEON may disconnect PCI references at this boundary.
+ - The results of any burst read that crosses the boundary
+ between BAR0+0x0FFFFFFF and BAR0+0x10000000 are unpredictable.
+ The consequences of any burst write that crosses this same
+ boundary are unpredictable.
+ - The results of any burst read that crosses the boundary
+ between BAR0+0x1FFFFFFF and BAR0+0x20000000 are unpredictable.
+ The consequences of any burst write that crosses this same
+ boundary are unpredictable. */
+ uint32_t erst_n : 1; /**< Reset active Low. PASS-2 */
+ uint32_t bar2pres : 1; /**< From fuse block. When fuse(MIO_FUS_DAT3[BAR2_EN])
+ is NOT blown the value of this field is '0' after
+ reset and BAR2 is NOT present. When the fuse IS
+ blown the value of this field is '1' after reset
+ and BAR2 is present. Note that SW can change this
+ field after reset. This is a PASS-2 field. */
+ uint32_t scmtyp : 1; /**< Split Completion Message CMD Type (0=RD/1=WR)
+ When SCM=1, SCMTYP specifies the CMD intent (R/W) */
+ uint32_t scm : 1; /**< Split Completion Message Detected (Read or Write) */
+ uint32_t en_wfilt : 1; /**< When '1' the window-access filter is enabled.
+ Unfiltered writes are:
+ MIO, SubId0
+ MIO, SubId7
+ NPI, SubId0
+ NPI, SubId7
+ POW, SubId7
+ DFA, SubId7
+ IPD, SubId7
+ Unfiltered Reads are:
+ MIO, SubId0
+ MIO, SubId7
+ NPI, SubId0
+ NPI, SubId7
+ POW, SubId1
+ POW, SubId2
+ POW, SubId3
+ POW, SubId7
+ DFA, SubId7
+ IPD, SubId7 */
+ uint32_t reserved_14_14 : 1;
+ uint32_t ap_pcix : 1; /**< PCX Core Mode status (0=PCI Bus/1=PCIX)
+ If one or more of PCI_DEVSEL_N, PCI_STOP_N, and
+ PCI_TRDY_N are asserted at the rising edge of
+ PCI_RST_N, the device enters PCI-X mode.
+ Otherwise, the device enters conventional PCI
+ mode at the rising edge of RST#. */
+ uint32_t ap_64ad : 1; /**< PCX Core Bus status (0=32b Bus/1=64b Bus)
+ When PCI_RST_N pin is de-asserted, the state
+ of PCI_REQ64_N(driven by central agent) determines
+ the width of the PCI/X bus. */
+ uint32_t b12_bist : 1; /**< BIST Status For Memory In B12 */
+ uint32_t pmo_amod : 1; /**< PMO-ARB Mode (0=FP[HP=CMD1,LP=CMD0]/1=RR) */
+ uint32_t pmo_fpc : 3; /**< PMO-ARB Fixed Priority Counter
+ When PMO_AMOD=0 (FP mode), this field represents
+ the \# of CMD1 requests that are issued (at higher
+ priority) before a single lower priority CMD0
+ is allowed to issue (to ensure forward progress).
+ - 0: 1 CMD1 Request issued before CMD0 allowed
+ - ...
+ - 7: 8 CMD1 Requests issued before CMD0 allowed */
+ uint32_t tsr_hwm : 3; /**< Target Split-Read ADB(allowable disconnect boundary)
+ High Water Mark.
+ Specifies the number of ADBs(128 Byte aligned chunks)
+ that are accumulated(pending) BEFORE the Target Split
+ completion is attempted on the PCI bus.
+ - 0: RESERVED/ILLEGAL
+ - 1: 2 Pending ADBs (129B-256B)
+ - 2: 3 Pending ADBs (257B-384B)
+ - 3: 4 Pending ADBs (385B-512B)
+ - 4: 5 Pending ADBs (513B-640B)
+ - 5: 6 Pending ADBs (641B-768B)
+ - 6: 7 Pending ADBs (769B-896B)
+ - 7: 8 Pending ADBs (897B-1024B)
+ Example: Suppose a 1KB target memory request with
+ starting byte offset address[6:0]=0x7F is split by
+ the OCTEON and the TSR_HWM=1(2 ADBs).
+ The OCTEON will start the target split completion
+ on the PCI Bus after 1B(1st ADB)+128B(2nd ADB)=129B
+ of data have been received from memory (even though
+ the remaining 895B has not yet been received). The
+ OCTEON will continue the split completion until it
+ has consumed all of the pended split data. If the
+ full transaction length(1KB) of data was NOT entirely
+ transferred, then OCTEON will terminate the split
+ completion and again wait for another 2 ADB-aligned data
+ chunks(256B) of pended split data to be received from
+ memory before starting another split completion request.
+ This allows Octeon (as split completer) to send back
+ multiple split completions for a given large split
+ transaction without having to wait for the entire
+ transaction length to be received from memory.
+ NOTE: For split transaction sizes 'smaller' than the
+ specified TSR_HWM value, the split completion
+ is started when the last datum has been received from
+ memory.
+ NOTE: It is IMPERATIVE that this field NEVER BE
+ written to a ZERO value. A value of zero is
+ reserved/illegal and can result in PCIX bus hangs. */
+ uint32_t bar2_enb : 1; /**< When set to '1', BAR2 is enabled and will respond;
+ when cleared to '0', BAR2 accesses will be target-aborted. */
+ uint32_t bar2_esx : 2; /**< Value will be XORed with pci-address[37:36] to
+ determine the endian swap mode. */
+ uint32_t bar2_cax : 1; /**< Value will be XORed with pci-address[38] to
+ determine the L2 cache attribute.
+ When XOR result is 1, not cached in L2 */
+#else
+ uint32_t bar2_cax : 1;
+ uint32_t bar2_esx : 2;
+ uint32_t bar2_enb : 1;
+ uint32_t tsr_hwm : 3;
+ uint32_t pmo_fpc : 3;
+ uint32_t pmo_amod : 1;
+ uint32_t b12_bist : 1;
+ uint32_t ap_64ad : 1;
+ uint32_t ap_pcix : 1;
+ uint32_t reserved_14_14 : 1;
+ uint32_t en_wfilt : 1;
+ uint32_t scm : 1;
+ uint32_t scmtyp : 1;
+ uint32_t bar2pres : 1;
+ uint32_t erst_n : 1;
+ uint32_t bb0 : 1;
+ uint32_t bb1 : 1;
+ uint32_t bb_es : 2;
+ uint32_t bb_ca : 1;
+ uint32_t bb1_siz : 1;
+ uint32_t bb1_hole : 3;
+ uint32_t reserved_29_31 : 3;
+#endif
+ } s;
+ struct cvmx_pci_ctl_status_2_s cn30xx;
+ struct cvmx_pci_ctl_status_2_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t erst_n : 1; /**< Reset active Low. */
+ uint32_t bar2pres : 1; /**< From fuse block. When fuse(MIO_FUS_DAT3[BAR2_EN])
+ is NOT blown the value of this field is '0' after
+ reset and BAR2 is NOT present. When the fuse IS
+ blown the value of this field is '1' after reset
+ and BAR2 is present. Note that SW can change this
+ field after reset. */
+ uint32_t scmtyp : 1; /**< Split Completion Message CMD Type (0=RD/1=WR)
+ When SCM=1, SCMTYP specifies the CMD intent (R/W) */
+ uint32_t scm : 1; /**< Split Completion Message Detected (Read or Write) */
+ uint32_t en_wfilt : 1; /**< When '1' the window-access filter is enabled.
+ Unfiltered writes are:
+ MIO, SubId0
+ MIO, SubId7
+ NPI, SubId0
+ NPI, SubId7
+ POW, SubId7
+ DFA, SubId7
+ IPD, SubId7
+ USBN, SubId7
+ Unfiltered Reads are:
+ MIO, SubId0
+ MIO, SubId7
+ NPI, SubId0
+ NPI, SubId7
+ POW, SubId1
+ POW, SubId2
+ POW, SubId3
+ POW, SubId7
+ DFA, SubId7
+ IPD, SubId7
+ USBN, SubId7 */
+ uint32_t reserved_14_14 : 1;
+ uint32_t ap_pcix : 1; /**< PCX Core Mode status (0=PCI Bus/1=PCIX) */
+ uint32_t ap_64ad : 1; /**< PCX Core Bus status (0=32b Bus/1=64b Bus) */
+ uint32_t b12_bist : 1; /**< BIST Status For Memory In B12 */
+ uint32_t pmo_amod : 1; /**< PMO-ARB Mode (0=FP[HP=CMD1,LP=CMD0]/1=RR) */
+ uint32_t pmo_fpc : 3; /**< PMO-ARB Fixed Priority Counter
+ When PMO_AMOD=0 (FP mode), this field represents
+ the \# of CMD1 requests that are issued (at higher
+ priority) before a single lower priority CMD0
+ is allowed to issue (to ensure forward progress).
+ - 0: 1 CMD1 Request issued before CMD0 allowed
+ - ...
+ - 7: 8 CMD1 Requests issued before CMD0 allowed */
+ uint32_t tsr_hwm : 3; /**< Target Split-Read ADB(allowable disconnect boundary)
+ High Water Mark.
+ Specifies the number of ADBs(128 Byte aligned chunks)
+ that are accumulated(pending) BEFORE the Target Split
+ completion is attempted on the PCI bus.
+ - 0: RESERVED/ILLEGAL
+ - 1: 2 Pending ADBs (129B-256B)
+ - 2: 3 Pending ADBs (257B-384B)
+ - 3: 4 Pending ADBs (385B-512B)
+ - 4: 5 Pending ADBs (513B-640B)
+ - 5: 6 Pending ADBs (641B-768B)
+ - 6: 7 Pending ADBs (769B-896B)
+ - 7: 8 Pending ADBs (897B-1024B)
+ Example: Suppose a 1KB target memory request with
+ starting byte offset address[6:0]=0x7F is split by
+ the OCTEON and the TSR_HWM=1(2 ADBs).
+ The OCTEON will start the target split completion
+ on the PCI Bus after 1B(1st ADB)+128B(2nd ADB)=129B
+ of data have been received from memory (even though
+ the remaining 895B has not yet been received). The
+ OCTEON will continue the split completion until it
+ has consumed all of the pended split data. If the
+ full transaction length(1KB) of data was NOT entirely
+ transferred, then OCTEON will terminate the split
+ completion and again wait for another 2 ADB-aligned data
+ chunks(256B) of pended split data to be received from
+ memory before starting another split completion request.
+ This allows Octeon (as split completer) to send back
+ multiple split completions for a given large split
+ transaction without having to wait for the entire
+ transaction length to be received from memory.
+ NOTE: For split transaction sizes 'smaller' than the
+ specified TSR_HWM value, the split completion
+ is started when the last datum has been received from
+ memory.
+ NOTE: It is IMPERATIVE that this field NEVER BE
+ written to a ZERO value. A value of zero is
+ reserved/illegal and can result in PCIX bus hangs. */
+ uint32_t bar2_enb : 1; /**< When set to '1', BAR2 is enabled and will respond;
+ when cleared to '0', BAR2 accesses will be target-aborted. */
+ uint32_t bar2_esx : 2; /**< Value will be XORed with pci-address[37:36] to
+ determine the endian swap mode. */
+ uint32_t bar2_cax : 1; /**< Value will be XORed with pci-address[38] to
+ determine the L2 cache attribute.
+ When XOR result is 1, not allocated in L2 cache */
+#else
+ uint32_t bar2_cax : 1;
+ uint32_t bar2_esx : 2;
+ uint32_t bar2_enb : 1;
+ uint32_t tsr_hwm : 3;
+ uint32_t pmo_fpc : 3;
+ uint32_t pmo_amod : 1;
+ uint32_t b12_bist : 1;
+ uint32_t ap_64ad : 1;
+ uint32_t ap_pcix : 1;
+ uint32_t reserved_14_14 : 1;
+ uint32_t en_wfilt : 1;
+ uint32_t scm : 1;
+ uint32_t scmtyp : 1;
+ uint32_t bar2pres : 1;
+ uint32_t erst_n : 1;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } cn31xx;
+ struct cvmx_pci_ctl_status_2_s cn38xx;
+ struct cvmx_pci_ctl_status_2_cn31xx cn38xxp2;
+ struct cvmx_pci_ctl_status_2_s cn50xx;
+ struct cvmx_pci_ctl_status_2_s cn58xx;
+ struct cvmx_pci_ctl_status_2_s cn58xxp1;
+};
+typedef union cvmx_pci_ctl_status_2 cvmx_pci_ctl_status_2_t;
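+
+/* Example (illustration only): computing the effective BAR1 size from the
+ * BB1_SIZ/BB1_HOLE encodings tabulated in the BB1 description above.
+ * Returns the size in MB, or 0 for the reserved/illegal BB1_HOLE=7 case.
+ */
+static unsigned example_bar1_size_mb(unsigned bb1_siz, unsigned bb1_hole)
+{
+    unsigned base_mb = bb1_siz ? 2048 : 1024;   /* 2 GB or 1 GB base BAR */
+
+    if (bb1_hole == 7)
+        return 0;                               /* illegal encoding */
+    /* Hole sizes are 0, 16, 32, 64, 128, 256, 512 MB for BB1_HOLE = 0..6. */
+    return base_mb - (bb1_hole ? (8u << bb1_hole) : 0);
+}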
+
+/**
+ * cvmx_pci_dbell#
+ *
+ * PCI_DBELL0 = PCI Doorbell-0
+ *
+ * The value to write to the doorbell 0 register. The value in this register is acted upon when the
+ * least-significant-byte of this register is written.
+ */
+union cvmx_pci_dbellx {
+ uint32_t u32;
+ struct cvmx_pci_dbellx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t inc_val : 16; /**< Software writes this register with the
+ number of new Instructions to be processed
+ on the Instruction Queue. When read this
+ register contains the last write value. */
+#else
+ uint32_t inc_val : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_pci_dbellx_s cn30xx;
+ struct cvmx_pci_dbellx_s cn31xx;
+ struct cvmx_pci_dbellx_s cn38xx;
+ struct cvmx_pci_dbellx_s cn38xxp2;
+ struct cvmx_pci_dbellx_s cn50xx;
+ struct cvmx_pci_dbellx_s cn58xx;
+ struct cvmx_pci_dbellx_s cn58xxp1;
+};
+typedef union cvmx_pci_dbellx cvmx_pci_dbellx_t;
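+
+/* Example (illustration only): ringing doorbell 0 to advertise newly queued
+ * instructions. Per the note above, the register is only acted upon when its
+ * least-significant byte is written, which a full 32-bit store satisfies.
+ * dbell_write32() is a hypothetical stand-in for the platform's BAR0 store.
+ */
+extern void dbell_write32(unsigned idx, uint32_t val);  /* hypothetical */
+
+static void example_ring_doorbell0(uint16_t new_instructions)
+{
+    cvmx_pci_dbellx_t dbell;
+
+    dbell.u32 = 0;
+    dbell.s.inc_val = new_instructions;  /* count of new instructions */
+    dbell_write32(0, dbell.u32);         /* index 0 selects PCI_DBELL0 */
+}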
+
+/**
+ * cvmx_pci_dma_cnt#
+ *
+ * PCI_DMA_CNT0 = PCI DMA Count0
+ *
+ * Keeps track of the number of DMAs or bytes sent by DMAs. The value in this register is acted upon when the
+ * least-significant-byte of this register is written.
+ */
+union cvmx_pci_dma_cntx {
+ uint32_t u32;
+ struct cvmx_pci_dma_cntx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dma_cnt : 32; /**< Update with the number of DMAs completed or the
+ number of bytes sent for DMAs associated with
+ this counter. When this register is written, the
+ value written to [15:0] will be subtracted from
+ the value in this register. */
+#else
+ uint32_t dma_cnt : 32;
+#endif
+ } s;
+ struct cvmx_pci_dma_cntx_s cn30xx;
+ struct cvmx_pci_dma_cntx_s cn31xx;
+ struct cvmx_pci_dma_cntx_s cn38xx;
+ struct cvmx_pci_dma_cntx_s cn38xxp2;
+ struct cvmx_pci_dma_cntx_s cn50xx;
+ struct cvmx_pci_dma_cntx_s cn58xx;
+ struct cvmx_pci_dma_cntx_s cn58xxp1;
+};
+typedef union cvmx_pci_dma_cntx cvmx_pci_dma_cntx_t;
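+
+/* Example (illustration only): acknowledging completed DMAs by writing the
+ * handled count back to PCI_DMA_CNT0, which the hardware subtracts per the
+ * note above. dma_cnt_read32()/dma_cnt_write32() are hypothetical accessors.
+ */
+extern uint32_t dma_cnt_read32(unsigned idx);            /* hypothetical */
+extern void dma_cnt_write32(unsigned idx, uint32_t val); /* hypothetical */
+
+static void example_ack_dma0(void)
+{
+    uint32_t pending = dma_cnt_read32(0);  /* DMAs (or bytes) accumulated */
+
+    /* ... process 'pending' completions here ... */
+
+    /* Only bits [15:0] of the written value are subtracted. */
+    dma_cnt_write32(0, pending & 0xffff);
+}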
+
+/**
+ * cvmx_pci_dma_int_lev#
+ *
+ * PCI_DMA_INT_LEV0 = PCI DMA Sent Interrupt Level For DMA 0
+ *
+ * Interrupt when the value in PCI_DMA_CNT0 is equal to or greater than the register value.
+ */
+union cvmx_pci_dma_int_levx {
+ uint32_t u32;
+ struct cvmx_pci_dma_int_levx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_cnt : 32; /**< When PCI_DMA_CNT0 exceeds the value in this
+ register, DCNT0 will be set in PCI_INT_SUM and PCI_INT_SUM2. */
+#else
+ uint32_t pkt_cnt : 32;
+#endif
+ } s;
+ struct cvmx_pci_dma_int_levx_s cn30xx;
+ struct cvmx_pci_dma_int_levx_s cn31xx;
+ struct cvmx_pci_dma_int_levx_s cn38xx;
+ struct cvmx_pci_dma_int_levx_s cn38xxp2;
+ struct cvmx_pci_dma_int_levx_s cn50xx;
+ struct cvmx_pci_dma_int_levx_s cn58xx;
+ struct cvmx_pci_dma_int_levx_s cn58xxp1;
+};
+typedef union cvmx_pci_dma_int_levx cvmx_pci_dma_int_levx_t;
+
+/**
+ * cvmx_pci_dma_time#
+ *
+ * PCI_DMA_TIME0 = PCI DMA Sent Timer For DMA0
+ *
+ * Time to wait from DMA being sent before issuing an interrupt.
+ */
+union cvmx_pci_dma_timex {
+ uint32_t u32;
+ struct cvmx_pci_dma_timex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dma_time : 32; /**< Number of PCI clock cycles to wait, after
+ PCI_DMA_CNT0 becomes non-zero, before setting
+ DTIME0 in PCI_INT_SUM and PCI_INT_SUM2.
+ The timer is reset when PCI_INT_SUM[27]
+ is cleared. */
+#else
+ uint32_t dma_time : 32;
+#endif
+ } s;
+ struct cvmx_pci_dma_timex_s cn30xx;
+ struct cvmx_pci_dma_timex_s cn31xx;
+ struct cvmx_pci_dma_timex_s cn38xx;
+ struct cvmx_pci_dma_timex_s cn38xxp2;
+ struct cvmx_pci_dma_timex_s cn50xx;
+ struct cvmx_pci_dma_timex_s cn58xx;
+ struct cvmx_pci_dma_timex_s cn58xxp1;
+};
+typedef union cvmx_pci_dma_timex cvmx_pci_dma_timex_t;
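+
+/* Example (illustration only): arming the count/time interrupt pair for DMA
+ * engine 0. DCNT0 fires once PCI_DMA_CNT0 exceeds the programmed level, and
+ * DTIME0 fires once the counter has been non-zero for the programmed number
+ * of PCI clocks. The two *_write32() helpers are hypothetical accessors.
+ */
+extern void dma_int_lev_write32(unsigned idx, uint32_t val);  /* hypothetical */
+extern void dma_time_write32(unsigned idx, uint32_t val);     /* hypothetical */
+
+static void example_arm_dma0_irq(uint32_t count_level, uint32_t pci_clocks)
+{
+    cvmx_pci_dma_int_levx_t lev;
+    cvmx_pci_dma_timex_t tim;
+
+    lev.u32 = 0;
+    lev.s.pkt_cnt = count_level;  /* DCNT0 when PCI_DMA_CNT0 > level */
+    dma_int_lev_write32(0, lev.u32);
+
+    tim.u32 = 0;
+    tim.s.dma_time = pci_clocks;  /* DTIME0 after this many PCI clocks */
+    dma_time_write32(0, tim.u32);
+}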
+
+/**
+ * cvmx_pci_instr_count#
+ *
+ * PCI_INSTR_COUNT0 = PCI Instructions Outstanding Request Count
+ *
+ * The number of instructions to be fetched by the Instruction-0 Engine.
+ */
+union cvmx_pci_instr_countx {
+ uint32_t u32;
+ struct cvmx_pci_instr_countx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t icnt : 32; /**< Number of Instructions to be fetched by the
+ Instruction Engine.
+ A write of any non-zero value to this register
+ will clear it. */
+#else
+ uint32_t icnt : 32;
+#endif
+ } s;
+ struct cvmx_pci_instr_countx_s cn30xx;
+ struct cvmx_pci_instr_countx_s cn31xx;
+ struct cvmx_pci_instr_countx_s cn38xx;
+ struct cvmx_pci_instr_countx_s cn38xxp2;
+ struct cvmx_pci_instr_countx_s cn50xx;
+ struct cvmx_pci_instr_countx_s cn58xx;
+ struct cvmx_pci_instr_countx_s cn58xxp1;
+};
+typedef union cvmx_pci_instr_countx cvmx_pci_instr_countx_t;
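+
+/* Example (illustration only): sampling and clearing the outstanding
+ * instruction count, using the any-non-zero-write-clears semantics noted
+ * above. instr_cnt_read32()/instr_cnt_write32() are hypothetical accessors.
+ */
+extern uint32_t instr_cnt_read32(unsigned idx);            /* hypothetical */
+extern void instr_cnt_write32(unsigned idx, uint32_t val); /* hypothetical */
+
+static uint32_t example_sample_instr_count0(void)
+{
+    uint32_t icnt = instr_cnt_read32(0);
+
+    if (icnt != 0)
+        instr_cnt_write32(0, 1);  /* any non-zero write clears ICNT */
+    return icnt;
+}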
+
+/**
+ * cvmx_pci_int_enb
+ *
+ * PCI_INT_ENB = PCI Interrupt Enable
+ *
+ * Enables interrupt bits in the PCI_INT_SUM register.
+ */
+union cvmx_pci_int_enb {
+ uint64_t u64;
+ struct cvmx_pci_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[33] */
+ uint64_t ill_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[32] */
+ uint64_t win_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[31] */
+ uint64_t dma1_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[30] */
+ uint64_t dma0_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[29] */
+ uint64_t idtime1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[28] */
+ uint64_t idtime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[27] */
+ uint64_t idcnt1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[26] */
+ uint64_t idcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[25] */
+ uint64_t iptime3 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[24] */
+ uint64_t iptime2 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[23] */
+ uint64_t iptime1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[22] */
+ uint64_t iptime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[21] */
+ uint64_t ipcnt3 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[20] */
+ uint64_t ipcnt2 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[19] */
+ uint64_t ipcnt1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[18] */
+ uint64_t ipcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[17] */
+ uint64_t irsl_int : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[16] */
+ uint64_t ill_rrd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[15] */
+ uint64_t ill_rwr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[14] */
+ uint64_t idperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[13] */
+ uint64_t iaperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[12] */
+ uint64_t iserr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[11] */
+ uint64_t itsr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[10] */
+ uint64_t imsc_msg : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[9] */
+ uint64_t imsi_mabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[8] */
+ uint64_t imsi_tabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[7] */
+ uint64_t imsi_per : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[6] */
+ uint64_t imr_tto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[5] */
+ uint64_t imr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[4] */
+ uint64_t itr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[3] */
+ uint64_t imr_wtto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[2] */
+ uint64_t imr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[1] */
+ uint64_t itr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[0] */
+#else
+ uint64_t itr_wabt : 1;
+ uint64_t imr_wabt : 1;
+ uint64_t imr_wtto : 1;
+ uint64_t itr_abt : 1;
+ uint64_t imr_abt : 1;
+ uint64_t imr_tto : 1;
+ uint64_t imsi_per : 1;
+ uint64_t imsi_tabt : 1;
+ uint64_t imsi_mabt : 1;
+ uint64_t imsc_msg : 1;
+ uint64_t itsr_abt : 1;
+ uint64_t iserr : 1;
+ uint64_t iaperr : 1;
+ uint64_t idperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t irsl_int : 1;
+ uint64_t ipcnt0 : 1;
+ uint64_t ipcnt1 : 1;
+ uint64_t ipcnt2 : 1;
+ uint64_t ipcnt3 : 1;
+ uint64_t iptime0 : 1;
+ uint64_t iptime1 : 1;
+ uint64_t iptime2 : 1;
+ uint64_t iptime3 : 1;
+ uint64_t idcnt0 : 1;
+ uint64_t idcnt1 : 1;
+ uint64_t idtime0 : 1;
+ uint64_t idtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_pci_int_enb_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[33] */
+ uint64_t ill_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[32] */
+ uint64_t win_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[31] */
+ uint64_t dma1_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[30] */
+ uint64_t dma0_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[29] */
+ uint64_t idtime1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[28] */
+ uint64_t idtime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[27] */
+ uint64_t idcnt1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[26] */
+ uint64_t idcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[25] */
+ uint64_t reserved_22_24 : 3;
+ uint64_t iptime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[21] */
+ uint64_t reserved_18_20 : 3;
+ uint64_t ipcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[17] */
+ uint64_t irsl_int : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[16] */
+ uint64_t ill_rrd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[15] */
+ uint64_t ill_rwr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[14] */
+ uint64_t idperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[13] */
+ uint64_t iaperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[12] */
+ uint64_t iserr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[11] */
+ uint64_t itsr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[10] */
+ uint64_t imsc_msg : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[9] */
+ uint64_t imsi_mabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[8] */
+ uint64_t imsi_tabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[7] */
+ uint64_t imsi_per : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[6] */
+ uint64_t imr_tto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[5] */
+ uint64_t imr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[4] */
+ uint64_t itr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[3] */
+ uint64_t imr_wtto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[2] */
+ uint64_t imr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[1] */
+ uint64_t itr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[0] */
+#else
+ uint64_t itr_wabt : 1;
+ uint64_t imr_wabt : 1;
+ uint64_t imr_wtto : 1;
+ uint64_t itr_abt : 1;
+ uint64_t imr_abt : 1;
+ uint64_t imr_tto : 1;
+ uint64_t imsi_per : 1;
+ uint64_t imsi_tabt : 1;
+ uint64_t imsi_mabt : 1;
+ uint64_t imsc_msg : 1;
+ uint64_t itsr_abt : 1;
+ uint64_t iserr : 1;
+ uint64_t iaperr : 1;
+ uint64_t idperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t irsl_int : 1;
+ uint64_t ipcnt0 : 1;
+ uint64_t reserved_18_20 : 3;
+ uint64_t iptime0 : 1;
+ uint64_t reserved_22_24 : 3;
+ uint64_t idcnt0 : 1;
+ uint64_t idcnt1 : 1;
+ uint64_t idtime0 : 1;
+ uint64_t idtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } cn30xx;
+ struct cvmx_pci_int_enb_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[33] */
+ uint64_t ill_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[32] */
+ uint64_t win_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[31] */
+ uint64_t dma1_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[30] */
+ uint64_t dma0_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[29] */
+ uint64_t idtime1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[28] */
+ uint64_t idtime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[27] */
+ uint64_t idcnt1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[26] */
+ uint64_t idcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[25] */
+ uint64_t reserved_23_24 : 2;
+ uint64_t iptime1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[22] */
+ uint64_t iptime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[21] */
+ uint64_t reserved_19_20 : 2;
+ uint64_t ipcnt1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[18] */
+ uint64_t ipcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[17] */
+ uint64_t irsl_int : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[16] */
+ uint64_t ill_rrd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[15] */
+ uint64_t ill_rwr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[14] */
+ uint64_t idperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[13] */
+ uint64_t iaperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[12] */
+ uint64_t iserr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[11] */
+ uint64_t itsr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[10] */
+ uint64_t imsc_msg : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[9] */
+ uint64_t imsi_mabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[8] */
+ uint64_t imsi_tabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[7] */
+ uint64_t imsi_per : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[6] */
+ uint64_t imr_tto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[5] */
+ uint64_t imr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[4] */
+ uint64_t itr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[3] */
+ uint64_t imr_wtto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[2] */
+ uint64_t imr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[1] */
+ uint64_t itr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[0] */
+#else
+ uint64_t itr_wabt : 1;
+ uint64_t imr_wabt : 1;
+ uint64_t imr_wtto : 1;
+ uint64_t itr_abt : 1;
+ uint64_t imr_abt : 1;
+ uint64_t imr_tto : 1;
+ uint64_t imsi_per : 1;
+ uint64_t imsi_tabt : 1;
+ uint64_t imsi_mabt : 1;
+ uint64_t imsc_msg : 1;
+ uint64_t itsr_abt : 1;
+ uint64_t iserr : 1;
+ uint64_t iaperr : 1;
+ uint64_t idperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t irsl_int : 1;
+ uint64_t ipcnt0 : 1;
+ uint64_t ipcnt1 : 1;
+ uint64_t reserved_19_20 : 2;
+ uint64_t iptime0 : 1;
+ uint64_t iptime1 : 1;
+ uint64_t reserved_23_24 : 2;
+ uint64_t idcnt0 : 1;
+ uint64_t idcnt1 : 1;
+ uint64_t idtime0 : 1;
+ uint64_t idtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } cn31xx;
+ struct cvmx_pci_int_enb_s cn38xx;
+ struct cvmx_pci_int_enb_s cn38xxp2;
+ struct cvmx_pci_int_enb_cn31xx cn50xx;
+ struct cvmx_pci_int_enb_s cn58xx;
+ struct cvmx_pci_int_enb_s cn58xxp1;
+};
+typedef union cvmx_pci_int_enb cvmx_pci_int_enb_t;
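+
+/* Example (illustration only): unmasking a few INTA# sources in PCI_INT_ENB
+ * with a read-modify-write so that bits enabled elsewhere are preserved.
+ * int_enb_read64()/int_enb_write64() are hypothetical CSR accessors.
+ */
+extern uint64_t int_enb_read64(void);       /* hypothetical */
+extern void int_enb_write64(uint64_t val);  /* hypothetical */
+
+static void example_enable_dma0_irqs(void)
+{
+    cvmx_pci_int_enb_t enb;
+
+    enb.u64 = int_enb_read64();
+    enb.s.idcnt0 = 1;   /* DMA counter 0 threshold (PCI_INT_SUM[25]) */
+    enb.s.idtime0 = 1;  /* DMA counter 0 timer     (PCI_INT_SUM[27]) */
+    int_enb_write64(enb.u64);
+}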
+
+/**
+ * cvmx_pci_int_enb2
+ *
+ * PCI_INT_ENB2 = PCI Interrupt Enable2 Register
+ *
+ * Enables interrupt bits in the PCI_INT_SUM2 register.
+ */
+union cvmx_pci_int_enb2 {
+ uint64_t u64;
+ struct cvmx_pci_int_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[33] */
+ uint64_t ill_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[32] */
+ uint64_t win_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[31] */
+ uint64_t dma1_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[30] */
+ uint64_t dma0_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[29] */
+ uint64_t rdtime1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[28] */
+ uint64_t rdtime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[27] */
+ uint64_t rdcnt1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[26] */
+ uint64_t rdcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[25] */
+ uint64_t rptime3 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[24] */
+ uint64_t rptime2 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[23] */
+ uint64_t rptime1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[22] */
+ uint64_t rptime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[21] */
+ uint64_t rpcnt3 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[20] */
+ uint64_t rpcnt2 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[19] */
+ uint64_t rpcnt1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[18] */
+ uint64_t rpcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[17] */
+ uint64_t rrsl_int : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[16] */
+ uint64_t ill_rrd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[15] */
+ uint64_t ill_rwr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[14] */
+ uint64_t rdperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[13] */
+ uint64_t raperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[12] */
+ uint64_t rserr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[11] */
+ uint64_t rtsr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[10] */
+ uint64_t rmsc_msg : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[9] */
+ uint64_t rmsi_mabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[8] */
+ uint64_t rmsi_tabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[7] */
+ uint64_t rmsi_per : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[6] */
+ uint64_t rmr_tto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[5] */
+ uint64_t rmr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[4] */
+ uint64_t rtr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[3] */
+ uint64_t rmr_wtto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[2] */
+ uint64_t rmr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[1] */
+ uint64_t rtr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[0] */
+#else
+ uint64_t rtr_wabt : 1;
+ uint64_t rmr_wabt : 1;
+ uint64_t rmr_wtto : 1;
+ uint64_t rtr_abt : 1;
+ uint64_t rmr_abt : 1;
+ uint64_t rmr_tto : 1;
+ uint64_t rmsi_per : 1;
+ uint64_t rmsi_tabt : 1;
+ uint64_t rmsi_mabt : 1;
+ uint64_t rmsc_msg : 1;
+ uint64_t rtsr_abt : 1;
+ uint64_t rserr : 1;
+ uint64_t raperr : 1;
+ uint64_t rdperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t rrsl_int : 1;
+ uint64_t rpcnt0 : 1;
+ uint64_t rpcnt1 : 1;
+ uint64_t rpcnt2 : 1;
+ uint64_t rpcnt3 : 1;
+ uint64_t rptime0 : 1;
+ uint64_t rptime1 : 1;
+ uint64_t rptime2 : 1;
+ uint64_t rptime3 : 1;
+ uint64_t rdcnt0 : 1;
+ uint64_t rdcnt1 : 1;
+ uint64_t rdtime0 : 1;
+ uint64_t rdtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_pci_int_enb2_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[33] */
+ uint64_t ill_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[32] */
+ uint64_t win_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[31] */
+ uint64_t dma1_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[30] */
+ uint64_t dma0_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[29] */
+ uint64_t rdtime1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[28] */
+ uint64_t rdtime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[27] */
+ uint64_t rdcnt1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[26] */
+ uint64_t rdcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[25] */
+ uint64_t reserved_22_24 : 3;
+ uint64_t rptime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[21] */
+ uint64_t reserved_18_20 : 3;
+ uint64_t rpcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[17] */
+ uint64_t rrsl_int : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[16] */
+ uint64_t ill_rrd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[15] */
+ uint64_t ill_rwr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[14] */
+ uint64_t rdperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[13] */
+ uint64_t raperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[12] */
+ uint64_t rserr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[11] */
+ uint64_t rtsr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[10] */
+ uint64_t rmsc_msg : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[9] */
+ uint64_t rmsi_mabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[8] */
+ uint64_t rmsi_tabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[7] */
+ uint64_t rmsi_per : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[6] */
+ uint64_t rmr_tto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[5] */
+ uint64_t rmr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[4] */
+ uint64_t rtr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[3] */
+ uint64_t rmr_wtto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[2] */
+ uint64_t rmr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[1] */
+ uint64_t rtr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[0] */
+#else
+ uint64_t rtr_wabt : 1;
+ uint64_t rmr_wabt : 1;
+ uint64_t rmr_wtto : 1;
+ uint64_t rtr_abt : 1;
+ uint64_t rmr_abt : 1;
+ uint64_t rmr_tto : 1;
+ uint64_t rmsi_per : 1;
+ uint64_t rmsi_tabt : 1;
+ uint64_t rmsi_mabt : 1;
+ uint64_t rmsc_msg : 1;
+ uint64_t rtsr_abt : 1;
+ uint64_t rserr : 1;
+ uint64_t raperr : 1;
+ uint64_t rdperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t rrsl_int : 1;
+ uint64_t rpcnt0 : 1;
+ uint64_t reserved_18_20 : 3;
+ uint64_t rptime0 : 1;
+ uint64_t reserved_22_24 : 3;
+ uint64_t rdcnt0 : 1;
+ uint64_t rdcnt1 : 1;
+ uint64_t rdtime0 : 1;
+ uint64_t rdtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } cn30xx;
+ struct cvmx_pci_int_enb2_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[33] */
+ uint64_t ill_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[32] */
+ uint64_t win_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[31] */
+ uint64_t dma1_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[30] */
+ uint64_t dma0_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[29] */
+ uint64_t rdtime1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[28] */
+ uint64_t rdtime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[27] */
+ uint64_t rdcnt1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[26] */
+ uint64_t rdcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[25] */
+ uint64_t reserved_23_24 : 2;
+ uint64_t rptime1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[22] */
+ uint64_t rptime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[21] */
+ uint64_t reserved_19_20 : 2;
+ uint64_t rpcnt1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[18] */
+ uint64_t rpcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[17] */
+ uint64_t rrsl_int : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[16] */
+ uint64_t ill_rrd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[15] */
+ uint64_t ill_rwr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[14] */
+ uint64_t rdperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[13] */
+ uint64_t raperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[12] */
+ uint64_t rserr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[11] */
+ uint64_t rtsr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[10] */
+ uint64_t rmsc_msg : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[9] */
+ uint64_t rmsi_mabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[8] */
+ uint64_t rmsi_tabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[7] */
+ uint64_t rmsi_per : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[6] */
+ uint64_t rmr_tto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[5] */
+ uint64_t rmr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[4] */
+ uint64_t rtr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[3] */
+ uint64_t rmr_wtto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[2] */
+ uint64_t rmr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[1] */
+ uint64_t rtr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[0] */
+#else
+ uint64_t rtr_wabt : 1;
+ uint64_t rmr_wabt : 1;
+ uint64_t rmr_wtto : 1;
+ uint64_t rtr_abt : 1;
+ uint64_t rmr_abt : 1;
+ uint64_t rmr_tto : 1;
+ uint64_t rmsi_per : 1;
+ uint64_t rmsi_tabt : 1;
+ uint64_t rmsi_mabt : 1;
+ uint64_t rmsc_msg : 1;
+ uint64_t rtsr_abt : 1;
+ uint64_t rserr : 1;
+ uint64_t raperr : 1;
+ uint64_t rdperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t rrsl_int : 1;
+ uint64_t rpcnt0 : 1;
+ uint64_t rpcnt1 : 1;
+ uint64_t reserved_19_20 : 2;
+ uint64_t rptime0 : 1;
+ uint64_t rptime1 : 1;
+ uint64_t reserved_23_24 : 2;
+ uint64_t rdcnt0 : 1;
+ uint64_t rdcnt1 : 1;
+ uint64_t rdtime0 : 1;
+ uint64_t rdtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } cn31xx;
+ struct cvmx_pci_int_enb2_s cn38xx;
+ struct cvmx_pci_int_enb2_s cn38xxp2;
+ struct cvmx_pci_int_enb2_cn31xx cn50xx;
+ struct cvmx_pci_int_enb2_s cn58xx;
+ struct cvmx_pci_int_enb2_s cn58xxp1;
+};
+typedef union cvmx_pci_int_enb2 cvmx_pci_int_enb2_t;
+
+/**
+ * cvmx_pci_int_sum
+ *
+ * PCI_INT_SUM = PCI Interrupt Summary
+ *
+ * The PCI Interrupt Summary Register.
+ */
+union cvmx_pci_int_sum {
+ uint64_t u64;
+ struct cvmx_pci_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or
+ Read-Address Register took place. */
+ uint64_t dma1_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 1. */
+ uint64_t dma0_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 0. */
+ uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1
+ register is not 0 the DMA_CNT1 timer counts.
+ When the DMA1_CNT timer has a value greater
+ than the PCI_DMA_TIME1 register this
+ bit is set. The timer is reset when bit is
+ written with a one. */
+ uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0
+ register is not 0 the DMA_CNT0 timer counts.
+ When the DMA0_CNT timer has a value greater
+ than the PCI_DMA_TIME0 register this
+ bit is set. The timer is reset when bit is
+ written with a one. */
+ uint64_t dcnt1 : 1; /**< This bit indicates that PCI_DMA_CNT1
+ value is greater than the value
+ in the PCI_DMA_INT_LEV1 register. */
+ uint64_t dcnt0 : 1; /**< This bit indicates that PCI_DMA_CNT0
+ value is greater than the value
+ in the PCI_DMA_INT_LEV0 register. */
+ uint64_t ptime3 : 1; /**< When the value in the PCI_PKTS_SENT3
+ register is not 0 the Sent-3 timer counts.
+ When the Sent-3 timer has a value greater
+ than the PCI_PKTS_SENT_TIME3 register this
+ bit is set. The timer is reset when bit is
+ written with a one. */
+ uint64_t ptime2 : 1; /**< When the value in the PCI_PKTS_SENT2
+ register is not 0 the Sent-2 timer counts.
+ When the Sent-2 timer has a value greater
+ than the PCI_PKTS_SENT_TIME2 register this
+ bit is set. The timer is reset when bit is
+ written with a one. */
+ uint64_t ptime1 : 1; /**< When the value in the PCI_PKTS_SENT1
+ register is not 0 the Sent-1 timer counts.
+ When the Sent-1 timer has a value greater
+ than the PCI_PKTS_SENT_TIME1 register this
+ bit is set. The timer is reset when bit is
+ written with a one. */
+ uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0
+ register is not 0 the Sent-0 timer counts.
+ When the Sent-0 timer has a value greater
+ than the PCI_PKTS_SENT_TIME0 register this
+ bit is set. The timer is reset when bit is
+ written with a one. */
+ uint64_t pcnt3 : 1; /**< This bit indicates that PCI_PKTS_SENT3
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV3 register. */
+ uint64_t pcnt2 : 1; /**< This bit indicates that PCI_PKTS_SENT2
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV2 register. */
+ uint64_t pcnt1 : 1; /**< This bit indicates that PCI_PKTS_SENT1
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV1 register. */
+ uint64_t pcnt0 : 1; /**< This bit indicates that PCI_PKTS_SENT0
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV0 register. */
+ uint64_t rsl_int : 1; /**< This bit is set when the mio_pci_inta_dr wire
+ is asserted by the MIO. */
+ uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */
+ uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */
+ uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */
+ uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */
+ uint64_t serr : 1; /**< SERR# detected by PCX Core */
+ uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected
+ CN58XX (as completer), has encountered an error
+ which prevents the split transaction from
+ completing. In this event, the CN58XX (as completer),
+ sends a SCM (Split Completion Message) to the
+ initiator. See: PCIX Spec v1.0a Fig 2-40.
+ [31:28]: Message Class = 2(completer error)
+ [27:20]: Message Index = 0x80
+ [18:12]: Remaining Lower Address
+ [11:0]: Remaining Byte Count */
+ uint64_t msc_msg : 1; /**< Master Split Completion Message (SCM) Detected
+ for either a Split-Read/Write error case.
+ Set if:
+ a) A Split-Write SCM is detected with SCE=1.
+ b) A Split-Read SCM is detected (regardless
+ of SCE status).
+ The Split completion message(SCM)
+ is also latched into the PCI_SCM_REG[SCM] to
+ assist SW with error recovery. */
+ uint64_t msi_mabt : 1; /**< PCI Master Abort on Master MSI */
+ uint64_t msi_tabt : 1; /**< PCI Target-Abort on Master MSI */
+ uint64_t msi_per : 1; /**< PCI Parity Error on Master MSI */
+ uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Master-Read */
+ uint64_t mr_abt : 1; /**< PCI Master Abort On Master-Read */
+ uint64_t tr_abt : 1; /**< PCI Target Abort On Master-Read */
+ uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on Master-write */
+ uint64_t mr_wabt : 1; /**< PCI Master Abort detected on Master-write */
+ uint64_t tr_wabt : 1; /**< PCI Target Abort detected on Master-write */
+#else
+ uint64_t tr_wabt : 1;
+ uint64_t mr_wabt : 1;
+ uint64_t mr_wtto : 1;
+ uint64_t tr_abt : 1;
+ uint64_t mr_abt : 1;
+ uint64_t mr_tto : 1;
+ uint64_t msi_per : 1;
+ uint64_t msi_tabt : 1;
+ uint64_t msi_mabt : 1;
+ uint64_t msc_msg : 1;
+ uint64_t tsr_abt : 1;
+ uint64_t serr : 1;
+ uint64_t aperr : 1;
+ uint64_t dperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t rsl_int : 1;
+ uint64_t pcnt0 : 1;
+ uint64_t pcnt1 : 1;
+ uint64_t pcnt2 : 1;
+ uint64_t pcnt3 : 1;
+ uint64_t ptime0 : 1;
+ uint64_t ptime1 : 1;
+ uint64_t ptime2 : 1;
+ uint64_t ptime3 : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_pci_int_sum_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or
+ Read-Address Register took place. */
+ uint64_t dma1_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 1. */
+ uint64_t dma0_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 0. */
+ uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1
+ register is not 0 the DMA_CNT1 timer counts.
+ When the DMA1_CNT timer has a value greater
+ than the PCI_DMA_TIME1 register this
+ bit is set. The timer is reset when bit is
+ written with a one. */
+ uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0
+ register is not 0 the DMA_CNT0 timer counts.
+ When the DMA0_CNT timer has a value greater
+ than the PCI_DMA_TIME0 register this
+ bit is set. The timer is reset when bit is
+ written with a one. */
+ uint64_t dcnt1 : 1; /**< This bit indicates that PCI_DMA_CNT1
+ value is greater than the value
+ in the PCI_DMA_INT_LEV1 register. */
+ uint64_t dcnt0 : 1; /**< This bit indicates that PCI_DMA_CNT0
+ value is greater than the value
+ in the PCI_DMA_INT_LEV0 register. */
+ uint64_t reserved_22_24 : 3;
+ uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0
+ register is not 0 the Sent-0 timer counts.
+ When the Sent-0 timer has a value greater
+ than the PCI_PKTS_SENT_TIME0 register this
+ bit is set. The timer is reset when bit is
+ written with a one. */
+ uint64_t reserved_18_20 : 3;
+ uint64_t pcnt0 : 1; /**< This bit indicates that PCI_PKTS_SENT0
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV0 register. */
+ uint64_t rsl_int : 1; /**< This bit is set when the mio_pci_inta_dr wire
+ is asserted by the MIO */
+ uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */
+ uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */
+ uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */
+ uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */
+ uint64_t serr : 1; /**< SERR# detected by PCX Core */
+ uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected
+ N3K (as completer), has encountered an error
+ which prevents the split transaction from
+ completing. In this event, the N3K (as completer),
+ sends a SCM (Split Completion Message) to the
+ initiator. See: PCIX Spec v1.0a Fig 2-40.
+ [31:28]: Message Class = 2(completer error)
+ [27:20]: Message Index = 0x80
+ [18:12]: Remaining Lower Address
+ [11:0]: Remaining Byte Count */
+ uint64_t msc_msg : 1; /**< Master Split Completion Message (SCM) Detected
+ for either a Split-Read/Write error case.
+ Set if:
+ a) A Split-Write SCM is detected with SCE=1.
+ b) A Split-Read SCM is detected (regardless
+ of SCE status).
+ The Split completion message(SCM)
+ is also latched into the PCI_SCM_REG[SCM] to
+ assist SW with error recovery. */
+ uint64_t msi_mabt : 1; /**< PCI Master Abort on Master MSI */
+ uint64_t msi_tabt : 1; /**< PCI Target-Abort on Master MSI */
+ uint64_t msi_per : 1; /**< PCI Parity Error on Master MSI */
+ uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Master-Read */
+ uint64_t mr_abt : 1; /**< PCI Master Abort On Master-Read */
+ uint64_t tr_abt : 1; /**< PCI Target Abort On Master-Read */
+ uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on Master-write */
+ uint64_t mr_wabt : 1; /**< PCI Master Abort detected on Master-write */
+ uint64_t tr_wabt : 1; /**< PCI Target Abort detected on Master-write */
+#else
+ uint64_t tr_wabt : 1;
+ uint64_t mr_wabt : 1;
+ uint64_t mr_wtto : 1;
+ uint64_t tr_abt : 1;
+ uint64_t mr_abt : 1;
+ uint64_t mr_tto : 1;
+ uint64_t msi_per : 1;
+ uint64_t msi_tabt : 1;
+ uint64_t msi_mabt : 1;
+ uint64_t msc_msg : 1;
+ uint64_t tsr_abt : 1;
+ uint64_t serr : 1;
+ uint64_t aperr : 1;
+ uint64_t dperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t rsl_int : 1;
+ uint64_t pcnt0 : 1;
+ uint64_t reserved_18_20 : 3;
+ uint64_t ptime0 : 1;
+ uint64_t reserved_22_24 : 3;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } cn30xx;
+ struct cvmx_pci_int_sum_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or
+ Read-Address Register took place. */
+ uint64_t dma1_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 1. */
+ uint64_t dma0_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 0. */
+ uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1
+ register is not 0, the DMA_CNT1 timer counts.
+ When the DMA_CNT1 timer has a value greater
+ than the PCI_DMA_TIME1 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0
+ register is not 0, the DMA_CNT0 timer counts.
+ When the DMA_CNT0 timer has a value greater
+ than the PCI_DMA_TIME0 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t dcnt1 : 1; /**< This bit indicates that the PCI_DMA_CNT1
+ value is greater than the value
+ in the PCI_DMA_INT_LEV1 register. */
+ uint64_t dcnt0 : 1; /**< This bit indicates that the PCI_DMA_CNT0
+ value is greater than the value
+ in the PCI_DMA_INT_LEV0 register. */
+ uint64_t reserved_23_24 : 2;
+ uint64_t ptime1 : 1; /**< When the value in the PCI_PKTS_SENT1
+ register is not 0, the Sent-1 timer counts.
+ When the Sent-1 timer has a value greater
+ than the PCI_PKTS_SENT_TIME1 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0
+ register is not 0, the Sent-0 timer counts.
+ When the Sent-0 timer has a value greater
+ than the PCI_PKTS_SENT_TIME0 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t reserved_19_20 : 2;
+ uint64_t pcnt1 : 1; /**< This bit indicates that the PCI_PKTS_SENT1
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV1 register. */
+ uint64_t pcnt0 : 1; /**< This bit indicates that the PCI_PKTS_SENT0
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV0 register. */
+ uint64_t rsl_int : 1; /**< This bit is set when the mio_pci_inta_dr wire
+ is asserted by the MIO */
+ uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */
+ uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */
+ uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */
+ uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */
+ uint64_t serr : 1; /**< SERR# detected by PCX Core */
+ uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected.
+ The N3K (as completer) has encountered an error
+ which prevents the split transaction from
+ completing. In this event, the N3K (as completer)
+ sends an SCM (Split Completion Message) to the
+ initiator. See: PCI-X Spec v1.0a Fig 2-40.
+ [31:28]: Message Class = 2 (completer error)
+ [27:20]: Message Index = 0x80
+ [18:12]: Remaining Lower Address
+ [11:0]: Remaining Byte Count */
+ uint64_t msc_msg : 1; /**< Master Split Completion Message (SCM) Detected
+ for either a Split-Read or Split-Write error case.
+ Set if:
+ a) A Split-Write SCM is detected with SCE=1.
+ b) A Split-Read SCM is detected (regardless
+ of SCE status).
+ The Split Completion Message (SCM)
+ is also latched into PCI_SCM_REG[SCM] to
+ assist SW with error recovery. */
+ uint64_t msi_mabt : 1; /**< PCI Master Abort on Master MSI */
+ uint64_t msi_tabt : 1; /**< PCI Target-Abort on Master MSI */
+ uint64_t msi_per : 1; /**< PCI Parity Error on Master MSI */
+ uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Master-Read */
+ uint64_t mr_abt : 1; /**< PCI Master Abort On Master-Read */
+ uint64_t tr_abt : 1; /**< PCI Target Abort On Master-Read */
+ uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on Master-write */
+ uint64_t mr_wabt : 1; /**< PCI Master Abort detected on Master-write */
+ uint64_t tr_wabt : 1; /**< PCI Target Abort detected on Master-write */
+#else
+ uint64_t tr_wabt : 1;
+ uint64_t mr_wabt : 1;
+ uint64_t mr_wtto : 1;
+ uint64_t tr_abt : 1;
+ uint64_t mr_abt : 1;
+ uint64_t mr_tto : 1;
+ uint64_t msi_per : 1;
+ uint64_t msi_tabt : 1;
+ uint64_t msi_mabt : 1;
+ uint64_t msc_msg : 1;
+ uint64_t tsr_abt : 1;
+ uint64_t serr : 1;
+ uint64_t aperr : 1;
+ uint64_t dperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t rsl_int : 1;
+ uint64_t pcnt0 : 1;
+ uint64_t pcnt1 : 1;
+ uint64_t reserved_19_20 : 2;
+ uint64_t ptime0 : 1;
+ uint64_t ptime1 : 1;
+ uint64_t reserved_23_24 : 2;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } cn31xx;
+ struct cvmx_pci_int_sum_s cn38xx;
+ struct cvmx_pci_int_sum_s cn38xxp2;
+ struct cvmx_pci_int_sum_cn31xx cn50xx;
+ struct cvmx_pci_int_sum_s cn58xx;
+ struct cvmx_pci_int_sum_s cn58xxp1;
+};
+typedef union cvmx_pci_int_sum cvmx_pci_int_sum_t;
+
+/**
+ * cvmx_pci_int_sum2
+ *
+ * PCI_INT_SUM2 = PCI Interrupt Summary2 Register
+ *
+ * The PCI Interrupt Summary2 Register copy used for RSL interrupts.
+ */
+union cvmx_pci_int_sum2 {
+ uint64_t u64;
+ struct cvmx_pci_int_sum2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or
+ Read-Address Register took place. */
+ uint64_t dma1_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 1. */
+ uint64_t dma0_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 0. */
+ uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1
+ register is not 0, the DMA_CNT1 timer counts.
+ When the DMA_CNT1 timer has a value greater
+ than the PCI_DMA_TIME1 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0
+ register is not 0, the DMA_CNT0 timer counts.
+ When the DMA_CNT0 timer has a value greater
+ than the PCI_DMA_TIME0 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t dcnt1 : 1; /**< This bit indicates that the PCI_DMA_CNT1
+ value is greater than the value
+ in the PCI_DMA_INT_LEV1 register. */
+ uint64_t dcnt0 : 1; /**< This bit indicates that the PCI_DMA_CNT0
+ value is greater than the value
+ in the PCI_DMA_INT_LEV0 register. */
+ uint64_t ptime3 : 1; /**< When the value in the PCI_PKTS_SENT3
+ register is not 0, the Sent-3 timer counts.
+ When the Sent-3 timer has a value greater
+ than the PCI_PKTS_SENT_TIME3 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t ptime2 : 1; /**< When the value in the PCI_PKTS_SENT2
+ register is not 0, the Sent-2 timer counts.
+ When the Sent-2 timer has a value greater
+ than the PCI_PKTS_SENT_TIME2 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t ptime1 : 1; /**< When the value in the PCI_PKTS_SENT1
+ register is not 0, the Sent-1 timer counts.
+ When the Sent-1 timer has a value greater
+ than the PCI_PKTS_SENT_TIME1 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0
+ register is not 0, the Sent-0 timer counts.
+ When the Sent-0 timer has a value greater
+ than the PCI_PKTS_SENT_TIME0 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t pcnt3 : 1; /**< This bit indicates that the PCI_PKTS_SENT3
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV3 register. */
+ uint64_t pcnt2 : 1; /**< This bit indicates that the PCI_PKTS_SENT2
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV2 register. */
+ uint64_t pcnt1 : 1; /**< This bit indicates that the PCI_PKTS_SENT1
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV1 register. */
+ uint64_t pcnt0 : 1; /**< This bit indicates that the PCI_PKTS_SENT0
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV0 register. */
+ uint64_t rsl_int : 1; /**< This bit is set when the RSL Chain has
+ generated an interrupt. */
+ uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */
+ uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */
+ uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */
+ uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */
+ uint64_t serr : 1; /**< SERR# detected by PCX Core */
+ uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected */
+ uint64_t msc_msg : 1; /**< Master Split Completion Message Detected */
+ uint64_t msi_mabt : 1; /**< PCI MSI Master Abort. */
+ uint64_t msi_tabt : 1; /**< PCI MSI Target Abort. */
+ uint64_t msi_per : 1; /**< PCI MSI Parity Error. */
+ uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Read. */
+ uint64_t mr_abt : 1; /**< PCI Master Abort On Read. */
+ uint64_t tr_abt : 1; /**< PCI Target Abort On Read. */
+ uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on write. */
+ uint64_t mr_wabt : 1; /**< PCI Master Abort detected on write. */
+ uint64_t tr_wabt : 1; /**< PCI Target Abort detected on write. */
+#else
+ uint64_t tr_wabt : 1;
+ uint64_t mr_wabt : 1;
+ uint64_t mr_wtto : 1;
+ uint64_t tr_abt : 1;
+ uint64_t mr_abt : 1;
+ uint64_t mr_tto : 1;
+ uint64_t msi_per : 1;
+ uint64_t msi_tabt : 1;
+ uint64_t msi_mabt : 1;
+ uint64_t msc_msg : 1;
+ uint64_t tsr_abt : 1;
+ uint64_t serr : 1;
+ uint64_t aperr : 1;
+ uint64_t dperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t rsl_int : 1;
+ uint64_t pcnt0 : 1;
+ uint64_t pcnt1 : 1;
+ uint64_t pcnt2 : 1;
+ uint64_t pcnt3 : 1;
+ uint64_t ptime0 : 1;
+ uint64_t ptime1 : 1;
+ uint64_t ptime2 : 1;
+ uint64_t ptime3 : 1;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_pci_int_sum2_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or
+ Read-Address Register took place. */
+ uint64_t dma1_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 1. */
+ uint64_t dma0_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 0. */
+ uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1
+ register is not 0, the DMA_CNT1 timer counts.
+ When the DMA_CNT1 timer has a value greater
+ than the PCI_DMA_TIME1 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0
+ register is not 0, the DMA_CNT0 timer counts.
+ When the DMA_CNT0 timer has a value greater
+ than the PCI_DMA_TIME0 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t dcnt1 : 1; /**< This bit indicates that the PCI_DMA_CNT1
+ value is greater than the value
+ in the PCI_DMA_INT_LEV1 register. */
+ uint64_t dcnt0 : 1; /**< This bit indicates that the PCI_DMA_CNT0
+ value is greater than the value
+ in the PCI_DMA_INT_LEV0 register. */
+ uint64_t reserved_22_24 : 3;
+ uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0
+ register is not 0, the Sent-0 timer counts.
+ When the Sent-0 timer has a value greater
+ than the PCI_PKTS_SENT_TIME0 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t reserved_18_20 : 3;
+ uint64_t pcnt0 : 1; /**< This bit indicates that the PCI_PKTS_SENT0
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV0 register. */
+ uint64_t rsl_int : 1; /**< This bit is set when the RSL Chain has
+ generated an interrupt. */
+ uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */
+ uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */
+ uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */
+ uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */
+ uint64_t serr : 1; /**< SERR# detected by PCX Core */
+ uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected */
+ uint64_t msc_msg : 1; /**< Master Split Completion Message Detected */
+ uint64_t msi_mabt : 1; /**< PCI MSI Master Abort. */
+ uint64_t msi_tabt : 1; /**< PCI MSI Target Abort. */
+ uint64_t msi_per : 1; /**< PCI MSI Parity Error. */
+ uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Read. */
+ uint64_t mr_abt : 1; /**< PCI Master Abort On Read. */
+ uint64_t tr_abt : 1; /**< PCI Target Abort On Read. */
+ uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on write. */
+ uint64_t mr_wabt : 1; /**< PCI Master Abort detected on write. */
+ uint64_t tr_wabt : 1; /**< PCI Target Abort detected on write. */
+#else
+ uint64_t tr_wabt : 1;
+ uint64_t mr_wabt : 1;
+ uint64_t mr_wtto : 1;
+ uint64_t tr_abt : 1;
+ uint64_t mr_abt : 1;
+ uint64_t mr_tto : 1;
+ uint64_t msi_per : 1;
+ uint64_t msi_tabt : 1;
+ uint64_t msi_mabt : 1;
+ uint64_t msc_msg : 1;
+ uint64_t tsr_abt : 1;
+ uint64_t serr : 1;
+ uint64_t aperr : 1;
+ uint64_t dperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t rsl_int : 1;
+ uint64_t pcnt0 : 1;
+ uint64_t reserved_18_20 : 3;
+ uint64_t ptime0 : 1;
+ uint64_t reserved_22_24 : 3;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } cn30xx;
+ struct cvmx_pci_int_sum2_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2,
+ when the mem area is disabled. */
+ uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or
+ Read-Address Register took place. */
+ uint64_t dma1_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 1. */
+ uint64_t dma0_fi : 1; /**< A DMA operation finished that was
+ required to set the FORCE-INT bit for counter 0. */
+ uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1
+ register is not 0, the DMA_CNT1 timer counts.
+ When the DMA_CNT1 timer has a value greater
+ than the PCI_DMA_TIME1 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0
+ register is not 0, the DMA_CNT0 timer counts.
+ When the DMA_CNT0 timer has a value greater
+ than the PCI_DMA_TIME0 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t dcnt1 : 1; /**< This bit indicates that the PCI_DMA_CNT1
+ value is greater than the value
+ in the PCI_DMA_INT_LEV1 register. */
+ uint64_t dcnt0 : 1; /**< This bit indicates that the PCI_DMA_CNT0
+ value is greater than the value
+ in the PCI_DMA_INT_LEV0 register. */
+ uint64_t reserved_23_24 : 2;
+ uint64_t ptime1 : 1; /**< When the value in the PCI_PKTS_SENT1
+ register is not 0, the Sent-1 timer counts.
+ When the Sent-1 timer has a value greater
+ than the PCI_PKTS_SENT_TIME1 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0
+ register is not 0, the Sent-0 timer counts.
+ When the Sent-0 timer has a value greater
+ than the PCI_PKTS_SENT_TIME0 register, this
+ bit is set. The timer is reset when the bit is
+ written with a one. */
+ uint64_t reserved_19_20 : 2;
+ uint64_t pcnt1 : 1; /**< This bit indicates that the PCI_PKTS_SENT1
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV1 register. */
+ uint64_t pcnt0 : 1; /**< This bit indicates that the PCI_PKTS_SENT0
+ value is greater than the value
+ in the PCI_PKTS_SENT_INT_LEV0 register. */
+ uint64_t rsl_int : 1; /**< This bit is set when the RSL Chain has
+ generated an interrupt. */
+ uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */
+ uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */
+ uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */
+ uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */
+ uint64_t serr : 1; /**< SERR# detected by PCX Core */
+ uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected */
+ uint64_t msc_msg : 1; /**< Master Split Completion Message Detected */
+ uint64_t msi_mabt : 1; /**< PCI MSI Master Abort. */
+ uint64_t msi_tabt : 1; /**< PCI MSI Target Abort. */
+ uint64_t msi_per : 1; /**< PCI MSI Parity Error. */
+ uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Read. */
+ uint64_t mr_abt : 1; /**< PCI Master Abort On Read. */
+ uint64_t tr_abt : 1; /**< PCI Target Abort On Read. */
+ uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on write. */
+ uint64_t mr_wabt : 1; /**< PCI Master Abort detected on write. */
+ uint64_t tr_wabt : 1; /**< PCI Target Abort detected on write. */
+#else
+ uint64_t tr_wabt : 1;
+ uint64_t mr_wabt : 1;
+ uint64_t mr_wtto : 1;
+ uint64_t tr_abt : 1;
+ uint64_t mr_abt : 1;
+ uint64_t mr_tto : 1;
+ uint64_t msi_per : 1;
+ uint64_t msi_tabt : 1;
+ uint64_t msi_mabt : 1;
+ uint64_t msc_msg : 1;
+ uint64_t tsr_abt : 1;
+ uint64_t serr : 1;
+ uint64_t aperr : 1;
+ uint64_t dperr : 1;
+ uint64_t ill_rwr : 1;
+ uint64_t ill_rrd : 1;
+ uint64_t rsl_int : 1;
+ uint64_t pcnt0 : 1;
+ uint64_t pcnt1 : 1;
+ uint64_t reserved_19_20 : 2;
+ uint64_t ptime0 : 1;
+ uint64_t ptime1 : 1;
+ uint64_t reserved_23_24 : 2;
+ uint64_t dcnt0 : 1;
+ uint64_t dcnt1 : 1;
+ uint64_t dtime0 : 1;
+ uint64_t dtime1 : 1;
+ uint64_t dma0_fi : 1;
+ uint64_t dma1_fi : 1;
+ uint64_t win_wr : 1;
+ uint64_t ill_wr : 1;
+ uint64_t ill_rd : 1;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } cn31xx;
+ struct cvmx_pci_int_sum2_s cn38xx;
+ struct cvmx_pci_int_sum2_s cn38xxp2;
+ struct cvmx_pci_int_sum2_cn31xx cn50xx;
+ struct cvmx_pci_int_sum2_s cn58xx;
+ struct cvmx_pci_int_sum2_s cn58xxp1;
+};
+typedef union cvmx_pci_int_sum2 cvmx_pci_int_sum2_t;
+
+/**
+ * cvmx_pci_msi_rcv
+ *
+ * PCI_MSI_RCV = PCI's MSI Received Vector Register
+ *
+ * A bit is set in this register relative to the vector received during an MSI. The value in this
+ * register is acted upon when the least-significant-byte of this register is written.
+ */
+union cvmx_pci_msi_rcv {
+ uint32_t u32;
+ struct cvmx_pci_msi_rcv_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t intr : 6; /**< When an MSI is received on the PCI, the bit selected
+ by data [5:0] will be set in this register. To
+ clear this bit, a write must take place to the
+ NPI_MSI_RCV register where any bit set to 1 is
+ cleared. Reading this address will return an
+ unpredictable value. */
+#else
+ uint32_t intr : 6;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_pci_msi_rcv_s cn30xx;
+ struct cvmx_pci_msi_rcv_s cn31xx;
+ struct cvmx_pci_msi_rcv_s cn38xx;
+ struct cvmx_pci_msi_rcv_s cn38xxp2;
+ struct cvmx_pci_msi_rcv_s cn50xx;
+ struct cvmx_pci_msi_rcv_s cn58xx;
+ struct cvmx_pci_msi_rcv_s cn58xxp1;
+};
+typedef union cvmx_pci_msi_rcv cvmx_pci_msi_rcv_t;
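These unions follow a common pattern: software moves the raw register value through the flat u32/u64 member and then reads or writes the named bitfields. A minimal sketch for this register, assuming the raw value and the register accessors come from elsewhere (this header defines only the layout):

    static unsigned decode_msi_vector(uint32_t raw)
    {
        cvmx_pci_msi_rcv_t rcv;
        rcv.u32 = raw;      /* raw PCI_MSI_RCV value, read elsewhere */
        return rcv.s.intr;  /* 6-bit vector number of the received MSI */
    }

Per the field description, the pending bit is cleared by writing the NPI_MSI_RCV register, not by reading here.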
+
+/**
+ * cvmx_pci_pkt_credits#
+ *
+ * PCI_PKT_CREDITS0 = PCI Packet Credits For Output 0
+ *
+ * Used to decrease the number of packets to be processed by the host from Output-0 and return
+ * buffer/info pointer pairs to OCTEON Output-0. The value in this register is acted upon when the
+ * least-significant-byte of this register is written.
+ */
+union cvmx_pci_pkt_creditsx {
+ uint32_t u32;
+ struct cvmx_pci_pkt_creditsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_cnt : 16; /**< The value written to this field will be
+ subtracted from PCI_PKTS_SENT0[PKT_CNT]. */
+ uint32_t ptr_cnt : 16; /**< This field value is added to the
+ NPI's internal Buffer/Info Pointer Pair count. */
+#else
+ uint32_t ptr_cnt : 16;
+ uint32_t pkt_cnt : 16;
+#endif
+ } s;
+ struct cvmx_pci_pkt_creditsx_s cn30xx;
+ struct cvmx_pci_pkt_creditsx_s cn31xx;
+ struct cvmx_pci_pkt_creditsx_s cn38xx;
+ struct cvmx_pci_pkt_creditsx_s cn38xxp2;
+ struct cvmx_pci_pkt_creditsx_s cn50xx;
+ struct cvmx_pci_pkt_creditsx_s cn58xx;
+ struct cvmx_pci_pkt_creditsx_s cn58xxp1;
+};
+typedef union cvmx_pci_pkt_creditsx cvmx_pci_pkt_creditsx_t;
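A hedged sketch of how a host driver might return credits after consuming packets from Output-0. The pci_csr_write32() accessor and the PCI_PKT_CREDITS0 offset constant are assumed names for illustration, not definitions from this header:

    static void return_output0_credits(uint16_t pkts_done, uint16_t ptrs_freed)
    {
        cvmx_pci_pkt_creditsx_t credits;
        credits.u32 = 0;
        credits.s.pkt_cnt = pkts_done;  /* subtracted from PCI_PKTS_SENT0[PKT_CNT] */
        credits.s.ptr_cnt = ptrs_freed; /* added to the NPI buffer/info pointer count */
        pci_csr_write32(PCI_PKT_CREDITS0, credits.u32); /* LSB write commits the update */
    }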
+
+/**
+ * cvmx_pci_pkts_sent#
+ *
+ * PCI_PKTS_SENT0 = PCI Packets Sent 0
+ *
+ * Number of packets sent to the host memory from PCI Output 0
+ */
+union cvmx_pci_pkts_sentx {
+ uint32_t u32;
+ struct cvmx_pci_pkts_sentx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_cnt : 32; /**< Each time a packet is written to memory via
+ PCI from PCI Output 0, this counter is
+ incremented by 1 or by the byte count of the
+ packet, as selected by NPI_OUTPUT_CONTROL[P0_BMODE]. */
+#else
+ uint32_t pkt_cnt : 32;
+#endif
+ } s;
+ struct cvmx_pci_pkts_sentx_s cn30xx;
+ struct cvmx_pci_pkts_sentx_s cn31xx;
+ struct cvmx_pci_pkts_sentx_s cn38xx;
+ struct cvmx_pci_pkts_sentx_s cn38xxp2;
+ struct cvmx_pci_pkts_sentx_s cn50xx;
+ struct cvmx_pci_pkts_sentx_s cn58xx;
+ struct cvmx_pci_pkts_sentx_s cn58xxp1;
+};
+typedef union cvmx_pci_pkts_sentx cvmx_pci_pkts_sentx_t;
+
+/**
+ * cvmx_pci_pkts_sent_int_lev#
+ *
+ * PCI_PKTS_SENT_INT_LEV0 = PCI Packets Sent Interrupt Level For Output 0
+ *
+ * Interrupt when the number of packets sent is equal to or greater than the register value.
+ */
+union cvmx_pci_pkts_sent_int_levx {
+ uint32_t u32;
+ struct cvmx_pci_pkts_sent_int_levx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_cnt : 32; /**< When the corresponding port's PCI_PKTS_SENT0 value
+ exceeds the value in this register, PCNT0 of
+ PCI_INT_SUM and PCI_INT_SUM2 will be set. */
+#else
+ uint32_t pkt_cnt : 32;
+#endif
+ } s;
+ struct cvmx_pci_pkts_sent_int_levx_s cn30xx;
+ struct cvmx_pci_pkts_sent_int_levx_s cn31xx;
+ struct cvmx_pci_pkts_sent_int_levx_s cn38xx;
+ struct cvmx_pci_pkts_sent_int_levx_s cn38xxp2;
+ struct cvmx_pci_pkts_sent_int_levx_s cn50xx;
+ struct cvmx_pci_pkts_sent_int_levx_s cn58xx;
+ struct cvmx_pci_pkts_sent_int_levx_s cn58xxp1;
+};
+typedef union cvmx_pci_pkts_sent_int_levx cvmx_pci_pkts_sent_int_levx_t;
+
+/**
+ * cvmx_pci_pkts_sent_time#
+ *
+ * PCI_PKTS_SENT_TIME0 = PCI Packets Sent Timer For Output-0
+ *
+ * Time to wait after a packet is sent to the host from Output-0 before issuing an interrupt.
+ */
+union cvmx_pci_pkts_sent_timex {
+ uint32_t u32;
+ struct cvmx_pci_pkts_sent_timex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_time : 32; /**< Number of PCI clock cycles to wait before
+ issuing an interrupt to the host after a
+ packet from this port has been sent to the
+ host. The timer is reset when
+ PCI_INT_SUM[21] is cleared. */
+#else
+ uint32_t pkt_time : 32;
+#endif
+ } s;
+ struct cvmx_pci_pkts_sent_timex_s cn30xx;
+ struct cvmx_pci_pkts_sent_timex_s cn31xx;
+ struct cvmx_pci_pkts_sent_timex_s cn38xx;
+ struct cvmx_pci_pkts_sent_timex_s cn38xxp2;
+ struct cvmx_pci_pkts_sent_timex_s cn50xx;
+ struct cvmx_pci_pkts_sent_timex_s cn58xx;
+ struct cvmx_pci_pkts_sent_timex_s cn58xxp1;
+};
+typedef union cvmx_pci_pkts_sent_timex cvmx_pci_pkts_sent_timex_t;
+
+/**
+ * cvmx_pci_read_cmd_6
+ *
+ * PCI_READ_CMD_6 = PCI Read Command 6 Register
+ *
+ * Contains control information related to a received PCI Command 6.
+ */
+union cvmx_pci_read_cmd_6 {
+ uint32_t u32;
+ struct cvmx_pci_read_cmd_6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31 : 23;
+ uint32_t min_data : 6; /**< The number of words to have buffered in the PNI
+ before informing the PCIX-Core that we have
+ read data available for the outstanding Delayed
+ read. 0 is treated as 64.
+ For reads to the expansion, this value is not used. */
+ uint32_t prefetch : 3; /**< Controls the amount of data to be prefetched when
+ this type of READ command is received.
+ 0 = 1 32/64 bit word.
+ 1 = From address to end of 128B block.
+ 2 = From address to end of 128B block plus 128B.
+ 3 = From address to end of 128B block plus 256B.
+ 4 = From address to end of 128B block plus 384B.
+ For reads to the expansion, this value is not used. */
+#else
+ uint32_t prefetch : 3;
+ uint32_t min_data : 6;
+ uint32_t reserved_9_31 : 23;
+#endif
+ } s;
+ struct cvmx_pci_read_cmd_6_s cn30xx;
+ struct cvmx_pci_read_cmd_6_s cn31xx;
+ struct cvmx_pci_read_cmd_6_s cn38xx;
+ struct cvmx_pci_read_cmd_6_s cn38xxp2;
+ struct cvmx_pci_read_cmd_6_s cn50xx;
+ struct cvmx_pci_read_cmd_6_s cn58xx;
+ struct cvmx_pci_read_cmd_6_s cn58xxp1;
+};
+typedef union cvmx_pci_read_cmd_6 cvmx_pci_read_cmd_6_t;
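As an illustration of programming this register through the union, a sketch follows; the chosen values and the pci_csr_write32()/PCI_READ_CMD_6 names are assumptions for the example, not SDK definitions:

    static void tune_read_cmd_6(void)
    {
        cvmx_pci_read_cmd_6_t cmd;
        cmd.u32 = 0;
        cmd.s.min_data = 16; /* buffer 16 words before signaling read data ready */
        cmd.s.prefetch = 2;  /* prefetch to end of 128B block plus 128B */
        pci_csr_write32(PCI_READ_CMD_6, cmd.u32); /* placeholder accessor/offset */
    }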
+
+/**
+ * cvmx_pci_read_cmd_c
+ *
+ * PCI_READ_CMD_C = PCI Read Command C Register
+ *
+ * Contains control information related to a received PCI Command C.
+ */
+union cvmx_pci_read_cmd_c {
+ uint32_t u32;
+ struct cvmx_pci_read_cmd_c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31 : 23;
+ uint32_t min_data : 6; /**< The number of words to have buffered in the PNI
+ before informing the PCIX-Core that we have
+ read data available for the outstanding Delayed
+ read. 0 is treated as 64.
+ For reads to the expansion, this value is not used. */
+ uint32_t prefetch : 3; /**< Controls the amount of data to be prefetched when
+ this type of READ command is received.
+ 0 = 1 32/64 bit word.
+ 1 = From address to end of 128B block.
+ 2 = From address to end of 128B block plus 128B.
+ 3 = From address to end of 128B block plus 256B.
+ 4 = From address to end of 128B block plus 384B.
+ For reads to the expansion, this value is not used. */
+#else
+ uint32_t prefetch : 3;
+ uint32_t min_data : 6;
+ uint32_t reserved_9_31 : 23;
+#endif
+ } s;
+ struct cvmx_pci_read_cmd_c_s cn30xx;
+ struct cvmx_pci_read_cmd_c_s cn31xx;
+ struct cvmx_pci_read_cmd_c_s cn38xx;
+ struct cvmx_pci_read_cmd_c_s cn38xxp2;
+ struct cvmx_pci_read_cmd_c_s cn50xx;
+ struct cvmx_pci_read_cmd_c_s cn58xx;
+ struct cvmx_pci_read_cmd_c_s cn58xxp1;
+};
+typedef union cvmx_pci_read_cmd_c cvmx_pci_read_cmd_c_t;
+
+/**
+ * cvmx_pci_read_cmd_e
+ *
+ * PCI_READ_CMD_E = PCI Read Command E Register
+ *
+ * Contains control information related to a received PCI Command E.
+ */
+union cvmx_pci_read_cmd_e {
+ uint32_t u32;
+ struct cvmx_pci_read_cmd_e_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31 : 23;
+ uint32_t min_data : 6; /**< The number of words to have buffered in the PNI
+ before informing the PCIX-Core that we have
+ read data available for the outstanding Delayed
+ read. 0 is treated as 64.
+ For reads to the expansion, this value is not used. */
+ uint32_t prefetch : 3; /**< Controls the amount of data to be prefetched when
+ this type of READ command is received.
+ 0 = 1 32/64 bit word.
+ 1 = From address to end of 128B block.
+ 2 = From address to end of 128B block plus 128B.
+ 3 = From address to end of 128B block plus 256B.
+ 4 = From address to end of 128B block plus 384B.
+ For reads to the expansion, this value is not used. */
+#else
+ uint32_t prefetch : 3;
+ uint32_t min_data : 6;
+ uint32_t reserved_9_31 : 23;
+#endif
+ } s;
+ struct cvmx_pci_read_cmd_e_s cn30xx;
+ struct cvmx_pci_read_cmd_e_s cn31xx;
+ struct cvmx_pci_read_cmd_e_s cn38xx;
+ struct cvmx_pci_read_cmd_e_s cn38xxp2;
+ struct cvmx_pci_read_cmd_e_s cn50xx;
+ struct cvmx_pci_read_cmd_e_s cn58xx;
+ struct cvmx_pci_read_cmd_e_s cn58xxp1;
+};
+typedef union cvmx_pci_read_cmd_e cvmx_pci_read_cmd_e_t;
+
+/**
+ * cvmx_pci_read_timeout
+ *
+ * PCI_READ_TIMEOUT = PCI Read Timeout Register
+ *
+ * Enable and cycle count for the timeout applied to read requests issued to the PNI.
+ */
+union cvmx_pci_read_timeout {
+ uint64_t u64;
+ struct cvmx_pci_read_timeout_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t enb : 1; /**< Enable the use of the Timeout function. */
+ uint64_t cnt : 31; /**< The number of eclk cycles to wait after issuing
+ a read request to the PNI before setting a
+ timeout and not expecting the data to return.
+ This is considered a fatal condition by the NPI. */
+#else
+ uint64_t cnt : 31;
+ uint64_t enb : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pci_read_timeout_s cn30xx;
+ struct cvmx_pci_read_timeout_s cn31xx;
+ struct cvmx_pci_read_timeout_s cn38xx;
+ struct cvmx_pci_read_timeout_s cn38xxp2;
+ struct cvmx_pci_read_timeout_s cn50xx;
+ struct cvmx_pci_read_timeout_s cn58xx;
+ struct cvmx_pci_read_timeout_s cn58xxp1;
+};
+typedef union cvmx_pci_read_timeout cvmx_pci_read_timeout_t;
+
+/**
+ * cvmx_pci_scm_reg
+ *
+ * PCI_SCM_REG = PCI Master Split Completion Message Register
+ *
+ * This register contains the Master Split Completion Message (SCM) generated when a master split
+ * transaction is aborted.
+ */
+union cvmx_pci_scm_reg {
+ uint64_t u64;
+ struct cvmx_pci_scm_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t scm : 32; /**< Contains the Split Completion Message (SCM)
+ driven when a master-split transaction is aborted.
+ [31:28]: Message Class
+ [27:20]: Message Index
+ [19]: Reserved
+ [18:12]: Remaining Lower Address
+ [11:8]: Upper Remaining Byte Count
+ [7:0]: Lower Remaining Byte Count
+ Refer to the PCI-X 1.0a specification, Fig 2-40,
+ for additional details on the split completion
+ message format. */
+#else
+ uint64_t scm : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pci_scm_reg_s cn30xx;
+ struct cvmx_pci_scm_reg_s cn31xx;
+ struct cvmx_pci_scm_reg_s cn38xx;
+ struct cvmx_pci_scm_reg_s cn38xxp2;
+ struct cvmx_pci_scm_reg_s cn50xx;
+ struct cvmx_pci_scm_reg_s cn58xx;
+ struct cvmx_pci_scm_reg_s cn58xxp1;
+};
+typedef union cvmx_pci_scm_reg cvmx_pci_scm_reg_t;
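A sketch of how software might pick apart the latched message during error recovery, following the bit layout documented above; the helper is illustrative only:

    static void decode_scm(uint64_t reg_val)
    {
        cvmx_pci_scm_reg_t r;
        r.u64 = reg_val;                         /* raw PCI_SCM_REG value */
        uint32_t scm = r.s.scm;
        unsigned msg_class = (scm >> 28) & 0xf;  /* [31:28] Message Class */
        unsigned msg_index = (scm >> 20) & 0xff; /* [27:20] Message Index */
        unsigned rem_addr  = (scm >> 12) & 0x7f; /* [18:12] Remaining Lower Address */
        unsigned rem_bytes = scm & 0xfff;        /* [11:8]+[7:0] Remaining Byte Count */
        (void)msg_class; (void)msg_index; (void)rem_addr; (void)rem_bytes;
    }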
+
+/**
+ * cvmx_pci_tsr_reg
+ *
+ * PCI_TSR_REG = PCI Target Split Attribute Register
+ *
+ * This register contains the Attribute field captured when a target split
+ * transaction is aborted.
+ */
+union cvmx_pci_tsr_reg {
+ uint64_t u64;
+ struct cvmx_pci_tsr_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t tsr : 36; /**< Contains the Target Split Attribute field when a
+ target-split transaction is aborted.
+ [35:32]: Upper Byte Count
+ [31]: BCM=Byte Count Modified
+ [30]: SCE=Split Completion Error
+ [29]: SCM=Split Completion Message
+ [28:24]: RESERVED
+ [23:16]: Completer Bus Number
+ [15:11]: Completer Device Number
+ [10:8]: Completer Function Number
+ [7:0]: Lower Byte Count
+ Refer to the PCI-X 1.0a specification, Fig 2-39,
+ for additional details on the completer attribute
+ bit assignments. */
+#else
+ uint64_t tsr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_pci_tsr_reg_s cn30xx;
+ struct cvmx_pci_tsr_reg_s cn31xx;
+ struct cvmx_pci_tsr_reg_s cn38xx;
+ struct cvmx_pci_tsr_reg_s cn38xxp2;
+ struct cvmx_pci_tsr_reg_s cn50xx;
+ struct cvmx_pci_tsr_reg_s cn58xx;
+ struct cvmx_pci_tsr_reg_s cn58xxp1;
+};
+typedef union cvmx_pci_tsr_reg cvmx_pci_tsr_reg_t;
+
+/**
+ * cvmx_pci_win_rd_addr
+ *
+ * PCI_WIN_RD_ADDR = PCI Window Read Address Register
+ *
+ * Writing the least-significant-byte of this register will cause a read operation to take place,
+ * unless a read operation is already taking place. A read is considered to end when the PCI_WIN_RD_DATA
+ * register is read.
+ */
+union cvmx_pci_win_rd_addr {
+ uint64_t u64;
+ struct cvmx_pci_win_rd_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always
+ read as '0'. */
+ uint64_t reserved_0_47 : 48;
+#else
+ uint64_t reserved_0_47 : 48;
+ uint64_t iobit : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } s;
+ struct cvmx_pci_win_rd_addr_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always
+ read as '0'. */
+ uint64_t rd_addr : 46; /**< The address to be read from. Whenever the LSB of
+ this register is written, the Read Operation will
+ take place.
+ [47:40] = NCB_ID
+ [39:3] = Address
+ When [47:43] == NPI & [42:0] == 0 bits [39:0] are:
+ [39:32] == x, Not Used
+ [31:27] == RSL_ID
+ [12:2] == RSL Register Offset
+ [1:0] == x, Not Used */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t rd_addr : 46;
+ uint64_t iobit : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn30xx;
+ struct cvmx_pci_win_rd_addr_cn30xx cn31xx;
+ struct cvmx_pci_win_rd_addr_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always
+ read as '0'. */
+ uint64_t rd_addr : 45; /**< The address to be read from. Whenever the LSB of
+ this register is written, the Read Operation will
+ take place.
+ [47:40] = NCB_ID
+ [39:3] = Address
+ When [47:43] == NPI & [42:0] == 0 bits [39:0] are:
+ [39:32] == x, Not Used
+ [31:27] == RSL_ID
+ [12:3] == RSL Register Offset
+ [2:0] == x, Not Used */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t rd_addr : 45;
+ uint64_t iobit : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn38xx;
+ struct cvmx_pci_win_rd_addr_cn38xx cn38xxp2;
+ struct cvmx_pci_win_rd_addr_cn30xx cn50xx;
+ struct cvmx_pci_win_rd_addr_cn38xx cn58xx;
+ struct cvmx_pci_win_rd_addr_cn38xx cn58xxp1;
+};
+typedef union cvmx_pci_win_rd_addr cvmx_pci_win_rd_addr_t;
+
+/**
+ * cvmx_pci_win_rd_data
+ *
+ * PCI_WIN_RD_DATA = PCI Window Read Data Register
+ *
+ * Contains the result from the read operation that took place when the LSB of the PCI_WIN_RD_ADDR
+ * register was written.
+ */
+union cvmx_pci_win_rd_data {
+ uint64_t u64;
+ struct cvmx_pci_win_rd_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rd_data : 64; /**< The read data. */
+#else
+ uint64_t rd_data : 64;
+#endif
+ } s;
+ struct cvmx_pci_win_rd_data_s cn30xx;
+ struct cvmx_pci_win_rd_data_s cn31xx;
+ struct cvmx_pci_win_rd_data_s cn38xx;
+ struct cvmx_pci_win_rd_data_s cn38xxp2;
+ struct cvmx_pci_win_rd_data_s cn50xx;
+ struct cvmx_pci_win_rd_data_s cn58xx;
+ struct cvmx_pci_win_rd_data_s cn58xxp1;
+};
+typedef union cvmx_pci_win_rd_data cvmx_pci_win_rd_data_t;
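Taken together with PCI_WIN_RD_ADDR above, the handshake is: write the target address (the least-significant-byte write launches the read), then read PCI_WIN_RD_DATA to collect the result and end the transaction. A sketch using the cn38xx layout (the cn30xx/cn31xx/cn50xx variant carries a 46-bit address field), with bar0_read64()/bar0_write64() and the register offsets as placeholder names:

    static uint64_t window_read64(uint64_t ncb_addr)
    {
        cvmx_pci_win_rd_addr_t a;
        cvmx_pci_win_rd_data_t d;
        a.u64 = 0;
        a.cn38xx.rd_addr = ncb_addr >> 3;         /* address bits [47:3] */
        bar0_write64(PCI_WIN_RD_ADDR_OFF, a.u64); /* LSB write starts the read */
        d.u64 = bar0_read64(PCI_WIN_RD_DATA_OFF); /* reading the data ends it */
        return d.s.rd_data;
    }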
+
+/**
+ * cvmx_pci_win_wr_addr
+ *
+ * PCI_WIN_WR_ADDR = PCI Window Write Address Register
+ *
+ * Contains the address to be written to when a write operation is started by writing the
+ * PCI_WIN_WR_DATA register (see below).
+ */
+union cvmx_pci_win_wr_addr {
+ uint64_t u64;
+ struct cvmx_pci_win_wr_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always
+ read as '0'. */
+ uint64_t wr_addr : 45; /**< The address that will be written to when the
+ PCI_WIN_WR_DATA register is written.
+ [47:40] = NCB_ID
+ [39:3] = Address
+ When [47:43] == NPI & [42:0] == 0 bits [39:0] are:
+ [39:32] == x, Not Used
+ [31:27] == RSL_ID
+ [12:3] == RSL Register Offset
+ [2:0] == x, Not Used */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t wr_addr : 45;
+ uint64_t iobit : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } s;
+ struct cvmx_pci_win_wr_addr_s cn30xx;
+ struct cvmx_pci_win_wr_addr_s cn31xx;
+ struct cvmx_pci_win_wr_addr_s cn38xx;
+ struct cvmx_pci_win_wr_addr_s cn38xxp2;
+ struct cvmx_pci_win_wr_addr_s cn50xx;
+ struct cvmx_pci_win_wr_addr_s cn58xx;
+ struct cvmx_pci_win_wr_addr_s cn58xxp1;
+};
+typedef union cvmx_pci_win_wr_addr cvmx_pci_win_wr_addr_t;
+
+/**
+ * cvmx_pci_win_wr_data
+ *
+ * PCI_WIN_WR_DATA = PCI Window Write Data Register
+ *
+ * Contains the data to write to the address located in the PCI_WIN_WR_ADDR Register.
+ * Writing the least-significant-byte of this register will cause a write operation to take place.
+ */
+union cvmx_pci_win_wr_data {
+ uint64_t u64;
+ struct cvmx_pci_win_wr_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wr_data : 64; /**< The data to be written. Whenever the LSB of this
+ register is written, the Window Write will take
+ place. */
+#else
+ uint64_t wr_data : 64;
+#endif
+ } s;
+ struct cvmx_pci_win_wr_data_s cn30xx;
+ struct cvmx_pci_win_wr_data_s cn31xx;
+ struct cvmx_pci_win_wr_data_s cn38xx;
+ struct cvmx_pci_win_wr_data_s cn38xxp2;
+ struct cvmx_pci_win_wr_data_s cn50xx;
+ struct cvmx_pci_win_wr_data_s cn58xx;
+ struct cvmx_pci_win_wr_data_s cn58xxp1;
+};
+typedef union cvmx_pci_win_wr_data cvmx_pci_win_wr_data_t;
+
+/**
+ * cvmx_pci_win_wr_mask
+ *
+ * PCI_WIN_WR_MASK = PCI Window Write Mask Register
+ *
+ * Contains the mask for the data in the PCI_WIN_WR_DATA Register.
+ */
+union cvmx_pci_win_wr_mask {
+ uint64_t u64;
+ struct cvmx_pci_win_wr_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t wr_mask : 8; /**< The write mask. When a bit is set to '1',
+ the corresponding byte will not be written. */
+#else
+ uint64_t wr_mask : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_pci_win_wr_mask_s cn30xx;
+ struct cvmx_pci_win_wr_mask_s cn31xx;
+ struct cvmx_pci_win_wr_mask_s cn38xx;
+ struct cvmx_pci_win_wr_mask_s cn38xxp2;
+ struct cvmx_pci_win_wr_mask_s cn50xx;
+ struct cvmx_pci_win_wr_mask_s cn58xx;
+ struct cvmx_pci_win_wr_mask_s cn58xxp1;
+};
+typedef union cvmx_pci_win_wr_mask cvmx_pci_win_wr_mask_t;
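The write side mirrors the read handshake: program PCI_WIN_WR_ADDR (and PCI_WIN_WR_MASK if any bytes are to be skipped), then write PCI_WIN_WR_DATA; the least-significant-byte write of the data register performs the store. A sketch under the same placeholder-accessor assumptions as the read example:

    static void window_write64(uint64_t ncb_addr, uint64_t value)
    {
        cvmx_pci_win_wr_addr_t a;
        cvmx_pci_win_wr_data_t d;
        a.u64 = 0;
        a.s.wr_addr = ncb_addr >> 3;              /* address bits [47:3] */
        bar0_write64(PCI_WIN_WR_ADDR_OFF, a.u64);
        bar0_write64(PCI_WIN_WR_MASK_OFF, 0);     /* 0 = write all eight bytes */
        d.u64 = 0;
        d.s.wr_data = value;
        bar0_write64(PCI_WIN_WR_DATA_OFF, d.u64); /* LSB write performs the store */
    }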
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pci-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pci.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pci.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pci.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,71 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * PCI related structures.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_PCI_H__
+#define __CVMX_PCI_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The typedefs and enumerations for Octeon's PCI packet engines have been
+ removed from this file. The definitions in this file were out of date
+ and unused. For current definitions, refer to the Octeon PCI NIC
+ driver. OCTEON-PCI-NIC-*.rpm */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_PCI_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pci.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pcie.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pcie.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pcie.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1703 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2011 Cavium, Inc. <support at cavium.com>. All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to PCIe as a host (RC) or target (EP)
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-ciu-defs.h>
+#include <asm/octeon/cvmx-dpi-defs.h>
+#include <asm/octeon/cvmx-mio-defs.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+#include <asm/octeon/cvmx-pcieepx-defs.h>
+#include <asm/octeon/cvmx-pciercx-defs.h>
+#include <asm/octeon/cvmx-pemx-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-pescx-defs.h>
+#include <asm/octeon/cvmx-sli-defs.h>
+#include <asm/octeon/cvmx-sriox-defs.h>
+#include <asm/octeon/cvmx-helper-jtag.h>
+
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+#include <asm/octeon/cvmx-error.h>
+#endif
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+#include <asm/octeon/cvmx-helper-errata.h>
+#include <asm/octeon/cvmx-qlm.h>
+#include <asm/octeon/cvmx-pcie.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+#include <asm/octeon/cvmx-swap.h>
+#include <asm/octeon/cvmx-wqe.h>
+#else
+#include "cvmx.h"
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#include "cvmx-csr-db.h"
+#endif
+#include "cvmx-pcie.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-swap.h"
+#include "cvmx-wqe.h"
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#include "cvmx-error.h"
+#endif
+#include "cvmx-helper-errata.h"
+#include "cvmx-qlm.h"
+#endif
+
+#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
+#define MPS_CN5XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
+#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
+#define MPS_CN6XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
+
+/**
+ * Return the Core virtual base address for PCIe IO access. IOs are
+ * read/written as an offset from this address.
+ *
+ * @param pcie_port PCIe port the IO is for
+ *
+ * @return 64bit Octeon IO base address for read/write
+ */
+uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
+{
+ cvmx_pcie_address_t pcie_addr;
+ pcie_addr.u64 = 0;
+ pcie_addr.io.upper = 0;
+ pcie_addr.io.io = 1;
+ pcie_addr.io.did = 3;
+ pcie_addr.io.subdid = 2;
+ pcie_addr.io.es = 1;
+ pcie_addr.io.port = pcie_port;
+ return pcie_addr.u64;
+}
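Device registers in PCIe I/O space are then reached at an offset from this base. A hedged usage sketch; dev_io_offset is a placeholder, and cvmx_read64_uint32() is assumed to be available from the SDK's access headers:

    static uint32_t read_dev_io_reg(int pcie_port, uint64_t dev_io_offset)
    {
        uint64_t io_base = cvmx_pcie_get_io_base_address(pcie_port);
        return cvmx_read64_uint32(io_base + dev_io_offset);
    }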
+
+
+/**
+ * Size of the IO address region returned at address
+ * cvmx_pcie_get_io_base_address()
+ *
+ * @param pcie_port PCIe port the IO is for
+ *
+ * @return Size of the IO window
+ */
+uint64_t cvmx_pcie_get_io_size(int pcie_port)
+{
+ return 1ull<<32;
+}
+
+
+/**
+ * Return the Core virtual base address for PCIe MEM access. Memory is
+ * read/written as an offset from this address.
+ *
+ * @param pcie_port PCIe port the IO is for
+ *
+ * @return 64bit Octeon IO base address for read/write
+ */
+uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
+{
+ cvmx_pcie_address_t pcie_addr;
+ pcie_addr.u64 = 0;
+ pcie_addr.mem.upper = 0;
+ pcie_addr.mem.io = 1;
+ pcie_addr.mem.did = 3;
+ pcie_addr.mem.subdid = 3 + pcie_port;
+ return pcie_addr.u64;
+}
+
+
+/**
+ * Size of the Mem address region returned at address
+ * cvmx_pcie_get_mem_base_address()
+ *
+ * @param pcie_port PCIe port the IO is for
+ *
+ * @return Size of the Mem window
+ */
+uint64_t cvmx_pcie_get_mem_size(int pcie_port)
+{
+ return 1ull<<36;
+}
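Device memory BARs assigned on the port must fall inside [base, base + size). A small bounds-check sketch, illustrative only:

    static int addr_in_mem_window(int pcie_port, uint64_t addr)
    {
        uint64_t base = cvmx_pcie_get_mem_base_address(pcie_port);
        uint64_t size = cvmx_pcie_get_mem_size(pcie_port);
        return (addr >= base) && (addr < base + size);
    }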
+
+
+/**
+ * @INTERNAL
+ * Initialize the RC config space CSRs
+ *
+ * @param pcie_port PCIe port to initialize
+ */
+static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
+{
+ /* Max Payload Size (PCIE*_CFG030[MPS]) */
+ /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
+ /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
+ /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
+ {
+ cvmx_pciercx_cfg030_t pciercx_cfg030;
+ pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ {
+ pciercx_cfg030.s.mps = MPS_CN5XXX;
+ pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
+ }
+ else
+ {
+ pciercx_cfg030.s.mps = MPS_CN6XXX;
+ pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
+ }
+ pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
+ pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
+ pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
+ pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
+ pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
+ pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
+ }
+
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
+ /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
+ cvmx_npei_ctl_status2_t npei_ctl_status2;
+ npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
+ npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */
+ npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
+ if (pcie_port)
+ npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
+ else
+ npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */
+
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
+ }
+ else
+ {
+ /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
+ /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
+ cvmx_dpi_sli_prtx_cfg_t prt_cfg;
+ cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
+ prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
+ prt_cfg.s.mps = MPS_CN6XXX;
+ prt_cfg.s.mrrs = MRRS_CN6XXX;
+ /* Max outstanding load request. */
+ prt_cfg.s.molr = 32;
+ cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
+
+ sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
+ sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
+ cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
+ }
+
+ /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
+ {
+ cvmx_pciercx_cfg070_t pciercx_cfg070;
+ pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
+ pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
+ pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
+ }
+
+ /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
+ /* ME and MSAE should always be set. */
+ /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
+ /* System Error Message Enable (PCIE*_CFG001[SEE]) */
+ {
+ cvmx_pciercx_cfg001_t pciercx_cfg001;
+ pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
+ pciercx_cfg001.s.msae = 1; /* Memory space enable. */
+ pciercx_cfg001.s.me = 1; /* Bus master enable. */
+ pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
+ pciercx_cfg001.s.see = 1; /* SERR# enable */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
+ }
+
+
+ /* Advanced Error Recovery Message Enables */
+ /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
+ /* Use CVMX_PCIERCX_CFG067 hardware default */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);
+
+
+ /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
+ {
+ cvmx_pciercx_cfg032_t pciercx_cfg032;
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
+ }
+
+ /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
+ /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
+ {
+ /* We set the primary bus number to 1 so IDT bridges are happy. They don't like zero */
+ cvmx_pciercx_cfg006_t pciercx_cfg006;
+ pciercx_cfg006.u32 = 0;
+ pciercx_cfg006.s.pbnum = 1;
+ pciercx_cfg006.s.sbnum = 1;
+ pciercx_cfg006.s.subbnum = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
+ }
+
+ /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
+ /* Most applications should disable the memory-mapped I/O BAR by */
+ /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
+ {
+ cvmx_pciercx_cfg008_t pciercx_cfg008;
+ pciercx_cfg008.u32 = 0;
+ pciercx_cfg008.s.mb_addr = 0x100;
+ pciercx_cfg008.s.ml_addr = 0;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
+ }
+
+ /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
+ /* Most applications should disable the prefetchable BAR by setting */
+ /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
+ /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
+ {
+ cvmx_pciercx_cfg009_t pciercx_cfg009;
+ cvmx_pciercx_cfg010_t pciercx_cfg010;
+ cvmx_pciercx_cfg011_t pciercx_cfg011;
+ pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
+ pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
+ pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
+ pciercx_cfg009.s.lmem_base = 0x100;
+ pciercx_cfg009.s.lmem_limit = 0;
+ pciercx_cfg010.s.umem_base = 0x100;
+ pciercx_cfg011.s.umem_limit = 0;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
+ }
+
+ /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
+ /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
+ {
+ cvmx_pciercx_cfg035_t pciercx_cfg035;
+ pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
+ pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
+ pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
+ pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
+ pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
+ }
+
+ /* Advanced Error Recovery Interrupt Enables */
+ /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
+ {
+ cvmx_pciercx_cfg075_t pciercx_cfg075;
+ pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
+ pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
+ pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
+ pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
+ }
+
+ /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
+ /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
+ {
+ cvmx_pciercx_cfg034_t pciercx_cfg034;
+ pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
+ pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
+ pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
+ pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
+ }
+}
+
+/**
+ * @INTERNAL
+ * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
+ * port from reset to a link up state. Software can then begin
+ * configuring the rest of the link.
+ *
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
+{
+ uint64_t start_cycle;
+ cvmx_pescx_ctl_status_t pescx_ctl_status;
+ cvmx_pciercx_cfg452_t pciercx_cfg452;
+ cvmx_pciercx_cfg032_t pciercx_cfg032;
+ cvmx_pciercx_cfg448_t pciercx_cfg448;
+
+ /* Set the lane width */
+ pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
+ pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
+ if (pescx_ctl_status.s.qlm_cfg == 0)
+ {
+ /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
+ pciercx_cfg452.s.lme = 0xf;
+ }
+ else
+ {
+ /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
+ pciercx_cfg452.s.lme = 0x7;
+ }
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);
+
+ /* CN52XX pass 1.x has an erratum where length mismatches on UR responses can
+ cause bus errors on 64bit memory reads. Turning off length error
+ checking fixes this */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
+ {
+ cvmx_pciercx_cfg455_t pciercx_cfg455;
+ pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
+ pciercx_cfg455.s.m_cpl_len_err = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
+ }
+
+ /* Lane swap needs to be manually enabled for CN52XX */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
+ {
+ switch (cvmx_sysinfo_get()->board_type)
+ {
+#if defined(OCTEON_VENDOR_LANNER)
+ case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
+ break;
+#endif
+ default:
+ pescx_ctl_status.s.lane_swp = 1;
+ break;
+ }
+ cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
+ }
+
+ /* Bring up the link */
+ pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
+ pescx_ctl_status.s.lnk_enb = 1;
+ cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
+
+ /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
+ __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);
+
+ /* Wait for the link to come up */
+ start_cycle = cvmx_get_cycle();
+ do
+ {
+ if (cvmx_get_cycle() - start_cycle > 100*cvmx_clock_get_rate(CVMX_CLOCK_CORE))
+ {
+ cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
+ return -1;
+ }
+ cvmx_wait(50000);
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ } while (pciercx_cfg032.s.dlla == 0);
+
+ /* Clear all pending errors */
+ cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));
+
+ /* Update the Replay Time Limit. Empirically, some PCIe devices take a
+ little longer to respond than expected under load. As a workaround for
+ this we configure the Replay Time Limit to the value expected for a 512
+ byte MPS instead of our actual 256 byte MPS. The numbers below are
+ directly from the PCIe spec table 3-4 */
+ pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
+ switch (pciercx_cfg032.s.nlw)
+ {
+ case 1: /* 1 lane */
+ pciercx_cfg448.s.rtl = 1677;
+ break;
+ case 2: /* 2 lanes */
+ pciercx_cfg448.s.rtl = 867;
+ break;
+ case 4: /* 4 lanes */
+ pciercx_cfg448.s.rtl = 462;
+ break;
+ case 8: /* 8 lanes */
+ pciercx_cfg448.s.rtl = 258;
+ break;
+ }
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
+
+ return 0;
+}
+
+static inline void __cvmx_increment_ba(cvmx_sli_mem_access_subidx_t *pmas)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ pmas->cn68xx.ba++;
+ else
+ pmas->cn63xx.ba++;
+}
+
+/**
+ * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate
+ * the bus.
+ *
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
+{
+ int i;
+ int base;
+ uint64_t addr_swizzle;
+ cvmx_ciu_soft_prst_t ciu_soft_prst;
+ cvmx_pescx_bist_status_t pescx_bist_status;
+ cvmx_pescx_bist_status2_t pescx_bist_status2;
+ cvmx_npei_ctl_status_t npei_ctl_status;
+ cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
+ cvmx_npei_mem_access_subidx_t mem_access_subid;
+ cvmx_npei_dbg_data_t npei_dbg_data;
+ cvmx_pescx_ctl_status2_t pescx_ctl_status2;
+ cvmx_pciercx_cfg032_t pciercx_cfg032;
+ cvmx_npei_bar1_indexx_t bar1_index;
+
+retry:
+ /* Make sure we aren't trying to set up a target mode interface in host mode */
+ npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
+ if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
+ {
+ cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
+ return -1;
+ }
+
+ /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+ {
+ npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
+ {
+ cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
+ return -1;
+ }
+ }
+
+ /* CN56XX pass 1 can't be used at all due to errata; skip it entirely */
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) {
+ cvmx_dprintf("PCIe port %d: CN56XX_PASS_1, skipping\n", pcie_port);
+ return -1;
+ }
+
+ /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
+ npei_ctl_status.s.arb = 1;
+ /* Allow up to 0x20 config retries */
+ npei_ctl_status.s.cfg_rtry = 0x20;
+ /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
+ {
+ npei_ctl_status.s.p0_ntags = 0x20;
+ npei_ctl_status.s.p1_ntags = 0x20;
+ }
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
+
+ /* Bring the PCIe out of reset */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
+ {
+ /* The EBH5200 board swapped the PCIe reset lines on the board. As a
+ workaround for this bug, we bring both PCIe ports out of reset at
+ the same time instead of on separate calls. So for port 0, we bring
+ both out of reset and do nothing on port 1 */
+ if (pcie_port == 0)
+ {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ /* After a chip reset the PCIe will also be in reset. If it isn't,
+ most likely someone is trying to init it again without a proper
+ PCIe reset */
+ if (ciu_soft_prst.s.soft_prst == 0)
+ {
+ /* Reset the ports */
+ ciu_soft_prst.s.soft_prst = 1;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 1;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ /* Wait until pcie resets the ports. */
+ cvmx_wait_usec(2000);
+ }
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ }
+ }
+ else
+ {
+ /* The normal case: The PCIe ports are completely separate and can be
+ brought out of reset independently */
+ if (pcie_port)
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ else
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ /* After a chip reset the PCIe will also be in reset. If it isn't,
+ most likely someone is trying to init it again without a proper
+ PCIe reset */
+ if (ciu_soft_prst.s.soft_prst == 0)
+ {
+ /* Reset the port */
+ ciu_soft_prst.s.soft_prst = 1;
+ if (pcie_port)
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ else
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ /* Wait until pcie resets the ports. */
+ cvmx_wait_usec(2000);
+ }
+ if (pcie_port)
+ {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ }
+ else
+ {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ }
+ }
+
+ /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
+ PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
+ cvmx_wait(400000);
+
+ /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
+ CN52XX, so we only probe it on newer chips */
+ if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
+ {
+ /* Clear PCLK_RUN so we can check if the clock is running */
+ pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
+ pescx_ctl_status2.s.pclk_run = 1;
+ cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
+ /* Now that we cleared PCLK_RUN, wait for it to be set again telling
+ us the clock is running */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
+ cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
+ {
+ cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
+ return -1;
+ }
+ }
+
+ /* Check and make sure PCIe came out of reset. If it doesn't the board
+ probably hasn't wired the clocks up and the interface should be
+ skipped */
+ pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
+ if (pescx_ctl_status2.s.pcierst)
+ {
+ cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
+ return -1;
+ }
+
+ /* Check BIST2 status. If any bits are set skip this interface. This
+ is an attempt to catch PCIE-813 on pass 1 parts */
+ pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
+ if (pescx_bist_status2.u64)
+ {
+ cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", pcie_port);
+ return -1;
+ }
+
+ /* Check BIST status */
+ pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
+ if (pescx_bist_status.u64)
+ cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));
+
+ /* Initialize the config space CSRs */
+ __cvmx_pcie_rc_initialize_config_space(pcie_port);
+
+ /* Bring the link up */
+ if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port))
+ {
+ cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port);
+ return -1;
+ }
+
+ /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
+ npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
+ npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
+ npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
+ cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
+
+ /* Setup Mem access SubDIDs */
+ mem_access_subid.u64 = 0;
+ mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
+ mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
+ mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
+ mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
+ mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
+ mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
+ mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
+ mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
+ mem_access_subid.s.ba = 0; /* PCIe Address Bits <63:34>. */
+
+ /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
+ for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
+ {
+ cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
+ mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
+ }
+
+ /* Disable the peer to peer forwarding register. This must be set up
+ by the OS after it enumerates the bus and assigns addresses to the
+ PCIe busses */
+ for (i=0; i<4; i++)
+ {
+ cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
+ cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
+ }
+
+ /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
+ cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
+
+ /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
+ cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
+
+ bar1_index.u32 = 0;
+ bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
+ bar1_index.s.ca = 1; /* Not Cached */
+ bar1_index.s.end_swp = 1; /* Endian Swap mode */
+ bar1_index.s.addr_v = 1; /* Valid entry */
+
+ base = pcie_port ? 16 : 0;
+
+ /* Big endian swizzle for 32-bit PEXP_NCB register. */
+#ifdef __MIPSEB__
+ addr_swizzle = 4;
+#else
+ addr_swizzle = 0;
+#endif
+ for (i = 0; i < 16; i++) {
+ cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32);
+ base++;
+ /* 256MB / 16 >> 22 == 4 */
+ bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
+ }
+
+ /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
+ where they overlap. It also overlaps with the device addresses, so
+ make sure the peer to peer forwarding is set right */
+ cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
+
+ /* Setup BAR2 attributes */
+ /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
+ /* PTLP_RO,CTLP_RO should normally be set (except for debug). */
+ /* WAIT_COM=0 will likely work for all applications. */
+ /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
+ if (pcie_port)
+ {
+ cvmx_npei_ctl_port1_t npei_ctl_port;
+ npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
+ npei_ctl_port.s.bar2_enb = 1;
+ npei_ctl_port.s.bar2_esx = 1;
+ npei_ctl_port.s.bar2_cax = 0;
+ npei_ctl_port.s.ptlp_ro = 1;
+ npei_ctl_port.s.ctlp_ro = 1;
+ npei_ctl_port.s.wait_com = 0;
+ npei_ctl_port.s.waitl_com = 0;
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
+ }
+ else
+ {
+ cvmx_npei_ctl_port0_t npei_ctl_port;
+ npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
+ npei_ctl_port.s.bar2_enb = 1;
+ npei_ctl_port.s.bar2_esx = 1;
+ npei_ctl_port.s.bar2_cax = 0;
+ npei_ctl_port.s.ptlp_ro = 1;
+ npei_ctl_port.s.ctlp_ro = 1;
+ npei_ctl_port.s.wait_com = 0;
+ npei_ctl_port.s.waitl_com = 0;
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
+ }
+
+ /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
+ TLP ordering to not be preserved after multiple PCIe port resets. This
+ code detects this fault and corrects it by aligning the TLP counters
+ properly. Another link reset is then performed. See PCIE-13340 */
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
+ {
+ cvmx_npei_dbg_data_t dbg_data;
+ int old_in_fif_p_count;
+ int in_fif_p_count;
+ int out_p_count;
+ int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
+ int i;
+
+ /* Choose a write address of 1MB. It should be harmless since none of
+ the BARs have been set up yet */
+ uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
+
+ /* Make sure at least in_p_offset writes have been issued before we try
+ to read in_fif_p_count */
+ i = in_p_offset;
+ while (i--)
+ {
+ cvmx_write64_uint32(write_address, 0);
+ cvmx_wait(10000);
+ }
+
+ /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
+ unstable sometimes so read it twice with a write between the reads.
+ This way we can tell the value is good as it will increment by one
+ due to the write */
+ cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
+ cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
+ do
+ {
+ dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ old_in_fif_p_count = dbg_data.s.data & 0xff;
+ cvmx_write64_uint32(write_address, 0);
+ cvmx_wait(10000);
+ dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ in_fif_p_count = dbg_data.s.data & 0xff;
+ } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
+
+ /* Update in_fif_p_count for its offset with respect to out_p_count */
+ in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
+
+ /* Read the OUT_P_COUNT from the debug select */
+ cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
+ cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
+ dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ out_p_count = (dbg_data.s.data>>1) & 0xff;
+
+ /* Check that the two counters are aligned */
+ if (out_p_count != in_fif_p_count)
+ {
+ cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
+ while (in_fif_p_count != 0)
+ {
+ cvmx_write64_uint32(write_address, 0);
+ cvmx_wait(10000);
+ in_fif_p_count = (in_fif_p_count + 1) & 0xff;
+ }
+ /* The EBH5200 board swapped the PCIe reset lines on the board. This
+ means we must bring both links down and up, which will cause the
+ PCIe0 to need alignment again. Lots of messages will be displayed,
+ but everything should work */
+ if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
+ (pcie_port == 1))
+ cvmx_pcie_rc_initialize(0);
+ /* Retry bringing this port up */
+ goto retry;
+ }
+ }
+
+ /* Display the link status */
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
+ * port from reset to a link up state. Software can then begin
+ * configuring the rest of the link.
+ *
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
+{
+ uint64_t start_cycle;
+ cvmx_pemx_ctl_status_t pem_ctl_status;
+ cvmx_pciercx_cfg032_t pciercx_cfg032;
+ cvmx_pciercx_cfg448_t pciercx_cfg448;
+
+ /* Bring up the link */
+ pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
+ pem_ctl_status.s.lnk_enb = 1;
+ cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
+
+ /* Wait for the link to come up */
+ start_cycle = cvmx_get_cycle();
+ do
+ {
+ if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE))
+ return -1;
+ cvmx_wait(10000);
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ } while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
+
+ /* Update the Replay Time Limit. Empirically, some PCIe devices take a
+ little longer to respond than expected under load. As a workaround for
+ this we configure the Replay Time Limit to the value expected for a 512
+ byte MPS instead of our actual 256 byte MPS. The numbers below are
+ directly from the PCIe spec table 3-4 */
+ pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
+ switch (pciercx_cfg032.s.nlw)
+ {
+ case 1: /* 1 lane */
+ pciercx_cfg448.s.rtl = 1677;
+ break;
+ case 2: /* 2 lanes */
+ pciercx_cfg448.s.rtl = 867;
+ break;
+ case 4: /* 4 lanes */
+ pciercx_cfg448.s.rtl = 462;
+ break;
+ case 8: /* 8 lanes */
+ pciercx_cfg448.s.rtl = 258;
+ break;
+ }
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
+
+ return 0;
+}
+
+
+/**
+ * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
+ * the bus.
+ *
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
+{
+ int i;
+ cvmx_ciu_soft_prst_t ciu_soft_prst;
+ cvmx_mio_rst_ctlx_t mio_rst_ctl;
+ cvmx_pemx_bar_ctl_t pemx_bar_ctl;
+ cvmx_pemx_ctl_status_t pemx_ctl_status;
+ cvmx_pemx_bist_status_t pemx_bist_status;
+ cvmx_pemx_bist_status2_t pemx_bist_status2;
+ cvmx_pciercx_cfg032_t pciercx_cfg032;
+ cvmx_pciercx_cfg515_t pciercx_cfg515;
+ cvmx_sli_ctl_portx_t sli_ctl_portx;
+ cvmx_sli_mem_access_ctl_t sli_mem_access_ctl;
+ cvmx_sli_mem_access_subidx_t mem_access_subid;
+ cvmx_pemx_bar1_indexx_t bar1_index;
+ int ep_mode;
+
+ /* Make sure this interface is PCIe */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ {
+ /* Requires reading the MIO_QLMX_CFG register to figure
+ out the port type. */
+ int qlm = pcie_port;
+ int status;
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ qlm = 3 - (pcie_port * 2);
+ else if (OCTEON_IS_MODEL(OCTEON_CN61XX))
+ {
+ cvmx_mio_qlmx_cfg_t qlm_cfg;
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
+ if (qlm_cfg.s.qlm_cfg == 1)
+ qlm = 1;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ {
+ /* PCIe is allowed only in QLM1: 1 PCIe port in x2 or
+ 2 PCIe ports in x1 */
+ qlm = 1;
+ }
+ status = cvmx_qlm_get_status(qlm);
+ if (status == 4 || status == 5)
+ {
+ cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
+ return -1;
+ }
+ if (status == 1)
+ {
+ cvmx_dprintf("PCIe: Port %d is SGMII, skipping.\n", pcie_port);
+ return -1;
+ }
+ if (status == 2)
+ {
+ cvmx_dprintf("PCIe: Port %d is XAUI, skipping.\n", pcie_port);
+ return -1;
+ }
+ if (status == -1)
+ {
+ cvmx_dprintf("PCIe: Port %d is unknown, skipping.\n", pcie_port);
+ return -1;
+ }
+ }
+
+#if 0
+ /* This code lets a PCIe analyzer see 63XX traffic */
+ cvmx_dprintf("PCIE : init for pcie analyzer.\n");
+ cvmx_helper_qlm_jtag_init();
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
+ cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
+ cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
+ cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
+ cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
+ cvmx_helper_qlm_jtag_update(pcie_port);
+#endif
+
+ /* Make sure we aren't trying to set up a target mode interface in host mode */
+ mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
+ ep_mode = ((OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)) ? (mio_rst_ctl.s.prtmode != 1) : (!mio_rst_ctl.s.host_mode));
+ if (ep_mode)
+ {
+ cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port);
+ return -1;
+ }
+
+ /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
+ {
+ if (pcie_port)
+ {
+ cvmx_ciu_qlm1_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
+ }
+ else
+ {
+ cvmx_ciu_qlm0_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
+ }
+ }
+ /* Bring the PCIe out of reset */
+ if (pcie_port)
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ else
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ /* After a chip reset the PCIe will also be in reset. If it isn't,
+ most likely someone is trying to init it again without a proper
+ PCIe reset */
+ if (ciu_soft_prst.s.soft_prst == 0)
+ {
+ /* Reset the port */
+ ciu_soft_prst.s.soft_prst = 1;
+ if (pcie_port)
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ else
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ /* Wait until pcie resets the ports. */
+ cvmx_wait_usec(2000);
+ }
+ if (pcie_port)
+ {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ }
+ else
+ {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ }
+
+ /* Wait for PCIe reset to complete */
+ cvmx_wait_usec(1000);
+
+ /* Check and make sure PCIe came out of reset. If it doesn't the board
+ probably hasn't wired the clocks up and the interface should be
+ skipped */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_RST_CTLX(pcie_port), cvmx_mio_rst_ctlx_t, rst_done, ==, 1, 10000))
+ {
+ cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
+ return -1;
+ }
+
+ /* Check BIST status */
+ pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
+ if (pemx_bist_status.u64)
+ cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
+ pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
+ /* Errata PCIE-14766 may cause the lower 6 bits to be randomly set on CN63XXp1 */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ pemx_bist_status2.u64 &= ~0x3full;
+ if (pemx_bist_status2.u64)
+ cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
+
+ /* Initialize the config space CSRs */
+ __cvmx_pcie_rc_initialize_config_space(pcie_port);
+
+ /* Enable gen2 speed selection */
+ pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
+ pciercx_cfg515.s.dsc = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
+
+ /* Bring the link up */
+ if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
+ {
+ /* Some gen1 devices don't handle the gen 2 training correctly. Disable
+ gen2 and try again with only gen1 */
+ cvmx_pciercx_cfg031_t pciercx_cfg031;
+ pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
+ pciercx_cfg031.s.mls = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32);
+ if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
+ {
+ cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
+ return -1;
+ }
+ }
+
+ /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
+ sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
+ sli_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
+ sli_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
+ cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
+
+ /* Setup Mem access SubDIDs */
+ mem_access_subid.u64 = 0;
+ mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
+ mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */
+ mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
+ mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
+ mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ /* PCIe Address Bits <63:34>. */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ mem_access_subid.cn68xx.ba = 0;
+ else
+ mem_access_subid.cn63xx.ba = 0;
+
+ /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
+ for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
+ {
+ cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
+ /* Set each SUBID to extend the addressable range */
+ __cvmx_increment_ba(&mem_access_subid);
+ }
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN61XX))
+ {
+ /* Disable the peer to peer forwarding register. This must be set up
+ by the OS after it enumerates the bus and assigns addresses to the
+ PCIe busses */
+ for (i=0; i<4; i++)
+ {
+ cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
+ cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
+ }
+ }
+
+ /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
+ cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
+
+ /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence
+ where they overlap. It also overlaps with the device addresses, so
+ make sure the peer to peer forwarding is set right */
+ cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
+
+ /* Setup BAR2 attributes */
+ /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
+ /* PTLP_RO,CTLP_RO should normally be set (except for debug). */
+ /* WAIT_COM=0 will likely work for all applications. */
+ /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
+ pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
+ pemx_bar_ctl.s.bar1_siz = 3; /* 256MB BAR1*/
+ pemx_bar_ctl.s.bar2_enb = 1;
+ pemx_bar_ctl.s.bar2_esx = 1;
+ pemx_bar_ctl.s.bar2_cax = 0;
+ cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
+ sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
+ sli_ctl_portx.s.ptlp_ro = 1;
+ sli_ctl_portx.s.ctlp_ro = 1;
+ sli_ctl_portx.s.wait_com = 0;
+ sli_ctl_portx.s.waitl_com = 0;
+ cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
+
+ /* BAR1 follows BAR2 */
+ cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
+
+ bar1_index.u64 = 0;
+ bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
+ bar1_index.s.ca = 1; /* Not Cached */
+ bar1_index.s.end_swp = 1; /* Endian Swap mode */
+ bar1_index.s.addr_v = 1; /* Valid entry */
+
+ for (i = 0; i < 16; i++) {
+ cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
+ /* 256MB / 16 >> 22 == 4 */
+ bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
+ }
+
+ /* Allow config retries for 250 ms. The count is based on the 5 GHz SERDES
+ clock */
+ pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
+ pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
+ cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
+
+ /* Display the link status */
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
+
+ return 0;
+}
+
+/**
+ * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
+ *
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+int cvmx_pcie_rc_initialize(int pcie_port)
+{
+ int result;
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
+ else
+ result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
+#if (!defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)) || defined(CONFIG_CAVIUM_DECODE_RSL)
+ if (result == 0)
+ cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
+#endif
+ return result;
+}
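+
+/* Usage sketch (illustrative, not part of the SDK source): bring up both
+ RC ports and probe the device behind each link by reading the 32-bit
+ vendor/device ID word at config offset 0. A read of 0xffffffff means
+ the config request wasn't routed (empty slot or dead link). */
+static void example_rc_bringup(void)
+{
+ int port;
+ uint32_t id;
+ for (port = 0; port < 2; port++)
+ {
+ if (cvmx_pcie_rc_initialize(port))
+ continue;
+ id = cvmx_pcie_config_read32(port, 1, 0, 0, 0);
+ if (id != 0xffffffff)
+ cvmx_dprintf("PCIe%d: found device, id word 0x%08x\n", port, (unsigned int)id);
+ }
+}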
+
+
+/**
+ * Shutdown a PCIe port and put it in reset
+ *
+ * @param pcie_port PCIe port to shutdown
+ *
+ * @return Zero on success
+ */
+int cvmx_pcie_rc_shutdown(int pcie_port)
+{
+#if (!defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)) || defined(CONFIG_CAVIUM_DECODE_RSL)
+ cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
+#endif
+ /* Wait for all pending operations to complete */
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
+ cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
+ }
+ else
+ {
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000))
+ cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
+ }
+
+ /* Force reset */
+ if (pcie_port)
+ {
+ cvmx_ciu_soft_prst_t ciu_soft_prst;
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 1;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ }
+ else
+ {
+ cvmx_ciu_soft_prst_t ciu_soft_prst;
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ ciu_soft_prst.s.soft_prst = 1;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ }
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Build a PCIe config space request address for a device
+ *
+ * @param pcie_port PCIe port to access
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return 64bit Octeon IO address
+ */
+static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg)
+{
+ cvmx_pcie_address_t pcie_addr;
+ cvmx_pciercx_cfg006_t pciercx_cfg006;
+
+ pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
+ if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
+ return 0;
+
+ pcie_addr.u64 = 0;
+ pcie_addr.config.upper = 2;
+ pcie_addr.config.io = 1;
+ pcie_addr.config.did = 3;
+ pcie_addr.config.subdid = 1;
+ pcie_addr.config.es = 1;
+ pcie_addr.config.port = pcie_port;
+ pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
+ pcie_addr.config.bus = bus;
+ pcie_addr.config.dev = dev;
+ pcie_addr.config.func = fn;
+ pcie_addr.config.reg = reg;
+ return pcie_addr.u64;
+}
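+
+/* Worked example (illustrative): print the XKPHYS config-space address
+ generated for port 0, bus 1, dev 0, fn 0, reg 0. A result of 0 means
+ the bus/dev combination was rejected by the check above. */
+static inline void __cvmx_pcie_example_config_addr(void)
+{
+ uint64_t address = __cvmx_pcie_build_config_addr(0, 1, 0, 0, 0);
+ cvmx_dprintf("PCIe0 1:0.0 config address 0x%016llx\n", CAST64(address));
+}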
+
+
+/**
+ * Read 8bits from a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
+{
+ uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ return cvmx_read64_uint8(address);
+ else
+ return 0xff;
+}
+
+
+/**
+ * Read 16bits from a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
+{
+ uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ return cvmx_le16_to_cpu(cvmx_read64_uint16(address));
+ else
+ return 0xffff;
+}
+
+
+/**
+ * Read 32bits from a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
+{
+ uint64_t address;
+
+ address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ return cvmx_le32_to_cpu(cvmx_read64_uint32(address));
+ else
+ return 0xffffffff;
+}
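+
+/* Sketch (illustrative): scan all 32 device slots on one bus of a port.
+ Failed or unroutable reads return 0xffffffff, so that value doubles as
+ the "no device" marker. */
+static void example_scan_bus(int pcie_port, int bus)
+{
+ int dev;
+ uint32_t id;
+ for (dev = 0; dev < 32; dev++)
+ {
+ id = cvmx_pcie_config_read32(pcie_port, bus, dev, 0, 0);
+ if ((id != 0xffffffff) && (id != 0))
+ cvmx_dprintf("PCIe%d %d:%d.0 id word 0x%08x\n", pcie_port, bus, dev, (unsigned int)id);
+ }
+}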
+
+
+/**
+ * Write 8bits to a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
+{
+ uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ cvmx_write64_uint8(address, val);
+}
+
+
+/**
+ * Write 16bits to a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
+{
+ uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ cvmx_write64_uint16(address, cvmx_cpu_to_le16(val));
+}
+
+
+/**
+ * Write 32bits to a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
+{
+ uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ cvmx_write64_uint32(address, cvmx_cpu_to_le32(val));
+}
+
+
+/**
+ * Read a PCIe config space register indirectly. This is used for
+ * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
+ *
+ * @param pcie_port PCIe port to read from
+ * @param cfg_offset Address to read
+ *
+ * @return Value read
+ */
+uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_pescx_cfg_rd_t pescx_cfg_rd;
+ pescx_cfg_rd.u64 = 0;
+ pescx_cfg_rd.s.addr = cfg_offset;
+ cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
+ pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
+ return pescx_cfg_rd.s.data;
+ }
+ else
+ {
+ cvmx_pemx_cfg_rd_t pemx_cfg_rd;
+ pemx_cfg_rd.u64 = 0;
+ pemx_cfg_rd.s.addr = cfg_offset;
+ cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
+ pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
+ return pemx_cfg_rd.s.data;
+ }
+}
+
+
+/**
+ * Write a PCIe config space register indirectly. This is used for
+ * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
+ *
+ * @param pcie_port PCIe port to write to
+ * @param cfg_offset Address to write
+ * @param val Value to write
+ */
+void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_pescx_cfg_wr_t pescx_cfg_wr;
+ pescx_cfg_wr.u64 = 0;
+ pescx_cfg_wr.s.addr = cfg_offset;
+ pescx_cfg_wr.s.data = val;
+ cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
+ }
+ else
+ {
+ cvmx_pemx_cfg_wr_t pemx_cfg_wr;
+ pemx_cfg_wr.u64 = 0;
+ pemx_cfg_wr.s.addr = cfg_offset;
+ pemx_cfg_wr.s.data = val;
+ cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
+ }
+}
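+
+/* Sketch (illustrative): the indirect read/write pair is typically used
+ for read-modify-write of the typed PCIERC?_CFG??? registers. This
+ helper re-reads the negotiated link width the same way the init code
+ above does. */
+static int example_link_lanes(int pcie_port)
+{
+ cvmx_pciercx_cfg032_t pciercx_cfg032;
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ return pciercx_cfg032.s.nlw; /* Negotiated link width in lanes */
+}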
+
+
+/**
+ * Initialize a PCIe port for use in target(EP) mode.
+ *
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+int cvmx_pcie_ep_initialize(int pcie_port)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_npei_ctl_status_t npei_ctl_status;
+ npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
+ if (npei_ctl_status.s.host_mode)
+ return -1;
+ }
+ else
+ {
+ cvmx_mio_rst_ctlx_t mio_rst_ctl;
+ int ep_mode;
+ mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
+ ep_mode = (OCTEON_IS_MODEL(OCTEON_CN61XX) ? (mio_rst_ctl.s.prtmode != 0) : mio_rst_ctl.s.host_mode);
+ if (ep_mode)
+ return -1;
+ }
+
+ /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
+ {
+ if (pcie_port)
+ {
+ cvmx_ciu_qlm1_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
+ }
+ else
+ {
+ cvmx_ciu_qlm0_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
+ }
+ }
+
+ /* Enable bus master and memory */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6);
+
+ /* Max Payload Size (PCIE*_CFG030[MPS]) */
+ /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
+ /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
+ /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
+ {
+ cvmx_pcieepx_cfg030_t pcieepx_cfg030;
+ pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port));
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ {
+ pcieepx_cfg030.s.mps = MPS_CN5XXX;
+ pcieepx_cfg030.s.mrrs = MRRS_CN5XXX;
+ }
+ else
+ {
+ pcieepx_cfg030.s.mps = MPS_CN6XXX;
+ pcieepx_cfg030.s.mrrs = MRRS_CN6XXX;
+ }
+ pcieepx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
+ pcieepx_cfg030.s.ns_en = 1; /* Enable no snoop. */
+ pcieepx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
+ pcieepx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
+ pcieepx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
+ pcieepx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32);
+ }
+
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
+ /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
+ cvmx_npei_ctl_status2_t npei_ctl_status2;
+ npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
+ npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes (Limit of most PCs) */
+ npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
+ }
+ else
+ {
+ /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
+ /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
+ cvmx_dpi_sli_prtx_cfg_t prt_cfg;
+ cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
+ prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
+ prt_cfg.s.mps = MPS_CN6XXX;
+ prt_cfg.s.mrrs = MRRS_CN6XXX;
+ /* Max outstanding load request. */
+ prt_cfg.s.molr = 32;
+ cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
+
+ sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
+ sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
+ cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
+ }
+
+ /* Setup Mem access SubDID 12 to access Host memory */
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_npei_mem_access_subidx_t mem_access_subid;
+ mem_access_subid.u64 = 0;
+ mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
+ mem_access_subid.s.nmerge = 1; /* Merging is not allowed in this window. */
+ mem_access_subid.s.esr = 0; /* Endian-swap for Reads. */
+ mem_access_subid.s.esw = 0; /* Endian-swap for Writes. */
+ mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
+ mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
+ mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
+ mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
+ mem_access_subid.s.ba = 0; /* PCIe Address Bits <63:34>. */
+ cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
+ }
+ else
+ {
+ cvmx_sli_mem_access_subidx_t mem_access_subid;
+ mem_access_subid.u64 = 0;
+ mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
+ mem_access_subid.s.nmerge = 0; /* Merging is allowed in this window. */
+ mem_access_subid.s.esr = 0; /* Endian-swap for Reads. */
+ mem_access_subid.s.esw = 0; /* Endian-swap for Writes. */
+ mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ /* PCIe Address Bits <63:34>. */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ mem_access_subid.cn68xx.ba = 0;
+ else
+ mem_access_subid.cn63xx.ba = 0;
+ cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port*4), mem_access_subid.u64);
+ }
+ return 0;
+}
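+
+/* Sketch (illustrative): endpoint bring-up from the target side. The host
+ on the far end of the link performs enumeration; the EP side only needs
+ this one call after reset. */
+static void example_ep_bringup(int pcie_port)
+{
+ if (cvmx_pcie_ep_initialize(pcie_port))
+ cvmx_dprintf("PCIe%d: port is not in endpoint mode\n", pcie_port);
+}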
+
+
+/**
+ * Wait for posted PCIe read/writes to reach the other side of
+ * the internal PCIe switch. This will ensure that core
+ * read/writes are posted before anything after this function
+ * is called. This may be necessary when writing to memory that
+ * will later be read using the DMA/PKT engines.
+ *
+ * @param pcie_port PCIe port to wait for
+ */
+void cvmx_pcie_wait_for_pending(int pcie_port)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ {
+ cvmx_npei_data_out_cnt_t npei_data_out_cnt;
+ int a;
+ int b;
+ int c;
+
+ /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
+ description of how this code works */
+ npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
+ if (pcie_port)
+ {
+ if (!npei_data_out_cnt.s.p1_fcnt)
+ return;
+ a = npei_data_out_cnt.s.p1_ucnt;
+ b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
+ }
+ else
+ {
+ if (!npei_data_out_cnt.s.p0_fcnt)
+ return;
+ a = npei_data_out_cnt.s.p0_ucnt;
+ b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
+ }
+
+ while (1)
+ {
+ npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
+ c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
+ if (a<=b)
+ {
+ if ((c<a) || (c>b))
+ return;
+ }
+ else
+ {
+ if ((c>b) && (c<a))
+ return;
+ }
+ }
+ }
+ else
+ {
+ cvmx_sli_data_out_cnt_t sli_data_out_cnt;
+ int a;
+ int b;
+ int c;
+
+ sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
+ if (pcie_port)
+ {
+ if (!sli_data_out_cnt.s.p1_fcnt)
+ return;
+ a = sli_data_out_cnt.s.p1_ucnt;
+ b = (a + sli_data_out_cnt.s.p1_fcnt-1) & 0xffff;
+ }
+ else
+ {
+ if (!sli_data_out_cnt.s.p0_fcnt)
+ return;
+ a = sli_data_out_cnt.s.p0_ucnt;
+ b = (a + sli_data_out_cnt.s.p0_fcnt-1) & 0xffff;
+ }
+
+ while (1)
+ {
+ sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
+ c = (pcie_port) ? sli_data_out_cnt.s.p1_ucnt : sli_data_out_cnt.s.p0_ucnt;
+ if (a<=b)
+ {
+ if ((c<a) || (c>b))
+ return;
+ }
+ else
+ {
+ if ((c>b) && (c<a))
+ return;
+ }
+ }
+ }
+}
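+
+/* Sketch (illustrative): a typical caller flushes posted writes before
+ kicking a DMA/PKT engine that will read the same memory over PCIe. The
+ write address mirrors the one used by the TLP alignment workaround
+ above; the engine kick itself is application specific and only hinted
+ at here. */
+static void example_flush_before_dma(int pcie_port)
+{
+ uint64_t address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull << 63);
+ cvmx_write64_uint32(address, 0); /* Posted write toward the device */
+ cvmx_pcie_wait_for_pending(pcie_port); /* Make sure it crossed the switch */
+ /* Hypothetical: start the DMA engine here */
+}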
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pcie.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pcie.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pcie.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pcie.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,322 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to PCIe as a host(RC) or target(EP)
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_PCIE_H__
+#define __CVMX_PCIE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The physical memory base mapped by BAR1. 256MB at the end of the
+ * first 4GB.
+ */
+#define CVMX_PCIE_BAR1_PHYS_BASE ((1ull << 32) - (1ull << 28))
+#define CVMX_PCIE_BAR1_PHYS_SIZE (1ull << 28)
+
+/*
+ * The RC base of BAR1. gen1 has a 39-bit BAR2 and gen2 has a 41-bit BAR2;
+ * place BAR1 above both so its address is the same in either case.
+ */
+#define CVMX_PCIE_BAR1_RC_BASE (1ull << 41)
+
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t upper : 2; /* Normally 2 for XKPHYS */
+ uint64_t reserved_49_61 : 13; /* Must be zero */
+ uint64_t io : 1; /* 1 for IO space access */
+ uint64_t did : 5; /* PCIe DID = 3 */
+ uint64_t subdid : 3; /* PCIe SubDID = 1 */
+ uint64_t reserved_36_39 : 4; /* Must be zero */
+ uint64_t es : 2; /* Endian swap = 1 */
+ uint64_t port : 2; /* PCIe port 0,1 */
+ uint64_t reserved_29_31 : 3; /* Must be zero */
+ uint64_t ty : 1; /* Selects the type of the configuration request (0 = type 0, 1 = type 1). */
+ uint64_t bus : 8; /* Target bus number sent in the ID in the request. */
+ uint64_t dev : 5; /* Target device number sent in the ID in the request. Note that Dev must be
+ zero for type 0 configuration requests. */
+ uint64_t func : 3; /* Target function number sent in the ID in the request. */
+ uint64_t reg : 12; /* Selects a register in the configuration space of the target. */
+ } config;
+ struct
+ {
+ uint64_t upper : 2; /* Normally 2 for XKPHYS */
+ uint64_t reserved_49_61 : 13; /* Must be zero */
+ uint64_t io : 1; /* 1 for IO space access */
+ uint64_t did : 5; /* PCIe DID = 3 */
+ uint64_t subdid : 3; /* PCIe SubDID = 2 */
+ uint64_t reserved_36_39 : 4; /* Must be zero */
+ uint64_t es : 2; /* Endian swap = 1 */
+ uint64_t port : 2; /* PCIe port 0,1 */
+ uint64_t address : 32; /* PCIe IO address */
+ } io;
+ struct
+ {
+ uint64_t upper : 2; /* Normally 2 for XKPHYS */
+ uint64_t reserved_49_61 : 13; /* Must be zero */
+ uint64_t io : 1; /* 1 for IO space access */
+ uint64_t did : 5; /* PCIe DID = 3 */
+ uint64_t subdid : 3; /* PCIe SubDID = 3-6 */
+ uint64_t reserved_36_39 : 4; /* Must be zero */
+ uint64_t address : 36; /* PCIe Mem address */
+ } mem;
+} cvmx_pcie_address_t;
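+
+/* Sketch (illustrative): composing a mem-window address by hand with the
+ union above. The subdid mapping used here is an assumption for
+ illustration only; real code should use cvmx_pcie_get_mem_base_address()
+ declared below instead. */
+static inline uint64_t cvmx_pcie_example_mem_address(int pcie_port, uint64_t offset)
+{
+ cvmx_pcie_address_t pcie_addr;
+ pcie_addr.u64 = 0;
+ pcie_addr.mem.upper = 2; /* XKPHYS */
+ pcie_addr.mem.io = 1; /* 1 for IO space access */
+ pcie_addr.mem.did = 3; /* PCIe DID */
+ pcie_addr.mem.subdid = 3 + pcie_port; /* Mem SubDIDs span 3-6; per-port mapping assumed */
+ pcie_addr.mem.address = offset; /* Offset within the 36-bit mem window */
+ return pcie_addr.u64;
+}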
+
+
+/**
+ * Return the Core virtual base address for PCIe IO access. IOs are
+ * read/written as an offset from this address.
+ *
+ * @param pcie_port PCIe port the IO is for
+ *
+ * @return 64bit Octeon IO base address for read/write
+ */
+uint64_t cvmx_pcie_get_io_base_address(int pcie_port);
+
+/**
+ * Size of the IO address region starting at the address returned by
+ * cvmx_pcie_get_io_base_address()
+ *
+ * @param pcie_port PCIe port the IO is for
+ *
+ * @return Size of the IO window
+ */
+uint64_t cvmx_pcie_get_io_size(int pcie_port);
+
+/**
+ * Return the Core virtual base address for PCIe MEM access. Memory is
+ * read/written as an offset from this address.
+ *
+ * @param pcie_port PCIe port the IO is for
+ *
+ * @return 64bit Octeon IO base address for read/write
+ */
+uint64_t cvmx_pcie_get_mem_base_address(int pcie_port);
+
+/**
+ * Size of the Mem address region starting at the address returned by
+ * cvmx_pcie_get_mem_base_address()
+ *
+ * @param pcie_port PCIe port the IO is for
+ *
+ * @return Size of the Mem window
+ */
+uint64_t cvmx_pcie_get_mem_size(int pcie_port);
+
+/**
+ * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
+ *
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+int cvmx_pcie_rc_initialize(int pcie_port);
+
+/**
+ * Shutdown a PCIe port and put it in reset
+ *
+ * @param pcie_port PCIe port to shutdown
+ *
+ * @return Zero on success
+ */
+int cvmx_pcie_rc_shutdown(int pcie_port);
+
+/**
+ * Read 8bits from a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg);
+
+/**
+ * Read 16bits from a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg);
+
+/**
+ * Read 32bits from a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg);
+
+/**
+ * Write 8bits to a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val);
+
+/**
+ * Write 16bits to a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val);
+
+/**
+ * Write 32bits to a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val);
+
+/**
+ * Read a PCIe config space register indirectly. This is used for
+ * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
+ *
+ * @param pcie_port PCIe port to read from
+ * @param cfg_offset Address to read
+ *
+ * @return Value read
+ */
+uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset);
+
+/**
+ * Write a PCIe config space register indirectly. This is used for
+ * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
+ *
+ * @param pcie_port PCIe port to write to
+ * @param cfg_offset Address to write
+ * @param val Value to write
+ */
+void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val);
+
+/**
+ * Write a 32bit value to the Octeon NPEI register space
+ *
+ * @param address Address to write to
+ * @param val Value to write
+ */
+static inline void cvmx_pcie_npei_write32(uint64_t address, uint32_t val)
+{
+ cvmx_write64_uint32(address ^ 4, val);
+ cvmx_read64_uint32(address ^ 4);
+}
+
+/**
+ * Read a 32bit value from the Octeon NPEI register space
+ *
+ * @param address Address to read
+ * @return The result
+ */
+static inline uint32_t cvmx_pcie_npei_read32(uint64_t address)
+{
+ return cvmx_read64_uint32(address ^ 4);
+}
+
+/**
+ * Initialize a PCIe port for use in target(EP) mode.
+ *
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+int cvmx_pcie_ep_initialize(int pcie_port);
+
+/**
+ * Wait for posted PCIe read/writes to reach the other side of
+ * the internal PCIe switch. This will ensure that core
+ * read/writes are posted before anything after this function
+ * is called. This may be necessary when writing to memory that
+ * will later be read using the DMA/PKT engines.
+ *
+ * @param pcie_port PCIe port to wait for
+ */
+void cvmx_pcie_wait_for_pending(int pcie_port);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pcie.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pcieepx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pcieepx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pcieepx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,6085 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pcieepx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pcieepx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_PCIEEPX_DEFS_H__
+#define __CVMX_PCIEEPX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG000(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG000(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000000ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG000(block_id) (0x0000000000000000ull)
+#endif
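+/* All of the CVMX_PCIEEPX_CFG* accessors below follow the pattern above:
+ * when CVMX_ENABLE_CSR_ADDRESS_CHECKING is set, the block_id (PCIe port) is
+ * validated against the Octeon models that implement the register and
+ * cvmx_warn() is issued on a mismatch; in either build the function simply
+ * returns the register's config-space byte offset. A minimal usage sketch,
+ * assuming the cvmx_pcie_cfgx_read() helper declared in cvmx-pcie.h:
+ *
+ *     int pcie_port = 0;
+ *     uint32_t devid_vendid =
+ *         cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG000(pcie_port));
+ */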
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG001(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG001(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000004ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG001(block_id) (0x0000000000000004ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG002(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG002(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000008ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG002(block_id) (0x0000000000000008ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG003(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG003(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000000Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG003(block_id) (0x000000000000000Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG004(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG004(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000010ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG004(block_id) (0x0000000000000010ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG004_MASK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG004_MASK(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000080000010ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG004_MASK(block_id) (0x0000000080000010ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG005(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG005(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000014ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG005(block_id) (0x0000000000000014ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG005_MASK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG005_MASK(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000080000014ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG005_MASK(block_id) (0x0000000080000014ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG006(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG006(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000018ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG006(block_id) (0x0000000000000018ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG006_MASK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG006_MASK(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000080000018ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG006_MASK(block_id) (0x0000000080000018ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG007(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG007(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000001Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG007(block_id) (0x000000000000001Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG007_MASK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG007_MASK(%lu) is invalid on this chip\n", block_id);
+ return 0x000000008000001Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG007_MASK(block_id) (0x000000008000001Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG008(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG008(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000020ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG008(block_id) (0x0000000000000020ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG008_MASK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG008_MASK(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000080000020ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG008_MASK(block_id) (0x0000000080000020ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG009(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG009(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000024ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG009(block_id) (0x0000000000000024ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG009_MASK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG009_MASK(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000080000024ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG009_MASK(block_id) (0x0000000080000024ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG010(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG010(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000028ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG010(block_id) (0x0000000000000028ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG011(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG011(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000002Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG011(block_id) (0x000000000000002Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG012(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG012(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000030ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG012(block_id) (0x0000000000000030ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG012_MASK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG012_MASK(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000080000030ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG012_MASK(block_id) (0x0000000080000030ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG013(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG013(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000034ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG013(block_id) (0x0000000000000034ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG015(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG015(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000003Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG015(block_id) (0x000000000000003Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG016(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG016(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000040ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG016(block_id) (0x0000000000000040ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG017(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG017(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000044ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG017(block_id) (0x0000000000000044ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG020(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG020(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000050ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG020(block_id) (0x0000000000000050ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG021(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG021(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000054ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG021(block_id) (0x0000000000000054ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG022(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG022(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000058ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG022(block_id) (0x0000000000000058ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG023(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG023(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000005Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG023(block_id) (0x000000000000005Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG028(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG028(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000070ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG028(block_id) (0x0000000000000070ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG029(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG029(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000074ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG029(block_id) (0x0000000000000074ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG030(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG030(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000078ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG030(block_id) (0x0000000000000078ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG031(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG031(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000007Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG031(block_id) (0x000000000000007Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG032(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG032(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000080ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG032(block_id) (0x0000000000000080ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG033(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG033(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000084ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG033(block_id) (0x0000000000000084ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG034(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG034(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000088ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG034(block_id) (0x0000000000000088ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG037(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG037(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000094ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG037(block_id) (0x0000000000000094ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG038(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG038(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000098ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG038(block_id) (0x0000000000000098ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG039(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG039(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000009Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG039(block_id) (0x000000000000009Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG040(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG040(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000000A0ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG040(block_id) (0x00000000000000A0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG041(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG041(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000000A4ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG041(block_id) (0x00000000000000A4ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG042(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG042(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000000A8ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG042(block_id) (0x00000000000000A8ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG064(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG064(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000100ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG064(block_id) (0x0000000000000100ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG065(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG065(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000104ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG065(block_id) (0x0000000000000104ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG066(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG066(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000108ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG066(block_id) (0x0000000000000108ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG067(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG067(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000010Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG067(block_id) (0x000000000000010Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG068(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG068(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000110ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG068(block_id) (0x0000000000000110ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG069(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG069(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000114ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG069(block_id) (0x0000000000000114ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG070(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG070(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000118ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG070(block_id) (0x0000000000000118ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG071(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG071(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000011Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG071(block_id) (0x000000000000011Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG072(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG072(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000120ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG072(block_id) (0x0000000000000120ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG073(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG073(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000124ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG073(block_id) (0x0000000000000124ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG074(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG074(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000128ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG074(block_id) (0x0000000000000128ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG448(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG448(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000700ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG448(block_id) (0x0000000000000700ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG449(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG449(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000704ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG449(block_id) (0x0000000000000704ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG450(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG450(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000708ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG450(block_id) (0x0000000000000708ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG451(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG451(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000070Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG451(block_id) (0x000000000000070Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG452(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG452(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000710ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG452(block_id) (0x0000000000000710ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG453(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG453(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000714ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG453(block_id) (0x0000000000000714ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG454(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG454(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000718ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG454(block_id) (0x0000000000000718ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG455(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG455(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000071Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG455(block_id) (0x000000000000071Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG456(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG456(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000720ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG456(block_id) (0x0000000000000720ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG458(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG458(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000728ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG458(block_id) (0x0000000000000728ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG459(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG459(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000072Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG459(block_id) (0x000000000000072Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG460(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG460(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000730ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG460(block_id) (0x0000000000000730ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG461(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG461(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000734ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG461(block_id) (0x0000000000000734ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG462(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG462(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000738ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG462(block_id) (0x0000000000000738ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG463(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG463(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000073Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG463(block_id) (0x000000000000073Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG464(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG464(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000740ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG464(block_id) (0x0000000000000740ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG465(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG465(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000744ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG465(block_id) (0x0000000000000744ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG466(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG466(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000748ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG466(block_id) (0x0000000000000748ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG467(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG467(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000074Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG467(block_id) (0x000000000000074Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG468(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG468(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000750ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG468(block_id) (0x0000000000000750ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG490(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG490(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000007A8ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG490(block_id) (0x00000000000007A8ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG491(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG491(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000007ACull;
+}
+#else
+#define CVMX_PCIEEPX_CFG491(block_id) (0x00000000000007ACull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG492(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG492(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000007B0ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG492(block_id) (0x00000000000007B0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG515(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG515(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000080Cull;
+}
+#else
+#define CVMX_PCIEEPX_CFG515(block_id) (0x000000000000080Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG516(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG516(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000810ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG516(block_id) (0x0000000000000810ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIEEPX_CFG517(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIEEPX_CFG517(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000814ull;
+}
+#else
+#define CVMX_PCIEEPX_CFG517(block_id) (0x0000000000000814ull)
+#endif
+
+/**
+ * cvmx_pcieep#_cfg000
+ *
+ * PCIE_CFG000 = First 32-bits of PCIE type 0 config space (Device ID and Vendor ID Register)
+ *
+ */
+union cvmx_pcieepx_cfg000 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg000_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t devid : 16; /**< Device ID, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field.
+ For EEPROM loads also see VENDID of this register. */
+ uint32_t vendid : 16; /**< Vendor ID, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field.
+ If, during an EEPROM load, a value of 0xFFFF is loaded to this
+ field and a value of 0xFFFF is loaded to the DEVID field of
+ this register, the value will not be loaded, the EEPROM load
+ will stop, and the FastLinkEnable bit will be set in the
+ PCIE_CFG452 register. */
+#else
+ uint32_t vendid : 16;
+ uint32_t devid : 16;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg000_s cn52xx;
+ struct cvmx_pcieepx_cfg000_s cn52xxp1;
+ struct cvmx_pcieepx_cfg000_s cn56xx;
+ struct cvmx_pcieepx_cfg000_s cn56xxp1;
+ struct cvmx_pcieepx_cfg000_s cn61xx;
+ struct cvmx_pcieepx_cfg000_s cn63xx;
+ struct cvmx_pcieepx_cfg000_s cn63xxp1;
+ struct cvmx_pcieepx_cfg000_s cn66xx;
+ struct cvmx_pcieepx_cfg000_s cn68xx;
+ struct cvmx_pcieepx_cfg000_s cn68xxp1;
+ struct cvmx_pcieepx_cfg000_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg000 cvmx_pcieepx_cfg000_t;
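+
+/* A minimal usage sketch (illustrative only): read a config register through
+ * the PEM and decode it via the union, which keeps the bitfield layout
+ * correct for either endianness. cvmx_pcie_cfgx_read() is assumed from
+ * cvmx-pcie.h and CVMX_PCIEEPX_CFG000() from earlier in this file.
+ */
+#if 0
+static void example_show_ids(int pcie_port)
+{
+ cvmx_pcieepx_cfg000_t cfg000;
+ cfg000.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG000(pcie_port));
+ cvmx_dprintf("PCIe%d EP devid=0x%04x vendid=0x%04x\n",
+ pcie_port, cfg000.s.devid, cfg000.s.vendid);
+}
+#endif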
+
+/**
+ * cvmx_pcieep#_cfg001
+ *
+ * PCIE_CFG001 = Second 32-bits of PCIE type 0 config space (Command/Status Register)
+ *
+ */
+union cvmx_pcieepx_cfg001 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg001_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dpe : 1; /**< Detected Parity Error */
+ uint32_t sse : 1; /**< Signaled System Error */
+ uint32_t rma : 1; /**< Received Master Abort */
+ uint32_t rta : 1; /**< Received Target Abort */
+ uint32_t sta : 1; /**< Signaled Target Abort */
+ uint32_t devt : 2; /**< DEVSEL Timing
+ Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t mdpe : 1; /**< Master Data Parity Error */
+ uint32_t fbb : 1; /**< Fast Back-to-Back Capable
+ Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t reserved_22_22 : 1;
+ uint32_t m66 : 1; /**< 66 MHz Capable
+ Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t cl : 1; /**< Capabilities List
+ Indicates presence of an extended capability item.
+ Hardwired to 1. */
+ uint32_t i_stat : 1; /**< INTx Status */
+ uint32_t reserved_11_18 : 8;
+ uint32_t i_dis : 1; /**< INTx Assertion Disable */
+ uint32_t fbbe : 1; /**< Fast Back-to-Back Enable
+ Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t see : 1; /**< SERR# Enable */
+ uint32_t ids_wcc : 1; /**< IDSEL Stepping/Wait Cycle Control
+ Not applicable for PCI Express. Must be hardwired to 0 */
+ uint32_t per : 1; /**< Parity Error Response */
+ uint32_t vps : 1; /**< VGA Palette Snoop
+ Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t mwice : 1; /**< Memory Write and Invalidate
+ Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t scse : 1; /**< Special Cycle Enable
+ Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t me : 1; /**< Bus Master Enable */
+ uint32_t msae : 1; /**< Memory Space Enable */
+ uint32_t isae : 1; /**< I/O Space Enable */
+#else
+ uint32_t isae : 1;
+ uint32_t msae : 1;
+ uint32_t me : 1;
+ uint32_t scse : 1;
+ uint32_t mwice : 1;
+ uint32_t vps : 1;
+ uint32_t per : 1;
+ uint32_t ids_wcc : 1;
+ uint32_t see : 1;
+ uint32_t fbbe : 1;
+ uint32_t i_dis : 1;
+ uint32_t reserved_11_18 : 8;
+ uint32_t i_stat : 1;
+ uint32_t cl : 1;
+ uint32_t m66 : 1;
+ uint32_t reserved_22_22 : 1;
+ uint32_t fbb : 1;
+ uint32_t mdpe : 1;
+ uint32_t devt : 2;
+ uint32_t sta : 1;
+ uint32_t rta : 1;
+ uint32_t rma : 1;
+ uint32_t sse : 1;
+ uint32_t dpe : 1;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg001_s cn52xx;
+ struct cvmx_pcieepx_cfg001_s cn52xxp1;
+ struct cvmx_pcieepx_cfg001_s cn56xx;
+ struct cvmx_pcieepx_cfg001_s cn56xxp1;
+ struct cvmx_pcieepx_cfg001_s cn61xx;
+ struct cvmx_pcieepx_cfg001_s cn63xx;
+ struct cvmx_pcieepx_cfg001_s cn63xxp1;
+ struct cvmx_pcieepx_cfg001_s cn66xx;
+ struct cvmx_pcieepx_cfg001_s cn68xx;
+ struct cvmx_pcieepx_cfg001_s cn68xxp1;
+ struct cvmx_pcieepx_cfg001_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg001 cvmx_pcieepx_cfg001_t;
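+
+/* Sketch (illustrative only): enable memory decode and bus mastering by
+ * read-modify-write of MSAE and ME, using the assumed cvmx_pcie_cfgx_read()/
+ * cvmx_pcie_cfgx_write() helpers from cvmx-pcie.h and the CVMX_PCIEEPX_CFG001()
+ * offset macro from earlier in this file. (On a real endpoint the host
+ * normally programs these bits.)
+ */
+#if 0
+static void example_enable_master(int pcie_port)
+{
+ cvmx_pcieepx_cfg001_t cfg001;
+ cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port));
+ cfg001.s.msae = 1; /* Memory Space Enable */
+ cfg001.s.me = 1; /* Bus Master Enable */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), cfg001.u32);
+}
+#endif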
+
+/**
+ * cvmx_pcieep#_cfg002
+ *
+ * PCIE_CFG002 = Third 32-bits of PCIE type 0 config space (Revision ID/Class Code Register)
+ *
+ */
+union cvmx_pcieepx_cfg002 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg002_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bcc : 8; /**< Base Class Code, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t sc : 8; /**< Subclass Code, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t pi : 8; /**< Programming Interface, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t rid : 8; /**< Revision ID, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+#else
+ uint32_t rid : 8;
+ uint32_t pi : 8;
+ uint32_t sc : 8;
+ uint32_t bcc : 8;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg002_s cn52xx;
+ struct cvmx_pcieepx_cfg002_s cn52xxp1;
+ struct cvmx_pcieepx_cfg002_s cn56xx;
+ struct cvmx_pcieepx_cfg002_s cn56xxp1;
+ struct cvmx_pcieepx_cfg002_s cn61xx;
+ struct cvmx_pcieepx_cfg002_s cn63xx;
+ struct cvmx_pcieepx_cfg002_s cn63xxp1;
+ struct cvmx_pcieepx_cfg002_s cn66xx;
+ struct cvmx_pcieepx_cfg002_s cn68xx;
+ struct cvmx_pcieepx_cfg002_s cn68xxp1;
+ struct cvmx_pcieepx_cfg002_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg002 cvmx_pcieepx_cfg002_t;
+
+/**
+ * cvmx_pcieep#_cfg003
+ *
+ * PCIE_CFG003 = Fourth 32-bits of PCIE type 0 config space (Cache Line Size/Master Latency Timer/Header Type Register/BIST Register)
+ *
+ */
+union cvmx_pcieepx_cfg003 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg003_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bist : 8; /**< The BIST register functions are not supported.
+ All 8 bits of the BIST register are hardwired to 0. */
+ uint32_t mfd : 1; /**< Multi Function Device
+ The Multi Function Device bit is writable through PEM(0..1)_CFG_WR.
+ However, this is a single function device. Therefore, the
+ application must not write a 1 to this bit. */
+ uint32_t chf : 7; /**< Configuration Header Format
+ Hardwired to 0 for type 0. */
+ uint32_t lt : 8; /**< Master Latency Timer
+ Not applicable for PCI Express, hardwired to 0. */
+ uint32_t cls : 8; /**< Cache Line Size
+ The Cache Line Size register is RW for legacy compatibility
+ purposes and is not applicable to PCI Express device
+ functionality.
+ Writing to the Cache Line Size register does not impact
+ functionality. */
+#else
+ uint32_t cls : 8;
+ uint32_t lt : 8;
+ uint32_t chf : 7;
+ uint32_t mfd : 1;
+ uint32_t bist : 8;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg003_s cn52xx;
+ struct cvmx_pcieepx_cfg003_s cn52xxp1;
+ struct cvmx_pcieepx_cfg003_s cn56xx;
+ struct cvmx_pcieepx_cfg003_s cn56xxp1;
+ struct cvmx_pcieepx_cfg003_s cn61xx;
+ struct cvmx_pcieepx_cfg003_s cn63xx;
+ struct cvmx_pcieepx_cfg003_s cn63xxp1;
+ struct cvmx_pcieepx_cfg003_s cn66xx;
+ struct cvmx_pcieepx_cfg003_s cn68xx;
+ struct cvmx_pcieepx_cfg003_s cn68xxp1;
+ struct cvmx_pcieepx_cfg003_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg003 cvmx_pcieepx_cfg003_t;
+
+/**
+ * cvmx_pcieep#_cfg004
+ *
+ * PCIE_CFG004 = Fifth 32-bits of PCIE type 0 config space (Base Address Register 0 - Low)
+ *
+ */
+union cvmx_pcieepx_cfg004 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg004_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lbab : 18; /**< Lower bits of the BAR 0 base address */
+ uint32_t reserved_4_13 : 10;
+ uint32_t pf : 1; /**< Prefetchable
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t typ : 2; /**< BAR type
+ o 00 = 32-bit BAR
+ o 10 = 64-bit BAR
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t mspc : 1; /**< Memory Space Indicator
+ o 0 = BAR 0 is a memory BAR
+ o 1 = BAR 0 is an I/O BAR
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t mspc : 1;
+ uint32_t typ : 2;
+ uint32_t pf : 1;
+ uint32_t reserved_4_13 : 10;
+ uint32_t lbab : 18;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg004_s cn52xx;
+ struct cvmx_pcieepx_cfg004_s cn52xxp1;
+ struct cvmx_pcieepx_cfg004_s cn56xx;
+ struct cvmx_pcieepx_cfg004_s cn56xxp1;
+ struct cvmx_pcieepx_cfg004_s cn61xx;
+ struct cvmx_pcieepx_cfg004_s cn63xx;
+ struct cvmx_pcieepx_cfg004_s cn63xxp1;
+ struct cvmx_pcieepx_cfg004_s cn66xx;
+ struct cvmx_pcieepx_cfg004_s cn68xx;
+ struct cvmx_pcieepx_cfg004_s cn68xxp1;
+ struct cvmx_pcieepx_cfg004_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg004 cvmx_pcieepx_cfg004_t;
+
+/**
+ * cvmx_pcieep#_cfg004_mask
+ *
+ * PCIE_CFG004_MASK (BAR Mask 0 - Low)
+ * The BAR 0 Mask register is invisible to host software and not readable from the application.
+ * The BAR 0 Mask register is only writable through PEM(0..1)_CFG_WR.
+ */
+union cvmx_pcieepx_cfg004_mask {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg004_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lmask : 31; /**< Bar Mask Low */
+ uint32_t enb : 1; /**< Bar Enable
+ o 0: BAR 0 is disabled
+ o 1: BAR 0 is enabled
+ Bit 0 is interpreted as BAR Enable when writing to the BAR Mask
+ register rather than as a mask bit because bit 0 of a BAR is
+ always masked from writing by host software. Bit 0 must be
+ written prior to writing the other mask bits. */
+#else
+ uint32_t enb : 1;
+ uint32_t lmask : 31;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg004_mask_s cn52xx;
+ struct cvmx_pcieepx_cfg004_mask_s cn52xxp1;
+ struct cvmx_pcieepx_cfg004_mask_s cn56xx;
+ struct cvmx_pcieepx_cfg004_mask_s cn56xxp1;
+ struct cvmx_pcieepx_cfg004_mask_s cn61xx;
+ struct cvmx_pcieepx_cfg004_mask_s cn63xx;
+ struct cvmx_pcieepx_cfg004_mask_s cn63xxp1;
+ struct cvmx_pcieepx_cfg004_mask_s cn66xx;
+ struct cvmx_pcieepx_cfg004_mask_s cn68xx;
+ struct cvmx_pcieepx_cfg004_mask_s cn68xxp1;
+ struct cvmx_pcieepx_cfg004_mask_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg004_mask cvmx_pcieepx_cfg004_mask_t;
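+
+/* A hedged sketch of programming the BAR 0 mask through PEM(0..1)_CFG_WR,
+ * assuming cvmx_pemx_cfg_wr_t/CVMX_PEMX_CFG_WR() from cvmx-pem-defs.h and a
+ * hypothetical mask_offset for this shadow register. Per the ENB note above,
+ * bit 0 is written by itself before the remaining mask bits.
+ */
+#if 0
+static void example_set_bar0_mask(int pem, uint64_t mask_offset, uint32_t lmask)
+{
+ cvmx_pemx_cfg_wr_t wr;
+ wr.u64 = 0;
+ wr.s.addr = mask_offset; /* hypothetical offset of PCIE_CFG004_MASK */
+ wr.s.data = 1; /* step 1: ENB alone */
+ cvmx_write_csr(CVMX_PEMX_CFG_WR(pem), wr.u64);
+ wr.s.data = ((uint64_t)lmask << 1) | 1; /* step 2: mask bits plus ENB */
+ cvmx_write_csr(CVMX_PEMX_CFG_WR(pem), wr.u64);
+}
+#endif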
+
+/**
+ * cvmx_pcieep#_cfg005
+ *
+ * PCIE_CFG005 = Sixth 32-bits of PCIE type 0 config space (Base Address Register 0 - High)
+ *
+ */
+union cvmx_pcieepx_cfg005 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg005_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ubab : 32; /**< Contains the upper 32 bits of the BAR 0 base address. */
+#else
+ uint32_t ubab : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg005_s cn52xx;
+ struct cvmx_pcieepx_cfg005_s cn52xxp1;
+ struct cvmx_pcieepx_cfg005_s cn56xx;
+ struct cvmx_pcieepx_cfg005_s cn56xxp1;
+ struct cvmx_pcieepx_cfg005_s cn61xx;
+ struct cvmx_pcieepx_cfg005_s cn63xx;
+ struct cvmx_pcieepx_cfg005_s cn63xxp1;
+ struct cvmx_pcieepx_cfg005_s cn66xx;
+ struct cvmx_pcieepx_cfg005_s cn68xx;
+ struct cvmx_pcieepx_cfg005_s cn68xxp1;
+ struct cvmx_pcieepx_cfg005_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg005 cvmx_pcieepx_cfg005_t;
+
+/**
+ * cvmx_pcieep#_cfg005_mask
+ *
+ * PCIE_CFG005_MASK = (BAR Mask 0 - High)
+ * The BAR 0 Mask register is invisible to host software and not readable from the application.
+ * The BAR 0 Mask register is only writable through PEM(0..1)_CFG_WR.
+ */
+union cvmx_pcieepx_cfg005_mask {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg005_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t umask : 32; /**< Bar Mask High */
+#else
+ uint32_t umask : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg005_mask_s cn52xx;
+ struct cvmx_pcieepx_cfg005_mask_s cn52xxp1;
+ struct cvmx_pcieepx_cfg005_mask_s cn56xx;
+ struct cvmx_pcieepx_cfg005_mask_s cn56xxp1;
+ struct cvmx_pcieepx_cfg005_mask_s cn61xx;
+ struct cvmx_pcieepx_cfg005_mask_s cn63xx;
+ struct cvmx_pcieepx_cfg005_mask_s cn63xxp1;
+ struct cvmx_pcieepx_cfg005_mask_s cn66xx;
+ struct cvmx_pcieepx_cfg005_mask_s cn68xx;
+ struct cvmx_pcieepx_cfg005_mask_s cn68xxp1;
+ struct cvmx_pcieepx_cfg005_mask_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg005_mask cvmx_pcieepx_cfg005_mask_t;
+
+/**
+ * cvmx_pcieep#_cfg006
+ *
+ * PCIE_CFG006 = Seventh 32-bits of PCIE type 0 config space (Base Address Register 1 - Low)
+ *
+ */
+union cvmx_pcieepx_cfg006 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg006_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lbab : 6; /**< Lower bits of the BAR 1 base address */
+ uint32_t reserved_4_25 : 22;
+ uint32_t pf : 1; /**< Prefetchable
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t typ : 2; /**< BAR type
+ o 00 = 32-bit BAR
+ o 10 = 64-bit BAR
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t mspc : 1; /**< Memory Space Indicator
+ o 0 = BAR 1 is a memory BAR
+ o 1 = BAR 1 is an I/O BAR
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t mspc : 1;
+ uint32_t typ : 2;
+ uint32_t pf : 1;
+ uint32_t reserved_4_25 : 22;
+ uint32_t lbab : 6;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg006_s cn52xx;
+ struct cvmx_pcieepx_cfg006_s cn52xxp1;
+ struct cvmx_pcieepx_cfg006_s cn56xx;
+ struct cvmx_pcieepx_cfg006_s cn56xxp1;
+ struct cvmx_pcieepx_cfg006_s cn61xx;
+ struct cvmx_pcieepx_cfg006_s cn63xx;
+ struct cvmx_pcieepx_cfg006_s cn63xxp1;
+ struct cvmx_pcieepx_cfg006_s cn66xx;
+ struct cvmx_pcieepx_cfg006_s cn68xx;
+ struct cvmx_pcieepx_cfg006_s cn68xxp1;
+ struct cvmx_pcieepx_cfg006_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg006 cvmx_pcieepx_cfg006_t;
+
+/**
+ * cvmx_pcieep#_cfg006_mask
+ *
+ * PCIE_CFG006_MASK (BAR Mask 1 - Low)
+ * The BAR 1 Mask register is invisible to host software and not readable from the application.
+ * The BAR 1 Mask register is only writable through PEM(0..1)_CFG_WR.
+ */
+union cvmx_pcieepx_cfg006_mask {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg006_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lmask : 31; /**< Bar Mask Low */
+ uint32_t enb : 1; /**< Bar Enable
+ o 0: BAR 1 is disabled
+ o 1: BAR 1 is enabled
+ Bit 0 is interpreted as BAR Enable when writing to the BAR Mask
+ register rather than as a mask bit because bit 0 of a BAR is
+ always masked from writing by host software. Bit 0 must be
+ written prior to writing the other mask bits. */
+#else
+ uint32_t enb : 1;
+ uint32_t lmask : 31;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg006_mask_s cn52xx;
+ struct cvmx_pcieepx_cfg006_mask_s cn52xxp1;
+ struct cvmx_pcieepx_cfg006_mask_s cn56xx;
+ struct cvmx_pcieepx_cfg006_mask_s cn56xxp1;
+ struct cvmx_pcieepx_cfg006_mask_s cn61xx;
+ struct cvmx_pcieepx_cfg006_mask_s cn63xx;
+ struct cvmx_pcieepx_cfg006_mask_s cn63xxp1;
+ struct cvmx_pcieepx_cfg006_mask_s cn66xx;
+ struct cvmx_pcieepx_cfg006_mask_s cn68xx;
+ struct cvmx_pcieepx_cfg006_mask_s cn68xxp1;
+ struct cvmx_pcieepx_cfg006_mask_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg006_mask cvmx_pcieepx_cfg006_mask_t;
+
+/**
+ * cvmx_pcieep#_cfg007
+ *
+ * PCIE_CFG007 = Eighth 32-bits of PCIE type 0 config space (Base Address Register 1 - High)
+ *
+ */
+union cvmx_pcieepx_cfg007 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg007_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ubab : 32; /**< Contains the upper 32 bits of the BAR 1 base address. */
+#else
+ uint32_t ubab : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg007_s cn52xx;
+ struct cvmx_pcieepx_cfg007_s cn52xxp1;
+ struct cvmx_pcieepx_cfg007_s cn56xx;
+ struct cvmx_pcieepx_cfg007_s cn56xxp1;
+ struct cvmx_pcieepx_cfg007_s cn61xx;
+ struct cvmx_pcieepx_cfg007_s cn63xx;
+ struct cvmx_pcieepx_cfg007_s cn63xxp1;
+ struct cvmx_pcieepx_cfg007_s cn66xx;
+ struct cvmx_pcieepx_cfg007_s cn68xx;
+ struct cvmx_pcieepx_cfg007_s cn68xxp1;
+ struct cvmx_pcieepx_cfg007_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg007 cvmx_pcieepx_cfg007_t;
+
+/**
+ * cvmx_pcieep#_cfg007_mask
+ *
+ * PCIE_CFG007_MASK (BAR Mask 1 - High)
+ * The BAR 1 Mask register is invisible to host software and not readable from the application.
+ * The BAR 1 Mask register is only writable through PEM(0..1)_CFG_WR.
+ */
+union cvmx_pcieepx_cfg007_mask {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg007_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t umask : 32; /**< Bar Mask High */
+#else
+ uint32_t umask : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg007_mask_s cn52xx;
+ struct cvmx_pcieepx_cfg007_mask_s cn52xxp1;
+ struct cvmx_pcieepx_cfg007_mask_s cn56xx;
+ struct cvmx_pcieepx_cfg007_mask_s cn56xxp1;
+ struct cvmx_pcieepx_cfg007_mask_s cn61xx;
+ struct cvmx_pcieepx_cfg007_mask_s cn63xx;
+ struct cvmx_pcieepx_cfg007_mask_s cn63xxp1;
+ struct cvmx_pcieepx_cfg007_mask_s cn66xx;
+ struct cvmx_pcieepx_cfg007_mask_s cn68xx;
+ struct cvmx_pcieepx_cfg007_mask_s cn68xxp1;
+ struct cvmx_pcieepx_cfg007_mask_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg007_mask cvmx_pcieepx_cfg007_mask_t;
+
+/**
+ * cvmx_pcieep#_cfg008
+ *
+ * PCIE_CFG008 = Ninth 32-bits of PCIE type 0 config space (Base Address Register 2 - Low)
+ *
+ */
+union cvmx_pcieepx_cfg008 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg008_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_4_31 : 28;
+ uint32_t pf : 1; /**< Prefetchable
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t typ : 2; /**< BAR type
+ o 00 = 32-bit BAR
+ o 10 = 64-bit BAR
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t mspc : 1; /**< Memory Space Indicator
+ o 0 = BAR 2 is a memory BAR
+ o 1 = BAR 2 is an I/O BAR
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t mspc : 1;
+ uint32_t typ : 2;
+ uint32_t pf : 1;
+ uint32_t reserved_4_31 : 28;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg008_s cn52xx;
+ struct cvmx_pcieepx_cfg008_s cn52xxp1;
+ struct cvmx_pcieepx_cfg008_s cn56xx;
+ struct cvmx_pcieepx_cfg008_s cn56xxp1;
+ struct cvmx_pcieepx_cfg008_s cn61xx;
+ struct cvmx_pcieepx_cfg008_s cn63xx;
+ struct cvmx_pcieepx_cfg008_s cn63xxp1;
+ struct cvmx_pcieepx_cfg008_s cn66xx;
+ struct cvmx_pcieepx_cfg008_s cn68xx;
+ struct cvmx_pcieepx_cfg008_s cn68xxp1;
+ struct cvmx_pcieepx_cfg008_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg008 cvmx_pcieepx_cfg008_t;
+
+/**
+ * cvmx_pcieep#_cfg008_mask
+ *
+ * PCIE_CFG008_MASK (BAR Mask 2 - Low)
+ * The BAR 2 Mask register is invisible to host software and not readable from the application.
+ * The BAR 2 Mask register is only writable through PEM(0..1)_CFG_WR.
+ */
+union cvmx_pcieepx_cfg008_mask {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg008_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lmask : 31; /**< Bar Mask Low */
+ uint32_t enb : 1; /**< Bar Enable
+ o 0: BAR 2 is disabled
+ o 1: BAR 2 is enabled
+ Bit 0 is interpreted as BAR Enable when writing to the BAR Mask
+ register rather than as a mask bit because bit 0 of a BAR is
+ always masked from writing by host software. Bit 0 must be
+ written prior to writing the other mask bits. */
+#else
+ uint32_t enb : 1;
+ uint32_t lmask : 31;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg008_mask_s cn52xx;
+ struct cvmx_pcieepx_cfg008_mask_s cn52xxp1;
+ struct cvmx_pcieepx_cfg008_mask_s cn56xx;
+ struct cvmx_pcieepx_cfg008_mask_s cn56xxp1;
+ struct cvmx_pcieepx_cfg008_mask_s cn61xx;
+ struct cvmx_pcieepx_cfg008_mask_s cn63xx;
+ struct cvmx_pcieepx_cfg008_mask_s cn63xxp1;
+ struct cvmx_pcieepx_cfg008_mask_s cn66xx;
+ struct cvmx_pcieepx_cfg008_mask_s cn68xx;
+ struct cvmx_pcieepx_cfg008_mask_s cn68xxp1;
+ struct cvmx_pcieepx_cfg008_mask_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg008_mask cvmx_pcieepx_cfg008_mask_t;
+
+/**
+ * cvmx_pcieep#_cfg009
+ *
+ * PCIE_CFG009 = Tenth 32-bits of PCIE type 0 config space (Base Address Register 2 - High)
+ *
+ */
+union cvmx_pcieepx_cfg009 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg009_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg009_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ubab : 25; /**< Contains the upper bits of the BAR 2 base address. */
+ uint32_t reserved_0_6 : 7;
+#else
+ uint32_t reserved_0_6 : 7;
+ uint32_t ubab : 25;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg009_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg009_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg009_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg009_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ubab : 23; /**< Contains the upper bits of the BAR 2 base address. */
+ uint32_t reserved_0_8 : 9;
+#else
+ uint32_t reserved_0_8 : 9;
+ uint32_t ubab : 23;
+#endif
+ } cn61xx;
+ struct cvmx_pcieepx_cfg009_cn61xx cn63xx;
+ struct cvmx_pcieepx_cfg009_cn61xx cn63xxp1;
+ struct cvmx_pcieepx_cfg009_cn61xx cn66xx;
+ struct cvmx_pcieepx_cfg009_cn61xx cn68xx;
+ struct cvmx_pcieepx_cfg009_cn61xx cn68xxp1;
+ struct cvmx_pcieepx_cfg009_cn61xx cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg009 cvmx_pcieepx_cfg009_t;
+
+/**
+ * cvmx_pcieep#_cfg009_mask
+ *
+ * PCIE_CFG009_MASK (BAR Mask 2 - High)
+ * The BAR 2 Mask register is invisible to host software and not readable from the application.
+ * The BAR 2 Mask register is only writable through PEM(0..1)_CFG_WR.
+ */
+union cvmx_pcieepx_cfg009_mask {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg009_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t umask : 32; /**< Bar Mask High */
+#else
+ uint32_t umask : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg009_mask_s cn52xx;
+ struct cvmx_pcieepx_cfg009_mask_s cn52xxp1;
+ struct cvmx_pcieepx_cfg009_mask_s cn56xx;
+ struct cvmx_pcieepx_cfg009_mask_s cn56xxp1;
+ struct cvmx_pcieepx_cfg009_mask_s cn61xx;
+ struct cvmx_pcieepx_cfg009_mask_s cn63xx;
+ struct cvmx_pcieepx_cfg009_mask_s cn63xxp1;
+ struct cvmx_pcieepx_cfg009_mask_s cn66xx;
+ struct cvmx_pcieepx_cfg009_mask_s cn68xx;
+ struct cvmx_pcieepx_cfg009_mask_s cn68xxp1;
+ struct cvmx_pcieepx_cfg009_mask_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg009_mask cvmx_pcieepx_cfg009_mask_t;
+
+/**
+ * cvmx_pcieep#_cfg010
+ *
+ * PCIE_CFG010 = Eleventh 32-bits of PCIE type 0 config space (CardBus CIS Pointer Register)
+ *
+ */
+union cvmx_pcieepx_cfg010 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg010_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cisp : 32; /**< CardBus CIS Pointer
+ Optional, writable through PEM(0..1)_CFG_WR. */
+#else
+ uint32_t cisp : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg010_s cn52xx;
+ struct cvmx_pcieepx_cfg010_s cn52xxp1;
+ struct cvmx_pcieepx_cfg010_s cn56xx;
+ struct cvmx_pcieepx_cfg010_s cn56xxp1;
+ struct cvmx_pcieepx_cfg010_s cn61xx;
+ struct cvmx_pcieepx_cfg010_s cn63xx;
+ struct cvmx_pcieepx_cfg010_s cn63xxp1;
+ struct cvmx_pcieepx_cfg010_s cn66xx;
+ struct cvmx_pcieepx_cfg010_s cn68xx;
+ struct cvmx_pcieepx_cfg010_s cn68xxp1;
+ struct cvmx_pcieepx_cfg010_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg010 cvmx_pcieepx_cfg010_t;
+
+/**
+ * cvmx_pcieep#_cfg011
+ *
+ * PCIE_CFG011 = Twelfth 32-bits of PCIE type 0 config space (Subsystem ID and Subsystem Vendor ID Register)
+ *
+ */
+union cvmx_pcieepx_cfg011 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg011_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ssid : 16; /**< Subsystem ID
+ Assigned by PCI-SIG, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t ssvid : 16; /**< Subsystem Vendor ID
+ Assigned by PCI-SIG, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t ssvid : 16;
+ uint32_t ssid : 16;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg011_s cn52xx;
+ struct cvmx_pcieepx_cfg011_s cn52xxp1;
+ struct cvmx_pcieepx_cfg011_s cn56xx;
+ struct cvmx_pcieepx_cfg011_s cn56xxp1;
+ struct cvmx_pcieepx_cfg011_s cn61xx;
+ struct cvmx_pcieepx_cfg011_s cn63xx;
+ struct cvmx_pcieepx_cfg011_s cn63xxp1;
+ struct cvmx_pcieepx_cfg011_s cn66xx;
+ struct cvmx_pcieepx_cfg011_s cn68xx;
+ struct cvmx_pcieepx_cfg011_s cn68xxp1;
+ struct cvmx_pcieepx_cfg011_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg011 cvmx_pcieepx_cfg011_t;
+
+/**
+ * cvmx_pcieep#_cfg012
+ *
+ * PCIE_CFG012 = Thirteenth 32-bits of PCIE type 0 config space (Expansion ROM Base Address Register)
+ *
+ */
+union cvmx_pcieepx_cfg012 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg012_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t eraddr : 16; /**< Expansion ROM Address */
+ uint32_t reserved_1_15 : 15;
+ uint32_t er_en : 1; /**< Expansion ROM Enable */
+#else
+ uint32_t er_en : 1;
+ uint32_t reserved_1_15 : 15;
+ uint32_t eraddr : 16;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg012_s cn52xx;
+ struct cvmx_pcieepx_cfg012_s cn52xxp1;
+ struct cvmx_pcieepx_cfg012_s cn56xx;
+ struct cvmx_pcieepx_cfg012_s cn56xxp1;
+ struct cvmx_pcieepx_cfg012_s cn61xx;
+ struct cvmx_pcieepx_cfg012_s cn63xx;
+ struct cvmx_pcieepx_cfg012_s cn63xxp1;
+ struct cvmx_pcieepx_cfg012_s cn66xx;
+ struct cvmx_pcieepx_cfg012_s cn68xx;
+ struct cvmx_pcieepx_cfg012_s cn68xxp1;
+ struct cvmx_pcieepx_cfg012_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg012 cvmx_pcieepx_cfg012_t;
+
+/**
+ * cvmx_pcieep#_cfg012_mask
+ *
+ * PCIE_CFG012_MASK (Expansion ROM BAR Mask)
+ * The ROM Mask register is invisible to host software and not readable from the application.
+ * The ROM Mask register is only writable through PEM(0..1)_CFG_WR.
+ */
+union cvmx_pcieepx_cfg012_mask {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg012_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t mask : 31; /**< Bar Mask Low NS */
+ uint32_t enb : 1; /**< Bar Enable NS
+ o 0: BAR ROM is disabled
+ o 1: BAR ROM is enabled
+ Bit 0 is interpreted as BAR Enable when writing to the BAR Mask
+ register rather than as a mask bit because bit 0 of a BAR is
+ always masked from writing by host software. Bit 0 must be
+ written prior to writing the other mask bits. */
+#else
+ uint32_t enb : 1;
+ uint32_t mask : 31;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg012_mask_s cn52xx;
+ struct cvmx_pcieepx_cfg012_mask_s cn52xxp1;
+ struct cvmx_pcieepx_cfg012_mask_s cn56xx;
+ struct cvmx_pcieepx_cfg012_mask_s cn56xxp1;
+ struct cvmx_pcieepx_cfg012_mask_s cn61xx;
+ struct cvmx_pcieepx_cfg012_mask_s cn63xx;
+ struct cvmx_pcieepx_cfg012_mask_s cn63xxp1;
+ struct cvmx_pcieepx_cfg012_mask_s cn66xx;
+ struct cvmx_pcieepx_cfg012_mask_s cn68xx;
+ struct cvmx_pcieepx_cfg012_mask_s cn68xxp1;
+ struct cvmx_pcieepx_cfg012_mask_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg012_mask cvmx_pcieepx_cfg012_mask_t;
+
+/**
+ * cvmx_pcieep#_cfg013
+ *
+ * PCIE_CFG013 = Fourteenth 32-bits of PCIE type 0 config space (Capability Pointer Register)
+ *
+ */
+union cvmx_pcieepx_cfg013 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg013_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t cp : 8; /**< First Capability Pointer.
+ Points to Power Management Capability structure by
+ default, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t cp : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg013_s cn52xx;
+ struct cvmx_pcieepx_cfg013_s cn52xxp1;
+ struct cvmx_pcieepx_cfg013_s cn56xx;
+ struct cvmx_pcieepx_cfg013_s cn56xxp1;
+ struct cvmx_pcieepx_cfg013_s cn61xx;
+ struct cvmx_pcieepx_cfg013_s cn63xx;
+ struct cvmx_pcieepx_cfg013_s cn63xxp1;
+ struct cvmx_pcieepx_cfg013_s cn66xx;
+ struct cvmx_pcieepx_cfg013_s cn68xx;
+ struct cvmx_pcieepx_cfg013_s cn68xxp1;
+ struct cvmx_pcieepx_cfg013_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg013 cvmx_pcieepx_cfg013_t;
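+
+/* Sketch (illustrative only): CP heads the standard capability list; byte 0
+ * of each capability is its ID and byte 1 the next pointer, so the list can
+ * be walked with 32-bit config reads (cvmx_pcie_cfgx_read() assumed from
+ * cvmx-pcie.h, CVMX_PCIEEPX_CFG013() from earlier in this file).
+ */
+#if 0
+static void example_walk_caps(int pcie_port)
+{
+ cvmx_pcieepx_cfg013_t cfg013;
+ uint32_t ptr, hdr;
+ cfg013.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG013(pcie_port));
+ for (ptr = cfg013.s.cp; ptr; ptr = (hdr >> 8) & 0xff) {
+ hdr = cvmx_pcie_cfgx_read(pcie_port, ptr);
+ cvmx_dprintf("capability id 0x%02x at offset 0x%02x\n", hdr & 0xff, ptr);
+ }
+}
+#endif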
+
+/**
+ * cvmx_pcieep#_cfg015
+ *
+ * PCIE_CFG015 = Sixteenth 32-bits of PCIE type 0 config space (Interrupt Line Register/Interrupt Pin/Bridge Control Register)
+ *
+ */
+union cvmx_pcieepx_cfg015 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg015_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ml : 8; /**< Maximum Latency (Hardwired to 0) */
+ uint32_t mg : 8; /**< Minimum Grant (Hardwired to 0) */
+ uint32_t inta : 8; /**< Interrupt Pin
+ Identifies the legacy interrupt Message that the device
+ (or device function) uses.
+ The Interrupt Pin register is writable through PEM(0..1)_CFG_WR.
+ In a single-function configuration, only INTA is used.
+ Therefore, the application must not change this field. */
+ uint32_t il : 8; /**< Interrupt Line */
+#else
+ uint32_t il : 8;
+ uint32_t inta : 8;
+ uint32_t mg : 8;
+ uint32_t ml : 8;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg015_s cn52xx;
+ struct cvmx_pcieepx_cfg015_s cn52xxp1;
+ struct cvmx_pcieepx_cfg015_s cn56xx;
+ struct cvmx_pcieepx_cfg015_s cn56xxp1;
+ struct cvmx_pcieepx_cfg015_s cn61xx;
+ struct cvmx_pcieepx_cfg015_s cn63xx;
+ struct cvmx_pcieepx_cfg015_s cn63xxp1;
+ struct cvmx_pcieepx_cfg015_s cn66xx;
+ struct cvmx_pcieepx_cfg015_s cn68xx;
+ struct cvmx_pcieepx_cfg015_s cn68xxp1;
+ struct cvmx_pcieepx_cfg015_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg015 cvmx_pcieepx_cfg015_t;
+
+/**
+ * cvmx_pcieep#_cfg016
+ *
+ * PCIE_CFG016 = Seventeenth 32-bits of PCIE type 0 config space
+ * (Power Management Capability ID/
+ * Power Management Next Item Pointer/
+ * Power Management Capabilities Register)
+ */
+union cvmx_pcieepx_cfg016 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg016_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pmes : 5; /**< PME_Support
+ o Bit 11: If set, PME Messages can be generated from D0
+ o Bit 12: If set, PME Messages can be generated from D1
+ o Bit 13: If set, PME Messages can be generated from D2
+ o Bit 14: If set, PME Messages can be generated from D3hot
+ o Bit 15: If set, PME Messages can be generated from D3cold
+ The PME_Support field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t d2s : 1; /**< D2 Support, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t d1s : 1; /**< D1 Support, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t auxc : 3; /**< AUX Current, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t dsi : 1; /**< Device Specific Initialization (DSI), writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t reserved_20_20 : 1;
+ uint32_t pme_clock : 1; /**< PME Clock, hardwired to 0 */
+ uint32_t pmsv : 3; /**< Power Management Specification Version, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t ncp : 8; /**< Next Capability Pointer
+ Points to the MSI capabilities by default, writable
+ through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t pmcid : 8; /**< Power Management Capability ID */
+#else
+ uint32_t pmcid : 8;
+ uint32_t ncp : 8;
+ uint32_t pmsv : 3;
+ uint32_t pme_clock : 1;
+ uint32_t reserved_20_20 : 1;
+ uint32_t dsi : 1;
+ uint32_t auxc : 3;
+ uint32_t d1s : 1;
+ uint32_t d2s : 1;
+ uint32_t pmes : 5;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg016_s cn52xx;
+ struct cvmx_pcieepx_cfg016_s cn52xxp1;
+ struct cvmx_pcieepx_cfg016_s cn56xx;
+ struct cvmx_pcieepx_cfg016_s cn56xxp1;
+ struct cvmx_pcieepx_cfg016_s cn61xx;
+ struct cvmx_pcieepx_cfg016_s cn63xx;
+ struct cvmx_pcieepx_cfg016_s cn63xxp1;
+ struct cvmx_pcieepx_cfg016_s cn66xx;
+ struct cvmx_pcieepx_cfg016_s cn68xx;
+ struct cvmx_pcieepx_cfg016_s cn68xxp1;
+ struct cvmx_pcieepx_cfg016_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg016 cvmx_pcieepx_cfg016_t;
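+
+/* Sketch (illustrative only): bits 0..4 of the five-bit PMES field report PME
+ * support from D0, D1, D2, D3hot, and D3cold respectively (register bits
+ * 11..15 overall).
+ */
+#if 0
+static int example_pme_from_d3cold(cvmx_pcieepx_cfg016_t cfg016)
+{
+ return (cfg016.s.pmes >> 4) & 1; /* register bit 15 = PME from D3cold */
+}
+#endif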
+
+/**
+ * cvmx_pcieep#_cfg017
+ *
+ * PCIE_CFG017 = Eighteenth 32-bits of PCIE type 0 config space (Power Management Control and Status Register)
+ *
+ */
+union cvmx_pcieepx_cfg017 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg017_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pmdia : 8; /**< Data register for additional information (not supported) */
+ uint32_t bpccee : 1; /**< Bus Power/Clock Control Enable, hardwired to 0 */
+ uint32_t bd3h : 1; /**< B2/B3 Support, hardwired to 0 */
+ uint32_t reserved_16_21 : 6;
+ uint32_t pmess : 1; /**< PME Status
+ Indicates if a previously enabled PME event occurred or not. */
+ uint32_t pmedsia : 2; /**< Data Scale (not supported) */
+ uint32_t pmds : 4; /**< Data Select (not supported) */
+ uint32_t pmeens : 1; /**< PME Enable
+ A value of 1 indicates that the device is enabled to
+ generate PME. */
+ uint32_t reserved_4_7 : 4;
+ uint32_t nsr : 1; /**< No Soft Reset, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t reserved_2_2 : 1;
+ uint32_t ps : 2; /**< Power State
+ Controls the device power state:
+ o 00b: D0
+ o 01b: D1
+ o 10b: D2
+ o 11b: D3
+ The written value is ignored if the specific state is
+ not supported. */
+#else
+ uint32_t ps : 2;
+ uint32_t reserved_2_2 : 1;
+ uint32_t nsr : 1;
+ uint32_t reserved_4_7 : 4;
+ uint32_t pmeens : 1;
+ uint32_t pmds : 4;
+ uint32_t pmedsia : 2;
+ uint32_t pmess : 1;
+ uint32_t reserved_16_21 : 6;
+ uint32_t bd3h : 1;
+ uint32_t bpccee : 1;
+ uint32_t pmdia : 8;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg017_s cn52xx;
+ struct cvmx_pcieepx_cfg017_s cn52xxp1;
+ struct cvmx_pcieepx_cfg017_s cn56xx;
+ struct cvmx_pcieepx_cfg017_s cn56xxp1;
+ struct cvmx_pcieepx_cfg017_s cn61xx;
+ struct cvmx_pcieepx_cfg017_s cn63xx;
+ struct cvmx_pcieepx_cfg017_s cn63xxp1;
+ struct cvmx_pcieepx_cfg017_s cn66xx;
+ struct cvmx_pcieepx_cfg017_s cn68xx;
+ struct cvmx_pcieepx_cfg017_s cn68xxp1;
+ struct cvmx_pcieepx_cfg017_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg017 cvmx_pcieepx_cfg017_t;
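+
+/* Sketch (illustrative only): request D3 by read-modify-write of PS, using
+ * the assumed cvmx_pcie_cfgx_read()/cvmx_pcie_cfgx_write() helpers from
+ * cvmx-pcie.h and the CVMX_PCIEEPX_CFG017() offset macro from this file.
+ */
+#if 0
+static void example_enter_d3(int pcie_port)
+{
+ cvmx_pcieepx_cfg017_t cfg017;
+ cfg017.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG017(pcie_port));
+ cfg017.s.ps = 3; /* 11b: D3; the write is ignored if the state is unsupported */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG017(pcie_port), cfg017.u32);
+}
+#endif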
+
+/**
+ * cvmx_pcieep#_cfg020
+ *
+ * PCIE_CFG020 = Twenty-first 32-bits of PCIE type 0 config space
+ * (MSI Capability ID/
+ * MSI Next Item Pointer/
+ * MSI Control Register)
+ */
+union cvmx_pcieepx_cfg020 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg020_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t pvm : 1; /**< Per-vector masking capable */
+ uint32_t m64 : 1; /**< 64-bit Address Capable, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t mme : 3; /**< Multiple Message Enabled
+ Indicates that multiple Message mode is enabled by system
+ software. The number of Messages enabled must be less than
+ or equal to the Multiple Message Capable value. */
+ uint32_t mmc : 3; /**< Multiple Message Capable, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t msien : 1; /**< MSI Enabled
+ When set, INTx must be disabled. */
+ uint32_t ncp : 8; /**< Next Capability Pointer
+ Points to PCI Express Capabilities by default,
+ writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t msicid : 8; /**< MSI Capability ID */
+#else
+ uint32_t msicid : 8;
+ uint32_t ncp : 8;
+ uint32_t msien : 1;
+ uint32_t mmc : 3;
+ uint32_t mme : 3;
+ uint32_t m64 : 1;
+ uint32_t pvm : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg020_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t m64 : 1; /**< 64-bit Address Capable, writable through PESC(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t mme : 3; /**< Multiple Message Enabled
+ Indicates that multiple Message mode is enabled by system
+ software. The number of Messages enabled must be less than
+ or equal to the Multiple Message Capable value. */
+ uint32_t mmc : 3; /**< Multiple Message Capable, writable through PESC(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t msien : 1; /**< MSI Enabled
+ When set, INTx must be disabled. */
+ uint32_t ncp : 8; /**< Next Capability Pointer
+ Points to PCI Express Capabilities by default,
+ writable through PESC(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t msicid : 8; /**< MSI Capability ID */
+#else
+ uint32_t msicid : 8;
+ uint32_t ncp : 8;
+ uint32_t msien : 1;
+ uint32_t mmc : 3;
+ uint32_t mme : 3;
+ uint32_t m64 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg020_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg020_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg020_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg020_s cn61xx;
+ struct cvmx_pcieepx_cfg020_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg020_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg020_s cn66xx;
+ struct cvmx_pcieepx_cfg020_s cn68xx;
+ struct cvmx_pcieepx_cfg020_s cn68xxp1;
+ struct cvmx_pcieepx_cfg020_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg020 cvmx_pcieepx_cfg020_t;
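+
+/* Sketch (illustrative only): MME is the log2 of the message count granted by
+ * system software, bounded above by MMC, and only meaningful while MSIEN=1.
+ */
+#if 0
+static unsigned example_msi_vectors(cvmx_pcieepx_cfg020_t cfg020)
+{
+ return cfg020.s.msien ? (1u << cfg020.s.mme) : 0;
+}
+#endif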
+
+/**
+ * cvmx_pcieep#_cfg021
+ *
+ * PCIE_CFG021 = Twenty-second 32-bits of PCIE type 0 config space (MSI Lower 32 Bits Address Register)
+ *
+ */
+union cvmx_pcieepx_cfg021 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg021_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lmsi : 30; /**< Lower 32-bit Address */
+ uint32_t reserved_0_1 : 2;
+#else
+ uint32_t reserved_0_1 : 2;
+ uint32_t lmsi : 30;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg021_s cn52xx;
+ struct cvmx_pcieepx_cfg021_s cn52xxp1;
+ struct cvmx_pcieepx_cfg021_s cn56xx;
+ struct cvmx_pcieepx_cfg021_s cn56xxp1;
+ struct cvmx_pcieepx_cfg021_s cn61xx;
+ struct cvmx_pcieepx_cfg021_s cn63xx;
+ struct cvmx_pcieepx_cfg021_s cn63xxp1;
+ struct cvmx_pcieepx_cfg021_s cn66xx;
+ struct cvmx_pcieepx_cfg021_s cn68xx;
+ struct cvmx_pcieepx_cfg021_s cn68xxp1;
+ struct cvmx_pcieepx_cfg021_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg021 cvmx_pcieepx_cfg021_t;
+
+/**
+ * cvmx_pcieep#_cfg022
+ *
+ * PCIE_CFG022 = Twenty-third 32-bits of PCIE type 0 config space (MSI Upper 32 bits Address Register)
+ *
+ */
+union cvmx_pcieepx_cfg022 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg022_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t umsi : 32; /**< Upper 32-bit Address */
+#else
+ uint32_t umsi : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg022_s cn52xx;
+ struct cvmx_pcieepx_cfg022_s cn52xxp1;
+ struct cvmx_pcieepx_cfg022_s cn56xx;
+ struct cvmx_pcieepx_cfg022_s cn56xxp1;
+ struct cvmx_pcieepx_cfg022_s cn61xx;
+ struct cvmx_pcieepx_cfg022_s cn63xx;
+ struct cvmx_pcieepx_cfg022_s cn63xxp1;
+ struct cvmx_pcieepx_cfg022_s cn66xx;
+ struct cvmx_pcieepx_cfg022_s cn68xx;
+ struct cvmx_pcieepx_cfg022_s cn68xxp1;
+ struct cvmx_pcieepx_cfg022_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg022 cvmx_pcieepx_cfg022_t;
+
+/**
+ * cvmx_pcieep#_cfg023
+ *
+ * PCIE_CFG023 = Twenty-fourth 32-bits of PCIE type 0 config space (MSI Data Register)
+ *
+ */
+union cvmx_pcieepx_cfg023 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg023_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t msimd : 16; /**< MSI Data
+ Pattern assigned by system software, bits [4:0] are OR-ed with
+ MSI_VECTOR to generate 32 MSI Messages per function. */
+#else
+ uint32_t msimd : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg023_s cn52xx;
+ struct cvmx_pcieepx_cfg023_s cn52xxp1;
+ struct cvmx_pcieepx_cfg023_s cn56xx;
+ struct cvmx_pcieepx_cfg023_s cn56xxp1;
+ struct cvmx_pcieepx_cfg023_s cn61xx;
+ struct cvmx_pcieepx_cfg023_s cn63xx;
+ struct cvmx_pcieepx_cfg023_s cn63xxp1;
+ struct cvmx_pcieepx_cfg023_s cn66xx;
+ struct cvmx_pcieepx_cfg023_s cn68xx;
+ struct cvmx_pcieepx_cfg023_s cn68xxp1;
+ struct cvmx_pcieepx_cfg023_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg023 cvmx_pcieepx_cfg023_t;
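+
+/* Sketch (illustrative only) of the MSIMD note above: bits [4:0] of the
+ * software-assigned data are OR-ed with the vector number, yielding up to 32
+ * distinct messages per function.
+ */
+#if 0
+static uint16_t example_msi_data(uint16_t msimd, unsigned vector)
+{
+ return msimd | (vector & 0x1f);
+}
+#endif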
+
+/**
+ * cvmx_pcieep#_cfg028
+ *
+ * PCIE_CFG028 = Twenty-ninth 32-bits of PCIE type 0 config space
+ * (PCI Express Capabilities List Register/
+ * PCI Express Capabilities Register)
+ */
+union cvmx_pcieepx_cfg028 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg028_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_30_31 : 2;
+ uint32_t imn : 5; /**< Interrupt Message Number
+ Updated by hardware, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t si : 1; /**< Slot Implemented
+ This bit is writable through PEM(0..1)_CFG_WR.
+ However, it must be 0 for
+ an Endpoint device. Therefore, the application must not write a
+ 1 to this bit. */
+ uint32_t dpt : 4; /**< Device Port Type */
+ uint32_t pciecv : 4; /**< PCI Express Capability Version */
+ uint32_t ncp : 8; /**< Next Capability Pointer
+ writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t pcieid : 8; /**< PCIE Capability ID */
+#else
+ uint32_t pcieid : 8;
+ uint32_t ncp : 8;
+ uint32_t pciecv : 4;
+ uint32_t dpt : 4;
+ uint32_t si : 1;
+ uint32_t imn : 5;
+ uint32_t reserved_30_31 : 2;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg028_s cn52xx;
+ struct cvmx_pcieepx_cfg028_s cn52xxp1;
+ struct cvmx_pcieepx_cfg028_s cn56xx;
+ struct cvmx_pcieepx_cfg028_s cn56xxp1;
+ struct cvmx_pcieepx_cfg028_s cn61xx;
+ struct cvmx_pcieepx_cfg028_s cn63xx;
+ struct cvmx_pcieepx_cfg028_s cn63xxp1;
+ struct cvmx_pcieepx_cfg028_s cn66xx;
+ struct cvmx_pcieepx_cfg028_s cn68xx;
+ struct cvmx_pcieepx_cfg028_s cn68xxp1;
+ struct cvmx_pcieepx_cfg028_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg028 cvmx_pcieepx_cfg028_t;
+
+/**
+ * cvmx_pcieep#_cfg029
+ *
+ * PCIE_CFG029 = Thirtieth 32-bits of PCIE type 0 config space (Device Capabilities Register)
+ *
+ */
+union cvmx_pcieepx_cfg029 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg029_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_28_31 : 4;
+ uint32_t cspls : 2; /**< Captured Slot Power Limit Scale
+ From Message from RC, upstream port only. */
+ uint32_t csplv : 8; /**< Captured Slot Power Limit Value
+ From Message from RC, upstream port only. */
+ uint32_t reserved_16_17 : 2;
+ uint32_t rber : 1; /**< Role-Based Error Reporting, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t el1al : 3; /**< Endpoint L1 Acceptable Latency, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t el0al : 3; /**< Endpoint L0s Acceptable Latency, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t etfs : 1; /**< Extended Tag Field Supported
+ This bit is writable through PEM(0..1)_CFG_WR.
+ However, the application
+ must not write a 1 to this bit. */
+ uint32_t pfs : 2; /**< Phantom Function Supported
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, Phantom
+ Function is not supported. Therefore, the application must not
+ write any value other than 0x0 to this field. */
+ uint32_t mpss : 3; /**< Max_Payload_Size Supported, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+#else
+ uint32_t mpss : 3;
+ uint32_t pfs : 2;
+ uint32_t etfs : 1;
+ uint32_t el0al : 3;
+ uint32_t el1al : 3;
+ uint32_t reserved_12_14 : 3;
+ uint32_t rber : 1;
+ uint32_t reserved_16_17 : 2;
+ uint32_t csplv : 8;
+ uint32_t cspls : 2;
+ uint32_t reserved_28_31 : 4;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg029_s cn52xx;
+ struct cvmx_pcieepx_cfg029_s cn52xxp1;
+ struct cvmx_pcieepx_cfg029_s cn56xx;
+ struct cvmx_pcieepx_cfg029_s cn56xxp1;
+ struct cvmx_pcieepx_cfg029_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_29_31 : 3;
+ uint32_t flr_cap : 1; /**< Function Level Reset Capable
+ not supported */
+ uint32_t cspls : 2; /**< Captured Slot Power Limit Scale
+ From Message from RC, upstream port only. */
+ uint32_t csplv : 8; /**< Captured Slot Power Limit Value
+ From Message from RC, upstream port only. */
+ uint32_t reserved_16_17 : 2;
+ uint32_t rber : 1; /**< Role-Based Error Reporting, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t el1al : 3; /**< Endpoint L1 Acceptable Latency, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t el0al : 3; /**< Endpoint L0s Acceptable Latency, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t etfs : 1; /**< Extended Tag Field Supported
+ This bit is writable through PEM(0..1)_CFG_WR.
+ However, the application
+ must not write a 1 to this bit. */
+ uint32_t pfs : 2; /**< Phantom Function Supported
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, Phantom
+ Function is not supported. Therefore, the application must not
+ write any value other than 0x0 to this field. */
+ uint32_t mpss : 3; /**< Max_Payload_Size Supported, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+#else
+ uint32_t mpss : 3;
+ uint32_t pfs : 2;
+ uint32_t etfs : 1;
+ uint32_t el0al : 3;
+ uint32_t el1al : 3;
+ uint32_t reserved_12_14 : 3;
+ uint32_t rber : 1;
+ uint32_t reserved_16_17 : 2;
+ uint32_t csplv : 8;
+ uint32_t cspls : 2;
+ uint32_t flr_cap : 1;
+ uint32_t reserved_29_31 : 3;
+#endif
+ } cn61xx;
+ struct cvmx_pcieepx_cfg029_s cn63xx;
+ struct cvmx_pcieepx_cfg029_s cn63xxp1;
+ struct cvmx_pcieepx_cfg029_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_29_31 : 3;
+ uint32_t flr : 1; /**< Function Level Reset Capability
+ When set, the core supports SR-IOV. */
+ uint32_t cspls : 2; /**< Captured Slot Power Limit Scale
+ From Message from RC, upstream port only. */
+ uint32_t csplv : 8; /**< Captured Slot Power Limit Value
+ From Message from RC, upstream port only. */
+ uint32_t reserved_16_17 : 2;
+ uint32_t rber : 1; /**< Role-Based Error Reporting, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t el1al : 3; /**< Endpoint L1 Acceptable Latency, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t el0al : 3; /**< Endpoint L0s Acceptable Latency, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t etfs : 1; /**< Extended Tag Field Supported
+ This bit is writable through PEM(0..1)_CFG_WR.
+ However, the application
+ must not write a 1 to this bit. */
+ uint32_t pfs : 2; /**< Phantom Function Supported
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, Phantom
+ Function is not supported. Therefore, the application must not
+ write any value other than 0x0 to this field. */
+ uint32_t mpss : 3; /**< Max_Payload_Size Supported, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+#else
+ uint32_t mpss : 3;
+ uint32_t pfs : 2;
+ uint32_t etfs : 1;
+ uint32_t el0al : 3;
+ uint32_t el1al : 3;
+ uint32_t reserved_12_14 : 3;
+ uint32_t rber : 1;
+ uint32_t reserved_16_17 : 2;
+ uint32_t csplv : 8;
+ uint32_t cspls : 2;
+ uint32_t flr : 1;
+ uint32_t reserved_29_31 : 3;
+#endif
+ } cn66xx;
+ struct cvmx_pcieepx_cfg029_cn66xx cn68xx;
+ struct cvmx_pcieepx_cfg029_cn66xx cn68xxp1;
+ struct cvmx_pcieepx_cfg029_cn61xx cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg029 cvmx_pcieepx_cfg029_t;
+
+/**
+ * cvmx_pcieep#_cfg030
+ *
+ * PCIE_CFG030 = Thirty-first 32-bits of PCIE type 0 config space
+ * (Device Control Register/Device Status Register)
+ */
+union cvmx_pcieepx_cfg030 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg030_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_22_31 : 10;
+ uint32_t tp : 1; /**< Transaction Pending
+ Set to 1 when Non-Posted Requests are not yet completed
+ and clear when they are completed. */
+ uint32_t ap_d : 1; /**< Aux Power Detected
+ Set to 1 if Aux power detected. */
+ uint32_t ur_d : 1; /**< Unsupported Request Detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ UR_D occurs when we receive something we don't support.
+ Unsupported requests are Nonfatal errors, so UR_D should
+ cause NFE_D. Receiving a vendor defined message should
+ cause an unsupported request. */
+ uint32_t fe_d : 1; /**< Fatal Error Detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ FE_D is set if we receive any of the errors in PCIE_CFG066 that
+ has a severity set to Fatal. Malformed TLP's generally fit
+ into this category. */
+ uint32_t nfe_d : 1; /**< Non-Fatal Error detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ NFE_D is set if we receive any of the errors in PCIE_CFG066
+ that has a severity set to Nonfatal and does NOT meet Advisory
+ Nonfatal criteria, which
+ most poisoned TLP's should be. */
+ uint32_t ce_d : 1; /**< Correctable Error Detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ CE_D is set if we receive any of the errors in PCIE_CFG068
+ for example a Replay Timer Timeout. Also, it can be set if
+ we get any of the errors in PCIE_CFG066 that has a severity
+ set to Nonfatal and meets the Advisory Nonfatal criteria,
+ which most ECRC errors
+ should be. */
+ uint32_t i_flr : 1; /**< Initiate Function Level Reset
+ (Not Supported) */
+ uint32_t mrrs : 3; /**< Max Read Request Size
+ 0 = 128B
+ 1 = 256B
+ 2 = 512B
+ 3 = 1024B
+ 4 = 2048B
+ 5 = 4096B
+ Note: SLI_S2M_PORT#_CTL[MRRS] and DPI_SLI_PRT#_CFG[MRRS]
+ also must be set properly.
+ SLI_S2M_PORT#_CTL[MRRS] and DPI_SLI_PRT#_CFG[MRRS] must
+ not exceed the desired max read request size. */
+ uint32_t ns_en : 1; /**< Enable No Snoop */
+ uint32_t ap_en : 1; /**< AUX Power PM Enable */
+ uint32_t pf_en : 1; /**< Phantom Function Enable
+ This bit should never be set - OCTEON requests never use
+ phantom functions. */
+ uint32_t etf_en : 1; /**< Extended Tag Field Enable
+ This bit should never be set - OCTEON requests never use
+ extended tags. */
+ uint32_t mps : 3; /**< Max Payload Size
+ Legal values:
+ 0 = 128B
+ 1 = 256B
+ Larger sizes not supported by OCTEON.
+ Note: DPI_SLI_PRT#_CFG[MPS] must be set to the same
+ value for proper functionality. */
+ uint32_t ro_en : 1; /**< Enable Relaxed Ordering
+ This bit is not used. */
+ uint32_t ur_en : 1; /**< Unsupported Request Reporting Enable */
+ uint32_t fe_en : 1; /**< Fatal Error Reporting Enable */
+ uint32_t nfe_en : 1; /**< Non-Fatal Error Reporting Enable */
+ uint32_t ce_en : 1; /**< Correctable Error Reporting Enable */
+#else
+ uint32_t ce_en : 1;
+ uint32_t nfe_en : 1;
+ uint32_t fe_en : 1;
+ uint32_t ur_en : 1;
+ uint32_t ro_en : 1;
+ uint32_t mps : 3;
+ uint32_t etf_en : 1;
+ uint32_t pf_en : 1;
+ uint32_t ap_en : 1;
+ uint32_t ns_en : 1;
+ uint32_t mrrs : 3;
+ uint32_t i_flr : 1;
+ uint32_t ce_d : 1;
+ uint32_t nfe_d : 1;
+ uint32_t fe_d : 1;
+ uint32_t ur_d : 1;
+ uint32_t ap_d : 1;
+ uint32_t tp : 1;
+ uint32_t reserved_22_31 : 10;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg030_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_22_31 : 10;
+ uint32_t tp : 1; /**< Transaction Pending
+ Set to 1 when Non-Posted Requests are not yet completed
+ and clear when they are completed. */
+ uint32_t ap_d : 1; /**< Aux Power Detected
+ Set to 1 if Aux power detected. */
+ uint32_t ur_d : 1; /**< Unsupported Request Detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ UR_D occurs when we receive something we don't support.
+ Unsupported requests are Nonfatal errors, so UR_D should
+ cause NFE_D. Receiving a vendor defined message should
+ cause an unsupported request. */
+ uint32_t fe_d : 1; /**< Fatal Error Detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ FE_D is set if we receive any of the errors in PCIE_CFG066 that
+ has a severity set to Fatal. Malformed TLP's generally fit
+ into this category. */
+ uint32_t nfe_d : 1; /**< Non-Fatal Error detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ NFE_D is set if we receive any of the errors in PCIE_CFG066
+ that has a severity set to Nonfatal and does NOT meet Advisory
+ Nonfatal criteria (PCIe 1.1 spec, Section 6.2.3.2.4), which
+ most poisoned TLP's should be. */
+ uint32_t ce_d : 1; /**< Correctable Error Detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ CE_D is set if we receive any of the errors in PCIE_CFG068
+ for example a Replay Timer Timeout. Also, it can be set if
+ we get any of the errors in PCIE_CFG066 that has a severity
+ set to Nonfatal and meets the Advisory Nonfatal criteria
+ (PCIe 1.1 spec, Section 6.2.3.2.4), which most ECRC errors
+ should be. */
+ uint32_t reserved_15_15 : 1;
+ uint32_t mrrs : 3; /**< Max Read Request Size
+ 0 = 128B
+ 1 = 256B
+ 2 = 512B
+ 3 = 1024B
+ 4 = 2048B
+ 5 = 4096B
+ Note: NPEI_CTL_STATUS2[MRRS] also must be set properly.
+ NPEI_CTL_STATUS2[MRRS] must not exceed the
+ desired max read request size. */
+ uint32_t ns_en : 1; /**< Enable No Snoop */
+ uint32_t ap_en : 1; /**< AUX Power PM Enable */
+ uint32_t pf_en : 1; /**< Phantom Function Enable
+ This bit should never be set - OCTEON requests never use
+ phantom functions. */
+ uint32_t etf_en : 1; /**< Extended Tag Field Enable
+ This bit should never be set - OCTEON requests never use
+ extended tags. */
+ uint32_t mps : 3; /**< Max Payload Size
+ Legal values:
+ 0 = 128B
+ 1 = 256B
+ Larger sizes not supported by OCTEON.
+ Note: NPEI_CTL_STATUS2[MPS] must be set to the same
+ value for proper functionality. */
+ uint32_t ro_en : 1; /**< Enable Relaxed Ordering */
+ uint32_t ur_en : 1; /**< Unsupported Request Reporting Enable */
+ uint32_t fe_en : 1; /**< Fatal Error Reporting Enable */
+ uint32_t nfe_en : 1; /**< Non-Fatal Error Reporting Enable */
+ uint32_t ce_en : 1; /**< Correctable Error Reporting Enable */
+#else
+ uint32_t ce_en : 1;
+ uint32_t nfe_en : 1;
+ uint32_t fe_en : 1;
+ uint32_t ur_en : 1;
+ uint32_t ro_en : 1;
+ uint32_t mps : 3;
+ uint32_t etf_en : 1;
+ uint32_t pf_en : 1;
+ uint32_t ap_en : 1;
+ uint32_t ns_en : 1;
+ uint32_t mrrs : 3;
+ uint32_t reserved_15_15 : 1;
+ uint32_t ce_d : 1;
+ uint32_t nfe_d : 1;
+ uint32_t fe_d : 1;
+ uint32_t ur_d : 1;
+ uint32_t ap_d : 1;
+ uint32_t tp : 1;
+ uint32_t reserved_22_31 : 10;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg030_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg030_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg030_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg030_s cn61xx;
+ struct cvmx_pcieepx_cfg030_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg030_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg030_s cn66xx;
+ struct cvmx_pcieepx_cfg030_s cn68xx;
+ struct cvmx_pcieepx_cfg030_s cn68xxp1;
+ struct cvmx_pcieepx_cfg030_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg030 cvmx_pcieepx_cfg030_t;
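+
+/* Sketch (illustrative only): both MPS and MRRS encode powers of two starting
+ * at 128 bytes, so the byte size is 128 shifted left by the field value.
+ */
+#if 0
+static unsigned example_mps_bytes(cvmx_pcieepx_cfg030_t cfg030)
+{
+ return 128u << cfg030.s.mps; /* 0 -> 128B, 1 -> 256B */
+}
+static unsigned example_mrrs_bytes(cvmx_pcieepx_cfg030_t cfg030)
+{
+ return 128u << cfg030.s.mrrs; /* 0 -> 128B ... 5 -> 4096B */
+}
+#endif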
+
+/**
+ * cvmx_pcieep#_cfg031
+ *
+ * PCIE_CFG031 = Thirty-second 32-bits of PCIE type 0 config space
+ * (Link Capabilities Register)
+ */
+union cvmx_pcieepx_cfg031 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg031_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pnum : 8; /**< Port Number
+ writable through PEM(0..1)_CFG_WR, however the application
+ must not change this field. */
+ uint32_t reserved_23_23 : 1;
+ uint32_t aspm : 1; /**< ASPM Optionality Compliance */
+ uint32_t lbnc : 1; /**< Link Bandwidth Notification Capability
+ Set 0 for Endpoint devices. */
+ uint32_t dllarc : 1; /**< Data Link Layer Active Reporting Capable */
+ uint32_t sderc : 1; /**< Surprise Down Error Reporting Capable
+ Not supported, hardwired to 0x0. */
+ uint32_t cpm : 1; /**< Clock Power Management
+ The default value is the value you specify during hardware
+ configuration, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l1el : 3; /**< L1 Exit Latency
+ The default value is the value you specify during hardware
+ configuration, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l0el : 3; /**< L0s Exit Latency
+ The default value is the value you specify during hardware
+ configuration, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t aslpms : 2; /**< Active State Link PM Support
+ The default value is the value you specify during hardware
+ configuration, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t mlw : 6; /**< Maximum Link Width
+ The default value is the value you specify during hardware
+ configuration (x1), writable through PEM(0..1)_CFG_WR
+ however, wider configurations are not supported. */
+ uint32_t mls : 4; /**< Maximum Link Speed
+ The reset value of this field is controlled by a value sent from
+ the lsb of the MIO_QLM#_SPD register.
+ qlm#_spd[1] RST_VALUE NOTE
+ 1 0001b 2.5 GHz supported
+ 0 0010b 5.0 GHz and 2.5 GHz supported
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t mls : 4;
+ uint32_t mlw : 6;
+ uint32_t aslpms : 2;
+ uint32_t l0el : 3;
+ uint32_t l1el : 3;
+ uint32_t cpm : 1;
+ uint32_t sderc : 1;
+ uint32_t dllarc : 1;
+ uint32_t lbnc : 1;
+ uint32_t aspm : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t pnum : 8;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg031_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pnum : 8; /**< Port Number, writable through PESC(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t reserved_22_23 : 2;
+ uint32_t lbnc : 1; /**< Link Bandwidth Notification Capability */
+ uint32_t dllarc : 1; /**< Data Link Layer Active Reporting Capable */
+ uint32_t sderc : 1; /**< Surprise Down Error Reporting Capable
+ Not supported, hardwired to 0x0. */
+ uint32_t cpm : 1; /**< Clock Power Management
+ The default value is the value you specify during hardware
+ configuration, writable through PESC(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l1el : 3; /**< L1 Exit Latency
+ The default value is the value you specify during hardware
+ configuration, writable through PESC(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l0el : 3; /**< L0s Exit Latency
+ The default value is the value you specify during hardware
+ configuration, writable through PESC(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t aslpms : 2; /**< Active State Link PM Support
+ The default value is the value you specify during hardware
+ configuration, writable through PESC(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t mlw : 6; /**< Maximum Link Width
+ The default value is the value you specify during hardware
+ configuration (x1, x2, x4, x8, or x16), writable through PESC(0..1)_CFG_WR.
+ This value is set to 0x2 or 0x4 depending on the maximum
+ number of lanes (0x2 when QLM_CFG == 0, otherwise 0x4). */
+ uint32_t mls : 4; /**< Maximum Link Speed
+ Default value is 0x1 for 2.5 Gbps Link.
+ This field is writable through PESC(0..1)_CFG_WR.
+ However, 0x1 is the
+ only supported value. Therefore, the application must not write
+ any value other than 0x1 to this field. */
+#else
+ uint32_t mls : 4;
+ uint32_t mlw : 6;
+ uint32_t aslpms : 2;
+ uint32_t l0el : 3;
+ uint32_t l1el : 3;
+ uint32_t cpm : 1;
+ uint32_t sderc : 1;
+ uint32_t dllarc : 1;
+ uint32_t lbnc : 1;
+ uint32_t reserved_22_23 : 2;
+ uint32_t pnum : 8;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg031_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg031_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg031_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg031_s cn61xx;
+ struct cvmx_pcieepx_cfg031_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg031_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg031_s cn66xx;
+ struct cvmx_pcieepx_cfg031_s cn68xx;
+ struct cvmx_pcieepx_cfg031_cn52xx cn68xxp1;
+ struct cvmx_pcieepx_cfg031_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg031 cvmx_pcieepx_cfg031_t;
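+
+/* Illustrative sketch, not part of the original SDK header: how the union
+ * above is typically consumed. The raw dword is assumed to come from a
+ * config-space read performed elsewhere; the example_ helper is hypothetical
+ * and uses only the bit-field names defined above. */
+static inline void example_decode_cfg031(uint32_t raw, unsigned *max_width,
+                                         unsigned *max_speed)
+{
+    cvmx_pcieepx_cfg031_t cfg;
+    cfg.u32 = raw;            /* overlay the raw dword onto the bit-fields */
+    *max_width = cfg.s.mlw;   /* Maximum Link Width (e.g. 1 for x1) */
+    *max_speed = cfg.s.mls;   /* 0001b: 2.5 GT/s, 0010b: up to 5.0 GT/s */
+}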
+
+/**
+ * cvmx_pcieep#_cfg032
+ *
+ * PCIE_CFG032 = Thirty-third 32-bits of PCIE type 0 config space
+ * (Link Control Register/Link Status Register)
+ */
+union cvmx_pcieepx_cfg032 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg032_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lab : 1; /**< Link Autonomous Bandwidth Status */
+ uint32_t lbm : 1; /**< Link Bandwidth Management Status */
+ uint32_t dlla : 1; /**< Data Link Layer Active
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t scc : 1; /**< Slot Clock Configuration
+ Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector.
+ Writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t lt : 1; /**< Link Training
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t reserved_26_26 : 1;
+ uint32_t nlw : 6; /**< Negotiated Link Width
+ Set automatically by hardware after Link initialization.
+ Value is undefined when link is not up. */
+ uint32_t ls : 4; /**< Link Speed
+ 1 == The negotiated Link speed: 2.5 Gbps
+ 2 == The negotiated Link speed: 5.0 Gbps
+ 4 == The negotiated Link speed: 8.0 Gbps (Not Supported) */
+ uint32_t reserved_12_15 : 4;
+ uint32_t lab_int_enb : 1; /**< Link Autonomous Bandwidth Interrupt Enable
+ This bit is not applicable and is reserved for endpoints */
+ uint32_t lbm_int_enb : 1; /**< Link Bandwidth Management Interrupt Enable
+ This bit is not applicable and is reserved for endpoints */
+ uint32_t hawd : 1; /**< Hardware Autonomous Width Disable
+ (Not Supported) */
+ uint32_t ecpm : 1; /**< Enable Clock Power Management
+ Hardwired to 0 if Clock Power Management is disabled in
+ the Link Capabilities register. */
+ uint32_t es : 1; /**< Extended Synch */
+ uint32_t ccc : 1; /**< Common Clock Configuration */
+ uint32_t rl : 1; /**< Retrain Link
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t ld : 1; /**< Link Disable
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t rcb : 1; /**< Read Completion Boundary (RCB) */
+ uint32_t reserved_2_2 : 1;
+ uint32_t aslpc : 2; /**< Active State Link PM Control */
+#else
+ uint32_t aslpc : 2;
+ uint32_t reserved_2_2 : 1;
+ uint32_t rcb : 1;
+ uint32_t ld : 1;
+ uint32_t rl : 1;
+ uint32_t ccc : 1;
+ uint32_t es : 1;
+ uint32_t ecpm : 1;
+ uint32_t hawd : 1;
+ uint32_t lbm_int_enb : 1;
+ uint32_t lab_int_enb : 1;
+ uint32_t reserved_12_15 : 4;
+ uint32_t ls : 4;
+ uint32_t nlw : 6;
+ uint32_t reserved_26_26 : 1;
+ uint32_t lt : 1;
+ uint32_t scc : 1;
+ uint32_t dlla : 1;
+ uint32_t lbm : 1;
+ uint32_t lab : 1;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg032_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_30_31 : 2;
+ uint32_t dlla : 1; /**< Data Link Layer Active
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t scc : 1; /**< Slot Clock Configuration
+ Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector.
+ Writable through PESC(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t lt : 1; /**< Link Training
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t reserved_26_26 : 1;
+ uint32_t nlw : 6; /**< Negotiated Link Width
+ Set automatically by hardware after Link initialization. */
+ uint32_t ls : 4; /**< Link Speed
+ The negotiated Link speed: 2.5 Gbps */
+ uint32_t reserved_10_15 : 6;
+ uint32_t hawd : 1; /**< Hardware Autonomous Width Disable
+ (Not Supported) */
+ uint32_t ecpm : 1; /**< Enable Clock Power Management
+ Hardwired to 0 if Clock Power Management is disabled in
+ the Link Capabilities register. */
+ uint32_t es : 1; /**< Extended Synch */
+ uint32_t ccc : 1; /**< Common Clock Configuration */
+ uint32_t rl : 1; /**< Retrain Link
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t ld : 1; /**< Link Disable
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t rcb : 1; /**< Read Completion Boundary (RCB) */
+ uint32_t reserved_2_2 : 1;
+ uint32_t aslpc : 2; /**< Active State Link PM Control */
+#else
+ uint32_t aslpc : 2;
+ uint32_t reserved_2_2 : 1;
+ uint32_t rcb : 1;
+ uint32_t ld : 1;
+ uint32_t rl : 1;
+ uint32_t ccc : 1;
+ uint32_t es : 1;
+ uint32_t ecpm : 1;
+ uint32_t hawd : 1;
+ uint32_t reserved_10_15 : 6;
+ uint32_t ls : 4;
+ uint32_t nlw : 6;
+ uint32_t reserved_26_26 : 1;
+ uint32_t lt : 1;
+ uint32_t scc : 1;
+ uint32_t dlla : 1;
+ uint32_t reserved_30_31 : 2;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg032_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg032_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg032_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg032_s cn61xx;
+ struct cvmx_pcieepx_cfg032_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg032_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg032_s cn66xx;
+ struct cvmx_pcieepx_cfg032_s cn68xx;
+ struct cvmx_pcieepx_cfg032_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_30_31 : 2;
+ uint32_t dlla : 1; /**< Data Link Layer Active
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t scc : 1; /**< Slot Clock Configuration
+ Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector.
+ Writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t lt : 1; /**< Link Training
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t reserved_26_26 : 1;
+ uint32_t nlw : 6; /**< Negotiated Link Width
+ Set automatically by hardware after Link initialization. */
+ uint32_t ls : 4; /**< Link Speed
+ 1 == The negotiated Link speed: 2.5 Gbps
+ 2 == The negotiated Link speed: 5.0 Gbps
+ 4 == The negotiated Link speed: 8.0 Gbps (Not Supported) */
+ uint32_t reserved_12_15 : 4;
+ uint32_t lab_int_enb : 1; /**< Link Autonomous Bandwidth Interrupt Enable
+ This bit is not applicable and is reserved for endpoints */
+ uint32_t lbm_int_enb : 1; /**< Link Bandwidth Management Interrupt Enable
+ This bit is not applicable and is reserved for endpoints */
+ uint32_t hawd : 1; /**< Hardware Autonomous Width Disable
+ (Not Supported) */
+ uint32_t ecpm : 1; /**< Enable Clock Power Management
+ Hardwired to 0 if Clock Power Management is disabled in
+ the Link Capabilities register. */
+ uint32_t es : 1; /**< Extended Synch */
+ uint32_t ccc : 1; /**< Common Clock Configuration */
+ uint32_t rl : 1; /**< Retrain Link
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t ld : 1; /**< Link Disable
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t rcb : 1; /**< Read Completion Boundary (RCB) */
+ uint32_t reserved_2_2 : 1;
+ uint32_t aslpc : 2; /**< Active State Link PM Control */
+#else
+ uint32_t aslpc : 2;
+ uint32_t reserved_2_2 : 1;
+ uint32_t rcb : 1;
+ uint32_t ld : 1;
+ uint32_t rl : 1;
+ uint32_t ccc : 1;
+ uint32_t es : 1;
+ uint32_t ecpm : 1;
+ uint32_t hawd : 1;
+ uint32_t lbm_int_enb : 1;
+ uint32_t lab_int_enb : 1;
+ uint32_t reserved_12_15 : 4;
+ uint32_t ls : 4;
+ uint32_t nlw : 6;
+ uint32_t reserved_26_26 : 1;
+ uint32_t lt : 1;
+ uint32_t scc : 1;
+ uint32_t dlla : 1;
+ uint32_t reserved_30_31 : 2;
+#endif
+ } cn68xxp1;
+ struct cvmx_pcieepx_cfg032_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg032 cvmx_pcieepx_cfg032_t;
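+
+/* Illustrative sketch (hypothetical helper, not SDK API): checking the
+ * negotiated link parameters from a raw cfg032 dword obtained elsewhere.
+ * Note that dlla is hardwired to 0 for endpoints, so link-up state must be
+ * determined by other means before trusting nlw/ls. */
+static inline int example_link_is_gen2(uint32_t raw)
+{
+    cvmx_pcieepx_cfg032_t cfg;
+    cfg.u32 = raw;
+    /* ls: 1 = 2.5 Gbps, 2 = 5.0 Gbps; nlw holds the negotiated width */
+    return cfg.s.ls == 2;
+}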
+
+/**
+ * cvmx_pcieep#_cfg033
+ *
+ * PCIE_CFG033 = Thirty-fourth 32-bits of PCIE type 0 config space
+ * (Slot Capabilities Register)
+ */
+union cvmx_pcieepx_cfg033 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg033_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ps_num : 13; /**< Physical Slot Number, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t nccs : 1; /**< No Command Complete Support, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t emip : 1; /**< Electromechanical Interlock Present, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t sp_ls : 2; /**< Slot Power Limit Scale, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t sp_lv : 8; /**< Slot Power Limit Value, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t hp_c : 1; /**< Hot-Plug Capable, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t hp_s : 1; /**< Hot-Plug Surprise, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t pip : 1; /**< Power Indicator Present, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t aip : 1; /**< Attention Indicator Present, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t mrlsp : 1; /**< MRL Sensor Present, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t pcp : 1; /**< Power Controller Present, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t abp : 1; /**< Attention Button Present, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t abp : 1;
+ uint32_t pcp : 1;
+ uint32_t mrlsp : 1;
+ uint32_t aip : 1;
+ uint32_t pip : 1;
+ uint32_t hp_s : 1;
+ uint32_t hp_c : 1;
+ uint32_t sp_lv : 8;
+ uint32_t sp_ls : 2;
+ uint32_t emip : 1;
+ uint32_t nccs : 1;
+ uint32_t ps_num : 13;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg033_s cn52xx;
+ struct cvmx_pcieepx_cfg033_s cn52xxp1;
+ struct cvmx_pcieepx_cfg033_s cn56xx;
+ struct cvmx_pcieepx_cfg033_s cn56xxp1;
+ struct cvmx_pcieepx_cfg033_s cn63xx;
+ struct cvmx_pcieepx_cfg033_s cn63xxp1;
+};
+typedef union cvmx_pcieepx_cfg033 cvmx_pcieepx_cfg033_t;
+
+/**
+ * cvmx_pcieep#_cfg034
+ *
+ * PCIE_CFG034 = Thirty-fifth 32-bits of PCIE type 0 config space
+ * (Slot Control Register/Slot Status Register)
+ */
+union cvmx_pcieepx_cfg034 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg034_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t dlls_c : 1; /**< Data Link Layer State Changed
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t emis : 1; /**< Electromechanical Interlock Status */
+ uint32_t pds : 1; /**< Presence Detect State */
+ uint32_t mrlss : 1; /**< MRL Sensor State */
+ uint32_t ccint_d : 1; /**< Command Completed */
+ uint32_t pd_c : 1; /**< Presence Detect Changed */
+ uint32_t mrls_c : 1; /**< MRL Sensor Changed */
+ uint32_t pf_d : 1; /**< Power Fault Detected */
+ uint32_t abp_d : 1; /**< Attention Button Pressed */
+ uint32_t reserved_13_15 : 3;
+ uint32_t dlls_en : 1; /**< Data Link Layer State Changed Enable
+ Not applicable for an upstream Port or Endpoint device,
+ hardwired to 0. */
+ uint32_t emic : 1; /**< Electromechanical Interlock Control */
+ uint32_t pcc : 1; /**< Power Controller Control */
+ uint32_t pic : 2; /**< Power Indicator Control */
+ uint32_t aic : 2; /**< Attention Indicator Control */
+ uint32_t hpint_en : 1; /**< Hot-Plug Interrupt Enable */
+ uint32_t ccint_en : 1; /**< Command Completed Interrupt Enable */
+ uint32_t pd_en : 1; /**< Presence Detect Changed Enable */
+ uint32_t mrls_en : 1; /**< MRL Sensor Changed Enable */
+ uint32_t pf_en : 1; /**< Power Fault Detected Enable */
+ uint32_t abp_en : 1; /**< Attention Button Pressed Enable */
+#else
+ uint32_t abp_en : 1;
+ uint32_t pf_en : 1;
+ uint32_t mrls_en : 1;
+ uint32_t pd_en : 1;
+ uint32_t ccint_en : 1;
+ uint32_t hpint_en : 1;
+ uint32_t aic : 2;
+ uint32_t pic : 2;
+ uint32_t pcc : 1;
+ uint32_t emic : 1;
+ uint32_t dlls_en : 1;
+ uint32_t reserved_13_15 : 3;
+ uint32_t abp_d : 1;
+ uint32_t pf_d : 1;
+ uint32_t mrls_c : 1;
+ uint32_t pd_c : 1;
+ uint32_t ccint_d : 1;
+ uint32_t mrlss : 1;
+ uint32_t pds : 1;
+ uint32_t emis : 1;
+ uint32_t dlls_c : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg034_s cn52xx;
+ struct cvmx_pcieepx_cfg034_s cn52xxp1;
+ struct cvmx_pcieepx_cfg034_s cn56xx;
+ struct cvmx_pcieepx_cfg034_s cn56xxp1;
+ struct cvmx_pcieepx_cfg034_s cn63xx;
+ struct cvmx_pcieepx_cfg034_s cn63xxp1;
+};
+typedef union cvmx_pcieepx_cfg034 cvmx_pcieepx_cfg034_t;
+
+/**
+ * cvmx_pcieep#_cfg037
+ *
+ * PCIE_CFG037 = Thirty-eighth 32-bits of PCIE type 0 config space
+ * (Device Capabilities 2 Register)
+ */
+union cvmx_pcieepx_cfg037 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg037_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t obffs : 2; /**< Optimized Buffer Flush Fill (OBFF) Supported
+ (Not Supported) */
+ uint32_t reserved_12_17 : 6;
+ uint32_t ltrs : 1; /**< Latency Tolerance Reporting (LTR) Mechanism Supported
+ (Not Supported) */
+ uint32_t noroprpr : 1; /**< No RO-enabled PR-PR Passing
+ (This bit applies to RCs) */
+ uint32_t atom128s : 1; /**< 128-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom64s : 1; /**< 64-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom32s : 1; /**< 32-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom_ops : 1; /**< AtomicOp Routing Supported
+ (Not Applicable for EP) */
+ uint32_t ari : 1; /**< Alternate Routing ID Forwarding Supported
+ (Not Supported) */
+ uint32_t ctds : 1; /**< Completion Timeout Disable Supported */
+ uint32_t ctrs : 4; /**< Completion Timeout Ranges Supported */
+#else
+ uint32_t ctrs : 4;
+ uint32_t ctds : 1;
+ uint32_t ari : 1;
+ uint32_t atom_ops : 1;
+ uint32_t atom32s : 1;
+ uint32_t atom64s : 1;
+ uint32_t atom128s : 1;
+ uint32_t noroprpr : 1;
+ uint32_t ltrs : 1;
+ uint32_t reserved_12_17 : 6;
+ uint32_t obffs : 2;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg037_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_5_31 : 27;
+ uint32_t ctds : 1; /**< Completion Timeout Disable Supported */
+ uint32_t ctrs : 4; /**< Completion Timeout Ranges Supported
+ A value of 0 indicates that Completion Timeout Programming
+ is not supported; the completion timeout is fixed at 16.7 ms. */
+#else
+ uint32_t ctrs : 4;
+ uint32_t ctds : 1;
+ uint32_t reserved_5_31 : 27;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg037_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg037_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg037_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg037_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_14_31 : 18;
+ uint32_t tph : 2; /**< TPH Completer Supported
+ (Not Supported) */
+ uint32_t reserved_11_11 : 1;
+ uint32_t noroprpr : 1; /**< No RO-enabled PR-PR Passing
+ (This bit applies to RCs) */
+ uint32_t atom128s : 1; /**< 128-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom64s : 1; /**< 64-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom32s : 1; /**< 32-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom_ops : 1; /**< AtomicOp Routing Supported
+ (Not Applicable for EP) */
+ uint32_t ari : 1; /**< Alternate Routing ID Forwarding Supported
+ (Not Supported) */
+ uint32_t ctds : 1; /**< Completion Timeout Disable Supported */
+ uint32_t ctrs : 4; /**< Completion Timeout Ranges Supported */
+#else
+ uint32_t ctrs : 4;
+ uint32_t ctds : 1;
+ uint32_t ari : 1;
+ uint32_t atom_ops : 1;
+ uint32_t atom32s : 1;
+ uint32_t atom64s : 1;
+ uint32_t atom128s : 1;
+ uint32_t noroprpr : 1;
+ uint32_t reserved_11_11 : 1;
+ uint32_t tph : 2;
+ uint32_t reserved_14_31 : 18;
+#endif
+ } cn61xx;
+ struct cvmx_pcieepx_cfg037_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg037_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg037_cn61xx cn66xx;
+ struct cvmx_pcieepx_cfg037_cn61xx cn68xx;
+ struct cvmx_pcieepx_cfg037_cn61xx cn68xxp1;
+ struct cvmx_pcieepx_cfg037_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t obffs : 2; /**< Optimized Buffer Flush Fill (OBFF) Supported
+ (Not Supported) */
+ uint32_t reserved_14_17 : 4;
+ uint32_t tphs : 2; /**< TPH Completer Supported
+ (Not Supported) */
+ uint32_t ltrs : 1; /**< Latency Tolerance Reporting (LTR) Mechanism Supported
+ (Not Supported) */
+ uint32_t noroprpr : 1; /**< No RO-enabled PR-PR Passing
+ (This bit applies to RCs) */
+ uint32_t atom128s : 1; /**< 128-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom64s : 1; /**< 64-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom32s : 1; /**< 32-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom_ops : 1; /**< AtomicOp Routing Supported
+ (Not Applicable for EP) */
+ uint32_t ari : 1; /**< Alternate Routing ID Forwarding Supported
+ (Not Supported) */
+ uint32_t ctds : 1; /**< Completion Timeout Disable Supported */
+ uint32_t ctrs : 4; /**< Completion Timeout Ranges Supported */
+#else
+ uint32_t ctrs : 4;
+ uint32_t ctds : 1;
+ uint32_t ari : 1;
+ uint32_t atom_ops : 1;
+ uint32_t atom32s : 1;
+ uint32_t atom64s : 1;
+ uint32_t atom128s : 1;
+ uint32_t noroprpr : 1;
+ uint32_t ltrs : 1;
+ uint32_t tphs : 2;
+ uint32_t reserved_14_17 : 4;
+ uint32_t obffs : 2;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg037 cvmx_pcieepx_cfg037_t;
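+
+/* Illustrative sketch (hypothetical helper): probing Device Capabilities 2
+ * through the generic .s view. On older parts (cn52xx layout) the upper
+ * fields are reserved, but ctds/ctrs sit in the same low bits everywhere. */
+static inline int example_timeout_disable_supported(uint32_t raw)
+{
+    cvmx_pcieepx_cfg037_t cfg;
+    cfg.u32 = raw;
+    return cfg.s.ctds;  /* nonzero if Completion Timeout Disable exists */
+}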
+
+/**
+ * cvmx_pcieep#_cfg038
+ *
+ * PCIE_CFG038 = Thirty-ninth 32-bits of PCIE type 0 config space
+ * (Device Control 2 Register/Device Status 2 Register)
+ */
+union cvmx_pcieepx_cfg038 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg038_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_15_31 : 17;
+ uint32_t obffe : 2; /**< Optimized Buffer Flush Fill (OBFF) Enable
+ (Not Supported) */
+ uint32_t reserved_11_12 : 2;
+ uint32_t ltre : 1; /**< Latency Tolerance Reporting (LTR) Mechanism Enable
+ (Not Supported) */
+ uint32_t id0_cp : 1; /**< ID Based Ordering Completion Enable
+ (Not Supported) */
+ uint32_t id0_rq : 1; /**< ID Based Ordering Request Enable
+ (Not Supported) */
+ uint32_t atom_op_eb : 1; /**< AtomicOp Egress Blocking
+ (Not Supported) */
+ uint32_t atom_op : 1; /**< AtomicOp Requester Enable
+ (Not Supported) */
+ uint32_t ari : 1; /**< Alternate Routing ID Forwarding Supported
+ (Not Supported) */
+ uint32_t ctd : 1; /**< Completion Timeout Disable */
+ uint32_t ctv : 4; /**< Completion Timeout Value
+ Completion Timeout Programming is not supported;
+ the completion timeout is in the range of 16 ms to 55 ms. */
+#else
+ uint32_t ctv : 4;
+ uint32_t ctd : 1;
+ uint32_t ari : 1;
+ uint32_t atom_op : 1;
+ uint32_t atom_op_eb : 1;
+ uint32_t id0_rq : 1;
+ uint32_t id0_cp : 1;
+ uint32_t ltre : 1;
+ uint32_t reserved_11_12 : 2;
+ uint32_t obffe : 2;
+ uint32_t reserved_15_31 : 17;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg038_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_5_31 : 27;
+ uint32_t ctd : 1; /**< Completion Timeout Disable */
+ uint32_t ctv : 4; /**< Completion Timeout Value
+ Completion Timeout Programming is not supported;
+ the completion timeout is fixed at 16.7 ms. */
+#else
+ uint32_t ctv : 4;
+ uint32_t ctd : 1;
+ uint32_t reserved_5_31 : 27;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg038_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg038_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg038_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg038_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_10_31 : 22;
+ uint32_t id0_cp : 1; /**< ID Based Ordering Completion Enable
+ (Not Supported) */
+ uint32_t id0_rq : 1; /**< ID Based Ordering Request Enable
+ (Not Supported) */
+ uint32_t atom_op_eb : 1; /**< AtomicOp Egress Blocking
+ (Not Supported) */
+ uint32_t atom_op : 1; /**< AtomicOp Requester Enable
+ (Not Supported) */
+ uint32_t ari : 1; /**< Alternate Routing ID Forwarding Supported
+ (Not Supported) */
+ uint32_t ctd : 1; /**< Completion Timeout Disable */
+ uint32_t ctv : 4; /**< Completion Timeout Value
+ Completion Timeout Programming is not supported;
+ the completion timeout is in the range of 16 ms to 55 ms. */
+#else
+ uint32_t ctv : 4;
+ uint32_t ctd : 1;
+ uint32_t ari : 1;
+ uint32_t atom_op : 1;
+ uint32_t atom_op_eb : 1;
+ uint32_t id0_rq : 1;
+ uint32_t id0_cp : 1;
+ uint32_t reserved_10_31 : 22;
+#endif
+ } cn61xx;
+ struct cvmx_pcieepx_cfg038_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg038_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg038_cn61xx cn66xx;
+ struct cvmx_pcieepx_cfg038_cn61xx cn68xx;
+ struct cvmx_pcieepx_cfg038_cn61xx cn68xxp1;
+ struct cvmx_pcieepx_cfg038_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg038 cvmx_pcieepx_cfg038_t;
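+
+/* Illustrative sketch (hypothetical helper): setting the Completion Timeout
+ * Disable bit in a Device Control 2 value. The caller is expected to write
+ * the returned dword back through a config write done elsewhere. */
+static inline uint32_t example_set_ctd(uint32_t raw)
+{
+    cvmx_pcieepx_cfg038_t cfg;
+    cfg.u32 = raw;
+    cfg.s.ctd = 1;   /* disable the completion timeout mechanism */
+    return cfg.u32;
+}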
+
+/**
+ * cvmx_pcieep#_cfg039
+ *
+ * PCIE_CFG039 = Fortieth 32-bits of PCIE type 0 config space
+ * (Link Capabilities 2 Register)
+ */
+union cvmx_pcieepx_cfg039 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg039_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31 : 23;
+ uint32_t cls : 1; /**< Crosslink Supported */
+ uint32_t slsv : 7; /**< Supported Link Speeds Vector
+ Indicates the supported Link speeds of the associated Port.
+ For each bit, a value of 1b indicates that the corresponding
+ Link speed is supported; otherwise, the Link speed is not
+ supported.
+ Bit definitions are:
+ Bit 1 2.5 GT/s
+ Bit 2 5.0 GT/s
+ Bit 3 8.0 GT/s (Not Supported)
+ Bits 7:4 reserved
+ The reset value of this field is controlled by a value sent from
+ the lsb of the MIO_QLM#_SPD register:
+ qlm#_spd[0] RST_VALUE NOTE
+ 1 0001b 2.5 GHz supported
+ 0 0011b 5.0 GHz and 2.5 GHz supported */
+ uint32_t reserved_0_0 : 1;
+#else
+ uint32_t reserved_0_0 : 1;
+ uint32_t slsv : 7;
+ uint32_t cls : 1;
+ uint32_t reserved_9_31 : 23;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg039_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg039_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg039_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg039_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg039_s cn61xx;
+ struct cvmx_pcieepx_cfg039_s cn63xx;
+ struct cvmx_pcieepx_cfg039_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg039_s cn66xx;
+ struct cvmx_pcieepx_cfg039_s cn68xx;
+ struct cvmx_pcieepx_cfg039_s cn68xxp1;
+ struct cvmx_pcieepx_cfg039_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg039 cvmx_pcieepx_cfg039_t;
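+
+/* Illustrative sketch (hypothetical helper): testing the Supported Link
+ * Speeds Vector. slsv occupies register bits 7:1, so bit 0 of the field
+ * corresponds to 2.5 GT/s and bit 1 to 5.0 GT/s. */
+static inline int example_speed_supported(uint32_t raw, unsigned gen)
+{
+    cvmx_pcieepx_cfg039_t cfg;
+    cfg.u32 = raw;
+    return (cfg.s.slsv >> (gen - 1)) & 1;  /* gen: 1 = 2.5 GT/s, 2 = 5.0 GT/s */
+}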
+
+/**
+ * cvmx_pcieep#_cfg040
+ *
+ * PCIE_CFG040 = Forty-first 32-bits of PCIE type 0 config space
+ * (Link Control 2 Register/Link Status 2 Register)
+ */
+union cvmx_pcieepx_cfg040 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg040_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_17_31 : 15;
+ uint32_t cdl : 1; /**< Current De-emphasis Level
+ When the Link is operating at 5 GT/s speed, this bit
+ reflects the level of de-emphasis. Encodings:
+ 1b: -3.5 dB
+ 0b: -6 dB
+ Note: The value in this bit is undefined when the Link is
+ operating at 2.5 GT/s speed */
+ uint32_t reserved_13_15 : 3;
+ uint32_t cde : 1; /**< Compliance De-emphasis
+ This bit sets the de-emphasis level in the Polling.Compliance
+ state if the entry occurred due to the Tx Compliance
+ Receive bit being 1b. Encodings:
+ 1b: -3.5 dB
+ 0b: -6 dB
+ Note: When the Link is operating at 2.5 GT/s, the setting
+ of this bit has no effect. */
+ uint32_t csos : 1; /**< Compliance SOS
+ When set to 1b, the LTSSM is required to send SKP
+ Ordered Sets periodically in between the (modified)
+ compliance patterns.
+ Note: When the Link is operating at 2.5 GT/s, the setting
+ of this bit has no effect. */
+ uint32_t emc : 1; /**< Enter Modified Compliance
+ When this bit is set to 1b, the device transmits a modified
+ compliance pattern if the LTSSM enters the
+ Polling.Compliance state. */
+ uint32_t tm : 3; /**< Transmit Margin
+ This field controls the value of the non-de-emphasized
+ voltage level at the Transmitter signals:
+ - 000: 800-1200 mV for full swing, 400-600 mV for half-swing
+ - 001-010: values must be monotonic with a non-zero slope
+ - 011: 200-400 mV for full-swing and 100-200 mV for half-swing
+ - 100-111: reserved
+ This field is reset to 000b on entry to the LTSSM
+ Polling.Compliance substate.
+ When operating in 5.0 GT/s mode with full swing, the
+ de-emphasis ratio must be maintained within +/- 1 dB
+ from the specification-defined operational value
+ (either -3.5 or -6 dB). */
+ uint32_t sde : 1; /**< Selectable De-emphasis
+ Not applicable for an upstream Port or Endpoint device.
+ Hardwired to 0. */
+ uint32_t hasd : 1; /**< Hardware Autonomous Speed Disable
+ When asserted, hardware is disabled from changing the Link
+ speed for device-specific reasons other than attempting to
+ correct unreliable Link operation by reducing Link speed.
+ Initial transition to the highest supported common link
+ speed is not blocked by this signal. */
+ uint32_t ec : 1; /**< Enter Compliance
+ Software is permitted to force a link to enter Compliance
+ mode at the speed indicated in the Target Link Speed
+ field by setting this bit to 1b in both components on a link
+ and then initiating a hot reset on the link. */
+ uint32_t tls : 4; /**< Target Link Speed
+ For Downstream ports, this field sets an upper limit on link
+ operational speed by restricting the values advertised by
+ the upstream component in its training sequences:
+ - 0001: 2.5Gb/s Target Link Speed
+ - 0010: 5Gb/s Target Link Speed
+ - 0100: 8Gb/s Target Link Speed (Not Supported)
+ All other encodings are reserved.
+ If a value is written to this field that does not correspond to
+ a speed included in the Supported Link Speeds field, the
+ result is undefined.
+ For both Upstream and Downstream ports, this field is
+ used to set the target compliance mode speed when
+ software is using the Enter Compliance bit to force a link
+ into compliance mode.
+ The reset value of this field is controlled by a value sent from
+ the lsb of the MIO_QLM#_SPD register.
+ qlm#_spd[0] RST_VALUE NOTE
+ 1 0001b 2.5 GHz supported
+ 0 0010b 5.0 GHz and 2.5 GHz supported */
+#else
+ uint32_t tls : 4;
+ uint32_t ec : 1;
+ uint32_t hasd : 1;
+ uint32_t sde : 1;
+ uint32_t tm : 3;
+ uint32_t emc : 1;
+ uint32_t csos : 1;
+ uint32_t cde : 1;
+ uint32_t reserved_13_15 : 3;
+ uint32_t cdl : 1;
+ uint32_t reserved_17_31 : 15;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg040_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg040_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg040_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg040_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg040_s cn61xx;
+ struct cvmx_pcieepx_cfg040_s cn63xx;
+ struct cvmx_pcieepx_cfg040_s cn63xxp1;
+ struct cvmx_pcieepx_cfg040_s cn66xx;
+ struct cvmx_pcieepx_cfg040_s cn68xx;
+ struct cvmx_pcieepx_cfg040_s cn68xxp1;
+ struct cvmx_pcieepx_cfg040_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg040 cvmx_pcieepx_cfg040_t;
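+
+/* Illustrative sketch (hypothetical helper): programming the Target Link
+ * Speed field of Link Control 2. Only 1 (2.5 Gb/s) and 2 (5 Gb/s) are
+ * meaningful here; the result is undefined for unsupported encodings. */
+static inline uint32_t example_set_target_speed(uint32_t raw, unsigned gen)
+{
+    cvmx_pcieepx_cfg040_t cfg;
+    cfg.u32 = raw;
+    cfg.s.tls = gen;  /* 0001b: 2.5 Gb/s, 0010b: 5 Gb/s */
+    return cfg.u32;
+}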
+
+/**
+ * cvmx_pcieep#_cfg041
+ *
+ * PCIE_CFG041 = Forty-second 32-bits of PCIE type 0 config space
+ * (Slot Capabilities 2 Register)
+ */
+union cvmx_pcieepx_cfg041 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg041_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg041_s cn52xx;
+ struct cvmx_pcieepx_cfg041_s cn52xxp1;
+ struct cvmx_pcieepx_cfg041_s cn56xx;
+ struct cvmx_pcieepx_cfg041_s cn56xxp1;
+ struct cvmx_pcieepx_cfg041_s cn63xx;
+ struct cvmx_pcieepx_cfg041_s cn63xxp1;
+};
+typedef union cvmx_pcieepx_cfg041 cvmx_pcieepx_cfg041_t;
+
+/**
+ * cvmx_pcieep#_cfg042
+ *
+ * PCIE_CFG042 = Forty-third 32-bits of PCIE type 0 config space
+ * (Slot Control 2 Register/Slot Status 2 Register)
+ */
+union cvmx_pcieepx_cfg042 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg042_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg042_s cn52xx;
+ struct cvmx_pcieepx_cfg042_s cn52xxp1;
+ struct cvmx_pcieepx_cfg042_s cn56xx;
+ struct cvmx_pcieepx_cfg042_s cn56xxp1;
+ struct cvmx_pcieepx_cfg042_s cn63xx;
+ struct cvmx_pcieepx_cfg042_s cn63xxp1;
+};
+typedef union cvmx_pcieepx_cfg042 cvmx_pcieepx_cfg042_t;
+
+/**
+ * cvmx_pcieep#_cfg064
+ *
+ * PCIE_CFG064 = Sixty-fifth 32-bits of PCIE type 0 config space
+ * (PCI Express Extended Capability Header)
+ */
+union cvmx_pcieepx_cfg064 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg064_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t nco : 12; /**< Next Capability Offset */
+ uint32_t cv : 4; /**< Capability Version */
+ uint32_t pcieec : 16; /**< PCI Express Extended Capability */
+#else
+ uint32_t pcieec : 16;
+ uint32_t cv : 4;
+ uint32_t nco : 12;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg064_s cn52xx;
+ struct cvmx_pcieepx_cfg064_s cn52xxp1;
+ struct cvmx_pcieepx_cfg064_s cn56xx;
+ struct cvmx_pcieepx_cfg064_s cn56xxp1;
+ struct cvmx_pcieepx_cfg064_s cn61xx;
+ struct cvmx_pcieepx_cfg064_s cn63xx;
+ struct cvmx_pcieepx_cfg064_s cn63xxp1;
+ struct cvmx_pcieepx_cfg064_s cn66xx;
+ struct cvmx_pcieepx_cfg064_s cn68xx;
+ struct cvmx_pcieepx_cfg064_s cn68xxp1;
+ struct cvmx_pcieepx_cfg064_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg064 cvmx_pcieepx_cfg064_t;
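+
+/* Illustrative sketch (hypothetical helper): walking the extended capability
+ * list. nco gives the byte offset of the next extended capability header;
+ * a value of 0 terminates the list. */
+static inline uint32_t example_next_ext_cap_offset(uint32_t raw)
+{
+    cvmx_pcieepx_cfg064_t cfg;
+    cfg.u32 = raw;
+    return cfg.s.nco;  /* cfg.s.pcieec holds the capability ID itself */
+}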
+
+/**
+ * cvmx_pcieep#_cfg065
+ *
+ * PCIE_CFG065 = Sixty-sixth 32-bits of PCIE type 0 config space
+ * (Uncorrectable Error Status Register)
+ */
+union cvmx_pcieepx_cfg065 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg065_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombs : 1; /**< Unsupported AtomicOp Egress Blocked Status */
+ uint32_t reserved_23_23 : 1;
+ uint32_t ucies : 1; /**< Uncorrectable Internal Error Status */
+ uint32_t reserved_21_21 : 1;
+ uint32_t ures : 1; /**< Unsupported Request Error Status */
+ uint32_t ecrces : 1; /**< ECRC Error Status */
+ uint32_t mtlps : 1; /**< Malformed TLP Status */
+ uint32_t ros : 1; /**< Receiver Overflow Status */
+ uint32_t ucs : 1; /**< Unexpected Completion Status */
+ uint32_t cas : 1; /**< Completer Abort Status */
+ uint32_t cts : 1; /**< Completion Timeout Status */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Status */
+ uint32_t ptlps : 1; /**< Poisoned TLP Status */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Status (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Status */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_21 : 1;
+ uint32_t ucies : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t uatombs : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg065_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t ures : 1; /**< Unsupported Request Error Status */
+ uint32_t ecrces : 1; /**< ECRC Error Status */
+ uint32_t mtlps : 1; /**< Malformed TLP Status */
+ uint32_t ros : 1; /**< Receiver Overflow Status */
+ uint32_t ucs : 1; /**< Unexpected Completion Status */
+ uint32_t cas : 1; /**< Completer Abort Status */
+ uint32_t cts : 1; /**< Completion Timeout Status */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Status */
+ uint32_t ptlps : 1; /**< Poisoned TLP Status */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Status (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Status */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg065_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg065_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg065_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg065_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombs : 1; /**< Unsupported AtomicOp Egress Blocked Status */
+ uint32_t reserved_21_23 : 3;
+ uint32_t ures : 1; /**< Unsupported Request Error Status */
+ uint32_t ecrces : 1; /**< ECRC Error Status */
+ uint32_t mtlps : 1; /**< Malformed TLP Status */
+ uint32_t ros : 1; /**< Receiver Overflow Status */
+ uint32_t ucs : 1; /**< Unexpected Completion Status */
+ uint32_t cas : 1; /**< Completer Abort Status */
+ uint32_t cts : 1; /**< Completion Timeout Status */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Status */
+ uint32_t ptlps : 1; /**< Poisoned TLP Status */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Status (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Status */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_23 : 3;
+ uint32_t uatombs : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } cn61xx;
+ struct cvmx_pcieepx_cfg065_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg065_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg065_cn61xx cn66xx;
+ struct cvmx_pcieepx_cfg065_cn61xx cn68xx;
+ struct cvmx_pcieepx_cfg065_cn52xx cn68xxp1;
+ struct cvmx_pcieepx_cfg065_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombs : 1; /**< Unsupported AtomicOp Egress Blocked Status */
+ uint32_t reserved_23_23 : 1;
+ uint32_t ucies : 1; /**< Uncorrectable Internal Error Status */
+ uint32_t reserved_21_21 : 1;
+ uint32_t ures : 1; /**< Unsupported Request Error Status */
+ uint32_t ecrces : 1; /**< ECRC Error Status */
+ uint32_t mtlps : 1; /**< Malformed TLP Status */
+ uint32_t ros : 1; /**< Receiver Overflow Status */
+ uint32_t ucs : 1; /**< Unexpected Completion Status */
+ uint32_t cas : 1; /**< Completer Abort Status */
+ uint32_t cts : 1; /**< Completion Timeout Status */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Status */
+ uint32_t ptlps : 1; /**< Poisoned TLP Status */
+ uint32_t reserved_5_11 : 7;
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Status */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t reserved_5_11 : 7;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_21 : 1;
+ uint32_t ucies : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t uatombs : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg065 cvmx_pcieepx_cfg065_t;
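+
+/* Illustrative sketch (hypothetical helper): checking a few uncorrectable
+ * error bits. Per the PCIe specification these status bits are RW1C, so the
+ * usual acknowledge is to write the read value back via a config write. */
+static inline int example_has_uncorrectable_error(uint32_t raw)
+{
+    cvmx_pcieepx_cfg065_t cfg;
+    cfg.u32 = raw;
+    return cfg.s.mtlps || cfg.s.ros || cfg.s.dlpes || cfg.s.ptlps;
+}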
+
+/**
+ * cvmx_pcieep#_cfg066
+ *
+ * PCIE_CFG066 = Sixty-seventh 32-bits of PCIE type 0 config space
+ * (Uncorrectable Error Mask Register)
+ */
+union cvmx_pcieepx_cfg066 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg066_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombm : 1; /**< Unsupported AtomicOp Egress Blocked Mask */
+ uint32_t reserved_23_23 : 1;
+ uint32_t uciem : 1; /**< Uncorrectable Internal Error Mask */
+ uint32_t reserved_21_21 : 1;
+ uint32_t urem : 1; /**< Unsupported Request Error Mask */
+ uint32_t ecrcem : 1; /**< ECRC Error Mask */
+ uint32_t mtlpm : 1; /**< Malformed TLP Mask */
+ uint32_t rom : 1; /**< Receiver Overflow Mask */
+ uint32_t ucm : 1; /**< Unexpected Completion Mask */
+ uint32_t cam : 1; /**< Completer Abort Mask */
+ uint32_t ctm : 1; /**< Completion Timeout Mask */
+ uint32_t fcpem : 1; /**< Flow Control Protocol Error Mask */
+ uint32_t ptlpm : 1; /**< Poisoned TLP Mask */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdem : 1; /**< Surprise Down Error Mask (not supported) */
+ uint32_t dlpem : 1; /**< Data Link Protocol Error Mask */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpem : 1;
+ uint32_t sdem : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlpm : 1;
+ uint32_t fcpem : 1;
+ uint32_t ctm : 1;
+ uint32_t cam : 1;
+ uint32_t ucm : 1;
+ uint32_t rom : 1;
+ uint32_t mtlpm : 1;
+ uint32_t ecrcem : 1;
+ uint32_t urem : 1;
+ uint32_t reserved_21_21 : 1;
+ uint32_t uciem : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t uatombm : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg066_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t urem : 1; /**< Unsupported Request Error Mask */
+ uint32_t ecrcem : 1; /**< ECRC Error Mask */
+ uint32_t mtlpm : 1; /**< Malformed TLP Mask */
+ uint32_t rom : 1; /**< Receiver Overflow Mask */
+ uint32_t ucm : 1; /**< Unexpected Completion Mask */
+ uint32_t cam : 1; /**< Completer Abort Mask */
+ uint32_t ctm : 1; /**< Completion Timeout Mask */
+ uint32_t fcpem : 1; /**< Flow Control Protocol Error Mask */
+ uint32_t ptlpm : 1; /**< Poisoned TLP Mask */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdem : 1; /**< Surprise Down Error Mask (not supported) */
+ uint32_t dlpem : 1; /**< Data Link Protocol Error Mask */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpem : 1;
+ uint32_t sdem : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlpm : 1;
+ uint32_t fcpem : 1;
+ uint32_t ctm : 1;
+ uint32_t cam : 1;
+ uint32_t ucm : 1;
+ uint32_t rom : 1;
+ uint32_t mtlpm : 1;
+ uint32_t ecrcem : 1;
+ uint32_t urem : 1;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg066_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg066_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg066_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg066_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombm : 1; /**< Unsupported AtomicOp Egress Blocked Mask */
+ uint32_t reserved_21_23 : 3;
+ uint32_t urem : 1; /**< Unsupported Request Error Mask */
+ uint32_t ecrcem : 1; /**< ECRC Error Mask */
+ uint32_t mtlpm : 1; /**< Malformed TLP Mask */
+ uint32_t rom : 1; /**< Receiver Overflow Mask */
+ uint32_t ucm : 1; /**< Unexpected Completion Mask */
+ uint32_t cam : 1; /**< Completer Abort Mask */
+ uint32_t ctm : 1; /**< Completion Timeout Mask */
+ uint32_t fcpem : 1; /**< Flow Control Protocol Error Mask */
+ uint32_t ptlpm : 1; /**< Poisoned TLP Mask */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdem : 1; /**< Surprise Down Error Mask (not supported) */
+ uint32_t dlpem : 1; /**< Data Link Protocol Error Mask */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpem : 1;
+ uint32_t sdem : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlpm : 1;
+ uint32_t fcpem : 1;
+ uint32_t ctm : 1;
+ uint32_t cam : 1;
+ uint32_t ucm : 1;
+ uint32_t rom : 1;
+ uint32_t mtlpm : 1;
+ uint32_t ecrcem : 1;
+ uint32_t urem : 1;
+ uint32_t reserved_21_23 : 3;
+ uint32_t uatombm : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } cn61xx;
+ struct cvmx_pcieepx_cfg066_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg066_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg066_cn61xx cn66xx;
+ struct cvmx_pcieepx_cfg066_cn61xx cn68xx;
+ struct cvmx_pcieepx_cfg066_cn52xx cn68xxp1;
+ struct cvmx_pcieepx_cfg066_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombm : 1; /**< Unsupported AtomicOp Egress Blocked Mask */
+ uint32_t reserved_23_23 : 1;
+ uint32_t uciem : 1; /**< Uncorrectable Internal Error Mask */
+ uint32_t reserved_21_21 : 1;
+ uint32_t urem : 1; /**< Unsupported Request Error Mask */
+ uint32_t ecrcem : 1; /**< ECRC Error Mask */
+ uint32_t mtlpm : 1; /**< Malformed TLP Mask */
+ uint32_t rom : 1; /**< Receiver Overflow Mask */
+ uint32_t ucm : 1; /**< Unexpected Completion Mask */
+ uint32_t cam : 1; /**< Completer Abort Mask */
+ uint32_t ctm : 1; /**< Completion Timeout Mask */
+ uint32_t fcpem : 1; /**< Flow Control Protocol Error Mask */
+ uint32_t ptlpm : 1; /**< Poisoned TLP Mask */
+ uint32_t reserved_5_11 : 7;
+ uint32_t dlpem : 1; /**< Data Link Protocol Error Mask */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpem : 1;
+ uint32_t reserved_5_11 : 7;
+ uint32_t ptlpm : 1;
+ uint32_t fcpem : 1;
+ uint32_t ctm : 1;
+ uint32_t cam : 1;
+ uint32_t ucm : 1;
+ uint32_t rom : 1;
+ uint32_t mtlpm : 1;
+ uint32_t ecrcem : 1;
+ uint32_t urem : 1;
+ uint32_t reserved_21_21 : 1;
+ uint32_t uciem : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t uatombm : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg066 cvmx_pcieepx_cfg066_t;
+
+/**
+ * cvmx_pcieep#_cfg067
+ *
+ * PCIE_CFG067 = Sixty-eighth 32-bits of PCIE type 0 config space
+ * (Uncorrectable Error Severity Register)
+ */
+union cvmx_pcieepx_cfg067 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg067_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombs : 1; /**< Unsupported AtomicOp Egress Blocked Severity */
+ uint32_t reserved_23_23 : 1;
+ uint32_t ucies : 1; /**< Uncorrectable Internal Error Severity */
+ uint32_t reserved_21_21 : 1;
+ uint32_t ures : 1; /**< Unsupported Request Error Severity */
+ uint32_t ecrces : 1; /**< ECRC Error Severity */
+ uint32_t mtlps : 1; /**< Malformed TLP Severity */
+ uint32_t ros : 1; /**< Receiver Overflow Severity */
+ uint32_t ucs : 1; /**< Unexpected Completion Severity */
+ uint32_t cas : 1; /**< Completer Abort Severity */
+ uint32_t cts : 1; /**< Completion Timeout Severity */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Severity */
+ uint32_t ptlps : 1; /**< Poisoned TLP Severity */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Severity (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Severity */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_21 : 1;
+ uint32_t ucies : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t uatombs : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg067_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t ures : 1; /**< Unsupported Request Error Severity */
+ uint32_t ecrces : 1; /**< ECRC Error Severity */
+ uint32_t mtlps : 1; /**< Malformed TLP Severity */
+ uint32_t ros : 1; /**< Receiver Overflow Severity */
+ uint32_t ucs : 1; /**< Unexpected Completion Severity */
+ uint32_t cas : 1; /**< Completer Abort Severity */
+ uint32_t cts : 1; /**< Completion Timeout Severity */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Severity */
+ uint32_t ptlps : 1; /**< Poisoned TLP Severity */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Severity (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Severity */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg067_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg067_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg067_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg067_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombs : 1; /**< Unsupported AtomicOp Egress Blocked Severity */
+ uint32_t reserved_21_23 : 3;
+ uint32_t ures : 1; /**< Unsupported Request Error Severity */
+ uint32_t ecrces : 1; /**< ECRC Error Severity */
+ uint32_t mtlps : 1; /**< Malformed TLP Severity */
+ uint32_t ros : 1; /**< Receiver Overflow Severity */
+ uint32_t ucs : 1; /**< Unexpected Completion Severity */
+ uint32_t cas : 1; /**< Completer Abort Severity */
+ uint32_t cts : 1; /**< Completion Timeout Severity */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Severity */
+ uint32_t ptlps : 1; /**< Poisoned TLP Severity */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Severity (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Severity */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_23 : 3;
+ uint32_t uatombs : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } cn61xx;
+ struct cvmx_pcieepx_cfg067_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg067_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg067_cn61xx cn66xx;
+ struct cvmx_pcieepx_cfg067_cn61xx cn68xx;
+ struct cvmx_pcieepx_cfg067_cn52xx cn68xxp1;
+ struct cvmx_pcieepx_cfg067_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombs : 1; /**< Unsupported AtomicOp Egress Blocked Severity */
+ uint32_t reserved_23_23 : 1;
+ uint32_t ucies : 1; /**< Uncorrectable Internal Error Severity */
+ uint32_t reserved_21_21 : 1;
+ uint32_t ures : 1; /**< Unsupported Request Error Severity */
+ uint32_t ecrces : 1; /**< ECRC Error Severity */
+ uint32_t mtlps : 1; /**< Malformed TLP Severity */
+ uint32_t ros : 1; /**< Receiver Overflow Severity */
+ uint32_t ucs : 1; /**< Unexpected Completion Severity */
+ uint32_t cas : 1; /**< Completer Abort Severity */
+ uint32_t cts : 1; /**< Completion Timeout Severity */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Severity */
+ uint32_t ptlps : 1; /**< Poisoned TLP Severity */
+ uint32_t reserved_5_11 : 7;
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Severity */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t reserved_5_11 : 7;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_21 : 1;
+ uint32_t ucies : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t uatombs : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg067 cvmx_pcieepx_cfg067_t;
+
+/**
+ * cvmx_pcieep#_cfg068
+ *
+ * PCIE_CFG068 = Sixty-ninth 32-bits of PCIE type 0 config space
+ * (Correctable Error Status Register)
+ */
+union cvmx_pcieepx_cfg068 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg068_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_15_31 : 17;
+ uint32_t cies : 1; /**< Corrected Internal Error Status */
+ uint32_t anfes : 1; /**< Advisory Non-Fatal Error Status */
+ uint32_t rtts : 1; /**< Reply Timer Timeout Status */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rnrs : 1; /**< REPLAY_NUM Rollover Status */
+ uint32_t bdllps : 1; /**< Bad DLLP Status */
+ uint32_t btlps : 1; /**< Bad TLP Status */
+ uint32_t reserved_1_5 : 5;
+ uint32_t res : 1; /**< Receiver Error Status */
+#else
+ uint32_t res : 1;
+ uint32_t reserved_1_5 : 5;
+ uint32_t btlps : 1;
+ uint32_t bdllps : 1;
+ uint32_t rnrs : 1;
+ uint32_t reserved_9_11 : 3;
+ uint32_t rtts : 1;
+ uint32_t anfes : 1;
+ uint32_t cies : 1;
+ uint32_t reserved_15_31 : 17;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg068_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_14_31 : 18;
+ uint32_t anfes : 1; /**< Advisory Non-Fatal Error Status */
+ uint32_t rtts : 1; /**< Reply Timer Timeout Status */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rnrs : 1; /**< REPLAY_NUM Rollover Status */
+ uint32_t bdllps : 1; /**< Bad DLLP Status */
+ uint32_t btlps : 1; /**< Bad TLP Status */
+ uint32_t reserved_1_5 : 5;
+ uint32_t res : 1; /**< Receiver Error Status */
+#else
+ uint32_t res : 1;
+ uint32_t reserved_1_5 : 5;
+ uint32_t btlps : 1;
+ uint32_t bdllps : 1;
+ uint32_t rnrs : 1;
+ uint32_t reserved_9_11 : 3;
+ uint32_t rtts : 1;
+ uint32_t anfes : 1;
+ uint32_t reserved_14_31 : 18;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg068_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg068_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg068_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg068_cn52xx cn61xx;
+ struct cvmx_pcieepx_cfg068_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg068_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg068_cn52xx cn66xx;
+ struct cvmx_pcieepx_cfg068_cn52xx cn68xx;
+ struct cvmx_pcieepx_cfg068_cn52xx cn68xxp1;
+ struct cvmx_pcieepx_cfg068_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg068 cvmx_pcieepx_cfg068_t;
+
+/**
+ * cvmx_pcieep#_cfg069
+ *
+ * PCIE_CFG069 = Seventieth 32-bits of PCIE type 0 config space
+ * (Correctable Error Mask Register)
+ */
+union cvmx_pcieepx_cfg069 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg069_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_15_31 : 17;
+ uint32_t ciem : 1; /**< Corrected Internal Error Mask */
+ uint32_t anfem : 1; /**< Advisory Non-Fatal Error Mask */
+ uint32_t rttm : 1; /**< Reply Timer Timeout Mask */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rnrm : 1; /**< REPLAY_NUM Rollover Mask */
+ uint32_t bdllpm : 1; /**< Bad DLLP Mask */
+ uint32_t btlpm : 1; /**< Bad TLP Mask */
+ uint32_t reserved_1_5 : 5;
+ uint32_t rem : 1; /**< Receiver Error Mask */
+#else
+ uint32_t rem : 1;
+ uint32_t reserved_1_5 : 5;
+ uint32_t btlpm : 1;
+ uint32_t bdllpm : 1;
+ uint32_t rnrm : 1;
+ uint32_t reserved_9_11 : 3;
+ uint32_t rttm : 1;
+ uint32_t anfem : 1;
+ uint32_t ciem : 1;
+ uint32_t reserved_15_31 : 17;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg069_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_14_31 : 18;
+ uint32_t anfem : 1; /**< Advisory Non-Fatal Error Mask */
+ uint32_t rttm : 1; /**< Reply Timer Timeout Mask */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rnrm : 1; /**< REPLAY_NUM Rollover Mask */
+ uint32_t bdllpm : 1; /**< Bad DLLP Mask */
+ uint32_t btlpm : 1; /**< Bad TLP Mask */
+ uint32_t reserved_1_5 : 5;
+ uint32_t rem : 1; /**< Receiver Error Mask */
+#else
+ uint32_t rem : 1;
+ uint32_t reserved_1_5 : 5;
+ uint32_t btlpm : 1;
+ uint32_t bdllpm : 1;
+ uint32_t rnrm : 1;
+ uint32_t reserved_9_11 : 3;
+ uint32_t rttm : 1;
+ uint32_t anfem : 1;
+ uint32_t reserved_14_31 : 18;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg069_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg069_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg069_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg069_cn52xx cn61xx;
+ struct cvmx_pcieepx_cfg069_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg069_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg069_cn52xx cn66xx;
+ struct cvmx_pcieepx_cfg069_cn52xx cn68xx;
+ struct cvmx_pcieepx_cfg069_cn52xx cn68xxp1;
+ struct cvmx_pcieepx_cfg069_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg069 cvmx_pcieepx_cfg069_t;
+
+/**
+ * cvmx_pcieep#_cfg070
+ *
+ * PCIE_CFG070 = Seventy-first 32-bits of PCIE type 0 config space
+ * (Advanced Error Capabilities and Control Register)
+ */
+union cvmx_pcieepx_cfg070 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg070_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31 : 23;
+ uint32_t ce : 1; /**< ECRC Check Enable */
+ uint32_t cc : 1; /**< ECRC Check Capable */
+ uint32_t ge : 1; /**< ECRC Generation Enable */
+ uint32_t gc : 1; /**< ECRC Generation Capability */
+ uint32_t fep : 5; /**< First Error Pointer */
+#else
+ uint32_t fep : 5;
+ uint32_t gc : 1;
+ uint32_t ge : 1;
+ uint32_t cc : 1;
+ uint32_t ce : 1;
+ uint32_t reserved_9_31 : 23;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg070_s cn52xx;
+ struct cvmx_pcieepx_cfg070_s cn52xxp1;
+ struct cvmx_pcieepx_cfg070_s cn56xx;
+ struct cvmx_pcieepx_cfg070_s cn56xxp1;
+ struct cvmx_pcieepx_cfg070_s cn61xx;
+ struct cvmx_pcieepx_cfg070_s cn63xx;
+ struct cvmx_pcieepx_cfg070_s cn63xxp1;
+ struct cvmx_pcieepx_cfg070_s cn66xx;
+ struct cvmx_pcieepx_cfg070_s cn68xx;
+ struct cvmx_pcieepx_cfg070_s cn68xxp1;
+ struct cvmx_pcieepx_cfg070_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg070 cvmx_pcieepx_cfg070_t;
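+
+/* Illustrative sketch (hypothetical helper): enabling ECRC generation and
+ * checking only when the corresponding capability bits report support. */
+static inline uint32_t example_enable_ecrc(uint32_t raw)
+{
+    cvmx_pcieepx_cfg070_t cfg;
+    cfg.u32 = raw;
+    if (cfg.s.gc)
+        cfg.s.ge = 1;  /* generate ECRC only if capable */
+    if (cfg.s.cc)
+        cfg.s.ce = 1;  /* check ECRC only if capable */
+    return cfg.u32;
+}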
+
+/**
+ * cvmx_pcieep#_cfg071
+ *
+ * PCIE_CFG071 = Seventy-second 32-bits of PCIE type 0 config space
+ * (Header Log Register 1)
+ */
+union cvmx_pcieepx_cfg071 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg071_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dword1 : 32; /**< Header Log Register (first DWORD) */
+#else
+ uint32_t dword1 : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg071_s cn52xx;
+ struct cvmx_pcieepx_cfg071_s cn52xxp1;
+ struct cvmx_pcieepx_cfg071_s cn56xx;
+ struct cvmx_pcieepx_cfg071_s cn56xxp1;
+ struct cvmx_pcieepx_cfg071_s cn61xx;
+ struct cvmx_pcieepx_cfg071_s cn63xx;
+ struct cvmx_pcieepx_cfg071_s cn63xxp1;
+ struct cvmx_pcieepx_cfg071_s cn66xx;
+ struct cvmx_pcieepx_cfg071_s cn68xx;
+ struct cvmx_pcieepx_cfg071_s cn68xxp1;
+ struct cvmx_pcieepx_cfg071_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg071 cvmx_pcieepx_cfg071_t;
+
+/**
+ * cvmx_pcieep#_cfg072
+ *
+ * PCIE_CFG072 = Seventy-third 32-bits of PCIE type 0 config space
+ * (Header Log Register 2)
+ */
+union cvmx_pcieepx_cfg072 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg072_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dword2 : 32; /**< Header Log Register (second DWORD) */
+#else
+ uint32_t dword2 : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg072_s cn52xx;
+ struct cvmx_pcieepx_cfg072_s cn52xxp1;
+ struct cvmx_pcieepx_cfg072_s cn56xx;
+ struct cvmx_pcieepx_cfg072_s cn56xxp1;
+ struct cvmx_pcieepx_cfg072_s cn61xx;
+ struct cvmx_pcieepx_cfg072_s cn63xx;
+ struct cvmx_pcieepx_cfg072_s cn63xxp1;
+ struct cvmx_pcieepx_cfg072_s cn66xx;
+ struct cvmx_pcieepx_cfg072_s cn68xx;
+ struct cvmx_pcieepx_cfg072_s cn68xxp1;
+ struct cvmx_pcieepx_cfg072_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg072 cvmx_pcieepx_cfg072_t;
+
+/**
+ * cvmx_pcieep#_cfg073
+ *
+ * PCIE_CFG073 = Seventy-fourth 32-bits of PCIE type 0 config space
+ * (Header Log Register 3)
+ */
+union cvmx_pcieepx_cfg073 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg073_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dword3 : 32; /**< Header Log Register (third DWORD) */
+#else
+ uint32_t dword3 : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg073_s cn52xx;
+ struct cvmx_pcieepx_cfg073_s cn52xxp1;
+ struct cvmx_pcieepx_cfg073_s cn56xx;
+ struct cvmx_pcieepx_cfg073_s cn56xxp1;
+ struct cvmx_pcieepx_cfg073_s cn61xx;
+ struct cvmx_pcieepx_cfg073_s cn63xx;
+ struct cvmx_pcieepx_cfg073_s cn63xxp1;
+ struct cvmx_pcieepx_cfg073_s cn66xx;
+ struct cvmx_pcieepx_cfg073_s cn68xx;
+ struct cvmx_pcieepx_cfg073_s cn68xxp1;
+ struct cvmx_pcieepx_cfg073_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg073 cvmx_pcieepx_cfg073_t;
+
+/**
+ * cvmx_pcieep#_cfg074
+ *
+ * PCIE_CFG074 = Seventy-fifth 32-bits of PCIE type 0 config space
+ * (Header Log Register 4)
+ */
+union cvmx_pcieepx_cfg074 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg074_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dword4 : 32; /**< Header Log Register (fourth DWORD) */
+#else
+ uint32_t dword4 : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg074_s cn52xx;
+ struct cvmx_pcieepx_cfg074_s cn52xxp1;
+ struct cvmx_pcieepx_cfg074_s cn56xx;
+ struct cvmx_pcieepx_cfg074_s cn56xxp1;
+ struct cvmx_pcieepx_cfg074_s cn61xx;
+ struct cvmx_pcieepx_cfg074_s cn63xx;
+ struct cvmx_pcieepx_cfg074_s cn63xxp1;
+ struct cvmx_pcieepx_cfg074_s cn66xx;
+ struct cvmx_pcieepx_cfg074_s cn68xx;
+ struct cvmx_pcieepx_cfg074_s cn68xxp1;
+ struct cvmx_pcieepx_cfg074_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg074 cvmx_pcieepx_cfg074_t;
+
+/**
+ * cvmx_pcieep#_cfg448
+ *
+ * PCIE_CFG448 = Four hundred forty-ninth 32-bits of PCIE type 0 config space
+ * (Ack Latency Timer and Replay Timer Register)
+ */
+union cvmx_pcieepx_cfg448 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg448_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rtl : 16; /**< Replay Time Limit
+ The replay timer expires when it reaches this limit. The PCI
+ Express bus initiates a replay upon reception of a Nak or when
+ the replay timer expires.
+ This value will be set correctly by the hardware out of reset
+ or when the negotiated Link-Width or Payload-Size changes. If
+ the user changes this value through a CSR write or by an
+ EEPROM load then they should refer to the PCIe Specification
+ for the correct value. */
+ uint32_t rtltl : 16; /**< Round Trip Latency Time Limit
+ The Ack/Nak latency timer expires when it reaches this limit.
+ This value will be set correctly by the hardware out of reset
+ or when the negotiated Link-Width or Payload-Size changes. If
+ the user changes this value through a CSR write or by an
+ EEPROM load then they should refer to the PCIe Specification
+ for the correct value. */
+#else
+ uint32_t rtltl : 16;
+ uint32_t rtl : 16;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg448_s cn52xx;
+ struct cvmx_pcieepx_cfg448_s cn52xxp1;
+ struct cvmx_pcieepx_cfg448_s cn56xx;
+ struct cvmx_pcieepx_cfg448_s cn56xxp1;
+ struct cvmx_pcieepx_cfg448_s cn61xx;
+ struct cvmx_pcieepx_cfg448_s cn63xx;
+ struct cvmx_pcieepx_cfg448_s cn63xxp1;
+ struct cvmx_pcieepx_cfg448_s cn66xx;
+ struct cvmx_pcieepx_cfg448_s cn68xx;
+ struct cvmx_pcieepx_cfg448_s cn68xxp1;
+ struct cvmx_pcieepx_cfg448_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg448 cvmx_pcieepx_cfg448_t;
+
+/**
+ * cvmx_pcieep#_cfg449
+ *
+ * PCIE_CFG449 = Four hundred fiftieth 32-bits of PCIE type 0 config space
+ * (Other Message Register)
+ */
+union cvmx_pcieepx_cfg449 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg449_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t omr : 32; /**< Other Message Register
+ This register can be used for either of the following purposes:
+ o To send a specific PCI Express Message, the application
+ writes the payload of the Message into this register, then
+ sets bit 0 of the Port Link Control Register to send the
+ Message.
+ o To store a corruption pattern for corrupting the LCRC on all
+ TLPs, the application places a 32-bit corruption pattern into
+ this register and enables this function by setting bit 25 of
+ the Port Link Control Register. When enabled, the transmit
+ LCRC result is XOR'd with this pattern before inserting
+ it into the packet. */
+#else
+ uint32_t omr : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg449_s cn52xx;
+ struct cvmx_pcieepx_cfg449_s cn52xxp1;
+ struct cvmx_pcieepx_cfg449_s cn56xx;
+ struct cvmx_pcieepx_cfg449_s cn56xxp1;
+ struct cvmx_pcieepx_cfg449_s cn61xx;
+ struct cvmx_pcieepx_cfg449_s cn63xx;
+ struct cvmx_pcieepx_cfg449_s cn63xxp1;
+ struct cvmx_pcieepx_cfg449_s cn66xx;
+ struct cvmx_pcieepx_cfg449_s cn68xx;
+ struct cvmx_pcieepx_cfg449_s cn68xxp1;
+ struct cvmx_pcieepx_cfg449_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg449 cvmx_pcieepx_cfg449_t;
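
The two uses described above pair this register with the Port Link Control
register (PCIE_CFG452, defined below): bit 0 of PCIE_CFG452 transmits the
message, and bit 25 (ECCRC) enables the LCRC-corruption pattern. A hedged
sketch of the corruption test path, assuming cvmx_pcie_cfgx_read() and
cvmx_pcie_cfgx_write() from cvmx-pcie.h and this header's address macros:

    #include "cvmx.h"
    #include "cvmx-pcie.h"

    /* Sketch: corrupt transmit LCRC with a caller-chosen XOR pattern.
       Test feature only, per the register description. */
    static void enable_lcrc_corruption(int pcie_port, uint32_t pattern)
    {
        cvmx_pcieepx_cfg449_t omr;
        cvmx_pcieepx_cfg452_t plc;

        omr.u32 = 0;
        omr.s.omr = pattern; /* 32-bit corruption pattern */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG449(pcie_port), omr.u32);

        plc.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG452(pcie_port));
        plc.s.eccrc = 1; /* XOR the pattern into every transmitted LCRC */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG452(pcie_port), plc.u32);
    }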
+
+/**
+ * cvmx_pcieep#_cfg450
+ *
+ * PCIE_CFG450 = Four hundred fifty-first 32-bits of PCIE type 0 config space
+ * (Port Force Link Register)
+ */
+union cvmx_pcieepx_cfg450 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg450_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lpec : 8; /**< Low Power Entrance Count
+ The Power Management state will wait for this many clock cycles
+ for the associated completion of a CfgWr to the PCIE_CFG017
+ register's Power State (PS) field to go low-power. This register
+ is intended for applications that do not let the PCI Express
+ bus handle a completion for a configuration request to the
+ Power Management Control and Status (PCIE_CFG017) register. */
+ uint32_t reserved_22_23 : 2;
+ uint32_t link_state : 6; /**< Link State
+ The Link state that the PCI Express Bus will be forced to
+ when bit 15 (Force Link) is set.
+ State encoding:
+ o DETECT_QUIET 00h
+ o DETECT_ACT 01h
+ o POLL_ACTIVE 02h
+ o POLL_COMPLIANCE 03h
+ o POLL_CONFIG 04h
+ o PRE_DETECT_QUIET 05h
+ o DETECT_WAIT 06h
+ o CFG_LINKWD_START 07h
+ o CFG_LINKWD_ACEPT 08h
+ o CFG_LANENUM_WAIT 09h
+ o CFG_LANENUM_ACEPT 0Ah
+ o CFG_COMPLETE 0Bh
+ o CFG_IDLE 0Ch
+ o RCVRY_LOCK 0Dh
+ o RCVRY_SPEED 0Eh
+ o RCVRY_RCVRCFG 0Fh
+ o RCVRY_IDLE 10h
+ o L0 11h
+ o L0S 12h
+ o L123_SEND_EIDLE 13h
+ o L1_IDLE 14h
+ o L2_IDLE 15h
+ o L2_WAKE 16h
+ o DISABLED_ENTRY 17h
+ o DISABLED_IDLE 18h
+ o DISABLED 19h
+ o LPBK_ENTRY 1Ah
+ o LPBK_ACTIVE 1Bh
+ o LPBK_EXIT 1Ch
+ o LPBK_EXIT_TIMEOUT 1Dh
+ o HOT_RESET_ENTRY 1Eh
+ o HOT_RESET 1Fh */
+ uint32_t force_link : 1; /**< Force Link
+ Forces the Link to the state specified by the Link State field.
+ The Force Link pulse will trigger Link re-negotiation.
+ * As Force Link is a pulse, writing a 1 to it does
+ trigger the forced link state event, even though reading it
+ always returns a 0. */
+ uint32_t reserved_8_14 : 7;
+ uint32_t link_num : 8; /**< Link Number
+ Not used for Endpoint */
+#else
+ uint32_t link_num : 8;
+ uint32_t reserved_8_14 : 7;
+ uint32_t force_link : 1;
+ uint32_t link_state : 6;
+ uint32_t reserved_22_23 : 2;
+ uint32_t lpec : 8;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg450_s cn52xx;
+ struct cvmx_pcieepx_cfg450_s cn52xxp1;
+ struct cvmx_pcieepx_cfg450_s cn56xx;
+ struct cvmx_pcieepx_cfg450_s cn56xxp1;
+ struct cvmx_pcieepx_cfg450_s cn61xx;
+ struct cvmx_pcieepx_cfg450_s cn63xx;
+ struct cvmx_pcieepx_cfg450_s cn63xxp1;
+ struct cvmx_pcieepx_cfg450_s cn66xx;
+ struct cvmx_pcieepx_cfg450_s cn68xx;
+ struct cvmx_pcieepx_cfg450_s cn68xxp1;
+ struct cvmx_pcieepx_cfg450_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg450 cvmx_pcieepx_cfg450_t;
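
A hedged sketch of the force-link sequence the FORCE_LINK comment describes,
using the HOT_RESET encoding from the state table above and the same assumed
SDK helpers:

    #include "cvmx.h"
    #include "cvmx-pcie.h"

    /* Sketch: force the LTSSM into Hot Reset. FORCE_LINK is a self-clearing
       pulse (always reads 0), so a read-modify-write is sufficient. */
    static void force_hot_reset(int pcie_port)
    {
        cvmx_pcieepx_cfg450_t pfl;

        pfl.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG450(pcie_port));
        pfl.s.link_state = 0x1f; /* HOT_RESET, per the encoding table */
        pfl.s.force_link = 1;    /* pulse: triggers link re-negotiation */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG450(pcie_port), pfl.u32);
    }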
+
+/**
+ * cvmx_pcieep#_cfg451
+ *
+ * PCIE_CFG451 = Four hundred fifty-second 32-bits of PCIE type 0 config space
+ * (Ack Frequency Register)
+ */
+union cvmx_pcieepx_cfg451 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg451_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_31_31 : 1;
+ uint32_t easpml1 : 1; /**< Enter ASPM L1 without receive in L0s
+ Allow core to enter ASPM L1 even when link partner did
+ not go to L0s (receive is not in L0s).
+ When not set, core goes to ASPM L1 only after idle period
+ during which both receive and transmit are in L0s. */
+ uint32_t l1el : 3; /**< L1 Entrance Latency
+ Values correspond to:
+ o 000: 1 ms
+ o 001: 2 ms
+ o 010: 4 ms
+ o 011: 8 ms
+ o 100: 16 ms
+ o 101: 32 ms
+ o 110 or 111: 64 ms */
+ uint32_t l0el : 3; /**< L0s Entrance Latency
+ Values correspond to:
+ o 000: 1 ms
+ o 001: 2 ms
+ o 010: 3 ms
+ o 011: 4 ms
+ o 100: 5 ms
+ o 101: 6 ms
+ o 110 or 111: 7 ms */
+ uint32_t n_fts_cc : 8; /**< N_FTS when common clock is used.
+ The number of Fast Training Sequence ordered sets to be
+ transmitted when transitioning from L0s to L0. The maximum
+ number of FTS ordered-sets that a component can request is 255.
+ Note: A value of zero is not supported; a value of
+ zero can cause the LTSSM to go into the recovery state
+ when exiting from L0s. */
+ uint32_t n_fts : 8; /**< N_FTS
+ The number of Fast Training Sequence ordered sets to be
+ transmitted when transitioning from L0s to L0. The maximum
+ number of FTS ordered-sets that a component can request is 255.
+ Note: A value of zero is not supported; a value of
+ zero can cause the LTSSM to go into the recovery state
+ when exiting from L0s. */
+ uint32_t ack_freq : 8; /**< Ack Frequency
+ The number of pending Acks to accumulate (up to 255) before
+ sending an Ack DLLP. */
+#else
+ uint32_t ack_freq : 8;
+ uint32_t n_fts : 8;
+ uint32_t n_fts_cc : 8;
+ uint32_t l0el : 3;
+ uint32_t l1el : 3;
+ uint32_t easpml1 : 1;
+ uint32_t reserved_31_31 : 1;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg451_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_30_31 : 2;
+ uint32_t l1el : 3; /**< L1 Entrance Latency
+ Values correspond to:
+ o 000: 1 ms
+ o 001: 2 ms
+ o 010: 4 ms
+ o 011: 8 ms
+ o 100: 16 ms
+ o 101: 32 ms
+ o 110 or 111: 64 ms */
+ uint32_t l0el : 3; /**< L0s Entrance Latency
+ Values correspond to:
+ o 000: 1 ms
+ o 001: 2 ms
+ o 010: 3 ms
+ o 011: 4 ms
+ o 100: 5 ms
+ o 101: 6 ms
+ o 110 or 111: 7 ms */
+ uint32_t n_fts_cc : 8; /**< N_FTS when common clock is used.
+ The number of Fast Training Sequence ordered sets to be
+ transmitted when transitioning from L0s to L0. The maximum
+ number of FTS ordered-sets that a component can request is 255.
+ Note: A value of zero is not supported; a value of
+ zero can cause the LTSSM to go into the recovery state
+ when exiting from L0s. */
+ uint32_t n_fts : 8; /**< N_FTS
+ The number of Fast Training Sequence ordered sets to be
+ transmitted when transitioning from L0s to L0. The maximum
+ number of FTS ordered-sets that a component can request is 255.
+ Note: A value of zero is not supported; a value of
+ zero can cause the LTSSM to go into the recovery state
+ when exiting from L0s. */
+ uint32_t ack_freq : 8; /**< Ack Frequency
+ The number of pending Acks to accumulate (up to 255) before
+ sending an Ack DLLP. */
+#else
+ uint32_t ack_freq : 8;
+ uint32_t n_fts : 8;
+ uint32_t n_fts_cc : 8;
+ uint32_t l0el : 3;
+ uint32_t l1el : 3;
+ uint32_t reserved_30_31 : 2;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg451_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg451_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg451_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg451_s cn61xx;
+ struct cvmx_pcieepx_cfg451_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg451_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg451_s cn66xx;
+ struct cvmx_pcieepx_cfg451_s cn68xx;
+ struct cvmx_pcieepx_cfg451_s cn68xxp1;
+ struct cvmx_pcieepx_cfg451_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg451 cvmx_pcieepx_cfg451_t;
+
+/**
+ * cvmx_pcieep#_cfg452
+ *
+ * PCIE_CFG452 = Four hundred fifty-third 32-bits of PCIE type 0 config space
+ * (Port Link Control Register)
+ */
+union cvmx_pcieepx_cfg452 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg452_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_26_31 : 6;
+ uint32_t eccrc : 1; /**< Enable Corrupted CRC
+ Causes corrupt LCRC for TLPs when set,
+ using the pattern contained in the Other Message register.
+ This is a test feature, not to be used in normal operation. */
+ uint32_t reserved_22_24 : 3;
+ uint32_t lme : 6; /**< Link Mode Enable
+ o 000001: x1
+ o 000011: x2 (not supported)
+ o 000111: x4 (not supported)
+ o 001111: x8 (not supported)
+ o 011111: x16 (not supported)
+ o 111111: x32 (not supported)
+ This field indicates the MAXIMUM number of lanes supported
+ by the PCIe port.
+ See also MLW.
+ (Note: The value of this field does NOT indicate the number
+ of lanes in use by the PCIe. LME sets the max number of lanes
+ in the PCIe core that COULD be used. As per the PCIe specs,
+ the PCIe core can negotiate a smaller link width) */
+ uint32_t reserved_8_15 : 8;
+ uint32_t flm : 1; /**< Fast Link Mode
+ Sets all internal timers to fast mode for simulation purposes.
+ If, during an EEPROM load, the first word loaded is 0xffffffff,
+ then the EEPROM load will be terminated and this bit will be set. */
+ uint32_t reserved_6_6 : 1;
+ uint32_t dllle : 1; /**< DLL Link Enable
+ Enables Link initialization. If DLL Link Enable = 0, the PCI
+ Express bus does not transmit InitFC DLLPs and does not
+ establish a Link. */
+ uint32_t reserved_4_4 : 1;
+ uint32_t ra : 1; /**< Reset Assert
+ Triggers a recovery and forces the LTSSM to the Hot Reset
+ state (downstream port only). */
+ uint32_t le : 1; /**< Loopback Enable
+ Initiate loopback mode as a master. On a 0->1 transition,
+ the PCIe core sends TS ordered sets with the loopback bit set
+ to cause the link partner to enter into loopback mode as a
+ slave. Normal transmission is not possible when LE=1. To exit
+ loopback mode, take the link through a reset sequence. */
+ uint32_t sd : 1; /**< Scramble Disable
+ Turns off data scrambling. */
+ uint32_t omr : 1; /**< Other Message Request
+ When software writes a `1' to this bit, the PCI Express bus
+ transmits the Message contained in the Other Message register. */
+#else
+ uint32_t omr : 1;
+ uint32_t sd : 1;
+ uint32_t le : 1;
+ uint32_t ra : 1;
+ uint32_t reserved_4_4 : 1;
+ uint32_t dllle : 1;
+ uint32_t reserved_6_6 : 1;
+ uint32_t flm : 1;
+ uint32_t reserved_8_15 : 8;
+ uint32_t lme : 6;
+ uint32_t reserved_22_24 : 3;
+ uint32_t eccrc : 1;
+ uint32_t reserved_26_31 : 6;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg452_s cn52xx;
+ struct cvmx_pcieepx_cfg452_s cn52xxp1;
+ struct cvmx_pcieepx_cfg452_s cn56xx;
+ struct cvmx_pcieepx_cfg452_s cn56xxp1;
+ struct cvmx_pcieepx_cfg452_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_22_31 : 10;
+ uint32_t lme : 6; /**< Link Mode Enable
+ o 000001: x1
+ o 000011: x2
+ o 000111: x4
+ o 001111: x8 (not supported)
+ o 011111: x16 (not supported)
+ o 111111: x32 (not supported)
+ This field indicates the MAXIMUM number of lanes supported
+ by the PCIe port. The value can be set to less than 0x7
+ to limit the number of lanes the PCIe will attempt to use.
+ If the value of 0x7 set by the HW is not desired,
+ this field can be programmed to a smaller value (e.g. by EEPROM load).
+ See also MLW.
+ (Note: The value of this field does NOT indicate the number
+ of lanes in use by the PCIe. LME sets the max number of lanes
+ in the PCIe core that COULD be used. As per the PCIe specs,
+ the PCIe core can negotiate a smaller link width, so all
+ of x4, x2, and x1 are supported when LME=0x7,
+ for example.) */
+ uint32_t reserved_8_15 : 8;
+ uint32_t flm : 1; /**< Fast Link Mode
+ Sets all internal timers to fast mode for simulation purposes.
+ If, during an EEPROM load, the first word loaded is 0xffffffff,
+ then the EEPROM load will be terminated and this bit will be set. */
+ uint32_t reserved_6_6 : 1;
+ uint32_t dllle : 1; /**< DLL Link Enable
+ Enables Link initialization. If DLL Link Enable = 0, the PCI
+ Express bus does not transmit InitFC DLLPs and does not
+ establish a Link. */
+ uint32_t reserved_4_4 : 1;
+ uint32_t ra : 1; /**< Reset Assert
+ Triggers a recovery and forces the LTSSM to the Hot Reset
+ state (downstream port only). */
+ uint32_t le : 1; /**< Loopback Enable
+ Initiate loopback mode as a master. On a 0->1 transition,
+ the PCIe core sends TS ordered sets with the loopback bit set
+ to cause the link partner to enter into loopback mode as a
+ slave. Normal transmission is not possible when LE=1. To exit
+ loopback mode, take the link through a reset sequence. */
+ uint32_t sd : 1; /**< Scramble Disable
+ Turns off data scrambling. */
+ uint32_t omr : 1; /**< Other Message Request
+ When software writes a `1' to this bit, the PCI Express bus
+ transmits the Message contained in the Other Message register. */
+#else
+ uint32_t omr : 1;
+ uint32_t sd : 1;
+ uint32_t le : 1;
+ uint32_t ra : 1;
+ uint32_t reserved_4_4 : 1;
+ uint32_t dllle : 1;
+ uint32_t reserved_6_6 : 1;
+ uint32_t flm : 1;
+ uint32_t reserved_8_15 : 8;
+ uint32_t lme : 6;
+ uint32_t reserved_22_31 : 10;
+#endif
+ } cn61xx;
+ struct cvmx_pcieepx_cfg452_s cn63xx;
+ struct cvmx_pcieepx_cfg452_s cn63xxp1;
+ struct cvmx_pcieepx_cfg452_cn61xx cn66xx;
+ struct cvmx_pcieepx_cfg452_cn61xx cn68xx;
+ struct cvmx_pcieepx_cfg452_cn61xx cn68xxp1;
+ struct cvmx_pcieepx_cfg452_cn61xx cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg452 cvmx_pcieepx_cfg452_t;
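
Because LME only caps the width the core will advertise, and the link can
always negotiate down from it, shrinking LME is the supported way to exclude
lanes. A minimal sketch that restricts the port to x1 (same assumed helpers):

    #include "cvmx.h"
    #include "cvmx-pcie.h"

    /* Sketch: cap the maximum advertised link width at x1. */
    static void limit_link_width_x1(int pcie_port)
    {
        cvmx_pcieepx_cfg452_t plc;

        plc.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG452(pcie_port));
        plc.s.lme = 0x1; /* 000001: x1, per the encoding above */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG452(pcie_port), plc.u32);
    }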
+
+/**
+ * cvmx_pcieep#_cfg453
+ *
+ * PCIE_CFG453 = Four hundred fifty-fourth 32-bits of PCIE type 0 config space
+ * (Lane Skew Register)
+ */
+union cvmx_pcieepx_cfg453 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg453_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dlld : 1; /**< Disable Lane-to-Lane Deskew
+ Disables the internal Lane-to-Lane deskew logic. */
+ uint32_t reserved_26_30 : 5;
+ uint32_t ack_nak : 1; /**< Ack/Nak Disable
+ Prevents the PCI Express bus from sending Ack and Nak DLLPs. */
+ uint32_t fcd : 1; /**< Flow Control Disable
+ Prevents the PCI Express bus from sending FC DLLPs. */
+ uint32_t ilst : 24; /**< Insert Lane Skew for Transmit
+ Causes skew between lanes for test purposes. There are three
+ bits per Lane. The value is in units of one symbol time. For
+ example, the value 010b for a Lane forces a skew of two symbol
+ times for that Lane. The maximum skew value for any Lane is 5
+ symbol times. */
+#else
+ uint32_t ilst : 24;
+ uint32_t fcd : 1;
+ uint32_t ack_nak : 1;
+ uint32_t reserved_26_30 : 5;
+ uint32_t dlld : 1;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg453_s cn52xx;
+ struct cvmx_pcieepx_cfg453_s cn52xxp1;
+ struct cvmx_pcieepx_cfg453_s cn56xx;
+ struct cvmx_pcieepx_cfg453_s cn56xxp1;
+ struct cvmx_pcieepx_cfg453_s cn61xx;
+ struct cvmx_pcieepx_cfg453_s cn63xx;
+ struct cvmx_pcieepx_cfg453_s cn63xxp1;
+ struct cvmx_pcieepx_cfg453_s cn66xx;
+ struct cvmx_pcieepx_cfg453_s cn68xx;
+ struct cvmx_pcieepx_cfg453_s cn68xxp1;
+ struct cvmx_pcieepx_cfg453_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg453 cvmx_pcieepx_cfg453_t;
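
ILST packs three bits of skew per lane (eight lanes in 24 bits), so reading
one lane's setting back out is plain bit arithmetic; a small sketch:

    #include <stdint.h>

    /* Sketch: per-lane transmit skew from ILST, in symbol times
       (the description caps any lane at 5). */
    static unsigned tx_skew_for_lane(uint32_t ilst, unsigned lane) /* 0..7 */
    {
        return (ilst >> (3 * lane)) & 0x7;
    }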
+
+/**
+ * cvmx_pcieep#_cfg454
+ *
+ * PCIE_CFG454 = Four hundred fifty-fifth 32-bits of PCIE type 0 config space
+ * (Symbol Number Register)
+ */
+union cvmx_pcieepx_cfg454 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg454_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cx_nfunc : 3; /**< Number of Functions (minus 1)
+ Configuration Requests targeted at function numbers above this
+ value will be returned as an Unsupported Request. */
+ uint32_t tmfcwt : 5; /**< Timer Modifier for Flow Control Watchdog Timer
+ Increases the timer value for the Flow Control watchdog timer,
+ in increments of 16 clock cycles. */
+ uint32_t tmanlt : 5; /**< Timer Modifier for Ack/Nak Latency Timer
+ Increases the timer value for the Ack/Nak latency timer, in
+ increments of 64 clock cycles. */
+ uint32_t tmrt : 5; /**< Timer Modifier for Replay Timer
+ Increases the timer value for the replay timer, in increments
+ of 64 clock cycles. */
+ uint32_t reserved_11_13 : 3;
+ uint32_t nskps : 3; /**< Number of SKP Symbols */
+ uint32_t reserved_0_7 : 8;
+#else
+ uint32_t reserved_0_7 : 8;
+ uint32_t nskps : 3;
+ uint32_t reserved_11_13 : 3;
+ uint32_t tmrt : 5;
+ uint32_t tmanlt : 5;
+ uint32_t tmfcwt : 5;
+ uint32_t cx_nfunc : 3;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg454_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_29_31 : 3;
+ uint32_t tmfcwt : 5; /**< Timer Modifier for Flow Control Watchdog Timer
+ Increases the timer value for the Flow Control watchdog timer,
+ in increments of 16 clock cycles. */
+ uint32_t tmanlt : 5; /**< Timer Modifier for Ack/Nak Latency Timer
+ Increases the timer value for the Ack/Nak latency timer, in
+ increments of 64 clock cycles. */
+ uint32_t tmrt : 5; /**< Timer Modifier for Replay Timer
+ Increases the timer value for the replay timer, in increments
+ of 64 clock cycles. */
+ uint32_t reserved_11_13 : 3;
+ uint32_t nskps : 3; /**< Number of SKP Symbols */
+ uint32_t reserved_4_7 : 4;
+ uint32_t ntss : 4; /**< Number of TS Symbols
+ Sets the number of TS identifier symbols that are sent in TS1
+ and TS2 ordered sets. */
+#else
+ uint32_t ntss : 4;
+ uint32_t reserved_4_7 : 4;
+ uint32_t nskps : 3;
+ uint32_t reserved_11_13 : 3;
+ uint32_t tmrt : 5;
+ uint32_t tmanlt : 5;
+ uint32_t tmfcwt : 5;
+ uint32_t reserved_29_31 : 3;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg454_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg454_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg454_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg454_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cx_nfunc : 3; /**< Number of Functions (minus 1)
+ Configuration Requests targeted at function numbers above this
+ value will be returned as an Unsupported Request. */
+ uint32_t tmfcwt : 5; /**< Timer Modifier for Flow Control Watchdog Timer
+ Increases the timer value for the Flow Control watchdog timer,
+ in increments of 16 clock cycles. */
+ uint32_t tmanlt : 5; /**< Timer Modifier for Ack/Nak Latency Timer
+ Increases the timer value for the Ack/Nak latency timer, in
+ increments of 64 clock cycles. */
+ uint32_t tmrt : 5; /**< Timer Modifier for Replay Timer
+ Increases the timer value for the replay timer, in increments
+ of 64 clock cycles. */
+ uint32_t reserved_8_13 : 6;
+ uint32_t mfuncn : 8; /**< Max Number of Functions Supported */
+#else
+ uint32_t mfuncn : 8;
+ uint32_t reserved_8_13 : 6;
+ uint32_t tmrt : 5;
+ uint32_t tmanlt : 5;
+ uint32_t tmfcwt : 5;
+ uint32_t cx_nfunc : 3;
+#endif
+ } cn61xx;
+ struct cvmx_pcieepx_cfg454_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg454_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg454_cn61xx cn66xx;
+ struct cvmx_pcieepx_cfg454_cn61xx cn68xx;
+ struct cvmx_pcieepx_cfg454_cn52xx cn68xxp1;
+ struct cvmx_pcieepx_cfg454_cn61xx cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg454 cvmx_pcieepx_cfg454_t;
+
+/**
+ * cvmx_pcieep#_cfg455
+ *
+ * PCIE_CFG455 = Four hundred fifty-sixth 32-bits of PCIE type 0 config space
+ * (Symbol Timer Register/Filter Mask Register 1)
+ */
+union cvmx_pcieepx_cfg455 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg455_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t m_cfg0_filt : 1; /**< Mask filtering of received Configuration Requests (RC mode only) */
+ uint32_t m_io_filt : 1; /**< Mask filtering of received I/O Requests (RC mode only) */
+ uint32_t msg_ctrl : 1; /**< Message Control
+ The application must not change this field. */
+ uint32_t m_cpl_ecrc_filt : 1; /**< Mask ECRC error filtering for Completions */
+ uint32_t m_ecrc_filt : 1; /**< Mask ECRC error filtering */
+ uint32_t m_cpl_len_err : 1; /**< Mask Length mismatch error for received Completions */
+ uint32_t m_cpl_attr_err : 1; /**< Mask Attributes mismatch error for received Completions */
+ uint32_t m_cpl_tc_err : 1; /**< Mask Traffic Class mismatch error for received Completions */
+ uint32_t m_cpl_fun_err : 1; /**< Mask function mismatch error for received Completions */
+ uint32_t m_cpl_rid_err : 1; /**< Mask Requester ID mismatch error for received Completions */
+ uint32_t m_cpl_tag_err : 1; /**< Mask Tag error rules for received Completions */
+ uint32_t m_lk_filt : 1; /**< Mask Locked Request filtering */
+ uint32_t m_cfg1_filt : 1; /**< Mask Type 1 Configuration Request filtering */
+ uint32_t m_bar_match : 1; /**< Mask BAR match filtering */
+ uint32_t m_pois_filt : 1; /**< Mask poisoned TLP filtering */
+ uint32_t m_fun : 1; /**< Mask function */
+ uint32_t dfcwt : 1; /**< Disable FC Watchdog Timer */
+ uint32_t reserved_11_14 : 4;
+ uint32_t skpiv : 11; /**< SKP Interval Value */
+#else
+ uint32_t skpiv : 11;
+ uint32_t reserved_11_14 : 4;
+ uint32_t dfcwt : 1;
+ uint32_t m_fun : 1;
+ uint32_t m_pois_filt : 1;
+ uint32_t m_bar_match : 1;
+ uint32_t m_cfg1_filt : 1;
+ uint32_t m_lk_filt : 1;
+ uint32_t m_cpl_tag_err : 1;
+ uint32_t m_cpl_rid_err : 1;
+ uint32_t m_cpl_fun_err : 1;
+ uint32_t m_cpl_tc_err : 1;
+ uint32_t m_cpl_attr_err : 1;
+ uint32_t m_cpl_len_err : 1;
+ uint32_t m_ecrc_filt : 1;
+ uint32_t m_cpl_ecrc_filt : 1;
+ uint32_t msg_ctrl : 1;
+ uint32_t m_io_filt : 1;
+ uint32_t m_cfg0_filt : 1;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg455_s cn52xx;
+ struct cvmx_pcieepx_cfg455_s cn52xxp1;
+ struct cvmx_pcieepx_cfg455_s cn56xx;
+ struct cvmx_pcieepx_cfg455_s cn56xxp1;
+ struct cvmx_pcieepx_cfg455_s cn61xx;
+ struct cvmx_pcieepx_cfg455_s cn63xx;
+ struct cvmx_pcieepx_cfg455_s cn63xxp1;
+ struct cvmx_pcieepx_cfg455_s cn66xx;
+ struct cvmx_pcieepx_cfg455_s cn68xx;
+ struct cvmx_pcieepx_cfg455_s cn68xxp1;
+ struct cvmx_pcieepx_cfg455_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg455 cvmx_pcieepx_cfg455_t;
+
+/**
+ * cvmx_pcieep#_cfg456
+ *
+ * PCIE_CFG456 = Four hundred fifty-seventh 32-bits of PCIE type 0 config space
+ * (Filter Mask Register 2)
+ */
+union cvmx_pcieepx_cfg456 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg456_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_4_31 : 28;
+ uint32_t m_handle_flush : 1; /**< Mask Core Filter to handle flush request */
+ uint32_t m_dabort_4ucpl : 1; /**< Mask DLLP abort for unexpected CPL */
+ uint32_t m_vend1_drp : 1; /**< Mask Vendor MSG Type 1 dropped silently */
+ uint32_t m_vend0_drp : 1; /**< Mask Vendor MSG Type 0 dropped with UR error reporting. */
+#else
+ uint32_t m_vend0_drp : 1;
+ uint32_t m_vend1_drp : 1;
+ uint32_t m_dabort_4ucpl : 1;
+ uint32_t m_handle_flush : 1;
+ uint32_t reserved_4_31 : 28;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg456_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_2_31 : 30;
+ uint32_t m_vend1_drp : 1; /**< Mask Vendor MSG Type 1 dropped silently */
+ uint32_t m_vend0_drp : 1; /**< Mask Vendor MSG Type 0 dropped with UR error reporting. */
+#else
+ uint32_t m_vend0_drp : 1;
+ uint32_t m_vend1_drp : 1;
+ uint32_t reserved_2_31 : 30;
+#endif
+ } cn52xx;
+ struct cvmx_pcieepx_cfg456_cn52xx cn52xxp1;
+ struct cvmx_pcieepx_cfg456_cn52xx cn56xx;
+ struct cvmx_pcieepx_cfg456_cn52xx cn56xxp1;
+ struct cvmx_pcieepx_cfg456_s cn61xx;
+ struct cvmx_pcieepx_cfg456_cn52xx cn63xx;
+ struct cvmx_pcieepx_cfg456_cn52xx cn63xxp1;
+ struct cvmx_pcieepx_cfg456_s cn66xx;
+ struct cvmx_pcieepx_cfg456_s cn68xx;
+ struct cvmx_pcieepx_cfg456_cn52xx cn68xxp1;
+ struct cvmx_pcieepx_cfg456_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg456 cvmx_pcieepx_cfg456_t;
+
+/**
+ * cvmx_pcieep#_cfg458
+ *
+ * PCIE_CFG458 = Four hundred fifty-ninth 32-bits of PCIE type 0 config space
+ * (Debug Register 0)
+ */
+union cvmx_pcieepx_cfg458 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg458_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dbg_info_l32 : 32; /**< Debug Info Lower 32 Bits */
+#else
+ uint32_t dbg_info_l32 : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg458_s cn52xx;
+ struct cvmx_pcieepx_cfg458_s cn52xxp1;
+ struct cvmx_pcieepx_cfg458_s cn56xx;
+ struct cvmx_pcieepx_cfg458_s cn56xxp1;
+ struct cvmx_pcieepx_cfg458_s cn61xx;
+ struct cvmx_pcieepx_cfg458_s cn63xx;
+ struct cvmx_pcieepx_cfg458_s cn63xxp1;
+ struct cvmx_pcieepx_cfg458_s cn66xx;
+ struct cvmx_pcieepx_cfg458_s cn68xx;
+ struct cvmx_pcieepx_cfg458_s cn68xxp1;
+ struct cvmx_pcieepx_cfg458_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg458 cvmx_pcieepx_cfg458_t;
+
+/**
+ * cvmx_pcieep#_cfg459
+ *
+ * PCIE_CFG459 = Four hundred sixtieth 32-bits of PCIE type 0 config space
+ * (Debug Register 1)
+ */
+union cvmx_pcieepx_cfg459 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg459_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dbg_info_u32 : 32; /**< Debug Info Upper 32 Bits */
+#else
+ uint32_t dbg_info_u32 : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg459_s cn52xx;
+ struct cvmx_pcieepx_cfg459_s cn52xxp1;
+ struct cvmx_pcieepx_cfg459_s cn56xx;
+ struct cvmx_pcieepx_cfg459_s cn56xxp1;
+ struct cvmx_pcieepx_cfg459_s cn61xx;
+ struct cvmx_pcieepx_cfg459_s cn63xx;
+ struct cvmx_pcieepx_cfg459_s cn63xxp1;
+ struct cvmx_pcieepx_cfg459_s cn66xx;
+ struct cvmx_pcieepx_cfg459_s cn68xx;
+ struct cvmx_pcieepx_cfg459_s cn68xxp1;
+ struct cvmx_pcieepx_cfg459_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg459 cvmx_pcieepx_cfg459_t;
+
+/**
+ * cvmx_pcieep#_cfg460
+ *
+ * PCIE_CFG460 = Four hundred sixty-first 32-bits of PCIE type 0 config space
+ * (Transmit Posted FC Credit Status)
+ */
+union cvmx_pcieepx_cfg460 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg460_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t tphfcc : 8; /**< Transmit Posted Header FC Credits
+ The Posted Header credits advertised by the receiver at the
+ other end of the Link, updated with each UpdateFC DLLP. */
+ uint32_t tpdfcc : 12; /**< Transmit Posted Data FC Credits
+ The Posted Data credits advertised by the receiver at the other
+ end of the Link, updated with each UpdateFC DLLP. */
+#else
+ uint32_t tpdfcc : 12;
+ uint32_t tphfcc : 8;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg460_s cn52xx;
+ struct cvmx_pcieepx_cfg460_s cn52xxp1;
+ struct cvmx_pcieepx_cfg460_s cn56xx;
+ struct cvmx_pcieepx_cfg460_s cn56xxp1;
+ struct cvmx_pcieepx_cfg460_s cn61xx;
+ struct cvmx_pcieepx_cfg460_s cn63xx;
+ struct cvmx_pcieepx_cfg460_s cn63xxp1;
+ struct cvmx_pcieepx_cfg460_s cn66xx;
+ struct cvmx_pcieepx_cfg460_s cn68xx;
+ struct cvmx_pcieepx_cfg460_s cn68xxp1;
+ struct cvmx_pcieepx_cfg460_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg460 cvmx_pcieepx_cfg460_t;
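
Decoding the credit fields is a straightforward union read; a sketch using
the assumed config-space helpers plus the SDK's cvmx_dprintf():

    #include "cvmx.h"
    #include "cvmx-pcie.h"

    /* Sketch: print the posted credits last advertised by the link partner. */
    static void dump_posted_credits(int pcie_port)
    {
        cvmx_pcieepx_cfg460_t fc;

        fc.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG460(pcie_port));
        cvmx_dprintf("posted hdr credits %u, posted data credits %u\n",
                     (unsigned)fc.s.tphfcc, (unsigned)fc.s.tpdfcc);
    }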
+
+/**
+ * cvmx_pcieep#_cfg461
+ *
+ * PCIE_CFG461 = Four hundred sixty-second 32-bits of PCIE type 0 config space
+ * (Transmit Non-Posted FC Credit Status)
+ */
+union cvmx_pcieepx_cfg461 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg461_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t tchfcc : 8; /**< Transmit Non-Posted Header FC Credits
+ The Non-Posted Header credits advertised by the receiver at the
+ other end of the Link, updated with each UpdateFC DLLP. */
+ uint32_t tcdfcc : 12; /**< Transmit Non-Posted Data FC Credits
+ The Non-Posted Data credits advertised by the receiver at the
+ other end of the Link, updated with each UpdateFC DLLP. */
+#else
+ uint32_t tcdfcc : 12;
+ uint32_t tchfcc : 8;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg461_s cn52xx;
+ struct cvmx_pcieepx_cfg461_s cn52xxp1;
+ struct cvmx_pcieepx_cfg461_s cn56xx;
+ struct cvmx_pcieepx_cfg461_s cn56xxp1;
+ struct cvmx_pcieepx_cfg461_s cn61xx;
+ struct cvmx_pcieepx_cfg461_s cn63xx;
+ struct cvmx_pcieepx_cfg461_s cn63xxp1;
+ struct cvmx_pcieepx_cfg461_s cn66xx;
+ struct cvmx_pcieepx_cfg461_s cn68xx;
+ struct cvmx_pcieepx_cfg461_s cn68xxp1;
+ struct cvmx_pcieepx_cfg461_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg461 cvmx_pcieepx_cfg461_t;
+
+/**
+ * cvmx_pcieep#_cfg462
+ *
+ * PCIE_CFG462 = Four hundred sixty-third 32-bits of PCIE type 0 config space
+ * (Transmit Completion FC Credit Status)
+ */
+union cvmx_pcieepx_cfg462 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg462_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t tchfcc : 8; /**< Transmit Completion Header FC Credits
+ The Completion Header credits advertised by the receiver at the
+ other end of the Link, updated with each UpdateFC DLLP. */
+ uint32_t tcdfcc : 12; /**< Transmit Completion Data FC Credits
+ The Completion Data credits advertised by the receiver at the
+ other end of the Link, updated with each UpdateFC DLLP. */
+#else
+ uint32_t tcdfcc : 12;
+ uint32_t tchfcc : 8;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg462_s cn52xx;
+ struct cvmx_pcieepx_cfg462_s cn52xxp1;
+ struct cvmx_pcieepx_cfg462_s cn56xx;
+ struct cvmx_pcieepx_cfg462_s cn56xxp1;
+ struct cvmx_pcieepx_cfg462_s cn61xx;
+ struct cvmx_pcieepx_cfg462_s cn63xx;
+ struct cvmx_pcieepx_cfg462_s cn63xxp1;
+ struct cvmx_pcieepx_cfg462_s cn66xx;
+ struct cvmx_pcieepx_cfg462_s cn68xx;
+ struct cvmx_pcieepx_cfg462_s cn68xxp1;
+ struct cvmx_pcieepx_cfg462_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg462 cvmx_pcieepx_cfg462_t;
+
+/**
+ * cvmx_pcieep#_cfg463
+ *
+ * PCIE_CFG463 = Four hundred sixty-fourth 32-bits of PCIE type 0 config space
+ * (Queue Status)
+ */
+union cvmx_pcieepx_cfg463 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg463_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_3_31 : 29;
+ uint32_t rqne : 1; /**< Received Queue Not Empty
+ Indicates there is data in one or more of the receive buffers. */
+ uint32_t trbne : 1; /**< Transmit Retry Buffer Not Empty
+ Indicates that there is data in the transmit retry buffer. */
+ uint32_t rtlpfccnr : 1; /**< Received TLP FC Credits Not Returned
+ Indicates that the PCI Express bus has sent a TLP but has not
+ yet received an UpdateFC DLLP indicating that the credits for
+ that TLP have been restored by the receiver at the other end of
+ the Link. */
+#else
+ uint32_t rtlpfccnr : 1;
+ uint32_t trbne : 1;
+ uint32_t rqne : 1;
+ uint32_t reserved_3_31 : 29;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg463_s cn52xx;
+ struct cvmx_pcieepx_cfg463_s cn52xxp1;
+ struct cvmx_pcieepx_cfg463_s cn56xx;
+ struct cvmx_pcieepx_cfg463_s cn56xxp1;
+ struct cvmx_pcieepx_cfg463_s cn61xx;
+ struct cvmx_pcieepx_cfg463_s cn63xx;
+ struct cvmx_pcieepx_cfg463_s cn63xxp1;
+ struct cvmx_pcieepx_cfg463_s cn66xx;
+ struct cvmx_pcieepx_cfg463_s cn68xx;
+ struct cvmx_pcieepx_cfg463_s cn68xxp1;
+ struct cvmx_pcieepx_cfg463_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg463 cvmx_pcieepx_cfg463_t;
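
A typical use of these status bits is to poll TRBNE until the transmit retry
buffer drains before disturbing the link; a hedged sketch (no timeout shown,
a real caller would bound the loop):

    #include "cvmx.h"
    #include "cvmx-pcie.h"

    /* Sketch: spin until the transmit retry buffer is empty. */
    static void wait_tx_retry_buffer_empty(int pcie_port)
    {
        cvmx_pcieepx_cfg463_t qs;

        do {
            qs.u32 = cvmx_pcie_cfgx_read(pcie_port,
                                         CVMX_PCIEEPX_CFG463(pcie_port));
        } while (qs.s.trbne); /* 1 while data remains in the retry buffer */
    }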
+
+/**
+ * cvmx_pcieep#_cfg464
+ *
+ * PCIE_CFG464 = Four hundred sixty-fifth 32-bits of PCIE type 0 config space
+ * (VC Transmit Arbitration Register 1)
+ */
+union cvmx_pcieepx_cfg464 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg464_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t wrr_vc3 : 8; /**< WRR Weight for VC3 */
+ uint32_t wrr_vc2 : 8; /**< WRR Weight for VC2 */
+ uint32_t wrr_vc1 : 8; /**< WRR Weight for VC1 */
+ uint32_t wrr_vc0 : 8; /**< WRR Weight for VC0 */
+#else
+ uint32_t wrr_vc0 : 8;
+ uint32_t wrr_vc1 : 8;
+ uint32_t wrr_vc2 : 8;
+ uint32_t wrr_vc3 : 8;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg464_s cn52xx;
+ struct cvmx_pcieepx_cfg464_s cn52xxp1;
+ struct cvmx_pcieepx_cfg464_s cn56xx;
+ struct cvmx_pcieepx_cfg464_s cn56xxp1;
+ struct cvmx_pcieepx_cfg464_s cn61xx;
+ struct cvmx_pcieepx_cfg464_s cn63xx;
+ struct cvmx_pcieepx_cfg464_s cn63xxp1;
+ struct cvmx_pcieepx_cfg464_s cn66xx;
+ struct cvmx_pcieepx_cfg464_s cn68xx;
+ struct cvmx_pcieepx_cfg464_s cn68xxp1;
+ struct cvmx_pcieepx_cfg464_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg464 cvmx_pcieepx_cfg464_t;
+
+/**
+ * cvmx_pcieep#_cfg465
+ *
+ * PCIE_CFG465 = Four hundred sixty-sixth 32-bits of PCIE type 0 config space
+ * (VC Transmit Arbitration Register 2)
+ */
+union cvmx_pcieepx_cfg465 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg465_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t wrr_vc7 : 8; /**< WRR Weight for VC7 */
+ uint32_t wrr_vc6 : 8; /**< WRR Weight for VC6 */
+ uint32_t wrr_vc5 : 8; /**< WRR Weight for VC5 */
+ uint32_t wrr_vc4 : 8; /**< WRR Weight for VC4 */
+#else
+ uint32_t wrr_vc4 : 8;
+ uint32_t wrr_vc5 : 8;
+ uint32_t wrr_vc6 : 8;
+ uint32_t wrr_vc7 : 8;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg465_s cn52xx;
+ struct cvmx_pcieepx_cfg465_s cn52xxp1;
+ struct cvmx_pcieepx_cfg465_s cn56xx;
+ struct cvmx_pcieepx_cfg465_s cn56xxp1;
+ struct cvmx_pcieepx_cfg465_s cn61xx;
+ struct cvmx_pcieepx_cfg465_s cn63xx;
+ struct cvmx_pcieepx_cfg465_s cn63xxp1;
+ struct cvmx_pcieepx_cfg465_s cn66xx;
+ struct cvmx_pcieepx_cfg465_s cn68xx;
+ struct cvmx_pcieepx_cfg465_s cn68xxp1;
+ struct cvmx_pcieepx_cfg465_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg465 cvmx_pcieepx_cfg465_t;
+
+/**
+ * cvmx_pcieep#_cfg466
+ *
+ * PCIE_CFG466 = Four hundred sixty-seventh 32-bits of PCIE type 0 config space
+ * (VC0 Posted Receive Queue Control)
+ */
+union cvmx_pcieepx_cfg466 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg466_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rx_queue_order : 1; /**< VC Ordering for Receive Queues
+ Determines the VC ordering rule for the receive queues, used
+ only in the segmented-buffer configuration,
+ writable through PEM(0..1)_CFG_WR:
+ o 1: Strict ordering, higher numbered VCs have higher priority
+ o 0: Round robin
+ However, the application must not change this field. */
+ uint32_t type_ordering : 1; /**< TLP Type Ordering for VC0
+ Determines the TLP type ordering rule for VC0 receive queues,
+ used only in the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR:
+ o 1: Ordering of received TLPs follows the rules in
+ PCI Express Base Specification
+ o 0: Strict ordering for received TLPs: Posted, then
+ Completion, then Non-Posted
+ However, the application must not change this field. */
+ uint32_t reserved_24_29 : 6;
+ uint32_t queue_mode : 3; /**< VC0 Posted TLP Queue Mode
+ The operating mode of the Posted receive queue for VC0, used
+ only in the segmented-buffer configuration, writable through
+ PEM(0..1)_CFG_WR.
+ However, the application must not change this field.
+ Only one bit can be set at a time:
+ o Bit 23: Bypass
+ o Bit 22: Cut-through
+ o Bit 21: Store-and-forward */
+ uint32_t reserved_20_20 : 1;
+ uint32_t header_credits : 8; /**< VC0 Posted Header Credits
+ The number of initial Posted header credits for VC0, used for
+ all receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t data_credits : 12; /**< VC0 Posted Data Credits
+ The number of initial Posted data credits for VC0, used for all
+ receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_credits : 12;
+ uint32_t header_credits : 8;
+ uint32_t reserved_20_20 : 1;
+ uint32_t queue_mode : 3;
+ uint32_t reserved_24_29 : 6;
+ uint32_t type_ordering : 1;
+ uint32_t rx_queue_order : 1;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg466_s cn52xx;
+ struct cvmx_pcieepx_cfg466_s cn52xxp1;
+ struct cvmx_pcieepx_cfg466_s cn56xx;
+ struct cvmx_pcieepx_cfg466_s cn56xxp1;
+ struct cvmx_pcieepx_cfg466_s cn61xx;
+ struct cvmx_pcieepx_cfg466_s cn63xx;
+ struct cvmx_pcieepx_cfg466_s cn63xxp1;
+ struct cvmx_pcieepx_cfg466_s cn66xx;
+ struct cvmx_pcieepx_cfg466_s cn68xx;
+ struct cvmx_pcieepx_cfg466_s cn68xxp1;
+ struct cvmx_pcieepx_cfg466_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg466 cvmx_pcieepx_cfg466_t;
+
+/**
+ * cvmx_pcieep#_cfg467
+ *
+ * PCIE_CFG467 = Four hundred sixty-eighth 32-bits of PCIE type 0 config space
+ * (VC0 Non-Posted Receive Queue Control)
+ */
+union cvmx_pcieepx_cfg467 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg467_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t queue_mode : 3; /**< VC0 Non-Posted TLP Queue Mode
+ The operating mode of the Non-Posted receive queue for VC0,
+ used only in the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ Only one bit can be set at a time:
+ o Bit 23: Bypass
+ o Bit 22: Cut-through
+ o Bit 21: Store-and-forward
+ However, the application must not change this field. */
+ uint32_t reserved_20_20 : 1;
+ uint32_t header_credits : 8; /**< VC0 Non-Posted Header Credits
+ The number of initial Non-Posted header credits for VC0, used
+ for all receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t data_credits : 12; /**< VC0 Non-Posted Data Credits
+ The number of initial Non-Posted data credits for VC0, used for
+ all receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_credits : 12;
+ uint32_t header_credits : 8;
+ uint32_t reserved_20_20 : 1;
+ uint32_t queue_mode : 3;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg467_s cn52xx;
+ struct cvmx_pcieepx_cfg467_s cn52xxp1;
+ struct cvmx_pcieepx_cfg467_s cn56xx;
+ struct cvmx_pcieepx_cfg467_s cn56xxp1;
+ struct cvmx_pcieepx_cfg467_s cn61xx;
+ struct cvmx_pcieepx_cfg467_s cn63xx;
+ struct cvmx_pcieepx_cfg467_s cn63xxp1;
+ struct cvmx_pcieepx_cfg467_s cn66xx;
+ struct cvmx_pcieepx_cfg467_s cn68xx;
+ struct cvmx_pcieepx_cfg467_s cn68xxp1;
+ struct cvmx_pcieepx_cfg467_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg467 cvmx_pcieepx_cfg467_t;
+
+/**
+ * cvmx_pcieep#_cfg468
+ *
+ * PCIE_CFG468 = Four hundred sixty-ninth 32-bits of PCIE type 0 config space
+ * (VC0 Completion Receive Queue Control)
+ */
+union cvmx_pcieepx_cfg468 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg468_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t queue_mode : 3; /**< VC0 Completion TLP Queue Mode
+ The operating mode of the Completion receive queue for VC0,
+ used only in the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ Only one bit can be set at a time:
+ o Bit 23: Bypass
+ o Bit 22: Cut-through
+ o Bit 21: Store-and-forward
+ However, the application must not change this field. */
+ uint32_t reserved_20_20 : 1;
+ uint32_t header_credits : 8; /**< VC0 Completion Header Credits
+ The number of initial Completion header credits for VC0, used
+ for all receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t data_credits : 12; /**< VC0 Completion Data Credits
+ The number of initial Completion data credits for VC0, used for
+ all receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_credits : 12;
+ uint32_t header_credits : 8;
+ uint32_t reserved_20_20 : 1;
+ uint32_t queue_mode : 3;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg468_s cn52xx;
+ struct cvmx_pcieepx_cfg468_s cn52xxp1;
+ struct cvmx_pcieepx_cfg468_s cn56xx;
+ struct cvmx_pcieepx_cfg468_s cn56xxp1;
+ struct cvmx_pcieepx_cfg468_s cn61xx;
+ struct cvmx_pcieepx_cfg468_s cn63xx;
+ struct cvmx_pcieepx_cfg468_s cn63xxp1;
+ struct cvmx_pcieepx_cfg468_s cn66xx;
+ struct cvmx_pcieepx_cfg468_s cn68xx;
+ struct cvmx_pcieepx_cfg468_s cn68xxp1;
+ struct cvmx_pcieepx_cfg468_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg468 cvmx_pcieepx_cfg468_t;
+
+/**
+ * cvmx_pcieep#_cfg490
+ *
+ * PCIE_CFG490 = Four hundred ninety-first 32-bits of PCIE type 0 config space
+ * (VC0 Posted Buffer Depth)
+ */
+union cvmx_pcieepx_cfg490 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg490_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_26_31 : 6;
+ uint32_t header_depth : 10; /**< VC0 Posted Header Queue Depth
+ Sets the number of entries in the Posted header queue for VC0
+ when using the segmented-buffer configuration, writable through
+ PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t data_depth : 14; /**< VC0 Posted Data Queue Depth
+ Sets the number of entries in the Posted data queue for VC0
+ when using the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_depth : 14;
+ uint32_t reserved_14_15 : 2;
+ uint32_t header_depth : 10;
+ uint32_t reserved_26_31 : 6;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg490_s cn52xx;
+ struct cvmx_pcieepx_cfg490_s cn52xxp1;
+ struct cvmx_pcieepx_cfg490_s cn56xx;
+ struct cvmx_pcieepx_cfg490_s cn56xxp1;
+ struct cvmx_pcieepx_cfg490_s cn61xx;
+ struct cvmx_pcieepx_cfg490_s cn63xx;
+ struct cvmx_pcieepx_cfg490_s cn63xxp1;
+ struct cvmx_pcieepx_cfg490_s cn66xx;
+ struct cvmx_pcieepx_cfg490_s cn68xx;
+ struct cvmx_pcieepx_cfg490_s cn68xxp1;
+ struct cvmx_pcieepx_cfg490_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg490 cvmx_pcieepx_cfg490_t;
+
+/**
+ * cvmx_pcieep#_cfg491
+ *
+ * PCIE_CFG491 = Four hundred ninety-second 32-bits of PCIE type 0 config space
+ * (VC0 Non-Posted Buffer Depth)
+ */
+union cvmx_pcieepx_cfg491 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg491_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_26_31 : 6;
+ uint32_t header_depth : 10; /**< VC0 Non-Posted Header Queue Depth
+ Sets the number of entries in the Non-Posted header queue for
+ VC0 when using the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t data_depth : 14; /**< VC0 Non-Posted Data Queue Depth
+ Sets the number of entries in the Non-Posted data queue for VC0
+ when using the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_depth : 14;
+ uint32_t reserved_14_15 : 2;
+ uint32_t header_depth : 10;
+ uint32_t reserved_26_31 : 6;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg491_s cn52xx;
+ struct cvmx_pcieepx_cfg491_s cn52xxp1;
+ struct cvmx_pcieepx_cfg491_s cn56xx;
+ struct cvmx_pcieepx_cfg491_s cn56xxp1;
+ struct cvmx_pcieepx_cfg491_s cn61xx;
+ struct cvmx_pcieepx_cfg491_s cn63xx;
+ struct cvmx_pcieepx_cfg491_s cn63xxp1;
+ struct cvmx_pcieepx_cfg491_s cn66xx;
+ struct cvmx_pcieepx_cfg491_s cn68xx;
+ struct cvmx_pcieepx_cfg491_s cn68xxp1;
+ struct cvmx_pcieepx_cfg491_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg491 cvmx_pcieepx_cfg491_t;
+
+/**
+ * cvmx_pcieep#_cfg492
+ *
+ * PCIE_CFG492 = Four hundred ninety-third 32-bits of PCIE type 0 config space
+ * (VC0 Completion Buffer Depth)
+ */
+union cvmx_pcieepx_cfg492 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg492_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_26_31 : 6;
+ uint32_t header_depth : 10; /**< VC0 Completion Header Queue Depth
+ Sets the number of entries in the Completion header queue for
+ VC0 when using the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t data_depth : 14; /**< VC0 Completion Data Queue Depth
+ Sets the number of entries in the Completion data queue for VC0
+ when using the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_depth : 14;
+ uint32_t reserved_14_15 : 2;
+ uint32_t header_depth : 10;
+ uint32_t reserved_26_31 : 6;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg492_s cn52xx;
+ struct cvmx_pcieepx_cfg492_s cn52xxp1;
+ struct cvmx_pcieepx_cfg492_s cn56xx;
+ struct cvmx_pcieepx_cfg492_s cn56xxp1;
+ struct cvmx_pcieepx_cfg492_s cn61xx;
+ struct cvmx_pcieepx_cfg492_s cn63xx;
+ struct cvmx_pcieepx_cfg492_s cn63xxp1;
+ struct cvmx_pcieepx_cfg492_s cn66xx;
+ struct cvmx_pcieepx_cfg492_s cn68xx;
+ struct cvmx_pcieepx_cfg492_s cn68xxp1;
+ struct cvmx_pcieepx_cfg492_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg492 cvmx_pcieepx_cfg492_t;
+
+/**
+ * cvmx_pcieep#_cfg515
+ *
+ * PCIE_CFG515 = Five hundred sixteenth 32-bits of PCIE type 0 config space
+ * (Port Logic Register (Gen2))
+ */
+union cvmx_pcieepx_cfg515 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg515_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t s_d_e : 1; /**< SEL_DE_EMPHASIS
+ Used to set the de-emphasis level for upstream ports. */
+ uint32_t ctcrb : 1; /**< Config Tx Compliance Receive Bit
+ When set to 1, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit asserted (equal to 1). */
+ uint32_t cpyts : 1; /**< Config PHY Tx Swing
+ Indicates the voltage level the PHY should drive. When set to
+ 1, indicates Full Swing. When set to 0, indicates Low Swing. */
+ uint32_t dsc : 1; /**< Directed Speed Change
+ o a write of '1' will initiate a speed change
+ o always reads a zero */
+ uint32_t le : 9; /**< Lane Enable
+ Indicates the number of lanes to check for exit from electrical
+ idle in Polling.Active and Polling.Compliance. 1 = x1, 2 = x2,
+ etc. Used to limit the maximum link width to ignore broken
+ lanes that detect a receiver but will not exit electrical idle
+ and would otherwise prevent a valid link from being configured. */
+ uint32_t n_fts : 8; /**< N_FTS
+ Sets the Number of Fast Training Sequences (N_FTS) that
+ the core advertises as its N_FTS during GEN2 Link training.
+ This value is used to inform the Link partner about the PHY's
+ ability to recover synchronization after a low power state.
+ Note: Do not set N_FTS to zero; doing so can cause the
+ LTSSM to go into the recovery state when exiting from
+ L0s. */
+#else
+ uint32_t n_fts : 8;
+ uint32_t le : 9;
+ uint32_t dsc : 1;
+ uint32_t cpyts : 1;
+ uint32_t ctcrb : 1;
+ uint32_t s_d_e : 1;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg515_s cn61xx;
+ struct cvmx_pcieepx_cfg515_s cn63xx;
+ struct cvmx_pcieepx_cfg515_s cn63xxp1;
+ struct cvmx_pcieepx_cfg515_s cn66xx;
+ struct cvmx_pcieepx_cfg515_s cn68xx;
+ struct cvmx_pcieepx_cfg515_s cn68xxp1;
+ struct cvmx_pcieepx_cfg515_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg515 cvmx_pcieepx_cfg515_t;
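
Since DSC is write-1, reads-as-zero, kicking off a Gen2 speed change is a
single read-modify-write; a sketch with the same assumed helpers:

    #include "cvmx.h"
    #include "cvmx-pcie.h"

    /* Sketch: initiate a directed speed change. Completion must be judged
       from link status elsewhere, since DSC itself always reads 0. */
    static void start_speed_change(int pcie_port)
    {
        cvmx_pcieepx_cfg515_t pl;

        pl.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG515(pcie_port));
        pl.s.dsc = 1; /* write of 1 initiates the speed change */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG515(pcie_port), pl.u32);
    }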
+
+/**
+ * cvmx_pcieep#_cfg516
+ *
+ * PCIE_CFG516 = Five hundred seventeenth 32-bits of PCIE type 0 config space
+ * (PHY Status Register)
+ */
+union cvmx_pcieepx_cfg516 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg516_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t phy_stat : 32; /**< PHY Status */
+#else
+ uint32_t phy_stat : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg516_s cn52xx;
+ struct cvmx_pcieepx_cfg516_s cn52xxp1;
+ struct cvmx_pcieepx_cfg516_s cn56xx;
+ struct cvmx_pcieepx_cfg516_s cn56xxp1;
+ struct cvmx_pcieepx_cfg516_s cn61xx;
+ struct cvmx_pcieepx_cfg516_s cn63xx;
+ struct cvmx_pcieepx_cfg516_s cn63xxp1;
+ struct cvmx_pcieepx_cfg516_s cn66xx;
+ struct cvmx_pcieepx_cfg516_s cn68xx;
+ struct cvmx_pcieepx_cfg516_s cn68xxp1;
+ struct cvmx_pcieepx_cfg516_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg516 cvmx_pcieepx_cfg516_t;
+
+/**
+ * cvmx_pcieep#_cfg517
+ *
+ * PCIE_CFG517 = Five hundred eighteenth 32-bits of PCIE type 0 config space
+ * (PHY Control Register)
+ */
+union cvmx_pcieepx_cfg517 {
+ uint32_t u32;
+ struct cvmx_pcieepx_cfg517_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t phy_ctrl : 32; /**< PHY Control */
+#else
+ uint32_t phy_ctrl : 32;
+#endif
+ } s;
+ struct cvmx_pcieepx_cfg517_s cn52xx;
+ struct cvmx_pcieepx_cfg517_s cn52xxp1;
+ struct cvmx_pcieepx_cfg517_s cn56xx;
+ struct cvmx_pcieepx_cfg517_s cn56xxp1;
+ struct cvmx_pcieepx_cfg517_s cn61xx;
+ struct cvmx_pcieepx_cfg517_s cn63xx;
+ struct cvmx_pcieepx_cfg517_s cn63xxp1;
+ struct cvmx_pcieepx_cfg517_s cn66xx;
+ struct cvmx_pcieepx_cfg517_s cn68xx;
+ struct cvmx_pcieepx_cfg517_s cn68xxp1;
+ struct cvmx_pcieepx_cfg517_s cnf71xx;
+};
+typedef union cvmx_pcieepx_cfg517 cvmx_pcieepx_cfg517_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pcieepx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pciercx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pciercx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pciercx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,5783 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pciercx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pciercx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_PCIERCX_DEFS_H__
+#define __CVMX_PCIERCX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG000(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG000(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000000ull;
+}
+#else
+#define CVMX_PCIERCX_CFG000(block_id) (0x0000000000000000ull)
+#endif
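
Every CVMX_PCIERCX_CFG* helper below repeats this pattern: with
CVMX_ENABLE_CSR_ADDRESS_CHECKING the inline function warns when the port
number is invalid for the running model, and otherwise the macro folds to a
constant byte offset into the port's config space. Typical use pairs the
macro with the config-space accessors from cvmx-pcie.h (assumed here):

    #include "cvmx.h"
    #include "cvmx-pcie.h"

    /* Sketch: fetch the root complex's Device/Vendor ID (config DWORD 0). */
    static uint32_t rc_device_vendor_id(int pcie_port)
    {
        /* The macro is just the offset; validation only happens when
           CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled at build time. */
        return cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG000(pcie_port));
    }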
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG001(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG001(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000004ull;
+}
+#else
+#define CVMX_PCIERCX_CFG001(block_id) (0x0000000000000004ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG002(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG002(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000008ull;
+}
+#else
+#define CVMX_PCIERCX_CFG002(block_id) (0x0000000000000008ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG003(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG003(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000000Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG003(block_id) (0x000000000000000Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG004(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG004(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000010ull;
+}
+#else
+#define CVMX_PCIERCX_CFG004(block_id) (0x0000000000000010ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG005(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG005(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000014ull;
+}
+#else
+#define CVMX_PCIERCX_CFG005(block_id) (0x0000000000000014ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG006(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG006(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000018ull;
+}
+#else
+#define CVMX_PCIERCX_CFG006(block_id) (0x0000000000000018ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG007(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG007(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000001Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG007(block_id) (0x000000000000001Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG008(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG008(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000020ull;
+}
+#else
+#define CVMX_PCIERCX_CFG008(block_id) (0x0000000000000020ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG009(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG009(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000024ull;
+}
+#else
+#define CVMX_PCIERCX_CFG009(block_id) (0x0000000000000024ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG010(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG010(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000028ull;
+}
+#else
+#define CVMX_PCIERCX_CFG010(block_id) (0x0000000000000028ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG011(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG011(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000002Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG011(block_id) (0x000000000000002Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG012(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG012(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000030ull;
+}
+#else
+#define CVMX_PCIERCX_CFG012(block_id) (0x0000000000000030ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG013(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG013(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000034ull;
+}
+#else
+#define CVMX_PCIERCX_CFG013(block_id) (0x0000000000000034ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG014(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG014(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000038ull;
+}
+#else
+#define CVMX_PCIERCX_CFG014(block_id) (0x0000000000000038ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG015(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG015(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000003Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG015(block_id) (0x000000000000003Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG016(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG016(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000040ull;
+}
+#else
+#define CVMX_PCIERCX_CFG016(block_id) (0x0000000000000040ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG017(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG017(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000044ull;
+}
+#else
+#define CVMX_PCIERCX_CFG017(block_id) (0x0000000000000044ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG020(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG020(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000050ull;
+}
+#else
+#define CVMX_PCIERCX_CFG020(block_id) (0x0000000000000050ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG021(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG021(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000054ull;
+}
+#else
+#define CVMX_PCIERCX_CFG021(block_id) (0x0000000000000054ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG022(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG022(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000058ull;
+}
+#else
+#define CVMX_PCIERCX_CFG022(block_id) (0x0000000000000058ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG023(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG023(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000005Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG023(block_id) (0x000000000000005Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG028(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG028(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000070ull;
+}
+#else
+#define CVMX_PCIERCX_CFG028(block_id) (0x0000000000000070ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG029(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG029(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000074ull;
+}
+#else
+#define CVMX_PCIERCX_CFG029(block_id) (0x0000000000000074ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG030(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG030(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000078ull;
+}
+#else
+#define CVMX_PCIERCX_CFG030(block_id) (0x0000000000000078ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG031(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG031(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000007Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG031(block_id) (0x000000000000007Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG032(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG032(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000080ull;
+}
+#else
+#define CVMX_PCIERCX_CFG032(block_id) (0x0000000000000080ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG033(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG033(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000084ull;
+}
+#else
+#define CVMX_PCIERCX_CFG033(block_id) (0x0000000000000084ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG034(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG034(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000088ull;
+}
+#else
+#define CVMX_PCIERCX_CFG034(block_id) (0x0000000000000088ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG035(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG035(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000008Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG035(block_id) (0x000000000000008Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG036(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG036(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000090ull;
+}
+#else
+#define CVMX_PCIERCX_CFG036(block_id) (0x0000000000000090ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG037(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG037(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000094ull;
+}
+#else
+#define CVMX_PCIERCX_CFG037(block_id) (0x0000000000000094ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG038(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG038(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000098ull;
+}
+#else
+#define CVMX_PCIERCX_CFG038(block_id) (0x0000000000000098ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG039(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG039(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000009Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG039(block_id) (0x000000000000009Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG040(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG040(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000000A0ull;
+}
+#else
+#define CVMX_PCIERCX_CFG040(block_id) (0x00000000000000A0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG041(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG041(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000000A4ull;
+}
+#else
+#define CVMX_PCIERCX_CFG041(block_id) (0x00000000000000A4ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG042(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG042(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000000A8ull;
+}
+#else
+#define CVMX_PCIERCX_CFG042(block_id) (0x00000000000000A8ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG064(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG064(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000100ull;
+}
+#else
+#define CVMX_PCIERCX_CFG064(block_id) (0x0000000000000100ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG065(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG065(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000104ull;
+}
+#else
+#define CVMX_PCIERCX_CFG065(block_id) (0x0000000000000104ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG066(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG066(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000108ull;
+}
+#else
+#define CVMX_PCIERCX_CFG066(block_id) (0x0000000000000108ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG067(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG067(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000010Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG067(block_id) (0x000000000000010Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG068(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG068(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000110ull;
+}
+#else
+#define CVMX_PCIERCX_CFG068(block_id) (0x0000000000000110ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG069(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG069(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000114ull;
+}
+#else
+#define CVMX_PCIERCX_CFG069(block_id) (0x0000000000000114ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG070(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG070(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000118ull;
+}
+#else
+#define CVMX_PCIERCX_CFG070(block_id) (0x0000000000000118ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG071(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG071(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000011Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG071(block_id) (0x000000000000011Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG072(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG072(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000120ull;
+}
+#else
+#define CVMX_PCIERCX_CFG072(block_id) (0x0000000000000120ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG073(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG073(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000124ull;
+}
+#else
+#define CVMX_PCIERCX_CFG073(block_id) (0x0000000000000124ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG074(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG074(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000128ull;
+}
+#else
+#define CVMX_PCIERCX_CFG074(block_id) (0x0000000000000128ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG075(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG075(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000012Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG075(block_id) (0x000000000000012Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG076(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG076(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000130ull;
+}
+#else
+#define CVMX_PCIERCX_CFG076(block_id) (0x0000000000000130ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG077(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG077(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000134ull;
+}
+#else
+#define CVMX_PCIERCX_CFG077(block_id) (0x0000000000000134ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG448(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG448(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000700ull;
+}
+#else
+#define CVMX_PCIERCX_CFG448(block_id) (0x0000000000000700ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG449(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG449(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000704ull;
+}
+#else
+#define CVMX_PCIERCX_CFG449(block_id) (0x0000000000000704ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG450(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG450(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000708ull;
+}
+#else
+#define CVMX_PCIERCX_CFG450(block_id) (0x0000000000000708ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG451(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG451(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000070Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG451(block_id) (0x000000000000070Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG452(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG452(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000710ull;
+}
+#else
+#define CVMX_PCIERCX_CFG452(block_id) (0x0000000000000710ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG453(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG453(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000714ull;
+}
+#else
+#define CVMX_PCIERCX_CFG453(block_id) (0x0000000000000714ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG454(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG454(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000718ull;
+}
+#else
+#define CVMX_PCIERCX_CFG454(block_id) (0x0000000000000718ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG455(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG455(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000071Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG455(block_id) (0x000000000000071Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG456(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG456(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000720ull;
+}
+#else
+#define CVMX_PCIERCX_CFG456(block_id) (0x0000000000000720ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG458(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG458(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000728ull;
+}
+#else
+#define CVMX_PCIERCX_CFG458(block_id) (0x0000000000000728ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG459(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG459(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000072Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG459(block_id) (0x000000000000072Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG460(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG460(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000730ull;
+}
+#else
+#define CVMX_PCIERCX_CFG460(block_id) (0x0000000000000730ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG461(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG461(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000734ull;
+}
+#else
+#define CVMX_PCIERCX_CFG461(block_id) (0x0000000000000734ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG462(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG462(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000738ull;
+}
+#else
+#define CVMX_PCIERCX_CFG462(block_id) (0x0000000000000738ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG463(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG463(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000073Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG463(block_id) (0x000000000000073Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG464(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG464(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000740ull;
+}
+#else
+#define CVMX_PCIERCX_CFG464(block_id) (0x0000000000000740ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG465(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG465(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000744ull;
+}
+#else
+#define CVMX_PCIERCX_CFG465(block_id) (0x0000000000000744ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG466(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG466(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000748ull;
+}
+#else
+#define CVMX_PCIERCX_CFG466(block_id) (0x0000000000000748ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG467(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG467(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000074Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG467(block_id) (0x000000000000074Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG468(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG468(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000750ull;
+}
+#else
+#define CVMX_PCIERCX_CFG468(block_id) (0x0000000000000750ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG490(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG490(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000007A8ull;
+}
+#else
+#define CVMX_PCIERCX_CFG490(block_id) (0x00000000000007A8ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG491(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG491(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000007ACull;
+}
+#else
+#define CVMX_PCIERCX_CFG491(block_id) (0x00000000000007ACull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG492(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG492(%lu) is invalid on this chip\n", block_id);
+ return 0x00000000000007B0ull;
+}
+#else
+#define CVMX_PCIERCX_CFG492(block_id) (0x00000000000007B0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG515(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG515(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000080Cull;
+}
+#else
+#define CVMX_PCIERCX_CFG515(block_id) (0x000000000000080Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG516(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG516(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000810ull;
+}
+#else
+#define CVMX_PCIERCX_CFG516(block_id) (0x0000000000000810ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCIERCX_CFG517(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PCIERCX_CFG517(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000814ull;
+}
+#else
+#define CVMX_PCIERCX_CFG517(block_id) (0x0000000000000814ull)
+#endif
+
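The CVMX_PCIERCX_CFG* accessors above return byte offsets into the PCIe root-complex type 1 configuration space, not memory-mapped addresses; when CVMX_ENABLE_CSR_ADDRESS_CHECKING is defined they additionally warn if block_id (the PCIe port number) is out of range for the running chip model. A minimal usage sketch, assuming the cvmx_pcie_cfgx_read()/cvmx_pcie_cfgx_write() helpers declared elsewhere in the SDK (cvmx-pcie.h):

    #include "cvmx.h"
    #include "cvmx-pcie.h"  /* assumed home of cvmx_pcie_cfgx_read()/cvmx_pcie_cfgx_write() */

    /* Read the first config dword (Device ID / Vendor ID) of a PCIe RC port.
     * CVMX_PCIERCX_CFG000(port) only yields the config-space offset (0x0);
     * the helper performs the actual access. */
    static uint32_t read_rc_id(int pcie_port)
    {
        return cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG000(pcie_port));
    }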
+/**
+ * cvmx_pcierc#_cfg000
+ *
+ * PCIE_CFG000 = First 32 bits of PCIE type 1 config space (Device ID and Vendor ID Register)
+ *
+ */
+union cvmx_pciercx_cfg000 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg000_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t devid : 16; /**< Device ID, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t vendid : 16; /**< Vendor ID, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t vendid : 16;
+ uint32_t devid : 16;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg000_s cn52xx;
+ struct cvmx_pciercx_cfg000_s cn52xxp1;
+ struct cvmx_pciercx_cfg000_s cn56xx;
+ struct cvmx_pciercx_cfg000_s cn56xxp1;
+ struct cvmx_pciercx_cfg000_s cn61xx;
+ struct cvmx_pciercx_cfg000_s cn63xx;
+ struct cvmx_pciercx_cfg000_s cn63xxp1;
+ struct cvmx_pciercx_cfg000_s cn66xx;
+ struct cvmx_pciercx_cfg000_s cn68xx;
+ struct cvmx_pciercx_cfg000_s cn68xxp1;
+ struct cvmx_pciercx_cfg000_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg000 cvmx_pciercx_cfg000_t;
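As a decoding sketch (reusing the hypothetical read_rc_id() helper above), the union lets the dword be picked apart by field name on either endianness:

    cvmx_pciercx_cfg000_t id;
    id.u32 = read_rc_id(0);
    cvmx_dprintf("RC0 vendor 0x%04x device 0x%04x\n", id.s.vendid, id.s.devid);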
+
+/**
+ * cvmx_pcierc#_cfg001
+ *
+ * PCIE_CFG001 = Second 32 bits of PCIE type 1 config space (Command/Status Register)
+ *
+ */
+union cvmx_pciercx_cfg001 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg001_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dpe : 1; /**< Detected Parity Error */
+ uint32_t sse : 1; /**< Signaled System Error */
+ uint32_t rma : 1; /**< Received Master Abort */
+ uint32_t rta : 1; /**< Received Target Abort */
+ uint32_t sta : 1; /**< Signaled Target Abort */
+ uint32_t devt : 2; /**< DEVSEL Timing
+ Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t mdpe : 1; /**< Master Data Parity Error */
+ uint32_t fbb : 1; /**< Fast Back-to-Back Capable
+ Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t reserved_22_22 : 1;
+ uint32_t m66 : 1; /**< 66 MHz Capable
+ Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t cl : 1; /**< Capabilities List
+ Indicates presence of an extended capability item.
+ Hardwired to 1. */
+ uint32_t i_stat : 1; /**< INTx Status */
+ uint32_t reserved_11_18 : 8;
+ uint32_t i_dis : 1; /**< INTx Assertion Disable */
+ uint32_t fbbe : 1; /**< Fast Back-to-Back Enable
+ Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t see : 1; /**< SERR# Enable */
+ uint32_t ids_wcc : 1; /**< IDSEL Stepping/Wait Cycle Control
+ Not applicable for PCI Express. Must be hardwired to 0 */
+ uint32_t per : 1; /**< Parity Error Response */
+ uint32_t vps : 1; /**< VGA Palette Snoop
+ Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t mwice : 1; /**< Memory Write and Invalidate
+ Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t scse : 1; /**< Special Cycle Enable
+ Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t me : 1; /**< Bus Master Enable */
+ uint32_t msae : 1; /**< Memory Space Enable */
+ uint32_t isae : 1; /**< I/O Space Enable */
+#else
+ uint32_t isae : 1;
+ uint32_t msae : 1;
+ uint32_t me : 1;
+ uint32_t scse : 1;
+ uint32_t mwice : 1;
+ uint32_t vps : 1;
+ uint32_t per : 1;
+ uint32_t ids_wcc : 1;
+ uint32_t see : 1;
+ uint32_t fbbe : 1;
+ uint32_t i_dis : 1;
+ uint32_t reserved_11_18 : 8;
+ uint32_t i_stat : 1;
+ uint32_t cl : 1;
+ uint32_t m66 : 1;
+ uint32_t reserved_22_22 : 1;
+ uint32_t fbb : 1;
+ uint32_t mdpe : 1;
+ uint32_t devt : 2;
+ uint32_t sta : 1;
+ uint32_t rta : 1;
+ uint32_t rma : 1;
+ uint32_t sse : 1;
+ uint32_t dpe : 1;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg001_s cn52xx;
+ struct cvmx_pciercx_cfg001_s cn52xxp1;
+ struct cvmx_pciercx_cfg001_s cn56xx;
+ struct cvmx_pciercx_cfg001_s cn56xxp1;
+ struct cvmx_pciercx_cfg001_s cn61xx;
+ struct cvmx_pciercx_cfg001_s cn63xx;
+ struct cvmx_pciercx_cfg001_s cn63xxp1;
+ struct cvmx_pciercx_cfg001_s cn66xx;
+ struct cvmx_pciercx_cfg001_s cn68xx;
+ struct cvmx_pciercx_cfg001_s cn68xxp1;
+ struct cvmx_pciercx_cfg001_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg001 cvmx_pciercx_cfg001_t;
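The command/status fields above are normally updated read-modify-write through the u32 member; a sketch in the style of the SDK's RC bring-up code, again assuming the cvmx_pcie_cfgx_read()/cvmx_pcie_cfgx_write() helpers from cvmx-pcie.h:

    /* Enable bus mastering plus memory and I/O decoding on RC port 0. */
    cvmx_pciercx_cfg001_t cmd;
    cmd.u32 = cvmx_pcie_cfgx_read(0, CVMX_PCIERCX_CFG001(0));
    cmd.s.me   = 1;   /* Bus Master Enable */
    cmd.s.msae = 1;   /* Memory Space Enable */
    cmd.s.isae = 1;   /* I/O Space Enable */
    cvmx_pcie_cfgx_write(0, CVMX_PCIERCX_CFG001(0), cmd.u32);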
+
+/**
+ * cvmx_pcierc#_cfg002
+ *
+ * PCIE_CFG002 = Third 32 bits of PCIE type 1 config space (Revision ID/Class Code Register)
+ *
+ */
+union cvmx_pciercx_cfg002 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg002_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bcc : 8; /**< Base Class Code, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t sc : 8; /**< Subclass Code, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t pi : 8; /**< Programming Interface, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t rid : 8; /**< Revision ID, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t rid : 8;
+ uint32_t pi : 8;
+ uint32_t sc : 8;
+ uint32_t bcc : 8;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg002_s cn52xx;
+ struct cvmx_pciercx_cfg002_s cn52xxp1;
+ struct cvmx_pciercx_cfg002_s cn56xx;
+ struct cvmx_pciercx_cfg002_s cn56xxp1;
+ struct cvmx_pciercx_cfg002_s cn61xx;
+ struct cvmx_pciercx_cfg002_s cn63xx;
+ struct cvmx_pciercx_cfg002_s cn63xxp1;
+ struct cvmx_pciercx_cfg002_s cn66xx;
+ struct cvmx_pciercx_cfg002_s cn68xx;
+ struct cvmx_pciercx_cfg002_s cn68xxp1;
+ struct cvmx_pciercx_cfg002_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg002 cvmx_pciercx_cfg002_t;
+
+/**
+ * cvmx_pcierc#_cfg003
+ *
+ * PCIE_CFG003 = Fourth 32 bits of PCIE type 1 config space (Cache Line Size/Master Latency Timer/Header Type Register/BIST Register)
+ *
+ */
+union cvmx_pciercx_cfg003 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg003_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bist : 8; /**< The BIST register functions are not supported.
+ All 8 bits of the BIST register are hardwired to 0. */
+ uint32_t mfd : 1; /**< Multi Function Device
+ The Multi Function Device bit is writable through PEM(0..1)_CFG_WR.
+ However, this is a single function device. Therefore, the
+ application must not write a 1 to this bit. */
+ uint32_t chf : 7; /**< Configuration Header Format
+ Hardwired to 1. */
+ uint32_t lt : 8; /**< Master Latency Timer
+ Not applicable for PCI Express, hardwired to 0. */
+ uint32_t cls : 8; /**< Cache Line Size
+ The Cache Line Size register is RW for legacy compatibility
+ purposes and is not applicable to PCI Express device
+ functionality. */
+#else
+ uint32_t cls : 8;
+ uint32_t lt : 8;
+ uint32_t chf : 7;
+ uint32_t mfd : 1;
+ uint32_t bist : 8;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg003_s cn52xx;
+ struct cvmx_pciercx_cfg003_s cn52xxp1;
+ struct cvmx_pciercx_cfg003_s cn56xx;
+ struct cvmx_pciercx_cfg003_s cn56xxp1;
+ struct cvmx_pciercx_cfg003_s cn61xx;
+ struct cvmx_pciercx_cfg003_s cn63xx;
+ struct cvmx_pciercx_cfg003_s cn63xxp1;
+ struct cvmx_pciercx_cfg003_s cn66xx;
+ struct cvmx_pciercx_cfg003_s cn68xx;
+ struct cvmx_pciercx_cfg003_s cn68xxp1;
+ struct cvmx_pciercx_cfg003_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg003 cvmx_pciercx_cfg003_t;
+
+/**
+ * cvmx_pcierc#_cfg004
+ *
+ * PCIE_CFG004 = Fifth 32-bits of PCIE type 1 config space (Base Address Register 0 - Low)
+ *
+ */
+union cvmx_pciercx_cfg004 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg004_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg004_s cn52xx;
+ struct cvmx_pciercx_cfg004_s cn52xxp1;
+ struct cvmx_pciercx_cfg004_s cn56xx;
+ struct cvmx_pciercx_cfg004_s cn56xxp1;
+ struct cvmx_pciercx_cfg004_s cn61xx;
+ struct cvmx_pciercx_cfg004_s cn63xx;
+ struct cvmx_pciercx_cfg004_s cn63xxp1;
+ struct cvmx_pciercx_cfg004_s cn66xx;
+ struct cvmx_pciercx_cfg004_s cn68xx;
+ struct cvmx_pciercx_cfg004_s cn68xxp1;
+ struct cvmx_pciercx_cfg004_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg004 cvmx_pciercx_cfg004_t;
+
+/**
+ * cvmx_pcierc#_cfg005
+ *
+ * PCIE_CFG005 = Sixth 32-bits of PCIE type 1 config space (Base Address Register 0 - High)
+ *
+ */
+union cvmx_pciercx_cfg005 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg005_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg005_s cn52xx;
+ struct cvmx_pciercx_cfg005_s cn52xxp1;
+ struct cvmx_pciercx_cfg005_s cn56xx;
+ struct cvmx_pciercx_cfg005_s cn56xxp1;
+ struct cvmx_pciercx_cfg005_s cn61xx;
+ struct cvmx_pciercx_cfg005_s cn63xx;
+ struct cvmx_pciercx_cfg005_s cn63xxp1;
+ struct cvmx_pciercx_cfg005_s cn66xx;
+ struct cvmx_pciercx_cfg005_s cn68xx;
+ struct cvmx_pciercx_cfg005_s cn68xxp1;
+ struct cvmx_pciercx_cfg005_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg005 cvmx_pciercx_cfg005_t;
+
+/**
+ * cvmx_pcierc#_cfg006
+ *
+ * PCIE_CFG006 = Seventh 32-bits of PCIE type 1 config space (Bus Number Registers)
+ *
+ */
+union cvmx_pciercx_cfg006 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg006_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t slt : 8; /**< Secondary Latency Timer
+ Not applicable to PCI Express, hardwired to 0x00. */
+ uint32_t subbnum : 8; /**< Subordinate Bus Number */
+ uint32_t sbnum : 8; /**< Secondary Bus Number */
+ uint32_t pbnum : 8; /**< Primary Bus Number */
+#else
+ uint32_t pbnum : 8;
+ uint32_t sbnum : 8;
+ uint32_t subbnum : 8;
+ uint32_t slt : 8;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg006_s cn52xx;
+ struct cvmx_pciercx_cfg006_s cn52xxp1;
+ struct cvmx_pciercx_cfg006_s cn56xx;
+ struct cvmx_pciercx_cfg006_s cn56xxp1;
+ struct cvmx_pciercx_cfg006_s cn61xx;
+ struct cvmx_pciercx_cfg006_s cn63xx;
+ struct cvmx_pciercx_cfg006_s cn63xxp1;
+ struct cvmx_pciercx_cfg006_s cn66xx;
+ struct cvmx_pciercx_cfg006_s cn68xx;
+ struct cvmx_pciercx_cfg006_s cn68xxp1;
+ struct cvmx_pciercx_cfg006_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg006 cvmx_pciercx_cfg006_t;
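+
+/**
+ * A minimal bus-number programming sketch for the union above, matching
+ * the usual single-port RC topology; CVMX_PCIERCX_CFG006(port) and
+ * cvmx_pcie_cfgx_write() from cvmx-pcie.h are assumed:
+ *
+ * @code
+ * cvmx_pciercx_cfg006_t cfg006;
+ * cfg006.u32 = 0;
+ * cfg006.s.pbnum = 0;   // primary bus: the RC itself
+ * cfg006.s.sbnum = 1;   // secondary bus directly below the port
+ * cfg006.s.subbnum = 1; // highest bus reachable through the port
+ * cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), cfg006.u32);
+ * @endcode
+ */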
+
+/**
+ * cvmx_pcierc#_cfg007
+ *
+ * PCIE_CFG007 = Eighth 32-bits of PCIE type 1 config space (IO Base and IO Limit/Secondary Status Register)
+ *
+ */
+union cvmx_pciercx_cfg007 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg007_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dpe : 1; /**< Detected Parity Error */
+ uint32_t sse : 1; /**< Signaled System Error */
+ uint32_t rma : 1; /**< Received Master Abort */
+ uint32_t rta : 1; /**< Received Target Abort */
+ uint32_t sta : 1; /**< Signaled Target Abort */
+ uint32_t devt : 2; /**< DEVSEL Timing
+ Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t mdpe : 1; /**< Master Data Parity Error */
+ uint32_t fbb : 1; /**< Fast Back-to-Back Capable
+ Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t reserved_22_22 : 1;
+ uint32_t m66 : 1; /**< 66 MHz Capable
+ Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t reserved_16_20 : 5;
+ uint32_t lio_limi : 4; /**< I/O Space Limit */
+ uint32_t reserved_9_11 : 3;
+ uint32_t io32b : 1; /**< 32-Bit I/O Space */
+ uint32_t lio_base : 4; /**< I/O Space Base */
+ uint32_t reserved_1_3 : 3;
+ uint32_t io32a : 1; /**< 32-Bit I/O Space
+ o 0 = 16-bit I/O addressing
+ o 1 = 32-bit I/O addressing
+ This bit is writable through PEM(0..1)_CFG_WR.
+ When the application
+ writes to this bit through PEM(0..1)_CFG_WR,
+ the same value is written
+ to bit 8 of this register. */
+#else
+ uint32_t io32a : 1;
+ uint32_t reserved_1_3 : 3;
+ uint32_t lio_base : 4;
+ uint32_t io32b : 1;
+ uint32_t reserved_9_11 : 3;
+ uint32_t lio_limi : 4;
+ uint32_t reserved_16_20 : 5;
+ uint32_t m66 : 1;
+ uint32_t reserved_22_22 : 1;
+ uint32_t fbb : 1;
+ uint32_t mdpe : 1;
+ uint32_t devt : 2;
+ uint32_t sta : 1;
+ uint32_t rta : 1;
+ uint32_t rma : 1;
+ uint32_t sse : 1;
+ uint32_t dpe : 1;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg007_s cn52xx;
+ struct cvmx_pciercx_cfg007_s cn52xxp1;
+ struct cvmx_pciercx_cfg007_s cn56xx;
+ struct cvmx_pciercx_cfg007_s cn56xxp1;
+ struct cvmx_pciercx_cfg007_s cn61xx;
+ struct cvmx_pciercx_cfg007_s cn63xx;
+ struct cvmx_pciercx_cfg007_s cn63xxp1;
+ struct cvmx_pciercx_cfg007_s cn66xx;
+ struct cvmx_pciercx_cfg007_s cn68xx;
+ struct cvmx_pciercx_cfg007_s cn68xxp1;
+ struct cvmx_pciercx_cfg007_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg007 cvmx_pciercx_cfg007_t;
+
+/**
+ * cvmx_pcierc#_cfg008
+ *
+ * PCIE_CFG008 = Ninth 32-bits of PCIE type 1 config space (Memory Base and Memory Limit Register)
+ *
+ */
+union cvmx_pciercx_cfg008 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg008_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ml_addr : 12; /**< Memory Limit Address */
+ uint32_t reserved_16_19 : 4;
+ uint32_t mb_addr : 12; /**< Memory Base Address */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t mb_addr : 12;
+ uint32_t reserved_16_19 : 4;
+ uint32_t ml_addr : 12;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg008_s cn52xx;
+ struct cvmx_pciercx_cfg008_s cn52xxp1;
+ struct cvmx_pciercx_cfg008_s cn56xx;
+ struct cvmx_pciercx_cfg008_s cn56xxp1;
+ struct cvmx_pciercx_cfg008_s cn61xx;
+ struct cvmx_pciercx_cfg008_s cn63xx;
+ struct cvmx_pciercx_cfg008_s cn63xxp1;
+ struct cvmx_pciercx_cfg008_s cn66xx;
+ struct cvmx_pciercx_cfg008_s cn68xx;
+ struct cvmx_pciercx_cfg008_s cn68xxp1;
+ struct cvmx_pciercx_cfg008_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg008 cvmx_pciercx_cfg008_t;
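+
+/**
+ * MB_ADDR/ML_ADDR above hold address bits [31:20], giving the bridge a
+ * 1 MB-granular non-prefetchable window of
+ * [mb_addr << 20, (ml_addr << 20) | 0xFFFFF]. A sketch that programs a
+ * 16 MB window at 0xF0000000, assuming CVMX_PCIERCX_CFG008(port) and
+ * cvmx_pcie_cfgx_write():
+ *
+ * @code
+ * cvmx_pciercx_cfg008_t cfg008;
+ * cfg008.u32 = 0;
+ * cfg008.s.mb_addr = 0xF00; // base  = 0xF0000000
+ * cfg008.s.ml_addr = 0xF0F; // limit = 0xF0FFFFFF
+ * cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), cfg008.u32);
+ * @endcode
+ */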
+
+/**
+ * cvmx_pcierc#_cfg009
+ *
+ * PCIE_CFG009 = Tenth 32-bits of PCIE type 1 config space (Prefetchable Memory Base and Limit Register)
+ *
+ */
+union cvmx_pciercx_cfg009 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg009_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lmem_limit : 12; /**< Upper 12 bits of 32-bit Prefetchable Memory End Address */
+ uint32_t reserved_17_19 : 3;
+ uint32_t mem64b : 1; /**< 64-Bit Memory Addressing
+ o 0 = 32-bit memory addressing
+ o 1 = 64-bit memory addressing */
+ uint32_t lmem_base : 12; /**< Upper 12 bits of 32-bit Prefetchable Memory Start Address */
+ uint32_t reserved_1_3 : 3;
+ uint32_t mem64a : 1; /**< 64-Bit Memory Addressing
+ o 0 = 32-bit memory addressing
+ o 1 = 64-bit memory addressing
+ This bit is writable through PEM(0..1)_CFG_WR.
+ When the application
+ writes to this bit through PEM(0..1)_CFG_WR,
+ the same value is written
+ to bit 16 of this register. */
+#else
+ uint32_t mem64a : 1;
+ uint32_t reserved_1_3 : 3;
+ uint32_t lmem_base : 12;
+ uint32_t mem64b : 1;
+ uint32_t reserved_17_19 : 3;
+ uint32_t lmem_limit : 12;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg009_s cn52xx;
+ struct cvmx_pciercx_cfg009_s cn52xxp1;
+ struct cvmx_pciercx_cfg009_s cn56xx;
+ struct cvmx_pciercx_cfg009_s cn56xxp1;
+ struct cvmx_pciercx_cfg009_s cn61xx;
+ struct cvmx_pciercx_cfg009_s cn63xx;
+ struct cvmx_pciercx_cfg009_s cn63xxp1;
+ struct cvmx_pciercx_cfg009_s cn66xx;
+ struct cvmx_pciercx_cfg009_s cn68xx;
+ struct cvmx_pciercx_cfg009_s cn68xxp1;
+ struct cvmx_pciercx_cfg009_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg009 cvmx_pciercx_cfg009_t;
+
+/**
+ * cvmx_pcierc#_cfg010
+ *
+ * PCIE_CFG010 = Eleventh 32-bits of PCIE type 1 config space (Prefetchable Base Upper 32 Bits Register)
+ *
+ */
+union cvmx_pciercx_cfg010 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg010_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t umem_base : 32; /**< Upper 32 Bits of Base Address of Prefetchable Memory Space
+ Used only when 64-bit prefetchable memory addressing is
+ enabled. */
+#else
+ uint32_t umem_base : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg010_s cn52xx;
+ struct cvmx_pciercx_cfg010_s cn52xxp1;
+ struct cvmx_pciercx_cfg010_s cn56xx;
+ struct cvmx_pciercx_cfg010_s cn56xxp1;
+ struct cvmx_pciercx_cfg010_s cn61xx;
+ struct cvmx_pciercx_cfg010_s cn63xx;
+ struct cvmx_pciercx_cfg010_s cn63xxp1;
+ struct cvmx_pciercx_cfg010_s cn66xx;
+ struct cvmx_pciercx_cfg010_s cn68xx;
+ struct cvmx_pciercx_cfg010_s cn68xxp1;
+ struct cvmx_pciercx_cfg010_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg010 cvmx_pciercx_cfg010_t;
+
+/**
+ * cvmx_pcierc#_cfg011
+ *
+ * PCIE_CFG011 = Twelfth 32-bits of PCIE type 1 config space (Prefetchable Limit Upper 32 Bits Register)
+ *
+ */
+union cvmx_pciercx_cfg011 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg011_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t umem_limit : 32; /**< Upper 32 Bits of Limit Address of Prefetchable Memory Space
+ Used only when 64-bit prefetchable memory addressing is
+ enabled. */
+#else
+ uint32_t umem_limit : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg011_s cn52xx;
+ struct cvmx_pciercx_cfg011_s cn52xxp1;
+ struct cvmx_pciercx_cfg011_s cn56xx;
+ struct cvmx_pciercx_cfg011_s cn56xxp1;
+ struct cvmx_pciercx_cfg011_s cn61xx;
+ struct cvmx_pciercx_cfg011_s cn63xx;
+ struct cvmx_pciercx_cfg011_s cn63xxp1;
+ struct cvmx_pciercx_cfg011_s cn66xx;
+ struct cvmx_pciercx_cfg011_s cn68xx;
+ struct cvmx_pciercx_cfg011_s cn68xxp1;
+ struct cvmx_pciercx_cfg011_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg011 cvmx_pciercx_cfg011_t;
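+
+/**
+ * With 64-bit prefetchable addressing enabled (MEM64A/MEM64B in
+ * PCIE_CFG009), the upper-half registers in PCIE_CFG010/011 extend the
+ * 12-bit base/limit fields: base = (umem_base << 32) | (lmem_base << 20).
+ * A decoding sketch, assuming the CVMX_PCIERCX_CFG009/CFG010 offset
+ * macros and cvmx_pcie_cfgx_read():
+ *
+ * @code
+ * cvmx_pciercx_cfg009_t cfg009;
+ * cvmx_pciercx_cfg010_t cfg010;
+ * uint64_t pf_base;
+ * cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
+ * cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
+ * pf_base = (uint64_t)cfg009.s.lmem_base << 20;
+ * if (cfg009.s.mem64a) // fold in the upper 32 bits only in 64-bit mode
+ *     pf_base |= (uint64_t)cfg010.s.umem_base << 32;
+ * @endcode
+ */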
+
+/**
+ * cvmx_pcierc#_cfg012
+ *
+ * PCIE_CFG012 = Thirteenth 32-bits of PCIE type 1 config space (IO Base and Limit Upper 16 Bits Register)
+ *
+ */
+union cvmx_pciercx_cfg012 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg012_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t uio_limit : 16; /**< Upper 16 Bits of I/O Limit (if 32-bit I/O decoding is supported
+ for devices on the secondary side) */
+ uint32_t uio_base : 16; /**< Upper 16 Bits of I/O Base (if 32-bit I/O decoding is supported
+ for devices on the secondary side) */
+#else
+ uint32_t uio_base : 16;
+ uint32_t uio_limit : 16;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg012_s cn52xx;
+ struct cvmx_pciercx_cfg012_s cn52xxp1;
+ struct cvmx_pciercx_cfg012_s cn56xx;
+ struct cvmx_pciercx_cfg012_s cn56xxp1;
+ struct cvmx_pciercx_cfg012_s cn61xx;
+ struct cvmx_pciercx_cfg012_s cn63xx;
+ struct cvmx_pciercx_cfg012_s cn63xxp1;
+ struct cvmx_pciercx_cfg012_s cn66xx;
+ struct cvmx_pciercx_cfg012_s cn68xx;
+ struct cvmx_pciercx_cfg012_s cn68xxp1;
+ struct cvmx_pciercx_cfg012_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg012 cvmx_pciercx_cfg012_t;
+
+/**
+ * cvmx_pcierc#_cfg013
+ *
+ * PCIE_CFG013 = Fourteenth 32-bits of PCIE type 1 config space (Capability Pointer Register)
+ *
+ */
+union cvmx_pciercx_cfg013 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg013_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t cp : 8; /**< First Capability Pointer.
+ Points to Power Management Capability structure by
+ default, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+#else
+ uint32_t cp : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg013_s cn52xx;
+ struct cvmx_pciercx_cfg013_s cn52xxp1;
+ struct cvmx_pciercx_cfg013_s cn56xx;
+ struct cvmx_pciercx_cfg013_s cn56xxp1;
+ struct cvmx_pciercx_cfg013_s cn61xx;
+ struct cvmx_pciercx_cfg013_s cn63xx;
+ struct cvmx_pciercx_cfg013_s cn63xxp1;
+ struct cvmx_pciercx_cfg013_s cn66xx;
+ struct cvmx_pciercx_cfg013_s cn68xx;
+ struct cvmx_pciercx_cfg013_s cn68xxp1;
+ struct cvmx_pciercx_cfg013_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg013 cvmx_pciercx_cfg013_t;
+
+/**
+ * cvmx_pcierc#_cfg014
+ *
+ * PCIE_CFG014 = Fifteenth 32-bits of PCIE type 1 config space (Expansion ROM Base Address Register)
+ *
+ */
+union cvmx_pciercx_cfg014 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg014_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg014_s cn52xx;
+ struct cvmx_pciercx_cfg014_s cn52xxp1;
+ struct cvmx_pciercx_cfg014_s cn56xx;
+ struct cvmx_pciercx_cfg014_s cn56xxp1;
+ struct cvmx_pciercx_cfg014_s cn61xx;
+ struct cvmx_pciercx_cfg014_s cn63xx;
+ struct cvmx_pciercx_cfg014_s cn63xxp1;
+ struct cvmx_pciercx_cfg014_s cn66xx;
+ struct cvmx_pciercx_cfg014_s cn68xx;
+ struct cvmx_pciercx_cfg014_s cn68xxp1;
+ struct cvmx_pciercx_cfg014_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg014 cvmx_pciercx_cfg014_t;
+
+/**
+ * cvmx_pcierc#_cfg015
+ *
+ * PCIE_CFG015 = Sixteenth 32-bits of PCIE type 1 config space (Interrupt Line Register/Interrupt Pin/Bridge Control Register)
+ *
+ */
+union cvmx_pciercx_cfg015 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg015_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_28_31 : 4;
+ uint32_t dtsees : 1; /**< Discard Timer SERR Enable Status
+ Not applicable to PCI Express, hardwired to 0. */
+ uint32_t dts : 1; /**< Discard Timer Status
+ Not applicable to PCI Express, hardwired to 0. */
+ uint32_t sdt : 1; /**< Secondary Discard Timer
+ Not applicable to PCI Express, hardwired to 0. */
+ uint32_t pdt : 1; /**< Primary Discard Timer
+ Not applicable to PCI Express, hardwired to 0. */
+ uint32_t fbbe : 1; /**< Fast Back-to-Back Transactions Enable
+ Not applicable to PCI Express, hardwired to 0. */
+ uint32_t sbrst : 1; /**< Secondary Bus Reset
+ Hot reset. Causes TS1s with the hot reset bit to be sent to
+ the link partner. When set, SW should wait 2ms before
+ clearing. The link partner normally responds by sending TS1s
+ with the hot reset bit set, which will cause a link
+ down event - refer to the "PCIe Link-Down Reset in RC Mode"
+ section. */
+ uint32_t mam : 1; /**< Master Abort Mode
+ Not applicable to PCI Express, hardwired to 0. */
+ uint32_t vga16d : 1; /**< VGA 16-Bit Decode */
+ uint32_t vgae : 1; /**< VGA Enable */
+ uint32_t isae : 1; /**< ISA Enable */
+ uint32_t see : 1; /**< SERR Enable */
+ uint32_t pere : 1; /**< Parity Error Response Enable */
+ uint32_t inta : 8; /**< Interrupt Pin
+ Identifies the legacy interrupt Message that the device
+ (or device function) uses.
+ The Interrupt Pin register is writable through PEM(0..1)_CFG_WR.
+ In a single-function configuration, only INTA is used.
+ Therefore, the application must not change this field. */
+ uint32_t il : 8; /**< Interrupt Line */
+#else
+ uint32_t il : 8;
+ uint32_t inta : 8;
+ uint32_t pere : 1;
+ uint32_t see : 1;
+ uint32_t isae : 1;
+ uint32_t vgae : 1;
+ uint32_t vga16d : 1;
+ uint32_t mam : 1;
+ uint32_t sbrst : 1;
+ uint32_t fbbe : 1;
+ uint32_t pdt : 1;
+ uint32_t sdt : 1;
+ uint32_t dts : 1;
+ uint32_t dtsees : 1;
+ uint32_t reserved_28_31 : 4;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg015_s cn52xx;
+ struct cvmx_pciercx_cfg015_s cn52xxp1;
+ struct cvmx_pciercx_cfg015_s cn56xx;
+ struct cvmx_pciercx_cfg015_s cn56xxp1;
+ struct cvmx_pciercx_cfg015_s cn61xx;
+ struct cvmx_pciercx_cfg015_s cn63xx;
+ struct cvmx_pciercx_cfg015_s cn63xxp1;
+ struct cvmx_pciercx_cfg015_s cn66xx;
+ struct cvmx_pciercx_cfg015_s cn68xx;
+ struct cvmx_pciercx_cfg015_s cn68xxp1;
+ struct cvmx_pciercx_cfg015_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg015 cvmx_pciercx_cfg015_t;
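+
+/**
+ * A hot-reset sketch for the SBRST bit above, following the 2 ms hold
+ * time called out in its description; CVMX_PCIERCX_CFG015(port), the
+ * cvmx_pcie_cfgx_* accessors, and cvmx_wait_usec() are assumed:
+ *
+ * @code
+ * cvmx_pciercx_cfg015_t cfg015;
+ * cfg015.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG015(pcie_port));
+ * cfg015.s.sbrst = 1; // send hot reset TS1s to the link partner
+ * cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG015(pcie_port), cfg015.u32);
+ * cvmx_wait_usec(2000); // wait 2 ms before clearing, per the field note
+ * cfg015.s.sbrst = 0;
+ * cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG015(pcie_port), cfg015.u32);
+ * @endcode
+ */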
+
+/**
+ * cvmx_pcierc#_cfg016
+ *
+ * PCIE_CFG016 = Seventeenth 32-bits of PCIE type 1 config space
+ * (Power Management Capability ID/
+ * Power Management Next Item Pointer/
+ * Power Management Capabilities Register)
+ */
+union cvmx_pciercx_cfg016 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg016_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pmes : 5; /**< PME_Support
+ A value of 0 for any bit indicates that the
+ device (or function) is not capable of generating PME Messages
+ while in that power state:
+ o Bit 11: If set, PME Messages can be generated from D0
+ o Bit 12: If set, PME Messages can be generated from D1
+ o Bit 13: If set, PME Messages can be generated from D2
+ o Bit 14: If set, PME Messages can be generated from D3hot
+ o Bit 15: If set, PME Messages can be generated from D3cold
+ The PME_Support field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t d2s : 1; /**< D2 Support, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t d1s : 1; /**< D1 Support, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t auxc : 3; /**< AUX Current, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t dsi : 1; /**< Device Specific Initialization (DSI), writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t reserved_20_20 : 1;
+ uint32_t pme_clock : 1; /**< PME Clock, hardwired to 0 */
+ uint32_t pmsv : 3; /**< Power Management Specification Version, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t ncp : 8; /**< Next Capability Pointer
+ Points to the MSI capabilities by default, writable
+ through PEM(0..1)_CFG_WR. */
+ uint32_t pmcid : 8; /**< Power Management Capability ID */
+#else
+ uint32_t pmcid : 8;
+ uint32_t ncp : 8;
+ uint32_t pmsv : 3;
+ uint32_t pme_clock : 1;
+ uint32_t reserved_20_20 : 1;
+ uint32_t dsi : 1;
+ uint32_t auxc : 3;
+ uint32_t d1s : 1;
+ uint32_t d2s : 1;
+ uint32_t pmes : 5;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg016_s cn52xx;
+ struct cvmx_pciercx_cfg016_s cn52xxp1;
+ struct cvmx_pciercx_cfg016_s cn56xx;
+ struct cvmx_pciercx_cfg016_s cn56xxp1;
+ struct cvmx_pciercx_cfg016_s cn61xx;
+ struct cvmx_pciercx_cfg016_s cn63xx;
+ struct cvmx_pciercx_cfg016_s cn63xxp1;
+ struct cvmx_pciercx_cfg016_s cn66xx;
+ struct cvmx_pciercx_cfg016_s cn68xx;
+ struct cvmx_pciercx_cfg016_s cn68xxp1;
+ struct cvmx_pciercx_cfg016_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg016 cvmx_pciercx_cfg016_t;
+
+/**
+ * cvmx_pcierc#_cfg017
+ *
+ * PCIE_CFG017 = Eighteenth 32-bits of PCIE type 1 config space (Power Management Control and Status Register)
+ *
+ */
+union cvmx_pciercx_cfg017 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg017_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pmdia : 8; /**< Data register for additional information (not supported) */
+ uint32_t bpccee : 1; /**< Bus Power/Clock Control Enable, hardwired to 0 */
+ uint32_t bd3h : 1; /**< B2/B3 Support, hardwired to 0 */
+ uint32_t reserved_16_21 : 6;
+ uint32_t pmess : 1; /**< PME Status
+ Indicates whether a previously enabled PME event occurred. */
+ uint32_t pmedsia : 2; /**< Data Scale (not supported) */
+ uint32_t pmds : 4; /**< Data Select (not supported) */
+ uint32_t pmeens : 1; /**< PME Enable
+ A value of 1 indicates that the device is enabled to
+ generate PME. */
+ uint32_t reserved_4_7 : 4;
+ uint32_t nsr : 1; /**< No Soft Reset, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t reserved_2_2 : 1;
+ uint32_t ps : 2; /**< Power State
+ Controls the device power state:
+ o 00b: D0
+ o 01b: D1
+ o 10b: D2
+ o 11b: D3
+ The written value is ignored if the specific state is
+ not supported. */
+#else
+ uint32_t ps : 2;
+ uint32_t reserved_2_2 : 1;
+ uint32_t nsr : 1;
+ uint32_t reserved_4_7 : 4;
+ uint32_t pmeens : 1;
+ uint32_t pmds : 4;
+ uint32_t pmedsia : 2;
+ uint32_t pmess : 1;
+ uint32_t reserved_16_21 : 6;
+ uint32_t bd3h : 1;
+ uint32_t bpccee : 1;
+ uint32_t pmdia : 8;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg017_s cn52xx;
+ struct cvmx_pciercx_cfg017_s cn52xxp1;
+ struct cvmx_pciercx_cfg017_s cn56xx;
+ struct cvmx_pciercx_cfg017_s cn56xxp1;
+ struct cvmx_pciercx_cfg017_s cn61xx;
+ struct cvmx_pciercx_cfg017_s cn63xx;
+ struct cvmx_pciercx_cfg017_s cn63xxp1;
+ struct cvmx_pciercx_cfg017_s cn66xx;
+ struct cvmx_pciercx_cfg017_s cn68xx;
+ struct cvmx_pciercx_cfg017_s cn68xxp1;
+ struct cvmx_pciercx_cfg017_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg017 cvmx_pciercx_cfg017_t;
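+
+/**
+ * A status-decoding sketch for the PM control/status word above,
+ * assuming CVMX_PCIERCX_CFG017(port), cvmx_pcie_cfgx_read(), and
+ * cvmx_dprintf():
+ *
+ * @code
+ * cvmx_pciercx_cfg017_t cfg017;
+ * cfg017.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG017(pcie_port));
+ * cvmx_dprintf("PCIe%d: power state D%d, PME status %d\n",
+ *              pcie_port, cfg017.s.ps, cfg017.s.pmess);
+ * @endcode
+ */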
+
+/**
+ * cvmx_pcierc#_cfg020
+ *
+ * PCIE_CFG020 = Twenty-first 32-bits of PCIE type 1 config space
+ * (MSI Capability ID/
+ * MSI Next Item Pointer/
+ * MSI Control Register)
+ */
+union cvmx_pciercx_cfg020 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg020_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t pvm : 1; /**< Per-vector masking capable */
+ uint32_t m64 : 1; /**< 64-bit Address Capable, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t mme : 3; /**< Multiple Message Enabled
+ Indicates that multiple Message mode is enabled by system
+ software. The number of Messages enabled must be less than
+ or equal to the Multiple Message Capable value. */
+ uint32_t mmc : 3; /**< Multiple Message Capable, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t msien : 1; /**< MSI Enabled
+ When set, INTx must be disabled.
+ This bit must never be set, as internal-MSI is not supported in
+ RC mode. (Note that this has no effect on external MSI, which
+ will be commonly used in RC mode.) */
+ uint32_t ncp : 8; /**< Next Capability Pointer
+ Points to PCI Express Capabilities by default,
+ writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t msicid : 8; /**< MSI Capability ID */
+#else
+ uint32_t msicid : 8;
+ uint32_t ncp : 8;
+ uint32_t msien : 1;
+ uint32_t mmc : 3;
+ uint32_t mme : 3;
+ uint32_t m64 : 1;
+ uint32_t pvm : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg020_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t m64 : 1; /**< 64-bit Address Capable, writable through PESC(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t mme : 3; /**< Multiple Message Enabled
+ Indicates that multiple Message mode is enabled by system
+ software. The number of Messages enabled must be less than
+ or equal to the Multiple Message Capable value. */
+ uint32_t mmc : 3; /**< Multiple Message Capable, writable through PESC(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t msien : 1; /**< MSI Enabled
+ When set, INTx must be disabled.
+ This bit must never be set, as internal-MSI is not supported in
+ RC mode. (Note that this has no effect on external MSI, which
+ will be commonly used in RC mode.) */
+ uint32_t ncp : 8; /**< Next Capability Pointer
+ Points to PCI Express Capabilities by default,
+ writable through PESC(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t msicid : 8; /**< MSI Capability ID */
+#else
+ uint32_t msicid : 8;
+ uint32_t ncp : 8;
+ uint32_t msien : 1;
+ uint32_t mmc : 3;
+ uint32_t mme : 3;
+ uint32_t m64 : 1;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg020_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg020_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg020_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg020_s cn61xx;
+ struct cvmx_pciercx_cfg020_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg020_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg020_cn52xx cn66xx;
+ struct cvmx_pciercx_cfg020_cn52xx cn68xx;
+ struct cvmx_pciercx_cfg020_cn52xx cn68xxp1;
+ struct cvmx_pciercx_cfg020_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg020 cvmx_pciercx_cfg020_t;
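+
+/**
+ * Per the MSIEN description above, internal MSI must stay disabled in RC
+ * mode. A defensive check, assuming CVMX_PCIERCX_CFG020(port),
+ * cvmx_pcie_cfgx_read(), and cvmx_dprintf():
+ *
+ * @code
+ * cvmx_pciercx_cfg020_t cfg020;
+ * cfg020.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG020(pcie_port));
+ * if (cfg020.s.msien) // should never be set in RC mode
+ *     cvmx_dprintf("PCIe%d: internal MSI unexpectedly enabled\n", pcie_port);
+ * @endcode
+ */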
+
+/**
+ * cvmx_pcierc#_cfg021
+ *
+ * PCIE_CFG021 = Twenty-second 32-bits of PCIE type 1 config space (MSI Lower 32 Bits Address Register)
+ *
+ */
+union cvmx_pciercx_cfg021 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg021_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lmsi : 30; /**< Lower 32-bit Address */
+ uint32_t reserved_0_1 : 2;
+#else
+ uint32_t reserved_0_1 : 2;
+ uint32_t lmsi : 30;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg021_s cn52xx;
+ struct cvmx_pciercx_cfg021_s cn52xxp1;
+ struct cvmx_pciercx_cfg021_s cn56xx;
+ struct cvmx_pciercx_cfg021_s cn56xxp1;
+ struct cvmx_pciercx_cfg021_s cn61xx;
+ struct cvmx_pciercx_cfg021_s cn63xx;
+ struct cvmx_pciercx_cfg021_s cn63xxp1;
+ struct cvmx_pciercx_cfg021_s cn66xx;
+ struct cvmx_pciercx_cfg021_s cn68xx;
+ struct cvmx_pciercx_cfg021_s cn68xxp1;
+ struct cvmx_pciercx_cfg021_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg021 cvmx_pciercx_cfg021_t;
+
+/**
+ * cvmx_pcierc#_cfg022
+ *
+ * PCIE_CFG022 = Twenty-third 32-bits of PCIE type 1 config space (MSI Upper 32 bits Address Register)
+ *
+ */
+union cvmx_pciercx_cfg022 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg022_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t umsi : 32; /**< Upper 32-bit Address */
+#else
+ uint32_t umsi : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg022_s cn52xx;
+ struct cvmx_pciercx_cfg022_s cn52xxp1;
+ struct cvmx_pciercx_cfg022_s cn56xx;
+ struct cvmx_pciercx_cfg022_s cn56xxp1;
+ struct cvmx_pciercx_cfg022_s cn61xx;
+ struct cvmx_pciercx_cfg022_s cn63xx;
+ struct cvmx_pciercx_cfg022_s cn63xxp1;
+ struct cvmx_pciercx_cfg022_s cn66xx;
+ struct cvmx_pciercx_cfg022_s cn68xx;
+ struct cvmx_pciercx_cfg022_s cn68xxp1;
+ struct cvmx_pciercx_cfg022_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg022 cvmx_pciercx_cfg022_t;
+
+/**
+ * cvmx_pcierc#_cfg023
+ *
+ * PCIE_CFG023 = Twenty-fourth 32-bits of PCIE type 1 config space (MSI Data Register)
+ *
+ */
+union cvmx_pciercx_cfg023 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg023_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t msimd : 16; /**< MSI Data
+ Pattern assigned by system software, bits [4:0] are OR-ed with
+ MSI_VECTOR to generate 32 MSI Messages per function. */
+#else
+ uint32_t msimd : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg023_s cn52xx;
+ struct cvmx_pciercx_cfg023_s cn52xxp1;
+ struct cvmx_pciercx_cfg023_s cn56xx;
+ struct cvmx_pciercx_cfg023_s cn56xxp1;
+ struct cvmx_pciercx_cfg023_s cn61xx;
+ struct cvmx_pciercx_cfg023_s cn63xx;
+ struct cvmx_pciercx_cfg023_s cn63xxp1;
+ struct cvmx_pciercx_cfg023_s cn66xx;
+ struct cvmx_pciercx_cfg023_s cn68xx;
+ struct cvmx_pciercx_cfg023_s cn68xxp1;
+ struct cvmx_pciercx_cfg023_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg023 cvmx_pciercx_cfg023_t;
+
+/**
+ * cvmx_pcierc#_cfg028
+ *
+ * PCIE_CFG028 = Twenty-ninth 32-bits of PCIE type 1 config space
+ * (PCI Express Capabilities List Register/
+ * PCI Express Capabilities Register)
+ */
+union cvmx_pciercx_cfg028 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg028_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_30_31 : 2;
+ uint32_t imn : 5; /**< Interrupt Message Number
+ Updated by hardware, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t si : 1; /**< Slot Implemented
+ This bit is writable through PEM(0..1)_CFG_WR.
+ However, it must be 0 for an
+ Endpoint device. Therefore, the application must not write a
+ 1 to this bit. */
+ uint32_t dpt : 4; /**< Device Port Type */
+ uint32_t pciecv : 4; /**< PCI Express Capability Version */
+ uint32_t ncp : 8; /**< Next Capability Pointer
+ Writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t pcieid : 8; /**< PCIE Capability ID */
+#else
+ uint32_t pcieid : 8;
+ uint32_t ncp : 8;
+ uint32_t pciecv : 4;
+ uint32_t dpt : 4;
+ uint32_t si : 1;
+ uint32_t imn : 5;
+ uint32_t reserved_30_31 : 2;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg028_s cn52xx;
+ struct cvmx_pciercx_cfg028_s cn52xxp1;
+ struct cvmx_pciercx_cfg028_s cn56xx;
+ struct cvmx_pciercx_cfg028_s cn56xxp1;
+ struct cvmx_pciercx_cfg028_s cn61xx;
+ struct cvmx_pciercx_cfg028_s cn63xx;
+ struct cvmx_pciercx_cfg028_s cn63xxp1;
+ struct cvmx_pciercx_cfg028_s cn66xx;
+ struct cvmx_pciercx_cfg028_s cn68xx;
+ struct cvmx_pciercx_cfg028_s cn68xxp1;
+ struct cvmx_pciercx_cfg028_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg028 cvmx_pciercx_cfg028_t;
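+
+/**
+ * A sanity-check sketch for the capability header above; 0x4 is the
+ * standard Device/Port Type encoding for a Root Port. Assumes
+ * CVMX_PCIERCX_CFG028(port), cvmx_pcie_cfgx_read(), and cvmx_dprintf():
+ *
+ * @code
+ * cvmx_pciercx_cfg028_t cfg028;
+ * cfg028.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG028(pcie_port));
+ * if (cfg028.s.dpt != 0x4) // 0x4: Root Port of a PCI Express RC
+ *     cvmx_dprintf("PCIe%d: unexpected port type %u\n",
+ *                  pcie_port, (unsigned)cfg028.s.dpt);
+ * @endcode
+ */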
+
+/**
+ * cvmx_pcierc#_cfg029
+ *
+ * PCIE_CFG029 = Thirtieth 32-bits of PCIE type 1 config space (Device Capabilities Register)
+ *
+ */
+union cvmx_pciercx_cfg029 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg029_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_28_31 : 4;
+ uint32_t cspls : 2; /**< Captured Slot Power Limit Scale
+ Not applicable for RC port, upstream port only. */
+ uint32_t csplv : 8; /**< Captured Slot Power Limit Value
+ Not applicable for RC port, upstream port only. */
+ uint32_t reserved_16_17 : 2;
+ uint32_t rber : 1; /**< Role-Based Error Reporting, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t el1al : 3; /**< Endpoint L1 Acceptable Latency, writable through PEM(0..1)_CFG_WR
+ Must be 0x0 for non-endpoint devices. */
+ uint32_t el0al : 3; /**< Endpoint L0s Acceptable Latency, writable through PEM(0..1)_CFG_WR
+ Must be 0x0 for non-endpoint devices. */
+ uint32_t etfs : 1; /**< Extended Tag Field Supported
+ This bit is writable through PEM(0..1)_CFG_WR.
+ However, the application
+ must not write a 1 to this bit. */
+ uint32_t pfs : 2; /**< Phantom Function Supported
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, Phantom
+ Function is not supported. Therefore, the application must not
+ write any value other than 0x0 to this field. */
+ uint32_t mpss : 3; /**< Max_Payload_Size Supported, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+#else
+ uint32_t mpss : 3;
+ uint32_t pfs : 2;
+ uint32_t etfs : 1;
+ uint32_t el0al : 3;
+ uint32_t el1al : 3;
+ uint32_t reserved_12_14 : 3;
+ uint32_t rber : 1;
+ uint32_t reserved_16_17 : 2;
+ uint32_t csplv : 8;
+ uint32_t cspls : 2;
+ uint32_t reserved_28_31 : 4;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg029_s cn52xx;
+ struct cvmx_pciercx_cfg029_s cn52xxp1;
+ struct cvmx_pciercx_cfg029_s cn56xx;
+ struct cvmx_pciercx_cfg029_s cn56xxp1;
+ struct cvmx_pciercx_cfg029_s cn61xx;
+ struct cvmx_pciercx_cfg029_s cn63xx;
+ struct cvmx_pciercx_cfg029_s cn63xxp1;
+ struct cvmx_pciercx_cfg029_s cn66xx;
+ struct cvmx_pciercx_cfg029_s cn68xx;
+ struct cvmx_pciercx_cfg029_s cn68xxp1;
+ struct cvmx_pciercx_cfg029_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg029 cvmx_pciercx_cfg029_t;
+
+/**
+ * cvmx_pcierc#_cfg030
+ *
+ * PCIE_CFG030 = Thirty-first 32-bits of PCIE type 1 config space
+ * (Device Control Register/Device Status Register)
+ */
+union cvmx_pciercx_cfg030 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg030_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_22_31 : 10;
+ uint32_t tp : 1; /**< Transaction Pending
+ Hard-wired to 0. */
+ uint32_t ap_d : 1; /**< Aux Power Detected
+ Set to 1 if Aux power detected. */
+ uint32_t ur_d : 1; /**< Unsupported Request Detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ UR_D occurs when we receive something we don't support.
+ Unsupported requests are Nonfatal errors, so UR_D should
+ cause NFE_D. Receiving a vendor defined message should
+ cause an unsupported request. */
+ uint32_t fe_d : 1; /**< Fatal Error Detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ FE_D is set if we receive any of the errors in PCIE_CFG066 that
+ have a severity set to Fatal. Malformed TLPs generally fit
+ into this category. */
+ uint32_t nfe_d : 1; /**< Non-Fatal Error detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ NFE_D is set if we receive any of the errors in PCIE_CFG066
+ that have a severity set to Nonfatal and do NOT meet the
+ Advisory Nonfatal criteria, which most poisoned TLPs should be. */
+ uint32_t ce_d : 1; /**< Correctable Error Detected
+ Errors are logged in this register regardless of whether
+ error reporting is enabled in the Device Control register.
+ CE_D is set if we receive any of the errors in PCIE_CFG068,
+ for example a Replay Timer Timeout. It can also be set if
+ we get any of the errors in PCIE_CFG066 that have a severity
+ set to Nonfatal and meet the Advisory Nonfatal criteria,
+ which most ECRC errors should be. */
+ uint32_t reserved_15_15 : 1;
+ uint32_t mrrs : 3; /**< Max Read Request Size
+ 0 = 128B
+ 1 = 256B
+ 2 = 512B
+ 3 = 1024B
+ 4 = 2048B
+ 5 = 4096B
+ Note: SLI_S2M_PORT#_CTL[MRRS] and DPI_SLI_PRT#_CFG[MRRS]
+ must also be set properly, and must
+ not exceed the desired max read request size. */
+ uint32_t ns_en : 1; /**< Enable No Snoop */
+ uint32_t ap_en : 1; /**< AUX Power PM Enable */
+ uint32_t pf_en : 1; /**< Phantom Function Enable
+ This bit should never be set - OCTEON requests never use
+ phantom functions. */
+ uint32_t etf_en : 1; /**< Extended Tag Field Enable
+ This bit should never be set - OCTEON requests never use
+ extended tags. */
+ uint32_t mps : 3; /**< Max Payload Size
+ Legal values:
+ 0 = 128B
+ 1 = 256B
+ Larger sizes not supported.
+ Note: Both PCI Express Ports must be set to the same value
+ for Peer-to-Peer to function properly.
+ Note: DPI_SLI_PRT#_CFG[MPS] must also be set to the same
+ value for proper functionality. */
+ uint32_t ro_en : 1; /**< Enable Relaxed Ordering
+ This bit is not used. */
+ uint32_t ur_en : 1; /**< Unsupported Request Reporting Enable */
+ uint32_t fe_en : 1; /**< Fatal Error Reporting Enable */
+ uint32_t nfe_en : 1; /**< Non-Fatal Error Reporting Enable */
+ uint32_t ce_en : 1; /**< Correctable Error Reporting Enable */
+#else
+ uint32_t ce_en : 1;
+ uint32_t nfe_en : 1;
+ uint32_t fe_en : 1;
+ uint32_t ur_en : 1;
+ uint32_t ro_en : 1;
+ uint32_t mps : 3;
+ uint32_t etf_en : 1;
+ uint32_t pf_en : 1;
+ uint32_t ap_en : 1;
+ uint32_t ns_en : 1;
+ uint32_t mrrs : 3;
+ uint32_t reserved_15_15 : 1;
+ uint32_t ce_d : 1;
+ uint32_t nfe_d : 1;
+ uint32_t fe_d : 1;
+ uint32_t ur_d : 1;
+ uint32_t ap_d : 1;
+ uint32_t tp : 1;
+ uint32_t reserved_22_31 : 10;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg030_s cn52xx;
+ struct cvmx_pciercx_cfg030_s cn52xxp1;
+ struct cvmx_pciercx_cfg030_s cn56xx;
+ struct cvmx_pciercx_cfg030_s cn56xxp1;
+ struct cvmx_pciercx_cfg030_s cn61xx;
+ struct cvmx_pciercx_cfg030_s cn63xx;
+ struct cvmx_pciercx_cfg030_s cn63xxp1;
+ struct cvmx_pciercx_cfg030_s cn66xx;
+ struct cvmx_pciercx_cfg030_s cn68xx;
+ struct cvmx_pciercx_cfg030_s cn68xxp1;
+ struct cvmx_pciercx_cfg030_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg030 cvmx_pciercx_cfg030_t;
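+
+/**
+ * A device-control sketch using the MPS/MRRS encodings above (0 = 128B,
+ * 1 = 256B, ...); the matching SLI_S2M_PORT#_CTL[MRRS] and
+ * DPI_SLI_PRT#_CFG[MPS]/[MRRS] programming noted in the field
+ * descriptions is not shown. Assumes CVMX_PCIERCX_CFG030(port) and the
+ * cvmx_pcie_cfgx_* accessors:
+ *
+ * @code
+ * cvmx_pciercx_cfg030_t cfg030;
+ * cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
+ * cfg030.s.mps = 0;    // 128B max payload (only 0 or 1 are legal here)
+ * cfg030.s.mrrs = 0;   // 128B max read request size
+ * cfg030.s.ce_en = 1;  // enable correctable error reporting
+ * cfg030.s.nfe_en = 1; // enable non-fatal error reporting
+ * cfg030.s.fe_en = 1;  // enable fatal error reporting
+ * cfg030.s.ur_en = 1;  // enable unsupported request reporting
+ * cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), cfg030.u32);
+ * @endcode
+ */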
+
+/**
+ * cvmx_pcierc#_cfg031
+ *
+ * PCIE_CFG031 = Thirty-second 32-bits of PCIE type 1 config space
+ * (Link Capabilities Register)
+ */
+union cvmx_pciercx_cfg031 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg031_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pnum : 8; /**< Port Number
+ Writable through PEM(0..1)_CFG_WR; however, the application
+ must not change this field. */
+ uint32_t reserved_23_23 : 1;
+ uint32_t aspm : 1; /**< ASPM Optionality Compliance */
+ uint32_t lbnc : 1; /**< Link Bandwidth Notification Capability
+ Set to 1 for Root Complex devices, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t dllarc : 1; /**< Data Link Layer Active Reporting Capable
+ Set to 1 for Root Complex devices and 0 for Endpoint devices. */
+ uint32_t sderc : 1; /**< Surprise Down Error Reporting Capable
+ Not supported, hardwired to 0x0. */
+ uint32_t cpm : 1; /**< Clock Power Management
+ The default value is the value you specify during hardware
+ configuration, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l1el : 3; /**< L1 Exit Latency
+ The default value is the value you specify during hardware
+ configuration, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l0el : 3; /**< L0s Exit Latency
+ The default value is the value you specify during hardware
+ configuration, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t aslpms : 2; /**< Active State Link PM Support
+ The default value is the value you specify during hardware
+ configuration, writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t mlw : 6; /**< Maximum Link Width
+ The default value is the value you specify during hardware
+ configuration (x1 or x2) writable through PEM(0..1)_CFG_WR. */
+ uint32_t mls : 4; /**< Maximum Link Speed
+ The reset value of this field is controlled by a value sent from
+ the lsb of the MIO_QLM#_SPD register.
+ qlm#_spd[0] RST_VALUE NOTE
+ 1 0001b 2.5 GHz supported
+ 0 0010b 5.0 GHz and 2.5 GHz supported
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t mls : 4;
+ uint32_t mlw : 6;
+ uint32_t aslpms : 2;
+ uint32_t l0el : 3;
+ uint32_t l1el : 3;
+ uint32_t cpm : 1;
+ uint32_t sderc : 1;
+ uint32_t dllarc : 1;
+ uint32_t lbnc : 1;
+ uint32_t aspm : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t pnum : 8;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg031_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pnum : 8; /**< Port Number, writable through PESC(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t reserved_22_23 : 2;
+ uint32_t lbnc : 1; /**< Link Bandwidth Notification Capability */
+ uint32_t dllarc : 1; /**< Data Link Layer Active Reporting Capable
+ Set to 1 for Root Complex devices and 0 for Endpoint devices. */
+ uint32_t sderc : 1; /**< Surprise Down Error Reporting Capable
+ Not supported, hardwired to 0x0. */
+ uint32_t cpm : 1; /**< Clock Power Management
+ The default value is the value you specify during hardware
+ configuration, writable through PESC(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l1el : 3; /**< L1 Exit Latency
+ The default value is the value you specify during hardware
+ configuration, writable through PESC(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l0el : 3; /**< L0s Exit Latency
+ The default value is the value you specify during hardware
+ configuration, writable through PESC(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t aslpms : 2; /**< Active State Link PM Support
+ The default value is the value you specify during hardware
+ configuration, writable through PESC(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t mlw : 6; /**< Maximum Link Width
+ The default value is the value you specify during hardware
+ configuration (x1, x4, x8, or x16), writable through PESC(0..1)_CFG_WR.
+ The SW needs to set this to 0x4 or 0x2 depending on the max
+ number of lanes (if QLM_CFG == 1, set to 0x4; otherwise 0x2). */
+ uint32_t mls : 4; /**< Maximum Link Speed
+ Default value is 0x1 for 2.5 Gbps Link.
+ This field is writable through PESC(0..1)_CFG_WR.
+ However, 0x1 is the
+ only supported value. Therefore, the application must not write
+ any value other than 0x1 to this field. */
+#else
+ uint32_t mls : 4;
+ uint32_t mlw : 6;
+ uint32_t aslpms : 2;
+ uint32_t l0el : 3;
+ uint32_t l1el : 3;
+ uint32_t cpm : 1;
+ uint32_t sderc : 1;
+ uint32_t dllarc : 1;
+ uint32_t lbnc : 1;
+ uint32_t reserved_22_23 : 2;
+ uint32_t pnum : 8;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg031_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg031_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg031_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg031_s cn61xx;
+ struct cvmx_pciercx_cfg031_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg031_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg031_s cn66xx;
+ struct cvmx_pciercx_cfg031_s cn68xx;
+ struct cvmx_pciercx_cfg031_cn52xx cn68xxp1;
+ struct cvmx_pciercx_cfg031_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg031 cvmx_pciercx_cfg031_t;
+
+/**
+ * cvmx_pcierc#_cfg032
+ *
+ * PCIE_CFG032 = Thirty-third 32-bits of PCIE type 1 config space
+ * (Link Control Register/Link Status Register)
+ */
+union cvmx_pciercx_cfg032 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg032_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lab : 1; /**< Link Autonomous Bandwidth Status
+ This bit is set to indicate that hardware has autonomously
+ changed Link speed or width, without the Port transitioning
+ through DL_Down status, for reasons other than to attempt
+ to correct unreliable Link operation. */
+ uint32_t lbm : 1; /**< Link Bandwidth Management Status
+ This bit is set to indicate either of the following has
+ occurred without the Port transitioning through DL_DOWN status:
+ o A link retraining has completed following a write of 1b to
+ the Retrain Link bit
+ o Hardware has changed the Link speed or width to attempt to
+ correct unreliable Link operation, either through an LTSSM
+ timeout or a higher-level process. This bit must be set if
+ the Physical Layer reports a speed or width change was
+ initiated by the Downstream component that was not
+ indicated as an autonomous change. */
+ uint32_t dlla : 1; /**< Data Link Layer Active */
+ uint32_t scc : 1; /**< Slot Clock Configuration
+ Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector. The default
+ value is the value you select during hardware configuration,
+ writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t lt : 1; /**< Link Training */
+ uint32_t reserved_26_26 : 1;
+ uint32_t nlw : 6; /**< Negotiated Link Width
+ Set automatically by hardware after Link initialization.
+ Value is undefined when link is not up. */
+ uint32_t ls : 4; /**< Link Speed
+ 0001 == The negotiated Link speed: 2.5 Gbps
+ 0010 == The negotiated Link speed: 5.0 Gbps
+ 0100 == The negotiated Link speed: 8.0 Gbps (Not Supported) */
+ uint32_t reserved_12_15 : 4;
+ uint32_t lab_int_enb : 1; /**< Link Autonomous Bandwidth Interrupt Enable
+ When set, enables the generation of an interrupt to indicate
+ that the Link Autonomous Bandwidth Status bit has been set. */
+ uint32_t lbm_int_enb : 1; /**< Link Bandwidth Management Interrupt Enable
+ When set, enables the generation of an interrupt to indicate
+ that the Link Bandwidth Management Status bit has been set. */
+ uint32_t hawd : 1; /**< Hardware Autonomous Width Disable
+ (Not Supported) */
+ uint32_t ecpm : 1; /**< Enable Clock Power Management
+ Hardwired to 0 if Clock Power Management is disabled in
+ the Link Capabilities register. */
+ uint32_t es : 1; /**< Extended Synch */
+ uint32_t ccc : 1; /**< Common Clock Configuration */
+ uint32_t rl : 1; /**< Retrain Link */
+ uint32_t ld : 1; /**< Link Disable */
+ uint32_t rcb : 1; /**< Read Completion Boundary (RCB), writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field
+ because an RCB of 64 bytes is not supported. */
+ uint32_t reserved_2_2 : 1;
+ uint32_t aslpc : 2; /**< Active State Link PM Control */
+#else
+ uint32_t aslpc : 2;
+ uint32_t reserved_2_2 : 1;
+ uint32_t rcb : 1;
+ uint32_t ld : 1;
+ uint32_t rl : 1;
+ uint32_t ccc : 1;
+ uint32_t es : 1;
+ uint32_t ecpm : 1;
+ uint32_t hawd : 1;
+ uint32_t lbm_int_enb : 1;
+ uint32_t lab_int_enb : 1;
+ uint32_t reserved_12_15 : 4;
+ uint32_t ls : 4;
+ uint32_t nlw : 6;
+ uint32_t reserved_26_26 : 1;
+ uint32_t lt : 1;
+ uint32_t scc : 1;
+ uint32_t dlla : 1;
+ uint32_t lbm : 1;
+ uint32_t lab : 1;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg032_s cn52xx;
+ struct cvmx_pciercx_cfg032_s cn52xxp1;
+ struct cvmx_pciercx_cfg032_s cn56xx;
+ struct cvmx_pciercx_cfg032_s cn56xxp1;
+ struct cvmx_pciercx_cfg032_s cn61xx;
+ struct cvmx_pciercx_cfg032_s cn63xx;
+ struct cvmx_pciercx_cfg032_s cn63xxp1;
+ struct cvmx_pciercx_cfg032_s cn66xx;
+ struct cvmx_pciercx_cfg032_s cn68xx;
+ struct cvmx_pciercx_cfg032_s cn68xxp1;
+ struct cvmx_pciercx_cfg032_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg032 cvmx_pciercx_cfg032_t;
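+
+/**
+ * A link-up polling sketch for the status half above, assuming
+ * CVMX_PCIERCX_CFG032(port), cvmx_pcie_cfgx_read(), cvmx_wait_usec(),
+ * and cvmx_dprintf(); the retry bound is illustrative:
+ *
+ * @code
+ * cvmx_pciercx_cfg032_t cfg032;
+ * int retries = 1000; // ~1 s in 1 ms steps
+ * do {
+ *     cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ *     cvmx_wait_usec(1000);
+ * } while (!cfg032.s.dlla && --retries > 0);
+ * if (cfg032.s.dlla) // NLW/LS are only meaningful once the link is up
+ *     cvmx_dprintf("PCIe%d: link x%d, speed code 0x%x\n",
+ *                  pcie_port, cfg032.s.nlw, cfg032.s.ls);
+ * @endcode
+ */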
+
+/**
+ * cvmx_pcierc#_cfg033
+ *
+ * PCIE_CFG033 = Thirty-fourth 32-bits of PCIE type 1 config space
+ * (Slot Capabilities Register)
+ */
+union cvmx_pciercx_cfg033 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg033_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ps_num : 13; /**< Physical Slot Number, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t nccs : 1; /**< No Command Complete Support, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t emip : 1; /**< Electromechanical Interlock Present, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t sp_ls : 2; /**< Slot Power Limit Scale, writable through PEM(0..1)_CFG_WR. */
+ uint32_t sp_lv : 8; /**< Slot Power Limit Value, writable through PEM(0..1)_CFG_WR. */
+ uint32_t hp_c : 1; /**< Hot-Plug Capable, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t hp_s : 1; /**< Hot-Plug Surprise, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t pip : 1; /**< Power Indicator Present, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t aip : 1; /**< Attention Indicator Present, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t mrlsp : 1; /**< MRL Sensor Present, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t pcp : 1; /**< Power Controller Present, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+ uint32_t abp : 1; /**< Attention Button Present, writable through PEM(0..1)_CFG_WR
+ However, the application must not change this field. */
+#else
+ uint32_t abp : 1;
+ uint32_t pcp : 1;
+ uint32_t mrlsp : 1;
+ uint32_t aip : 1;
+ uint32_t pip : 1;
+ uint32_t hp_s : 1;
+ uint32_t hp_c : 1;
+ uint32_t sp_lv : 8;
+ uint32_t sp_ls : 2;
+ uint32_t emip : 1;
+ uint32_t nccs : 1;
+ uint32_t ps_num : 13;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg033_s cn52xx;
+ struct cvmx_pciercx_cfg033_s cn52xxp1;
+ struct cvmx_pciercx_cfg033_s cn56xx;
+ struct cvmx_pciercx_cfg033_s cn56xxp1;
+ struct cvmx_pciercx_cfg033_s cn61xx;
+ struct cvmx_pciercx_cfg033_s cn63xx;
+ struct cvmx_pciercx_cfg033_s cn63xxp1;
+ struct cvmx_pciercx_cfg033_s cn66xx;
+ struct cvmx_pciercx_cfg033_s cn68xx;
+ struct cvmx_pciercx_cfg033_s cn68xxp1;
+ struct cvmx_pciercx_cfg033_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg033 cvmx_pciercx_cfg033_t;
+
+/**
+ * cvmx_pcierc#_cfg034
+ *
+ * PCIE_CFG034 = Thirty-fifth 32-bits of PCIE type 1 config space
+ * (Slot Control Register/Slot Status Register)
+ */
+union cvmx_pciercx_cfg034 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg034_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t dlls_c : 1; /**< Data Link Layer State Changed */
+ uint32_t emis : 1; /**< Electromechanical Interlock Status */
+ uint32_t pds : 1; /**< Presence Detect State */
+ uint32_t mrlss : 1; /**< MRL Sensor State */
+ uint32_t ccint_d : 1; /**< Command Completed */
+ uint32_t pd_c : 1; /**< Presence Detect Changed */
+ uint32_t mrls_c : 1; /**< MRL Sensor Changed */
+ uint32_t pf_d : 1; /**< Power Fault Detected */
+ uint32_t abp_d : 1; /**< Attention Button Pressed */
+ uint32_t reserved_13_15 : 3;
+ uint32_t dlls_en : 1; /**< Data Link Layer State Changed Enable */
+ uint32_t emic : 1; /**< Electromechanical Interlock Control */
+ uint32_t pcc : 1; /**< Power Controller Control */
+ uint32_t pic : 2; /**< Power Indicator Control */
+ uint32_t aic : 2; /**< Attention Indicator Control */
+ uint32_t hpint_en : 1; /**< Hot-Plug Interrupt Enable */
+ uint32_t ccint_en : 1; /**< Command Completed Interrupt Enable */
+ uint32_t pd_en : 1; /**< Presence Detect Changed Enable */
+ uint32_t mrls_en : 1; /**< MRL Sensor Changed Enable */
+ uint32_t pf_en : 1; /**< Power Fault Detected Enable */
+ uint32_t abp_en : 1; /**< Attention Button Pressed Enable */
+#else
+ uint32_t abp_en : 1;
+ uint32_t pf_en : 1;
+ uint32_t mrls_en : 1;
+ uint32_t pd_en : 1;
+ uint32_t ccint_en : 1;
+ uint32_t hpint_en : 1;
+ uint32_t aic : 2;
+ uint32_t pic : 2;
+ uint32_t pcc : 1;
+ uint32_t emic : 1;
+ uint32_t dlls_en : 1;
+ uint32_t reserved_13_15 : 3;
+ uint32_t abp_d : 1;
+ uint32_t pf_d : 1;
+ uint32_t mrls_c : 1;
+ uint32_t pd_c : 1;
+ uint32_t ccint_d : 1;
+ uint32_t mrlss : 1;
+ uint32_t pds : 1;
+ uint32_t emis : 1;
+ uint32_t dlls_c : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg034_s cn52xx;
+ struct cvmx_pciercx_cfg034_s cn52xxp1;
+ struct cvmx_pciercx_cfg034_s cn56xx;
+ struct cvmx_pciercx_cfg034_s cn56xxp1;
+ struct cvmx_pciercx_cfg034_s cn61xx;
+ struct cvmx_pciercx_cfg034_s cn63xx;
+ struct cvmx_pciercx_cfg034_s cn63xxp1;
+ struct cvmx_pciercx_cfg034_s cn66xx;
+ struct cvmx_pciercx_cfg034_s cn68xx;
+ struct cvmx_pciercx_cfg034_s cn68xxp1;
+ struct cvmx_pciercx_cfg034_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg034 cvmx_pciercx_cfg034_t;
+
+/**
+ * cvmx_pcierc#_cfg035
+ *
+ * PCIE_CFG035 = Thirty-sixth 32-bits of PCIE type 1 config space
+ * (Root Control Register/Root Capabilities Register)
+ */
+union cvmx_pciercx_cfg035 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg035_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_17_31 : 15;
+ uint32_t crssv : 1; /**< CRS Software Visibility
+ Not supported, hardwired to 0x0. */
+ uint32_t reserved_5_15 : 11;
+ uint32_t crssve : 1; /**< CRS Software Visibility Enable
+ Not supported, hardwired to 0x0. */
+ uint32_t pmeie : 1; /**< PME Interrupt Enable */
+ uint32_t sefee : 1; /**< System Error on Fatal Error Enable */
+ uint32_t senfee : 1; /**< System Error on Non-fatal Error Enable */
+ uint32_t secee : 1; /**< System Error on Correctable Error Enable */
+#else
+ uint32_t secee : 1;
+ uint32_t senfee : 1;
+ uint32_t sefee : 1;
+ uint32_t pmeie : 1;
+ uint32_t crssve : 1;
+ uint32_t reserved_5_15 : 11;
+ uint32_t crssv : 1;
+ uint32_t reserved_17_31 : 15;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg035_s cn52xx;
+ struct cvmx_pciercx_cfg035_s cn52xxp1;
+ struct cvmx_pciercx_cfg035_s cn56xx;
+ struct cvmx_pciercx_cfg035_s cn56xxp1;
+ struct cvmx_pciercx_cfg035_s cn61xx;
+ struct cvmx_pciercx_cfg035_s cn63xx;
+ struct cvmx_pciercx_cfg035_s cn63xxp1;
+ struct cvmx_pciercx_cfg035_s cn66xx;
+ struct cvmx_pciercx_cfg035_s cn68xx;
+ struct cvmx_pciercx_cfg035_s cn68xxp1;
+ struct cvmx_pciercx_cfg035_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg035 cvmx_pciercx_cfg035_t;
+
+/**
+ * cvmx_pcierc#_cfg036
+ *
+ * PCIE_CFG036 = Thirty-seventh 32-bits of PCIE type 1 config space
+ * (Root Status Register)
+ */
+union cvmx_pciercx_cfg036 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg036_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_18_31 : 14;
+ uint32_t pme_pend : 1; /**< PME Pending */
+ uint32_t pme_stat : 1; /**< PME Status */
+ uint32_t pme_rid : 16; /**< PME Requester ID */
+#else
+ uint32_t pme_rid : 16;
+ uint32_t pme_stat : 1;
+ uint32_t pme_pend : 1;
+ uint32_t reserved_18_31 : 14;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg036_s cn52xx;
+ struct cvmx_pciercx_cfg036_s cn52xxp1;
+ struct cvmx_pciercx_cfg036_s cn56xx;
+ struct cvmx_pciercx_cfg036_s cn56xxp1;
+ struct cvmx_pciercx_cfg036_s cn61xx;
+ struct cvmx_pciercx_cfg036_s cn63xx;
+ struct cvmx_pciercx_cfg036_s cn63xxp1;
+ struct cvmx_pciercx_cfg036_s cn66xx;
+ struct cvmx_pciercx_cfg036_s cn68xx;
+ struct cvmx_pciercx_cfg036_s cn68xxp1;
+ struct cvmx_pciercx_cfg036_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg036 cvmx_pciercx_cfg036_t;
+
+/**
+ * cvmx_pcierc#_cfg037
+ *
+ * PCIE_CFG037 = Thirty-eighth 32-bits of PCIE type 1 config space
+ * (Device Capabilities 2 Register)
+ */
+union cvmx_pciercx_cfg037 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg037_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t obffs : 2; /**< Optimized Buffer Flush Fill (OBFF) Supported
+ (Not Supported) */
+ uint32_t reserved_12_17 : 6;
+ uint32_t ltrs : 1; /**< Latency Tolerance Reporting (LTR) Mechanism Supported
+ (Not Supported) */
+ uint32_t noroprpr : 1; /**< No RO-enabled PR-PR Passing
+ When set, the routing element never carries out the passing
+ permitted in the Relaxed Ordering Model. */
+ uint32_t atom128s : 1; /**< 128-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom64s : 1; /**< 64-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom32s : 1; /**< 32-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom_ops : 1; /**< AtomicOp Routing Supported
+ (Not Supported) */
+ uint32_t reserved_5_5 : 1;
+ uint32_t ctds : 1; /**< Completion Timeout Disable Supported */
+ uint32_t ctrs : 4; /**< Completion Timeout Ranges Supported */
+#else
+ uint32_t ctrs : 4;
+ uint32_t ctds : 1;
+ uint32_t reserved_5_5 : 1;
+ uint32_t atom_ops : 1;
+ uint32_t atom32s : 1;
+ uint32_t atom64s : 1;
+ uint32_t atom128s : 1;
+ uint32_t noroprpr : 1;
+ uint32_t ltrs : 1;
+ uint32_t reserved_12_17 : 6;
+ uint32_t obffs : 2;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg037_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_5_31 : 27;
+ uint32_t ctds : 1; /**< Completion Timeout Disable Supported */
+ uint32_t ctrs : 4; /**< Completion Timeout Ranges Supported
+ A value of 0 indicates that Completion Timeout Programming
+ is not supported; the completion timeout is 16.7 ms. */
+#else
+ uint32_t ctrs : 4;
+ uint32_t ctds : 1;
+ uint32_t reserved_5_31 : 27;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg037_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg037_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg037_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg037_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_14_31 : 18;
+ uint32_t tph : 2; /**< TPH Completer Supported
+ (Not Supported) */
+ uint32_t reserved_11_11 : 1;
+ uint32_t noroprpr : 1; /**< No RO-enabled PR-PR Passing
+ When set, the routing element never carries out the passing
+ permitted in the Relaxed Ordering Model. */
+ uint32_t atom128s : 1; /**< 128-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom64s : 1; /**< 64-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom32s : 1; /**< 32-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom_ops : 1; /**< AtomicOp Routing Supported
+ (Not Supported) */
+ uint32_t ari_fw : 1; /**< ARI Forwarding Supported
+ (Not Supported) */
+ uint32_t ctds : 1; /**< Completion Timeout Disable Supported */
+ uint32_t ctrs : 4; /**< Completion Timeout Ranges Supported */
+#else
+ uint32_t ctrs : 4;
+ uint32_t ctds : 1;
+ uint32_t ari_fw : 1;
+ uint32_t atom_ops : 1;
+ uint32_t atom32s : 1;
+ uint32_t atom64s : 1;
+ uint32_t atom128s : 1;
+ uint32_t noroprpr : 1;
+ uint32_t reserved_11_11 : 1;
+ uint32_t tph : 2;
+ uint32_t reserved_14_31 : 18;
+#endif
+ } cn61xx;
+ struct cvmx_pciercx_cfg037_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg037_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg037_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_14_31 : 18;
+ uint32_t tph : 2; /**< TPH Completer Supported
+ (Not Supported) */
+ uint32_t reserved_11_11 : 1;
+ uint32_t noroprpr : 1; /**< No RO-enabled PR-PR Passing
+ When set, the routing element never carries out the passing
+ permitted in the Relaxed Ordering Model. */
+ uint32_t atom128s : 1; /**< 128-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom64s : 1; /**< 64-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom32s : 1; /**< 32-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom_ops : 1; /**< AtomicOp Routing Supported
+ (Not Supported) */
+ uint32_t ari : 1; /**< Alternate Routing ID Forwarding Supported
+ (Not Supported) */
+ uint32_t ctds : 1; /**< Completion Timeout Disable Supported */
+ uint32_t ctrs : 4; /**< Completion Timeout Ranges Supported */
+#else
+ uint32_t ctrs : 4;
+ uint32_t ctds : 1;
+ uint32_t ari : 1;
+ uint32_t atom_ops : 1;
+ uint32_t atom32s : 1;
+ uint32_t atom64s : 1;
+ uint32_t atom128s : 1;
+ uint32_t noroprpr : 1;
+ uint32_t reserved_11_11 : 1;
+ uint32_t tph : 2;
+ uint32_t reserved_14_31 : 18;
+#endif
+ } cn66xx;
+ struct cvmx_pciercx_cfg037_cn66xx cn68xx;
+ struct cvmx_pciercx_cfg037_cn66xx cn68xxp1;
+ struct cvmx_pciercx_cfg037_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t obffs : 2; /**< Optimized Buffer Flush Fill (OBFF) Supported
+ (Not Supported) */
+ uint32_t reserved_14_17 : 4;
+ uint32_t tphs : 2; /**< TPH Completer Supported
+ (Not Supported) */
+ uint32_t ltrs : 1; /**< Latency Tolerance Reporting (LTR) Mechanism Supported
+ (Not Supported) */
+ uint32_t noroprpr : 1; /**< No RO-enabled PR-PR Passing
+ When set, the routing element never carries out the passing
+ permitted in the Relaxed Ordering Model. */
+ uint32_t atom128s : 1; /**< 128-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom64s : 1; /**< 64-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom32s : 1; /**< 32-bit AtomicOp Supported
+ (Not Supported) */
+ uint32_t atom_ops : 1; /**< AtomicOp Routing Supported
+ (Not Supported) */
+ uint32_t ari_fw : 1; /**< ARI Forwarding Supported
+ (Not Supported) */
+ uint32_t ctds : 1; /**< Completion Timeout Disable Supported */
+ uint32_t ctrs : 4; /**< Completion Timeout Ranges Supported */
+#else
+ uint32_t ctrs : 4;
+ uint32_t ctds : 1;
+ uint32_t ari_fw : 1;
+ uint32_t atom_ops : 1;
+ uint32_t atom32s : 1;
+ uint32_t atom64s : 1;
+ uint32_t atom128s : 1;
+ uint32_t noroprpr : 1;
+ uint32_t ltrs : 1;
+ uint32_t tphs : 2;
+ uint32_t reserved_14_17 : 4;
+ uint32_t obffs : 2;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } cnf71xx;
+};
+typedef union cvmx_pciercx_cfg037 cvmx_pciercx_cfg037_t;
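+
+/* Illustrative sketch, not part of the original SDK file: reading the
+ * Device Capabilities 2 register through this union. The helper name is
+ * hypothetical; it assumes the cvmx_pcie_cfgx_read() accessor from
+ * cvmx-pcie.h and the CVMX_PCIERCX_CFG037() address macro defined
+ * elsewhere in this header. */
+static inline int cvmx_pciercx_cfg037_has_ct_disable(int pcie_port)
+{
+    cvmx_pciercx_cfg037_t cfg037;
+    /* Read the raw 32-bit value, then decode it via the bit-field view. */
+    cfg037.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG037(pcie_port));
+    return cfg037.s.ctds; /* Completion Timeout Disable Supported */
+}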
+
+/**
+ * cvmx_pcierc#_cfg038
+ *
+ * PCIE_CFG038 = Thirty-ninth 32-bits of PCIE type 1 config space
+ * (Device Control 2 Register)
+ */
+union cvmx_pciercx_cfg038 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg038_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_15_31 : 17;
+ uint32_t obffe : 2; /**< Optimized Buffer Flush Fill (OBFF) Enable
+ (Not Supported) */
+ uint32_t reserved_11_12 : 2;
+ uint32_t ltre : 1; /**< Latency Tolerance Reporting (LTR) Mechanism Enable
+ (Not Supported) */
+ uint32_t id0_cp : 1; /**< ID Based Ordering Completion Enable
+ (Not Supported) */
+ uint32_t id0_rq : 1; /**< ID Based Ordering Request Enable
+ (Not Supported) */
+ uint32_t atom_op_eb : 1; /**< AtomicOp Egress Blocking
+ (Not Supported) */
+ uint32_t atom_op : 1; /**< AtomicOp Requester Enable
+ (Not Supported) */
+ uint32_t ari : 1; /**< Alternate Routing ID Forwarding Supported
+ (Not Supported) */
+ uint32_t ctd : 1; /**< Completion Timeout Disable */
+ uint32_t ctv : 4; /**< Completion Timeout Value
+ o 0000b Default range: 16 ms to 55 ms
+ o 0001b 50 us to 100 us
+ o 0010b 1 ms to 10 ms
+ o 0101b 16 ms to 55 ms
+ o 0110b 65 ms to 210 ms
+ o 1001b 260 ms to 900 ms
+ o 1010b 1 s to 3.5 s
+ o 1101b 4 s to 13 s
+ o 1110b 17 s to 64 s
+ Values not defined are reserved */
+#else
+ uint32_t ctv : 4;
+ uint32_t ctd : 1;
+ uint32_t ari : 1;
+ uint32_t atom_op : 1;
+ uint32_t atom_op_eb : 1;
+ uint32_t id0_rq : 1;
+ uint32_t id0_cp : 1;
+ uint32_t ltre : 1;
+ uint32_t reserved_11_12 : 2;
+ uint32_t obffe : 2;
+ uint32_t reserved_15_31 : 17;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg038_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_5_31 : 27;
+ uint32_t ctd : 1; /**< Completion Timeout Disable */
+ uint32_t ctv : 4; /**< Completion Timeout Value
+ Completion Timeout Programming is not supported;
+ the completion timeout is fixed at 16.7 ms. */
+#else
+ uint32_t ctv : 4;
+ uint32_t ctd : 1;
+ uint32_t reserved_5_31 : 27;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg038_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg038_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg038_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg038_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_10_31 : 22;
+ uint32_t id0_cp : 1; /**< ID Based Ordering Completion Enable
+ (Not Supported) */
+ uint32_t id0_rq : 1; /**< ID Based Ordering Request Enable
+ (Not Supported) */
+ uint32_t atom_op_eb : 1; /**< AtomicOp Egress Blocking
+ (Not Supported) */
+ uint32_t atom_op : 1; /**< AtomicOp Requester Enable
+ (Not Supported) */
+ uint32_t ari : 1; /**< Alternate Routing ID Forwarding Supported
+ (Not Supported) */
+ uint32_t ctd : 1; /**< Completion Timeout Disable */
+ uint32_t ctv : 4; /**< Completion Timeout Value
+ o 0000b Default range: 16 ms to 55 ms
+ o 0001b 50 us to 100 us
+ o 0010b 1 ms to 10 ms
+ o 0101b 16 ms to 55 ms
+ o 0110b 65 ms to 210 ms
+ o 1001b 260 ms to 900 ms
+ o 1010b 1 s to 3.5 s
+ o 1101b 4 s to 13 s
+ o 1110b 17 s to 64 s
+ Values not defined are reserved */
+#else
+ uint32_t ctv : 4;
+ uint32_t ctd : 1;
+ uint32_t ari : 1;
+ uint32_t atom_op : 1;
+ uint32_t atom_op_eb : 1;
+ uint32_t id0_rq : 1;
+ uint32_t id0_cp : 1;
+ uint32_t reserved_10_31 : 22;
+#endif
+ } cn61xx;
+ struct cvmx_pciercx_cfg038_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg038_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg038_cn61xx cn66xx;
+ struct cvmx_pciercx_cfg038_cn61xx cn68xx;
+ struct cvmx_pciercx_cfg038_cn61xx cn68xxp1;
+ struct cvmx_pciercx_cfg038_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg038 cvmx_pciercx_cfg038_t;
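+
+/* Illustrative sketch (hypothetical helper): program a Completion Timeout
+ * range of 65 ms to 210 ms (CTV = 0110b) by read-modify-write. Assumes
+ * cvmx_pcie_cfgx_read()/cvmx_pcie_cfgx_write() from cvmx-pcie.h and the
+ * CVMX_PCIERCX_CFG038() address macro. */
+static inline void cvmx_pciercx_cfg038_set_ctv_65ms(int pcie_port)
+{
+    cvmx_pciercx_cfg038_t cfg038;
+    cfg038.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG038(pcie_port));
+    cfg038.s.ctd = 0;   /* keep completion timeouts enabled */
+    cfg038.s.ctv = 0x6; /* 0110b: 65 ms to 210 ms (see table above) */
+    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG038(pcie_port), cfg038.u32);
+}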
+
+/**
+ * cvmx_pcierc#_cfg039
+ *
+ * PCIE_CFG039 = Fortieth 32-bits of PCIE type 1 config space
+ * (Link Capabilities 2 Register)
+ */
+union cvmx_pciercx_cfg039 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg039_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31 : 23;
+ uint32_t cls : 1; /**< Crosslink Supported */
+ uint32_t slsv : 7; /**< Supported Link Speeds Vector
+ Indicates the supported Link speeds of the associated Port.
+ For each bit, a value of 1b indicates that the corresponding
+ Link speed is supported; otherwise, the Link speed is not
+ supported.
+ Bit definitions are:
+ Bit 1 2.5 GT/s
+ Bit 2 5.0 GT/s
+ Bit 3 8.0 GT/s (Not Supported)
+ Bits 7:4 reserved
+ The reset value of this field is controlled by a value sent from
+ the lsb of the MIO_QLM#_SPD register
+ qlm#_spd[0] RST_VALUE NOTE
+ 1 0001b 2.5 GHz supported
+ 0 0011b 5.0 GHz and 2.5 GHz supported */
+ uint32_t reserved_0_0 : 1;
+#else
+ uint32_t reserved_0_0 : 1;
+ uint32_t slsv : 7;
+ uint32_t cls : 1;
+ uint32_t reserved_9_31 : 23;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg039_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg039_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg039_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg039_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg039_s cn61xx;
+ struct cvmx_pciercx_cfg039_s cn63xx;
+ struct cvmx_pciercx_cfg039_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg039_s cn66xx;
+ struct cvmx_pciercx_cfg039_s cn68xx;
+ struct cvmx_pciercx_cfg039_s cn68xxp1;
+ struct cvmx_pciercx_cfg039_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg039 cvmx_pciercx_cfg039_t;
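+
+/* Illustrative sketch (hypothetical helper): decode the Supported Link
+ * Speeds Vector. SLSV occupies register bits <7:1>, so bit 0 of the
+ * extracted field corresponds to spec bit 1 (2.5 GT/s) and bit 1 to spec
+ * bit 2 (5.0 GT/s). Assumes the accessors noted above. */
+static inline int cvmx_pciercx_cfg039_supports_5gts(int pcie_port)
+{
+    cvmx_pciercx_cfg039_t cfg039;
+    cfg039.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG039(pcie_port));
+    return (cfg039.s.slsv & 0x2) != 0; /* 5.0 GT/s advertised */
+}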
+
+/**
+ * cvmx_pcierc#_cfg040
+ *
+ * PCIE_CFG040 = Forty-first 32-bits of PCIE type 1 config space
+ * (Link Control 2 Register/Link Status 2 Register)
+ */
+union cvmx_pciercx_cfg040 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg040_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_17_31 : 15;
+ uint32_t cdl : 1; /**< Current De-emphasis Level
+ When the Link is operating at 5 GT/s speed, this bit
+ reflects the level of de-emphasis. Encodings:
+ 1b: -3.5 dB
+ 0b: -6 dB
+ Note: The value in this bit is undefined when the Link is
+ operating at 2.5 GT/s speed */
+ uint32_t reserved_13_15 : 3;
+ uint32_t cde : 1; /**< Compliance De-emphasis
+ This bit sets the de-emphasis level in the Polling.Compliance
+ state if the entry occurred due to the Tx Compliance
+ Receive bit being 1b. Encodings:
+ 1b: -3.5 dB
+ 0b: -6 dB
+ Note: When the Link is operating at 2.5 GT/s, the setting
+ of this bit has no effect. */
+ uint32_t csos : 1; /**< Compliance SOS
+ When set to 1b, the LTSSM is required to send SKP
+ Ordered Sets periodically in between the (modified)
+ compliance patterns.
+ Note: When the Link is operating at 2.5 GT/s, the setting
+ of this bit has no effect. */
+ uint32_t emc : 1; /**< Enter Modified Compliance
+ When this bit is set to 1b, the device transmits a modified
+ compliance pattern if the LTSSM enters the
+ Polling.Compliance state. */
+ uint32_t tm : 3; /**< Transmit Margin
+ This field controls the value of the non-de-emphasized
+ voltage level at the Transmitter signals:
+ - 000: 800-1200 mV for full-swing, 400-600 mV for half-swing
+ - 001-010: values must be monotonic with a non-zero slope
+ - 011: 200-400 mV for full-swing and 100-200 mV for half-swing
+ - 100-111: reserved
+ This field is reset to 000b on entry to the LTSSM
+ Polling.Compliance substate.
+ When operating in 5.0 GT/s mode with full swing, the
+ de-emphasis ratio must be maintained within +/- 1 dB
+ from the specification-defined operational value
+ (either -3.5 or -6 dB). */
+ uint32_t sde : 1; /**< Selectable De-emphasis
+ When the Link is operating at 5.0 GT/s speed, selects the
+ level of de-emphasis:
+ - 1: -3.5 dB
+ - 0: -6 dB
+ When the Link is operating at 2.5 GT/s speed, the setting
+ of this bit has no effect. */
+ uint32_t hasd : 1; /**< Hardware Autonomous Speed Disable
+ When asserted, hardware is disabled from changing the Link
+ speed for device-specific reasons other than attempting to
+ correct unreliable Link operation by reducing Link speed.
+ Initial transition to the highest supported common link
+ speed is not blocked by this signal. */
+ uint32_t ec : 1; /**< Enter Compliance
+ Software is permitted to force a link to enter Compliance
+ mode at the speed indicated in the Target Link Speed
+ field by setting this bit to 1b in both components on a link
+ and then initiating a hot reset on the link. */
+ uint32_t tls : 4; /**< Target Link Speed
+ For Downstream ports, this field sets an upper limit on link
+ operational speed by restricting the values advertised by
+ the upstream component in its training sequences:
+ - 0001: 2.5Gb/s Target Link Speed
+ - 0010: 5Gb/s Target Link Speed
+ - 0100: 8Gb/s Target Link Speed (Not Supported)
+ All other encodings are reserved.
+ If a value is written to this field that does not correspond to
+ a speed included in the Supported Link Speeds field, the
+ result is undefined.
+ For both Upstream and Downstream ports, this field is
+ used to set the target compliance mode speed when
+ software is using the Enter Compliance bit to force a link
+ into compliance mode.
+ The reset value of this field is controlled by a value sent from
+ the lsb of the MIO_QLM#_SPD register.
+ qlm#_spd[0] RST_VALUE NOTE
+ 1 0001b 2.5 GHz supported
+ 0 0010b 5.0 GHz and 2.5 GHz supported */
+#else
+ uint32_t tls : 4;
+ uint32_t ec : 1;
+ uint32_t hasd : 1;
+ uint32_t sde : 1;
+ uint32_t tm : 3;
+ uint32_t emc : 1;
+ uint32_t csos : 1;
+ uint32_t cde : 1;
+ uint32_t reserved_13_15 : 3;
+ uint32_t cdl : 1;
+ uint32_t reserved_17_31 : 15;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg040_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg040_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg040_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg040_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg040_s cn61xx;
+ struct cvmx_pciercx_cfg040_s cn63xx;
+ struct cvmx_pciercx_cfg040_s cn63xxp1;
+ struct cvmx_pciercx_cfg040_s cn66xx;
+ struct cvmx_pciercx_cfg040_s cn68xx;
+ struct cvmx_pciercx_cfg040_s cn68xxp1;
+ struct cvmx_pciercx_cfg040_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg040 cvmx_pciercx_cfg040_t;
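+
+/* Illustrative sketch (hypothetical helper): cap the link at 2.5 GT/s by
+ * restricting the Target Link Speed field before training; per the TLS
+ * description above, only values present in the Supported Link Speeds
+ * field may be written. Assumes the accessors noted above. */
+static inline void cvmx_pciercx_cfg040_limit_to_gen1(int pcie_port)
+{
+    cvmx_pciercx_cfg040_t cfg040;
+    cfg040.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG040(pcie_port));
+    cfg040.s.tls = 0x1; /* 0001b: 2.5 Gb/s Target Link Speed */
+    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG040(pcie_port), cfg040.u32);
+}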
+
+/**
+ * cvmx_pcierc#_cfg041
+ *
+ * PCIE_CFG041 = Forty-second 32-bits of PCIE type 1 config space
+ * (Slot Capabilities 2 Register)
+ */
+union cvmx_pciercx_cfg041 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg041_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg041_s cn52xx;
+ struct cvmx_pciercx_cfg041_s cn52xxp1;
+ struct cvmx_pciercx_cfg041_s cn56xx;
+ struct cvmx_pciercx_cfg041_s cn56xxp1;
+ struct cvmx_pciercx_cfg041_s cn61xx;
+ struct cvmx_pciercx_cfg041_s cn63xx;
+ struct cvmx_pciercx_cfg041_s cn63xxp1;
+ struct cvmx_pciercx_cfg041_s cn66xx;
+ struct cvmx_pciercx_cfg041_s cn68xx;
+ struct cvmx_pciercx_cfg041_s cn68xxp1;
+ struct cvmx_pciercx_cfg041_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg041 cvmx_pciercx_cfg041_t;
+
+/**
+ * cvmx_pcierc#_cfg042
+ *
+ * PCIE_CFG042 = Forty-third 32-bits of PCIE type 1 config space
+ * (Slot Control 2 Register/Slot Status 2 Register)
+ */
+union cvmx_pciercx_cfg042 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg042_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_0_31 : 32;
+#else
+ uint32_t reserved_0_31 : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg042_s cn52xx;
+ struct cvmx_pciercx_cfg042_s cn52xxp1;
+ struct cvmx_pciercx_cfg042_s cn56xx;
+ struct cvmx_pciercx_cfg042_s cn56xxp1;
+ struct cvmx_pciercx_cfg042_s cn61xx;
+ struct cvmx_pciercx_cfg042_s cn63xx;
+ struct cvmx_pciercx_cfg042_s cn63xxp1;
+ struct cvmx_pciercx_cfg042_s cn66xx;
+ struct cvmx_pciercx_cfg042_s cn68xx;
+ struct cvmx_pciercx_cfg042_s cn68xxp1;
+ struct cvmx_pciercx_cfg042_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg042 cvmx_pciercx_cfg042_t;
+
+/**
+ * cvmx_pcierc#_cfg064
+ *
+ * PCIE_CFG064 = Sixty-fifth 32-bits of PCIE type 1 config space
+ * (PCI Express Extended Capability Header)
+ */
+union cvmx_pciercx_cfg064 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg064_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t nco : 12; /**< Next Capability Offset */
+ uint32_t cv : 4; /**< Capability Version */
+ uint32_t pcieec : 16; /**< PCI Express Extended Capability */
+#else
+ uint32_t pcieec : 16;
+ uint32_t cv : 4;
+ uint32_t nco : 12;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg064_s cn52xx;
+ struct cvmx_pciercx_cfg064_s cn52xxp1;
+ struct cvmx_pciercx_cfg064_s cn56xx;
+ struct cvmx_pciercx_cfg064_s cn56xxp1;
+ struct cvmx_pciercx_cfg064_s cn61xx;
+ struct cvmx_pciercx_cfg064_s cn63xx;
+ struct cvmx_pciercx_cfg064_s cn63xxp1;
+ struct cvmx_pciercx_cfg064_s cn66xx;
+ struct cvmx_pciercx_cfg064_s cn68xx;
+ struct cvmx_pciercx_cfg064_s cn68xxp1;
+ struct cvmx_pciercx_cfg064_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg064 cvmx_pciercx_cfg064_t;
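+
+/* Illustrative sketch (hypothetical helper): confirm that the extended
+ * capability at CFG064 is Advanced Error Reporting (extended capability
+ * ID 0x0001 in the PCIe spec) before using CFG065..CFG077 below.
+ * Assumes the accessors noted above. */
+static inline int cvmx_pciercx_cfg064_is_aer(int pcie_port)
+{
+    cvmx_pciercx_cfg064_t cfg064;
+    cfg064.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG064(pcie_port));
+    return cfg064.s.pcieec == 0x0001; /* nco then points at the next capability */
+}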
+
+/**
+ * cvmx_pcierc#_cfg065
+ *
+ * PCIE_CFG065 = Sixty-sixth 32-bits of PCIE type 1 config space
+ * (Uncorrectable Error Status Register)
+ */
+union cvmx_pciercx_cfg065 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg065_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombs : 1; /**< Unsupported AtomicOp Egress Blocked Status */
+ uint32_t reserved_23_23 : 1;
+ uint32_t ucies : 1; /**< Uncorrectable Internal Error Status */
+ uint32_t reserved_21_21 : 1;
+ uint32_t ures : 1; /**< Unsupported Request Error Status */
+ uint32_t ecrces : 1; /**< ECRC Error Status */
+ uint32_t mtlps : 1; /**< Malformed TLP Status */
+ uint32_t ros : 1; /**< Receiver Overflow Status */
+ uint32_t ucs : 1; /**< Unexpected Completion Status */
+ uint32_t cas : 1; /**< Completer Abort Status */
+ uint32_t cts : 1; /**< Completion Timeout Status */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Status */
+ uint32_t ptlps : 1; /**< Poisoned TLP Status */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Status (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Status */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_21 : 1;
+ uint32_t ucies : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t uatombs : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg065_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t ures : 1; /**< Unsupported Request Error Status */
+ uint32_t ecrces : 1; /**< ECRC Error Status */
+ uint32_t mtlps : 1; /**< Malformed TLP Status */
+ uint32_t ros : 1; /**< Receiver Overflow Status */
+ uint32_t ucs : 1; /**< Unexpected Completion Status */
+ uint32_t cas : 1; /**< Completer Abort Status */
+ uint32_t cts : 1; /**< Completion Timeout Status */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Status */
+ uint32_t ptlps : 1; /**< Poisoned TLP Status */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Status (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Status */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg065_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg065_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg065_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg065_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombs : 1; /**< Unsupported AtomicOp Egress Blocked Status */
+ uint32_t reserved_21_23 : 3;
+ uint32_t ures : 1; /**< Unsupported Request Error Status */
+ uint32_t ecrces : 1; /**< ECRC Error Status */
+ uint32_t mtlps : 1; /**< Malformed TLP Status */
+ uint32_t ros : 1; /**< Receiver Overflow Status */
+ uint32_t ucs : 1; /**< Unexpected Completion Status */
+ uint32_t cas : 1; /**< Completer Abort Status */
+ uint32_t cts : 1; /**< Completion Timeout Status */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Status */
+ uint32_t ptlps : 1; /**< Poisoned TLP Status */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Status (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Status */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_23 : 3;
+ uint32_t uatombs : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } cn61xx;
+ struct cvmx_pciercx_cfg065_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg065_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg065_cn61xx cn66xx;
+ struct cvmx_pciercx_cfg065_cn61xx cn68xx;
+ struct cvmx_pciercx_cfg065_cn52xx cn68xxp1;
+ struct cvmx_pciercx_cfg065_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg065 cvmx_pciercx_cfg065_t;
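+
+/* Illustrative sketch (hypothetical helper): poll the Uncorrectable Error
+ * Status register and acknowledge whatever is latched. In the PCIe AER
+ * definition these status bits are write-1-to-clear, so writing back the
+ * value just read clears exactly the errors observed. Assumes the
+ * accessors noted above. */
+static inline uint32_t cvmx_pciercx_cfg065_ack_errors(int pcie_port)
+{
+    cvmx_pciercx_cfg065_t cfg065;
+    cfg065.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG065(pcie_port));
+    if (cfg065.u32)
+        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG065(pcie_port),
+                             cfg065.u32);
+    return cfg065.u32; /* caller decodes dlpes, ptlps, cts, ... as needed */
+}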
+
+/**
+ * cvmx_pcierc#_cfg066
+ *
+ * PCIE_CFG066 = Sixty-seventh 32-bits of PCIE type 1 config space
+ * (Uncorrectable Error Mask Register)
+ */
+union cvmx_pciercx_cfg066 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg066_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombm : 1; /**< Unsupported AtomicOp Egress Blocked Mask */
+ uint32_t reserved_23_23 : 1;
+ uint32_t uciem : 1; /**< Uncorrectable Internal Error Mask */
+ uint32_t reserved_21_21 : 1;
+ uint32_t urem : 1; /**< Unsupported Request Error Mask */
+ uint32_t ecrcem : 1; /**< ECRC Error Mask */
+ uint32_t mtlpm : 1; /**< Malformed TLP Mask */
+ uint32_t rom : 1; /**< Receiver Overflow Mask */
+ uint32_t ucm : 1; /**< Unexpected Completion Mask */
+ uint32_t cam : 1; /**< Completer Abort Mask */
+ uint32_t ctm : 1; /**< Completion Timeout Mask */
+ uint32_t fcpem : 1; /**< Flow Control Protocol Error Mask */
+ uint32_t ptlpm : 1; /**< Poisoned TLP Mask */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdem : 1; /**< Surprise Down Error Mask (not supported) */
+ uint32_t dlpem : 1; /**< Data Link Protocol Error Mask */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpem : 1;
+ uint32_t sdem : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlpm : 1;
+ uint32_t fcpem : 1;
+ uint32_t ctm : 1;
+ uint32_t cam : 1;
+ uint32_t ucm : 1;
+ uint32_t rom : 1;
+ uint32_t mtlpm : 1;
+ uint32_t ecrcem : 1;
+ uint32_t urem : 1;
+ uint32_t reserved_21_21 : 1;
+ uint32_t uciem : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t uatombm : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg066_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t urem : 1; /**< Unsupported Request Error Mask */
+ uint32_t ecrcem : 1; /**< ECRC Error Mask */
+ uint32_t mtlpm : 1; /**< Malformed TLP Mask */
+ uint32_t rom : 1; /**< Receiver Overflow Mask */
+ uint32_t ucm : 1; /**< Unexpected Completion Mask */
+ uint32_t cam : 1; /**< Completer Abort Mask */
+ uint32_t ctm : 1; /**< Completion Timeout Mask */
+ uint32_t fcpem : 1; /**< Flow Control Protocol Error Mask */
+ uint32_t ptlpm : 1; /**< Poisoned TLP Mask */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdem : 1; /**< Surprise Down Error Mask (not supported) */
+ uint32_t dlpem : 1; /**< Data Link Protocol Error Mask */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpem : 1;
+ uint32_t sdem : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlpm : 1;
+ uint32_t fcpem : 1;
+ uint32_t ctm : 1;
+ uint32_t cam : 1;
+ uint32_t ucm : 1;
+ uint32_t rom : 1;
+ uint32_t mtlpm : 1;
+ uint32_t ecrcem : 1;
+ uint32_t urem : 1;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg066_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg066_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg066_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg066_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombm : 1; /**< Unsupported AtomicOp Egress Blocked Mask */
+ uint32_t reserved_21_23 : 3;
+ uint32_t urem : 1; /**< Unsupported Request Error Mask */
+ uint32_t ecrcem : 1; /**< ECRC Error Mask */
+ uint32_t mtlpm : 1; /**< Malformed TLP Mask */
+ uint32_t rom : 1; /**< Receiver Overflow Mask */
+ uint32_t ucm : 1; /**< Unexpected Completion Mask */
+ uint32_t cam : 1; /**< Completer Abort Mask */
+ uint32_t ctm : 1; /**< Completion Timeout Mask */
+ uint32_t fcpem : 1; /**< Flow Control Protocol Error Mask */
+ uint32_t ptlpm : 1; /**< Poisoned TLP Mask */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdem : 1; /**< Surprise Down Error Mask (not supported) */
+ uint32_t dlpem : 1; /**< Data Link Protocol Error Mask */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpem : 1;
+ uint32_t sdem : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlpm : 1;
+ uint32_t fcpem : 1;
+ uint32_t ctm : 1;
+ uint32_t cam : 1;
+ uint32_t ucm : 1;
+ uint32_t rom : 1;
+ uint32_t mtlpm : 1;
+ uint32_t ecrcem : 1;
+ uint32_t urem : 1;
+ uint32_t reserved_21_23 : 3;
+ uint32_t uatombm : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } cn61xx;
+ struct cvmx_pciercx_cfg066_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg066_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg066_cn61xx cn66xx;
+ struct cvmx_pciercx_cfg066_cn61xx cn68xx;
+ struct cvmx_pciercx_cfg066_cn52xx cn68xxp1;
+ struct cvmx_pciercx_cfg066_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg066 cvmx_pciercx_cfg066_t;
+
+/**
+ * cvmx_pcierc#_cfg067
+ *
+ * PCIE_CFG067 = Sixty-eighth 32-bits of PCIE type 1 config space
+ * (Uncorrectable Error Severity Register)
+ */
+union cvmx_pciercx_cfg067 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg067_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombs : 1; /**< Unsupported AtomicOp Egress Blocked Severity */
+ uint32_t reserved_23_23 : 1;
+ uint32_t ucies : 1; /**< Uncorrectable Internal Error Severity */
+ uint32_t reserved_21_21 : 1;
+ uint32_t ures : 1; /**< Unsupported Request Error Severity */
+ uint32_t ecrces : 1; /**< ECRC Error Severity */
+ uint32_t mtlps : 1; /**< Malformed TLP Severity */
+ uint32_t ros : 1; /**< Receiver Overflow Severity */
+ uint32_t ucs : 1; /**< Unexpected Completion Severity */
+ uint32_t cas : 1; /**< Completer Abort Severity */
+ uint32_t cts : 1; /**< Completion Timeout Severity */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Severity */
+ uint32_t ptlps : 1; /**< Poisoned TLP Severity */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Severity (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Severity */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_21 : 1;
+ uint32_t ucies : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t uatombs : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg067_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t ures : 1; /**< Unsupported Request Error Severity */
+ uint32_t ecrces : 1; /**< ECRC Error Severity */
+ uint32_t mtlps : 1; /**< Malformed TLP Severity */
+ uint32_t ros : 1; /**< Receiver Overflow Severity */
+ uint32_t ucs : 1; /**< Unexpected Completion Severity */
+ uint32_t cas : 1; /**< Completer Abort Severity */
+ uint32_t cts : 1; /**< Completion Timeout Severity */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Severity */
+ uint32_t ptlps : 1; /**< Poisoned TLP Severity */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Severity (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Severity */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg067_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg067_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg067_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg067_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t uatombs : 1; /**< Unsupported AtomicOp Egress Blocked Severity */
+ uint32_t reserved_21_23 : 3;
+ uint32_t ures : 1; /**< Unsupported Request Error Severity */
+ uint32_t ecrces : 1; /**< ECRC Error Severity */
+ uint32_t mtlps : 1; /**< Malformed TLP Severity */
+ uint32_t ros : 1; /**< Receiver Overflow Severity */
+ uint32_t ucs : 1; /**< Unexpected Completion Severity */
+ uint32_t cas : 1; /**< Completer Abort Severity */
+ uint32_t cts : 1; /**< Completion Timeout Severity */
+ uint32_t fcpes : 1; /**< Flow Control Protocol Error Severity */
+ uint32_t ptlps : 1; /**< Poisoned TLP Severity */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< Surprise Down Error Severity (not supported) */
+ uint32_t dlpes : 1; /**< Data Link Protocol Error Severity */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1;
+ uint32_t sdes : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1;
+ uint32_t fcpes : 1;
+ uint32_t cts : 1;
+ uint32_t cas : 1;
+ uint32_t ucs : 1;
+ uint32_t ros : 1;
+ uint32_t mtlps : 1;
+ uint32_t ecrces : 1;
+ uint32_t ures : 1;
+ uint32_t reserved_21_23 : 3;
+ uint32_t uatombs : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } cn61xx;
+ struct cvmx_pciercx_cfg067_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg067_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg067_cn61xx cn66xx;
+ struct cvmx_pciercx_cfg067_cn61xx cn68xx;
+ struct cvmx_pciercx_cfg067_cn52xx cn68xxp1;
+ struct cvmx_pciercx_cfg067_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg067 cvmx_pciercx_cfg067_t;
+
+/**
+ * cvmx_pcierc#_cfg068
+ *
+ * PCIE_CFG068 = Sixty-ninth 32-bits of PCIE type 1 config space
+ * (Correctable Error Status Register)
+ */
+union cvmx_pciercx_cfg068 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg068_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_15_31 : 17;
+ uint32_t cies : 1; /**< Corrected Internal Error Status */
+ uint32_t anfes : 1; /**< Advisory Non-Fatal Error Status */
+ uint32_t rtts : 1; /**< Replay Timer Timeout Status */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rnrs : 1; /**< REPLAY_NUM Rollover Status */
+ uint32_t bdllps : 1; /**< Bad DLLP Status */
+ uint32_t btlps : 1; /**< Bad TLP Status */
+ uint32_t reserved_1_5 : 5;
+ uint32_t res : 1; /**< Receiver Error Status */
+#else
+ uint32_t res : 1;
+ uint32_t reserved_1_5 : 5;
+ uint32_t btlps : 1;
+ uint32_t bdllps : 1;
+ uint32_t rnrs : 1;
+ uint32_t reserved_9_11 : 3;
+ uint32_t rtts : 1;
+ uint32_t anfes : 1;
+ uint32_t cies : 1;
+ uint32_t reserved_15_31 : 17;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg068_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_14_31 : 18;
+ uint32_t anfes : 1; /**< Advisory Non-Fatal Error Status */
+ uint32_t rtts : 1; /**< Replay Timer Timeout Status */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rnrs : 1; /**< REPLAY_NUM Rollover Status */
+ uint32_t bdllps : 1; /**< Bad DLLP Status */
+ uint32_t btlps : 1; /**< Bad TLP Status */
+ uint32_t reserved_1_5 : 5;
+ uint32_t res : 1; /**< Receiver Error Status */
+#else
+ uint32_t res : 1;
+ uint32_t reserved_1_5 : 5;
+ uint32_t btlps : 1;
+ uint32_t bdllps : 1;
+ uint32_t rnrs : 1;
+ uint32_t reserved_9_11 : 3;
+ uint32_t rtts : 1;
+ uint32_t anfes : 1;
+ uint32_t reserved_14_31 : 18;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg068_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg068_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg068_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg068_cn52xx cn61xx;
+ struct cvmx_pciercx_cfg068_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg068_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg068_cn52xx cn66xx;
+ struct cvmx_pciercx_cfg068_cn52xx cn68xx;
+ struct cvmx_pciercx_cfg068_cn52xx cn68xxp1;
+ struct cvmx_pciercx_cfg068_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg068 cvmx_pciercx_cfg068_t;
+
+/**
+ * cvmx_pcierc#_cfg069
+ *
+ * PCIE_CFG069 = Seventieth 32-bits of PCIE type 1 config space
+ * (Correctable Error Mask Register)
+ */
+union cvmx_pciercx_cfg069 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg069_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_15_31 : 17;
+ uint32_t ciem : 1; /**< Corrected Internal Error Mask */
+ uint32_t anfem : 1; /**< Advisory Non-Fatal Error Mask */
+ uint32_t rttm : 1; /**< Replay Timer Timeout Mask */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rnrm : 1; /**< REPLAY_NUM Rollover Mask */
+ uint32_t bdllpm : 1; /**< Bad DLLP Mask */
+ uint32_t btlpm : 1; /**< Bad TLP Mask */
+ uint32_t reserved_1_5 : 5;
+ uint32_t rem : 1; /**< Receiver Error Mask */
+#else
+ uint32_t rem : 1;
+ uint32_t reserved_1_5 : 5;
+ uint32_t btlpm : 1;
+ uint32_t bdllpm : 1;
+ uint32_t rnrm : 1;
+ uint32_t reserved_9_11 : 3;
+ uint32_t rttm : 1;
+ uint32_t anfem : 1;
+ uint32_t ciem : 1;
+ uint32_t reserved_15_31 : 17;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg069_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_14_31 : 18;
+ uint32_t anfem : 1; /**< Advisory Non-Fatal Error Mask */
+ uint32_t rttm : 1; /**< Replay Timer Timeout Mask */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rnrm : 1; /**< REPLAY_NUM Rollover Mask */
+ uint32_t bdllpm : 1; /**< Bad DLLP Mask */
+ uint32_t btlpm : 1; /**< Bad TLP Mask */
+ uint32_t reserved_1_5 : 5;
+ uint32_t rem : 1; /**< Receiver Error Mask */
+#else
+ uint32_t rem : 1;
+ uint32_t reserved_1_5 : 5;
+ uint32_t btlpm : 1;
+ uint32_t bdllpm : 1;
+ uint32_t rnrm : 1;
+ uint32_t reserved_9_11 : 3;
+ uint32_t rttm : 1;
+ uint32_t anfem : 1;
+ uint32_t reserved_14_31 : 18;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg069_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg069_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg069_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg069_cn52xx cn61xx;
+ struct cvmx_pciercx_cfg069_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg069_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg069_cn52xx cn66xx;
+ struct cvmx_pciercx_cfg069_cn52xx cn68xx;
+ struct cvmx_pciercx_cfg069_cn52xx cn68xxp1;
+ struct cvmx_pciercx_cfg069_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg069 cvmx_pciercx_cfg069_t;
+
+/**
+ * cvmx_pcierc#_cfg070
+ *
+ * PCIE_CFG070 = Seventy-first 32-bits of PCIE type 1 config space
+ * (Advanced Error Capabilities and Control Register)
+ */
+union cvmx_pciercx_cfg070 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg070_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31 : 23;
+ uint32_t ce : 1; /**< ECRC Check Enable */
+ uint32_t cc : 1; /**< ECRC Check Capable */
+ uint32_t ge : 1; /**< ECRC Generation Enable */
+ uint32_t gc : 1; /**< ECRC Generation Capability */
+ uint32_t fep : 5; /**< First Error Pointer */
+#else
+ uint32_t fep : 5;
+ uint32_t gc : 1;
+ uint32_t ge : 1;
+ uint32_t cc : 1;
+ uint32_t ce : 1;
+ uint32_t reserved_9_31 : 23;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg070_s cn52xx;
+ struct cvmx_pciercx_cfg070_s cn52xxp1;
+ struct cvmx_pciercx_cfg070_s cn56xx;
+ struct cvmx_pciercx_cfg070_s cn56xxp1;
+ struct cvmx_pciercx_cfg070_s cn61xx;
+ struct cvmx_pciercx_cfg070_s cn63xx;
+ struct cvmx_pciercx_cfg070_s cn63xxp1;
+ struct cvmx_pciercx_cfg070_s cn66xx;
+ struct cvmx_pciercx_cfg070_s cn68xx;
+ struct cvmx_pciercx_cfg070_s cn68xxp1;
+ struct cvmx_pciercx_cfg070_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg070 cvmx_pciercx_cfg070_t;
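+
+/* Illustrative sketch (hypothetical helper): enable end-to-end CRC
+ * generation and checking, but only where the capability bits in this
+ * register advertise support. Assumes the accessors noted above. */
+static inline void cvmx_pciercx_cfg070_enable_ecrc(int pcie_port)
+{
+    cvmx_pciercx_cfg070_t cfg070;
+    cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
+    if (cfg070.s.gc)
+        cfg070.s.ge = 1; /* generation capable -> enable generation */
+    if (cfg070.s.cc)
+        cfg070.s.ce = 1; /* check capable -> enable checking */
+    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), cfg070.u32);
+}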
+
+/**
+ * cvmx_pcierc#_cfg071
+ *
+ * PCIE_CFG071 = Seventy-second 32-bits of PCIE type 1 config space
+ * (Header Log Register 1)
+ *
+ * The Header Log registers collect the header for the TLP corresponding to a detected error.
+ */
+union cvmx_pciercx_cfg071 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg071_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dword1 : 32; /**< Header Log Register (first DWORD) */
+#else
+ uint32_t dword1 : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg071_s cn52xx;
+ struct cvmx_pciercx_cfg071_s cn52xxp1;
+ struct cvmx_pciercx_cfg071_s cn56xx;
+ struct cvmx_pciercx_cfg071_s cn56xxp1;
+ struct cvmx_pciercx_cfg071_s cn61xx;
+ struct cvmx_pciercx_cfg071_s cn63xx;
+ struct cvmx_pciercx_cfg071_s cn63xxp1;
+ struct cvmx_pciercx_cfg071_s cn66xx;
+ struct cvmx_pciercx_cfg071_s cn68xx;
+ struct cvmx_pciercx_cfg071_s cn68xxp1;
+ struct cvmx_pciercx_cfg071_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg071 cvmx_pciercx_cfg071_t;
+
+/**
+ * cvmx_pcierc#_cfg072
+ *
+ * PCIE_CFG072 = Seventy-third 32-bits of PCIE type 1 config space
+ * (Header Log Register 2)
+ *
+ * The Header Log registers collect the header for the TLP corresponding to a detected error.
+ */
+union cvmx_pciercx_cfg072 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg072_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dword2 : 32; /**< Header Log Register (second DWORD) */
+#else
+ uint32_t dword2 : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg072_s cn52xx;
+ struct cvmx_pciercx_cfg072_s cn52xxp1;
+ struct cvmx_pciercx_cfg072_s cn56xx;
+ struct cvmx_pciercx_cfg072_s cn56xxp1;
+ struct cvmx_pciercx_cfg072_s cn61xx;
+ struct cvmx_pciercx_cfg072_s cn63xx;
+ struct cvmx_pciercx_cfg072_s cn63xxp1;
+ struct cvmx_pciercx_cfg072_s cn66xx;
+ struct cvmx_pciercx_cfg072_s cn68xx;
+ struct cvmx_pciercx_cfg072_s cn68xxp1;
+ struct cvmx_pciercx_cfg072_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg072 cvmx_pciercx_cfg072_t;
+
+/**
+ * cvmx_pcierc#_cfg073
+ *
+ * PCIE_CFG073 = Seventy-fourth 32-bits of PCIE type 1 config space
+ * (Header Log Register 3)
+ *
+ * The Header Log registers collect the header for the TLP corresponding to a detected error.
+ */
+union cvmx_pciercx_cfg073 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg073_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dword3 : 32; /**< Header Log Register (third DWORD) */
+#else
+ uint32_t dword3 : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg073_s cn52xx;
+ struct cvmx_pciercx_cfg073_s cn52xxp1;
+ struct cvmx_pciercx_cfg073_s cn56xx;
+ struct cvmx_pciercx_cfg073_s cn56xxp1;
+ struct cvmx_pciercx_cfg073_s cn61xx;
+ struct cvmx_pciercx_cfg073_s cn63xx;
+ struct cvmx_pciercx_cfg073_s cn63xxp1;
+ struct cvmx_pciercx_cfg073_s cn66xx;
+ struct cvmx_pciercx_cfg073_s cn68xx;
+ struct cvmx_pciercx_cfg073_s cn68xxp1;
+ struct cvmx_pciercx_cfg073_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg073 cvmx_pciercx_cfg073_t;
+
+/**
+ * cvmx_pcierc#_cfg074
+ *
+ * PCIE_CFG074 = Seventy-fifth 32-bits of PCIE type 1 config space
+ * (Header Log Register 4)
+ *
+ * The Header Log registers collect the header for the TLP corresponding to a detected error.
+ */
+union cvmx_pciercx_cfg074 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg074_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dword4 : 32; /**< Header Log Register (fourth DWORD) */
+#else
+ uint32_t dword4 : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg074_s cn52xx;
+ struct cvmx_pciercx_cfg074_s cn52xxp1;
+ struct cvmx_pciercx_cfg074_s cn56xx;
+ struct cvmx_pciercx_cfg074_s cn56xxp1;
+ struct cvmx_pciercx_cfg074_s cn61xx;
+ struct cvmx_pciercx_cfg074_s cn63xx;
+ struct cvmx_pciercx_cfg074_s cn63xxp1;
+ struct cvmx_pciercx_cfg074_s cn66xx;
+ struct cvmx_pciercx_cfg074_s cn68xx;
+ struct cvmx_pciercx_cfg074_s cn68xxp1;
+ struct cvmx_pciercx_cfg074_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg074 cvmx_pciercx_cfg074_t;
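+
+/* Illustrative sketch (hypothetical helper): collect the four Header Log
+ * DWORDs (CFG071..CFG074) into a buffer for diagnostics once an error has
+ * been latched in CFG065. Assumes the accessors noted above. */
+static inline void cvmx_pciercx_read_header_log(int pcie_port,
+                                                uint32_t dwords[4])
+{
+    dwords[0] = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG071(pcie_port));
+    dwords[1] = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG072(pcie_port));
+    dwords[2] = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG073(pcie_port));
+    dwords[3] = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG074(pcie_port));
+}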
+
+/**
+ * cvmx_pcierc#_cfg075
+ *
+ * PCIE_CFG075 = Seventy-sixth 32-bits of PCIE type 1 config space
+ * (Root Error Command Register)
+ */
+union cvmx_pciercx_cfg075 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg075_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_3_31 : 29;
+ uint32_t fere : 1; /**< Fatal Error Reporting Enable */
+ uint32_t nfere : 1; /**< Non-Fatal Error Reporting Enable */
+ uint32_t cere : 1; /**< Correctable Error Reporting Enable */
+#else
+ uint32_t cere : 1;
+ uint32_t nfere : 1;
+ uint32_t fere : 1;
+ uint32_t reserved_3_31 : 29;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg075_s cn52xx;
+ struct cvmx_pciercx_cfg075_s cn52xxp1;
+ struct cvmx_pciercx_cfg075_s cn56xx;
+ struct cvmx_pciercx_cfg075_s cn56xxp1;
+ struct cvmx_pciercx_cfg075_s cn61xx;
+ struct cvmx_pciercx_cfg075_s cn63xx;
+ struct cvmx_pciercx_cfg075_s cn63xxp1;
+ struct cvmx_pciercx_cfg075_s cn66xx;
+ struct cvmx_pciercx_cfg075_s cn68xx;
+ struct cvmx_pciercx_cfg075_s cn68xxp1;
+ struct cvmx_pciercx_cfg075_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg075 cvmx_pciercx_cfg075_t;
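+
+/* Illustrative sketch (hypothetical helper): enable all three classes of
+ * root error reporting so received ERR_COR/ERR_NONFATAL/ERR_FATAL messages
+ * are reflected in the Root Error Status register (CFG076). Assumes the
+ * accessors noted above. */
+static inline void cvmx_pciercx_cfg075_enable_reporting(int pcie_port)
+{
+    cvmx_pciercx_cfg075_t cfg075;
+    cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
+    cfg075.s.cere = 1;  /* correctable */
+    cfg075.s.nfere = 1; /* non-fatal */
+    cfg075.s.fere = 1;  /* fatal */
+    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), cfg075.u32);
+}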
+
+/**
+ * cvmx_pcierc#_cfg076
+ *
+ * PCIE_CFG076 = Seventy-seventh 32-bits of PCIE type 1 config space
+ * (Root Error Status Register)
+ */
+union cvmx_pciercx_cfg076 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg076_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t aeimn : 5; /**< Advanced Error Interrupt Message Number,
+ writable through PEM(0..1)_CFG_WR */
+ uint32_t reserved_7_26 : 20;
+ uint32_t femr : 1; /**< Fatal Error Messages Received */
+ uint32_t nfemr : 1; /**< Non-Fatal Error Messages Received */
+ uint32_t fuf : 1; /**< First Uncorrectable Fatal */
+ uint32_t multi_efnfr : 1; /**< Multiple ERR_FATAL/NONFATAL Received */
+ uint32_t efnfr : 1; /**< ERR_FATAL/NONFATAL Received */
+ uint32_t multi_ecr : 1; /**< Multiple ERR_COR Received */
+ uint32_t ecr : 1; /**< ERR_COR Received */
+#else
+ uint32_t ecr : 1;
+ uint32_t multi_ecr : 1;
+ uint32_t efnfr : 1;
+ uint32_t multi_efnfr : 1;
+ uint32_t fuf : 1;
+ uint32_t nfemr : 1;
+ uint32_t femr : 1;
+ uint32_t reserved_7_26 : 20;
+ uint32_t aeimn : 5;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg076_s cn52xx;
+ struct cvmx_pciercx_cfg076_s cn52xxp1;
+ struct cvmx_pciercx_cfg076_s cn56xx;
+ struct cvmx_pciercx_cfg076_s cn56xxp1;
+ struct cvmx_pciercx_cfg076_s cn61xx;
+ struct cvmx_pciercx_cfg076_s cn63xx;
+ struct cvmx_pciercx_cfg076_s cn63xxp1;
+ struct cvmx_pciercx_cfg076_s cn66xx;
+ struct cvmx_pciercx_cfg076_s cn68xx;
+ struct cvmx_pciercx_cfg076_s cn68xxp1;
+ struct cvmx_pciercx_cfg076_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg076 cvmx_pciercx_cfg076_t;
+
+/**
+ * cvmx_pcierc#_cfg077
+ *
+ * PCIE_CFG077 = Seventy-eighth 32-bits of PCIE type 1 config space
+ * (Error Source Identification Register)
+ */
+union cvmx_pciercx_cfg077 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg077_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t efnfsi : 16; /**< ERR_FATAL/NONFATAL Source Identification */
+ uint32_t ecsi : 16; /**< ERR_COR Source Identification */
+#else
+ uint32_t ecsi : 16;
+ uint32_t efnfsi : 16;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg077_s cn52xx;
+ struct cvmx_pciercx_cfg077_s cn52xxp1;
+ struct cvmx_pciercx_cfg077_s cn56xx;
+ struct cvmx_pciercx_cfg077_s cn56xxp1;
+ struct cvmx_pciercx_cfg077_s cn61xx;
+ struct cvmx_pciercx_cfg077_s cn63xx;
+ struct cvmx_pciercx_cfg077_s cn63xxp1;
+ struct cvmx_pciercx_cfg077_s cn66xx;
+ struct cvmx_pciercx_cfg077_s cn68xx;
+ struct cvmx_pciercx_cfg077_s cn68xxp1;
+ struct cvmx_pciercx_cfg077_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg077 cvmx_pciercx_cfg077_t;
+
+/**
+ * cvmx_pcierc#_cfg448
+ *
+ * PCIE_CFG448 = Four hundred forty-ninth 32-bits of PCIE type 1 config space
+ * (Ack Latency Timer and Replay Timer Register)
+ */
+union cvmx_pciercx_cfg448 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg448_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rtl : 16; /**< Replay Time Limit
+ The replay timer expires when it reaches this limit. The PCI
+ Express bus initiates a replay upon reception of a Nak or when
+ the replay timer expires.
+ This value will be set correctly by the hardware out of reset
+ or when the negotiated Link-Width or Payload-Size changes. If
+ the user changes this value through a CSR write or by an
+ EEPROM load then they should refer to the PCIe Specification
+ for the correct value. */
+ uint32_t rtltl : 16; /**< Round Trip Latency Time Limit
+ The Ack/Nak latency timer expires when it reaches this limit.
+ This value will be set correctly by the hardware out of reset
+ or when the negotiated Link-Width or Payload-Size changes. If
+ the user changes this value through a CSR write or by an
+ EEPROM load then they should refer to the PCIe Specification
+ for the correct value. */
+#else
+ uint32_t rtltl : 16;
+ uint32_t rtl : 16;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg448_s cn52xx;
+ struct cvmx_pciercx_cfg448_s cn52xxp1;
+ struct cvmx_pciercx_cfg448_s cn56xx;
+ struct cvmx_pciercx_cfg448_s cn56xxp1;
+ struct cvmx_pciercx_cfg448_s cn61xx;
+ struct cvmx_pciercx_cfg448_s cn63xx;
+ struct cvmx_pciercx_cfg448_s cn63xxp1;
+ struct cvmx_pciercx_cfg448_s cn66xx;
+ struct cvmx_pciercx_cfg448_s cn68xx;
+ struct cvmx_pciercx_cfg448_s cn68xxp1;
+ struct cvmx_pciercx_cfg448_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg448 cvmx_pciercx_cfg448_t;
+
+/**
+ * cvmx_pcierc#_cfg449
+ *
+ * PCIE_CFG449 = Four hundred fiftieth 32-bits of PCIE type 1 config space
+ * (Other Message Register)
+ */
+union cvmx_pciercx_cfg449 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg449_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t omr : 32; /**< Other Message Register
+ This register can be used for either of the following purposes:
+ o To send a specific PCI Express Message, the application
+ writes the payload of the Message into this register, then
+ sets bit 0 of the Port Link Control Register to send the
+ Message.
+ o To store a corruption pattern for corrupting the LCRC on all
+ TLPs, the application places a 32-bit corruption pattern into
+ this register and enables this function by setting bit 25 of
+ the Port Link Control Register. When enabled, the transmit
+ LCRC result is XOR'd with this pattern before inserting
+ it into the packet. */
+#else
+ uint32_t omr : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg449_s cn52xx;
+ struct cvmx_pciercx_cfg449_s cn52xxp1;
+ struct cvmx_pciercx_cfg449_s cn56xx;
+ struct cvmx_pciercx_cfg449_s cn56xxp1;
+ struct cvmx_pciercx_cfg449_s cn61xx;
+ struct cvmx_pciercx_cfg449_s cn63xx;
+ struct cvmx_pciercx_cfg449_s cn63xxp1;
+ struct cvmx_pciercx_cfg449_s cn66xx;
+ struct cvmx_pciercx_cfg449_s cn68xx;
+ struct cvmx_pciercx_cfg449_s cn68xxp1;
+ struct cvmx_pciercx_cfg449_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg449 cvmx_pciercx_cfg449_t;
+
+/**
+ * cvmx_pcierc#_cfg450
+ *
+ * PCIE_CFG450 = Four hundred fifty-first 32-bits of PCIE type 1 config space
+ * (Port Force Link Register)
+ */
+union cvmx_pciercx_cfg450 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg450_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lpec : 8; /**< Low Power Entrance Count
+ The Power Management state machine waits this many clock cycles
+ for the completion of a CfgWr to the PCIE_CFG017 register
+ Power State (PS) field before going low-power. This register
+ is intended for applications that do not let the PCI Express
+ bus handle a completion for a configuration request to the
+ Power Management Control and Status (PCIE_CFG017) register. */
+ uint32_t reserved_22_23 : 2;
+ uint32_t link_state : 6; /**< Link State
+ The Link state that the PCI Express Bus will be forced to
+ when bit 15 (Force Link) is set.
+ State encoding:
+ o DETECT_QUIET 00h
+ o DETECT_ACT 01h
+ o POLL_ACTIVE 02h
+ o POLL_COMPLIANCE 03h
+ o POLL_CONFIG 04h
+ o PRE_DETECT_QUIET 05h
+ o DETECT_WAIT 06h
+ o CFG_LINKWD_START 07h
+ o CFG_LINKWD_ACEPT 08h
+ o CFG_LANENUM_WAIT 09h
+ o CFG_LANENUM_ACEPT 0Ah
+ o CFG_COMPLETE 0Bh
+ o CFG_IDLE 0Ch
+ o RCVRY_LOCK 0Dh
+ o RCVRY_SPEED 0Eh
+ o RCVRY_RCVRCFG 0Fh
+ o RCVRY_IDLE 10h
+ o L0 11h
+ o L0S 12h
+ o L123_SEND_EIDLE 13h
+ o L1_IDLE 14h
+ o L2_IDLE 15h
+ o L2_WAKE 16h
+ o DISABLED_ENTRY 17h
+ o DISABLED_IDLE 18h
+ o DISABLED 19h
+ o LPBK_ENTRY 1Ah
+ o LPBK_ACTIVE 1Bh
+ o LPBK_EXIT 1Ch
+ o LPBK_EXIT_TIMEOUT 1Dh
+ o HOT_RESET_ENTRY 1Eh
+ o HOT_RESET 1Fh */
+ uint32_t force_link : 1; /**< Force Link
+ Forces the Link to the state specified by the Link State field.
+ The Force Link pulse will trigger Link re-negotiation.
+ * As Force Link is a pulse, writing a 1 to it does
+ trigger the forced link-state event, even though reading it
+ always returns a 0. */
+ uint32_t reserved_8_14 : 7;
+ uint32_t link_num : 8; /**< Link Number */
+#else
+ uint32_t link_num : 8;
+ uint32_t reserved_8_14 : 7;
+ uint32_t force_link : 1;
+ uint32_t link_state : 6;
+ uint32_t reserved_22_23 : 2;
+ uint32_t lpec : 8;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg450_s cn52xx;
+ struct cvmx_pciercx_cfg450_s cn52xxp1;
+ struct cvmx_pciercx_cfg450_s cn56xx;
+ struct cvmx_pciercx_cfg450_s cn56xxp1;
+ struct cvmx_pciercx_cfg450_s cn61xx;
+ struct cvmx_pciercx_cfg450_s cn63xx;
+ struct cvmx_pciercx_cfg450_s cn63xxp1;
+ struct cvmx_pciercx_cfg450_s cn66xx;
+ struct cvmx_pciercx_cfg450_s cn68xx;
+ struct cvmx_pciercx_cfg450_s cn68xxp1;
+ struct cvmx_pciercx_cfg450_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg450 cvmx_pciercx_cfg450_t;
+
+/**
+ * cvmx_pcierc#_cfg451
+ *
+ * PCIE_CFG451 = Four hundred fifty-second 32-bits of PCIE type 1 config space
+ * (Ack Frequency Register)
+ */
+union cvmx_pciercx_cfg451 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg451_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_31_31 : 1;
+ uint32_t easpml1 : 1; /**< Enter ASPM L1 without receive in L0s
+ Allows the core to enter ASPM L1 even when the link partner
+ did not go to L0s (receive is not in L0s).
+ When not set, the core goes to ASPM L1 only after an idle
+ period during which both receive and transmit are in L0s. */
+ uint32_t l1el : 3; /**< L1 Entrance Latency
+ Values correspond to:
+ o 000: 1 ms
+ o 001: 2 ms
+ o 010: 4 ms
+ o 011: 8 ms
+ o 100: 16 ms
+ o 101: 32 ms
+ o 110 or 111: 64 ms */
+ uint32_t l0el : 3; /**< L0s Entrance Latency
+ Values correspond to:
+ o 000: 1 ms
+ o 001: 2 ms
+ o 010: 3 ms
+ o 011: 4 ms
+ o 100: 5 ms
+ o 101: 6 ms
+ o 110 or 111: 7 ms */
+ uint32_t n_fts_cc : 8; /**< N_FTS when common clock is used.
+ The number of Fast Training Sequence ordered sets to be
+ transmitted when transitioning from L0s to L0. The maximum
+ number of FTS ordered-sets that a component can request is 255.
+ Note: The core does not support a value of zero; a value of
+ zero can cause the LTSSM to go into the recovery state
+ when exiting from L0s. */
+ uint32_t n_fts : 8; /**< N_FTS
+ The number of Fast Training Sequence ordered sets to be
+ transmitted when transitioning from L0s to L0. The maximum
+ number of FTS ordered-sets that a component can request is 255.
+ Note: The core does not support a value of zero; a value of
+ zero can cause the LTSSM to go into the recovery state
+ when exiting from L0s. */
+ uint32_t ack_freq : 8; /**< Ack Frequency
+ The number of Acks to accumulate (up to 255) before
+ sending an Ack DLLP. */
+#else
+ uint32_t ack_freq : 8;
+ uint32_t n_fts : 8;
+ uint32_t n_fts_cc : 8;
+ uint32_t l0el : 3;
+ uint32_t l1el : 3;
+ uint32_t easpml1 : 1;
+ uint32_t reserved_31_31 : 1;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg451_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_30_31 : 2;
+ uint32_t l1el : 3; /**< L1 Entrance Latency
+ Values correspond to:
+ o 000: 1 ms
+ o 001: 2 ms
+ o 010: 4 ms
+ o 011: 8 ms
+ o 100: 16 ms
+ o 101: 32 ms
+ o 110 or 111: 64 ms */
+ uint32_t l0el : 3; /**< L0s Entrance Latency
+ Values correspond to:
+ o 000: 1 ms
+ o 001: 2 ms
+ o 010: 3 ms
+ o 011: 4 ms
+ o 100: 5 ms
+ o 101: 6 ms
+ o 110 or 111: 7 ms */
+ uint32_t n_fts_cc : 8; /**< N_FTS when common clock is used.
+ The number of Fast Training Sequence ordered sets to be
+ transmitted when transitioning from L0s to L0. The maximum
+ number of FTS ordered-sets that a component can request is 255.
+ Note: The core does not support a value of zero; a value of
+ zero can cause the LTSSM to go into the recovery state
+ when exiting from L0s. */
+ uint32_t n_fts : 8; /**< N_FTS
+ The number of Fast Training Sequence ordered sets to be
+ transmitted when transitioning from L0s to L0. The maximum
+ number of FTS ordered-sets that a component can request is 255.
+ Note: The core does not support a value of zero; a value of
+ zero can cause the LTSSM to go into the recovery state
+ when exiting from L0s. */
+ uint32_t ack_freq : 8; /**< Ack Frequency
+ The number of Acks to accumulate (up to 255) before
+ sending an Ack DLLP. */
+#else
+ uint32_t ack_freq : 8;
+ uint32_t n_fts : 8;
+ uint32_t n_fts_cc : 8;
+ uint32_t l0el : 3;
+ uint32_t l1el : 3;
+ uint32_t reserved_30_31 : 2;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg451_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg451_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg451_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg451_s cn61xx;
+ struct cvmx_pciercx_cfg451_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg451_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg451_s cn66xx;
+ struct cvmx_pciercx_cfg451_s cn68xx;
+ struct cvmx_pciercx_cfg451_s cn68xxp1;
+ struct cvmx_pciercx_cfg451_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg451 cvmx_pciercx_cfg451_t;
+
+/**
+ * cvmx_pcierc#_cfg452
+ *
+ * PCIE_CFG452 = Four hundred fifty-third 32-bits of PCIE type 1 config space
+ * (Port Link Control Register)
+ */
+union cvmx_pciercx_cfg452 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg452_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_26_31 : 6;
+ uint32_t eccrc : 1; /**< Enable Corrupted CRC
+ When set, corrupts the LCRC of transmitted TLPs,
+ using the pattern contained in the Other Message register.
+ This is a test feature, not to be used in normal operation. */
+ uint32_t reserved_22_24 : 3;
+ uint32_t lme : 6; /**< Link Mode Enable
+ o 000001: x1
+ o 000011: x2
+ o 000111: x4 (not supported)
+ o 001111: x8 (not supported)
+ o 011111: x16 (not supported)
+ o 111111: x32 (not supported)
+ This field indicates the MAXIMUM number of lanes supported
+ by the PCIe port. The value can be set less than 0x3
+ to limit the number of lanes the PCIe will attempt to use.
+ The programming of this field needs to be done by SW BEFORE
+ enabling the link. See also MLW.
+ (Note: The value of this field does NOT indicate the number
+ of lanes in use by the PCIe. LME sets the max number of lanes
+ in the PCIe core that COULD be used. As per the PCIe specs,
+ the PCIe core can negotiate a smaller link width, so
+ x1 is also supported when LME=0x3, for example.) */
+ uint32_t reserved_8_15 : 8;
+ uint32_t flm : 1; /**< Fast Link Mode
+ Sets all internal timers to fast mode for simulation purposes. */
+ uint32_t reserved_6_6 : 1;
+ uint32_t dllle : 1; /**< DLL Link Enable
+ Enables Link initialization. If DLL Link Enable = 0, the PCI
+ Express bus does not transmit InitFC DLLPs and does not
+ establish a Link. */
+ uint32_t reserved_4_4 : 1;
+ uint32_t ra : 1; /**< Reset Assert
+ Triggers a recovery and forces the LTSSM to the Hot Reset
+ state (downstream port only). */
+ uint32_t le : 1; /**< Loopback Enable
+ Initiate loopback mode as a master. On a 0->1 transition,
+ the PCIe core sends TS ordered sets with the loopback bit set
+ to cause the link partner to enter into loopback mode as a
+ slave. Normal transmission is not possible when LE=1. To exit
+ loopback mode, take the link through a reset sequence. */
+ uint32_t sd : 1; /**< Scramble Disable
+ Turns off data scrambling. */
+ uint32_t omr : 1; /**< Other Message Request
+ When software writes a `1' to this bit, the PCI Express bus
+ transmits the Message contained in the Other Message register. */
+#else
+ uint32_t omr : 1;
+ uint32_t sd : 1;
+ uint32_t le : 1;
+ uint32_t ra : 1;
+ uint32_t reserved_4_4 : 1;
+ uint32_t dllle : 1;
+ uint32_t reserved_6_6 : 1;
+ uint32_t flm : 1;
+ uint32_t reserved_8_15 : 8;
+ uint32_t lme : 6;
+ uint32_t reserved_22_24 : 3;
+ uint32_t eccrc : 1;
+ uint32_t reserved_26_31 : 6;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg452_s cn52xx;
+ struct cvmx_pciercx_cfg452_s cn52xxp1;
+ struct cvmx_pciercx_cfg452_s cn56xx;
+ struct cvmx_pciercx_cfg452_s cn56xxp1;
+ struct cvmx_pciercx_cfg452_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_22_31 : 10;
+ uint32_t lme : 6; /**< Link Mode Enable
+ o 000001: x1
+ o 000011: x2
+ o 000111: x4
+ o 001111: x8 (not supported)
+ o 011111: x16 (not supported)
+ o 111111: x32 (not supported)
+ This field indicates the MAXIMUM number of lanes supported
+ by the PCIe port. The value can be set less than 0x7
+ to limit the number of lanes the PCIe will attempt to use.
+ The programming of this field needs to be done by SW BEFORE
+ enabling the link. See also MLW.
+ (Note: The value of this field does NOT indicate the number
+ of lanes in use by the PCIe. LME sets the max number of lanes
+ in the PCIe core that COULD be used. As per the PCIe specs,
+ the PCIe core can negotiate a smaller link width, so all
+ of x4, x2, and x1 are supported when LME=0x7,
+ for example.) */
+ uint32_t reserved_8_15 : 8;
+ uint32_t flm : 1; /**< Fast Link Mode
+ Sets all internal timers to fast mode for simulation purposes. */
+ uint32_t reserved_6_6 : 1;
+ uint32_t dllle : 1; /**< DLL Link Enable
+ Enables Link initialization. If DLL Link Enable = 0, the PCI
+ Express bus does not transmit InitFC DLLPs and does not
+ establish a Link. */
+ uint32_t reserved_4_4 : 1;
+ uint32_t ra : 1; /**< Reset Assert
+ Triggers a recovery and forces the LTSSM to the Hot Reset
+ state (downstream port only). */
+ uint32_t le : 1; /**< Loopback Enable
+ Initiate loopback mode as a master. On a 0->1 transition,
+ the PCIe core sends TS ordered sets with the loopback bit set
+ to cause the link partner to enter into loopback mode as a
+ slave. Normal transmission is not possible when LE=1. To exit
+ loopback mode, take the link through a reset sequence. */
+ uint32_t sd : 1; /**< Scramble Disable
+ Turns off data scrambling. */
+ uint32_t omr : 1; /**< Other Message Request
+ When software writes a `1' to this bit, the PCI Express bus
+ transmits the Message contained in the Other Message register. */
+#else
+ uint32_t omr : 1;
+ uint32_t sd : 1;
+ uint32_t le : 1;
+ uint32_t ra : 1;
+ uint32_t reserved_4_4 : 1;
+ uint32_t dllle : 1;
+ uint32_t reserved_6_6 : 1;
+ uint32_t flm : 1;
+ uint32_t reserved_8_15 : 8;
+ uint32_t lme : 6;
+ uint32_t reserved_22_31 : 10;
+#endif
+ } cn61xx;
+ struct cvmx_pciercx_cfg452_s cn63xx;
+ struct cvmx_pciercx_cfg452_s cn63xxp1;
+ struct cvmx_pciercx_cfg452_cn61xx cn66xx;
+ struct cvmx_pciercx_cfg452_cn61xx cn68xx;
+ struct cvmx_pciercx_cfg452_cn61xx cn68xxp1;
+ struct cvmx_pciercx_cfg452_cn61xx cnf71xx;
+};
+typedef union cvmx_pciercx_cfg452 cvmx_pciercx_cfg452_t;
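+
+/* Illustrative sketch (hypothetical helper): transmit an "other" Message
+ * by loading its payload into CFG449 and then setting the OMR bit of this
+ * Port Link Control register, following the procedure in the CFG449
+ * description above. Assumes the accessors noted above. */
+static inline void cvmx_pciercx_send_other_message(int pcie_port,
+                                                   uint32_t payload)
+{
+    cvmx_pciercx_cfg449_t cfg449;
+    cvmx_pciercx_cfg452_t cfg452;
+    cfg449.u32 = payload; /* Message payload */
+    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG449(pcie_port), cfg449.u32);
+    cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
+    cfg452.s.omr = 1; /* request transmission of the Message in CFG449 */
+    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), cfg452.u32);
+}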
+
+/**
+ * cvmx_pcierc#_cfg453
+ *
+ * PCIE_CFG453 = Four hundred fifty-fourth 32-bits of PCIE type 1 config space
+ * (Lane Skew Register)
+ */
+union cvmx_pciercx_cfg453 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg453_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dlld : 1; /**< Disable Lane-to-Lane Deskew
+ Disables the internal Lane-to-Lane deskew logic. */
+ uint32_t reserved_26_30 : 5;
+ uint32_t ack_nak : 1; /**< Ack/Nak Disable
+ Prevents the PCI Express bus from sending Ack and Nak DLLPs. */
+ uint32_t fcd : 1; /**< Flow Control Disable
+ Prevents the PCI Express bus from sending FC DLLPs. */
+ uint32_t ilst : 24; /**< Insert Lane Skew for Transmit (not supported for x16)
+ Causes skew between lanes for test purposes. There are three
+ bits per Lane. The value is in units of one symbol time. For
+ example, the value 010b for a Lane forces a skew of two symbol
+ times for that Lane. The maximum skew value for any Lane is 5
+ symbol times. */
+#else
+ uint32_t ilst : 24;
+ uint32_t fcd : 1;
+ uint32_t ack_nak : 1;
+ uint32_t reserved_26_30 : 5;
+ uint32_t dlld : 1;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg453_s cn52xx;
+ struct cvmx_pciercx_cfg453_s cn52xxp1;
+ struct cvmx_pciercx_cfg453_s cn56xx;
+ struct cvmx_pciercx_cfg453_s cn56xxp1;
+ struct cvmx_pciercx_cfg453_s cn61xx;
+ struct cvmx_pciercx_cfg453_s cn63xx;
+ struct cvmx_pciercx_cfg453_s cn63xxp1;
+ struct cvmx_pciercx_cfg453_s cn66xx;
+ struct cvmx_pciercx_cfg453_s cn68xx;
+ struct cvmx_pciercx_cfg453_s cn68xxp1;
+ struct cvmx_pciercx_cfg453_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg453 cvmx_pciercx_cfg453_t;
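
[Editor's note] The ILST encoding above (three bits per lane, in units of one symbol time, capped at 5) packs mechanically; a hedged sketch, where the lane-0-in-bits-2:0 ordering and the eight-lane count (24 / 3) are our assumptions rather than anything this header states:

    #include <stdint.h>

    /* Pack per-lane transmit skew (0..5 symbol times) into the 24-bit
     * ILST field: three bits per lane, lane 0 assumed in bits 2:0. */
    static uint32_t pack_ilst(const uint8_t skew[8])
    {
        uint32_t ilst = 0;
        for (int lane = 0; lane < 8; lane++) {
            uint32_t s = (skew[lane] > 5) ? 5 : skew[lane]; /* max is 5 */
            ilst |= s << (3 * lane);
        }
        return ilst; /* skew[0] = 2 (010b) delays lane 0 by two symbol times */
    }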
+
+/**
+ * cvmx_pcierc#_cfg454
+ *
+ * PCIE_CFG454 = Four hundred fifty-fifth 32-bits of PCIE type 1 config space
+ * (Symbol Number Register)
+ */
+union cvmx_pciercx_cfg454 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg454_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cx_nfunc : 3; /**< Number of Functions (minus 1)
+ Configuration Requests targeted at function numbers above this
+ value are returned with Unsupported Request status */
+ uint32_t tmfcwt : 5; /**< Timer Modifier for Flow Control Watchdog Timer
+ Increases the timer value for the Flow Control watchdog timer,
+ in increments of 16 clock cycles. */
+ uint32_t tmanlt : 5; /**< Timer Modifier for Ack/Nak Latency Timer
+ Increases the timer value for the Ack/Nak latency timer, in
+ increments of 64 clock cycles. */
+ uint32_t tmrt : 5; /**< Timer Modifier for Replay Timer
+ Increases the timer value for the replay timer, in increments
+ of 64 clock cycles. */
+ uint32_t reserved_11_13 : 3;
+ uint32_t nskps : 3; /**< Number of SKP Symbols */
+ uint32_t reserved_0_7 : 8;
+#else
+ uint32_t reserved_0_7 : 8;
+ uint32_t nskps : 3;
+ uint32_t reserved_11_13 : 3;
+ uint32_t tmrt : 5;
+ uint32_t tmanlt : 5;
+ uint32_t tmfcwt : 5;
+ uint32_t cx_nfunc : 3;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg454_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_29_31 : 3;
+ uint32_t tmfcwt : 5; /**< Timer Modifier for Flow Control Watchdog Timer
+ Increases the timer value for the Flow Control watchdog timer,
+ in increments of 16 clock cycles. */
+ uint32_t tmanlt : 5; /**< Timer Modifier for Ack/Nak Latency Timer
+ Increases the timer value for the Ack/Nak latency timer, in
+ increments of 64 clock cycles. */
+ uint32_t tmrt : 5; /**< Timer Modifier for Replay Timer
+ Increases the timer value for the replay timer, in increments
+ of 64 clock cycles. */
+ uint32_t reserved_11_13 : 3;
+ uint32_t nskps : 3; /**< Number of SKP Symbols */
+ uint32_t reserved_4_7 : 4;
+ uint32_t ntss : 4; /**< Number of TS Symbols
+ Sets the number of TS identifier symbols that are sent in TS1
+ and TS2 ordered sets. */
+#else
+ uint32_t ntss : 4;
+ uint32_t reserved_4_7 : 4;
+ uint32_t nskps : 3;
+ uint32_t reserved_11_13 : 3;
+ uint32_t tmrt : 5;
+ uint32_t tmanlt : 5;
+ uint32_t tmfcwt : 5;
+ uint32_t reserved_29_31 : 3;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg454_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg454_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg454_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg454_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cx_nfunc : 3; /**< Number of Functions (minus 1)
+ Configuration Requests targeted at function numbers above this
+ value are returned with Unsupported Request status */
+ uint32_t tmfcwt : 5; /**< Timer Modifier for Flow Control Watchdog Timer
+ Increases the timer value for the Flow Control watchdog timer,
+ in increments of 16 clock cycles. */
+ uint32_t tmanlt : 5; /**< Timer Modifier for Ack/Nak Latency Timer
+ Increases the timer value for the Ack/Nak latency timer, in
+ increments of 64 clock cycles. */
+ uint32_t tmrt : 5; /**< Timer Modifier for Replay Timer
+ Increases the timer value for the replay timer, in increments
+ of 64 clock cycles. */
+ uint32_t reserved_8_13 : 6;
+ uint32_t mfuncn : 8; /**< Max Number of Functions Supported */
+#else
+ uint32_t mfuncn : 8;
+ uint32_t reserved_8_13 : 6;
+ uint32_t tmrt : 5;
+ uint32_t tmanlt : 5;
+ uint32_t tmfcwt : 5;
+ uint32_t cx_nfunc : 3;
+#endif
+ } cn61xx;
+ struct cvmx_pciercx_cfg454_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg454_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg454_cn61xx cn66xx;
+ struct cvmx_pciercx_cfg454_cn61xx cn68xx;
+ struct cvmx_pciercx_cfg454_cn52xx cn68xxp1;
+ struct cvmx_pciercx_cfg454_cn61xx cnf71xx;
+};
+typedef union cvmx_pciercx_cfg454 cvmx_pciercx_cfg454_t;
+
+/**
+ * cvmx_pcierc#_cfg455
+ *
+ * PCIE_CFG455 = Four hundred fifty-sixth 32-bits of PCIE type 1 config space
+ * (Symbol Timer Register/Filter Mask Register 1)
+ */
+union cvmx_pciercx_cfg455 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg455_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t m_cfg0_filt : 1; /**< Mask filtering of received Configuration Requests (RC mode only) */
+ uint32_t m_io_filt : 1; /**< Mask filtering of received I/O Requests (RC mode only) */
+ uint32_t msg_ctrl : 1; /**< Message Control
+ The application must not change this field. */
+ uint32_t m_cpl_ecrc_filt : 1; /**< Mask ECRC error filtering for Completions */
+ uint32_t m_ecrc_filt : 1; /**< Mask ECRC error filtering */
+ uint32_t m_cpl_len_err : 1; /**< Mask Length mismatch error for received Completions */
+ uint32_t m_cpl_attr_err : 1; /**< Mask Attributes mismatch error for received Completions */
+ uint32_t m_cpl_tc_err : 1; /**< Mask Traffic Class mismatch error for received Completions */
+ uint32_t m_cpl_fun_err : 1; /**< Mask function mismatch error for received Completions */
+ uint32_t m_cpl_rid_err : 1; /**< Mask Requester ID mismatch error for received Completions */
+ uint32_t m_cpl_tag_err : 1; /**< Mask Tag error rules for received Completions */
+ uint32_t m_lk_filt : 1; /**< Mask Locked Request filtering */
+ uint32_t m_cfg1_filt : 1; /**< Mask Type 1 Configuration Request filtering */
+ uint32_t m_bar_match : 1; /**< Mask BAR match filtering */
+ uint32_t m_pois_filt : 1; /**< Mask poisoned TLP filtering */
+ uint32_t m_fun : 1; /**< Mask function */
+ uint32_t dfcwt : 1; /**< Disable FC Watchdog Timer */
+ uint32_t reserved_11_14 : 4;
+ uint32_t skpiv : 11; /**< SKP Interval Value */
+#else
+ uint32_t skpiv : 11;
+ uint32_t reserved_11_14 : 4;
+ uint32_t dfcwt : 1;
+ uint32_t m_fun : 1;
+ uint32_t m_pois_filt : 1;
+ uint32_t m_bar_match : 1;
+ uint32_t m_cfg1_filt : 1;
+ uint32_t m_lk_filt : 1;
+ uint32_t m_cpl_tag_err : 1;
+ uint32_t m_cpl_rid_err : 1;
+ uint32_t m_cpl_fun_err : 1;
+ uint32_t m_cpl_tc_err : 1;
+ uint32_t m_cpl_attr_err : 1;
+ uint32_t m_cpl_len_err : 1;
+ uint32_t m_ecrc_filt : 1;
+ uint32_t m_cpl_ecrc_filt : 1;
+ uint32_t msg_ctrl : 1;
+ uint32_t m_io_filt : 1;
+ uint32_t m_cfg0_filt : 1;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg455_s cn52xx;
+ struct cvmx_pciercx_cfg455_s cn52xxp1;
+ struct cvmx_pciercx_cfg455_s cn56xx;
+ struct cvmx_pciercx_cfg455_s cn56xxp1;
+ struct cvmx_pciercx_cfg455_s cn61xx;
+ struct cvmx_pciercx_cfg455_s cn63xx;
+ struct cvmx_pciercx_cfg455_s cn63xxp1;
+ struct cvmx_pciercx_cfg455_s cn66xx;
+ struct cvmx_pciercx_cfg455_s cn68xx;
+ struct cvmx_pciercx_cfg455_s cn68xxp1;
+ struct cvmx_pciercx_cfg455_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg455 cvmx_pciercx_cfg455_t;
+
+/**
+ * cvmx_pcierc#_cfg456
+ *
+ * PCIE_CFG456 = Four hundred fifty-seventh 32-bits of PCIE type 1 config space
+ * (Filter Mask Register 2)
+ */
+union cvmx_pciercx_cfg456 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg456_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_4_31 : 28;
+ uint32_t m_handle_flush : 1; /**< Mask Core Filter to handle flush request */
+ uint32_t m_dabort_4ucpl : 1; /**< Mask DLLP abort for unexpected CPL */
+ uint32_t m_vend1_drp : 1; /**< Mask Vendor MSG Type 1 dropped silently */
+ uint32_t m_vend0_drp : 1; /**< Mask Vendor MSG Type 0 dropped with UR error reporting. */
+#else
+ uint32_t m_vend0_drp : 1;
+ uint32_t m_vend1_drp : 1;
+ uint32_t m_dabort_4ucpl : 1;
+ uint32_t m_handle_flush : 1;
+ uint32_t reserved_4_31 : 28;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg456_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_2_31 : 30;
+ uint32_t m_vend1_drp : 1; /**< Mask Vendor MSG Type 1 dropped silently */
+ uint32_t m_vend0_drp : 1; /**< Mask Vendor MSG Type 0 dropped with UR error reporting. */
+#else
+ uint32_t m_vend0_drp : 1;
+ uint32_t m_vend1_drp : 1;
+ uint32_t reserved_2_31 : 30;
+#endif
+ } cn52xx;
+ struct cvmx_pciercx_cfg456_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg456_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg456_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg456_s cn61xx;
+ struct cvmx_pciercx_cfg456_cn52xx cn63xx;
+ struct cvmx_pciercx_cfg456_cn52xx cn63xxp1;
+ struct cvmx_pciercx_cfg456_s cn66xx;
+ struct cvmx_pciercx_cfg456_s cn68xx;
+ struct cvmx_pciercx_cfg456_cn52xx cn68xxp1;
+ struct cvmx_pciercx_cfg456_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg456 cvmx_pciercx_cfg456_t;
+
+/**
+ * cvmx_pcierc#_cfg458
+ *
+ * PCIE_CFG458 = Four hundred fifty-ninth 32-bits of PCIE type 1 config space
+ * (Debug Register 0)
+ */
+union cvmx_pciercx_cfg458 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg458_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dbg_info_l32 : 32; /**< The value on cxpl_debug_info[31:0]. */
+#else
+ uint32_t dbg_info_l32 : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg458_s cn52xx;
+ struct cvmx_pciercx_cfg458_s cn52xxp1;
+ struct cvmx_pciercx_cfg458_s cn56xx;
+ struct cvmx_pciercx_cfg458_s cn56xxp1;
+ struct cvmx_pciercx_cfg458_s cn61xx;
+ struct cvmx_pciercx_cfg458_s cn63xx;
+ struct cvmx_pciercx_cfg458_s cn63xxp1;
+ struct cvmx_pciercx_cfg458_s cn66xx;
+ struct cvmx_pciercx_cfg458_s cn68xx;
+ struct cvmx_pciercx_cfg458_s cn68xxp1;
+ struct cvmx_pciercx_cfg458_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg458 cvmx_pciercx_cfg458_t;
+
+/**
+ * cvmx_pcierc#_cfg459
+ *
+ * PCIE_CFG459 = Four hundred sixtieth 32-bits of PCIE type 1 config space
+ * (Debug Register 1)
+ */
+union cvmx_pciercx_cfg459 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg459_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dbg_info_u32 : 32; /**< The value on cxpl_debug_info[63:32]. */
+#else
+ uint32_t dbg_info_u32 : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg459_s cn52xx;
+ struct cvmx_pciercx_cfg459_s cn52xxp1;
+ struct cvmx_pciercx_cfg459_s cn56xx;
+ struct cvmx_pciercx_cfg459_s cn56xxp1;
+ struct cvmx_pciercx_cfg459_s cn61xx;
+ struct cvmx_pciercx_cfg459_s cn63xx;
+ struct cvmx_pciercx_cfg459_s cn63xxp1;
+ struct cvmx_pciercx_cfg459_s cn66xx;
+ struct cvmx_pciercx_cfg459_s cn68xx;
+ struct cvmx_pciercx_cfg459_s cn68xxp1;
+ struct cvmx_pciercx_cfg459_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg459 cvmx_pciercx_cfg459_t;
+
+/**
+ * cvmx_pcierc#_cfg460
+ *
+ * PCIE_CFG460 = Four hundred sixty-first 32-bits of PCIE type 1 config space
+ * (Transmit Posted FC Credit Status)
+ */
+union cvmx_pciercx_cfg460 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg460_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t tphfcc : 8; /**< Transmit Posted Header FC Credits
+ The Posted Header credits advertised by the receiver at the
+ other end of the Link, updated with each UpdateFC DLLP. */
+ uint32_t tpdfcc : 12; /**< Transmit Posted Data FC Credits
+ The Posted Data credits advertised by the receiver at the other
+ end of the Link, updated with each UpdateFC DLLP. */
+#else
+ uint32_t tpdfcc : 12;
+ uint32_t tphfcc : 8;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg460_s cn52xx;
+ struct cvmx_pciercx_cfg460_s cn52xxp1;
+ struct cvmx_pciercx_cfg460_s cn56xx;
+ struct cvmx_pciercx_cfg460_s cn56xxp1;
+ struct cvmx_pciercx_cfg460_s cn61xx;
+ struct cvmx_pciercx_cfg460_s cn63xx;
+ struct cvmx_pciercx_cfg460_s cn63xxp1;
+ struct cvmx_pciercx_cfg460_s cn66xx;
+ struct cvmx_pciercx_cfg460_s cn68xx;
+ struct cvmx_pciercx_cfg460_s cn68xxp1;
+ struct cvmx_pciercx_cfg460_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg460 cvmx_pciercx_cfg460_t;
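
[Editor's note] Registers 460-462 are status snapshots of the credits the link partner last advertised via UpdateFC DLLPs; a small read-only sketch, again assuming the SDK's cvmx_pcie_cfgx_read() and cvmx_dprintf() helpers plus the CVMX_PCIERCX_CFG460(port) macro from elsewhere in this header:

    /* Print the Posted header/data credits currently advertised by the
     * receiver at the other end of the link. */
    static void show_posted_credits(int pcie_port)
    {
        cvmx_pciercx_cfg460_t cfg460;

        cfg460.u32 = cvmx_pcie_cfgx_read(pcie_port,
                                         CVMX_PCIERCX_CFG460(pcie_port));
        cvmx_dprintf("posted credits: %u header, %u data\n",
                     (unsigned)cfg460.s.tphfcc, (unsigned)cfg460.s.tpdfcc);
    }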
+
+/**
+ * cvmx_pcierc#_cfg461
+ *
+ * PCIE_CFG461 = Four hundred sixty-second 32-bits of PCIE type 1 config space
+ * (Transmit Non-Posted FC Credit Status)
+ */
+union cvmx_pciercx_cfg461 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg461_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t tchfcc : 8; /**< Transmit Non-Posted Header FC Credits
+ The Non-Posted Header credits advertised by the receiver at the
+ other end of the Link, updated with each UpdateFC DLLP. */
+ uint32_t tcdfcc : 12; /**< Transmit Non-Posted Data FC Credits
+ The Non-Posted Data credits advertised by the receiver at the
+ other end of the Link, updated with each UpdateFC DLLP. */
+#else
+ uint32_t tcdfcc : 12;
+ uint32_t tchfcc : 8;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg461_s cn52xx;
+ struct cvmx_pciercx_cfg461_s cn52xxp1;
+ struct cvmx_pciercx_cfg461_s cn56xx;
+ struct cvmx_pciercx_cfg461_s cn56xxp1;
+ struct cvmx_pciercx_cfg461_s cn61xx;
+ struct cvmx_pciercx_cfg461_s cn63xx;
+ struct cvmx_pciercx_cfg461_s cn63xxp1;
+ struct cvmx_pciercx_cfg461_s cn66xx;
+ struct cvmx_pciercx_cfg461_s cn68xx;
+ struct cvmx_pciercx_cfg461_s cn68xxp1;
+ struct cvmx_pciercx_cfg461_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg461 cvmx_pciercx_cfg461_t;
+
+/**
+ * cvmx_pcierc#_cfg462
+ *
+ * PCIE_CFG462 = Four hundred sixty-third 32-bits of PCIE type 1 config space
+ * (Transmit Completion FC Credit Status)
+ */
+union cvmx_pciercx_cfg462 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg462_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t tchfcc : 8; /**< Transmit Completion Header FC Credits
+ The Completion Header credits advertised by the receiver at the
+ other end of the Link, updated with each UpdateFC DLLP. */
+ uint32_t tcdfcc : 12; /**< Transmit Completion Data FC Credits
+ The Completion Data credits advertised by the receiver at the
+ other end of the Link, updated with each UpdateFC DLLP. */
+#else
+ uint32_t tcdfcc : 12;
+ uint32_t tchfcc : 8;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg462_s cn52xx;
+ struct cvmx_pciercx_cfg462_s cn52xxp1;
+ struct cvmx_pciercx_cfg462_s cn56xx;
+ struct cvmx_pciercx_cfg462_s cn56xxp1;
+ struct cvmx_pciercx_cfg462_s cn61xx;
+ struct cvmx_pciercx_cfg462_s cn63xx;
+ struct cvmx_pciercx_cfg462_s cn63xxp1;
+ struct cvmx_pciercx_cfg462_s cn66xx;
+ struct cvmx_pciercx_cfg462_s cn68xx;
+ struct cvmx_pciercx_cfg462_s cn68xxp1;
+ struct cvmx_pciercx_cfg462_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg462 cvmx_pciercx_cfg462_t;
+
+/**
+ * cvmx_pcierc#_cfg463
+ *
+ * PCIE_CFG463 = Four hundred sixty-fourth 32-bits of PCIE type 1 config space
+ * (Queue Status)
+ */
+union cvmx_pciercx_cfg463 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg463_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_3_31 : 29;
+ uint32_t rqne : 1; /**< Received Queue Not Empty
+ Indicates there is data in one or more of the receive buffers. */
+ uint32_t trbne : 1; /**< Transmit Retry Buffer Not Empty
+ Indicates that there is data in the transmit retry buffer. */
+ uint32_t rtlpfccnr : 1; /**< Received TLP FC Credits Not Returned
+ Indicates that the PCI Express bus has sent a TLP but has not
+ yet received an UpdateFC DLLP indicating that the credits for
+ that TLP have been restored by the receiver at the other end of
+ the Link. */
+#else
+ uint32_t rtlpfccnr : 1;
+ uint32_t trbne : 1;
+ uint32_t rqne : 1;
+ uint32_t reserved_3_31 : 29;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg463_s cn52xx;
+ struct cvmx_pciercx_cfg463_s cn52xxp1;
+ struct cvmx_pciercx_cfg463_s cn56xx;
+ struct cvmx_pciercx_cfg463_s cn56xxp1;
+ struct cvmx_pciercx_cfg463_s cn61xx;
+ struct cvmx_pciercx_cfg463_s cn63xx;
+ struct cvmx_pciercx_cfg463_s cn63xxp1;
+ struct cvmx_pciercx_cfg463_s cn66xx;
+ struct cvmx_pciercx_cfg463_s cn68xx;
+ struct cvmx_pciercx_cfg463_s cn68xxp1;
+ struct cvmx_pciercx_cfg463_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg463 cvmx_pciercx_cfg463_t;
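
[Editor's note] One plausible use of the Queue Status bits, sketched under our own assumptions (the shutdown-drain scenario is ours, not something this header prescribes; same accessor assumptions as above):

    /* Poll (bounded) until the transmit retry buffer has drained,
     * e.g. before taking the link down. Returns 0 once empty. */
    static int wait_retry_buffer_empty(int pcie_port, int max_polls)
    {
        cvmx_pciercx_cfg463_t cfg463;

        while (max_polls-- > 0) {
            cfg463.u32 = cvmx_pcie_cfgx_read(pcie_port,
                                             CVMX_PCIERCX_CFG463(pcie_port));
            if (!cfg463.s.trbne) /* no un-acked TLPs left in the buffer */
                return 0;
        }
        return -1; /* still draining */
    }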
+
+/**
+ * cvmx_pcierc#_cfg464
+ *
+ * PCIE_CFG464 = Four hundred sixty-fifth 32-bits of PCIE type 1 config space
+ * (VC Transmit Arbitration Register 1)
+ */
+union cvmx_pciercx_cfg464 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg464_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t wrr_vc3 : 8; /**< WRR Weight for VC3 */
+ uint32_t wrr_vc2 : 8; /**< WRR Weight for VC2 */
+ uint32_t wrr_vc1 : 8; /**< WRR Weight for VC1 */
+ uint32_t wrr_vc0 : 8; /**< WRR Weight for VC0 */
+#else
+ uint32_t wrr_vc0 : 8;
+ uint32_t wrr_vc1 : 8;
+ uint32_t wrr_vc2 : 8;
+ uint32_t wrr_vc3 : 8;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg464_s cn52xx;
+ struct cvmx_pciercx_cfg464_s cn52xxp1;
+ struct cvmx_pciercx_cfg464_s cn56xx;
+ struct cvmx_pciercx_cfg464_s cn56xxp1;
+ struct cvmx_pciercx_cfg464_s cn61xx;
+ struct cvmx_pciercx_cfg464_s cn63xx;
+ struct cvmx_pciercx_cfg464_s cn63xxp1;
+ struct cvmx_pciercx_cfg464_s cn66xx;
+ struct cvmx_pciercx_cfg464_s cn68xx;
+ struct cvmx_pciercx_cfg464_s cn68xxp1;
+ struct cvmx_pciercx_cfg464_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg464 cvmx_pciercx_cfg464_t;
+
+/**
+ * cvmx_pcierc#_cfg465
+ *
+ * PCIE_CFG465 = Four hundred sixty-sixth 32-bits of PCIE type 1 config space
+ * (VC Transmit Arbitration Register 2)
+ */
+union cvmx_pciercx_cfg465 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg465_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t wrr_vc7 : 8; /**< WRR Weight for VC7 */
+ uint32_t wrr_vc6 : 8; /**< WRR Weight for VC6 */
+ uint32_t wrr_vc5 : 8; /**< WRR Weight for VC5 */
+ uint32_t wrr_vc4 : 8; /**< WRR Weight for VC4 */
+#else
+ uint32_t wrr_vc4 : 8;
+ uint32_t wrr_vc5 : 8;
+ uint32_t wrr_vc6 : 8;
+ uint32_t wrr_vc7 : 8;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg465_s cn52xx;
+ struct cvmx_pciercx_cfg465_s cn52xxp1;
+ struct cvmx_pciercx_cfg465_s cn56xx;
+ struct cvmx_pciercx_cfg465_s cn56xxp1;
+ struct cvmx_pciercx_cfg465_s cn61xx;
+ struct cvmx_pciercx_cfg465_s cn63xx;
+ struct cvmx_pciercx_cfg465_s cn63xxp1;
+ struct cvmx_pciercx_cfg465_s cn66xx;
+ struct cvmx_pciercx_cfg465_s cn68xx;
+ struct cvmx_pciercx_cfg465_s cn68xxp1;
+ struct cvmx_pciercx_cfg465_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg465 cvmx_pciercx_cfg465_t;
+
+/**
+ * cvmx_pcierc#_cfg466
+ *
+ * PCIE_CFG466 = Four hundred sixty-seventh 32-bits of PCIE type 1 config space
+ * (VC0 Posted Receive Queue Control)
+ */
+union cvmx_pciercx_cfg466 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg466_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rx_queue_order : 1; /**< VC Ordering for Receive Queues
+ Determines the VC ordering rule for the receive queues, used
+ only in the segmented-buffer configuration,
+ writable through PEM(0..1)_CFG_WR:
+ o 1: Strict ordering, higher numbered VCs have higher priority
+ o 0: Round robin
+ However, the application must not change this field. */
+ uint32_t type_ordering : 1; /**< TLP Type Ordering for VC0
+ Determines the TLP type ordering rule for VC0 receive queues,
+ used only in the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR:
+ o 1: Ordering of received TLPs follows the rules in
+ PCI Express Base Specification
+ o 0: Strict ordering for received TLPs: Posted, then
+ Completion, then Non-Posted
+ However, the application must not change this field. */
+ uint32_t reserved_24_29 : 6;
+ uint32_t queue_mode : 3; /**< VC0 Posted TLP Queue Mode
+ The operating mode of the Posted receive queue for VC0, used
+ only in the segmented-buffer configuration, writable through
+ PEM(0..1)_CFG_WR.
+ However, the application must not change this field.
+ Only one bit can be set at a time:
+ o Bit 23: Bypass
+ o Bit 22: Cut-through
+ o Bit 21: Store-and-forward */
+ uint32_t reserved_20_20 : 1;
+ uint32_t header_credits : 8; /**< VC0 Posted Header Credits
+ The number of initial Posted header credits for VC0, used for
+ all receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t data_credits : 12; /**< VC0 Posted Data Credits
+ The number of initial Posted data credits for VC0, used for all
+ receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_credits : 12;
+ uint32_t header_credits : 8;
+ uint32_t reserved_20_20 : 1;
+ uint32_t queue_mode : 3;
+ uint32_t reserved_24_29 : 6;
+ uint32_t type_ordering : 1;
+ uint32_t rx_queue_order : 1;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg466_s cn52xx;
+ struct cvmx_pciercx_cfg466_s cn52xxp1;
+ struct cvmx_pciercx_cfg466_s cn56xx;
+ struct cvmx_pciercx_cfg466_s cn56xxp1;
+ struct cvmx_pciercx_cfg466_s cn61xx;
+ struct cvmx_pciercx_cfg466_s cn63xx;
+ struct cvmx_pciercx_cfg466_s cn63xxp1;
+ struct cvmx_pciercx_cfg466_s cn66xx;
+ struct cvmx_pciercx_cfg466_s cn68xx;
+ struct cvmx_pciercx_cfg466_s cn68xxp1;
+ struct cvmx_pciercx_cfg466_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg466 cvmx_pciercx_cfg466_t;
+
+/**
+ * cvmx_pcierc#_cfg467
+ *
+ * PCIE_CFG467 = Four hundred sixty-eighth 32-bits of PCIE type 1 config space
+ * (VC0 Non-Posted Receive Queue Control)
+ */
+union cvmx_pciercx_cfg467 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg467_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t queue_mode : 3; /**< VC0 Non-Posted TLP Queue Mode
+ The operating mode of the Non-Posted receive queue for VC0,
+ used only in the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ Only one bit can be set at a time:
+ o Bit 23: Bypass
+ o Bit 22: Cut-through
+ o Bit 21: Store-and-forward
+ However, the application must not change this field. */
+ uint32_t reserved_20_20 : 1;
+ uint32_t header_credits : 8; /**< VC0 Non-Posted Header Credits
+ The number of initial Non-Posted header credits for VC0, used
+ for all receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t data_credits : 12; /**< VC0 Non-Posted Data Credits
+ The number of initial Non-Posted data credits for VC0, used for
+ all receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_credits : 12;
+ uint32_t header_credits : 8;
+ uint32_t reserved_20_20 : 1;
+ uint32_t queue_mode : 3;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg467_s cn52xx;
+ struct cvmx_pciercx_cfg467_s cn52xxp1;
+ struct cvmx_pciercx_cfg467_s cn56xx;
+ struct cvmx_pciercx_cfg467_s cn56xxp1;
+ struct cvmx_pciercx_cfg467_s cn61xx;
+ struct cvmx_pciercx_cfg467_s cn63xx;
+ struct cvmx_pciercx_cfg467_s cn63xxp1;
+ struct cvmx_pciercx_cfg467_s cn66xx;
+ struct cvmx_pciercx_cfg467_s cn68xx;
+ struct cvmx_pciercx_cfg467_s cn68xxp1;
+ struct cvmx_pciercx_cfg467_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg467 cvmx_pciercx_cfg467_t;
+
+/**
+ * cvmx_pcierc#_cfg468
+ *
+ * PCIE_CFG468 = Four hundred sixty-ninth 32-bits of PCIE type 1 config space
+ * (VC0 Completion Receive Queue Control)
+ */
+union cvmx_pciercx_cfg468 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg468_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t queue_mode : 3; /**< VC0 Completion TLP Queue Mode
+ The operating mode of the Completion receive queue for VC0,
+ used only in the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ Only one bit can be set at a time:
+ o Bit 23: Bypass
+ o Bit 22: Cut-through
+ o Bit 21: Store-and-forward
+ However, the application must not change this field. */
+ uint32_t reserved_20_20 : 1;
+ uint32_t header_credits : 8; /**< VC0 Completion Header Credits
+ The number of initial Completion header credits for VC0, used
+ for all receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t data_credits : 12; /**< VC0 Completion Data Credits
+ The number of initial Completion data credits for VC0, used for
+ all receive queue buffer configurations.
+ This field is writable through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_credits : 12;
+ uint32_t header_credits : 8;
+ uint32_t reserved_20_20 : 1;
+ uint32_t queue_mode : 3;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg468_s cn52xx;
+ struct cvmx_pciercx_cfg468_s cn52xxp1;
+ struct cvmx_pciercx_cfg468_s cn56xx;
+ struct cvmx_pciercx_cfg468_s cn56xxp1;
+ struct cvmx_pciercx_cfg468_s cn61xx;
+ struct cvmx_pciercx_cfg468_s cn63xx;
+ struct cvmx_pciercx_cfg468_s cn63xxp1;
+ struct cvmx_pciercx_cfg468_s cn66xx;
+ struct cvmx_pciercx_cfg468_s cn68xx;
+ struct cvmx_pciercx_cfg468_s cn68xxp1;
+ struct cvmx_pciercx_cfg468_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg468 cvmx_pciercx_cfg468_t;
+
+/**
+ * cvmx_pcierc#_cfg490
+ *
+ * PCIE_CFG490 = Four hundred ninety-first 32-bits of PCIE type 1 config space
+ * (VC0 Posted Buffer Depth)
+ */
+union cvmx_pciercx_cfg490 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg490_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_26_31 : 6;
+ uint32_t header_depth : 10; /**< VC0 Posted Header Queue Depth
+ Sets the number of entries in the Posted header queue for VC0
+ when using the segmented-buffer configuration, writable through
+ PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t data_depth : 14; /**< VC0 Posted Data Queue Depth
+ Sets the number of entries in the Posted data queue for VC0
+ when using the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_depth : 14;
+ uint32_t reserved_14_15 : 2;
+ uint32_t header_depth : 10;
+ uint32_t reserved_26_31 : 6;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg490_s cn52xx;
+ struct cvmx_pciercx_cfg490_s cn52xxp1;
+ struct cvmx_pciercx_cfg490_s cn56xx;
+ struct cvmx_pciercx_cfg490_s cn56xxp1;
+ struct cvmx_pciercx_cfg490_s cn61xx;
+ struct cvmx_pciercx_cfg490_s cn63xx;
+ struct cvmx_pciercx_cfg490_s cn63xxp1;
+ struct cvmx_pciercx_cfg490_s cn66xx;
+ struct cvmx_pciercx_cfg490_s cn68xx;
+ struct cvmx_pciercx_cfg490_s cn68xxp1;
+ struct cvmx_pciercx_cfg490_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg490 cvmx_pciercx_cfg490_t;
+
+/**
+ * cvmx_pcierc#_cfg491
+ *
+ * PCIE_CFG491 = Four hundred ninety-second 32-bits of PCIE type 1 config space
+ * (VC0 Non-Posted Buffer Depth)
+ */
+union cvmx_pciercx_cfg491 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg491_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_26_31 : 6;
+ uint32_t header_depth : 10; /**< VC0 Non-Posted Header Queue Depth
+ Sets the number of entries in the Non-Posted header queue for
+ VC0 when using the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t data_depth : 14; /**< VC0 Non-Posted Data Queue Depth
+ Sets the number of entries in the Non-Posted data queue for VC0
+ when using the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_depth : 14;
+ uint32_t reserved_14_15 : 2;
+ uint32_t header_depth : 10;
+ uint32_t reserved_26_31 : 6;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg491_s cn52xx;
+ struct cvmx_pciercx_cfg491_s cn52xxp1;
+ struct cvmx_pciercx_cfg491_s cn56xx;
+ struct cvmx_pciercx_cfg491_s cn56xxp1;
+ struct cvmx_pciercx_cfg491_s cn61xx;
+ struct cvmx_pciercx_cfg491_s cn63xx;
+ struct cvmx_pciercx_cfg491_s cn63xxp1;
+ struct cvmx_pciercx_cfg491_s cn66xx;
+ struct cvmx_pciercx_cfg491_s cn68xx;
+ struct cvmx_pciercx_cfg491_s cn68xxp1;
+ struct cvmx_pciercx_cfg491_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg491 cvmx_pciercx_cfg491_t;
+
+/**
+ * cvmx_pcierc#_cfg492
+ *
+ * PCIE_CFG492 = Four hundred ninety-third 32-bits of PCIE type 1 config space
+ * (VC0 Completion Buffer Depth)
+ */
+union cvmx_pciercx_cfg492 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg492_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_26_31 : 6;
+ uint32_t header_depth : 10; /**< VC0 Completion Header Queue Depth
+ Sets the number of entries in the Completion header queue for
+ VC0 when using the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t data_depth : 14; /**< VC0 Completion Data Queue Depth
+ Sets the number of entries in the Completion data queue for VC0
+ when using the segmented-buffer configuration, writable
+ through PEM(0..1)_CFG_WR.
+ However, the application must not change this field. */
+#else
+ uint32_t data_depth : 14;
+ uint32_t reserved_14_15 : 2;
+ uint32_t header_depth : 10;
+ uint32_t reserved_26_31 : 6;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg492_s cn52xx;
+ struct cvmx_pciercx_cfg492_s cn52xxp1;
+ struct cvmx_pciercx_cfg492_s cn56xx;
+ struct cvmx_pciercx_cfg492_s cn56xxp1;
+ struct cvmx_pciercx_cfg492_s cn61xx;
+ struct cvmx_pciercx_cfg492_s cn63xx;
+ struct cvmx_pciercx_cfg492_s cn63xxp1;
+ struct cvmx_pciercx_cfg492_s cn66xx;
+ struct cvmx_pciercx_cfg492_s cn68xx;
+ struct cvmx_pciercx_cfg492_s cn68xxp1;
+ struct cvmx_pciercx_cfg492_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg492 cvmx_pciercx_cfg492_t;
+
+/**
+ * cvmx_pcierc#_cfg515
+ *
+ * PCIE_CFG515 = Five hundred sixteenth 32-bits of PCIE type 1 config space
+ * (Port Logic Register (Gen2))
+ */
+union cvmx_pciercx_cfg515 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg515_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t s_d_e : 1; /**< SEL_DE_EMPHASIS
+ Used to set the de-emphasis level for upstream ports. */
+ uint32_t ctcrb : 1; /**< Config Tx Compliance Receive Bit
+ When set to 1, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit asserted (equal to 1). */
+ uint32_t cpyts : 1; /**< Config PHY Tx Swing
+ Indicates the voltage level the PHY should drive. When set to
+ 1, indicates Full Swing. When set to 0, indicates Low Swing. */
+ uint32_t dsc : 1; /**< Directed Speed Change
+ o a write of '1' will initiate a speed change
+ o always reads a zero */
+ uint32_t le : 9; /**< Lane Enable
+ Indicates the number of lanes to check for exit from electrical
+ idle in Polling.Active and Polling.Compliance. 1 = x1, 2 = x2,
+ etc. Used to limit the maximum link width to ignore broken
+ lanes that detect a receiver but will not exit electrical idle
+ and would otherwise prevent a valid link from being configured. */
+ uint32_t n_fts : 8; /**< N_FTS
+ Sets the Number of Fast Training Sequences (N_FTS) that
+ the core advertises as its N_FTS during GEN2 Link training.
+ This value is used to inform the Link partner about the PHY's
+ ability to recover synchronization after a low power state.
+ Note: Do not set N_FTS to zero; doing so can cause the
+ LTSSM to go into the recovery state when exiting from
+ L0s. */
+#else
+ uint32_t n_fts : 8;
+ uint32_t le : 9;
+ uint32_t dsc : 1;
+ uint32_t cpyts : 1;
+ uint32_t ctcrb : 1;
+ uint32_t s_d_e : 1;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg515_s cn61xx;
+ struct cvmx_pciercx_cfg515_s cn63xx;
+ struct cvmx_pciercx_cfg515_s cn63xxp1;
+ struct cvmx_pciercx_cfg515_s cn66xx;
+ struct cvmx_pciercx_cfg515_s cn68xx;
+ struct cvmx_pciercx_cfg515_s cn68xxp1;
+ struct cvmx_pciercx_cfg515_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg515 cvmx_pciercx_cfg515_t;
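
[Editor's note] The DSC bit above is write-one-to-trigger (it always reads back zero), so initiating a Gen2 speed change is a single read-modify-write; a sketch with the same cvmx_pcie_cfgx_read()/cvmx_pcie_cfgx_write() assumptions as before:

    /* Request a directed speed change on a Gen2-capable port. */
    static void request_speed_change(int pcie_port)
    {
        cvmx_pciercx_cfg515_t cfg515;

        cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port,
                                         CVMX_PCIERCX_CFG515(pcie_port));
        cfg515.s.dsc = 1; /* write of 1 initiates the change; reads as 0 */
        cvmx_pcie_cfgx_write(pcie_port,
                             CVMX_PCIERCX_CFG515(pcie_port), cfg515.u32);
    }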
+
+/**
+ * cvmx_pcierc#_cfg516
+ *
+ * PCIE_CFG516 = Five hundred seventeenth 32-bits of PCIE type 1 config space
+ * (PHY Status Register)
+ */
+union cvmx_pciercx_cfg516 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg516_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t phy_stat : 32; /**< PHY Status */
+#else
+ uint32_t phy_stat : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg516_s cn52xx;
+ struct cvmx_pciercx_cfg516_s cn52xxp1;
+ struct cvmx_pciercx_cfg516_s cn56xx;
+ struct cvmx_pciercx_cfg516_s cn56xxp1;
+ struct cvmx_pciercx_cfg516_s cn61xx;
+ struct cvmx_pciercx_cfg516_s cn63xx;
+ struct cvmx_pciercx_cfg516_s cn63xxp1;
+ struct cvmx_pciercx_cfg516_s cn66xx;
+ struct cvmx_pciercx_cfg516_s cn68xx;
+ struct cvmx_pciercx_cfg516_s cn68xxp1;
+ struct cvmx_pciercx_cfg516_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg516 cvmx_pciercx_cfg516_t;
+
+/**
+ * cvmx_pcierc#_cfg517
+ *
+ * PCIE_CFG517 = Five hundred eighteenth 32-bits of PCIE type 1 config space
+ * (PHY Control Register)
+ */
+union cvmx_pciercx_cfg517 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg517_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t phy_ctrl : 32; /**< PHY Control */
+#else
+ uint32_t phy_ctrl : 32;
+#endif
+ } s;
+ struct cvmx_pciercx_cfg517_s cn52xx;
+ struct cvmx_pciercx_cfg517_s cn52xxp1;
+ struct cvmx_pciercx_cfg517_s cn56xx;
+ struct cvmx_pciercx_cfg517_s cn56xxp1;
+ struct cvmx_pciercx_cfg517_s cn61xx;
+ struct cvmx_pciercx_cfg517_s cn63xx;
+ struct cvmx_pciercx_cfg517_s cn63xxp1;
+ struct cvmx_pciercx_cfg517_s cn66xx;
+ struct cvmx_pciercx_cfg517_s cn68xx;
+ struct cvmx_pciercx_cfg517_s cn68xxp1;
+ struct cvmx_pciercx_cfg517_s cnf71xx;
+};
+typedef union cvmx_pciercx_cfg517 cvmx_pciercx_cfg517_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pciercx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pcm-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pcm-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pcm-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,237 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pcm-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pcm.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_PCM_DEFS_H__
+#define __CVMX_PCM_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCM_CLKX_CFG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PCM_CLKX_CFG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010000ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_PCM_CLKX_CFG(offset) (CVMX_ADD_IO_SEG(0x0001070000010000ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCM_CLKX_DBG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PCM_CLKX_DBG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010038ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_PCM_CLKX_DBG(offset) (CVMX_ADD_IO_SEG(0x0001070000010038ull) + ((offset) & 1) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCM_CLKX_GEN(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PCM_CLKX_GEN(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010008ull) + ((offset) & 1) * 16384;
+}
+#else
+#define CVMX_PCM_CLKX_GEN(offset) (CVMX_ADD_IO_SEG(0x0001070000010008ull) + ((offset) & 1) * 16384)
+#endif
+
+/**
+ * cvmx_pcm_clk#_cfg
+ */
+union cvmx_pcm_clkx_cfg {
+ uint64_t u64;
+ struct cvmx_pcm_clkx_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fsyncgood : 1; /**< FSYNC status | NS
+ If 1, the last frame had a correctly positioned
+ fsync pulse
+ If 0, none/extra fsync pulse seen on most recent
+ frame
+ NOTE: this is intended for startup. The FSYNCEXTRA
+ and FSYNCMISSING interrupts are intended for
+ detecting loss of sync during normal operation. */
+ uint64_t reserved_48_62 : 15;
+ uint64_t fsyncsamp : 16; /**< Number of ECLKs from internal BCLK edge to | NS
+ sample FSYNC
+ NOTE: used to sync to the start of a frame and to
+ check for FSYNC errors. */
+ uint64_t reserved_26_31 : 6;
+ uint64_t fsynclen : 5; /**< Number of 1/2 BCLKs FSYNC is asserted for | NS
+ NOTE: only used when GEN==1 */
+ uint64_t fsyncloc : 5; /**< FSYNC location, in 1/2 BCLKS before timeslot 0, | NS
+ bit 0.
+ NOTE: also used to detect framing errors and
+ therefore must have a correct value even if GEN==0 */
+ uint64_t numslots : 10; /**< Number of 8-bit slots in a frame | NS
+ NOTE: this, along with EXTRABIT and Fbclk
+ determines FSYNC frequency when GEN == 1
+ NOTE: also used to detect framing errors and
+ therefore must have a correct value even if GEN==0 */
+ uint64_t extrabit : 1; /**< If 0, no frame bit | NS
+ If 1, add one extra bit time for frame bit
+ NOTE: if GEN == 1, then FSYNC will be delayed one
+ extra bit time.
+ NOTE: also used to detect framing errors and
+ therefore must have a correct value even if GEN==0
+ NOTE: the extra bit comes from the LSB/MSB of the
+ first byte of the frame in the transmit memory
+ region. LSB vs MSB is determined from the setting
+ of PCMn_TDM_CFG[LSBFIRST]. */
+ uint64_t bitlen : 2; /**< Number of BCLKs in a bit time. | NS
+ 0 : 1 BCLK
+ 1 : 2 BCLKs
+ 2 : 4 BCLKs
+ 3 : operation undefined */
+ uint64_t bclkpol : 1; /**< If 0, BCLK rise edge is start of bit time | NS
+ If 1, BCLK fall edge is start of bit time
+ NOTE: also used to detect framing errors and
+ therefore must have a correct value even if GEN==0 */
+ uint64_t fsyncpol : 1; /**< If 0, FSYNC idles low, asserts high | NS
+ If 1, FSYNC idles high, asserts low
+ NOTE: also used to detect framing errors and
+ therefore must have a correct value even if GEN==0 */
+ uint64_t ena : 1; /**< If 0, Clock receiving logic is doing nothing | NS
+ If 1, Clock receiving logic is looking for sync */
+#else
+ uint64_t ena : 1;
+ uint64_t fsyncpol : 1;
+ uint64_t bclkpol : 1;
+ uint64_t bitlen : 2;
+ uint64_t extrabit : 1;
+ uint64_t numslots : 10;
+ uint64_t fsyncloc : 5;
+ uint64_t fsynclen : 5;
+ uint64_t reserved_26_31 : 6;
+ uint64_t fsyncsamp : 16;
+ uint64_t reserved_48_62 : 15;
+ uint64_t fsyncgood : 1;
+#endif
+ } s;
+ struct cvmx_pcm_clkx_cfg_s cn30xx;
+ struct cvmx_pcm_clkx_cfg_s cn31xx;
+ struct cvmx_pcm_clkx_cfg_s cn50xx;
+ struct cvmx_pcm_clkx_cfg_s cn61xx;
+ struct cvmx_pcm_clkx_cfg_s cnf71xx;
+};
+typedef union cvmx_pcm_clkx_cfg cvmx_pcm_clkx_cfg_t;
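
[Editor's note] These PCM CSRs are 64-bit registers reached through the generic cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h rather than the PCIe config helpers; a minimal sketch, under that assumption, checking FSYNC lock on clock engine 0:

    #include "cvmx.h"
    #include "cvmx-pcm-defs.h"

    /* Returns nonzero if the last frame on PCM clock engine 0 had a
     * correctly positioned FSYNC pulse (a startup check; the
     * FSYNCEXTRA/FSYNCMISSING interrupts cover runtime monitoring). */
    static int pcm_clk0_fsync_good(void)
    {
        cvmx_pcm_clkx_cfg_t cfg;

        cfg.u64 = cvmx_read_csr(CVMX_PCM_CLKX_CFG(0));
        return cfg.s.fsyncgood;
    }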
+
+/**
+ * cvmx_pcm_clk#_dbg
+ */
+union cvmx_pcm_clkx_dbg {
+ uint64_t u64;
+ struct cvmx_pcm_clkx_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t debuginfo : 64; /**< Miscellaneous debug information | NS */
+#else
+ uint64_t debuginfo : 64;
+#endif
+ } s;
+ struct cvmx_pcm_clkx_dbg_s cn30xx;
+ struct cvmx_pcm_clkx_dbg_s cn31xx;
+ struct cvmx_pcm_clkx_dbg_s cn50xx;
+ struct cvmx_pcm_clkx_dbg_s cn61xx;
+ struct cvmx_pcm_clkx_dbg_s cnf71xx;
+};
+typedef union cvmx_pcm_clkx_dbg cvmx_pcm_clkx_dbg_t;
+
+/**
+ * cvmx_pcm_clk#_gen
+ */
+union cvmx_pcm_clkx_gen {
+ uint64_t u64;
+ struct cvmx_pcm_clkx_gen_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t deltasamp : 16; /**< Signed number of ECLKs to move sampled BCLK edge | NS
+ NOTE: the complete number of ECLKs to move is:
+ NUMSAMP + 2 + 1 + DELTASAMP
+ NUMSAMP to compensate for sampling delay
+ + 2 to compensate for dual-rank synchronizer
+ + 1 for uncertainty
+ + DELTASAMP for CMA/debugging */
+ uint64_t numsamp : 16; /**< Number of ECLK samples to detect BCLK change when | NS
+ receiving clock. */
+ uint64_t n : 32; /**< Determines BCLK frequency when generating clock | NS
+ NOTE: Fbclk = Feclk * N / 2^32
+ N = (Fbclk / Feclk) * 2^32
+ NOTE: writing N == 0 stops the clock generator, and
+ causes bclk and fsync to be RECEIVED */
+#else
+ uint64_t n : 32;
+ uint64_t numsamp : 16;
+ uint64_t deltasamp : 16;
+#endif
+ } s;
+ struct cvmx_pcm_clkx_gen_s cn30xx;
+ struct cvmx_pcm_clkx_gen_s cn31xx;
+ struct cvmx_pcm_clkx_gen_s cn50xx;
+ struct cvmx_pcm_clkx_gen_s cn61xx;
+ struct cvmx_pcm_clkx_gen_s cnf71xx;
+};
+typedef union cvmx_pcm_clkx_gen cvmx_pcm_clkx_gen_t;
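
[Editor's note] The N field's formula above, N = (Fbclk / Feclk) * 2^32, is easiest to evaluate with a 64-bit shift; a sketch (the read-modify-write is our choice, to avoid clobbering DELTASAMP/NUMSAMP in the same register):

    /* Program BCLK generation on PCM clock engine 0.
     * N = (Fbclk / Feclk) * 2^32; fbclk_hz below 4 GHz keeps the
     * shifted dividend within 64 bits. */
    static void pcm_clk0_set_bclk(uint64_t fbclk_hz, uint64_t feclk_hz)
    {
        cvmx_pcm_clkx_gen_t gen;

        gen.u64 = cvmx_read_csr(CVMX_PCM_CLKX_GEN(0));
        gen.s.n = (uint32_t)((fbclk_hz << 32) / feclk_hz);
        cvmx_write_csr(CVMX_PCM_CLKX_GEN(0), gen.u64);
    }

Note that per the field description, writing N == 0 stops the generator and switches BCLK/FSYNC to received mode.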
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pcm-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pcmx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pcmx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pcmx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1137 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pcmx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pcmx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_PCMX_DEFS_H__
+#define __CVMX_PCMX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_DMA_CFG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_DMA_CFG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010018ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_DMA_CFG(offset) (CVMX_ADD_IO_SEG(0x0001070000010018ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_INT_ENA(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_INT_ENA(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010020ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_INT_ENA(offset) (CVMX_ADD_IO_SEG(0x0001070000010020ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_INT_SUM(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_INT_SUM(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010028ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_INT_SUM(offset) (CVMX_ADD_IO_SEG(0x0001070000010028ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_RXADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_RXADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010068ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_RXADDR(offset) (CVMX_ADD_IO_SEG(0x0001070000010068ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_RXCNT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_RXCNT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010060ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_RXCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000010060ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_RXMSK0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_RXMSK0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100C0ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_RXMSK0(offset) (CVMX_ADD_IO_SEG(0x00010700000100C0ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_RXMSK1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_RXMSK1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100C8ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_RXMSK1(offset) (CVMX_ADD_IO_SEG(0x00010700000100C8ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_RXMSK2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_RXMSK2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100D0ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_RXMSK2(offset) (CVMX_ADD_IO_SEG(0x00010700000100D0ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_RXMSK3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_RXMSK3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100D8ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_RXMSK3(offset) (CVMX_ADD_IO_SEG(0x00010700000100D8ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_RXMSK4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_RXMSK4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100E0ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_RXMSK4(offset) (CVMX_ADD_IO_SEG(0x00010700000100E0ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_RXMSK5(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_RXMSK5(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100E8ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_RXMSK5(offset) (CVMX_ADD_IO_SEG(0x00010700000100E8ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_RXMSK6(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_RXMSK6(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100F0ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_RXMSK6(offset) (CVMX_ADD_IO_SEG(0x00010700000100F0ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_RXMSK7(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_RXMSK7(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100F8ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_RXMSK7(offset) (CVMX_ADD_IO_SEG(0x00010700000100F8ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_RXSTART(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_RXSTART(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010058ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_RXSTART(offset) (CVMX_ADD_IO_SEG(0x0001070000010058ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TDM_CFG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TDM_CFG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010010ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TDM_CFG(offset) (CVMX_ADD_IO_SEG(0x0001070000010010ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TDM_DBG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TDM_DBG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010030ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TDM_DBG(offset) (CVMX_ADD_IO_SEG(0x0001070000010030ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TXADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TXADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010050ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TXADDR(offset) (CVMX_ADD_IO_SEG(0x0001070000010050ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TXCNT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TXCNT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010048ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TXCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000010048ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TXMSK0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TXMSK0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010080ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TXMSK0(offset) (CVMX_ADD_IO_SEG(0x0001070000010080ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TXMSK1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TXMSK1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010088ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TXMSK1(offset) (CVMX_ADD_IO_SEG(0x0001070000010088ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TXMSK2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TXMSK2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010090ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TXMSK2(offset) (CVMX_ADD_IO_SEG(0x0001070000010090ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TXMSK3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TXMSK3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010098ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TXMSK3(offset) (CVMX_ADD_IO_SEG(0x0001070000010098ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TXMSK4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TXMSK4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100A0ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TXMSK4(offset) (CVMX_ADD_IO_SEG(0x00010700000100A0ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TXMSK5(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TXMSK5(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100A8ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TXMSK5(offset) (CVMX_ADD_IO_SEG(0x00010700000100A8ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TXMSK6(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TXMSK6(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100B0ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TXMSK6(offset) (CVMX_ADD_IO_SEG(0x00010700000100B0ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TXMSK7(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TXMSK7(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00010700000100B8ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TXMSK7(offset) (CVMX_ADD_IO_SEG(0x00010700000100B8ull) + ((offset) & 3) * 16384)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PCMX_TXSTART(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PCMX_TXSTART(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000010040ull) + ((offset) & 3) * 16384;
+}
+#else
+#define CVMX_PCMX_TXSTART(offset) (CVMX_ADD_IO_SEG(0x0001070000010040ull) + ((offset) & 3) * 16384)
+#endif
+
+/**
+ * cvmx_pcm#_dma_cfg
+ */
+union cvmx_pcmx_dma_cfg {
+ uint64_t u64;
+ struct cvmx_pcmx_dma_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rdpend : 1; /**< If 0, no L2C read responses pending | NS
+ 1, L2C read responses are outstanding
+ NOTE: When restarting after stopping a running TDM
+ engine, software must wait for RDPEND to read 0
+ before writing PCMn_TDM_CFG[ENABLE] to a 1 */
+ uint64_t reserved_54_62 : 9;
+ uint64_t rxslots : 10; /**< Number of 8-bit slots to receive per frame | NS
+ (number of slots in a receive superframe) */
+ uint64_t reserved_42_43 : 2;
+ uint64_t txslots : 10; /**< Number of 8-bit slots to transmit per frame | NS
+ (number of slots in a transmit superframe) */
+ uint64_t reserved_30_31 : 2;
+ uint64_t rxst : 10; /**< Number of frame writes for interrupt | NS */
+ uint64_t reserved_19_19 : 1;
+ uint64_t useldt : 1; /**< If 0, use LDI command to read from L2C | NS
+ 1, use LDT command to read from L2C */
+ uint64_t txrd : 10; /**< Number of frame reads for interrupt | NS */
+ uint64_t fetchsiz : 4; /**< FETCHSIZ+1 timeslots are read when threshold is | NS
+ reached. */
+ uint64_t thresh : 4; /**< If number of bytes remaining in the DMA fifo is <=| NS
+ THRESH, initiate a fetch of timeslot data from the
+ transmit memory region.
+ NOTE: there are only 16B of buffer for each engine
+ so the settings for FETCHSIZ and THRESH must be
+ such that the buffer will not be overrun:
+
+ THRESH + min(FETCHSIZ + 1,TXSLOTS) MUST BE <= 16 */
+#else
+ uint64_t thresh : 4;
+ uint64_t fetchsiz : 4;
+ uint64_t txrd : 10;
+ uint64_t useldt : 1;
+ uint64_t reserved_19_19 : 1;
+ uint64_t rxst : 10;
+ uint64_t reserved_30_31 : 2;
+ uint64_t txslots : 10;
+ uint64_t reserved_42_43 : 2;
+ uint64_t rxslots : 10;
+ uint64_t reserved_54_62 : 9;
+ uint64_t rdpend : 1;
+#endif
+ } s;
+ struct cvmx_pcmx_dma_cfg_s cn30xx;
+ struct cvmx_pcmx_dma_cfg_s cn31xx;
+ struct cvmx_pcmx_dma_cfg_s cn50xx;
+ struct cvmx_pcmx_dma_cfg_s cn61xx;
+ struct cvmx_pcmx_dma_cfg_s cnf71xx;
+};
+typedef union cvmx_pcmx_dma_cfg cvmx_pcmx_dma_cfg_t;
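+
+/* Editorial sketch (not part of the generated text): the THRESH note above
+ * means software must keep THRESH + min(FETCHSIZ + 1, TXSLOTS) <= 16, or
+ * the 16B per-engine buffer can overrun.  A helper that validates a
+ * proposed configuration before it is written: */
+static inline int cvmx_pcmx_dma_cfg_check_example(const cvmx_pcmx_dma_cfg_t *cfg)
+{
+    uint64_t fetch = cfg->s.fetchsiz + 1;  /* FETCHSIZ+1 timeslots per fetch */
+    if (fetch > cfg->s.txslots)
+        fetch = cfg->s.txslots;            /* min(FETCHSIZ+1, TXSLOTS) */
+    return cfg->s.thresh + fetch <= 16;    /* nonzero when the setting is safe */
+}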
+
+/**
+ * cvmx_pcm#_int_ena
+ */
+union cvmx_pcmx_int_ena {
+ uint64_t u64;
+ struct cvmx_pcmx_int_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t rxovf : 1; /**< Enable interrupt if RX byte overflows | NS */
+ uint64_t txempty : 1; /**< Enable interrupt on TX byte empty | NS */
+ uint64_t txrd : 1; /**< Enable DMA engine frame read interrupts | NS */
+ uint64_t txwrap : 1; /**< Enable TX region wrap interrupts | NS */
+ uint64_t rxst : 1; /**< Enable DMA engine frame store interrupts | NS */
+ uint64_t rxwrap : 1; /**< Enable RX region wrap interrupts | NS */
+ uint64_t fsyncextra : 1; /**< Enable FSYNC extra interrupts | NS
+ NOTE: FSYNCEXTRA errors are defined as an FSYNC
+ found in the "wrong" spot of a frame given the
+ programming of PCMn_CLK_CFG[NUMSLOTS] and
+ PCMn_CLK_CFG[EXTRABIT]. */
+ uint64_t fsyncmissed : 1; /**< Enable FSYNC missed interrupts | NS
+ NOTE: FSYNCMISSED errors are defined as an FSYNC
+ missing from the correct spot in a frame given
+ the programming of PCMn_CLK_CFG[NUMSLOTS] and
+ PCMn_CLK_CFG[EXTRABIT]. */
+#else
+ uint64_t fsyncmissed : 1;
+ uint64_t fsyncextra : 1;
+ uint64_t rxwrap : 1;
+ uint64_t rxst : 1;
+ uint64_t txwrap : 1;
+ uint64_t txrd : 1;
+ uint64_t txempty : 1;
+ uint64_t rxovf : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_pcmx_int_ena_s cn30xx;
+ struct cvmx_pcmx_int_ena_s cn31xx;
+ struct cvmx_pcmx_int_ena_s cn50xx;
+ struct cvmx_pcmx_int_ena_s cn61xx;
+ struct cvmx_pcmx_int_ena_s cnf71xx;
+};
+typedef union cvmx_pcmx_int_ena cvmx_pcmx_int_ena_t;
+
+/**
+ * cvmx_pcm#_int_sum
+ */
+union cvmx_pcmx_int_sum {
+ uint64_t u64;
+ struct cvmx_pcmx_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t rxovf : 1; /**< RX byte overflowed | NS */
+ uint64_t txempty : 1; /**< TX byte was empty when sampled | NS */
+ uint64_t txrd : 1; /**< DMA engine frame read interrupt occurred | NS */
+ uint64_t txwrap : 1; /**< TX region wrap interrupt occurred | NS */
+ uint64_t rxst : 1; /**< DMA engine frame store interrupt occurred | NS */
+ uint64_t rxwrap : 1; /**< RX region wrap interrupt occurred | NS */
+ uint64_t fsyncextra : 1; /**< FSYNC extra interrupt occurred | NS */
+ uint64_t fsyncmissed : 1; /**< FSYNC missed interrupt occurred | NS */
+#else
+ uint64_t fsyncmissed : 1;
+ uint64_t fsyncextra : 1;
+ uint64_t rxwrap : 1;
+ uint64_t rxst : 1;
+ uint64_t txwrap : 1;
+ uint64_t txrd : 1;
+ uint64_t txempty : 1;
+ uint64_t rxovf : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_pcmx_int_sum_s cn30xx;
+ struct cvmx_pcmx_int_sum_s cn31xx;
+ struct cvmx_pcmx_int_sum_s cn50xx;
+ struct cvmx_pcmx_int_sum_s cn61xx;
+ struct cvmx_pcmx_int_sum_s cnf71xx;
+};
+typedef union cvmx_pcmx_int_sum cvmx_pcmx_int_sum_t;
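+
+/* Editorial sketch: PCM#_INT_SUM latches the conditions that PCM#_INT_ENA
+ * gates.  This assumes the CVMX_PCMX_INT_SUM() address macro defined
+ * earlier in this file, cvmx_read_csr()/cvmx_write_csr() from cvmx.h, and
+ * the usual write-1-to-clear behavior of Octeon *_INT_SUM registers. */
+static inline void cvmx_pcmx_ack_fsync_errors_example(unsigned long pcm)
+{
+    cvmx_pcmx_int_sum_t sum;
+
+    sum.u64 = cvmx_read_csr(CVMX_PCMX_INT_SUM(pcm));
+    cvmx_write_csr(CVMX_PCMX_INT_SUM(pcm), sum.u64); /* acknowledge what we saw */
+    if (sum.s.fsyncmissed || sum.s.fsyncextra)
+        cvmx_warn("PCM%lu: FSYNC error; TDM engine halted\n", pcm);
+}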
+
+/**
+ * cvmx_pcm#_rxaddr
+ */
+union cvmx_pcmx_rxaddr {
+ uint64_t u64;
+ struct cvmx_pcmx_rxaddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Address of the next write to the receive memory | NS
+ region */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_pcmx_rxaddr_s cn30xx;
+ struct cvmx_pcmx_rxaddr_s cn31xx;
+ struct cvmx_pcmx_rxaddr_s cn50xx;
+ struct cvmx_pcmx_rxaddr_s cn61xx;
+ struct cvmx_pcmx_rxaddr_s cnf71xx;
+};
+typedef union cvmx_pcmx_rxaddr cvmx_pcmx_rxaddr_t;
+
+/**
+ * cvmx_pcm#_rxcnt
+ */
+union cvmx_pcmx_rxcnt {
+ uint64_t u64;
+ struct cvmx_pcmx_rxcnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt : 16; /**< Number of superframes in receive memory region | NS */
+#else
+ uint64_t cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcmx_rxcnt_s cn30xx;
+ struct cvmx_pcmx_rxcnt_s cn31xx;
+ struct cvmx_pcmx_rxcnt_s cn50xx;
+ struct cvmx_pcmx_rxcnt_s cn61xx;
+ struct cvmx_pcmx_rxcnt_s cnf71xx;
+};
+typedef union cvmx_pcmx_rxcnt cvmx_pcmx_rxcnt_t;
+
+/**
+ * cvmx_pcm#_rxmsk0
+ */
+union cvmx_pcmx_rxmsk0 {
+ uint64_t u64;
+ struct cvmx_pcmx_rxmsk0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Receive mask bits for slots 63 to 0 | NS
+ (1 means receive, 0 means don't receive) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_rxmsk0_s cn30xx;
+ struct cvmx_pcmx_rxmsk0_s cn31xx;
+ struct cvmx_pcmx_rxmsk0_s cn50xx;
+ struct cvmx_pcmx_rxmsk0_s cn61xx;
+ struct cvmx_pcmx_rxmsk0_s cnf71xx;
+};
+typedef union cvmx_pcmx_rxmsk0 cvmx_pcmx_rxmsk0_t;
+
+/**
+ * cvmx_pcm#_rxmsk1
+ */
+union cvmx_pcmx_rxmsk1 {
+ uint64_t u64;
+ struct cvmx_pcmx_rxmsk1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Receive mask bits for slots 127 to 64 | NS
+ (1 means receive, 0 means don't receive) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_rxmsk1_s cn30xx;
+ struct cvmx_pcmx_rxmsk1_s cn31xx;
+ struct cvmx_pcmx_rxmsk1_s cn50xx;
+ struct cvmx_pcmx_rxmsk1_s cn61xx;
+ struct cvmx_pcmx_rxmsk1_s cnf71xx;
+};
+typedef union cvmx_pcmx_rxmsk1 cvmx_pcmx_rxmsk1_t;
+
+/**
+ * cvmx_pcm#_rxmsk2
+ */
+union cvmx_pcmx_rxmsk2 {
+ uint64_t u64;
+ struct cvmx_pcmx_rxmsk2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Receive mask bits for slots 191 to 128 | NS
+ (1 means receive, 0 means don't receive) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_rxmsk2_s cn30xx;
+ struct cvmx_pcmx_rxmsk2_s cn31xx;
+ struct cvmx_pcmx_rxmsk2_s cn50xx;
+ struct cvmx_pcmx_rxmsk2_s cn61xx;
+ struct cvmx_pcmx_rxmsk2_s cnf71xx;
+};
+typedef union cvmx_pcmx_rxmsk2 cvmx_pcmx_rxmsk2_t;
+
+/**
+ * cvmx_pcm#_rxmsk3
+ */
+union cvmx_pcmx_rxmsk3 {
+ uint64_t u64;
+ struct cvmx_pcmx_rxmsk3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Receive mask bits for slots 255 to 192 | NS
+ (1 means receive, 0 means don't receive) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_rxmsk3_s cn30xx;
+ struct cvmx_pcmx_rxmsk3_s cn31xx;
+ struct cvmx_pcmx_rxmsk3_s cn50xx;
+ struct cvmx_pcmx_rxmsk3_s cn61xx;
+ struct cvmx_pcmx_rxmsk3_s cnf71xx;
+};
+typedef union cvmx_pcmx_rxmsk3 cvmx_pcmx_rxmsk3_t;
+
+/**
+ * cvmx_pcm#_rxmsk4
+ */
+union cvmx_pcmx_rxmsk4 {
+ uint64_t u64;
+ struct cvmx_pcmx_rxmsk4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Receive mask bits for slots 319 to 256 | NS
+ (1 means receive, 0 means don't receive) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_rxmsk4_s cn30xx;
+ struct cvmx_pcmx_rxmsk4_s cn31xx;
+ struct cvmx_pcmx_rxmsk4_s cn50xx;
+ struct cvmx_pcmx_rxmsk4_s cn61xx;
+ struct cvmx_pcmx_rxmsk4_s cnf71xx;
+};
+typedef union cvmx_pcmx_rxmsk4 cvmx_pcmx_rxmsk4_t;
+
+/**
+ * cvmx_pcm#_rxmsk5
+ */
+union cvmx_pcmx_rxmsk5 {
+ uint64_t u64;
+ struct cvmx_pcmx_rxmsk5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Receive mask bits for slots 383 to 320 | NS
+ (1 means receive, 0 means don't receive) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_rxmsk5_s cn30xx;
+ struct cvmx_pcmx_rxmsk5_s cn31xx;
+ struct cvmx_pcmx_rxmsk5_s cn50xx;
+ struct cvmx_pcmx_rxmsk5_s cn61xx;
+ struct cvmx_pcmx_rxmsk5_s cnf71xx;
+};
+typedef union cvmx_pcmx_rxmsk5 cvmx_pcmx_rxmsk5_t;
+
+/**
+ * cvmx_pcm#_rxmsk6
+ */
+union cvmx_pcmx_rxmsk6 {
+ uint64_t u64;
+ struct cvmx_pcmx_rxmsk6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Receive mask bits for slots 447 to 384 | NS
+ (1 means receive, 0 means don't receive) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_rxmsk6_s cn30xx;
+ struct cvmx_pcmx_rxmsk6_s cn31xx;
+ struct cvmx_pcmx_rxmsk6_s cn50xx;
+ struct cvmx_pcmx_rxmsk6_s cn61xx;
+ struct cvmx_pcmx_rxmsk6_s cnf71xx;
+};
+typedef union cvmx_pcmx_rxmsk6 cvmx_pcmx_rxmsk6_t;
+
+/**
+ * cvmx_pcm#_rxmsk7
+ */
+union cvmx_pcmx_rxmsk7 {
+ uint64_t u64;
+ struct cvmx_pcmx_rxmsk7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Receive mask bits for slots 511 to 448 | NS
+ (1 means receive, 0 means don't receive) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_rxmsk7_s cn30xx;
+ struct cvmx_pcmx_rxmsk7_s cn31xx;
+ struct cvmx_pcmx_rxmsk7_s cn50xx;
+ struct cvmx_pcmx_rxmsk7_s cn61xx;
+ struct cvmx_pcmx_rxmsk7_s cnf71xx;
+};
+typedef union cvmx_pcmx_rxmsk7 cvmx_pcmx_rxmsk7_t;
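+
+/* Editorial sketch: RXMSK0..RXMSK7 together cover 512 receive timeslots,
+ * 64 per register, so slot/64 selects the register and slot%64 the bit.
+ * A self-contained helper that edits a shadow copy of the eight mask
+ * words before they are written back: */
+static inline void cvmx_pcmx_rxmsk_set_slot_example(uint64_t mask[8],
+                                                    unsigned slot, int enable)
+{
+    unsigned reg = slot / 64;  /* which RXMSKn register */
+    unsigned bit = slot % 64;  /* bit within that register */
+    if (enable)
+        mask[reg] |= 1ull << bit;
+    else
+        mask[reg] &= ~(1ull << bit);
+}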
+
+/**
+ * cvmx_pcm#_rxstart
+ */
+union cvmx_pcmx_rxstart {
+ uint64_t u64;
+ struct cvmx_pcmx_rxstart_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 33; /**< Starting address for the receive memory region | NS */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t addr : 33;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_pcmx_rxstart_s cn30xx;
+ struct cvmx_pcmx_rxstart_s cn31xx;
+ struct cvmx_pcmx_rxstart_s cn50xx;
+ struct cvmx_pcmx_rxstart_s cn61xx;
+ struct cvmx_pcmx_rxstart_s cnf71xx;
+};
+typedef union cvmx_pcmx_rxstart cvmx_pcmx_rxstart_t;
+
+/**
+ * cvmx_pcm#_tdm_cfg
+ */
+union cvmx_pcmx_tdm_cfg {
+ uint64_t u64;
+ struct cvmx_pcmx_tdm_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t drvtim : 16; /**< Number of ECLKs from start of bit time to stop | NS
+ driving last bit of timeslot (if not driving next
+ timeslot) */
+ uint64_t samppt : 16; /**< Number of ECLKs from start of bit time to sample | NS
+ data bit. */
+ uint64_t reserved_3_31 : 29;
+ uint64_t lsbfirst : 1; /**< If 0, shift/receive MSB first | NS
+ 1, shift/receive LSB first */
+ uint64_t useclk1 : 1; /**< If 0, this PCM is based on BCLK/FSYNC0 | NS
+ 1, this PCM is based on BCLK/FSYNC1 */
+ uint64_t enable : 1; /**< If 1, PCM is enabled, otherwise pins are GPIOs | NS
+ NOTE: when TDM is disabled by detection of an
+ FSYNC error all transmission and reception is
+ halted. In addition, PCMn_TX/RXADDR are updated
+ to point to the position at which the error was
+ detected. */
+#else
+ uint64_t enable : 1;
+ uint64_t useclk1 : 1;
+ uint64_t lsbfirst : 1;
+ uint64_t reserved_3_31 : 29;
+ uint64_t samppt : 16;
+ uint64_t drvtim : 16;
+#endif
+ } s;
+ struct cvmx_pcmx_tdm_cfg_s cn30xx;
+ struct cvmx_pcmx_tdm_cfg_s cn31xx;
+ struct cvmx_pcmx_tdm_cfg_s cn50xx;
+ struct cvmx_pcmx_tdm_cfg_s cn61xx;
+ struct cvmx_pcmx_tdm_cfg_s cnf71xx;
+};
+typedef union cvmx_pcmx_tdm_cfg cvmx_pcmx_tdm_cfg_t;
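+
+/* Editorial sketch: per the PCM#_DMA_CFG[RDPEND] and PCM#_TDM_CFG[ENABLE]
+ * notes, a stopped engine may only be re-enabled once RDPEND reads 0.
+ * Assumes the CVMX_PCMX_DMA_CFG() address macro defined earlier in this
+ * file and cvmx_read_csr()/cvmx_write_csr() from cvmx.h. */
+static inline void cvmx_pcmx_reenable_example(unsigned long pcm)
+{
+    cvmx_pcmx_dma_cfg_t dma_cfg;
+    cvmx_pcmx_tdm_cfg_t tdm_cfg;
+
+    do {                       /* wait for L2C read responses to drain */
+        dma_cfg.u64 = cvmx_read_csr(CVMX_PCMX_DMA_CFG(pcm));
+    } while (dma_cfg.s.rdpend);
+
+    tdm_cfg.u64 = cvmx_read_csr(CVMX_PCMX_TDM_CFG(pcm));
+    tdm_cfg.s.enable = 1;      /* now safe to set ENABLE */
+    cvmx_write_csr(CVMX_PCMX_TDM_CFG(pcm), tdm_cfg.u64);
+}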
+
+/**
+ * cvmx_pcm#_tdm_dbg
+ */
+union cvmx_pcmx_tdm_dbg {
+ uint64_t u64;
+ struct cvmx_pcmx_tdm_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t debuginfo : 64; /**< Miscellaneous debug information | NS */
+#else
+ uint64_t debuginfo : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_tdm_dbg_s cn30xx;
+ struct cvmx_pcmx_tdm_dbg_s cn31xx;
+ struct cvmx_pcmx_tdm_dbg_s cn50xx;
+ struct cvmx_pcmx_tdm_dbg_s cn61xx;
+ struct cvmx_pcmx_tdm_dbg_s cnf71xx;
+};
+typedef union cvmx_pcmx_tdm_dbg cvmx_pcmx_tdm_dbg_t;
+
+/**
+ * cvmx_pcm#_txaddr
+ */
+union cvmx_pcmx_txaddr {
+ uint64_t u64;
+ struct cvmx_pcmx_txaddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 33; /**< Address of the next read from the transmit memory | NS
+ region */
+ uint64_t fram : 3; /**< Frame offset | NS
+ NOTE: this is used to extract the correct byte from
+ each 64b word read from the transmit memory region */
+#else
+ uint64_t fram : 3;
+ uint64_t addr : 33;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_pcmx_txaddr_s cn30xx;
+ struct cvmx_pcmx_txaddr_s cn31xx;
+ struct cvmx_pcmx_txaddr_s cn50xx;
+ struct cvmx_pcmx_txaddr_s cn61xx;
+ struct cvmx_pcmx_txaddr_s cnf71xx;
+};
+typedef union cvmx_pcmx_txaddr cvmx_pcmx_txaddr_t;
+
+/**
+ * cvmx_pcm#_txcnt
+ */
+union cvmx_pcmx_txcnt {
+ uint64_t u64;
+ struct cvmx_pcmx_txcnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt : 16; /**< Number of superframes in transmit memory region | NS */
+#else
+ uint64_t cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcmx_txcnt_s cn30xx;
+ struct cvmx_pcmx_txcnt_s cn31xx;
+ struct cvmx_pcmx_txcnt_s cn50xx;
+ struct cvmx_pcmx_txcnt_s cn61xx;
+ struct cvmx_pcmx_txcnt_s cnf71xx;
+};
+typedef union cvmx_pcmx_txcnt cvmx_pcmx_txcnt_t;
+
+/**
+ * cvmx_pcm#_txmsk0
+ */
+union cvmx_pcmx_txmsk0 {
+ uint64_t u64;
+ struct cvmx_pcmx_txmsk0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Transmit mask bits for slots 63 to 0 | NS
+ (1 means transmit, 0 means don't transmit) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_txmsk0_s cn30xx;
+ struct cvmx_pcmx_txmsk0_s cn31xx;
+ struct cvmx_pcmx_txmsk0_s cn50xx;
+ struct cvmx_pcmx_txmsk0_s cn61xx;
+ struct cvmx_pcmx_txmsk0_s cnf71xx;
+};
+typedef union cvmx_pcmx_txmsk0 cvmx_pcmx_txmsk0_t;
+
+/**
+ * cvmx_pcm#_txmsk1
+ */
+union cvmx_pcmx_txmsk1 {
+ uint64_t u64;
+ struct cvmx_pcmx_txmsk1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Transmit mask bits for slots 127 to 64 | NS
+ (1 means transmit, 0 means don't transmit) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_txmsk1_s cn30xx;
+ struct cvmx_pcmx_txmsk1_s cn31xx;
+ struct cvmx_pcmx_txmsk1_s cn50xx;
+ struct cvmx_pcmx_txmsk1_s cn61xx;
+ struct cvmx_pcmx_txmsk1_s cnf71xx;
+};
+typedef union cvmx_pcmx_txmsk1 cvmx_pcmx_txmsk1_t;
+
+/**
+ * cvmx_pcm#_txmsk2
+ */
+union cvmx_pcmx_txmsk2 {
+ uint64_t u64;
+ struct cvmx_pcmx_txmsk2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Transmit mask bits for slots 191 to 128 | NS
+ (1 means transmit, 0 means don't transmit) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_txmsk2_s cn30xx;
+ struct cvmx_pcmx_txmsk2_s cn31xx;
+ struct cvmx_pcmx_txmsk2_s cn50xx;
+ struct cvmx_pcmx_txmsk2_s cn61xx;
+ struct cvmx_pcmx_txmsk2_s cnf71xx;
+};
+typedef union cvmx_pcmx_txmsk2 cvmx_pcmx_txmsk2_t;
+
+/**
+ * cvmx_pcm#_txmsk3
+ */
+union cvmx_pcmx_txmsk3 {
+ uint64_t u64;
+ struct cvmx_pcmx_txmsk3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Transmit mask bits for slots 255 to 192 | NS
+ (1 means transmit, 0 means don't transmit) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_txmsk3_s cn30xx;
+ struct cvmx_pcmx_txmsk3_s cn31xx;
+ struct cvmx_pcmx_txmsk3_s cn50xx;
+ struct cvmx_pcmx_txmsk3_s cn61xx;
+ struct cvmx_pcmx_txmsk3_s cnf71xx;
+};
+typedef union cvmx_pcmx_txmsk3 cvmx_pcmx_txmsk3_t;
+
+/**
+ * cvmx_pcm#_txmsk4
+ */
+union cvmx_pcmx_txmsk4 {
+ uint64_t u64;
+ struct cvmx_pcmx_txmsk4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Transmit mask bits for slots 319 to 256 | NS
+ (1 means transmit, 0 means don't transmit) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_txmsk4_s cn30xx;
+ struct cvmx_pcmx_txmsk4_s cn31xx;
+ struct cvmx_pcmx_txmsk4_s cn50xx;
+ struct cvmx_pcmx_txmsk4_s cn61xx;
+ struct cvmx_pcmx_txmsk4_s cnf71xx;
+};
+typedef union cvmx_pcmx_txmsk4 cvmx_pcmx_txmsk4_t;
+
+/**
+ * cvmx_pcm#_txmsk5
+ */
+union cvmx_pcmx_txmsk5 {
+ uint64_t u64;
+ struct cvmx_pcmx_txmsk5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Transmit mask bits for slots 383 to 320 | NS
+ (1 means transmit, 0 means don't transmit) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_txmsk5_s cn30xx;
+ struct cvmx_pcmx_txmsk5_s cn31xx;
+ struct cvmx_pcmx_txmsk5_s cn50xx;
+ struct cvmx_pcmx_txmsk5_s cn61xx;
+ struct cvmx_pcmx_txmsk5_s cnf71xx;
+};
+typedef union cvmx_pcmx_txmsk5 cvmx_pcmx_txmsk5_t;
+
+/**
+ * cvmx_pcm#_txmsk6
+ */
+union cvmx_pcmx_txmsk6 {
+ uint64_t u64;
+ struct cvmx_pcmx_txmsk6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Transmit mask bits for slots 447 to 384 | NS
+ (1 means transmit, 0 means don't transmit) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_txmsk6_s cn30xx;
+ struct cvmx_pcmx_txmsk6_s cn31xx;
+ struct cvmx_pcmx_txmsk6_s cn50xx;
+ struct cvmx_pcmx_txmsk6_s cn61xx;
+ struct cvmx_pcmx_txmsk6_s cnf71xx;
+};
+typedef union cvmx_pcmx_txmsk6 cvmx_pcmx_txmsk6_t;
+
+/**
+ * cvmx_pcm#_txmsk7
+ */
+union cvmx_pcmx_txmsk7 {
+ uint64_t u64;
+ struct cvmx_pcmx_txmsk7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask : 64; /**< Transmit mask bits for slots 511 to 448 | NS
+ (1 means transmit, 0 means don't transmit) */
+#else
+ uint64_t mask : 64;
+#endif
+ } s;
+ struct cvmx_pcmx_txmsk7_s cn30xx;
+ struct cvmx_pcmx_txmsk7_s cn31xx;
+ struct cvmx_pcmx_txmsk7_s cn50xx;
+ struct cvmx_pcmx_txmsk7_s cn61xx;
+ struct cvmx_pcmx_txmsk7_s cnf71xx;
+};
+typedef union cvmx_pcmx_txmsk7 cvmx_pcmx_txmsk7_t;
+
+/**
+ * cvmx_pcm#_txstart
+ */
+union cvmx_pcmx_txstart {
+ uint64_t u64;
+ struct cvmx_pcmx_txstart_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 33; /**< Starting address for the transmit memory region | NS */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t addr : 33;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_pcmx_txstart_s cn30xx;
+ struct cvmx_pcmx_txstart_s cn31xx;
+ struct cvmx_pcmx_txstart_s cn50xx;
+ struct cvmx_pcmx_txstart_s cn61xx;
+ struct cvmx_pcmx_txstart_s cnf71xx;
+};
+typedef union cvmx_pcmx_txstart cvmx_pcmx_txstart_t;
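+
+/* Editorial sketch: PCM#_TXSTART's low three bits are reserved, so the
+ * transmit region must start 8-byte aligned, and PCM#_TXCNT holds the
+ * number of superframes in it.  Uses the CVMX_PCMX_TXSTART() and
+ * CVMX_PCMX_TXCNT() helpers above; cvmx_write_csr() comes from cvmx.h. */
+static inline void cvmx_pcmx_set_tx_region_example(unsigned long pcm,
+                                                   uint64_t phys_addr,
+                                                   uint16_t superframes)
+{
+    cvmx_pcmx_txstart_t start;
+    cvmx_pcmx_txcnt_t cnt;
+
+    start.u64 = 0;
+    start.s.addr = phys_addr >> 3; /* field holds address bits <35:3> */
+    cvmx_write_csr(CVMX_PCMX_TXSTART(pcm), start.u64);
+
+    cnt.u64 = 0;
+    cnt.s.cnt = superframes;
+    cvmx_write_csr(CVMX_PCMX_TXCNT(pcm), cnt.u64);
+}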
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pcmx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pcsx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pcsx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pcsx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1451 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pcsx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pcsx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_PCSX_DEFS_H__
+#define __CVMX_PCSX_DEFS_H__
+
+static inline uint64_t CVMX_PCSX_ANX_ADV_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001010ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001010ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001010ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001010ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_ANX_ADV_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001010ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_ANX_EXT_ST_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001028ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001028ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001028ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001028ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_ANX_EXT_ST_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001028ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_ANX_LP_ABIL_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001018ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001018ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001018ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001018ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_ANX_LP_ABIL_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001018ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_ANX_RESULTS_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001020ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001020ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001020ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001020ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_ANX_RESULTS_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001020ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_INTX_EN_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001088ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001088ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001088ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001088ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_INTX_EN_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001088ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_INTX_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001080ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001080ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001080ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001080ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_INTX_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001080ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_LINKX_TIMER_COUNT_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001040ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001040ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001040ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001040ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_LINKX_TIMER_COUNT_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001040ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_LOG_ANLX_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001090ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001090ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001090ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001090ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_LOG_ANLX_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001090ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_MISCX_CTL_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001078ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001078ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001078ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001078ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_MISCX_CTL_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001078ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_MRX_CONTROL_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001000ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001000ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001000ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001000ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_MRX_CONTROL_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001000ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_MRX_STATUS_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001008ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001008ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001008ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001008ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_MRX_STATUS_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001008ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_RXX_STATES_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001058ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001058ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001058ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001058ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_RXX_STATES_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001058ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_RXX_SYNC_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001050ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001050ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001050ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001050ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_RXX_SYNC_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001050ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_SGMX_AN_ADV_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001068ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001068ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001068ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001068ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_SGMX_AN_ADV_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001068ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_SGMX_LP_ADV_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001070ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001070ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001070ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001070ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_SGMX_LP_ADV_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001070ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_TXX_STATES_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001060ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001060ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001060ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001060ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_TXX_STATES_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001060ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+static inline uint64_t CVMX_PCSX_TX_RXX_POLARITY_REG(unsigned long offset, unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 1)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id == 0)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + (((offset) & 3) + ((block_id) & 0) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 1)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + (((offset) & 3) + ((block_id) & 1) * 0x20000ull) * 1024;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if (((offset <= 3)) && ((block_id <= 4)))
+ return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + (((offset) & 3) + ((block_id) & 7) * 0x4000ull) * 1024;
+ break;
+ }
+ cvmx_warn("CVMX_PCSX_TX_RXX_POLARITY_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + (((offset) & 1) + ((block_id) & 0) * 0x20000ull) * 1024;
+}
+
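+/* Editorial sketch: each helper above validates (offset, block_id) against
+ * the running chip, warns on an unsupported pair, and still returns a
+ * best-effort address.  A typical read, assuming cvmx_read_csr() from
+ * cvmx.h, with lane as the PCS index and iface as the block_id: */
+static inline uint64_t cvmx_pcsx_read_mr_status_example(unsigned long lane,
+                                                        unsigned long iface)
+{
+    return cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG(lane, iface));
+}
+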
+/**
+ * cvmx_pcs#_an#_adv_reg
+ *
+ * Bits [15:9] in the Status Register indicate the ability to operate as per those signalling
+ * specifications when the misc ctl reg MAC_PHY bit is set to MAC mode. Bits [15:9] will always
+ * read 1'b0, indicating that the chip cannot operate in the corresponding modes.
+ *
+ * Bit [4] RM_FLT is a don't care when the selected mode is SGMII.
+ *
+ * PCS_AN_ADV_REG = AN Advertisement Register 4
+ */
+union cvmx_pcsx_anx_adv_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_adv_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t np : 1; /**< Always 0, no next page capability supported */
+ uint64_t reserved_14_14 : 1;
+ uint64_t rem_flt : 2; /**< [<13>,<12>]
+ 0 0 Link OK XMIT=DATA
+ 0 1 Link failure (loss of sync, XMIT!= DATA)
+ 1 0 local device Offline
+ 1 1 AN Error failure to complete AN
+ AN Error is set if resolution function
+ precludes operation with link partner */
+ uint64_t reserved_9_11 : 3;
+ uint64_t pause : 2; /**< [<8>, <7>] Pause frame flow capability across link
+ Exchanged during Auto Negotiation
+ 0 0 No Pause
+ 0 1 Symmetric pause
+ 1 0 Asymmetric Pause
+ 1 1 Both symm and asymm pause to local device */
+ uint64_t hfd : 1; /**< 1 means local device Half Duplex capable */
+ uint64_t fd : 1; /**< 1 means local device Full Duplex capable */
+ uint64_t reserved_0_4 : 5;
+#else
+ uint64_t reserved_0_4 : 5;
+ uint64_t fd : 1;
+ uint64_t hfd : 1;
+ uint64_t pause : 2;
+ uint64_t reserved_9_11 : 3;
+ uint64_t rem_flt : 2;
+ uint64_t reserved_14_14 : 1;
+ uint64_t np : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsx_anx_adv_reg_s cn52xx;
+ struct cvmx_pcsx_anx_adv_reg_s cn52xxp1;
+ struct cvmx_pcsx_anx_adv_reg_s cn56xx;
+ struct cvmx_pcsx_anx_adv_reg_s cn56xxp1;
+ struct cvmx_pcsx_anx_adv_reg_s cn61xx;
+ struct cvmx_pcsx_anx_adv_reg_s cn63xx;
+ struct cvmx_pcsx_anx_adv_reg_s cn63xxp1;
+ struct cvmx_pcsx_anx_adv_reg_s cn66xx;
+ struct cvmx_pcsx_anx_adv_reg_s cn68xx;
+ struct cvmx_pcsx_anx_adv_reg_s cn68xxp1;
+ struct cvmx_pcsx_anx_adv_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_anx_adv_reg cvmx_pcsx_anx_adv_reg_t;
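+
+/* Editorial sketch: composing an advertisement from the field encodings
+ * documented above, i.e. the FD/HFD capability bits plus the two-bit
+ * PAUSE code (0 1 = symmetric pause): */
+static inline uint64_t cvmx_pcsx_build_an_adv_example(int full_duplex, int sym_pause)
+{
+    cvmx_pcsx_anx_adv_reg_t adv;
+
+    adv.u64 = 0;
+    adv.s.fd = full_duplex ? 1 : 0;
+    adv.s.hfd = full_duplex ? 0 : 1;
+    adv.s.pause = sym_pause ? 1 : 0; /* 0 0 = no pause */
+    return adv.u64;
+}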
+
+/**
+ * cvmx_pcs#_an#_ext_st_reg
+ *
+ * NOTE:
+ * an_results_reg is don't care when AN_OVRD is set to 1. If AN_OVRD=0 and AN_CPT=1
+ * the an_results_reg is valid.
+ *
+ *
+ * PCS_AN_EXT_ST_REG = AN Extended Status Register 15
+ * as per IEEE802.3 Clause 22
+ */
+union cvmx_pcsx_anx_ext_st_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_ext_st_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t thou_xfd : 1; /**< 1 means PHY is 1000BASE-X Full Dup capable */
+ uint64_t thou_xhd : 1; /**< 1 means PHY is 1000BASE-X Half Dup capable */
+ uint64_t thou_tfd : 1; /**< 1 means PHY is 1000BASE-T Full Dup capable */
+ uint64_t thou_thd : 1; /**< 1 means PHY is 1000BASE-T Half Dup capable */
+ uint64_t reserved_0_11 : 12;
+#else
+ uint64_t reserved_0_11 : 12;
+ uint64_t thou_thd : 1;
+ uint64_t thou_tfd : 1;
+ uint64_t thou_xhd : 1;
+ uint64_t thou_xfd : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn52xx;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn52xxp1;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn56xx;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn56xxp1;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn61xx;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn63xx;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn63xxp1;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn66xx;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn68xx;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn68xxp1;
+ struct cvmx_pcsx_anx_ext_st_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_anx_ext_st_reg cvmx_pcsx_anx_ext_st_reg_t;
+
+/**
+ * cvmx_pcs#_an#_lp_abil_reg
+ *
+ * PCS_AN_LP_ABIL_REG = AN link Partner Ability Register 5
+ * as per IEEE802.3 Clause 37
+ */
+union cvmx_pcsx_anx_lp_abil_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_lp_abil_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t np : 1; /**< 1=lp next page capable, 0=lp not next page capable */
+ uint64_t ack : 1; /**< 1=Acknowledgement received */
+ uint64_t rem_flt : 2; /**< [<13>,<12>] Link Partner's link status
+ 0 0 Link OK
+ 0 1 Offline
+ 1 0 Link failure
+ 1 1 AN Error */
+ uint64_t reserved_9_11 : 3;
+ uint64_t pause : 2; /**< [<8>, <7>] Link Partner Pause setting
+ 0 0 No Pause
+ 0 1 Symmetric pause
+ 1 0 Asymmetric Pause
+ 1 1 Both symm and asymm pause to local device */
+ uint64_t hfd : 1; /**< 1 means link partner Half Duplex capable */
+ uint64_t fd : 1; /**< 1 means link partner Full Duplex capable */
+ uint64_t reserved_0_4 : 5;
+#else
+ uint64_t reserved_0_4 : 5;
+ uint64_t fd : 1;
+ uint64_t hfd : 1;
+ uint64_t pause : 2;
+ uint64_t reserved_9_11 : 3;
+ uint64_t rem_flt : 2;
+ uint64_t ack : 1;
+ uint64_t np : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn52xx;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn52xxp1;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn56xx;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn56xxp1;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn61xx;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn63xx;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn63xxp1;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn66xx;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn68xx;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn68xxp1;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_anx_lp_abil_reg cvmx_pcsx_anx_lp_abil_reg_t;
+
+/**
+ * cvmx_pcs#_an#_results_reg
+ *
+ * PCS_AN_RESULTS_REG = AN Results Register
+ *
+ */
+union cvmx_pcsx_anx_results_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_results_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t pause : 2; /**< [<6>, <5>] PAUSE Selection (Don't care for SGMII)
+ 0 0 Disable Pause, TX and RX
+ 0 1 Enable pause frames RX only
+ 1 0 Enable Pause frames TX only
+ 1 1 Enable pause frames TX and RX */
+ uint64_t spd : 2; /**< [<4>, <3>] Link Speed Selection
+ 0 0 10Mb/s
+ 0 1 100Mb/s
+ 1 0 1000Mb/s
+ 1 1 NS */
+ uint64_t an_cpt : 1; /**< 1=AN Completed, 0=AN not completed or failed */
+ uint64_t dup : 1; /**< 1=Full Duplex, 0=Half Duplex */
+ uint64_t link_ok : 1; /**< 1=Link up(OK), 0=Link down */
+#else
+ uint64_t link_ok : 1;
+ uint64_t dup : 1;
+ uint64_t an_cpt : 1;
+ uint64_t spd : 2;
+ uint64_t pause : 2;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_pcsx_anx_results_reg_s cn52xx;
+ struct cvmx_pcsx_anx_results_reg_s cn52xxp1;
+ struct cvmx_pcsx_anx_results_reg_s cn56xx;
+ struct cvmx_pcsx_anx_results_reg_s cn56xxp1;
+ struct cvmx_pcsx_anx_results_reg_s cn61xx;
+ struct cvmx_pcsx_anx_results_reg_s cn63xx;
+ struct cvmx_pcsx_anx_results_reg_s cn63xxp1;
+ struct cvmx_pcsx_anx_results_reg_s cn66xx;
+ struct cvmx_pcsx_anx_results_reg_s cn68xx;
+ struct cvmx_pcsx_anx_results_reg_s cn68xxp1;
+ struct cvmx_pcsx_anx_results_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_anx_results_reg cvmx_pcsx_anx_results_reg_t;
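+
+/* Usage sketch (illustrative, not part of the generated definitions):
+   decode the negotiated link speed once AN has completed. Assumes
+   cvmx_read_csr() from cvmx.h and a CVMX_PCSX_ANX_RESULTS_REG(index,
+   interface) address helper defined earlier in this header; the
+   example function name itself is hypothetical. */
+static inline int cvmx_pcsx_example_an_speed_mbps(int index, int interface)
+{
+    cvmx_pcsx_anx_results_reg_t results;
+
+    results.u64 = cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG(index, interface));
+    if (!results.s.an_cpt)
+        return -1;          /* AN not complete or failed */
+    switch (results.s.spd) {
+    case 0:  return 10;     /* 0 0 -> 10Mb/s   */
+    case 1:  return 100;    /* 0 1 -> 100Mb/s  */
+    case 2:  return 1000;   /* 1 0 -> 1000Mb/s */
+    default: return -1;     /* 1 1 -> not specified */
+    }
+}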
+
+/**
+ * cvmx_pcs#_int#_en_reg
+ *
+ * NOTE: RXERR and TXERR conditions to be discussed with Dan before finalizing
+ * DBG_SYNC interrupt fires when the code group synchronization state machine makes a transition from
+ * SYNC_ACQUIRED_1 state to SYNC_ACQUIRED_2 state (see IEEE 802.3-2005 figure 37-9). It is an indication that a bad code group
+ * was received after code group synchronization was achieved. This interrupt should be disabled during normal link operation.
+ * Use it as a debug help feature only.
+ *
+ *
+ * PCS Interrupt Enable Register
+ */
+union cvmx_pcsx_intx_en_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_intx_en_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t dbg_sync_en : 1; /**< Code Group sync failure debug help */
+ uint64_t dup : 1; /**< Enable duplex mode changed interrupt */
+ uint64_t sync_bad_en : 1; /**< Enable rx sync st machine in bad state interrupt */
+ uint64_t an_bad_en : 1; /**< Enable AN state machine bad state interrupt */
+ uint64_t rxlock_en : 1; /**< Enable rx code group sync/bit lock failure interrupt */
+ uint64_t rxbad_en : 1; /**< Enable rx state machine in bad state interrupt */
+ uint64_t rxerr_en : 1; /**< Enable RX error condition interrupt */
+ uint64_t txbad_en : 1; /**< Enable tx state machine in bad state interrupt */
+ uint64_t txfifo_en : 1; /**< Enable tx fifo overflow condition interrupt */
+ uint64_t txfifu_en : 1; /**< Enable tx fifo underflow condition interrupt */
+ uint64_t an_err_en : 1; /**< Enable AN Error condition interrupt */
+ uint64_t xmit_en : 1; /**< Enable XMIT variable state change interrupt */
+ uint64_t lnkspd_en : 1; /**< Enable Link Speed has changed interrupt */
+#else
+ uint64_t lnkspd_en : 1;
+ uint64_t xmit_en : 1;
+ uint64_t an_err_en : 1;
+ uint64_t txfifu_en : 1;
+ uint64_t txfifo_en : 1;
+ uint64_t txbad_en : 1;
+ uint64_t rxerr_en : 1;
+ uint64_t rxbad_en : 1;
+ uint64_t rxlock_en : 1;
+ uint64_t an_bad_en : 1;
+ uint64_t sync_bad_en : 1;
+ uint64_t dup : 1;
+ uint64_t dbg_sync_en : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_pcsx_intx_en_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t dup : 1; /**< Enable duplex mode changed interrupt */
+ uint64_t sync_bad_en : 1; /**< Enable rx sync st machine in bad state interrupt */
+ uint64_t an_bad_en : 1; /**< Enable AN state machine bad state interrupt */
+ uint64_t rxlock_en : 1; /**< Enable rx code group sync/bit lock failure interrupt */
+ uint64_t rxbad_en : 1; /**< Enable rx state machine in bad state interrupt */
+ uint64_t rxerr_en : 1; /**< Enable RX error condition interrupt */
+ uint64_t txbad_en : 1; /**< Enable tx state machine in bad state interrupt */
+ uint64_t txfifo_en : 1; /**< Enable tx fifo overflow condition interrupt */
+ uint64_t txfifu_en : 1; /**< Enable tx fifo underflow condition interrupt */
+ uint64_t an_err_en : 1; /**< Enable AN Error condition interrupt */
+ uint64_t xmit_en : 1; /**< Enable XMIT variable state change interrupt */
+ uint64_t lnkspd_en : 1; /**< Enable Link Speed has changed interrupt */
+#else
+ uint64_t lnkspd_en : 1;
+ uint64_t xmit_en : 1;
+ uint64_t an_err_en : 1;
+ uint64_t txfifu_en : 1;
+ uint64_t txfifo_en : 1;
+ uint64_t txbad_en : 1;
+ uint64_t rxerr_en : 1;
+ uint64_t rxbad_en : 1;
+ uint64_t rxlock_en : 1;
+ uint64_t an_bad_en : 1;
+ uint64_t sync_bad_en : 1;
+ uint64_t dup : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn52xx;
+ struct cvmx_pcsx_intx_en_reg_cn52xx cn52xxp1;
+ struct cvmx_pcsx_intx_en_reg_cn52xx cn56xx;
+ struct cvmx_pcsx_intx_en_reg_cn52xx cn56xxp1;
+ struct cvmx_pcsx_intx_en_reg_s cn61xx;
+ struct cvmx_pcsx_intx_en_reg_s cn63xx;
+ struct cvmx_pcsx_intx_en_reg_s cn63xxp1;
+ struct cvmx_pcsx_intx_en_reg_s cn66xx;
+ struct cvmx_pcsx_intx_en_reg_s cn68xx;
+ struct cvmx_pcsx_intx_en_reg_s cn68xxp1;
+ struct cvmx_pcsx_intx_en_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_intx_en_reg cvmx_pcsx_intx_en_reg_t;
+
+/**
+ * cvmx_pcs#_int#_reg
+ *
+ * SGMII bit [12] is really a misnomer; it is a decode of the pi_qlm_cfg pins to indicate SGMII or 1000Base-X modes.
+ *
+ * Note: MODE bit
+ * When MODE=1, 1000Base-X mode is selected. Auto negotiation will follow IEEE 802.3 clause 37.
+ * When MODE=0, SGMII mode is selected and the following note will apply.
+ * Repeat note from SGM_AN_ADV register
+ * NOTE: The SGMII AN Advertisement Register above will be sent during Auto Negotiation if the MAC_PHY mode bit in misc_ctl_reg
+ * is set (1=PHY mode). If the bit is not set (0=MAC mode), the tx_config_reg[14] becomes ACK bit and [0] is always 1.
+ * All other bits in tx_config_reg sent will be 0. The PHY dictates the Auto Negotiation results.
+ *
+ * PCS Interrupt Register
+ */
+union cvmx_pcsx_intx_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_intx_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t dbg_sync : 1; /**< Code Group sync failure debug help */
+ uint64_t dup : 1; /**< Set whenever Duplex mode changes on the link */
+ uint64_t sync_bad : 1; /**< Set by HW whenever rx sync st machine reaches a bad
+ state. Should never be set during normal operation */
+ uint64_t an_bad : 1; /**< Set by HW whenever AN st machine reaches a bad
+ state. Should never be set during normal operation */
+ uint64_t rxlock : 1; /**< Set by HW whenever code group Sync or bit lock
+ failure occurs
+ Cannot fire in loopback1 mode */
+ uint64_t rxbad : 1; /**< Set by HW whenever rx st machine reaches a bad
+ state. Should never be set during normal operation */
+ uint64_t rxerr : 1; /**< Set whenever RX receives a code group error in
+ 10 bit to 8 bit decode logic
+ Cannot fire in loopback1 mode */
+ uint64_t txbad : 1; /**< Set by HW whenever tx st machine reaches a bad
+ state. Should never be set during normal operation */
+ uint64_t txfifo : 1; /**< Set whenever HW detects a TX fifo overflow
+ condition */
+ uint64_t txfifu : 1; /**< Set whenever HW detects a TX fifo underflow
+ condition */
+ uint64_t an_err : 1; /**< AN Error, AN resolution function failed */
+ uint64_t xmit : 1; /**< Set whenever HW detects a change in the XMIT
+ variable. XMIT variable states are IDLE, CONFIG and
+ DATA */
+ uint64_t lnkspd : 1; /**< Set by HW whenever Link Speed has changed */
+#else
+ uint64_t lnkspd : 1;
+ uint64_t xmit : 1;
+ uint64_t an_err : 1;
+ uint64_t txfifu : 1;
+ uint64_t txfifo : 1;
+ uint64_t txbad : 1;
+ uint64_t rxerr : 1;
+ uint64_t rxbad : 1;
+ uint64_t rxlock : 1;
+ uint64_t an_bad : 1;
+ uint64_t sync_bad : 1;
+ uint64_t dup : 1;
+ uint64_t dbg_sync : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_pcsx_intx_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t dup : 1; /**< Set whenever Duplex mode changes on the link */
+ uint64_t sync_bad : 1; /**< Set by HW whenever rx sync st machine reaches a bad
+ state. Should never be set during normal operation */
+ uint64_t an_bad : 1; /**< Set by HW whenever AN st machine reaches a bad
+ state. Should never be set during normal operation */
+ uint64_t rxlock : 1; /**< Set by HW whenever code group Sync or bit lock
+ failure occurs
+ Cannot fire in loopback1 mode */
+ uint64_t rxbad : 1; /**< Set by HW whenever rx st machine reaches a bad
+ state. Should never be set during normal operation */
+ uint64_t rxerr : 1; /**< Set whenever RX receives a code group error in
+ 10 bit to 8 bit decode logic
+ Cannot fire in loopback1 mode */
+ uint64_t txbad : 1; /**< Set by HW whenever tx st machine reaches a bad
+ state. Should never be set during normal operation */
+ uint64_t txfifo : 1; /**< Set whenever HW detects a TX fifo overflow
+ condition */
+ uint64_t txfifu : 1; /**< Set whenever HW detects a TX fifo underflow
+ condition */
+ uint64_t an_err : 1; /**< AN Error, AN resolution function failed */
+ uint64_t xmit : 1; /**< Set whenever HW detects a change in the XMIT
+ variable. XMIT variable states are IDLE, CONFIG and
+ DATA */
+ uint64_t lnkspd : 1; /**< Set by HW whenever Link Speed has changed */
+#else
+ uint64_t lnkspd : 1;
+ uint64_t xmit : 1;
+ uint64_t an_err : 1;
+ uint64_t txfifu : 1;
+ uint64_t txfifo : 1;
+ uint64_t txbad : 1;
+ uint64_t rxerr : 1;
+ uint64_t rxbad : 1;
+ uint64_t rxlock : 1;
+ uint64_t an_bad : 1;
+ uint64_t sync_bad : 1;
+ uint64_t dup : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn52xx;
+ struct cvmx_pcsx_intx_reg_cn52xx cn52xxp1;
+ struct cvmx_pcsx_intx_reg_cn52xx cn56xx;
+ struct cvmx_pcsx_intx_reg_cn52xx cn56xxp1;
+ struct cvmx_pcsx_intx_reg_s cn61xx;
+ struct cvmx_pcsx_intx_reg_s cn63xx;
+ struct cvmx_pcsx_intx_reg_s cn63xxp1;
+ struct cvmx_pcsx_intx_reg_s cn66xx;
+ struct cvmx_pcsx_intx_reg_s cn68xx;
+ struct cvmx_pcsx_intx_reg_s cn68xxp1;
+ struct cvmx_pcsx_intx_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_intx_reg cvmx_pcsx_intx_reg_t;
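+
+/* Usage sketch (illustrative): service the PCS interrupt summary bits.
+   Assumes cvmx_read_csr()/cvmx_write_csr() from cvmx.h, a
+   CVMX_PCSX_INTX_REG(index, interface) helper defined earlier in this
+   header, and the usual Octeon write-one-to-clear convention for
+   interrupt CSRs -- all labeled assumptions, not guarantees. */
+static inline void cvmx_pcsx_example_handle_int(int index, int interface)
+{
+    uint64_t addr = CVMX_PCSX_INTX_REG(index, interface);
+    cvmx_pcsx_intx_reg_t isr;
+
+    isr.u64 = cvmx_read_csr(addr);
+    if (isr.s.lnkspd) {
+        /* Link speed changed: re-read PCS_AN_RESULTS_REG here */
+    }
+    if (isr.s.an_err) {
+        /* AN resolution failed: restarting auto negotiation via
+           RST_AN in the control register would go here */
+    }
+    cvmx_write_csr(addr, isr.u64);  /* acknowledge the bits just seen */
+}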
+
+/**
+ * cvmx_pcs#_link#_timer_count_reg
+ *
+ * PCS_LINK_TIMER_COUNT_REG = 1.6ms nominal link timer register
+ *
+ */
+union cvmx_pcsx_linkx_timer_count_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_linkx_timer_count_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t count : 16; /**< (core clock period times 1024) times COUNT should
+ be 1.6 ms (SGMII) or 10 ms (otherwise), which is the
+ link timer used in auto negotiation.
+ Reset assumes a 700 MHz eclk for the 1.6 ms link timer */
+#else
+ uint64_t count : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn52xx;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn52xxp1;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn56xx;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn56xxp1;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn61xx;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn63xx;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn63xxp1;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn66xx;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn68xx;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn68xxp1;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_linkx_timer_count_reg cvmx_pcsx_linkx_timer_count_reg_t;
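+
+/* Worked example (illustrative): from the field description,
+   COUNT = link_time * core_clock_Hz / 1024. For the 1.6 ms SGMII link
+   timer on the 700 MHz eclk the reset value assumes:
+   0.0016 * 700,000,000 / 1024 = 1,120,000 / 1024 = 1093.75.
+   The sketch below truncates; cvmx_write_csr() is from cvmx.h and
+   CVMX_PCSX_LINKX_TIMER_COUNT_REG() is assumed from earlier in this
+   header. */
+static inline void cvmx_pcsx_example_set_link_timer(int index, int interface,
+                                                    uint64_t core_clock_hz,
+                                                    int is_sgmii)
+{
+    cvmx_pcsx_linkx_timer_count_reg_t timer;
+    /* 1.6 ms for SGMII, 10 ms otherwise, expressed in nanoseconds */
+    uint64_t timer_ns = is_sgmii ? 1600000ull : 10000000ull;
+
+    timer.u64 = 0;
+    timer.s.count = (timer_ns * core_clock_hz / 1000000000ull) >> 10;
+    cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
+                   timer.u64);
+}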
+
+/**
+ * cvmx_pcs#_log_anl#_reg
+ *
+ * PCS Logic Analyzer Register
+ *
+ */
+union cvmx_pcsx_log_anlx_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_log_anlx_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t lafifovfl : 1; /**< 1=logic analyzer FIFO overflowed during packetization
+ Write 1 to clear this bit */
+ uint64_t la_en : 1; /**< 1= Logic Analyzer enabled, 0=Logic Analyzer disabled */
+ uint64_t pkt_sz : 2; /**< [<1>, <0>] Logic Analyzer Packet Size
+ 0 0 Packet size 1k bytes
+ 0 1 Packet size 4k bytes
+ 1 0 Packet size 8k bytes
+ 1 1 Packet size 16k bytes */
+#else
+ uint64_t pkt_sz : 2;
+ uint64_t la_en : 1;
+ uint64_t lafifovfl : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_pcsx_log_anlx_reg_s cn52xx;
+ struct cvmx_pcsx_log_anlx_reg_s cn52xxp1;
+ struct cvmx_pcsx_log_anlx_reg_s cn56xx;
+ struct cvmx_pcsx_log_anlx_reg_s cn56xxp1;
+ struct cvmx_pcsx_log_anlx_reg_s cn61xx;
+ struct cvmx_pcsx_log_anlx_reg_s cn63xx;
+ struct cvmx_pcsx_log_anlx_reg_s cn63xxp1;
+ struct cvmx_pcsx_log_anlx_reg_s cn66xx;
+ struct cvmx_pcsx_log_anlx_reg_s cn68xx;
+ struct cvmx_pcsx_log_anlx_reg_s cn68xxp1;
+ struct cvmx_pcsx_log_anlx_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_log_anlx_reg cvmx_pcsx_log_anlx_reg_t;
+
+/**
+ * cvmx_pcs#_misc#_ctl_reg
+ *
+ * SGMII Misc Control Register
+ *
+ */
+union cvmx_pcsx_miscx_ctl_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_miscx_ctl_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t sgmii : 1; /**< 1=SGMII or 1000Base-X mode selected,
+ 0=XAUI or PCIE mode selected
+ This bit represents pi_qlm1/3_cfg[1:0] pin status */
+ uint64_t gmxeno : 1; /**< GMX Enable override. When set to 1, forces GMX to
+ appear disabled. The enable/disable status of GMX
+ is checked only at SOP of every packet. */
+ uint64_t loopbck2 : 1; /**< Sets external loopback mode to return rx data back
+ out via tx data path. 0=no loopback, 1=loopback */
+ uint64_t mac_phy : 1; /**< 0=MAC, 1=PHY decides the tx_config_reg value to be
+ sent during auto negotiation.
+ See SGMII spec ENG-46158 from CISCO */
+ uint64_t mode : 1; /**< 0=SGMII or 1= 1000 Base X */
+ uint64_t an_ovrd : 1; /**< 0=disable, 1=enable override of AN results.
+ Auto negotiation is allowed to happen but the
+ results are ignored when set. Duplex and Link speed
+ values are set from the pcs_mr_ctrl reg */
+ uint64_t samp_pt : 7; /**< Byte# in elongated frames for 10/100Mb/s operation
+ for data sampling on RX side in PCS.
+ Recommended values are 0x5 for 100Mb/s operation
+ and 0x32 for 10Mb/s operation.
+ For 10Mb/s operation this field should be set to a
+ value less than 99 and greater than 0. If set out
+ of this range a value of 50 will be used for actual
+ sampling internally without affecting the CSR field
+ For 100Mb/s operation this field should be set to a
+ value less than 9 and greater than 0. If set out of
+ this range a value of 5 will be used for actual
+ sampling internally without affecting the CSR field */
+#else
+ uint64_t samp_pt : 7;
+ uint64_t an_ovrd : 1;
+ uint64_t mode : 1;
+ uint64_t mac_phy : 1;
+ uint64_t loopbck2 : 1;
+ uint64_t gmxeno : 1;
+ uint64_t sgmii : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn52xx;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn52xxp1;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn56xx;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn56xxp1;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn61xx;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn63xx;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn63xxp1;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn66xx;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn68xx;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn68xxp1;
+ struct cvmx_pcsx_miscx_ctl_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_miscx_ctl_reg cvmx_pcsx_miscx_ctl_reg_t;
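+
+/* Usage sketch (illustrative): program SAMP_PT with the recommended
+   values from the field description above (0x32 for 10Mb/s, 0x5 for
+   100Mb/s; the field is unused at 1000Mb/s). cvmx_read_csr()/
+   cvmx_write_csr() are from cvmx.h; CVMX_PCSX_MISCX_CTL_REG() is
+   assumed from earlier in this header. */
+static inline void cvmx_pcsx_example_set_samp_pt(int index, int interface,
+                                                 int speed_mbps)
+{
+    uint64_t addr = CVMX_PCSX_MISCX_CTL_REG(index, interface);
+    cvmx_pcsx_miscx_ctl_reg_t misc;
+
+    misc.u64 = cvmx_read_csr(addr);
+    if (speed_mbps == 10)
+        misc.s.samp_pt = 0x32;  /* recommended 10Mb/s sample byte */
+    else if (speed_mbps == 100)
+        misc.s.samp_pt = 0x5;   /* recommended 100Mb/s sample byte */
+    cvmx_write_csr(addr, misc.u64);
+}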
+
+/**
+ * cvmx_pcs#_mr#_control_reg
+ *
+ * PCS_MR_CONTROL_REG = Control Register 0
+ *
+ */
+union cvmx_pcsx_mrx_control_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_mrx_control_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t reset : 1; /**< 1=SW Reset, the bit will return to 0 after pcs has
+ been reset. Takes 32 eclk cycles to reset pcs */
+ uint64_t loopbck1 : 1; /**< 0=normal operation, 1=loopback. The loopback mode
+ will return(loopback) tx data from GMII tx back to
+ GMII rx interface. The loopback happens in the pcs
+ module. Auto Negotiation will be disabled even if
+ the AN_EN bit is set, during loopback */
+ uint64_t spdlsb : 1; /**< See bit 6 description */
+ uint64_t an_en : 1; /**< 1=AN Enable, 0=AN Disable */
+ uint64_t pwr_dn : 1; /**< 1=Power Down(HW reset), 0=Normal operation */
+ uint64_t reserved_10_10 : 1;
+ uint64_t rst_an : 1; /**< If bit 12 is set and bit 3 of the status reg is 1,
+ Auto Negotiation begins. Else, SW writes are ignored
+ and this bit remains at 0. This bit clears itself
+ to 0 when AN starts. */
+ uint64_t dup : 1; /**< 1=full duplex, 0=half duplex; effective only if AN
+ disabled. If status register bits [15:9] and
+ extended status reg bits [15:12] allow only one
+ duplex mode, this bit will correspond to that
+ value and any attempt to write will be ignored. */
+ uint64_t coltst : 1; /**< 1=enable COL signal test, 0=disable test
+ During COL test, the COL signal will reflect the
+ GMII TX_EN signal with less than 16BT delay */
+ uint64_t spdmsb : 1; /**< [<6>, <13>] Link Speed, effective only if AN disabled
+ 0 0 10Mb/s
+ 0 1 100Mb/s
+ 1 0 1000Mb/s
+ 1 1 NS */
+ uint64_t uni : 1; /**< Unidirectional (Std 802.3-2005, Clause 66.2)
+ This bit will override the AN_EN bit and disable
+ the auto-negotiation variable mr_an_enable when set.
+ Used in both 1000Base-X and SGMII modes */
+ uint64_t reserved_0_4 : 5;
+#else
+ uint64_t reserved_0_4 : 5;
+ uint64_t uni : 1;
+ uint64_t spdmsb : 1;
+ uint64_t coltst : 1;
+ uint64_t dup : 1;
+ uint64_t rst_an : 1;
+ uint64_t reserved_10_10 : 1;
+ uint64_t pwr_dn : 1;
+ uint64_t an_en : 1;
+ uint64_t spdlsb : 1;
+ uint64_t loopbck1 : 1;
+ uint64_t reset : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsx_mrx_control_reg_s cn52xx;
+ struct cvmx_pcsx_mrx_control_reg_s cn52xxp1;
+ struct cvmx_pcsx_mrx_control_reg_s cn56xx;
+ struct cvmx_pcsx_mrx_control_reg_s cn56xxp1;
+ struct cvmx_pcsx_mrx_control_reg_s cn61xx;
+ struct cvmx_pcsx_mrx_control_reg_s cn63xx;
+ struct cvmx_pcsx_mrx_control_reg_s cn63xxp1;
+ struct cvmx_pcsx_mrx_control_reg_s cn66xx;
+ struct cvmx_pcsx_mrx_control_reg_s cn68xx;
+ struct cvmx_pcsx_mrx_control_reg_s cn68xxp1;
+ struct cvmx_pcsx_mrx_control_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_mrx_control_reg cvmx_pcsx_mrx_control_reg_t;
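+
+/* Usage sketch (illustrative): issue a PCS software reset and wait for
+   the RESET bit to self-clear; per the field description the reset takes
+   about 32 eclk cycles, so the bounded poll below is generous. Helper
+   and CSR-access names are assumptions, as in the earlier sketches. */
+static inline int cvmx_pcsx_example_soft_reset(int index, int interface)
+{
+    uint64_t addr = CVMX_PCSX_MRX_CONTROL_REG(index, interface);
+    cvmx_pcsx_mrx_control_reg_t ctl;
+    int timeout = 10000;
+
+    ctl.u64 = cvmx_read_csr(addr);
+    ctl.s.reset = 1;
+    cvmx_write_csr(addr, ctl.u64);
+    do {
+        ctl.u64 = cvmx_read_csr(addr);
+    } while (ctl.s.reset && --timeout > 0);
+    return ctl.s.reset ? -1 : 0;    /* -1 if the bit never cleared */
+}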
+
+/**
+ * cvmx_pcs#_mr#_status_reg
+ *
+ * NOTE:
+ * Whenever AN_EN bit[12] is set, Auto negotiation is allowed to happen. The results
+ * of the auto negotiation process set the fields in the AN_RESULTS reg. When AN_EN is not set,
+ * AN_RESULTS reg is don't care. The effective SPD, DUP, etc. get their values
+ * from the pcs_mr_ctrl reg.
+ *
+ * PCS_MR_STATUS_REG = Status Register 1
+ */
+union cvmx_pcsx_mrx_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_mrx_status_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t hun_t4 : 1; /**< 1 means 100Base-T4 capable */
+ uint64_t hun_xfd : 1; /**< 1 means 100Base-X Full Duplex */
+ uint64_t hun_xhd : 1; /**< 1 means 100Base-X Half Duplex */
+ uint64_t ten_fd : 1; /**< 1 means 10Mb/s Full Duplex */
+ uint64_t ten_hd : 1; /**< 1 means 10Mb/s Half Duplex */
+ uint64_t hun_t2fd : 1; /**< 1 means 100Base-T2 Full Duplex */
+ uint64_t hun_t2hd : 1; /**< 1 means 100Base-T2 Half Duplex */
+ uint64_t ext_st : 1; /**< 1 means extended status info in reg15 */
+ uint64_t reserved_7_7 : 1;
+ uint64_t prb_sup : 1; /**< 1 means able to work without preamble bytes at the
+ beginning of frames. 0 means not able to accept
+ frames without preamble bytes preceding them. */
+ uint64_t an_cpt : 1; /**< 1 means Auto Negotiation is complete and the
+ contents of the an_results_reg are valid. */
+ uint64_t rm_flt : 1; /**< Set to 1 when remote flt condition occurs. This bit
+ implements a latching Hi behavior. It is cleared by
+ SW read of this reg or when reset bit [15] in
+ Control Reg is asserted.
+ See the AN adv reg [13:12] for fault conditions */
+ uint64_t an_abil : 1; /**< 1 means Auto Negotiation capable */
+ uint64_t lnk_st : 1; /**< 1=link up, 0=link down. Set during AN process
+ Set whenever XMIT=DATA. Latching Lo behavior when
+ link goes down. Link down value of the bit stays
+ low until SW reads the reg. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t extnd : 1; /**< Always 0, no extended capability regs present */
+#else
+ uint64_t extnd : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t lnk_st : 1;
+ uint64_t an_abil : 1;
+ uint64_t rm_flt : 1;
+ uint64_t an_cpt : 1;
+ uint64_t prb_sup : 1;
+ uint64_t reserved_7_7 : 1;
+ uint64_t ext_st : 1;
+ uint64_t hun_t2hd : 1;
+ uint64_t hun_t2fd : 1;
+ uint64_t ten_hd : 1;
+ uint64_t ten_fd : 1;
+ uint64_t hun_xhd : 1;
+ uint64_t hun_xfd : 1;
+ uint64_t hun_t4 : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsx_mrx_status_reg_s cn52xx;
+ struct cvmx_pcsx_mrx_status_reg_s cn52xxp1;
+ struct cvmx_pcsx_mrx_status_reg_s cn56xx;
+ struct cvmx_pcsx_mrx_status_reg_s cn56xxp1;
+ struct cvmx_pcsx_mrx_status_reg_s cn61xx;
+ struct cvmx_pcsx_mrx_status_reg_s cn63xx;
+ struct cvmx_pcsx_mrx_status_reg_s cn63xxp1;
+ struct cvmx_pcsx_mrx_status_reg_s cn66xx;
+ struct cvmx_pcsx_mrx_status_reg_s cn68xx;
+ struct cvmx_pcsx_mrx_status_reg_s cn68xxp1;
+ struct cvmx_pcsx_mrx_status_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_mrx_status_reg cvmx_pcsx_mrx_status_reg_t;
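+
+/* Usage sketch (illustrative): LNK_ST latches low when the link drops
+   and stays low until software reads the register, so the first read
+   reports any bounce since the last read and a second read returns the
+   current state. CVMX_PCSX_MRX_STATUS_REG() is assumed from earlier in
+   this header. */
+static inline int cvmx_pcsx_example_link_up(int index, int interface)
+{
+    uint64_t addr = CVMX_PCSX_MRX_STATUS_REG(index, interface);
+    cvmx_pcsx_mrx_status_reg_t status;
+
+    status.u64 = cvmx_read_csr(addr);  /* clears a latched link-down */
+    status.u64 = cvmx_read_csr(addr);  /* current link state */
+    return status.s.lnk_st;
+}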
+
+/**
+ * cvmx_pcs#_rx#_states_reg
+ *
+ * PCS_RX_STATES_REG = RX State Machines states register
+ *
+ */
+union cvmx_pcsx_rxx_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_rxx_states_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t rx_bad : 1; /**< Receive state machine in an illegal state */
+ uint64_t rx_st : 5; /**< Receive state machine state */
+ uint64_t sync_bad : 1; /**< Receive synchronization SM in an illegal state */
+ uint64_t sync : 4; /**< Receive synchronization SM state */
+ uint64_t an_bad : 1; /**< Auto Negotiation state machine in an illegal state */
+ uint64_t an_st : 4; /**< Auto Negotiation state machine state */
+#else
+ uint64_t an_st : 4;
+ uint64_t an_bad : 1;
+ uint64_t sync : 4;
+ uint64_t sync_bad : 1;
+ uint64_t rx_st : 5;
+ uint64_t rx_bad : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsx_rxx_states_reg_s cn52xx;
+ struct cvmx_pcsx_rxx_states_reg_s cn52xxp1;
+ struct cvmx_pcsx_rxx_states_reg_s cn56xx;
+ struct cvmx_pcsx_rxx_states_reg_s cn56xxp1;
+ struct cvmx_pcsx_rxx_states_reg_s cn61xx;
+ struct cvmx_pcsx_rxx_states_reg_s cn63xx;
+ struct cvmx_pcsx_rxx_states_reg_s cn63xxp1;
+ struct cvmx_pcsx_rxx_states_reg_s cn66xx;
+ struct cvmx_pcsx_rxx_states_reg_s cn68xx;
+ struct cvmx_pcsx_rxx_states_reg_s cn68xxp1;
+ struct cvmx_pcsx_rxx_states_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_rxx_states_reg cvmx_pcsx_rxx_states_reg_t;
+
+/**
+ * cvmx_pcs#_rx#_sync_reg
+ *
+ * Note:
+ * r_tx_rx_polarity_reg bit [2] will show the correct polarity needed on the link receive path after code group synchronization is achieved.
+ *
+ *
+ * PCS_RX_SYNC_REG = Code Group synchronization reg
+ */
+union cvmx_pcsx_rxx_sync_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_rxx_sync_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t sync : 1; /**< 1 means code group synchronization achieved */
+ uint64_t bit_lock : 1; /**< 1 means bit lock achieved */
+#else
+ uint64_t bit_lock : 1;
+ uint64_t sync : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_pcsx_rxx_sync_reg_s cn52xx;
+ struct cvmx_pcsx_rxx_sync_reg_s cn52xxp1;
+ struct cvmx_pcsx_rxx_sync_reg_s cn56xx;
+ struct cvmx_pcsx_rxx_sync_reg_s cn56xxp1;
+ struct cvmx_pcsx_rxx_sync_reg_s cn61xx;
+ struct cvmx_pcsx_rxx_sync_reg_s cn63xx;
+ struct cvmx_pcsx_rxx_sync_reg_s cn63xxp1;
+ struct cvmx_pcsx_rxx_sync_reg_s cn66xx;
+ struct cvmx_pcsx_rxx_sync_reg_s cn68xx;
+ struct cvmx_pcsx_rxx_sync_reg_s cn68xxp1;
+ struct cvmx_pcsx_rxx_sync_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_rxx_sync_reg cvmx_pcsx_rxx_sync_reg_t;
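+
+/* Usage sketch (illustrative): a receive path is usable once both bit
+   lock and code group synchronization are reported. The accessor name
+   is assumed, as in the earlier sketches. */
+static inline int cvmx_pcsx_example_rx_synced(int index, int interface)
+{
+    cvmx_pcsx_rxx_sync_reg_t sync;
+
+    sync.u64 = cvmx_read_csr(CVMX_PCSX_RXX_SYNC_REG(index, interface));
+    return sync.s.bit_lock && sync.s.sync;
+}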
+
+/**
+ * cvmx_pcs#_sgm#_an_adv_reg
+ *
+ * SGMII AN Advertisement Register (sent out as tx_config_reg)
+ *
+ */
+union cvmx_pcsx_sgmx_an_adv_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t link : 1; /**< Link status 1 Link Up, 0 Link Down */
+ uint64_t ack : 1; /**< Auto negotiation ack */
+ uint64_t reserved_13_13 : 1;
+ uint64_t dup : 1; /**< Duplex mode 1=full duplex, 0=half duplex */
+ uint64_t speed : 2; /**< Link Speed
+ 0 0 10Mb/s
+ 0 1 100Mb/s
+ 1 0 1000Mb/s
+ 1 1 NS */
+ uint64_t reserved_1_9 : 9;
+ uint64_t one : 1; /**< Always set to match tx_config_reg<0> */
+#else
+ uint64_t one : 1;
+ uint64_t reserved_1_9 : 9;
+ uint64_t speed : 2;
+ uint64_t dup : 1;
+ uint64_t reserved_13_13 : 1;
+ uint64_t ack : 1;
+ uint64_t link : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xx;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xxp1;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xx;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xxp1;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn61xx;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn63xx;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn63xxp1;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn66xx;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn68xx;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn68xxp1;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_sgmx_an_adv_reg cvmx_pcsx_sgmx_an_adv_reg_t;
+
+/**
+ * cvmx_pcs#_sgm#_lp_adv_reg
+ *
+ * NOTE: The SGMII AN Advertisement Register above will be sent during Auto Negotiation if the MAC_PHY mode bit in misc_ctl_reg
+ * is set (1=PHY mode). If the bit is not set (0=MAC mode), the tx_config_reg[14] becomes ACK bit and [0] is always 1.
+ * All other bits in tx_config_reg sent will be 0. The PHY dictates the Auto Negotiation results.
+ *
+ * SGMII LP Advertisement Register (received as rx_config_reg)
+ */
+union cvmx_pcsx_sgmx_lp_adv_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t link : 1; /**< Link status 1 Link Up, 0 Link Down */
+ uint64_t reserved_13_14 : 2;
+ uint64_t dup : 1; /**< Duplex mode 1=full duplex, 0=half duplex */
+ uint64_t speed : 2; /**< Link Speed
+ 0 0 10Mb/s
+ 0 1 100Mb/s
+ 1 0 1000Mb/s
+ 1 1 NS */
+ uint64_t reserved_1_9 : 9;
+ uint64_t one : 1; /**< Always set to match tx_config_reg<0> */
+#else
+ uint64_t one : 1;
+ uint64_t reserved_1_9 : 9;
+ uint64_t speed : 2;
+ uint64_t dup : 1;
+ uint64_t reserved_13_14 : 2;
+ uint64_t link : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xx;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xxp1;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xx;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xxp1;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn61xx;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn63xx;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn63xxp1;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn66xx;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn68xx;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn68xxp1;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_sgmx_lp_adv_reg cvmx_pcsx_sgmx_lp_adv_reg_t;
+
+/**
+ * cvmx_pcs#_tx#_states_reg
+ *
+ * PCS_TX_STATES_REG = TX State Machines states register
+ *
+ */
+union cvmx_pcsx_txx_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_txx_states_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t xmit : 2; /**< 0=undefined, 1=config, 2=idle, 3=data */
+ uint64_t tx_bad : 1; /**< Xmit state machine in a bad state */
+ uint64_t ord_st : 4; /**< Xmit ordered set state machine state */
+#else
+ uint64_t ord_st : 4;
+ uint64_t tx_bad : 1;
+ uint64_t xmit : 2;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_pcsx_txx_states_reg_s cn52xx;
+ struct cvmx_pcsx_txx_states_reg_s cn52xxp1;
+ struct cvmx_pcsx_txx_states_reg_s cn56xx;
+ struct cvmx_pcsx_txx_states_reg_s cn56xxp1;
+ struct cvmx_pcsx_txx_states_reg_s cn61xx;
+ struct cvmx_pcsx_txx_states_reg_s cn63xx;
+ struct cvmx_pcsx_txx_states_reg_s cn63xxp1;
+ struct cvmx_pcsx_txx_states_reg_s cn66xx;
+ struct cvmx_pcsx_txx_states_reg_s cn68xx;
+ struct cvmx_pcsx_txx_states_reg_s cn68xxp1;
+ struct cvmx_pcsx_txx_states_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_txx_states_reg cvmx_pcsx_txx_states_reg_t;
+
+/**
+ * cvmx_pcs#_tx_rx#_polarity_reg
+ *
+ * PCS_POLARITY_REG = TX_RX polarity reg
+ *
+ */
+union cvmx_pcsx_tx_rxx_polarity_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t rxovrd : 1; /**< When 0, <2> determines polarity
+ when 1, <1> determines polarity */
+ uint64_t autorxpl : 1; /**< Auto RX polarity detected. 1=inverted, 0=normal
+ This bit always represents the correct rx polarity
+ setting needed for successful rx path operation,
+ once a successful code group sync is obtained */
+ uint64_t rxplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */
+ uint64_t txplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */
+#else
+ uint64_t txplrt : 1;
+ uint64_t rxplrt : 1;
+ uint64_t autorxpl : 1;
+ uint64_t rxovrd : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xx;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xxp1;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xx;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xxp1;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn61xx;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn63xx;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn63xxp1;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn66xx;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn68xx;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn68xxp1;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cnf71xx;
+};
+typedef union cvmx_pcsx_tx_rxx_polarity_reg cvmx_pcsx_tx_rxx_polarity_reg_t;
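+
+/* Usage sketch (illustrative): with RXOVRD=0 the auto-detected polarity
+   in AUTORXPL (bit <2>) is applied; setting RXOVRD=1 lets software force
+   the RX polarity through RXPLRT (bit <1>) instead. The sketch copies
+   the detected value into the manual setting before switching over,
+   using the accessor assumed from earlier in this header. */
+static inline void cvmx_pcsx_example_force_rx_polarity(int index, int interface)
+{
+    uint64_t addr = CVMX_PCSX_TX_RXX_POLARITY_REG(index, interface);
+    cvmx_pcsx_tx_rxx_polarity_reg_t pol;
+
+    pol.u64 = cvmx_read_csr(addr);
+    pol.s.rxplrt = pol.s.autorxpl;  /* freeze the detected polarity */
+    pol.s.rxovrd = 1;               /* <1> now determines polarity */
+    cvmx_write_csr(addr, pol.u64);
+}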
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pcsx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pcsxx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pcsxx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pcsxx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1088 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pcsxx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pcsxx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_PCSXX_DEFS_H__
+#define __CVMX_PCSXX_DEFS_H__
+
+static inline uint64_t CVMX_PCSXX_10GBX_STATUS_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_10GBX_STATUS_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + ((block_id) & 7) * 0x1000000ull;
+}
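+
+/* Usage sketch (illustrative): the accessors in this file only compute
+   the CSR address for a given block; pairing one with cvmx_read_csr()
+   from cvmx.h performs the actual access. Per the cases above, block_id
+   selects between two interfaces at a 0x8000000 stride on CN56XX/CN66XX/
+   CN61XX and among five at a 0x1000000 stride on CN68XX. */
+static inline uint64_t cvmx_pcsxx_example_read_10gbx_status(int interface)
+{
+    return cvmx_read_csr(CVMX_PCSXX_10GBX_STATUS_REG(interface));
+}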
+static inline uint64_t CVMX_PCSXX_BIST_STATUS_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_BIST_STATUS_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_BIT_LOCK_STATUS_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_BIT_LOCK_STATUS_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_CONTROL1_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_CONTROL1_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_CONTROL2_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_CONTROL2_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_INT_EN_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_INT_EN_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_INT_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_INT_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_LOG_ANL_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_LOG_ANL_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_MISC_CTL_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_MISC_CTL_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_RX_SYNC_STATES_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_RX_SYNC_STATES_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_SPD_ABIL_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_SPD_ABIL_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_STATUS1_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_STATUS1_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_STATUS2_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_STATUS2_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_TX_RX_POLARITY_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_TX_RX_POLARITY_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + ((block_id) & 7) * 0x1000000ull;
+}
+static inline uint64_t CVMX_PCSXX_TX_RX_STATES_REG(unsigned long block_id)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 1))
+ return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + ((block_id) & 1) * 0x8000000ull;
+ break;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((block_id == 0))
+ return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + ((block_id) & 0) * 0x8000000ull;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((block_id <= 4))
+ return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + ((block_id) & 7) * 0x1000000ull;
+ break;
+ }
+ cvmx_warn("CVMX_PCSXX_TX_RX_STATES_REG (block_id = %lu) not supported on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + ((block_id) & 7) * 0x1000000ull;
+}
+
+/**
+ * cvmx_pcsx#_10gbx_status_reg
+ *
+ * PCSX_10GBX_STATUS_REG = 10gbx_status_reg
+ *
+ */
+union cvmx_pcsxx_10gbx_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_10gbx_status_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t alignd : 1; /**< 1=Lane alignment achieved, 0=Lanes not aligned */
+ uint64_t pattst : 1; /**< Always at 0, no pattern testing capability */
+ uint64_t reserved_4_10 : 7;
+ uint64_t l3sync : 1; /**< 1=Rcv lane 3 code grp synchronized, 0=not sync'ed */
+ uint64_t l2sync : 1; /**< 1=Rcv lane 2 code grp synchronized, 0=not sync'ed */
+ uint64_t l1sync : 1; /**< 1=Rcv lane 1 code grp synchronized, 0=not sync'ed */
+ uint64_t l0sync : 1; /**< 1=Rcv lane 0 code grp synchronized, 0=not sync'ed */
+#else
+ uint64_t l0sync : 1;
+ uint64_t l1sync : 1;
+ uint64_t l2sync : 1;
+ uint64_t l3sync : 1;
+ uint64_t reserved_4_10 : 7;
+ uint64_t pattst : 1;
+ uint64_t alignd : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn52xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn56xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn61xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn63xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn63xxp1;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn66xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn68xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_10gbx_status_reg cvmx_pcsxx_10gbx_status_reg_t;
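+
+/* Usage sketch (illustrative): the XAUI receive path is usable once all
+   four lanes report code group synchronization and lane alignment has
+   been achieved. */
+static inline int cvmx_pcsxx_example_lanes_aligned(int interface)
+{
+    cvmx_pcsxx_10gbx_status_reg_t st;
+
+    st.u64 = cvmx_read_csr(CVMX_PCSXX_10GBX_STATUS_REG(interface));
+    return st.s.alignd && st.s.l0sync && st.s.l1sync &&
+           st.s.l2sync && st.s.l3sync;
+}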
+
+/**
+ * cvmx_pcsx#_bist_status_reg
+ *
+ * NOTE: Logic Analyzer is enabled with LA_EN for xaui only. PKT_SZ is effective only when LA_EN=1
+ * For normal operation (xaui), this bit must be 0. The dropped lane is used to send rxc[3:0].
+ * See pcs.csr for sgmii/1000Base-X logic analyzer mode.
+ * For full description see document at .../rtl/pcs/readme_logic_analyzer.txt
+ *
+ *
+ * PCSX Bist Status Register
+ */
+union cvmx_pcsxx_bist_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_bist_status_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t bist_status : 1; /**< 1=BIST failure, 0=BISTed memory OK or BIST in progress
+ pcsx.tx_sm.drf8x36m1_async_bist */
+#else
+ uint64_t bist_status : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_pcsxx_bist_status_reg_s cn52xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn52xxp1;
+ struct cvmx_pcsxx_bist_status_reg_s cn56xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn56xxp1;
+ struct cvmx_pcsxx_bist_status_reg_s cn61xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn63xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn63xxp1;
+ struct cvmx_pcsxx_bist_status_reg_s cn66xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn68xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_bist_status_reg cvmx_pcsxx_bist_status_reg_t;
+
+/**
+ * cvmx_pcsx#_bit_lock_status_reg
+ *
+ * LN_SWAP for XAUI is to simplify interconnection layout between devices
+ *
+ *
+ * PCSX Bit Lock Status Register
+ */
+union cvmx_pcsxx_bit_lock_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_bit_lock_status_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t bitlck3 : 1; /**< Receive Lane 3 bit lock status */
+ uint64_t bitlck2 : 1; /**< Receive Lane 2 bit lock status */
+ uint64_t bitlck1 : 1; /**< Receive Lane 1 bit lock status */
+ uint64_t bitlck0 : 1; /**< Receive Lane 0 bit lock status */
+#else
+ uint64_t bitlck0 : 1;
+ uint64_t bitlck1 : 1;
+ uint64_t bitlck2 : 1;
+ uint64_t bitlck3 : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn61xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn63xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn63xxp1;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn66xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn68xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_bit_lock_status_reg cvmx_pcsxx_bit_lock_status_reg_t;
+
+/**
+ * cvmx_pcsx#_control1_reg
+ *
+ * NOTE: Logic Analyzer is enabled with LA_EN for the specified PCS lane only. PKT_SZ is effective only when LA_EN=1
+ * For normal operation (sgmii or 1000Base-X), this bit must be 0.
+ * See pcsx.csr for xaui logic analyzer mode.
+ * For full description see document at .../rtl/pcs/readme_logic_analyzer.txt
+ *
+ *
+ * PCSX regs follow IEEE Std 802.3-2005, Section: 45.2.3
+ *
+ *
+ * PCSX_CONTROL1_REG = Control Register 1
+ */
+union cvmx_pcsxx_control1_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_control1_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t reset : 1; /**< 1=SW PCSX Reset, the bit will return to 0 after pcs
+ has been reset. Takes 32 eclk cycles to reset pcs
+ 0=Normal operation */
+ uint64_t loopbck1 : 1; /**< 0=normal operation, 1=internal loopback mode
+ xgmii tx data received from gmx tx port is returned
+ back into gmx, xgmii rx port. */
+ uint64_t spdsel1 : 1; /**< See bit 6 description */
+ uint64_t reserved_12_12 : 1;
+ uint64_t lo_pwr : 1; /**< 1=Power Down(HW reset), 0=Normal operation */
+ uint64_t reserved_7_10 : 4;
+ uint64_t spdsel0 : 1; /**< SPDSEL1 and SPDSEL0 are always at 1'b1. Write has
+ no effect.
+ [<6>, <13>] Link Speed selection
+ 1 1 Bits 5:2 select speed */
+ uint64_t spd : 4; /**< Always select 10Gb/s, writes have no effect */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t spd : 4;
+ uint64_t spdsel0 : 1;
+ uint64_t reserved_7_10 : 4;
+ uint64_t lo_pwr : 1;
+ uint64_t reserved_12_12 : 1;
+ uint64_t spdsel1 : 1;
+ uint64_t loopbck1 : 1;
+ uint64_t reset : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsxx_control1_reg_s cn52xx;
+ struct cvmx_pcsxx_control1_reg_s cn52xxp1;
+ struct cvmx_pcsxx_control1_reg_s cn56xx;
+ struct cvmx_pcsxx_control1_reg_s cn56xxp1;
+ struct cvmx_pcsxx_control1_reg_s cn61xx;
+ struct cvmx_pcsxx_control1_reg_s cn63xx;
+ struct cvmx_pcsxx_control1_reg_s cn63xxp1;
+ struct cvmx_pcsxx_control1_reg_s cn66xx;
+ struct cvmx_pcsxx_control1_reg_s cn68xx;
+ struct cvmx_pcsxx_control1_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_control1_reg cvmx_pcsxx_control1_reg_t;
+
+/**
+ * cvmx_pcsx#_control2_reg
+ *
+ * PCSX_CONTROL2_REG = Control Register 2
+ *
+ */
+union cvmx_pcsxx_control2_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_control2_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t type : 2; /**< Always 2'b01, 10GBASE-X only supported */
+#else
+ uint64_t type : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_pcsxx_control2_reg_s cn52xx;
+ struct cvmx_pcsxx_control2_reg_s cn52xxp1;
+ struct cvmx_pcsxx_control2_reg_s cn56xx;
+ struct cvmx_pcsxx_control2_reg_s cn56xxp1;
+ struct cvmx_pcsxx_control2_reg_s cn61xx;
+ struct cvmx_pcsxx_control2_reg_s cn63xx;
+ struct cvmx_pcsxx_control2_reg_s cn63xxp1;
+ struct cvmx_pcsxx_control2_reg_s cn66xx;
+ struct cvmx_pcsxx_control2_reg_s cn68xx;
+ struct cvmx_pcsxx_control2_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_control2_reg cvmx_pcsxx_control2_reg_t;
+
+/**
+ * cvmx_pcsx#_int_en_reg
+ *
+ * Note: DBG_SYNC is an edge-triggered interrupt. When set it indicates that the PCS Synchronization state machine in
+ * the Figure 48-7 state diagram in IEEE Std 802.3-2005 changed state from SYNC_ACQUIRED_1 to SYNC_ACQUIRED_2,
+ * indicating an invalid code group was received on one of the 4 receive lanes.
+ * This interrupt should always be disabled and used only as a link problem debugging aid.
+ *
+ *
+ * PCSX Interrupt Enable Register
+ */
+union cvmx_pcsxx_int_en_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_int_en_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t dbg_sync_en : 1; /**< Code Group sync failure debug help */
+ uint64_t algnlos_en : 1; /**< Enable ALGNLOS interrupt */
+ uint64_t synlos_en : 1; /**< Enable SYNLOS interrupt */
+ uint64_t bitlckls_en : 1; /**< Enable BITLCKLS interrupt */
+ uint64_t rxsynbad_en : 1; /**< Enable RXSYNBAD interrupt */
+ uint64_t rxbad_en : 1; /**< Enable RXBAD interrupt */
+ uint64_t txflt_en : 1; /**< Enable TXFLT interrupt */
+#else
+ uint64_t txflt_en : 1;
+ uint64_t rxbad_en : 1;
+ uint64_t rxsynbad_en : 1;
+ uint64_t bitlckls_en : 1;
+ uint64_t synlos_en : 1;
+ uint64_t algnlos_en : 1;
+ uint64_t dbg_sync_en : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_pcsxx_int_en_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t algnlos_en : 1; /**< Enable ALGNLOS interrupt */
+ uint64_t synlos_en : 1; /**< Enable SYNLOS interrupt */
+ uint64_t bitlckls_en : 1; /**< Enable BITLCKLS interrupt */
+ uint64_t rxsynbad_en : 1; /**< Enable RXSYNBAD interrupt */
+ uint64_t rxbad_en : 1; /**< Enable RXBAD interrupt */
+ uint64_t txflt_en : 1; /**< Enable TXFLT interrupt */
+#else
+ uint64_t txflt_en : 1;
+ uint64_t rxbad_en : 1;
+ uint64_t rxsynbad_en : 1;
+ uint64_t bitlckls_en : 1;
+ uint64_t synlos_en : 1;
+ uint64_t algnlos_en : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn52xx;
+ struct cvmx_pcsxx_int_en_reg_cn52xx cn52xxp1;
+ struct cvmx_pcsxx_int_en_reg_cn52xx cn56xx;
+ struct cvmx_pcsxx_int_en_reg_cn52xx cn56xxp1;
+ struct cvmx_pcsxx_int_en_reg_s cn61xx;
+ struct cvmx_pcsxx_int_en_reg_s cn63xx;
+ struct cvmx_pcsxx_int_en_reg_s cn63xxp1;
+ struct cvmx_pcsxx_int_en_reg_s cn66xx;
+ struct cvmx_pcsxx_int_en_reg_s cn68xx;
+ struct cvmx_pcsxx_int_en_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_int_en_reg cvmx_pcsxx_int_en_reg_t;
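+
+/* Usage sketch (illustrative): enable the PCSX interrupt sources while
+   honoring the note above that DBG_SYNC is a debug-only aid and should
+   stay disabled in normal operation. cvmx_write_csr() is from cvmx.h;
+   the accessor is defined above. */
+static inline void cvmx_pcsxx_example_enable_ints(int interface)
+{
+    cvmx_pcsxx_int_en_reg_t en;
+
+    en.u64 = 0;
+    en.s.txflt_en = 1;
+    en.s.rxbad_en = 1;
+    en.s.rxsynbad_en = 1;
+    en.s.bitlckls_en = 1;
+    en.s.synlos_en = 1;
+    en.s.algnlos_en = 1;
+    /* en.s.dbg_sync_en intentionally left 0 (debug aid only) */
+    cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), en.u64);
+}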
+
+/**
+ * cvmx_pcsx#_int_reg
+ *
+ * PCSX Interrupt Register
+ *
+ */
+union cvmx_pcsxx_int_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t dbg_sync : 1; /**< Code Group sync failure debug help, see Note below */
+ uint64_t algnlos : 1; /**< Set when XAUI lanes lose alignment */
+ uint64_t synlos : 1; /**< Set when Code group sync lost on 1 or more lanes */
+ uint64_t bitlckls : 1; /**< Set when Bit lock lost on 1 or more xaui lanes */
+ uint64_t rxsynbad : 1; /**< Set when RX code grp sync st machine in bad state
+ in one of the 4 xaui lanes */
+ uint64_t rxbad : 1; /**< Set when RX state machine in bad state */
+ uint64_t txflt : 1; /**< None defined at this time, always 0x0 */
+#else
+ uint64_t txflt : 1;
+ uint64_t rxbad : 1;
+ uint64_t rxsynbad : 1;
+ uint64_t bitlckls : 1;
+ uint64_t synlos : 1;
+ uint64_t algnlos : 1;
+ uint64_t dbg_sync : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_pcsxx_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t algnlos : 1; /**< Set when XAUI lanes lose alignment */
+ uint64_t synlos : 1; /**< Set when Code group sync lost on 1 or more lanes */
+ uint64_t bitlckls : 1; /**< Set when Bit lock lost on 1 or more xaui lanes */
+ uint64_t rxsynbad : 1; /**< Set when RX code grp sync st machine in bad state
+ in one of the 4 xaui lanes */
+ uint64_t rxbad : 1; /**< Set when RX state machine in bad state */
+ uint64_t txflt : 1; /**< None defined at this time, always 0x0 */
+#else
+ uint64_t txflt : 1;
+ uint64_t rxbad : 1;
+ uint64_t rxsynbad : 1;
+ uint64_t bitlckls : 1;
+ uint64_t synlos : 1;
+ uint64_t algnlos : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn52xx;
+ struct cvmx_pcsxx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_pcsxx_int_reg_cn52xx cn56xx;
+ struct cvmx_pcsxx_int_reg_cn52xx cn56xxp1;
+ struct cvmx_pcsxx_int_reg_s cn61xx;
+ struct cvmx_pcsxx_int_reg_s cn63xx;
+ struct cvmx_pcsxx_int_reg_s cn63xxp1;
+ struct cvmx_pcsxx_int_reg_s cn66xx;
+ struct cvmx_pcsxx_int_reg_s cn68xx;
+ struct cvmx_pcsxx_int_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_int_reg cvmx_pcsxx_int_reg_t;
+
+/**
+ * cvmx_pcsx#_log_anl_reg
+ *
+ * PCSX Logic Analyzer Register
+ *
+ */
+union cvmx_pcsxx_log_anl_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_log_anl_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t enc_mode : 1; /**< 1=send xaui encoded data, 0=send xaui raw data to GMX
+ See .../rtl/pcs/readme_logic_analyzer.txt for details */
+ uint64_t drop_ln : 2; /**< xaui lane# to drop from logic analyzer packets
+ [<5>, <4>] Drop lane \#
+ 0 0 Drop lane 0 data
+ 0 1 Drop lane 1 data
+ 1 0 Drop lane 2 data
+ 1 1 Drop lane 3 data */
+ uint64_t lafifovfl : 1; /**< 1=logic analyzer fifo overflowed one or more times
+ during packetization.
+ Write 1 to clear this bit */
+ uint64_t la_en : 1; /**< 1= Logic Analyzer enabled, 0=Logic Analyzer disabled */
+ uint64_t pkt_sz : 2; /**< [<1>, <0>] Logic Analyzer Packet Size
+ 0 0 Packet size 1k bytes
+ 0 1 Packet size 4k bytes
+ 1 0 Packet size 8k bytes
+ 1 1 Packet size 16k bytes */
+#else
+ uint64_t pkt_sz : 2;
+ uint64_t la_en : 1;
+ uint64_t lafifovfl : 1;
+ uint64_t drop_ln : 2;
+ uint64_t enc_mode : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_pcsxx_log_anl_reg_s cn52xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn52xxp1;
+ struct cvmx_pcsxx_log_anl_reg_s cn56xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn56xxp1;
+ struct cvmx_pcsxx_log_anl_reg_s cn61xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn63xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn63xxp1;
+ struct cvmx_pcsxx_log_anl_reg_s cn66xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn68xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_log_anl_reg cvmx_pcsxx_log_anl_reg_t;
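The PKT_SZ encoding above is not a uniform power ladder (1 KB, then 4/8/16 KB), so a lookup table is the simplest decode. A small sketch, assuming only the union defined here (the helper name is illustrative):

    static unsigned pcsxx_la_pkt_bytes(cvmx_pcsxx_log_anl_reg_t anl)
    {
        /* PKT_SZ per the field comment: 0->1KB, 1->4KB, 2->8KB, 3->16KB. */
        static const unsigned bytes[4] = { 1024, 4096, 8192, 16384 };
        return bytes[anl.s.pkt_sz & 3];
    }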
+
+/**
+ * cvmx_pcsx#_misc_ctl_reg
+ *
+ * RX lane polarity vector [3:0] = XOR_RXPLRT<9:6> ^ {4{RXPLRT<1>}};
+ *
+ * TX lane polarity vector [3:0] = XOR_TXPLRT<5:2> ^ {4{TXPLRT<0>}};
+ *
+ * In short, keep <1:0> at 2'b00 and use the <5:2> and <9:6> fields to define per-lane polarities
+ *
+ *
+ *
+ * PCSX Misc Control Register
+ */
+union cvmx_pcsxx_misc_ctl_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_misc_ctl_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t tx_swap : 1; /**< 0=do not swap xaui lanes going out to the QLMs
+ 1=swap lanes 3 <-> 0 and 2 <-> 1 */
+ uint64_t rx_swap : 1; /**< 0=do not swap xaui lanes coming in from the QLMs
+ 1=swap lanes 3 <-> 0 and 2 <-> 1 */
+ uint64_t xaui : 1; /**< 1=XAUI mode selected, 0=not XAUI mode selected
+ This bit represents pi_qlm1/3_cfg[1:0] pin status */
+ uint64_t gmxeno : 1; /**< GMX port enable override, GMX en/dis status is held
+ during data packet reception. */
+#else
+ uint64_t gmxeno : 1;
+ uint64_t xaui : 1;
+ uint64_t rx_swap : 1;
+ uint64_t tx_swap : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn52xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn56xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn61xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn63xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn63xxp1;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn66xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn68xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_misc_ctl_reg cvmx_pcsxx_misc_ctl_reg_t;
+
+/**
+ * cvmx_pcsx#_rx_sync_states_reg
+ *
+ * PCSX_RX_SYNC_STATES_REG = Receive Sync States Register
+ *
+ */
+union cvmx_pcsxx_rx_sync_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_rx_sync_states_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t sync3st : 4; /**< Receive lane 3 code grp sync state machine state */
+ uint64_t sync2st : 4; /**< Receive lane 2 code grp sync state machine state */
+ uint64_t sync1st : 4; /**< Receive lane 1 code grp sync state machine state */
+ uint64_t sync0st : 4; /**< Receive lane 0 code grp sync state machine state */
+#else
+ uint64_t sync0st : 4;
+ uint64_t sync1st : 4;
+ uint64_t sync2st : 4;
+ uint64_t sync3st : 4;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn61xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn63xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn63xxp1;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn66xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn68xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_rx_sync_states_reg cvmx_pcsxx_rx_sync_states_reg_t;
+
+/**
+ * cvmx_pcsx#_spd_abil_reg
+ *
+ * PCSX_SPD_ABIL_REG = Speed ability register
+ *
+ */
+union cvmx_pcsxx_spd_abil_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_spd_abil_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t tenpasst : 1; /**< Always 0, no 10PASS-TS/2BASE-TL capability support */
+ uint64_t tengb : 1; /**< Always 1, 10Gb/s supported */
+#else
+ uint64_t tengb : 1;
+ uint64_t tenpasst : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_pcsxx_spd_abil_reg_s cn52xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1;
+ struct cvmx_pcsxx_spd_abil_reg_s cn56xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1;
+ struct cvmx_pcsxx_spd_abil_reg_s cn61xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn63xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn63xxp1;
+ struct cvmx_pcsxx_spd_abil_reg_s cn66xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn68xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_spd_abil_reg cvmx_pcsxx_spd_abil_reg_t;
+
+/**
+ * cvmx_pcsx#_status1_reg
+ *
+ * PCSX_STATUS1_REG = Status Register1
+ *
+ */
+union cvmx_pcsxx_status1_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_status1_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t flt : 1; /**< 1=Fault condition detected, 0=No fault condition
+ This bit is a logical OR of Status2 reg bits 11,10 */
+ uint64_t reserved_3_6 : 4;
+ uint64_t rcv_lnk : 1; /**< 1=Receive Link up, 0=Receive Link down
+ Latching Low version of r_10gbx_status_reg[12],
+ Link down status continues until SW read. */
+ uint64_t lpable : 1; /**< Always set to 1 for Low Power ability indication */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t lpable : 1;
+ uint64_t rcv_lnk : 1;
+ uint64_t reserved_3_6 : 4;
+ uint64_t flt : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_pcsxx_status1_reg_s cn52xx;
+ struct cvmx_pcsxx_status1_reg_s cn52xxp1;
+ struct cvmx_pcsxx_status1_reg_s cn56xx;
+ struct cvmx_pcsxx_status1_reg_s cn56xxp1;
+ struct cvmx_pcsxx_status1_reg_s cn61xx;
+ struct cvmx_pcsxx_status1_reg_s cn63xx;
+ struct cvmx_pcsxx_status1_reg_s cn63xxp1;
+ struct cvmx_pcsxx_status1_reg_s cn66xx;
+ struct cvmx_pcsxx_status1_reg_s cn68xx;
+ struct cvmx_pcsxx_status1_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_status1_reg cvmx_pcsxx_status1_reg_t;
+
+/**
+ * cvmx_pcsx#_status2_reg
+ *
+ * PCSX_STATUS2_REG = Status Register2
+ *
+ */
+union cvmx_pcsxx_status2_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_status2_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t dev : 2; /**< Always at 2'b10, means a Device is present at the addr */
+ uint64_t reserved_12_13 : 2;
+ uint64_t xmtflt : 1; /**< 0=No xmit fault, 1=xmit fault. Implements latching
+ High function until SW read. */
+ uint64_t rcvflt : 1; /**< 0=No rcv fault, 1=rcv fault. Implements latching
+ High function until SW read */
+ uint64_t reserved_3_9 : 7;
+ uint64_t tengb_w : 1; /**< Always 0, no 10GBASE-W capability */
+ uint64_t tengb_x : 1; /**< Always 1, 10GBASE-X capable */
+ uint64_t tengb_r : 1; /**< Always 0, no 10GBASE-R capability */
+#else
+ uint64_t tengb_r : 1;
+ uint64_t tengb_x : 1;
+ uint64_t tengb_w : 1;
+ uint64_t reserved_3_9 : 7;
+ uint64_t rcvflt : 1;
+ uint64_t xmtflt : 1;
+ uint64_t reserved_12_13 : 2;
+ uint64_t dev : 2;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pcsxx_status2_reg_s cn52xx;
+ struct cvmx_pcsxx_status2_reg_s cn52xxp1;
+ struct cvmx_pcsxx_status2_reg_s cn56xx;
+ struct cvmx_pcsxx_status2_reg_s cn56xxp1;
+ struct cvmx_pcsxx_status2_reg_s cn61xx;
+ struct cvmx_pcsxx_status2_reg_s cn63xx;
+ struct cvmx_pcsxx_status2_reg_s cn63xxp1;
+ struct cvmx_pcsxx_status2_reg_s cn66xx;
+ struct cvmx_pcsxx_status2_reg_s cn68xx;
+ struct cvmx_pcsxx_status2_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_status2_reg cvmx_pcsxx_status2_reg_t;
+
+/**
+ * cvmx_pcsx#_tx_rx_polarity_reg
+ *
+ * PCSX_POLARITY_REG = TX_RX polarity reg
+ *
+ */
+union cvmx_pcsxx_tx_rx_polarity_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t xor_rxplrt : 4; /**< Per lane RX polarity control */
+ uint64_t xor_txplrt : 4; /**< Per lane TX polarity control */
+ uint64_t rxplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */
+ uint64_t txplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */
+#else
+ uint64_t txplrt : 1;
+ uint64_t rxplrt : 1;
+ uint64_t xor_txplrt : 4;
+ uint64_t xor_rxplrt : 4;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t rxplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */
+ uint64_t txplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */
+#else
+ uint64_t txplrt : 1;
+ uint64_t rxplrt : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn52xxp1;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn61xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xxp1;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn66xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_tx_rx_polarity_reg cvmx_pcsxx_tx_rx_polarity_reg_t;
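The polarity formulas quoted in the cvmx_pcsx#_misc_ctl_reg comment earlier reduce to XORing each 4-bit XOR_*PLRT field with the base bit replicated across all four lanes. A small sketch of the RX computation, assuming only the union defined here (the helper name is illustrative):

    static uint64_t pcsxx_rx_polarity_vector(cvmx_pcsxx_tx_rx_polarity_reg_t pol)
    {
        /* Replicate the single RXPLRT bit across the four lanes... */
        uint64_t base = pol.s.rxplrt ? 0xF : 0x0;
        /* ...and XOR with the per-lane control field (register bits <9:6>). */
        return pol.s.xor_rxplrt ^ base;
    }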
+
+/**
+ * cvmx_pcsx#_tx_rx_states_reg
+ *
+ * PCSX_TX_RX_STATES_REG = Transmit Receive States Register
+ *
+ */
+union cvmx_pcsxx_tx_rx_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_tx_rx_states_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t term_err : 1; /**< 1=Check end function detected error in packet
+ terminate ||T|| column or the one after it */
+ uint64_t syn3bad : 1; /**< 1=lane 3 code grp sync state machine in bad state */
+ uint64_t syn2bad : 1; /**< 1=lane 2 code grp sync state machine in bad state */
+ uint64_t syn1bad : 1; /**< 1=lane 1 code grp sync state machine in bad state */
+ uint64_t syn0bad : 1; /**< 1=lane 0 code grp sync state machine in bad state */
+ uint64_t rxbad : 1; /**< 1=Rcv state machine in a bad state, HW malfunction */
+ uint64_t algn_st : 3; /**< Lane alignment state machine state */
+ uint64_t rx_st : 2; /**< Receive state machine state */
+ uint64_t tx_st : 3; /**< Transmit state machine state */
+#else
+ uint64_t tx_st : 3;
+ uint64_t rx_st : 2;
+ uint64_t algn_st : 3;
+ uint64_t rxbad : 1;
+ uint64_t syn0bad : 1;
+ uint64_t syn1bad : 1;
+ uint64_t syn2bad : 1;
+ uint64_t syn3bad : 1;
+ uint64_t term_err : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t syn3bad : 1; /**< 1=lane 3 code grp sync state machine in bad state */
+ uint64_t syn2bad : 1; /**< 1=lane 2 code grp sync state machine in bad state */
+ uint64_t syn1bad : 1; /**< 1=lane 1 code grp sync state machine in bad state */
+ uint64_t syn0bad : 1; /**< 1=lane 0 code grp sync state machine in bad state */
+ uint64_t rxbad : 1; /**< 1=Rcv state machine in a bad state, HW malfunction */
+ uint64_t algn_st : 3; /**< Lane alignment state machine state */
+ uint64_t rx_st : 2; /**< Receive state machine state */
+ uint64_t tx_st : 3; /**< Transmit state machine state */
+#else
+ uint64_t tx_st : 3;
+ uint64_t rx_st : 2;
+ uint64_t algn_st : 3;
+ uint64_t rxbad : 1;
+ uint64_t syn0bad : 1;
+ uint64_t syn1bad : 1;
+ uint64_t syn2bad : 1;
+ uint64_t syn3bad : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } cn52xxp1;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn61xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn63xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn63xxp1;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn66xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn68xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn68xxp1;
+};
+typedef union cvmx_pcsxx_tx_rx_states_reg cvmx_pcsxx_tx_rx_states_reg_t;
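The four per-lane sync-bad flags are convenient to fold into a lane bitmask when reporting which XAUI lanes lost code-group sync. A small sketch, assuming only the union defined here (the helper name is illustrative):

    static unsigned pcsxx_bad_sync_lanes(cvmx_pcsxx_tx_rx_states_reg_t st)
    {
        /* Lane 0 in bit 0 ... lane 3 in bit 3. */
        return (unsigned)((st.s.syn3bad << 3) | (st.s.syn2bad << 2) |
                          (st.s.syn1bad << 1) | st.s.syn0bad);
    }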
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pcsxx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pemx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pemx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pemx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1439 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pemx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pemx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_PEMX_DEFS_H__
+#define __CVMX_PEMX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_BAR1_INDEXX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset <= 15)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_PEMX_BAR1_INDEXX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C00000A8ull) + (((offset) & 15) + ((block_id) & 1) * 0x200000ull) * 8;
+}
+#else
+#define CVMX_PEMX_BAR1_INDEXX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C00000A8ull) + (((offset) & 15) + ((block_id) & 1) * 0x200000ull) * 8)
+#endif
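Every accessor in this file follows the same two-build pattern: with CVMX_ENABLE_CSR_ADDRESS_CHECKING set, an inline function warns via cvmx_warn() when the chip model or argument range is wrong before computing the address; otherwise a plain macro computes the same address with no checking. Both forms mask their arguments, so either build returns a well-formed address. A minimal usage sketch, assuming cvmx_read_csr() from cvmx.h (the helper name is illustrative):

    /* Works identically with either build of the accessor. */
    static uint64_t pemx_read_bar1_entry0(void)
    {
        return cvmx_read_csr(CVMX_PEMX_BAR1_INDEXX(0, 0));
    }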
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_BAR2_MASK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_BAR2_MASK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000130ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_BAR2_MASK(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000130ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_BAR_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_BAR_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000128ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_BAR_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000128ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_BIST_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_BIST_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000018ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000018ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_BIST_STATUS2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_BIST_STATUS2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000420ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_BIST_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000420ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_CFG_RD(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_CFG_RD(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000030ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_CFG_RD(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000030ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_CFG_WR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_CFG_WR(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000028ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_CFG_WR(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000028ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_CPL_LUT_VALID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_CPL_LUT_VALID(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000098ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_CPL_LUT_VALID(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000098ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_CTL_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_CTL_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000000ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000000ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_DBG_INFO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_DBG_INFO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000008ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_DBG_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000008ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_DBG_INFO_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_DBG_INFO_EN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C00000A0ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_DBG_INFO_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800C00000A0ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_DIAG_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_DIAG_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000020ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_DIAG_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000020ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_INB_READ_CREDITS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_INB_READ_CREDITS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000138ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_INB_READ_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000138ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_INT_ENB(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_INT_ENB(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000410ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_INT_ENB(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000410ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_INT_ENB_INT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_INT_ENB_INT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000418ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_INT_ENB_INT(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000418ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_INT_SUM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_INT_SUM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000408ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_INT_SUM(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000408ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_P2N_BAR0_START(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_P2N_BAR0_START(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000080ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_P2N_BAR0_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000080ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_P2N_BAR1_START(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_P2N_BAR1_START(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000088ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_P2N_BAR1_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000088ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_P2N_BAR2_START(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_P2N_BAR2_START(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000090ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_P2N_BAR2_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000090ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_P2P_BARX_END(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_PEMX_P2P_BARX_END(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x100000ull) * 16;
+}
+#else
+#define CVMX_PEMX_P2P_BARX_END(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x100000ull) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_P2P_BARX_START(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_PEMX_P2P_BARX_START(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x100000ull) * 16;
+}
+#else
+#define CVMX_PEMX_P2P_BARX_START(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C0000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x100000ull) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEMX_TLP_CREDITS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PEMX_TLP_CREDITS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C0000038ull) + ((block_id) & 1) * 0x1000000ull;
+}
+#else
+#define CVMX_PEMX_TLP_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000038ull) + ((block_id) & 1) * 0x1000000ull)
+#endif
+
+/**
+ * cvmx_pem#_bar1_index#
+ *
+ * PEM_BAR1_INDEXX = PEM BAR1 IndexX Register
+ *
+ * Contains address index and control bits for access to memory ranges of BAR-1. Index is built from supplied address [25:22].
+ */
+union cvmx_pemx_bar1_indexx {
+ uint64_t u64;
+ struct cvmx_pemx_bar1_indexx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t addr_idx : 16; /**< Address bits [37:22] sent to L2C */
+ uint64_t ca : 1; /**< Set '1' when access is not to be cached in L2. */
+ uint64_t end_swp : 2; /**< Endian Swap Mode */
+ uint64_t addr_v : 1; /**< Set '1' when the selected address range is valid. */
+#else
+ uint64_t addr_v : 1;
+ uint64_t end_swp : 2;
+ uint64_t ca : 1;
+ uint64_t addr_idx : 16;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_pemx_bar1_indexx_s cn61xx;
+ struct cvmx_pemx_bar1_indexx_s cn63xx;
+ struct cvmx_pemx_bar1_indexx_s cn63xxp1;
+ struct cvmx_pemx_bar1_indexx_s cn66xx;
+ struct cvmx_pemx_bar1_indexx_s cn68xx;
+ struct cvmx_pemx_bar1_indexx_s cn68xxp1;
+ struct cvmx_pemx_bar1_indexx_s cnf71xx;
+};
+typedef union cvmx_pemx_bar1_indexx cvmx_pemx_bar1_indexx_t;
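BAR1 is steered by sixteen of these index entries, selected by inbound address bits [25:22]; each valid entry supplies L2/DRAM address bits [37:22] plus cache and endian-swap attributes. A minimal sketch of programming one entry, assuming the CVMX_PEMX_BAR1_INDEXX() accessor above and cvmx_write_csr() from cvmx.h (the helper name and the choice of endian-swap mode 1 are illustrative):

    static void pemx_map_bar1_entry(int pem, int entry, uint64_t phys)
    {
        cvmx_pemx_bar1_indexx_t idx;
        idx.u64 = 0;
        idx.s.addr_idx = phys >> 22; /* address bits [37:22] sent to L2C */
        idx.s.ca = 1;                /* do not cache this range in L2 */
        idx.s.end_swp = 1;           /* endian-swap mode (value is an assumption) */
        idx.s.addr_v = 1;            /* mark the range valid */
        cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(entry, pem), idx.u64);
    }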
+
+/**
+ * cvmx_pem#_bar2_mask
+ *
+ * PEM_BAR2_MASK = PEM BAR2 MASK
+ *
+ * The mask pattern that is ANDed with the address from the PCIe core for BAR2 hits.
+ */
+union cvmx_pemx_bar2_mask {
+ uint64_t u64;
+ struct cvmx_pemx_bar2_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t mask : 35; /**< The value to be ANDed with the address sent to
+ the Octeon memory. */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t mask : 35;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_pemx_bar2_mask_s cn61xx;
+ struct cvmx_pemx_bar2_mask_s cn66xx;
+ struct cvmx_pemx_bar2_mask_s cn68xx;
+ struct cvmx_pemx_bar2_mask_s cn68xxp1;
+ struct cvmx_pemx_bar2_mask_s cnf71xx;
+};
+typedef union cvmx_pemx_bar2_mask cvmx_pemx_bar2_mask_t;
+
+/**
+ * cvmx_pem#_bar_ctl
+ *
+ * PEM_BAR_CTL = PEM BAR Control
+ *
+ * Contains control for BAR accesses.
+ */
+union cvmx_pemx_bar_ctl {
+ uint64_t u64;
+ struct cvmx_pemx_bar_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t bar1_siz : 3; /**< Pcie-Port0, Bar1 Size. 1 == 64MB, 2 == 128MB,
+ 3 == 256MB, 4 == 512MB, 5 == 1024MB, 6 == 2048MB,
+ 0 and 7 are reserved. */
+ uint64_t bar2_enb : 1; /**< When set '1' BAR2 is enabled and will respond; when
+ clear '0' BAR2 accesses will cause UR responses. */
+ uint64_t bar2_esx : 2; /**< Value will be XORed with pci-address[39:38] to
+ determine the endian swap mode. */
+ uint64_t bar2_cax : 1; /**< Value will be XORed with pcie-address[40] to
+ determine the L2 cache attribute.
+ Not cached in L2 if XOR result is 1 */
+#else
+ uint64_t bar2_cax : 1;
+ uint64_t bar2_esx : 2;
+ uint64_t bar2_enb : 1;
+ uint64_t bar1_siz : 3;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_pemx_bar_ctl_s cn61xx;
+ struct cvmx_pemx_bar_ctl_s cn63xx;
+ struct cvmx_pemx_bar_ctl_s cn63xxp1;
+ struct cvmx_pemx_bar_ctl_s cn66xx;
+ struct cvmx_pemx_bar_ctl_s cn68xx;
+ struct cvmx_pemx_bar_ctl_s cn68xxp1;
+ struct cvmx_pemx_bar_ctl_s cnf71xx;
+};
+typedef union cvmx_pemx_bar_ctl cvmx_pemx_bar_ctl_t;
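The BAR1_SIZ encoding above is a power-of-two ladder starting at 64 MB, so the byte size can be computed as 32 MB shifted left by the encoding rather than by table lookup. A small sketch, valid only for the legal encodings 1 through 6 (the helper name is illustrative):

    static uint64_t pemx_bar1_size_bytes(unsigned bar1_siz)
    {
        /* 1 -> 64MB, 2 -> 128MB, ... 6 -> 2048MB; 0 and 7 are reserved. */
        if (bar1_siz < 1 || bar1_siz > 6)
            return 0;
        return (32ull << 20) << bar1_siz;
    }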
+
+/**
+ * cvmx_pem#_bist_status
+ *
+ * PEM_BIST_STATUS = PEM Bist Status
+ *
+ * Results from BIST runs of the PEM's memories.
+ */
+union cvmx_pemx_bist_status {
+ uint64_t u64;
+ struct cvmx_pemx_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t retry : 1; /**< Retry Buffer. */
+ uint64_t rqdata0 : 1; /**< Rx Queue Data Memory0. */
+ uint64_t rqdata1 : 1; /**< Rx Queue Data Memory1. */
+ uint64_t rqdata2 : 1; /**< Rx Queue Data Memory2. */
+ uint64_t rqdata3 : 1; /**< Rx Queue Data Memory3. */
+ uint64_t rqhdr1 : 1; /**< Rx Queue Header1. */
+ uint64_t rqhdr0 : 1; /**< Rx Queue Header0. */
+ uint64_t sot : 1; /**< SOT Buffer. */
+#else
+ uint64_t sot : 1;
+ uint64_t rqhdr0 : 1;
+ uint64_t rqhdr1 : 1;
+ uint64_t rqdata3 : 1;
+ uint64_t rqdata2 : 1;
+ uint64_t rqdata1 : 1;
+ uint64_t rqdata0 : 1;
+ uint64_t retry : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_pemx_bist_status_s cn61xx;
+ struct cvmx_pemx_bist_status_s cn63xx;
+ struct cvmx_pemx_bist_status_s cn63xxp1;
+ struct cvmx_pemx_bist_status_s cn66xx;
+ struct cvmx_pemx_bist_status_s cn68xx;
+ struct cvmx_pemx_bist_status_s cn68xxp1;
+ struct cvmx_pemx_bist_status_s cnf71xx;
+};
+typedef union cvmx_pemx_bist_status cvmx_pemx_bist_status_t;
+
+/**
+ * cvmx_pem#_bist_status2
+ *
+ * PEM(0..1)_BIST_STATUS2 = PEM BIST Status Register
+ *
+ * Results from BIST runs of PEM's memories.
+ */
+union cvmx_pemx_bist_status2 {
+ uint64_t u64;
+ struct cvmx_pemx_bist_status2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t e2p_cpl : 1; /**< BIST Status for the e2p_cpl_fifo */
+ uint64_t e2p_n : 1; /**< BIST Status for the e2p_n_fifo */
+ uint64_t e2p_p : 1; /**< BIST Status for the e2p_p_fifo */
+ uint64_t peai_p2e : 1; /**< BIST Status for the peai__pesc_fifo */
+ uint64_t pef_tpf1 : 1; /**< BIST Status for the pef_tlp_p_fifo1 */
+ uint64_t pef_tpf0 : 1; /**< BIST Status for the pef_tlp_p_fifo0 */
+ uint64_t pef_tnf : 1; /**< BIST Status for the pef_tlp_n_fifo */
+ uint64_t pef_tcf1 : 1; /**< BIST Status for the pef_tlp_cpl_fifo1 */
+ uint64_t pef_tc0 : 1; /**< BIST Status for the pef_tlp_cpl_fifo0 */
+ uint64_t ppf : 1; /**< BIST Status for the ppf_fifo */
+#else
+ uint64_t ppf : 1;
+ uint64_t pef_tc0 : 1;
+ uint64_t pef_tcf1 : 1;
+ uint64_t pef_tnf : 1;
+ uint64_t pef_tpf0 : 1;
+ uint64_t pef_tpf1 : 1;
+ uint64_t peai_p2e : 1;
+ uint64_t e2p_p : 1;
+ uint64_t e2p_n : 1;
+ uint64_t e2p_cpl : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_pemx_bist_status2_s cn61xx;
+ struct cvmx_pemx_bist_status2_s cn63xx;
+ struct cvmx_pemx_bist_status2_s cn63xxp1;
+ struct cvmx_pemx_bist_status2_s cn66xx;
+ struct cvmx_pemx_bist_status2_s cn68xx;
+ struct cvmx_pemx_bist_status2_s cn68xxp1;
+ struct cvmx_pemx_bist_status2_s cnf71xx;
+};
+typedef union cvmx_pemx_bist_status2 cvmx_pemx_bist_status2_t;
+
+/**
+ * cvmx_pem#_cfg_rd
+ *
+ * PEM_CFG_RD = PEM Configuration Read
+ *
+ * Allows read access to the configuration in the PCIe Core.
+ */
+union cvmx_pemx_cfg_rd {
+ uint64_t u64;
+ struct cvmx_pemx_cfg_rd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 32; /**< Data. */
+ uint64_t addr : 32; /**< Address to read. A write to this register
+ starts a read operation. */
+#else
+ uint64_t addr : 32;
+ uint64_t data : 32;
+#endif
+ } s;
+ struct cvmx_pemx_cfg_rd_s cn61xx;
+ struct cvmx_pemx_cfg_rd_s cn63xx;
+ struct cvmx_pemx_cfg_rd_s cn63xxp1;
+ struct cvmx_pemx_cfg_rd_s cn66xx;
+ struct cvmx_pemx_cfg_rd_s cn68xx;
+ struct cvmx_pemx_cfg_rd_s cn68xxp1;
+ struct cvmx_pemx_cfg_rd_s cnf71xx;
+};
+typedef union cvmx_pemx_cfg_rd cvmx_pemx_cfg_rd_t;
+
+/**
+ * cvmx_pem#_cfg_wr
+ *
+ * PEM_CFG_WR = PEM Configuration Write
+ *
+ * Allows write access to the configuration in the PCIe Core.
+ */
+union cvmx_pemx_cfg_wr {
+ uint64_t u64;
+ struct cvmx_pemx_cfg_wr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 32; /**< Data to write. A write to this register starts
+ a write operation. */
+ uint64_t addr : 32; /**< Address to write. A write to this register starts
+ a write operation. */
+#else
+ uint64_t addr : 32;
+ uint64_t data : 32;
+#endif
+ } s;
+ struct cvmx_pemx_cfg_wr_s cn61xx;
+ struct cvmx_pemx_cfg_wr_s cn63xx;
+ struct cvmx_pemx_cfg_wr_s cn63xxp1;
+ struct cvmx_pemx_cfg_wr_s cn66xx;
+ struct cvmx_pemx_cfg_wr_s cn68xx;
+ struct cvmx_pemx_cfg_wr_s cn68xxp1;
+ struct cvmx_pemx_cfg_wr_s cnf71xx;
+};
+typedef union cvmx_pemx_cfg_wr cvmx_pemx_cfg_wr_t;
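Together PEM_CFG_RD and PEM_CFG_WR implement the indirect access described in their field comments: a write of the address half starts the operation, and for reads the data half is then read back. A minimal sketch of a config-space read, assuming the CVMX_PEMX_CFG_RD() accessor above and the cvmx_read_csr()/cvmx_write_csr() pair from cvmx.h (the helper name is illustrative):

    static uint32_t pemx_cfg_read32(int pem, uint32_t cfg_addr)
    {
        cvmx_pemx_cfg_rd_t rd;
        rd.u64 = 0;
        rd.s.addr = cfg_addr; /* writing the register starts the read */
        cvmx_write_csr(CVMX_PEMX_CFG_RD(pem), rd.u64);
        rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pem));
        return rd.s.data;     /* completion data comes back in the upper half */
    }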
+
+/**
+ * cvmx_pem#_cpl_lut_valid
+ *
+ * PEM_CPL_LUT_VALID = PEM Completion Lookup Table Valid
+ *
+ * Bit set for outstanding tag read.
+ */
+union cvmx_pemx_cpl_lut_valid {
+ uint64_t u64;
+ struct cvmx_pemx_cpl_lut_valid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t tag : 32; /**< Each set bit corresponds to an outstanding tag
+ expecting a completion. */
+#else
+ uint64_t tag : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pemx_cpl_lut_valid_s cn61xx;
+ struct cvmx_pemx_cpl_lut_valid_s cn63xx;
+ struct cvmx_pemx_cpl_lut_valid_s cn63xxp1;
+ struct cvmx_pemx_cpl_lut_valid_s cn66xx;
+ struct cvmx_pemx_cpl_lut_valid_s cn68xx;
+ struct cvmx_pemx_cpl_lut_valid_s cn68xxp1;
+ struct cvmx_pemx_cpl_lut_valid_s cnf71xx;
+};
+typedef union cvmx_pemx_cpl_lut_valid cvmx_pemx_cpl_lut_valid_t;
+
+/**
+ * cvmx_pem#_ctl_status
+ *
+ * NOTE: Logic Analyzer is enabled with LA_EN for the specified PCS lane only. PKT_SZ is effective only when LA_EN=1
+ * For normal operation (sgmii or 1000Base-X), this bit must be 0.
+ * See pcsx.csr for xaui logic analyzer mode.
+ * For full description see document at .../rtl/pcs/readme_logic_analyzer.txt
+ *
+ *
+ * PEM_CTL_STATUS = PEM Control Status
+ *
+ * General control and status of the PEM.
+ */
+union cvmx_pemx_ctl_status {
+ uint64_t u64;
+ struct cvmx_pemx_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t auto_sd : 1; /**< Link Hardware Autonomous Speed Disable. */
+ uint64_t dnum : 5; /**< Primary bus device number. */
+ uint64_t pbus : 8; /**< Primary bus number. */
+ uint64_t reserved_32_33 : 2;
+ uint64_t cfg_rtry : 16; /**< The time x 0x10000 in core clocks to wait for a
+ CPL to a CFG RD that does not carry a Retry Status.
+ Until such time that the timeout occurs and Retry
+ Status is received for a CFG RD, the CFG Read
+ will be resent. A value of 0 disables retries and
+ treats a CPL Retry as a CPL UR.
+ When enabled only one CFG RD may be issued until
+ either successful completion or CPL UR. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t pm_xtoff : 1; /**< When WRITTEN with a '1' a single cycle pulse is sent
+ to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t pm_xpme : 1; /**< When WRITTEN with a '1' a single cycle pulse is sent
+ to the PCIe core pm_xmt_pme port. EP mode. */
+ uint64_t ob_p_cmd : 1; /**< When WRITTEN with a '1' a single cycle pulse is sent
+ to the PCIe core outband_pwrup_cmd port. EP mode. */
+ uint64_t reserved_7_8 : 2;
+ uint64_t nf_ecrc : 1; /**< Do not forward peer-to-peer ECRC TLPs. */
+ uint64_t dly_one : 1; /**< When set the output client state machines will
+ wait one cycle before starting a new TLP out. */
+ uint64_t lnk_enb : 1; /**< When set '1' the link is enabled when '0' the
+ link is disabled. This bit only is active when in
+ RC mode. */
+ uint64_t ro_ctlp : 1; /**< When set '1' C-TLPs that have the RO bit set will
+ not wait for P-TLPs that normally would be sent
+ first. */
+ uint64_t fast_lm : 1; /**< When '1' forces fast link mode. */
+ uint64_t inv_ecrc : 1; /**< When '1' causes the LSB of the ECRC to be inverted. */
+ uint64_t inv_lcrc : 1; /**< When '1' causes the LSB of the LCRC to be inverted. */
+#else
+ uint64_t inv_lcrc : 1;
+ uint64_t inv_ecrc : 1;
+ uint64_t fast_lm : 1;
+ uint64_t ro_ctlp : 1;
+ uint64_t lnk_enb : 1;
+ uint64_t dly_one : 1;
+ uint64_t nf_ecrc : 1;
+ uint64_t reserved_7_8 : 2;
+ uint64_t ob_p_cmd : 1;
+ uint64_t pm_xpme : 1;
+ uint64_t pm_xtoff : 1;
+ uint64_t reserved_12_15 : 4;
+ uint64_t cfg_rtry : 16;
+ uint64_t reserved_32_33 : 2;
+ uint64_t pbus : 8;
+ uint64_t dnum : 5;
+ uint64_t auto_sd : 1;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_pemx_ctl_status_s cn61xx;
+ struct cvmx_pemx_ctl_status_s cn63xx;
+ struct cvmx_pemx_ctl_status_s cn63xxp1;
+ struct cvmx_pemx_ctl_status_s cn66xx;
+ struct cvmx_pemx_ctl_status_s cn68xx;
+ struct cvmx_pemx_ctl_status_s cn68xxp1;
+ struct cvmx_pemx_ctl_status_s cnf71xx;
+};
+typedef union cvmx_pemx_ctl_status cvmx_pemx_ctl_status_t;
+
+/**
+ * cvmx_pem#_dbg_info
+ *
+ * PEM(0..1)_DBG_INFO = PEM Debug Information
+ *
+ * General debug info.
+ */
+union cvmx_pemx_dbg_info {
+ uint64_t u64;
+ struct cvmx_pemx_dbg_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t ecrc_e : 1; /**< Received an ECRC error.
+ radm_ecrc_err */
+ uint64_t rawwpp : 1; /**< Received a write with poisoned payload
+ radm_rcvd_wreq_poisoned */
+ uint64_t racpp : 1; /**< Received a completion with poisoned payload
+ radm_rcvd_cpl_poisoned */
+ uint64_t ramtlp : 1; /**< Received a malformed TLP
+ radm_mlf_tlp_err */
+ uint64_t rarwdns : 1; /**< Received a request which the device does not support
+ radm_rcvd_ur_req */
+ uint64_t caar : 1; /**< Completer aborted a request
+ radm_rcvd_ca_req
+ This bit will never be set because Octeon does
+ not generate Completer Aborts. */
+ uint64_t racca : 1; /**< Received a completion with CA status
+ radm_rcvd_cpl_ca */
+ uint64_t racur : 1; /**< Received a completion with UR status
+ radm_rcvd_cpl_ur */
+ uint64_t rauc : 1; /**< Received an unexpected completion
+ radm_unexp_cpl_err */
+ uint64_t rqo : 1; /**< Receive queue overflow. Normally happens only when
+ flow control advertisements are ignored
+ radm_qoverflow */
+ uint64_t fcuv : 1; /**< Flow Control Update Violation (opt. checks)
+ int_xadm_fc_prot_err */
+ uint64_t rpe : 1; /**< When the PHY reports 8B/10B decode error
+ (RxStatus = 3b100) or disparity error
+ (RxStatus = 3b111), the signal rmlh_rcvd_err will
+ be asserted.
+ rmlh_rcvd_err */
+ uint64_t fcpvwt : 1; /**< Flow Control Protocol Violation (Watchdog Timer)
+ rtlh_fc_prot_err */
+ uint64_t dpeoosd : 1; /**< DLLP protocol error (out of sequence DLLP)
+ rdlh_prot_err */
+ uint64_t rtwdle : 1; /**< Received TLP with DataLink Layer Error
+ rdlh_bad_tlp_err */
+ uint64_t rdwdle : 1; /**< Received DLLP with DataLink Layer Error
+ rdlh_bad_dllp_err */
+ uint64_t mre : 1; /**< Max Retries Exceeded
+ xdlh_replay_num_rlover_err */
+ uint64_t rte : 1; /**< Replay Timer Expired
+ xdlh_replay_timeout_err
+ This bit is set when the REPLAY_TIMER expires in
+ the PCIE core. The probability of this bit being
+ set will increase with the traffic load. */
+ uint64_t acto : 1; /**< A Completion Timeout Occurred
+ pedc_radm_cpl_timeout */
+ uint64_t rvdm : 1; /**< Received Vendor-Defined Message
+ pedc_radm_vendor_msg */
+ uint64_t rumep : 1; /**< Received Unlock Message (EP Mode Only)
+ pedc_radm_msg_unlock */
+ uint64_t rptamrc : 1; /**< Received PME Turnoff Acknowledge Message
+ (RC Mode only)
+ pedc_radm_pm_to_ack */
+ uint64_t rpmerc : 1; /**< Received PME Message (RC Mode only)
+ pedc_radm_pm_pme */
+ uint64_t rfemrc : 1; /**< Received Fatal Error Message (RC Mode only)
+ pedc_radm_fatal_err
+ Bit set when a message with ERR_FATAL is set. */
+ uint64_t rnfemrc : 1; /**< Received Non-Fatal Error Message (RC Mode only)
+ pedc_radm_nonfatal_err */
+ uint64_t rcemrc : 1; /**< Received Correctable Error Message (RC Mode only)
+ pedc_radm_correctable_err */
+ uint64_t rpoison : 1; /**< Received Poisoned TLP
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv */
+ uint64_t recrce : 1; /**< Received ECRC Error
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot */
+ uint64_t rtlplle : 1; /**< Received TLP has link layer error
+ pedc_radm_trgt1_dllp_abort & pedc__radm_trgt1_eot */
+ uint64_t rtlpmal : 1; /**< Received TLP is malformed or a message.
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot
+ If the core receives a MSG (or Vendor Message)
+ this bit will be set. */
+ uint64_t spoison : 1; /**< Poisoned TLP sent
+ peai__client0_tlp_ep & peai__client0_tlp_hv */
+#else
+ uint64_t spoison : 1;
+ uint64_t rtlpmal : 1;
+ uint64_t rtlplle : 1;
+ uint64_t recrce : 1;
+ uint64_t rpoison : 1;
+ uint64_t rcemrc : 1;
+ uint64_t rnfemrc : 1;
+ uint64_t rfemrc : 1;
+ uint64_t rpmerc : 1;
+ uint64_t rptamrc : 1;
+ uint64_t rumep : 1;
+ uint64_t rvdm : 1;
+ uint64_t acto : 1;
+ uint64_t rte : 1;
+ uint64_t mre : 1;
+ uint64_t rdwdle : 1;
+ uint64_t rtwdle : 1;
+ uint64_t dpeoosd : 1;
+ uint64_t fcpvwt : 1;
+ uint64_t rpe : 1;
+ uint64_t fcuv : 1;
+ uint64_t rqo : 1;
+ uint64_t rauc : 1;
+ uint64_t racur : 1;
+ uint64_t racca : 1;
+ uint64_t caar : 1;
+ uint64_t rarwdns : 1;
+ uint64_t ramtlp : 1;
+ uint64_t racpp : 1;
+ uint64_t rawwpp : 1;
+ uint64_t ecrc_e : 1;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_pemx_dbg_info_s cn61xx;
+ struct cvmx_pemx_dbg_info_s cn63xx;
+ struct cvmx_pemx_dbg_info_s cn63xxp1;
+ struct cvmx_pemx_dbg_info_s cn66xx;
+ struct cvmx_pemx_dbg_info_s cn68xx;
+ struct cvmx_pemx_dbg_info_s cn68xxp1;
+ struct cvmx_pemx_dbg_info_s cnf71xx;
+};
+typedef union cvmx_pemx_dbg_info cvmx_pemx_dbg_info_t;
+
+/**
+ * cvmx_pem#_dbg_info_en
+ *
+ * PEM(0..1)_DBG_INFO_EN = PEM Debug Information Enable
+ *
+ * Allows PEM_DBG_INFO to generate interrupts when the corresponding enable bit is set.
+ */
+union cvmx_pemx_dbg_info_en {
+ uint64_t u64;
+ struct cvmx_pemx_dbg_info_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t ecrc_e : 1; /**< Allows PEM_DBG_INFO[30] to generate an interrupt. */
+ uint64_t rawwpp : 1; /**< Allows PEM_DBG_INFO[29] to generate an interrupt. */
+ uint64_t racpp : 1; /**< Allows PEM_DBG_INFO[28] to generate an interrupt. */
+ uint64_t ramtlp : 1; /**< Allows PEM_DBG_INFO[27] to generate an interrupt. */
+ uint64_t rarwdns : 1; /**< Allows PEM_DBG_INFO[26] to generate an interrupt. */
+ uint64_t caar : 1; /**< Allows PEM_DBG_INFO[25] to generate an interrupt. */
+ uint64_t racca : 1; /**< Allows PEM_DBG_INFO[24] to generate an interrupt. */
+ uint64_t racur : 1; /**< Allows PEM_DBG_INFO[23] to generate an interrupt. */
+ uint64_t rauc : 1; /**< Allows PEM_DBG_INFO[22] to generate an interrupt. */
+ uint64_t rqo : 1; /**< Allows PEM_DBG_INFO[21] to generate an interrupt. */
+ uint64_t fcuv : 1; /**< Allows PEM_DBG_INFO[20] to generate an interrupt. */
+ uint64_t rpe : 1; /**< Allows PEM_DBG_INFO[19] to generate an interrupt. */
+ uint64_t fcpvwt : 1; /**< Allows PEM_DBG_INFO[18] to generate an interrupt. */
+ uint64_t dpeoosd : 1; /**< Allows PEM_DBG_INFO[17] to generate an interrupt. */
+ uint64_t rtwdle : 1; /**< Allows PEM_DBG_INFO[16] to generate an interrupt. */
+ uint64_t rdwdle : 1; /**< Allows PEM_DBG_INFO[15] to generate an interrupt. */
+ uint64_t mre : 1; /**< Allows PEM_DBG_INFO[14] to generate an interrupt. */
+ uint64_t rte : 1; /**< Allows PEM_DBG_INFO[13] to generate an interrupt. */
+ uint64_t acto : 1; /**< Allows PEM_DBG_INFO[12] to generate an interrupt. */
+ uint64_t rvdm : 1; /**< Allows PEM_DBG_INFO[11] to generate an interrupt. */
+ uint64_t rumep : 1; /**< Allows PEM_DBG_INFO[10] to generate an interrupt. */
+ uint64_t rptamrc : 1; /**< Allows PEM_DBG_INFO[9] to generate an interrupt. */
+ uint64_t rpmerc : 1; /**< Allows PEM_DBG_INFO[8] to generate an interrupt. */
+ uint64_t rfemrc : 1; /**< Allows PEM_DBG_INFO[7] to generate an interrupt. */
+ uint64_t rnfemrc : 1; /**< Allows PEM_DBG_INFO[6] to generate an interrupt. */
+ uint64_t rcemrc : 1; /**< Allows PEM_DBG_INFO[5] to generate an interrupt. */
+ uint64_t rpoison : 1; /**< Allows PEM_DBG_INFO[4] to generate an interrupt. */
+ uint64_t recrce : 1; /**< Allows PEM_DBG_INFO[3] to generate an interrupt. */
+ uint64_t rtlplle : 1; /**< Allows PEM_DBG_INFO[2] to generate an interrupt. */
+ uint64_t rtlpmal : 1; /**< Allows PEM_DBG_INFO[1] to generate an interrupt. */
+ uint64_t spoison : 1; /**< Allows PEM_DBG_INFO[0] to generate an interrupt. */
+#else
+ uint64_t spoison : 1;
+ uint64_t rtlpmal : 1;
+ uint64_t rtlplle : 1;
+ uint64_t recrce : 1;
+ uint64_t rpoison : 1;
+ uint64_t rcemrc : 1;
+ uint64_t rnfemrc : 1;
+ uint64_t rfemrc : 1;
+ uint64_t rpmerc : 1;
+ uint64_t rptamrc : 1;
+ uint64_t rumep : 1;
+ uint64_t rvdm : 1;
+ uint64_t acto : 1;
+ uint64_t rte : 1;
+ uint64_t mre : 1;
+ uint64_t rdwdle : 1;
+ uint64_t rtwdle : 1;
+ uint64_t dpeoosd : 1;
+ uint64_t fcpvwt : 1;
+ uint64_t rpe : 1;
+ uint64_t fcuv : 1;
+ uint64_t rqo : 1;
+ uint64_t rauc : 1;
+ uint64_t racur : 1;
+ uint64_t racca : 1;
+ uint64_t caar : 1;
+ uint64_t rarwdns : 1;
+ uint64_t ramtlp : 1;
+ uint64_t racpp : 1;
+ uint64_t rawwpp : 1;
+ uint64_t ecrc_e : 1;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_pemx_dbg_info_en_s cn61xx;
+ struct cvmx_pemx_dbg_info_en_s cn63xx;
+ struct cvmx_pemx_dbg_info_en_s cn63xxp1;
+ struct cvmx_pemx_dbg_info_en_s cn66xx;
+ struct cvmx_pemx_dbg_info_en_s cn68xx;
+ struct cvmx_pemx_dbg_info_en_s cn68xxp1;
+ struct cvmx_pemx_dbg_info_en_s cnf71xx;
+};
+typedef union cvmx_pemx_dbg_info_en cvmx_pemx_dbg_info_en_t;
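Each PEM_DBG_INFO bit latches an error event; the matching PEM_DBG_INFO_EN bit promotes it to the PEM_INT_SUM[EXC] interrupt condition. A minimal sketch that enables reporting of replay-timer expirations, assuming the CVMX_PEMX_DBG_INFO_EN() accessor above and the cvmx_read_csr()/cvmx_write_csr() pair from cvmx.h (the helper name is illustrative):

    static void pemx_enable_rte_report(int pem)
    {
        cvmx_pemx_dbg_info_en_t en;
        en.u64 = cvmx_read_csr(CVMX_PEMX_DBG_INFO_EN(pem));
        en.s.rte = 1; /* Replay Timer Expired -> PEM_INT_SUM[EXC] */
        cvmx_write_csr(CVMX_PEMX_DBG_INFO_EN(pem), en.u64);
    }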
+
+/**
+ * cvmx_pem#_diag_status
+ *
+ * PEM_DIAG_STATUS = PEM Diagnostic Status
+ *
+ * Selection control for the core's diagnostic bus.
+ */
+union cvmx_pemx_diag_status {
+ uint64_t u64;
+ struct cvmx_pemx_diag_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t pm_dst : 1; /**< Current power management DSTATE. */
+ uint64_t pm_stat : 1; /**< Power Management Status. */
+ uint64_t pm_en : 1; /**< Power Management Event Enable. */
+ uint64_t aux_en : 1; /**< Auxiliary Power Enable. */
+#else
+ uint64_t aux_en : 1;
+ uint64_t pm_en : 1;
+ uint64_t pm_stat : 1;
+ uint64_t pm_dst : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_pemx_diag_status_s cn61xx;
+ struct cvmx_pemx_diag_status_s cn63xx;
+ struct cvmx_pemx_diag_status_s cn63xxp1;
+ struct cvmx_pemx_diag_status_s cn66xx;
+ struct cvmx_pemx_diag_status_s cn68xx;
+ struct cvmx_pemx_diag_status_s cn68xxp1;
+ struct cvmx_pemx_diag_status_s cnf71xx;
+};
+typedef union cvmx_pemx_diag_status cvmx_pemx_diag_status_t;
+
+/**
+ * cvmx_pem#_inb_read_credits
+ *
+ * PEM_INB_READ_CREDITS
+ *
+ * The number of in-flight reads from the PCIe core to the SLI
+ */
+union cvmx_pemx_inb_read_credits {
+ uint64_t u64;
+ struct cvmx_pemx_inb_read_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t num : 6; /**< The number of reads that may be in flight from
+ the PCIe core to the SLI. Minimum is 2 and
+ maximum is 32. */
+#else
+ uint64_t num : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_pemx_inb_read_credits_s cn61xx;
+ struct cvmx_pemx_inb_read_credits_s cn66xx;
+ struct cvmx_pemx_inb_read_credits_s cn68xx;
+ struct cvmx_pemx_inb_read_credits_s cnf71xx;
+};
+typedef union cvmx_pemx_inb_read_credits cvmx_pemx_inb_read_credits_t;
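Because only 2 through 32 are legal values for NUM, a writer should clamp before programming the field. A minimal sketch, assuming the CVMX_PEMX_INB_READ_CREDITS() accessor above and the cvmx_read_csr()/cvmx_write_csr() pair from cvmx.h (the helper name is illustrative):

    static void pemx_set_inb_read_credits(int pem, unsigned n)
    {
        cvmx_pemx_inb_read_credits_t cred;
        /* Legal range per the field comment is 2..32. */
        if (n < 2)  n = 2;
        if (n > 32) n = 32;
        cred.u64 = cvmx_read_csr(CVMX_PEMX_INB_READ_CREDITS(pem));
        cred.s.num = n;
        cvmx_write_csr(CVMX_PEMX_INB_READ_CREDITS(pem), cred.u64);
    }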
+
+/**
+ * cvmx_pem#_int_enb
+ *
+ * PEM(0..1)_INT_ENB = PEM Interrupt Enable
+ *
+ * Enables interrupt conditions for the PEM to generate an RSL interrupt.
+ */
+union cvmx_pemx_int_enb {
+ uint64_t u64;
+ struct cvmx_pemx_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t crs_dr : 1; /**< Enables PEM_INT_SUM[13] to generate an
+ interrupt to the MIO. */
+ uint64_t crs_er : 1; /**< Enables PEM_INT_SUM[12] to generate an
+ interrupt to the MIO. */
+ uint64_t rdlk : 1; /**< Enables PEM_INT_SUM[11] to generate an
+ interrupt to the MIO. */
+ uint64_t exc : 1; /**< Enables PEM_INT_SUM[10] to generate an
+ interrupt to the MIO. */
+ uint64_t un_bx : 1; /**< Enables PEM_INT_SUM[9] to generate an
+ interrupt to the MIO. */
+ uint64_t un_b2 : 1; /**< Enables PEM_INT_SUM[8] to generate an
+ interrupt to the MIO. */
+ uint64_t un_b1 : 1; /**< Enables PEM_INT_SUM[7] to generate an
+ interrupt to the MIO. */
+ uint64_t up_bx : 1; /**< Enables PEM_INT_SUM[6] to generate an
+ interrupt to the MIO. */
+ uint64_t up_b2 : 1; /**< Enables PEM_INT_SUM[5] to generate an
+ interrupt to the MIO. */
+ uint64_t up_b1 : 1; /**< Enables PEM_INT_SUM[4] to generate an
+ interrupt to the MIO. */
+ uint64_t pmem : 1; /**< Enables PEM_INT_SUM[3] to generate an
+ interrupt to the MIO. */
+ uint64_t pmei : 1; /**< Enables PEM_INT_SUM[2] to generate an
+ interrupt to the MIO. */
+ uint64_t se : 1; /**< Enables PEM_INT_SUM[1] to generate an
+ interrupt to the MIO. */
+ uint64_t aeri : 1; /**< Enables PEM_INT_SUM[0] to generate an
+ interrupt to the MIO. */
+#else
+ uint64_t aeri : 1;
+ uint64_t se : 1;
+ uint64_t pmei : 1;
+ uint64_t pmem : 1;
+ uint64_t up_b1 : 1;
+ uint64_t up_b2 : 1;
+ uint64_t up_bx : 1;
+ uint64_t un_b1 : 1;
+ uint64_t un_b2 : 1;
+ uint64_t un_bx : 1;
+ uint64_t exc : 1;
+ uint64_t rdlk : 1;
+ uint64_t crs_er : 1;
+ uint64_t crs_dr : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_pemx_int_enb_s cn61xx;
+ struct cvmx_pemx_int_enb_s cn63xx;
+ struct cvmx_pemx_int_enb_s cn63xxp1;
+ struct cvmx_pemx_int_enb_s cn66xx;
+ struct cvmx_pemx_int_enb_s cn68xx;
+ struct cvmx_pemx_int_enb_s cn68xxp1;
+ struct cvmx_pemx_int_enb_s cnf71xx;
+};
+typedef union cvmx_pemx_int_enb cvmx_pemx_int_enb_t;
+
+/**
+ * cvmx_pem#_int_enb_int
+ *
+ * PEM(0..1)_INT_ENB_INT = PEM Interrupt Enable
+ *
+ * Enables interrupt conditions for the PEM to generate an RSL interrupt.
+ */
+union cvmx_pemx_int_enb_int {
+ uint64_t u64;
+ struct cvmx_pemx_int_enb_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t crs_dr : 1; /**< Enables PEM_INT_SUM[13] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t crs_er : 1; /**< Enables PEM_INT_SUM[12] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t rdlk : 1; /**< Enables PEM_INT_SUM[11] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t exc : 1; /**< Enables PEM_INT_SUM[10] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t un_bx : 1; /**< Enables PEM_INT_SUM[9] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t un_b2 : 1; /**< Enables PEM_INT_SUM[8] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t un_b1 : 1; /**< Enables PEM_INT_SUM[7] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t up_bx : 1; /**< Enables PEM_INT_SUM[6] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t up_b2 : 1; /**< Enables PEM_INT_SUM[5] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t up_b1 : 1; /**< Enables PEM_INT_SUM[4] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t pmem : 1; /**< Enables PEM_INT_SUM[3] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t pmei : 1; /**< Enables PEM_INT_SUM[2] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t se : 1; /**< Enables PEM_INT_SUM[1] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+ uint64_t aeri : 1; /**< Enables PEM_INT_SUM[0] to generate an
+ interrupt to the SLI as SLI_INT_SUM[MAC#_INT]. */
+#else
+ uint64_t aeri : 1;
+ uint64_t se : 1;
+ uint64_t pmei : 1;
+ uint64_t pmem : 1;
+ uint64_t up_b1 : 1;
+ uint64_t up_b2 : 1;
+ uint64_t up_bx : 1;
+ uint64_t un_b1 : 1;
+ uint64_t un_b2 : 1;
+ uint64_t un_bx : 1;
+ uint64_t exc : 1;
+ uint64_t rdlk : 1;
+ uint64_t crs_er : 1;
+ uint64_t crs_dr : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_pemx_int_enb_int_s cn61xx;
+ struct cvmx_pemx_int_enb_int_s cn63xx;
+ struct cvmx_pemx_int_enb_int_s cn63xxp1;
+ struct cvmx_pemx_int_enb_int_s cn66xx;
+ struct cvmx_pemx_int_enb_int_s cn68xx;
+ struct cvmx_pemx_int_enb_int_s cn68xxp1;
+ struct cvmx_pemx_int_enb_int_s cnf71xx;
+};
+typedef union cvmx_pemx_int_enb_int cvmx_pemx_int_enb_int_t;
+
+/**
+ * cvmx_pem#_int_sum
+ *
+ * Below are in pesc_csr
+ *
+ * PEM(0..1)_INT_SUM = PEM Interrupt Summary
+ *
+ * Interrupt conditions for the PEM.
+ */
+union cvmx_pemx_int_sum {
+ uint64_t u64;
+ struct cvmx_pemx_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t crs_dr : 1; /**< Had a CRS Timeout when Retries were disabled. */
+ uint64_t crs_er : 1; /**< Had a CRS Timeout when Retries were enabled. */
+ uint64_t rdlk : 1; /**< Received Read Lock TLP. */
+ uint64_t exc : 1; /**< Set when the PEM_DBG_INFO register has a bit
+ set and its corresponding PEM_DBG_INFO_EN bit
+ is set. */
+ uint64_t un_bx : 1; /**< Received N-TLP for an unknown Bar. */
+ uint64_t un_b2 : 1; /**< Received N-TLP for Bar2 when bar2 is disabled. */
+ uint64_t un_b1 : 1; /**< Received N-TLP for Bar1 when bar1 index valid
+ is not set. */
+ uint64_t up_bx : 1; /**< Received P-TLP for an unknown Bar. */
+ uint64_t up_b2 : 1; /**< Received P-TLP for Bar2 when bar2 is disabled. */
+ uint64_t up_b1 : 1; /**< Received P-TLP for Bar1 when bar1 index valid
+ is not set. */
+ uint64_t pmem : 1; /**< Received PME MSG.
+ (radm_pm_pme) */
+ uint64_t pmei : 1; /**< PME Interrupt.
+ (cfg_pme_int) */
+ uint64_t se : 1; /**< System Error, RC Mode Only.
+ (cfg_sys_err_rc) */
+ uint64_t aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only.
+ (cfg_aer_rc_err_int). */
+#else
+ uint64_t aeri : 1;
+ uint64_t se : 1;
+ uint64_t pmei : 1;
+ uint64_t pmem : 1;
+ uint64_t up_b1 : 1;
+ uint64_t up_b2 : 1;
+ uint64_t up_bx : 1;
+ uint64_t un_b1 : 1;
+ uint64_t un_b2 : 1;
+ uint64_t un_bx : 1;
+ uint64_t exc : 1;
+ uint64_t rdlk : 1;
+ uint64_t crs_er : 1;
+ uint64_t crs_dr : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_pemx_int_sum_s cn61xx;
+ struct cvmx_pemx_int_sum_s cn63xx;
+ struct cvmx_pemx_int_sum_s cn63xxp1;
+ struct cvmx_pemx_int_sum_s cn66xx;
+ struct cvmx_pemx_int_sum_s cn68xx;
+ struct cvmx_pemx_int_sum_s cn68xxp1;
+ struct cvmx_pemx_int_sum_s cnf71xx;
+};
+typedef union cvmx_pemx_int_sum cvmx_pemx_int_sum_t;
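+
+/* Usage sketch (editor's illustration): decoding the summary from an
+ * interrupt handler, assuming the CVMX_PEMX_INT_SUM(pem) address macro
+ * from earlier in this header. Whether the bits are write-one-to-clear is
+ * not stated here, so the sketch only reads and tests them.
+ *
+ *   cvmx_pemx_int_sum_t sum;
+ *   sum.u64 = cvmx_read_csr(CVMX_PEMX_INT_SUM(0));
+ *   if (sum.s.se)
+ *       cvmx_dprintf("PEM0: system error (cfg_sys_err_rc)\n");
+ *   if (sum.s.un_bx)
+ *       cvmx_dprintf("PEM0: N-TLP received for an unknown BAR\n");
+ */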
+
+/**
+ * cvmx_pem#_p2n_bar0_start
+ *
+ * PEM_P2N_BAR0_START = PEM PCIe to Npei BAR0 Start
+ *
+ * The starting address for addresses to be forwarded to the SLI in RC Mode.
+ */
+union cvmx_pemx_p2n_bar0_start {
+ uint64_t u64;
+ struct cvmx_pemx_p2n_bar0_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 50; /**< The starting address of the 16KB address space that
+ is the BAR0 address space. */
+ uint64_t reserved_0_13 : 14;
+#else
+ uint64_t reserved_0_13 : 14;
+ uint64_t addr : 50;
+#endif
+ } s;
+ struct cvmx_pemx_p2n_bar0_start_s cn61xx;
+ struct cvmx_pemx_p2n_bar0_start_s cn63xx;
+ struct cvmx_pemx_p2n_bar0_start_s cn63xxp1;
+ struct cvmx_pemx_p2n_bar0_start_s cn66xx;
+ struct cvmx_pemx_p2n_bar0_start_s cn68xx;
+ struct cvmx_pemx_p2n_bar0_start_s cn68xxp1;
+ struct cvmx_pemx_p2n_bar0_start_s cnf71xx;
+};
+typedef union cvmx_pemx_p2n_bar0_start cvmx_pemx_p2n_bar0_start_t;
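+
+/* Usage sketch (editor's illustration): with RESERVED_0_13 below ADDR, the
+ * ADDR field appears to hold bits [63:14] of the 16KB-aligned base, so a
+ * byte address would be shifted right by 14 before being written. The
+ * CVMX_PEMX_P2N_BAR0_START macro is assumed from earlier in this header.
+ *
+ *   cvmx_pemx_p2n_bar0_start_t bar0;
+ *   bar0.u64 = 0;
+ *   bar0.s.addr = 0x10000000ull >> 14;   // hypothetical 16KB-aligned base
+ *   cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(0), bar0.u64);
+ */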
+
+/**
+ * cvmx_pem#_p2n_bar1_start
+ *
+ * PEM_P2N_BAR1_START = PEM PCIe to Npei BAR1 Start
+ *
+ * The starting address for addresses to be forwarded to the SLI in RC Mode.
+ */
+union cvmx_pemx_p2n_bar1_start {
+ uint64_t u64;
+ struct cvmx_pemx_p2n_bar1_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 38; /**< The starting address of the 64KB address space
+ that is the BAR1 address space. */
+ uint64_t reserved_0_25 : 26;
+#else
+ uint64_t reserved_0_25 : 26;
+ uint64_t addr : 38;
+#endif
+ } s;
+ struct cvmx_pemx_p2n_bar1_start_s cn61xx;
+ struct cvmx_pemx_p2n_bar1_start_s cn63xx;
+ struct cvmx_pemx_p2n_bar1_start_s cn63xxp1;
+ struct cvmx_pemx_p2n_bar1_start_s cn66xx;
+ struct cvmx_pemx_p2n_bar1_start_s cn68xx;
+ struct cvmx_pemx_p2n_bar1_start_s cn68xxp1;
+ struct cvmx_pemx_p2n_bar1_start_s cnf71xx;
+};
+typedef union cvmx_pemx_p2n_bar1_start cvmx_pemx_p2n_bar1_start_t;
+
+/**
+ * cvmx_pem#_p2n_bar2_start
+ *
+ * PEM_P2N_BAR2_START = PEM PCIe to Npei BAR2 Start
+ *
+ * The starting address for addresses to be forwarded to the SLI in RC Mode.
+ */
+union cvmx_pemx_p2n_bar2_start {
+ uint64_t u64;
+ struct cvmx_pemx_p2n_bar2_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 23; /**< The starting address of the 2^41 address space
+ that is the BAR2 address space. */
+ uint64_t reserved_0_40 : 41;
+#else
+ uint64_t reserved_0_40 : 41;
+ uint64_t addr : 23;
+#endif
+ } s;
+ struct cvmx_pemx_p2n_bar2_start_s cn61xx;
+ struct cvmx_pemx_p2n_bar2_start_s cn63xx;
+ struct cvmx_pemx_p2n_bar2_start_s cn63xxp1;
+ struct cvmx_pemx_p2n_bar2_start_s cn66xx;
+ struct cvmx_pemx_p2n_bar2_start_s cn68xx;
+ struct cvmx_pemx_p2n_bar2_start_s cn68xxp1;
+ struct cvmx_pemx_p2n_bar2_start_s cnf71xx;
+};
+typedef union cvmx_pemx_p2n_bar2_start cvmx_pemx_p2n_bar2_start_t;
+
+/**
+ * cvmx_pem#_p2p_bar#_end
+ *
+ * PEM_P2P_BAR#_END = PEM Peer-To-Peer BAR0 End
+ *
+ * The ending address for addresses to be forwarded to the PCIe peer port.
+ */
+union cvmx_pemx_p2p_barx_end {
+ uint64_t u64;
+ struct cvmx_pemx_p2p_barx_end_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 52; /**< The ending address of the address window created
+ by this field and the PEM_P2P_BAR0_START[63:12]
+ field. The full 64-bits of address are created by:
+ [ADDR[63:12], 12'b0]. */
+ uint64_t reserved_0_11 : 12;
+#else
+ uint64_t reserved_0_11 : 12;
+ uint64_t addr : 52;
+#endif
+ } s;
+ struct cvmx_pemx_p2p_barx_end_s cn63xx;
+ struct cvmx_pemx_p2p_barx_end_s cn63xxp1;
+ struct cvmx_pemx_p2p_barx_end_s cn66xx;
+ struct cvmx_pemx_p2p_barx_end_s cn68xx;
+ struct cvmx_pemx_p2p_barx_end_s cn68xxp1;
+};
+typedef union cvmx_pemx_p2p_barx_end cvmx_pemx_p2p_barx_end_t;
+
+/**
+ * cvmx_pem#_p2p_bar#_start
+ *
+ * PEM_P2P_BAR#_START = PEM Peer-To-Peer BAR0 Start
+ *
+ * The starting address and enable for addresses to be forwarded to the PCIe peer port.
+ */
+union cvmx_pemx_p2p_barx_start {
+ uint64_t u64;
+ struct cvmx_pemx_p2p_barx_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 52; /**< The starting address of the address window created
+ by this field and the PEM_P2P_BAR0_END[63:12]
+ field. The full 64-bits of address are created by:
+ [ADDR[63:12], 12'b0]. */
+ uint64_t reserved_0_11 : 12;
+#else
+ uint64_t reserved_0_11 : 12;
+ uint64_t addr : 52;
+#endif
+ } s;
+ struct cvmx_pemx_p2p_barx_start_s cn63xx;
+ struct cvmx_pemx_p2p_barx_start_s cn63xxp1;
+ struct cvmx_pemx_p2p_barx_start_s cn66xx;
+ struct cvmx_pemx_p2p_barx_start_s cn68xx;
+ struct cvmx_pemx_p2p_barx_start_s cn68xxp1;
+};
+typedef union cvmx_pemx_p2p_barx_start cvmx_pemx_p2p_barx_start_t;
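+
+/* Usage sketch (editor's illustration): opening a peer-to-peer forwarding
+ * window by programming a matching START/END pair. Per the descriptions
+ * above, ADDR holds bits [63:12] of the boundary, so byte addresses are
+ * shifted right by 12. The CVMX_PEMX_P2P_BARX_START/END address macros are
+ * assumed from earlier in this header; the window itself is hypothetical.
+ *
+ *   uint64_t base = 0x1000000000ull;           // hypothetical window base
+ *   cvmx_pemx_p2p_barx_start_t bs;
+ *   cvmx_pemx_p2p_barx_end_t be;
+ *   bs.u64 = 0;
+ *   bs.s.addr = base >> 12;
+ *   be.u64 = 0;
+ *   be.s.addr = (base + (1ull << 30)) >> 12;   // 1GB window
+ *   cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(0, 0), bs.u64);
+ *   cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(0, 0), be.u64);
+ */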
+
+/**
+ * cvmx_pem#_tlp_credits
+ *
+ * PEM_TLP_CREDITS = PEM TLP Credits
+ *
+ * Specifies the number of credits for the PEM to use in moving TLPs. When this register is written, the credit values are
+ * reset to the register value. A write to this register should take place BEFORE traffic flow starts.
+ */
+union cvmx_pemx_tlp_credits {
+ uint64_t u64;
+ struct cvmx_pemx_tlp_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t peai_ppf : 8; /**< TLP credits for Completion TLPs in the Peer.
+ The value in this register should not be changed.
+ Values other than 0x80 can lead to unpredictable
+ behavior. */
+ uint64_t pem_cpl : 8; /**< TLP credits for Completion TLPs in the Peer.
+ Legal values are 0x24 to 0x80. */
+ uint64_t pem_np : 8; /**< TLP credits for Non-Posted TLPs in the Peer.
+ Legal values are 0x4 to 0x10. */
+ uint64_t pem_p : 8; /**< TLP credits for Posted TLPs in the Peer.
+ Legal values are 0x24 to 0x80. */
+ uint64_t sli_cpl : 8; /**< TLP credits for Completion TLPs in the SLI.
+ Legal values are 0x24 to 0x80. */
+ uint64_t sli_np : 8; /**< TLP credits for Non-Posted TLPs in the SLI.
+ Legal values are 0x4 to 0x10. */
+ uint64_t sli_p : 8; /**< TLP credits for Posted TLPs in the SLI.
+ Legal values are 0x24 to 0x80. */
+#else
+ uint64_t sli_p : 8;
+ uint64_t sli_np : 8;
+ uint64_t sli_cpl : 8;
+ uint64_t pem_p : 8;
+ uint64_t pem_np : 8;
+ uint64_t pem_cpl : 8;
+ uint64_t peai_ppf : 8;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_pemx_tlp_credits_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t peai_ppf : 8; /**< TLP credits for Completion TLPs in the Peer.
+ The value in this register should not be changed.
+ Values other than 0x80 can lead to unpredictable
+ behavior. */
+ uint64_t reserved_24_47 : 24;
+ uint64_t sli_cpl : 8; /**< TLP credits for Completion TLPs in the SLI.
+ Legal values are 0x24 to 0x80. */
+ uint64_t sli_np : 8; /**< TLP credits for Non-Posted TLPs in the SLI.
+ Legal values are 0x4 to 0x10. */
+ uint64_t sli_p : 8; /**< TLP credits for Posted TLPs in the SLI.
+ Legal values are 0x24 to 0x80. */
+#else
+ uint64_t sli_p : 8;
+ uint64_t sli_np : 8;
+ uint64_t sli_cpl : 8;
+ uint64_t reserved_24_47 : 24;
+ uint64_t peai_ppf : 8;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn61xx;
+ struct cvmx_pemx_tlp_credits_s cn63xx;
+ struct cvmx_pemx_tlp_credits_s cn63xxp1;
+ struct cvmx_pemx_tlp_credits_s cn66xx;
+ struct cvmx_pemx_tlp_credits_s cn68xx;
+ struct cvmx_pemx_tlp_credits_s cn68xxp1;
+ struct cvmx_pemx_tlp_credits_cn61xx cnf71xx;
+};
+typedef union cvmx_pemx_tlp_credits cvmx_pemx_tlp_credits_t;
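+
+/* Usage sketch (editor's illustration): per the note above, credits must
+ * be programmed before traffic starts, and each field has a documented
+ * legal range. CVMX_PEMX_TLP_CREDITS(pem) is assumed from earlier in this
+ * header.
+ *
+ *   cvmx_pemx_tlp_credits_t tlp;
+ *   tlp.u64 = cvmx_read_csr(CVMX_PEMX_TLP_CREDITS(0));
+ *   tlp.s.sli_p = 0x80;     // posted, legal range 0x24-0x80
+ *   tlp.s.sli_np = 0x10;    // non-posted, legal range 0x4-0x10
+ *   tlp.s.sli_cpl = 0x80;   // completion, legal range 0x24-0x80
+ *   cvmx_write_csr(CVMX_PEMX_TLP_CREDITS(0), tlp.u64);  // before traffic
+ */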
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pemx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pescx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pescx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pescx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1056 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pescx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pescx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_PESCX_DEFS_H__
+#define __CVMX_PESCX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_BIST_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_BIST_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000018ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000018ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_BIST_STATUS2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_BIST_STATUS2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000418ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_BIST_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000418ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_CFG_RD(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_CFG_RD(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000030ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_CFG_RD(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000030ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_CFG_WR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_CFG_WR(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000028ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_CFG_WR(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000028ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_CPL_LUT_VALID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_CPL_LUT_VALID(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000098ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_CPL_LUT_VALID(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000098ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_CTL_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_CTL_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000000ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000000ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_CTL_STATUS2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_CTL_STATUS2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000400ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_CTL_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000400ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_DBG_INFO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_DBG_INFO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000008ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_DBG_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000008ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_DBG_INFO_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_DBG_INFO_EN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C80000A0ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_DBG_INFO_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800C80000A0ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_DIAG_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_DIAG_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000020ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_DIAG_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000020ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_P2N_BAR0_START(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_P2N_BAR0_START(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000080ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_P2N_BAR0_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000080ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_P2N_BAR1_START(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_P2N_BAR1_START(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000088ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_P2N_BAR1_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000088ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_P2N_BAR2_START(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_P2N_BAR2_START(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000090ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_P2N_BAR2_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000090ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_P2P_BARX_END(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_PESCX_P2P_BARX_END(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16;
+}
+#else
+#define CVMX_PESCX_P2P_BARX_END(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_P2P_BARX_START(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_PESCX_P2P_BARX_START(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16;
+}
+#else
+#define CVMX_PESCX_P2P_BARX_START(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PESCX_TLP_CREDITS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_PESCX_TLP_CREDITS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000038ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_PESCX_TLP_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000038ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+
+/**
+ * cvmx_pesc#_bist_status
+ *
+ * PESC_BIST_STATUS = PESC BIST Status
+ *
+ * Contains the BIST status bits for the different PESC memories.
+ */
+union cvmx_pescx_bist_status {
+ uint64_t u64;
+ struct cvmx_pescx_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t rqdata5 : 1; /**< Rx Queue Data Memory5. */
+ uint64_t ctlp_or : 1; /**< C-TLP Order Fifo. */
+ uint64_t ntlp_or : 1; /**< N-TLP Order Fifo. */
+ uint64_t ptlp_or : 1; /**< P-TLP Order Fifo. */
+ uint64_t retry : 1; /**< Retry Buffer. */
+ uint64_t rqdata0 : 1; /**< Rx Queue Data Memory0. */
+ uint64_t rqdata1 : 1; /**< Rx Queue Data Memory1. */
+ uint64_t rqdata2 : 1; /**< Rx Queue Data Memory2. */
+ uint64_t rqdata3 : 1; /**< Rx Queue Data Memory3. */
+ uint64_t rqdata4 : 1; /**< Rx Queue Data Memory4. */
+ uint64_t rqhdr1 : 1; /**< Rx Queue Header1. */
+ uint64_t rqhdr0 : 1; /**< Rx Queue Header0. */
+ uint64_t sot : 1; /**< SOT Buffer. */
+#else
+ uint64_t sot : 1;
+ uint64_t rqhdr0 : 1;
+ uint64_t rqhdr1 : 1;
+ uint64_t rqdata4 : 1;
+ uint64_t rqdata3 : 1;
+ uint64_t rqdata2 : 1;
+ uint64_t rqdata1 : 1;
+ uint64_t rqdata0 : 1;
+ uint64_t retry : 1;
+ uint64_t ptlp_or : 1;
+ uint64_t ntlp_or : 1;
+ uint64_t ctlp_or : 1;
+ uint64_t rqdata5 : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_pescx_bist_status_s cn52xx;
+ struct cvmx_pescx_bist_status_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t ctlp_or : 1; /**< C-TLP Order Fifo. */
+ uint64_t ntlp_or : 1; /**< N-TLP Order Fifo. */
+ uint64_t ptlp_or : 1; /**< P-TLP Order Fifo. */
+ uint64_t retry : 1; /**< Retry Buffer. */
+ uint64_t rqdata0 : 1; /**< Rx Queue Data Memory0. */
+ uint64_t rqdata1 : 1; /**< Rx Queue Data Memory1. */
+ uint64_t rqdata2 : 1; /**< Rx Queue Data Memory2. */
+ uint64_t rqdata3 : 1; /**< Rx Queue Data Memory3. */
+ uint64_t rqdata4 : 1; /**< Rx Queue Data Memory4. */
+ uint64_t rqhdr1 : 1; /**< Rx Queue Header1. */
+ uint64_t rqhdr0 : 1; /**< Rx Queue Header0. */
+ uint64_t sot : 1; /**< SOT Buffer. */
+#else
+ uint64_t sot : 1;
+ uint64_t rqhdr0 : 1;
+ uint64_t rqhdr1 : 1;
+ uint64_t rqdata4 : 1;
+ uint64_t rqdata3 : 1;
+ uint64_t rqdata2 : 1;
+ uint64_t rqdata1 : 1;
+ uint64_t rqdata0 : 1;
+ uint64_t retry : 1;
+ uint64_t ptlp_or : 1;
+ uint64_t ntlp_or : 1;
+ uint64_t ctlp_or : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn52xxp1;
+ struct cvmx_pescx_bist_status_s cn56xx;
+ struct cvmx_pescx_bist_status_cn52xxp1 cn56xxp1;
+};
+typedef union cvmx_pescx_bist_status cvmx_pescx_bist_status_t;
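+
+/* Usage sketch (editor's illustration): initialization code commonly
+ * treats any set bit here as a BIST failure for the named memory; that
+ * convention is an assumption, not stated by this header. The
+ * CVMX_PESCX_BIST_STATUS macro is defined above.
+ *
+ *   cvmx_pescx_bist_status_t bist;
+ *   bist.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(0));
+ *   if (bist.u64)
+ *       cvmx_dprintf("PESC0 BIST failure: 0x%016llx\n",
+ *                    (unsigned long long)bist.u64);
+ */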
+
+/**
+ * cvmx_pesc#_bist_status2
+ *
+ * PESC(0..1)_BIST_STATUS2 = PESC BIST Status Register
+ *
+ * Results from BIST runs of PESC's memories.
+ */
+union cvmx_pescx_bist_status2 {
+ uint64_t u64;
+ struct cvmx_pescx_bist_status2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t cto_p2e : 1; /**< BIST Status for the cto_p2e_fifo */
+ uint64_t e2p_cpl : 1; /**< BIST Status for the e2p_cpl_fifo */
+ uint64_t e2p_n : 1; /**< BIST Status for the e2p_n_fifo */
+ uint64_t e2p_p : 1; /**< BIST Status for the e2p_p_fifo */
+ uint64_t e2p_rsl : 1; /**< BIST Status for the e2p_rsl__fifo */
+ uint64_t dbg_p2e : 1; /**< BIST Status for the dbg_p2e_fifo */
+ uint64_t peai_p2e : 1; /**< BIST Status for the peai__pesc_fifo */
+ uint64_t rsl_p2e : 1; /**< BIST Status for the rsl_p2e_fifo */
+ uint64_t pef_tpf1 : 1; /**< BIST Status for the pef_tlp_p_fifo1 */
+ uint64_t pef_tpf0 : 1; /**< BIST Status for the pef_tlp_p_fifo0 */
+ uint64_t pef_tnf : 1; /**< BIST Status for the pef_tlp_n_fifo */
+ uint64_t pef_tcf1 : 1; /**< BIST Status for the pef_tlp_cpl_fifo1 */
+ uint64_t pef_tc0 : 1; /**< BIST Status for the pef_tlp_cpl_fifo0 */
+ uint64_t ppf : 1; /**< BIST Status for the ppf_fifo */
+#else
+ uint64_t ppf : 1;
+ uint64_t pef_tc0 : 1;
+ uint64_t pef_tcf1 : 1;
+ uint64_t pef_tnf : 1;
+ uint64_t pef_tpf0 : 1;
+ uint64_t pef_tpf1 : 1;
+ uint64_t rsl_p2e : 1;
+ uint64_t peai_p2e : 1;
+ uint64_t dbg_p2e : 1;
+ uint64_t e2p_rsl : 1;
+ uint64_t e2p_p : 1;
+ uint64_t e2p_n : 1;
+ uint64_t e2p_cpl : 1;
+ uint64_t cto_p2e : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_pescx_bist_status2_s cn52xx;
+ struct cvmx_pescx_bist_status2_s cn52xxp1;
+ struct cvmx_pescx_bist_status2_s cn56xx;
+ struct cvmx_pescx_bist_status2_s cn56xxp1;
+};
+typedef union cvmx_pescx_bist_status2 cvmx_pescx_bist_status2_t;
+
+/**
+ * cvmx_pesc#_cfg_rd
+ *
+ * PESC_CFG_RD = PESC Configuration Read
+ *
+ * Allows read access to the configuration in the PCIe Core.
+ */
+union cvmx_pescx_cfg_rd {
+ uint64_t u64;
+ struct cvmx_pescx_cfg_rd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 32; /**< Data. */
+ uint64_t addr : 32; /**< Address to read. A write to this register
+ starts a read operation. */
+#else
+ uint64_t addr : 32;
+ uint64_t data : 32;
+#endif
+ } s;
+ struct cvmx_pescx_cfg_rd_s cn52xx;
+ struct cvmx_pescx_cfg_rd_s cn52xxp1;
+ struct cvmx_pescx_cfg_rd_s cn56xx;
+ struct cvmx_pescx_cfg_rd_s cn56xxp1;
+};
+typedef union cvmx_pescx_cfg_rd cvmx_pescx_cfg_rd_t;
+
+/**
+ * cvmx_pesc#_cfg_wr
+ *
+ * PESC_CFG_WR = PESC Configuration Write
+ *
+ * Allows write access to the configuration in the PCIe Core.
+ */
+union cvmx_pescx_cfg_wr {
+ uint64_t u64;
+ struct cvmx_pescx_cfg_wr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 32; /**< Data to write. A write to this register starts
+ a write operation. */
+ uint64_t addr : 32; /**< Address to write. A write to this register starts
+ a write operation. */
+#else
+ uint64_t addr : 32;
+ uint64_t data : 32;
+#endif
+ } s;
+ struct cvmx_pescx_cfg_wr_s cn52xx;
+ struct cvmx_pescx_cfg_wr_s cn52xxp1;
+ struct cvmx_pescx_cfg_wr_s cn56xx;
+ struct cvmx_pescx_cfg_wr_s cn56xxp1;
+};
+typedef union cvmx_pescx_cfg_wr cvmx_pescx_cfg_wr_t;
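+
+/* Usage sketch (editor's illustration): CFG_RD/CFG_WR form an indirect
+ * window into the PCIe core's config registers. Writing the address starts
+ * the operation; for a read, DATA is then taken from a read-back of the
+ * same register. The config offset and value below are hypothetical; the
+ * CVMX_PESCX_CFG_RD/CVMX_PESCX_CFG_WR macros are defined above.
+ *
+ *   cvmx_pescx_cfg_rd_t rd;
+ *   rd.u64 = 0;
+ *   rd.s.addr = 0x70;                              // hypothetical offset
+ *   cvmx_write_csr(CVMX_PESCX_CFG_RD(0), rd.u64);  // starts the read
+ *   rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(0));  // rd.s.data = result
+ *
+ *   cvmx_pescx_cfg_wr_t wr;
+ *   wr.u64 = 0;
+ *   wr.s.addr = 0x70;
+ *   wr.s.data = 0x12345678;
+ *   cvmx_write_csr(CVMX_PESCX_CFG_WR(0), wr.u64);  // starts the write
+ */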
+
+/**
+ * cvmx_pesc#_cpl_lut_valid
+ *
+ * PESC_CPL_LUT_VALID = PESC Completion Lookup Table Valid
+ *
+ * Bit set for outstanding tag read.
+ */
+union cvmx_pescx_cpl_lut_valid {
+ uint64_t u64;
+ struct cvmx_pescx_cpl_lut_valid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t tag : 32; /**< Each set bit corresponds to an outstanding tag
+ expecting a completion. */
+#else
+ uint64_t tag : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pescx_cpl_lut_valid_s cn52xx;
+ struct cvmx_pescx_cpl_lut_valid_s cn52xxp1;
+ struct cvmx_pescx_cpl_lut_valid_s cn56xx;
+ struct cvmx_pescx_cpl_lut_valid_s cn56xxp1;
+};
+typedef union cvmx_pescx_cpl_lut_valid cvmx_pescx_cpl_lut_valid_t;
+
+/**
+ * cvmx_pesc#_ctl_status
+ *
+ * PESC_CTL_STATUS = PESC Control Status
+ *
+ * General control and status of the PESC.
+ */
+union cvmx_pescx_ctl_status {
+ uint64_t u64;
+ struct cvmx_pescx_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t dnum : 5; /**< Primary bus device number. */
+ uint64_t pbus : 8; /**< Primary bus number. */
+ uint64_t qlm_cfg : 2; /**< The QLM configuration pad bits. */
+ uint64_t lane_swp : 1; /**< Lane Swap. For PEDC1, '0' means no lane swap
+ and '1' enables lane swap. This bit has no effect
+ on PEDC0 and should be set before enabling PEDC1. */
+ uint64_t pm_xtoff : 1; /**< When WRITTEN with a '1' a single cycle pulse is
+ sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t pm_xpme : 1; /**< When WRITTEN with a '1' a single cycle pulse is
+ sent to the PCIe core pm_xmt_pme port. EP mode. */
+ uint64_t ob_p_cmd : 1; /**< When WRITTEN with a '1' a single cycle pulse is
+ sent to the PCIe core outband_pwrup_cmd port. EP mode. */
+ uint64_t reserved_7_8 : 2;
+ uint64_t nf_ecrc : 1; /**< Do not forward peer-to-peer ECRC TLPs. */
+ uint64_t dly_one : 1; /**< When set the output client state machines will
+ wait one cycle before starting a new TLP out. */
+ uint64_t lnk_enb : 1; /**< When set to '1' the link is enabled; when '0'
+ the link is disabled. This bit is only active
+ when in RC mode. */
+ uint64_t ro_ctlp : 1; /**< When set '1' C-TLPs that have the RO bit set will
+ not wait for P-TLPs that normally would be sent
+ first. */
+ uint64_t reserved_2_2 : 1;
+ uint64_t inv_ecrc : 1; /**< When '1' causes the LSB of the ECRC to be inverted. */
+ uint64_t inv_lcrc : 1; /**< When '1' causes the LSB of the LCRC to be inverted. */
+#else
+ uint64_t inv_lcrc : 1;
+ uint64_t inv_ecrc : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t ro_ctlp : 1;
+ uint64_t lnk_enb : 1;
+ uint64_t dly_one : 1;
+ uint64_t nf_ecrc : 1;
+ uint64_t reserved_7_8 : 2;
+ uint64_t ob_p_cmd : 1;
+ uint64_t pm_xpme : 1;
+ uint64_t pm_xtoff : 1;
+ uint64_t lane_swp : 1;
+ uint64_t qlm_cfg : 2;
+ uint64_t pbus : 8;
+ uint64_t dnum : 5;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_pescx_ctl_status_s cn52xx;
+ struct cvmx_pescx_ctl_status_s cn52xxp1;
+ struct cvmx_pescx_ctl_status_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t dnum : 5; /**< Primary bus device number. */
+ uint64_t pbus : 8; /**< Primary bus number. */
+ uint64_t qlm_cfg : 2; /**< The QLM configuration pad bits. */
+ uint64_t reserved_12_12 : 1;
+ uint64_t pm_xtoff : 1; /**< When WRITTEN with a '1' a single cycle pulse is
+ sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t pm_xpme : 1; /**< When WRITTEN with a '1' a single cycle pulse is
+ sent to the PCIe core pm_xmt_pme port. EP mode. */
+ uint64_t ob_p_cmd : 1; /**< When WRITTEN with a '1' a single cycle pulse is
+ sent to the PCIe core outband_pwrup_cmd port. EP mode. */
+ uint64_t reserved_7_8 : 2;
+ uint64_t nf_ecrc : 1; /**< Do not forward peer-to-peer ECRC TLPs. */
+ uint64_t dly_one : 1; /**< When set the output client state machines will
+ wait one cycle before starting a new TLP out. */
+ uint64_t lnk_enb : 1; /**< When set to '1' the link is enabled; when '0'
+ the link is disabled. This bit is only active
+ when in RC mode. */
+ uint64_t ro_ctlp : 1; /**< When set '1' C-TLPs that have the RO bit set will
+ not wait for P-TLPs that normally would be sent
+ first. */
+ uint64_t reserved_2_2 : 1;
+ uint64_t inv_ecrc : 1; /**< When '1' causes the LSB of the ECRC to be inverted. */
+ uint64_t inv_lcrc : 1; /**< When '1' causes the LSB of the LCRC to be inverted. */
+#else
+ uint64_t inv_lcrc : 1;
+ uint64_t inv_ecrc : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t ro_ctlp : 1;
+ uint64_t lnk_enb : 1;
+ uint64_t dly_one : 1;
+ uint64_t nf_ecrc : 1;
+ uint64_t reserved_7_8 : 2;
+ uint64_t ob_p_cmd : 1;
+ uint64_t pm_xpme : 1;
+ uint64_t pm_xtoff : 1;
+ uint64_t reserved_12_12 : 1;
+ uint64_t qlm_cfg : 2;
+ uint64_t pbus : 8;
+ uint64_t dnum : 5;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn56xx;
+ struct cvmx_pescx_ctl_status_cn56xx cn56xxp1;
+};
+typedef union cvmx_pescx_ctl_status cvmx_pescx_ctl_status_t;
+
+/**
+ * cvmx_pesc#_ctl_status2
+ *
+ * Below are in PESC
+ *
+ * PESC(0..1)_CTL_STATUS2 = PESC Control Status 2
+ *
+ * Additional control and status for the PESC.
+ */
+union cvmx_pescx_ctl_status2 {
+ uint64_t u64;
+ struct cvmx_pescx_ctl_status2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t pclk_run : 1; /**< When the pce_clk is running this bit will be '1'.
+ Writing a '1' to this location will cause the
+ bit to be cleared, but if the pce_clk is running
+ this bit will be re-set. */
+ uint64_t pcierst : 1; /**< Set to '1' when PCIe is in reset. */
+#else
+ uint64_t pcierst : 1;
+ uint64_t pclk_run : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_pescx_ctl_status2_s cn52xx;
+ struct cvmx_pescx_ctl_status2_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t pcierst : 1; /**< Set to '1' when PCIe is in reset. */
+#else
+ uint64_t pcierst : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn52xxp1;
+ struct cvmx_pescx_ctl_status2_s cn56xx;
+ struct cvmx_pescx_ctl_status2_cn52xxp1 cn56xxp1;
+};
+typedef union cvmx_pescx_ctl_status2 cvmx_pescx_ctl_status2_t;
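+
+/* Usage sketch (editor's illustration): because PCLK_RUN clears on a write
+ * of '1' but is set again by hardware while pce_clk runs, clearing it and
+ * re-reading gives a simple clock-liveness probe. The settling delay is a
+ * guess, not a documented requirement; CVMX_PESCX_CTL_STATUS2 is defined
+ * above.
+ *
+ *   cvmx_pescx_ctl_status2_t cs2;
+ *   cs2.u64 = 0;
+ *   cs2.s.pclk_run = 1;
+ *   cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(0), cs2.u64);  // clear the bit
+ *   cvmx_wait(1000);                                     // let it re-set
+ *   cs2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(0));
+ *   if (!cs2.s.pclk_run)
+ *       cvmx_dprintf("PESC0: pce_clk does not appear to be running\n");
+ */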
+
+/**
+ * cvmx_pesc#_dbg_info
+ *
+ * PESC(0..1)_DBG_INFO = PESC Debug Information
+ *
+ * General debug info.
+ */
+union cvmx_pescx_dbg_info {
+ uint64_t u64;
+ struct cvmx_pescx_dbg_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t ecrc_e : 1; /**< Received an ECRC error.
+ radm_ecrc_err */
+ uint64_t rawwpp : 1; /**< Received a write with poisoned payload
+ radm_rcvd_wreq_poisoned */
+ uint64_t racpp : 1; /**< Received a completion with poisoned payload
+ radm_rcvd_cpl_poisoned */
+ uint64_t ramtlp : 1; /**< Received a malformed TLP
+ radm_mlf_tlp_err */
+ uint64_t rarwdns : 1; /**< Received a request which the device does not support
+ radm_rcvd_ur_req */
+ uint64_t caar : 1; /**< Completer aborted a request
+ radm_rcvd_ca_req
+ This bit will never be set because Octeon does
+ not generate Completer Aborts. */
+ uint64_t racca : 1; /**< Received a completion with CA status
+ radm_rcvd_cpl_ca */
+ uint64_t racur : 1; /**< Received a completion with UR status
+ radm_rcvd_cpl_ur */
+ uint64_t rauc : 1; /**< Received an unexpected completion
+ radm_unexp_cpl_err */
+ uint64_t rqo : 1; /**< Receive queue overflow. Normally happens only when
+ flow control advertisements are ignored
+ radm_qoverflow */
+ uint64_t fcuv : 1; /**< Flow Control Update Violation (opt. checks)
+ int_xadm_fc_prot_err */
+ uint64_t rpe : 1; /**< When the PHY reports 8B/10B decode error
+ (RxStatus = 3b100) or disparity error
+ (RxStatus = 3b111), the signal rmlh_rcvd_err will
+ be asserted.
+ rmlh_rcvd_err */
+ uint64_t fcpvwt : 1; /**< Flow Control Protocol Violation (Watchdog Timer)
+ rtlh_fc_prot_err */
+ uint64_t dpeoosd : 1; /**< DLLP protocol error (out of sequence DLLP)
+ rdlh_prot_err */
+ uint64_t rtwdle : 1; /**< Received TLP with DataLink Layer Error
+ rdlh_bad_tlp_err */
+ uint64_t rdwdle : 1; /**< Received DLLP with DataLink Layer Error
+ rdlh_bad_dllp_err */
+ uint64_t mre : 1; /**< Max Retries Exceeded
+ xdlh_replay_num_rlover_err */
+ uint64_t rte : 1; /**< Replay Timer Expired
+ xdlh_replay_timeout_err
+ This bit is set when the REPLAY_TIMER expires in
+ the PCIE core. The probability of this bit being
+ set will increase with the traffic load. */
+ uint64_t acto : 1; /**< A Completion Timeout Occurred
+ pedc_radm_cpl_timeout */
+ uint64_t rvdm : 1; /**< Received Vendor-Defined Message
+ pedc_radm_vendor_msg */
+ uint64_t rumep : 1; /**< Received Unlock Message (EP Mode Only)
+ pedc_radm_msg_unlock */
+ uint64_t rptamrc : 1; /**< Received PME Turnoff Acknowledge Message
+ (RC Mode only)
+ pedc_radm_pm_to_ack */
+ uint64_t rpmerc : 1; /**< Received PME Message (RC Mode only)
+ pedc_radm_pm_pme */
+ uint64_t rfemrc : 1; /**< Received Fatal Error Message (RC Mode only)
+ pedc_radm_fatal_err
+ Bit set when a message with ERR_FATAL is set. */
+ uint64_t rnfemrc : 1; /**< Received Non-Fatal Error Message (RC Mode only)
+ pedc_radm_nonfatal_err */
+ uint64_t rcemrc : 1; /**< Received Correctable Error Message (RC Mode only)
+ pedc_radm_correctable_err */
+ uint64_t rpoison : 1; /**< Received Poisoned TLP
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv */
+ uint64_t recrce : 1; /**< Received ECRC Error
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot */
+ uint64_t rtlplle : 1; /**< Received TLP has link layer error
+ pedc_radm_trgt1_dllp_abort & pedc__radm_trgt1_eot */
+ uint64_t rtlpmal : 1; /**< Received TLP is malformed or a message.
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot
+ If the core receives a MSG (or Vendor Message)
+ this bit will be set. */
+ uint64_t spoison : 1; /**< Poisoned TLP sent
+ peai__client0_tlp_ep & peai__client0_tlp_hv */
+#else
+ uint64_t spoison : 1;
+ uint64_t rtlpmal : 1;
+ uint64_t rtlplle : 1;
+ uint64_t recrce : 1;
+ uint64_t rpoison : 1;
+ uint64_t rcemrc : 1;
+ uint64_t rnfemrc : 1;
+ uint64_t rfemrc : 1;
+ uint64_t rpmerc : 1;
+ uint64_t rptamrc : 1;
+ uint64_t rumep : 1;
+ uint64_t rvdm : 1;
+ uint64_t acto : 1;
+ uint64_t rte : 1;
+ uint64_t mre : 1;
+ uint64_t rdwdle : 1;
+ uint64_t rtwdle : 1;
+ uint64_t dpeoosd : 1;
+ uint64_t fcpvwt : 1;
+ uint64_t rpe : 1;
+ uint64_t fcuv : 1;
+ uint64_t rqo : 1;
+ uint64_t rauc : 1;
+ uint64_t racur : 1;
+ uint64_t racca : 1;
+ uint64_t caar : 1;
+ uint64_t rarwdns : 1;
+ uint64_t ramtlp : 1;
+ uint64_t racpp : 1;
+ uint64_t rawwpp : 1;
+ uint64_t ecrc_e : 1;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_pescx_dbg_info_s cn52xx;
+ struct cvmx_pescx_dbg_info_s cn52xxp1;
+ struct cvmx_pescx_dbg_info_s cn56xx;
+ struct cvmx_pescx_dbg_info_s cn56xxp1;
+};
+typedef union cvmx_pescx_dbg_info cvmx_pescx_dbg_info_t;
+
+/**
+ * cvmx_pesc#_dbg_info_en
+ *
+ * PESC(0..1)_DBG_INFO_EN = PESC Debug Information Enable
+ *
+ * Allows PESC_DBG_INFO to generate interrupts when the corresponding enable bit is set.
+ */
+union cvmx_pescx_dbg_info_en {
+ uint64_t u64;
+ struct cvmx_pescx_dbg_info_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t ecrc_e : 1; /**< Allows PESC_DBG_INFO[30] to generate an interrupt. */
+ uint64_t rawwpp : 1; /**< Allows PESC_DBG_INFO[29] to generate an interrupt. */
+ uint64_t racpp : 1; /**< Allows PESC_DBG_INFO[28] to generate an interrupt. */
+ uint64_t ramtlp : 1; /**< Allows PESC_DBG_INFO[27] to generate an interrupt. */
+ uint64_t rarwdns : 1; /**< Allows PESC_DBG_INFO[26] to generate an interrupt. */
+ uint64_t caar : 1; /**< Allows PESC_DBG_INFO[25] to generate an interrupt. */
+ uint64_t racca : 1; /**< Allows PESC_DBG_INFO[24] to generate an interrupt. */
+ uint64_t racur : 1; /**< Allows PESC_DBG_INFO[23] to generate an interrupt. */
+ uint64_t rauc : 1; /**< Allows PESC_DBG_INFO[22] to generate an interrupt. */
+ uint64_t rqo : 1; /**< Allows PESC_DBG_INFO[21] to generate an interrupt. */
+ uint64_t fcuv : 1; /**< Allows PESC_DBG_INFO[20] to generate an interrupt. */
+ uint64_t rpe : 1; /**< Allows PESC_DBG_INFO[19] to generate an interrupt. */
+ uint64_t fcpvwt : 1; /**< Allows PESC_DBG_INFO[18] to generate an interrupt. */
+ uint64_t dpeoosd : 1; /**< Allows PESC_DBG_INFO[17] to generate an interrupt. */
+ uint64_t rtwdle : 1; /**< Allows PESC_DBG_INFO[16] to generate an interrupt. */
+ uint64_t rdwdle : 1; /**< Allows PESC_DBG_INFO[15] to generate an interrupt. */
+ uint64_t mre : 1; /**< Allows PESC_DBG_INFO[14] to generate an interrupt. */
+ uint64_t rte : 1; /**< Allows PESC_DBG_INFO[13] to generate an interrupt. */
+ uint64_t acto : 1; /**< Allows PESC_DBG_INFO[12] to generate an interrupt. */
+ uint64_t rvdm : 1; /**< Allows PESC_DBG_INFO[11] to generate an interrupt. */
+ uint64_t rumep : 1; /**< Allows PESC_DBG_INFO[10] to generate an interrupt. */
+ uint64_t rptamrc : 1; /**< Allows PESC_DBG_INFO[9] to generate an interrupt. */
+ uint64_t rpmerc : 1; /**< Allows PESC_DBG_INFO[8] to generate an interrupt. */
+ uint64_t rfemrc : 1; /**< Allows PESC_DBG_INFO[7] to generate an interrupt. */
+ uint64_t rnfemrc : 1; /**< Allows PESC_DBG_INFO[6] to generate an interrupt. */
+ uint64_t rcemrc : 1; /**< Allows PESC_DBG_INFO[5] to generate an interrupt. */
+ uint64_t rpoison : 1; /**< Allows PESC_DBG_INFO[4] to generate an interrupt. */
+ uint64_t recrce : 1; /**< Allows PESC_DBG_INFO[3] to generate an interrupt. */
+ uint64_t rtlplle : 1; /**< Allows PESC_DBG_INFO[2] to generate an interrupt. */
+ uint64_t rtlpmal : 1; /**< Allows PESC_DBG_INFO[1] to generate an interrupt. */
+ uint64_t spoison : 1; /**< Allows PESC_DBG_INFO[0] to generate an interrupt. */
+#else
+ uint64_t spoison : 1;
+ uint64_t rtlpmal : 1;
+ uint64_t rtlplle : 1;
+ uint64_t recrce : 1;
+ uint64_t rpoison : 1;
+ uint64_t rcemrc : 1;
+ uint64_t rnfemrc : 1;
+ uint64_t rfemrc : 1;
+ uint64_t rpmerc : 1;
+ uint64_t rptamrc : 1;
+ uint64_t rumep : 1;
+ uint64_t rvdm : 1;
+ uint64_t acto : 1;
+ uint64_t rte : 1;
+ uint64_t mre : 1;
+ uint64_t rdwdle : 1;
+ uint64_t rtwdle : 1;
+ uint64_t dpeoosd : 1;
+ uint64_t fcpvwt : 1;
+ uint64_t rpe : 1;
+ uint64_t fcuv : 1;
+ uint64_t rqo : 1;
+ uint64_t rauc : 1;
+ uint64_t racur : 1;
+ uint64_t racca : 1;
+ uint64_t caar : 1;
+ uint64_t rarwdns : 1;
+ uint64_t ramtlp : 1;
+ uint64_t racpp : 1;
+ uint64_t rawwpp : 1;
+ uint64_t ecrc_e : 1;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_pescx_dbg_info_en_s cn52xx;
+ struct cvmx_pescx_dbg_info_en_s cn52xxp1;
+ struct cvmx_pescx_dbg_info_en_s cn56xx;
+ struct cvmx_pescx_dbg_info_en_s cn56xxp1;
+};
+typedef union cvmx_pescx_dbg_info_en cvmx_pescx_dbg_info_en_t;
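+
+/* Usage sketch (editor's illustration): each enable bit lets the matching
+ * PESC_DBG_INFO bit raise an interrupt, so individual error conditions can
+ * be opted into for reporting. CVMX_PESCX_DBG_INFO_EN is defined above.
+ *
+ *   cvmx_pescx_dbg_info_en_t en;
+ *   en.u64 = cvmx_read_csr(CVMX_PESCX_DBG_INFO_EN(0));
+ *   en.s.ramtlp = 1;   // report malformed TLPs (PESC_DBG_INFO[27])
+ *   en.s.acto = 1;     // report completion timeouts (PESC_DBG_INFO[12])
+ *   cvmx_write_csr(CVMX_PESCX_DBG_INFO_EN(0), en.u64);
+ */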
+
+/**
+ * cvmx_pesc#_diag_status
+ *
+ * PESC_DIAG_STATUS = PESC Diagnostic Status
+ *
+ * Selection control for the core's diagnostic bus.
+ */
+union cvmx_pescx_diag_status {
+ uint64_t u64;
+ struct cvmx_pescx_diag_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t pm_dst : 1; /**< Current power management DSTATE. */
+ uint64_t pm_stat : 1; /**< Power Management Status. */
+ uint64_t pm_en : 1; /**< Power Management Event Enable. */
+ uint64_t aux_en : 1; /**< Auxiliary Power Enable. */
+#else
+ uint64_t aux_en : 1;
+ uint64_t pm_en : 1;
+ uint64_t pm_stat : 1;
+ uint64_t pm_dst : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_pescx_diag_status_s cn52xx;
+ struct cvmx_pescx_diag_status_s cn52xxp1;
+ struct cvmx_pescx_diag_status_s cn56xx;
+ struct cvmx_pescx_diag_status_s cn56xxp1;
+};
+typedef union cvmx_pescx_diag_status cvmx_pescx_diag_status_t;
+
+/**
+ * cvmx_pesc#_p2n_bar0_start
+ *
+ * PESC_P2N_BAR0_START = PESC PCIe to Npei BAR0 Start
+ *
+ * The starting address for addresses to be forwarded to the NPEI in RC Mode.
+ */
+union cvmx_pescx_p2n_bar0_start {
+ uint64_t u64;
+ struct cvmx_pescx_p2n_bar0_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 50; /**< The starting address of the 16KB address space that
+ is the BAR0 address space. */
+ uint64_t reserved_0_13 : 14;
+#else
+ uint64_t reserved_0_13 : 14;
+ uint64_t addr : 50;
+#endif
+ } s;
+ struct cvmx_pescx_p2n_bar0_start_s cn52xx;
+ struct cvmx_pescx_p2n_bar0_start_s cn52xxp1;
+ struct cvmx_pescx_p2n_bar0_start_s cn56xx;
+ struct cvmx_pescx_p2n_bar0_start_s cn56xxp1;
+};
+typedef union cvmx_pescx_p2n_bar0_start cvmx_pescx_p2n_bar0_start_t;
+
+/**
+ * cvmx_pesc#_p2n_bar1_start
+ *
+ * PESC_P2N_BAR1_START = PESC PCIe to Npei BAR1 Start
+ *
+ * The starting address for addresses to be forwarded to the NPEI in RC Mode.
+ */
+union cvmx_pescx_p2n_bar1_start {
+ uint64_t u64;
+ struct cvmx_pescx_p2n_bar1_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 38; /**< The starting address of the 64KB address space
+ that is the BAR1 address space. */
+ uint64_t reserved_0_25 : 26;
+#else
+ uint64_t reserved_0_25 : 26;
+ uint64_t addr : 38;
+#endif
+ } s;
+ struct cvmx_pescx_p2n_bar1_start_s cn52xx;
+ struct cvmx_pescx_p2n_bar1_start_s cn52xxp1;
+ struct cvmx_pescx_p2n_bar1_start_s cn56xx;
+ struct cvmx_pescx_p2n_bar1_start_s cn56xxp1;
+};
+typedef union cvmx_pescx_p2n_bar1_start cvmx_pescx_p2n_bar1_start_t;
+
+/**
+ * cvmx_pesc#_p2n_bar2_start
+ *
+ * PESC_P2N_BAR2_START = PESC PCIe to Npei BAR2 Start
+ *
+ * The starting address for addresses to be forwarded to the NPEI in RC Mode.
+ */
+union cvmx_pescx_p2n_bar2_start {
+ uint64_t u64;
+ struct cvmx_pescx_p2n_bar2_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 25; /**< The starting address of the 2^39 address space
+ that is the BAR2 address space. */
+ uint64_t reserved_0_38 : 39;
+#else
+ uint64_t reserved_0_38 : 39;
+ uint64_t addr : 25;
+#endif
+ } s;
+ struct cvmx_pescx_p2n_bar2_start_s cn52xx;
+ struct cvmx_pescx_p2n_bar2_start_s cn52xxp1;
+ struct cvmx_pescx_p2n_bar2_start_s cn56xx;
+ struct cvmx_pescx_p2n_bar2_start_s cn56xxp1;
+};
+typedef union cvmx_pescx_p2n_bar2_start cvmx_pescx_p2n_bar2_start_t;
+
+/**
+ * cvmx_pesc#_p2p_bar#_end
+ *
+ * PESC_P2P_BAR#_END = PESC Peer-To-Peer BAR0 End
+ *
+ * The ending address for addresses to be forwarded to the PCIe peer port.
+ */
+union cvmx_pescx_p2p_barx_end {
+ uint64_t u64;
+ struct cvmx_pescx_p2p_barx_end_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 52; /**< The ending address of the address window created
+ by this field and the PESC_P2P_BAR0_START[63:12]
+ field. The full 64-bits of address are created by:
+ [ADDR[63:12], 12'b0]. */
+ uint64_t reserved_0_11 : 12;
+#else
+ uint64_t reserved_0_11 : 12;
+ uint64_t addr : 52;
+#endif
+ } s;
+ struct cvmx_pescx_p2p_barx_end_s cn52xx;
+ struct cvmx_pescx_p2p_barx_end_s cn52xxp1;
+ struct cvmx_pescx_p2p_barx_end_s cn56xx;
+ struct cvmx_pescx_p2p_barx_end_s cn56xxp1;
+};
+typedef union cvmx_pescx_p2p_barx_end cvmx_pescx_p2p_barx_end_t;
+
+/**
+ * cvmx_pesc#_p2p_bar#_start
+ *
+ * PESC_P2P_BAR#_START = PESC Peer-To-Peer BAR0 Start
+ *
+ * The starting address and enable for addresses to be forwarded to the PCIe peer port.
+ */
+union cvmx_pescx_p2p_barx_start {
+ uint64_t u64;
+ struct cvmx_pescx_p2p_barx_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 52; /**< The starting address of the address window created
+ by this field and the PESC_P2P_BAR0_END[63:12] field.
+ The full 64-bits of address are created by:
+ [ADDR[63:12], 12'b0]. */
+ uint64_t reserved_0_11 : 12;
+#else
+ uint64_t reserved_0_11 : 12;
+ uint64_t addr : 52;
+#endif
+ } s;
+ struct cvmx_pescx_p2p_barx_start_s cn52xx;
+ struct cvmx_pescx_p2p_barx_start_s cn52xxp1;
+ struct cvmx_pescx_p2p_barx_start_s cn56xx;
+ struct cvmx_pescx_p2p_barx_start_s cn56xxp1;
+};
+typedef union cvmx_pescx_p2p_barx_start cvmx_pescx_p2p_barx_start_t;
+
+/**
+ * cvmx_pesc#_tlp_credits
+ *
+ * PESC_TLP_CREDITS = PESC TLP Credits
+ *
+ * Specifies the number of credits for the PESC to use in moving TLPs. When this register is written, the credit values are
+ * reset to the register value. A write to this register should take place BEFORE traffic flow starts.
+ */
+union cvmx_pescx_tlp_credits {
+ uint64_t u64;
+ struct cvmx_pescx_tlp_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_pescx_tlp_credits_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t peai_ppf : 8; /**< TLP credits for Completion TLPs in the Peer.
+ Legal values are 0x24 to 0x80. */
+ uint64_t pesc_cpl : 8; /**< TLP credits for Completion TLPs in the Peer.
+ Legal values are 0x24 to 0x80. */
+ uint64_t pesc_np : 8; /**< TLP credits for Non-Posted TLPs in the Peer.
+ Legal values are 0x4 to 0x10. */
+ uint64_t pesc_p : 8; /**< TLP credits for Posted TLPs in the Peer.
+ Legal values are 0x24 to 0x80. */
+ uint64_t npei_cpl : 8; /**< TLP credits for Completion TLPs in the NPEI.
+ Legal values are 0x24 to 0x80. */
+ uint64_t npei_np : 8; /**< TLP credits for Non-Posted TLPs in the NPEI.
+ Legal values are 0x4 to 0x10. */
+ uint64_t npei_p : 8; /**< TLP credits for Posted TLPs in the NPEI.
+ Legal values are 0x24 to 0x80. */
+#else
+ uint64_t npei_p : 8;
+ uint64_t npei_np : 8;
+ uint64_t npei_cpl : 8;
+ uint64_t pesc_p : 8;
+ uint64_t pesc_np : 8;
+ uint64_t pesc_cpl : 8;
+ uint64_t peai_ppf : 8;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn52xx;
+ struct cvmx_pescx_tlp_credits_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t peai_ppf : 8; /**< TLP credits in core clk pre-buffer that holds TLPs
+ being sent from PCIe Core to NPEI or PEER. */
+ uint64_t pesc_cpl : 5; /**< TLP credits for Completion TLPs in the Peer. */
+ uint64_t pesc_np : 5; /**< TLP credits for Non-Posted TLPs in the Peer. */
+ uint64_t pesc_p : 5; /**< TLP credits for Posted TLPs in the Peer. */
+ uint64_t npei_cpl : 5; /**< TLP credits for Completion TLPs in the NPEI. */
+ uint64_t npei_np : 5; /**< TLP credits for Non-Posted TLPs in the NPEI. */
+ uint64_t npei_p : 5; /**< TLP credits for Posted TLPs in the NPEI. */
+#else
+ uint64_t npei_p : 5;
+ uint64_t npei_np : 5;
+ uint64_t npei_cpl : 5;
+ uint64_t pesc_p : 5;
+ uint64_t pesc_np : 5;
+ uint64_t pesc_cpl : 5;
+ uint64_t peai_ppf : 8;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } cn52xxp1;
+ struct cvmx_pescx_tlp_credits_cn52xx cn56xx;
+ struct cvmx_pescx_tlp_credits_cn52xxp1 cn56xxp1;
+};
+typedef union cvmx_pescx_tlp_credits cvmx_pescx_tlp_credits_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pescx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pexp-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pexp-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pexp-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,2203 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pexp-defs.h
+ *
+ * Configuration and status register (CSR) definitions for
+ * OCTEON PEXP.
+ *
+ * <hr>$Revision$<hr>
+ */
+#ifndef __CVMX_PEXP_DEFS_H__
+#define __CVMX_PEXP_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_BAR1_INDEXX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_NPEI_BAR1_INDEXX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000008000ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_BAR1_INDEXX(offset) (CVMX_ADD_IO_SEG(0x00011F0000008000ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_BIST_STATUS CVMX_PEXP_NPEI_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_BIST_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_BIST_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008580ull);
+}
+#else
+#define CVMX_PEXP_NPEI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F0000008580ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_BIST_STATUS2 CVMX_PEXP_NPEI_BIST_STATUS2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_BIST_STATUS2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_BIST_STATUS2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008680ull);
+}
+#else
+#define CVMX_PEXP_NPEI_BIST_STATUS2 (CVMX_ADD_IO_SEG(0x00011F0000008680ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_CTL_PORT0 CVMX_PEXP_NPEI_CTL_PORT0_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_CTL_PORT0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_CTL_PORT0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008250ull);
+}
+#else
+#define CVMX_PEXP_NPEI_CTL_PORT0 (CVMX_ADD_IO_SEG(0x00011F0000008250ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_CTL_PORT1 CVMX_PEXP_NPEI_CTL_PORT1_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_CTL_PORT1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_CTL_PORT1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008260ull);
+}
+#else
+#define CVMX_PEXP_NPEI_CTL_PORT1 (CVMX_ADD_IO_SEG(0x00011F0000008260ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_CTL_STATUS CVMX_PEXP_NPEI_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_CTL_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_CTL_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008570ull);
+}
+#else
+#define CVMX_PEXP_NPEI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000008570ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_CTL_STATUS2 CVMX_PEXP_NPEI_CTL_STATUS2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_CTL_STATUS2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_CTL_STATUS2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BC00ull);
+}
+#else
+#define CVMX_PEXP_NPEI_CTL_STATUS2 (CVMX_ADD_IO_SEG(0x00011F000000BC00ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_DATA_OUT_CNT CVMX_PEXP_NPEI_DATA_OUT_CNT_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_DATA_OUT_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_DATA_OUT_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000085F0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_DATA_OUT_CNT (CVMX_ADD_IO_SEG(0x00011F00000085F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_DBG_DATA CVMX_PEXP_NPEI_DBG_DATA_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_DBG_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_DBG_DATA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008510ull);
+}
+#else
+#define CVMX_PEXP_NPEI_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F0000008510ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_DBG_SELECT CVMX_PEXP_NPEI_DBG_SELECT_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_DBG_SELECT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_DBG_SELECT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008500ull);
+}
+#else
+#define CVMX_PEXP_NPEI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000008500ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_DMA0_INT_LEVEL CVMX_PEXP_NPEI_DMA0_INT_LEVEL_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_DMA0_INT_LEVEL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_DMA0_INT_LEVEL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000085C0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_DMA0_INT_LEVEL (CVMX_ADD_IO_SEG(0x00011F00000085C0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_DMA1_INT_LEVEL CVMX_PEXP_NPEI_DMA1_INT_LEVEL_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_DMA1_INT_LEVEL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_DMA1_INT_LEVEL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000085D0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_DMA1_INT_LEVEL (CVMX_ADD_IO_SEG(0x00011F00000085D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_DMAX_COUNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_PEXP_NPEI_DMAX_COUNTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000008450ull) + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_DMAX_COUNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000008450ull) + ((offset) & 7) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_DMAX_DBELL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_PEXP_NPEI_DMAX_DBELL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F00000083B0ull) + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_DMAX_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F00000083B0ull) + ((offset) & 7) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000008400ull) + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000008400ull) + ((offset) & 7) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_DMAX_NADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_PEXP_NPEI_DMAX_NADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F00000084A0ull) + ((offset) & 7) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_DMAX_NADDR(offset) (CVMX_ADD_IO_SEG(0x00011F00000084A0ull) + ((offset) & 7) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_DMA_CNTS CVMX_PEXP_NPEI_DMA_CNTS_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_DMA_CNTS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_DMA_CNTS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000085E0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_DMA_CNTS (CVMX_ADD_IO_SEG(0x00011F00000085E0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_DMA_CONTROL CVMX_PEXP_NPEI_DMA_CONTROL_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_DMA_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_DMA_CONTROL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000083A0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_DMA_CONTROL (CVMX_ADD_IO_SEG(0x00011F00000083A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000085B0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM (CVMX_ADD_IO_SEG(0x00011F00000085B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_DMA_STATE1 CVMX_PEXP_NPEI_DMA_STATE1_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_DMA_STATE1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_DMA_STATE1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000086C0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_DMA_STATE1 (CVMX_ADD_IO_SEG(0x00011F00000086C0ull))
+#endif
+#define CVMX_PEXP_NPEI_DMA_STATE1_P1 (CVMX_ADD_IO_SEG(0x00011F0000008680ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_DMA_STATE2 CVMX_PEXP_NPEI_DMA_STATE2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_DMA_STATE2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_DMA_STATE2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000086D0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_DMA_STATE2 (CVMX_ADD_IO_SEG(0x00011F00000086D0ull))
+#endif
+#define CVMX_PEXP_NPEI_DMA_STATE2_P1 (CVMX_ADD_IO_SEG(0x00011F0000008690ull))
+#define CVMX_PEXP_NPEI_DMA_STATE3_P1 (CVMX_ADD_IO_SEG(0x00011F00000086A0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE4_P1 (CVMX_ADD_IO_SEG(0x00011F00000086B0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE5_P1 (CVMX_ADD_IO_SEG(0x00011F00000086C0ull))
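+/*
+ * The *_P1 addresses above are emitted without model checks. The _P1 suffix
+ * in this SDK conventionally marks register locations specific to pass-1
+ * silicon, which would explain why the generator skips the runtime check
+ * here; that reading is an assumption, as the generator leaves no note.
+ */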
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_INT_A_ENB CVMX_PEXP_NPEI_INT_A_ENB_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_INT_A_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_INT_A_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008560ull);
+}
+#else
+#define CVMX_PEXP_NPEI_INT_A_ENB (CVMX_ADD_IO_SEG(0x00011F0000008560ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_INT_A_ENB2 CVMX_PEXP_NPEI_INT_A_ENB2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_INT_A_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_INT_A_ENB2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BCE0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_INT_A_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BCE0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_INT_A_SUM CVMX_PEXP_NPEI_INT_A_SUM_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_INT_A_SUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_INT_A_SUM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008550ull);
+}
+#else
+#define CVMX_PEXP_NPEI_INT_A_SUM (CVMX_ADD_IO_SEG(0x00011F0000008550ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_INT_ENB CVMX_PEXP_NPEI_INT_ENB_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_INT_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008540ull);
+}
+#else
+#define CVMX_PEXP_NPEI_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000008540ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_INT_ENB2 CVMX_PEXP_NPEI_INT_ENB2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_INT_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_INT_ENB2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BCD0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_INT_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BCD0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_INT_INFO CVMX_PEXP_NPEI_INT_INFO_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_INT_INFO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_INT_INFO not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008590ull);
+}
+#else
+#define CVMX_PEXP_NPEI_INT_INFO (CVMX_ADD_IO_SEG(0x00011F0000008590ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_INT_SUM CVMX_PEXP_NPEI_INT_SUM_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_INT_SUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_INT_SUM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008530ull);
+}
+#else
+#define CVMX_PEXP_NPEI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000008530ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_INT_SUM2 CVMX_PEXP_NPEI_INT_SUM2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_INT_SUM2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_INT_SUM2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BCC0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_INT_SUM2 (CVMX_ADD_IO_SEG(0x00011F000000BCC0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_LAST_WIN_RDATA0 CVMX_PEXP_NPEI_LAST_WIN_RDATA0_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_LAST_WIN_RDATA0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_LAST_WIN_RDATA0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008600ull);
+}
+#else
+#define CVMX_PEXP_NPEI_LAST_WIN_RDATA0 (CVMX_ADD_IO_SEG(0x00011F0000008600ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_LAST_WIN_RDATA1 CVMX_PEXP_NPEI_LAST_WIN_RDATA1_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_LAST_WIN_RDATA1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_LAST_WIN_RDATA1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008610ull);
+}
+#else
+#define CVMX_PEXP_NPEI_LAST_WIN_RDATA1 (CVMX_ADD_IO_SEG(0x00011F0000008610ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MEM_ACCESS_CTL CVMX_PEXP_NPEI_MEM_ACCESS_CTL_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MEM_ACCESS_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MEM_ACCESS_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000084F0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MEM_ACCESS_CTL (CVMX_ADD_IO_SEG(0x00011F00000084F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset >= 12) && (offset <= 27)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 12) && (offset <= 27))))))
+ cvmx_warn("CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000008280ull) + ((offset) & 31) * 16 - 16*12;
+}
+#else
+#define CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F0000008280ull) + ((offset) & 31) * 16 - 16*12)
+#endif
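+/*
+ * NPEI_MEM_ACCESS_SUBIDX only exists for indices 12 through 27, so the
+ * address computation above rebases the table: subtracting 16*12 maps
+ * offset 12 to the block base. Worked example: for offset 12,
+ * 0x...8280 + (12 & 31)*16 - 16*12 = 0x...8280, and offset 27 lands
+ * 240 bytes higher.
+ */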
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_ENB0 CVMX_PEXP_NPEI_MSI_ENB0_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_ENB0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BC50ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BC50ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_ENB1 CVMX_PEXP_NPEI_MSI_ENB1_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_ENB1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BC60ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BC60ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_ENB2 CVMX_PEXP_NPEI_MSI_ENB2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_ENB2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BC70ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BC70ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_ENB3 CVMX_PEXP_NPEI_MSI_ENB3_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_ENB3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BC80ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BC80ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_RCV0 CVMX_PEXP_NPEI_MSI_RCV0_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_RCV0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_RCV0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BC10ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_RCV0 (CVMX_ADD_IO_SEG(0x00011F000000BC10ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_RCV1 CVMX_PEXP_NPEI_MSI_RCV1_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_RCV1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_RCV1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BC20ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_RCV1 (CVMX_ADD_IO_SEG(0x00011F000000BC20ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_RCV2 CVMX_PEXP_NPEI_MSI_RCV2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_RCV2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_RCV2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BC30ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_RCV2 (CVMX_ADD_IO_SEG(0x00011F000000BC30ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_RCV3 CVMX_PEXP_NPEI_MSI_RCV3_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_RCV3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_RCV3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BC40ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_RCV3 (CVMX_ADD_IO_SEG(0x00011F000000BC40ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_RD_MAP CVMX_PEXP_NPEI_MSI_RD_MAP_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_RD_MAP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_RD_MAP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BCA0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_RD_MAP (CVMX_ADD_IO_SEG(0x00011F000000BCA0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB0 CVMX_PEXP_NPEI_MSI_W1C_ENB0_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_W1C_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_W1C_ENB0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BCF0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BCF0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB1 CVMX_PEXP_NPEI_MSI_W1C_ENB1_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_W1C_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_W1C_ENB1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BD00ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BD00ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB2 CVMX_PEXP_NPEI_MSI_W1C_ENB2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_W1C_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_W1C_ENB2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BD10ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BD10ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB3 CVMX_PEXP_NPEI_MSI_W1C_ENB3_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_W1C_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_W1C_ENB3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BD20ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BD20ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB0 CVMX_PEXP_NPEI_MSI_W1S_ENB0_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_W1S_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_W1S_ENB0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BD30ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BD30ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB1 CVMX_PEXP_NPEI_MSI_W1S_ENB1_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_W1S_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_W1S_ENB1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BD40ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BD40ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB2 CVMX_PEXP_NPEI_MSI_W1S_ENB2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_W1S_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_W1S_ENB2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BD50ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BD50ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB3 CVMX_PEXP_NPEI_MSI_W1S_ENB3_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_W1S_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_W1S_ENB3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BD60ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BD60ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_MSI_WR_MAP CVMX_PEXP_NPEI_MSI_WR_MAP_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_MSI_WR_MAP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_MSI_WR_MAP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BC90ull);
+}
+#else
+#define CVMX_PEXP_NPEI_MSI_WR_MAP (CVMX_ADD_IO_SEG(0x00011F000000BC90ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PCIE_CREDIT_CNT CVMX_PEXP_NPEI_PCIE_CREDIT_CNT_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PCIE_CREDIT_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PCIE_CREDIT_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BD70ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PCIE_CREDIT_CNT (CVMX_ADD_IO_SEG(0x00011F000000BD70ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV CVMX_PEXP_NPEI_PCIE_MSI_RCV_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PCIE_MSI_RCV_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PCIE_MSI_RCV not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F000000BCB0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F000000BCB0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1 CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008650ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1 (CVMX_ADD_IO_SEG(0x00011F0000008650ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2 CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008660ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2 (CVMX_ADD_IO_SEG(0x00011F0000008660ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3 CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008670ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3 (CVMX_ADD_IO_SEG(0x00011F0000008670ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_PKTX_CNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_NPEI_PKTX_CNTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F000000A400ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_PKTX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F000000A400ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_PKTX_INSTR_BADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_NPEI_PKTX_INSTR_BADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F000000A800ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_PKTX_INSTR_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F000000A800ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F000000AC00ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F000000AC00ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_PKTX_INSTR_FIFO_RSIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_NPEI_PKTX_INSTR_FIFO_RSIZE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F000000B000ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F000000B000ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_PKTX_INSTR_HEADER(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_NPEI_PKTX_INSTR_HEADER(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F000000B400ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_PKTX_INSTR_HEADER(offset) (CVMX_ADD_IO_SEG(0x00011F000000B400ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_PKTX_IN_BP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_NPEI_PKTX_IN_BP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F000000B800ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_PKTX_IN_BP(offset) (CVMX_ADD_IO_SEG(0x00011F000000B800ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_PKTX_SLIST_BADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_NPEI_PKTX_SLIST_BADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000009400ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_PKTX_SLIST_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000009400ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000009800ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000009800ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_PKTX_SLIST_FIFO_RSIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_NPEI_PKTX_SLIST_FIFO_RSIZE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000009C00ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000009C00ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_CNT_INT CVMX_PEXP_NPEI_PKT_CNT_INT_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_CNT_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_CNT_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009110ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_CNT_INT (CVMX_ADD_IO_SEG(0x00011F0000009110ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_CNT_INT_ENB CVMX_PEXP_NPEI_PKT_CNT_INT_ENB_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_CNT_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_CNT_INT_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009130ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_CNT_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009130ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ES CVMX_PEXP_NPEI_PKT_DATA_OUT_ES_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_DATA_OUT_ES_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_DATA_OUT_ES not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000090B0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ES (CVMX_ADD_IO_SEG(0x00011F00000090B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_NS CVMX_PEXP_NPEI_PKT_DATA_OUT_NS_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_DATA_OUT_NS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_DATA_OUT_NS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000090A0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_NS (CVMX_ADD_IO_SEG(0x00011F00000090A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009090ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR (CVMX_ADD_IO_SEG(0x00011F0000009090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_DPADDR CVMX_PEXP_NPEI_PKT_DPADDR_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_DPADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_DPADDR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009080ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_DPADDR (CVMX_ADD_IO_SEG(0x00011F0000009080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_INPUT_CONTROL CVMX_PEXP_NPEI_PKT_INPUT_CONTROL_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_INPUT_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_INPUT_CONTROL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009150ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000009150ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_INSTR_ENB CVMX_PEXP_NPEI_PKT_INSTR_ENB_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_INSTR_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_INSTR_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009000ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_INSTR_ENB (CVMX_ADD_IO_SEG(0x00011F0000009000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009190ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009190ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_INSTR_SIZE CVMX_PEXP_NPEI_PKT_INSTR_SIZE_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_INSTR_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_INSTR_SIZE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009020ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_INSTR_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_INT_LEVELS CVMX_PEXP_NPEI_PKT_INT_LEVELS_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_INT_LEVELS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_INT_LEVELS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009100ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_INT_LEVELS (CVMX_ADD_IO_SEG(0x00011F0000009100ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_IN_BP CVMX_PEXP_NPEI_PKT_IN_BP_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_IN_BP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_IN_BP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000086B0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_IN_BP (CVMX_ADD_IO_SEG(0x00011F00000086B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_NPEI_PKT_IN_DONEX_CNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_IN_DONEX_CNTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F000000A000ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_IN_DONEX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F000000A000ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000086A0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS (CVMX_ADD_IO_SEG(0x00011F00000086A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000091A0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000091A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_IPTR CVMX_PEXP_NPEI_PKT_IPTR_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_IPTR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_IPTR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009070ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_IPTR (CVMX_ADD_IO_SEG(0x00011F0000009070ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009160ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK (CVMX_ADD_IO_SEG(0x00011F0000009160ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_OUT_BMODE CVMX_PEXP_NPEI_PKT_OUT_BMODE_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_OUT_BMODE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_OUT_BMODE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000090D0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_OUT_BMODE (CVMX_ADD_IO_SEG(0x00011F00000090D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_OUT_ENB CVMX_PEXP_NPEI_PKT_OUT_ENB_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_OUT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_OUT_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009010ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_OUT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_PCIE_PORT CVMX_PEXP_NPEI_PKT_PCIE_PORT_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_PCIE_PORT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_PCIE_PORT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000090E0ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000090E0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_PORT_IN_RST CVMX_PEXP_NPEI_PKT_PORT_IN_RST_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_PORT_IN_RST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_PORT_IN_RST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008690ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_PORT_IN_RST (CVMX_ADD_IO_SEG(0x00011F0000008690ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_SLIST_ES CVMX_PEXP_NPEI_PKT_SLIST_ES_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_SLIST_ES_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_SLIST_ES not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009050ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_SLIST_ES (CVMX_ADD_IO_SEG(0x00011F0000009050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009180ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009180ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_SLIST_NS CVMX_PEXP_NPEI_PKT_SLIST_NS_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_SLIST_NS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_SLIST_NS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009040ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_SLIST_NS (CVMX_ADD_IO_SEG(0x00011F0000009040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_SLIST_ROR CVMX_PEXP_NPEI_PKT_SLIST_ROR_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_SLIST_ROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_SLIST_ROR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009030ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_SLIST_ROR (CVMX_ADD_IO_SEG(0x00011F0000009030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_TIME_INT CVMX_PEXP_NPEI_PKT_TIME_INT_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_TIME_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_TIME_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009120ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_TIME_INT (CVMX_ADD_IO_SEG(0x00011F0000009120ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_PKT_TIME_INT_ENB CVMX_PEXP_NPEI_PKT_TIME_INT_ENB_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_PKT_TIME_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_PKT_TIME_INT_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000009140ull);
+}
+#else
+#define CVMX_PEXP_NPEI_PKT_TIME_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009140ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_RSL_INT_BLOCKS CVMX_PEXP_NPEI_RSL_INT_BLOCKS_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_RSL_INT_BLOCKS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_RSL_INT_BLOCKS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008520ull);
+}
+#else
+#define CVMX_PEXP_NPEI_RSL_INT_BLOCKS (CVMX_ADD_IO_SEG(0x00011F0000008520ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_SCRATCH_1 CVMX_PEXP_NPEI_SCRATCH_1_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_SCRATCH_1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_SCRATCH_1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008270ull);
+}
+#else
+#define CVMX_PEXP_NPEI_SCRATCH_1 (CVMX_ADD_IO_SEG(0x00011F0000008270ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_STATE1 CVMX_PEXP_NPEI_STATE1_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_STATE1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_STATE1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008620ull);
+}
+#else
+#define CVMX_PEXP_NPEI_STATE1 (CVMX_ADD_IO_SEG(0x00011F0000008620ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_STATE2 CVMX_PEXP_NPEI_STATE2_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_STATE2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_STATE2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008630ull);
+}
+#else
+#define CVMX_PEXP_NPEI_STATE2 (CVMX_ADD_IO_SEG(0x00011F0000008630ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_STATE3 CVMX_PEXP_NPEI_STATE3_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_STATE3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_STATE3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008640ull);
+}
+#else
+#define CVMX_PEXP_NPEI_STATE3 (CVMX_ADD_IO_SEG(0x00011F0000008640ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_NPEI_WINDOW_CTL CVMX_PEXP_NPEI_WINDOW_CTL_FUNC()
+static inline uint64_t CVMX_PEXP_NPEI_WINDOW_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_PEXP_NPEI_WINDOW_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000008380ull);
+}
+#else
+#define CVMX_PEXP_NPEI_WINDOW_CTL (CVMX_ADD_IO_SEG(0x00011F0000008380ull))
+#endif
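+/*
+ * The NPEI_* registers above are checked only against CN52XX/CN56XX. The
+ * SLI_* registers that follow appear to be the corresponding host-interface
+ * CSRs for the newer CN61XX/CN63XX/CN66XX/CN68XX/CNF71XX parts, living in a
+ * higher offset range of the same I/O segment.
+ */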
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_BIST_STATUS CVMX_PEXP_SLI_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_BIST_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_BIST_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010580ull);
+}
+#else
+#define CVMX_PEXP_SLI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F0000010580ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_CTL_PORTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PEXP_SLI_CTL_PORTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000010050ull) + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_CTL_PORTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000010050ull) + ((offset) & 3) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_CTL_STATUS CVMX_PEXP_SLI_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_CTL_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_CTL_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010570ull);
+}
+#else
+#define CVMX_PEXP_SLI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000010570ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_DATA_OUT_CNT CVMX_PEXP_SLI_DATA_OUT_CNT_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_DATA_OUT_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_DATA_OUT_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000105F0ull);
+}
+#else
+#define CVMX_PEXP_SLI_DATA_OUT_CNT (CVMX_ADD_IO_SEG(0x00011F00000105F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_DBG_DATA CVMX_PEXP_SLI_DBG_DATA_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_DBG_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_DBG_DATA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010310ull);
+}
+#else
+#define CVMX_PEXP_SLI_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F0000010310ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_DBG_SELECT CVMX_PEXP_SLI_DBG_SELECT_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_DBG_SELECT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_DBG_SELECT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010300ull);
+}
+#else
+#define CVMX_PEXP_SLI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000010300ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_DMAX_CNT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PEXP_SLI_DMAX_CNT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000010400ull) + ((offset) & 1) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_DMAX_CNT(offset) (CVMX_ADD_IO_SEG(0x00011F0000010400ull) + ((offset) & 1) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_DMAX_INT_LEVEL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PEXP_SLI_DMAX_INT_LEVEL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F00000103E0ull) + ((offset) & 1) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_DMAX_INT_LEVEL(offset) (CVMX_ADD_IO_SEG(0x00011F00000103E0ull) + ((offset) & 1) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_DMAX_TIM(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PEXP_SLI_DMAX_TIM(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000010420ull) + ((offset) & 1) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_DMAX_TIM(offset) (CVMX_ADD_IO_SEG(0x00011F0000010420ull) + ((offset) & 1) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_INT_ENB_CIU CVMX_PEXP_SLI_INT_ENB_CIU_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_INT_ENB_CIU_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_INT_ENB_CIU not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013CD0ull);
+}
+#else
+#define CVMX_PEXP_SLI_INT_ENB_CIU (CVMX_ADD_IO_SEG(0x00011F0000013CD0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_INT_ENB_PORTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PEXP_SLI_INT_ENB_PORTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000010340ull) + ((offset) & 1) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_INT_ENB_PORTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000010340ull) + ((offset) & 1) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_INT_SUM CVMX_PEXP_SLI_INT_SUM_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_INT_SUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_INT_SUM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010330ull);
+}
+#else
+#define CVMX_PEXP_SLI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000010330ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA0 CVMX_PEXP_SLI_LAST_WIN_RDATA0_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_LAST_WIN_RDATA0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_LAST_WIN_RDATA0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010600ull);
+}
+#else
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA0 (CVMX_ADD_IO_SEG(0x00011F0000010600ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA1 CVMX_PEXP_SLI_LAST_WIN_RDATA1_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_LAST_WIN_RDATA1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_LAST_WIN_RDATA1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010610ull);
+}
+#else
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA1 (CVMX_ADD_IO_SEG(0x00011F0000010610ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA2 CVMX_PEXP_SLI_LAST_WIN_RDATA2_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_LAST_WIN_RDATA2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_LAST_WIN_RDATA2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000106C0ull);
+}
+#else
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA2 (CVMX_ADD_IO_SEG(0x00011F00000106C0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA3 CVMX_PEXP_SLI_LAST_WIN_RDATA3_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_LAST_WIN_RDATA3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_LAST_WIN_RDATA3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000106D0ull);
+}
+#else
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA3 (CVMX_ADD_IO_SEG(0x00011F00000106D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MAC_CREDIT_CNT CVMX_PEXP_SLI_MAC_CREDIT_CNT_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MAC_CREDIT_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MAC_CREDIT_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013D70ull);
+}
+#else
+#define CVMX_PEXP_SLI_MAC_CREDIT_CNT (CVMX_ADD_IO_SEG(0x00011F0000013D70ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MAC_CREDIT_CNT2 CVMX_PEXP_SLI_MAC_CREDIT_CNT2_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MAC_CREDIT_CNT2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MAC_CREDIT_CNT2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013E10ull);
+}
+#else
+#define CVMX_PEXP_SLI_MAC_CREDIT_CNT2 (CVMX_ADD_IO_SEG(0x00011F0000013E10ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MEM_ACCESS_CTL CVMX_PEXP_SLI_MEM_ACCESS_CTL_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MEM_ACCESS_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MEM_ACCESS_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000102F0ull);
+}
+#else
+#define CVMX_PEXP_SLI_MEM_ACCESS_CTL (CVMX_ADD_IO_SEG(0x00011F00000102F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset >= 12) && (offset <= 27)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 12) && (offset <= 27)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 12) && (offset <= 27)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset >= 12) && (offset <= 27)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset >= 12) && (offset <= 27))))))
+ cvmx_warn("CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F00000100E0ull) + ((offset) & 31) * 16 - 16*12;
+}
+#else
+#define CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F00000100E0ull) + ((offset) & 31) * 16 - 16*12)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_ENB0 CVMX_PEXP_SLI_MSI_ENB0_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_ENB0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013C50ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013C50ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_ENB1 CVMX_PEXP_SLI_MSI_ENB1_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_ENB1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013C60ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013C60ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_ENB2 CVMX_PEXP_SLI_MSI_ENB2_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_ENB2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013C70ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013C70ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_ENB3 CVMX_PEXP_SLI_MSI_ENB3_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_ENB3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013C80ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013C80ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_RCV0 CVMX_PEXP_SLI_MSI_RCV0_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_RCV0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_RCV0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013C10ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_RCV0 (CVMX_ADD_IO_SEG(0x00011F0000013C10ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_RCV1 CVMX_PEXP_SLI_MSI_RCV1_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_RCV1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_RCV1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013C20ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_RCV1 (CVMX_ADD_IO_SEG(0x00011F0000013C20ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_RCV2 CVMX_PEXP_SLI_MSI_RCV2_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_RCV2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_RCV2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013C30ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_RCV2 (CVMX_ADD_IO_SEG(0x00011F0000013C30ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_RCV3 CVMX_PEXP_SLI_MSI_RCV3_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_RCV3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_RCV3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013C40ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_RCV3 (CVMX_ADD_IO_SEG(0x00011F0000013C40ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_RD_MAP CVMX_PEXP_SLI_MSI_RD_MAP_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_RD_MAP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_RD_MAP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013CA0ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_RD_MAP (CVMX_ADD_IO_SEG(0x00011F0000013CA0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_W1C_ENB0 CVMX_PEXP_SLI_MSI_W1C_ENB0_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_W1C_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_W1C_ENB0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013CF0ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_W1C_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013CF0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_W1C_ENB1 CVMX_PEXP_SLI_MSI_W1C_ENB1_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_W1C_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_W1C_ENB1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013D00ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_W1C_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013D00ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_W1C_ENB2 CVMX_PEXP_SLI_MSI_W1C_ENB2_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_W1C_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_W1C_ENB2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013D10ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_W1C_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013D10ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_W1C_ENB3 CVMX_PEXP_SLI_MSI_W1C_ENB3_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_W1C_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_W1C_ENB3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013D20ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_W1C_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013D20ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_W1S_ENB0 CVMX_PEXP_SLI_MSI_W1S_ENB0_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_W1S_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_W1S_ENB0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013D30ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_W1S_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013D30ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_W1S_ENB1 CVMX_PEXP_SLI_MSI_W1S_ENB1_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_W1S_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_W1S_ENB1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013D40ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_W1S_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013D40ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_W1S_ENB2 CVMX_PEXP_SLI_MSI_W1S_ENB2_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_W1S_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_W1S_ENB2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013D50ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_W1S_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013D50ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_W1S_ENB3 CVMX_PEXP_SLI_MSI_W1S_ENB3_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_W1S_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_W1S_ENB3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013D60ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_W1S_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013D60ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_MSI_WR_MAP CVMX_PEXP_SLI_MSI_WR_MAP_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_MSI_WR_MAP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_MSI_WR_MAP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013C90ull);
+}
+#else
+#define CVMX_PEXP_SLI_MSI_WR_MAP (CVMX_ADD_IO_SEG(0x00011F0000013C90ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV CVMX_PEXP_SLI_PCIE_MSI_RCV_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PCIE_MSI_RCV_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PCIE_MSI_RCV not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000013CB0ull);
+}
+#else
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F0000013CB0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B1 CVMX_PEXP_SLI_PCIE_MSI_RCV_B1_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PCIE_MSI_RCV_B1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PCIE_MSI_RCV_B1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010650ull);
+}
+#else
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B1 (CVMX_ADD_IO_SEG(0x00011F0000010650ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B2 CVMX_PEXP_SLI_PCIE_MSI_RCV_B2_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PCIE_MSI_RCV_B2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PCIE_MSI_RCV_B2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010660ull);
+}
+#else
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B2 (CVMX_ADD_IO_SEG(0x00011F0000010660ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B3 CVMX_PEXP_SLI_PCIE_MSI_RCV_B3_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PCIE_MSI_RCV_B3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PCIE_MSI_RCV_B3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010670ull);
+}
+#else
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B3 (CVMX_ADD_IO_SEG(0x00011F0000010670ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PKTX_CNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PKTX_CNTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000012400ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PKTX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000012400ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PKTX_INSTR_BADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PKTX_INSTR_BADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000012800ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PKTX_INSTR_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000012800ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PKTX_INSTR_BAOFF_DBELL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PKTX_INSTR_BAOFF_DBELL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000012C00ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PKTX_INSTR_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000012C00ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PKTX_INSTR_FIFO_RSIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PKTX_INSTR_FIFO_RSIZE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000013000ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PKTX_INSTR_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000013000ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PKTX_INSTR_HEADER(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PKTX_INSTR_HEADER(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000013400ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PKTX_INSTR_HEADER(offset) (CVMX_ADD_IO_SEG(0x00011F0000013400ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PKTX_IN_BP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PKTX_IN_BP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000013800ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PKTX_IN_BP(offset) (CVMX_ADD_IO_SEG(0x00011F0000013800ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PKTX_OUT_SIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PKTX_OUT_SIZE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000010C00ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PKTX_OUT_SIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000010C00ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PKTX_SLIST_BADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PKTX_SLIST_BADDR(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000011400ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PKTX_SLIST_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000011400ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PKTX_SLIST_BAOFF_DBELL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PKTX_SLIST_BAOFF_DBELL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000011800ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PKTX_SLIST_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000011800ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PKTX_SLIST_FIFO_RSIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PKTX_SLIST_FIFO_RSIZE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000011C00ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PKTX_SLIST_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000011C00ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_CNT_INT CVMX_PEXP_SLI_PKT_CNT_INT_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_CNT_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_CNT_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011130ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_CNT_INT (CVMX_ADD_IO_SEG(0x00011F0000011130ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_CNT_INT_ENB CVMX_PEXP_SLI_PKT_CNT_INT_ENB_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_CNT_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_CNT_INT_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011150ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_CNT_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011150ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_CTL CVMX_PEXP_SLI_PKT_CTL_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011220ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_CTL (CVMX_ADD_IO_SEG(0x00011F0000011220ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_ES CVMX_PEXP_SLI_PKT_DATA_OUT_ES_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_DATA_OUT_ES_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_DATA_OUT_ES not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000110B0ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_ES (CVMX_ADD_IO_SEG(0x00011F00000110B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_NS CVMX_PEXP_SLI_PKT_DATA_OUT_NS_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_DATA_OUT_NS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_DATA_OUT_NS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000110A0ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_NS (CVMX_ADD_IO_SEG(0x00011F00000110A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_ROR CVMX_PEXP_SLI_PKT_DATA_OUT_ROR_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_DATA_OUT_ROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_DATA_OUT_ROR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011090ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_ROR (CVMX_ADD_IO_SEG(0x00011F0000011090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_DPADDR CVMX_PEXP_SLI_PKT_DPADDR_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_DPADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_DPADDR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011080ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_DPADDR (CVMX_ADD_IO_SEG(0x00011F0000011080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_INPUT_CONTROL CVMX_PEXP_SLI_PKT_INPUT_CONTROL_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_INPUT_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_INPUT_CONTROL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011170ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000011170ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_INSTR_ENB CVMX_PEXP_SLI_PKT_INSTR_ENB_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_INSTR_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_INSTR_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011000ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_INSTR_ENB (CVMX_ADD_IO_SEG(0x00011F0000011000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_INSTR_RD_SIZE CVMX_PEXP_SLI_PKT_INSTR_RD_SIZE_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_INSTR_RD_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_INSTR_RD_SIZE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000111A0ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_INSTR_RD_SIZE (CVMX_ADD_IO_SEG(0x00011F00000111A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_INSTR_SIZE CVMX_PEXP_SLI_PKT_INSTR_SIZE_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_INSTR_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_INSTR_SIZE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011020ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_INSTR_SIZE (CVMX_ADD_IO_SEG(0x00011F0000011020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_INT_LEVELS CVMX_PEXP_SLI_PKT_INT_LEVELS_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_INT_LEVELS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_INT_LEVELS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011120ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_INT_LEVELS (CVMX_ADD_IO_SEG(0x00011F0000011120ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_IN_BP CVMX_PEXP_SLI_PKT_IN_BP_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_IN_BP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_IN_BP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011210ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_IN_BP (CVMX_ADD_IO_SEG(0x00011F0000011210ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PKT_IN_DONEX_CNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_IN_DONEX_CNTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000012000ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PKT_IN_DONEX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000012000ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_IN_INSTR_COUNTS CVMX_PEXP_SLI_PKT_IN_INSTR_COUNTS_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_IN_INSTR_COUNTS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_IN_INSTR_COUNTS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011200ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_IN_INSTR_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000011200ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_IN_PCIE_PORT CVMX_PEXP_SLI_PKT_IN_PCIE_PORT_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_IN_PCIE_PORT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_IN_PCIE_PORT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000111B0ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_IN_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000111B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_IPTR CVMX_PEXP_SLI_PKT_IPTR_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_IPTR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_IPTR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011070ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_IPTR (CVMX_ADD_IO_SEG(0x00011F0000011070ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_OUTPUT_WMARK CVMX_PEXP_SLI_PKT_OUTPUT_WMARK_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_OUTPUT_WMARK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_OUTPUT_WMARK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011180ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_OUTPUT_WMARK (CVMX_ADD_IO_SEG(0x00011F0000011180ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_OUT_BMODE CVMX_PEXP_SLI_PKT_OUT_BMODE_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_OUT_BMODE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_OUT_BMODE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000110D0ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_OUT_BMODE (CVMX_ADD_IO_SEG(0x00011F00000110D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_OUT_BP_EN CVMX_PEXP_SLI_PKT_OUT_BP_EN_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_OUT_BP_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_OUT_BP_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011240ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_OUT_BP_EN (CVMX_ADD_IO_SEG(0x00011F0000011240ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_OUT_ENB CVMX_PEXP_SLI_PKT_OUT_ENB_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_OUT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_OUT_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011010ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_OUT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_PCIE_PORT CVMX_PEXP_SLI_PKT_PCIE_PORT_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_PCIE_PORT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_PCIE_PORT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000110E0ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000110E0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_PORT_IN_RST CVMX_PEXP_SLI_PKT_PORT_IN_RST_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_PORT_IN_RST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_PORT_IN_RST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000111F0ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_PORT_IN_RST (CVMX_ADD_IO_SEG(0x00011F00000111F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_SLIST_ES CVMX_PEXP_SLI_PKT_SLIST_ES_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_SLIST_ES_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_SLIST_ES not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011050ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_SLIST_ES (CVMX_ADD_IO_SEG(0x00011F0000011050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_SLIST_NS CVMX_PEXP_SLI_PKT_SLIST_NS_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_SLIST_NS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_SLIST_NS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011040ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_SLIST_NS (CVMX_ADD_IO_SEG(0x00011F0000011040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_SLIST_ROR CVMX_PEXP_SLI_PKT_SLIST_ROR_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_SLIST_ROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_SLIST_ROR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011030ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_SLIST_ROR (CVMX_ADD_IO_SEG(0x00011F0000011030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_TIME_INT CVMX_PEXP_SLI_PKT_TIME_INT_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_TIME_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_TIME_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011140ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_TIME_INT (CVMX_ADD_IO_SEG(0x00011F0000011140ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_PKT_TIME_INT_ENB CVMX_PEXP_SLI_PKT_TIME_INT_ENB_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_PKT_TIME_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_PKT_TIME_INT_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011160ull);
+}
+#else
+#define CVMX_PEXP_SLI_PKT_TIME_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011160ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_PORTX_PKIND(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_PEXP_SLI_PORTX_PKIND(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000010800ull) + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_PORTX_PKIND(offset) (CVMX_ADD_IO_SEG(0x00011F0000010800ull) + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PEXP_SLI_S2M_PORTX_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PEXP_SLI_S2M_PORTX_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011F0000013D80ull) + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_PEXP_SLI_S2M_PORTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011F0000013D80ull) + ((offset) & 3) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_SCRATCH_1 CVMX_PEXP_SLI_SCRATCH_1_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_SCRATCH_1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_SCRATCH_1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000103C0ull);
+}
+#else
+#define CVMX_PEXP_SLI_SCRATCH_1 (CVMX_ADD_IO_SEG(0x00011F00000103C0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_SCRATCH_2 CVMX_PEXP_SLI_SCRATCH_2_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_SCRATCH_2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_SCRATCH_2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000103D0ull);
+}
+#else
+#define CVMX_PEXP_SLI_SCRATCH_2 (CVMX_ADD_IO_SEG(0x00011F00000103D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_STATE1 CVMX_PEXP_SLI_STATE1_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_STATE1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_STATE1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010620ull);
+}
+#else
+#define CVMX_PEXP_SLI_STATE1 (CVMX_ADD_IO_SEG(0x00011F0000010620ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_STATE2 CVMX_PEXP_SLI_STATE2_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_STATE2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_STATE2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010630ull);
+}
+#else
+#define CVMX_PEXP_SLI_STATE2 (CVMX_ADD_IO_SEG(0x00011F0000010630ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_STATE3 CVMX_PEXP_SLI_STATE3_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_STATE3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_STATE3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000010640ull);
+}
+#else
+#define CVMX_PEXP_SLI_STATE3 (CVMX_ADD_IO_SEG(0x00011F0000010640ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_TX_PIPE CVMX_PEXP_SLI_TX_PIPE_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_TX_PIPE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PEXP_SLI_TX_PIPE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F0000011230ull);
+}
+#else
+#define CVMX_PEXP_SLI_TX_PIPE (CVMX_ADD_IO_SEG(0x00011F0000011230ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PEXP_SLI_WINDOW_CTL CVMX_PEXP_SLI_WINDOW_CTL_FUNC()
+static inline uint64_t CVMX_PEXP_SLI_WINDOW_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PEXP_SLI_WINDOW_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011F00000102E0ull);
+}
+#else
+#define CVMX_PEXP_SLI_WINDOW_CTL (CVMX_ADD_IO_SEG(0x00011F00000102E0ull))
+#endif
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pexp-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pip-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pip-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pip-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,6126 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pip-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pip.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_PIP_DEFS_H__
+#define __CVMX_PIP_DEFS_H__
+
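+/* Usage note (illustrative, not part of the generated register list):
+ * these macros yield physical CSR addresses, normally passed to the
+ * accessors declared elsewhere in the SDK (cvmx.h / cvmx-access.h).
+ * A minimal polling sketch, assuming the usual write-one-to-clear
+ * behaviour of the PIP interrupt bits:
+ *
+ *     uint64_t pip_int = cvmx_read_csr(CVMX_PIP_INT_REG);
+ *     if (pip_int)
+ *         cvmx_write_csr(CVMX_PIP_INT_REG, pip_int);
+ *
+ * When the build defines CVMX_ENABLE_CSR_ADDRESS_CHECKING, each address
+ * is instead produced by a static inline function that calls cvmx_warn()
+ * for registers or offsets that do not exist on the running Octeon
+ * model before returning the address.
+ */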
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_ALT_SKIP_CFGX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PIP_ALT_SKIP_CFGX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002A00ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_PIP_ALT_SKIP_CFGX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002A00ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PIP_BCK_PRS CVMX_PIP_BCK_PRS_FUNC()
+static inline uint64_t CVMX_PIP_BCK_PRS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PIP_BCK_PRS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800A0000038ull);
+}
+#else
+#define CVMX_PIP_BCK_PRS (CVMX_ADD_IO_SEG(0x00011800A0000038ull))
+#endif
+#define CVMX_PIP_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011800A0000000ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_BSEL_EXT_CFGX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PIP_BSEL_EXT_CFGX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002800ull) + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_PIP_BSEL_EXT_CFGX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002800ull) + ((offset) & 3) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_BSEL_EXT_POSX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PIP_BSEL_EXT_POSX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002808ull) + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_PIP_BSEL_EXT_POSX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002808ull) + ((offset) & 3) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_BSEL_TBL_ENTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 511))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 511))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 511)))))
+ cvmx_warn("CVMX_PIP_BSEL_TBL_ENTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0003000ull) + ((offset) & 511) * 8;
+}
+#else
+#define CVMX_PIP_BSEL_TBL_ENTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0003000ull) + ((offset) & 511) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PIP_CLKEN CVMX_PIP_CLKEN_FUNC()
+static inline uint64_t CVMX_PIP_CLKEN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PIP_CLKEN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800A0000040ull);
+}
+#else
+#define CVMX_PIP_CLKEN (CVMX_ADD_IO_SEG(0x00011800A0000040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_CRC_CTLX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PIP_CRC_CTLX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000040ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_PIP_CRC_CTLX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000040ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_CRC_IVX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PIP_CRC_IVX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000050ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_PIP_CRC_IVX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000050ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_DEC_IPSECX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_PIP_DEC_IPSECX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000080ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_PIP_DEC_IPSECX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000080ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PIP_DSA_SRC_GRP CVMX_PIP_DSA_SRC_GRP_FUNC()
+static inline uint64_t CVMX_PIP_DSA_SRC_GRP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PIP_DSA_SRC_GRP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800A0000190ull);
+}
+#else
+#define CVMX_PIP_DSA_SRC_GRP (CVMX_ADD_IO_SEG(0x00011800A0000190ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PIP_DSA_VID_GRP CVMX_PIP_DSA_VID_GRP_FUNC()
+static inline uint64_t CVMX_PIP_DSA_VID_GRP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PIP_DSA_VID_GRP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800A0000198ull);
+}
+#else
+#define CVMX_PIP_DSA_VID_GRP (CVMX_ADD_IO_SEG(0x00011800A0000198ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_FRM_LEN_CHKX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0)))))
+ cvmx_warn("CVMX_PIP_FRM_LEN_CHKX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000180ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_PIP_FRM_LEN_CHKX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000180ull) + ((offset) & 1) * 8)
+#endif
+#define CVMX_PIP_GBL_CFG (CVMX_ADD_IO_SEG(0x00011800A0000028ull))
+#define CVMX_PIP_GBL_CTL (CVMX_ADD_IO_SEG(0x00011800A0000020ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PIP_HG_PRI_QOS CVMX_PIP_HG_PRI_QOS_FUNC()
+static inline uint64_t CVMX_PIP_HG_PRI_QOS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PIP_HG_PRI_QOS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800A00001A0ull);
+}
+#else
+#define CVMX_PIP_HG_PRI_QOS (CVMX_ADD_IO_SEG(0x00011800A00001A0ull))
+#endif
+#define CVMX_PIP_INT_EN (CVMX_ADD_IO_SEG(0x00011800A0000010ull))
+#define CVMX_PIP_INT_REG (CVMX_ADD_IO_SEG(0x00011800A0000008ull))
+#define CVMX_PIP_IP_OFFSET (CVMX_ADD_IO_SEG(0x00011800A0000060ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_PRI_TBLX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 255)))))
+ cvmx_warn("CVMX_PIP_PRI_TBLX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0004000ull) + ((offset) & 255) * 8;
+}
+#else
+#define CVMX_PIP_PRI_TBLX(offset) (CVMX_ADD_IO_SEG(0x00011800A0004000ull) + ((offset) & 255) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_PRT_CFGBX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)) || ((offset >= 40) && (offset <= 43)) || ((offset >= 44) && (offset <= 47)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_PRT_CFGBX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0008000ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_PIP_PRT_CFGBX(offset) (CVMX_ADD_IO_SEG(0x00011800A0008000ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_PRT_CFGX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)) || ((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)) || ((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_PRT_CFGX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000200ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_PIP_PRT_CFGX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000200ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_PRT_TAGX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)) || ((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)) || ((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_PRT_TAGX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000400ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_PIP_PRT_TAGX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000400ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_QOS_DIFFX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_QOS_DIFFX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000600ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_PIP_QOS_DIFFX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000600ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_QOS_VLANX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_PIP_QOS_VLANX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A00000C0ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_PIP_QOS_VLANX(offset) (CVMX_ADD_IO_SEG(0x00011800A00000C0ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_QOS_WATCHX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_PIP_QOS_WATCHX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000100ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_PIP_QOS_WATCHX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000100ull) + ((offset) & 7) * 8)
+#endif
+#define CVMX_PIP_RAW_WORD (CVMX_ADD_IO_SEG(0x00011800A00000B0ull))
+#define CVMX_PIP_SFT_RST (CVMX_ADD_IO_SEG(0x00011800A0000030ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT0_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT0_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000800ull) + ((offset) & 63) * 80;
+}
+#else
+#define CVMX_PIP_STAT0_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000800ull) + ((offset) & 63) * 80)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT0_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT0_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040000ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT0_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040000ull) + ((offset) & 63) * 128)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT10_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT10_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0001480ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_PIP_STAT10_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001480ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT10_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT10_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040050ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT10_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040050ull) + ((offset) & 63) * 128)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT11_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT11_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0001488ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_PIP_STAT11_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001488ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT11_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT11_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040058ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT11_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040058ull) + ((offset) & 63) * 128)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT1_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT1_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000808ull) + ((offset) & 63) * 80;
+}
+#else
+#define CVMX_PIP_STAT1_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000808ull) + ((offset) & 63) * 80)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT1_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT1_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040008ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT1_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040008ull) + ((offset) & 63) * 128)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT2_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT2_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000810ull) + ((offset) & 63) * 80;
+}
+#else
+#define CVMX_PIP_STAT2_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000810ull) + ((offset) & 63) * 80)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT2_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT2_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040010ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT2_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040010ull) + ((offset) & 63) * 128)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT3_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT3_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000818ull) + ((offset) & 63) * 80;
+}
+#else
+#define CVMX_PIP_STAT3_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000818ull) + ((offset) & 63) * 80)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT3_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT3_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040018ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT3_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040018ull) + ((offset) & 63) * 128)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT4_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT4_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000820ull) + ((offset) & 63) * 80;
+}
+#else
+#define CVMX_PIP_STAT4_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000820ull) + ((offset) & 63) * 80)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT4_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT4_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040020ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT4_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040020ull) + ((offset) & 63) * 128)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT5_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT5_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000828ull) + ((offset) & 63) * 80;
+}
+#else
+#define CVMX_PIP_STAT5_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000828ull) + ((offset) & 63) * 80)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT5_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT5_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040028ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT5_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040028ull) + ((offset) & 63) * 128)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT6_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT6_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000830ull) + ((offset) & 63) * 80;
+}
+#else
+#define CVMX_PIP_STAT6_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000830ull) + ((offset) & 63) * 80)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT6_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT6_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040030ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT6_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040030ull) + ((offset) & 63) * 128)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT7_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT7_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000838ull) + ((offset) & 63) * 80;
+}
+#else
+#define CVMX_PIP_STAT7_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000838ull) + ((offset) & 63) * 80)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT7_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT7_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040038ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT7_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040038ull) + ((offset) & 63) * 128)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT8_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT8_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000840ull) + ((offset) & 63) * 80;
+}
+#else
+#define CVMX_PIP_STAT8_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000840ull) + ((offset) & 63) * 80)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT8_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT8_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040040ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT8_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040040ull) + ((offset) & 63) * 128)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT9_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT9_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0000848ull) + ((offset) & 63) * 80;
+}
+#else
+#define CVMX_PIP_STAT9_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000848ull) + ((offset) & 63) * 80)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT9_X(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT9_X(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0040048ull) + ((offset) & 63) * 128;
+}
+#else
+#define CVMX_PIP_STAT9_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040048ull) + ((offset) & 63) * 128)
+#endif
+#define CVMX_PIP_STAT_CTL (CVMX_ADD_IO_SEG(0x00011800A0000018ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT_INB_ERRSX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)) || ((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)) || ((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT_INB_ERRSX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0001A10ull) + ((offset) & 63) * 32;
+}
+#else
+#define CVMX_PIP_STAT_INB_ERRSX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001A10ull) + ((offset) & 63) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT_INB_ERRS_PKNDX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT_INB_ERRS_PKNDX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0020010ull) + ((offset) & 63) * 32;
+}
+#else
+#define CVMX_PIP_STAT_INB_ERRS_PKNDX(offset) (CVMX_ADD_IO_SEG(0x00011800A0020010ull) + ((offset) & 63) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT_INB_OCTSX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)) || ((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)) || ((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT_INB_OCTSX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0001A08ull) + ((offset) & 63) * 32;
+}
+#else
+#define CVMX_PIP_STAT_INB_OCTSX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001A08ull) + ((offset) & 63) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT_INB_OCTS_PKNDX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT_INB_OCTS_PKNDX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0020008ull) + ((offset) & 63) * 32;
+}
+#else
+#define CVMX_PIP_STAT_INB_OCTS_PKNDX(offset) (CVMX_ADD_IO_SEG(0x00011800A0020008ull) + ((offset) & 63) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT_INB_PKTSX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)) || ((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39)) || ((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1) || ((offset >= 32) && (offset <= 35)) || ((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_PIP_STAT_INB_PKTSX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0001A00ull) + ((offset) & 63) * 32;
+}
+#else
+#define CVMX_PIP_STAT_INB_PKTSX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001A00ull) + ((offset) & 63) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_STAT_INB_PKTS_PKNDX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_STAT_INB_PKTS_PKNDX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0020000ull) + ((offset) & 63) * 32;
+}
+#else
+#define CVMX_PIP_STAT_INB_PKTS_PKNDX(offset) (CVMX_ADD_IO_SEG(0x00011800A0020000ull) + ((offset) & 63) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_SUB_PKIND_FCSX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_PIP_SUB_PKIND_FCSX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A0080000ull);
+}
+#else
+#define CVMX_PIP_SUB_PKIND_FCSX(block_id) (CVMX_ADD_IO_SEG(0x00011800A0080000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_TAG_INCX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_PIP_TAG_INCX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0001800ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_PIP_TAG_INCX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001800ull) + ((offset) & 63) * 8)
+#endif
+#define CVMX_PIP_TAG_MASK (CVMX_ADD_IO_SEG(0x00011800A0000070ull))
+#define CVMX_PIP_TAG_SECRET (CVMX_ADD_IO_SEG(0x00011800A0000068ull))
+#define CVMX_PIP_TODO_ENTRY (CVMX_ADD_IO_SEG(0x00011800A0000078ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_VLAN_ETYPESX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PIP_VLAN_ETYPESX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A00001C0ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_PIP_VLAN_ETYPESX(offset) (CVMX_ADD_IO_SEG(0x00011800A00001C0ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT0_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT0_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002000ull) + ((offset) & 63) * 80 - 80*40;
+}
+#else
+#define CVMX_PIP_XSTAT0_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002000ull) + ((offset) & 63) * 80 - 80*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT10_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT10_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0001700ull) + ((offset) & 63) * 16 - 16*40;
+}
+#else
+#define CVMX_PIP_XSTAT10_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001700ull) + ((offset) & 63) * 16 - 16*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT11_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT11_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0001708ull) + ((offset) & 63) * 16 - 16*40;
+}
+#else
+#define CVMX_PIP_XSTAT11_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001708ull) + ((offset) & 63) * 16 - 16*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT1_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT1_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002008ull) + ((offset) & 63) * 80 - 80*40;
+}
+#else
+#define CVMX_PIP_XSTAT1_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002008ull) + ((offset) & 63) * 80 - 80*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT2_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT2_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002010ull) + ((offset) & 63) * 80 - 80*40;
+}
+#else
+#define CVMX_PIP_XSTAT2_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002010ull) + ((offset) & 63) * 80 - 80*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT3_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT3_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002018ull) + ((offset) & 63) * 80 - 80*40;
+}
+#else
+#define CVMX_PIP_XSTAT3_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002018ull) + ((offset) & 63) * 80 - 80*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT4_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT4_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002020ull) + ((offset) & 63) * 80 - 80*40;
+}
+#else
+#define CVMX_PIP_XSTAT4_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002020ull) + ((offset) & 63) * 80 - 80*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT5_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT5_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002028ull) + ((offset) & 63) * 80 - 80*40;
+}
+#else
+#define CVMX_PIP_XSTAT5_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002028ull) + ((offset) & 63) * 80 - 80*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT6_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT6_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002030ull) + ((offset) & 63) * 80 - 80*40;
+}
+#else
+#define CVMX_PIP_XSTAT6_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002030ull) + ((offset) & 63) * 80 - 80*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT7_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT7_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002038ull) + ((offset) & 63) * 80 - 80*40;
+}
+#else
+#define CVMX_PIP_XSTAT7_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002038ull) + ((offset) & 63) * 80 - 80*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT8_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT8_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002040ull) + ((offset) & 63) * 80 - 80*40;
+}
+#else
+#define CVMX_PIP_XSTAT8_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002040ull) + ((offset) & 63) * 80 - 80*40)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PIP_XSTAT9_PRTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 40) && (offset <= 43)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 40) && (offset <= 41)) || ((offset >= 44) && (offset <= 47))))))
+ cvmx_warn("CVMX_PIP_XSTAT9_PRTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800A0002048ull) + ((offset) & 63) * 80 - 80*40;
+}
+#else
+#define CVMX_PIP_XSTAT9_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002048ull) + ((offset) & 63) * 80 - 80*40)
+#endif
+
+/**
+ * cvmx_pip_alt_skip_cfg#
+ *
+ * Notes:
+ * The actual SKIP I used by HW is determined by the packet contents. BIT0 and
+ * BIT1 make up a two-bit value that selects the skip value as follows.
+ *
+ * lookup_value = LEN ? ( packet_in_bits[BIT1], packet_in_bits[BIT0] ) : ( 0, packet_in_bits[BIT0] );
+ * SKIP I = lookup_value == 3 ? SKIP3 :
+ * lookup_value == 2 ? SKIP2 :
+ * lookup_value == 1 ? SKIP1 :
+ * PIP_PRT_CFG<pknd>[SKIP];
+ */
+union cvmx_pip_alt_skip_cfgx {
+ uint64_t u64;
+ struct cvmx_pip_alt_skip_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_57_63 : 7;
+ uint64_t len : 1; /**< Indicates the length of the selection field
+ 0 = 0, BIT0
+ 1 = BIT1, BIT0 */
+ uint64_t reserved_46_55 : 10;
+ uint64_t bit1 : 6; /**< Indicates the bit location in the first word of
+ the packet to use to select the skip amount.
+ BIT1 must be present in the packet. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t bit0 : 6; /**< Indicates the bit location in the first word of
+ the packet to use to select the skip amount.
+ BIT0 must be present in the packet. */
+ uint64_t reserved_23_31 : 9;
+ uint64_t skip3 : 7; /**< Indicates number of bytes to skip from start of
+ packet 0-64 */
+ uint64_t reserved_15_15 : 1;
+ uint64_t skip2 : 7; /**< Indicates number of bytes to skip from start of
+ packet 0-64 */
+ uint64_t reserved_7_7 : 1;
+ uint64_t skip1 : 7; /**< Indicates number of bytes to skip from start of
+ packet 0-64 */
+#else
+ uint64_t skip1 : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t skip2 : 7;
+ uint64_t reserved_15_15 : 1;
+ uint64_t skip3 : 7;
+ uint64_t reserved_23_31 : 9;
+ uint64_t bit0 : 6;
+ uint64_t reserved_38_39 : 2;
+ uint64_t bit1 : 6;
+ uint64_t reserved_46_55 : 10;
+ uint64_t len : 1;
+ uint64_t reserved_57_63 : 7;
+#endif
+ } s;
+ struct cvmx_pip_alt_skip_cfgx_s cn61xx;
+ struct cvmx_pip_alt_skip_cfgx_s cn66xx;
+ struct cvmx_pip_alt_skip_cfgx_s cn68xx;
+ struct cvmx_pip_alt_skip_cfgx_s cnf71xx;
+};
+typedef union cvmx_pip_alt_skip_cfgx cvmx_pip_alt_skip_cfgx_t;
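+
+/* An illustrative sketch of the selection logic described in the notes
+ * above: alt_skip_select() models how HW picks the skip amount. The
+ * packet_bit() callback is a hypothetical helper returning the addressed
+ * bit of the first packet word, and default_skip stands in for
+ * PIP_PRT_CFG<pknd>[SKIP]. This models the documented behavior only and is
+ * not part of the SDK API. */
+static inline unsigned alt_skip_select(cvmx_pip_alt_skip_cfgx_t cfg,
+                                       unsigned (*packet_bit)(unsigned pos),
+                                       unsigned default_skip)
+{
+    unsigned lookup_value = cfg.s.len ?
+        ((packet_bit(cfg.s.bit1) << 1) | packet_bit(cfg.s.bit0)) :
+        packet_bit(cfg.s.bit0);
+
+    switch (lookup_value) {
+    case 3:  return cfg.s.skip3;
+    case 2:  return cfg.s.skip2;
+    case 1:  return cfg.s.skip1;
+    default: return default_skip; /* PIP_PRT_CFG<pknd>[SKIP] */
+    }
+}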
+
+/**
+ * cvmx_pip_bck_prs
+ *
+ * PIP_BCK_PRS = PIP's Back Pressure Register
+ *
+ * When to assert backpressure based on the todo list filling up
+ */
+union cvmx_pip_bck_prs {
+ uint64_t u64;
+ struct cvmx_pip_bck_prs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bckprs : 1; /**< PIP is currently asserting backpressure to IOB
+ Backpressure from PIP will assert when the
+ entries on the todo list exceed HIWATER.
+ Backpressure will be held until the todo entries
+ are less than or equal to LOWATER. */
+ uint64_t reserved_13_62 : 50;
+ uint64_t hiwater : 5; /**< Water mark in the todo list to assert backpressure
+ Legal values are 1-26. A value of 0 will deadlock
+ the machine; a value > 26 will trash memory. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t lowater : 5; /**< Water mark in the todo list to release backpressure
+ The LOWATER value should be < HIWATER. */
+#else
+ uint64_t lowater : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t hiwater : 5;
+ uint64_t reserved_13_62 : 50;
+ uint64_t bckprs : 1;
+#endif
+ } s;
+ struct cvmx_pip_bck_prs_s cn38xx;
+ struct cvmx_pip_bck_prs_s cn38xxp2;
+ struct cvmx_pip_bck_prs_s cn56xx;
+ struct cvmx_pip_bck_prs_s cn56xxp1;
+ struct cvmx_pip_bck_prs_s cn58xx;
+ struct cvmx_pip_bck_prs_s cn58xxp1;
+ struct cvmx_pip_bck_prs_s cn61xx;
+ struct cvmx_pip_bck_prs_s cn63xx;
+ struct cvmx_pip_bck_prs_s cn63xxp1;
+ struct cvmx_pip_bck_prs_s cn66xx;
+ struct cvmx_pip_bck_prs_s cn68xx;
+ struct cvmx_pip_bck_prs_s cn68xxp1;
+ struct cvmx_pip_bck_prs_s cnf71xx;
+};
+typedef union cvmx_pip_bck_prs cvmx_pip_bck_prs_t;
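+
+/* A minimal sketch of programming the watermarks documented above, assuming
+ * the SDK's generic cvmx_read_csr()/cvmx_write_csr() accessors and the
+ * CVMX_PIP_BCK_PRS address macro defined earlier in this file. Callers must
+ * respect the documented limits (HIWATER 1-26, LOWATER < HIWATER); this
+ * helper is an example, not part of the SDK API. */
+static inline void pip_set_backpressure(unsigned hiwater, unsigned lowater)
+{
+    cvmx_pip_bck_prs_t prs;
+
+    prs.u64 = cvmx_read_csr(CVMX_PIP_BCK_PRS);
+    prs.s.hiwater = hiwater; /* 1-26; 0 deadlocks, >26 corrupts memory */
+    prs.s.lowater = lowater; /* must be < HIWATER */
+    cvmx_write_csr(CVMX_PIP_BCK_PRS, prs.u64);
+}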
+
+/**
+ * cvmx_pip_bist_status
+ *
+ * PIP_BIST_STATUS = PIP's BIST Results
+ *
+ */
+union cvmx_pip_bist_status {
+ uint64_t u64;
+ struct cvmx_pip_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t bist : 22; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 22;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_pip_bist_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t bist : 18; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 18;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn30xx;
+ struct cvmx_pip_bist_status_cn30xx cn31xx;
+ struct cvmx_pip_bist_status_cn30xx cn38xx;
+ struct cvmx_pip_bist_status_cn30xx cn38xxp2;
+ struct cvmx_pip_bist_status_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t bist : 17; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 17;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn50xx;
+ struct cvmx_pip_bist_status_cn30xx cn52xx;
+ struct cvmx_pip_bist_status_cn30xx cn52xxp1;
+ struct cvmx_pip_bist_status_cn30xx cn56xx;
+ struct cvmx_pip_bist_status_cn30xx cn56xxp1;
+ struct cvmx_pip_bist_status_cn30xx cn58xx;
+ struct cvmx_pip_bist_status_cn30xx cn58xxp1;
+ struct cvmx_pip_bist_status_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t bist : 20; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 20;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn61xx;
+ struct cvmx_pip_bist_status_cn30xx cn63xx;
+ struct cvmx_pip_bist_status_cn30xx cn63xxp1;
+ struct cvmx_pip_bist_status_cn61xx cn66xx;
+ struct cvmx_pip_bist_status_s cn68xx;
+ struct cvmx_pip_bist_status_cn61xx cn68xxp1;
+ struct cvmx_pip_bist_status_cn61xx cnf71xx;
+};
+typedef union cvmx_pip_bist_status cvmx_pip_bist_status_t;
+
+/**
+ * cvmx_pip_bsel_ext_cfg#
+ *
+ * PIP_BSEL_EXT_CFGX = Bit Select Extractor config register containing the
+ * tag, offset, and skip values to be used when using the corresponding extractor.
+ */
+union cvmx_pip_bsel_ext_cfgx {
+ uint64_t u64;
+ struct cvmx_pip_bsel_ext_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t upper_tag : 16; /**< Extra Tag bits to be added to tag field from table
+ Only included when PIP_PRT_TAG[INC_PRT]=0
+ WORD2[TAG<31:16>] */
+ uint64_t tag : 8; /**< Extra Tag bits to be added to tag field from table
+ WORD2[TAG<15:8>] */
+ uint64_t reserved_25_31 : 7;
+ uint64_t offset : 9; /**< Indicates offset to add to extractor mem address
+ to get the final address into the lookup table */
+ uint64_t reserved_7_15 : 9;
+ uint64_t skip : 7; /**< Indicates number of bytes to skip from start of
+ packet 0-64 */
+#else
+ uint64_t skip : 7;
+ uint64_t reserved_7_15 : 9;
+ uint64_t offset : 9;
+ uint64_t reserved_25_31 : 7;
+ uint64_t tag : 8;
+ uint64_t upper_tag : 16;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_pip_bsel_ext_cfgx_s cn61xx;
+ struct cvmx_pip_bsel_ext_cfgx_s cn68xx;
+ struct cvmx_pip_bsel_ext_cfgx_s cnf71xx;
+};
+typedef union cvmx_pip_bsel_ext_cfgx cvmx_pip_bsel_ext_cfgx_t;
+
+/**
+ * cvmx_pip_bsel_ext_pos#
+ *
+ * PIP_BSEL_EXT_POSX = Bit Select Extractor config register containing the 8
+ * bit positions and valids to be used when using the corresponding extractor.
+ *
+ * Notes:
+ * Examples on bit positioning:
+ * the most-significant-bit of the 3rd byte ... PIP_BSEL_EXT_CFG*[SKIP]=1 POSn=15 (decimal) or
+ * PIP_BSEL_EXT_CFG*[SKIP]=0 POSn=23 (decimal)
+ * the least-significant-bit of the 5th byte ... PIP_BSEL_EXT_CFG*[SKIP]=4 POSn=0
+ * the second-least-significant bit of the 1st byte ... PIP_BSEL_EXT_CFG*[SKIP]=0 POSn=1
+ *
+ * POSn_VAL and POSn correspond to <n> in the resultant index into
+ * PIP_BSEL_TBL_ENT. When only x bits (0 < x < 7) are to be extracted,
+ * POS[7:x] should normally be clear.
+ */
+union cvmx_pip_bsel_ext_posx {
+ uint64_t u64;
+ struct cvmx_pip_bsel_ext_posx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pos7_val : 1; /**< Valid bit for bit position 7 */
+ uint64_t pos7 : 7; /**< Bit position for the 8th bit from 128 bit segment
+ of pkt that is defined by the SKIP field of
+ PIP_BSEL_EXT_CFG register. */
+ uint64_t pos6_val : 1; /**< Valid bit for bit position 6 */
+ uint64_t pos6 : 7; /**< Bit position for the 7th bit from 128 bit segment
+ of pkt that is defined by the SKIP field of
+ PIP_BSEL_EXT_CFG register. */
+ uint64_t pos5_val : 1; /**< Valid bit for bit position 5 */
+ uint64_t pos5 : 7; /**< Bit position for the 6th bit from 128 bit segment
+ of pkt that is defined by the SKIP field of
+ PIP_BSEL_EXT_CFG register. */
+ uint64_t pos4_val : 1; /**< Valid bit for bit position 4 */
+ uint64_t pos4 : 7; /**< Bit position for the 5th bit from 128 bit segment
+ of pkt that is defined by the SKIP field of
+ PIP_BSEL_EXT_CFG register. */
+ uint64_t pos3_val : 1; /**< Valid bit for bit position 3 */
+ uint64_t pos3 : 7; /**< Bit position for the 4th bit from 128 bit segment
+ of pkt that is defined by the SKIP field of
+ PIP_BSEL_EXT_CFG register. */
+ uint64_t pos2_val : 1; /**< Valid bit for bit position 2 */
+ uint64_t pos2 : 7; /**< Bit position for the 3rd bit from 128 bit segment
+ of pkt that is defined by the SKIP field of
+ PIP_BSEL_EXT_CFG register. */
+ uint64_t pos1_val : 1; /**< Valid bit for bit position 1 */
+ uint64_t pos1 : 7; /**< Bit position for the 2nd bit from 128 bit segment
+ of pkt that is defined by the SKIP field of
+ PIP_BSEL_EXT_CFG register. */
+ uint64_t pos0_val : 1; /**< Valid bit for bit position 0 */
+ uint64_t pos0 : 7; /**< Bit position for the 1st bit from 128 bit segment
+ of pkt that is defined by the SKIP field of
+ PIP_BSEL_EXT_CFG register. */
+#else
+ uint64_t pos0 : 7;
+ uint64_t pos0_val : 1;
+ uint64_t pos1 : 7;
+ uint64_t pos1_val : 1;
+ uint64_t pos2 : 7;
+ uint64_t pos2_val : 1;
+ uint64_t pos3 : 7;
+ uint64_t pos3_val : 1;
+ uint64_t pos4 : 7;
+ uint64_t pos4_val : 1;
+ uint64_t pos5 : 7;
+ uint64_t pos5_val : 1;
+ uint64_t pos6 : 7;
+ uint64_t pos6_val : 1;
+ uint64_t pos7 : 7;
+ uint64_t pos7_val : 1;
+#endif
+ } s;
+ struct cvmx_pip_bsel_ext_posx_s cn61xx;
+ struct cvmx_pip_bsel_ext_posx_s cn68xx;
+ struct cvmx_pip_bsel_ext_posx_s cnf71xx;
+};
+typedef union cvmx_pip_bsel_ext_posx cvmx_pip_bsel_ext_posx_t;
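+
+/* Sketch of the bit addressing worked in the notes above: convert a byte
+ * index into the packet (0-based, byte >= SKIP and within the 16-byte
+ * window) and a bit within that byte (MSB = 7) into the POSn value for a
+ * given PIP_BSEL_EXT_CFG*[SKIP]. For example, byte 2 bit 7 (MSB of the 3rd
+ * byte) gives POS 15 with SKIP=1 and POS 23 with SKIP=0, matching the
+ * examples. Illustrative only, not part of the SDK API. */
+static inline unsigned bsel_pos_for_bit(unsigned byte, unsigned bit_in_byte,
+                                        unsigned skip)
+{
+    /* POS counts bits from the start of the 128-bit window selected by
+     * SKIP, eight per byte, with a byte's MSB at the higher POS. */
+    return (byte - skip) * 8 + bit_in_byte;
+}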
+
+/**
+ * cvmx_pip_bsel_tbl_ent#
+ *
+ * PIP_BSEL_TBL_ENTX = Entry for the extractor table
+ *
+ */
+union cvmx_pip_bsel_tbl_entx {
+ uint64_t u64;
+ struct cvmx_pip_bsel_tbl_entx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t tag_en : 1; /**< Enables the use of the TAG field */
+ uint64_t grp_en : 1; /**< Enables the use of the GRP field */
+ uint64_t tt_en : 1; /**< Enables the use of the TT field */
+ uint64_t qos_en : 1; /**< Enables the use of the QOS field */
+ uint64_t reserved_40_59 : 20;
+ uint64_t tag : 8; /**< TAG bits to be used if TAG_EN is set */
+ uint64_t reserved_22_31 : 10;
+ uint64_t grp : 6; /**< GRP field to be used if GRP_EN is set */
+ uint64_t reserved_10_15 : 6;
+ uint64_t tt : 2; /**< TT field to be used if TT_EN is set */
+ uint64_t reserved_3_7 : 5;
+ uint64_t qos : 3; /**< QOS field to be used if QOS_EN is set */
+#else
+ uint64_t qos : 3;
+ uint64_t reserved_3_7 : 5;
+ uint64_t tt : 2;
+ uint64_t reserved_10_15 : 6;
+ uint64_t grp : 6;
+ uint64_t reserved_22_31 : 10;
+ uint64_t tag : 8;
+ uint64_t reserved_40_59 : 20;
+ uint64_t qos_en : 1;
+ uint64_t tt_en : 1;
+ uint64_t grp_en : 1;
+ uint64_t tag_en : 1;
+#endif
+ } s;
+ struct cvmx_pip_bsel_tbl_entx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t tag_en : 1; /**< Enables the use of the TAG field */
+ uint64_t grp_en : 1; /**< Enables the use of the GRP field */
+ uint64_t tt_en : 1; /**< Enables the use of the TT field */
+ uint64_t qos_en : 1; /**< Enables the use of the QOS field */
+ uint64_t reserved_40_59 : 20;
+ uint64_t tag : 8; /**< TAG bits to be used if TAG_EN is set */
+ uint64_t reserved_20_31 : 12;
+ uint64_t grp : 4; /**< GRP field to be used if GRP_EN is set */
+ uint64_t reserved_10_15 : 6;
+ uint64_t tt : 2; /**< TT field to be used if TT_EN is set */
+ uint64_t reserved_3_7 : 5;
+ uint64_t qos : 3; /**< QOS field to be used if QOS_EN is set */
+#else
+ uint64_t qos : 3;
+ uint64_t reserved_3_7 : 5;
+ uint64_t tt : 2;
+ uint64_t reserved_10_15 : 6;
+ uint64_t grp : 4;
+ uint64_t reserved_20_31 : 12;
+ uint64_t tag : 8;
+ uint64_t reserved_40_59 : 20;
+ uint64_t qos_en : 1;
+ uint64_t tt_en : 1;
+ uint64_t grp_en : 1;
+ uint64_t tag_en : 1;
+#endif
+ } cn61xx;
+ struct cvmx_pip_bsel_tbl_entx_s cn68xx;
+ struct cvmx_pip_bsel_tbl_entx_cn61xx cnf71xx;
+};
+typedef union cvmx_pip_bsel_tbl_entx cvmx_pip_bsel_tbl_entx_t;
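+
+/* Sketch: program extractor table entry 'index' so that matching packets
+ * are forced to a given QOS level. The meaningful index width depends on
+ * how many POSn_VAL bits are set in PIP_BSEL_EXT_POS*. Assumes the SDK's
+ * cvmx_write_csr() and the CVMX_PIP_BSEL_TBL_ENTX() address macro defined
+ * earlier in this file; example only, not part of the SDK API. */
+static inline void pip_bsel_set_qos(unsigned index, unsigned qos)
+{
+    cvmx_pip_bsel_tbl_entx_t ent;
+
+    ent.u64 = 0;
+    ent.s.qos_en = 1; /* apply the QOS field below */
+    ent.s.qos = qos;  /* 3-bit QOS value */
+    cvmx_write_csr(CVMX_PIP_BSEL_TBL_ENTX(index), ent.u64);
+}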
+
+/**
+ * cvmx_pip_clken
+ */
+union cvmx_pip_clken {
+ uint64_t u64;
+ struct cvmx_pip_clken_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t clken : 1; /**< Controls the conditional clocking within PIP
+ 0=Allow HW to control the clocks
+ 1=Force the clocks to be always on */
+#else
+ uint64_t clken : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_pip_clken_s cn61xx;
+ struct cvmx_pip_clken_s cn63xx;
+ struct cvmx_pip_clken_s cn63xxp1;
+ struct cvmx_pip_clken_s cn66xx;
+ struct cvmx_pip_clken_s cn68xx;
+ struct cvmx_pip_clken_s cn68xxp1;
+ struct cvmx_pip_clken_s cnf71xx;
+};
+typedef union cvmx_pip_clken cvmx_pip_clken_t;
+
+/**
+ * cvmx_pip_crc_ctl#
+ *
+ * PIP_CRC_CTL = PIP CRC Control Register
+ *
+ * Controls datapath reflection when calculating CRC
+ */
+union cvmx_pip_crc_ctlx {
+ uint64_t u64;
+ struct cvmx_pip_crc_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t invres : 1; /**< Invert the result */
+ uint64_t reflect : 1; /**< Reflect the bits in each byte.
+ Byte order does not change.
+ - 0: CRC is calculated MSB to LSB
+ - 1: CRC is calculated LSB to MSB */
+#else
+ uint64_t reflect : 1;
+ uint64_t invres : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_pip_crc_ctlx_s cn38xx;
+ struct cvmx_pip_crc_ctlx_s cn38xxp2;
+ struct cvmx_pip_crc_ctlx_s cn58xx;
+ struct cvmx_pip_crc_ctlx_s cn58xxp1;
+};
+typedef union cvmx_pip_crc_ctlx cvmx_pip_crc_ctlx_t;
+
+/**
+ * cvmx_pip_crc_iv#
+ *
+ * PIP_CRC_IV = PIP CRC IV Register
+ *
+ * Determines the IV used by the CRC algorithm
+ *
+ * Notes:
+ * * PIP_CRC_IV
+ * PIP_CRC_IV controls the initial state of the CRC algorithm. Octane can
+ * support a wide range of CRC algorithms and as such, the IV must be
+ * carefully constructed to meet the specific algorithm. The code below
+ * determines the value to program into Octane based on the algorithm's IV
+ * and width. In the case of Octane, the width should always be 32.
+ *
+ * PIP_CRC_IV0 sets the IV for ports 0-15 while PIP_CRC_IV1 sets the IV for
+ * ports 16-31.
+ *
+ * unsigned octane_crc_iv(unsigned algorithm_iv, unsigned poly, unsigned w)
+ * {
+ *     int i;
+ *     int doit;
+ *     unsigned int current_val = algorithm_iv;
+ *
+ *     for (i = 0; i < w; i++) {
+ *         doit = current_val & 0x1;
+ *
+ *         if (doit)
+ *             current_val ^= poly;
+ *         assert(!(current_val & 0x1));
+ *
+ *         current_val = (current_val >> 1) | (doit << (w-1));
+ *     }
+ *
+ *     return current_val;
+ * }
+ */
+union cvmx_pip_crc_ivx {
+ uint64_t u64;
+ struct cvmx_pip_crc_ivx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t iv : 32; /**< IV used by the CRC algorithm. Default is FCS32. */
+#else
+ uint64_t iv : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pip_crc_ivx_s cn38xx;
+ struct cvmx_pip_crc_ivx_s cn38xxp2;
+ struct cvmx_pip_crc_ivx_s cn58xx;
+ struct cvmx_pip_crc_ivx_s cn58xxp1;
+};
+typedef union cvmx_pip_crc_ivx cvmx_pip_crc_ivx_t;
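+
+/* Illustrative sketch (not part of the original SDK header): deriving and
+ * programming the IV for PIP_CRC_IV0 (ports 0-15) per the octane_crc_iv()
+ * routine in the notes above, here for standard FCS32 (IV 0xFFFFFFFF,
+ * polynomial 0x04C11DB7, width 32). Assumes the CVMX_PIP_CRC_IVX() address
+ * macro defined earlier in this file and cvmx_write_csr() from
+ * cvmx-access.h. */
+static inline unsigned __octane_crc_iv(unsigned algorithm_iv, unsigned poly,
+                                       unsigned w)
+{
+    unsigned current_val = algorithm_iv;
+    unsigned i;
+
+    for (i = 0; i < w; i++) {
+        unsigned doit = current_val & 0x1;
+
+        if (doit)
+            current_val ^= poly;
+        current_val = (current_val >> 1) | (doit << (w - 1));
+    }
+    return current_val;
+}
+
+static inline void __cvmx_pip_crc_iv_example(void)
+{
+    cvmx_pip_crc_ivx_t iv;
+
+    iv.u64 = 0;
+    iv.s.iv = __octane_crc_iv(0xffffffff, 0x04c11db7, 32);
+    cvmx_write_csr(CVMX_PIP_CRC_IVX(0), iv.u64); /* IV0 covers ports 0-15 */
+}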
+
+/**
+ * cvmx_pip_dec_ipsec#
+ *
+ * PIP_DEC_IPSEC = UDP or TCP ports to watch for DEC IPSEC
+ *
+ * PIP sets the dec_ipsec based on TCP or UDP destination port.
+ */
+union cvmx_pip_dec_ipsecx {
+ uint64_t u64;
+ struct cvmx_pip_dec_ipsecx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t tcp : 1; /**< This DPRT should be used for TCP packets */
+ uint64_t udp : 1; /**< This DPRT should be used for UDP packets */
+ uint64_t dprt : 16; /**< UDP or TCP destination port to match on */
+#else
+ uint64_t dprt : 16;
+ uint64_t udp : 1;
+ uint64_t tcp : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_pip_dec_ipsecx_s cn30xx;
+ struct cvmx_pip_dec_ipsecx_s cn31xx;
+ struct cvmx_pip_dec_ipsecx_s cn38xx;
+ struct cvmx_pip_dec_ipsecx_s cn38xxp2;
+ struct cvmx_pip_dec_ipsecx_s cn50xx;
+ struct cvmx_pip_dec_ipsecx_s cn52xx;
+ struct cvmx_pip_dec_ipsecx_s cn52xxp1;
+ struct cvmx_pip_dec_ipsecx_s cn56xx;
+ struct cvmx_pip_dec_ipsecx_s cn56xxp1;
+ struct cvmx_pip_dec_ipsecx_s cn58xx;
+ struct cvmx_pip_dec_ipsecx_s cn58xxp1;
+ struct cvmx_pip_dec_ipsecx_s cn61xx;
+ struct cvmx_pip_dec_ipsecx_s cn63xx;
+ struct cvmx_pip_dec_ipsecx_s cn63xxp1;
+ struct cvmx_pip_dec_ipsecx_s cn66xx;
+ struct cvmx_pip_dec_ipsecx_s cn68xx;
+ struct cvmx_pip_dec_ipsecx_s cn68xxp1;
+ struct cvmx_pip_dec_ipsecx_s cnf71xx;
+};
+typedef union cvmx_pip_dec_ipsecx cvmx_pip_dec_ipsecx_t;
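+
+/* Illustrative sketch (not part of the original SDK header): programming
+ * PIP_DEC_IPSEC0 to flag UDP packets with destination port 4500 (IPsec
+ * NAT-T). Assumes the CVMX_PIP_DEC_IPSECX() address macro defined earlier
+ * in this file and cvmx_write_csr() from cvmx-access.h. */
+static inline void __cvmx_pip_dec_ipsec_example(void)
+{
+    cvmx_pip_dec_ipsecx_t watch;
+
+    watch.u64 = 0;
+    watch.s.udp = 1;     /* match UDP packets */
+    watch.s.tcp = 0;     /* do not match TCP packets */
+    watch.s.dprt = 4500; /* destination port to match on */
+    cvmx_write_csr(CVMX_PIP_DEC_IPSECX(0), watch.u64);
+}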
+
+/**
+ * cvmx_pip_dsa_src_grp
+ */
+union cvmx_pip_dsa_src_grp {
+ uint64_t u64;
+ struct cvmx_pip_dsa_src_grp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t map15 : 4; /**< DSA Group Algorithm */
+ uint64_t map14 : 4; /**< DSA Group Algorithm */
+ uint64_t map13 : 4; /**< DSA Group Algorithm */
+ uint64_t map12 : 4; /**< DSA Group Algorithm */
+ uint64_t map11 : 4; /**< DSA Group Algorithm */
+ uint64_t map10 : 4; /**< DSA Group Algorithm */
+ uint64_t map9 : 4; /**< DSA Group Algorithm */
+ uint64_t map8 : 4; /**< DSA Group Algorithm */
+ uint64_t map7 : 4; /**< DSA Group Algorithm */
+ uint64_t map6 : 4; /**< DSA Group Algorithm */
+ uint64_t map5 : 4; /**< DSA Group Algorithm */
+ uint64_t map4 : 4; /**< DSA Group Algorithm */
+ uint64_t map3 : 4; /**< DSA Group Algorithm */
+ uint64_t map2 : 4; /**< DSA Group Algorithm */
+ uint64_t map1 : 4; /**< DSA Group Algorithm */
+ uint64_t map0 : 4; /**< DSA Group Algorithm
+ Use the DSA source id to compute GRP */
+#else
+ uint64_t map0 : 4;
+ uint64_t map1 : 4;
+ uint64_t map2 : 4;
+ uint64_t map3 : 4;
+ uint64_t map4 : 4;
+ uint64_t map5 : 4;
+ uint64_t map6 : 4;
+ uint64_t map7 : 4;
+ uint64_t map8 : 4;
+ uint64_t map9 : 4;
+ uint64_t map10 : 4;
+ uint64_t map11 : 4;
+ uint64_t map12 : 4;
+ uint64_t map13 : 4;
+ uint64_t map14 : 4;
+ uint64_t map15 : 4;
+#endif
+ } s;
+ struct cvmx_pip_dsa_src_grp_s cn52xx;
+ struct cvmx_pip_dsa_src_grp_s cn52xxp1;
+ struct cvmx_pip_dsa_src_grp_s cn56xx;
+ struct cvmx_pip_dsa_src_grp_s cn61xx;
+ struct cvmx_pip_dsa_src_grp_s cn63xx;
+ struct cvmx_pip_dsa_src_grp_s cn63xxp1;
+ struct cvmx_pip_dsa_src_grp_s cn66xx;
+ struct cvmx_pip_dsa_src_grp_s cn68xx;
+ struct cvmx_pip_dsa_src_grp_s cn68xxp1;
+ struct cvmx_pip_dsa_src_grp_s cnf71xx;
+};
+typedef union cvmx_pip_dsa_src_grp cvmx_pip_dsa_src_grp_t;
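+
+/* Illustrative sketch (not part of the original SDK header): filling
+ * PIP_DSA_SRC_GRP with an identity mapping, so DSA source id N selects
+ * group N. Each MAPn nibble occupies bits [4n+3:4n]. Assumes the
+ * CVMX_PIP_DSA_SRC_GRP address macro defined earlier in this file and
+ * cvmx_write_csr() from cvmx-access.h. */
+static inline void __cvmx_pip_dsa_src_grp_example(void)
+{
+    uint64_t map = 0;
+    int i;
+
+    for (i = 0; i < 16; i++)
+        map |= (uint64_t)i << (4 * i); /* source id i -> group i */
+    cvmx_write_csr(CVMX_PIP_DSA_SRC_GRP, map);
+}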
+
+/**
+ * cvmx_pip_dsa_vid_grp
+ */
+union cvmx_pip_dsa_vid_grp {
+ uint64_t u64;
+ struct cvmx_pip_dsa_vid_grp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t map15 : 4; /**< DSA Group Algorithm */
+ uint64_t map14 : 4; /**< DSA Group Algorithm */
+ uint64_t map13 : 4; /**< DSA Group Algorithm */
+ uint64_t map12 : 4; /**< DSA Group Algorithm */
+ uint64_t map11 : 4; /**< DSA Group Algorithm */
+ uint64_t map10 : 4; /**< DSA Group Algorithm */
+ uint64_t map9 : 4; /**< DSA Group Algorithm */
+ uint64_t map8 : 4; /**< DSA Group Algorithm */
+ uint64_t map7 : 4; /**< DSA Group Algorithm */
+ uint64_t map6 : 4; /**< DSA Group Algorithm */
+ uint64_t map5 : 4; /**< DSA Group Algorithm */
+ uint64_t map4 : 4; /**< DSA Group Algorithm */
+ uint64_t map3 : 4; /**< DSA Group Algorithm */
+ uint64_t map2 : 4; /**< DSA Group Algorithm */
+ uint64_t map1 : 4; /**< DSA Group Algorithm */
+ uint64_t map0 : 4; /**< DSA Group Algorithm
+ Use the DSA VLAN id to compute GRP */
+#else
+ uint64_t map0 : 4;
+ uint64_t map1 : 4;
+ uint64_t map2 : 4;
+ uint64_t map3 : 4;
+ uint64_t map4 : 4;
+ uint64_t map5 : 4;
+ uint64_t map6 : 4;
+ uint64_t map7 : 4;
+ uint64_t map8 : 4;
+ uint64_t map9 : 4;
+ uint64_t map10 : 4;
+ uint64_t map11 : 4;
+ uint64_t map12 : 4;
+ uint64_t map13 : 4;
+ uint64_t map14 : 4;
+ uint64_t map15 : 4;
+#endif
+ } s;
+ struct cvmx_pip_dsa_vid_grp_s cn52xx;
+ struct cvmx_pip_dsa_vid_grp_s cn52xxp1;
+ struct cvmx_pip_dsa_vid_grp_s cn56xx;
+ struct cvmx_pip_dsa_vid_grp_s cn61xx;
+ struct cvmx_pip_dsa_vid_grp_s cn63xx;
+ struct cvmx_pip_dsa_vid_grp_s cn63xxp1;
+ struct cvmx_pip_dsa_vid_grp_s cn66xx;
+ struct cvmx_pip_dsa_vid_grp_s cn68xx;
+ struct cvmx_pip_dsa_vid_grp_s cn68xxp1;
+ struct cvmx_pip_dsa_vid_grp_s cnf71xx;
+};
+typedef union cvmx_pip_dsa_vid_grp cvmx_pip_dsa_vid_grp_t;
+
+/**
+ * cvmx_pip_frm_len_chk#
+ *
+ * Notes:
+ * PIP_FRM_LEN_CHK0 is used for packets on packet interface0, PCI, PCI RAW, and PKO loopback ports.
+ * PIP_FRM_LEN_CHK1 is unused.
+ */
+union cvmx_pip_frm_len_chkx {
+ uint64_t u64;
+ struct cvmx_pip_frm_len_chkx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t maxlen : 16; /**< Byte count for Max-sized frame check
+ PIP_PRT_CFGn[MAXERR_EN] enables the check for
+ port n.
+ If enabled, failing packets set the MAXERR
+ interrupt and work-queue entry WORD2[opcode] is
+ set to OVER_FCS (0x3, if packet has bad FCS) or
+ OVER_ERR (0x4, if packet has good FCS).
+ The effective MAXLEN used by HW is
+ PIP_PRT_CFG[DSA_EN] == 0,
+ PIP_FRM_LEN_CHK[MAXLEN] + 4*VV + 4*VS
+ PIP_PRT_CFG[DSA_EN] == 1,
+ PIP_FRM_LEN_CHK[MAXLEN] + PIP_PRT_CFG[SKIP]+4*VS
+ If PTP_MODE, the 8B timestamp is prepended to the
+ packet. MAXLEN should be increased by 8 to
+ compensate for the additional timestamp field. */
+ uint64_t minlen : 16; /**< Byte count for Min-sized frame check
+ PIP_PRT_CFGn[MINERR_EN] enables the check for
+ port n.
+ If enabled, failing packets set the MINERR
+ interrupt and work-queue entry WORD2[opcode] is
+ set to UNDER_FCS (0x6, if packet has bad FCS) or
+ UNDER_ERR (0x8, if packet has good FCS).
+ If PTP_MODE, the 8B timestamp is prepended to the
+ packet. MINLEN should be increased by 8 to
+ compensate for the additional timestamp field. */
+#else
+ uint64_t minlen : 16;
+ uint64_t maxlen : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pip_frm_len_chkx_s cn50xx;
+ struct cvmx_pip_frm_len_chkx_s cn52xx;
+ struct cvmx_pip_frm_len_chkx_s cn52xxp1;
+ struct cvmx_pip_frm_len_chkx_s cn56xx;
+ struct cvmx_pip_frm_len_chkx_s cn56xxp1;
+ struct cvmx_pip_frm_len_chkx_s cn61xx;
+ struct cvmx_pip_frm_len_chkx_s cn63xx;
+ struct cvmx_pip_frm_len_chkx_s cn63xxp1;
+ struct cvmx_pip_frm_len_chkx_s cn66xx;
+ struct cvmx_pip_frm_len_chkx_s cn68xx;
+ struct cvmx_pip_frm_len_chkx_s cn68xxp1;
+ struct cvmx_pip_frm_len_chkx_s cnf71xx;
+};
+typedef union cvmx_pip_frm_len_chkx cvmx_pip_frm_len_chkx_t;
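+
+/* Illustrative sketch (not part of the original SDK header): programming
+ * PIP_FRM_LEN_CHK0 for standard Ethernet limits, widening both bounds by 8B
+ * when PTP_MODE prepends a timestamp, as the MINLEN/MAXLEN notes above
+ * direct. Assumes the CVMX_PIP_FRM_LEN_CHKX() address macro defined earlier
+ * in this file and cvmx_write_csr() from cvmx-access.h. */
+static inline void __cvmx_pip_frm_len_chk_example(int ptp_mode)
+{
+    cvmx_pip_frm_len_chkx_t chk;
+
+    chk.u64 = 0;
+    chk.s.minlen = 64 + (ptp_mode ? 8 : 0);   /* minimum Ethernet frame */
+    chk.s.maxlen = 1518 + (ptp_mode ? 8 : 0); /* maximum untagged frame */
+    cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(0), chk.u64);
+}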
+
+/**
+ * cvmx_pip_gbl_cfg
+ *
+ * PIP_GBL_CFG = PIP's Global Config Register
+ *
+ * Global config information that applies to all ports.
+ *
+ * Notes:
+ * * IP6_UDP
+ * IPv4 allows an optional UDP checksum by sending the all-0s pattern. IPv6
+ * outlaws this and the spec says to always check the UDP checksum. This
+ * mode bit allows the user to treat IPv6 as IPv4, meaning that the all-0s
+ * pattern will cause a UDP checksum pass.
+ */
+union cvmx_pip_gbl_cfg {
+ uint64_t u64;
+ struct cvmx_pip_gbl_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t tag_syn : 1; /**< Do not include src_crc for TCP/SYN&!ACK packets
+ 0 = include src_crc
+ 1 = tag hash is dst_crc for TCP/SYN&!ACK packets */
+ uint64_t ip6_udp : 1; /**< IPv6/UDP checksum is not optional
+ 0 = Allow optional checksum code
+ 1 = Do not allow optional checksum code */
+ uint64_t max_l2 : 1; /**< Config bit to choose the largest L2 frame size
+ Chooses the value of the L2 Type/Length field
+ to classify the frame as length.
+ 0 = 1500 / 0x5dc
+ 1 = 1535 / 0x5ff */
+ uint64_t reserved_11_15 : 5;
+ uint64_t raw_shf : 3; /**< RAW Packet shift amount
+ Number of bytes to pad a RAW packet. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t nip_shf : 3; /**< Non-IP shift amount
+ Number of bytes to pad a packet that has been
+ classified as not IP. */
+#else
+ uint64_t nip_shf : 3;
+ uint64_t reserved_3_7 : 5;
+ uint64_t raw_shf : 3;
+ uint64_t reserved_11_15 : 5;
+ uint64_t max_l2 : 1;
+ uint64_t ip6_udp : 1;
+ uint64_t tag_syn : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_pip_gbl_cfg_s cn30xx;
+ struct cvmx_pip_gbl_cfg_s cn31xx;
+ struct cvmx_pip_gbl_cfg_s cn38xx;
+ struct cvmx_pip_gbl_cfg_s cn38xxp2;
+ struct cvmx_pip_gbl_cfg_s cn50xx;
+ struct cvmx_pip_gbl_cfg_s cn52xx;
+ struct cvmx_pip_gbl_cfg_s cn52xxp1;
+ struct cvmx_pip_gbl_cfg_s cn56xx;
+ struct cvmx_pip_gbl_cfg_s cn56xxp1;
+ struct cvmx_pip_gbl_cfg_s cn58xx;
+ struct cvmx_pip_gbl_cfg_s cn58xxp1;
+ struct cvmx_pip_gbl_cfg_s cn61xx;
+ struct cvmx_pip_gbl_cfg_s cn63xx;
+ struct cvmx_pip_gbl_cfg_s cn63xxp1;
+ struct cvmx_pip_gbl_cfg_s cn66xx;
+ struct cvmx_pip_gbl_cfg_s cn68xx;
+ struct cvmx_pip_gbl_cfg_s cn68xxp1;
+ struct cvmx_pip_gbl_cfg_s cnf71xx;
+};
+typedef union cvmx_pip_gbl_cfg cvmx_pip_gbl_cfg_t;
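+
+/* Illustrative sketch (not part of the original SDK header): clearing the
+ * IP6_UDP mode bit so an all-0s IPv6/UDP checksum passes, treating IPv6
+ * like IPv4 per the notes above. Assumes the CVMX_PIP_GBL_CFG address macro
+ * defined earlier in this file and the cvmx_read_csr()/cvmx_write_csr()
+ * accessors from cvmx-access.h. */
+static inline void __cvmx_pip_gbl_cfg_example(void)
+{
+    cvmx_pip_gbl_cfg_t cfg;
+
+    cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG);
+    cfg.s.ip6_udp = 0; /* 0 = allow the optional (all-0s) checksum */
+    cvmx_write_csr(CVMX_PIP_GBL_CFG, cfg.u64);
+}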
+
+/**
+ * cvmx_pip_gbl_ctl
+ *
+ * PIP_GBL_CTL = PIP's Global Control Register
+ *
+ * Global control information. These are the global checker enables for
+ * IPv4/IPv6 and TCP/UDP parsing. The enables affect all ports.
+ *
+ * Notes:
+ * The following text describes the conditions in which each checker will
+ * assert and flag an exception. By disabling the checker, the exception will
+ * not be flagged and the packet will be parsed as best it can. Note that by
+ * disabling these checks, packets can be parsed incorrectly (i.e. IP_MAL and
+ * L4_MAL could cause bits to be seen in the wrong place; IP_CHK and L4_CHK
+ * mean that the packet was corrupted).
+ *
+ * * IP_CHK
+ * Indicates that an IPv4 packet contained an IPv4 header checksum
+ * violation. Only applies to packets classified as IPv4.
+ *
+ * * IP_MAL
+ * Indicates that the packet was malformed. Malformed packets are defined as
+ * packets that are not long enough to cover the IP header or not long enough
+ * to cover the length in the IP header.
+ *
+ * * IP_HOP
+ * Indicates that the IPv4 TTL field or IPv6 HOP field is zero.
+ *
+ * * IP4_OPTS
+ * Indicates the presence of IPv4 options. It is set when the IPv4 header
+ * length (IHL) field != 5.
+ * This only applies to packets classified as IPv4.
+ *
+ * * IP6_EEXT
+ * Indicates the presence of IPv6 early extension headers. These bits only
+ * apply to packets classified as IPv6. Bit 0 will flag early extensions
+ * when next_header is any one of the following...
+ *
+ * - hop-by-hop (0)
+ * - destination (60)
+ * - routing (43)
+ *
+ * Bit 1 will flag early extensions when next_header is NOT any of the
+ * following...
+ *
+ * - TCP (6)
+ * - UDP (17)
+ * - fragmentation (44)
+ * - ICMP (58)
+ * - IPSEC ESP (50)
+ * - IPSEC AH (51)
+ * - IPCOMP
+ *
+ * * L4_MAL
+ * Indicates that a TCP or UDP packet is not long enough to cover the TCP or
+ * UDP header.
+ *
+ * * L4_PRT
+ * Indicates that a TCP or UDP packet has an illegal port number - either the
+ * source or destination port is zero.
+ *
+ * * L4_CHK
+ * Indicates that a packet classified as either TCP or UDP contains an L4
+ * checksum failure.
+ *
+ * * L4_LEN
+ * Indicates that the TCP or UDP length does not match the IP length.
+ *
+ * * TCP_FLAG
+ * Indicates any of the following conditions...
+ *
+ * [URG, ACK, PSH, RST, SYN, FIN] : tcp_flag
+ * 6'b000001: (FIN only)
+ * 6'b000000: (0)
+ * 6'bxxx1x1: (RST+FIN+*)
+ * 6'b1xxx1x: (URG+SYN+*)
+ * 6'bxxx11x: (RST+SYN+*)
+ * 6'bxxxx11: (SYN+FIN+*)
+ */
+union cvmx_pip_gbl_ctl {
+ uint64_t u64;
+ struct cvmx_pip_gbl_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t egrp_dis : 1; /**< PKT_INST_HDR extended group field disable
+ When set, HW will ignore the EGRP field of the
+ PKT_INST_HDR - bits 47:46. */
+ uint64_t ihmsk_dis : 1; /**< Instruction Header Mask Disable
+ 0=Allow NTAG,NTT,NGRP,NQOS bits in the
+ instruction header to control which fields from
+ the instruction header are used for WQE WORD2.
+ 1=Ignore the NTAG,NTT,NGRP,NQOS bits in the
+ instruction header and act as if these fields
+ were zero. Thus always use the TAG,TT,GRP,QOS
+ (depending on the instruction header length)
+ from the instruction header for the WQE WORD2. */
+ uint64_t dsa_grp_tvid : 1; /**< DSA Group Algorithm
+ Use the DSA VLAN id to compute GRP */
+ uint64_t dsa_grp_scmd : 1; /**< DSA Group Algorithm
+ Use the DSA source id to compute GRP when the
+ DSA tag command is TO_CPU */
+ uint64_t dsa_grp_sid : 1; /**< DSA Group Algorithm
+ Use the DSA source id to compute GRP */
+ uint64_t reserved_21_23 : 3;
+ uint64_t ring_en : 1; /**< Enable DPI ring information in WQE */
+ uint64_t reserved_17_19 : 3;
+ uint64_t ignrs : 1; /**< Ignore the PKT_INST_HDR[RS] bit when set
+ Does not apply to DPI ports (32-35)
+ When using 2-byte instruction header words,
+ either PIP_PRT_CFG[DYN_RS] or IGNRS should be set */
+ uint64_t vs_wqe : 1; /**< Which DSA/VLAN CFI/ID to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t vs_qos : 1; /**< Which DSA/VLAN priority to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t l2_mal : 1; /**< Enable L2 malformed packet check */
+ uint64_t tcp_flag : 1; /**< Enable TCP flags checks */
+ uint64_t l4_len : 1; /**< Enable TCP/UDP length check */
+ uint64_t l4_chk : 1; /**< Enable TCP/UDP checksum check */
+ uint64_t l4_prt : 1; /**< Enable TCP/UDP illegal port check */
+ uint64_t l4_mal : 1; /**< Enable TCP/UDP malformed packet check */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ip6_eext : 2; /**< Enable IPv6 early extension headers */
+ uint64_t ip4_opts : 1; /**< Enable IPv4 options check */
+ uint64_t ip_hop : 1; /**< Enable TTL (IPv4) / hop (IPv6) check */
+ uint64_t ip_mal : 1; /**< Enable malformed check */
+ uint64_t ip_chk : 1; /**< Enable IPv4 header checksum check */
+#else
+ uint64_t ip_chk : 1;
+ uint64_t ip_mal : 1;
+ uint64_t ip_hop : 1;
+ uint64_t ip4_opts : 1;
+ uint64_t ip6_eext : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t l4_mal : 1;
+ uint64_t l4_prt : 1;
+ uint64_t l4_chk : 1;
+ uint64_t l4_len : 1;
+ uint64_t tcp_flag : 1;
+ uint64_t l2_mal : 1;
+ uint64_t vs_qos : 1;
+ uint64_t vs_wqe : 1;
+ uint64_t ignrs : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t ring_en : 1;
+ uint64_t reserved_21_23 : 3;
+ uint64_t dsa_grp_sid : 1;
+ uint64_t dsa_grp_scmd : 1;
+ uint64_t dsa_grp_tvid : 1;
+ uint64_t ihmsk_dis : 1;
+ uint64_t egrp_dis : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_pip_gbl_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t ignrs : 1; /**< Ignore the PKT_INST_HDR[RS] bit when set
+ Only applies to the packet interface prts (0-31)
+ When using 2-byte instruction header words,
+ either PIP_PRT_CFG[DYN_RS] or IGNRS should be set */
+ uint64_t vs_wqe : 1; /**< Which VLAN CFI and ID to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t vs_qos : 1; /**< Which VLAN priority to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t l2_mal : 1; /**< Enable L2 malformed packet check */
+ uint64_t tcp_flag : 1; /**< Enable TCP flags checks */
+ uint64_t l4_len : 1; /**< Enable TCP/UDP length check */
+ uint64_t l4_chk : 1; /**< Enable TCP/UDP checksum check */
+ uint64_t l4_prt : 1; /**< Enable TCP/UDP illegal port check */
+ uint64_t l4_mal : 1; /**< Enable TCP/UDP malformed packet check */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ip6_eext : 2; /**< Enable IPv6 early extension headers */
+ uint64_t ip4_opts : 1; /**< Enable IPv4 options check */
+ uint64_t ip_hop : 1; /**< Enable TTL (IPv4) / hop (IPv6) check */
+ uint64_t ip_mal : 1; /**< Enable malformed check */
+ uint64_t ip_chk : 1; /**< Enable IPv4 header checksum check */
+#else
+ uint64_t ip_chk : 1;
+ uint64_t ip_mal : 1;
+ uint64_t ip_hop : 1;
+ uint64_t ip4_opts : 1;
+ uint64_t ip6_eext : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t l4_mal : 1;
+ uint64_t l4_prt : 1;
+ uint64_t l4_chk : 1;
+ uint64_t l4_len : 1;
+ uint64_t tcp_flag : 1;
+ uint64_t l2_mal : 1;
+ uint64_t vs_qos : 1;
+ uint64_t vs_wqe : 1;
+ uint64_t ignrs : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn30xx;
+ struct cvmx_pip_gbl_ctl_cn30xx cn31xx;
+ struct cvmx_pip_gbl_ctl_cn30xx cn38xx;
+ struct cvmx_pip_gbl_ctl_cn30xx cn38xxp2;
+ struct cvmx_pip_gbl_ctl_cn30xx cn50xx;
+ struct cvmx_pip_gbl_ctl_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t dsa_grp_tvid : 1; /**< DSA Group Algorithm
+ Use the DSA VLAN id to compute GRP */
+ uint64_t dsa_grp_scmd : 1; /**< DSA Group Algorithm
+ Use the DSA source id to compute GRP when the
+ DSA tag command is TO_CPU */
+ uint64_t dsa_grp_sid : 1; /**< DSA Group Algorithm
+ Use the DSA source id to compute GRP */
+ uint64_t reserved_21_23 : 3;
+ uint64_t ring_en : 1; /**< Enable PCIe ring information in WQE */
+ uint64_t reserved_17_19 : 3;
+ uint64_t ignrs : 1; /**< Ignore the PKT_INST_HDR[RS] bit when set
+ Does not apply to PCI ports (32-35)
+ When using 2-byte instruction header words,
+ either PIP_PRT_CFG[DYN_RS] or IGNRS should be set */
+ uint64_t vs_wqe : 1; /**< Which DSA/VLAN CFI/ID to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t vs_qos : 1; /**< Which DSA/VLAN priority to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t l2_mal : 1; /**< Enable L2 malformed packet check */
+ uint64_t tcp_flag : 1; /**< Enable TCP flags checks */
+ uint64_t l4_len : 1; /**< Enable TCP/UDP length check */
+ uint64_t l4_chk : 1; /**< Enable TCP/UDP checksum check */
+ uint64_t l4_prt : 1; /**< Enable TCP/UDP illegal port check */
+ uint64_t l4_mal : 1; /**< Enable TCP/UDP malformed packet check */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ip6_eext : 2; /**< Enable IPv6 early extension headers */
+ uint64_t ip4_opts : 1; /**< Enable IPv4 options check */
+ uint64_t ip_hop : 1; /**< Enable TTL (IPv4) / hop (IPv6) check */
+ uint64_t ip_mal : 1; /**< Enable malformed check */
+ uint64_t ip_chk : 1; /**< Enable IPv4 header checksum check */
+#else
+ uint64_t ip_chk : 1;
+ uint64_t ip_mal : 1;
+ uint64_t ip_hop : 1;
+ uint64_t ip4_opts : 1;
+ uint64_t ip6_eext : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t l4_mal : 1;
+ uint64_t l4_prt : 1;
+ uint64_t l4_chk : 1;
+ uint64_t l4_len : 1;
+ uint64_t tcp_flag : 1;
+ uint64_t l2_mal : 1;
+ uint64_t vs_qos : 1;
+ uint64_t vs_wqe : 1;
+ uint64_t ignrs : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t ring_en : 1;
+ uint64_t reserved_21_23 : 3;
+ uint64_t dsa_grp_sid : 1;
+ uint64_t dsa_grp_scmd : 1;
+ uint64_t dsa_grp_tvid : 1;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn52xx;
+ struct cvmx_pip_gbl_ctl_cn52xx cn52xxp1;
+ struct cvmx_pip_gbl_ctl_cn52xx cn56xx;
+ struct cvmx_pip_gbl_ctl_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t ring_en : 1; /**< Enable PCIe ring information in WQE */
+ uint64_t reserved_17_19 : 3;
+ uint64_t ignrs : 1; /**< Ignore the PKT_INST_HDR[RS] bit when set
+ Does not apply to PCI ports (32-35)
+ When using 2-byte instruction header words,
+ either PIP_PRT_CFG[DYN_RS] or IGNRS should be set */
+ uint64_t vs_wqe : 1; /**< Which VLAN CFI and ID to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t vs_qos : 1; /**< Which VLAN priority to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t l2_mal : 1; /**< Enable L2 malformed packet check */
+ uint64_t tcp_flag : 1; /**< Enable TCP flags checks */
+ uint64_t l4_len : 1; /**< Enable TCP/UDP length check */
+ uint64_t l4_chk : 1; /**< Enable TCP/UDP checksum check */
+ uint64_t l4_prt : 1; /**< Enable TCP/UDP illegal port check */
+ uint64_t l4_mal : 1; /**< Enable TCP/UDP malformed packet check */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ip6_eext : 2; /**< Enable IPv6 early extension headers */
+ uint64_t ip4_opts : 1; /**< Enable IPv4 options check */
+ uint64_t ip_hop : 1; /**< Enable TTL (IPv4) / hop (IPv6) check */
+ uint64_t ip_mal : 1; /**< Enable malformed check */
+ uint64_t ip_chk : 1; /**< Enable IPv4 header checksum check */
+#else
+ uint64_t ip_chk : 1;
+ uint64_t ip_mal : 1;
+ uint64_t ip_hop : 1;
+ uint64_t ip4_opts : 1;
+ uint64_t ip6_eext : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t l4_mal : 1;
+ uint64_t l4_prt : 1;
+ uint64_t l4_chk : 1;
+ uint64_t l4_len : 1;
+ uint64_t tcp_flag : 1;
+ uint64_t l2_mal : 1;
+ uint64_t vs_qos : 1;
+ uint64_t vs_wqe : 1;
+ uint64_t ignrs : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t ring_en : 1;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } cn56xxp1;
+ struct cvmx_pip_gbl_ctl_cn30xx cn58xx;
+ struct cvmx_pip_gbl_ctl_cn30xx cn58xxp1;
+ struct cvmx_pip_gbl_ctl_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t ihmsk_dis : 1; /**< Instruction Header Mask Disable
+ 0=Allow NTAG,NTT,NGRP,NQOS bits in the
+ instruction header to control which fields from
+ the instruction header are used for WQE WORD2.
+ 1=Ignore the NTAG,NTT,NGRP,NQOS bits in the
+ instruction header and act as if these fields
+ were zero. Thus always use the TAG,TT,GRP,QOS
+ (depending on the instruction header length)
+ from the instruction header for the WQE WORD2. */
+ uint64_t dsa_grp_tvid : 1; /**< DSA Group Algorithm
+ Use the DSA VLAN id to compute GRP */
+ uint64_t dsa_grp_scmd : 1; /**< DSA Group Algorithm
+ Use the DSA source id to compute GRP when the
+ DSA tag command is TO_CPU */
+ uint64_t dsa_grp_sid : 1; /**< DSA Group Algorithm
+ Use the DSA source id to compute GRP */
+ uint64_t reserved_21_23 : 3;
+ uint64_t ring_en : 1; /**< Enable DPI ring information in WQE */
+ uint64_t reserved_17_19 : 3;
+ uint64_t ignrs : 1; /**< Ignore the PKT_INST_HDR[RS] bit when set
+ Does not apply to DPI ports (32-35)
+ When using 2-byte instruction header words,
+ either PIP_PRT_CFG[DYN_RS] or IGNRS should be set */
+ uint64_t vs_wqe : 1; /**< Which DSA/VLAN CFI/ID to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t vs_qos : 1; /**< Which DSA/VLAN priority to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t l2_mal : 1; /**< Enable L2 malformed packet check */
+ uint64_t tcp_flag : 1; /**< Enable TCP flags checks */
+ uint64_t l4_len : 1; /**< Enable TCP/UDP length check */
+ uint64_t l4_chk : 1; /**< Enable TCP/UDP checksum check */
+ uint64_t l4_prt : 1; /**< Enable TCP/UDP illegal port check */
+ uint64_t l4_mal : 1; /**< Enable TCP/UDP malformed packet check */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ip6_eext : 2; /**< Enable IPv6 early extension headers */
+ uint64_t ip4_opts : 1; /**< Enable IPv4 options check */
+ uint64_t ip_hop : 1; /**< Enable TTL (IPv4) / hop (IPv6) check */
+ uint64_t ip_mal : 1; /**< Enable malformed check */
+ uint64_t ip_chk : 1; /**< Enable IPv4 header checksum check */
+#else
+ uint64_t ip_chk : 1;
+ uint64_t ip_mal : 1;
+ uint64_t ip_hop : 1;
+ uint64_t ip4_opts : 1;
+ uint64_t ip6_eext : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t l4_mal : 1;
+ uint64_t l4_prt : 1;
+ uint64_t l4_chk : 1;
+ uint64_t l4_len : 1;
+ uint64_t tcp_flag : 1;
+ uint64_t l2_mal : 1;
+ uint64_t vs_qos : 1;
+ uint64_t vs_wqe : 1;
+ uint64_t ignrs : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t ring_en : 1;
+ uint64_t reserved_21_23 : 3;
+ uint64_t dsa_grp_sid : 1;
+ uint64_t dsa_grp_scmd : 1;
+ uint64_t dsa_grp_tvid : 1;
+ uint64_t ihmsk_dis : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn61xx;
+ struct cvmx_pip_gbl_ctl_cn61xx cn63xx;
+ struct cvmx_pip_gbl_ctl_cn61xx cn63xxp1;
+ struct cvmx_pip_gbl_ctl_cn61xx cn66xx;
+ struct cvmx_pip_gbl_ctl_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t egrp_dis : 1; /**< PKT_INST_HDR extended group field disable
+ When set, HW will ignore the EGRP field of the
+ PKT_INST_HDR - bits 47:46. */
+ uint64_t ihmsk_dis : 1; /**< Instruction Header Mask Disable
+ 0=Allow NTAG,NTT,NGRP,NQOS bits in the
+ instruction header to control which fields from
+ the instruction header are used for WQE WORD2.
+ 1=Ignore the NTAG,NTT,NGRP,NQOS bits in the
+ instruction header and act as if these fields
+ were zero. Thus always use the TAG,TT,GRP,QOS
+ (depending on the instruction header length)
+ from the instruction header for the WQE WORD2. */
+ uint64_t dsa_grp_tvid : 1; /**< DSA Group Algorithm
+ Use the DSA VLAN id to compute GRP */
+ uint64_t dsa_grp_scmd : 1; /**< DSA Group Algorithm
+ Use the DSA source id to compute GRP when the
+ DSA tag command is TO_CPU */
+ uint64_t dsa_grp_sid : 1; /**< DSA Group Algorithm
+ Use the DSA source id to compute GRP */
+ uint64_t reserved_17_23 : 7;
+ uint64_t ignrs : 1; /**< Ignore the PKT_INST_HDR[RS] bit when set
+ When using 2-byte instruction header words,
+ either PIP_PRT_CFG[DYN_RS] or IGNRS should be set */
+ uint64_t vs_wqe : 1; /**< Which DSA/VLAN CFI/ID to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t vs_qos : 1; /**< Which DSA/VLAN priority to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t l2_mal : 1; /**< Enable L2 malformed packet check */
+ uint64_t tcp_flag : 1; /**< Enable TCP flags checks */
+ uint64_t l4_len : 1; /**< Enable TCP/UDP length check */
+ uint64_t l4_chk : 1; /**< Enable TCP/UDP checksum check */
+ uint64_t l4_prt : 1; /**< Enable TCP/UDP illegal port check */
+ uint64_t l4_mal : 1; /**< Enable TCP/UDP malformed packet check */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ip6_eext : 2; /**< Enable IPv6 early extension headers */
+ uint64_t ip4_opts : 1; /**< Enable IPv4 options check */
+ uint64_t ip_hop : 1; /**< Enable TTL (IPv4) / hop (IPv6) check */
+ uint64_t ip_mal : 1; /**< Enable malformed check */
+ uint64_t ip_chk : 1; /**< Enable IPv4 header checksum check */
+#else
+ uint64_t ip_chk : 1;
+ uint64_t ip_mal : 1;
+ uint64_t ip_hop : 1;
+ uint64_t ip4_opts : 1;
+ uint64_t ip6_eext : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t l4_mal : 1;
+ uint64_t l4_prt : 1;
+ uint64_t l4_chk : 1;
+ uint64_t l4_len : 1;
+ uint64_t tcp_flag : 1;
+ uint64_t l2_mal : 1;
+ uint64_t vs_qos : 1;
+ uint64_t vs_wqe : 1;
+ uint64_t ignrs : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t dsa_grp_sid : 1;
+ uint64_t dsa_grp_scmd : 1;
+ uint64_t dsa_grp_tvid : 1;
+ uint64_t ihmsk_dis : 1;
+ uint64_t egrp_dis : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn68xx;
+ struct cvmx_pip_gbl_ctl_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t ihmsk_dis : 1; /**< Instruction Header Mask Disable
+ 0=Allow NTAG,NTT,NGRP,NQOS bits in the
+ instruction header to control which fields from
+ the instruction header are used for WQE WORD2.
+ 1=Ignore the NTAG,NTT,NGRP,NQOS bits in the
+ instruction header and act as if these fields
+ were zero. Thus always use the TAG,TT,GRP,QOS
+ (depending on the instruction header length)
+ from the instruction header for the WQE WORD2. */
+ uint64_t dsa_grp_tvid : 1; /**< DSA Group Algorithm
+ Use the DSA VLAN id to compute GRP */
+ uint64_t dsa_grp_scmd : 1; /**< DSA Group Algorithm
+ Use the DSA source id to compute GRP when the
+ DSA tag command is TO_CPU */
+ uint64_t dsa_grp_sid : 1; /**< DSA Group Algorithm
+ Use the DSA source id to compute GRP */
+ uint64_t reserved_17_23 : 7;
+ uint64_t ignrs : 1; /**< Ignore the PKT_INST_HDR[RS] bit when set
+ When using 2-byte instruction header words,
+ either PIP_PRT_CFG[DYN_RS] or IGNRS should be set */
+ uint64_t vs_wqe : 1; /**< Which DSA/VLAN CFI/ID to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t vs_qos : 1; /**< Which DSA/VLAN priority to use when VLAN Stacking
+ 0=use the 1st (network order) VLAN
+ 1=use the 2nd (network order) VLAN */
+ uint64_t l2_mal : 1; /**< Enable L2 malformed packet check */
+ uint64_t tcp_flag : 1; /**< Enable TCP flags checks */
+ uint64_t l4_len : 1; /**< Enable TCP/UDP length check */
+ uint64_t l4_chk : 1; /**< Enable TCP/UDP checksum check */
+ uint64_t l4_prt : 1; /**< Enable TCP/UDP illegal port check */
+ uint64_t l4_mal : 1; /**< Enable TCP/UDP malformed packet check */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ip6_eext : 2; /**< Enable IPv6 early extension headers */
+ uint64_t ip4_opts : 1; /**< Enable IPv4 options check */
+ uint64_t ip_hop : 1; /**< Enable TTL (IPv4) / hop (IPv6) check */
+ uint64_t ip_mal : 1; /**< Enable malformed check */
+ uint64_t ip_chk : 1; /**< Enable IPv4 header checksum check */
+#else
+ uint64_t ip_chk : 1;
+ uint64_t ip_mal : 1;
+ uint64_t ip_hop : 1;
+ uint64_t ip4_opts : 1;
+ uint64_t ip6_eext : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t l4_mal : 1;
+ uint64_t l4_prt : 1;
+ uint64_t l4_chk : 1;
+ uint64_t l4_len : 1;
+ uint64_t tcp_flag : 1;
+ uint64_t l2_mal : 1;
+ uint64_t vs_qos : 1;
+ uint64_t vs_wqe : 1;
+ uint64_t ignrs : 1;
+ uint64_t reserved_17_23 : 7;
+ uint64_t dsa_grp_sid : 1;
+ uint64_t dsa_grp_scmd : 1;
+ uint64_t dsa_grp_tvid : 1;
+ uint64_t ihmsk_dis : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn68xxp1;
+ struct cvmx_pip_gbl_ctl_cn61xx cnf71xx;
+};
+typedef union cvmx_pip_gbl_ctl cvmx_pip_gbl_ctl_t;
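+
+/* Illustrative sketch (not part of the original SDK header): enabling a
+ * typical set of the global checkers described in the notes above without
+ * disturbing the other fields. Assumes the CVMX_PIP_GBL_CTL address macro
+ * defined earlier in this file and the cvmx_read_csr()/cvmx_write_csr()
+ * accessors from cvmx-access.h. */
+static inline void __cvmx_pip_gbl_ctl_example(void)
+{
+    cvmx_pip_gbl_ctl_t ctl;
+
+    ctl.u64 = cvmx_read_csr(CVMX_PIP_GBL_CTL);
+    ctl.s.ip_chk = 1;  /* IPv4 header checksum check */
+    ctl.s.ip_mal = 1;  /* malformed IP check */
+    ctl.s.l4_mal = 1;  /* malformed TCP/UDP check */
+    ctl.s.l4_chk = 1;  /* TCP/UDP checksum check */
+    cvmx_write_csr(CVMX_PIP_GBL_CTL, ctl.u64);
+}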
+
+/**
+ * cvmx_pip_hg_pri_qos
+ *
+ * Notes:
+ * This register controls accesses to the HG_QOS_TABLE. To write an entry of
+ * the table, write PIP_HG_PRI_QOS with PRI=table address, QOS=priority level,
+ * UP_QOS=1. To read an entry of the table, write PIP_HG_PRI_QOS with
+ * PRI=table address, QOS=don't care, UP_QOS=0 and then read
+ * PIP_HG_PRI_QOS. The table data will be in PIP_HG_PRI_QOS[QOS].
+ */
+union cvmx_pip_hg_pri_qos {
+ uint64_t u64;
+ struct cvmx_pip_hg_pri_qos_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t up_qos : 1; /**< When written to '1', updates the entry in the
+ HG_QOS_TABLE as specified by PRI to a value of
+ QOS as follows
+ HG_QOS_TABLE[PRI] = QOS */
+ uint64_t reserved_11_11 : 1;
+ uint64_t qos : 3; /**< QOS Map level to priority */
+ uint64_t reserved_6_7 : 2;
+ uint64_t pri : 6; /**< The priority level from HiGig header
+ HiGig/HiGig+ PRI = [1'b0, CNG[1:0], COS[2:0]]
+ HiGig2 PRI = [DP[1:0], TC[3:0]] */
+#else
+ uint64_t pri : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t qos : 3;
+ uint64_t reserved_11_11 : 1;
+ uint64_t up_qos : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_pip_hg_pri_qos_s cn52xx;
+ struct cvmx_pip_hg_pri_qos_s cn52xxp1;
+ struct cvmx_pip_hg_pri_qos_s cn56xx;
+ struct cvmx_pip_hg_pri_qos_s cn61xx;
+ struct cvmx_pip_hg_pri_qos_s cn63xx;
+ struct cvmx_pip_hg_pri_qos_s cn63xxp1;
+ struct cvmx_pip_hg_pri_qos_s cn66xx;
+ struct cvmx_pip_hg_pri_qos_s cnf71xx;
+};
+typedef union cvmx_pip_hg_pri_qos cvmx_pip_hg_pri_qos_t;
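+
+/* Illustrative sketch (not part of the original SDK header): the
+ * HG_QOS_TABLE access protocol from the notes above. A write with UP_QOS=1
+ * updates an entry; a write with UP_QOS=0 followed by a read fetches one.
+ * Assumes the CVMX_PIP_HG_PRI_QOS address macro defined earlier in this
+ * file and the cvmx_read_csr()/cvmx_write_csr() accessors. */
+static inline uint64_t __cvmx_pip_hg_qos_table_read(unsigned pri)
+{
+    cvmx_pip_hg_pri_qos_t cmd;
+
+    cmd.u64 = 0;
+    cmd.s.pri = pri;  /* table address to fetch */
+    cmd.s.up_qos = 0; /* 0 = read access, QOS is don't care */
+    cvmx_write_csr(CVMX_PIP_HG_PRI_QOS, cmd.u64);
+    cmd.u64 = cvmx_read_csr(CVMX_PIP_HG_PRI_QOS);
+    return cmd.s.qos; /* table data */
+}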
+
+/**
+ * cvmx_pip_int_en
+ *
+ * PIP_INT_EN = PIP's Interrupt Enable Register
+ *
+ * Determines if hardware should raise an interrupt to software
+ * when an exception event occurs.
+ */
+union cvmx_pip_int_en {
+ uint64_t u64;
+ struct cvmx_pip_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC
+ stripping in IPD is enabled */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure */
+ uint64_t crcerr : 1; /**< PIP calculated bad CRC */
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t crcerr : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t minerr : 1;
+ uint64_t maxerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t punyerr : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_pip_int_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow
+ (not used in O2P) */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure
+ (not used in O2P) */
+ uint64_t crcerr : 1; /**< PIP calculated bad CRC
+ (not used in O2P) */
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t crcerr : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn30xx;
+ struct cvmx_pip_int_en_cn30xx cn31xx;
+ struct cvmx_pip_int_en_cn30xx cn38xx;
+ struct cvmx_pip_int_en_cn30xx cn38xxp2;
+ struct cvmx_pip_int_en_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t minerr : 1;
+ uint64_t maxerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn50xx;
+ struct cvmx_pip_int_en_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC
+ stripping in IPD is enabled */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t minerr : 1;
+ uint64_t maxerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t punyerr : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } cn52xx;
+ struct cvmx_pip_int_en_cn52xx cn52xxp1;
+ struct cvmx_pip_int_en_s cn56xx;
+ struct cvmx_pip_int_en_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure */
+ uint64_t crcerr : 1; /**< PIP calculated bad CRC
+ (Disabled in 56xx) */
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t crcerr : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t minerr : 1;
+ uint64_t maxerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xxp1;
+ struct cvmx_pip_int_en_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC
+ stripping in IPD is enabled */
+ uint64_t reserved_9_11 : 3;
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure */
+ uint64_t crcerr : 1; /**< PIP calculated bad CRC */
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t crcerr : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t punyerr : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } cn58xx;
+ struct cvmx_pip_int_en_cn30xx cn58xxp1;
+ struct cvmx_pip_int_en_s cn61xx;
+ struct cvmx_pip_int_en_s cn63xx;
+ struct cvmx_pip_int_en_s cn63xxp1;
+ struct cvmx_pip_int_en_s cn66xx;
+ struct cvmx_pip_int_en_s cn68xx;
+ struct cvmx_pip_int_en_s cn68xxp1;
+ struct cvmx_pip_int_en_s cnf71xx;
+};
+typedef union cvmx_pip_int_en cvmx_pip_int_en_t;
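+
+/* Illustrative sketch (not part of the original SDK header): enabling
+ * interrupts for the two parity-error events only; all other exceptions
+ * still latch in PIP_INT_REG but raise no interrupt. Assumes the
+ * CVMX_PIP_INT_EN address macro defined earlier in this file and
+ * cvmx_write_csr() from cvmx-access.h. */
+static inline void __cvmx_pip_int_en_example(void)
+{
+    cvmx_pip_int_en_t en;
+
+    en.u64 = 0;
+    en.s.beperr = 1; /* back end memory parity error */
+    en.s.feperr = 1; /* front end memory parity error */
+    cvmx_write_csr(CVMX_PIP_INT_EN, en.u64);
+}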
+
+/**
+ * cvmx_pip_int_reg
+ *
+ * PIP_INT_REG = PIP's Interrupt Register
+ *
+ * Any exception event that occurs is captured in the PIP_INT_REG.
+ * PIP_INT_REG will set the exception bit regardless of the value
+ * of PIP_INT_EN. PIP_INT_EN only controls if an interrupt is
+ * raised to software.
+ *
+ * Notes:
+ * * TODOOVR
+ * The PIP Todo list stores packets that have been received and require work
+ * queue entry generation. PIP will normally assert backpressure when the
+ * list fills up, so any error is normally the result of programming
+ * PIP_BCK_PRS[HIWATER] incorrectly. PIP itself can handle 29M packets/sec
+ * at 500MHz, or 15Gbps of 64B packets.
+ *
+ * * SKPRUNT
+ * If a packet size is less than the amount programmed in the per port
+ * skippers, then there will be nothing to parse and the entire packet will
+ * basically be skipped over. This is probably not what the user desired, so
+ * there is an indication to software.
+ *
+ * * BADTAG
+ * A tag is considered bad when it is reused by a new packet before it was
+ * released by PIP. PIP considers a tag released by one of two methods.
+ * . QOS dropped so that it is released over the pip__ipd_release bus.
+ * . WorkQ entry is validated by the pip__ipd_done signal
+ *
+ * * PRTNXA
+ * If PIP receives a packet that is not in the valid port range, the port
+ * processed will be mapped into the valid port space (the mapping is
+ * currently unpredictable) and the PRTNXA bit will be set. PRTNXA will be
+ * set for packets received under the following conditions:
+ *
+ * * packet ports (ports 0-31)
+ * - GMX_INF_MODE[TYPE]==0 (SGMII), received port is 4-15 or 20-31
+ * - GMX_INF_MODE[TYPE]==1 (XAUI), received port is 1-15 or 17-31
+ * * upper ports (pci and loopback ports 32-63)
+ * - received port is 40-47 or 52-63
+ *
+ * * BCKPRS
+ * PIP can assert backpressure to the receive logic when the todo list
+ * exceeds a high-water mark (see PIP_BCK_PRS for more details). When this
+ * occurs, PIP can raise an interrupt to software.
+ *
+ * * CRCERR
+ * Octane can compute CRC in two places. Each RGMII port will compute its
+ * own CRC, but PIP can provide an additional check, and can also check the
+ * loopback or PCI ports. If PIP computes a bad CRC, then PIP will raise an
+ * interrupt.
+ *
+ * * PKTDRP
+ * PIP can drop packets based on QOS results received from IPD. If the QOS
+ * algorithm decides to drop a packet, PIP will assert an interrupt.
+ */
+union cvmx_pip_int_reg {
+ uint64_t u64;
+ struct cvmx_pip_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC
+ stripping in IPD is enabled */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper
+ This interrupt can occur with received PARTIAL
+ packets that are truncated to SKIP bytes or
+ smaller. */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure */
+ uint64_t crcerr : 1; /**< PIP calculated bad CRC */
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t crcerr : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t minerr : 1;
+ uint64_t maxerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t punyerr : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_pip_int_reg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow
+ (not used in O2P) */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper
+ This interrupt can occur with received PARTIAL
+ packets that are truncated to SKIP bytes or
+ smaller. */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure
+ (not used in O2P) */
+ uint64_t crcerr : 1; /**< PIP calculated bad CRC
+ (not used in O2P) */
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t crcerr : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn30xx;
+ struct cvmx_pip_int_reg_cn30xx cn31xx;
+ struct cvmx_pip_int_reg_cn30xx cn38xx;
+ struct cvmx_pip_int_reg_cn30xx cn38xxp2;
+ struct cvmx_pip_int_reg_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper
+ This interrupt can occur with received PARTIAL
+ packets that are truncated to SKIP bytes or
+ smaller. */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t minerr : 1;
+ uint64_t maxerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn50xx;
+ struct cvmx_pip_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC
+ stripping in IPD is enabled */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper
+ This interrupt can occur with received PARTIAL
+ packets that are truncated to SKIP bytes or
+ smaller. */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t minerr : 1;
+ uint64_t maxerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t punyerr : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } cn52xx;
+ struct cvmx_pip_int_reg_cn52xx cn52xxp1;
+ struct cvmx_pip_int_reg_s cn56xx;
+ struct cvmx_pip_int_reg_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper
+ This interrupt can occur with received PARTIAL
+ packets that are truncated to SKIP bytes or
+ smaller. */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure */
+ uint64_t crcerr : 1; /**< PIP calculated bad CRC
+ (Disabled in 56xx) */
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t crcerr : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t minerr : 1;
+ uint64_t maxerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xxp1;
+ struct cvmx_pip_int_reg_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC
+ stripping in IPD is enabled */
+ uint64_t reserved_9_11 : 3;
+ uint64_t beperr : 1; /**< Parity Error in back end memory */
+ uint64_t feperr : 1; /**< Parity Error in front end memory */
+ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */
+ uint64_t skprunt : 1; /**< Packet was engulfed by skipper
+ This interrupt can occur with received PARTIAL
+ packets that are truncated to SKIP bytes or
+ smaller. */
+ uint64_t badtag : 1; /**< A bad tag was sent from IPD */
+ uint64_t prtnxa : 1; /**< Non-existent port */
+ uint64_t bckprs : 1; /**< PIP asserted backpressure */
+ uint64_t crcerr : 1; /**< PIP calculated bad CRC */
+ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */
+#else
+ uint64_t pktdrp : 1;
+ uint64_t crcerr : 1;
+ uint64_t bckprs : 1;
+ uint64_t prtnxa : 1;
+ uint64_t badtag : 1;
+ uint64_t skprunt : 1;
+ uint64_t todoovr : 1;
+ uint64_t feperr : 1;
+ uint64_t beperr : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t punyerr : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } cn58xx;
+ struct cvmx_pip_int_reg_cn30xx cn58xxp1;
+ struct cvmx_pip_int_reg_s cn61xx;
+ struct cvmx_pip_int_reg_s cn63xx;
+ struct cvmx_pip_int_reg_s cn63xxp1;
+ struct cvmx_pip_int_reg_s cn66xx;
+ struct cvmx_pip_int_reg_s cn68xx;
+ struct cvmx_pip_int_reg_s cn68xxp1;
+ struct cvmx_pip_int_reg_s cnf71xx;
+};
+typedef union cvmx_pip_int_reg cvmx_pip_int_reg_t;
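+
+/* Illustrative sketch (not part of the original SDK header): polling and
+ * acknowledging latched exception events, assuming the usual Octeon
+ * write-one-to-clear convention for interrupt registers and the
+ * CVMX_PIP_INT_REG address macro defined earlier in this file. */
+static inline void __cvmx_pip_int_reg_example(void)
+{
+    cvmx_pip_int_reg_t isr;
+
+    isr.u64 = cvmx_read_csr(CVMX_PIP_INT_REG);
+    if (isr.s.prtnxa) {
+        /* a packet arrived on a non-existent port; handle it here */
+    }
+    cvmx_write_csr(CVMX_PIP_INT_REG, isr.u64); /* ack the bits we saw */
+}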
+
+/**
+ * cvmx_pip_ip_offset
+ *
+ * PIP_IP_OFFSET = Location of the IP in the workQ entry
+ *
+ * An 8-byte offset to find the start of the IP header in the data portion of IP workQ entries
+ *
+ * Notes:
+ * In normal configurations, OFFSET must be set in the 0..4 range to allow the
+ * entire IP and TCP/UDP headers to be buffered in HW so that it can
+ * calculate the L4 checksum for TCP/UDP packets.
+ *
+ * The MAX value of OFFSET is determined by the types of packets that can
+ * be sent to PIP as follows...
+ *
+ * Packet Type MAX OFFSET
+ * IPv4/TCP/UDP 7
+ * IPv6/TCP/UDP 5
+ * IPv6/without L4 parsing 6
+ *
+ * If the L4 can be ignored, then the MAX OFFSET for IPv6 packets can increase
+ * to 6. Here are the following programming restrictions for IPv6 packets and
+ * OFFSET==6:
+ *
+ * . PIP_GBL_CTL[TCP_FLAG] == 0
+ * . PIP_GBL_CTL[L4_LEN] == 0
+ * . PIP_GBL_CTL[L4_CHK] == 0
+ * . PIP_GBL_CTL[L4_PRT] == 0
+ * . PIP_GBL_CTL[L4_MAL] == 0
+ * . PIP_DEC_IPSEC[TCP] == 0
+ * . PIP_DEC_IPSEC[UDP] == 0
+ * . PIP_PRT_TAG[IP6_DPRT] == 0
+ * . PIP_PRT_TAG[IP6_SPRT] == 0
+ * . PIP_PRT_TAG[TCP6_TAG] == 0
+ * . PIP_GBL_CFG[TAG_SYN] == 0
+ */
+union cvmx_pip_ip_offset {
+ uint64_t u64;
+ struct cvmx_pip_ip_offset_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t offset : 3; /**< Number of 8B ticks to include in workQ entry
+ prior to IP data
+ - 0: 0 Bytes / IP start at WORD4 of workQ entry
+ - 1: 8 Bytes / IP start at WORD5 of workQ entry
+ - 2: 16 Bytes / IP start at WORD6 of workQ entry
+ - 3: 24 Bytes / IP start at WORD7 of workQ entry
+ - 4: 32 Bytes / IP start at WORD8 of workQ entry
+ - 5: 40 Bytes / IP start at WORD9 of workQ entry
+ - 6: 48 Bytes / IP start at WORD10 of workQ entry
+ - 7: 56 Bytes / IP start at WORD11 of workQ entry */
+#else
+ uint64_t offset : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_pip_ip_offset_s cn30xx;
+ struct cvmx_pip_ip_offset_s cn31xx;
+ struct cvmx_pip_ip_offset_s cn38xx;
+ struct cvmx_pip_ip_offset_s cn38xxp2;
+ struct cvmx_pip_ip_offset_s cn50xx;
+ struct cvmx_pip_ip_offset_s cn52xx;
+ struct cvmx_pip_ip_offset_s cn52xxp1;
+ struct cvmx_pip_ip_offset_s cn56xx;
+ struct cvmx_pip_ip_offset_s cn56xxp1;
+ struct cvmx_pip_ip_offset_s cn58xx;
+ struct cvmx_pip_ip_offset_s cn58xxp1;
+ struct cvmx_pip_ip_offset_s cn61xx;
+ struct cvmx_pip_ip_offset_s cn63xx;
+ struct cvmx_pip_ip_offset_s cn63xxp1;
+ struct cvmx_pip_ip_offset_s cn66xx;
+ struct cvmx_pip_ip_offset_s cn68xx;
+ struct cvmx_pip_ip_offset_s cn68xxp1;
+ struct cvmx_pip_ip_offset_s cnf71xx;
+};
+typedef union cvmx_pip_ip_offset cvmx_pip_ip_offset_t;
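+
+/* Illustrative sketch (not part of the original SDK header): OFFSET counts
+ * 8B ticks, so the IP header lands at workQ WORD(4 + OFFSET). For example,
+ * OFFSET=2 buffers 16 bytes ahead of the IP header, which then starts at
+ * WORD6. Assumes the CVMX_PIP_IP_OFFSET address macro defined earlier in
+ * this file and cvmx_write_csr() from cvmx-access.h. */
+static inline void __cvmx_pip_ip_offset_example(void)
+{
+    cvmx_pip_ip_offset_t off;
+
+    off.u64 = 0;
+    off.s.offset = 2; /* 16 bytes before IP; IP starts at WORD6 */
+    cvmx_write_csr(CVMX_PIP_IP_OFFSET, off.u64);
+}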
+
+/**
+ * cvmx_pip_pri_tbl#
+ *
+ * Notes:
+ * The priority level from HiGig header is as follows
+ *
+ * HiGig/HiGig+ PRI = [1'b0, CNG[1:0], COS[2:0]]
+ * HiGig2 PRI = [DP[1:0], TC[3:0]]
+ *
+ * DSA PRI = WORD0[15:13]
+ *
+ * VLAN PRI = VLAN[15:13]
+ *
+ * DIFFSERV = IP.TOS/CLASS<7:2>
+ */
+union cvmx_pip_pri_tblx {
+ uint64_t u64;
+ struct cvmx_pip_pri_tblx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t diff2_padd : 8; /**< Diffserv port-add */
+ uint64_t hg2_padd : 8; /**< HG_PRI port-add */
+ uint64_t vlan2_padd : 8; /**< VLAN port-add */
+ uint64_t reserved_38_39 : 2;
+ uint64_t diff2_bpid : 6; /**< Diffserv backpressure ID */
+ uint64_t reserved_30_31 : 2;
+ uint64_t hg2_bpid : 6; /**< HG_PRI backpressure ID */
+ uint64_t reserved_22_23 : 2;
+ uint64_t vlan2_bpid : 6; /**< VLAN backpressure ID */
+ uint64_t reserved_11_15 : 5;
+ uint64_t diff2_qos : 3; /**< Diffserv QOS level */
+ uint64_t reserved_7_7 : 1;
+ uint64_t hg2_qos : 3; /**< HG_PRI QOS level */
+ uint64_t reserved_3_3 : 1;
+ uint64_t vlan2_qos : 3; /**< VLAN QOS level */
+#else
+ uint64_t vlan2_qos : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t hg2_qos : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t diff2_qos : 3;
+ uint64_t reserved_11_15 : 5;
+ uint64_t vlan2_bpid : 6;
+ uint64_t reserved_22_23 : 2;
+ uint64_t hg2_bpid : 6;
+ uint64_t reserved_30_31 : 2;
+ uint64_t diff2_bpid : 6;
+ uint64_t reserved_38_39 : 2;
+ uint64_t vlan2_padd : 8;
+ uint64_t hg2_padd : 8;
+ uint64_t diff2_padd : 8;
+#endif
+ } s;
+ struct cvmx_pip_pri_tblx_s cn68xx;
+ struct cvmx_pip_pri_tblx_s cn68xxp1;
+};
+typedef union cvmx_pip_pri_tblx cvmx_pip_pri_tblx_t;
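+
+/* Illustrative sketch (not part of the original SDK header): programming one
+ * PIP_PRI_TBL entry so a packet whose VLAN priority selects this entry maps
+ * to QOS level 3. Assumes the CVMX_PIP_PRI_TBLX() address macro defined
+ * earlier in this file and the cvmx_read_csr()/cvmx_write_csr() accessors
+ * from cvmx-access.h. */
+static inline void __cvmx_pip_pri_tbl_example(unsigned entry)
+{
+    cvmx_pip_pri_tblx_t tbl;
+
+    tbl.u64 = cvmx_read_csr(CVMX_PIP_PRI_TBLX(entry));
+    tbl.s.vlan2_qos = 3; /* VLAN-derived QOS level for this entry */
+    cvmx_write_csr(CVMX_PIP_PRI_TBLX(entry), tbl.u64);
+}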
+
+/**
+ * cvmx_pip_prt_cfg#
+ *
+ * PIP_PRT_CFGX = Per port config information
+ *
+ */
+union cvmx_pip_prt_cfgx {
+ uint64_t u64;
+ struct cvmx_pip_prt_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63 : 9;
+ uint64_t ih_pri : 1; /**< Use the PRI/QOS field in the instruction header
+ as the PRIORITY in BPID calculations. */
+ uint64_t len_chk_sel : 1; /**< Selects which PIP_FRM_LEN_CHK register is used
+ for this port-kind for MINERR and MAXERR checks.
+ LEN_CHK_SEL=0, use PIP_FRM_LEN_CHK0
+ LEN_CHK_SEL=1, use PIP_FRM_LEN_CHK1 */
+ uint64_t pad_len : 1; /**< When set, disables the length check for pkts with
+ padding in the client data */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for DSA/VLAN
+ pkts */
+ uint64_t lenerr_en : 1; /**< L2 length error check enable
+ Frame was received with length error
+ Typically, this check will not be enabled for
+ incoming packets on the DPI and sRIO ports
+ because the CRC bytes may not normally be
+ present. */
+ uint64_t maxerr_en : 1; /**< Max frame error check enable
+ Frame was received with length > max_length
+ max_length is defined by PIP_FRM_LEN_CHK[MAXLEN] */
+ uint64_t minerr_en : 1; /**< Min frame error check enable
+ Frame was received with length < min_length
+ Typically, this check will not be enabled for
+ incoming packets on the DPI and sRIO ports
+ because the CRC bytes may not normally be
+ present.
+ min_length is defined by PIP_FRM_LEN_CHK[MINLEN] */
+ uint64_t grp_wat_47 : 4; /**< GRP Watcher enable
+ (Watchers 4-7) */
+ uint64_t qos_wat_47 : 4; /**< QOS Watcher enable
+ (Watchers 4-7) */
+ uint64_t reserved_37_39 : 3;
+ uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet.
+ Normally, IPD will never drop a packet that PIP
+ indicates is RAW.
+ 0=never drop RAW packets based on RED algorithm
+ 1=allow RAW packet drops based on RED algorithm */
+ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when
+ calculating mask tag hash */
+ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size and
+ configuration. If DYN_RS is set then
+ PKT_INST_HDR[RS] is not used. When using 2-byte
+ instruction header words, either DYN_RS or
+ PIP_GBL_CTL[IGNRS] should be set. */
+ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets
+ Internally set for RAWFULL/RAWSCHED packets
+ on the DPI ports (32-35).
+ Internally cleared for all other packets on the
+ DPI ports (32-35).
+ Must be zero in DSA mode */
+ uint64_t grp_wat : 4; /**< GRP Watcher enable */
+ uint64_t hg_qos : 1; /**< When set, uses the HiGig priority bits as a
+ lookup into the HG_QOS_TABLE (PIP_HG_PRI_QOS)
+ to determine the QOS value
+ HG_QOS must not be set when HIGIG_EN=0 */
+ uint64_t qos : 3; /**< Default QOS level of the port */
+ uint64_t qos_wat : 4; /**< QOS Watcher enable
+ (Watchers 0-3) */
+ uint64_t qos_vsel : 1; /**< Which QOS in PIP_QOS_VLAN to use
+ 0 = PIP_QOS_VLAN[QOS]
+ 1 = PIP_QOS_VLAN[QOS1] */
+ uint64_t qos_vod : 1; /**< QOS VLAN over Diffserv
+ if DSA/VLAN exists, it is used
+ else if IP exists, Diffserv is used
+ else the per port default is used
+ Watchers are still highest priority */
+ uint64_t qos_diff : 1; /**< QOS Diffserv */
+ uint64_t qos_vlan : 1; /**< QOS VLAN */
+ uint64_t reserved_13_15 : 3;
+ uint64_t crc_en : 1; /**< CRC Checking enabled */
+ uint64_t higig_en : 1; /**< Enable HiGig parsing
+ Should not be set for DPI ports (ports 32-35)
+ Should not be set for sRIO ports (ports 40-47)
+ Should not be set for ports in which PTP_MODE=1
+ When HIGIG_EN=1:
+ DSA_EN field below must be zero
+ PIP_PRT_CFGB[ALT_SKP_EN] must be zero.
+ SKIP field below is both Skip I size and the
+ size of the HiGig* header (12 or 16 bytes) */
+ uint64_t dsa_en : 1; /**< Enable DSA tag parsing
+ Should not be set for sRIO (ports 40-47)
+ Should not be set for ports in which PTP_MODE=1
+ When DSA_EN=1:
+ HIGIG_EN field above must be zero
+ SKIP field below is size of DSA tag (4, 8, or
+ 12 bytes) rather than the size of Skip I
+ total SKIP (Skip I + header + Skip II)
+ must be zero
+ INST_HDR field above must be zero (non-DPI
+ ports)
+ PIP_PRT_CFGB[ALT_SKP_EN] must be zero.
+ For DPI ports, SLI_PKT*_INSTR_HEADER[USE_IHDR]
+ and DPI_INST_HDR[R] should be clear
+ MODE field below must be "skip to L2" */
+ cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode
+ 0 = no packet inspection (Uninterpreted)
+ 1 = L2 parsing / skip to L2
+ 2 = IP parsing / skip to L3
+ 3 = (illegal)
+ Must be 2 ("skip to L2") when in DSA mode. */
+ uint64_t reserved_7_7 : 1;
+ uint64_t skip : 7; /**< Optional Skip I amount for packets.
+ HW forces the SKIP to zero for packets on DPI
+ ports (32-35) when a PKT_INST_HDR is present.
+ See PIP_PRT_CFGB[ALT_SKP*] and PIP_ALT_SKIP_CFG.
+ See HRM sections "Parse Mode and Skip Length
+ Selection" and "Legal Skip Values"
+ for further details.
+ In DSA mode, indicates the DSA header length, not
+ Skip I size. (Must be 4,8,or 12)
+ In HIGIG mode, indicates both the Skip I size and
+ the HiGig header size (Must be 12 or 16).
+ If PTP_MODE, the 8B timestamp is prepended to the
+ packet. SKIP should be increased by 8 to
+ compensate for the additional timestamp field. */
+#else
+ uint64_t skip : 7;
+ uint64_t reserved_7_7 : 1;
+ cvmx_pip_port_parse_mode_t mode : 2;
+ uint64_t dsa_en : 1;
+ uint64_t higig_en : 1;
+ uint64_t crc_en : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t qos_vlan : 1;
+ uint64_t qos_diff : 1;
+ uint64_t qos_vod : 1;
+ uint64_t qos_vsel : 1;
+ uint64_t qos_wat : 4;
+ uint64_t qos : 3;
+ uint64_t hg_qos : 1;
+ uint64_t grp_wat : 4;
+ uint64_t inst_hdr : 1;
+ uint64_t dyn_rs : 1;
+ uint64_t tag_inc : 2;
+ uint64_t rawdrp : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t qos_wat_47 : 4;
+ uint64_t grp_wat_47 : 4;
+ uint64_t minerr_en : 1;
+ uint64_t maxerr_en : 1;
+ uint64_t lenerr_en : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t len_chk_sel : 1;
+ uint64_t ih_pri : 1;
+ uint64_t reserved_55_63 : 9;
+#endif
+ } s;
+ struct cvmx_pip_prt_cfgx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet.
+ Normally, IPD will never drop a packet that PIP
+ indicates is RAW.
+ 0=never drop RAW packets based on RED algorithm
+ 1=allow RAW packet drops based on RED algorithm */
+ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when
+ calculating mask tag hash */
+ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size and
+ configuration. If DYN_RS is set then
+ PKT_INST_HDR[RS] is not used. When using 2-byte
+ instruction header words, either DYN_RS or
+ PIP_GBL_CTL[IGNRS] should be set. */
+ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets
+ (not for PCI prts, 32-35) */
+ uint64_t grp_wat : 4; /**< GRP Watcher enable */
+ uint64_t reserved_27_27 : 1;
+ uint64_t qos : 3; /**< Default QOS level of the port */
+ uint64_t qos_wat : 4; /**< QOS Watcher enable */
+ uint64_t reserved_18_19 : 2;
+ uint64_t qos_diff : 1; /**< QOS Diffserv */
+ uint64_t qos_vlan : 1; /**< QOS VLAN */
+ uint64_t reserved_10_15 : 6;
+ cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode
+ 0 = no packet inspection (Uninterpreted)
+ 1 = L2 parsing / skip to L2
+ 2 = IP parsing / skip to L3
+ 3 = PCI Raw (illegal for software to set) */
+ uint64_t reserved_7_7 : 1;
+ uint64_t skip : 7; /**< Optional Skip I amount for packets. Does not
+ apply to packets on PCI ports when a PKT_INST_HDR
+ is present. See section 7.2.7 - Legal Skip
+ Values for further details. */
+#else
+ uint64_t skip : 7;
+ uint64_t reserved_7_7 : 1;
+ cvmx_pip_port_parse_mode_t mode : 2;
+ uint64_t reserved_10_15 : 6;
+ uint64_t qos_vlan : 1;
+ uint64_t qos_diff : 1;
+ uint64_t reserved_18_19 : 2;
+ uint64_t qos_wat : 4;
+ uint64_t qos : 3;
+ uint64_t reserved_27_27 : 1;
+ uint64_t grp_wat : 4;
+ uint64_t inst_hdr : 1;
+ uint64_t dyn_rs : 1;
+ uint64_t tag_inc : 2;
+ uint64_t rawdrp : 1;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } cn30xx;
+ struct cvmx_pip_prt_cfgx_cn30xx cn31xx;
+ struct cvmx_pip_prt_cfgx_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet.
+ Normally, IPD will never drop a packet that PIP
+ indicates is RAW.
+ 0=never drop RAW packets based on RED algorithm
+ 1=allow RAW packet drops based on RED algorithm */
+ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when
+ calculating mask tag hash */
+ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size and
+ configuration. If DYN_RS is set then
+ PKT_INST_HDR[RS] is not used. When using 2-byte
+ instruction header words, either DYN_RS or
+ PIP_GBL_CTL[IGNRS] should be set. */
+ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets
+ (not for PCI prts, 32-35) */
+ uint64_t grp_wat : 4; /**< GRP Watcher enable */
+ uint64_t reserved_27_27 : 1;
+ uint64_t qos : 3; /**< Default QOS level of the port */
+ uint64_t qos_wat : 4; /**< QOS Watcher enable */
+ uint64_t reserved_18_19 : 2;
+ uint64_t qos_diff : 1; /**< QOS Diffserv */
+ uint64_t qos_vlan : 1; /**< QOS VLAN */
+ uint64_t reserved_13_15 : 3;
+ uint64_t crc_en : 1; /**< CRC Checking enabled (for ports 0-31 only) */
+ uint64_t reserved_10_11 : 2;
+ cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode
+ 0 = no packet inspection (Uninterpreted)
+ 1 = L2 parsing / skip to L2
+ 2 = IP parsing / skip to L3
+ 3 = PCI Raw (illegal for software to set) */
+ uint64_t reserved_7_7 : 1;
+ uint64_t skip : 7; /**< Optional Skip I amount for packets. Does not
+ apply to packets on PCI ports when a PKT_INST_HDR
+ is present. See section 7.2.7 - Legal Skip
+ Values for further details. */
+#else
+ uint64_t skip : 7;
+ uint64_t reserved_7_7 : 1;
+ cvmx_pip_port_parse_mode_t mode : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t crc_en : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t qos_vlan : 1;
+ uint64_t qos_diff : 1;
+ uint64_t reserved_18_19 : 2;
+ uint64_t qos_wat : 4;
+ uint64_t qos : 3;
+ uint64_t reserved_27_27 : 1;
+ uint64_t grp_wat : 4;
+ uint64_t inst_hdr : 1;
+ uint64_t dyn_rs : 1;
+ uint64_t tag_inc : 2;
+ uint64_t rawdrp : 1;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } cn38xx;
+ struct cvmx_pip_prt_cfgx_cn38xx cn38xxp2;
+ struct cvmx_pip_prt_cfgx_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_53_63 : 11;
+ uint64_t pad_len : 1; /**< When set, disables the length check for pkts with
+ padding in the client data */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t lenerr_en : 1; /**< L2 length error check enable
+ Frame was received with length error */
+ uint64_t maxerr_en : 1; /**< Max frame error check enable
+ Frame was received with length > max_length */
+ uint64_t minerr_en : 1; /**< Min frame error check enable
+ Frame was received with length < min_length */
+ uint64_t grp_wat_47 : 4; /**< GRP Watcher enable
+ (Watchers 4-7) */
+ uint64_t qos_wat_47 : 4; /**< QOS Watcher enable
+ (Watchers 4-7) */
+ uint64_t reserved_37_39 : 3;
+ uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet.
+ Normally, IPD will never drop a packet that PIP
+ indicates is RAW.
+ 0=never drop RAW packets based on RED algorithm
+ 1=allow RAW packet drops based on RED algorithm */
+ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when
+ calculating mask tag hash */
+ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size and
+ configuration. If DYN_RS is set then
+ PKT_INST_HDR[RS] is not used. When using 2-byte
+ instruction header words, either DYN_RS or
+ PIP_GBL_CTL[IGNRS] should be set. */
+ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets
+ (not for PCI prts, 32-35) */
+ uint64_t grp_wat : 4; /**< GRP Watcher enable */
+ uint64_t reserved_27_27 : 1;
+ uint64_t qos : 3; /**< Default QOS level of the port */
+ uint64_t qos_wat : 4; /**< QOS Watcher enable
+ (Watchers 0-3) */
+ uint64_t reserved_19_19 : 1;
+ uint64_t qos_vod : 1; /**< QOS VLAN over Diffserv
+ if VLAN exists, it is used
+ else if IP exists, Diffserv is used
+ else the per port default is used
+ Watchers are still highest priority */
+ uint64_t qos_diff : 1; /**< QOS Diffserv */
+ uint64_t qos_vlan : 1; /**< QOS VLAN */
+ uint64_t reserved_13_15 : 3;
+ uint64_t crc_en : 1; /**< CRC Checking enabled
+ (Disabled in 5020) */
+ uint64_t reserved_10_11 : 2;
+ cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode
+ 0 = no packet inspection (Uninterpreted)
+ 1 = L2 parsing / skip to L2
+ 2 = IP parsing / skip to L3
+ 3 = PCI Raw (illegal for software to set) */
+ uint64_t reserved_7_7 : 1;
+ uint64_t skip : 7; /**< Optional Skip I amount for packets. Does not
+ apply to packets on PCI ports when a PKT_INST_HDR
+ is present. See section 7.2.7 - Legal Skip
+ Values for further details. */
+#else
+ uint64_t skip : 7;
+ uint64_t reserved_7_7 : 1;
+ cvmx_pip_port_parse_mode_t mode : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t crc_en : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t qos_vlan : 1;
+ uint64_t qos_diff : 1;
+ uint64_t qos_vod : 1;
+ uint64_t reserved_19_19 : 1;
+ uint64_t qos_wat : 4;
+ uint64_t qos : 3;
+ uint64_t reserved_27_27 : 1;
+ uint64_t grp_wat : 4;
+ uint64_t inst_hdr : 1;
+ uint64_t dyn_rs : 1;
+ uint64_t tag_inc : 2;
+ uint64_t rawdrp : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t qos_wat_47 : 4;
+ uint64_t grp_wat_47 : 4;
+ uint64_t minerr_en : 1;
+ uint64_t maxerr_en : 1;
+ uint64_t lenerr_en : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t reserved_53_63 : 11;
+#endif
+ } cn50xx;
+ struct cvmx_pip_prt_cfgx_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_53_63 : 11;
+ uint64_t pad_len : 1; /**< When set, disables the length check for pkts with
+ padding in the client data */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for DSA/VLAN
+ pkts */
+ uint64_t lenerr_en : 1; /**< L2 length error check enable
+ Frame was received with length error
+ Typically, this check will not be enabled for
+ incoming packets on the PCIe ports. */
+ uint64_t maxerr_en : 1; /**< Max frame error check enable
+ Frame was received with length > max_length */
+ uint64_t minerr_en : 1; /**< Min frame error check enable
+ Frame was received with length < min_length
+ Typically, this check will not be enabled for
+ incoming packets on the PCIe ports. */
+ uint64_t grp_wat_47 : 4; /**< GRP Watcher enable
+ (Watchers 4-7) */
+ uint64_t qos_wat_47 : 4; /**< QOS Watcher enable
+ (Watchers 4-7) */
+ uint64_t reserved_37_39 : 3;
+ uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet.
+ Normally, IPD will never drop a packet that PIP
+ indicates is RAW.
+ 0=never drop RAW packets based on RED algorithm
+ 1=allow RAW packet drops based on RED algorithm */
+ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when
+ calculating mask tag hash */
+ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size and
+ configuration. If DYN_RS is set then
+ PKT_INST_HDR[RS] is not used. When using 2-byte
+ instruction header words, either DYN_RS or
+ PIP_GBL_CTL[IGNRS] should be set. */
+ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets
+ (not for PCI ports, 32-35)
+ Must be zero in DSA mode */
+ uint64_t grp_wat : 4; /**< GRP Watcher enable */
+ uint64_t hg_qos : 1; /**< When set, uses the HiGig priority bits as a
+ lookup into the HG_QOS_TABLE (PIP_HG_PRI_QOS)
+ to determine the QOS value
+ HG_QOS must not be set when HIGIG_EN=0 */
+ uint64_t qos : 3; /**< Default QOS level of the port */
+ uint64_t qos_wat : 4; /**< QOS Watcher enable
+ (Watchers 0-3) */
+ uint64_t qos_vsel : 1; /**< Which QOS in PIP_QOS_VLAN to use
+ 0 = PIP_QOS_VLAN[QOS]
+ 1 = PIP_QOS_VLAN[QOS1] */
+ uint64_t qos_vod : 1; /**< QOS VLAN over Diffserv
+ if DSA/VLAN exists, it is used
+ else if IP exists, Diffserv is used
+ else the per port default is used
+ Watchers are still highest priority */
+ uint64_t qos_diff : 1; /**< QOS Diffserv */
+ uint64_t qos_vlan : 1; /**< QOS VLAN */
+ uint64_t reserved_13_15 : 3;
+ uint64_t crc_en : 1; /**< CRC Checking enabled
+ (Disabled in 52xx) */
+ uint64_t higig_en : 1; /**< Enable HiGig parsing
+ When HIGIG_EN=1:
+ DSA_EN field below must be zero
+ SKIP field below is both Skip I size and the
+ size of the HiGig* header (12 or 16 bytes) */
+ uint64_t dsa_en : 1; /**< Enable DSA tag parsing
+ When DSA_EN=1:
+ HIGIG_EN field above must be zero
+ SKIP field below is size of DSA tag (4, 8, or
+ 12 bytes) rather than the size of Skip I
+ total SKIP (Skip I + header + Skip II)
+ must be zero
+ INST_HDR field above must be zero
+ MODE field below must be "skip to L2" */
+ cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode
+ 0 = no packet inspection (Uninterpreted)
+ 1 = L2 parsing / skip to L2
+ 2 = IP parsing / skip to L3
+ 3 = (illegal)
+ Must be 2 ("skip to L2") when in DSA mode. */
+ uint64_t reserved_7_7 : 1;
+ uint64_t skip : 7; /**< Optional Skip I amount for packets.
+ See section 7.2.7 - Legal Skip
+ Values for further details.
+ In DSA mode, indicates the DSA header length, not
+ Skip I size. (Must be 4,8,or 12)
+ In HIGIG mode, indicates both the Skip I size and
+ the HiGig header size (Must be 12 or 16). */
+#else
+ uint64_t skip : 7;
+ uint64_t reserved_7_7 : 1;
+ cvmx_pip_port_parse_mode_t mode : 2;
+ uint64_t dsa_en : 1;
+ uint64_t higig_en : 1;
+ uint64_t crc_en : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t qos_vlan : 1;
+ uint64_t qos_diff : 1;
+ uint64_t qos_vod : 1;
+ uint64_t qos_vsel : 1;
+ uint64_t qos_wat : 4;
+ uint64_t qos : 3;
+ uint64_t hg_qos : 1;
+ uint64_t grp_wat : 4;
+ uint64_t inst_hdr : 1;
+ uint64_t dyn_rs : 1;
+ uint64_t tag_inc : 2;
+ uint64_t rawdrp : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t qos_wat_47 : 4;
+ uint64_t grp_wat_47 : 4;
+ uint64_t minerr_en : 1;
+ uint64_t maxerr_en : 1;
+ uint64_t lenerr_en : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t reserved_53_63 : 11;
+#endif
+ } cn52xx;
+ struct cvmx_pip_prt_cfgx_cn52xx cn52xxp1;
+ struct cvmx_pip_prt_cfgx_cn52xx cn56xx;
+ struct cvmx_pip_prt_cfgx_cn50xx cn56xxp1;
+ struct cvmx_pip_prt_cfgx_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet.
+ Normally, IPD will never drop a packet that PIP
+ indicates is RAW.
+ 0=never drop RAW packets based on RED algorithm
+ 1=allow RAW packet drops based on RED algorithm */
+ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when
+ calculating mask tag hash */
+ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size and
+ configuration. If DYN_RS is set then
+ PKT_INST_HDR[RS] is not used. When using 2-byte
+ instruction header words, either DYN_RS or
+ PIP_GBL_CTL[IGNRS] should be set. */
+ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets
+ (not for PCI prts, 32-35) */
+ uint64_t grp_wat : 4; /**< GRP Watcher enable */
+ uint64_t reserved_27_27 : 1;
+ uint64_t qos : 3; /**< Default QOS level of the port */
+ uint64_t qos_wat : 4; /**< QOS Watcher enable */
+ uint64_t reserved_19_19 : 1;
+ uint64_t qos_vod : 1; /**< QOS VLAN over Diffserv
+ if VLAN exists, it is used
+ else if IP exists, Diffserv is used
+ else the per port default is used
+ Watchers are still highest priority */
+ uint64_t qos_diff : 1; /**< QOS Diffserv */
+ uint64_t qos_vlan : 1; /**< QOS VLAN */
+ uint64_t reserved_13_15 : 3;
+ uint64_t crc_en : 1; /**< CRC Checking enabled (for ports 0-31 only) */
+ uint64_t reserved_10_11 : 2;
+ cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode
+ 0 = no packet inspection (Uninterpreted)
+ 1 = L2 parsing / skip to L2
+ 2 = IP parsing / skip to L3
+ 3 = PCI Raw (illegal for software to set) */
+ uint64_t reserved_7_7 : 1;
+ uint64_t skip : 7; /**< Optional Skip I amount for packets. Does not
+ apply to packets on PCI ports when a PKT_INST_HDR
+ is present. See section 7.2.7 - Legal Skip
+ Values for further details. */
+#else
+ uint64_t skip : 7;
+ uint64_t reserved_7_7 : 1;
+ cvmx_pip_port_parse_mode_t mode : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t crc_en : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t qos_vlan : 1;
+ uint64_t qos_diff : 1;
+ uint64_t qos_vod : 1;
+ uint64_t reserved_19_19 : 1;
+ uint64_t qos_wat : 4;
+ uint64_t qos : 3;
+ uint64_t reserved_27_27 : 1;
+ uint64_t grp_wat : 4;
+ uint64_t inst_hdr : 1;
+ uint64_t dyn_rs : 1;
+ uint64_t tag_inc : 2;
+ uint64_t rawdrp : 1;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } cn58xx;
+ struct cvmx_pip_prt_cfgx_cn58xx cn58xxp1;
+ struct cvmx_pip_prt_cfgx_cn52xx cn61xx;
+ struct cvmx_pip_prt_cfgx_cn52xx cn63xx;
+ struct cvmx_pip_prt_cfgx_cn52xx cn63xxp1;
+ struct cvmx_pip_prt_cfgx_cn52xx cn66xx;
+ struct cvmx_pip_prt_cfgx_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63 : 9;
+ uint64_t ih_pri : 1; /**< Use the PRI/QOS field in the instruction header
+ as the PRIORITY in BPID calculations. */
+ uint64_t len_chk_sel : 1; /**< Selects which PIP_FRM_LEN_CHK register is used
+ for this port-kind for MINERR and MAXERR checks.
+ LEN_CHK_SEL=0, use PIP_FRM_LEN_CHK0
+ LEN_CHK_SEL=1, use PIP_FRM_LEN_CHK1 */
+ uint64_t pad_len : 1; /**< When set, disables the length check for pkts with
+ padding in the client data */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for DSA/VLAN
+ pkts */
+ uint64_t lenerr_en : 1; /**< L2 length error check enable
+ Frame was received with length error
+ Typically, this check will not be enabled for
+ incoming packets on the DPI rings
+ because the CRC bytes may not normally be
+ present. */
+ uint64_t maxerr_en : 1; /**< Max frame error check enable
+ Frame was received with length > max_length
+ max_length is defined by PIP_FRM_LEN_CHK[MAXLEN] */
+ uint64_t minerr_en : 1; /**< Min frame error check enable
+ Frame was received with length < min_length
+ Typically, this check will not be enabled for
+ incoming packets on the DPI rings
+ because the CRC bytes may not normally be
+ present.
+ min_length is defined by PIP_FRM_LEN_CHK[MINLEN] */
+ uint64_t grp_wat_47 : 4; /**< GRP Watcher enable
+ (Watchers 4-7) */
+ uint64_t qos_wat_47 : 4; /**< QOS Watcher enable
+ (Watchers 4-7) */
+ uint64_t reserved_37_39 : 3;
+ uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet.
+ Normally, IPD will never drop a packet in which
+ PKT_INST_HDR[R] is set.
+ 0=never drop RAW packets based on RED algorithm
+ 1=allow RAW packet drops based on RED algorithm */
+ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when
+ calculating mask tag hash */
+ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size and
+ configuration. If DYN_RS is set then
+ PKT_INST_HDR[RS] is not used. When using 2-byte
+ instruction header words, either DYN_RS or
+ PIP_GBL_CTL[IGNRS] should be set. */
+ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets
+ Normally INST_HDR should be set for packets that
+ include a PKT_INST_HDR prepended by DPI hardware.
+ (If SLI_PORTx_PKIND[RPK_ENB]=0, for packets that
+ include a PKT_INST_HDR prepended by DPI,
+ PIP internally sets INST_HDR before using it.)
+ Must be zero in DSA mode */
+ uint64_t grp_wat : 4; /**< GRP Watcher enable */
+ uint64_t hg_qos : 1; /**< When set, uses the HiGig priority bits as a
+ lookup into the HG_QOS_TABLE (PIP_HG_PRI_QOS)
+ to determine the QOS value
+ HG_QOS must not be set when HIGIG_EN=0 */
+ uint64_t qos : 3; /**< Default QOS level of the port */
+ uint64_t qos_wat : 4; /**< QOS Watcher enable
+ (Watchers 0-3) */
+ uint64_t reserved_19_19 : 1;
+ uint64_t qos_vod : 1; /**< QOS VLAN over Diffserv
+ if DSA/VLAN exists, it is used
+ else if IP exists, Diffserv is used
+ else the per port default is used
+ Watchers are still highest priority */
+ uint64_t qos_diff : 1; /**< QOS Diffserv */
+ uint64_t qos_vlan : 1; /**< QOS VLAN */
+ uint64_t reserved_13_15 : 3;
+ uint64_t crc_en : 1; /**< CRC Checking enabled */
+ uint64_t higig_en : 1; /**< Enable HiGig parsing
+ Normally HIGIG_EN should be clear for packets that
+ include a PKT_INST_HDR prepended by DPI hardware.
+ (If SLI_PORTx_PKIND[RPK_ENB]=0, for packets that
+ include a PKT_INST_HDR prepended by DPI,
+ PIP internally clears HIGIG_EN before using it.)
+ Should not be set for ports in which PTP_MODE=1
+ When HIGIG_EN=1:
+ DSA_EN field below must be zero
+ PIP_PRT_CFGB[ALT_SKP_EN] must be zero.
+ SKIP field below is both Skip I size and the
+ size of the HiGig* header (12 or 16 bytes) */
+ uint64_t dsa_en : 1; /**< Enable DSA tag parsing
+ Should not be set for ports in which PTP_MODE=1
+ When DSA_EN=1:
+ HIGIG_EN field above must be zero
+ SKIP field below is size of DSA tag (4, 8, or
+ 12 bytes) rather than the size of Skip I
+ total SKIP (Skip I + header + Skip II)
+ must be zero
+ INST_HDR field above must be zero
+ PIP_PRT_CFGB[ALT_SKP_EN] must be zero.
+ For DPI rings, DPI hardware must not prepend
+ a PKT_INST_HDR when DSA_EN=1.
+ MODE field below must be "skip to L2" */
+ cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode
+ 0 = no packet inspection (Uninterpreted)
+ 1 = L2 parsing / skip to L2
+ 2 = IP parsing / skip to L3
+ 3 = (illegal)
+ Must be 2 ("skip to L2") when in DSA mode. */
+ uint64_t reserved_7_7 : 1;
+ uint64_t skip : 7; /**< Optional Skip I amount for packets.
+ Should normally be zero for packets on
+ DPI rings when a PKT_INST_HDR is prepended by DPI
+ hardware.
+ See PIP_PRT_CFGB[ALT_SKP*] and PIP_ALT_SKIP_CFG.
+ See HRM sections "Parse Mode and Skip Length
+ Selection" and "Legal Skip Values"
+ for further details.
+ In DSA mode, indicates the DSA header length, not
+ Skip I size. (Must be 4,8,or 12)
+ In HIGIG mode, indicates both the Skip I size and
+ the HiGig header size (Must be 12 or 16).
+ If PTP_MODE, the 8B timestamp is prepended to the
+ packet. SKIP should be increased by 8 to
+ compensate for the additional timestamp field. */
+#else
+ uint64_t skip : 7;
+ uint64_t reserved_7_7 : 1;
+ cvmx_pip_port_parse_mode_t mode : 2;
+ uint64_t dsa_en : 1;
+ uint64_t higig_en : 1;
+ uint64_t crc_en : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t qos_vlan : 1;
+ uint64_t qos_diff : 1;
+ uint64_t qos_vod : 1;
+ uint64_t reserved_19_19 : 1;
+ uint64_t qos_wat : 4;
+ uint64_t qos : 3;
+ uint64_t hg_qos : 1;
+ uint64_t grp_wat : 4;
+ uint64_t inst_hdr : 1;
+ uint64_t dyn_rs : 1;
+ uint64_t tag_inc : 2;
+ uint64_t rawdrp : 1;
+ uint64_t reserved_37_39 : 3;
+ uint64_t qos_wat_47 : 4;
+ uint64_t grp_wat_47 : 4;
+ uint64_t minerr_en : 1;
+ uint64_t maxerr_en : 1;
+ uint64_t lenerr_en : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t len_chk_sel : 1;
+ uint64_t ih_pri : 1;
+ uint64_t reserved_55_63 : 9;
+#endif
+ } cn68xx;
+ struct cvmx_pip_prt_cfgx_cn68xx cn68xxp1;
+ struct cvmx_pip_prt_cfgx_cn52xx cnf71xx;
+};
+typedef union cvmx_pip_prt_cfgx cvmx_pip_prt_cfgx_t;
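+
+/*
+ * A minimal sketch of a read-modify-write of PIP_PRT_CFGX, assuming
+ * the CVMX_PIP_PRT_CFGX() address macro from this file, the
+ * cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h, and the
+ * CVMX_PIP_PORT_CFG_MODE_SKIPL2 parse-mode enumerator from
+ * cvmx-csr-enums.h; the function name is illustrative.
+ */
+static inline void example_prt_cfg_skip_to_l2(int port, unsigned qos)
+{
+    cvmx_pip_prt_cfgx_t cfg;
+
+    cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(port));
+    cfg.s.mode = CVMX_PIP_PORT_CFG_MODE_SKIPL2; /* parse mode 1: skip to L2 */
+    cfg.s.qos = qos & 0x7;                      /* default QOS level */
+    cvmx_write_csr(CVMX_PIP_PRT_CFGX(port), cfg.u64);
+}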
+
+/**
+ * cvmx_pip_prt_cfgb#
+ *
+ * Notes:
+ * PIP_PRT_CFGB* does not exist prior to pass 1.2.
+ *
+ */
+union cvmx_pip_prt_cfgbx {
+ uint64_t u64;
+ struct cvmx_pip_prt_cfgbx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t alt_skp_sel : 2; /**< Alternate skip selector
+ When enabled (ALT_SKP_EN), selects which of the
+ four PIP_ALT_SKIP_CFGx to use with the packets
+ arriving on the port-kind. */
+ uint64_t alt_skp_en : 1; /**< Enable the alternate skip selector
+ When enabled, the HW is able to recompute the
+ SKIP I value based on the packet contents.
+ Up to two of the initial 64 bits of the header
+ are used along with four PIP_ALT_SKIP_CFGx to
+ determine the updated SKIP I value.
+ The bits of the packet used should be present in
+ all packets.
+ PIP_PRT_CFG[DSA_EN,HIGIG_EN] must be disabled
+ when ALT_SKP_EN is set.
+ ALT_SKP_EN must not be set for DPI ports (32-35)
+ when a PKT_INST_HDR is present.
+ ALT_SKP_EN should not be enabled for ports which
+ have GMX_RX_FRM_CTL[PTP_MODE] set as the timestamp
+ will be prepended onto the initial 64 bits of the
+ packet. */
+ uint64_t reserved_35_35 : 1;
+ uint64_t bsel_num : 2; /**< Which of the 4 bit select extractors to use
+ (Alias to PIP_PRT_CFG) */
+ uint64_t bsel_en : 1; /**< Enables/disables use of the bit select extractor
+ (Alias to PIP_PRT_CFG) */
+ uint64_t reserved_24_31 : 8;
+ uint64_t base : 8; /**< Base priority address into the table */
+ uint64_t reserved_6_15 : 10;
+ uint64_t bpid : 6; /**< Default BPID to use for packets on this port-kind. */
+#else
+ uint64_t bpid : 6;
+ uint64_t reserved_6_15 : 10;
+ uint64_t base : 8;
+ uint64_t reserved_24_31 : 8;
+ uint64_t bsel_en : 1;
+ uint64_t bsel_num : 2;
+ uint64_t reserved_35_35 : 1;
+ uint64_t alt_skp_en : 1;
+ uint64_t alt_skp_sel : 2;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } s;
+ struct cvmx_pip_prt_cfgbx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t alt_skp_sel : 2; /**< Alternate skip selector
+ When enabled (ALT_SKP_EN), selects which of the
+ four PIP_ALT_SKIP_CFGx to use with the packets
+ arriving on the port-kind. */
+ uint64_t alt_skp_en : 1; /**< Enable the alternate skip selector
+ When enabled, the HW is able to recompute the
+ SKIP I value based on the packet contents.
+ Up to two of the initial 64 bits of the header
+ are used along with four PIP_ALT_SKIP_CFGx to
+ determine the updated SKIP I value.
+ The bits of the packet used should be present in
+ all packets.
+ PIP_PRT_CFG[DSA_EN,HIGIG_EN] must be disabled
+ when ALT_SKP_EN is set.
+ ALT_SKP_EN must not be set for DPI ports (32-35)
+ when a PKT_INST_HDR is present.
+ ALT_SKP_EN should not be enabled for ports which
+ have GMX_RX_FRM_CTL[PTP_MODE] set as the timestamp
+ will be prepended onto the initial 64 bits of the
+ packet. */
+ uint64_t reserved_35_35 : 1;
+ uint64_t bsel_num : 2; /**< Which of the 4 bit select extractors to use
+ (Alias to PIP_PRT_CFG) */
+ uint64_t bsel_en : 1; /**< Enables/disables use of the bit select extractor
+ (Alias to PIP_PRT_CFG) */
+ uint64_t reserved_0_31 : 32;
+#else
+ uint64_t reserved_0_31 : 32;
+ uint64_t bsel_en : 1;
+ uint64_t bsel_num : 2;
+ uint64_t reserved_35_35 : 1;
+ uint64_t alt_skp_en : 1;
+ uint64_t alt_skp_sel : 2;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } cn61xx;
+ struct cvmx_pip_prt_cfgbx_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t alt_skp_sel : 2; /**< Alternate skip selector
+ When enabled (ALT_SKP_EN), selects which of the
+ four PIP_ALT_SKIP_CFGx to use with the packets
+ arriving on the port-kind. */
+ uint64_t alt_skp_en : 1; /**< Enable the alternate skip selector
+ When enabled, the HW is able to recompute the
+ SKIP I value based on the packet contents.
+ Up to two of the initial 64 bits of the header
+ are used along with four PIP_ALT_SKIP_CFGx to
+ determine the updated SKIP I value.
+ The bits of the packet used should be present in
+ all packets.
+ PIP_PRT_CFG[DSA_EN,HIGIG_EN] must be disabled
+ when ALT_SKP_EN is set.
+ ALT_SKP_EN must not be set for DPI ports (32-35)
+ when a PKT_INST_HDR is present. */
+ uint64_t reserved_0_35 : 36;
+#else
+ uint64_t reserved_0_35 : 36;
+ uint64_t alt_skp_en : 1;
+ uint64_t alt_skp_sel : 2;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } cn66xx;
+ struct cvmx_pip_prt_cfgbx_s cn68xx;
+ struct cvmx_pip_prt_cfgbx_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t base : 8; /**< Base priority address into the table */
+ uint64_t reserved_6_15 : 10;
+ uint64_t bpid : 6; /**< Default BPID to use for packets on this port-kind. */
+#else
+ uint64_t bpid : 6;
+ uint64_t reserved_6_15 : 10;
+ uint64_t base : 8;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } cn68xxp1;
+ struct cvmx_pip_prt_cfgbx_cn61xx cnf71xx;
+};
+typedef union cvmx_pip_prt_cfgbx cvmx_pip_prt_cfgbx_t;
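+
+/*
+ * A minimal sketch of selecting an alternate-skip profile for a
+ * port-kind, assuming a CVMX_PIP_PRT_CFGBX() address macro alongside
+ * the other per-index macros in this file; per the field notes,
+ * DSA_EN/HIGIG_EN must be clear when ALT_SKP_EN is set.
+ */
+static inline void example_prt_cfgb_alt_skip(int pknd, unsigned profile)
+{
+    cvmx_pip_prt_cfgbx_t cfgb;
+
+    cfgb.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGBX(pknd));
+    cfgb.s.alt_skp_sel = profile & 0x3; /* one of PIP_ALT_SKIP_CFG0..3 */
+    cfgb.s.alt_skp_en = 1;
+    cvmx_write_csr(CVMX_PIP_PRT_CFGBX(pknd), cfgb.u64);
+}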
+
+/**
+ * cvmx_pip_prt_tag#
+ *
+ * PIP_PRT_TAGX = Per port config information
+ *
+ */
+union cvmx_pip_prt_tagx {
+ uint64_t u64;
+ struct cvmx_pip_prt_tagx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t portadd_en : 1; /**< Enables PIP to optionally increment the incoming
+ port from the MACs based on port-kind
+ configuration and packet contents. */
+ uint64_t inc_hwchk : 1; /**< Include the HW_checksum into WORD0 of the WQE
+ instead of the L4PTR. This mode will be
+ deprecated in future products. */
+ uint64_t reserved_50_51 : 2;
+ uint64_t grptagbase_msb : 2; /**< Most significant 2 bits of the GRPTAGBASE value. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t grptagmask_msb : 2; /**< Most significant 2 bits of the GRPTAGMASK value
+ used to exclude tag bits when computing the
+ group when GRPTAG is set. */
+ uint64_t reserved_42_43 : 2;
+ uint64_t grp_msb : 2; /**< Most significant 2 bits of the 6-bit value
+ indicating the group to schedule to. */
+ uint64_t grptagbase : 4; /**< Offset to use when computing group from tag bits
+ when GRPTAG is set. */
+ uint64_t grptagmask : 4; /**< Which bits of the tag to exclude when computing
+ group when GRPTAG is set. */
+ uint64_t grptag : 1; /**< When set, use the lower bit of the tag to compute
+ the group in the work queue entry
+ GRP = WQE[TAG[3:0]] & ~GRPTAGMASK + GRPTAGBASE */
+ uint64_t grptag_mskip : 1; /**< When set, GRPTAG will be used regardless of
+ whether the packet IS_IP. */
+ uint64_t tag_mode : 2; /**< Which tag algorithm to use
+ 0 = always use tuple tag algorithm
+ 1 = always use mask tag algorithm
+ 2 = if packet is IP, use tuple else use mask
+ 3 = tuple XOR mask */
+ uint64_t inc_vs : 2; /**< determines the DSA/VLAN ID (VID) to be included in
+ tuple tag when VLAN stacking is detected
+ 0 = do not include VID in tuple tag generation
+ 1 = include VID (VLAN0) in hash
+ 2 = include VID (VLAN1) in hash
+ 3 = include VID ([VLAN0,VLAN1]) in hash */
+ uint64_t inc_vlan : 1; /**< when set, the DSA/VLAN ID is included in tuple tag
+ when VLAN stacking is not detected
+ 0 = do not include VID in tuple tag generation
+ 1 = include VID in hash */
+ uint64_t inc_prt_flag : 1; /**< sets whether the port is included in tuple tag */
+ uint64_t ip6_dprt_flag : 1; /**< sets whether the TCP/UDP dst port is
+ included in tuple tag for IPv6 packets */
+ uint64_t ip4_dprt_flag : 1; /**< sets whether the TCP/UDP dst port is
+ included in tuple tag for IPv4 */
+ uint64_t ip6_sprt_flag : 1; /**< sets whether the TCP/UDP src port is
+ included in tuple tag for IPv6 packets */
+ uint64_t ip4_sprt_flag : 1; /**< sets whether the TCP/UDP src port is
+ included in tuple tag for IPv4 */
+ uint64_t ip6_nxth_flag : 1; /**< sets whether ipv6 includes next header in tuple
+ tag hash */
+ uint64_t ip4_pctl_flag : 1; /**< sets whether ipv4 includes protocol in tuple
+ tag hash */
+ uint64_t ip6_dst_flag : 1; /**< sets whether ipv6 includes dst address in tuple
+ tag hash */
+ uint64_t ip4_dst_flag : 1; /**< sets whether ipv4 includes dst address in tuple
+ tag hash */
+ uint64_t ip6_src_flag : 1; /**< sets whether ipv6 includes src address in tuple
+ tag hash */
+ uint64_t ip4_src_flag : 1; /**< sets whether ipv4 includes src address in tuple
+ tag hash */
+ cvmx_pow_tag_type_t tcp6_tag_type : 2; /**< sets the tag_type of a TCP packet (IPv6)
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t tcp4_tag_type : 2; /**< sets the tag_type of a TCP packet (IPv4)
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t ip6_tag_type : 2; /**< sets whether IPv6 packet tag type
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t ip4_tag_type : 2; /**< sets whether IPv4 packet tag type
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t non_tag_type : 2; /**< sets whether non-IP packet tag type
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ uint64_t grp : 4; /**< 4-bit value indicating the group to schedule to */
+#else
+ uint64_t grp : 4;
+ cvmx_pow_tag_type_t non_tag_type : 2;
+ cvmx_pow_tag_type_t ip4_tag_type : 2;
+ cvmx_pow_tag_type_t ip6_tag_type : 2;
+ cvmx_pow_tag_type_t tcp4_tag_type : 2;
+ cvmx_pow_tag_type_t tcp6_tag_type : 2;
+ uint64_t ip4_src_flag : 1;
+ uint64_t ip6_src_flag : 1;
+ uint64_t ip4_dst_flag : 1;
+ uint64_t ip6_dst_flag : 1;
+ uint64_t ip4_pctl_flag : 1;
+ uint64_t ip6_nxth_flag : 1;
+ uint64_t ip4_sprt_flag : 1;
+ uint64_t ip6_sprt_flag : 1;
+ uint64_t ip4_dprt_flag : 1;
+ uint64_t ip6_dprt_flag : 1;
+ uint64_t inc_prt_flag : 1;
+ uint64_t inc_vlan : 1;
+ uint64_t inc_vs : 2;
+ uint64_t tag_mode : 2;
+ uint64_t grptag_mskip : 1;
+ uint64_t grptag : 1;
+ uint64_t grptagmask : 4;
+ uint64_t grptagbase : 4;
+ uint64_t grp_msb : 2;
+ uint64_t reserved_42_43 : 2;
+ uint64_t grptagmask_msb : 2;
+ uint64_t reserved_46_47 : 2;
+ uint64_t grptagbase_msb : 2;
+ uint64_t reserved_50_51 : 2;
+ uint64_t inc_hwchk : 1;
+ uint64_t portadd_en : 1;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_pip_prt_tagx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t grptagbase : 4; /**< Offset to use when computing group from tag bits
+ when GRPTAG is set. */
+ uint64_t grptagmask : 4; /**< Which bits of the tag to exclude when computing
+ group when GRPTAG is set. */
+ uint64_t grptag : 1; /**< When set, use the lower bit of the tag to compute
+ the group in the work queue entry
+ GRP = WQE[TAG[3:0]] & ~GRPTAGMASK + GRPTAGBASE */
+ uint64_t reserved_30_30 : 1;
+ uint64_t tag_mode : 2; /**< Which tag algorithm to use
+ 0 = always use tuple tag algorithm
+ 1 = always use mask tag algorithm
+ 2 = if packet is IP, use tuple else use mask
+ 3 = tuple XOR mask */
+ uint64_t inc_vs : 2; /**< determines the VLAN ID (VID) to be included in
+ tuple tag when VLAN stacking is detected
+ 0 = do not include VID in tuple tag generation
+ 1 = include VID (VLAN0) in hash
+ 2 = include VID (VLAN1) in hash
+ 3 = include VID ([VLAN0,VLAN1]) in hash */
+ uint64_t inc_vlan : 1; /**< when set, the VLAN ID is included in tuple tag
+ when VLAN stacking is not detected
+ 0 = do not include VID in tuple tag generation
+ 1 = include VID in hash */
+ uint64_t inc_prt_flag : 1; /**< sets whether the port is included in tuple tag */
+ uint64_t ip6_dprt_flag : 1; /**< sets whether the TCP/UDP dst port is
+ included in tuple tag for IPv6 packets */
+ uint64_t ip4_dprt_flag : 1; /**< sets whether the TCP/UDP dst port is
+ included in tuple tag for IPv4 */
+ uint64_t ip6_sprt_flag : 1; /**< sets whether the TCP/UDP src port is
+ included in tuple tag for IPv6 packets */
+ uint64_t ip4_sprt_flag : 1; /**< sets whether the TCP/UDP src port is
+ included in tuple tag for IPv4 */
+ uint64_t ip6_nxth_flag : 1; /**< sets whether ipv6 includes next header in tuple
+ tag hash */
+ uint64_t ip4_pctl_flag : 1; /**< sets whether ipv4 includes protocol in tuple
+ tag hash */
+ uint64_t ip6_dst_flag : 1; /**< sets whether ipv6 includes dst address in tuple
+ tag hash */
+ uint64_t ip4_dst_flag : 1; /**< sets whether ipv4 includes dst address in tuple
+ tag hash */
+ uint64_t ip6_src_flag : 1; /**< sets whether ipv6 includes src address in tuple
+ tag hash */
+ uint64_t ip4_src_flag : 1; /**< sets whether ipv4 includes src address in tuple
+ tag hash */
+ cvmx_pow_tag_type_t tcp6_tag_type : 2; /**< sets the tag_type of a TCP packet (IPv6)
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t tcp4_tag_type : 2; /**< sets the tag_type of a TCP packet (IPv4)
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t ip6_tag_type : 2; /**< sets whether IPv6 packet tag type
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t ip4_tag_type : 2; /**< sets whether IPv4 packet tag type
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t non_tag_type : 2; /**< sets whether non-IP packet tag type
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ uint64_t grp : 4; /**< 4-bit value indicating the group to schedule to */
+#else
+ uint64_t grp : 4;
+ cvmx_pow_tag_type_t non_tag_type : 2;
+ cvmx_pow_tag_type_t ip4_tag_type : 2;
+ cvmx_pow_tag_type_t ip6_tag_type : 2;
+ cvmx_pow_tag_type_t tcp4_tag_type : 2;
+ cvmx_pow_tag_type_t tcp6_tag_type : 2;
+ uint64_t ip4_src_flag : 1;
+ uint64_t ip6_src_flag : 1;
+ uint64_t ip4_dst_flag : 1;
+ uint64_t ip6_dst_flag : 1;
+ uint64_t ip4_pctl_flag : 1;
+ uint64_t ip6_nxth_flag : 1;
+ uint64_t ip4_sprt_flag : 1;
+ uint64_t ip6_sprt_flag : 1;
+ uint64_t ip4_dprt_flag : 1;
+ uint64_t ip6_dprt_flag : 1;
+ uint64_t inc_prt_flag : 1;
+ uint64_t inc_vlan : 1;
+ uint64_t inc_vs : 2;
+ uint64_t tag_mode : 2;
+ uint64_t reserved_30_30 : 1;
+ uint64_t grptag : 1;
+ uint64_t grptagmask : 4;
+ uint64_t grptagbase : 4;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } cn30xx;
+ struct cvmx_pip_prt_tagx_cn30xx cn31xx;
+ struct cvmx_pip_prt_tagx_cn30xx cn38xx;
+ struct cvmx_pip_prt_tagx_cn30xx cn38xxp2;
+ struct cvmx_pip_prt_tagx_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t grptagbase : 4; /**< Offset to use when computing group from tag bits
+ when GRPTAG is set. */
+ uint64_t grptagmask : 4; /**< Which bits of the tag to exclude when computing
+ group when GRPTAG is set. */
+ uint64_t grptag : 1; /**< When set, use the lower bit of the tag to compute
+ the group in the work queue entry
+ GRP = WQE[TAG[3:0]] & ~GRPTAGMASK + GRPTAGBASE */
+ uint64_t grptag_mskip : 1; /**< When set, GRPTAG will be used regardless of
+ whether the packet IS_IP. */
+ uint64_t tag_mode : 2; /**< Which tag algorithm to use
+ 0 = always use tuple tag algorithm
+ 1 = always use mask tag algorithm
+ 2 = if packet is IP, use tuple else use mask
+ 3 = tuple XOR mask */
+ uint64_t inc_vs : 2; /**< determines the VLAN ID (VID) to be included in
+ tuple tag when VLAN stacking is detected
+ 0 = do not include VID in tuple tag generation
+ 1 = include VID (VLAN0) in hash
+ 2 = include VID (VLAN1) in hash
+ 3 = include VID ([VLAN0,VLAN1]) in hash */
+ uint64_t inc_vlan : 1; /**< when set, the VLAN ID is included in tuple tag
+ when VLAN stacking is not detected
+ 0 = do not include VID in tuple tag generation
+ 1 = include VID in hash */
+ uint64_t inc_prt_flag : 1; /**< sets whether the port is included in tuple tag */
+ uint64_t ip6_dprt_flag : 1; /**< sets whether the TCP/UDP dst port is
+ included in tuple tag for IPv6 packets */
+ uint64_t ip4_dprt_flag : 1; /**< sets whether the TCP/UDP dst port is
+ included in tuple tag for IPv4 */
+ uint64_t ip6_sprt_flag : 1; /**< sets whether the TCP/UDP src port is
+ included in tuple tag for IPv6 packets */
+ uint64_t ip4_sprt_flag : 1; /**< sets whether the TCP/UDP src port is
+ included in tuple tag for IPv4 */
+ uint64_t ip6_nxth_flag : 1; /**< sets whether ipv6 includes next header in tuple
+ tag hash */
+ uint64_t ip4_pctl_flag : 1; /**< sets whether ipv4 includes protocol in tuple
+ tag hash */
+ uint64_t ip6_dst_flag : 1; /**< sets whether ipv6 includes dst address in tuple
+ tag hash */
+ uint64_t ip4_dst_flag : 1; /**< sets whether ipv4 includes dst address in tuple
+ tag hash */
+ uint64_t ip6_src_flag : 1; /**< sets whether ipv6 includes src address in tuple
+ tag hash */
+ uint64_t ip4_src_flag : 1; /**< sets whether ipv4 includes src address in tuple
+ tag hash */
+ cvmx_pow_tag_type_t tcp6_tag_type : 2; /**< sets the tag_type of a TCP packet (IPv6)
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t tcp4_tag_type : 2; /**< sets the tag_type of a TCP packet (IPv4)
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t ip6_tag_type : 2; /**< sets whether IPv6 packet tag type
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t ip4_tag_type : 2; /**< sets whether IPv4 packet tag type
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ cvmx_pow_tag_type_t non_tag_type : 2; /**< sets whether non-IP packet tag type
+ 0 = ordered tags
+ 1 = atomic tags
+ 2 = Null tags */
+ uint64_t grp : 4; /**< 4-bit value indicating the group to schedule to */
+#else
+ uint64_t grp : 4;
+ cvmx_pow_tag_type_t non_tag_type : 2;
+ cvmx_pow_tag_type_t ip4_tag_type : 2;
+ cvmx_pow_tag_type_t ip6_tag_type : 2;
+ cvmx_pow_tag_type_t tcp4_tag_type : 2;
+ cvmx_pow_tag_type_t tcp6_tag_type : 2;
+ uint64_t ip4_src_flag : 1;
+ uint64_t ip6_src_flag : 1;
+ uint64_t ip4_dst_flag : 1;
+ uint64_t ip6_dst_flag : 1;
+ uint64_t ip4_pctl_flag : 1;
+ uint64_t ip6_nxth_flag : 1;
+ uint64_t ip4_sprt_flag : 1;
+ uint64_t ip6_sprt_flag : 1;
+ uint64_t ip4_dprt_flag : 1;
+ uint64_t ip6_dprt_flag : 1;
+ uint64_t inc_prt_flag : 1;
+ uint64_t inc_vlan : 1;
+ uint64_t inc_vs : 2;
+ uint64_t tag_mode : 2;
+ uint64_t grptag_mskip : 1;
+ uint64_t grptag : 1;
+ uint64_t grptagmask : 4;
+ uint64_t grptagbase : 4;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } cn50xx;
+ struct cvmx_pip_prt_tagx_cn50xx cn52xx;
+ struct cvmx_pip_prt_tagx_cn50xx cn52xxp1;
+ struct cvmx_pip_prt_tagx_cn50xx cn56xx;
+ struct cvmx_pip_prt_tagx_cn50xx cn56xxp1;
+ struct cvmx_pip_prt_tagx_cn30xx cn58xx;
+ struct cvmx_pip_prt_tagx_cn30xx cn58xxp1;
+ struct cvmx_pip_prt_tagx_cn50xx cn61xx;
+ struct cvmx_pip_prt_tagx_cn50xx cn63xx;
+ struct cvmx_pip_prt_tagx_cn50xx cn63xxp1;
+ struct cvmx_pip_prt_tagx_cn50xx cn66xx;
+ struct cvmx_pip_prt_tagx_s cn68xx;
+ struct cvmx_pip_prt_tagx_s cn68xxp1;
+ struct cvmx_pip_prt_tagx_cn50xx cnf71xx;
+};
+typedef union cvmx_pip_prt_tagx cvmx_pip_prt_tagx_t;
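+
+/*
+ * A minimal sketch of the GRPTAG group computation quoted above,
+ * reading the formula as GRP = (TAG[3:0] & ~GRPTAGMASK) + GRPTAGBASE,
+ * wrapped into the 4-bit GRP field (the 68xx adds GRP_MSB on top);
+ * the helper name is illustrative.
+ */
+static inline unsigned example_grp_from_tag(uint32_t tag,
+                                            unsigned grptagmask,
+                                            unsigned grptagbase)
+{
+    unsigned grp = (tag & 0xf) & ~grptagmask; /* exclude masked tag bits */
+    return (grp + grptagbase) & 0xf;          /* wrap into 4-bit GRP */
+}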
+
+/**
+ * cvmx_pip_qos_diff#
+ *
+ * PIP_QOS_DIFFX = QOS Diffserv Tables
+ *
+ */
+union cvmx_pip_qos_diffx {
+ uint64_t u64;
+ struct cvmx_pip_qos_diffx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t qos : 3; /**< Diffserv QOS level */
+#else
+ uint64_t qos : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_pip_qos_diffx_s cn30xx;
+ struct cvmx_pip_qos_diffx_s cn31xx;
+ struct cvmx_pip_qos_diffx_s cn38xx;
+ struct cvmx_pip_qos_diffx_s cn38xxp2;
+ struct cvmx_pip_qos_diffx_s cn50xx;
+ struct cvmx_pip_qos_diffx_s cn52xx;
+ struct cvmx_pip_qos_diffx_s cn52xxp1;
+ struct cvmx_pip_qos_diffx_s cn56xx;
+ struct cvmx_pip_qos_diffx_s cn56xxp1;
+ struct cvmx_pip_qos_diffx_s cn58xx;
+ struct cvmx_pip_qos_diffx_s cn58xxp1;
+ struct cvmx_pip_qos_diffx_s cn61xx;
+ struct cvmx_pip_qos_diffx_s cn63xx;
+ struct cvmx_pip_qos_diffx_s cn63xxp1;
+ struct cvmx_pip_qos_diffx_s cn66xx;
+ struct cvmx_pip_qos_diffx_s cnf71xx;
+};
+typedef union cvmx_pip_qos_diffx cvmx_pip_qos_diffx_t;
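+
+/*
+ * A minimal sketch of the Diffserv table index: bits <7:2> of the
+ * IPv4 TOS / IPv6 class byte select one of the 64 PIP_QOS_DIFF
+ * entries, per the priority notes earlier in this file.
+ */
+static inline unsigned example_diffserv_index(uint8_t tos)
+{
+    return (tos >> 2) & 0x3f; /* DIFFSERV = IP.TOS/CLASS<7:2> */
+}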
+
+/**
+ * cvmx_pip_qos_vlan#
+ *
+ * PIP_QOS_VLANX = QOS VLAN Tables
+ *
+ * If the PIP identifies that a packet is DSA/VLAN tagged, then the QOS
+ * can be set based on the DSA/VLAN user priority. These eight registers
+ * comprise the QOS values for all DSA/VLAN user priority values.
+ */
+union cvmx_pip_qos_vlanx {
+ uint64_t u64;
+ struct cvmx_pip_qos_vlanx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t qos1 : 3; /**< DSA/VLAN QOS level
+ Selected when PIP_PRT_CFGx[QOS_VSEL] = 1 */
+ uint64_t reserved_3_3 : 1;
+ uint64_t qos : 3; /**< DSA/VLAN QOS level
+ Selected when PIP_PRT_CFGx[QOS_VSEL] = 0 */
+#else
+ uint64_t qos : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t qos1 : 3;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_pip_qos_vlanx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t qos : 3; /**< VLAN QOS level */
+#else
+ uint64_t qos : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_pip_qos_vlanx_cn30xx cn31xx;
+ struct cvmx_pip_qos_vlanx_cn30xx cn38xx;
+ struct cvmx_pip_qos_vlanx_cn30xx cn38xxp2;
+ struct cvmx_pip_qos_vlanx_cn30xx cn50xx;
+ struct cvmx_pip_qos_vlanx_s cn52xx;
+ struct cvmx_pip_qos_vlanx_s cn52xxp1;
+ struct cvmx_pip_qos_vlanx_s cn56xx;
+ struct cvmx_pip_qos_vlanx_cn30xx cn56xxp1;
+ struct cvmx_pip_qos_vlanx_cn30xx cn58xx;
+ struct cvmx_pip_qos_vlanx_cn30xx cn58xxp1;
+ struct cvmx_pip_qos_vlanx_s cn61xx;
+ struct cvmx_pip_qos_vlanx_s cn63xx;
+ struct cvmx_pip_qos_vlanx_s cn63xxp1;
+ struct cvmx_pip_qos_vlanx_s cn66xx;
+ struct cvmx_pip_qos_vlanx_s cnf71xx;
+};
+typedef union cvmx_pip_qos_vlanx cvmx_pip_qos_vlanx_t;
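+
+/*
+ * A minimal sketch of the DSA/VLAN QOS lookup: the user priority in
+ * VLAN[15:13] selects one of the eight PIP_QOS_VLAN registers and
+ * PIP_PRT_CFGX[QOS_VSEL] picks the QOS or QOS1 field.  The
+ * CVMX_PIP_QOS_VLANX() address macro is assumed from this file.
+ */
+static inline uint64_t example_vlan_qos(uint16_t vlan_tci, int qos_vsel)
+{
+    cvmx_pip_qos_vlanx_t tbl;
+
+    tbl.u64 = cvmx_read_csr(CVMX_PIP_QOS_VLANX((vlan_tci >> 13) & 0x7));
+    return qos_vsel ? tbl.s.qos1 : tbl.s.qos;
+}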
+
+/**
+ * cvmx_pip_qos_watch#
+ *
+ * PIP_QOS_WATCHX = QOS Watcher Tables
+ *
+ * Sets up the Configuration CSRs for the four QOS Watchers.
+ * Each Watcher can be set to look for a specific protocol,
+ * TCP/UDP destination port, or Ethertype to override the
+ * default QOS value.
+ */
+union cvmx_pip_qos_watchx {
+ uint64_t u64;
+ struct cvmx_pip_qos_watchx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t mask : 16; /**< Mask off a range of values */
+ uint64_t reserved_30_31 : 2;
+ uint64_t grp : 6; /**< The GRP number of the watcher */
+ uint64_t reserved_23_23 : 1;
+ uint64_t qos : 3; /**< The QOS level of the watcher */
+ uint64_t reserved_19_19 : 1;
+ cvmx_pip_qos_watch_types match_type : 3; /**< The field the watcher matches against
+ 0 = disable across all ports
+ 1 = protocol (ipv4)
+ = next_header (ipv6)
+ 2 = TCP destination port
+ 3 = UDP destination port
+ 4 = Ether type
+ 5-7 = Reserved */
+ uint64_t match_value : 16; /**< The value to watch for */
+#else
+ uint64_t match_value : 16;
+ cvmx_pip_qos_watch_types match_type : 3;
+ uint64_t reserved_19_19 : 1;
+ uint64_t qos : 3;
+ uint64_t reserved_23_23 : 1;
+ uint64_t grp : 6;
+ uint64_t reserved_30_31 : 2;
+ uint64_t mask : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_pip_qos_watchx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t mask : 16; /**< Mask off a range of values */
+ uint64_t reserved_28_31 : 4;
+ uint64_t grp : 4; /**< The GRP number of the watcher */
+ uint64_t reserved_23_23 : 1;
+ uint64_t qos : 3; /**< The QOS level of the watcher */
+ uint64_t reserved_18_19 : 2;
+ cvmx_pip_qos_watch_types match_type : 2; /**< The field the watcher matches against
+ 0 = disable across all ports
+ 1 = protocol (ipv4)
+ = next_header (ipv6)
+ 2 = TCP destination port
+ 3 = UDP destination port */
+ uint64_t match_value : 16; /**< The value to watch for */
+#else
+ uint64_t match_value : 16;
+ cvmx_pip_qos_watch_types match_type : 2;
+ uint64_t reserved_18_19 : 2;
+ uint64_t qos : 3;
+ uint64_t reserved_23_23 : 1;
+ uint64_t grp : 4;
+ uint64_t reserved_28_31 : 4;
+ uint64_t mask : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } cn30xx;
+ struct cvmx_pip_qos_watchx_cn30xx cn31xx;
+ struct cvmx_pip_qos_watchx_cn30xx cn38xx;
+ struct cvmx_pip_qos_watchx_cn30xx cn38xxp2;
+ struct cvmx_pip_qos_watchx_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t mask : 16; /**< Mask off a range of values */
+ uint64_t reserved_28_31 : 4;
+ uint64_t grp : 4; /**< The GRP number of the watcher */
+ uint64_t reserved_23_23 : 1;
+ uint64_t qos : 3; /**< The QOS level of the watcher */
+ uint64_t reserved_19_19 : 1;
+ cvmx_pip_qos_watch_types match_type : 3; /**< The field the watcher matches against
+ 0 = disable across all ports
+ 1 = protocol (ipv4)
+ = next_header (ipv6)
+ 2 = TCP destination port
+ 3 = UDP destination port
+ 4 = Ether type
+ 5-7 = Reserved */
+ uint64_t match_value : 16; /**< The value to watch for */
+#else
+ uint64_t match_value : 16;
+ cvmx_pip_qos_watch_types match_type : 3;
+ uint64_t reserved_19_19 : 1;
+ uint64_t qos : 3;
+ uint64_t reserved_23_23 : 1;
+ uint64_t grp : 4;
+ uint64_t reserved_28_31 : 4;
+ uint64_t mask : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } cn50xx;
+ struct cvmx_pip_qos_watchx_cn50xx cn52xx;
+ struct cvmx_pip_qos_watchx_cn50xx cn52xxp1;
+ struct cvmx_pip_qos_watchx_cn50xx cn56xx;
+ struct cvmx_pip_qos_watchx_cn50xx cn56xxp1;
+ struct cvmx_pip_qos_watchx_cn30xx cn58xx;
+ struct cvmx_pip_qos_watchx_cn30xx cn58xxp1;
+ struct cvmx_pip_qos_watchx_cn50xx cn61xx;
+ struct cvmx_pip_qos_watchx_cn50xx cn63xx;
+ struct cvmx_pip_qos_watchx_cn50xx cn63xxp1;
+ struct cvmx_pip_qos_watchx_cn50xx cn66xx;
+ struct cvmx_pip_qos_watchx_s cn68xx;
+ struct cvmx_pip_qos_watchx_s cn68xxp1;
+ struct cvmx_pip_qos_watchx_cn50xx cnf71xx;
+};
+typedef union cvmx_pip_qos_watchx cvmx_pip_qos_watchx_t;
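+
+/*
+ * A minimal sketch of programming one QOS watcher to match a UDP
+ * destination port (match_type 3 per the field description above),
+ * assuming a CVMX_PIP_QOS_WATCHX() address macro and that a zero MASK
+ * means an exact match on MATCH_VALUE.
+ */
+static inline void example_watch_udp_dport(int watcher, uint16_t dport,
+                                           unsigned qos)
+{
+    cvmx_pip_qos_watchx_t w;
+
+    w.u64 = 0;
+    w.s.match_type = 3; /* UDP destination port */
+    w.s.match_value = dport;
+    w.s.mask = 0;       /* assumed polarity: no bits masked off */
+    w.s.qos = qos & 0x7;
+    cvmx_write_csr(CVMX_PIP_QOS_WATCHX(watcher), w.u64);
+}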
+
+/**
+ * cvmx_pip_raw_word
+ *
+ * PIP_RAW_WORD = The RAW Word2 of the workQ entry.
+ *
+ * The RAW Word2 to be inserted into the workQ entry of RAWFULL packets.
+ */
+union cvmx_pip_raw_word {
+ uint64_t u64;
+ struct cvmx_pip_raw_word_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t word : 56; /**< Word2 of the workQ entry
+ The 8-bit bufs field is still set by HW (IPD) */
+#else
+ uint64_t word : 56;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_pip_raw_word_s cn30xx;
+ struct cvmx_pip_raw_word_s cn31xx;
+ struct cvmx_pip_raw_word_s cn38xx;
+ struct cvmx_pip_raw_word_s cn38xxp2;
+ struct cvmx_pip_raw_word_s cn50xx;
+ struct cvmx_pip_raw_word_s cn52xx;
+ struct cvmx_pip_raw_word_s cn52xxp1;
+ struct cvmx_pip_raw_word_s cn56xx;
+ struct cvmx_pip_raw_word_s cn56xxp1;
+ struct cvmx_pip_raw_word_s cn58xx;
+ struct cvmx_pip_raw_word_s cn58xxp1;
+ struct cvmx_pip_raw_word_s cn61xx;
+ struct cvmx_pip_raw_word_s cn63xx;
+ struct cvmx_pip_raw_word_s cn63xxp1;
+ struct cvmx_pip_raw_word_s cn66xx;
+ struct cvmx_pip_raw_word_s cn68xx;
+ struct cvmx_pip_raw_word_s cn68xxp1;
+ struct cvmx_pip_raw_word_s cnf71xx;
+};
+typedef union cvmx_pip_raw_word cvmx_pip_raw_word_t;
+
+/**
+ * cvmx_pip_sft_rst
+ *
+ * PIP_SFT_RST = PIP Soft Reset
+ *
+ * When written to a '1', resets the PIP block
+ *
+ * Notes:
+ * When RST is set to a '1' by SW, PIP will get a short reset pulse (3 cycles
+ * in duration). Although this will reset much of PIP's internal state, some
+ * CSRs will not reset.
+ *
+ * . PIP_BIST_STATUS
+ * . PIP_STAT0_PRT*
+ * . PIP_STAT1_PRT*
+ * . PIP_STAT2_PRT*
+ * . PIP_STAT3_PRT*
+ * . PIP_STAT4_PRT*
+ * . PIP_STAT5_PRT*
+ * . PIP_STAT6_PRT*
+ * . PIP_STAT7_PRT*
+ * . PIP_STAT8_PRT*
+ * . PIP_STAT9_PRT*
+ * . PIP_XSTAT0_PRT*
+ * . PIP_XSTAT1_PRT*
+ * . PIP_XSTAT2_PRT*
+ * . PIP_XSTAT3_PRT*
+ * . PIP_XSTAT4_PRT*
+ * . PIP_XSTAT5_PRT*
+ * . PIP_XSTAT6_PRT*
+ * . PIP_XSTAT7_PRT*
+ * . PIP_XSTAT8_PRT*
+ * . PIP_XSTAT9_PRT*
+ * . PIP_STAT_INB_PKTS*
+ * . PIP_STAT_INB_OCTS*
+ * . PIP_STAT_INB_ERRS*
+ * . PIP_TAG_INC*
+ */
+union cvmx_pip_sft_rst {
+ uint64_t u64;
+ struct cvmx_pip_sft_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t rst : 1; /**< Soft Reset */
+#else
+ uint64_t rst : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_pip_sft_rst_s cn30xx;
+ struct cvmx_pip_sft_rst_s cn31xx;
+ struct cvmx_pip_sft_rst_s cn38xx;
+ struct cvmx_pip_sft_rst_s cn50xx;
+ struct cvmx_pip_sft_rst_s cn52xx;
+ struct cvmx_pip_sft_rst_s cn52xxp1;
+ struct cvmx_pip_sft_rst_s cn56xx;
+ struct cvmx_pip_sft_rst_s cn56xxp1;
+ struct cvmx_pip_sft_rst_s cn58xx;
+ struct cvmx_pip_sft_rst_s cn58xxp1;
+ struct cvmx_pip_sft_rst_s cn61xx;
+ struct cvmx_pip_sft_rst_s cn63xx;
+ struct cvmx_pip_sft_rst_s cn63xxp1;
+ struct cvmx_pip_sft_rst_s cn66xx;
+ struct cvmx_pip_sft_rst_s cn68xx;
+ struct cvmx_pip_sft_rst_s cn68xxp1;
+ struct cvmx_pip_sft_rst_s cnf71xx;
+};
+typedef union cvmx_pip_sft_rst cvmx_pip_sft_rst_t;
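+
+/*
+ * A minimal sketch of pulsing the soft reset described above,
+ * assuming a CVMX_PIP_SFT_RST address macro; the stats and TAG_INC
+ * CSRs listed in the notes keep their contents across the 3-cycle
+ * pulse.
+ */
+static inline void example_pip_soft_reset(void)
+{
+    cvmx_pip_sft_rst_t rst;
+
+    rst.u64 = 0;
+    rst.s.rst = 1;
+    cvmx_write_csr(CVMX_PIP_SFT_RST, rst.u64);
+    (void)cvmx_read_csr(CVMX_PIP_SFT_RST); /* read back to order the write */
+}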
+
+/**
+ * cvmx_pip_stat0_#
+ *
+ * PIP Statistics Counters
+ *
+ * Note: special stat counter behavior
+ *
+ * 1) Read and write operations must arbitrate for the statistics resources
+ * along with the packet engines which are incrementing the counters.
+ * In order to not drop packet information, the packet HW is always a
+ * higher priority and the CSR requests will only be satisfied when
+ * there are idle cycles. This can potentially cause long delays if the
+ * system becomes full.
+ *
+ * 2) stat counters can be cleared in two ways. If PIP_STAT_CTL[RDCLR] is
+ * set, then all read accesses will clear the register. In addition,
+ * any write to a stats register will also reset the register to zero.
+ * Please note that the clearing operations must obey rule \#1 above.
+ *
+ * 3) all counters are wrapping - software must ensure they are read periodically
+ *
+ * 4) The counters accumulate statistics for packets that are sent to PKI. If
+ * PTP_MODE is enabled, the 8B timestamp is prepended to the packet. This
+ * additional 8B of data is captured in the octet counts.
+ *
+ * 5) X represents either the packet's port-kind or backpressure ID as
+ * determined by PIP_STAT_CTL[MODE]
+ * PIP_STAT0_X = PIP_STAT_DRP_PKTS / PIP_STAT_DRP_OCTS
+ */
+union cvmx_pip_stat0_x {
+ uint64_t u64;
+ struct cvmx_pip_stat0_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t drp_pkts : 32; /**< Inbound packets marked to be dropped by the IPD
+ QOS widget per port */
+ uint64_t drp_octs : 32; /**< Inbound octets marked to be dropped by the IPD
+ QOS widget per port */
+#else
+ uint64_t drp_octs : 32;
+ uint64_t drp_pkts : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat0_x_s cn68xx;
+ struct cvmx_pip_stat0_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat0_x cvmx_pip_stat0_x_t;
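+
+/*
+ * A minimal sketch of polling the wrapping drop counters per note 3
+ * above; with PIP_STAT_CTL[RDCLR] set, each read also clears the
+ * register, so the values are per-interval deltas.  The
+ * CVMX_PIP_STAT0_X() address macro name is an assumption.
+ */
+static inline void example_poll_drops(int pknd, uint64_t *pkts, uint64_t *octs)
+{
+    cvmx_pip_stat0_x_t st;
+
+    st.u64 = cvmx_read_csr(CVMX_PIP_STAT0_X(pknd));
+    *pkts = st.s.drp_pkts; /* 32-bit wrapping counts */
+    *octs = st.s.drp_octs;
+}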
+
+/**
+ * cvmx_pip_stat0_prt#
+ *
+ * PIP Statistics Counters
+ *
+ * Note: special stat counter behavior
+ *
+ * 1) Read and write operations must arbitrate for the statistics resources
+ * along with the packet engines which are incrementing the counters.
+ * In order to not drop packet information, the packet HW is always a
+ * higher priority and the CSR requests will only be satisfied when
+ * there are idle cycles. This can potentially cause long delays if the
+ * system becomes full.
+ *
+ * 2) stat counters can be cleared in two ways. If PIP_STAT_CTL[RDCLR] is
+ * set, then all read accesses will clear the register. In addition,
+ * any write to a stats register will also reset the register to zero.
+ * Please note that the clearing operations must obey rule \#1 above.
+ *
+ * 3) all counters are wrapping - software must ensure they are read periodically
+ *
+ * 4) The counters accumulate statistics for packets that are sent to PKI. If
+ * PTP_MODE is enabled, the 8B timestamp is prepended to the packet. This
+ * additional 8B of data is captured in the octet counts.
+ * PIP_STAT0_PRT = PIP_STAT_DRP_PKTS / PIP_STAT_DRP_OCTS
+ */
+union cvmx_pip_stat0_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat0_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t drp_pkts : 32; /**< Inbound packets marked to be dropped by the IPD
+ QOS widget per port */
+ uint64_t drp_octs : 32; /**< Inbound octets marked to be dropped by the IPD
+ QOS widget per port */
+#else
+ uint64_t drp_octs : 32;
+ uint64_t drp_pkts : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat0_prtx_s cn30xx;
+ struct cvmx_pip_stat0_prtx_s cn31xx;
+ struct cvmx_pip_stat0_prtx_s cn38xx;
+ struct cvmx_pip_stat0_prtx_s cn38xxp2;
+ struct cvmx_pip_stat0_prtx_s cn50xx;
+ struct cvmx_pip_stat0_prtx_s cn52xx;
+ struct cvmx_pip_stat0_prtx_s cn52xxp1;
+ struct cvmx_pip_stat0_prtx_s cn56xx;
+ struct cvmx_pip_stat0_prtx_s cn56xxp1;
+ struct cvmx_pip_stat0_prtx_s cn58xx;
+ struct cvmx_pip_stat0_prtx_s cn58xxp1;
+ struct cvmx_pip_stat0_prtx_s cn61xx;
+ struct cvmx_pip_stat0_prtx_s cn63xx;
+ struct cvmx_pip_stat0_prtx_s cn63xxp1;
+ struct cvmx_pip_stat0_prtx_s cn66xx;
+ struct cvmx_pip_stat0_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat0_prtx cvmx_pip_stat0_prtx_t;
+
+/**
+ * cvmx_pip_stat10_#
+ *
+ * PIP_STAT10_X = PIP_STAT_L2_MCAST / PIP_STAT_L2_BCAST
+ *
+ */
+union cvmx_pip_stat10_x {
+ uint64_t u64;
+ struct cvmx_pip_stat10_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast : 32; /**< Number of packets with L2 Broadcast DMAC
+ that were dropped due to RED.
+ The HW will consider a packet to be an L2
+ broadcast packet when the 48-bit DMAC is all 1's.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2. */
+ uint64_t mcast : 32; /**< Number of packets with L2 Multicast DMAC
+ that were dropped due to RED.
+ The HW will consider a packet to be an L2
+ multicast packet when the least-significant bit
+ of the first byte of the DMAC is set and the
+ packet is not an L2 broadcast packet.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2. */
+#else
+ uint64_t mcast : 32;
+ uint64_t bcast : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat10_x_s cn68xx;
+ struct cvmx_pip_stat10_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat10_x cvmx_pip_stat10_x_t;
+
+/**
+ * cvmx_pip_stat10_prt#
+ *
+ * PIP_STAT10_PRTX = PIP_STAT_L2_MCAST / PIP_STAT_L2_BCAST
+ *
+ */
+union cvmx_pip_stat10_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat10_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast : 32; /**< Number of packets with L2 Broadcast DMAC
+ that were dropped due to RED.
+ The HW will consider a packet to be an L2
+ broadcast packet when the 48-bit DMAC is all 1's.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2. */
+ uint64_t mcast : 32; /**< Number of packets with L2 Multicast DMAC
+ that were dropped due to RED.
+ The HW will consider a packet to be an L2
+ multicast packet when the least-significant bit
+ of the first byte of the DMAC is set and the
+ packet is not an L2 broadcast packet.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2. */
+#else
+ uint64_t mcast : 32;
+ uint64_t bcast : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat10_prtx_s cn52xx;
+ struct cvmx_pip_stat10_prtx_s cn52xxp1;
+ struct cvmx_pip_stat10_prtx_s cn56xx;
+ struct cvmx_pip_stat10_prtx_s cn56xxp1;
+ struct cvmx_pip_stat10_prtx_s cn61xx;
+ struct cvmx_pip_stat10_prtx_s cn63xx;
+ struct cvmx_pip_stat10_prtx_s cn63xxp1;
+ struct cvmx_pip_stat10_prtx_s cn66xx;
+ struct cvmx_pip_stat10_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat10_prtx cvmx_pip_stat10_prtx_t;
+
+/**
+ * cvmx_pip_stat11_#
+ *
+ * PIP_STAT11_X = PIP_STAT_L3_MCAST / PIP_STAT_L3_BCAST
+ *
+ */
+union cvmx_pip_stat11_x {
+ uint64_t u64;
+ struct cvmx_pip_stat11_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast : 32; /**< Number of packets with L3 Broadcast Dest Address
+ that were dropped due to RED.
+ The HW considers an IPv4 packet to be broadcast
+ when all bits are set in the MSB of the
+ destination address. IPv6 does not have the
+ concept of a broadcast packet.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2 and the packet is IP or the parse
+ mode for the packet is SKIP-TO-IP. */
+ uint64_t mcast : 32; /**< Number of packets with L3 Multicast Dest Address
+ that were dropped due to RED.
+ The HW considers an IPv4 packet to be multicast
+ when the most-significant nibble of the 32-bit
+ destination address is 0xE (i.e. it is a class D
+ address). The HW considers an IPv6 packet to be
+ multicast when the most-significant byte of the
+ 128-bit destination address is all 1's.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2 and the packet is IP or the parse
+ mode for the packet is SKIP-TO-IP. */
+#else
+ uint64_t mcast : 32;
+ uint64_t bcast : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat11_x_s cn68xx;
+ struct cvmx_pip_stat11_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat11_x cvmx_pip_stat11_x_t;
+
+/**
+ * cvmx_pip_stat11_prt#
+ *
+ * PIP_STAT11_PRTX = PIP_STAT_L3_MCAST / PIP_STAT_L3_BCAST
+ *
+ */
+union cvmx_pip_stat11_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat11_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast : 32; /**< Number of packets with L3 Broadcast Dest Address
+ that were dropped due to RED.
+ The HW considers an IPv4 packet to be broadcast
+ when all bits are set in the MSB of the
+ destination address. IPv6 does not have the
+ concept of a broadcast packet.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2 and the packet is IP or the parse
+ mode for the packet is SKIP-TO-IP. */
+ uint64_t mcast : 32; /**< Number of packets with L3 Multicast Dest Address
+ that were dropped due to RED.
+ The HW considers an IPv4 packet to be multicast
+ when the most-significant nibble of the 32-bit
+ destination address is 0xE (i.e. it is a class D
+ address). The HW considers an IPv6 packet to be
+ multicast when the most-significant byte of the
+ 128-bit destination address is all 1's.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2 and the packet is IP or the parse
+ mode for the packet is SKIP-TO-IP. */
+#else
+ uint64_t mcast : 32;
+ uint64_t bcast : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat11_prtx_s cn52xx;
+ struct cvmx_pip_stat11_prtx_s cn52xxp1;
+ struct cvmx_pip_stat11_prtx_s cn56xx;
+ struct cvmx_pip_stat11_prtx_s cn56xxp1;
+ struct cvmx_pip_stat11_prtx_s cn61xx;
+ struct cvmx_pip_stat11_prtx_s cn63xx;
+ struct cvmx_pip_stat11_prtx_s cn63xxp1;
+ struct cvmx_pip_stat11_prtx_s cn66xx;
+ struct cvmx_pip_stat11_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat11_prtx cvmx_pip_stat11_prtx_t;
+
+/**
+ * cvmx_pip_stat1_#
+ *
+ * PIP_STAT1_X = PIP_STAT_OCTS
+ *
+ */
+union cvmx_pip_stat1_x {
+ uint64_t u64;
+ struct cvmx_pip_stat1_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t octs : 48; /**< Number of octets received by PIP (good and bad) */
+#else
+ uint64_t octs : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_pip_stat1_x_s cn68xx;
+ struct cvmx_pip_stat1_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat1_x cvmx_pip_stat1_x_t;
+
+/**
+ * cvmx_pip_stat1_prt#
+ *
+ * PIP_STAT1_PRTX = PIP_STAT_OCTS
+ *
+ */
+union cvmx_pip_stat1_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat1_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t octs : 48; /**< Number of octets received by PIP (good and bad) */
+#else
+ uint64_t octs : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_pip_stat1_prtx_s cn30xx;
+ struct cvmx_pip_stat1_prtx_s cn31xx;
+ struct cvmx_pip_stat1_prtx_s cn38xx;
+ struct cvmx_pip_stat1_prtx_s cn38xxp2;
+ struct cvmx_pip_stat1_prtx_s cn50xx;
+ struct cvmx_pip_stat1_prtx_s cn52xx;
+ struct cvmx_pip_stat1_prtx_s cn52xxp1;
+ struct cvmx_pip_stat1_prtx_s cn56xx;
+ struct cvmx_pip_stat1_prtx_s cn56xxp1;
+ struct cvmx_pip_stat1_prtx_s cn58xx;
+ struct cvmx_pip_stat1_prtx_s cn58xxp1;
+ struct cvmx_pip_stat1_prtx_s cn61xx;
+ struct cvmx_pip_stat1_prtx_s cn63xx;
+ struct cvmx_pip_stat1_prtx_s cn63xxp1;
+ struct cvmx_pip_stat1_prtx_s cn66xx;
+ struct cvmx_pip_stat1_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat1_prtx cvmx_pip_stat1_prtx_t;
+
+/**
+ * cvmx_pip_stat2_#
+ *
+ * PIP_STAT2_X = PIP_STAT_PKTS / PIP_STAT_RAW
+ *
+ */
+union cvmx_pip_stat2_x {
+ uint64_t u64;
+ struct cvmx_pip_stat2_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pkts : 32; /**< Number of packets processed by PIP */
+ uint64_t raw : 32; /**< RAWFULL + RAWSCH Packets without an L1/L2 error
+ received by PIP per port */
+#else
+ uint64_t raw : 32;
+ uint64_t pkts : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat2_x_s cn68xx;
+ struct cvmx_pip_stat2_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat2_x cvmx_pip_stat2_x_t;
+
+/**
+ * cvmx_pip_stat2_prt#
+ *
+ * PIP_STAT2_PRTX = PIP_STAT_PKTS / PIP_STAT_RAW
+ *
+ */
+union cvmx_pip_stat2_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat2_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pkts : 32; /**< Number of packets processed by PIP */
+ uint64_t raw : 32; /**< RAWFULL + RAWSCH Packets without an L1/L2 error
+ received by PIP per port */
+#else
+ uint64_t raw : 32;
+ uint64_t pkts : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat2_prtx_s cn30xx;
+ struct cvmx_pip_stat2_prtx_s cn31xx;
+ struct cvmx_pip_stat2_prtx_s cn38xx;
+ struct cvmx_pip_stat2_prtx_s cn38xxp2;
+ struct cvmx_pip_stat2_prtx_s cn50xx;
+ struct cvmx_pip_stat2_prtx_s cn52xx;
+ struct cvmx_pip_stat2_prtx_s cn52xxp1;
+ struct cvmx_pip_stat2_prtx_s cn56xx;
+ struct cvmx_pip_stat2_prtx_s cn56xxp1;
+ struct cvmx_pip_stat2_prtx_s cn58xx;
+ struct cvmx_pip_stat2_prtx_s cn58xxp1;
+ struct cvmx_pip_stat2_prtx_s cn61xx;
+ struct cvmx_pip_stat2_prtx_s cn63xx;
+ struct cvmx_pip_stat2_prtx_s cn63xxp1;
+ struct cvmx_pip_stat2_prtx_s cn66xx;
+ struct cvmx_pip_stat2_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat2_prtx cvmx_pip_stat2_prtx_t;
+
+/**
+ * cvmx_pip_stat3_#
+ *
+ * PIP_STAT3_X = PIP_STAT_BCST / PIP_STAT_MCST
+ *
+ */
+union cvmx_pip_stat3_x {
+ uint64_t u64;
+ struct cvmx_pip_stat3_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcst : 32; /**< Number of identified L2 broadcast packets
+ Does not include multicast packets
+ Only includes packets whose parse mode is
+ SKIP_TO_L2. */
+ uint64_t mcst : 32; /**< Number of identified L2 multicast packets
+ Does not include broadcast packets
+ Only includes packets whose parse mode is
+ SKIP_TO_L2. */
+#else
+ uint64_t mcst : 32;
+ uint64_t bcst : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat3_x_s cn68xx;
+ struct cvmx_pip_stat3_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat3_x cvmx_pip_stat3_x_t;
+
+/**
+ * cvmx_pip_stat3_prt#
+ *
+ * PIP_STAT3_PRTX = PIP_STAT_BCST / PIP_STAT_MCST
+ *
+ */
+union cvmx_pip_stat3_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat3_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcst : 32; /**< Number of identified L2 broadcast packets
+ Does not include multicast packets
+ Only includes packets whose parse mode is
+ SKIP_TO_L2. */
+ uint64_t mcst : 32; /**< Number of identified L2 multicast packets
+ Does not include broadcast packets
+ Only includes packets whose parse mode is
+ SKIP_TO_L2. */
+#else
+ uint64_t mcst : 32;
+ uint64_t bcst : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat3_prtx_s cn30xx;
+ struct cvmx_pip_stat3_prtx_s cn31xx;
+ struct cvmx_pip_stat3_prtx_s cn38xx;
+ struct cvmx_pip_stat3_prtx_s cn38xxp2;
+ struct cvmx_pip_stat3_prtx_s cn50xx;
+ struct cvmx_pip_stat3_prtx_s cn52xx;
+ struct cvmx_pip_stat3_prtx_s cn52xxp1;
+ struct cvmx_pip_stat3_prtx_s cn56xx;
+ struct cvmx_pip_stat3_prtx_s cn56xxp1;
+ struct cvmx_pip_stat3_prtx_s cn58xx;
+ struct cvmx_pip_stat3_prtx_s cn58xxp1;
+ struct cvmx_pip_stat3_prtx_s cn61xx;
+ struct cvmx_pip_stat3_prtx_s cn63xx;
+ struct cvmx_pip_stat3_prtx_s cn63xxp1;
+ struct cvmx_pip_stat3_prtx_s cn66xx;
+ struct cvmx_pip_stat3_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat3_prtx cvmx_pip_stat3_prtx_t;
+
+/**
+ * cvmx_pip_stat4_#
+ *
+ * PIP_STAT4_X = PIP_STAT_HIST1 / PIP_STAT_HIST0
+ *
+ */
+union cvmx_pip_stat4_x {
+ uint64_t u64;
+ struct cvmx_pip_stat4_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h65to127 : 32; /**< Number of 65-127B packets */
+ uint64_t h64 : 32; /**< Number of 1-64B packets */
+#else
+ uint64_t h64 : 32;
+ uint64_t h65to127 : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat4_x_s cn68xx;
+ struct cvmx_pip_stat4_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat4_x cvmx_pip_stat4_x_t;
+
+/**
+ * cvmx_pip_stat4_prt#
+ *
+ * PIP_STAT4_PRTX = PIP_STAT_HIST1 / PIP_STAT_HIST0
+ *
+ */
+union cvmx_pip_stat4_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat4_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h65to127 : 32; /**< Number of 65-127B packets */
+ uint64_t h64 : 32; /**< Number of 1-64B packets */
+#else
+ uint64_t h64 : 32;
+ uint64_t h65to127 : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat4_prtx_s cn30xx;
+ struct cvmx_pip_stat4_prtx_s cn31xx;
+ struct cvmx_pip_stat4_prtx_s cn38xx;
+ struct cvmx_pip_stat4_prtx_s cn38xxp2;
+ struct cvmx_pip_stat4_prtx_s cn50xx;
+ struct cvmx_pip_stat4_prtx_s cn52xx;
+ struct cvmx_pip_stat4_prtx_s cn52xxp1;
+ struct cvmx_pip_stat4_prtx_s cn56xx;
+ struct cvmx_pip_stat4_prtx_s cn56xxp1;
+ struct cvmx_pip_stat4_prtx_s cn58xx;
+ struct cvmx_pip_stat4_prtx_s cn58xxp1;
+ struct cvmx_pip_stat4_prtx_s cn61xx;
+ struct cvmx_pip_stat4_prtx_s cn63xx;
+ struct cvmx_pip_stat4_prtx_s cn63xxp1;
+ struct cvmx_pip_stat4_prtx_s cn66xx;
+ struct cvmx_pip_stat4_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat4_prtx cvmx_pip_stat4_prtx_t;
+
+/**
+ * cvmx_pip_stat5_#
+ *
+ * PIP_STAT5_X = PIP_STAT_HIST3 / PIP_STAT_HIST2
+ *
+ */
+union cvmx_pip_stat5_x {
+ uint64_t u64;
+ struct cvmx_pip_stat5_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h256to511 : 32; /**< Number of 256-511B packets */
+ uint64_t h128to255 : 32; /**< Number of 128-255B packets */
+#else
+ uint64_t h128to255 : 32;
+ uint64_t h256to511 : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat5_x_s cn68xx;
+ struct cvmx_pip_stat5_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat5_x cvmx_pip_stat5_x_t;
+
+/**
+ * cvmx_pip_stat5_prt#
+ *
+ * PIP_STAT5_PRTX = PIP_STAT_HIST3 / PIP_STAT_HIST2
+ *
+ */
+union cvmx_pip_stat5_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat5_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h256to511 : 32; /**< Number of 256-511B packets */
+ uint64_t h128to255 : 32; /**< Number of 128-255B packets */
+#else
+ uint64_t h128to255 : 32;
+ uint64_t h256to511 : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat5_prtx_s cn30xx;
+ struct cvmx_pip_stat5_prtx_s cn31xx;
+ struct cvmx_pip_stat5_prtx_s cn38xx;
+ struct cvmx_pip_stat5_prtx_s cn38xxp2;
+ struct cvmx_pip_stat5_prtx_s cn50xx;
+ struct cvmx_pip_stat5_prtx_s cn52xx;
+ struct cvmx_pip_stat5_prtx_s cn52xxp1;
+ struct cvmx_pip_stat5_prtx_s cn56xx;
+ struct cvmx_pip_stat5_prtx_s cn56xxp1;
+ struct cvmx_pip_stat5_prtx_s cn58xx;
+ struct cvmx_pip_stat5_prtx_s cn58xxp1;
+ struct cvmx_pip_stat5_prtx_s cn61xx;
+ struct cvmx_pip_stat5_prtx_s cn63xx;
+ struct cvmx_pip_stat5_prtx_s cn63xxp1;
+ struct cvmx_pip_stat5_prtx_s cn66xx;
+ struct cvmx_pip_stat5_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat5_prtx cvmx_pip_stat5_prtx_t;
+
+/**
+ * cvmx_pip_stat6_#
+ *
+ * PIP_STAT6_X = PIP_STAT_HIST5 / PIP_STAT_HIST4
+ *
+ */
+union cvmx_pip_stat6_x {
+ uint64_t u64;
+ struct cvmx_pip_stat6_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h1024to1518 : 32; /**< Number of 1024-1518B packets */
+ uint64_t h512to1023 : 32; /**< Number of 512-1023B packets */
+#else
+ uint64_t h512to1023 : 32;
+ uint64_t h1024to1518 : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat6_x_s cn68xx;
+ struct cvmx_pip_stat6_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat6_x cvmx_pip_stat6_x_t;
+
+/**
+ * cvmx_pip_stat6_prt#
+ *
+ * PIP_STAT6_PRTX = PIP_STAT_HIST5 / PIP_STAT_HIST4
+ *
+ */
+union cvmx_pip_stat6_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat6_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h1024to1518 : 32; /**< Number of 1024-1518B packets */
+ uint64_t h512to1023 : 32; /**< Number of 512-1023B packets */
+#else
+ uint64_t h512to1023 : 32;
+ uint64_t h1024to1518 : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat6_prtx_s cn30xx;
+ struct cvmx_pip_stat6_prtx_s cn31xx;
+ struct cvmx_pip_stat6_prtx_s cn38xx;
+ struct cvmx_pip_stat6_prtx_s cn38xxp2;
+ struct cvmx_pip_stat6_prtx_s cn50xx;
+ struct cvmx_pip_stat6_prtx_s cn52xx;
+ struct cvmx_pip_stat6_prtx_s cn52xxp1;
+ struct cvmx_pip_stat6_prtx_s cn56xx;
+ struct cvmx_pip_stat6_prtx_s cn56xxp1;
+ struct cvmx_pip_stat6_prtx_s cn58xx;
+ struct cvmx_pip_stat6_prtx_s cn58xxp1;
+ struct cvmx_pip_stat6_prtx_s cn61xx;
+ struct cvmx_pip_stat6_prtx_s cn63xx;
+ struct cvmx_pip_stat6_prtx_s cn63xxp1;
+ struct cvmx_pip_stat6_prtx_s cn66xx;
+ struct cvmx_pip_stat6_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat6_prtx cvmx_pip_stat6_prtx_t;
+
+/**
+ * cvmx_pip_stat7_#
+ *
+ * PIP_STAT7_X = PIP_STAT_FCS / PIP_STAT_HIST6
+ *
+ */
+union cvmx_pip_stat7_x {
+ uint64_t u64;
+ struct cvmx_pip_stat7_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fcs : 32; /**< Number of packets with FCS or Align opcode errors */
+ uint64_t h1519 : 32; /**< Number of 1519-max packets */
+#else
+ uint64_t h1519 : 32;
+ uint64_t fcs : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat7_x_s cn68xx;
+ struct cvmx_pip_stat7_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat7_x cvmx_pip_stat7_x_t;
+
+/**
+ * cvmx_pip_stat7_prt#
+ *
+ * PIP_STAT7_PRTX = PIP_STAT_FCS / PIP_STAT_HIST6
+ *
+ *
+ * Notes:
+ * DPI does not check FCS, therefore FCS will never increment on DPI ports 32-35
+ * sRIO does not check FCS, therefore FCS will never increment on sRIO ports 40-47
+ */
+union cvmx_pip_stat7_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat7_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fcs : 32; /**< Number of packets with FCS or Align opcode errors */
+ uint64_t h1519 : 32; /**< Number of 1519-max packets */
+#else
+ uint64_t h1519 : 32;
+ uint64_t fcs : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat7_prtx_s cn30xx;
+ struct cvmx_pip_stat7_prtx_s cn31xx;
+ struct cvmx_pip_stat7_prtx_s cn38xx;
+ struct cvmx_pip_stat7_prtx_s cn38xxp2;
+ struct cvmx_pip_stat7_prtx_s cn50xx;
+ struct cvmx_pip_stat7_prtx_s cn52xx;
+ struct cvmx_pip_stat7_prtx_s cn52xxp1;
+ struct cvmx_pip_stat7_prtx_s cn56xx;
+ struct cvmx_pip_stat7_prtx_s cn56xxp1;
+ struct cvmx_pip_stat7_prtx_s cn58xx;
+ struct cvmx_pip_stat7_prtx_s cn58xxp1;
+ struct cvmx_pip_stat7_prtx_s cn61xx;
+ struct cvmx_pip_stat7_prtx_s cn63xx;
+ struct cvmx_pip_stat7_prtx_s cn63xxp1;
+ struct cvmx_pip_stat7_prtx_s cn66xx;
+ struct cvmx_pip_stat7_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat7_prtx cvmx_pip_stat7_prtx_t;
+
+/**
+ * cvmx_pip_stat8_#
+ *
+ * PIP_STAT8_X = PIP_STAT_FRAG / PIP_STAT_UNDER
+ *
+ */
+union cvmx_pip_stat8_x {
+ uint64_t u64;
+ struct cvmx_pip_stat8_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t frag : 32; /**< Number of packets with length < min and FCS error */
+ uint64_t undersz : 32; /**< Number of packets with length < min */
+#else
+ uint64_t undersz : 32;
+ uint64_t frag : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat8_x_s cn68xx;
+ struct cvmx_pip_stat8_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat8_x cvmx_pip_stat8_x_t;
+
+/**
+ * cvmx_pip_stat8_prt#
+ *
+ * PIP_STAT8_PRTX = PIP_STAT_FRAG / PIP_STAT_UNDER
+ *
+ *
+ * Notes:
+ * DPI does not check FCS, therefore FRAG will never increment on DPI ports 32-35
+ * sRIO does not check FCS, therefore FRAG will never increment on sRIO ports 40-47
+ */
+union cvmx_pip_stat8_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat8_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t frag : 32; /**< Number of packets with length < min and FCS error */
+ uint64_t undersz : 32; /**< Number of packets with length < min */
+#else
+ uint64_t undersz : 32;
+ uint64_t frag : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat8_prtx_s cn30xx;
+ struct cvmx_pip_stat8_prtx_s cn31xx;
+ struct cvmx_pip_stat8_prtx_s cn38xx;
+ struct cvmx_pip_stat8_prtx_s cn38xxp2;
+ struct cvmx_pip_stat8_prtx_s cn50xx;
+ struct cvmx_pip_stat8_prtx_s cn52xx;
+ struct cvmx_pip_stat8_prtx_s cn52xxp1;
+ struct cvmx_pip_stat8_prtx_s cn56xx;
+ struct cvmx_pip_stat8_prtx_s cn56xxp1;
+ struct cvmx_pip_stat8_prtx_s cn58xx;
+ struct cvmx_pip_stat8_prtx_s cn58xxp1;
+ struct cvmx_pip_stat8_prtx_s cn61xx;
+ struct cvmx_pip_stat8_prtx_s cn63xx;
+ struct cvmx_pip_stat8_prtx_s cn63xxp1;
+ struct cvmx_pip_stat8_prtx_s cn66xx;
+ struct cvmx_pip_stat8_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat8_prtx cvmx_pip_stat8_prtx_t;
+
+/**
+ * cvmx_pip_stat9_#
+ *
+ * PIP_STAT9_X = PIP_STAT_JABBER / PIP_STAT_OVER
+ *
+ */
+union cvmx_pip_stat9_x {
+ uint64_t u64;
+ struct cvmx_pip_stat9_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t jabber : 32; /**< Number of packets with length > max and FCS error */
+ uint64_t oversz : 32; /**< Number of packets with length > max */
+#else
+ uint64_t oversz : 32;
+ uint64_t jabber : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat9_x_s cn68xx;
+ struct cvmx_pip_stat9_x_s cn68xxp1;
+};
+typedef union cvmx_pip_stat9_x cvmx_pip_stat9_x_t;
+
+/**
+ * cvmx_pip_stat9_prt#
+ *
+ * PIP_STAT9_PRTX = PIP_STAT_JABBER / PIP_STAT_OVER
+ *
+ *
+ * Notes:
+ * DPI does not check FCS, therefore JABBER will never increment on DPI ports 32-35
+ * sRIO does not check FCS, therefore JABBER will never increment on sRIO ports 40-47 due to FCS errors
+ * sRIO does use the JABBER opcode to communicate sRIO error, therefore JABBER can increment under the sRIO error conditions
+ */
+union cvmx_pip_stat9_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat9_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t jabber : 32; /**< Number of packets with length > max and FCS error */
+ uint64_t oversz : 32; /**< Number of packets with length > max */
+#else
+ uint64_t oversz : 32;
+ uint64_t jabber : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat9_prtx_s cn30xx;
+ struct cvmx_pip_stat9_prtx_s cn31xx;
+ struct cvmx_pip_stat9_prtx_s cn38xx;
+ struct cvmx_pip_stat9_prtx_s cn38xxp2;
+ struct cvmx_pip_stat9_prtx_s cn50xx;
+ struct cvmx_pip_stat9_prtx_s cn52xx;
+ struct cvmx_pip_stat9_prtx_s cn52xxp1;
+ struct cvmx_pip_stat9_prtx_s cn56xx;
+ struct cvmx_pip_stat9_prtx_s cn56xxp1;
+ struct cvmx_pip_stat9_prtx_s cn58xx;
+ struct cvmx_pip_stat9_prtx_s cn58xxp1;
+ struct cvmx_pip_stat9_prtx_s cn61xx;
+ struct cvmx_pip_stat9_prtx_s cn63xx;
+ struct cvmx_pip_stat9_prtx_s cn63xxp1;
+ struct cvmx_pip_stat9_prtx_s cn66xx;
+ struct cvmx_pip_stat9_prtx_s cnf71xx;
+};
+typedef union cvmx_pip_stat9_prtx cvmx_pip_stat9_prtx_t;
+
+/**
+ * cvmx_pip_stat_ctl
+ *
+ * PIP_STAT_CTL = PIP's Stat Control Register
+ *
+ * Controls how the PIP statistics counters are handled.
+ */
+union cvmx_pip_stat_ctl {
+ uint64_t u64;
+ struct cvmx_pip_stat_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t mode : 1; /**< The PIP_STAT*_X registers can be indexed either by
+ port-kind or backpressure ID.
+ Does not apply to the PIP_STAT_INB* registers.
+ 0 = X represents the packet's port-kind
+ 1 = X represents the packet's backpressure ID */
+ uint64_t reserved_1_7 : 7;
+ uint64_t rdclr : 1; /**< Stat registers are read and clear
+ 0 = stat registers hold value when read
+ 1 = stat registers are cleared when read */
+#else
+ uint64_t rdclr : 1;
+ uint64_t reserved_1_7 : 7;
+ uint64_t mode : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_pip_stat_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t rdclr : 1; /**< Stat registers are read and clear
+ 0 = stat registers hold value when read
+ 1 = stat registers are cleared when read */
+#else
+ uint64_t rdclr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_pip_stat_ctl_cn30xx cn31xx;
+ struct cvmx_pip_stat_ctl_cn30xx cn38xx;
+ struct cvmx_pip_stat_ctl_cn30xx cn38xxp2;
+ struct cvmx_pip_stat_ctl_cn30xx cn50xx;
+ struct cvmx_pip_stat_ctl_cn30xx cn52xx;
+ struct cvmx_pip_stat_ctl_cn30xx cn52xxp1;
+ struct cvmx_pip_stat_ctl_cn30xx cn56xx;
+ struct cvmx_pip_stat_ctl_cn30xx cn56xxp1;
+ struct cvmx_pip_stat_ctl_cn30xx cn58xx;
+ struct cvmx_pip_stat_ctl_cn30xx cn58xxp1;
+ struct cvmx_pip_stat_ctl_cn30xx cn61xx;
+ struct cvmx_pip_stat_ctl_cn30xx cn63xx;
+ struct cvmx_pip_stat_ctl_cn30xx cn63xxp1;
+ struct cvmx_pip_stat_ctl_cn30xx cn66xx;
+ struct cvmx_pip_stat_ctl_s cn68xx;
+ struct cvmx_pip_stat_ctl_s cn68xxp1;
+ struct cvmx_pip_stat_ctl_cn30xx cnf71xx;
+};
+typedef union cvmx_pip_stat_ctl cvmx_pip_stat_ctl_t;
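+
+/* Illustrative sketch (editor's example): turning on read-and-clear so the
+ * wrapping stat counters reset on every read, per note 2 in the counter
+ * descriptions above. The CVMX_PIP_STAT_CTL address macro is assumed to be
+ * defined earlier in this header. */
+static inline void example_enable_stat_rdclr(void)
+{
+    cvmx_pip_stat_ctl_t ctl;
+    ctl.u64 = cvmx_read_csr(CVMX_PIP_STAT_CTL);
+    ctl.s.rdclr = 1;                /* stat registers clear when read */
+    cvmx_write_csr(CVMX_PIP_STAT_CTL, ctl.u64);
+}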
+
+/**
+ * cvmx_pip_stat_inb_errs#
+ *
+ * PIP_STAT_INB_ERRSX = Inbound error packets received by PIP per port
+ *
+ * Inbound stats collect all data sent to PIP from all packet interfaces.
+ * It's the raw counts of everything that comes into the block. The counts
+ * will reflect all error packets and packets dropped by the PKI RED engine.
+ * These counts are intended for system debug, but could convey useful
+ * information in production systems.
+ */
+union cvmx_pip_stat_inb_errsx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_errsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t errs : 16; /**< Number of packets with errors
+ received by PIP */
+#else
+ uint64_t errs : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pip_stat_inb_errsx_s cn30xx;
+ struct cvmx_pip_stat_inb_errsx_s cn31xx;
+ struct cvmx_pip_stat_inb_errsx_s cn38xx;
+ struct cvmx_pip_stat_inb_errsx_s cn38xxp2;
+ struct cvmx_pip_stat_inb_errsx_s cn50xx;
+ struct cvmx_pip_stat_inb_errsx_s cn52xx;
+ struct cvmx_pip_stat_inb_errsx_s cn52xxp1;
+ struct cvmx_pip_stat_inb_errsx_s cn56xx;
+ struct cvmx_pip_stat_inb_errsx_s cn56xxp1;
+ struct cvmx_pip_stat_inb_errsx_s cn58xx;
+ struct cvmx_pip_stat_inb_errsx_s cn58xxp1;
+ struct cvmx_pip_stat_inb_errsx_s cn61xx;
+ struct cvmx_pip_stat_inb_errsx_s cn63xx;
+ struct cvmx_pip_stat_inb_errsx_s cn63xxp1;
+ struct cvmx_pip_stat_inb_errsx_s cn66xx;
+ struct cvmx_pip_stat_inb_errsx_s cnf71xx;
+};
+typedef union cvmx_pip_stat_inb_errsx cvmx_pip_stat_inb_errsx_t;
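+
+/* Illustrative sketch (editor's example): sampling the per-port inbound
+ * error count described above. CVMX_PIP_STAT_INB_ERRSX() is assumed to be
+ * the matching address macro from earlier in this header. */
+static inline uint16_t example_inb_errors(int port)
+{
+    cvmx_pip_stat_inb_errsx_t errs;
+    errs.u64 = cvmx_read_csr(CVMX_PIP_STAT_INB_ERRSX(port));
+    return errs.s.errs;             /* error packets received by PIP */
+}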
+
+/**
+ * cvmx_pip_stat_inb_errs_pknd#
+ *
+ * PIP_STAT_INB_ERRS_PKNDX = Inbound error packets received by PIP per pkind
+ *
+ * Inbound stats collect all data sent to PIP from all packet interfaces.
+ * It's the raw counts of everything that comes into the block. The counts
+ * will reflect all error packets and packets dropped by the PKI RED engine.
+ * These counts are intended for system debug, but could convey useful
+ * information in production systems.
+ */
+union cvmx_pip_stat_inb_errs_pkndx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_errs_pkndx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t errs : 16; /**< Number of packets with errors
+ received by PIP */
+#else
+ uint64_t errs : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pip_stat_inb_errs_pkndx_s cn68xx;
+ struct cvmx_pip_stat_inb_errs_pkndx_s cn68xxp1;
+};
+typedef union cvmx_pip_stat_inb_errs_pkndx cvmx_pip_stat_inb_errs_pkndx_t;
+
+/**
+ * cvmx_pip_stat_inb_octs#
+ *
+ * PIP_STAT_INB_OCTSX = Inbound octets received by PIP per port
+ *
+ * Inbound stats collect all data sent to PIP from all packet interfaces.
+ * It's the raw counts of everything that comes into the block. The counts
+ * will reflect all error packets and packets dropped by the PKI RED engine.
+ * These counts are intended for system debug, but could convey useful
+ * information in production systems. The OCTS will include the bytes from
+ * timestamp fields in PTP_MODE.
+ */
+union cvmx_pip_stat_inb_octsx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_octsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t octs : 48; /**< Total number of octets from all packets received
+ by PIP */
+#else
+ uint64_t octs : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_pip_stat_inb_octsx_s cn30xx;
+ struct cvmx_pip_stat_inb_octsx_s cn31xx;
+ struct cvmx_pip_stat_inb_octsx_s cn38xx;
+ struct cvmx_pip_stat_inb_octsx_s cn38xxp2;
+ struct cvmx_pip_stat_inb_octsx_s cn50xx;
+ struct cvmx_pip_stat_inb_octsx_s cn52xx;
+ struct cvmx_pip_stat_inb_octsx_s cn52xxp1;
+ struct cvmx_pip_stat_inb_octsx_s cn56xx;
+ struct cvmx_pip_stat_inb_octsx_s cn56xxp1;
+ struct cvmx_pip_stat_inb_octsx_s cn58xx;
+ struct cvmx_pip_stat_inb_octsx_s cn58xxp1;
+ struct cvmx_pip_stat_inb_octsx_s cn61xx;
+ struct cvmx_pip_stat_inb_octsx_s cn63xx;
+ struct cvmx_pip_stat_inb_octsx_s cn63xxp1;
+ struct cvmx_pip_stat_inb_octsx_s cn66xx;
+ struct cvmx_pip_stat_inb_octsx_s cnf71xx;
+};
+typedef union cvmx_pip_stat_inb_octsx cvmx_pip_stat_inb_octsx_t;
+
+/**
+ * cvmx_pip_stat_inb_octs_pknd#
+ *
+ * PIP_STAT_INB_OCTS_PKNDX = Inbound octets received by PIP per pkind
+ *
+ * Inbound stats collect all data sent to PIP from all packet interfaces.
+ * It's the raw counts of everything that comes into the block. The counts
+ * will reflect all error packets and packets dropped by the PKI RED engine.
+ * These counts are intended for system debug, but could convey useful
+ * information in production systems. The OCTS will include the bytes from
+ * timestamp fields in PTP_MODE.
+ */
+union cvmx_pip_stat_inb_octs_pkndx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_octs_pkndx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t octs : 48; /**< Total number of octets from all packets received
+ by PIP */
+#else
+ uint64_t octs : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_pip_stat_inb_octs_pkndx_s cn68xx;
+ struct cvmx_pip_stat_inb_octs_pkndx_s cn68xxp1;
+};
+typedef union cvmx_pip_stat_inb_octs_pkndx cvmx_pip_stat_inb_octs_pkndx_t;
+
+/**
+ * cvmx_pip_stat_inb_pkts#
+ *
+ * PIP_STAT_INB_PKTSX = Inbound packets received by PIP per port
+ *
+ * Inbound stats collect all data sent to PIP from all packet interfaces.
+ * It's the raw counts of everything that comes into the block. The counts
+ * will reflect all error packets and packets dropped by the PKI RED engine.
+ * These counts are intended for system debug, but could convey useful
+ * information in production systems.
+ */
+union cvmx_pip_stat_inb_pktsx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_pktsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pkts : 32; /**< Number of packets without errors
+ received by PIP */
+#else
+ uint64_t pkts : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat_inb_pktsx_s cn30xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn31xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn38xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn38xxp2;
+ struct cvmx_pip_stat_inb_pktsx_s cn50xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn52xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn52xxp1;
+ struct cvmx_pip_stat_inb_pktsx_s cn56xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn56xxp1;
+ struct cvmx_pip_stat_inb_pktsx_s cn58xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn58xxp1;
+ struct cvmx_pip_stat_inb_pktsx_s cn61xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn63xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn63xxp1;
+ struct cvmx_pip_stat_inb_pktsx_s cn66xx;
+ struct cvmx_pip_stat_inb_pktsx_s cnf71xx;
+};
+typedef union cvmx_pip_stat_inb_pktsx cvmx_pip_stat_inb_pktsx_t;
+
+/**
+ * cvmx_pip_stat_inb_pkts_pknd#
+ *
+ * PIP_STAT_INB_PKTS_PKNDX = Inbound packets received by PIP per pkind
+ *
+ * Inbound stats collect all data sent to PIP from all packet interfaces.
+ * It's the raw counts of everything that comes into the block. The counts
+ * will reflect all error packets and packets dropped by the PKI RED engine.
+ * These counts are intended for system debug, but could convey useful
+ * information in production systems.
+ */
+union cvmx_pip_stat_inb_pkts_pkndx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_pkts_pkndx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pkts : 32; /**< Number of packets without errors
+ received by PIP */
+#else
+ uint64_t pkts : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pip_stat_inb_pkts_pkndx_s cn68xx;
+ struct cvmx_pip_stat_inb_pkts_pkndx_s cn68xxp1;
+};
+typedef union cvmx_pip_stat_inb_pkts_pkndx cvmx_pip_stat_inb_pkts_pkndx_t;
+
+/**
+ * cvmx_pip_sub_pkind_fcs#
+ */
+union cvmx_pip_sub_pkind_fcsx {
+ uint64_t u64;
+ struct cvmx_pip_sub_pkind_fcsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t port_bit : 64; /**< When set to '1', the pkind corresponding to the
+ bit position will subtract the FCS for packets
+ on that pkind. */
+#else
+ uint64_t port_bit : 64;
+#endif
+ } s;
+ struct cvmx_pip_sub_pkind_fcsx_s cn68xx;
+ struct cvmx_pip_sub_pkind_fcsx_s cn68xxp1;
+};
+typedef union cvmx_pip_sub_pkind_fcsx cvmx_pip_sub_pkind_fcsx_t;
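+
+/* Illustrative sketch (editor's example): marking one pkind so PIP
+ * subtracts the FCS from its packets, using the bitmap above. Assumes the
+ * CVMX_PIP_SUB_PKIND_FCSX() address macro; index 0 covers pkinds 0-63. */
+static inline void example_subtract_fcs(int pkind)
+{
+    cvmx_pip_sub_pkind_fcsx_t fcs;
+    fcs.u64 = cvmx_read_csr(CVMX_PIP_SUB_PKIND_FCSX(0));
+    fcs.s.port_bit |= 1ull << pkind;   /* subtract FCS on this pkind */
+    cvmx_write_csr(CVMX_PIP_SUB_PKIND_FCSX(0), fcs.u64);
+}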
+
+/**
+ * cvmx_pip_tag_inc#
+ *
+ * PIP_TAG_INC = Which bytes to include in the new tag hash algorithm
+ *
+ * # $PIP_TAG_INCX = 0x300+X X=(0..63) RegType=(RSL) RtlReg=(pip_tag_inc_csr_direct_TestbuilderTask)
+ */
+union cvmx_pip_tag_incx {
+ uint64_t u64;
+ struct cvmx_pip_tag_incx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t en : 8; /**< Which bytes to include in mask tag algorithm
+ Broken into 4, 16-entry masks to cover 128B
+ PIP_PRT_CFG[TAG_INC] selects 1 of 4 to use
+ registers 0-15 map to PIP_PRT_CFG[TAG_INC] == 0
+ registers 16-31 map to PIP_PRT_CFG[TAG_INC] == 1
+ registers 32-47 map to PIP_PRT_CFG[TAG_INC] == 2
+ registers 48-63 map to PIP_PRT_CFG[TAG_INC] == 3
+ [7] corresponds to the MSB of the 8B word
+ [0] corresponds to the LSB of the 8B word
+ If PTP_MODE, the 8B timestamp is prepended to the
+ packet. The EN byte masks should be adjusted to
+ compensate for the additional timestamp field. */
+#else
+ uint64_t en : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_pip_tag_incx_s cn30xx;
+ struct cvmx_pip_tag_incx_s cn31xx;
+ struct cvmx_pip_tag_incx_s cn38xx;
+ struct cvmx_pip_tag_incx_s cn38xxp2;
+ struct cvmx_pip_tag_incx_s cn50xx;
+ struct cvmx_pip_tag_incx_s cn52xx;
+ struct cvmx_pip_tag_incx_s cn52xxp1;
+ struct cvmx_pip_tag_incx_s cn56xx;
+ struct cvmx_pip_tag_incx_s cn56xxp1;
+ struct cvmx_pip_tag_incx_s cn58xx;
+ struct cvmx_pip_tag_incx_s cn58xxp1;
+ struct cvmx_pip_tag_incx_s cn61xx;
+ struct cvmx_pip_tag_incx_s cn63xx;
+ struct cvmx_pip_tag_incx_s cn63xxp1;
+ struct cvmx_pip_tag_incx_s cn66xx;
+ struct cvmx_pip_tag_incx_s cn68xx;
+ struct cvmx_pip_tag_incx_s cn68xxp1;
+ struct cvmx_pip_tag_incx_s cnf71xx;
+};
+typedef union cvmx_pip_tag_incx cvmx_pip_tag_incx_t;
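+
+/* Illustrative sketch (editor's example): selecting the PIP_TAG_INC register
+ * and EN bit that cover a given byte offset in the 128B window, for the bank
+ * chosen by PIP_PRT_CFG[TAG_INC]. The index math follows the register
+ * mapping described above; the helper name and the CVMX_PIP_TAG_INCX()
+ * macro usage are the editor's assumptions. */
+static inline void example_tag_inc_byte(int bank, int byte_offset)
+{
+    int reg = bank * 16 + byte_offset / 8;   /* 16 registers of 8B per bank */
+    int bit = 7 - (byte_offset % 8);         /* [7] is the MSB of the 8B word */
+    cvmx_pip_tag_incx_t inc;
+    inc.u64 = cvmx_read_csr(CVMX_PIP_TAG_INCX(reg));
+    inc.s.en |= 1 << bit;                    /* include this byte in the tag hash */
+    cvmx_write_csr(CVMX_PIP_TAG_INCX(reg), inc.u64);
+}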
+
+/**
+ * cvmx_pip_tag_mask
+ *
+ * PIP_TAG_MASK = Mask bit in the tag generation
+ *
+ */
+union cvmx_pip_tag_mask {
+ uint64_t u64;
+ struct cvmx_pip_tag_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mask : 16; /**< When set, MASK clears individual bits of lower 16
+ bits of the computed tag. Does not affect RAW
+ or INSTR HDR packets. */
+#else
+ uint64_t mask : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pip_tag_mask_s cn30xx;
+ struct cvmx_pip_tag_mask_s cn31xx;
+ struct cvmx_pip_tag_mask_s cn38xx;
+ struct cvmx_pip_tag_mask_s cn38xxp2;
+ struct cvmx_pip_tag_mask_s cn50xx;
+ struct cvmx_pip_tag_mask_s cn52xx;
+ struct cvmx_pip_tag_mask_s cn52xxp1;
+ struct cvmx_pip_tag_mask_s cn56xx;
+ struct cvmx_pip_tag_mask_s cn56xxp1;
+ struct cvmx_pip_tag_mask_s cn58xx;
+ struct cvmx_pip_tag_mask_s cn58xxp1;
+ struct cvmx_pip_tag_mask_s cn61xx;
+ struct cvmx_pip_tag_mask_s cn63xx;
+ struct cvmx_pip_tag_mask_s cn63xxp1;
+ struct cvmx_pip_tag_mask_s cn66xx;
+ struct cvmx_pip_tag_mask_s cn68xx;
+ struct cvmx_pip_tag_mask_s cn68xxp1;
+ struct cvmx_pip_tag_mask_s cnf71xx;
+};
+typedef union cvmx_pip_tag_mask cvmx_pip_tag_mask_t;
+
+/**
+ * cvmx_pip_tag_secret
+ *
+ * PIP_TAG_SECRET = Initial value in tag generation
+ *
+ * The source and destination IVs provide a mechanism for each Octeon to be unique.
+ */
+union cvmx_pip_tag_secret {
+ uint64_t u64;
+ struct cvmx_pip_tag_secret_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t dst : 16; /**< Secret for the destination tuple tag CRC calc */
+ uint64_t src : 16; /**< Secret for the source tuple tag CRC calc */
+#else
+ uint64_t src : 16;
+ uint64_t dst : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pip_tag_secret_s cn30xx;
+ struct cvmx_pip_tag_secret_s cn31xx;
+ struct cvmx_pip_tag_secret_s cn38xx;
+ struct cvmx_pip_tag_secret_s cn38xxp2;
+ struct cvmx_pip_tag_secret_s cn50xx;
+ struct cvmx_pip_tag_secret_s cn52xx;
+ struct cvmx_pip_tag_secret_s cn52xxp1;
+ struct cvmx_pip_tag_secret_s cn56xx;
+ struct cvmx_pip_tag_secret_s cn56xxp1;
+ struct cvmx_pip_tag_secret_s cn58xx;
+ struct cvmx_pip_tag_secret_s cn58xxp1;
+ struct cvmx_pip_tag_secret_s cn61xx;
+ struct cvmx_pip_tag_secret_s cn63xx;
+ struct cvmx_pip_tag_secret_s cn63xxp1;
+ struct cvmx_pip_tag_secret_s cn66xx;
+ struct cvmx_pip_tag_secret_s cn68xx;
+ struct cvmx_pip_tag_secret_s cn68xxp1;
+ struct cvmx_pip_tag_secret_s cnf71xx;
+};
+typedef union cvmx_pip_tag_secret cvmx_pip_tag_secret_t;
+
+/**
+ * cvmx_pip_todo_entry
+ *
+ * PIP_TODO_ENTRY = Head entry of the Todo list (debug only)
+ *
+ * Summary of the current packet that has completed and is waiting to be processed
+ */
+union cvmx_pip_todo_entry {
+ uint64_t u64;
+ struct cvmx_pip_todo_entry_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t val : 1; /**< Entry is valid */
+ uint64_t reserved_62_62 : 1;
+ uint64_t entry : 62; /**< Todo list entry summary */
+#else
+ uint64_t entry : 62;
+ uint64_t reserved_62_62 : 1;
+ uint64_t val : 1;
+#endif
+ } s;
+ struct cvmx_pip_todo_entry_s cn30xx;
+ struct cvmx_pip_todo_entry_s cn31xx;
+ struct cvmx_pip_todo_entry_s cn38xx;
+ struct cvmx_pip_todo_entry_s cn38xxp2;
+ struct cvmx_pip_todo_entry_s cn50xx;
+ struct cvmx_pip_todo_entry_s cn52xx;
+ struct cvmx_pip_todo_entry_s cn52xxp1;
+ struct cvmx_pip_todo_entry_s cn56xx;
+ struct cvmx_pip_todo_entry_s cn56xxp1;
+ struct cvmx_pip_todo_entry_s cn58xx;
+ struct cvmx_pip_todo_entry_s cn58xxp1;
+ struct cvmx_pip_todo_entry_s cn61xx;
+ struct cvmx_pip_todo_entry_s cn63xx;
+ struct cvmx_pip_todo_entry_s cn63xxp1;
+ struct cvmx_pip_todo_entry_s cn66xx;
+ struct cvmx_pip_todo_entry_s cn68xx;
+ struct cvmx_pip_todo_entry_s cn68xxp1;
+ struct cvmx_pip_todo_entry_s cnf71xx;
+};
+typedef union cvmx_pip_todo_entry cvmx_pip_todo_entry_t;
+
+/**
+ * cvmx_pip_vlan_etypes#
+ */
+union cvmx_pip_vlan_etypesx {
+ uint64_t u64;
+ struct cvmx_pip_vlan_etypesx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t type3 : 16; /**< VLAN Ethertype */
+ uint64_t type2 : 16; /**< VLAN Ethertype */
+ uint64_t type1 : 16; /**< VLAN Ethertype */
+ uint64_t type0 : 16; /**< VLAN Ethertype
+ Specifies ethertypes that will be parsed as
+ containing VLAN information. Each TYPE is
+ orthogonal; if all eight are not required, set
+ multiple TYPEs to the same value (as in the
+ 0x8100 default value). */
+#else
+ uint64_t type0 : 16;
+ uint64_t type1 : 16;
+ uint64_t type2 : 16;
+ uint64_t type3 : 16;
+#endif
+ } s;
+ struct cvmx_pip_vlan_etypesx_s cn61xx;
+ struct cvmx_pip_vlan_etypesx_s cn66xx;
+ struct cvmx_pip_vlan_etypesx_s cn68xx;
+ struct cvmx_pip_vlan_etypesx_s cnf71xx;
+};
+typedef union cvmx_pip_vlan_etypesx cvmx_pip_vlan_etypesx_t;
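+
+/* Illustrative sketch (editor's example): programming one of the VLAN
+ * ethertype registers, repeating a value in unused slots as the note above
+ * suggests. The CVMX_PIP_VLAN_ETYPESX() address macro is assumed. */
+static inline void example_set_vlan_etypes(void)
+{
+    cvmx_pip_vlan_etypesx_t et;
+    et.s.type0 = 0x8100;   /* C-VLAN (802.1Q) */
+    et.s.type1 = 0x88a8;   /* S-VLAN (802.1ad) */
+    et.s.type2 = 0x8100;   /* unused slots repeat the default */
+    et.s.type3 = 0x8100;
+    cvmx_write_csr(CVMX_PIP_VLAN_ETYPESX(0), et.u64);
+}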
+
+/**
+ * cvmx_pip_xstat0_prt#
+ *
+ * PIP_XSTAT0_PRT = PIP_XSTAT_DRP_PKTS / PIP_XSTAT_DRP_OCTS
+ *
+ */
+union cvmx_pip_xstat0_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat0_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t drp_pkts : 32; /**< Inbound packets marked to be dropped by the IPD
+ QOS widget per port */
+ uint64_t drp_octs : 32; /**< Inbound octets marked to be dropped by the IPD
+ QOS widget per port */
+#else
+ uint64_t drp_octs : 32;
+ uint64_t drp_pkts : 32;
+#endif
+ } s;
+ struct cvmx_pip_xstat0_prtx_s cn63xx;
+ struct cvmx_pip_xstat0_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat0_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat0_prtx cvmx_pip_xstat0_prtx_t;
+
+/**
+ * cvmx_pip_xstat10_prt#
+ *
+ * PIP_XSTAT10_PRTX = PIP_XSTAT_L2_MCAST / PIP_XSTAT_L2_BCAST
+ *
+ */
+union cvmx_pip_xstat10_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat10_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast : 32; /**< Number of packets with L2 Broadcast DMAC
+ that were dropped due to RED.
+ The HW will consider a packet to be an L2
+ broadcast packet when the 48-bit DMAC is all 1's.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2. */
+ uint64_t mcast : 32; /**< Number of packets with L2 Multicast DMAC
+ that were dropped due to RED.
+ The HW will consider a packet to be an L2
+ multicast packet when the least-significant bit
+ of the first byte of the DMAC is set and the
+ packet is not an L2 broadcast packet.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2. */
+#else
+ uint64_t mcast : 32;
+ uint64_t bcast : 32;
+#endif
+ } s;
+ struct cvmx_pip_xstat10_prtx_s cn63xx;
+ struct cvmx_pip_xstat10_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat10_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat10_prtx cvmx_pip_xstat10_prtx_t;
+
+/**
+ * cvmx_pip_xstat11_prt#
+ *
+ * PIP_XSTAT11_PRTX = PIP_XSTAT_L3_MCAST / PIP_XSTAT_L3_BCAST
+ *
+ */
+union cvmx_pip_xstat11_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat11_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast : 32; /**< Number of packets with L3 Broadcast Dest Address
+ that were dropped due to RED.
+ The HW considers an IPv4 packet to be broadcast
+ when all bits are set in the MSB of the
+ destination address. IPv6 does not have the
+ concept of a broadcast packet.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2 and the packet is IP or the parse
+ mode for the packet is SKIP-TO-IP. */
+ uint64_t mcast : 32; /**< Number of packets with L3 Multicast Dest Address
+ that were dropped due to RED.
+ The HW considers an IPv4 packet to be multicast
+ when the most-significant nibble of the 32-bit
+ destination address is 0xE (i.e. it is a class D
+ address). The HW considers an IPv6 packet to be
+ multicast when the most-significant byte of the
+ 128-bit destination address is all 1's.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2 and the packet is IP or the parse
+ mode for the packet is SKIP-TO-IP. */
+#else
+ uint64_t mcast : 32;
+ uint64_t bcast : 32;
+#endif
+ } s;
+ struct cvmx_pip_xstat11_prtx_s cn63xx;
+ struct cvmx_pip_xstat11_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat11_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat11_prtx cvmx_pip_xstat11_prtx_t;
+
+/**
+ * cvmx_pip_xstat1_prt#
+ *
+ * PIP_XSTAT1_PRTX = PIP_XSTAT_OCTS
+ *
+ */
+union cvmx_pip_xstat1_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat1_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t octs : 48; /**< Number of octets received by PIP (good and bad) */
+#else
+ uint64_t octs : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_pip_xstat1_prtx_s cn63xx;
+ struct cvmx_pip_xstat1_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat1_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat1_prtx cvmx_pip_xstat1_prtx_t;
+
+/**
+ * cvmx_pip_xstat2_prt#
+ *
+ * PIP_XSTAT2_PRTX = PIP_XSTAT_PKTS / PIP_XSTAT_RAW
+ *
+ */
+union cvmx_pip_xstat2_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat2_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pkts : 32; /**< Number of packets processed by PIP */
+ uint64_t raw : 32; /**< RAWFULL + RAWSCH Packets without an L1/L2 error
+ received by PIP per port */
+#else
+ uint64_t raw : 32;
+ uint64_t pkts : 32;
+#endif
+ } s;
+ struct cvmx_pip_xstat2_prtx_s cn63xx;
+ struct cvmx_pip_xstat2_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat2_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat2_prtx cvmx_pip_xstat2_prtx_t;
+
+/**
+ * cvmx_pip_xstat3_prt#
+ *
+ * PIP_XSTAT3_PRTX = PIP_XSTAT_BCST / PIP_XSTAT_MCST
+ *
+ */
+union cvmx_pip_xstat3_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat3_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcst : 32; /**< Number of identified L2 broadcast packets
+ Does not include multicast packets
+ Only includes packets whose parse mode is
+ SKIP_TO_L2. */
+ uint64_t mcst : 32; /**< Number of identified L2 multicast packets
+ Does not include broadcast packets
+ Only includes packets whose parse mode is
+ SKIP_TO_L2. */
+#else
+ uint64_t mcst : 32;
+ uint64_t bcst : 32;
+#endif
+ } s;
+ struct cvmx_pip_xstat3_prtx_s cn63xx;
+ struct cvmx_pip_xstat3_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat3_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat3_prtx cvmx_pip_xstat3_prtx_t;
+
+/**
+ * cvmx_pip_xstat4_prt#
+ *
+ * PIP_XSTAT4_PRTX = PIP_XSTAT_HIST1 / PIP_XSTAT_HIST0
+ *
+ */
+union cvmx_pip_xstat4_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat4_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h65to127 : 32; /**< Number of 65-127B packets */
+ uint64_t h64 : 32; /**< Number of 1-64B packets */
+#else
+ uint64_t h64 : 32;
+ uint64_t h65to127 : 32;
+#endif
+ } s;
+ struct cvmx_pip_xstat4_prtx_s cn63xx;
+ struct cvmx_pip_xstat4_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat4_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat4_prtx cvmx_pip_xstat4_prtx_t;
+
+/**
+ * cvmx_pip_xstat5_prt#
+ *
+ * PIP_XSTAT5_PRTX = PIP_XSTAT_HIST3 / PIP_XSTAT_HIST2
+ *
+ */
+union cvmx_pip_xstat5_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat5_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h256to511 : 32; /**< Number of 256-511B packets */
+ uint64_t h128to255 : 32; /**< Number of 128-255B packets */
+#else
+ uint64_t h128to255 : 32;
+ uint64_t h256to511 : 32;
+#endif
+ } s;
+ struct cvmx_pip_xstat5_prtx_s cn63xx;
+ struct cvmx_pip_xstat5_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat5_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat5_prtx cvmx_pip_xstat5_prtx_t;
+
+/**
+ * cvmx_pip_xstat6_prt#
+ *
+ * PIP_XSTAT6_PRTX = PIP_XSTAT_HIST5 / PIP_XSTAT_HIST4
+ *
+ */
+union cvmx_pip_xstat6_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat6_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h1024to1518 : 32; /**< Number of 1024-1518B packets */
+ uint64_t h512to1023 : 32; /**< Number of 512-1023B packets */
+#else
+ uint64_t h512to1023 : 32;
+ uint64_t h1024to1518 : 32;
+#endif
+ } s;
+ struct cvmx_pip_xstat6_prtx_s cn63xx;
+ struct cvmx_pip_xstat6_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat6_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat6_prtx cvmx_pip_xstat6_prtx_t;
+
+/**
+ * cvmx_pip_xstat7_prt#
+ *
+ * PIP_XSTAT7_PRTX = PIP_XSTAT_FCS / PIP_XSTAT_HIST6
+ *
+ *
+ * Notes:
+ * DPI does not check FCS, therefore FCS will never increment on DPI ports 32-35
+ * sRIO does not check FCS, therefore FCS will never increment on sRIO ports 40-47
+ */
+union cvmx_pip_xstat7_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat7_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fcs : 32; /**< Number of packets with FCS or Align opcode errors */
+ uint64_t h1519 : 32; /**< Number of 1519-max packets */
+#else
+ uint64_t h1519 : 32;
+ uint64_t fcs : 32;
+#endif
+ } s;
+ struct cvmx_pip_xstat7_prtx_s cn63xx;
+ struct cvmx_pip_xstat7_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat7_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat7_prtx cvmx_pip_xstat7_prtx_t;
+
+/**
+ * cvmx_pip_xstat8_prt#
+ *
+ * PIP_XSTAT8_PRTX = PIP_XSTAT_FRAG / PIP_XSTAT_UNDER
+ *
+ *
+ * Notes:
+ * DPI does not check FCS, therefore FRAG will never increment on DPI ports 32-35
+ * sRIO does not check FCS, therefore FRAG will never increment on sRIO ports 40-47
+ */
+union cvmx_pip_xstat8_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat8_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t frag : 32; /**< Number of packets with length < min and FCS error */
+ uint64_t undersz : 32; /**< Number of packets with length < min */
+#else
+ uint64_t undersz : 32;
+ uint64_t frag : 32;
+#endif
+ } s;
+ struct cvmx_pip_xstat8_prtx_s cn63xx;
+ struct cvmx_pip_xstat8_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat8_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat8_prtx cvmx_pip_xstat8_prtx_t;
+
+/**
+ * cvmx_pip_xstat9_prt#
+ *
+ * PIP_XSTAT9_PRTX = PIP_XSTAT_JABBER / PIP_XSTAT_OVER
+ *
+ *
+ * Notes:
+ * DPI does not check FCS, therefore JABBER will never increment on DPI ports 32-35
+ * sRIO does not check FCS, therefore JABBER will never increment on sRIO ports 40-47 due to FCS errors
+ * sRIO does use the JABBER opcode to communicate sRIO error, therefore JABBER can increment under the sRIO error conditions
+ */
+union cvmx_pip_xstat9_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat9_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t jabber : 32; /**< Number of packets with length > max and FCS error */
+ uint64_t oversz : 32; /**< Number of packets with length > max */
+#else
+ uint64_t oversz : 32;
+ uint64_t jabber : 32;
+#endif
+ } s;
+ struct cvmx_pip_xstat9_prtx_s cn63xx;
+ struct cvmx_pip_xstat9_prtx_s cn63xxp1;
+ struct cvmx_pip_xstat9_prtx_s cn66xx;
+};
+typedef union cvmx_pip_xstat9_prtx cvmx_pip_xstat9_prtx_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pip-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pip.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pip.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pip.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,807 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Interface to the hardware Packet Input Processing unit.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+
+#ifndef __CVMX_PIP_H__
+#define __CVMX_PIP_H__
+
+#include "cvmx-wqe.h"
+#include "cvmx-fpa.h"
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include "cvmx-pip-defs.h"
+#else
+#ifndef CVMX_DONT_INCLUDE_CONFIG
+#include "executive-config.h"
+#endif
+#endif
+
+#include "cvmx-helper.h"
+#include "cvmx-helper-util.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_PIP_NUM_INPUT_PORTS 46
+
+/*
+ * Encodes the different error and exception codes
+ */
+typedef enum
+{
+ CVMX_PIP_L4_NO_ERR = 0ull,
+ /* 1 = TCP (UDP) packet not long enough to cover TCP (UDP) header */
+ CVMX_PIP_L4_MAL_ERR = 1ull,
+ /* 2 = TCP/UDP checksum failure */
+ CVMX_PIP_CHK_ERR = 2ull,
+ /* 3 = TCP/UDP length check (TCP/UDP length does not match IP length) */
+ CVMX_PIP_L4_LENGTH_ERR = 3ull,
+ /* 4 = illegal TCP/UDP port (either source or dest port is zero) */
+ CVMX_PIP_BAD_PRT_ERR = 4ull,
+ /* 8 = TCP flags = FIN only */
+ CVMX_PIP_TCP_FLG8_ERR = 8ull,
+ /* 9 = TCP flags = 0 */
+ CVMX_PIP_TCP_FLG9_ERR = 9ull,
+ /* 10 = TCP flags = FIN+RST+* */
+ CVMX_PIP_TCP_FLG10_ERR = 10ull,
+ /* 11 = TCP flags = SYN+URG+* */
+ CVMX_PIP_TCP_FLG11_ERR = 11ull,
+ /* 12 = TCP flags = SYN+RST+* */
+ CVMX_PIP_TCP_FLG12_ERR = 12ull,
+ /* 13 = TCP flags = SYN+FIN+* */
+ CVMX_PIP_TCP_FLG13_ERR = 13ull
+} cvmx_pip_l4_err_t;
+
+typedef enum
+{
+
+ CVMX_PIP_IP_NO_ERR = 0ull,
+ /* 1 = not IPv4 or IPv6 */
+ CVMX_PIP_NOT_IP = 1ull,
+ /* 2 = IPv4 header checksum violation */
+ CVMX_PIP_IPV4_HDR_CHK = 2ull,
+ /* 3 = malformed (packet not long enough to cover IP hdr) */
+ CVMX_PIP_IP_MAL_HDR = 3ull,
+ /* 4 = malformed (packet not long enough to cover len in IP hdr) */
+ CVMX_PIP_IP_MAL_PKT = 4ull,
+ /* 5 = TTL / hop count equal zero */
+ CVMX_PIP_TTL_HOP = 5ull,
+ /* 6 = IPv4 options / IPv6 early extension headers */
+ CVMX_PIP_OPTS = 6ull
+} cvmx_pip_ip_exc_t;
+
+
+/**
+ * NOTES
+ * late collision (data received before collision)
+ * late collisions cannot be detected by the receiver
+ * they would appear as JAM bits which would appear as bad FCS
+ * or carrier extend error which is CVMX_PIP_EXTEND_ERR
+ */
+typedef enum
+{
+ /**
+ * No error
+ */
+ CVMX_PIP_RX_NO_ERR = 0ull,
+
+ CVMX_PIP_PARTIAL_ERR = 1ull, /* RGM+SPI 1 = partially received packet (buffering/bandwidth not adequate) */
+ CVMX_PIP_JABBER_ERR = 2ull, /* RGM+SPI 2 = receive packet too large and truncated */
+ CVMX_PIP_OVER_FCS_ERR = 3ull, /* RGM 3 = max frame error (pkt len > max frame len) (with FCS error) */
+ CVMX_PIP_OVER_ERR = 4ull, /* RGM+SPI 4 = max frame error (pkt len > max frame len) */
+ CVMX_PIP_ALIGN_ERR = 5ull, /* RGM 5 = nibble error (data not byte multiple - 100M and 10M only) */
+ CVMX_PIP_UNDER_FCS_ERR = 6ull, /* RGM 6 = min frame error (pkt len < min frame len) (with FCS error) */
+ CVMX_PIP_GMX_FCS_ERR = 7ull, /* RGM 7 = FCS error */
+ CVMX_PIP_UNDER_ERR = 8ull, /* RGM+SPI 8 = min frame error (pkt len < min frame len) */
+ CVMX_PIP_EXTEND_ERR = 9ull, /* RGM 9 = Frame carrier extend error */
+ CVMX_PIP_TERMINATE_ERR = 9ull, /* XAUI 9 = Packet was terminated with an idle cycle */
+ CVMX_PIP_LENGTH_ERR = 10ull, /* RGM 10 = length mismatch (len did not match len in L2 length/type) */
+ CVMX_PIP_DAT_ERR = 11ull, /* RGM 11 = Frame error (some or all data bits marked err) */
+ CVMX_PIP_DIP_ERR = 11ull, /* SPI 11 = DIP4 error */
+ CVMX_PIP_SKIP_ERR = 12ull, /* RGM 12 = packet was not large enough to pass the skipper - no inspection could occur */
+ CVMX_PIP_NIBBLE_ERR = 13ull, /* RGM 13 = stutter error (data not repeated - 100M and 10M only) */
+ CVMX_PIP_PIP_FCS = 16L, /* RGM+SPI 16 = FCS error */
+ CVMX_PIP_PIP_SKIP_ERR = 17L, /* RGM+SPI+PCI 17 = packet was not large enough to pass the skipper - no inspection could occur */
+ CVMX_PIP_PIP_L2_MAL_HDR= 18L, /* RGM+SPI+PCI 18 = malformed l2 (packet not long enough to cover L2 hdr) */
+ CVMX_PIP_PUNY_ERR = 47L /* SGMII 47 = PUNY error (packet was 4B or less when FCS stripping is enabled) */
+} cvmx_pip_rcv_err_t;
+
+/**
+ * This defines the err_code field errors in the work Q entry
+ */
+typedef union
+{
+ cvmx_pip_l4_err_t l4_err;
+ cvmx_pip_ip_exc_t ip_exc;
+ cvmx_pip_rcv_err_t rcv_err;
+} cvmx_pip_err_t;
+
+
+/**
+ * Status statistics for a port
+ */
+typedef struct
+{
+ uint32_t dropped_octets; /**< Inbound octets marked to be dropped by the IPD */
+ uint32_t dropped_packets; /**< Inbound packets marked to be dropped by the IPD */
+ uint32_t pci_raw_packets; /**< RAW PCI Packets received by PIP per port */
+ uint32_t octets; /**< Number of octets processed by PIP */
+ uint32_t packets; /**< Number of packets processed by PIP */
+ uint32_t multicast_packets; /**< Number of identified L2 multicast packets.
+ Does not include broadcast packets.
+ Only includes packets whose parse mode is
+ SKIP_TO_L2 */
+ uint32_t broadcast_packets; /**< Number of identified L2 broadcast packets.
+ Does not include multicast packets.
+ Only includes packets whose parse mode is
+ SKIP_TO_L2 */
+ uint32_t len_64_packets; /**< Number of 64B packets */
+ uint32_t len_65_127_packets; /**< Number of 65-127B packets */
+ uint32_t len_128_255_packets; /**< Number of 128-255B packets */
+ uint32_t len_256_511_packets; /**< Number of 256-511B packets */
+ uint32_t len_512_1023_packets; /**< Number of 512-1023B packets */
+ uint32_t len_1024_1518_packets; /**< Number of 1024-1518B packets */
+ uint32_t len_1519_max_packets; /**< Number of 1519-max packets */
+ uint32_t fcs_align_err_packets; /**< Number of packets with FCS or Align opcode errors */
+ uint32_t runt_packets; /**< Number of packets with length < min */
+ uint32_t runt_crc_packets; /**< Number of packets with length < min and FCS error */
+ uint32_t oversize_packets; /**< Number of packets with length > max */
+ uint32_t oversize_crc_packets; /**< Number of packets with length > max and FCS error */
+ uint32_t inb_packets; /**< Number of packets without GMX/SPX/PCI errors received by PIP */
+ uint64_t inb_octets; /**< Total number of octets from all packets received by PIP, including CRC */
+ uint16_t inb_errors; /**< Number of packets with GMX/SPX/PCI errors received by PIP */
+ uint32_t mcast_l2_red_packets; /**< Number of packets with L2 Multicast DMAC
+ that were dropped due to RED.
+ The HW will consider a packet to be an L2
+ multicast packet when the least-significant bit
+ of the first byte of the DMAC is set and the
+ packet is not an L2 broadcast packet.
+ Only applies when the parse mode for the packets
+ is SKIP-TO-L2 */
+ uint32_t bcast_l2_red_packets; /**< Number of packets with L2 Broadcast DMAC
+ that were dropped due to RED.
+ The HW will consider a packet to be an L2
+ broadcast packet when the 48-bit DMAC is all 1's.
+ Only applies when the parse mode for the packets
+ is SKIP-TO-L2 */
+ uint32_t mcast_l3_red_packets; /**< Number of packets with L3 Multicast Dest Address
+ that were dropped due to RED.
+ The HW considers an IPv4 packet to be multicast
+ when the most-significant nibble of the 32-bit
+ destination address is 0xE (i.e., it is a class D
+ address). The HW considers an IPv6 packet to be
+ multicast when the most-significant byte of the
+ 128-bit destination address is all 1's.
+ Only applies when the parse mode for the packets
+ is SKIP-TO-L2 and the packet is IP or the parse
+ mode for the packet is SKIP-TO-IP */
+ uint32_t bcast_l3_red_packets; /**< Number of packets with L3 Broadcast Dest Address
+ that were dropped due to RED.
+ The HW considers an IPv4 packet to be broadcast
+ when all bits are set in the MSB of the
+ destination address. IPv6 does not have the
+ concept of broadcast packets.
+ Only applies when the parse mode for the packet
+ is SKIP-TO-L2 and the packet is IP or the parse
+ mode for the packet is SKIP-TO-IP */
+} cvmx_pip_port_status_t;
+
+
+/**
+ * Definition of the PIP custom header that can be prepended
+ * to a packet by external hardware.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t rawfull : 1; /**< Documented as R - Set if the Packet is RAWFULL. If set,
+ this header must be the full 8 bytes */
+ uint64_t reserved0 : 5; /**< Must be zero */
+ cvmx_pip_port_parse_mode_t parse_mode : 2; /**< PIP parse mode for this packet */
+ uint64_t reserved1 : 1; /**< Must be zero */
+ uint64_t skip_len : 7; /**< Skip amount, including this header, to the beginning of the packet */
+ uint64_t grpext : 2; /**< These bits get concatenated with the
+ PKT_INST_HDR[GRP] bits, creating a 6-bit
+ GRP field. Added in pass2. */
+ uint64_t nqos : 1; /**< Must be 0 when PKT_INST_HDR[R] = 0.
+ When set to 1, NQOS prevents PIP from directly using
+ PKT_INST_HDR[QOS] for the QOS value in WQE.
+ When PIP_GBL_CTL[IHMSK_DIS] = 1, Octeon2 does not use NQOS */
+ uint64_t ngrp : 1; /**< Must be 0 when PKT_INST_HDR[R] = 0.
+ When set to 1, NGRP prevents PIP from directly using
+ PKT_INST_HDR[GRP] for the GRP value in WQE.
+ When PIP_GBL_CTL[IHMSK_DIS] = 1, Octeon2 does not use NGRP */
+ uint64_t ntt : 1; /**< Must be 0 when PKT_INST_HDR[R] = 0.
+ When set to 1, NTT prevents PIP from directly using
+ PKT_INST_HDR[TT] for the TT value in WQE.
+ When PIP_GBL_CTL[IHMSK_DIS] = 1, Octeon2 does not use NTT */
+ uint64_t ntag : 1; /**< Must be 0 when PKT_INST_HDR[R] = 0.
+ When set to 1, NTAG prevents PIP from directly using
+ PKT_INST_HDR[TAG] for the TAG value in WQE.
+ When PIP_GBL_CTL[IHMSK_DIS] = 1, Octeon2 does not use NTAG */
+ uint64_t qos : 3; /**< POW input queue for this packet */
+ uint64_t grp : 4; /**< POW input group for this packet */
+ uint64_t rs : 1; /**< Flag to store this packet in the work queue entry, if possible */
+ cvmx_pow_tag_type_t tag_type : 2; /**< POW input tag type */
+ uint64_t tag : 32; /**< POW input tag */
+ } s;
+} cvmx_pip_pkt_inst_hdr_t;
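+
+/* Illustrative sketch (editor's addition, not part of the original SDK):
+ * build a minimal PKT_INST_HDR word for a RAWFULL packet that parses from
+ * the L2 header. The parse-mode enumerator name is assumed from the
+ * cvmx_pip_port_parse_mode_t definition earlier in this header.
+ */
+static inline uint64_t cvmx_pip_example_build_inst_hdr(uint64_t skip_len)
+{
+ cvmx_pip_pkt_inst_hdr_t hdr;
+
+ hdr.u64 = 0;
+ hdr.s.rawfull = 1; /* full 8-byte header */
+ hdr.s.parse_mode = CVMX_PIP_PORT_CFG_MODE_SKIPL2;
+ hdr.s.skip_len = skip_len & 0x7f; /* 7-bit field */
+ return hdr.u64;
+}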
+
+/* CSR typedefs have been moved to cvmx-pip-defs.h */
+
+/**
+ * Configure an ethernet input port
+ *
+ * @param port_num Port number to configure
+ * @param port_cfg Port hardware configuration
+ * @param port_tag_cfg
+ * Port POW tagging configuration
+ */
+static inline void cvmx_pip_config_port(uint64_t port_num,
+ cvmx_pip_prt_cfgx_t port_cfg,
+ cvmx_pip_prt_tagx_t port_tag_cfg)
+{
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ int interface, index, pknd;
+
+ interface = cvmx_helper_get_interface_num(port_num);
+ index = cvmx_helper_get_interface_index_num(port_num);
+ pknd = cvmx_helper_get_pknd(interface, index);
+
+ port_num = pknd; /* overload port_num with pknd */
+ }
+
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(port_num), port_cfg.u64);
+ cvmx_write_csr(CVMX_PIP_PRT_TAGX(port_num), port_tag_cfg.u64);
+}
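+
+/* Illustrative sketch (editor's addition, not part of the original SDK):
+ * configure one port for L2 parsing with the port number included in the
+ * tag hash. The field names (s.mode, s.inc_prt_flag) are assumed from the
+ * CSR typedefs in cvmx-pip-defs.h.
+ */
+static inline void cvmx_pip_example_setup_port(uint64_t port)
+{
+ cvmx_pip_prt_cfgx_t cfg;
+ cvmx_pip_prt_tagx_t tag;
+
+ cfg.u64 = 0;
+ cfg.s.mode = CVMX_PIP_PORT_CFG_MODE_SKIPL2; /* parse from the L2 header */
+ tag.u64 = 0;
+ tag.s.inc_prt_flag = 1; /* include port in tag hash */
+ cvmx_pip_config_port(port, cfg, tag);
+}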
+
+
+/**
+ * Configure the VLAN priority to QoS queue mapping.
+ *
+ * @param vlan_priority
+ * VLAN priority (0-7)
+ * @param qos QoS queue for packets matching this watcher
+ */
+static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority, uint64_t qos)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ /* FIXME for 68xx. */
+ }
+ else
+ {
+ cvmx_pip_qos_vlanx_t pip_qos_vlanx;
+ pip_qos_vlanx.u64 = 0;
+ pip_qos_vlanx.s.qos = qos;
+ cvmx_write_csr(CVMX_PIP_QOS_VLANX(vlan_priority), pip_qos_vlanx.u64);
+ }
+}
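+
+/* Illustrative sketch (editor's addition, not part of the original SDK):
+ * a common use of the call above is an identity mapping of all eight
+ * 802.1p priorities onto QoS queues 0-7.
+ */
+static inline void cvmx_pip_example_map_vlan_priorities(void)
+{
+ uint64_t prio;
+ for (prio = 0; prio < 8; prio++)
+ cvmx_pip_config_vlan_qos(prio, prio);
+}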
+
+
+/**
+ * Configure the Diffserv to QoS queue mapping.
+ *
+ * @param diffserv Diffserv field value (0-63)
+ * @param qos QoS queue for packets matching this watcher
+ */
+static inline void cvmx_pip_config_diffserv_qos(uint64_t diffserv, uint64_t qos)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ /* FIXME for 68xx. */
+ }
+ else
+ {
+ cvmx_pip_qos_diffx_t pip_qos_diffx;
+ pip_qos_diffx.u64 = 0;
+ pip_qos_diffx.s.qos = qos;
+ cvmx_write_csr(CVMX_PIP_QOS_DIFFX(diffserv), pip_qos_diffx.u64);
+ }
+}
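+
+/* Illustrative sketch (editor's addition, not part of the original SDK):
+ * steer DSCP Expedited Forwarding (codepoint 46) traffic to the highest
+ * priority queue and leave every other codepoint on queue 7.
+ */
+static inline void cvmx_pip_example_map_diffserv(void)
+{
+ uint64_t dscp;
+ for (dscp = 0; dscp < 64; dscp++)
+ cvmx_pip_config_diffserv_qos(dscp, (dscp == 46) ? 0 : 7);
+}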
+
+
+/**
+ * Get the status counters for a port.
+ *
+ * @param port_num Port number (ipd_port) to get statistics for.
+ * @param clear Set to 1 to clear the counters after they are read
+ * @param status Where to put the results.
+ */
+static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear, cvmx_pip_port_status_t *status)
+{
+ cvmx_pip_stat_ctl_t pip_stat_ctl;
+ cvmx_pip_stat0_prtx_t stat0;
+ cvmx_pip_stat1_prtx_t stat1;
+ cvmx_pip_stat2_prtx_t stat2;
+ cvmx_pip_stat3_prtx_t stat3;
+ cvmx_pip_stat4_prtx_t stat4;
+ cvmx_pip_stat5_prtx_t stat5;
+ cvmx_pip_stat6_prtx_t stat6;
+ cvmx_pip_stat7_prtx_t stat7;
+ cvmx_pip_stat8_prtx_t stat8;
+ cvmx_pip_stat9_prtx_t stat9;
+ cvmx_pip_stat10_x_t stat10;
+ cvmx_pip_stat11_x_t stat11;
+ cvmx_pip_stat_inb_pktsx_t pip_stat_inb_pktsx;
+ cvmx_pip_stat_inb_octsx_t pip_stat_inb_octsx;
+ cvmx_pip_stat_inb_errsx_t pip_stat_inb_errsx;
+ int interface = cvmx_helper_get_interface_num(port_num);
+ int index = cvmx_helper_get_interface_index_num(port_num);
+
+ pip_stat_ctl.u64 = 0;
+ pip_stat_ctl.s.rdclr = clear;
+ cvmx_write_csr(CVMX_PIP_STAT_CTL, pip_stat_ctl.u64);
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ int pknd = cvmx_helper_get_pknd(interface, index);
+ /*
+ * PIP_STAT_CTL[MODE] 0 means pkind.
+ */
+ stat0.u64 = cvmx_read_csr(CVMX_PIP_STAT0_X(pknd));
+ stat1.u64 = cvmx_read_csr(CVMX_PIP_STAT1_X(pknd));
+ stat2.u64 = cvmx_read_csr(CVMX_PIP_STAT2_X(pknd));
+ stat3.u64 = cvmx_read_csr(CVMX_PIP_STAT3_X(pknd));
+ stat4.u64 = cvmx_read_csr(CVMX_PIP_STAT4_X(pknd));
+ stat5.u64 = cvmx_read_csr(CVMX_PIP_STAT5_X(pknd));
+ stat6.u64 = cvmx_read_csr(CVMX_PIP_STAT6_X(pknd));
+ stat7.u64 = cvmx_read_csr(CVMX_PIP_STAT7_X(pknd));
+ stat8.u64 = cvmx_read_csr(CVMX_PIP_STAT8_X(pknd));
+ stat9.u64 = cvmx_read_csr(CVMX_PIP_STAT9_X(pknd));
+ stat10.u64 = cvmx_read_csr(CVMX_PIP_STAT10_X(pknd));
+ stat11.u64 = cvmx_read_csr(CVMX_PIP_STAT11_X(pknd));
+ }
+ else
+ {
+ if (port_num >= 40)
+ {
+ stat0.u64 = cvmx_read_csr(CVMX_PIP_XSTAT0_PRTX(port_num));
+ stat1.u64 = cvmx_read_csr(CVMX_PIP_XSTAT1_PRTX(port_num));
+ stat2.u64 = cvmx_read_csr(CVMX_PIP_XSTAT2_PRTX(port_num));
+ stat3.u64 = cvmx_read_csr(CVMX_PIP_XSTAT3_PRTX(port_num));
+ stat4.u64 = cvmx_read_csr(CVMX_PIP_XSTAT4_PRTX(port_num));
+ stat5.u64 = cvmx_read_csr(CVMX_PIP_XSTAT5_PRTX(port_num));
+ stat6.u64 = cvmx_read_csr(CVMX_PIP_XSTAT6_PRTX(port_num));
+ stat7.u64 = cvmx_read_csr(CVMX_PIP_XSTAT7_PRTX(port_num));
+ stat8.u64 = cvmx_read_csr(CVMX_PIP_XSTAT8_PRTX(port_num));
+ stat9.u64 = cvmx_read_csr(CVMX_PIP_XSTAT9_PRTX(port_num));
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ stat10.u64 = cvmx_read_csr(CVMX_PIP_XSTAT10_PRTX(port_num));
+ stat11.u64 = cvmx_read_csr(CVMX_PIP_XSTAT11_PRTX(port_num));
+ }
+ }
+ else
+ {
+ stat0.u64 = cvmx_read_csr(CVMX_PIP_STAT0_PRTX(port_num));
+ stat1.u64 = cvmx_read_csr(CVMX_PIP_STAT1_PRTX(port_num));
+ stat2.u64 = cvmx_read_csr(CVMX_PIP_STAT2_PRTX(port_num));
+ stat3.u64 = cvmx_read_csr(CVMX_PIP_STAT3_PRTX(port_num));
+ stat4.u64 = cvmx_read_csr(CVMX_PIP_STAT4_PRTX(port_num));
+ stat5.u64 = cvmx_read_csr(CVMX_PIP_STAT5_PRTX(port_num));
+ stat6.u64 = cvmx_read_csr(CVMX_PIP_STAT6_PRTX(port_num));
+ stat7.u64 = cvmx_read_csr(CVMX_PIP_STAT7_PRTX(port_num));
+ stat8.u64 = cvmx_read_csr(CVMX_PIP_STAT8_PRTX(port_num));
+ stat9.u64 = cvmx_read_csr(CVMX_PIP_STAT9_PRTX(port_num));
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX)
+ || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ {
+ stat10.u64 = cvmx_read_csr(CVMX_PIP_STAT10_PRTX(port_num));
+ stat11.u64 = cvmx_read_csr(CVMX_PIP_STAT11_PRTX(port_num));
+ }
+ }
+ }
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ int pknd = cvmx_helper_get_pknd(interface, index);
+
+ pip_stat_inb_pktsx.u64 = cvmx_read_csr(CVMX_PIP_STAT_INB_PKTS_PKNDX(pknd));
+ pip_stat_inb_octsx.u64 = cvmx_read_csr(CVMX_PIP_STAT_INB_OCTS_PKNDX(pknd));
+ pip_stat_inb_errsx.u64 = cvmx_read_csr(CVMX_PIP_STAT_INB_ERRS_PKNDX(pknd));
+ }
+ else
+ {
+ pip_stat_inb_pktsx.u64 = cvmx_read_csr(CVMX_PIP_STAT_INB_PKTSX(port_num));
+ pip_stat_inb_octsx.u64 = cvmx_read_csr(CVMX_PIP_STAT_INB_OCTSX(port_num));
+ pip_stat_inb_errsx.u64 = cvmx_read_csr(CVMX_PIP_STAT_INB_ERRSX(port_num));
+ }
+
+ status->dropped_octets = stat0.s.drp_octs;
+ status->dropped_packets = stat0.s.drp_pkts;
+ status->octets = stat1.s.octs;
+ status->pci_raw_packets = stat2.s.raw;
+ status->packets = stat2.s.pkts;
+ status->multicast_packets = stat3.s.mcst;
+ status->broadcast_packets = stat3.s.bcst;
+ status->len_64_packets = stat4.s.h64;
+ status->len_65_127_packets = stat4.s.h65to127;
+ status->len_128_255_packets = stat5.s.h128to255;
+ status->len_256_511_packets = stat5.s.h256to511;
+ status->len_512_1023_packets = stat6.s.h512to1023;
+ status->len_1024_1518_packets = stat6.s.h1024to1518;
+ status->len_1519_max_packets = stat7.s.h1519;
+ status->fcs_align_err_packets = stat7.s.fcs;
+ status->runt_packets = stat8.s.undersz;
+ status->runt_crc_packets = stat8.s.frag;
+ status->oversize_packets = stat9.s.oversz;
+ status->oversize_crc_packets = stat9.s.jabber;
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX)
+ || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ {
+ status->mcast_l2_red_packets = stat10.s.mcast;
+ status->bcast_l2_red_packets = stat10.s.bcast;
+ status->mcast_l3_red_packets = stat11.s.mcast;
+ status->bcast_l3_red_packets = stat11.s.bcast;
+ }
+ status->inb_packets = pip_stat_inb_pktsx.s.pkts;
+ status->inb_octets = pip_stat_inb_octsx.s.octs;
+ status->inb_errors = pip_stat_inb_errsx.s.errs;
+}
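+
+/* Illustrative sketch (editor's addition, not part of the original SDK):
+ * poll the counters above and report the basic packet/octet totals,
+ * clearing the hardware counters in the same read.
+ */
+static inline void cvmx_pip_example_dump_stats(uint64_t port)
+{
+ cvmx_pip_port_status_t status;
+
+ cvmx_pip_get_port_status(port, 1, &status);
+ cvmx_dprintf("port %llu: %lu packets, %llu octets, %u errors\n",
+ (unsigned long long)port, (unsigned long)status.packets,
+ (unsigned long long)status.inb_octets,
+ (unsigned int)status.inb_errors);
+}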
+
+
+/**
+ * Configure the hardware CRC engine
+ *
+ * @param interface Interface to configure (0 or 1)
+ * @param invert_result
+ * Invert the result of the CRC
+ * @param reflect Reflect the CRC input (process bits least-significant first)
+ * @param initialization_vector
+ * CRC initialization vector
+ */
+static inline void cvmx_pip_config_crc(uint64_t interface, uint64_t invert_result, uint64_t reflect, uint32_t initialization_vector)
+{
+ if ((OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ {
+ cvmx_pip_crc_ctlx_t config;
+ cvmx_pip_crc_ivx_t pip_crc_ivx;
+
+ config.u64 = 0;
+ config.s.invres = invert_result;
+ config.s.reflect = reflect;
+ cvmx_write_csr(CVMX_PIP_CRC_CTLX(interface), config.u64);
+
+ pip_crc_ivx.u64 = 0;
+ pip_crc_ivx.s.iv = initialization_vector;
+ cvmx_write_csr(CVMX_PIP_CRC_IVX(interface), pip_crc_ivx.u64);
+ }
+}
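+
+/* Illustrative sketch (editor's addition, not part of the original SDK):
+ * a conventional CRC-32 style configuration on interface 0 - inverted
+ * result, reflected bit ordering, and an all-ones initialization vector.
+ */
+static inline void cvmx_pip_example_setup_crc(void)
+{
+ cvmx_pip_config_crc(0, 1, 1, 0xffffffff);
+}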
+
+
+/**
+ * Clear all bits in a tag mask. This should be called on
+ * startup before any calls to cvmx_pip_tag_mask_set. Each bit
+ * set in the final mask represents a byte used in the packet for
+ * tag generation.
+ *
+ * @param mask_index Which tag mask to clear (0..3)
+ */
+static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index)
+{
+ uint64_t index;
+ cvmx_pip_tag_incx_t pip_tag_incx;
+ pip_tag_incx.u64 = 0;
+ pip_tag_incx.s.en = 0;
+ for (index=mask_index*16; index<(mask_index+1)*16; index++)
+ cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
+}
+
+
+/**
+ * Sets a range of bits in the tag mask. The tag mask is used
+ * when the cvmx_pip_port_tag_cfg_t tag_mode is non-zero.
+ * There are four separate masks that can be configured.
+ *
+ * @param mask_index Which tag mask to modify (0..3)
+ * @param offset Offset into the bitmask to set bits at. Use the standard C
+ * macro offsetof() to determine the offsets into packet headers.
+ * For example, offsetof(struct ethhdr, h_proto) returns the offset
+ * of the Ethernet protocol field. The bitmask selects which bytes
+ * to include in the tag, with bit offset X selecting the byte at offset X
+ * from the beginning of the packet data.
+ * @param len Number of bytes to include. Usually this is the sizeof()
+ * the field.
+ */
+static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset, uint64_t len)
+{
+ while (len--)
+ {
+ cvmx_pip_tag_incx_t pip_tag_incx;
+ uint64_t index = mask_index*16 + offset/8;
+ pip_tag_incx.u64 = cvmx_read_csr(CVMX_PIP_TAG_INCX(index));
+ pip_tag_incx.s.en |= 0x80 >> (offset & 0x7);
+ cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
+ offset++;
+ }
+}
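+
+/* Illustrative sketch (editor's addition, not part of the original SDK):
+ * compute tags over the IPv4 source and destination addresses. The byte
+ * offsets (26 and 30) assume an untagged Ethernet frame; adjust them for
+ * VLAN tags or other encapsulations.
+ */
+static inline void cvmx_pip_example_tag_on_ipv4_addrs(void)
+{
+ cvmx_pip_tag_mask_clear(0);
+ cvmx_pip_tag_mask_set(0, 26, 4); /* IPv4 source address */
+ cvmx_pip_tag_mask_set(0, 30, 4); /* IPv4 destination address */
+}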
+
+/**
+ * Initialize the Bit Select Extractor config. There are 8 bit positions and
+ * valid flags to program for the corresponding extractor.
+ *
+ * @param bit Bit Select Extractor to use
+ * @param pos Which position to update
+ * @param val The value to update the position with
+ */
+static inline void cvmx_pip_set_bsel_pos(int bit, int pos, int val)
+{
+ cvmx_pip_bsel_ext_posx_t bsel_pos;
+
+ /* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
+ if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
+ return;
+
+ if (bit < 0 || bit > 3)
+ {
+ cvmx_dprintf("ERROR: cvmx_pip_set_bsel_pos: Invalid Bit-Select Extractor (%d) passed\n", bit);
+ return;
+ }
+
+ bsel_pos.u64 = cvmx_read_csr(CVMX_PIP_BSEL_EXT_POSX(bit));
+ switch(pos)
+ {
+ case 0:
+ bsel_pos.s.pos0_val = 1;
+ bsel_pos.s.pos0 = val & 0x7f;
+ break;
+ case 1:
+ bsel_pos.s.pos1_val = 1;
+ bsel_pos.s.pos1 = val & 0x7f;
+ break;
+ case 2:
+ bsel_pos.s.pos2_val = 1;
+ bsel_pos.s.pos2 = val & 0x7f;
+ break;
+ case 3:
+ bsel_pos.s.pos3_val = 1;
+ bsel_pos.s.pos3 = val & 0x7f;
+ break;
+ case 4:
+ bsel_pos.s.pos4_val = 1;
+ bsel_pos.s.pos4 = val & 0x7f;
+ break;
+ case 5:
+ bsel_pos.s.pos5_val = 1;
+ bsel_pos.s.pos5 = val & 0x7f;
+ break;
+ case 6:
+ bsel_pos.s.pos6_val = 1;
+ bsel_pos.s.pos6 = val & 0x7f;
+ break;
+ case 7:
+ bsel_pos.s.pos7_val = 1;
+ bsel_pos.s.pos7 = val & 0x7f;
+ break;
+ default:
+ cvmx_dprintf("Warning: cvmx_pip_set_bsel_pos: Invalid pos(%d)\n", pos);
+ break;
+ }
+ cvmx_write_csr(CVMX_PIP_BSEL_EXT_POSX(bit), bsel_pos.u64);
+}
+
+/**
+ * Initialize the offset and skip values used by a bit select extractor.
+ *
+ * @param bit Bit Select Extractor to use
+ * @param offset Offset to add to the extractor mem addr to get the final
+ * address of the lookup table entry.
+ * @param skip Number of bytes to skip from the start of the packet (0-64)
+ */
+static inline void cvmx_pip_bsel_config(int bit, int offset, int skip)
+{
+ cvmx_pip_bsel_ext_cfgx_t bsel_cfg;
+
+ /* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
+ if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
+ return;
+
+ bsel_cfg.u64 = cvmx_read_csr(CVMX_PIP_BSEL_EXT_CFGX(bit));
+ bsel_cfg.s.offset = offset;
+ bsel_cfg.s.skip = skip;
+ cvmx_write_csr(CVMX_PIP_BSEL_EXT_CFGX(bit), bsel_cfg.u64);
+}
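+
+/* Illustrative sketch (editor's addition, not part of the original SDK):
+ * program extractor 0 to build its 8-bit table index from eight
+ * consecutive bits starting at bit position first_bit of the packet,
+ * with no byte skip and a zero table offset.
+ */
+static inline void cvmx_pip_example_setup_bsel(int first_bit)
+{
+ int pos;
+ for (pos = 0; pos < 8; pos++)
+ cvmx_pip_set_bsel_pos(0, pos, first_bit + pos);
+ cvmx_pip_bsel_config(0, 0, 0);
+}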
+
+
+/**
+ * Get the entry for the Bit Select Extractor Table.
+ * @param work pointer to work queue entry
+ * @return Index of the Bit Select Extractor Table
+ */
+static inline int cvmx_pip_get_bsel_table_index(cvmx_wqe_t *work)
+{
+ int bit = cvmx_wqe_get_port(work) & 0x3;
+ /* Get the Bit select table index. */
+ int index = 0; /* accumulates one bit per extractor position */
+ int y;
+ cvmx_pip_bsel_ext_cfgx_t bsel_cfg;
+ cvmx_pip_bsel_ext_posx_t bsel_pos;
+
+ /* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
+ if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
+ return -1;
+
+ bsel_cfg.u64 = cvmx_read_csr(CVMX_PIP_BSEL_EXT_CFGX(bit));
+ bsel_pos.u64 = cvmx_read_csr(CVMX_PIP_BSEL_EXT_POSX(bit));
+
+ for (y = 0; y < 8; y++)
+ {
+ char *ptr = (char *)cvmx_phys_to_ptr(work->packet_ptr.s.addr);
+ int bit_loc = 0;
+ int bit_val;
+
+ ptr += bsel_cfg.s.skip;
+ switch(y)
+ {
+ case 0:
+ ptr += (bsel_pos.s.pos0 >> 3);
+ bit_loc = 7 - (bsel_pos.s.pos0 & 0x7);
+ break;
+ case 1:
+ ptr += (bsel_pos.s.pos1 >> 3);
+ bit_loc = 7 - (bsel_pos.s.pos1 & 0x7);
+ break;
+ case 2:
+ ptr += (bsel_pos.s.pos2 >> 3);
+ bit_loc = 7 - (bsel_pos.s.pos2 & 0x7);
+ break;
+ case 3:
+ ptr += (bsel_pos.s.pos3 >> 3);
+ bit_loc = 7 - (bsel_pos.s.pos3 & 0x7);
+ break;
+ case 4:
+ ptr += (bsel_pos.s.pos4 >> 3);
+ bit_loc = 7 - (bsel_pos.s.pos4 & 0x7);
+ break;
+ case 5:
+ ptr += (bsel_pos.s.pos5 >> 3);
+ bit_loc = 7 - (bsel_pos.s.pos5 & 0x7);
+ break;
+ case 6:
+ ptr += (bsel_pos.s.pos6 >> 3);
+ bit_loc = 7 - (bsel_pos.s.pos6 & 0x7);
+ break;
+ case 7:
+ ptr += (bsel_pos.s.pos7 >> 3);
+ bit_loc = 7 - (bsel_pos.s.pos7 & 0x7);
+ break;
+ }
+ bit_val = (*ptr >> bit_loc) & 1; /* extract the selected bit (bit 7 is MSB of the byte) */
+ index |= bit_val << y;
+ }
+ index += bsel_cfg.s.offset;
+ index &= 0x1ff;
+ return index;
+}
+
+static inline int cvmx_pip_get_bsel_qos(cvmx_wqe_t *work)
+{
+ int index = cvmx_pip_get_bsel_table_index(work);
+ cvmx_pip_bsel_tbl_entx_t bsel_tbl;
+
+ /* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
+ if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
+ return -1;
+
+ bsel_tbl.u64 = cvmx_read_csr(CVMX_PIP_BSEL_TBL_ENTX(index));
+
+ return bsel_tbl.s.qos;
+}
+
+static inline int cvmx_pip_get_bsel_grp(cvmx_wqe_t *work)
+{
+ int index = cvmx_pip_get_bsel_table_index(work);
+ cvmx_pip_bsel_tbl_entx_t bsel_tbl;
+
+ /* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
+ if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
+ return -1;
+
+ bsel_tbl.u64 = cvmx_read_csr(CVMX_PIP_BSEL_TBL_ENTX(index));
+
+ return bsel_tbl.s.grp;
+}
+
+static inline int cvmx_pip_get_bsel_tt(cvmx_wqe_t *work)
+{
+ int index = cvmx_pip_get_bsel_table_index(work);
+ cvmx_pip_bsel_tbl_entx_t bsel_tbl;
+
+ /* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
+ if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
+ return -1;
+
+ bsel_tbl.u64 = cvmx_read_csr(CVMX_PIP_BSEL_TBL_ENTX(index));
+
+ return bsel_tbl.s.tt;
+}
+
+static inline int cvmx_pip_get_bsel_tag(cvmx_wqe_t *work)
+{
+ int index = cvmx_pip_get_bsel_table_index(work);
+ int port = cvmx_wqe_get_port(work);
+ int bit = port & 0x3;
+ int upper_tag = 0;
+ cvmx_pip_bsel_tbl_entx_t bsel_tbl;
+ cvmx_pip_bsel_ext_cfgx_t bsel_cfg;
+ cvmx_pip_prt_tagx_t prt_tag;
+
+ /* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
+ if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
+ return -1;
+
+ bsel_tbl.u64 = cvmx_read_csr(CVMX_PIP_BSEL_TBL_ENTX(index));
+ bsel_cfg.u64 = cvmx_read_csr(CVMX_PIP_BSEL_EXT_CFGX(bit));
+
+ prt_tag.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
+ if (prt_tag.s.inc_prt_flag == 0)
+ upper_tag = bsel_cfg.s.upper_tag;
+ return (bsel_tbl.s.tag | ((bsel_cfg.s.tag << 8) & 0xff00) | ((upper_tag << 16) & 0xffff0000));
+}
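+
+/* Illustrative sketch (editor's addition, not part of the original SDK):
+ * the accessors above combine naturally when software re-derives the WQE
+ * scheduling fields from the bit select extractor table.
+ */
+static inline void cvmx_pip_example_read_bsel_fields(cvmx_wqe_t *work)
+{
+ int qos = cvmx_pip_get_bsel_qos(work);
+ int grp = cvmx_pip_get_bsel_grp(work);
+ int tt = cvmx_pip_get_bsel_tt(work);
+
+ if (qos >= 0)
+ cvmx_dprintf("bsel: qos=%d grp=%d tt=%d tag=0x%x\n",
+ qos, grp, tt, (unsigned)cvmx_pip_get_bsel_tag(work));
+}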
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_PIP_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pip.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pko-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pko-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pko-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,3988 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pko-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pko.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_PKO_DEFS_H__
+#define __CVMX_PKO_DEFS_H__
+
+#define CVMX_PKO_MEM_COUNT0 (CVMX_ADD_IO_SEG(0x0001180050001080ull))
+#define CVMX_PKO_MEM_COUNT1 (CVMX_ADD_IO_SEG(0x0001180050001088ull))
+#define CVMX_PKO_MEM_DEBUG0 (CVMX_ADD_IO_SEG(0x0001180050001100ull))
+#define CVMX_PKO_MEM_DEBUG1 (CVMX_ADD_IO_SEG(0x0001180050001108ull))
+#define CVMX_PKO_MEM_DEBUG10 (CVMX_ADD_IO_SEG(0x0001180050001150ull))
+#define CVMX_PKO_MEM_DEBUG11 (CVMX_ADD_IO_SEG(0x0001180050001158ull))
+#define CVMX_PKO_MEM_DEBUG12 (CVMX_ADD_IO_SEG(0x0001180050001160ull))
+#define CVMX_PKO_MEM_DEBUG13 (CVMX_ADD_IO_SEG(0x0001180050001168ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_DEBUG14 CVMX_PKO_MEM_DEBUG14_FUNC()
+static inline uint64_t CVMX_PKO_MEM_DEBUG14_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_MEM_DEBUG14 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001170ull);
+}
+#else
+#define CVMX_PKO_MEM_DEBUG14 (CVMX_ADD_IO_SEG(0x0001180050001170ull))
+#endif
+#define CVMX_PKO_MEM_DEBUG2 (CVMX_ADD_IO_SEG(0x0001180050001110ull))
+#define CVMX_PKO_MEM_DEBUG3 (CVMX_ADD_IO_SEG(0x0001180050001118ull))
+#define CVMX_PKO_MEM_DEBUG4 (CVMX_ADD_IO_SEG(0x0001180050001120ull))
+#define CVMX_PKO_MEM_DEBUG5 (CVMX_ADD_IO_SEG(0x0001180050001128ull))
+#define CVMX_PKO_MEM_DEBUG6 (CVMX_ADD_IO_SEG(0x0001180050001130ull))
+#define CVMX_PKO_MEM_DEBUG7 (CVMX_ADD_IO_SEG(0x0001180050001138ull))
+#define CVMX_PKO_MEM_DEBUG8 (CVMX_ADD_IO_SEG(0x0001180050001140ull))
+#define CVMX_PKO_MEM_DEBUG9 (CVMX_ADD_IO_SEG(0x0001180050001148ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_IPORT_PTRS CVMX_PKO_MEM_IPORT_PTRS_FUNC()
+static inline uint64_t CVMX_PKO_MEM_IPORT_PTRS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_MEM_IPORT_PTRS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001030ull);
+}
+#else
+#define CVMX_PKO_MEM_IPORT_PTRS (CVMX_ADD_IO_SEG(0x0001180050001030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_IPORT_QOS CVMX_PKO_MEM_IPORT_QOS_FUNC()
+static inline uint64_t CVMX_PKO_MEM_IPORT_QOS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_MEM_IPORT_QOS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001038ull);
+}
+#else
+#define CVMX_PKO_MEM_IPORT_QOS (CVMX_ADD_IO_SEG(0x0001180050001038ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_IQUEUE_PTRS CVMX_PKO_MEM_IQUEUE_PTRS_FUNC()
+static inline uint64_t CVMX_PKO_MEM_IQUEUE_PTRS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_MEM_IQUEUE_PTRS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001040ull);
+}
+#else
+#define CVMX_PKO_MEM_IQUEUE_PTRS (CVMX_ADD_IO_SEG(0x0001180050001040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_IQUEUE_QOS CVMX_PKO_MEM_IQUEUE_QOS_FUNC()
+static inline uint64_t CVMX_PKO_MEM_IQUEUE_QOS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_MEM_IQUEUE_QOS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001048ull);
+}
+#else
+#define CVMX_PKO_MEM_IQUEUE_QOS (CVMX_ADD_IO_SEG(0x0001180050001048ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_PORT_PTRS CVMX_PKO_MEM_PORT_PTRS_FUNC()
+static inline uint64_t CVMX_PKO_MEM_PORT_PTRS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_MEM_PORT_PTRS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001010ull);
+}
+#else
+#define CVMX_PKO_MEM_PORT_PTRS (CVMX_ADD_IO_SEG(0x0001180050001010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_PORT_QOS CVMX_PKO_MEM_PORT_QOS_FUNC()
+static inline uint64_t CVMX_PKO_MEM_PORT_QOS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_MEM_PORT_QOS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001018ull);
+}
+#else
+#define CVMX_PKO_MEM_PORT_QOS (CVMX_ADD_IO_SEG(0x0001180050001018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_PORT_RATE0 CVMX_PKO_MEM_PORT_RATE0_FUNC()
+static inline uint64_t CVMX_PKO_MEM_PORT_RATE0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_MEM_PORT_RATE0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001020ull);
+}
+#else
+#define CVMX_PKO_MEM_PORT_RATE0 (CVMX_ADD_IO_SEG(0x0001180050001020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_PORT_RATE1 CVMX_PKO_MEM_PORT_RATE1_FUNC()
+static inline uint64_t CVMX_PKO_MEM_PORT_RATE1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_MEM_PORT_RATE1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001028ull);
+}
+#else
+#define CVMX_PKO_MEM_PORT_RATE1 (CVMX_ADD_IO_SEG(0x0001180050001028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_QUEUE_PTRS CVMX_PKO_MEM_QUEUE_PTRS_FUNC()
+static inline uint64_t CVMX_PKO_MEM_QUEUE_PTRS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_MEM_QUEUE_PTRS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001000ull);
+}
+#else
+#define CVMX_PKO_MEM_QUEUE_PTRS (CVMX_ADD_IO_SEG(0x0001180050001000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_QUEUE_QOS CVMX_PKO_MEM_QUEUE_QOS_FUNC()
+static inline uint64_t CVMX_PKO_MEM_QUEUE_QOS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_MEM_QUEUE_QOS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001008ull);
+}
+#else
+#define CVMX_PKO_MEM_QUEUE_QOS (CVMX_ADD_IO_SEG(0x0001180050001008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_THROTTLE_INT CVMX_PKO_MEM_THROTTLE_INT_FUNC()
+static inline uint64_t CVMX_PKO_MEM_THROTTLE_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_MEM_THROTTLE_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001058ull);
+}
+#else
+#define CVMX_PKO_MEM_THROTTLE_INT (CVMX_ADD_IO_SEG(0x0001180050001058ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_MEM_THROTTLE_PIPE CVMX_PKO_MEM_THROTTLE_PIPE_FUNC()
+static inline uint64_t CVMX_PKO_MEM_THROTTLE_PIPE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_MEM_THROTTLE_PIPE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050001050ull);
+}
+#else
+#define CVMX_PKO_MEM_THROTTLE_PIPE (CVMX_ADD_IO_SEG(0x0001180050001050ull))
+#endif
+#define CVMX_PKO_REG_BIST_RESULT (CVMX_ADD_IO_SEG(0x0001180050000080ull))
+#define CVMX_PKO_REG_CMD_BUF (CVMX_ADD_IO_SEG(0x0001180050000010ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PKO_REG_CRC_CTLX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PKO_REG_CRC_CTLX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180050000028ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_PKO_REG_CRC_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180050000028ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_CRC_ENABLE CVMX_PKO_REG_CRC_ENABLE_FUNC()
+static inline uint64_t CVMX_PKO_REG_CRC_ENABLE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_PKO_REG_CRC_ENABLE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000020ull);
+}
+#else
+#define CVMX_PKO_REG_CRC_ENABLE (CVMX_ADD_IO_SEG(0x0001180050000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PKO_REG_CRC_IVX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PKO_REG_CRC_IVX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180050000038ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_PKO_REG_CRC_IVX(offset) (CVMX_ADD_IO_SEG(0x0001180050000038ull) + ((offset) & 1) * 8)
+#endif
+#define CVMX_PKO_REG_DEBUG0 (CVMX_ADD_IO_SEG(0x0001180050000098ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_DEBUG1 CVMX_PKO_REG_DEBUG1_FUNC()
+static inline uint64_t CVMX_PKO_REG_DEBUG1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_REG_DEBUG1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800500000A0ull);
+}
+#else
+#define CVMX_PKO_REG_DEBUG1 (CVMX_ADD_IO_SEG(0x00011800500000A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_DEBUG2 CVMX_PKO_REG_DEBUG2_FUNC()
+static inline uint64_t CVMX_PKO_REG_DEBUG2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_REG_DEBUG2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800500000A8ull);
+}
+#else
+#define CVMX_PKO_REG_DEBUG2 (CVMX_ADD_IO_SEG(0x00011800500000A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_DEBUG3 CVMX_PKO_REG_DEBUG3_FUNC()
+static inline uint64_t CVMX_PKO_REG_DEBUG3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_REG_DEBUG3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800500000B0ull);
+}
+#else
+#define CVMX_PKO_REG_DEBUG3 (CVMX_ADD_IO_SEG(0x00011800500000B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_DEBUG4 CVMX_PKO_REG_DEBUG4_FUNC()
+static inline uint64_t CVMX_PKO_REG_DEBUG4_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_REG_DEBUG4 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800500000B8ull);
+}
+#else
+#define CVMX_PKO_REG_DEBUG4 (CVMX_ADD_IO_SEG(0x00011800500000B8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_ENGINE_INFLIGHT CVMX_PKO_REG_ENGINE_INFLIGHT_FUNC()
+static inline uint64_t CVMX_PKO_REG_ENGINE_INFLIGHT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_REG_ENGINE_INFLIGHT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000050ull);
+}
+#else
+#define CVMX_PKO_REG_ENGINE_INFLIGHT (CVMX_ADD_IO_SEG(0x0001180050000050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_ENGINE_INFLIGHT1 CVMX_PKO_REG_ENGINE_INFLIGHT1_FUNC()
+static inline uint64_t CVMX_PKO_REG_ENGINE_INFLIGHT1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_REG_ENGINE_INFLIGHT1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000318ull);
+}
+#else
+#define CVMX_PKO_REG_ENGINE_INFLIGHT1 (CVMX_ADD_IO_SEG(0x0001180050000318ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_PKO_REG_ENGINE_STORAGEX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_PKO_REG_ENGINE_STORAGEX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180050000300ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_PKO_REG_ENGINE_STORAGEX(offset) (CVMX_ADD_IO_SEG(0x0001180050000300ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_ENGINE_THRESH CVMX_PKO_REG_ENGINE_THRESH_FUNC()
+static inline uint64_t CVMX_PKO_REG_ENGINE_THRESH_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_REG_ENGINE_THRESH not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000058ull);
+}
+#else
+#define CVMX_PKO_REG_ENGINE_THRESH (CVMX_ADD_IO_SEG(0x0001180050000058ull))
+#endif
+#define CVMX_PKO_REG_ERROR (CVMX_ADD_IO_SEG(0x0001180050000088ull))
+#define CVMX_PKO_REG_FLAGS (CVMX_ADD_IO_SEG(0x0001180050000000ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_GMX_PORT_MODE CVMX_PKO_REG_GMX_PORT_MODE_FUNC()
+static inline uint64_t CVMX_PKO_REG_GMX_PORT_MODE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_REG_GMX_PORT_MODE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000018ull);
+}
+#else
+#define CVMX_PKO_REG_GMX_PORT_MODE (CVMX_ADD_IO_SEG(0x0001180050000018ull))
+#endif
+#define CVMX_PKO_REG_INT_MASK (CVMX_ADD_IO_SEG(0x0001180050000090ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_LOOPBACK_BPID CVMX_PKO_REG_LOOPBACK_BPID_FUNC()
+static inline uint64_t CVMX_PKO_REG_LOOPBACK_BPID_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_REG_LOOPBACK_BPID not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000118ull);
+}
+#else
+#define CVMX_PKO_REG_LOOPBACK_BPID (CVMX_ADD_IO_SEG(0x0001180050000118ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_LOOPBACK_PKIND CVMX_PKO_REG_LOOPBACK_PKIND_FUNC()
+static inline uint64_t CVMX_PKO_REG_LOOPBACK_PKIND_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_REG_LOOPBACK_PKIND not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000068ull);
+}
+#else
+#define CVMX_PKO_REG_LOOPBACK_PKIND (CVMX_ADD_IO_SEG(0x0001180050000068ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_MIN_PKT CVMX_PKO_REG_MIN_PKT_FUNC()
+static inline uint64_t CVMX_PKO_REG_MIN_PKT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_REG_MIN_PKT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000070ull);
+}
+#else
+#define CVMX_PKO_REG_MIN_PKT (CVMX_ADD_IO_SEG(0x0001180050000070ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_PREEMPT CVMX_PKO_REG_PREEMPT_FUNC()
+static inline uint64_t CVMX_PKO_REG_PREEMPT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_REG_PREEMPT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000110ull);
+}
+#else
+#define CVMX_PKO_REG_PREEMPT (CVMX_ADD_IO_SEG(0x0001180050000110ull))
+#endif
+#define CVMX_PKO_REG_QUEUE_MODE (CVMX_ADD_IO_SEG(0x0001180050000048ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_QUEUE_PREEMPT CVMX_PKO_REG_QUEUE_PREEMPT_FUNC()
+static inline uint64_t CVMX_PKO_REG_QUEUE_PREEMPT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_REG_QUEUE_PREEMPT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000108ull);
+}
+#else
+#define CVMX_PKO_REG_QUEUE_PREEMPT (CVMX_ADD_IO_SEG(0x0001180050000108ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_QUEUE_PTRS1 CVMX_PKO_REG_QUEUE_PTRS1_FUNC()
+static inline uint64_t CVMX_PKO_REG_QUEUE_PTRS1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_REG_QUEUE_PTRS1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000100ull);
+}
+#else
+#define CVMX_PKO_REG_QUEUE_PTRS1 (CVMX_ADD_IO_SEG(0x0001180050000100ull))
+#endif
+#define CVMX_PKO_REG_READ_IDX (CVMX_ADD_IO_SEG(0x0001180050000008ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_THROTTLE CVMX_PKO_REG_THROTTLE_FUNC()
+static inline uint64_t CVMX_PKO_REG_THROTTLE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_PKO_REG_THROTTLE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000078ull);
+}
+#else
+#define CVMX_PKO_REG_THROTTLE (CVMX_ADD_IO_SEG(0x0001180050000078ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_PKO_REG_TIMESTAMP CVMX_PKO_REG_TIMESTAMP_FUNC()
+static inline uint64_t CVMX_PKO_REG_TIMESTAMP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_PKO_REG_TIMESTAMP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180050000060ull);
+}
+#else
+#define CVMX_PKO_REG_TIMESTAMP (CVMX_ADD_IO_SEG(0x0001180050000060ull))
+#endif
+
+/**
+ * cvmx_pko_mem_count0
+ *
+ * Notes:
+ * Total number of packets seen by PKO, per port
+ * A write to this address will clear the entry whose index is specified as COUNT[5:0].
+ * This CSR is a memory of 44 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. A read of any entry that has not been
+ * previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_count0 {
+ uint64_t u64;
+ struct cvmx_pko_mem_count0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t count : 32; /**< Total number of packets seen by PKO */
+#else
+ uint64_t count : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pko_mem_count0_s cn30xx;
+ struct cvmx_pko_mem_count0_s cn31xx;
+ struct cvmx_pko_mem_count0_s cn38xx;
+ struct cvmx_pko_mem_count0_s cn38xxp2;
+ struct cvmx_pko_mem_count0_s cn50xx;
+ struct cvmx_pko_mem_count0_s cn52xx;
+ struct cvmx_pko_mem_count0_s cn52xxp1;
+ struct cvmx_pko_mem_count0_s cn56xx;
+ struct cvmx_pko_mem_count0_s cn56xxp1;
+ struct cvmx_pko_mem_count0_s cn58xx;
+ struct cvmx_pko_mem_count0_s cn58xxp1;
+ struct cvmx_pko_mem_count0_s cn61xx;
+ struct cvmx_pko_mem_count0_s cn63xx;
+ struct cvmx_pko_mem_count0_s cn63xxp1;
+ struct cvmx_pko_mem_count0_s cn66xx;
+ struct cvmx_pko_mem_count0_s cn68xx;
+ struct cvmx_pko_mem_count0_s cn68xxp1;
+ struct cvmx_pko_mem_count0_s cnf71xx;
+};
+typedef union cvmx_pko_mem_count0 cvmx_pko_mem_count0_t;
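+
+/* Illustrative sketch (editor's addition, not part of the original SDK):
+ * per the note above, PKO_REG_READ_IDX must select an entry before this
+ * CSR is read. Writing the raw port number puts the index in the low bits
+ * with auto-increment disabled, which avoids referencing the
+ * cvmx_pko_reg_read_idx_t layout defined later in this file.
+ */
+static inline uint32_t cvmx_pko_example_read_count0(int port)
+{
+ cvmx_pko_mem_count0_t count0;
+
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, (uint64_t)port); /* select entry */
+ count0.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT0);
+ return count0.s.count;
+}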
+
+/**
+ * cvmx_pko_mem_count1
+ *
+ * Notes:
+ * Total number of bytes seen by PKO, per port
+ * A write to this address will clear the entry whose index is specified as COUNT[5:0].
+ * This CSR is a memory of 44 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. A read of any entry that has not been
+ * previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_count1 {
+ uint64_t u64;
+ struct cvmx_pko_mem_count1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t count : 48; /**< Total number of bytes seen by PKO */
+#else
+ uint64_t count : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_pko_mem_count1_s cn30xx;
+ struct cvmx_pko_mem_count1_s cn31xx;
+ struct cvmx_pko_mem_count1_s cn38xx;
+ struct cvmx_pko_mem_count1_s cn38xxp2;
+ struct cvmx_pko_mem_count1_s cn50xx;
+ struct cvmx_pko_mem_count1_s cn52xx;
+ struct cvmx_pko_mem_count1_s cn52xxp1;
+ struct cvmx_pko_mem_count1_s cn56xx;
+ struct cvmx_pko_mem_count1_s cn56xxp1;
+ struct cvmx_pko_mem_count1_s cn58xx;
+ struct cvmx_pko_mem_count1_s cn58xxp1;
+ struct cvmx_pko_mem_count1_s cn61xx;
+ struct cvmx_pko_mem_count1_s cn63xx;
+ struct cvmx_pko_mem_count1_s cn63xxp1;
+ struct cvmx_pko_mem_count1_s cn66xx;
+ struct cvmx_pko_mem_count1_s cn68xx;
+ struct cvmx_pko_mem_count1_s cn68xxp1;
+ struct cvmx_pko_mem_count1_s cnf71xx;
+};
+typedef union cvmx_pko_mem_count1 cvmx_pko_mem_count1_t;
+
+/**
+ * cvmx_pko_mem_debug0
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko_prt_psb.cmnd[63:0]
+ * This CSR is a memory of 12 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug0 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fau : 28; /**< Fetch and add command words */
+ uint64_t cmd : 14; /**< Command word */
+ uint64_t segs : 6; /**< Number of segments/gather size */
+ uint64_t size : 16; /**< Packet length in bytes */
+#else
+ uint64_t size : 16;
+ uint64_t segs : 6;
+ uint64_t cmd : 14;
+ uint64_t fau : 28;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug0_s cn30xx;
+ struct cvmx_pko_mem_debug0_s cn31xx;
+ struct cvmx_pko_mem_debug0_s cn38xx;
+ struct cvmx_pko_mem_debug0_s cn38xxp2;
+ struct cvmx_pko_mem_debug0_s cn50xx;
+ struct cvmx_pko_mem_debug0_s cn52xx;
+ struct cvmx_pko_mem_debug0_s cn52xxp1;
+ struct cvmx_pko_mem_debug0_s cn56xx;
+ struct cvmx_pko_mem_debug0_s cn56xxp1;
+ struct cvmx_pko_mem_debug0_s cn58xx;
+ struct cvmx_pko_mem_debug0_s cn58xxp1;
+ struct cvmx_pko_mem_debug0_s cn61xx;
+ struct cvmx_pko_mem_debug0_s cn63xx;
+ struct cvmx_pko_mem_debug0_s cn63xxp1;
+ struct cvmx_pko_mem_debug0_s cn66xx;
+ struct cvmx_pko_mem_debug0_s cn68xx;
+ struct cvmx_pko_mem_debug0_s cn68xxp1;
+ struct cvmx_pko_mem_debug0_s cnf71xx;
+};
+typedef union cvmx_pko_mem_debug0 cvmx_pko_mem_debug0_t;
+
+/**
+ * cvmx_pko_mem_debug1
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko_prt_psb.curr[63:0]
+ * This CSR is a memory of 12 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug1 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i : 1; /**< "I" value used for free operation */
+ uint64_t back : 4; /**< Back value used for free operation */
+ uint64_t pool : 3; /**< Pool value used for free operation */
+ uint64_t size : 16; /**< Size in bytes */
+ uint64_t ptr : 40; /**< Data pointer */
+#else
+ uint64_t ptr : 40;
+ uint64_t size : 16;
+ uint64_t pool : 3;
+ uint64_t back : 4;
+ uint64_t i : 1;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug1_s cn30xx;
+ struct cvmx_pko_mem_debug1_s cn31xx;
+ struct cvmx_pko_mem_debug1_s cn38xx;
+ struct cvmx_pko_mem_debug1_s cn38xxp2;
+ struct cvmx_pko_mem_debug1_s cn50xx;
+ struct cvmx_pko_mem_debug1_s cn52xx;
+ struct cvmx_pko_mem_debug1_s cn52xxp1;
+ struct cvmx_pko_mem_debug1_s cn56xx;
+ struct cvmx_pko_mem_debug1_s cn56xxp1;
+ struct cvmx_pko_mem_debug1_s cn58xx;
+ struct cvmx_pko_mem_debug1_s cn58xxp1;
+ struct cvmx_pko_mem_debug1_s cn61xx;
+ struct cvmx_pko_mem_debug1_s cn63xx;
+ struct cvmx_pko_mem_debug1_s cn63xxp1;
+ struct cvmx_pko_mem_debug1_s cn66xx;
+ struct cvmx_pko_mem_debug1_s cn68xx;
+ struct cvmx_pko_mem_debug1_s cn68xxp1;
+ struct cvmx_pko_mem_debug1_s cnf71xx;
+};
+typedef union cvmx_pko_mem_debug1 cvmx_pko_mem_debug1_t;
+
+/**
+ * cvmx_pko_mem_debug10
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko.dat.ptr.ptrs1, pko.dat.ptr.ptrs2
+ * This CSR is a memory of 44 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug10 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug10_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug10_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fau : 28; /**< Fetch and add command words */
+ uint64_t cmd : 14; /**< Command word */
+ uint64_t segs : 6; /**< Number of segments/gather size */
+ uint64_t size : 16; /**< Packet length in bytes */
+#else
+ uint64_t size : 16;
+ uint64_t segs : 6;
+ uint64_t cmd : 14;
+ uint64_t fau : 28;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug10_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug10_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug10_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug10_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptrs1 : 17; /**< Internal state */
+ uint64_t reserved_17_31 : 15;
+ uint64_t ptrs2 : 17; /**< Internal state */
+#else
+ uint64_t ptrs2 : 17;
+ uint64_t reserved_17_31 : 15;
+ uint64_t ptrs1 : 17;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug10_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug10_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn58xxp1;
+ struct cvmx_pko_mem_debug10_cn50xx cn61xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn63xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn63xxp1;
+ struct cvmx_pko_mem_debug10_cn50xx cn66xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn68xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn68xxp1;
+ struct cvmx_pko_mem_debug10_cn50xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug10 cvmx_pko_mem_debug10_t;
+
+/**
+ * cvmx_pko_mem_debug11
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko.out.sta.state[22:0]
+ * This CSR is a memory of 44 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug11 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug11_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i : 1; /**< "I" value used for free operation */
+ uint64_t back : 4; /**< Back value used for free operation */
+ uint64_t pool : 3; /**< Pool value used for free operation */
+ uint64_t size : 16; /**< Size in bytes */
+ uint64_t reserved_0_39 : 40;
+#else
+ uint64_t reserved_0_39 : 40;
+ uint64_t size : 16;
+ uint64_t pool : 3;
+ uint64_t back : 4;
+ uint64_t i : 1;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug11_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i : 1; /**< "I" value used for free operation */
+ uint64_t back : 4; /**< Back value used for free operation */
+ uint64_t pool : 3; /**< Pool value used for free operation */
+ uint64_t size : 16; /**< Size in bytes */
+ uint64_t ptr : 40; /**< Data pointer */
+#else
+ uint64_t ptr : 40;
+ uint64_t size : 16;
+ uint64_t pool : 3;
+ uint64_t back : 4;
+ uint64_t i : 1;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug11_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug11_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug11_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug11_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t maj : 1; /**< Internal state */
+ uint64_t uid : 3; /**< Internal state */
+ uint64_t sop : 1; /**< Internal state */
+ uint64_t len : 1; /**< Internal state */
+ uint64_t chk : 1; /**< Internal state */
+ uint64_t cnt : 13; /**< Internal state */
+ uint64_t mod : 3; /**< Internal state */
+#else
+ uint64_t mod : 3;
+ uint64_t cnt : 13;
+ uint64_t chk : 1;
+ uint64_t len : 1;
+ uint64_t sop : 1;
+ uint64_t uid : 3;
+ uint64_t maj : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug11_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug11_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn58xxp1;
+ struct cvmx_pko_mem_debug11_cn50xx cn61xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn63xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn63xxp1;
+ struct cvmx_pko_mem_debug11_cn50xx cn66xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn68xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn68xxp1;
+ struct cvmx_pko_mem_debug11_cn50xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug11 cvmx_pko_mem_debug11_t;
+
+/**
+ * cvmx_pko_mem_debug12
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko.out.ctl.cmnd[63:0]
+ * This CSR is a memory of 44 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug12 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug12_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug12_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< WorkQ data or Store0 pointer */
+#else
+ uint64_t data : 64;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug12_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug12_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug12_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug12_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fau : 28; /**< Fetch and add command words */
+ uint64_t cmd : 14; /**< Command word */
+ uint64_t segs : 6; /**< Number of segments/gather size */
+ uint64_t size : 16; /**< Packet length in bytes */
+#else
+ uint64_t size : 16;
+ uint64_t segs : 6;
+ uint64_t cmd : 14;
+ uint64_t fau : 28;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug12_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug12_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug12_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug12_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug12_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug12_cn50xx cn58xxp1;
+ struct cvmx_pko_mem_debug12_cn50xx cn61xx;
+ struct cvmx_pko_mem_debug12_cn50xx cn63xx;
+ struct cvmx_pko_mem_debug12_cn50xx cn63xxp1;
+ struct cvmx_pko_mem_debug12_cn50xx cn66xx;
+ struct cvmx_pko_mem_debug12_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t state : 64; /**< Internal state */
+#else
+ uint64_t state : 64;
+#endif
+ } cn68xx;
+ struct cvmx_pko_mem_debug12_cn68xx cn68xxp1;
+ struct cvmx_pko_mem_debug12_cn50xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug12 cvmx_pko_mem_debug12_t;
+
+/**
+ * cvmx_pko_mem_debug13
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko.out.ctl.head[63:0]
+ * This CSR is a memory of 44 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug13 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug13_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug13_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63 : 13;
+ uint64_t widx : 17; /**< PDB widx */
+ uint64_t ridx2 : 17; /**< PDB ridx2 */
+ uint64_t widx2 : 17; /**< PDB widx2 */
+#else
+ uint64_t widx2 : 17;
+ uint64_t ridx2 : 17;
+ uint64_t widx : 17;
+ uint64_t reserved_51_63 : 13;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug13_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug13_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug13_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug13_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i : 1; /**< "I" value used for free operation */
+ uint64_t back : 4; /**< Back value used for free operation */
+ uint64_t pool : 3; /**< Pool value used for free operation */
+ uint64_t size : 16; /**< Size in bytes */
+ uint64_t ptr : 40; /**< Data pointer */
+#else
+ uint64_t ptr : 40;
+ uint64_t size : 16;
+ uint64_t pool : 3;
+ uint64_t back : 4;
+ uint64_t i : 1;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug13_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug13_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug13_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug13_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug13_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug13_cn50xx cn58xxp1;
+ struct cvmx_pko_mem_debug13_cn50xx cn61xx;
+ struct cvmx_pko_mem_debug13_cn50xx cn63xx;
+ struct cvmx_pko_mem_debug13_cn50xx cn63xxp1;
+ struct cvmx_pko_mem_debug13_cn50xx cn66xx;
+ struct cvmx_pko_mem_debug13_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t state : 64; /**< Internal state */
+#else
+ uint64_t state : 64;
+#endif
+ } cn68xx;
+ struct cvmx_pko_mem_debug13_cn68xx cn68xxp1;
+ struct cvmx_pko_mem_debug13_cn50xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug13 cvmx_pko_mem_debug13_t;
+
+/**
+ * cvmx_pko_mem_debug14
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko.prt.psb.save[63:0]
+ * This CSR is a memory of 132 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug14 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug14_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug14_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t ridx : 17; /**< PDB ridx */
+#else
+ uint64_t ridx : 17;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug14_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug14_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug14_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug14_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Command words */
+#else
+ uint64_t data : 64;
+#endif
+ } cn52xx;
+ struct cvmx_pko_mem_debug14_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_debug14_cn52xx cn56xx;
+ struct cvmx_pko_mem_debug14_cn52xx cn56xxp1;
+ struct cvmx_pko_mem_debug14_cn52xx cn61xx;
+ struct cvmx_pko_mem_debug14_cn52xx cn63xx;
+ struct cvmx_pko_mem_debug14_cn52xx cn63xxp1;
+ struct cvmx_pko_mem_debug14_cn52xx cn66xx;
+ struct cvmx_pko_mem_debug14_cn52xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug14 cvmx_pko_mem_debug14_t;
+
+/**
+ * cvmx_pko_mem_debug2
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko_prt_psb.head[63:0]
+ * This CSR is a memory of 12 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug2 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i : 1; /**< "I" value used for free operation */
+ uint64_t back : 4; /**< Back value used for free operation */
+ uint64_t pool : 3; /**< Pool value used for free operation */
+ uint64_t size : 16; /**< Size in bytes */
+ uint64_t ptr : 40; /**< Data pointer */
+#else
+ uint64_t ptr : 40;
+ uint64_t size : 16;
+ uint64_t pool : 3;
+ uint64_t back : 4;
+ uint64_t i : 1;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug2_s cn30xx;
+ struct cvmx_pko_mem_debug2_s cn31xx;
+ struct cvmx_pko_mem_debug2_s cn38xx;
+ struct cvmx_pko_mem_debug2_s cn38xxp2;
+ struct cvmx_pko_mem_debug2_s cn50xx;
+ struct cvmx_pko_mem_debug2_s cn52xx;
+ struct cvmx_pko_mem_debug2_s cn52xxp1;
+ struct cvmx_pko_mem_debug2_s cn56xx;
+ struct cvmx_pko_mem_debug2_s cn56xxp1;
+ struct cvmx_pko_mem_debug2_s cn58xx;
+ struct cvmx_pko_mem_debug2_s cn58xxp1;
+ struct cvmx_pko_mem_debug2_s cn61xx;
+ struct cvmx_pko_mem_debug2_s cn63xx;
+ struct cvmx_pko_mem_debug2_s cn63xxp1;
+ struct cvmx_pko_mem_debug2_s cn66xx;
+ struct cvmx_pko_mem_debug2_s cn68xx;
+ struct cvmx_pko_mem_debug2_s cn68xxp1;
+ struct cvmx_pko_mem_debug2_s cnf71xx;
+};
+typedef union cvmx_pko_mem_debug2 cvmx_pko_mem_debug2_t;
+
+/**
+ * cvmx_pko_mem_debug3
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko_prt_psb.resp[63:0]
+ * This CSR is a memory of 12 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug3 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug3_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i : 1; /**< "I" value used for free operation */
+ uint64_t back : 4; /**< Back value used for free operation */
+ uint64_t pool : 3; /**< Pool value used for free operation */
+ uint64_t size : 16; /**< Size in bytes */
+ uint64_t ptr : 40; /**< Data pointer */
+#else
+ uint64_t ptr : 40;
+ uint64_t size : 16;
+ uint64_t pool : 3;
+ uint64_t back : 4;
+ uint64_t i : 1;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug3_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug3_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug3_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug3_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< WorkQ data or Store0 pointer */
+#else
+ uint64_t data : 64;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug3_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug3_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn58xxp1;
+ struct cvmx_pko_mem_debug3_cn50xx cn61xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn63xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn63xxp1;
+ struct cvmx_pko_mem_debug3_cn50xx cn66xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn68xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn68xxp1;
+ struct cvmx_pko_mem_debug3_cn50xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug3 cvmx_pko_mem_debug3_t;
+
+/**
+ * cvmx_pko_mem_debug4
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko_prt_psb.state[63:0]
+ * This CSR is a memory of 12 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug4 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug4_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< WorkQ data or Store0 pointer */
+#else
+ uint64_t data : 64;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug4_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug4_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug4_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug4_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cmnd_segs : 3; /**< Internal state */
+ uint64_t cmnd_siz : 16; /**< Internal state */
+ uint64_t cmnd_off : 6; /**< Internal state */
+ uint64_t uid : 3; /**< Internal state */
+ uint64_t dread_sop : 1; /**< Internal state */
+ uint64_t init_dwrite : 1; /**< Internal state */
+ uint64_t chk_once : 1; /**< Internal state */
+ uint64_t chk_mode : 1; /**< Internal state */
+ uint64_t active : 1; /**< Internal state */
+ uint64_t static_p : 1; /**< Internal state */
+ uint64_t qos : 3; /**< Internal state */
+ uint64_t qcb_ridx : 5; /**< Internal state */
+ uint64_t qid_off_max : 4; /**< Internal state */
+ uint64_t qid_off : 4; /**< Internal state */
+ uint64_t qid_base : 8; /**< Internal state */
+ uint64_t wait : 1; /**< Internal state */
+ uint64_t minor : 2; /**< Internal state */
+ uint64_t major : 3; /**< Internal state */
+#else
+ uint64_t major : 3;
+ uint64_t minor : 2;
+ uint64_t wait : 1;
+ uint64_t qid_base : 8;
+ uint64_t qid_off : 4;
+ uint64_t qid_off_max : 4;
+ uint64_t qcb_ridx : 5;
+ uint64_t qos : 3;
+ uint64_t static_p : 1;
+ uint64_t active : 1;
+ uint64_t chk_mode : 1;
+ uint64_t chk_once : 1;
+ uint64_t init_dwrite : 1;
+ uint64_t dread_sop : 1;
+ uint64_t uid : 3;
+ uint64_t cmnd_off : 6;
+ uint64_t cmnd_siz : 16;
+ uint64_t cmnd_segs : 3;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug4_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t curr_siz : 8; /**< Internal state */
+ uint64_t curr_off : 16; /**< Internal state */
+ uint64_t cmnd_segs : 6; /**< Internal state */
+ uint64_t cmnd_siz : 16; /**< Internal state */
+ uint64_t cmnd_off : 6; /**< Internal state */
+ uint64_t uid : 2; /**< Internal state */
+ uint64_t dread_sop : 1; /**< Internal state */
+ uint64_t init_dwrite : 1; /**< Internal state */
+ uint64_t chk_once : 1; /**< Internal state */
+ uint64_t chk_mode : 1; /**< Internal state */
+ uint64_t wait : 1; /**< Internal state */
+ uint64_t minor : 2; /**< Internal state */
+ uint64_t major : 3; /**< Internal state */
+#else
+ uint64_t major : 3;
+ uint64_t minor : 2;
+ uint64_t wait : 1;
+ uint64_t chk_mode : 1;
+ uint64_t chk_once : 1;
+ uint64_t init_dwrite : 1;
+ uint64_t dread_sop : 1;
+ uint64_t uid : 2;
+ uint64_t cmnd_off : 6;
+ uint64_t cmnd_siz : 16;
+ uint64_t cmnd_segs : 6;
+ uint64_t curr_off : 16;
+ uint64_t curr_siz : 8;
+#endif
+ } cn52xx;
+ struct cvmx_pko_mem_debug4_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_debug4_cn52xx cn56xx;
+ struct cvmx_pko_mem_debug4_cn52xx cn56xxp1;
+ struct cvmx_pko_mem_debug4_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug4_cn50xx cn58xxp1;
+ struct cvmx_pko_mem_debug4_cn52xx cn61xx;
+ struct cvmx_pko_mem_debug4_cn52xx cn63xx;
+ struct cvmx_pko_mem_debug4_cn52xx cn63xxp1;
+ struct cvmx_pko_mem_debug4_cn52xx cn66xx;
+ struct cvmx_pko_mem_debug4_cn52xx cn68xx;
+ struct cvmx_pko_mem_debug4_cn52xx cn68xxp1;
+ struct cvmx_pko_mem_debug4_cn52xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug4 cvmx_pko_mem_debug4_t;
+
+/**
+ * cvmx_pko_mem_debug5
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko_prt_psb.state[127:64]
+ * This CSR is a memory of 12 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug5 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug5_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dwri_mod : 1; /**< Dwrite mod */
+ uint64_t dwri_sop : 1; /**< Dwrite sop needed */
+ uint64_t dwri_len : 1; /**< Dwrite len */
+ uint64_t dwri_cnt : 13; /**< Dwrite count */
+ uint64_t cmnd_siz : 16; /**< Copy of cmnd.size */
+ uint64_t uid : 1; /**< UID */
+ uint64_t xfer_wor : 1; /**< Transfer work needed */
+ uint64_t xfer_dwr : 1; /**< Transfer dwrite needed */
+ uint64_t cbuf_fre : 1; /**< Cbuf needs free */
+ uint64_t reserved_27_27 : 1;
+ uint64_t chk_mode : 1; /**< Checksum mode */
+ uint64_t active : 1; /**< Port is active */
+ uint64_t qos : 3; /**< Current QOS round */
+ uint64_t qcb_ridx : 5; /**< Buffer read index for QCB */
+ uint64_t qid_off : 3; /**< Offset to be added to QID_BASE for current queue */
+ uint64_t qid_base : 7; /**< Absolute QID of the queue array base = &QUEUES[0] */
+ uint64_t wait : 1; /**< State wait when set */
+ uint64_t minor : 2; /**< State minor code */
+ uint64_t major : 4; /**< State major code */
+#else
+ uint64_t major : 4;
+ uint64_t minor : 2;
+ uint64_t wait : 1;
+ uint64_t qid_base : 7;
+ uint64_t qid_off : 3;
+ uint64_t qcb_ridx : 5;
+ uint64_t qos : 3;
+ uint64_t active : 1;
+ uint64_t chk_mode : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t cbuf_fre : 1;
+ uint64_t xfer_dwr : 1;
+ uint64_t xfer_wor : 1;
+ uint64_t uid : 1;
+ uint64_t cmnd_siz : 16;
+ uint64_t dwri_cnt : 13;
+ uint64_t dwri_len : 1;
+ uint64_t dwri_sop : 1;
+ uint64_t dwri_mod : 1;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug5_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug5_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug5_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug5_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t curr_ptr : 29; /**< Internal state */
+ uint64_t curr_siz : 16; /**< Internal state */
+ uint64_t curr_off : 16; /**< Internal state */
+ uint64_t cmnd_segs : 3; /**< Internal state */
+#else
+ uint64_t cmnd_segs : 3;
+ uint64_t curr_off : 16;
+ uint64_t curr_siz : 16;
+ uint64_t curr_ptr : 29;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug5_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t nxt_inflt : 6; /**< Internal state */
+ uint64_t curr_ptr : 40; /**< Internal state */
+ uint64_t curr_siz : 8; /**< Internal state */
+#else
+ uint64_t curr_siz : 8;
+ uint64_t curr_ptr : 40;
+ uint64_t nxt_inflt : 6;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } cn52xx;
+ struct cvmx_pko_mem_debug5_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_debug5_cn52xx cn56xx;
+ struct cvmx_pko_mem_debug5_cn52xx cn56xxp1;
+ struct cvmx_pko_mem_debug5_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug5_cn50xx cn58xxp1;
+ struct cvmx_pko_mem_debug5_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t ptp : 1; /**< Internal state */
+ uint64_t major_3 : 1; /**< Internal state */
+ uint64_t nxt_inflt : 6; /**< Internal state */
+ uint64_t curr_ptr : 40; /**< Internal state */
+ uint64_t curr_siz : 8; /**< Internal state */
+#else
+ uint64_t curr_siz : 8;
+ uint64_t curr_ptr : 40;
+ uint64_t nxt_inflt : 6;
+ uint64_t major_3 : 1;
+ uint64_t ptp : 1;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn61xx;
+ struct cvmx_pko_mem_debug5_cn61xx cn63xx;
+ struct cvmx_pko_mem_debug5_cn61xx cn63xxp1;
+ struct cvmx_pko_mem_debug5_cn61xx cn66xx;
+ struct cvmx_pko_mem_debug5_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_57_63 : 7;
+ uint64_t uid_2 : 1; /**< Internal state */
+ uint64_t ptp : 1; /**< Internal state */
+ uint64_t major_3 : 1; /**< Internal state */
+ uint64_t nxt_inflt : 6; /**< Internal state */
+ uint64_t curr_ptr : 40; /**< Internal state */
+ uint64_t curr_siz : 8; /**< Internal state */
+#else
+ uint64_t curr_siz : 8;
+ uint64_t curr_ptr : 40;
+ uint64_t nxt_inflt : 6;
+ uint64_t major_3 : 1;
+ uint64_t ptp : 1;
+ uint64_t uid_2 : 1;
+ uint64_t reserved_57_63 : 7;
+#endif
+ } cn68xx;
+ struct cvmx_pko_mem_debug5_cn68xx cn68xxp1;
+ struct cvmx_pko_mem_debug5_cn61xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug5 cvmx_pko_mem_debug5_t;
+
+/**
+ * cvmx_pko_mem_debug6
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko_prt_psb.port[63:0]
+ * This CSR is a memory of 44 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug6 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t qid_offres : 4; /**< Internal state */
+ uint64_t qid_offths : 4; /**< Internal state */
+ uint64_t preempter : 1; /**< Internal state */
+ uint64_t preemptee : 1; /**< Internal state */
+ uint64_t preempted : 1; /**< Internal state */
+ uint64_t active : 1; /**< Internal state */
+ uint64_t statc : 1; /**< Internal state */
+ uint64_t qos : 3; /**< Internal state */
+ uint64_t qcb_ridx : 5; /**< Internal state */
+ uint64_t qid_offmax : 4; /**< Internal state */
+ uint64_t reserved_0_11 : 12;
+#else
+ uint64_t reserved_0_11 : 12;
+ uint64_t qid_offmax : 4;
+ uint64_t qcb_ridx : 5;
+ uint64_t qos : 3;
+ uint64_t statc : 1;
+ uint64_t active : 1;
+ uint64_t preempted : 1;
+ uint64_t preemptee : 1;
+ uint64_t preempter : 1;
+ uint64_t qid_offths : 4;
+ uint64_t qid_offres : 4;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug6_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t qid_offm : 3; /**< Qid offset max */
+ uint64_t static_p : 1; /**< Static port when set */
+ uint64_t work_min : 3; /**< Work minor */
+ uint64_t dwri_chk : 1; /**< Dwrite checksum mode */
+ uint64_t dwri_uid : 1; /**< Dwrite UID */
+ uint64_t dwri_mod : 2; /**< Dwrite mod */
+#else
+ uint64_t dwri_mod : 2;
+ uint64_t dwri_uid : 1;
+ uint64_t dwri_chk : 1;
+ uint64_t work_min : 3;
+ uint64_t static_p : 1;
+ uint64_t qid_offm : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug6_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug6_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug6_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug6_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t curr_ptr : 11; /**< Internal state */
+#else
+ uint64_t curr_ptr : 11;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug6_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t qid_offres : 4; /**< Internal state */
+ uint64_t qid_offths : 4; /**< Internal state */
+ uint64_t preempter : 1; /**< Internal state */
+ uint64_t preemptee : 1; /**< Internal state */
+ uint64_t preempted : 1; /**< Internal state */
+ uint64_t active : 1; /**< Internal state */
+ uint64_t statc : 1; /**< Internal state */
+ uint64_t qos : 3; /**< Internal state */
+ uint64_t qcb_ridx : 5; /**< Internal state */
+ uint64_t qid_offmax : 4; /**< Internal state */
+ uint64_t qid_off : 4; /**< Internal state */
+ uint64_t qid_base : 8; /**< Internal state */
+#else
+ uint64_t qid_base : 8;
+ uint64_t qid_off : 4;
+ uint64_t qid_offmax : 4;
+ uint64_t qcb_ridx : 5;
+ uint64_t qos : 3;
+ uint64_t statc : 1;
+ uint64_t active : 1;
+ uint64_t preempted : 1;
+ uint64_t preemptee : 1;
+ uint64_t preempter : 1;
+ uint64_t qid_offths : 4;
+ uint64_t qid_offres : 4;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } cn52xx;
+ struct cvmx_pko_mem_debug6_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_debug6_cn52xx cn56xx;
+ struct cvmx_pko_mem_debug6_cn52xx cn56xxp1;
+ struct cvmx_pko_mem_debug6_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug6_cn50xx cn58xxp1;
+ struct cvmx_pko_mem_debug6_cn52xx cn61xx;
+ struct cvmx_pko_mem_debug6_cn52xx cn63xx;
+ struct cvmx_pko_mem_debug6_cn52xx cn63xxp1;
+ struct cvmx_pko_mem_debug6_cn52xx cn66xx;
+ struct cvmx_pko_mem_debug6_cn52xx cn68xx;
+ struct cvmx_pko_mem_debug6_cn52xx cn68xxp1;
+ struct cvmx_pko_mem_debug6_cn52xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug6 cvmx_pko_mem_debug6_t;
+
+/**
+ * cvmx_pko_mem_debug7
+ *
+ * Notes:
+ * Internal per-queue state intended for debug use only - pko_prt_qsb.state[63:0]
+ * This CSR is a memory of 256 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug7 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug7_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t dwb : 9; /**< Calculated DWB count used for free operation */
+ uint64_t start : 33; /**< Calculated start address used for free operation */
+ uint64_t size : 16; /**< Packet length in bytes */
+#else
+ uint64_t size : 16;
+ uint64_t start : 33;
+ uint64_t dwb : 9;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug7_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug7_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug7_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug7_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t qos : 5; /**< QOS mask to enable the queue when set */
+ uint64_t tail : 1; /**< This queue is the last (tail) in the queue array */
+ uint64_t buf_siz : 13; /**< Command buffer remaining size in words */
+ uint64_t buf_ptr : 33; /**< Command word pointer */
+ uint64_t qcb_widx : 6; /**< Buffer write index for QCB */
+ uint64_t qcb_ridx : 6; /**< Buffer read index for QCB */
+#else
+ uint64_t qcb_ridx : 6;
+ uint64_t qcb_widx : 6;
+ uint64_t buf_ptr : 33;
+ uint64_t buf_siz : 13;
+ uint64_t tail : 1;
+ uint64_t qos : 5;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug7_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug7_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug7_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug7_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug7_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug7_cn50xx cn58xxp1;
+ struct cvmx_pko_mem_debug7_cn50xx cn61xx;
+ struct cvmx_pko_mem_debug7_cn50xx cn63xx;
+ struct cvmx_pko_mem_debug7_cn50xx cn63xxp1;
+ struct cvmx_pko_mem_debug7_cn50xx cn66xx;
+ struct cvmx_pko_mem_debug7_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t qos : 3; /**< QOS mask to enable the queue when set */
+ uint64_t tail : 1; /**< This queue is the last (tail) in the queue array */
+ uint64_t buf_siz : 13; /**< Command buffer remaining size in words */
+ uint64_t buf_ptr : 33; /**< Command word pointer */
+ uint64_t qcb_widx : 7; /**< Buffer write index for QCB */
+ uint64_t qcb_ridx : 7; /**< Buffer read index for QCB */
+#else
+ uint64_t qcb_ridx : 7;
+ uint64_t qcb_widx : 7;
+ uint64_t buf_ptr : 33;
+ uint64_t buf_siz : 13;
+ uint64_t tail : 1;
+ uint64_t qos : 3;
+#endif
+ } cn68xx;
+ struct cvmx_pko_mem_debug7_cn68xx cn68xxp1;
+ struct cvmx_pko_mem_debug7_cn50xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug7 cvmx_pko_mem_debug7_t;
+
+/**
+ * cvmx_pko_mem_debug8
+ *
+ * Notes:
+ * Internal per-queue state intended for debug use only - pko_prt_qsb.state[91:64]
+ * This CSR is a memory of 256 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug8 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug8_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t tail : 1; /**< This queue is the last (tail) in the queue array */
+ uint64_t buf_siz : 13; /**< Command buffer remaining size in words */
+ uint64_t reserved_0_44 : 45;
+#else
+ uint64_t reserved_0_44 : 45;
+ uint64_t buf_siz : 13;
+ uint64_t tail : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug8_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t qos : 5; /**< QOS mask to enable the queue when set */
+ uint64_t tail : 1; /**< This queue is the last (tail) in the queue array */
+ uint64_t buf_siz : 13; /**< Command buffer remaining size in words */
+ uint64_t buf_ptr : 33; /**< Command word pointer */
+ uint64_t qcb_widx : 6; /**< Buffer write index for QCB */
+ uint64_t qcb_ridx : 6; /**< Buffer read index for QCB */
+#else
+ uint64_t qcb_ridx : 6;
+ uint64_t qcb_widx : 6;
+ uint64_t buf_ptr : 33;
+ uint64_t buf_siz : 13;
+ uint64_t tail : 1;
+ uint64_t qos : 5;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug8_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug8_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug8_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug8_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t doorbell : 20; /**< Doorbell count */
+ uint64_t reserved_6_7 : 2;
+ uint64_t static_p : 1; /**< Static priority */
+ uint64_t s_tail : 1; /**< Static tail */
+ uint64_t static_q : 1; /**< Static priority */
+ uint64_t qos : 3; /**< QOS mask to enable the queue when set */
+#else
+ uint64_t qos : 3;
+ uint64_t static_q : 1;
+ uint64_t s_tail : 1;
+ uint64_t static_p : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t doorbell : 20;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug8_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t preempter : 1; /**< Preempter */
+ uint64_t doorbell : 20; /**< Doorbell count */
+ uint64_t reserved_7_7 : 1;
+ uint64_t preemptee : 1; /**< Preemptee */
+ uint64_t static_p : 1; /**< Static priority */
+ uint64_t s_tail : 1; /**< Static tail */
+ uint64_t static_q : 1; /**< Static priority */
+ uint64_t qos : 3; /**< QOS mask to enable the queue when set */
+#else
+ uint64_t qos : 3;
+ uint64_t static_q : 1;
+ uint64_t s_tail : 1;
+ uint64_t static_p : 1;
+ uint64_t preemptee : 1;
+ uint64_t reserved_7_7 : 1;
+ uint64_t doorbell : 20;
+ uint64_t preempter : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn52xx;
+ struct cvmx_pko_mem_debug8_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_debug8_cn52xx cn56xx;
+ struct cvmx_pko_mem_debug8_cn52xx cn56xxp1;
+ struct cvmx_pko_mem_debug8_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug8_cn50xx cn58xxp1;
+ struct cvmx_pko_mem_debug8_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63 : 22;
+ uint64_t qid_qqos : 8; /**< QOS_MASK */
+ uint64_t reserved_33_33 : 1;
+ uint64_t qid_idx : 4; /**< IDX */
+ uint64_t preempter : 1; /**< Preempter */
+ uint64_t doorbell : 20; /**< Doorbell count */
+ uint64_t reserved_7_7 : 1;
+ uint64_t preemptee : 1; /**< Preemptee */
+ uint64_t static_p : 1; /**< Static priority */
+ uint64_t s_tail : 1; /**< Static tail */
+ uint64_t static_q : 1; /**< Static priority */
+ uint64_t qos : 3; /**< QOS mask to enable the queue when set */
+#else
+ uint64_t qos : 3;
+ uint64_t static_q : 1;
+ uint64_t s_tail : 1;
+ uint64_t static_p : 1;
+ uint64_t preemptee : 1;
+ uint64_t reserved_7_7 : 1;
+ uint64_t doorbell : 20;
+ uint64_t preempter : 1;
+ uint64_t qid_idx : 4;
+ uint64_t reserved_33_33 : 1;
+ uint64_t qid_qqos : 8;
+ uint64_t reserved_42_63 : 22;
+#endif
+ } cn61xx;
+ struct cvmx_pko_mem_debug8_cn52xx cn63xx;
+ struct cvmx_pko_mem_debug8_cn52xx cn63xxp1;
+ struct cvmx_pko_mem_debug8_cn61xx cn66xx;
+ struct cvmx_pko_mem_debug8_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t preempter : 1; /**< Preempter */
+ uint64_t doorbell : 20; /**< Doorbell count */
+ uint64_t reserved_9_15 : 7;
+ uint64_t preemptee : 1; /**< Preemptee */
+ uint64_t static_p : 1; /**< Static priority */
+ uint64_t s_tail : 1; /**< Static tail */
+ uint64_t static_q : 1; /**< Static priority */
+ uint64_t qos : 5; /**< QOS mask to enable the queue when set */
+#else
+ uint64_t qos : 5;
+ uint64_t static_q : 1;
+ uint64_t s_tail : 1;
+ uint64_t static_p : 1;
+ uint64_t preemptee : 1;
+ uint64_t reserved_9_15 : 7;
+ uint64_t doorbell : 20;
+ uint64_t preempter : 1;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } cn68xx;
+ struct cvmx_pko_mem_debug8_cn68xx cn68xxp1;
+ struct cvmx_pko_mem_debug8_cn61xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug8 cvmx_pko_mem_debug8_t;
+
+/**
+ * cvmx_pko_mem_debug9
+ *
+ * Notes:
+ * Internal per-port state intended for debug use only - pko.dat.ptr.ptrs0, pko.dat.ptr.ptrs3
+ * This CSR is a memory of 44 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_pko_mem_debug9 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug9_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptrs0 : 17; /**< Internal state */
+ uint64_t reserved_0_31 : 32;
+#else
+ uint64_t reserved_0_31 : 32;
+ uint64_t ptrs0 : 17;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug9_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t doorbell : 20; /**< Doorbell count */
+ uint64_t reserved_5_7 : 3;
+ uint64_t s_tail : 1; /**< reads as zero (S_TAIL cannot be read) */
+ uint64_t static_q : 1; /**< reads as zero (STATIC_Q cannot be read) */
+ uint64_t qos : 3; /**< QOS mask to enable the queue when set */
+#else
+ uint64_t qos : 3;
+ uint64_t static_q : 1;
+ uint64_t s_tail : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t doorbell : 20;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug9_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug9_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t doorbell : 20; /**< Doorbell count */
+ uint64_t reserved_6_7 : 2;
+ uint64_t static_p : 1; /**< Static priority (port) */
+ uint64_t s_tail : 1; /**< Static tail */
+ uint64_t static_q : 1; /**< Static priority */
+ uint64_t qos : 3; /**< QOS mask to enable the queue when set */
+#else
+ uint64_t qos : 3;
+ uint64_t static_q : 1;
+ uint64_t s_tail : 1;
+ uint64_t static_p : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t doorbell : 20;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn38xx;
+ struct cvmx_pko_mem_debug9_cn38xx cn38xxp2;
+ struct cvmx_pko_mem_debug9_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptrs0 : 17; /**< Internal state */
+ uint64_t reserved_17_31 : 15;
+ uint64_t ptrs3 : 17; /**< Internal state */
+#else
+ uint64_t ptrs3 : 17;
+ uint64_t reserved_17_31 : 15;
+ uint64_t ptrs0 : 17;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug9_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug9_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn58xxp1;
+ struct cvmx_pko_mem_debug9_cn50xx cn61xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn63xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn63xxp1;
+ struct cvmx_pko_mem_debug9_cn50xx cn66xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn68xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn68xxp1;
+ struct cvmx_pko_mem_debug9_cn50xx cnf71xx;
+};
+typedef union cvmx_pko_mem_debug9 cvmx_pko_mem_debug9_t;
+
+/**
+ * cvmx_pko_mem_iport_ptrs
+ *
+ * Notes:
+ * This CSR is a memory of 128 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. The index to this CSR is an IPORT. A read of any
+ * entry that has not been previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_iport_ptrs {
+ uint64_t u64;
+ struct cvmx_pko_mem_iport_ptrs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63 : 1;
+ uint64_t crc : 1; /**< Set if this IPID uses CRC */
+ uint64_t static_p : 1; /**< Set if this IPID has static priority */
+ uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */
+ uint64_t min_pkt : 3; /**< Min packet size specified by PKO_REG_MIN_PKT[MIN_PKT] */
+ uint64_t reserved_31_49 : 19;
+ uint64_t pipe : 7; /**< The PKO pipe or loopback port
+ When INT != PIP/IPD:
+ PIPE is the PKO pipe to which this port is mapped
+ All used PKO-internal ports that map to the same
+ PIPE must also map to the same INT and EID in
+ this case.
+ When INT == PIP/IPD:
+ PIPE must be in the range
+ 0..PKO_REG_LOOPBACK[NUM_PORTS]-1
+ in this case and selects one of the loopback
+ ports. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t intr : 5; /**< The interface to which this port is mapped
+ All used PKO-internal ports that map to the same EID
+ must also map to the same INT. All used PKO-internal
+ ports that map to the same INT must also map to the
+ same EID.
+ Encoding:
+ 0 = GMX0 XAUI/DXAUI/RXAUI0 or SGMII0
+ 1 = GMX0 SGMII1
+ 2 = GMX0 SGMII2
+ 3 = GMX0 SGMII3
+ 4 = GMX1 RXAUI
+ 8 = GMX2 XAUI/DXAUI or SGMII0
+ 9 = GMX2 SGMII1
+ 10 = GMX2 SGMII2
+ 11 = GMX2 SGMII3
+ 12 = GMX3 XAUI/DXAUI or SGMII0
+ 13 = GMX3 SGMII1
+ 14 = GMX3 SGMII2
+ 15 = GMX3 SGMII3
+ 16 = GMX4 XAUI/DXAUI or SGMII0
+ 17 = GMX4 SGMII1
+ 18 = GMX4 SGMII2
+ 19 = GMX4 SGMII3
+ 28 = ILK interface 0
+ 29 = ILK interface 1
+ 30 = DPI
+ 31 = PIP/IPD
+ others = reserved */
+ uint64_t reserved_13_15 : 3;
+ uint64_t eid : 5; /**< Engine ID to which this port is mapped
+ EID==31 can be used with unused PKO-internal ports.
+ Otherwise, 0-19 are legal EID values. */
+ uint64_t reserved_7_7 : 1;
+ uint64_t ipid : 7; /**< PKO-internal Port ID to be accessed */
+#else
+ uint64_t ipid : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t eid : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t intr : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t pipe : 7;
+ uint64_t reserved_31_49 : 19;
+ uint64_t min_pkt : 3;
+ uint64_t qos_mask : 8;
+ uint64_t static_p : 1;
+ uint64_t crc : 1;
+ uint64_t reserved_63_63 : 1;
+#endif
+ } s;
+ struct cvmx_pko_mem_iport_ptrs_s cn68xx;
+ struct cvmx_pko_mem_iport_ptrs_s cn68xxp1;
+};
+typedef union cvmx_pko_mem_iport_ptrs cvmx_pko_mem_iport_ptrs_t;
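+
+/*
+ * Configuration sketch (illustrative only): mapping a PKO-internal port to
+ * an engine and interface by filling in the bitfield view and writing the
+ * raw u64. Assumes the CVMX_PKO_MEM_IPORT_PTRS address macro and
+ * cvmx_write_csr() from elsewhere in this SDK; all field values are
+ * example placeholders.
+ *
+ *   cvmx_pko_mem_iport_ptrs_t ptrs;
+ *   ptrs.u64 = 0;
+ *   ptrs.s.ipid = 12;        // PKO-internal port being configured
+ *   ptrs.s.eid = 3;          // engine 0-19 (31 marks an unused port)
+ *   ptrs.s.intr = 0;         // interface encoding from the table above
+ *   ptrs.s.pipe = 12;        // PKO pipe backing this port
+ *   ptrs.s.qos_mask = 0xff;  // eligible in all 8 QOS rounds
+ *   cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, ptrs.u64);
+ */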
+
+/**
+ * cvmx_pko_mem_iport_qos
+ *
+ * Notes:
+ * Sets the QOS mask, per port. These QOS_MASK bits are logically and physically the same QOS_MASK
+ * bits in PKO_MEM_IPORT_PTRS. This CSR address allows the QOS_MASK bits to be written during PKO
+ * operation without affecting any other port state. The engine to which port PID is mapped is engine
+ * EID. Note that the port to engine mapping must be the same as was previously programmed via the
+ * PKO_MEM_IPORT_PTRS CSR.
+ * This CSR is a memory of 128 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. The index to this CSR is an IPORT. A read of
+ * any entry that has not been previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_iport_qos {
+ uint64_t u64;
+ struct cvmx_pko_mem_iport_qos_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */
+ uint64_t reserved_13_52 : 40;
+ uint64_t eid : 5; /**< Engine ID to which this port is mapped */
+ uint64_t reserved_7_7 : 1;
+ uint64_t ipid : 7; /**< PKO-internal Port ID */
+#else
+ uint64_t ipid : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t eid : 5;
+ uint64_t reserved_13_52 : 40;
+ uint64_t qos_mask : 8;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } s;
+ struct cvmx_pko_mem_iport_qos_s cn68xx;
+ struct cvmx_pko_mem_iport_qos_s cn68xxp1;
+};
+typedef union cvmx_pko_mem_iport_qos cvmx_pko_mem_iport_qos_t;
+
+/**
+ * cvmx_pko_mem_iqueue_ptrs
+ *
+ * Notes:
+ * Sets the queue to port mapping and the initial command buffer pointer, per queue. Unused queues must
+ * set BUF_PTR=0. Each queue may map to at most one port. No more than 32 queues may map to a port.
+ * The set of queues that is mapped to a port must be a contiguous array of queues. The port to which
+ * queue QID is mapped is port IPID. The index of queue QID in port IPID's queue list is IDX. The last
+ * queue in port IPID's queue array must have its TAIL bit set.
+ * STATIC_Q marks queue QID as having static priority. STATIC_P marks the port IPID to which QID is
+ * mapped as having at least one queue with static priority. If any QID that maps to IPID has static
+ * priority, then all QID that map to IPID must have STATIC_P set. Queues marked as static priority
+ * must be contiguous and begin at IDX 0. The last queue that is marked as having static priority
+ * must have its S_TAIL bit set.
+ * This CSR is a memory of 256 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. The index to this CSR is an IQUEUE. A read of any
+ * entry that has not been previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_iqueue_ptrs {
+ uint64_t u64;
+ struct cvmx_pko_mem_iqueue_ptrs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t s_tail : 1; /**< Set if this QID is the tail of the static queues */
+ uint64_t static_p : 1; /**< Set if any QID in this IPID has static priority */
+ uint64_t static_q : 1; /**< Set if this QID has static priority */
+ uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */
+ uint64_t buf_ptr : 31; /**< Command buffer pointer[37:7] */
+ uint64_t tail : 1; /**< Set if this QID is the tail of the queue array */
+ uint64_t index : 5; /**< Index (distance from head) in the queue array */
+ uint64_t reserved_15_15 : 1;
+ uint64_t ipid : 7; /**< PKO-Internal Port ID to which this queue is mapped */
+ uint64_t qid : 8; /**< Queue ID */
+#else
+ uint64_t qid : 8;
+ uint64_t ipid : 7;
+ uint64_t reserved_15_15 : 1;
+ uint64_t index : 5;
+ uint64_t tail : 1;
+ uint64_t buf_ptr : 31;
+ uint64_t qos_mask : 8;
+ uint64_t static_q : 1;
+ uint64_t static_p : 1;
+ uint64_t s_tail : 1;
+#endif
+ } s;
+ struct cvmx_pko_mem_iqueue_ptrs_s cn68xx;
+ struct cvmx_pko_mem_iqueue_ptrs_s cn68xxp1;
+};
+typedef union cvmx_pko_mem_iqueue_ptrs cvmx_pko_mem_iqueue_ptrs_t;
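+
+/*
+ * Configuration sketch (illustrative only): mapping queue QID to internal
+ * port IPID with an initial command buffer. BUF_PTR holds address bits
+ * [37:7], so a 128-byte-aligned physical address is stored shifted right
+ * by 7. Assumes the CVMX_PKO_MEM_IQUEUE_PTRS address macro and
+ * cvmx_write_csr(); the buffer address is a hypothetical placeholder.
+ *
+ *   uint64_t buf_pa = 0x4000000ull;  // hypothetical 128-byte-aligned buffer
+ *   cvmx_pko_mem_iqueue_ptrs_t qptr;
+ *   qptr.u64 = 0;
+ *   qptr.s.qid = 40;                 // queue being configured
+ *   qptr.s.ipid = 12;                // internal port it maps to
+ *   qptr.s.index = 0;                // head of this port's queue array
+ *   qptr.s.tail = 1;                 // also the last queue of the array
+ *   qptr.s.qos_mask = 0xff;
+ *   qptr.s.buf_ptr = buf_pa >> 7;    // pointer bits [37:7]
+ *   cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, qptr.u64);
+ */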
+
+/**
+ * cvmx_pko_mem_iqueue_qos
+ *
+ * Notes:
+ * Sets the QOS mask, per queue. These QOS_MASK bits are logically and physically the same QOS_MASK
+ * bits in PKO_MEM_IQUEUE_PTRS. This CSR address allows the QOS_MASK bits to be written during PKO
+ * operation without affecting any other queue state. The port to which queue QID is mapped is port
+ * IPID. Note that the queue to port mapping must be the same as was previously programmed via the
+ * PKO_MEM_IQUEUE_PTRS CSR.
+ * This CSR is a memory of 256 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. The index to this CSR is an IQUEUE. A read of any
+ * entry that has not been previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_iqueue_qos {
+ uint64_t u64;
+ struct cvmx_pko_mem_iqueue_qos_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */
+ uint64_t reserved_15_52 : 38;
+ uint64_t ipid : 7; /**< PKO-Internal Port ID to which this queue is mapped */
+ uint64_t qid : 8; /**< Queue ID */
+#else
+ uint64_t qid : 8;
+ uint64_t ipid : 7;
+ uint64_t reserved_15_52 : 38;
+ uint64_t qos_mask : 8;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } s;
+ struct cvmx_pko_mem_iqueue_qos_s cn68xx;
+ struct cvmx_pko_mem_iqueue_qos_s cn68xxp1;
+};
+typedef union cvmx_pko_mem_iqueue_qos cvmx_pko_mem_iqueue_qos_t;
+
+/**
+ * cvmx_pko_mem_port_ptrs
+ *
+ * Notes:
+ * Sets the port to engine mapping, per port. Ports marked as static priority need not be contiguous,
+ * but they must be the lowest numbered PIDs mapped to this EID and must have QOS_MASK=0xff. If EID==8
+ * or EID==9, then PID[1:0] is used to direct the packet to the correct port on that interface.
+ * EID==15 can be used for unused PKO-internal ports.
+ * BP_PORT==63 means that the PKO-internal port is not backpressured.
+ * BP_PORTs are assumed to belong to an interface as follows:
+ * 46 <= BP_PORT < 48 -> srio interface 3
+ * 44 <= BP_PORT < 46 -> srio interface 2
+ * 42 <= BP_PORT < 44 -> srio interface 1
+ * 40 <= BP_PORT < 42 -> srio interface 0
+ * 36 <= BP_PORT < 40 -> loopback interface
+ * 32 <= BP_PORT < 36 -> PCIe interface
+ * 16 <= BP_PORT < 32 -> SGMII/Xaui interface 1
+ * 0 <= BP_PORT < 16 -> SGMII/Xaui interface 0
+ *
+ * Note that the SRIO interfaces do not actually provide backpressure. Thus, ports that use
+ * 40 <= BP_PORT < 48 for backpressure will never be backpressured.
+ *
+ * The reset configuration is the following:
+ * PID EID(ext port) BP_PORT QOS_MASK STATIC_P
+ * -------------------------------------------
+ * 0 0( 0) 0 0xff 0
+ * 1 1( 1) 1 0xff 0
+ * 2 2( 2) 2 0xff 0
+ * 3 3( 3) 3 0xff 0
+ * 4 0( 0) 4 0xff 0
+ * 5 1( 1) 5 0xff 0
+ * 6 2( 2) 6 0xff 0
+ * 7 3( 3) 7 0xff 0
+ * 8 0( 0) 8 0xff 0
+ * 9 1( 1) 9 0xff 0
+ * 10 2( 2) 10 0xff 0
+ * 11 3( 3) 11 0xff 0
+ * 12 0( 0) 12 0xff 0
+ * 13 1( 1) 13 0xff 0
+ * 14 2( 2) 14 0xff 0
+ * 15 3( 3) 15 0xff 0
+ * -------------------------------------------
+ * 16 4(16) 16 0xff 0
+ * 17 5(17) 17 0xff 0
+ * 18 6(18) 18 0xff 0
+ * 19 7(19) 19 0xff 0
+ * 20 4(16) 20 0xff 0
+ * 21 5(17) 21 0xff 0
+ * 22 6(18) 22 0xff 0
+ * 23 7(19) 23 0xff 0
+ * 24 4(16) 24 0xff 0
+ * 25 5(17) 25 0xff 0
+ * 26 6(18) 26 0xff 0
+ * 27 7(19) 27 0xff 0
+ * 28 4(16) 28 0xff 0
+ * 29 5(17) 29 0xff 0
+ * 30 6(18) 30 0xff 0
+ * 31 7(19) 31 0xff 0
+ * -------------------------------------------
+ * 32 8(32) 32 0xff 0
+ * 33 8(33) 33 0xff 0
+ * 34 8(34) 34 0xff 0
+ * 35 8(35) 35 0xff 0
+ * -------------------------------------------
+ * 36 9(36) 36 0xff 0
+ * 37 9(37) 37 0xff 0
+ * 38 9(38) 38 0xff 0
+ * 39 9(39) 39 0xff 0
+ *
+ * This CSR is a memory of 48 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. A read of any entry that has not been
+ * previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_port_ptrs {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_ptrs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t static_p : 1; /**< Set if this PID has static priority */
+ uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */
+ uint64_t reserved_16_52 : 37;
+ uint64_t bp_port : 6; /**< PID listens to BP_PORT for per-packet backpressure
+ Legal BP_PORTs: 0-15, 32-47, 63 (63 means no BP) */
+ uint64_t eid : 4; /**< Engine ID to which this port is mapped
+ Legal EIDs: 0-3, 8-13, 15 (15 only if port not used) */
+ uint64_t pid : 6; /**< Port ID[5:0] */
+#else
+ uint64_t pid : 6;
+ uint64_t eid : 4;
+ uint64_t bp_port : 6;
+ uint64_t reserved_16_52 : 37;
+ uint64_t qos_mask : 8;
+ uint64_t static_p : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_pko_mem_port_ptrs_s cn52xx;
+ struct cvmx_pko_mem_port_ptrs_s cn52xxp1;
+ struct cvmx_pko_mem_port_ptrs_s cn56xx;
+ struct cvmx_pko_mem_port_ptrs_s cn56xxp1;
+ struct cvmx_pko_mem_port_ptrs_s cn61xx;
+ struct cvmx_pko_mem_port_ptrs_s cn63xx;
+ struct cvmx_pko_mem_port_ptrs_s cn63xxp1;
+ struct cvmx_pko_mem_port_ptrs_s cn66xx;
+ struct cvmx_pko_mem_port_ptrs_s cnf71xx;
+};
+typedef union cvmx_pko_mem_port_ptrs cvmx_pko_mem_port_ptrs_t;
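+
+/*
+ * Configuration sketch (illustrative only): one entry of the mapping shown
+ * in the reset table above - port PID mapped to engine EID with its
+ * backpressure port selected. Assumes the CVMX_PKO_MEM_PORT_PTRS address
+ * macro and cvmx_write_csr(); values are example placeholders.
+ *
+ *   cvmx_pko_mem_port_ptrs_t pptr;
+ *   pptr.u64 = 0;
+ *   pptr.s.pid = 4;          // port being configured
+ *   pptr.s.eid = 0;          // engine (15 marks an unused port)
+ *   pptr.s.bp_port = 4;      // backpressure source (63 = none)
+ *   pptr.s.qos_mask = 0xff;  // eligible in all 8 QOS rounds
+ *   cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pptr.u64);
+ */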
+
+/**
+ * cvmx_pko_mem_port_qos
+ *
+ * Notes:
+ * Sets the QOS mask, per port. These QOS_MASK bits are logically and physically the same QOS_MASK
+ * bits in PKO_MEM_PORT_PTRS. This CSR address allows the QOS_MASK bits to be written during PKO
+ * operation without affecting any other port state. The engine to which port PID is mapped is engine
+ * EID. Note that the port to engine mapping must be the same as was previously programmed via the
+ * PKO_MEM_PORT_PTRS CSR.
+ * This CSR is a memory of 44 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. A read of any entry that has not been
+ * previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_port_qos {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_qos_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */
+ uint64_t reserved_10_52 : 43;
+ uint64_t eid : 4; /**< Engine ID to which this port is mapped
+ Legal EIDs: 0-3, 8-11 */
+ uint64_t pid : 6; /**< Port ID[5:0] */
+#else
+ uint64_t pid : 6;
+ uint64_t eid : 4;
+ uint64_t reserved_10_52 : 43;
+ uint64_t qos_mask : 8;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } s;
+ struct cvmx_pko_mem_port_qos_s cn52xx;
+ struct cvmx_pko_mem_port_qos_s cn52xxp1;
+ struct cvmx_pko_mem_port_qos_s cn56xx;
+ struct cvmx_pko_mem_port_qos_s cn56xxp1;
+ struct cvmx_pko_mem_port_qos_s cn61xx;
+ struct cvmx_pko_mem_port_qos_s cn63xx;
+ struct cvmx_pko_mem_port_qos_s cn63xxp1;
+ struct cvmx_pko_mem_port_qos_s cn66xx;
+ struct cvmx_pko_mem_port_qos_s cnf71xx;
+};
+typedef union cvmx_pko_mem_port_qos cvmx_pko_mem_port_qos_t;
+
+/**
+ * cvmx_pko_mem_port_rate0
+ *
+ * Notes:
+ * This CSR is a memory of 44 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. A read of any entry that has not been
+ * previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_port_rate0 {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_rate0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63 : 13;
+ uint64_t rate_word : 19; /**< Rate limiting adder per 8 byte */
+ uint64_t rate_pkt : 24; /**< Rate limiting adder per packet */
+ uint64_t reserved_7_7 : 1;
+ uint64_t pid : 7; /**< Port ID[6:0] */
+#else
+ uint64_t pid : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t rate_pkt : 24;
+ uint64_t rate_word : 19;
+ uint64_t reserved_51_63 : 13;
+#endif
+ } s;
+ struct cvmx_pko_mem_port_rate0_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63 : 13;
+ uint64_t rate_word : 19; /**< Rate limiting adder per 8 byte */
+ uint64_t rate_pkt : 24; /**< Rate limiting adder per packet */
+ uint64_t reserved_6_7 : 2;
+ uint64_t pid : 6; /**< Port ID[5:0] */
+#else
+ uint64_t pid : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t rate_pkt : 24;
+ uint64_t rate_word : 19;
+ uint64_t reserved_51_63 : 13;
+#endif
+ } cn52xx;
+ struct cvmx_pko_mem_port_rate0_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_port_rate0_cn52xx cn56xx;
+ struct cvmx_pko_mem_port_rate0_cn52xx cn56xxp1;
+ struct cvmx_pko_mem_port_rate0_cn52xx cn61xx;
+ struct cvmx_pko_mem_port_rate0_cn52xx cn63xx;
+ struct cvmx_pko_mem_port_rate0_cn52xx cn63xxp1;
+ struct cvmx_pko_mem_port_rate0_cn52xx cn66xx;
+ struct cvmx_pko_mem_port_rate0_s cn68xx;
+ struct cvmx_pko_mem_port_rate0_s cn68xxp1;
+ struct cvmx_pko_mem_port_rate0_cn52xx cnf71xx;
+};
+typedef union cvmx_pko_mem_port_rate0 cvmx_pko_mem_port_rate0_t;
+
+/**
+ * cvmx_pko_mem_port_rate1
+ *
+ * Notes:
+ * Writing PKO_MEM_PORT_RATE1[PID,RATE_LIM] has the side effect of setting the corresponding
+ * accumulator to zero.
+ * This CSR is a memory of 44 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. A read of any entry that has not been
+ * previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_port_rate1 {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_rate1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rate_lim : 24; /**< Rate limiting accumulator limit */
+ uint64_t reserved_7_7 : 1;
+ uint64_t pid : 7; /**< Port ID[6:0] */
+#else
+ uint64_t pid : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t rate_lim : 24;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pko_mem_port_rate1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rate_lim : 24; /**< Rate limiting accumulator limit */
+ uint64_t reserved_6_7 : 2;
+ uint64_t pid : 6; /**< Port ID[5:0] */
+#else
+ uint64_t pid : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t rate_lim : 24;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn52xx;
+ struct cvmx_pko_mem_port_rate1_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_port_rate1_cn52xx cn56xx;
+ struct cvmx_pko_mem_port_rate1_cn52xx cn56xxp1;
+ struct cvmx_pko_mem_port_rate1_cn52xx cn61xx;
+ struct cvmx_pko_mem_port_rate1_cn52xx cn63xx;
+ struct cvmx_pko_mem_port_rate1_cn52xx cn63xxp1;
+ struct cvmx_pko_mem_port_rate1_cn52xx cn66xx;
+ struct cvmx_pko_mem_port_rate1_s cn68xx;
+ struct cvmx_pko_mem_port_rate1_s cn68xxp1;
+ struct cvmx_pko_mem_port_rate1_cn52xx cnf71xx;
+};
+typedef union cvmx_pko_mem_port_rate1 cvmx_pko_mem_port_rate1_t;
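+
+/*
+ * Configuration sketch (illustrative only): the two rate CSRs work as a
+ * pair - PKO_MEM_PORT_RATE0 supplies the per-word and per-packet
+ * accumulator adders and PKO_MEM_PORT_RATE1 supplies the accumulator
+ * limit (writing RATE1 also zeroes the accumulator, per the note above).
+ * Assumes the CVMX_PKO_MEM_PORT_RATE0/CVMX_PKO_MEM_PORT_RATE1 address
+ * macros and cvmx_write_csr(); the numeric values are placeholders, not
+ * a computed rate.
+ *
+ *   cvmx_pko_mem_port_rate0_t r0;
+ *   cvmx_pko_mem_port_rate1_t r1;
+ *   r0.u64 = 0;
+ *   r0.s.pid = 4;
+ *   r0.s.rate_word = 100;    // adder per 8 bytes sent
+ *   r0.s.rate_pkt = 1000;    // adder per packet sent
+ *   cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, r0.u64);
+ *   r1.u64 = 0;
+ *   r1.s.pid = 4;
+ *   r1.s.rate_lim = 50000;   // rate limiting accumulator limit
+ *   cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, r1.u64);
+ */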
+
+/**
+ * cvmx_pko_mem_queue_ptrs
+ *
+ * Notes:
+ * Sets the queue to port mapping and the initial command buffer pointer, per queue.
+ * Each queue may map to at most one port. No more than 16 queues may map to a port. The set of
+ * queues that is mapped to a port must be a contiguous array of queues. The port to which queue QID
+ * is mapped is port PID. The index of queue QID in port PID's queue list is IDX. The last queue in
+ * port PID's queue array must have its TAIL bit set. Unused queues must be mapped to port 63.
+ * STATIC_Q marks queue QID as having static priority. STATIC_P marks the port PID to which QID is
+ * mapped as having at least one queue with static priority. If any QID that maps to PID has static
+ * priority, then all QID that map to PID must have STATIC_P set. Queues marked as static priority
+ * must be contiguous and begin at IDX 0. The last queue that is marked as having static priority
+ * must have its S_TAIL bit set.
+ * This CSR is a memory of 256 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. A read of any entry that has not been
+ * previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_queue_ptrs {
+ uint64_t u64;
+ struct cvmx_pko_mem_queue_ptrs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t s_tail : 1; /**< Set if this QID is the tail of the static queues */
+ uint64_t static_p : 1; /**< Set if any QID in this PID has static priority */
+ uint64_t static_q : 1; /**< Set if this QID has static priority */
+ uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */
+ uint64_t buf_ptr : 36; /**< Command buffer pointer, <23:17> MBZ */
+ uint64_t tail : 1; /**< Set if this QID is the tail of the queue array */
+ uint64_t index : 3; /**< Index[2:0] (distance from head) in the queue array */
+ uint64_t port : 6; /**< Port ID to which this queue is mapped */
+ uint64_t queue : 7; /**< Queue ID[6:0] */
+#else
+ uint64_t queue : 7;
+ uint64_t port : 6;
+ uint64_t index : 3;
+ uint64_t tail : 1;
+ uint64_t buf_ptr : 36;
+ uint64_t qos_mask : 8;
+ uint64_t static_q : 1;
+ uint64_t static_p : 1;
+ uint64_t s_tail : 1;
+#endif
+ } s;
+ struct cvmx_pko_mem_queue_ptrs_s cn30xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn31xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn38xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn38xxp2;
+ struct cvmx_pko_mem_queue_ptrs_s cn50xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn52xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn52xxp1;
+ struct cvmx_pko_mem_queue_ptrs_s cn56xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn56xxp1;
+ struct cvmx_pko_mem_queue_ptrs_s cn58xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn58xxp1;
+ struct cvmx_pko_mem_queue_ptrs_s cn61xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn63xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn63xxp1;
+ struct cvmx_pko_mem_queue_ptrs_s cn66xx;
+ struct cvmx_pko_mem_queue_ptrs_s cnf71xx;
+};
+typedef union cvmx_pko_mem_queue_ptrs cvmx_pko_mem_queue_ptrs_t;
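+
+/*
+ * Configuration sketch (illustrative only): per the notes above, unused
+ * queues must be mapped to port 63. A setup pass might park every queue
+ * it does not use; treating each parked queue as a one-entry array with
+ * TAIL set is an assumption of this sketch. Assumes the
+ * CVMX_PKO_MEM_QUEUE_PTRS address macro and cvmx_write_csr().
+ *
+ *   for (int qid = 40; qid < 128; qid++) {   // hypothetical unused range
+ *       cvmx_pko_mem_queue_ptrs_t qcfg;
+ *       qcfg.u64 = 0;
+ *       qcfg.s.queue = qid;
+ *       qcfg.s.port = 63;                    // parking port for unused queues
+ *       qcfg.s.tail = 1;                     // sketch assumption, see above
+ *       cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, qcfg.u64);
+ *   }
+ */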
+
+/**
+ * cvmx_pko_mem_queue_qos
+ *
+ * Notes:
+ * Sets the QOS mask, per queue. These QOS_MASK bits are logically and physically the same QOS_MASK
+ * bits in PKO_MEM_QUEUE_PTRS. This CSR address allows the QOS_MASK bits to be written during PKO
+ * operation without affecting any other queue state. The port to which queue QID is mapped is port
+ * PID. Note that the queue to port mapping must be the same as was previously programmed via the
+ * PKO_MEM_QUEUE_PTRS CSR.
+ * This CSR is a memory of 256 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. A read of any entry that has not been
+ * previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_queue_qos {
+ uint64_t u64;
+ struct cvmx_pko_mem_queue_qos_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */
+ uint64_t reserved_13_52 : 40;
+ uint64_t pid : 6; /**< Port ID to which this queue is mapped */
+ uint64_t qid : 7; /**< Queue ID */
+#else
+ uint64_t qid : 7;
+ uint64_t pid : 6;
+ uint64_t reserved_13_52 : 40;
+ uint64_t qos_mask : 8;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } s;
+ struct cvmx_pko_mem_queue_qos_s cn30xx;
+ struct cvmx_pko_mem_queue_qos_s cn31xx;
+ struct cvmx_pko_mem_queue_qos_s cn38xx;
+ struct cvmx_pko_mem_queue_qos_s cn38xxp2;
+ struct cvmx_pko_mem_queue_qos_s cn50xx;
+ struct cvmx_pko_mem_queue_qos_s cn52xx;
+ struct cvmx_pko_mem_queue_qos_s cn52xxp1;
+ struct cvmx_pko_mem_queue_qos_s cn56xx;
+ struct cvmx_pko_mem_queue_qos_s cn56xxp1;
+ struct cvmx_pko_mem_queue_qos_s cn58xx;
+ struct cvmx_pko_mem_queue_qos_s cn58xxp1;
+ struct cvmx_pko_mem_queue_qos_s cn61xx;
+ struct cvmx_pko_mem_queue_qos_s cn63xx;
+ struct cvmx_pko_mem_queue_qos_s cn63xxp1;
+ struct cvmx_pko_mem_queue_qos_s cn66xx;
+ struct cvmx_pko_mem_queue_qos_s cnf71xx;
+};
+typedef union cvmx_pko_mem_queue_qos cvmx_pko_mem_queue_qos_t;
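+
+/*
+ * Usage sketch (illustrative only): because QOS_MASK here aliases the same
+ * bits as PKO_MEM_QUEUE_PTRS, the mask can be rewritten while PKO is
+ * running without touching the rest of the queue state - the QID/PID pair
+ * merely repeats the mapping already programmed. Assumes the
+ * CVMX_PKO_MEM_QUEUE_QOS address macro and cvmx_write_csr().
+ *
+ *   cvmx_pko_mem_queue_qos_t qos;
+ *   qos.u64 = 0;
+ *   qos.s.qid = 40;          // queue whose mask is being changed
+ *   qos.s.pid = 4;           // must match the existing mapping
+ *   qos.s.qos_mask = 0x0f;   // now eligible in only 4 of the 8 rounds
+ *   cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, qos.u64);
+ */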
+
+/**
+ * cvmx_pko_mem_throttle_int
+ *
+ * Notes:
+ * Writing PACKET and WORD with 0 resets both counts for INT to 0 rather than adding 0.
+ * Otherwise, writes to this CSR add to the existing WORD/PACKET counts for the interface INT.
+ *
+ * PKO tracks the number of (8-byte) WORDs and PACKETs in-flight (sum total in both PKO
+ * and the interface MAC) on the interface. (When PKO first selects a packet from a PKO queue, it
+ * increments the counts appropriately. When the interface MAC has (largely) completed sending
+ * the words/packet, PKO decrements the counts appropriately.) When PKO_REG_FLAGS[ENA_THROTTLE]
+ * is set and the most-significant bit of the WORD or PACKET count for an interface is set,
+ * PKO will not transfer any packets over the interface. Software can limit the amount of
+ * packet data and/or the number of packets that OCTEON can send out the chip after receiving backpressure
+ * from the interface via these per-interface throttle counts when PKO_REG_FLAGS[ENA_THROTTLE]=1.
+ * For example, to limit the number of packets outstanding in the interface to N, preset PACKET for
+ * the interface to the value 0x20-N (0x20 is the smallest PACKET value with the most-significant bit set), as sketched below.
+ *
+ * This CSR is a memory of 32 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. The index to this CSR is an INTERFACE. A read of any
+ * entry that has not been previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_throttle_int {
+ uint64_t u64;
+ struct cvmx_pko_mem_throttle_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t word : 15; /**< On a write, the amount to add to the interface
+ throttle word count selected by INT. On a read,
+ returns the current value of the interface throttle
+ word count selected by PKO_REG_READ_IDX[IDX]. */
+ uint64_t reserved_14_31 : 18;
+ uint64_t packet : 6; /**< On a write, the amount to add to the interface
+ throttle packet count selected by INT. On a read,
+ returns the current value of the interface throttle
+ packet count selected by PKO_REG_READ_IDX[IDX]. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t intr : 5; /**< Selected interface for writes. Undefined on a read.
+ See PKO_MEM_IPORT_PTRS[INT] for encoding. */
+#else
+ uint64_t intr : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t packet : 6;
+ uint64_t reserved_14_31 : 18;
+ uint64_t word : 15;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_pko_mem_throttle_int_s cn68xx;
+ struct cvmx_pko_mem_throttle_int_s cn68xxp1;
+};
+typedef union cvmx_pko_mem_throttle_int cvmx_pko_mem_throttle_int_t;
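+
+/*
+ * Usage sketch (illustrative only) of the preset described in the notes:
+ * limiting interface INT to at most N packets in flight by writing PACKET
+ * with 0x20-N, so the count's most-significant bit becomes set once N
+ * packets are outstanding. Assumes the CVMX_PKO_MEM_THROTTLE_INT address
+ * macro and cvmx_write_csr(), and that PKO_REG_FLAGS[ENA_THROTTLE] is set
+ * separately.
+ *
+ *   cvmx_pko_mem_throttle_int_t thr;
+ *   thr.u64 = 0;
+ *   thr.s.intr = 0;           // interface to throttle
+ *   thr.s.packet = 0x20 - 8;  // allow at most N=8 outstanding packets
+ *   cvmx_write_csr(CVMX_PKO_MEM_THROTTLE_INT, thr.u64);
+ */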
+
+/**
+ * cvmx_pko_mem_throttle_pipe
+ *
+ * Notes:
+ * Writing PACKET and WORD with 0 resets both counts for PIPE to 0 rather than adding 0
+ * (as sketched below). Otherwise, writes to this CSR add to the existing WORD/PACKET
+ * counts for the PKO pipe PIPE.
+ *
+ * PKO tracks the number of (8-byte) WORDs and PACKETs in-flight (sum total in both PKO
+ * and the interface MAC) on the pipe. (When PKO first selects a packet from a PKO queue, it
+ * increments the counts appropriately. When the interface MAC has (largely) completed sending
+ * the words/packet, PKO decrements the counts appropriately.) When PKO_REG_FLAGS[ENA_THROTTLE]
+ * is set and the most-significant bit of the WORD or PACKET count for a PKO pipe is set,
+ * PKO will not transfer any packets over the PKO pipe. Software can limit the amount of
+ * packet data and/or the number of packets that OCTEON can send out the chip after receiving backpressure
+ * from the interface/pipe via these per-pipe throttle counts when PKO_REG_FLAGS[ENA_THROTTLE]=1.
+ * For example, to limit the number of packets outstanding in the pipe to N, preset PACKET for
+ * the pipe to the value 0x20-N (0x20 is the smallest PACKET value with the most-significant bit set).
+ *
+ * This CSR is a memory of 128 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. The index to this CSR is a PIPE. A read of any
+ * entry that has not been previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_pko_mem_throttle_pipe {
+ uint64_t u64;
+ struct cvmx_pko_mem_throttle_pipe_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t word : 15; /**< On a write, the amount to add to the pipe throttle
+ word count selected by PIPE. On a read, returns
+ the current value of the pipe throttle word count
+ selected by PKO_REG_READ_IDX[IDX]. */
+ uint64_t reserved_14_31 : 18;
+ uint64_t packet : 6; /**< On a write, the amount to add to the pipe throttle
+ packet count selected by PIPE. On a read, returns
+ the current value of the pipe throttle packet count
+ selected by PKO_REG_READ_IDX[IDX]. */
+ uint64_t reserved_7_7 : 1;
+ uint64_t pipe : 7; /**< Selected PKO pipe for writes. Undefined on a read. */
+#else
+ uint64_t pipe : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t packet : 6;
+ uint64_t reserved_14_31 : 18;
+ uint64_t word : 15;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_pko_mem_throttle_pipe_s cn68xx;
+ struct cvmx_pko_mem_throttle_pipe_s cn68xxp1;
+};
+typedef union cvmx_pko_mem_throttle_pipe cvmx_pko_mem_throttle_pipe_t;
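+
+/*
+ * Usage sketch (illustrative only): per the first line of the notes, a
+ * write with both PACKET=0 and WORD=0 resets the selected pipe's in-flight
+ * counts instead of adding zero. Assumes the CVMX_PKO_MEM_THROTTLE_PIPE
+ * address macro and cvmx_write_csr().
+ *
+ *   cvmx_pko_mem_throttle_pipe_t thr;
+ *   thr.u64 = 0;             // WORD = PACKET = 0 -> reset both counts
+ *   thr.s.pipe = 12;         // pipe whose counts are cleared
+ *   cvmx_write_csr(CVMX_PKO_MEM_THROTTLE_PIPE, thr.u64);
+ */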
+
+/**
+ * cvmx_pko_reg_bist_result
+ *
+ * Notes:
+ * Access to the internal BiST results
+ * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail).
+ */
+union cvmx_pko_reg_bist_result {
+ uint64_t u64;
+ struct cvmx_pko_reg_bist_result_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_pko_reg_bist_result_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t psb2 : 5; /**< BiST result of the PSB memories (0=pass, !0=fail) */
+ uint64_t count : 1; /**< BiST result of the COUNT memories (0=pass, !0=fail) */
+ uint64_t rif : 1; /**< BiST result of the RIF memories (0=pass, !0=fail) */
+ uint64_t wif : 1; /**< BiST result of the WIF memories (0=pass, !0=fail) */
+ uint64_t ncb : 1; /**< BiST result of the NCB memories (0=pass, !0=fail) */
+ uint64_t out : 1; /**< BiST result of the OUT memories (0=pass, !0=fail) */
+ uint64_t crc : 1; /**< BiST result of the CRC memories (0=pass, !0=fail) */
+ uint64_t chk : 1; /**< BiST result of the CHK memories (0=pass, !0=fail) */
+ uint64_t qsb : 2; /**< BiST result of the QSB memories (0=pass, !0=fail) */
+ uint64_t qcb : 2; /**< BiST result of the QCB memories (0=pass, !0=fail) */
+ uint64_t pdb : 4; /**< BiST result of the PDB memories (0=pass, !0=fail) */
+ uint64_t psb : 7; /**< BiST result of the PSB memories (0=pass, !0=fail) */
+#else
+ uint64_t psb : 7;
+ uint64_t pdb : 4;
+ uint64_t qcb : 2;
+ uint64_t qsb : 2;
+ uint64_t chk : 1;
+ uint64_t crc : 1;
+ uint64_t out : 1;
+ uint64_t ncb : 1;
+ uint64_t wif : 1;
+ uint64_t rif : 1;
+ uint64_t count : 1;
+ uint64_t psb2 : 5;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn30xx;
+ struct cvmx_pko_reg_bist_result_cn30xx cn31xx;
+ struct cvmx_pko_reg_bist_result_cn30xx cn38xx;
+ struct cvmx_pko_reg_bist_result_cn30xx cn38xxp2;
+ struct cvmx_pko_reg_bist_result_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t csr : 1; /**< BiST result of CSR memories (0=pass, !0=fail) */
+ uint64_t iob : 1; /**< BiST result of IOB memories (0=pass, !0=fail) */
+ uint64_t out_crc : 1; /**< BiST result of OUT_CRC memories (0=pass, !0=fail) */
+ uint64_t out_ctl : 3; /**< BiST result of OUT_CTL memories (0=pass, !0=fail) */
+ uint64_t out_sta : 1; /**< BiST result of OUT_STA memories (0=pass, !0=fail) */
+ uint64_t out_wif : 1; /**< BiST result of OUT_WIF memories (0=pass, !0=fail) */
+ uint64_t prt_chk : 3; /**< BiST result of PRT_CHK memories (0=pass, !0=fail) */
+ uint64_t prt_nxt : 1; /**< BiST result of PRT_NXT memories (0=pass, !0=fail) */
+ uint64_t prt_psb : 6; /**< BiST result of PRT_PSB memories (0=pass, !0=fail) */
+ uint64_t ncb_inb : 2; /**< BiST result of NCB_INB memories (0=pass, !0=fail) */
+ uint64_t prt_qcb : 2; /**< BiST result of PRT_QCB memories (0=pass, !0=fail) */
+ uint64_t prt_qsb : 3; /**< BiST result of PRT_QSB memories (0=pass, !0=fail) */
+ uint64_t dat_dat : 4; /**< BiST result of DAT_DAT memories (0=pass, !0=fail) */
+ uint64_t dat_ptr : 4; /**< BiST result of DAT_PTR memories (0=pass, !0=fail) */
+#else
+ uint64_t dat_ptr : 4;
+ uint64_t dat_dat : 4;
+ uint64_t prt_qsb : 3;
+ uint64_t prt_qcb : 2;
+ uint64_t ncb_inb : 2;
+ uint64_t prt_psb : 6;
+ uint64_t prt_nxt : 1;
+ uint64_t prt_chk : 3;
+ uint64_t out_wif : 1;
+ uint64_t out_sta : 1;
+ uint64_t out_ctl : 3;
+ uint64_t out_crc : 1;
+ uint64_t iob : 1;
+ uint64_t csr : 1;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } cn50xx;
+ struct cvmx_pko_reg_bist_result_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63 : 29;
+ uint64_t csr : 1; /**< BiST result of CSR memories (0=pass, !0=fail) */
+ uint64_t iob : 1; /**< BiST result of IOB memories (0=pass, !0=fail) */
+ uint64_t out_dat : 1; /**< BiST result of OUT_DAT memories (0=pass, !0=fail) */
+ uint64_t out_ctl : 3; /**< BiST result of OUT_CTL memories (0=pass, !0=fail) */
+ uint64_t out_sta : 1; /**< BiST result of OUT_STA memories (0=pass, !0=fail) */
+ uint64_t out_wif : 1; /**< BiST result of OUT_WIF memories (0=pass, !0=fail) */
+ uint64_t prt_chk : 3; /**< BiST result of PRT_CHK memories (0=pass, !0=fail) */
+ uint64_t prt_nxt : 1; /**< BiST result of PRT_NXT memories (0=pass, !0=fail) */
+ uint64_t prt_psb : 8; /**< BiST result of PRT_PSB memories (0=pass, !0=fail) */
+ uint64_t ncb_inb : 2; /**< BiST result of NCB_INB memories (0=pass, !0=fail) */
+ uint64_t prt_qcb : 2; /**< BiST result of PRT_QCB memories (0=pass, !0=fail) */
+ uint64_t prt_qsb : 3; /**< BiST result of PRT_QSB memories (0=pass, !0=fail) */
+ uint64_t prt_ctl : 2; /**< BiST result of PRT_CTL memories (0=pass, !0=fail) */
+ uint64_t dat_dat : 2; /**< BiST result of DAT_DAT memories (0=pass, !0=fail) */
+ uint64_t dat_ptr : 4; /**< BiST result of DAT_PTR memories (0=pass, !0=fail) */
+#else
+ uint64_t dat_ptr : 4;
+ uint64_t dat_dat : 2;
+ uint64_t prt_ctl : 2;
+ uint64_t prt_qsb : 3;
+ uint64_t prt_qcb : 2;
+ uint64_t ncb_inb : 2;
+ uint64_t prt_psb : 8;
+ uint64_t prt_nxt : 1;
+ uint64_t prt_chk : 3;
+ uint64_t out_wif : 1;
+ uint64_t out_sta : 1;
+ uint64_t out_ctl : 3;
+ uint64_t out_dat : 1;
+ uint64_t iob : 1;
+ uint64_t csr : 1;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } cn52xx;
+ struct cvmx_pko_reg_bist_result_cn52xx cn52xxp1;
+ struct cvmx_pko_reg_bist_result_cn52xx cn56xx;
+ struct cvmx_pko_reg_bist_result_cn52xx cn56xxp1;
+ struct cvmx_pko_reg_bist_result_cn50xx cn58xx;
+ struct cvmx_pko_reg_bist_result_cn50xx cn58xxp1;
+ struct cvmx_pko_reg_bist_result_cn52xx cn61xx;
+ struct cvmx_pko_reg_bist_result_cn52xx cn63xx;
+ struct cvmx_pko_reg_bist_result_cn52xx cn63xxp1;
+ struct cvmx_pko_reg_bist_result_cn52xx cn66xx;
+ struct cvmx_pko_reg_bist_result_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t crc : 1; /**< BiST result of CRC memories (0=pass, !0=fail) */
+ uint64_t csr : 1; /**< BiST result of CSR memories (0=pass, !0=fail) */
+ uint64_t iob : 1; /**< BiST result of IOB memories (0=pass, !0=fail) */
+ uint64_t out_dat : 1; /**< BiST result of OUT_DAT memories (0=pass, !0=fail) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t out_ctl : 2; /**< BiST result of OUT_CTL memories (0=pass, !0=fail) */
+ uint64_t out_sta : 1; /**< BiST result of OUT_STA memories (0=pass, !0=fail) */
+ uint64_t out_wif : 1; /**< BiST result of OUT_WIF memories (0=pass, !0=fail) */
+ uint64_t prt_chk : 3; /**< BiST result of PRT_CHK memories (0=pass, !0=fail) */
+ uint64_t prt_nxt : 1; /**< BiST result of PRT_NXT memories (0=pass, !0=fail) */
+ uint64_t prt_psb7 : 1; /**< BiST result of PRT_PSB memories (0=pass, !0=fail) */
+ uint64_t reserved_21_21 : 1;
+ uint64_t prt_psb : 6; /**< BiST result of PRT_PSB memories (0=pass, !0=fail) */
+ uint64_t ncb_inb : 2; /**< BiST result of NCB_INB memories (0=pass, !0=fail) */
+ uint64_t prt_qcb : 2; /**< BiST result of PRT_QCB memories (0=pass, !0=fail) */
+ uint64_t prt_qsb : 3; /**< BiST result of PRT_QSB memories (0=pass, !0=fail) */
+ uint64_t prt_ctl : 2; /**< BiST result of PRT_CTL memories (0=pass, !0=fail) */
+ uint64_t dat_dat : 2; /**< BiST result of DAT_DAT memories (0=pass, !0=fail) */
+ uint64_t dat_ptr : 4; /**< BiST result of DAT_PTR memories (0=pass, !0=fail) */
+#else
+ uint64_t dat_ptr : 4;
+ uint64_t dat_dat : 2;
+ uint64_t prt_ctl : 2;
+ uint64_t prt_qsb : 3;
+ uint64_t prt_qcb : 2;
+ uint64_t ncb_inb : 2;
+ uint64_t prt_psb : 6;
+ uint64_t reserved_21_21 : 1;
+ uint64_t prt_psb7 : 1;
+ uint64_t prt_nxt : 1;
+ uint64_t prt_chk : 3;
+ uint64_t out_wif : 1;
+ uint64_t out_sta : 1;
+ uint64_t out_ctl : 2;
+ uint64_t reserved_31_31 : 1;
+ uint64_t out_dat : 1;
+ uint64_t iob : 1;
+ uint64_t csr : 1;
+ uint64_t crc : 1;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn68xx;
+ struct cvmx_pko_reg_bist_result_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63 : 29;
+ uint64_t csr : 1; /**< BiST result of CSR memories (0=pass, !0=fail) */
+ uint64_t iob : 1; /**< BiST result of IOB memories (0=pass, !0=fail) */
+ uint64_t out_dat : 1; /**< BiST result of OUT_DAT memories (0=pass, !0=fail) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t out_ctl : 2; /**< BiST result of OUT_CTL memories (0=pass, !0=fail) */
+ uint64_t out_sta : 1; /**< BiST result of OUT_STA memories (0=pass, !0=fail) */
+ uint64_t out_wif : 1; /**< BiST result of OUT_WIF memories (0=pass, !0=fail) */
+ uint64_t prt_chk : 3; /**< BiST result of PRT_CHK memories (0=pass, !0=fail) */
+ uint64_t prt_nxt : 1; /**< BiST result of PRT_NXT memories (0=pass, !0=fail) */
+ uint64_t prt_psb7 : 1; /**< BiST result of PRT_PSB memories (0=pass, !0=fail) */
+ uint64_t reserved_21_21 : 1;
+ uint64_t prt_psb : 6; /**< BiST result of PRT_PSB memories (0=pass, !0=fail) */
+ uint64_t ncb_inb : 2; /**< BiST result of NCB_INB memories (0=pass, !0=fail) */
+ uint64_t prt_qcb : 2; /**< BiST result of PRT_QCB memories (0=pass, !0=fail) */
+ uint64_t prt_qsb : 3; /**< BiST result of PRT_QSB memories (0=pass, !0=fail) */
+ uint64_t prt_ctl : 2; /**< BiST result of PRT_CTL memories (0=pass, !0=fail) */
+ uint64_t dat_dat : 2; /**< BiST result of DAT_DAT memories (0=pass, !0=fail) */
+ uint64_t dat_ptr : 4; /**< BiST result of DAT_PTR memories (0=pass, !0=fail) */
+#else
+ uint64_t dat_ptr : 4;
+ uint64_t dat_dat : 2;
+ uint64_t prt_ctl : 2;
+ uint64_t prt_qsb : 3;
+ uint64_t prt_qcb : 2;
+ uint64_t ncb_inb : 2;
+ uint64_t prt_psb : 6;
+ uint64_t reserved_21_21 : 1;
+ uint64_t prt_psb7 : 1;
+ uint64_t prt_nxt : 1;
+ uint64_t prt_chk : 3;
+ uint64_t out_wif : 1;
+ uint64_t out_sta : 1;
+ uint64_t out_ctl : 2;
+ uint64_t reserved_31_31 : 1;
+ uint64_t out_dat : 1;
+ uint64_t iob : 1;
+ uint64_t csr : 1;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } cn68xxp1;
+ struct cvmx_pko_reg_bist_result_cn52xx cnf71xx;
+};
+typedef union cvmx_pko_reg_bist_result cvmx_pko_reg_bist_result_t;
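+
+/*
+ * Usage sketch (assumption, not from the original SDK): because every
+ * field reads 0 on pass and non-zero on fail, a whole-register test of
+ * u64 detects a BiST failure in any memory on any chip variant.
+ *
+ * @verbatim
+ * cvmx_pko_reg_bist_result_t bist;
+ * bist.u64 = cvmx_read_csr(CVMX_PKO_REG_BIST_RESULT);
+ * if (bist.u64 != 0)
+ *     cvmx_dprintf("PKO BiST failure: 0x%016llx\n",
+ *                  (unsigned long long)bist.u64);
+ * @endverbatim
+ */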
+
+/**
+ * cvmx_pko_reg_cmd_buf
+ *
+ * Notes:
+ * Sets the command buffer parameters
+ * The size of the command buffer segments is measured in uint64s. The pool specifies 1 of 8 free
+ * lists to be used when freeing command buffer segments.
+ */
+union cvmx_pko_reg_cmd_buf {
+ uint64_t u64;
+ struct cvmx_pko_reg_cmd_buf_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t pool : 3; /**< Free list used to free command buffer segments */
+ uint64_t reserved_13_19 : 7;
+ uint64_t size : 13; /**< Number of uint64s per command buffer segment */
+#else
+ uint64_t size : 13;
+ uint64_t reserved_13_19 : 7;
+ uint64_t pool : 3;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_pko_reg_cmd_buf_s cn30xx;
+ struct cvmx_pko_reg_cmd_buf_s cn31xx;
+ struct cvmx_pko_reg_cmd_buf_s cn38xx;
+ struct cvmx_pko_reg_cmd_buf_s cn38xxp2;
+ struct cvmx_pko_reg_cmd_buf_s cn50xx;
+ struct cvmx_pko_reg_cmd_buf_s cn52xx;
+ struct cvmx_pko_reg_cmd_buf_s cn52xxp1;
+ struct cvmx_pko_reg_cmd_buf_s cn56xx;
+ struct cvmx_pko_reg_cmd_buf_s cn56xxp1;
+ struct cvmx_pko_reg_cmd_buf_s cn58xx;
+ struct cvmx_pko_reg_cmd_buf_s cn58xxp1;
+ struct cvmx_pko_reg_cmd_buf_s cn61xx;
+ struct cvmx_pko_reg_cmd_buf_s cn63xx;
+ struct cvmx_pko_reg_cmd_buf_s cn63xxp1;
+ struct cvmx_pko_reg_cmd_buf_s cn66xx;
+ struct cvmx_pko_reg_cmd_buf_s cn68xx;
+ struct cvmx_pko_reg_cmd_buf_s cn68xxp1;
+ struct cvmx_pko_reg_cmd_buf_s cnf71xx;
+};
+typedef union cvmx_pko_reg_cmd_buf cvmx_pko_reg_cmd_buf_t;
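+
+/*
+ * Configuration sketch (assumption): programming the command buffer
+ * parameters along the lines of the executive's PKO support code. The
+ * CVMX_FPA_OUTPUT_BUFFER_POOL* names come from the executive
+ * configuration; the final uint64 of each segment is reserved for the
+ * next-segment link pointer, hence the "- 1".
+ *
+ * @verbatim
+ * cvmx_pko_reg_cmd_buf_t config;
+ * config.u64 = 0;
+ * config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
+ * config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;
+ * cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
+ * @endverbatim
+ */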
+
+/**
+ * cvmx_pko_reg_crc_ctl#
+ *
+ * Notes:
+ * Controls datapath reflection when calculating CRC
+ *
+ */
+union cvmx_pko_reg_crc_ctlx {
+ uint64_t u64;
+ struct cvmx_pko_reg_crc_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t invres : 1; /**< Invert the result */
+ uint64_t refin : 1; /**< Reflect the bits in each byte.
+ Byte order does not change.
+ - 0: CRC is calculated MSB to LSB
+ - 1: CRC is calculated LSB to MSB */
+#else
+ uint64_t refin : 1;
+ uint64_t invres : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_pko_reg_crc_ctlx_s cn38xx;
+ struct cvmx_pko_reg_crc_ctlx_s cn38xxp2;
+ struct cvmx_pko_reg_crc_ctlx_s cn58xx;
+ struct cvmx_pko_reg_crc_ctlx_s cn58xxp1;
+};
+typedef union cvmx_pko_reg_crc_ctlx cvmx_pko_reg_crc_ctlx_t;
+
+/**
+ * cvmx_pko_reg_crc_enable
+ *
+ * Notes:
+ * Enables CRC for the GMX ports.
+ *
+ */
+union cvmx_pko_reg_crc_enable {
+ uint64_t u64;
+ struct cvmx_pko_reg_crc_enable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t enable : 32; /**< Mask for ports 31-0 to enable CRC
+ Mask bit==0 means CRC not enabled
+ Mask bit==1 means CRC enabled
+ Note that CRC should be enabled only when using SPI4.2 */
+#else
+ uint64_t enable : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pko_reg_crc_enable_s cn38xx;
+ struct cvmx_pko_reg_crc_enable_s cn38xxp2;
+ struct cvmx_pko_reg_crc_enable_s cn58xx;
+ struct cvmx_pko_reg_crc_enable_s cn58xxp1;
+};
+typedef union cvmx_pko_reg_crc_enable cvmx_pko_reg_crc_enable_t;
+
+/**
+ * cvmx_pko_reg_crc_iv#
+ *
+ * Notes:
+ * Determines the IV used by the CRC algorithm
+ * * PKO_CRC_IV
+ * PKO_CRC_IV controls the initial state of the CRC algorithm. Octane can
+ * support a wide range of CRC algorithms and as such, the IV must be
+ * carefully constructed to meet the specific algorithm. The code below
+ * determines the value to program into Octane based on the algorithm's IV
+ * and width. In the case of Octane, the width should always be 32.
+ *
+ * PKO_CRC_IV0 sets the IV for ports 0-15 while PKO_CRC_IV1 sets the IV for
+ * ports 16-31.
+ *
+ * @verbatim
+ * unsigned octane_crc_iv(unsigned algorithm_iv, unsigned poly, unsigned w)
+ * {
+ *     int i;
+ *     int doit;
+ *     unsigned int current_val = algorithm_iv;
+ *
+ *     for(i = 0; i < w; i++) {
+ *         doit = current_val & 0x1;
+ *
+ *         if(doit) current_val ^= poly;
+ *         assert(!(current_val & 0x1));
+ *
+ *         current_val = (current_val >> 1) | (doit << (w-1));
+ *     }
+ *
+ *     return current_val;
+ * }
+ * @endverbatim
+ */
+union cvmx_pko_reg_crc_ivx {
+ uint64_t u64;
+ struct cvmx_pko_reg_crc_ivx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t iv : 32; /**< IV used by the CRC algorithm. Default is FCS32. */
+#else
+ uint64_t iv : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pko_reg_crc_ivx_s cn38xx;
+ struct cvmx_pko_reg_crc_ivx_s cn38xxp2;
+ struct cvmx_pko_reg_crc_ivx_s cn58xx;
+ struct cvmx_pko_reg_crc_ivx_s cn58xxp1;
+};
+typedef union cvmx_pko_reg_crc_ivx cvmx_pko_reg_crc_ivx_t;
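+
+/*
+ * Usage sketch (assumption): computing and programming the IV for the
+ * standard FCS32 algorithm (polynomial 0x04C11DB7, IV 0xFFFFFFFF,
+ * width 32) with the octane_crc_iv() helper from the notes above. The
+ * indexed CVMX_PKO_REG_CRC_IVX(x) address macro is assumed.
+ *
+ * @verbatim
+ * cvmx_pko_reg_crc_ivx_t crc_iv;
+ * crc_iv.u64 = 0;
+ * crc_iv.s.iv = octane_crc_iv(0xFFFFFFFF, 0x04C11DB7, 32);
+ * cvmx_write_csr(CVMX_PKO_REG_CRC_IVX(0), crc_iv.u64); // ports 0-15
+ * cvmx_write_csr(CVMX_PKO_REG_CRC_IVX(1), crc_iv.u64); // ports 16-31
+ * @endverbatim
+ */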
+
+/**
+ * cvmx_pko_reg_debug0
+ *
+ * Notes:
+ * Note that this CSR is present only in chip revisions beginning with pass2.
+ *
+ */
+union cvmx_pko_reg_debug0 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t asserts : 64; /**< Various assertion checks */
+#else
+ uint64_t asserts : 64;
+#endif
+ } s;
+ struct cvmx_pko_reg_debug0_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t asserts : 17; /**< Various assertion checks */
+#else
+ uint64_t asserts : 17;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn30xx;
+ struct cvmx_pko_reg_debug0_cn30xx cn31xx;
+ struct cvmx_pko_reg_debug0_cn30xx cn38xx;
+ struct cvmx_pko_reg_debug0_cn30xx cn38xxp2;
+ struct cvmx_pko_reg_debug0_s cn50xx;
+ struct cvmx_pko_reg_debug0_s cn52xx;
+ struct cvmx_pko_reg_debug0_s cn52xxp1;
+ struct cvmx_pko_reg_debug0_s cn56xx;
+ struct cvmx_pko_reg_debug0_s cn56xxp1;
+ struct cvmx_pko_reg_debug0_s cn58xx;
+ struct cvmx_pko_reg_debug0_s cn58xxp1;
+ struct cvmx_pko_reg_debug0_s cn61xx;
+ struct cvmx_pko_reg_debug0_s cn63xx;
+ struct cvmx_pko_reg_debug0_s cn63xxp1;
+ struct cvmx_pko_reg_debug0_s cn66xx;
+ struct cvmx_pko_reg_debug0_s cn68xx;
+ struct cvmx_pko_reg_debug0_s cn68xxp1;
+ struct cvmx_pko_reg_debug0_s cnf71xx;
+};
+typedef union cvmx_pko_reg_debug0 cvmx_pko_reg_debug0_t;
+
+/**
+ * cvmx_pko_reg_debug1
+ */
+union cvmx_pko_reg_debug1 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t asserts : 64; /**< Various assertion checks */
+#else
+ uint64_t asserts : 64;
+#endif
+ } s;
+ struct cvmx_pko_reg_debug1_s cn50xx;
+ struct cvmx_pko_reg_debug1_s cn52xx;
+ struct cvmx_pko_reg_debug1_s cn52xxp1;
+ struct cvmx_pko_reg_debug1_s cn56xx;
+ struct cvmx_pko_reg_debug1_s cn56xxp1;
+ struct cvmx_pko_reg_debug1_s cn58xx;
+ struct cvmx_pko_reg_debug1_s cn58xxp1;
+ struct cvmx_pko_reg_debug1_s cn61xx;
+ struct cvmx_pko_reg_debug1_s cn63xx;
+ struct cvmx_pko_reg_debug1_s cn63xxp1;
+ struct cvmx_pko_reg_debug1_s cn66xx;
+ struct cvmx_pko_reg_debug1_s cn68xx;
+ struct cvmx_pko_reg_debug1_s cn68xxp1;
+ struct cvmx_pko_reg_debug1_s cnf71xx;
+};
+typedef union cvmx_pko_reg_debug1 cvmx_pko_reg_debug1_t;
+
+/**
+ * cvmx_pko_reg_debug2
+ */
+union cvmx_pko_reg_debug2 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t asserts : 64; /**< Various assertion checks */
+#else
+ uint64_t asserts : 64;
+#endif
+ } s;
+ struct cvmx_pko_reg_debug2_s cn50xx;
+ struct cvmx_pko_reg_debug2_s cn52xx;
+ struct cvmx_pko_reg_debug2_s cn52xxp1;
+ struct cvmx_pko_reg_debug2_s cn56xx;
+ struct cvmx_pko_reg_debug2_s cn56xxp1;
+ struct cvmx_pko_reg_debug2_s cn58xx;
+ struct cvmx_pko_reg_debug2_s cn58xxp1;
+ struct cvmx_pko_reg_debug2_s cn61xx;
+ struct cvmx_pko_reg_debug2_s cn63xx;
+ struct cvmx_pko_reg_debug2_s cn63xxp1;
+ struct cvmx_pko_reg_debug2_s cn66xx;
+ struct cvmx_pko_reg_debug2_s cn68xx;
+ struct cvmx_pko_reg_debug2_s cn68xxp1;
+ struct cvmx_pko_reg_debug2_s cnf71xx;
+};
+typedef union cvmx_pko_reg_debug2 cvmx_pko_reg_debug2_t;
+
+/**
+ * cvmx_pko_reg_debug3
+ */
+union cvmx_pko_reg_debug3 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t asserts : 64; /**< Various assertion checks */
+#else
+ uint64_t asserts : 64;
+#endif
+ } s;
+ struct cvmx_pko_reg_debug3_s cn50xx;
+ struct cvmx_pko_reg_debug3_s cn52xx;
+ struct cvmx_pko_reg_debug3_s cn52xxp1;
+ struct cvmx_pko_reg_debug3_s cn56xx;
+ struct cvmx_pko_reg_debug3_s cn56xxp1;
+ struct cvmx_pko_reg_debug3_s cn58xx;
+ struct cvmx_pko_reg_debug3_s cn58xxp1;
+ struct cvmx_pko_reg_debug3_s cn61xx;
+ struct cvmx_pko_reg_debug3_s cn63xx;
+ struct cvmx_pko_reg_debug3_s cn63xxp1;
+ struct cvmx_pko_reg_debug3_s cn66xx;
+ struct cvmx_pko_reg_debug3_s cn68xx;
+ struct cvmx_pko_reg_debug3_s cn68xxp1;
+ struct cvmx_pko_reg_debug3_s cnf71xx;
+};
+typedef union cvmx_pko_reg_debug3 cvmx_pko_reg_debug3_t;
+
+/**
+ * cvmx_pko_reg_debug4
+ */
+union cvmx_pko_reg_debug4 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t asserts : 64; /**< Various assertion checks */
+#else
+ uint64_t asserts : 64;
+#endif
+ } s;
+ struct cvmx_pko_reg_debug4_s cn68xx;
+ struct cvmx_pko_reg_debug4_s cn68xxp1;
+};
+typedef union cvmx_pko_reg_debug4 cvmx_pko_reg_debug4_t;
+
+/**
+ * cvmx_pko_reg_engine_inflight
+ *
+ * Notes:
+ * Sets the maximum number of inflight packets, per engine. Values greater than 4 are illegal.
+ * Setting an engine's value to 0 effectively stops the engine.
+ */
+union cvmx_pko_reg_engine_inflight {
+ uint64_t u64;
+ struct cvmx_pko_reg_engine_inflight_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t engine15 : 4; /**< Maximum number of inflight packets for engine15 */
+ uint64_t engine14 : 4; /**< Maximum number of inflight packets for engine14 */
+ uint64_t engine13 : 4; /**< Maximum number of inflight packets for engine13 */
+ uint64_t engine12 : 4; /**< Maximum number of inflight packets for engine12 */
+ uint64_t engine11 : 4; /**< Maximum number of inflight packets for engine11 */
+ uint64_t engine10 : 4; /**< Maximum number of inflight packets for engine10 */
+ uint64_t engine9 : 4; /**< Maximum number of inflight packets for engine9 */
+ uint64_t engine8 : 4; /**< Maximum number of inflight packets for engine8 */
+ uint64_t engine7 : 4; /**< Maximum number of inflight packets for engine7 */
+ uint64_t engine6 : 4; /**< Maximum number of inflight packets for engine6 */
+ uint64_t engine5 : 4; /**< Maximum number of inflight packets for engine5 */
+ uint64_t engine4 : 4; /**< Maximum number of inflight packets for engine4 */
+ uint64_t engine3 : 4; /**< Maximum number of inflight packets for engine3 */
+ uint64_t engine2 : 4; /**< Maximum number of inflight packets for engine2 */
+ uint64_t engine1 : 4; /**< Maximum number of inflight packets for engine1 */
+ uint64_t engine0 : 4; /**< Maximum number of inflight packets for engine0 */
+#else
+ uint64_t engine0 : 4;
+ uint64_t engine1 : 4;
+ uint64_t engine2 : 4;
+ uint64_t engine3 : 4;
+ uint64_t engine4 : 4;
+ uint64_t engine5 : 4;
+ uint64_t engine6 : 4;
+ uint64_t engine7 : 4;
+ uint64_t engine8 : 4;
+ uint64_t engine9 : 4;
+ uint64_t engine10 : 4;
+ uint64_t engine11 : 4;
+ uint64_t engine12 : 4;
+ uint64_t engine13 : 4;
+ uint64_t engine14 : 4;
+ uint64_t engine15 : 4;
+#endif
+ } s;
+ struct cvmx_pko_reg_engine_inflight_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t engine9 : 4; /**< Maximum number of inflight packets for engine9 */
+ uint64_t engine8 : 4; /**< Maximum number of inflight packets for engine8 */
+ uint64_t engine7 : 4; /**< MBZ */
+ uint64_t engine6 : 4; /**< MBZ */
+ uint64_t engine5 : 4; /**< MBZ */
+ uint64_t engine4 : 4; /**< MBZ */
+ uint64_t engine3 : 4; /**< Maximum number of inflight packets for engine3 */
+ uint64_t engine2 : 4; /**< Maximum number of inflight packets for engine2 */
+ uint64_t engine1 : 4; /**< Maximum number of inflight packets for engine1 */
+ uint64_t engine0 : 4; /**< Maximum number of inflight packets for engine0 */
+#else
+ uint64_t engine0 : 4;
+ uint64_t engine1 : 4;
+ uint64_t engine2 : 4;
+ uint64_t engine3 : 4;
+ uint64_t engine4 : 4;
+ uint64_t engine5 : 4;
+ uint64_t engine6 : 4;
+ uint64_t engine7 : 4;
+ uint64_t engine8 : 4;
+ uint64_t engine9 : 4;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } cn52xx;
+ struct cvmx_pko_reg_engine_inflight_cn52xx cn52xxp1;
+ struct cvmx_pko_reg_engine_inflight_cn52xx cn56xx;
+ struct cvmx_pko_reg_engine_inflight_cn52xx cn56xxp1;
+ struct cvmx_pko_reg_engine_inflight_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t engine13 : 4; /**< Maximum number of inflight packets for engine13 */
+ uint64_t engine12 : 4; /**< Maximum number of inflight packets for engine12 */
+ uint64_t engine11 : 4; /**< Maximum number of inflight packets for engine11 */
+ uint64_t engine10 : 4; /**< Maximum number of inflight packets for engine10 */
+ uint64_t engine9 : 4; /**< Maximum number of inflight packets for engine9 */
+ uint64_t engine8 : 4; /**< Maximum number of inflight packets for engine8 */
+ uint64_t engine7 : 4; /**< Maximum number of inflight packets for engine7 */
+ uint64_t engine6 : 4; /**< Maximum number of inflight packets for engine6 */
+ uint64_t engine5 : 4; /**< Maximum number of inflight packets for engine5 */
+ uint64_t engine4 : 4; /**< Maximum number of inflight packets for engine4 */
+ uint64_t engine3 : 4; /**< Maximum number of inflight packets for engine3 */
+ uint64_t engine2 : 4; /**< Maximum number of inflight packets for engine2 */
+ uint64_t engine1 : 4; /**< Maximum number of inflight packets for engine1 */
+ uint64_t engine0 : 4; /**< Maximum number of inflight packets for engine0 */
+#else
+ uint64_t engine0 : 4;
+ uint64_t engine1 : 4;
+ uint64_t engine2 : 4;
+ uint64_t engine3 : 4;
+ uint64_t engine4 : 4;
+ uint64_t engine5 : 4;
+ uint64_t engine6 : 4;
+ uint64_t engine7 : 4;
+ uint64_t engine8 : 4;
+ uint64_t engine9 : 4;
+ uint64_t engine10 : 4;
+ uint64_t engine11 : 4;
+ uint64_t engine12 : 4;
+ uint64_t engine13 : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn61xx;
+ struct cvmx_pko_reg_engine_inflight_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t engine11 : 4; /**< Maximum number of inflight packets for engine11 */
+ uint64_t engine10 : 4; /**< Maximum number of inflight packets for engine10 */
+ uint64_t engine9 : 4; /**< Maximum number of inflight packets for engine9 */
+ uint64_t engine8 : 4; /**< Maximum number of inflight packets for engine8 */
+ uint64_t engine7 : 4; /**< MBZ */
+ uint64_t engine6 : 4; /**< MBZ */
+ uint64_t engine5 : 4; /**< MBZ */
+ uint64_t engine4 : 4; /**< MBZ */
+ uint64_t engine3 : 4; /**< Maximum number of inflight packets for engine3 */
+ uint64_t engine2 : 4; /**< Maximum number of inflight packets for engine2 */
+ uint64_t engine1 : 4; /**< Maximum number of inflight packets for engine1 */
+ uint64_t engine0 : 4; /**< Maximum number of inflight packets for engine0 */
+#else
+ uint64_t engine0 : 4;
+ uint64_t engine1 : 4;
+ uint64_t engine2 : 4;
+ uint64_t engine3 : 4;
+ uint64_t engine4 : 4;
+ uint64_t engine5 : 4;
+ uint64_t engine6 : 4;
+ uint64_t engine7 : 4;
+ uint64_t engine8 : 4;
+ uint64_t engine9 : 4;
+ uint64_t engine10 : 4;
+ uint64_t engine11 : 4;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } cn63xx;
+ struct cvmx_pko_reg_engine_inflight_cn63xx cn63xxp1;
+ struct cvmx_pko_reg_engine_inflight_cn61xx cn66xx;
+ struct cvmx_pko_reg_engine_inflight_s cn68xx;
+ struct cvmx_pko_reg_engine_inflight_s cn68xxp1;
+ struct cvmx_pko_reg_engine_inflight_cn61xx cnf71xx;
+};
+typedef union cvmx_pko_reg_engine_inflight cvmx_pko_reg_engine_inflight_t;
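+
+/*
+ * Configuration sketch (assumption): capping engine 0 at two inflight
+ * packets and stopping engine 1 outright. Per the notes above, values
+ * greater than 4 are illegal and 0 stops the engine.
+ *
+ * @verbatim
+ * cvmx_pko_reg_engine_inflight_t inflight;
+ * inflight.u64 = cvmx_read_csr(CVMX_PKO_REG_ENGINE_INFLIGHT);
+ * inflight.s.engine0 = 2;   // at most 2 packets inflight
+ * inflight.s.engine1 = 0;   // effectively stops engine 1
+ * cvmx_write_csr(CVMX_PKO_REG_ENGINE_INFLIGHT, inflight.u64);
+ * @endverbatim
+ */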
+
+/**
+ * cvmx_pko_reg_engine_inflight1
+ *
+ * Notes:
+ * Sets the maximum number of inflight packets, per engine. Values greater than 8 are illegal.
+ * Setting an engine's value to 0 effectively stops the engine.
+ */
+union cvmx_pko_reg_engine_inflight1 {
+ uint64_t u64;
+ struct cvmx_pko_reg_engine_inflight1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t engine19 : 4; /**< Maximum number of inflight packets for engine19 */
+ uint64_t engine18 : 4; /**< Maximum number of inflight packets for engine18 */
+ uint64_t engine17 : 4; /**< Maximum number of inflight packets for engine17 */
+ uint64_t engine16 : 4; /**< Maximum number of inflight packets for engine16 */
+#else
+ uint64_t engine16 : 4;
+ uint64_t engine17 : 4;
+ uint64_t engine18 : 4;
+ uint64_t engine19 : 4;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pko_reg_engine_inflight1_s cn68xx;
+ struct cvmx_pko_reg_engine_inflight1_s cn68xxp1;
+};
+typedef union cvmx_pko_reg_engine_inflight1 cvmx_pko_reg_engine_inflight1_t;
+
+/**
+ * cvmx_pko_reg_engine_storage#
+ *
+ * Notes:
+ * The PKO has 40KB of local storage, consisting of 20, 2KB chunks. Up to 15 contiguous chunks may be mapped per engine.
+ * The total of all mapped storage must not exceed 40KB.
+ */
+union cvmx_pko_reg_engine_storagex {
+ uint64_t u64;
+ struct cvmx_pko_reg_engine_storagex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t engine15 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 15.
+ ENGINE15 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine14 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 14.
+ ENGINE14 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine13 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 13.
+ ENGINE13 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine12 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 12.
+ ENGINE12 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine11 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 11.
+ ENGINE11 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine10 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 10.
+ ENGINE10 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine9 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 9.
+ ENGINE9 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine8 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 8.
+ ENGINE8 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine7 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 7.
+ ENGINE7 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine6 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 6.
+ ENGINE6 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine5 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 5.
+ ENGINE5 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine4 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 4.
+ ENGINE4 does not exist and is reserved in
+ PKO_REG_ENGINE_STORAGE1. */
+ uint64_t engine3 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 3. */
+ uint64_t engine2 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 2. */
+ uint64_t engine1 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 1. */
+ uint64_t engine0 : 4; /**< Number of contiguous 2KB chunks allocated to
+ engine (X * 16) + 0. */
+#else
+ uint64_t engine0 : 4;
+ uint64_t engine1 : 4;
+ uint64_t engine2 : 4;
+ uint64_t engine3 : 4;
+ uint64_t engine4 : 4;
+ uint64_t engine5 : 4;
+ uint64_t engine6 : 4;
+ uint64_t engine7 : 4;
+ uint64_t engine8 : 4;
+ uint64_t engine9 : 4;
+ uint64_t engine10 : 4;
+ uint64_t engine11 : 4;
+ uint64_t engine12 : 4;
+ uint64_t engine13 : 4;
+ uint64_t engine14 : 4;
+ uint64_t engine15 : 4;
+#endif
+ } s;
+ struct cvmx_pko_reg_engine_storagex_s cn68xx;
+ struct cvmx_pko_reg_engine_storagex_s cn68xxp1;
+};
+typedef union cvmx_pko_reg_engine_storagex cvmx_pko_reg_engine_storagex_t;
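+
+/*
+ * Sanity-check sketch (assumption): the 16 fields are contiguous 4-bit
+ * nibbles, so the chunks mapped by both PKO_REG_ENGINE_STORAGE0/1 can
+ * be summed by walking the nibbles; the total must not exceed the 20
+ * available 2KB chunks (40KB). CVMX_PKO_REG_ENGINE_STORAGEX(x) is an
+ * assumed indexed address macro.
+ *
+ * @verbatim
+ * unsigned total = 0;
+ * int x, i;
+ * for (x = 0; x < 2; x++) {
+ *     uint64_t st = cvmx_read_csr(CVMX_PKO_REG_ENGINE_STORAGEX(x));
+ *     for (i = 0; i < 16; i++)
+ *         total += (st >> (4 * i)) & 0xf; // one engine per nibble
+ * }
+ * // total must be <= 20 (20 chunks x 2KB = 40KB)
+ * @endverbatim
+ */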
+
+/**
+ * cvmx_pko_reg_engine_thresh
+ *
+ * Notes:
+ * When not enabled, packet data may be sent as soon as it is written into PKO's internal buffers.
+ * When enabled and the packet fits entirely in the PKO's internal buffer, none of the packet data will
+ * be sent until all of it has been written into the PKO's internal buffer. Note that a packet is
+ * considered to fit entirely only if the packet's size is <= BUFFER_SIZE-8. When enabled and the
+ * packet does not fit entirely in the PKO's internal buffer, none of the packet data will be sent until
+ * at least BUFFER_SIZE-256 bytes of the packet have been written into the PKO's internal buffer
+ * (note that BUFFER_SIZE is a function of PKO_REG_GMX_PORT_MODE above)
+ */
+union cvmx_pko_reg_engine_thresh {
+ uint64_t u64;
+ struct cvmx_pko_reg_engine_thresh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t mask : 20; /**< Mask[n]=0 disables packet send threshold for engine n
+ Mask[n]=1 enables packet send threshold for engine n $PR NS */
+#else
+ uint64_t mask : 20;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_pko_reg_engine_thresh_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t mask : 10; /**< Mask[n]=0 disables packet send threshold for eng n
+ Mask[n]=1 enables packet send threshold for eng n $PR NS
+ Mask[n] MBZ for n = 4-7, as engines 4-7 don't exist */
+#else
+ uint64_t mask : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn52xx;
+ struct cvmx_pko_reg_engine_thresh_cn52xx cn52xxp1;
+ struct cvmx_pko_reg_engine_thresh_cn52xx cn56xx;
+ struct cvmx_pko_reg_engine_thresh_cn52xx cn56xxp1;
+ struct cvmx_pko_reg_engine_thresh_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t mask : 14; /**< Mask[n]=0 disables packet send threshold for engine n
+ Mask[n]=1 enables packet send threshold for engine n $PR NS */
+#else
+ uint64_t mask : 14;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn61xx;
+ struct cvmx_pko_reg_engine_thresh_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t mask : 12; /**< Mask[n]=0 disables packet send threshold for engine n
+ Mask[n]=1 enables packet send threshold for engine n $PR NS
+ Mask[n] MBZ for n = 4-7, as engines 4-7 don't exist */
+#else
+ uint64_t mask : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn63xx;
+ struct cvmx_pko_reg_engine_thresh_cn63xx cn63xxp1;
+ struct cvmx_pko_reg_engine_thresh_cn61xx cn66xx;
+ struct cvmx_pko_reg_engine_thresh_s cn68xx;
+ struct cvmx_pko_reg_engine_thresh_s cn68xxp1;
+ struct cvmx_pko_reg_engine_thresh_cn61xx cnf71xx;
+};
+typedef union cvmx_pko_reg_engine_thresh cvmx_pko_reg_engine_thresh_t;
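+
+/*
+ * Configuration sketch (assumption): enabling the packet send threshold
+ * for engines 0 and 1 only; bit n of MASK corresponds to engine n.
+ *
+ * @verbatim
+ * cvmx_pko_reg_engine_thresh_t thresh;
+ * thresh.u64 = 0;
+ * thresh.s.mask = 0x3;   // engines 0 and 1
+ * cvmx_write_csr(CVMX_PKO_REG_ENGINE_THRESH, thresh.u64);
+ * @endverbatim
+ */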
+
+/**
+ * cvmx_pko_reg_error
+ *
+ * Notes:
+ * Note that this CSR is present only in chip revisions beginning with pass2.
+ *
+ */
+union cvmx_pko_reg_error {
+ uint64_t u64;
+ struct cvmx_pko_reg_error_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t loopback : 1; /**< A packet was sent to an illegal loopback port */
+ uint64_t currzero : 1; /**< A packet data pointer has size=0 */
+ uint64_t doorbell : 1; /**< A doorbell count has overflowed */
+ uint64_t parity : 1; /**< Read parity error at port data buffer */
+#else
+ uint64_t parity : 1;
+ uint64_t doorbell : 1;
+ uint64_t currzero : 1;
+ uint64_t loopback : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_pko_reg_error_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t doorbell : 1; /**< A doorbell count has overflowed */
+ uint64_t parity : 1; /**< Read parity error at port data buffer */
+#else
+ uint64_t parity : 1;
+ uint64_t doorbell : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn30xx;
+ struct cvmx_pko_reg_error_cn30xx cn31xx;
+ struct cvmx_pko_reg_error_cn30xx cn38xx;
+ struct cvmx_pko_reg_error_cn30xx cn38xxp2;
+ struct cvmx_pko_reg_error_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t currzero : 1; /**< A packet data pointer has size=0 */
+ uint64_t doorbell : 1; /**< A doorbell count has overflowed */
+ uint64_t parity : 1; /**< Read parity error at port data buffer */
+#else
+ uint64_t parity : 1;
+ uint64_t doorbell : 1;
+ uint64_t currzero : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn50xx;
+ struct cvmx_pko_reg_error_cn50xx cn52xx;
+ struct cvmx_pko_reg_error_cn50xx cn52xxp1;
+ struct cvmx_pko_reg_error_cn50xx cn56xx;
+ struct cvmx_pko_reg_error_cn50xx cn56xxp1;
+ struct cvmx_pko_reg_error_cn50xx cn58xx;
+ struct cvmx_pko_reg_error_cn50xx cn58xxp1;
+ struct cvmx_pko_reg_error_cn50xx cn61xx;
+ struct cvmx_pko_reg_error_cn50xx cn63xx;
+ struct cvmx_pko_reg_error_cn50xx cn63xxp1;
+ struct cvmx_pko_reg_error_cn50xx cn66xx;
+ struct cvmx_pko_reg_error_s cn68xx;
+ struct cvmx_pko_reg_error_s cn68xxp1;
+ struct cvmx_pko_reg_error_cn50xx cnf71xx;
+};
+typedef union cvmx_pko_reg_error cvmx_pko_reg_error_t;
+
+/**
+ * cvmx_pko_reg_flags
+ *
+ * Notes:
+ * When set, ENA_PKO enables the PKO picker and places the PKO in normal operation. When set, ENA_DWB
+ * enables the use of DontWriteBacks during the buffer freeing operations. When not set, STORE_BE inverts
+ * bits[2:0] of the STORE0 byte write address. When set, RESET causes a 4-cycle reset pulse to the
+ * entire box.
+ */
+union cvmx_pko_reg_flags {
+ uint64_t u64;
+ struct cvmx_pko_reg_flags_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t dis_perf3 : 1; /**< Set to disable inactive queue QOS skipping */
+ uint64_t dis_perf2 : 1; /**< Set to disable inactive queue skipping */
+ uint64_t dis_perf1 : 1; /**< Set to disable command word prefetching */
+ uint64_t dis_perf0 : 1; /**< Set to disable read performance optimizations */
+ uint64_t ena_throttle : 1; /**< Set to enable the PKO picker throttle logic
+ When ENA_THROTTLE=1 and the most-significant
+ bit of any of the pipe or interface, word or
+ packet throttle count is set, then PKO will
+ not output any packets to the interface/pipe.
+ See PKO_MEM_THROTTLE_PIPE and
+ PKO_MEM_THROTTLE_INT. */
+ uint64_t reset : 1; /**< Reset oneshot pulse */
+ uint64_t store_be : 1; /**< Force STORE0 byte write address to big endian */
+ uint64_t ena_dwb : 1; /**< Set to enable DontWriteBacks */
+ uint64_t ena_pko : 1; /**< Set to enable the PKO picker */
+#else
+ uint64_t ena_pko : 1;
+ uint64_t ena_dwb : 1;
+ uint64_t store_be : 1;
+ uint64_t reset : 1;
+ uint64_t ena_throttle : 1;
+ uint64_t dis_perf0 : 1;
+ uint64_t dis_perf1 : 1;
+ uint64_t dis_perf2 : 1;
+ uint64_t dis_perf3 : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_pko_reg_flags_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t reset : 1; /**< Reset oneshot pulse */
+ uint64_t store_be : 1; /**< Force STORE0 byte write address to big endian */
+ uint64_t ena_dwb : 1; /**< Set to enable DontWriteBacks */
+ uint64_t ena_pko : 1; /**< Set to enable the PKO picker */
+#else
+ uint64_t ena_pko : 1;
+ uint64_t ena_dwb : 1;
+ uint64_t store_be : 1;
+ uint64_t reset : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn30xx;
+ struct cvmx_pko_reg_flags_cn30xx cn31xx;
+ struct cvmx_pko_reg_flags_cn30xx cn38xx;
+ struct cvmx_pko_reg_flags_cn30xx cn38xxp2;
+ struct cvmx_pko_reg_flags_cn30xx cn50xx;
+ struct cvmx_pko_reg_flags_cn30xx cn52xx;
+ struct cvmx_pko_reg_flags_cn30xx cn52xxp1;
+ struct cvmx_pko_reg_flags_cn30xx cn56xx;
+ struct cvmx_pko_reg_flags_cn30xx cn56xxp1;
+ struct cvmx_pko_reg_flags_cn30xx cn58xx;
+ struct cvmx_pko_reg_flags_cn30xx cn58xxp1;
+ struct cvmx_pko_reg_flags_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t dis_perf3 : 1; /**< Set to disable inactive queue QOS skipping */
+ uint64_t dis_perf2 : 1; /**< Set to disable inactive queue skipping */
+ uint64_t reserved_4_6 : 3;
+ uint64_t reset : 1; /**< Reset oneshot pulse */
+ uint64_t store_be : 1; /**< Force STORE0 byte write address to big endian */
+ uint64_t ena_dwb : 1; /**< Set to enable DontWriteBacks */
+ uint64_t ena_pko : 1; /**< Set to enable the PKO picker */
+#else
+ uint64_t ena_pko : 1;
+ uint64_t ena_dwb : 1;
+ uint64_t store_be : 1;
+ uint64_t reset : 1;
+ uint64_t reserved_4_6 : 3;
+ uint64_t dis_perf2 : 1;
+ uint64_t dis_perf3 : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn61xx;
+ struct cvmx_pko_reg_flags_cn30xx cn63xx;
+ struct cvmx_pko_reg_flags_cn30xx cn63xxp1;
+ struct cvmx_pko_reg_flags_cn61xx cn66xx;
+ struct cvmx_pko_reg_flags_s cn68xx;
+ struct cvmx_pko_reg_flags_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t dis_perf1 : 1; /**< Set to disable command word prefetching */
+ uint64_t dis_perf0 : 1; /**< Set to disable read performance optimizations */
+ uint64_t ena_throttle : 1; /**< Set to enable the PKO picker throttle logic
+ When ENA_THROTTLE=1 and the most-significant
+ bit of any of the pipe or interface, word or
+ packet throttle count is set, then PKO will
+ not output any packets to the interface/pipe.
+ See PKO_MEM_THROTTLE_PIPE and
+ PKO_MEM_THROTTLE_INT. */
+ uint64_t reset : 1; /**< Reset oneshot pulse */
+ uint64_t store_be : 1; /**< Force STORE0 byte write address to big endian */
+ uint64_t ena_dwb : 1; /**< Set to enable DontWriteBacks */
+ uint64_t ena_pko : 1; /**< Set to enable the PKO picker */
+#else
+ uint64_t ena_pko : 1;
+ uint64_t ena_dwb : 1;
+ uint64_t store_be : 1;
+ uint64_t reset : 1;
+ uint64_t ena_throttle : 1;
+ uint64_t dis_perf0 : 1;
+ uint64_t dis_perf1 : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } cn68xxp1;
+ struct cvmx_pko_reg_flags_cn61xx cnf71xx;
+};
+typedef union cvmx_pko_reg_flags cvmx_pko_reg_flags_t;
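+
+/*
+ * Bring-up sketch (assumption): once queues and ports are configured,
+ * enabling DontWriteBacks and the PKO picker is the usual final step,
+ * similar in spirit to what cvmx_pko_enable() does in cvmx-pko.c.
+ *
+ * @verbatim
+ * cvmx_pko_reg_flags_t flags;
+ * flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+ * flags.s.ena_dwb = 1;   // allow DontWriteBacks on buffer frees
+ * flags.s.ena_pko = 1;   // start the PKO picker
+ * cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
+ * @endverbatim
+ */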
+
+/**
+ * cvmx_pko_reg_gmx_port_mode
+ *
+ * Notes:
+ * The system has a total of 4 + 4 + 4 + 4 + 4 ports and 4 + 4 + 1 + 1 + 1 + 1 engines (GM0 + GM1 + PCI + LOOP + SRIO0 + SRIO1 + SRIO2 + SRIO3).
+ * This CSR sets the number of GMX0/GMX1 ports and amount of local storage per engine.
+ * It has no effect on the number of ports or amount of local storage per engine for PCI, LOOP,
+ * SRIO0, SRIO1, SRIO2, or SRIO3. When all GMX ports are used (MODE0=2), each GMX engine has 2.5kB of local
+ * storage. Increasing the value of MODEn by 1 decreases the number of GMX ports by a power of 2 and
+ * increases the local storage per PKO GMX engine by a power of 2. If one of the modes is 5, then only
+ * one of interfaces GM0 or GM1 is present and the storage per engine of the existing interface is
+ * doubled. Modes 0 and 1 are illegal and, if selected, are treated as mode 2.
+ *
+ * MODE[n] GM[n] PCI LOOP GM[n] PCI LOOP SRIO[n]
+ * ports ports ports storage/engine storage/engine storage/engine storage/engine
+ * 0 4 4 4 ( 2.5kB << (MODE[1-n]==5)) 2.5kB 2.5kB 2.5kB
+ * 1 4 4 4 ( 2.5kB << (MODE[1-n]==5)) 2.5kB 2.5kB 2.5kB
+ * 2 4 4 4 ( 2.5kB << (MODE[1-n]==5)) 2.5kB 2.5kB 2.5kB
+ * 3 2 4 4 ( 5.0kB << (MODE[1-n]==5)) 2.5kB 2.5kB 2.5kB
+ * 4 1 4 4 (10.0kB << (MODE[1-n]==5)) 2.5kB 2.5kB 2.5kB
+ * 5 0 4 4 ( 0kB ) 2.5kB 2.5kB 2.5kB
+ * where 0 <= n <= 1
+ */
+union cvmx_pko_reg_gmx_port_mode {
+ uint64_t u64;
+ struct cvmx_pko_reg_gmx_port_mode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t mode1 : 3; /**< # of GM1 ports = 16 >> MODE1, 0 <= MODE1 <= 4 */
+ uint64_t mode0 : 3; /**< # of GM0 ports = 16 >> MODE0, 0 <= MODE0 <= 4 */
+#else
+ uint64_t mode0 : 3;
+ uint64_t mode1 : 3;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_pko_reg_gmx_port_mode_s cn30xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn31xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn38xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn38xxp2;
+ struct cvmx_pko_reg_gmx_port_mode_s cn50xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn52xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn52xxp1;
+ struct cvmx_pko_reg_gmx_port_mode_s cn56xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn56xxp1;
+ struct cvmx_pko_reg_gmx_port_mode_s cn58xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn58xxp1;
+ struct cvmx_pko_reg_gmx_port_mode_s cn61xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn63xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn63xxp1;
+ struct cvmx_pko_reg_gmx_port_mode_s cn66xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cnf71xx;
+};
+typedef union cvmx_pko_reg_gmx_port_mode cvmx_pko_reg_gmx_port_mode_t;
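+
+/*
+ * Worked example (assumption): MODE0=3 gives GM0 16 >> 3 = 2 ports with
+ * 5.0kB of storage per GMX engine, and MODE1=4 gives GM1 a single port
+ * with 10.0kB per engine, per the table above.
+ *
+ * @verbatim
+ * cvmx_pko_reg_gmx_port_mode_t mode;
+ * mode.u64 = 0;
+ * mode.s.mode0 = 3;   // GM0: 2 ports, 5.0kB/engine
+ * mode.s.mode1 = 4;   // GM1: 1 port, 10.0kB/engine
+ * cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, mode.u64);
+ * @endverbatim
+ */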
+
+/**
+ * cvmx_pko_reg_int_mask
+ *
+ * Notes:
+ * When a mask bit is set, the corresponding interrupt is enabled.
+ *
+ */
+union cvmx_pko_reg_int_mask {
+ uint64_t u64;
+ struct cvmx_pko_reg_int_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t loopback : 1; /**< Bit mask corresponding to PKO_REG_ERROR[3] above */
+ uint64_t currzero : 1; /**< Bit mask corresponding to PKO_REG_ERROR[2] above */
+ uint64_t doorbell : 1; /**< Bit mask corresponding to PKO_REG_ERROR[1] above */
+ uint64_t parity : 1; /**< Bit mask corresponding to PKO_REG_ERROR[0] above */
+#else
+ uint64_t parity : 1;
+ uint64_t doorbell : 1;
+ uint64_t currzero : 1;
+ uint64_t loopback : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_pko_reg_int_mask_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t doorbell : 1; /**< Bit mask corresponding to PKO_REG_ERROR[1] above */
+ uint64_t parity : 1; /**< Bit mask corresponding to PKO_REG_ERROR[0] above */
+#else
+ uint64_t parity : 1;
+ uint64_t doorbell : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn30xx;
+ struct cvmx_pko_reg_int_mask_cn30xx cn31xx;
+ struct cvmx_pko_reg_int_mask_cn30xx cn38xx;
+ struct cvmx_pko_reg_int_mask_cn30xx cn38xxp2;
+ struct cvmx_pko_reg_int_mask_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t currzero : 1; /**< Bit mask corresponding to PKO_REG_ERROR[2] above */
+ uint64_t doorbell : 1; /**< Bit mask corresponding to PKO_REG_ERROR[1] above */
+ uint64_t parity : 1; /**< Bit mask corresponding to PKO_REG_ERROR[0] above */
+#else
+ uint64_t parity : 1;
+ uint64_t doorbell : 1;
+ uint64_t currzero : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn50xx;
+ struct cvmx_pko_reg_int_mask_cn50xx cn52xx;
+ struct cvmx_pko_reg_int_mask_cn50xx cn52xxp1;
+ struct cvmx_pko_reg_int_mask_cn50xx cn56xx;
+ struct cvmx_pko_reg_int_mask_cn50xx cn56xxp1;
+ struct cvmx_pko_reg_int_mask_cn50xx cn58xx;
+ struct cvmx_pko_reg_int_mask_cn50xx cn58xxp1;
+ struct cvmx_pko_reg_int_mask_cn50xx cn61xx;
+ struct cvmx_pko_reg_int_mask_cn50xx cn63xx;
+ struct cvmx_pko_reg_int_mask_cn50xx cn63xxp1;
+ struct cvmx_pko_reg_int_mask_cn50xx cn66xx;
+ struct cvmx_pko_reg_int_mask_s cn68xx;
+ struct cvmx_pko_reg_int_mask_s cn68xxp1;
+ struct cvmx_pko_reg_int_mask_cn50xx cnf71xx;
+};
+typedef union cvmx_pko_reg_int_mask cvmx_pko_reg_int_mask_t;
+
+/**
+ * cvmx_pko_reg_loopback_bpid
+ *
+ * Notes:
+ * None.
+ *
+ */
+union cvmx_pko_reg_loopback_bpid {
+ uint64_t u64;
+ struct cvmx_pko_reg_loopback_bpid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t bpid7 : 6; /**< Loopback port 7 backpressure-ID */
+ uint64_t reserved_52_52 : 1;
+ uint64_t bpid6 : 6; /**< Loopback port 6 backpressure-ID */
+ uint64_t reserved_45_45 : 1;
+ uint64_t bpid5 : 6; /**< Loopback port 5 backpressure-ID */
+ uint64_t reserved_38_38 : 1;
+ uint64_t bpid4 : 6; /**< Loopback port 4 backpressure-ID */
+ uint64_t reserved_31_31 : 1;
+ uint64_t bpid3 : 6; /**< Loopback port 3 backpressure-ID */
+ uint64_t reserved_24_24 : 1;
+ uint64_t bpid2 : 6; /**< Loopback port 2 backpressure-ID */
+ uint64_t reserved_17_17 : 1;
+ uint64_t bpid1 : 6; /**< Loopback port 1 backpressure-ID */
+ uint64_t reserved_10_10 : 1;
+ uint64_t bpid0 : 6; /**< Loopback port 0 backpressure-ID */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t bpid0 : 6;
+ uint64_t reserved_10_10 : 1;
+ uint64_t bpid1 : 6;
+ uint64_t reserved_17_17 : 1;
+ uint64_t bpid2 : 6;
+ uint64_t reserved_24_24 : 1;
+ uint64_t bpid3 : 6;
+ uint64_t reserved_31_31 : 1;
+ uint64_t bpid4 : 6;
+ uint64_t reserved_38_38 : 1;
+ uint64_t bpid5 : 6;
+ uint64_t reserved_45_45 : 1;
+ uint64_t bpid6 : 6;
+ uint64_t reserved_52_52 : 1;
+ uint64_t bpid7 : 6;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } s;
+ struct cvmx_pko_reg_loopback_bpid_s cn68xx;
+ struct cvmx_pko_reg_loopback_bpid_s cn68xxp1;
+};
+typedef union cvmx_pko_reg_loopback_bpid cvmx_pko_reg_loopback_bpid_t;
+
+/**
+ * cvmx_pko_reg_loopback_pkind
+ *
+ * Notes:
+ * None.
+ *
+ */
+union cvmx_pko_reg_loopback_pkind {
+ uint64_t u64;
+ struct cvmx_pko_reg_loopback_pkind_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t pkind7 : 6; /**< Loopback port 7 port-kind */
+ uint64_t reserved_52_52 : 1;
+ uint64_t pkind6 : 6; /**< Loopback port 6 port-kind */
+ uint64_t reserved_45_45 : 1;
+ uint64_t pkind5 : 6; /**< Loopback port 5 port-kind */
+ uint64_t reserved_38_38 : 1;
+ uint64_t pkind4 : 6; /**< Loopback port 4 port-kind */
+ uint64_t reserved_31_31 : 1;
+ uint64_t pkind3 : 6; /**< Loopback port 3 port-kind */
+ uint64_t reserved_24_24 : 1;
+ uint64_t pkind2 : 6; /**< Loopback port 2 port-kind */
+ uint64_t reserved_17_17 : 1;
+ uint64_t pkind1 : 6; /**< Loopback port 1 port-kind */
+ uint64_t reserved_10_10 : 1;
+ uint64_t pkind0 : 6; /**< Loopback port 0 port-kind */
+ uint64_t num_ports : 4; /**< Number of loopback ports, 0 <= NUM_PORTS <= 8 */
+#else
+ uint64_t num_ports : 4;
+ uint64_t pkind0 : 6;
+ uint64_t reserved_10_10 : 1;
+ uint64_t pkind1 : 6;
+ uint64_t reserved_17_17 : 1;
+ uint64_t pkind2 : 6;
+ uint64_t reserved_24_24 : 1;
+ uint64_t pkind3 : 6;
+ uint64_t reserved_31_31 : 1;
+ uint64_t pkind4 : 6;
+ uint64_t reserved_38_38 : 1;
+ uint64_t pkind5 : 6;
+ uint64_t reserved_45_45 : 1;
+ uint64_t pkind6 : 6;
+ uint64_t reserved_52_52 : 1;
+ uint64_t pkind7 : 6;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } s;
+ struct cvmx_pko_reg_loopback_pkind_s cn68xx;
+ struct cvmx_pko_reg_loopback_pkind_s cn68xxp1;
+};
+typedef union cvmx_pko_reg_loopback_pkind cvmx_pko_reg_loopback_pkind_t;
+
+/**
+ * cvmx_pko_reg_min_pkt
+ *
+ * Notes:
+ * This CSR is used with PKO_MEM_IPORT_PTRS[MIN_PKT] to select the minimum packet size. Packets whose
+ * size in bytes < (SIZEn+1) are zero-padded to (SIZEn+1) bytes. Note that this does not include CRC bytes.
+ * SIZE0=0 is read-only and is used when no padding is desired.
+ */
+union cvmx_pko_reg_min_pkt {
+ uint64_t u64;
+ struct cvmx_pko_reg_min_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t size7 : 8; /**< Minimum packet size-1 in bytes NS */
+ uint64_t size6 : 8; /**< Minimum packet size-1 in bytes NS */
+ uint64_t size5 : 8; /**< Minimum packet size-1 in bytes NS */
+ uint64_t size4 : 8; /**< Minimum packet size-1 in bytes NS */
+ uint64_t size3 : 8; /**< Minimum packet size-1 in bytes NS */
+ uint64_t size2 : 8; /**< Minimum packet size-1 in bytes NS */
+ uint64_t size1 : 8; /**< Minimum packet size-1 in bytes NS */
+ uint64_t size0 : 8; /**< Minimum packet size-1 in bytes NS */
+#else
+ uint64_t size0 : 8;
+ uint64_t size1 : 8;
+ uint64_t size2 : 8;
+ uint64_t size3 : 8;
+ uint64_t size4 : 8;
+ uint64_t size5 : 8;
+ uint64_t size6 : 8;
+ uint64_t size7 : 8;
+#endif
+ } s;
+ struct cvmx_pko_reg_min_pkt_s cn68xx;
+ struct cvmx_pko_reg_min_pkt_s cn68xxp1;
+};
+typedef union cvmx_pko_reg_min_pkt cvmx_pko_reg_min_pkt_t;
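+
+/*
+ * Padding sketch (assumption): SIZE1=59 zero-pads any packet shorter
+ * than 60 bytes up to 60 bytes, the minimum Ethernet frame length
+ * before the 4 CRC bytes are appended.
+ *
+ * @verbatim
+ * cvmx_pko_reg_min_pkt_t min_pkt;
+ * min_pkt.u64 = cvmx_read_csr(CVMX_PKO_REG_MIN_PKT);
+ * min_pkt.s.size1 = 59;   // pad to SIZE1+1 = 60 bytes
+ * cvmx_write_csr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
+ * @endverbatim
+ */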
+
+/**
+ * cvmx_pko_reg_preempt
+ */
+union cvmx_pko_reg_preempt {
+ uint64_t u64;
+ struct cvmx_pko_reg_preempt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t min_size : 16; /**< Threshold for packet preemption, measured in bytes.
+ Only packets which have at least MIN_SIZE bytes
+ remaining to be read can be preempted. */
+#else
+ uint64_t min_size : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pko_reg_preempt_s cn52xx;
+ struct cvmx_pko_reg_preempt_s cn52xxp1;
+ struct cvmx_pko_reg_preempt_s cn56xx;
+ struct cvmx_pko_reg_preempt_s cn56xxp1;
+ struct cvmx_pko_reg_preempt_s cn61xx;
+ struct cvmx_pko_reg_preempt_s cn63xx;
+ struct cvmx_pko_reg_preempt_s cn63xxp1;
+ struct cvmx_pko_reg_preempt_s cn66xx;
+ struct cvmx_pko_reg_preempt_s cn68xx;
+ struct cvmx_pko_reg_preempt_s cn68xxp1;
+ struct cvmx_pko_reg_preempt_s cnf71xx;
+};
+typedef union cvmx_pko_reg_preempt cvmx_pko_reg_preempt_t;
+
+/**
+ * cvmx_pko_reg_queue_mode
+ *
+ * Notes:
+ * Sets the number of queues and amount of local storage per queue
+ * The system has a total of 256 queues and (256*8) words of local command storage. This CSR sets the
+ * number of queues that are used. Increasing the value of MODE by 1 decreases the number of queues
+ * by a power of 2 and increases the local storage per queue by a power of 2.
+ * MODEn queues storage/queue
+ * 0 256 64B ( 8 words)
+ * 1 128 128B (16 words)
+ * 2 64 256B (32 words)
+ */
+union cvmx_pko_reg_queue_mode {
+ uint64_t u64;
+ struct cvmx_pko_reg_queue_mode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t mode : 2; /**< # of queues = 256 >> MODE, 0 <= MODE <=2 */
+#else
+ uint64_t mode : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_pko_reg_queue_mode_s cn30xx;
+ struct cvmx_pko_reg_queue_mode_s cn31xx;
+ struct cvmx_pko_reg_queue_mode_s cn38xx;
+ struct cvmx_pko_reg_queue_mode_s cn38xxp2;
+ struct cvmx_pko_reg_queue_mode_s cn50xx;
+ struct cvmx_pko_reg_queue_mode_s cn52xx;
+ struct cvmx_pko_reg_queue_mode_s cn52xxp1;
+ struct cvmx_pko_reg_queue_mode_s cn56xx;
+ struct cvmx_pko_reg_queue_mode_s cn56xxp1;
+ struct cvmx_pko_reg_queue_mode_s cn58xx;
+ struct cvmx_pko_reg_queue_mode_s cn58xxp1;
+ struct cvmx_pko_reg_queue_mode_s cn61xx;
+ struct cvmx_pko_reg_queue_mode_s cn63xx;
+ struct cvmx_pko_reg_queue_mode_s cn63xxp1;
+ struct cvmx_pko_reg_queue_mode_s cn66xx;
+ struct cvmx_pko_reg_queue_mode_s cn68xx;
+ struct cvmx_pko_reg_queue_mode_s cn68xxp1;
+ struct cvmx_pko_reg_queue_mode_s cnf71xx;
+};
+typedef union cvmx_pko_reg_queue_mode cvmx_pko_reg_queue_mode_t;
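+
+/*
+ * Worked example (assumption): MODE=1 selects 256 >> 1 = 128 queues
+ * with 128B (16 words) of command storage each, per the table above.
+ *
+ * @verbatim
+ * cvmx_pko_reg_queue_mode_t qmode;
+ * qmode.u64 = 0;
+ * qmode.s.mode = 1;   // 128 queues, 128B/queue
+ * cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, qmode.u64);
+ * @endverbatim
+ */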
+
+/**
+ * cvmx_pko_reg_queue_preempt
+ *
+ * Notes:
+ * Per QID, setting both PREEMPTER=1 and PREEMPTEE=1 is illegal and sets only PREEMPTER=1.
+ * This CSR is used with PKO_MEM_QUEUE_PTRS and PKO_REG_QUEUE_PTRS1. When programming queues, the
+ * programming sequence must first write PKO_REG_QUEUE_PREEMPT, then PKO_REG_QUEUE_PTRS1 and then
+ * PKO_MEM_QUEUE_PTRS for each queue. Preemption is supported only on queues that are ultimately
+ * mapped to engines 0-7. It is illegal to set preemptee or preempter for a queue that is ultimately
+ * mapped to engines 8-11.
+ *
+ * Also, PKO_REG_ENGINE_INFLIGHT must be at least 2 for any engine on which preemption is enabled.
+ *
+ * See the descriptions of PKO_MEM_QUEUE_PTRS for further explanation of queue programming.
+ */
+union cvmx_pko_reg_queue_preempt {
+ uint64_t u64;
+ struct cvmx_pko_reg_queue_preempt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t preemptee : 1; /**< Allow this QID to be preempted.
+ 0=cannot be preempted, 1=can be preempted */
+ uint64_t preempter : 1; /**< Preempts the servicing of a packet on PID to
+ allow this QID immediate servicing. 0=do not cause
+ preemption, 1=cause preemption. Per PID, at most
+ 1 QID can have this bit set. */
+#else
+ uint64_t preempter : 1;
+ uint64_t preemptee : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_pko_reg_queue_preempt_s cn52xx;
+ struct cvmx_pko_reg_queue_preempt_s cn52xxp1;
+ struct cvmx_pko_reg_queue_preempt_s cn56xx;
+ struct cvmx_pko_reg_queue_preempt_s cn56xxp1;
+ struct cvmx_pko_reg_queue_preempt_s cn61xx;
+ struct cvmx_pko_reg_queue_preempt_s cn63xx;
+ struct cvmx_pko_reg_queue_preempt_s cn63xxp1;
+ struct cvmx_pko_reg_queue_preempt_s cn66xx;
+ struct cvmx_pko_reg_queue_preempt_s cn68xx;
+ struct cvmx_pko_reg_queue_preempt_s cn68xxp1;
+ struct cvmx_pko_reg_queue_preempt_s cnf71xx;
+};
+typedef union cvmx_pko_reg_queue_preempt cvmx_pko_reg_queue_preempt_t;
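+
+/*
+ * Ordering sketch (assumption): per the notes above, the preempt CSR is
+ * written first, then PKO_REG_QUEUE_PTRS1, then PKO_MEM_QUEUE_PTRS, once
+ * per queue. The preempt/ptrs1/qptrs unions are assumed to have been
+ * filled in beforehand.
+ *
+ * @verbatim
+ * cvmx_write_csr(CVMX_PKO_REG_QUEUE_PREEMPT, preempt.u64); // step 1
+ * cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, ptrs1.u64);     // step 2
+ * cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, qptrs.u64);      // step 3
+ * @endverbatim
+ */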
+
+/**
+ * cvmx_pko_reg_queue_ptrs1
+ *
+ * Notes:
+ * This CSR is used with PKO_MEM_QUEUE_PTRS and PKO_MEM_QUEUE_QOS to allow access to queues 128-255
+ * and to allow mapping of up to 16 queues per port. When programming queues 128-255, the
+ * programming sequence must first write PKO_REG_QUEUE_PTRS1 and then write PKO_MEM_QUEUE_PTRS or
+ * PKO_MEM_QUEUE_QOS for each queue.
+ * See the descriptions of PKO_MEM_QUEUE_PTRS and PKO_MEM_QUEUE_QOS for further explanation of queue
+ * programming.
+ */
+union cvmx_pko_reg_queue_ptrs1 {
+ uint64_t u64;
+ struct cvmx_pko_reg_queue_ptrs1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t idx3 : 1; /**< [3] of Index (distance from head) in the queue array */
+ uint64_t qid7 : 1; /**< [7] of Queue ID */
+#else
+ uint64_t qid7 : 1;
+ uint64_t idx3 : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_pko_reg_queue_ptrs1_s cn50xx;
+ struct cvmx_pko_reg_queue_ptrs1_s cn52xx;
+ struct cvmx_pko_reg_queue_ptrs1_s cn52xxp1;
+ struct cvmx_pko_reg_queue_ptrs1_s cn56xx;
+ struct cvmx_pko_reg_queue_ptrs1_s cn56xxp1;
+ struct cvmx_pko_reg_queue_ptrs1_s cn58xx;
+ struct cvmx_pko_reg_queue_ptrs1_s cn58xxp1;
+ struct cvmx_pko_reg_queue_ptrs1_s cn61xx;
+ struct cvmx_pko_reg_queue_ptrs1_s cn63xx;
+ struct cvmx_pko_reg_queue_ptrs1_s cn63xxp1;
+ struct cvmx_pko_reg_queue_ptrs1_s cn66xx;
+ struct cvmx_pko_reg_queue_ptrs1_s cnf71xx;
+};
+typedef union cvmx_pko_reg_queue_ptrs1 cvmx_pko_reg_queue_ptrs1_t;
+
+/**
+ * cvmx_pko_reg_read_idx
+ *
+ * Notes:
+ * Provides the read index during a CSR read operation to any of the CSRs that are physically stored
+ * as memories. The names of these CSRs begin with the prefix "PKO_MEM_".
+ * IDX[7:0] is the read index. INC[7:0] is an increment that is added to IDX[7:0] after any CSR read.
+ * The intended use is to initially write this CSR such that IDX=0 and INC=1. Then, the entire
+ * contents of a CSR memory can be read with consecutive CSR read commands.
+ */
+union cvmx_pko_reg_read_idx {
+ uint64_t u64;
+ struct cvmx_pko_reg_read_idx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t inc : 8; /**< Increment to add to current index for next index */
+ uint64_t index : 8; /**< Index to use for next memory CSR read */
+#else
+ uint64_t index : 8;
+ uint64_t inc : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_pko_reg_read_idx_s cn30xx;
+ struct cvmx_pko_reg_read_idx_s cn31xx;
+ struct cvmx_pko_reg_read_idx_s cn38xx;
+ struct cvmx_pko_reg_read_idx_s cn38xxp2;
+ struct cvmx_pko_reg_read_idx_s cn50xx;
+ struct cvmx_pko_reg_read_idx_s cn52xx;
+ struct cvmx_pko_reg_read_idx_s cn52xxp1;
+ struct cvmx_pko_reg_read_idx_s cn56xx;
+ struct cvmx_pko_reg_read_idx_s cn56xxp1;
+ struct cvmx_pko_reg_read_idx_s cn58xx;
+ struct cvmx_pko_reg_read_idx_s cn58xxp1;
+ struct cvmx_pko_reg_read_idx_s cn61xx;
+ struct cvmx_pko_reg_read_idx_s cn63xx;
+ struct cvmx_pko_reg_read_idx_s cn63xxp1;
+ struct cvmx_pko_reg_read_idx_s cn66xx;
+ struct cvmx_pko_reg_read_idx_s cn68xx;
+ struct cvmx_pko_reg_read_idx_s cn68xxp1;
+ struct cvmx_pko_reg_read_idx_s cnf71xx;
+};
+typedef union cvmx_pko_reg_read_idx cvmx_pko_reg_read_idx_t;
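+
+/*
+ * Usage sketch (assumption): dumping an entire memory-backed CSR using
+ * the auto-increment behavior described above; each read returns one
+ * entry and advances IDX by INC.
+ *
+ * @verbatim
+ * cvmx_pko_reg_read_idx_t idx;
+ * int i;
+ *
+ * idx.u64 = 0;
+ * idx.s.index = 0;   // start at entry 0
+ * idx.s.inc = 1;     // advance one entry per read
+ * cvmx_write_csr(CVMX_PKO_REG_READ_IDX, idx.u64);
+ * for (i = 0; i < 128; i++)
+ *     (void)cvmx_read_csr(CVMX_PKO_MEM_THROTTLE_PIPE); // entry i
+ * @endverbatim
+ */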
+
+/**
+ * cvmx_pko_reg_throttle
+ *
+ * Notes:
+ * This CSR is used with PKO_MEM_THROTTLE_PIPE and PKO_MEM_THROTTLE_INT. INT_MASK corresponds to the
+ * interfaces listed in the description for PKO_MEM_IPORT_PTRS[INT]. Set INT_MASK[N] to enable the
+ * updating of PKO_MEM_THROTTLE_PIPE and PKO_MEM_THROTTLE_INT counts for packets destined for
+ * interface N. INT_MASK has no effect on the updates caused by CSR writes to PKO_MEM_THROTTLE_PIPE
+ * and PKO_MEM_THROTTLE_INT. Note that this does not disable the throttle logic, just the updating of
+ * the interface counts.
+ */
+union cvmx_pko_reg_throttle {
+ uint64_t u64;
+ struct cvmx_pko_reg_throttle_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t int_mask : 32; /**< Mask to enable THROTTLE count updates per interface NS */
+#else
+ uint64_t int_mask : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pko_reg_throttle_s cn68xx;
+ struct cvmx_pko_reg_throttle_s cn68xxp1;
+};
+typedef union cvmx_pko_reg_throttle cvmx_pko_reg_throttle_t;
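+
+/*
+ * Configuration sketch (assumption): enabling throttle-count updates
+ * for packets destined for interfaces 0 and 1 only; bit N of INT_MASK
+ * corresponds to interface N.
+ *
+ * @verbatim
+ * cvmx_pko_reg_throttle_t throttle;
+ * throttle.u64 = cvmx_read_csr(CVMX_PKO_REG_THROTTLE);
+ * throttle.s.int_mask = 0x3;   // interfaces 0 and 1
+ * cvmx_write_csr(CVMX_PKO_REG_THROTTLE, throttle.u64);
+ * @endverbatim
+ */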
+
+/**
+ * cvmx_pko_reg_timestamp
+ *
+ * Notes:
+ * None.
+ *
+ */
+union cvmx_pko_reg_timestamp {
+ uint64_t u64;
+ struct cvmx_pko_reg_timestamp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t wqe_word : 4; /**< Specifies the 8-byte word in the WQE to which a PTP
+ timestamp is written. Values 0 and 1 are illegal. */
+#else
+ uint64_t wqe_word : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_pko_reg_timestamp_s cn61xx;
+ struct cvmx_pko_reg_timestamp_s cn63xx;
+ struct cvmx_pko_reg_timestamp_s cn63xxp1;
+ struct cvmx_pko_reg_timestamp_s cn66xx;
+ struct cvmx_pko_reg_timestamp_s cn68xx;
+ struct cvmx_pko_reg_timestamp_s cn68xxp1;
+ struct cvmx_pko_reg_timestamp_s cnf71xx;
+};
+typedef union cvmx_pko_reg_timestamp cvmx_pko_reg_timestamp_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pko-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pko.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pko.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pko.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1031 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Support library for the hardware Packet Output unit.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-cfg.h>
+#include <asm/octeon/cvmx-clock.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#endif
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "cvmx-config.h"
+#endif
+#include "cvmx-pko.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-cfg.h"
+#endif
+
+/* #define PKO_DEBUG */
+
+#define CVMX_PKO_NQ_PER_PORT_MAX 32
+
+/**
+ * Internal state of packet output
+ */
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+/*
+ * PKO port iterator
+ */
+#define CVMX_PKO_FOR_EACH_PORT_BEGIN do { \
+ int XIT_pko_port; \
+ for (XIT_pko_port = 0; XIT_pko_port < CVMX_HELPER_CFG_MAX_PKO_PORT; \
+ XIT_pko_port++) \
+ { \
+ if (__cvmx_helper_cfg_pko_queue_base(XIT_pko_port) != \
+ CVMX_HELPER_CFG_INVALID_VALUE)
+
+#define CVMX_PKO_FOR_EACH_PORT_END } /* for */ \
+ } while (0)
+
+/*
+ * @INTERNAL
+ *
+ * Get INT for a port
+ *
+ * @param interface
+ * @param index
+ * @return the INT value on success and -1 on error
+ */
+static int __cvmx_pko_int(int interface, int index)
+{
+ cvmx_helper_cfg_assert(interface < CVMX_HELPER_CFG_MAX_IFACE);
+ cvmx_helper_cfg_assert(index >= 0);
+
+ switch (interface)
+ {
+ case 0:
+ cvmx_helper_cfg_assert(index < 4);
+ return index;
+ break;
+ case 1:
+ cvmx_helper_cfg_assert(index == 0);
+ return 4;
+ break;
+ case 2:
+ cvmx_helper_cfg_assert(index < 4);
+ return index + 8;
+ break;
+ case 3:
+ cvmx_helper_cfg_assert(index < 4);
+ return index + 0xC;
+ break;
+ case 4:
+ cvmx_helper_cfg_assert(index < 4);
+ return index + 0x10;
+ break;
+ case 5:
+ cvmx_helper_cfg_assert(index < 256);
+ return 0x1C;
+ break;
+ case 6:
+ cvmx_helper_cfg_assert(index < 256);
+ return 0x1D;
+ break;
+ case 7:
+ cvmx_helper_cfg_assert(index < 32);
+ return 0x1E;
+ break;
+ case 8:
+ cvmx_helper_cfg_assert(index < 8);
+ return 0x1F;
+ break;
+ }
+
+ return -1;
+}
+
+int cvmx_pko_get_base_pko_port(int interface, int index)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ return __cvmx_helper_cfg_pko_port_base(interface, index);
+ else
+ return cvmx_helper_get_ipd_port(interface, index);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_pko_get_base_pko_port);
+#endif
+
+int cvmx_pko_get_num_pko_ports(int interface, int index)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ return __cvmx_helper_cfg_pko_port_num(interface, index);
+ else
+ return 1;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_pko_get_num_pko_ports);
+#endif
+
+int cvmx_pko_get_base_queue(int port)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ return __cvmx_helper_cfg_pko_queue_base(
+ cvmx_helper_cfg_ipd2pko_port_base(port));
+ }
+ else
+ return cvmx_pko_get_base_queue_per_core(port, 0);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_pko_get_base_queue);
+#endif
+
+/**
+ * For a given PKO port number, return the base output queue
+ * for the port.
+ *
+ * @param pko_port PKO port number
+ * @return Base output queue
+ */
+int cvmx_pko_get_base_queue_pkoid(int pko_port)
+{
+ return __cvmx_helper_cfg_pko_queue_base(pko_port);
+}
+
+/**
+ * For a given PKO port number, return the number of output queues
+ * for the port.
+ *
+ * @param pko_port PKO port number
+ * @return the number of output queues
+ */
+int cvmx_pko_get_num_queues_pkoid(int pko_port)
+{
+ return __cvmx_helper_cfg_pko_queue_num(pko_port);
+}
+
+int cvmx_pko_get_num_queues(int port)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ return __cvmx_helper_cfg_pko_queue_num(
+ cvmx_helper_cfg_ipd2pko_port_base(port));
+ }
+ else
+ {
+ if (port < 16)
+ return CVMX_PKO_QUEUES_PER_PORT_INTERFACE0;
+ else if (port < 32)
+ return CVMX_PKO_QUEUES_PER_PORT_INTERFACE1;
+ else if (port < 36)
+ return CVMX_PKO_QUEUES_PER_PORT_PCI;
+ else if (port < 40)
+ return CVMX_PKO_QUEUES_PER_PORT_LOOP;
+ else if (port < 42)
+ return CVMX_PKO_QUEUES_PER_PORT_SRIO0;
+ else if (port < 44)
+ return CVMX_PKO_QUEUES_PER_PORT_SRIO1;
+ else if (port < 46)
+ return CVMX_PKO_QUEUES_PER_PORT_SRIO2;
+ }
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_pko_get_num_queues);
+#endif
+
+#ifdef PKO_DEBUG
+/**
+ * Show queues for the internal ports
+ */
+void cvmx_pko_show_queue_map(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ CVMX_PKO_FOR_EACH_PORT_BEGIN {
+ cvmx_dprintf("pko_port %d (interface%d index%d) has %d queues (queue base = %d)\n",
+ XIT_pko_port,
+ __cvmx_helper_cfg_pko_port_interface(XIT_pko_port),
+ __cvmx_helper_cfg_pko_port_index(XIT_pko_port),
+ __cvmx_helper_cfg_pko_queue_num(XIT_pko_port),
+ __cvmx_helper_cfg_pko_queue_base(XIT_pko_port));
+ } CVMX_PKO_FOR_EACH_PORT_END;
+ }
+ else
+ {
+ int core, port;
+ int pko_output_ports;
+
+ pko_output_ports = 36;
+ cvmx_dprintf("port");
+ for(port = 0; port < pko_output_ports; port++)
+ cvmx_dprintf("%3d ", port);
+ cvmx_dprintf("\n");
+
+ for(core = 0; core < CVMX_MAX_CORES; core++)
+ {
+ cvmx_dprintf("\n%2d: ", core);
+ for(port = 0; port < pko_output_ports; port++)
+ cvmx_dprintf("%3d ",
+ cvmx_pko_get_base_queue_per_core(port, core));
+ }
+ cvmx_dprintf("\n");
+
+ }
+}
+#endif /* PKO_DEBUG */
+
+/*
+ * Configure queues for an internal port.
+ * @INTERNAL
+ * @param pko_port PKO internal port number
+ * Note: o68 only
+ */
+static void __cvmx_pko_iport_config(int pko_port)
+{
+ int queue, base_queue, num_queues;
+ int static_priority_base;
+ int static_priority_end;
+ cvmx_pko_mem_iqueue_ptrs_t config;
+ uint64_t *buf_ptr = NULL;
+ uint64_t priorities[CVMX_PKO_NQ_PER_PORT_MAX] = {
+ [0 ... CVMX_PKO_NQ_PER_PORT_MAX - 1] = 8 };
+
+ static_priority_base = -1;
+ static_priority_end = -1;
+ base_queue = __cvmx_helper_cfg_pko_queue_base(pko_port);
+ num_queues = __cvmx_helper_cfg_pko_queue_num(pko_port);
+
+ /*
+ * Give the user a chance to override the per queue priorities.
+ */
+ if (cvmx_override_pko_queue_priority)
+ cvmx_override_pko_queue_priority(pko_port, &priorities[0]);
+
+ /*
+ * static queue priority validation
+ */
+ for (queue = 0; queue < num_queues; queue++)
+ {
+ if (static_priority_base == -1 &&
+ priorities[queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
+ static_priority_base = queue;
+
+ if (static_priority_base != -1 &&
+ static_priority_end == -1 &&
+ priorities[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY &&
+ queue)
+ static_priority_end = queue - 1;
+ else if (static_priority_base != -1 &&
+ static_priority_end == -1 &&
+ queue == num_queues - 1)
+ static_priority_end = queue; /* all queues are static priority */
+
+ /*
+ * Check to make sure all static priority queues are contiguous.
+ * Also catches some cases of static priorities not starting from
+ * queue 0.
+ */
+ if (static_priority_end != -1 &&
+ (int)queue > static_priority_end &&
+ priorities[queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
+ {
+ cvmx_dprintf("ERROR: __cvmx_pko_iport_config: Static priority "
+ "queues aren't contiguous or don't start at base queue. "
+ "q: %d, eq: %d\n", (int)queue, static_priority_end);
+ }
+ if (static_priority_base > 0)
+ {
+ cvmx_dprintf("ERROR: __cvmx_pko_iport_config: Static priority "
+ "queues don't start at base queue. sq: %d\n",
+ static_priority_base);
+ }
+ }
+
+ /*
+ * main loop to set the fields of CVMX_PKO_MEM_IQUEUE_PTRS for
+ * each queue
+ */
+ for (queue = 0; queue < num_queues; queue++)
+ {
+ config.u64 = 0;
+ config.s.index = queue;
+ config.s.qid = base_queue + queue;
+ config.s.ipid = pko_port;
+ config.s.tail = (queue == (num_queues - 1));
+ config.s.s_tail = (queue == static_priority_end);
+ config.s.static_p = (static_priority_base >= 0);
+ config.s.static_q = (queue <= static_priority_end);
+
+ /*
+ * Convert the priority into an enable bit field.
+ * Try to space the bits out evenly so the packets
+ * don't get grouped up.
+ */
+ switch ((int)priorities[queue])
+ {
+ case 0: config.s.qos_mask = 0x00; break;
+ case 1: config.s.qos_mask = 0x01; break;
+ case 2: config.s.qos_mask = 0x11; break;
+ case 3: config.s.qos_mask = 0x49; break;
+ case 4: config.s.qos_mask = 0x55; break;
+ case 5: config.s.qos_mask = 0x57; break;
+ case 6: config.s.qos_mask = 0x77; break;
+ case 7: config.s.qos_mask = 0x7f; break;
+ case 8: config.s.qos_mask = 0xff; break;
+ case CVMX_PKO_QUEUE_STATIC_PRIORITY:
+ config.s.qos_mask = 0xff;
+ break;
+ default:
+ cvmx_dprintf("ERROR: __cvmx_pko_iport_config: "
+ "Invalid priority %llu\n",
+ (unsigned long long)priorities[queue]);
+ config.s.qos_mask = 0xff;
+ break;
+ }
+
+ /*
+ * The command queues
+ */
+ {
+ cvmx_cmd_queue_result_t cmd_res;
+
+ cmd_res = cvmx_cmd_queue_initialize(
+ CVMX_CMD_QUEUE_PKO(base_queue + queue),
+ CVMX_PKO_MAX_QUEUE_DEPTH,
+ CVMX_FPA_OUTPUT_BUFFER_POOL,
+ (CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
+ CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));
+
+ if (cmd_res != CVMX_CMD_QUEUE_SUCCESS)
+ {
+ switch (cmd_res)
+ {
+ case CVMX_CMD_QUEUE_NO_MEMORY:
+ cvmx_dprintf("ERROR: __cvmx_pko_iport_config: Unable to allocate output buffer.");
+ break;
+ case CVMX_CMD_QUEUE_ALREADY_SETUP:
+ cvmx_dprintf("ERROR: __cvmx_pko_iport_config: Port already setup");
+ break;
+ case CVMX_CMD_QUEUE_INVALID_PARAM:
+ default:
+ cvmx_dprintf("ERROR: __cvmx_pko_iport_config: Command queue initialization failed.");
+ break;
+ }
+ cvmx_dprintf(" pko_port%d base_queue%d num_queues%d queue%d.\n",
+ pko_port, base_queue, num_queues, queue);
+ }
+
+ buf_ptr = (uint64_t*)cvmx_cmd_queue_buffer(
+ CVMX_CMD_QUEUE_PKO(base_queue + queue));
+ config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
+ }
+
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
+ }
+}
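
As a side note on the qos_mask table above, each mask's population count equals its priority, with the set bits spread across the eight scheduling rounds. A small host-side check (illustrative only, assuming a GCC-style __builtin_popcount) makes this visible:

    #include <stdio.h>

    int main(void)
    {
        const unsigned char masks[9] = {
            0x00, 0x01, 0x11, 0x49, 0x55, 0x57, 0x77, 0x7f, 0xff
        };
        int p;

        for (p = 0; p <= 8; p++)
            printf("priority %d -> mask 0x%02x enables %d of 8 rounds\n",
                   p, masks[p], __builtin_popcount(masks[p]));
        return 0;
    }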
+
+/*
+ * Allocate queues for the PKO internal ports.
+ * @INTERNAL
+ *
+ */
+static void __cvmx_pko_queue_alloc_o68(void)
+{
+ CVMX_PKO_FOR_EACH_PORT_BEGIN {
+ __cvmx_pko_iport_config(XIT_pko_port);
+ } CVMX_PKO_FOR_EACH_PORT_END;
+}
+
+/*
+ * Allocate memory for PKO engines.
+ *
+ * @param engine is the PKO engine ID.
+ * @return # of 2KB-chunks allocated to this PKO engine.
+ */
+static int __cvmx_pko_memory_per_engine_o68(int engine)
+{
+ /* CN68XX has 40KB to divide between the engines in 2KB chunks */
+ int max_engine;
+ int size_per_engine;
+ int size;
+
+ max_engine = __cvmx_helper_cfg_pko_max_engine();
+ size_per_engine = 40 / 2 / max_engine;
+
+ if (engine >= max_engine)
+ {
+ /* Unused engines get no space */
+ size = 0;
+ }
+ else if (engine == max_engine - 1)
+ {
+ /* The last engine gets all the space lost by rounding. This means
+ the ILK gets the most space */
+ size = 40 / 2 - engine * size_per_engine;
+ }
+ else
+ {
+ /* All other engines get the same space */
+ size = size_per_engine;
+ }
+
+ return size;
+}
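
A worked example of the split above under an assumed engine count (the 40KB pool is 20 two-KB chunks; with a hypothetical max_engine of 12, engines 0-10 get one chunk each and the last engine absorbs the nine chunks lost to rounding):

    #include <stdio.h>

    int main(void)
    {
        int chunks = 40 / 2;      /* 20 chunks of 2KB */
        int max_engine = 12;      /* hypothetical engine count */
        int per_engine = chunks / max_engine;               /* 1 */
        int last = chunks - (max_engine - 1) * per_engine;  /* 9 */

        printf("per-engine %d chunk(s), last engine %d chunks\n",
               per_engine, last);
        return 0;
    }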
+
+/*
+ * Setup one-to-one mapping between PKO iport and eport.
+ * @INTERNAL
+ */
+static void __cvmx_pko_port_map_o68(void)
+{
+ int i;
+ int interface, index;
+ cvmx_helper_interface_mode_t mode;
+ cvmx_pko_mem_iport_ptrs_t config;
+
+ /*
+ * Initialize every iport with the invalid eid.
+ */
+#define CVMX_O68_PKO_INVALID_EID 31
+ config.u64 = 0;
+ config.s.eid = CVMX_O68_PKO_INVALID_EID;
+ for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
+ {
+ config.s.ipid = i;
+ cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+ }
+
+ /*
+ * Set up PKO_MEM_IPORT_PTRS
+ */
+ CVMX_PKO_FOR_EACH_PORT_BEGIN {
+ interface = __cvmx_helper_cfg_pko_port_interface(XIT_pko_port);
+ index = __cvmx_helper_cfg_pko_port_index(XIT_pko_port);
+ mode = cvmx_helper_interface_get_mode(interface);
+
+ if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
+ continue;
+
+ config.s.ipid = XIT_pko_port;
+ config.s.qos_mask = 0xff;
+ config.s.crc = __cvmx_helper_get_has_fcs(interface);
+ config.s.min_pkt = __cvmx_helper_get_pko_padding(interface);
+ config.s.intr = __cvmx_pko_int(interface, index);
+ config.s.eid = __cvmx_helper_cfg_pko_port_eid(XIT_pko_port);
+ config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ? index :
+ XIT_pko_port;
+ cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+ } CVMX_PKO_FOR_EACH_PORT_END;
+}
+
+int __cvmx_pko_get_pipe(int interface, int index)
+{
+ /*
+ * the loopback ports do not have pipes
+ */
+ if (cvmx_helper_interface_get_mode(interface) ==
+ CVMX_HELPER_INTERFACE_MODE_LOOP)
+ return -1;
+ /*
+ * We use pko_port as the pipe. See __cvmx_pko_port_map_o68().
+ */
+ return cvmx_helper_get_pko_port(interface, index);
+}
+
+/*
+ * chip-specific setup
+ * @INTERNAL
+ */
+static void __cvmx_pko_chip_init(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ __cvmx_pko_port_map_o68();
+ __cvmx_pko_queue_alloc_o68();
+ }
+ else
+ {
+ int i;
+ uint64_t priority = 8;
+
+ /*
+ * Initialize queues
+ */
+ for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
+ cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
+ &priority);
+ }
+}
+
+/**
+ * Call before any other calls to initialize the packet
+ * output system. This does chip global config, and should only be
+ * done by one core.
+ */
+
+void cvmx_pko_initialize_global(void)
+{
+ cvmx_pko_reg_cmd_buf_t config;
+ int i;
+
+ /*
+ * Set the size of the PKO command buffers to an odd number of 64bit
+ * words. This allows the normal two word send to stay aligned and never
+ * span a command word buffer.
+ */
+ config.u64 = 0;
+ config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
+ config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;
+ cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
+
+ /*
+ * chip-specific setup.
+ */
+ __cvmx_pko_chip_init();
+
+ /*
+ * If we aren't using all of the queues, optimize PKO's internal memory.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ {
+ int num_interfaces;
+ int last_port;
+ int max_queues;
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ max_queues = __cvmx_helper_cfg_pko_max_queue();
+ else
+ {
+ num_interfaces = cvmx_helper_get_number_of_interfaces();
+ last_port = cvmx_helper_get_last_ipd_port(num_interfaces-1);
+ max_queues = cvmx_pko_get_base_queue(last_port) +
+ cvmx_pko_get_num_queues(last_port);
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+ {
+ if (max_queues <= 32)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
+ else if (max_queues <= 64)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
+ else
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 0);
+ }
+ else
+ {
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX) && max_queues <= 32)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 3);
+ else if (max_queues <= 64)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
+ else if (max_queues <= 128)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
+ else
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 0);
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ for (i = 0; i < 2; i++)
+ {
+ cvmx_pko_reg_engine_storagex_t engine_storage;
+
+#define PKO_ASSIGN_ENGINE_STORAGE(index) \
+ engine_storage.s.engine##index = \
+ __cvmx_pko_memory_per_engine_o68(16 * i + (index))
+
+ engine_storage.u64 = 0;
+ PKO_ASSIGN_ENGINE_STORAGE(0);
+ PKO_ASSIGN_ENGINE_STORAGE(1);
+ PKO_ASSIGN_ENGINE_STORAGE(2);
+ PKO_ASSIGN_ENGINE_STORAGE(3);
+ PKO_ASSIGN_ENGINE_STORAGE(4);
+ PKO_ASSIGN_ENGINE_STORAGE(5);
+ PKO_ASSIGN_ENGINE_STORAGE(6);
+ PKO_ASSIGN_ENGINE_STORAGE(7);
+ PKO_ASSIGN_ENGINE_STORAGE(8);
+ PKO_ASSIGN_ENGINE_STORAGE(9);
+ PKO_ASSIGN_ENGINE_STORAGE(10);
+ PKO_ASSIGN_ENGINE_STORAGE(11);
+ PKO_ASSIGN_ENGINE_STORAGE(12);
+ PKO_ASSIGN_ENGINE_STORAGE(13);
+ PKO_ASSIGN_ENGINE_STORAGE(14);
+ PKO_ASSIGN_ENGINE_STORAGE(15);
+ cvmx_write_csr(CVMX_PKO_REG_ENGINE_STORAGEX(i),
+ engine_storage.u64);
+ }
+ }
+ }
+ }
+}
+
+/**
+ * This function does per-core initialization required by the PKO routines.
+ * This must be called on all cores that will do packet output, and must
+ * be called after the FPA has been initialized and filled with pages.
+ *
+ * @return 0 on success
+ * !0 on failure
+ */
+int cvmx_pko_initialize_local(void)
+{
+ /* Nothing to do */
+ return 0;
+}
+#endif
+
+/**
+ * Enables the packet output hardware. It must already be
+ * configured.
+ */
+void cvmx_pko_enable(void)
+{
+ cvmx_pko_reg_flags_t flags;
+
+ flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+ if (flags.s.ena_pko)
+ cvmx_dprintf("Warning: Enabling PKO when PKO already enabled.\n");
+
+ flags.s.ena_dwb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);
+ flags.s.ena_pko = 1;
+ flags.s.store_be = 1; /* Always enable big endian for 3-word
+ * commands; does nothing for 2-word commands. */
+ cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
+}
+
+/**
+ * Disables the packet output. Does not affect any configuration.
+ */
+void cvmx_pko_disable(void)
+{
+ cvmx_pko_reg_flags_t pko_reg_flags;
+ pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+ pko_reg_flags.s.ena_pko = 0;
+ cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
+}
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+/**
+ * @INTERNAL
+ * Reset the packet output.
+ */
+static void __cvmx_pko_reset(void)
+{
+ cvmx_pko_reg_flags_t pko_reg_flags;
+ pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+ pko_reg_flags.s.reset = 1;
+ cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
+}
+
+/**
+ * Shutdown and free resources required by packet output.
+ */
+void cvmx_pko_shutdown(void)
+{
+ int queue;
+
+ cvmx_pko_disable();
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ cvmx_pko_mem_iqueue_ptrs_t config;
+ config.u64 = 0;
+ for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++)
+ {
+ config.s.qid = queue;
+ cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
+ cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
+ }
+ }
+ else
+ {
+ cvmx_pko_mem_queue_ptrs_t config;
+ for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++)
+ {
+ config.u64 = 0;
+ config.s.tail = 1;
+ config.s.index = 0;
+ config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
+ config.s.queue = queue & 0x7f;
+ config.s.qos_mask = 0;
+ config.s.buf_ptr = 0;
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ {
+ cvmx_pko_reg_queue_ptrs1_t config1;
+ config1.u64 = 0;
+ config1.s.qid7 = queue >> 7;
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+ }
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+ cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
+ }
+ }
+
+ __cvmx_pko_reset();
+}
+
+/**
+ * Configure an output port and the associated queues for use.
+ *
+ * @param port Port to configure.
+ * @param base_queue First queue number to associate with this port.
+ * @param num_queues Number of queues to associate with this port
+ * @param priority Array of priority levels for each queue. Values are
+ * allowed to be 0-8. A value of 8 gets 8 times the traffic
+ * of a value of 1. A value of 0 indicates that the queue
+ * does not participate in any rounds. These priorities can
+ * be changed on the fly while PKO is enabled. A priority of 9
+ * indicates that static priority should be used. If static
+ * priority is used all queues with static priority must be
+ * contiguous starting at the base_queue, and lower numbered
+ * queues have higher priority than higher numbered queues.
+ * There must be num_queues elements in the array.
+ */
+cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
+ uint64_t num_queues, const uint64_t priority[])
+{
+ cvmx_pko_status_t result_code;
+ uint64_t queue;
+ cvmx_pko_mem_queue_ptrs_t config;
+ cvmx_pko_reg_queue_ptrs1_t config1;
+ int static_priority_base = -1;
+ int static_priority_end = -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return CVMX_PKO_SUCCESS;
+
+ if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS) &&
+ (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID))
+ {
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
+ (unsigned long long)port);
+ return CVMX_PKO_INVALID_PORT;
+ }
+
+ if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES)
+ {
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
+ (unsigned long long)(base_queue + num_queues));
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+
+ if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)
+ {
+ /*
+ * Validate the static queue priority setup and set
+ * static_priority_base and static_priority_end accordingly.
+ */
+ for (queue = 0; queue < num_queues; queue++)
+ {
+ /* Find first queue of static priority */
+ if (static_priority_base == -1 && priority[queue] ==
+ CVMX_PKO_QUEUE_STATIC_PRIORITY)
+ static_priority_base = queue;
+ /* Find last queue of static priority */
+ if (static_priority_base != -1 && static_priority_end == -1 &&
+ priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY && queue)
+ static_priority_end = queue - 1;
+ else if (static_priority_base != -1 && static_priority_end == -1 &&
+ queue == num_queues - 1)
+ static_priority_end = queue; /* all queues are static priority */
+
+ /*
+ * Check to make sure all static priority queues are contiguous.
+ * Also catches some cases of static priorities not starting at
+ * queue 0.
+ */
+ if (static_priority_end != -1 && (int)queue > static_priority_end &&
+ priority[queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
+ {
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: Static priority "
+ "queues aren't contiguous or don't start at base queue. "
+ "q: %d, eq: %d\n", (int)queue, static_priority_end);
+ return CVMX_PKO_INVALID_PRIORITY;
+ }
+ }
+ if (static_priority_base > 0)
+ {
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: Static priority queues "
+ "don't start at base queue. sq: %d\n", static_priority_base);
+ return CVMX_PKO_INVALID_PRIORITY;
+ }
+ }
+
+ /*
+ * At this point, static_priority_base and static_priority_end are either
+ * both -1, or are valid start/end queue numbers
+ */
+
+ result_code = CVMX_PKO_SUCCESS;
+
+#ifdef PKO_DEBUG
+ cvmx_dprintf("num queues: %d (%lld,%lld)\n", (int)num_queues,
+ (unsigned long long)CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
+ (unsigned long long)CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
+#endif
+
+ for (queue = 0; queue < num_queues; queue++)
+ {
+ uint64_t *buf_ptr = NULL;
+
+ config1.u64 = 0;
+ config1.s.idx3 = queue >> 3;
+ config1.s.qid7 = (base_queue + queue) >> 7;
+
+ config.u64 = 0;
+ config.s.tail = queue == (num_queues - 1);
+ config.s.index = queue;
+ config.s.port = port;
+ config.s.queue = base_queue + queue;
+
+ config.s.static_p = static_priority_base >= 0;
+ config.s.static_q = (int)queue <= static_priority_end;
+ config.s.s_tail = (int)queue == static_priority_end;
+ /*
+ * Convert the priority into an enable bit field. Try to space the bits
+ * out evenly so the packets don't get grouped up
+ */
+ switch ((int)priority[queue])
+ {
+ case 0: config.s.qos_mask = 0x00; break;
+ case 1: config.s.qos_mask = 0x01; break;
+ case 2: config.s.qos_mask = 0x11; break;
+ case 3: config.s.qos_mask = 0x49; break;
+ case 4: config.s.qos_mask = 0x55; break;
+ case 5: config.s.qos_mask = 0x57; break;
+ case 6: config.s.qos_mask = 0x77; break;
+ case 7: config.s.qos_mask = 0x7f; break;
+ case 8: config.s.qos_mask = 0xff; break;
+ case CVMX_PKO_QUEUE_STATIC_PRIORITY:
+ config.s.qos_mask = 0xff;
+ break;
+ default:
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid priority %llu\n",
+ (unsigned long long)priority[queue]);
+ config.s.qos_mask = 0xff;
+ result_code = CVMX_PKO_INVALID_PRIORITY;
+ break;
+ }
+
+ if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)
+ {
+ cvmx_cmd_queue_result_t cmd_res = cvmx_cmd_queue_initialize(
+ CVMX_CMD_QUEUE_PKO(base_queue + queue),
+ CVMX_PKO_MAX_QUEUE_DEPTH,
+ CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
+ CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST*8);
+ if (cmd_res != CVMX_CMD_QUEUE_SUCCESS)
+ {
+ switch (cmd_res)
+ {
+ case CVMX_CMD_QUEUE_NO_MEMORY:
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: "
+ "Unable to allocate output buffer.\n");
+ return(CVMX_PKO_NO_MEMORY);
+ case CVMX_CMD_QUEUE_ALREADY_SETUP:
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: "
+ "Port already setup.\n");
+ return(CVMX_PKO_PORT_ALREADY_SETUP);
+ case CVMX_CMD_QUEUE_INVALID_PARAM:
+ default:
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: "
+ "Command queue initialization failed.\n");
+ return(CVMX_PKO_CMD_QUEUE_INIT_ERROR);
+ }
+ }
+
+ buf_ptr = (uint64_t*)cvmx_cmd_queue_buffer(
+ CVMX_CMD_QUEUE_PKO(base_queue + queue));
+ config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
+ }
+ else
+ config.s.buf_ptr = 0;
+
+ CVMX_SYNCWS;
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ {
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+ }
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+ }
+
+ return result_code;
+}
+
+/**
+ * Rate limit a PKO port to a max packets/sec. This function is only
+ * supported on CN51XX and higher, excluding CN58XX.
+ *
+ * @param port Port to rate limit
+ * @param packets_s Maximum packet/sec
+ * @param burst Maximum number of packets to burst in a row before rate
+ * limiting cuts in.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
+{
+ cvmx_pko_mem_port_rate0_t pko_mem_port_rate0;
+ cvmx_pko_mem_port_rate1_t pko_mem_port_rate1;
+
+ pko_mem_port_rate0.u64 = 0;
+ pko_mem_port_rate0.s.pid = port;
+ pko_mem_port_rate0.s.rate_pkt =
+ cvmx_clock_get_rate(CVMX_CLOCK_SCLK) / packets_s / 16;
+ /* No cost per word since we are limited by packets/sec, not bits/sec */
+ pko_mem_port_rate0.s.rate_word = 0;
+
+ pko_mem_port_rate1.u64 = 0;
+ pko_mem_port_rate1.s.pid = port;
+ pko_mem_port_rate1.s.rate_lim =
+ ((uint64_t)pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
+
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+ return 0;
+}
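
To make the arithmetic above concrete, a sketch with assumed figures (an 800 MHz SCLK, a 10,000 packets/s limit, burst of 16; none of these numbers come from the commit):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t sclk = 800000000ull;  /* hypothetical SCLK rate */
        int packets_s = 10000;
        int burst = 16;

        uint64_t rate_pkt = sclk / packets_s / 16;    /* 5000 */
        uint64_t rate_lim = (rate_pkt * burst) >> 8;  /* 312 */

        printf("rate_pkt %llu rate_lim %llu\n",
               (unsigned long long)rate_pkt,
               (unsigned long long)rate_lim);
        return 0;
    }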
+
+/**
+ * Rate limit a PKO port to a max bits/sec. This function is only
+ * supported on CN51XX and higher, excluding CN58XX.
+ *
+ * @param port Port to rate limit
+ * @param bits_s PKO rate limit in bits/sec
+ * @param burst Maximum number of bits to burst before rate
+ * limiting cuts in.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
+{
+ cvmx_pko_mem_port_rate0_t pko_mem_port_rate0;
+ cvmx_pko_mem_port_rate1_t pko_mem_port_rate1;
+ uint64_t clock_rate = cvmx_clock_get_rate(CVMX_CLOCK_SCLK);
+ uint64_t tokens_per_bit = clock_rate*16 / bits_s;
+
+ pko_mem_port_rate0.u64 = 0;
+ pko_mem_port_rate0.s.pid = port;
+ /* Each packet has 12 bytes of interframe gap, an 8 byte preamble, and a
+ 4 byte CRC. These are not included in the per word count. Multiply
+ by 8 to convert to bits and divide by 256 for limit granularity */
+ pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
+ /* Each 8 byte word has 64bits */
+ pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
+
+ pko_mem_port_rate1.u64 = 0;
+ pko_mem_port_rate1.s.pid = port;
+ pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
+
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+ return 0;
+}
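
The corresponding numbers for the bits/sec variant, again under assumed figures (800 MHz SCLK, a 100 Mbit/s limit, 10,000-bit burst):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t clock_rate = 800000000ull;  /* hypothetical SCLK rate */
        uint64_t bits_s = 100000000ull;      /* 100 Mbit/s */
        int burst = 10000;

        uint64_t tokens_per_bit = clock_rate * 16 / bits_s;          /* 128 */
        uint64_t rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256; /* 96 */
        uint64_t rate_word = 64 * tokens_per_bit;                    /* 8192 */
        uint64_t rate_lim = tokens_per_bit * burst / 256;            /* 5000 */

        printf("tokens/bit %llu rate_pkt %llu rate_word %llu rate_lim %llu\n",
               (unsigned long long)tokens_per_bit,
               (unsigned long long)rate_pkt,
               (unsigned long long)rate_word,
               (unsigned long long)rate_lim);
        return 0;
    }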
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pko.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pko.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pko.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pko.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,839 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Interface to the hardware Packet Output unit.
+ *
+ * Starting with SDK 1.7.0, the PKO output functions now support
+ * two types of locking. CVMX_PKO_LOCK_ATOMIC_TAG continues to
+ * function similarly to previous SDKs by using POW atomic tags
+ * to preserve ordering and exclusivity. As a new option, you
+ * can now pass CVMX_PKO_LOCK_CMD_QUEUE which uses a ll/sc
+ * memory based locking instead. This locking has the advantage
+ * of not affecting the tag state but doesn't preserve packet
+ * ordering. CVMX_PKO_LOCK_CMD_QUEUE is appropriate in most
+ * generic code while CVMX_PKO_LOCK_ATOMIC_TAG should be used
+ * with hand tuned fast path code.
+ *
+ * Some other SDK differences visible in the command queuing:
+ * - PKO indexes are no longer stored in the FAU. A large
+ * percentage of the FAU register block used to be tied up
+ * maintaining PKO queue pointers. These are now stored in a
+ * global named block.
+ * - The PKO <b>use_locking</b> parameter can now have a global
+ * effect. Since all applications use the same named block,
+ * queue locking correctly applies across all operating
+ * systems when using CVMX_PKO_LOCK_CMD_QUEUE.
+ * - PKO 3 word commands are now supported. Use
+ * cvmx_pko_send_packet_finish3().
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+
+#ifndef __CVMX_PKO_H__
+#define __CVMX_PKO_H__
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include "cvmx-config.h"
+#include "cvmx-pko-defs.h"
+#include <asm/octeon/cvmx-fau.h>
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-pow.h>
+#include <asm/octeon/cvmx-cmd-queue.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-cfg.h>
+#else
+# ifndef CVMX_DONT_INCLUDE_CONFIG
+# include "executive-config.h"
+# ifdef CVMX_ENABLE_PKO_FUNCTIONS
+# include "cvmx-config.h"
+# endif
+# endif
+#include "cvmx-fau.h"
+#include "cvmx-fpa.h"
+#include "cvmx-pow.h"
+#include "cvmx-cmd-queue.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-util.h"
+#include "cvmx-helper-cfg.h"
+#endif
+
+/* Adjust the command buffer size by 1 word so that in the case of using only
+** two word PKO commands no command words straddle buffers. The useful values
+** for this are 0 and 1. */
+#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
+#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
+ OCTEON_IS_MODEL(OCTEON_CN3010) || \
+ OCTEON_IS_MODEL(OCTEON_CN3005) || \
+ OCTEON_IS_MODEL(OCTEON_CN50XX)) ? \
+ 32 : \
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) || \
+ OCTEON_IS_MODEL(OCTEON_CN56XX) || \
+ OCTEON_IS_MODEL(OCTEON_CN52XX) || \
+ OCTEON_IS_MODEL(OCTEON_CN6XXX) || \
+ OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? \
+ 256 : 128)
+#define CVMX_PKO_NUM_OUTPUT_PORTS ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 44 : (OCTEON_IS_MODEL(OCTEON_CN66XX) ? 46 : 40))
+#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63 /* use this for queues that are not used */
+#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
+#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
+#define CVMX_PKO_MAX_QUEUE_DEPTH 0
+
+typedef enum
+{
+ CVMX_PKO_SUCCESS,
+ CVMX_PKO_INVALID_PORT,
+ CVMX_PKO_INVALID_QUEUE,
+ CVMX_PKO_INVALID_PRIORITY,
+ CVMX_PKO_NO_MEMORY,
+ CVMX_PKO_PORT_ALREADY_SETUP,
+ CVMX_PKO_CMD_QUEUE_INIT_ERROR
+} cvmx_pko_status_t;
+
+/**
+ * This enumeration represents the different locking modes supported by PKO.
+ */
+typedef enum
+{
+ CVMX_PKO_LOCK_NONE = 0, /**< PKO doesn't do any locking. It is the responsibility
+ of the application to make sure that no other core is
+ accessing the same queue at the same time */
+ CVMX_PKO_LOCK_ATOMIC_TAG = 1, /**< PKO performs an atomic tagswitch to ensure exclusive
+ access to the output queue. This will maintain
+ packet ordering on output */
+ CVMX_PKO_LOCK_CMD_QUEUE = 2, /**< PKO uses the common command queue locks to ensure
+ exclusive access to the output queue. This is a memory
+ based ll/sc. This is the most portable locking
+ mechanism */
+} cvmx_pko_lock_t;
+
+typedef struct
+{
+ uint32_t packets;
+ uint64_t octets;
+ uint64_t doorbell;
+} cvmx_pko_port_status_t;
+
+/**
+ * This structure defines the address to use on a packet enqueue
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ cvmx_mips_space_t mem_space : 2; /**< Must be CVMX_IO_SEG */
+ uint64_t reserved :13; /**< Must be zero */
+ uint64_t is_io : 1; /**< Must be one */
+ uint64_t did : 8; /**< The ID of the device on the non-coherent bus */
+ uint64_t reserved2 : 4; /**< Must be zero */
+ uint64_t reserved3 :15; /**< Must be zero */
+ uint64_t port : 9; /**< The hardware must have the output port in addition to the output queue */
+ uint64_t queue : 9; /**< The output queue to send the packet to (0-127 are legal) */
+ uint64_t reserved4 : 3; /**< Must be zero */
+ } s;
+} cvmx_pko_doorbell_address_t;
+
+/**
+ * Structure of the first packet output command word.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ cvmx_fau_op_size_t size1 : 2; /**< The size of the reg1 operation - could be 8, 16, 32, or 64 bits */
+ cvmx_fau_op_size_t size0 : 2; /**< The size of the reg0 operation - could be 8, 16, 32, or 64 bits */
+ uint64_t subone1 : 1; /**< If set, subtract 1, if clear, subtract packet size */
+ uint64_t reg1 :11; /**< The register, subtract will be done if reg1 is non-zero */
+ uint64_t subone0 : 1; /**< If set, subtract 1, if clear, subtract packet size */
+ uint64_t reg0 :11; /**< The register, subtract will be done if reg0 is non-zero */
+ uint64_t le : 1; /**< When set, interpret segment pointer and segment bytes in little endian order */
+ uint64_t n2 : 1; /**< When set, packet data not allocated in L2 cache by PKO */
+ uint64_t wqp : 1; /**< If set and rsp is set, word3 contains a pointer to a work queue entry */
+ uint64_t rsp : 1; /**< If set, the hardware will send a response when done */
+ uint64_t gather : 1; /**< If set, the supplied pkt_ptr is really a pointer to a list of pkt_ptr's */
+ uint64_t ipoffp1 : 7; /**< If ipoffp1 is non zero, (ipoffp1-1) is the number of bytes to IP header,
+ and the hardware will calculate and insert the UDP/TCP checksum */
+ uint64_t ignore_i : 1; /**< If set, ignore the I bit (force to zero) from all pointer structures */
+ uint64_t dontfree : 1; /**< If clear, the hardware will attempt to free the buffers containing the packet */
+ uint64_t segs : 6; /**< The total number of segs in the packet, if gather set, also gather list length */
+ uint64_t total_bytes :16; /**< Including L2, but no trailing CRC */
+ } s;
+} cvmx_pko_command_word0_t;
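
A hedged sketch of filling this first command word for a plain single-segment send; build_simple_command is a hypothetical helper and the field choices are illustrative, not part of the commit:

    static cvmx_pko_command_word0_t build_simple_command(int packet_len)
    {
        cvmx_pko_command_word0_t cmd;

        cmd.u64 = 0;
        cmd.s.segs = 1;                 /* one buffer segment, no gather */
        cmd.s.total_bytes = packet_len; /* includes L2, no trailing CRC */
        cmd.s.dontfree = 0;             /* let PKO free the packet buffer */
        return cmd;
    }

The word is then passed, along with a cvmx_buf_ptr_t, to one of the cvmx_pko_send_packet_finish*() routines below.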
+
+/* CSR typedefs have been moved to cvmx-pko-defs.h */
+
+/**
+ * Definition of internal state for Packet output processing
+ */
+typedef struct
+{
+ uint64_t * start_ptr; /**< ptr to start of buffer, offset kept in FAU reg */
+} cvmx_pko_state_elem_t;
+
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+/**
+ * Call before any other calls to initialize the packet
+ * output system.
+ */
+extern void cvmx_pko_initialize_global(void);
+extern int cvmx_pko_initialize_local(void);
+
+#endif
+
+
+/**
+ * Enables the packet output hardware. It must already be
+ * configured.
+ */
+extern void cvmx_pko_enable(void);
+
+
+/**
+ * Disables the packet output. Does not affect any configuration.
+ */
+extern void cvmx_pko_disable(void);
+
+
+/**
+ * Shutdown and free resources required by packet output.
+ */
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+extern void cvmx_pko_shutdown(void);
+
+/**
+ * Configure a output port and the associated queues for use.
+ *
+ * @param port Port to configure.
+ * @param base_queue First queue number to associate with this port.
+ * @param num_queues Number of queues to associate with this port
+ * @param priority Array of priority levels for each queue. Values are
+ * allowed to be 1-8. A value of 8 gets 8 times the traffic
+ * of a value of 1. There must be num_queues elements in the
+ * array.
+ */
+extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue, uint64_t num_queues, const uint64_t priority[]);
+
+
+/**
+ * Ring the packet output doorbell. This tells the packet
+ * output hardware that "len" command words have been added
+ * to its pending list. This command includes the required
+ * CVMX_SYNCWS before the doorbell ring.
+ *
+ * WARNING: This function may have to look up the proper PKO port in
+ * the IPD port to PKO port map, and is thus slower than calling
+ * cvmx_pko_doorbell_pkoid() directly if the PKO port identifier is
+ * known.
+ *
+ * @param ipd_port The IPD port corresponding to the PKO port the packet is for
+ * @param queue Queue the packet is for
+ * @param len Length of the command in 64 bit words
+ */
+static inline void cvmx_pko_doorbell(uint64_t ipd_port, uint64_t queue, uint64_t len)
+{
+ cvmx_pko_doorbell_address_t ptr;
+ uint64_t pko_port;
+
+ pko_port = ipd_port;
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ pko_port = cvmx_helper_cfg_ipd2pko_port_base(ipd_port);
+
+ ptr.u64 = 0;
+ ptr.s.mem_space = CVMX_IO_SEG;
+ ptr.s.did = CVMX_OCT_DID_PKT_SEND;
+ ptr.s.is_io = 1;
+ ptr.s.port = pko_port;
+ ptr.s.queue = queue;
+ CVMX_SYNCWS; /* Need to make sure output queue data is in DRAM before doorbell write */
+ cvmx_write_io(ptr.u64, len);
+}
+#endif
+
+
+/**
+ * Prepare to send a packet. This may initiate a tag switch to
+ * get exclusive access to the output queue structure, and
+ * performs other prep work for the packet send operation.
+ *
+ * cvmx_pko_send_packet_finish() MUST be called after this function is called,
+ * and must be called with the same port/queue/use_locking arguments.
+ *
+ * The use_locking parameter allows the caller to use three
+ * possible locking modes.
+ * - CVMX_PKO_LOCK_NONE
+ * - PKO doesn't do any locking. It is the responsibility
+ * of the application to make sure that no other core
+ * is accessing the same queue at the same time.
+ * - CVMX_PKO_LOCK_ATOMIC_TAG
+ * - PKO performs an atomic tagswitch to insure exclusive
+ * access to the output queue. This will maintain
+ * packet ordering on output.
+ * - CVMX_PKO_LOCK_CMD_QUEUE
+ * - PKO uses the common command queue locks to insure
+ * exclusive access to the output queue. This is a
+ * memory based ll/sc. This is the most portable
+ * locking mechanism.
+ *
+ * NOTE: If atomic locking is used, the POW entry CANNOT be
+ * descheduled, as it does not contain a valid WQE pointer.
+ *
+ * @param port Port to send it on, this can be either IPD port or PKO
+ * port.
+ * @param queue Queue to use
+ * @param use_locking
+ * CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE
+ */
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue, cvmx_pko_lock_t use_locking)
+{
+ if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+ {
+ /* Must do a full switch here to handle all cases. We use a fake WQE pointer, as the POW does
+ ** not access this memory. The WQE pointer and group are only used if this work is descheduled,
+ ** which is not supported by the cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish combination.
+ ** Note that this is a special case in which these fake values can be used - this is not a general technique.
+ */
+ uint32_t tag = CVMX_TAG_SW_BITS_INTERNAL << CVMX_TAG_SW_SHIFT | CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT | (CVMX_TAG_SUBGROUP_MASK & queue);
+ cvmx_pow_tag_sw_full((cvmx_wqe_t *)cvmx_phys_to_ptr(0x80), tag, CVMX_POW_TAG_TYPE_ATOMIC, 0);
+ }
+}
+
+#define cvmx_pko_send_packet_prepare_pkoid cvmx_pko_send_packet_prepare
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this,
+ * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and
+ * cvmx_pko_send_packet_finish().
+ *
+ * WARNING: This function may have to look up the proper PKO port in
+ * the IPD port to PKO port map, and is thus slower than calling
+ * cvmx_pko_send_packet_finish_pkoid() directly if the PKO port
+ * identifier is known.
+ *
+ * @param ipd_port The IPD port corresponding to the PKO port the packet is for
+ * @param queue Queue to use
+ * @param pko_command
+ * PKO HW command word
+ * @param packet Packet to send
+ * @param use_locking
+ * CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output
+ */
+static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t ipd_port, uint64_t queue,
+ cvmx_pko_command_word0_t pko_command,
+ cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)
+{
+ cvmx_cmd_queue_result_t result;
+ if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+ cvmx_pow_tag_sw_wait();
+ result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
+ (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+ pko_command.u64,
+ packet.u64);
+ if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS))
+ {
+ cvmx_pko_doorbell(ipd_port, queue, 2);
+ return CVMX_PKO_SUCCESS;
+ }
+ else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL))
+ {
+ return CVMX_PKO_NO_MEMORY;
+ }
+ else
+ {
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+}
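
A minimal usage sketch of the prepare/finish pairing just defined; send_one_packet is a hypothetical caller, and cmd/pkt are assumed to be filled in already:

    static int send_one_packet(uint64_t ipd_port, uint64_t queue,
                               cvmx_pko_command_word0_t cmd,
                               cvmx_buf_ptr_t pkt)
    {
        /* prepare and finish must see the same port/queue/locking args */
        cvmx_pko_send_packet_prepare(ipd_port, queue, CVMX_PKO_LOCK_CMD_QUEUE);
        if (cvmx_pko_send_packet_finish(ipd_port, queue, cmd, pkt,
                                        CVMX_PKO_LOCK_CMD_QUEUE)
            != CVMX_PKO_SUCCESS)
            return -1; /* queue full or invalid; caller drops or retries */
        return 0;
    }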
+
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this,
+ * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and
+ * cvmx_pko_send_packet_finish().
+ *
+ * WARNING: This function may have to look up the proper PKO port in
+ * the IPD port to PKO port map, and is thus slower than calling
+ * cvmx_pko_send_packet_finish3_pkoid() directly if the PKO port
+ * identifier is known.
+ *
+ * @param ipd_port The IPD port corresponding to the PKO port the packet is for
+ * @param queue Queue to use
+ * @param pko_command
+ * PKO HW command word
+ * @param packet Packet to send
+ * @param addr Physical address of a work queue entry or physical address to zero on complete.
+ * @param use_locking
+ * CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output
+ */
+static inline cvmx_pko_status_t cvmx_pko_send_packet_finish3(uint64_t ipd_port, uint64_t queue,
+ cvmx_pko_command_word0_t pko_command,
+ cvmx_buf_ptr_t packet, uint64_t addr, cvmx_pko_lock_t use_locking)
+{
+ cvmx_cmd_queue_result_t result;
+ if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+ cvmx_pow_tag_sw_wait();
+ result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
+ (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+ pko_command.u64,
+ packet.u64,
+ addr);
+ if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS))
+ {
+ cvmx_pko_doorbell(ipd_port, queue, 3);
+ return CVMX_PKO_SUCCESS;
+ }
+ else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL))
+ {
+ return CVMX_PKO_NO_MEMORY;
+ }
+ else
+ {
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+}
+
+/**
+ * Get the first pko_port for the (interface, index)
+ *
+ * @param interface
+ * @param index
+ */
+extern int cvmx_pko_get_base_pko_port(int interface, int index);
+
+/**
+ * Get the number of pko_ports for the (interface, index)
+ *
+ * @param interface
+ * @param index
+ */
+extern int cvmx_pko_get_num_pko_ports(int interface, int index);
+
+/**
+ * Return the pko output queue associated with a port and a specific core.
+ * In normal mode (PKO lockless operation is disabled), the value returned
+ * is the base queue.
+ *
+ * @param port Port number
+ * @param core Core to get queue for
+ *
+ * @return Core-specific output queue and -1 on error.
+ *
+ * Note: This function is invalid for o68.
+ */
+static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ cvmx_dprintf("cvmx_pko_get_base_queue_per_core() not"
+ "supported starting from o68!\n");
+ return -1;
+ }
+
+#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
+ #define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0 16
+#endif
+#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
+ #define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1 16
+#endif
+#ifndef CVMX_PKO_QUEUES_PER_PORT_SRIO0
+ /* We use two queues per port for SRIO0. Having two queues per
+ port with two ports gives us four queues, one for each mailbox */
+ #define CVMX_PKO_QUEUES_PER_PORT_SRIO0 2
+#endif
+#ifndef CVMX_PKO_QUEUES_PER_PORT_SRIO1
+ /* We use two queues per port for SRIO1. Having two queues per
+ port with two ports gives us four queues, one for each mailbox */
+ #define CVMX_PKO_QUEUES_PER_PORT_SRIO1 2
+#endif
+#ifndef CVMX_PKO_QUEUES_PER_PORT_SRIO2
+ /* We use two queues per port for SRIO2. Having two queues per
+ port with two ports gives us four queues, one for each mailbox */
+ #define CVMX_PKO_QUEUES_PER_PORT_SRIO2 2
+#endif
+ if (port < CVMX_PKO_MAX_PORTS_INTERFACE0)
+ return port * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + core;
+ else if (port >= 16 && port < 16 + CVMX_PKO_MAX_PORTS_INTERFACE1)
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
+ (port-16) * CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + core;
+ else if ((port >= 32) && (port < 36))
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
+ CVMX_PKO_MAX_PORTS_INTERFACE1 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 +
+ (port-32) * CVMX_PKO_QUEUES_PER_PORT_PCI;
+ else if ((port >= 36) && (port < 40))
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
+ CVMX_PKO_MAX_PORTS_INTERFACE1 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 +
+ 4 * CVMX_PKO_QUEUES_PER_PORT_PCI +
+ (port-36) * CVMX_PKO_QUEUES_PER_PORT_LOOP;
+ else if ((port >= 40) && (port < 42))
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
+ CVMX_PKO_MAX_PORTS_INTERFACE1 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 +
+ 4 * CVMX_PKO_QUEUES_PER_PORT_PCI +
+ 4 * CVMX_PKO_QUEUES_PER_PORT_LOOP +
+ (port-40) * CVMX_PKO_QUEUES_PER_PORT_SRIO0;
+ else if ((port >= 42) && (port < 44))
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
+ CVMX_PKO_MAX_PORTS_INTERFACE1 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 +
+ 4 * CVMX_PKO_QUEUES_PER_PORT_PCI +
+ 4 * CVMX_PKO_QUEUES_PER_PORT_LOOP +
+ 2 * CVMX_PKO_QUEUES_PER_PORT_SRIO0 +
+ (port-42) * CVMX_PKO_QUEUES_PER_PORT_SRIO1;
+ else if ((port >= 44) && (port < 46))
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
+ CVMX_PKO_MAX_PORTS_INTERFACE1 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 +
+ 4 * CVMX_PKO_QUEUES_PER_PORT_PCI +
+ 4 * CVMX_PKO_QUEUES_PER_PORT_LOOP +
+ 4 * CVMX_PKO_QUEUES_PER_PORT_SRIO0 +
+ (port-44) * CVMX_PKO_QUEUES_PER_PORT_SRIO2;
+ else
+ /* Given the limit on the number of ports we can map to
+ * CVMX_MAX_OUTPUT_QUEUES_STATIC queues (currently 256,
+ * divided among all cores), the remaining unmapped ports
+ * are assigned an illegal queue number */
+ return CVMX_PKO_ILLEGAL_QUEUE;
+}
+
+/**
+ * For a given port number, return the base pko output queue
+ * for the port.
+ *
+ * @param port IPD port number
+ * @return Base output queue
+ */
+extern int cvmx_pko_get_base_queue(int port);
+
+/**
+ * For a given port number, return the number of pko output queues.
+ *
+ * @param port IPD port number
+ * @return Number of output queues
+ */
+extern int cvmx_pko_get_num_queues(int port);
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+/**
+ * Get the status counters for a port.
+ *
+ * @param ipd_port Port number (ipd_port) to get statistics for.
+ * @param clear Set to 1 to clear the counters after they are read
+ * @param status Where to put the results.
+ *
+ * Note:
+ * - Only the doorbell for the base queue of the ipd_port is
+ * collected.
+ * - Retrieving the stats involves writing the index through
+ * CVMX_PKO_REG_READ_IDX and reading the stat CSRs, in that
+ * order. It is not MP-safe and the caller should guarantee
+ * atomicity.
+ */
+static inline void cvmx_pko_get_port_status(uint64_t ipd_port, uint64_t clear,
+ cvmx_pko_port_status_t *status)
+{
+ cvmx_pko_reg_read_idx_t pko_reg_read_idx;
+ cvmx_pko_mem_count0_t pko_mem_count0;
+ cvmx_pko_mem_count1_t pko_mem_count1;
+ int pko_port, port_base, port_limit;
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ port_base = cvmx_helper_get_pko_port(interface, index);
+ if (port_base == -1)
+ cvmx_dprintf("Warning: Invalid port_base\n");
+ port_limit = port_base + cvmx_pko_get_num_pko_ports(interface, index);
+ } else {
+ port_base = ipd_port;
+ port_limit = port_base + 1;
+ }
+
+ /*
+ * status->packets and status->octets
+ */
+ status->packets = 0;
+ status->octets = 0;
+ pko_reg_read_idx.u64 = 0;
+
+ for (pko_port = port_base; pko_port < port_limit; pko_port++)
+ {
+
+ /*
+ * In theory, one doesn't need to write the index csr every
+ * time, as one can set pko_reg_read_idx.s.inc to increment
+ * the index automatically; the exact behavior has not been
+ * verified (XXX).
+ */
+ pko_reg_read_idx.s.index = pko_port;
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
+
+ pko_mem_count0.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT0);
+ status->packets += pko_mem_count0.s.count;
+ if (clear)
+ {
+ pko_mem_count0.s.count = pko_port;
+ cvmx_write_csr(CVMX_PKO_MEM_COUNT0, pko_mem_count0.u64);
+ }
+
+ pko_mem_count1.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT1);
+ status->octets += pko_mem_count1.s.count;
+ if (clear)
+ {
+ pko_mem_count1.s.count = pko_port;
+ cvmx_write_csr(CVMX_PKO_MEM_COUNT1, pko_mem_count1.u64);
+ }
+ }
+
+ /*
+ * status->doorbell
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ {
+ cvmx_pko_mem_debug9_t debug9;
+ pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(ipd_port);
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
+ debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
+ status->doorbell = debug9.cn38xx.doorbell;
+ }
+ else
+ {
+ cvmx_pko_mem_debug8_t debug8;
+ pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(ipd_port);
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
+ debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ status->doorbell = debug8.cn68xx.doorbell;
+ else
+ status->doorbell = debug8.cn58xx.doorbell;
+ }
+}
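
An illustrative caller for the stats helper above (print_port_stats is hypothetical; per the note, access to the shared read-index CSR must be serialized by the caller):

    static void print_port_stats(uint64_t ipd_port)
    {
        cvmx_pko_port_status_t st;

        cvmx_pko_get_port_status(ipd_port, 0 /* don't clear */, &st);
        cvmx_dprintf("port %llu: %u packets, %llu octets, doorbell %llu\n",
                     (unsigned long long)ipd_port, st.packets,
                     (unsigned long long)st.octets,
                     (unsigned long long)st.doorbell);
    }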
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
+
+
+/**
+ * Rate limit a PKO port to a max packets/sec. This function is only
+ * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
+ *
+ * @param port Port to rate limit
+ * @param packets_s Maximum packet/sec
+ * @param burst Maximum number of packets to burst in a row before rate
+ * limiting cuts in.
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
+
+/**
+ * Rate limit a PKO port to a max bits/sec. This function is only
+ * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
+ *
+ * @param port Port to rate limit
+ * @param bits_s PKO rate limit in bits/sec
+ * @param burst Maximum number of bits to burst before rate
+ * limiting cuts in.
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst);
+
+/**
+ * @INTERNAL
+ *
+ * Retrieve the PKO pipe number for a port
+ *
+ * @param interface
+ * @param index
+ *
+ * @return negative on error.
+ *
+ * This applies only to the non-loopback interfaces.
+ *
+ */
+extern int __cvmx_pko_get_pipe(int interface, int index);
+
+/**
+ * For a given PKO port number, return the base output queue
+ * for the port.
+ *
+ * @param pko_port PKO port number
+ * @return Base output queue
+ */
+extern int cvmx_pko_get_base_queue_pkoid(int pko_port);
+
+/**
+ * For a given PKO port number, return the number of output queues
+ * for the port.
+ *
+ * @param pko_port PKO port number
+ * @return the number of output queues
+ */
+extern int cvmx_pko_get_num_queues_pkoid(int pko_port);
+
+/**
+ * Ring the packet output doorbell. This tells the packet
+ * output hardware that "len" command words have been added
+ * to its pending list. This command includes the required
+ * CVMX_SYNCWS before the doorbell ring.
+ *
+ * @param pko_port Port the packet is for
+ * @param queue Queue the packet is for
+ * @param len Length of the command in 64 bit words
+ */
+static inline void cvmx_pko_doorbell_pkoid(uint64_t pko_port, uint64_t queue, uint64_t len)
+{
+ cvmx_pko_doorbell_address_t ptr;
+
+ ptr.u64 = 0;
+ ptr.s.mem_space = CVMX_IO_SEG;
+ ptr.s.did = CVMX_OCT_DID_PKT_SEND;
+ ptr.s.is_io = 1;
+ ptr.s.port = pko_port;
+ ptr.s.queue = queue;
+ CVMX_SYNCWS; /* Need to make sure output queue data is in DRAM before doorbell write */
+ cvmx_write_io(ptr.u64, len);
+}
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this,
+ * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and
+ * cvmx_pko_send_packet_finish_pkoid().
+ *
+ * @param pko_port Port to send it on
+ * @param queue Queue to use
+ * @param pko_command
+ * PKO HW command word
+ * @param packet Packet to send
+ * @param use_locking
+ * CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output
+ */
+static inline cvmx_pko_status_t cvmx_pko_send_packet_finish_pkoid(int pko_port, uint64_t queue,
+ cvmx_pko_command_word0_t pko_command,
+ cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)
+{
+ cvmx_cmd_queue_result_t result;
+ if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+ cvmx_pow_tag_sw_wait();
+ result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
+ (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+ pko_command.u64,
+ packet.u64);
+ if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS))
+ {
+ cvmx_pko_doorbell_pkoid(pko_port, queue, 2);
+ return CVMX_PKO_SUCCESS;
+ }
+ else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL))
+ {
+ return CVMX_PKO_NO_MEMORY;
+ }
+ else
+ {
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+}
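+
+/* Usage sketch (editorial example, not part of the imported SDK sources):
+ * a minimal single-segment transmit using the pkoid variants above. The
+ * command-word and buffer-pointer field names (total_bytes, segs, addr,
+ * size) and cvmx_ptr_to_phys() are assumed from elsewhere in this import.
+ *
+ *   static void example_send(int pko_port, void *data, int len)
+ *   {
+ *       uint64_t queue = cvmx_pko_get_base_queue_pkoid(pko_port);
+ *       cvmx_pko_command_word0_t cmd;
+ *       cvmx_buf_ptr_t pkt;
+ *
+ *       // Claim the queue before building the command
+ *       cvmx_pko_send_packet_prepare(pko_port, queue, CVMX_PKO_LOCK_CMD_QUEUE);
+ *
+ *       cmd.u64 = 0;
+ *       cmd.s.total_bytes = len;   // total packet length in bytes
+ *       cmd.s.segs = 1;            // one buffer segment
+ *
+ *       pkt.u64 = 0;
+ *       pkt.s.addr = cvmx_ptr_to_phys(data);
+ *       pkt.s.size = len;
+ *
+ *       if (cvmx_pko_send_packet_finish_pkoid(pko_port, queue, cmd, pkt,
+ *               CVMX_PKO_LOCK_CMD_QUEUE) != CVMX_PKO_SUCCESS)
+ *           ;  // queue full or invalid; the caller still owns the buffer
+ *   }
+ */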
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this,
+ * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and
+ * cvmx_pko_send_packet_finish_pkoid().
+ *
+ * @param pko_port The PKO port the packet is for
+ * @param queue Queue to use
+ * @param pko_command
+ * PKO HW command word
+ * @param packet Packet to send
+ * @param addr Physical address of a work queue entry, or a physical address to zero on completion.
+ * @param use_locking
+ * CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return CVMX_PKO_SUCCESS on success, or an error code on failure
+ */
+static inline cvmx_pko_status_t cvmx_pko_send_packet_finish3_pkoid(uint64_t pko_port, uint64_t queue,
+ cvmx_pko_command_word0_t pko_command,
+ cvmx_buf_ptr_t packet, uint64_t addr, cvmx_pko_lock_t use_locking)
+{
+ cvmx_cmd_queue_result_t result;
+ if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+ cvmx_pow_tag_sw_wait();
+ result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
+ (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+ pko_command.u64,
+ packet.u64,
+ addr);
+ if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS))
+ {
+ cvmx_pko_doorbell_pkoid(pko_port, queue, 3);
+ return CVMX_PKO_SUCCESS;
+ }
+ else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL))
+ {
+ return CVMX_PKO_NO_MEMORY;
+ }
+ else
+ {
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+}
+
+#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_PKO_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pko.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-platform.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-platform.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-platform.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,244 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file is responsible for including all system-dependent
+ * headers for the cvmx-* files.
+ *
+ * <hr>$Revision: 70030 $<hr>
+*/
+
+#ifndef __CVMX_PLATFORM_H__
+#define __CVMX_PLATFORM_H__
+
+#include "cvmx-abi.h"
+
+#ifdef __cplusplus
+#define EXTERN_ASM extern "C"
+#else
+#define EXTERN_ASM extern
+#endif
+
+/* This file defines macros for determining the current
+ build environment. It defines a single CVMX_BUILD_FOR_*
+ macro representing the target of the build. The current
+ possibilities are:
+ CVMX_BUILD_FOR_UBOOT
+ CVMX_BUILD_FOR_LINUX_KERNEL
+ CVMX_BUILD_FOR_LINUX_USER
+ CVMX_BUILD_FOR_LINUX_HOST
+ CVMX_BUILD_FOR_VXWORKS
+ CVMX_BUILD_FOR_STANDALONE */
+#if defined(__U_BOOT__)
+ /* We are being used inside of U-Boot */
+ #define CVMX_BUILD_FOR_UBOOT
+#elif defined(__linux__)
+ #if defined(__KERNEL__)
+ /* We are in the Linux kernel on Octeon */
+ #define CVMX_BUILD_FOR_LINUX_KERNEL
+ #elif !defined(__mips__)
+ /* We are being used under Linux but not on Octeon. Assume
+ we are on a Linux host with an Octeon target over PCI/PCIe */
+ #ifndef CVMX_BUILD_FOR_LINUX_HOST
+ #define CVMX_BUILD_FOR_LINUX_HOST
+ #endif
+ #else
+ #ifdef CVMX_BUILD_FOR_LINUX_HOST
+ /* This is a manual special case. The host PCI utilities can
+ be configured to run on Octeon. In this case it is impossible
+ to tell the difference between the normal userspace setup
+ and using cvmx_read/write_csr over the PCI bus. The host
+ utilities force this define to fix this */
+ #else
+ /* We are in the Linux userspace on Octeon */
+ #define CVMX_BUILD_FOR_LINUX_USER
+ #endif
+ #endif
+#elif defined(_WRS_KERNEL) || defined(VXWORKS_USER_MAPPINGS)
+ /* We are in VxWorks on Octeon */
+ #define CVMX_BUILD_FOR_VXWORKS
+#elif defined(_OCTEON_TOOLCHAIN_RUNTIME)
+ /* We are building the simple exec toolchain runtime (newlib)
+ library. Only features available on all Octeon models should be used. */
+ #define CVMX_BUILD_FOR_TOOLCHAIN
+#elif defined(__FreeBSD__) && defined(_KERNEL)
+ #define CVMX_BUILD_FOR_FREEBSD_KERNEL
+#else
+ /* We are building a simple exec standalone image for Octeon */
+ #define CVMX_BUILD_FOR_STANDALONE
+#endif
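+
+/* Example (editorial sketch): code including this header can branch on the
+ * detected build target. MY_LOG is a hypothetical consumer-side macro.
+ *
+ *   #if defined(CVMX_BUILD_FOR_LINUX_KERNEL)
+ *       #define MY_LOG(fmt, ...) printk(KERN_INFO fmt, ##__VA_ARGS__)
+ *   #else
+ *       #define MY_LOG(fmt, ...) printf(fmt, ##__VA_ARGS__)
+ *   #endif
+ */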
+
+
+/* To have a global variable be shared among all cores,
+ * declare with the CVMX_SHARED attribute. Ex:
+ * CVMX_SHARED int myglobal;
+ * This will cause the variable to be placed in a special
+ * section that the loader will map as shared for all cores.
+ * This is for data structures used by software ONLY,
+ * as the section is not 1-1 VA-PA mapped.
+ */
+#if defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#define CVMX_SHARED
+#else
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+#define CVMX_SHARED __attribute__ ((cvmx_shared))
+#else
+#define CVMX_SHARED
+#endif
+#endif
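+
+/* Example (editorial sketch): a per-application counter visible to every
+ * core in a simple executive image. Since all cores share the storage,
+ * updates go through cvmx_atomic_add32() from cvmx-atomic.h (part of this
+ * import).
+ *
+ *   CVMX_SHARED int32_t packets_seen;
+ *
+ *   static void count_packet(void)
+ *   {
+ *       cvmx_atomic_add32(&packets_seen, 1);
+ *   }
+ */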
+
+#if defined(CVMX_BUILD_FOR_UBOOT)
+
+ #include <common.h>
+ #include "cvmx-sysinfo.h"
+
+#elif defined(CVMX_BUILD_FOR_LINUX_KERNEL)
+
+ #include <linux/kernel.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <stdarg.h>
+
+#elif defined(CVMX_BUILD_FOR_LINUX_USER)
+
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <stdarg.h>
+ #include <string.h>
+ #include <assert.h>
+ #include <fcntl.h>
+ #include <sys/mman.h>
+ #include <unistd.h>
+ #include <errno.h>
+ #include <sys/sysmips.h>
+ #define MIPS_CAVIUM_XKPHYS_READ 2010 /* XKPHYS */
+ #define MIPS_CAVIUM_XKPHYS_WRITE 2011 /* XKPHYS */
+
+/* Enable access to XKPHYS segments. A warning message is printed on
+ error unless warn_count is nonzero. */
+static inline void cvmx_linux_enable_xkphys_access(int32_t warn_count)
+{
+ int ret;
+ ret = sysmips(MIPS_CAVIUM_XKPHYS_WRITE, getpid(), 3, 0);
+ if (ret != 0 && !warn_count) {
+ switch(errno) {
+ case EINVAL:
+ perror("sysmips(MIPS_CAVIUM_XKPHYS_WRITE) failed.\n"
+ " Did you configure your kernel with both:\n"
+ " CONFIG_CAVIUM_OCTEON_USER_MEM_PER_PROCESS *and*\n"
+ " CONFIG_CAVIUM_OCTEON_USER_IO_PER_PROCESS?");
+ break;
+ case EPERM:
+ perror("sysmips(MIPS_CAVIUM_XKPHYS_WRITE) failed.\n"
+ " Are you running as root?");
+ break;
+ default:
+ perror("sysmips(MIPS_CAVIUM_XKPHYS_WRITE) failed");
+ break;
+ }
+ }
+}
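+
+/* Example (editorial sketch): a userspace tool would typically enable XKPHYS
+ * access once at startup, before any CSR accesses:
+ *
+ *   int main(void)
+ *   {
+ *       cvmx_linux_enable_xkphys_access(0);  // 0: print a warning on failure
+ *       // ... cvmx_read_csr()/cvmx_write_csr() calls may follow ...
+ *       return 0;
+ *   }
+ */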
+
+#elif defined(CVMX_BUILD_FOR_LINUX_HOST)
+
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <stdarg.h>
+ #include <string.h>
+ #include <assert.h>
+ #include <fcntl.h>
+ #include <sys/mman.h>
+ #include <unistd.h>
+
+#elif defined(CVMX_BUILD_FOR_VXWORKS)
+
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <stdarg.h>
+ #include <string.h>
+ #include <assert.h>
+
+#elif defined(CVMX_BUILD_FOR_STANDALONE)
+
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <stdarg.h>
+ #include <string.h>
+ #include <assert.h>
+
+#elif defined(CVMX_BUILD_FOR_TOOLCHAIN)
+
+ #ifndef __ASSEMBLY__
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <stdarg.h>
+ #include <string.h>
+ #include <assert.h>
+ #endif
+ #include "rename-cvmx.h"
+
+#elif defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+
+ #include <mips/cavium/cvmx_config.h>
+
+#else
+
+ #error Unexpected CVMX_BUILD_FOR_* macro
+
+#endif
+
+#endif /* __CVMX_PLATFORM_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-platform.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pow-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pow-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pow-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,2012 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-pow-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pow.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_POW_DEFS_H__
+#define __CVMX_POW_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_BIST_STAT CVMX_POW_BIST_STAT_FUNC()
+static inline uint64_t CVMX_POW_BIST_STAT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_BIST_STAT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000003F8ull);
+}
+#else
+#define CVMX_POW_BIST_STAT (CVMX_ADD_IO_SEG(0x00016700000003F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_DS_PC CVMX_POW_DS_PC_FUNC()
+static inline uint64_t CVMX_POW_DS_PC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_DS_PC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000398ull);
+}
+#else
+#define CVMX_POW_DS_PC (CVMX_ADD_IO_SEG(0x0001670000000398ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_ECC_ERR CVMX_POW_ECC_ERR_FUNC()
+static inline uint64_t CVMX_POW_ECC_ERR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_ECC_ERR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000218ull);
+}
+#else
+#define CVMX_POW_ECC_ERR (CVMX_ADD_IO_SEG(0x0001670000000218ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_INT_CTL CVMX_POW_INT_CTL_FUNC()
+static inline uint64_t CVMX_POW_INT_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_INT_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000220ull);
+}
+#else
+#define CVMX_POW_INT_CTL (CVMX_ADD_IO_SEG(0x0001670000000220ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_POW_IQ_CNTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_POW_IQ_CNTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000000340ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_POW_IQ_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000000340ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_IQ_COM_CNT CVMX_POW_IQ_COM_CNT_FUNC()
+static inline uint64_t CVMX_POW_IQ_COM_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_IQ_COM_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000388ull);
+}
+#else
+#define CVMX_POW_IQ_COM_CNT (CVMX_ADD_IO_SEG(0x0001670000000388ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_IQ_INT CVMX_POW_IQ_INT_FUNC()
+static inline uint64_t CVMX_POW_IQ_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_IQ_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000238ull);
+}
+#else
+#define CVMX_POW_IQ_INT (CVMX_ADD_IO_SEG(0x0001670000000238ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_IQ_INT_EN CVMX_POW_IQ_INT_EN_FUNC()
+static inline uint64_t CVMX_POW_IQ_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_IQ_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000240ull);
+}
+#else
+#define CVMX_POW_IQ_INT_EN (CVMX_ADD_IO_SEG(0x0001670000000240ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_POW_IQ_THRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_POW_IQ_THRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00016700000003A0ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_POW_IQ_THRX(offset) (CVMX_ADD_IO_SEG(0x00016700000003A0ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_NOS_CNT CVMX_POW_NOS_CNT_FUNC()
+static inline uint64_t CVMX_POW_NOS_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_NOS_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000228ull);
+}
+#else
+#define CVMX_POW_NOS_CNT (CVMX_ADD_IO_SEG(0x0001670000000228ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_NW_TIM CVMX_POW_NW_TIM_FUNC()
+static inline uint64_t CVMX_POW_NW_TIM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_NW_TIM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000210ull);
+}
+#else
+#define CVMX_POW_NW_TIM (CVMX_ADD_IO_SEG(0x0001670000000210ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_PF_RST_MSK CVMX_POW_PF_RST_MSK_FUNC()
+static inline uint64_t CVMX_POW_PF_RST_MSK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_PF_RST_MSK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000230ull);
+}
+#else
+#define CVMX_POW_PF_RST_MSK (CVMX_ADD_IO_SEG(0x0001670000000230ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_POW_PP_GRP_MSKX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_POW_PP_GRP_MSKX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000000000ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_POW_PP_GRP_MSKX(offset) (CVMX_ADD_IO_SEG(0x0001670000000000ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_POW_QOS_RNDX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_POW_QOS_RNDX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00016700000001C0ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_POW_QOS_RNDX(offset) (CVMX_ADD_IO_SEG(0x00016700000001C0ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_POW_QOS_THRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_POW_QOS_THRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000000180ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_POW_QOS_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000180ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_TS_PC CVMX_POW_TS_PC_FUNC()
+static inline uint64_t CVMX_POW_TS_PC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_TS_PC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000390ull);
+}
+#else
+#define CVMX_POW_TS_PC (CVMX_ADD_IO_SEG(0x0001670000000390ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_WA_COM_PC CVMX_POW_WA_COM_PC_FUNC()
+static inline uint64_t CVMX_POW_WA_COM_PC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_WA_COM_PC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000380ull);
+}
+#else
+#define CVMX_POW_WA_COM_PC (CVMX_ADD_IO_SEG(0x0001670000000380ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_POW_WA_PCX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_POW_WA_PCX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000000300ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_POW_WA_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000300ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_WQ_INT CVMX_POW_WQ_INT_FUNC()
+static inline uint64_t CVMX_POW_WQ_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_WQ_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000200ull);
+}
+#else
+#define CVMX_POW_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000000200ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_POW_WQ_INT_CNTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 15)))))
+ cvmx_warn("CVMX_POW_WQ_INT_CNTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000000100ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_POW_WQ_INT_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000000100ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_POW_WQ_INT_PC CVMX_POW_WQ_INT_PC_FUNC()
+static inline uint64_t CVMX_POW_WQ_INT_PC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_POW_WQ_INT_PC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000000208ull);
+}
+#else
+#define CVMX_POW_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000000208ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_POW_WQ_INT_THRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 15)))))
+ cvmx_warn("CVMX_POW_WQ_INT_THRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000000080ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_POW_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000080ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_POW_WS_PCX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 15)))))
+ cvmx_warn("CVMX_POW_WS_PCX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000000280ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_POW_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000280ull) + ((offset) & 15) * 8)
+#endif
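+
+/* Example (editorial sketch): when CVMX_ENABLE_CSR_ADDRESS_CHECKING is
+ * nonzero at compile time, the macros above expand to inline functions that
+ * validate the chip model and offset before forming the address:
+ *
+ *   uint64_t addr = CVMX_POW_IQ_CNTX(3);  // warns at runtime if offset 3 is
+ *                                         // invalid for the running chip
+ *   uint64_t cnt = cvmx_read_csr(addr);
+ */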
+
+/**
+ * cvmx_pow_bist_stat
+ *
+ * POW_BIST_STAT = POW BIST Status Register
+ *
+ * Contains the BIST status for the POW memories ('0' = pass, '1' = fail).
+ *
+ * Also contains the BIST status for the PP's. Each bit in the PP field is the OR of all BIST
+ * results for the corresponding physical PP ('0' = pass, '1' = fail).
+ */
+union cvmx_pow_bist_stat {
+ uint64_t u64;
+ struct cvmx_pow_bist_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pp : 16; /**< Physical PP BIST status */
+ uint64_t reserved_0_15 : 16;
+#else
+ uint64_t reserved_0_15 : 16;
+ uint64_t pp : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pow_bist_stat_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t pp : 1; /**< Physical PP BIST status */
+ uint64_t reserved_9_15 : 7;
+ uint64_t cam : 1; /**< POW CAM BIST status */
+ uint64_t nbt1 : 1; /**< NCB transmitter memory 1 BIST status */
+ uint64_t nbt0 : 1; /**< NCB transmitter memory 0 BIST status */
+ uint64_t index : 1; /**< Index memory BIST status */
+ uint64_t fidx : 1; /**< Forward index memory BIST status */
+ uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */
+ uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */
+ uint64_t pend : 1; /**< Pending switch memory BIST status */
+ uint64_t adr : 1; /**< Address memory BIST status */
+#else
+ uint64_t adr : 1;
+ uint64_t pend : 1;
+ uint64_t nbr0 : 1;
+ uint64_t nbr1 : 1;
+ uint64_t fidx : 1;
+ uint64_t index : 1;
+ uint64_t nbt0 : 1;
+ uint64_t nbt1 : 1;
+ uint64_t cam : 1;
+ uint64_t reserved_9_15 : 7;
+ uint64_t pp : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn30xx;
+ struct cvmx_pow_bist_stat_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t pp : 2; /**< Physical PP BIST status */
+ uint64_t reserved_9_15 : 7;
+ uint64_t cam : 1; /**< POW CAM BIST status */
+ uint64_t nbt1 : 1; /**< NCB transmitter memory 1 BIST status */
+ uint64_t nbt0 : 1; /**< NCB transmitter memory 0 BIST status */
+ uint64_t index : 1; /**< Index memory BIST status */
+ uint64_t fidx : 1; /**< Forward index memory BIST status */
+ uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */
+ uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */
+ uint64_t pend : 1; /**< Pending switch memory BIST status */
+ uint64_t adr : 1; /**< Address memory BIST status */
+#else
+ uint64_t adr : 1;
+ uint64_t pend : 1;
+ uint64_t nbr0 : 1;
+ uint64_t nbr1 : 1;
+ uint64_t fidx : 1;
+ uint64_t index : 1;
+ uint64_t nbt0 : 1;
+ uint64_t nbt1 : 1;
+ uint64_t cam : 1;
+ uint64_t reserved_9_15 : 7;
+ uint64_t pp : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn31xx;
+ struct cvmx_pow_bist_stat_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pp : 16; /**< Physical PP BIST status */
+ uint64_t reserved_10_15 : 6;
+ uint64_t cam : 1; /**< POW CAM BIST status */
+ uint64_t nbt : 1; /**< NCB transmitter memory BIST status */
+ uint64_t index : 1; /**< Index memory BIST status */
+ uint64_t fidx : 1; /**< Forward index memory BIST status */
+ uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */
+ uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */
+ uint64_t pend1 : 1; /**< Pending switch memory 1 BIST status */
+ uint64_t pend0 : 1; /**< Pending switch memory 0 BIST status */
+ uint64_t adr1 : 1; /**< Address memory 1 BIST status */
+ uint64_t adr0 : 1; /**< Address memory 0 BIST status */
+#else
+ uint64_t adr0 : 1;
+ uint64_t adr1 : 1;
+ uint64_t pend0 : 1;
+ uint64_t pend1 : 1;
+ uint64_t nbr0 : 1;
+ uint64_t nbr1 : 1;
+ uint64_t fidx : 1;
+ uint64_t index : 1;
+ uint64_t nbt : 1;
+ uint64_t cam : 1;
+ uint64_t reserved_10_15 : 6;
+ uint64_t pp : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn38xx;
+ struct cvmx_pow_bist_stat_cn38xx cn38xxp2;
+ struct cvmx_pow_bist_stat_cn31xx cn50xx;
+ struct cvmx_pow_bist_stat_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t pp : 4; /**< Physical PP BIST status */
+ uint64_t reserved_9_15 : 7;
+ uint64_t cam : 1; /**< POW CAM BIST status */
+ uint64_t nbt1 : 1; /**< NCB transmitter memory 1 BIST status */
+ uint64_t nbt0 : 1; /**< NCB transmitter memory 0 BIST status */
+ uint64_t index : 1; /**< Index memory BIST status */
+ uint64_t fidx : 1; /**< Forward index memory BIST status */
+ uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */
+ uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */
+ uint64_t pend : 1; /**< Pending switch memory BIST status */
+ uint64_t adr : 1; /**< Address memory BIST status */
+#else
+ uint64_t adr : 1;
+ uint64_t pend : 1;
+ uint64_t nbr0 : 1;
+ uint64_t nbr1 : 1;
+ uint64_t fidx : 1;
+ uint64_t index : 1;
+ uint64_t nbt0 : 1;
+ uint64_t nbt1 : 1;
+ uint64_t cam : 1;
+ uint64_t reserved_9_15 : 7;
+ uint64_t pp : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_pow_bist_stat_cn52xx cn52xxp1;
+ struct cvmx_pow_bist_stat_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t pp : 12; /**< Physical PP BIST status */
+ uint64_t reserved_10_15 : 6;
+ uint64_t cam : 1; /**< POW CAM BIST status */
+ uint64_t nbt : 1; /**< NCB transmitter memory BIST status */
+ uint64_t index : 1; /**< Index memory BIST status */
+ uint64_t fidx : 1; /**< Forward index memory BIST status */
+ uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */
+ uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */
+ uint64_t pend1 : 1; /**< Pending switch memory 1 BIST status */
+ uint64_t pend0 : 1; /**< Pending switch memory 0 BIST status */
+ uint64_t adr1 : 1; /**< Address memory 1 BIST status */
+ uint64_t adr0 : 1; /**< Address memory 0 BIST status */
+#else
+ uint64_t adr0 : 1;
+ uint64_t adr1 : 1;
+ uint64_t pend0 : 1;
+ uint64_t pend1 : 1;
+ uint64_t nbr0 : 1;
+ uint64_t nbr1 : 1;
+ uint64_t fidx : 1;
+ uint64_t index : 1;
+ uint64_t nbt : 1;
+ uint64_t cam : 1;
+ uint64_t reserved_10_15 : 6;
+ uint64_t pp : 12;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn56xx;
+ struct cvmx_pow_bist_stat_cn56xx cn56xxp1;
+ struct cvmx_pow_bist_stat_cn38xx cn58xx;
+ struct cvmx_pow_bist_stat_cn38xx cn58xxp1;
+ struct cvmx_pow_bist_stat_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t pp : 4; /**< Physical PP BIST status */
+ uint64_t reserved_12_15 : 4;
+ uint64_t cam : 1; /**< POW CAM BIST status */
+ uint64_t nbr : 3; /**< NCB receiver memory BIST status */
+ uint64_t nbt : 4; /**< NCB transmitter memory BIST status */
+ uint64_t index : 1; /**< Index memory BIST status */
+ uint64_t fidx : 1; /**< Forward index memory BIST status */
+ uint64_t pend : 1; /**< Pending switch memory BIST status */
+ uint64_t adr : 1; /**< Address memory BIST status */
+#else
+ uint64_t adr : 1;
+ uint64_t pend : 1;
+ uint64_t fidx : 1;
+ uint64_t index : 1;
+ uint64_t nbt : 4;
+ uint64_t nbr : 3;
+ uint64_t cam : 1;
+ uint64_t reserved_12_15 : 4;
+ uint64_t pp : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn61xx;
+ struct cvmx_pow_bist_stat_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t pp : 6; /**< Physical PP BIST status */
+ uint64_t reserved_12_15 : 4;
+ uint64_t cam : 1; /**< POW CAM BIST status */
+ uint64_t nbr : 3; /**< NCB receiver memory BIST status */
+ uint64_t nbt : 4; /**< NCB transmitter memory BIST status */
+ uint64_t index : 1; /**< Index memory BIST status */
+ uint64_t fidx : 1; /**< Forward index memory BIST status */
+ uint64_t pend : 1; /**< Pending switch memory BIST status */
+ uint64_t adr : 1; /**< Address memory BIST status */
+#else
+ uint64_t adr : 1;
+ uint64_t pend : 1;
+ uint64_t fidx : 1;
+ uint64_t index : 1;
+ uint64_t nbt : 4;
+ uint64_t nbr : 3;
+ uint64_t cam : 1;
+ uint64_t reserved_12_15 : 4;
+ uint64_t pp : 6;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } cn63xx;
+ struct cvmx_pow_bist_stat_cn63xx cn63xxp1;
+ struct cvmx_pow_bist_stat_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63 : 38;
+ uint64_t pp : 10; /**< Physical PP BIST status */
+ uint64_t reserved_12_15 : 4;
+ uint64_t cam : 1; /**< POW CAM BIST status */
+ uint64_t nbr : 3; /**< NCB receiver memory BIST status */
+ uint64_t nbt : 4; /**< NCB transmitter memory BIST status */
+ uint64_t index : 1; /**< Index memory BIST status */
+ uint64_t fidx : 1; /**< Forward index memory BIST status */
+ uint64_t pend : 1; /**< Pending switch memory BIST status */
+ uint64_t adr : 1; /**< Address memory BIST status */
+#else
+ uint64_t adr : 1;
+ uint64_t pend : 1;
+ uint64_t fidx : 1;
+ uint64_t index : 1;
+ uint64_t nbt : 4;
+ uint64_t nbr : 3;
+ uint64_t cam : 1;
+ uint64_t reserved_12_15 : 4;
+ uint64_t pp : 10;
+ uint64_t reserved_26_63 : 38;
+#endif
+ } cn66xx;
+ struct cvmx_pow_bist_stat_cn61xx cnf71xx;
+};
+typedef union cvmx_pow_bist_stat cvmx_pow_bist_stat_t;
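+
+/* Example (editorial sketch): decoding the BIST result, where a set bit
+ * means the corresponding memory or PP failed ('1' = fail, per above):
+ *
+ *   cvmx_pow_bist_stat_t bist;
+ *   bist.u64 = cvmx_read_csr(CVMX_POW_BIST_STAT);
+ *   if (bist.s.pp)
+ *       cvmx_warn("POW BIST: PP failure mask 0x%x\n", (unsigned)bist.s.pp);
+ */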
+
+/**
+ * cvmx_pow_ds_pc
+ *
+ * POW_DS_PC = POW De-Schedule Performance Counter
+ *
+ * Counts the number of de-schedule requests. Write to clear.
+ */
+union cvmx_pow_ds_pc {
+ uint64_t u64;
+ struct cvmx_pow_ds_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ds_pc : 32; /**< De-schedule performance counter */
+#else
+ uint64_t ds_pc : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pow_ds_pc_s cn30xx;
+ struct cvmx_pow_ds_pc_s cn31xx;
+ struct cvmx_pow_ds_pc_s cn38xx;
+ struct cvmx_pow_ds_pc_s cn38xxp2;
+ struct cvmx_pow_ds_pc_s cn50xx;
+ struct cvmx_pow_ds_pc_s cn52xx;
+ struct cvmx_pow_ds_pc_s cn52xxp1;
+ struct cvmx_pow_ds_pc_s cn56xx;
+ struct cvmx_pow_ds_pc_s cn56xxp1;
+ struct cvmx_pow_ds_pc_s cn58xx;
+ struct cvmx_pow_ds_pc_s cn58xxp1;
+ struct cvmx_pow_ds_pc_s cn61xx;
+ struct cvmx_pow_ds_pc_s cn63xx;
+ struct cvmx_pow_ds_pc_s cn63xxp1;
+ struct cvmx_pow_ds_pc_s cn66xx;
+ struct cvmx_pow_ds_pc_s cnf71xx;
+};
+typedef union cvmx_pow_ds_pc cvmx_pow_ds_pc_t;
+
+/**
+ * cvmx_pow_ecc_err
+ *
+ * POW_ECC_ERR = POW ECC Error Register
+ *
+ * Contains the single and double error bits and the corresponding interrupt enables for the ECC-
+ * protected POW index memory. Also contains the syndrome value in the event of an ECC error.
+ *
+ * Also contains the remote pointer error bit and interrupt enable. RPE is set when the POW detected
+ * corruption on one or more of the input queue lists in L2/DRAM (POW's local copy of the tail pointer
+ * for the L2/DRAM input queue did not match the last entry on the list). This is caused by
+ * L2/DRAM corruption, and is generally a fatal error because it likely caused POW to load bad work
+ * queue entries.
+ *
+ * This register also contains the illegal operation error bits and the corresponding interrupt
+ * enables as follows:
+ *
+ * <0> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP from PP in NULL_NULL state
+ * <1> Received SWTAG/SWTAG_DESCH/DESCH/UPD_WQP from PP in NULL state
+ * <2> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/GET_WORK from PP with pending tag switch to ORDERED or ATOMIC
+ * <3> Received SWTAG/SWTAG_FULL/SWTAG_DESCH from PP with tag specified as NULL_NULL
+ * <4> Received SWTAG_FULL/SWTAG_DESCH from PP with tag specified as NULL
+ * <5> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/NULL_RD from PP with GET_WORK pending
+ * <6> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/NULL_RD from PP with NULL_RD pending
+ * <7> Received CLR_NSCHED from PP with SWTAG_DESCH/DESCH/CLR_NSCHED pending
+ * <8> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/NULL_RD from PP with CLR_NSCHED pending
+ * <9> Received illegal opcode
+ * <10> Received ADD_WORK with tag specified as NULL_NULL
+ * <11> Received DBG load from PP with DBG load pending
+ * <12> Received CSR load from PP with CSR load pending
+ */
+union cvmx_pow_ecc_err {
+ uint64_t u64;
+ struct cvmx_pow_ecc_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63 : 19;
+ uint64_t iop_ie : 13; /**< Illegal operation interrupt enables */
+ uint64_t reserved_29_31 : 3;
+ uint64_t iop : 13; /**< Illegal operation errors */
+ uint64_t reserved_14_15 : 2;
+ uint64_t rpe_ie : 1; /**< Remote pointer error interrupt enable */
+ uint64_t rpe : 1; /**< Remote pointer error */
+ uint64_t reserved_9_11 : 3;
+ uint64_t syn : 5; /**< Syndrome value (only valid when DBE or SBE is set) */
+ uint64_t dbe_ie : 1; /**< Double bit error interrupt enable */
+ uint64_t sbe_ie : 1; /**< Single bit error interrupt enable */
+ uint64_t dbe : 1; /**< Double bit error */
+ uint64_t sbe : 1; /**< Single bit error */
+#else
+ uint64_t sbe : 1;
+ uint64_t dbe : 1;
+ uint64_t sbe_ie : 1;
+ uint64_t dbe_ie : 1;
+ uint64_t syn : 5;
+ uint64_t reserved_9_11 : 3;
+ uint64_t rpe : 1;
+ uint64_t rpe_ie : 1;
+ uint64_t reserved_14_15 : 2;
+ uint64_t iop : 13;
+ uint64_t reserved_29_31 : 3;
+ uint64_t iop_ie : 13;
+ uint64_t reserved_45_63 : 19;
+#endif
+ } s;
+ struct cvmx_pow_ecc_err_s cn30xx;
+ struct cvmx_pow_ecc_err_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t rpe_ie : 1; /**< Remote pointer error interrupt enable */
+ uint64_t rpe : 1; /**< Remote pointer error */
+ uint64_t reserved_9_11 : 3;
+ uint64_t syn : 5; /**< Syndrome value (only valid when DBE or SBE is set) */
+ uint64_t dbe_ie : 1; /**< Double bit error interrupt enable */
+ uint64_t sbe_ie : 1; /**< Single bit error interrupt enable */
+ uint64_t dbe : 1; /**< Double bit error */
+ uint64_t sbe : 1; /**< Single bit error */
+#else
+ uint64_t sbe : 1;
+ uint64_t dbe : 1;
+ uint64_t sbe_ie : 1;
+ uint64_t dbe_ie : 1;
+ uint64_t syn : 5;
+ uint64_t reserved_9_11 : 3;
+ uint64_t rpe : 1;
+ uint64_t rpe_ie : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn31xx;
+ struct cvmx_pow_ecc_err_s cn38xx;
+ struct cvmx_pow_ecc_err_cn31xx cn38xxp2;
+ struct cvmx_pow_ecc_err_s cn50xx;
+ struct cvmx_pow_ecc_err_s cn52xx;
+ struct cvmx_pow_ecc_err_s cn52xxp1;
+ struct cvmx_pow_ecc_err_s cn56xx;
+ struct cvmx_pow_ecc_err_s cn56xxp1;
+ struct cvmx_pow_ecc_err_s cn58xx;
+ struct cvmx_pow_ecc_err_s cn58xxp1;
+ struct cvmx_pow_ecc_err_s cn61xx;
+ struct cvmx_pow_ecc_err_s cn63xx;
+ struct cvmx_pow_ecc_err_s cn63xxp1;
+ struct cvmx_pow_ecc_err_s cn66xx;
+ struct cvmx_pow_ecc_err_s cnf71xx;
+};
+typedef union cvmx_pow_ecc_err cvmx_pow_ecc_err_t;
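+
+/* Example (editorial sketch): inspecting the error status. Whether the
+ * error bits are write-one-to-clear is not stated here, so this sketch only
+ * reads and reports:
+ *
+ *   cvmx_pow_ecc_err_t err;
+ *   err.u64 = cvmx_read_csr(CVMX_POW_ECC_ERR);
+ *   if (err.s.dbe)
+ *       cvmx_warn("POW double-bit ECC error, syndrome 0x%x\n", (unsigned)err.s.syn);
+ *   if (err.s.rpe)
+ *       cvmx_warn("POW remote pointer error (likely fatal)\n");
+ */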
+
+/**
+ * cvmx_pow_int_ctl
+ *
+ * POW_INT_CTL = POW Internal Control Register
+ *
+ * Contains POW internal control values (for internal use, not typically for customer use):
+ *
+ * PFR_DIS = Disable high-performance pre-fetch reset mode.
+ *
+ * NBR_THR = Assert ncb__busy when the number of remaining coherent bus NBR credits is less
+ * than or equal to this value.
+ */
+union cvmx_pow_int_ctl {
+ uint64_t u64;
+ struct cvmx_pow_int_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t pfr_dis : 1; /**< High-perf pre-fetch reset mode disable */
+ uint64_t nbr_thr : 5; /**< NBR busy threshold */
+#else
+ uint64_t nbr_thr : 5;
+ uint64_t pfr_dis : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_pow_int_ctl_s cn30xx;
+ struct cvmx_pow_int_ctl_s cn31xx;
+ struct cvmx_pow_int_ctl_s cn38xx;
+ struct cvmx_pow_int_ctl_s cn38xxp2;
+ struct cvmx_pow_int_ctl_s cn50xx;
+ struct cvmx_pow_int_ctl_s cn52xx;
+ struct cvmx_pow_int_ctl_s cn52xxp1;
+ struct cvmx_pow_int_ctl_s cn56xx;
+ struct cvmx_pow_int_ctl_s cn56xxp1;
+ struct cvmx_pow_int_ctl_s cn58xx;
+ struct cvmx_pow_int_ctl_s cn58xxp1;
+ struct cvmx_pow_int_ctl_s cn61xx;
+ struct cvmx_pow_int_ctl_s cn63xx;
+ struct cvmx_pow_int_ctl_s cn63xxp1;
+ struct cvmx_pow_int_ctl_s cn66xx;
+ struct cvmx_pow_int_ctl_s cnf71xx;
+};
+typedef union cvmx_pow_int_ctl cvmx_pow_int_ctl_t;
+
+/**
+ * cvmx_pow_iq_cnt#
+ *
+ * POW_IQ_CNTX = POW Input Queue Count Register (1 per QOS level)
+ *
+ * Contains a read-only count of the number of work queue entries for each QOS level.
+ */
+union cvmx_pow_iq_cntx {
+ uint64_t u64;
+ struct cvmx_pow_iq_cntx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t iq_cnt : 32; /**< Input queue count for QOS level X */
+#else
+ uint64_t iq_cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pow_iq_cntx_s cn30xx;
+ struct cvmx_pow_iq_cntx_s cn31xx;
+ struct cvmx_pow_iq_cntx_s cn38xx;
+ struct cvmx_pow_iq_cntx_s cn38xxp2;
+ struct cvmx_pow_iq_cntx_s cn50xx;
+ struct cvmx_pow_iq_cntx_s cn52xx;
+ struct cvmx_pow_iq_cntx_s cn52xxp1;
+ struct cvmx_pow_iq_cntx_s cn56xx;
+ struct cvmx_pow_iq_cntx_s cn56xxp1;
+ struct cvmx_pow_iq_cntx_s cn58xx;
+ struct cvmx_pow_iq_cntx_s cn58xxp1;
+ struct cvmx_pow_iq_cntx_s cn61xx;
+ struct cvmx_pow_iq_cntx_s cn63xx;
+ struct cvmx_pow_iq_cntx_s cn63xxp1;
+ struct cvmx_pow_iq_cntx_s cn66xx;
+ struct cvmx_pow_iq_cntx_s cnf71xx;
+};
+typedef union cvmx_pow_iq_cntx cvmx_pow_iq_cntx_t;
+
+/**
+ * cvmx_pow_iq_com_cnt
+ *
+ * POW_IQ_COM_CNT = POW Input Queue Combined Count Register
+ *
+ * Contains a read-only count of the total number of work queue entries in all QOS levels.
+ */
+union cvmx_pow_iq_com_cnt {
+ uint64_t u64;
+ struct cvmx_pow_iq_com_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t iq_cnt : 32; /**< Input queue combined count */
+#else
+ uint64_t iq_cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pow_iq_com_cnt_s cn30xx;
+ struct cvmx_pow_iq_com_cnt_s cn31xx;
+ struct cvmx_pow_iq_com_cnt_s cn38xx;
+ struct cvmx_pow_iq_com_cnt_s cn38xxp2;
+ struct cvmx_pow_iq_com_cnt_s cn50xx;
+ struct cvmx_pow_iq_com_cnt_s cn52xx;
+ struct cvmx_pow_iq_com_cnt_s cn52xxp1;
+ struct cvmx_pow_iq_com_cnt_s cn56xx;
+ struct cvmx_pow_iq_com_cnt_s cn56xxp1;
+ struct cvmx_pow_iq_com_cnt_s cn58xx;
+ struct cvmx_pow_iq_com_cnt_s cn58xxp1;
+ struct cvmx_pow_iq_com_cnt_s cn61xx;
+ struct cvmx_pow_iq_com_cnt_s cn63xx;
+ struct cvmx_pow_iq_com_cnt_s cn63xxp1;
+ struct cvmx_pow_iq_com_cnt_s cn66xx;
+ struct cvmx_pow_iq_com_cnt_s cnf71xx;
+};
+typedef union cvmx_pow_iq_com_cnt cvmx_pow_iq_com_cnt_t;
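+
+/* Example (editorial sketch): polling the total backlog of queued work
+ * across all QOS levels:
+ *
+ *   static inline uint32_t example_pow_backlog(void)
+ *   {
+ *       cvmx_pow_iq_com_cnt_t cnt;
+ *       cnt.u64 = cvmx_read_csr(CVMX_POW_IQ_COM_CNT);
+ *       return cnt.s.iq_cnt;
+ *   }
+ */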
+
+/**
+ * cvmx_pow_iq_int
+ *
+ * POW_IQ_INT = POW Input Queue Interrupt Register
+ *
+ * Contains the bits (1 per QOS level) that can trigger the input queue interrupt. An IQ_INT bit
+ * will be set if POW_IQ_CNT#QOS# changes and the resulting value is equal to POW_IQ_THR#QOS#.
+ */
+union cvmx_pow_iq_int {
+ uint64_t u64;
+ struct cvmx_pow_iq_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t iq_int : 8; /**< Input queue interrupt bits */
+#else
+ uint64_t iq_int : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_pow_iq_int_s cn52xx;
+ struct cvmx_pow_iq_int_s cn52xxp1;
+ struct cvmx_pow_iq_int_s cn56xx;
+ struct cvmx_pow_iq_int_s cn56xxp1;
+ struct cvmx_pow_iq_int_s cn61xx;
+ struct cvmx_pow_iq_int_s cn63xx;
+ struct cvmx_pow_iq_int_s cn63xxp1;
+ struct cvmx_pow_iq_int_s cn66xx;
+ struct cvmx_pow_iq_int_s cnf71xx;
+};
+typedef union cvmx_pow_iq_int cvmx_pow_iq_int_t;
+
+/**
+ * cvmx_pow_iq_int_en
+ *
+ * POW_IQ_INT_EN = POW Input Queue Interrupt Enable Register
+ *
+ * Contains the bits (1 per QOS level) that enable the input queue interrupt.
+ */
+union cvmx_pow_iq_int_en {
+ uint64_t u64;
+ struct cvmx_pow_iq_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t int_en : 8; /**< Input queue interrupt enable bits */
+#else
+ uint64_t int_en : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_pow_iq_int_en_s cn52xx;
+ struct cvmx_pow_iq_int_en_s cn52xxp1;
+ struct cvmx_pow_iq_int_en_s cn56xx;
+ struct cvmx_pow_iq_int_en_s cn56xxp1;
+ struct cvmx_pow_iq_int_en_s cn61xx;
+ struct cvmx_pow_iq_int_en_s cn63xx;
+ struct cvmx_pow_iq_int_en_s cn63xxp1;
+ struct cvmx_pow_iq_int_en_s cn66xx;
+ struct cvmx_pow_iq_int_en_s cnf71xx;
+};
+typedef union cvmx_pow_iq_int_en cvmx_pow_iq_int_en_t;
+
+/**
+ * cvmx_pow_iq_thr#
+ *
+ * POW_IQ_THRX = POW Input Queue Threshold Register (1 per QOS level)
+ *
+ * Threshold value for triggering input queue interrupts.
+ */
+union cvmx_pow_iq_thrx {
+ uint64_t u64;
+ struct cvmx_pow_iq_thrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t iq_thr : 32; /**< Input queue threshold for QOS level X */
+#else
+ uint64_t iq_thr : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pow_iq_thrx_s cn52xx;
+ struct cvmx_pow_iq_thrx_s cn52xxp1;
+ struct cvmx_pow_iq_thrx_s cn56xx;
+ struct cvmx_pow_iq_thrx_s cn56xxp1;
+ struct cvmx_pow_iq_thrx_s cn61xx;
+ struct cvmx_pow_iq_thrx_s cn63xx;
+ struct cvmx_pow_iq_thrx_s cn63xxp1;
+ struct cvmx_pow_iq_thrx_s cn66xx;
+ struct cvmx_pow_iq_thrx_s cnf71xx;
+};
+typedef union cvmx_pow_iq_thrx cvmx_pow_iq_thrx_t;
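+
+/* Example (editorial sketch): arming the input queue interrupt for one QOS
+ * level. Per the registers above, the interrupt bit sets when the queue
+ * count changes to a value equal to the programmed threshold:
+ *
+ *   static void example_arm_iq_interrupt(int qos, uint32_t threshold)
+ *   {
+ *       cvmx_pow_iq_thrx_t thr;
+ *       cvmx_pow_iq_int_en_t en;
+ *
+ *       thr.u64 = 0;
+ *       thr.s.iq_thr = threshold;
+ *       cvmx_write_csr(CVMX_POW_IQ_THRX(qos), thr.u64);
+ *
+ *       en.u64 = cvmx_read_csr(CVMX_POW_IQ_INT_EN);
+ *       en.s.int_en |= 1 << qos;
+ *       cvmx_write_csr(CVMX_POW_IQ_INT_EN, en.u64);
+ *   }
+ */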
+
+/**
+ * cvmx_pow_nos_cnt
+ *
+ * POW_NOS_CNT = POW No-schedule Count Register
+ *
+ * Contains the number of work queue entries on the no-schedule list.
+ */
+union cvmx_pow_nos_cnt {
+ uint64_t u64;
+ struct cvmx_pow_nos_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t nos_cnt : 12; /**< # of work queue entries on the no-schedule list */
+#else
+ uint64_t nos_cnt : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_pow_nos_cnt_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t nos_cnt : 7; /**< # of work queue entries on the no-schedule list */
+#else
+ uint64_t nos_cnt : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } cn30xx;
+ struct cvmx_pow_nos_cnt_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t nos_cnt : 9; /**< # of work queue entries on the no-schedule list */
+#else
+ uint64_t nos_cnt : 9;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn31xx;
+ struct cvmx_pow_nos_cnt_s cn38xx;
+ struct cvmx_pow_nos_cnt_s cn38xxp2;
+ struct cvmx_pow_nos_cnt_cn31xx cn50xx;
+ struct cvmx_pow_nos_cnt_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t nos_cnt : 10; /**< # of work queue entries on the no-schedule list */
+#else
+ uint64_t nos_cnt : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn52xx;
+ struct cvmx_pow_nos_cnt_cn52xx cn52xxp1;
+ struct cvmx_pow_nos_cnt_s cn56xx;
+ struct cvmx_pow_nos_cnt_s cn56xxp1;
+ struct cvmx_pow_nos_cnt_s cn58xx;
+ struct cvmx_pow_nos_cnt_s cn58xxp1;
+ struct cvmx_pow_nos_cnt_cn52xx cn61xx;
+ struct cvmx_pow_nos_cnt_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t nos_cnt : 11; /**< # of work queue entries on the no-schedule list */
+#else
+ uint64_t nos_cnt : 11;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn63xx;
+ struct cvmx_pow_nos_cnt_cn63xx cn63xxp1;
+ struct cvmx_pow_nos_cnt_cn63xx cn66xx;
+ struct cvmx_pow_nos_cnt_cn52xx cnf71xx;
+};
+typedef union cvmx_pow_nos_cnt cvmx_pow_nos_cnt_t;
+
+/**
+ * cvmx_pow_nw_tim
+ *
+ * POW_NW_TIM = POW New Work Timer Period Register
+ *
+ * Sets the minimum period for a new work request timeout. Period is specified in n-1 notation
+ * where the increment value is 1024 clock cycles. Thus, a value of 0x0 in this register translates
+ * to 1024 cycles, 0x1 translates to 2048 cycles, 0x2 translates to 3072 cycles, etc... Note: the
+ * maximum period for a new work request timeout is 2 times the minimum period. Note: the new work
+ * request timeout counter is reset when this register is written.
+ *
+ * There are two new work request timeout cases:
+ *
+ * - WAIT bit clear. The new work request can timeout if the timer expires before the pre-fetch
+ * engine has reached the end of all work queues. This can occur if the executable work queue
+ * entry is deep in the queue and the pre-fetch engine is subject to many resets (i.e. high switch,
+ * de-schedule, or new work load from other PP's). Thus, it is possible for a PP to receive a work
+ * response with the NO_WORK bit set even though there was at least one executable entry in the
+ * work queues. The other (and typical) scenario for receiving a NO_WORK response with the WAIT
+ * bit clear is that the pre-fetch engine has reached the end of all work queues without finding
+ * executable work.
+ *
+ * - WAIT bit set. The new work request can timeout if the timer expires before the pre-fetch
+ * engine has found executable work. In this case, the only scenario where the PP will receive a
+ * work response with the NO_WORK bit set is if the timer expires. Note: it is still possible for
+ * a PP to receive a NO_WORK response even though there was at least one executable entry in the
+ * work queues.
+ *
+ * In either case, it's important to note that switches and de-schedules are higher priority
+ * operations that can cause the pre-fetch engine to reset. Thus in a system with many switches or
+ * de-schedules occurring, it's possible for the new work timer to expire (resulting in NO_WORK
+ * responses) before the pre-fetch engine is able to get very deep into the work queues.
+ */
+union cvmx_pow_nw_tim {
+ uint64_t u64;
+ struct cvmx_pow_nw_tim_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t nw_tim : 10; /**< New work timer period */
+#else
+ uint64_t nw_tim : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_pow_nw_tim_s cn30xx;
+ struct cvmx_pow_nw_tim_s cn31xx;
+ struct cvmx_pow_nw_tim_s cn38xx;
+ struct cvmx_pow_nw_tim_s cn38xxp2;
+ struct cvmx_pow_nw_tim_s cn50xx;
+ struct cvmx_pow_nw_tim_s cn52xx;
+ struct cvmx_pow_nw_tim_s cn52xxp1;
+ struct cvmx_pow_nw_tim_s cn56xx;
+ struct cvmx_pow_nw_tim_s cn56xxp1;
+ struct cvmx_pow_nw_tim_s cn58xx;
+ struct cvmx_pow_nw_tim_s cn58xxp1;
+ struct cvmx_pow_nw_tim_s cn61xx;
+ struct cvmx_pow_nw_tim_s cn63xx;
+ struct cvmx_pow_nw_tim_s cn63xxp1;
+ struct cvmx_pow_nw_tim_s cn66xx;
+ struct cvmx_pow_nw_tim_s cnf71xx;
+};
+typedef union cvmx_pow_nw_tim cvmx_pow_nw_tim_t;
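+
+/* Example (editorial sketch): converting a desired minimum timeout in core
+ * clock cycles to the n-1, 1024-cycle-increment encoding described above.
+ * E.g. roughly 1 ms at an assumed 800 MHz clock is 800000 cycles, which
+ * rounds up to (800000 + 1023)/1024 - 1 = 781, i.e. an 800768-cycle period:
+ *
+ *   uint64_t cycles = 800000;  // assumed target
+ *   cvmx_pow_nw_tim_t tim;
+ *   tim.u64 = 0;
+ *   tim.s.nw_tim = ((cycles + 1023) >> 10) - 1;  // round up, n-1 encoding
+ *   cvmx_write_csr(CVMX_POW_NW_TIM, tim.u64);
+ */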
+
+/**
+ * cvmx_pow_pf_rst_msk
+ *
+ * POW_PF_RST_MSK = POW Prefetch Reset Mask
+ *
+ * Resets the work prefetch engine when work is stored in an internal buffer (either when the add
+ * work arrives or when the work is reloaded from an external buffer) for an enabled QOS level
+ * (1 bit per QOS level).
+ */
+union cvmx_pow_pf_rst_msk {
+ uint64_t u64;
+ struct cvmx_pow_pf_rst_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t rst_msk : 8; /**< Prefetch engine reset mask */
+#else
+ uint64_t rst_msk : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_pow_pf_rst_msk_s cn50xx;
+ struct cvmx_pow_pf_rst_msk_s cn52xx;
+ struct cvmx_pow_pf_rst_msk_s cn52xxp1;
+ struct cvmx_pow_pf_rst_msk_s cn56xx;
+ struct cvmx_pow_pf_rst_msk_s cn56xxp1;
+ struct cvmx_pow_pf_rst_msk_s cn58xx;
+ struct cvmx_pow_pf_rst_msk_s cn58xxp1;
+ struct cvmx_pow_pf_rst_msk_s cn61xx;
+ struct cvmx_pow_pf_rst_msk_s cn63xx;
+ struct cvmx_pow_pf_rst_msk_s cn63xxp1;
+ struct cvmx_pow_pf_rst_msk_s cn66xx;
+ struct cvmx_pow_pf_rst_msk_s cnf71xx;
+};
+typedef union cvmx_pow_pf_rst_msk cvmx_pow_pf_rst_msk_t;
+
+/**
+ * cvmx_pow_pp_grp_msk#
+ *
+ * POW_PP_GRP_MSKX = POW PP Group Mask Register (1 per PP)
+ *
+ * Selects which group(s) a PP belongs to. A '1' in any bit position sets the PP's membership in
+ * the corresponding group. A value of 0x0 will prevent the PP from receiving new work. Note:
+ * disabled or non-existent PP's should have this field set to 0xffff (the reset value) in order to
+ * maximize POW performance.
+ *
+ * Also contains the QOS level priorities for each PP. 0x0 is highest priority, and 0x7 the lowest.
+ * Setting the priority to 0xf will prevent that PP from receiving work from that QOS level.
+ * Priority values 0x8 through 0xe are reserved and should not be used. For a given PP, priorities
+ * should begin at 0x0 and remain contiguous throughout the range.
+ */
+union cvmx_pow_pp_grp_mskx {
+ uint64_t u64;
+ struct cvmx_pow_pp_grp_mskx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t qos7_pri : 4; /**< PPX priority for QOS level 7 */
+ uint64_t qos6_pri : 4; /**< PPX priority for QOS level 6 */
+ uint64_t qos5_pri : 4; /**< PPX priority for QOS level 5 */
+ uint64_t qos4_pri : 4; /**< PPX priority for QOS level 4 */
+ uint64_t qos3_pri : 4; /**< PPX priority for QOS level 3 */
+ uint64_t qos2_pri : 4; /**< PPX priority for QOS level 2 */
+ uint64_t qos1_pri : 4; /**< PPX priority for QOS level 1 */
+ uint64_t qos0_pri : 4; /**< PPX priority for QOS level 0 */
+ uint64_t grp_msk : 16; /**< PPX group mask */
+#else
+ uint64_t grp_msk : 16;
+ uint64_t qos0_pri : 4;
+ uint64_t qos1_pri : 4;
+ uint64_t qos2_pri : 4;
+ uint64_t qos3_pri : 4;
+ uint64_t qos4_pri : 4;
+ uint64_t qos5_pri : 4;
+ uint64_t qos6_pri : 4;
+ uint64_t qos7_pri : 4;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_pow_pp_grp_mskx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t grp_msk : 16; /**< PPX group mask */
+#else
+ uint64_t grp_msk : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn30xx;
+ struct cvmx_pow_pp_grp_mskx_cn30xx cn31xx;
+ struct cvmx_pow_pp_grp_mskx_cn30xx cn38xx;
+ struct cvmx_pow_pp_grp_mskx_cn30xx cn38xxp2;
+ struct cvmx_pow_pp_grp_mskx_s cn50xx;
+ struct cvmx_pow_pp_grp_mskx_s cn52xx;
+ struct cvmx_pow_pp_grp_mskx_s cn52xxp1;
+ struct cvmx_pow_pp_grp_mskx_s cn56xx;
+ struct cvmx_pow_pp_grp_mskx_s cn56xxp1;
+ struct cvmx_pow_pp_grp_mskx_s cn58xx;
+ struct cvmx_pow_pp_grp_mskx_s cn58xxp1;
+ struct cvmx_pow_pp_grp_mskx_s cn61xx;
+ struct cvmx_pow_pp_grp_mskx_s cn63xx;
+ struct cvmx_pow_pp_grp_mskx_s cn63xxp1;
+ struct cvmx_pow_pp_grp_mskx_s cn66xx;
+ struct cvmx_pow_pp_grp_mskx_s cnf71xx;
+};
+typedef union cvmx_pow_pp_grp_mskx cvmx_pow_pp_grp_mskx_t;
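+
+/* Usage sketch (assuming the per-PP CVMX_POW_PP_GRP_MSKX(pp) address macro
+ * defined earlier in this header): restrict PP 2 to groups 0-3, make QOS
+ * level 0 its highest priority, and block QOS level 7 work. The
+ * read-modify-write leaves the other priority fields at their current values.
+ *
+ *   cvmx_pow_pp_grp_mskx_t grp_msk;
+ *   grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(2));
+ *   grp_msk.s.grp_msk = 0x000f;  // member of groups 0-3 only
+ *   grp_msk.s.qos0_pri = 0x0;    // 0x0 is the highest priority
+ *   grp_msk.s.qos7_pri = 0xf;    // 0xf blocks work from this QOS level
+ *   cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(2), grp_msk.u64);
+ */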
+
+/**
+ * cvmx_pow_qos_rnd#
+ *
+ * POW_QOS_RNDX = POW QOS Issue Round Register (4 rounds per register x 8 registers = 32 rounds)
+ *
+ * Contains the round definitions for issuing new work. Each round consists of 8 bits with each bit
+ * corresponding to a QOS level. There are 4 rounds contained in each register for a total of 32
+ * rounds. The issue logic traverses through the rounds sequentially (lowest round to highest round)
+ * in an attempt to find new work for each PP. Within each round, the issue logic traverses through
+ * the QOS levels sequentially (highest QOS to lowest QOS) skipping over each QOS level with a clear
+ * bit in the round mask. Note: leaving a QOS level's bit clear in every issue round register will
+ * prevent work from being issued from that QOS level.
+ */
+union cvmx_pow_qos_rndx {
+ uint64_t u64;
+ struct cvmx_pow_qos_rndx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rnd_p3 : 8; /**< Round mask for round Xx4+3 */
+ uint64_t rnd_p2 : 8; /**< Round mask for round Xx4+2 */
+ uint64_t rnd_p1 : 8; /**< Round mask for round Xx4+1 */
+ uint64_t rnd : 8; /**< Round mask for round Xx4 */
+#else
+ uint64_t rnd : 8;
+ uint64_t rnd_p1 : 8;
+ uint64_t rnd_p2 : 8;
+ uint64_t rnd_p3 : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pow_qos_rndx_s cn30xx;
+ struct cvmx_pow_qos_rndx_s cn31xx;
+ struct cvmx_pow_qos_rndx_s cn38xx;
+ struct cvmx_pow_qos_rndx_s cn38xxp2;
+ struct cvmx_pow_qos_rndx_s cn50xx;
+ struct cvmx_pow_qos_rndx_s cn52xx;
+ struct cvmx_pow_qos_rndx_s cn52xxp1;
+ struct cvmx_pow_qos_rndx_s cn56xx;
+ struct cvmx_pow_qos_rndx_s cn56xxp1;
+ struct cvmx_pow_qos_rndx_s cn58xx;
+ struct cvmx_pow_qos_rndx_s cn58xxp1;
+ struct cvmx_pow_qos_rndx_s cn61xx;
+ struct cvmx_pow_qos_rndx_s cn63xx;
+ struct cvmx_pow_qos_rndx_s cn63xxp1;
+ struct cvmx_pow_qos_rndx_s cn66xx;
+ struct cvmx_pow_qos_rndx_s cnf71xx;
+};
+typedef union cvmx_pow_qos_rndx cvmx_pow_qos_rndx_t;
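+
+/* Usage sketch (assuming the CVMX_POW_QOS_RNDX(reg) address macro defined
+ * earlier in this header, with bit n of each mask covering QOS level n):
+ * weight QOS level 7 by making it eligible in all four rounds of register 0
+ * while the other levels are eligible in alternating rounds. The remaining
+ * seven registers cover rounds 4-31 and would be programmed the same way.
+ *
+ *   cvmx_pow_qos_rndx_t rnd;
+ *   rnd.u64 = 0;
+ *   rnd.s.rnd    = 0xff;  // round 0: all QOS levels eligible
+ *   rnd.s.rnd_p1 = 0x80;  // round 1: only QOS level 7
+ *   rnd.s.rnd_p2 = 0xff;  // round 2: all QOS levels eligible
+ *   rnd.s.rnd_p3 = 0x80;  // round 3: only QOS level 7
+ *   cvmx_write_csr(CVMX_POW_QOS_RNDX(0), rnd.u64);
+ */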
+
+/**
+ * cvmx_pow_qos_thr#
+ *
+ * POW_QOS_THRX = POW QOS Threshold Register (1 per QOS level)
+ *
+ * Contains the thresholds for allocating POW internal storage buffers. If the number of remaining
+ * free buffers drops below the minimum threshold (MIN_THR) or the number of allocated buffers for
+ * this QOS level rises above the maximum threshold (MAX_THR), future incoming work queue entries
+ * will be buffered externally rather than internally. This register also contains a read-only count
+ * of the current number of free buffers (FREE_CNT), the number of internal buffers currently
+ * allocated to this QOS level (BUF_CNT), and the total number of buffers on the de-schedule list
+ * (DES_CNT) (which is not the same as the total number of de-scheduled buffers).
+ */
+union cvmx_pow_qos_thrx {
+ uint64_t u64;
+ struct cvmx_pow_qos_thrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t des_cnt : 12; /**< # of buffers on de-schedule list */
+ uint64_t buf_cnt : 12; /**< # of internal buffers allocated to QOS level X */
+ uint64_t free_cnt : 12; /**< # of total free buffers */
+ uint64_t reserved_23_23 : 1;
+ uint64_t max_thr : 11; /**< Max threshold for QOS level X */
+ uint64_t reserved_11_11 : 1;
+ uint64_t min_thr : 11; /**< Min threshold for QOS level X */
+#else
+ uint64_t min_thr : 11;
+ uint64_t reserved_11_11 : 1;
+ uint64_t max_thr : 11;
+ uint64_t reserved_23_23 : 1;
+ uint64_t free_cnt : 12;
+ uint64_t buf_cnt : 12;
+ uint64_t des_cnt : 12;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } s;
+ struct cvmx_pow_qos_thrx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63 : 9;
+ uint64_t des_cnt : 7; /**< # of buffers on de-schedule list */
+ uint64_t reserved_43_47 : 5;
+ uint64_t buf_cnt : 7; /**< # of internal buffers allocated to QOS level X */
+ uint64_t reserved_31_35 : 5;
+ uint64_t free_cnt : 7; /**< # of total free buffers */
+ uint64_t reserved_18_23 : 6;
+ uint64_t max_thr : 6; /**< Max threshold for QOS level X */
+ uint64_t reserved_6_11 : 6;
+ uint64_t min_thr : 6; /**< Min threshold for QOS level X */
+#else
+ uint64_t min_thr : 6;
+ uint64_t reserved_6_11 : 6;
+ uint64_t max_thr : 6;
+ uint64_t reserved_18_23 : 6;
+ uint64_t free_cnt : 7;
+ uint64_t reserved_31_35 : 5;
+ uint64_t buf_cnt : 7;
+ uint64_t reserved_43_47 : 5;
+ uint64_t des_cnt : 7;
+ uint64_t reserved_55_63 : 9;
+#endif
+ } cn30xx;
+ struct cvmx_pow_qos_thrx_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_57_63 : 7;
+ uint64_t des_cnt : 9; /**< # of buffers on de-schedule list */
+ uint64_t reserved_45_47 : 3;
+ uint64_t buf_cnt : 9; /**< # of internal buffers allocated to QOS level X */
+ uint64_t reserved_33_35 : 3;
+ uint64_t free_cnt : 9; /**< # of total free buffers */
+ uint64_t reserved_20_23 : 4;
+ uint64_t max_thr : 8; /**< Max threshold for QOS level X */
+ uint64_t reserved_8_11 : 4;
+ uint64_t min_thr : 8; /**< Min threshold for QOS level X */
+#else
+ uint64_t min_thr : 8;
+ uint64_t reserved_8_11 : 4;
+ uint64_t max_thr : 8;
+ uint64_t reserved_20_23 : 4;
+ uint64_t free_cnt : 9;
+ uint64_t reserved_33_35 : 3;
+ uint64_t buf_cnt : 9;
+ uint64_t reserved_45_47 : 3;
+ uint64_t des_cnt : 9;
+ uint64_t reserved_57_63 : 7;
+#endif
+ } cn31xx;
+ struct cvmx_pow_qos_thrx_s cn38xx;
+ struct cvmx_pow_qos_thrx_s cn38xxp2;
+ struct cvmx_pow_qos_thrx_cn31xx cn50xx;
+ struct cvmx_pow_qos_thrx_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t des_cnt : 10; /**< # of buffers on de-schedule list */
+ uint64_t reserved_46_47 : 2;
+ uint64_t buf_cnt : 10; /**< # of internal buffers allocated to QOS level X */
+ uint64_t reserved_34_35 : 2;
+ uint64_t free_cnt : 10; /**< # of total free buffers */
+ uint64_t reserved_21_23 : 3;
+ uint64_t max_thr : 9; /**< Max threshold for QOS level X */
+ uint64_t reserved_9_11 : 3;
+ uint64_t min_thr : 9; /**< Min threshold for QOS level X */
+#else
+ uint64_t min_thr : 9;
+ uint64_t reserved_9_11 : 3;
+ uint64_t max_thr : 9;
+ uint64_t reserved_21_23 : 3;
+ uint64_t free_cnt : 10;
+ uint64_t reserved_34_35 : 2;
+ uint64_t buf_cnt : 10;
+ uint64_t reserved_46_47 : 2;
+ uint64_t des_cnt : 10;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } cn52xx;
+ struct cvmx_pow_qos_thrx_cn52xx cn52xxp1;
+ struct cvmx_pow_qos_thrx_s cn56xx;
+ struct cvmx_pow_qos_thrx_s cn56xxp1;
+ struct cvmx_pow_qos_thrx_s cn58xx;
+ struct cvmx_pow_qos_thrx_s cn58xxp1;
+ struct cvmx_pow_qos_thrx_cn52xx cn61xx;
+ struct cvmx_pow_qos_thrx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t des_cnt : 11; /**< # of buffers on de-schedule list */
+ uint64_t reserved_47_47 : 1;
+ uint64_t buf_cnt : 11; /**< # of internal buffers allocated to QOS level X */
+ uint64_t reserved_35_35 : 1;
+ uint64_t free_cnt : 11; /**< # of total free buffers */
+ uint64_t reserved_22_23 : 2;
+ uint64_t max_thr : 10; /**< Max threshold for QOS level X */
+ uint64_t reserved_10_11 : 2;
+ uint64_t min_thr : 10; /**< Min threshold for QOS level X */
+#else
+ uint64_t min_thr : 10;
+ uint64_t reserved_10_11 : 2;
+ uint64_t max_thr : 10;
+ uint64_t reserved_22_23 : 2;
+ uint64_t free_cnt : 11;
+ uint64_t reserved_35_35 : 1;
+ uint64_t buf_cnt : 11;
+ uint64_t reserved_47_47 : 1;
+ uint64_t des_cnt : 11;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn63xx;
+ struct cvmx_pow_qos_thrx_cn63xx cn63xxp1;
+ struct cvmx_pow_qos_thrx_cn63xx cn66xx;
+ struct cvmx_pow_qos_thrx_cn52xx cnf71xx;
+};
+typedef union cvmx_pow_qos_thrx cvmx_pow_qos_thrx_t;
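+
+/* Usage sketch (assuming the CVMX_POW_QOS_THRX(qos) address macro defined
+ * earlier in this header): cap internal buffering for QOS level 3, then read
+ * back the read-only occupancy counts.
+ *
+ *   cvmx_pow_qos_thrx_t thr;
+ *   thr.u64 = cvmx_read_csr(CVMX_POW_QOS_THRX(3));
+ *   thr.s.max_thr = 128;  // buffer externally once 128 entries are allocated
+ *   thr.s.min_thr = 16;   // ...or once fewer than 16 free buffers remain
+ *   cvmx_write_csr(CVMX_POW_QOS_THRX(3), thr.u64);
+ *   thr.u64 = cvmx_read_csr(CVMX_POW_QOS_THRX(3));
+ *   cvmx_dprintf("qos3: free=%u buf=%u des=%u\n", (unsigned)thr.s.free_cnt,
+ *                (unsigned)thr.s.buf_cnt, (unsigned)thr.s.des_cnt);
+ */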
+
+/**
+ * cvmx_pow_ts_pc
+ *
+ * POW_TS_PC = POW Tag Switch Performance Counter
+ *
+ * Counts the number of tag switch requests. Write to clear.
+ */
+union cvmx_pow_ts_pc {
+ uint64_t u64;
+ struct cvmx_pow_ts_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ts_pc : 32; /**< Tag switch performance counter */
+#else
+ uint64_t ts_pc : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pow_ts_pc_s cn30xx;
+ struct cvmx_pow_ts_pc_s cn31xx;
+ struct cvmx_pow_ts_pc_s cn38xx;
+ struct cvmx_pow_ts_pc_s cn38xxp2;
+ struct cvmx_pow_ts_pc_s cn50xx;
+ struct cvmx_pow_ts_pc_s cn52xx;
+ struct cvmx_pow_ts_pc_s cn52xxp1;
+ struct cvmx_pow_ts_pc_s cn56xx;
+ struct cvmx_pow_ts_pc_s cn56xxp1;
+ struct cvmx_pow_ts_pc_s cn58xx;
+ struct cvmx_pow_ts_pc_s cn58xxp1;
+ struct cvmx_pow_ts_pc_s cn61xx;
+ struct cvmx_pow_ts_pc_s cn63xx;
+ struct cvmx_pow_ts_pc_s cn63xxp1;
+ struct cvmx_pow_ts_pc_s cn66xx;
+ struct cvmx_pow_ts_pc_s cnf71xx;
+};
+typedef union cvmx_pow_ts_pc cvmx_pow_ts_pc_t;
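+
+/* Usage sketch (assuming the CVMX_POW_TS_PC address macro defined earlier in
+ * this header): sample the tag switch count, then clear it with a write as
+ * the description above allows.
+ *
+ *   cvmx_pow_ts_pc_t ts_pc;
+ *   ts_pc.u64 = cvmx_read_csr(CVMX_POW_TS_PC);
+ *   cvmx_write_csr(CVMX_POW_TS_PC, 0);  // write-to-clear
+ *   cvmx_dprintf("tag switches: %u\n", (unsigned)ts_pc.s.ts_pc);
+ */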
+
+/**
+ * cvmx_pow_wa_com_pc
+ *
+ * POW_WA_COM_PC = POW Work Add Combined Performance Counter
+ *
+ * Counts the number of add new work requests for all QOS levels. Write to clear.
+ */
+union cvmx_pow_wa_com_pc {
+ uint64_t u64;
+ struct cvmx_pow_wa_com_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wa_pc : 32; /**< Work add combined performance counter */
+#else
+ uint64_t wa_pc : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pow_wa_com_pc_s cn30xx;
+ struct cvmx_pow_wa_com_pc_s cn31xx;
+ struct cvmx_pow_wa_com_pc_s cn38xx;
+ struct cvmx_pow_wa_com_pc_s cn38xxp2;
+ struct cvmx_pow_wa_com_pc_s cn50xx;
+ struct cvmx_pow_wa_com_pc_s cn52xx;
+ struct cvmx_pow_wa_com_pc_s cn52xxp1;
+ struct cvmx_pow_wa_com_pc_s cn56xx;
+ struct cvmx_pow_wa_com_pc_s cn56xxp1;
+ struct cvmx_pow_wa_com_pc_s cn58xx;
+ struct cvmx_pow_wa_com_pc_s cn58xxp1;
+ struct cvmx_pow_wa_com_pc_s cn61xx;
+ struct cvmx_pow_wa_com_pc_s cn63xx;
+ struct cvmx_pow_wa_com_pc_s cn63xxp1;
+ struct cvmx_pow_wa_com_pc_s cn66xx;
+ struct cvmx_pow_wa_com_pc_s cnf71xx;
+};
+typedef union cvmx_pow_wa_com_pc cvmx_pow_wa_com_pc_t;
+
+/**
+ * cvmx_pow_wa_pc#
+ *
+ * POW_WA_PCX = POW Work Add Performance Counter (1 per QOS level)
+ *
+ * Counts the number of add new work requests for each QOS level. Write to clear.
+ */
+union cvmx_pow_wa_pcx {
+ uint64_t u64;
+ struct cvmx_pow_wa_pcx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wa_pc : 32; /**< Work add performance counter for QOS level X */
+#else
+ uint64_t wa_pc : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pow_wa_pcx_s cn30xx;
+ struct cvmx_pow_wa_pcx_s cn31xx;
+ struct cvmx_pow_wa_pcx_s cn38xx;
+ struct cvmx_pow_wa_pcx_s cn38xxp2;
+ struct cvmx_pow_wa_pcx_s cn50xx;
+ struct cvmx_pow_wa_pcx_s cn52xx;
+ struct cvmx_pow_wa_pcx_s cn52xxp1;
+ struct cvmx_pow_wa_pcx_s cn56xx;
+ struct cvmx_pow_wa_pcx_s cn56xxp1;
+ struct cvmx_pow_wa_pcx_s cn58xx;
+ struct cvmx_pow_wa_pcx_s cn58xxp1;
+ struct cvmx_pow_wa_pcx_s cn61xx;
+ struct cvmx_pow_wa_pcx_s cn63xx;
+ struct cvmx_pow_wa_pcx_s cn63xxp1;
+ struct cvmx_pow_wa_pcx_s cn66xx;
+ struct cvmx_pow_wa_pcx_s cnf71xx;
+};
+typedef union cvmx_pow_wa_pcx cvmx_pow_wa_pcx_t;
+
+/**
+ * cvmx_pow_wq_int
+ *
+ * POW_WQ_INT = POW Work Queue Interrupt Register
+ *
+ * Contains the bits (1 per group) that set work queue interrupts and are used to clear these
+ * interrupts. Also contains the input queue interrupt temporary disable bits (1 per group). For
+ * more information regarding this register, see the interrupt section.
+ */
+union cvmx_pow_wq_int {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t iq_dis : 16; /**< Input queue interrupt temporary disable mask
+ Corresponding WQ_INT<*> bit cannot be set due to
+ IQ_CNT/IQ_THR check when this bit is set.
+ Corresponding IQ_DIS bit is cleared by HW whenever:
+ - POW_WQ_INT_CNT*[IQ_CNT] is zero, or
+ - POW_WQ_INT_CNT*[TC_CNT]==1 when periodic
+ counter POW_WQ_INT_PC[PC]==0 */
+ uint64_t wq_int : 16; /**< Work queue interrupt bits
+ Corresponding WQ_INT bit is set by HW whenever:
+ - POW_WQ_INT_CNT*[IQ_CNT] >=
+ POW_WQ_INT_THR*[IQ_THR] and the threshold
+ interrupt is not disabled.
+ IQ_DIS<*>==1 disables the interrupt.
+ POW_WQ_INT_THR*[IQ_THR]==0 disables the int.
+ - POW_WQ_INT_CNT*[DS_CNT] >=
+ POW_WQ_INT_THR*[DS_THR] and the threshold
+ interrupt is not disabled
+ POW_WQ_INT_THR*[DS_THR]==0 disables the int.
+ - POW_WQ_INT_CNT*[TC_CNT]==1 when periodic
+ counter POW_WQ_INT_PC[PC]==0 and
+ POW_WQ_INT_THR*[TC_EN]==1 and at least one of:
+ - POW_WQ_INT_CNT*[IQ_CNT] > 0
+ - POW_WQ_INT_CNT*[DS_CNT] > 0 */
+#else
+ uint64_t wq_int : 16;
+ uint64_t iq_dis : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pow_wq_int_s cn30xx;
+ struct cvmx_pow_wq_int_s cn31xx;
+ struct cvmx_pow_wq_int_s cn38xx;
+ struct cvmx_pow_wq_int_s cn38xxp2;
+ struct cvmx_pow_wq_int_s cn50xx;
+ struct cvmx_pow_wq_int_s cn52xx;
+ struct cvmx_pow_wq_int_s cn52xxp1;
+ struct cvmx_pow_wq_int_s cn56xx;
+ struct cvmx_pow_wq_int_s cn56xxp1;
+ struct cvmx_pow_wq_int_s cn58xx;
+ struct cvmx_pow_wq_int_s cn58xxp1;
+ struct cvmx_pow_wq_int_s cn61xx;
+ struct cvmx_pow_wq_int_s cn63xx;
+ struct cvmx_pow_wq_int_s cn63xxp1;
+ struct cvmx_pow_wq_int_s cn66xx;
+ struct cvmx_pow_wq_int_s cnf71xx;
+};
+typedef union cvmx_pow_wq_int cvmx_pow_wq_int_t;
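+
+/* Usage sketch (assuming the CVMX_POW_WQ_INT address macro defined earlier
+ * in this header): acknowledge group 0's work queue interrupt from its
+ * handler. Writing a 1 clears the corresponding WQ_INT bit (see the TC_CNT
+ * notes under POW_WQ_INT_CNT below).
+ *
+ *   cvmx_pow_wq_int_t wq_int;
+ *   wq_int.u64 = 0;
+ *   wq_int.s.wq_int = 1u << 0;  // write 1 to clear group 0's interrupt
+ *   cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
+ */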
+
+/**
+ * cvmx_pow_wq_int_cnt#
+ *
+ * POW_WQ_INT_CNTX = POW Work Queue Interrupt Count Register (1 per group)
+ *
+ * Contains a read-only copy of the counts used to trigger work queue interrupts. For more
+ * information regarding this register, see the interrupt section.
+ */
+union cvmx_pow_wq_int_cntx {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_cntx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t tc_cnt : 4; /**< Time counter current value for group X
+ HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever:
+ - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and
+ corresponding POW_WQ_INT_CNT*[DS_CNT]==0
+ - corresponding POW_WQ_INT[WQ_INT<*>] is written
+ with a 1 by SW
+ - corresponding POW_WQ_INT[IQ_DIS<*>] is written
+ with a 1 by SW
+ - corresponding POW_WQ_INT_THR* is written by SW
+ - TC_CNT==1 and periodic counter
+ POW_WQ_INT_PC[PC]==0
+ Otherwise, HW decrements TC_CNT whenever the
+ periodic counter POW_WQ_INT_PC[PC]==0.
+ TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */
+ uint64_t ds_cnt : 12; /**< De-schedule executable count for group X */
+ uint64_t iq_cnt : 12; /**< Input queue executable count for group X */
+#else
+ uint64_t iq_cnt : 12;
+ uint64_t ds_cnt : 12;
+ uint64_t tc_cnt : 4;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_pow_wq_int_cntx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t tc_cnt : 4; /**< Time counter current value for group X
+ HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever:
+ - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and
+ corresponding POW_WQ_INT_CNT*[DS_CNT]==0
+ - corresponding POW_WQ_INT[WQ_INT<*>] is written
+ with a 1 by SW
+ - corresponding POW_WQ_INT[IQ_DIS<*>] is written
+ with a 1 by SW
+ - corresponding POW_WQ_INT_THR* is written by SW
+ - TC_CNT==1 and periodic counter
+ POW_WQ_INT_PC[PC]==0
+ Otherwise, HW decrements TC_CNT whenever the
+ periodic counter POW_WQ_INT_PC[PC]==0.
+ TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */
+ uint64_t reserved_19_23 : 5;
+ uint64_t ds_cnt : 7; /**< De-schedule executable count for group X */
+ uint64_t reserved_7_11 : 5;
+ uint64_t iq_cnt : 7; /**< Input queue executable count for group X */
+#else
+ uint64_t iq_cnt : 7;
+ uint64_t reserved_7_11 : 5;
+ uint64_t ds_cnt : 7;
+ uint64_t reserved_19_23 : 5;
+ uint64_t tc_cnt : 4;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn30xx;
+ struct cvmx_pow_wq_int_cntx_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t tc_cnt : 4; /**< Time counter current value for group X
+ HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever:
+ - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and
+ corresponding POW_WQ_INT_CNT*[DS_CNT]==0
+ - corresponding POW_WQ_INT[WQ_INT<*>] is written
+ with a 1 by SW
+ - corresponding POW_WQ_INT[IQ_DIS<*>] is written
+ with a 1 by SW
+ - corresponding POW_WQ_INT_THR* is written by SW
+ - TC_CNT==1 and periodic counter
+ POW_WQ_INT_PC[PC]==0
+ Otherwise, HW decrements TC_CNT whenever the
+ periodic counter POW_WQ_INT_PC[PC]==0.
+ TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t ds_cnt : 9; /**< De-schedule executable count for group X */
+ uint64_t reserved_9_11 : 3;
+ uint64_t iq_cnt : 9; /**< Input queue executable count for group X */
+#else
+ uint64_t iq_cnt : 9;
+ uint64_t reserved_9_11 : 3;
+ uint64_t ds_cnt : 9;
+ uint64_t reserved_21_23 : 3;
+ uint64_t tc_cnt : 4;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn31xx;
+ struct cvmx_pow_wq_int_cntx_s cn38xx;
+ struct cvmx_pow_wq_int_cntx_s cn38xxp2;
+ struct cvmx_pow_wq_int_cntx_cn31xx cn50xx;
+ struct cvmx_pow_wq_int_cntx_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t tc_cnt : 4; /**< Time counter current value for group X
+ HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever:
+ - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and
+ corresponding POW_WQ_INT_CNT*[DS_CNT]==0
+ - corresponding POW_WQ_INT[WQ_INT<*>] is written
+ with a 1 by SW
+ - corresponding POW_WQ_INT[IQ_DIS<*>] is written
+ with a 1 by SW
+ - corresponding POW_WQ_INT_THR* is written by SW
+ - TC_CNT==1 and periodic counter
+ POW_WQ_INT_PC[PC]==0
+ Otherwise, HW decrements TC_CNT whenever the
+ periodic counter POW_WQ_INT_PC[PC]==0.
+ TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t ds_cnt : 10; /**< De-schedule executable count for group X */
+ uint64_t reserved_10_11 : 2;
+ uint64_t iq_cnt : 10; /**< Input queue executable count for group X */
+#else
+ uint64_t iq_cnt : 10;
+ uint64_t reserved_10_11 : 2;
+ uint64_t ds_cnt : 10;
+ uint64_t reserved_22_23 : 2;
+ uint64_t tc_cnt : 4;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn52xx;
+ struct cvmx_pow_wq_int_cntx_cn52xx cn52xxp1;
+ struct cvmx_pow_wq_int_cntx_s cn56xx;
+ struct cvmx_pow_wq_int_cntx_s cn56xxp1;
+ struct cvmx_pow_wq_int_cntx_s cn58xx;
+ struct cvmx_pow_wq_int_cntx_s cn58xxp1;
+ struct cvmx_pow_wq_int_cntx_cn52xx cn61xx;
+ struct cvmx_pow_wq_int_cntx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t tc_cnt : 4; /**< Time counter current value for group X
+ HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever:
+ - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and
+ corresponding POW_WQ_INT_CNT*[DS_CNT]==0
+ - corresponding POW_WQ_INT[WQ_INT<*>] is written
+ with a 1 by SW
+ - corresponding POW_WQ_INT[IQ_DIS<*>] is written
+ with a 1 by SW
+ - corresponding POW_WQ_INT_THR* is written by SW
+ - TC_CNT==1 and periodic counter
+ POW_WQ_INT_PC[PC]==0
+ Otherwise, HW decrements TC_CNT whenever the
+ periodic counter POW_WQ_INT_PC[PC]==0.
+ TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */
+ uint64_t reserved_23_23 : 1;
+ uint64_t ds_cnt : 11; /**< De-schedule executable count for group X */
+ uint64_t reserved_11_11 : 1;
+ uint64_t iq_cnt : 11; /**< Input queue executable count for group X */
+#else
+ uint64_t iq_cnt : 11;
+ uint64_t reserved_11_11 : 1;
+ uint64_t ds_cnt : 11;
+ uint64_t reserved_23_23 : 1;
+ uint64_t tc_cnt : 4;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn63xx;
+ struct cvmx_pow_wq_int_cntx_cn63xx cn63xxp1;
+ struct cvmx_pow_wq_int_cntx_cn63xx cn66xx;
+ struct cvmx_pow_wq_int_cntx_cn52xx cnf71xx;
+};
+typedef union cvmx_pow_wq_int_cntx cvmx_pow_wq_int_cntx_t;
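+
+/* Usage sketch (assuming the CVMX_POW_WQ_INT_CNTX(group) address macro
+ * defined earlier in this header): poll the read-only counts feeding group
+ * 0's interrupt logic.
+ *
+ *   cvmx_pow_wq_int_cntx_t cnt;
+ *   cnt.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(0));
+ *   cvmx_dprintf("group 0: iq_cnt=%u ds_cnt=%u tc_cnt=%u\n",
+ *                (unsigned)cnt.s.iq_cnt, (unsigned)cnt.s.ds_cnt,
+ *                (unsigned)cnt.s.tc_cnt);
+ */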
+
+/**
+ * cvmx_pow_wq_int_pc
+ *
+ * POW_WQ_INT_PC = POW Work Queue Interrupt Periodic Counter Register
+ *
+ * Contains the threshold value for the work queue interrupt periodic counter and also a read-only
+ * copy of the periodic counter. For more information regarding this register, see the interrupt
+ * section.
+ */
+union cvmx_pow_wq_int_pc {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t pc : 28; /**< Work queue interrupt periodic counter */
+ uint64_t reserved_28_31 : 4;
+ uint64_t pc_thr : 20; /**< Work queue interrupt periodic counter threshold */
+ uint64_t reserved_0_7 : 8;
+#else
+ uint64_t reserved_0_7 : 8;
+ uint64_t pc_thr : 20;
+ uint64_t reserved_28_31 : 4;
+ uint64_t pc : 28;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } s;
+ struct cvmx_pow_wq_int_pc_s cn30xx;
+ struct cvmx_pow_wq_int_pc_s cn31xx;
+ struct cvmx_pow_wq_int_pc_s cn38xx;
+ struct cvmx_pow_wq_int_pc_s cn38xxp2;
+ struct cvmx_pow_wq_int_pc_s cn50xx;
+ struct cvmx_pow_wq_int_pc_s cn52xx;
+ struct cvmx_pow_wq_int_pc_s cn52xxp1;
+ struct cvmx_pow_wq_int_pc_s cn56xx;
+ struct cvmx_pow_wq_int_pc_s cn56xxp1;
+ struct cvmx_pow_wq_int_pc_s cn58xx;
+ struct cvmx_pow_wq_int_pc_s cn58xxp1;
+ struct cvmx_pow_wq_int_pc_s cn61xx;
+ struct cvmx_pow_wq_int_pc_s cn63xx;
+ struct cvmx_pow_wq_int_pc_s cn63xxp1;
+ struct cvmx_pow_wq_int_pc_s cn66xx;
+ struct cvmx_pow_wq_int_pc_s cnf71xx;
+};
+typedef union cvmx_pow_wq_int_pc cvmx_pow_wq_int_pc_t;
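+
+/* Usage sketch (assuming the CVMX_POW_WQ_INT_PC address macro defined
+ * earlier in this header): program the periodic counter threshold that paces
+ * the per-group time counters; the PC field itself is read-only.
+ *
+ *   cvmx_pow_wq_int_pc_t pc;
+ *   pc.u64 = 0;
+ *   pc.s.pc_thr = 0x1000;  // arbitrary example period
+ *   cvmx_write_csr(CVMX_POW_WQ_INT_PC, pc.u64);
+ */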
+
+/**
+ * cvmx_pow_wq_int_thr#
+ *
+ * POW_WQ_INT_THRX = POW Work Queue Interrupt Threshold Register (1 per group)
+ *
+ * Contains the thresholds for enabling and setting work queue interrupts. For more information
+ * regarding this register, see the interrupt section.
+ *
+ * Note: Up to 4 of the POW's internal storage buffers can be allocated for hardware use and are
+ * therefore not available for incoming work queue entries. Additionally, any PP that is not in the
+ * NULL_NULL state consumes a buffer. Thus in a 4 PP system, it is not advisable to set either
+ * IQ_THR or DS_THR to greater than 512 - 4 - 4 = 504. Doing so may prevent the interrupt from
+ * ever triggering.
+ */
+union cvmx_pow_wq_int_thrx {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_thrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
+ TC_EN must be zero when TC_THR==0 */
+ uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
+ When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */
+ uint64_t reserved_23_23 : 1;
+ uint64_t ds_thr : 11; /**< De-schedule count threshold for group X
+ DS_THR==0 disables the threshold interrupt */
+ uint64_t reserved_11_11 : 1;
+ uint64_t iq_thr : 11; /**< Input queue count threshold for group X
+ IQ_THR==0 disables the threshold interrupt */
+#else
+ uint64_t iq_thr : 11;
+ uint64_t reserved_11_11 : 1;
+ uint64_t ds_thr : 11;
+ uint64_t reserved_23_23 : 1;
+ uint64_t tc_thr : 4;
+ uint64_t tc_en : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_pow_wq_int_thrx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
+ TC_EN must be zero when TC_THR==0 */
+ uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
+ When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */
+ uint64_t reserved_18_23 : 6;
+ uint64_t ds_thr : 6; /**< De-schedule count threshold for group X
+ DS_THR==0 disables the threshold interrupt */
+ uint64_t reserved_6_11 : 6;
+ uint64_t iq_thr : 6; /**< Input queue count threshold for group X
+ IQ_THR==0 disables the threshold interrupt */
+#else
+ uint64_t iq_thr : 6;
+ uint64_t reserved_6_11 : 6;
+ uint64_t ds_thr : 6;
+ uint64_t reserved_18_23 : 6;
+ uint64_t tc_thr : 4;
+ uint64_t tc_en : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn30xx;
+ struct cvmx_pow_wq_int_thrx_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
+ TC_EN must be zero when TC_THR==0 */
+ uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
+ When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */
+ uint64_t reserved_20_23 : 4;
+ uint64_t ds_thr : 8; /**< De-schedule count threshold for group X
+ DS_THR==0 disables the threshold interrupt */
+ uint64_t reserved_8_11 : 4;
+ uint64_t iq_thr : 8; /**< Input queue count threshold for group X
+ IQ_THR==0 disables the threshold interrupt */
+#else
+ uint64_t iq_thr : 8;
+ uint64_t reserved_8_11 : 4;
+ uint64_t ds_thr : 8;
+ uint64_t reserved_20_23 : 4;
+ uint64_t tc_thr : 4;
+ uint64_t tc_en : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn31xx;
+ struct cvmx_pow_wq_int_thrx_s cn38xx;
+ struct cvmx_pow_wq_int_thrx_s cn38xxp2;
+ struct cvmx_pow_wq_int_thrx_cn31xx cn50xx;
+ struct cvmx_pow_wq_int_thrx_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
+ TC_EN must be zero when TC_THR==0 */
+ uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
+ When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */
+ uint64_t reserved_21_23 : 3;
+ uint64_t ds_thr : 9; /**< De-schedule count threshold for group X
+ DS_THR==0 disables the threshold interrupt */
+ uint64_t reserved_9_11 : 3;
+ uint64_t iq_thr : 9; /**< Input queue count threshold for group X
+ IQ_THR==0 disables the threshold interrupt */
+#else
+ uint64_t iq_thr : 9;
+ uint64_t reserved_9_11 : 3;
+ uint64_t ds_thr : 9;
+ uint64_t reserved_21_23 : 3;
+ uint64_t tc_thr : 4;
+ uint64_t tc_en : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn52xx;
+ struct cvmx_pow_wq_int_thrx_cn52xx cn52xxp1;
+ struct cvmx_pow_wq_int_thrx_s cn56xx;
+ struct cvmx_pow_wq_int_thrx_s cn56xxp1;
+ struct cvmx_pow_wq_int_thrx_s cn58xx;
+ struct cvmx_pow_wq_int_thrx_s cn58xxp1;
+ struct cvmx_pow_wq_int_thrx_cn52xx cn61xx;
+ struct cvmx_pow_wq_int_thrx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63 : 35;
+ uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
+ TC_EN must be zero when TC_THR==0 */
+ uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
+ When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */
+ uint64_t reserved_22_23 : 2;
+ uint64_t ds_thr : 10; /**< De-schedule count threshold for group X
+ DS_THR==0 disables the threshold interrupt */
+ uint64_t reserved_10_11 : 2;
+ uint64_t iq_thr : 10; /**< Input queue count threshold for group X
+ IQ_THR==0 disables the threshold interrupt */
+#else
+ uint64_t iq_thr : 10;
+ uint64_t reserved_10_11 : 2;
+ uint64_t ds_thr : 10;
+ uint64_t reserved_22_23 : 2;
+ uint64_t tc_thr : 4;
+ uint64_t tc_en : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn63xx;
+ struct cvmx_pow_wq_int_thrx_cn63xx cn63xxp1;
+ struct cvmx_pow_wq_int_thrx_cn63xx cn66xx;
+ struct cvmx_pow_wq_int_thrx_cn52xx cnf71xx;
+};
+typedef union cvmx_pow_wq_int_thrx cvmx_pow_wq_int_thrx_t;
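+
+/* Usage sketch (assuming the CVMX_POW_WQ_INT_THRX(group) address macro
+ * defined earlier in this header): interrupt once group 0 holds 32
+ * executable entries, with the time counter enabled as a latency fallback.
+ *
+ *   cvmx_pow_wq_int_thrx_t thr;
+ *   thr.u64 = 0;
+ *   thr.s.iq_thr = 32;  // input queue threshold (0 would disable it)
+ *   thr.s.tc_thr = 1;   // nonzero, so TC_EN may be set
+ *   thr.s.tc_en = 1;    // TC_EN must be zero when TC_THR==0
+ *   cvmx_write_csr(CVMX_POW_WQ_INT_THRX(0), thr.u64);
+ */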
+
+/**
+ * cvmx_pow_ws_pc#
+ *
+ * POW_WS_PCX = POW Work Schedule Performance Counter (1 per group)
+ *
+ * Counts the number of work schedules for each group. Write to clear.
+ */
+union cvmx_pow_ws_pcx {
+ uint64_t u64;
+ struct cvmx_pow_ws_pcx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ws_pc : 32; /**< Work schedule performance counter for group X */
+#else
+ uint64_t ws_pc : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_pow_ws_pcx_s cn30xx;
+ struct cvmx_pow_ws_pcx_s cn31xx;
+ struct cvmx_pow_ws_pcx_s cn38xx;
+ struct cvmx_pow_ws_pcx_s cn38xxp2;
+ struct cvmx_pow_ws_pcx_s cn50xx;
+ struct cvmx_pow_ws_pcx_s cn52xx;
+ struct cvmx_pow_ws_pcx_s cn52xxp1;
+ struct cvmx_pow_ws_pcx_s cn56xx;
+ struct cvmx_pow_ws_pcx_s cn56xxp1;
+ struct cvmx_pow_ws_pcx_s cn58xx;
+ struct cvmx_pow_ws_pcx_s cn58xxp1;
+ struct cvmx_pow_ws_pcx_s cn61xx;
+ struct cvmx_pow_ws_pcx_s cn63xx;
+ struct cvmx_pow_ws_pcx_s cn63xxp1;
+ struct cvmx_pow_ws_pcx_s cn66xx;
+ struct cvmx_pow_ws_pcx_s cnf71xx;
+};
+typedef union cvmx_pow_ws_pcx cvmx_pow_ws_pcx_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pow-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pow.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pow.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pow.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,789 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the hardware Packet Order / Work unit.
+ *
+ * <hr>$Revision: 29727 $<hr>
+ */
+
+#include "cvmx.h"
+#include "cvmx-pow.h"
+
+/**
+ * @INTERNAL
+ * This structure stores the internal POW state captured by
+ * cvmx_pow_capture(). It is purposely not exposed to the user
+ * since the format may change without notice.
+ */
+typedef struct
+{
+ cvmx_pow_tag_load_resp_t sstatus[CVMX_MAX_CORES][8];
+ cvmx_pow_tag_load_resp_t smemload[2048][8];
+ cvmx_pow_tag_load_resp_t sindexload[64][8];
+} __cvmx_pow_dump_t;
+
+typedef enum
+{
+ CVMX_POW_LIST_UNKNOWN=0,
+ CVMX_POW_LIST_FREE=1,
+ CVMX_POW_LIST_INPUT=2,
+ CVMX_POW_LIST_CORE=CVMX_POW_LIST_INPUT+8,
+ CVMX_POW_LIST_DESCHED=CVMX_POW_LIST_CORE+32,
+ CVMX_POW_LIST_NOSCHED=CVMX_POW_LIST_DESCHED+64,
+} __cvmx_pow_list_types_t;
+
+static const char *__cvmx_pow_list_names[] = {
+ "Unknown",
+ "Free List",
+ "Queue 0", "Queue 1", "Queue 2", "Queue 3",
+ "Queue 4", "Queue 5", "Queue 6", "Queue 7",
+ "Core 0", "Core 1", "Core 2", "Core 3",
+ "Core 4", "Core 5", "Core 6", "Core 7",
+ "Core 8", "Core 9", "Core 10", "Core 11",
+ "Core 12", "Core 13", "Core 14", "Core 15",
+ "Core 16", "Core 17", "Core 18", "Core 19",
+ "Core 20", "Core 21", "Core 22", "Core 23",
+ "Core 24", "Core 25", "Core 26", "Core 27",
+ "Core 28", "Core 29", "Core 30", "Core 31",
+ "Desched 0", "Desched 1", "Desched 2", "Desched 3",
+ "Desched 4", "Desched 5", "Desched 6", "Desched 7",
+ "Desched 8", "Desched 9", "Desched 10", "Desched 11",
+ "Desched 12", "Desched 13", "Desched 14", "Desched 15",
+ "Desched 16", "Desched 17", "Desched 18", "Desched 19",
+ "Desched 20", "Desched 21", "Desched 22", "Desched 23",
+ "Desched 24", "Desched 25", "Desched 26", "Desched 27",
+ "Desched 28", "Desched 29", "Desched 30", "Desched 31",
+ "Desched 32", "Desched 33", "Desched 34", "Desched 35",
+ "Desched 36", "Desched 37", "Desched 38", "Desched 39",
+ "Desched 40", "Desched 41", "Desched 42", "Desched 43",
+ "Desched 44", "Desched 45", "Desched 46", "Desched 47",
+ "Desched 48", "Desched 49", "Desched 50", "Desched 51",
+ "Desched 52", "Desched 53", "Desched 54", "Desched 55",
+ "Desched 56", "Desched 57", "Desched 58", "Desched 59",
+ "Desched 60", "Desched 61", "Desched 62", "Desched 63",
+ "Nosched 0"
+};
+
+
+/**
+ * Return the number of POW entries supported by this chip
+ *
+ * @return Number of POW entries
+ */
+int cvmx_pow_get_num_entries(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ return 64;
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ return 256;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN61XX)
+ || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ return 512;
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX))
+ return 1024;
+ else
+ return 2048;
+}
+
+
+static int __cvmx_pow_capture_v1(void *buffer, int buffer_size)
+{
+ __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer;
+ int num_cores;
+ int num_pow_entries = cvmx_pow_get_num_entries();
+ int core;
+ int index;
+ int bits;
+
+ if (buffer_size < (int)sizeof(__cvmx_pow_dump_t))
+ {
+ cvmx_dprintf("cvmx_pow_capture: Buffer too small\n");
+ return -1;
+ }
+
+ num_cores = cvmx_octeon_num_cores();
+
+ /* Read all core related state */
+ for (core=0; core<num_cores; core++)
+ {
+ cvmx_pow_load_addr_t load_addr;
+ load_addr.u64 = 0;
+ load_addr.sstatus.mem_region = CVMX_IO_SEG;
+ load_addr.sstatus.is_io = 1;
+ load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
+ load_addr.sstatus.coreid = core;
+ for (bits=0; bits<8; bits++)
+ {
+ load_addr.sstatus.get_rev = (bits & 1) != 0;
+ load_addr.sstatus.get_cur = (bits & 2) != 0;
+ load_addr.sstatus.get_wqp = (bits & 4) != 0;
+ if ((load_addr.sstatus.get_cur == 0) && load_addr.sstatus.get_rev)
+ dump->sstatus[core][bits].u64 = -1;
+ else
+ dump->sstatus[core][bits].u64 = cvmx_read_csr(load_addr.u64);
+ }
+ }
+
+ /* Read all internal POW entries */
+ for (index=0; index<num_pow_entries; index++)
+ {
+ cvmx_pow_load_addr_t load_addr;
+ load_addr.u64 = 0;
+ load_addr.smemload.mem_region = CVMX_IO_SEG;
+ load_addr.smemload.is_io = 1;
+ load_addr.smemload.did = CVMX_OCT_DID_TAG_TAG2;
+ load_addr.smemload.index = index;
+ for (bits=0; bits<3; bits++)
+ {
+ load_addr.smemload.get_des = (bits & 1) != 0;
+ load_addr.smemload.get_wqp = (bits & 2) != 0;
+ dump->smemload[index][bits].u64 = cvmx_read_csr(load_addr.u64);
+ }
+ }
+
+ /* Read all group and queue pointers */
+ for (index=0; index<16; index++)
+ {
+ cvmx_pow_load_addr_t load_addr;
+ load_addr.u64 = 0;
+ load_addr.sindexload.mem_region = CVMX_IO_SEG;
+ load_addr.sindexload.is_io = 1;
+ load_addr.sindexload.did = CVMX_OCT_DID_TAG_TAG3;
+ load_addr.sindexload.qosgrp = index;
+ for (bits=0; bits<4; bits++)
+ {
+ load_addr.sindexload.get_rmt = (bits & 1) != 0;
+ load_addr.sindexload.get_des_get_tail = (bits & 2) != 0;
+ /* The first pass only has 8 valid index values */
+ if ((load_addr.sindexload.get_rmt == 0) &&
+ (load_addr.sindexload.get_des_get_tail == 0) &&
+ (index >= 8))
+ dump->sindexload[index][bits].u64 = -1;
+ else
+ dump->sindexload[index][bits].u64 = cvmx_read_csr(load_addr.u64);
+ }
+ }
+ return 0;
+}
+
+static int __cvmx_pow_capture_v2(void *buffer, int buffer_size)
+{
+ __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer;
+ int num_cores;
+ int num_pow_entries = cvmx_pow_get_num_entries();
+ int core;
+ int index;
+ int bits;
+
+ if (buffer_size < (int)sizeof(__cvmx_pow_dump_t))
+ {
+ cvmx_dprintf("cvmx_pow_capture: Buffer too small\n");
+ return -1;
+ }
+
+ num_cores = cvmx_octeon_num_cores();
+
+ /* Read all core related state */
+ for (core=0; core<num_cores; core++)
+ {
+ cvmx_pow_load_addr_t load_addr;
+ load_addr.u64 = 0;
+ load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
+ load_addr.sstatus_cn68xx.is_io = 1;
+ load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
+ load_addr.sstatus_cn68xx.coreid = core;
+ for (bits=1; bits<6; bits++)
+ {
+ load_addr.sstatus_cn68xx.opcode = bits;
+ dump->sstatus[core][bits].u64 = cvmx_read_csr(load_addr.u64);
+ }
+ }
+ /* Read all internal POW entries */
+ for (index=0; index<num_pow_entries; index++)
+ {
+ cvmx_pow_load_addr_t load_addr;
+ load_addr.u64 = 0;
+ load_addr.smemload_cn68xx.mem_region = CVMX_IO_SEG;
+ load_addr.smemload_cn68xx.is_io = 1;
+ load_addr.smemload_cn68xx.did = CVMX_OCT_DID_TAG_TAG2;
+ load_addr.smemload_cn68xx.index = index;
+ for (bits=1; bits<5; bits++)
+ {
+ load_addr.smemload_cn68xx.opcode = bits;
+ dump->smemload[index][bits].u64 = cvmx_read_csr(load_addr.u64);
+ }
+ }
+
+ /* Read all group and queue pointers */
+ for (index=0; index<64; index++)
+ {
+ cvmx_pow_load_addr_t load_addr;
+ load_addr.u64 = 0;
+ load_addr.sindexload_cn68xx.mem_region = CVMX_IO_SEG;
+ load_addr.sindexload_cn68xx.is_io = 1;
+ load_addr.sindexload_cn68xx.did = CVMX_OCT_DID_TAG_TAG3;
+ load_addr.sindexload_cn68xx.qos_grp = index;
+ for (bits=1; bits<7; bits++)
+ {
+ load_addr.sindexload_cn68xx.opcode = bits;
+ dump->sindexload[index][bits].u64 = cvmx_read_csr(load_addr.u64);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Store the current POW internal state into the supplied
+ * buffer. It is recommended that you pass a buffer of at least
+ * 128KB. The format of the capture may change based on SDK
+ * version and Octeon chip.
+ *
+ * @param buffer Buffer to store capture into
+ * @param buffer_size
+ * The size of the supplied buffer
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_pow_capture(void *buffer, int buffer_size)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ return __cvmx_pow_capture_v2(buffer, buffer_size);
+ else
+ return __cvmx_pow_capture_v1(buffer, buffer_size);
+}
+
+/**
+ * Function to display a POW internal queue to the user
+ *
+ * @param name User visible name for the queue
+ * @param name_param Parameter for printf in creating the name
+ * @param valid Set if the queue contains any elements
+ * @param has_one Set if the queue contains exactly one element
+ * @param head The head pointer
+ * @param tail The tail pointer
+ */
+static void __cvmx_pow_display_list(const char *name, int name_param, int valid, int has_one, uint64_t head, uint64_t tail)
+{
+ printf(name, name_param);
+ printf(": ");
+ if (valid)
+ {
+ if (has_one)
+ printf("One element index=%llu(0x%llx)\n", CAST64(head), CAST64(head));
+ else
+ printf("Multiple elements head=%llu(0x%llx) tail=%llu(0x%llx)\n", CAST64(head), CAST64(head), CAST64(tail), CAST64(tail));
+ }
+ else
+ printf("Empty\n");
+}
+
+
+/**
+ * Mark which list a POW entry is on. Print a warning message if the
+ * entry is already on a list. This happens if the POW changed while
+ * the capture was running.
+ *
+ * @param entry_num Entry number to mark
+ * @param entry_type List type
+ * @param entry_list Array to store marks
+ *
+ * @return Zero on success, negative if already on a list
+ */
+static int __cvmx_pow_entry_mark_list(int entry_num, __cvmx_pow_list_types_t entry_type, uint8_t entry_list[])
+{
+ if (entry_list[entry_num] == 0)
+ {
+ entry_list[entry_num] = entry_type;
+ return 0;
+ }
+ else
+ {
+ printf("\nWARNING: Entry %d already on list %s, but we tried to add it to %s\n",
+ entry_num, __cvmx_pow_list_names[entry_list[entry_num]], __cvmx_pow_list_names[entry_type]);
+ return -1;
+ }
+}
+
+
+/**
+ * Display a list and mark all elements on the list as belonging to
+ * the list.
+ *
+ * @param entry_type Type of the list to display and mark
+ * @param dump POW capture data
+ * @param entry_list Array to store marks in
+ * @param valid Set if the queue contains any elements
+ * @param has_one Set if the queue contains exactly one element
+ * @param head The head pointer
+ * @param tail The tail pointer
+ */
+static void __cvmx_pow_display_list_and_walk(__cvmx_pow_list_types_t entry_type,
+ __cvmx_pow_dump_t *dump, uint8_t entry_list[],
+ int valid, int has_one, uint64_t head, uint64_t tail)
+{
+ __cvmx_pow_display_list(__cvmx_pow_list_names[entry_type], 0, valid, has_one, head, tail);
+ if (valid)
+ {
+ if (has_one)
+ __cvmx_pow_entry_mark_list(head, entry_type, entry_list);
+ else
+ {
+ while (head != tail)
+ {
+ if (__cvmx_pow_entry_mark_list(head, entry_type, entry_list))
+ break;
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ {
+ if (entry_type >= CVMX_POW_LIST_INPUT && entry_type < CVMX_POW_LIST_CORE)
+ head = dump->smemload[head][4].s_smemload3_cn68xx.next_index;
+ else
+ head = dump->smemload[head][4].s_smemload3_cn68xx.fwd_index;
+ }
+ else
+ head = dump->smemload[head][0].s_smemload0.next_index;
+ }
+ __cvmx_pow_entry_mark_list(tail, entry_type, entry_list);
+ }
+ }
+}
+
+
+void __cvmx_pow_display_v1(void *buffer, int buffer_size)
+{
+ __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer;
+ int num_pow_entries = cvmx_pow_get_num_entries();
+ int num_cores;
+ int core;
+ int index;
+ uint8_t entry_list[2048];
+
+ if (buffer_size < (int)sizeof(__cvmx_pow_dump_t))
+ {
+ cvmx_dprintf("cvmx_pow_dump: Buffer too small\n");
+ return;
+ }
+
+ memset(entry_list, 0, sizeof(entry_list));
+ num_cores = cvmx_octeon_num_cores();
+
+ /* Print the free list info */
+ __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_FREE, dump, entry_list,
+ dump->sindexload[0][0].sindexload0.free_val,
+ dump->sindexload[0][0].sindexload0.free_one,
+ dump->sindexload[0][0].sindexload0.free_head,
+ dump->sindexload[0][0].sindexload0.free_tail);
+
+ /* Print the core state */
+ for (core=0; core<num_cores; core++)
+ {
+ const int bit_rev = 1;
+ const int bit_cur = 2;
+ const int bit_wqp = 4;
+ printf("Core %d State: tag=%s,0x%08x", core,
+ OCT_TAG_TYPE_STRING(dump->sstatus[core][bit_cur].s_sstatus2.tag_type),
+ dump->sstatus[core][bit_cur].s_sstatus2.tag);
+ if (dump->sstatus[core][bit_cur].s_sstatus2.tag_type != CVMX_POW_TAG_TYPE_NULL_NULL)
+ {
+ __cvmx_pow_entry_mark_list(dump->sstatus[core][bit_cur].s_sstatus2.index, CVMX_POW_LIST_CORE + core, entry_list);
+ printf(" grp=%d", dump->sstatus[core][bit_cur].s_sstatus2.grp);
+ printf(" wqp=0x%016llx", CAST64(dump->sstatus[core][bit_cur|bit_wqp].s_sstatus4.wqp));
+ printf(" index=%d", dump->sstatus[core][bit_cur].s_sstatus2.index);
+ if (dump->sstatus[core][bit_cur].s_sstatus2.head)
+ printf(" head");
+ else
+ printf(" prev=%d", dump->sstatus[core][bit_cur|bit_rev].s_sstatus3.revlink_index);
+ if (dump->sstatus[core][bit_cur].s_sstatus2.tail)
+ printf(" tail");
+ else
+ printf(" next=%d", dump->sstatus[core][bit_cur].s_sstatus2.link_index);
+ }
+
+ if (dump->sstatus[core][0].s_sstatus0.pend_switch)
+ {
+ printf(" pend_switch=%d", dump->sstatus[core][0].s_sstatus0.pend_switch);
+ printf(" pend_switch_full=%d", dump->sstatus[core][0].s_sstatus0.pend_switch_full);
+ printf(" pend_switch_null=%d", dump->sstatus[core][0].s_sstatus0.pend_switch_null);
+ }
+
+ if (dump->sstatus[core][0].s_sstatus0.pend_desched)
+ {
+ printf(" pend_desched=%d", dump->sstatus[core][0].s_sstatus0.pend_desched);
+ printf(" pend_desched_switch=%d", dump->sstatus[core][0].s_sstatus0.pend_desched_switch);
+ printf(" pend_nosched=%d", dump->sstatus[core][0].s_sstatus0.pend_nosched);
+ if (dump->sstatus[core][0].s_sstatus0.pend_desched_switch)
+ printf(" pend_grp=%d", dump->sstatus[core][0].s_sstatus0.pend_grp);
+ }
+
+ if (dump->sstatus[core][0].s_sstatus0.pend_new_work)
+ {
+ if (dump->sstatus[core][0].s_sstatus0.pend_new_work_wait)
+ printf(" (Waiting for work)");
+ else
+ printf(" (Getting work)");
+ }
+ if (dump->sstatus[core][0].s_sstatus0.pend_null_rd)
+ printf(" pend_null_rd=%d", dump->sstatus[core][0].s_sstatus0.pend_null_rd);
+ if (dump->sstatus[core][0].s_sstatus0.pend_nosched_clr)
+ {
+ printf(" pend_nosched_clr=%d", dump->sstatus[core][0].s_sstatus0.pend_nosched_clr);
+ printf(" pend_index=%d", dump->sstatus[core][0].s_sstatus0.pend_index);
+ }
+ if (dump->sstatus[core][0].s_sstatus0.pend_switch ||
+ (dump->sstatus[core][0].s_sstatus0.pend_desched &&
+ dump->sstatus[core][0].s_sstatus0.pend_desched_switch))
+ {
+ printf(" pending tag=%s,0x%08x",
+ OCT_TAG_TYPE_STRING(dump->sstatus[core][0].s_sstatus0.pend_type),
+ dump->sstatus[core][0].s_sstatus0.pend_tag);
+ }
+ if (dump->sstatus[core][0].s_sstatus0.pend_nosched_clr)
+ printf(" pend_wqp=0x%016llx\n", CAST64(dump->sstatus[core][bit_wqp].s_sstatus1.pend_wqp));
+ printf("\n");
+ }
+
+ /* Print out the state of the nosched list and the 16 deschedule lists. */
+ __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_NOSCHED, dump, entry_list,
+ dump->sindexload[0][2].sindexload1.nosched_val,
+ dump->sindexload[0][2].sindexload1.nosched_one,
+ dump->sindexload[0][2].sindexload1.nosched_head,
+ dump->sindexload[0][2].sindexload1.nosched_tail);
+ for (index=0; index<16; index++)
+ {
+ __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_DESCHED + index, dump, entry_list,
+ dump->sindexload[index][2].sindexload1.des_val,
+ dump->sindexload[index][2].sindexload1.des_one,
+ dump->sindexload[index][2].sindexload1.des_head,
+ dump->sindexload[index][2].sindexload1.des_tail);
+ }
+
+ /* Print out the state of the 8 internal input queues */
+ for (index=0; index<8; index++)
+ {
+ __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_INPUT + index, dump, entry_list,
+ dump->sindexload[index][0].sindexload0.loc_val,
+ dump->sindexload[index][0].sindexload0.loc_one,
+ dump->sindexload[index][0].sindexload0.loc_head,
+ dump->sindexload[index][0].sindexload0.loc_tail);
+ }
+
+ /* Print out the state of the 16 memory queues */
+ for (index=0; index<8; index++)
+ {
+ const char *name;
+ if (dump->sindexload[index][1].sindexload2.rmt_is_head)
+ name = "Queue %da Memory (is head)";
+ else
+ name = "Queue %da Memory";
+ __cvmx_pow_display_list(name, index,
+ dump->sindexload[index][1].sindexload2.rmt_val,
+ dump->sindexload[index][1].sindexload2.rmt_one,
+ dump->sindexload[index][1].sindexload2.rmt_head,
+ dump->sindexload[index][3].sindexload3.rmt_tail);
+ if (dump->sindexload[index+8][1].sindexload2.rmt_is_head)
+ name = "Queue %db Memory (is head)";
+ else
+ name = "Queue %db Memory";
+ __cvmx_pow_display_list(name, index,
+ dump->sindexload[index+8][1].sindexload2.rmt_val,
+ dump->sindexload[index+8][1].sindexload2.rmt_one,
+ dump->sindexload[index+8][1].sindexload2.rmt_head,
+ dump->sindexload[index+8][3].sindexload3.rmt_tail);
+ }
+
+ /* Print out each of the internal POW entries. Each entry has a tag, group,
+ wqe, and possibly a next pointer. The next pointer is only valid if this
+ entry isn't marked as a tail */
+ for (index=0; index<num_pow_entries; index++)
+ {
+ printf("Entry %d(%-10s): tag=%s,0x%08x grp=%d wqp=0x%016llx", index,
+ __cvmx_pow_list_names[entry_list[index]],
+ OCT_TAG_TYPE_STRING(dump->smemload[index][0].s_smemload0.tag_type),
+ dump->smemload[index][0].s_smemload0.tag,
+ dump->smemload[index][0].s_smemload0.grp,
+ CAST64(dump->smemload[index][2].s_smemload1.wqp));
+ if (dump->smemload[index][0].s_smemload0.tail)
+ printf(" tail");
+ else
+ printf(" next=%d", dump->smemload[index][0].s_smemload0.next_index);
+ if (entry_list[index] >= CVMX_POW_LIST_DESCHED)
+ {
+ printf(" nosched=%d", dump->smemload[index][1].s_smemload2.nosched);
+ if (dump->smemload[index][1].s_smemload2.pend_switch)
+ {
+ printf(" pending tag=%s,0x%08x",
+ OCT_TAG_TYPE_STRING(dump->smemload[index][1].s_smemload2.pend_type),
+ dump->smemload[index][1].s_smemload2.pend_tag);
+ }
+ }
+ printf("\n");
+ }
+}
+
+void __cvmx_pow_display_v2(void *buffer, int buffer_size)
+{
+ __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer;
+ int num_pow_entries = cvmx_pow_get_num_entries();
+ int num_cores;
+ int core;
+ int index;
+ uint8_t entry_list[2048];
+
+ if (buffer_size < (int)sizeof(__cvmx_pow_dump_t))
+ {
+ cvmx_dprintf("cvmx_pow_dump: Buffer too small, pow_dump_t = 0x%x, buffer_size = 0x%x\n", (int)sizeof(__cvmx_pow_dump_t), buffer_size);
+ return;
+ }
+
+ memset(entry_list, 0, sizeof(entry_list));
+ num_cores = cvmx_octeon_num_cores();
+
+ /* Print the free list info */
+ {
+ int valid[3], has_one[3], head[3], tail[3], qnum_head, qnum_tail;
+ int idx;
+
+ valid[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_val;
+ valid[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_val;
+ valid[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_val;
+ has_one[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_one;
+ has_one[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_one;
+ has_one[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_one;
+ head[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_head;
+ head[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_head;
+ head[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_head;
+ tail[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_tail;
+ tail[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_tail;
+ tail[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_tail;
+ qnum_head = dump->sindexload[0][4].sindexload1_cn68xx.qnum_head;
+ qnum_tail = dump->sindexload[0][4].sindexload1_cn68xx.qnum_tail;
+
+ printf("Free List: qnum_head=%d, qnum_tail=%d\n", qnum_head, qnum_tail);
+ printf("Free0: valid=%d, one=%d, head=%llu, tail=%llu\n", valid[0], has_one[0], CAST64(head[0]), CAST64(tail[0]));
+ printf("Free1: valid=%d, one=%d, head=%llu, tail=%llu\n", valid[1], has_one[1], CAST64(head[1]), CAST64(tail[1]));
+ printf("Free2: valid=%d, one=%d, head=%llu, tail=%llu\n", valid[2], has_one[2], CAST64(head[2]), CAST64(tail[2]));
+
+ idx=qnum_head;
+ while (valid[0] || valid[1] || valid[2])
+ {
+ int qidx = idx % 3;
+
+ if (head[qidx] == tail[qidx])
+ valid[qidx] = 0;
+
+ if (__cvmx_pow_entry_mark_list(head[qidx], CVMX_POW_LIST_FREE, entry_list))
+ break;
+ head[qidx] = dump->smemload[head[qidx]][4].s_smemload3_cn68xx.fwd_index;
+ //printf("qidx = %d, idx = %d, head[qidx] = %d\n", qidx, idx, head[qidx]);
+ idx++;
+ }
+ }
+
+ /* Print the core state */
+ for (core = 0; core < num_cores; core++)
+ {
+ int pendtag = 1;
+ int pendwqp = 2;
+ int tag = 3;
+ int wqp = 4;
+ int links = 5;
+
+ printf("Core %d State: tag=%s,0x%08x", core,
+ OCT_TAG_TYPE_STRING(dump->sstatus[core][tag].s_sstatus2_cn68xx.tag_type),
+ dump->sstatus[core][tag].s_sstatus2_cn68xx.tag);
+ if (dump->sstatus[core][tag].s_sstatus2_cn68xx.tag_type != CVMX_POW_TAG_TYPE_NULL_NULL)
+ {
+ __cvmx_pow_entry_mark_list(dump->sstatus[core][tag].s_sstatus2_cn68xx.index, CVMX_POW_LIST_CORE + core, entry_list);
+ printf(" grp=%d", dump->sstatus[core][tag].s_sstatus2_cn68xx.grp);
+ printf(" wqp=0x%016llx", CAST64(dump->sstatus[core][wqp].s_sstatus3_cn68xx.wqp));
+ printf(" index=%d", dump->sstatus[core][tag].s_sstatus2_cn68xx.index);
+ if (dump->sstatus[core][links].s_sstatus4_cn68xx.head)
+ printf(" head");
+ else
+ printf(" prev=%d", dump->sstatus[core][links].s_sstatus4_cn68xx.revlink_index);
+ if (dump->sstatus[core][links].s_sstatus4_cn68xx.tail)
+ printf(" tail");
+ else
+ printf(" next=%d", dump->sstatus[core][links].s_sstatus4_cn68xx.link_index);
+ }
+ if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_switch)
+ {
+ printf(" pend_switch=%d", dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_switch);
+ }
+
+ if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_desched)
+ {
+ printf(" pend_desched=%d", dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_desched);
+ printf(" pend_nosched=%d", dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_nosched);
+ }
+ if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_get_work)
+ {
+ if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_get_work_wait)
+ printf(" (Waiting for work)");
+ else
+ printf(" (Getting work)");
+ }
+ if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_alloc_we)
+ printf(" pend_alloc_we=%d", dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_alloc_we);
+ if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_nosched_clr)
+ {
+ printf(" pend_nosched_clr=%d", dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_nosched_clr);
+ printf(" pend_index=%d", dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_index);
+ }
+ if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_switch)
+ {
+ printf(" pending tag=%s,0x%08x",
+ OCT_TAG_TYPE_STRING(dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_type),
+ dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_tag);
+ }
+ if (dump->sstatus[core][pendwqp].s_sstatus1_cn68xx.pend_nosched_clr)
+ printf(" pend_wqp=0x%016llx\n", CAST64(dump->sstatus[core][pendwqp].s_sstatus1_cn68xx.pend_wqp));
+ printf("\n");
+ }
+
+ /* Print out the state of the nosched list and the 16 deschedule lists. */
+ __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_NOSCHED, dump, entry_list,
+ dump->sindexload[0][3].sindexload0_cn68xx.queue_val,
+ dump->sindexload[0][3].sindexload0_cn68xx.queue_one,
+ dump->sindexload[0][3].sindexload0_cn68xx.queue_head,
+ dump->sindexload[0][3].sindexload0_cn68xx.queue_tail);
+ for (index=0; index<64; index++)
+ {
+ __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_DESCHED + index, dump, entry_list,
+ dump->sindexload[index][2].sindexload0_cn68xx.queue_val,
+ dump->sindexload[index][2].sindexload0_cn68xx.queue_one,
+ dump->sindexload[index][2].sindexload0_cn68xx.queue_head,
+ dump->sindexload[index][2].sindexload0_cn68xx.queue_tail);
+ }
+
+ /* Print out the state of the 8 internal input queues */
+ for (index=0; index<8; index++)
+ {
+ __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_INPUT + index, dump, entry_list,
+ dump->sindexload[index][1].sindexload0_cn68xx.queue_val,
+ dump->sindexload[index][1].sindexload0_cn68xx.queue_one,
+ dump->sindexload[index][1].sindexload0_cn68xx.queue_head,
+ dump->sindexload[index][1].sindexload0_cn68xx.queue_tail);
+ }
+
+ /* Print out the state of the 16 memory queues */
+ for (index=0; index<8; index++)
+ {
+ const char *name;
+ if (dump->sindexload[index][1].sindexload0_cn68xx.queue_head)
+ name = "Queue %da Memory (is head)";
+ else
+ name = "Queue %da Memory";
+ __cvmx_pow_display_list(name, index,
+ dump->sindexload[index][1].sindexload0_cn68xx.queue_val,
+ dump->sindexload[index][1].sindexload0_cn68xx.queue_one,
+ dump->sindexload[index][1].sindexload0_cn68xx.queue_head,
+ dump->sindexload[index][1].sindexload0_cn68xx.queue_tail);
+ if (dump->sindexload[index+8][1].sindexload0_cn68xx.queue_head)
+ name = "Queue %db Memory (is head)";
+ else
+ name = "Queue %db Memory";
+ __cvmx_pow_display_list(name, index,
+ dump->sindexload[index+8][1].sindexload0_cn68xx.queue_val,
+ dump->sindexload[index+8][1].sindexload0_cn68xx.queue_one,
+ dump->sindexload[index+8][1].sindexload0_cn68xx.queue_head,
+ dump->sindexload[index+8][1].sindexload0_cn68xx.queue_tail);
+ }
+
+ /* Print out each of the internal POW entries. Each entry has a tag, group,
+ wqe, and possibly a next pointer. The next pointer is only valid if this
+ entry isn't marked as a tail */
+ for (index=0; index<num_pow_entries; index++)
+ {
+ printf("Entry %d(%-10s): tag=%s,0x%08x grp=%d wqp=0x%016llx", index,
+ __cvmx_pow_list_names[entry_list[index]],
+ OCT_TAG_TYPE_STRING(dump->smemload[index][1].s_smemload0_cn68xx.tag_type),
+ dump->smemload[index][1].s_smemload0_cn68xx.tag,
+ dump->smemload[index][2].s_smemload1_cn68xx.grp,
+ CAST64(dump->smemload[index][2].s_smemload1_cn68xx.wqp));
+ if (dump->smemload[index][1].s_smemload0_cn68xx.tail)
+ printf(" tail");
+ else
+ printf(" next=%d", dump->smemload[index][4].s_smemload3_cn68xx.fwd_index);
+ if (entry_list[index] >= CVMX_POW_LIST_DESCHED)
+ {
+ printf(" prev=%d", dump->smemload[index][4].s_smemload3_cn68xx.fwd_index);
+ printf(" nosched=%d", dump->smemload[index][2].s_smemload1_cn68xx.nosched);
+ if (dump->smemload[index][3].s_smemload2_cn68xx.pend_switch)
+ {
+ printf(" pending tag=%s,0x%08x",
+ OCT_TAG_TYPE_STRING(dump->smemload[index][3].s_smemload2_cn68xx.pend_type),
+ dump->smemload[index][3].s_smemload2_cn68xx.pend_tag);
+ }
+ }
+ printf("\n");
+ }
+}
+
+/**
+ * Dump a POW capture to the console in a human readable format.
+ *
+ * @param buffer POW capture from cvmx_pow_capture()
+ * @param buffer_size
+ * Size of the buffer
+ */
+void cvmx_pow_display(void *buffer, int buffer_size)
+{
+ printf("POW Display Start\n");
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ __cvmx_pow_display_v2(buffer, buffer_size);
+ else
+ __cvmx_pow_display_v1(buffer, buffer_size);
+
+ printf("POW Display End\n");
+ return;
+}
+
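For reference, a minimal usage sketch (illustrative only, not part of the commit): it pairs cvmx_pow_display() with cvmx_pow_capture(), which the function's comment names as the source of the buffer, and assumes that call returns zero on success and that 32 KB is enough room for the capture on the target part.

    /* Illustrative only: capture the POW/SSO state and dump it.
     * The 32 KB size is an assumed, not a documented, minimum. */
    static void dump_pow_state(void)
    {
        static uint8_t buffer[32 * 1024];
        if (cvmx_pow_capture(buffer, (int)sizeof(buffer)) == 0)
            cvmx_pow_display(buffer, (int)sizeof(buffer));
    }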
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pow.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-pow.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-pow.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-pow.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,2351 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2011 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Interface to the hardware Packet Order / Work unit.
+ *
+ * New, starting with SDK 1.7.0, cvmx-pow supports a number of
+ * extended consistency checks. The define
+ * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
+ * internal state checks to find common programming errors. If
+ * CVMX_ENABLE_POW_CHECKS is not defined, the checks are enabled by
+ * default. For example, cvmx-pow will check for the following
+ * programming errors or POW state inconsistencies:
+ * - Requesting a POW operation with an active tag switch in
+ * progress.
+ * - Waiting for a tag switch to complete for an excessively
+ * long period. This is normally a sign of an error in locking
+ * causing deadlock.
+ * - Illegal tag switches from NULL_NULL.
+ * - Illegal tag switches from NULL.
+ * - Illegal deschedule request.
+ * - WQE pointer not matching the one attached to the core by
+ * the POW.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_POW_H__
+#define __CVMX_POW_H__
+
+#include "cvmx-scratch.h"
+#include "cvmx-wqe.h"
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx-sso-defs.h>
+#else
+#include "cvmx-warn.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+ /*
+ * For the FreeBSD kernel, have POW consistency checks depend on
+ * the setting of INVARIANTS.
+ */
+ #ifndef CVMX_ENABLE_POW_CHECKS
+ #ifdef INVARIANTS
+ #define CVMX_ENABLE_POW_CHECKS 1
+ #else
+ #define CVMX_ENABLE_POW_CHECKS 0
+ #endif
+ #endif
+#else
+ /* Default to having all POW consistency checks turned on */
+ #ifndef CVMX_ENABLE_POW_CHECKS
+ #define CVMX_ENABLE_POW_CHECKS 1
+ #endif
+#endif
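Because the block above only supplies defaults behind #ifndef guards, a build can pin the setting itself. A sketch, assuming the define is made visible before this header is included:

    /* Force POW consistency checks on, e.g. from a project config header
     * included ahead of cvmx-pow.h, or via -DCVMX_ENABLE_POW_CHECKS=1
     * on the compiler command line. */
    #define CVMX_ENABLE_POW_CHECKS 1
    #include "cvmx-pow.h"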
+
+/**
+ * Wait flag values for pow functions.
+ */
+typedef enum
+{
+ CVMX_POW_WAIT = 1,
+ CVMX_POW_NO_WAIT = 0,
+} cvmx_pow_wait_t;
+
+/**
+ * POW tag operations. These are used in the data stored to the POW.
+ */
+typedef enum
+{
+ CVMX_POW_TAG_OP_SWTAG = 0L, /**< switch the tag (only) for this PP
+ - the previous tag should be non-NULL in this case
+ - tag switch response required
+ - fields used: op, type, tag */
+ CVMX_POW_TAG_OP_SWTAG_FULL = 1L, /**< switch the tag for this PP, with full information
+ - this should be used when the previous tag is NULL
+ - tag switch response required
+ - fields used: address, op, grp, type, tag */
+ CVMX_POW_TAG_OP_SWTAG_DESCH = 2L, /**< switch the tag (and/or group) for this PP and de-schedule
+ - OK to keep the tag the same and only change the group
+ - fields used: op, no_sched, grp, type, tag */
+ CVMX_POW_TAG_OP_DESCH = 3L, /**< just de-schedule
+ - fields used: op, no_sched */
+ CVMX_POW_TAG_OP_ADDWQ = 4L, /**< create an entirely new work queue entry
+ - fields used: address, op, qos, grp, type, tag */
+ CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,/**< just update the work queue pointer and grp for this PP
+ - fields used: address, op, grp */
+ CVMX_POW_TAG_OP_SET_NSCHED = 6L, /**< set the no_sched bit on the de-schedule list
+ - does nothing if the selected entry is not on the de-schedule list
+ - does nothing if the stored work queue pointer does not match the address field
+ - fields used: address, index, op
+ Before issuing a *_NSCHED operation, SW must guarantee that all
+ prior deschedules and set/clr NSCHED operations are complete and all
+ prior switches are complete. The hardware provides the opsdone bit
+ and swdone bit for SW polling. After issuing a *_NSCHED operation,
+ SW must guarantee that the set/clr NSCHED is complete before
+ any subsequent operations. */
+ CVMX_POW_TAG_OP_CLR_NSCHED = 7L, /**< clears the no_sched bit on the de-schedule list
+ - does nothing if the selected entry is not on the de-schedule list
+ - does nothing if the stored work queue pointer does not match the address field
+ - fields used: address, index, op
+ Before issuing a *_NSCHED operation, SW must guarantee that all
+ prior deschedules and set/clr NSCHED operations are complete and all
+ prior switches are complete. The hardware provides the opsdone bit
+ and swdone bit for SW polling. After issuing a *_NSCHED operation,
+ SW must guarantee that the set/clr NSCHED is complete before
+ any subsequent operations. */
+ CVMX_POW_TAG_OP_NOP = 15L /**< do nothing */
+} cvmx_pow_tag_op_t;
+
+/**
+ * This structure defines the store data on a store to POW
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
+ uint64_t unused : 2;
+ uint64_t index :13; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
+ cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
+ uint64_t unused2 : 2;
+ uint64_t qos : 3; /**< the QOS level for the packet. qos is only used for CVMX_POW_TAG_OP_ADDWQ */
+ uint64_t grp : 4; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
+ cvmx_pow_tag_type_t type : 3; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
+ uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
+#else
+ uint64_t tag :32;
+ cvmx_pow_tag_type_t type : 3;
+ uint64_t grp : 4;
+ uint64_t qos : 3;
+ uint64_t unused2 : 2;
+ cvmx_pow_tag_op_t op : 4;
+ uint64_t index :13;
+ uint64_t unused : 2;
+ uint64_t no_sched : 1;
+#endif
+ } s_cn38xx;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
+ cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
+ uint64_t unused1 : 4;
+ uint64_t index :11; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
+ uint64_t unused2 : 1;
+ uint64_t grp : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
+ uint64_t unused3 : 3;
+ cvmx_pow_tag_type_t type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
+ uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
+#else
+ uint64_t tag :32;
+ cvmx_pow_tag_type_t type : 2;
+ uint64_t unused3 : 3;
+ uint64_t grp : 6;
+ uint64_t unused2 : 1;
+ uint64_t index :11;
+ uint64_t unused1 : 4;
+ cvmx_pow_tag_op_t op : 4;
+ uint64_t no_sched : 1;
+#endif
+ } s_cn68xx_clr;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
+ cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
+ uint64_t unused1 : 12;
+ uint64_t qos : 3; /**< the QOS level for the packet. qos is only used for CVMX_POW_TAG_OP_ADDWQ */
+ uint64_t unused2 : 1;
+ uint64_t grp : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
+ uint64_t unused3 : 3;
+ cvmx_pow_tag_type_t type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
+ uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
+#else
+ uint64_t tag :32;
+ cvmx_pow_tag_type_t type : 2;
+ uint64_t unused3 : 3;
+ uint64_t grp : 6;
+ uint64_t unused2 : 1;
+ uint64_t qos : 3;
+ uint64_t unused1 : 12;
+ cvmx_pow_tag_op_t op : 4;
+ uint64_t no_sched : 1;
+#endif
+ } s_cn68xx_add;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
+ cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
+ uint64_t unused1 : 16;
+ uint64_t grp : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
+ uint64_t unused3 : 3;
+ cvmx_pow_tag_type_t type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
+ uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
+#else
+ uint64_t tag :32;
+ cvmx_pow_tag_type_t type : 2;
+ uint64_t unused3 : 3;
+ uint64_t grp : 6;
+ uint64_t unused1 : 16;
+ cvmx_pow_tag_op_t op : 4;
+ uint64_t no_sched : 1;
+#endif
+ } s_cn68xx_other;
+
+} cvmx_pow_tag_req_t;
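As an illustration of how this union is meant to be filled in, a sketch using the pre-CN68XX layout (the tag value and type here are arbitrary example inputs; the store address itself is described by cvmx_pow_tag_store_addr_t further below):

    /* Illustrative only: encode a SWTAG request in the s_cn38xx layout. */
    static inline uint64_t example_encode_swtag(void)
    {
        cvmx_pow_tag_req_t req;
        req.u64 = 0;
        req.s_cn38xx.op   = CVMX_POW_TAG_OP_SWTAG;
        req.s_cn38xx.type = CVMX_POW_TAG_TYPE_ORDERED; /* from cvmx-csr-enums.h */
        req.s_cn38xx.tag  = 0x12345678;                /* example tag */
        return req.u64;  /* value to store to the POW tag store address */
    }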
+
+typedef struct {
+ uint32_t tag;
+ uint16_t index;
+ uint8_t grp;
+ uint8_t tag_type;
+} cvmx_pow_tag_info_t;
+
+/**
+ * This structure describes the addresses used for loads from the POW.
+ */
+typedef union
+{
+ uint64_t u64;
+
+ /**
+ * Address for new work request loads (did<2:0> == 0)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t reserved_49_61 : 13; /**< Must be zero */
+ uint64_t is_io : 1; /**< Must be one */
+ uint64_t did : 8; /**< the ID of POW -- did<2:0> == 0 in this case */
+ uint64_t reserved_4_39 : 36; /**< Must be zero */
+ uint64_t wait : 1; /**< If set, don't return load response until work is available */
+ uint64_t reserved_0_2 : 3; /**< Must be zero */
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t wait : 1;
+ uint64_t reserved_4_39 : 36;
+ uint64_t did : 8;
+ uint64_t is_io : 1;
+ uint64_t reserved_49_61 : 13;
+ uint64_t mem_region : 2;
+#endif
+ } swork;
+
+ /**
+ * Address for loads to get POW internal status
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t reserved_49_61 : 13; /**< Must be zero */
+ uint64_t is_io : 1; /**< Must be one */
+ uint64_t did : 8; /**< the ID of POW -- did<2:0> == 1 in this case */
+ uint64_t reserved_10_39 : 30; /**< Must be zero */
+ uint64_t coreid : 4; /**< The core id to get status for */
+ uint64_t get_rev : 1; /**< If set and get_cur is set, return reverse tag-list pointer rather than forward tag-list pointer */
+ uint64_t get_cur : 1; /**< If set, return current status rather than pending status */
+ uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type */
+ uint64_t reserved_0_2 : 3; /**< Must be zero */
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t get_wqp : 1;
+ uint64_t get_cur : 1;
+ uint64_t get_rev : 1;
+ uint64_t coreid : 4;
+ uint64_t reserved_10_39 : 30;
+ uint64_t did : 8;
+ uint64_t is_io : 1;
+ uint64_t reserved_49_61 : 13;
+ uint64_t mem_region : 2;
+#endif
+ } sstatus;
+
+ /**
+ * Address for loads to get 68XX SSO internal status
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t reserved_49_61 : 13; /**< Must be zero */
+ uint64_t is_io : 1; /**< Must be one */
+ uint64_t did : 8; /**< the ID of POW -- did<2:0> == 1 in this case */
+ uint64_t reserved_14_39 : 26; /**< Must be zero */
+ uint64_t coreid : 5; /**< The core id to get status for */
+ uint64_t reserved_6_8 : 3;
+ uint64_t opcode : 3; /**< Status operation */
+ uint64_t reserved_0_2 : 3; /**< Must be zero */
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t opcode : 3;
+ uint64_t reserved_6_8 : 3;
+ uint64_t coreid : 5;
+ uint64_t reserved_14_39 : 26;
+ uint64_t did : 8;
+ uint64_t is_io : 1;
+ uint64_t reserved_49_61 : 13;
+ uint64_t mem_region : 2;
+#endif
+ } sstatus_cn68xx;
+
+ /**
+ * Address for memory loads to get POW internal state
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t reserved_49_61 : 13; /**< Must be zero */
+ uint64_t is_io : 1; /**< Must be one */
+ uint64_t did : 8; /**< the ID of POW -- did<2:0> == 2 in this case */
+ uint64_t reserved_16_39 : 24; /**< Must be zero */
+ uint64_t index : 11; /**< POW memory index */
+ uint64_t get_des : 1; /**< If set, return deschedule information rather than the standard
+ response for work-queue index (invalid if the work-queue entry is not on the
+ deschedule list). */
+ uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type (no effect when get_des set). */
+ uint64_t reserved_0_2 : 3; /**< Must be zero */
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t get_wqp : 1;
+ uint64_t get_des : 1;
+ uint64_t index : 11;
+ uint64_t reserved_16_39 : 24;
+ uint64_t did : 8;
+ uint64_t is_io : 1;
+ uint64_t reserved_49_61 : 13;
+ uint64_t mem_region : 2;
+#endif
+ } smemload;
+
+ /**
+ * Address for memory loads to get SSO internal state
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t reserved_49_61 : 13; /**< Must be zero */
+ uint64_t is_io : 1; /**< Must be one */
+ uint64_t did : 8; /**< the ID of SSO - did<2:0> == 2 in this case */
+ uint64_t reserved_20_39 : 20; /**< Must be zero */
+ uint64_t index : 11; /**< SSO memory index */
+ uint64_t reserved_6_8 : 3; /**< Must be zero */
+ uint64_t opcode : 3; /**< Read TAG/WQ pointer/pending tag/next pointer */
+ uint64_t reserved_0_2 : 3; /**< Must be zero */
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t opcode : 3;
+ uint64_t reserved_6_8 : 3;
+ uint64_t index : 11;
+ uint64_t reserved_20_39 : 20;
+ uint64_t did : 8;
+ uint64_t is_io : 1;
+ uint64_t reserved_49_61 : 13;
+ uint64_t mem_region : 2;
+#endif
+ } smemload_cn68xx;
+
+ /**
+ * Address for index/pointer loads
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t reserved_49_61 : 13; /**< Must be zero */
+ uint64_t is_io : 1; /**< Must be one */
+ uint64_t did : 8; /**< the ID of POW -- did<2:0> == 3 in this case */
+ uint64_t reserved_9_39 : 31; /**< Must be zero */
+ uint64_t qosgrp : 4; /**< when {get_rmt ==0 AND get_des_get_tail == 0}, this field selects one of
+ eight POW internal-input queues (0-7), one per QOS level; values 8-15 are
+ illegal in this case;
+ when {get_rmt ==0 AND get_des_get_tail == 1}, this field selects one of
+ 16 deschedule lists (per group);
+ when get_rmt ==1, this field selects one of 16 memory-input queue lists.
+ The two memory-input queue lists associated with each QOS level are:
+ - qosgrp = 0, qosgrp = 8: QOS0
+ - qosgrp = 1, qosgrp = 9: QOS1
+ - qosgrp = 2, qosgrp = 10: QOS2
+ - qosgrp = 3, qosgrp = 11: QOS3
+ - qosgrp = 4, qosgrp = 12: QOS4
+ - qosgrp = 5, qosgrp = 13: QOS5
+ - qosgrp = 6, qosgrp = 14: QOS6
+ - qosgrp = 7, qosgrp = 15: QOS7 */
+ uint64_t get_des_get_tail: 1; /**< If set and get_rmt is clear, return deschedule list indexes
+ rather than indexes for the specified qos level; if set and get_rmt is set, return
+ the tail pointer rather than the head pointer for the specified qos level. */
+ uint64_t get_rmt : 1; /**< If set, return remote pointers rather than the local indexes for the specified qos level. */
+ uint64_t reserved_0_2 : 3; /**< Must be zero */
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t get_rmt : 1;
+ uint64_t get_des_get_tail: 1;
+ uint64_t qosgrp : 4;
+ uint64_t reserved_9_39 : 31;
+ uint64_t did : 8;
+ uint64_t is_io : 1;
+ uint64_t reserved_49_61 : 13;
+ uint64_t mem_region : 2;
+#endif
+ } sindexload;
+
+ /**
+ * Address for Index/Pointer loads to get SSO internal state
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t reserved_49_61 : 13; /**< Must be zero */
+ uint64_t is_io : 1; /**< Must be one */
+ uint64_t did : 8; /**< the ID of SSO - did<2:0> == 2 in this case */
+ uint64_t reserved_15_39 : 25; /**< Must be zero */
+ uint64_t qos_grp : 6; /**< When opcode = IPL_IQ, this field specifies IQ (or QOS).
+ When opcode = IPL_DESCHED, this field specifies the group.
+ This field is reserved for all other opcodes. */
+ uint64_t reserved_6_8 : 3; /**< Must be zero */
+ uint64_t opcode : 3; /**< Read TAG/WQ pointer/pending tag/next pointer */
+ uint64_t reserved_0_2 : 3; /**< Must be zero */
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t opcode : 3;
+ uint64_t reserved_6_8 : 3;
+ uint64_t qos_grp : 6;
+ uint64_t reserved_15_39 : 25;
+ uint64_t did : 8;
+ uint64_t is_io : 1;
+ uint64_t reserved_49_61 : 13;
+ uint64_t mem_region : 2;
+#endif
+ } sindexload_cn68xx;
+
+ /**
+ * address for NULL_RD request (did<2:0> == 4)
+ * when this is read, HW attempts to change the state to NULL if it is NULL_NULL
+ * (the hardware cannot switch from NULL_NULL to NULL if a POW entry is not available -
+ * software may need to recover by finishing another piece of work before a POW
+ * entry can ever become available.)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t reserved_49_61 : 13; /**< Must be zero */
+ uint64_t is_io : 1; /**< Must be one */
+ uint64_t did : 8; /**< the ID of POW -- did<2:0> == 4 in this case */
+ uint64_t reserved_0_39 : 40; /**< Must be zero */
+#else
+ uint64_t reserved_0_39 : 40;
+ uint64_t did : 8;
+ uint64_t is_io : 1;
+ uint64_t reserved_49_61 : 13;
+ uint64_t mem_region : 2;
+#endif
+ } snull_rd;
+} cvmx_pow_load_addr_t;
+
+/**
+ * This structure defines the response to a load/SENDSINGLE to POW (except CSR reads)
+ */
+typedef union
+{
+ uint64_t u64;
+
+ /**
+ * Response to new work request loads
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t no_work : 1; /**< Set when no new work queue entry was returned.
+ If there was de-scheduled work, the HW will definitely
+ return it. When this bit is set, it could mean
+ either:
+ - There was no work, or
+ - There was no work that the HW could find. This
+ case can happen, regardless of the wait bit value
+ in the original request, when there is work
+ in the IQs that is too deep down the list. */
+ uint64_t reserved_40_62 : 23; /**< Must be zero */
+ uint64_t addr : 40; /**< 36 in O1 -- the work queue pointer */
+#else
+ uint64_t addr : 40;
+ uint64_t reserved_40_62 : 23;
+ uint64_t no_work : 1;
+#endif
+ } s_work;
+
+ /**
+ * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or
+ SWTAG_FULL, and the POW entry has not left the list for the original tag. */
+ uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. */
+ uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */
+ uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
+ uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */
+ uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
+ uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */
+ uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */
+ uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */
+ uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
+ uint64_t reserved_51 : 1;
+ uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
+ uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */
+ uint64_t reserved_34_35 : 2;
+ uint64_t pend_type : 2; /**< This is the tag type when pend_switch or (pend_desched AND pend_desched_switch) are set. */
+ uint64_t pend_tag : 32; /**< - this is the tag when pend_switch or (pend_desched AND pend_desched_switch) are set. */
+#else
+ uint64_t pend_tag : 32;
+ uint64_t pend_type : 2;
+ uint64_t reserved_34_35 : 2;
+ uint64_t pend_grp : 4;
+ uint64_t pend_index : 11;
+ uint64_t reserved_51 : 1;
+ uint64_t pend_nosched_clr: 1;
+ uint64_t pend_null_rd : 1;
+ uint64_t pend_new_work_wait: 1;
+ uint64_t pend_new_work : 1;
+ uint64_t pend_nosched : 1;
+ uint64_t pend_desched_switch: 1;
+ uint64_t pend_desched : 1;
+ uint64_t pend_switch_null: 1;
+ uint64_t pend_switch_full: 1;
+ uint64_t pend_switch : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s_sstatus0;
+
+ /**
+ * Result for a SSO Status Load (when opcode is SL_PENDTAG)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pend_switch : 1; /**< Set when there is a pending non-UNSCHEDULED SWTAG or
+ SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
+ uint64_t pend_get_work : 1; /**< Set when there is a pending GET_WORK */
+ uint64_t pend_get_work_wait: 1; /**< When pend_get_work is set, this bit indicates that the
+ wait bit was set. */
+ uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
+ uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
+ uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
+ uint64_t pend_alloc_we : 1; /**< Set when there is a pending ALLOC_WE. */
+ uint64_t reserved_48_56 : 9;
+ uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
+ uint64_t reserved_34_36 : 3;
+ uint64_t pend_type : 2; /**< This is the tag type when pend_switch is set. */
+ uint64_t pend_tag : 32; /**< This is the tag when pend_switch is set. */
+#else
+ uint64_t pend_tag : 32;
+ uint64_t pend_type : 2;
+ uint64_t reserved_34_36 : 3;
+ uint64_t pend_index : 11;
+ uint64_t reserved_48_56 : 9;
+ uint64_t pend_alloc_we : 1;
+ uint64_t pend_desched : 1;
+ uint64_t pend_nosched_clr: 1;
+ uint64_t pend_nosched : 1;
+ uint64_t pend_get_work_wait: 1;
+ uint64_t pend_get_work : 1;
+ uint64_t pend_switch : 1;
+#endif
+ } s_sstatus0_cn68xx;
+
+ /**
+ * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or
+ SWTAG_FULL, and the POW entry has not left the list for the original tag. */
+ uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. */
+ uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */
+ uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
+ uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */
+ uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
+ uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */
+ uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */
+ uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */
+ uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
+ uint64_t reserved_51 : 1;
+ uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
+ uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */
+ uint64_t pend_wqp : 36; /**< This is the wqp when pend_nosched_clr is set. */
+#else
+ uint64_t pend_wqp : 36;
+ uint64_t pend_grp : 4;
+ uint64_t pend_index : 11;
+ uint64_t reserved_51 : 1;
+ uint64_t pend_nosched_clr: 1;
+ uint64_t pend_null_rd : 1;
+ uint64_t pend_new_work_wait: 1;
+ uint64_t pend_new_work : 1;
+ uint64_t pend_nosched : 1;
+ uint64_t pend_desched_switch: 1;
+ uint64_t pend_desched : 1;
+ uint64_t pend_switch_null: 1;
+ uint64_t pend_switch_full: 1;
+ uint64_t pend_switch : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s_sstatus1;
+
+ /**
+ * Result for a SSO Status Load (when opcode is SL_PENDWQP)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pend_switch : 1; /**< Set when there is a pending non-UNSCHEDULED SWTAG or
+ SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
+ uint64_t pend_get_work : 1; /**< Set when there is a pending GET_WORK */
+ uint64_t pend_get_work_wait: 1; /**< When pend_get_work is set, this bit indicates that the
+ wait bit was set. */
+ uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
+ uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
+ uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
+ uint64_t pend_alloc_we : 1; /**< Set when there is a pending ALLOC_WE. */
+ uint64_t reserved_51_56 : 6;
+ uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t pend_wqp : 38; /**< This is the wqp when pend_nosched_clr is set. */
+#else
+ uint64_t pend_wqp : 38;
+ uint64_t reserved_38_39 : 2;
+ uint64_t pend_index : 11;
+ uint64_t reserved_51_56 : 6;
+ uint64_t pend_alloc_we : 1;
+ uint64_t pend_desched : 1;
+ uint64_t pend_nosched_clr: 1;
+ uint64_t pend_nosched : 1;
+ uint64_t pend_get_work_wait: 1;
+ uint64_t pend_get_work : 1;
+ uint64_t pend_switch : 1;
+#endif
+ } s_sstatus1_cn68xx;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==0)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and
+ tag_type is not NULL or NULL_NULL). */
+ uint64_t index : 11; /**< The POW entry attached to the core. */
+ uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
+ uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in
+ the NULL or NULL_NULL state). */
+ uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
+ NULL or NULL_NULL state). */
+ uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list
+ entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
+ uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on
+ SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
+#else
+ uint64_t tag : 32;
+ uint64_t tag_type : 2;
+ uint64_t tail : 1;
+ uint64_t head : 1;
+ uint64_t grp : 4;
+ uint64_t index : 11;
+ uint64_t link_index : 11;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s_sstatus2;
+
+ /**
+ * Result for a SSO Status Load (when opcode is SL_TAG)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_57_63 : 7;
+ uint64_t index : 11; /**< The SSO entry attached to the core. */
+ uint64_t reserved_45 : 1;
+ uint64_t grp : 6; /**< The group attached to the core (updated when new tag list entered on
+ SWTAG_FULL). */
+ uint64_t head : 1; /**< Set when this SSO entry is at the head of its tag list (also set when in the
+ UNSCHEDULED or EMPTY state). */
+ uint64_t tail : 1; /**< Set when this SSO entry is at the tail of its tag list (also set when in the
+ UNSCHEDULED or EMPTY state). */
+ uint64_t reserved_34_36 : 3;
+ uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list entered
+ on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
+ uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on SWTAG,
+ SWTAG_FULL, or SWTAG_DESCHED). */
+#else
+ uint64_t tag : 32;
+ uint64_t tag_type : 2;
+ uint64_t reserved_34_36 : 3;
+ uint64_t tail : 1;
+ uint64_t head : 1;
+ uint64_t grp : 6;
+ uint64_t reserved_45 : 1;
+ uint64_t index : 11;
+ uint64_t reserved_57_63 : 7;
+#endif
+ } s_sstatus2_cn68xx;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0
+ (and tag_type is not NULL or NULL_NULL). This field is unpredictable
+ when the core's state is NULL or NULL_NULL. */
+ uint64_t index : 11; /**< The POW entry attached to the core. */
+ uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
+ uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in
+ the NULL or NULL_NULL state). */
+ uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
+ NULL or NULL_NULL state). */
+ uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list
+ entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
+ uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on
+ SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
+#else
+ uint64_t tag : 32;
+ uint64_t tag_type : 2;
+ uint64_t tail : 1;
+ uint64_t head : 1;
+ uint64_t grp : 4;
+ uint64_t index : 11;
+ uint64_t revlink_index : 11;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s_sstatus3;
+
+ /**
+ * Result for a SSO Status Load (when opcode is SL_WQP)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t index : 11; /**< The SSO entry attached to the core. */
+ uint64_t reserved_46 : 1;
+ uint64_t grp : 6; /**< The group attached to the core (updated when new tag list entered on
+ SWTAG_FULL). */
+ uint64_t reserved_38_39 : 2;
+ uint64_t wqp : 38; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
+#else
+ uint64_t wqp : 38;
+ uint64_t reserved_38_39 : 2;
+ uint64_t grp : 6;
+ uint64_t reserved_46 : 1;
+ uint64_t index : 11;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } s_sstatus3_cn68xx;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==0)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and
+ tag_type is not NULL or NULL_NULL). */
+ uint64_t index : 11; /**< The POW entry attached to the core. */
+ uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
+ uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
+#else
+ uint64_t wqp : 36;
+ uint64_t grp : 4;
+ uint64_t index : 11;
+ uint64_t link_index : 11;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s_sstatus4;
+
+ /**
+ * Result for a SSO Status Load (when opcode is SL_LINKS)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t index : 11; /**< The SSO entry attached to the core. */
+ uint64_t reserved_34 : 1;
+ uint64_t grp : 6; /**< The group attached to the core (updated when new tag list entered on
+ SWTAG_FULL). */
+ uint64_t head : 1; /**< Set when this SSO entry is at the head of its tag list (also set when in the
+ UNSCHEDULED or EMPTY state). */
+ uint64_t tail : 1; /**< Set when this SSO entry is at the tail of its tag list (also set when in the
+ UNSCHEDULED or EMPTY state). */
+ uint64_t reserved_24_25 : 2;
+ uint64_t revlink_index : 11; /**< Points to the prior SSO entry in the tag list when head==0 (and tag_type is not UNSCHEDULED or EMPTY). */
+ uint64_t reserved_11_12 : 2;
+ uint64_t link_index : 11; /**< Points to the next SSO entry in the tag list when tail==0 (and tag_type is not UNSCHEDULED or EMPTY). */
+#else
+ uint64_t link_index : 11;
+ uint64_t reserved_11_12 : 2;
+ uint64_t revlink_index : 11;
+ uint64_t reserved_24_25 : 2;
+ uint64_t tail : 1;
+ uint64_t head : 1;
+ uint64_t grp : 6;
+ uint64_t reserved_34 : 1;
+ uint64_t index : 11;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } s_sstatus4_cn68xx;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==1)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0
+ (and tag_type is not NULL or NULL_NULL). This field is unpredictable
+ when the core's state is NULL or NULL_NULL. */
+ uint64_t index : 11; /**< The POW entry attached to the core. */
+ uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
+ uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
+#else
+ uint64_t wqp : 36;
+ uint64_t grp : 4;
+ uint64_t index : 11;
+ uint64_t revlink_index : 11;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s_sstatus5;
+
+ /**
+ * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63 : 13;
+ uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list
+ (unpredictable if entry is the tail of the list). */
+ uint64_t grp : 4; /**< The group of the POW entry. */
+ uint64_t reserved_35 : 1;
+ uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
+ NULL or NULL_NULL state). */
+ uint64_t tag_type : 2; /**< The tag type of the POW entry. */
+ uint64_t tag : 32; /**< The tag of the POW entry. */
+#else
+ uint64_t tag : 32;
+ uint64_t tag_type : 2;
+ uint64_t tail : 1;
+ uint64_t reserved_35 : 1;
+ uint64_t grp : 4;
+ uint64_t next_index : 11;
+ uint64_t reserved_51_63 : 13;
+#endif
+ } s_smemload0;
+
+ /**
+ * Result For SSO Memory Load (opcode is ML_TAG)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t tail : 1; /**< Set when this SSO entry is at the tail of its tag list (also set when in the
+ UNSCHEDULED or EMPTY state). */
+ uint64_t reserved_34_36 : 3;
+ uint64_t tag_type : 2; /**< The tag type of the SSO entry. */
+ uint64_t tag : 32; /**< The tag of the SSO entry. */
+#else
+ uint64_t tag : 32;
+ uint64_t tag_type : 2;
+ uint64_t reserved_34_36 : 3;
+ uint64_t tail : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s_smemload0_cn68xx;
+
+ /**
+ * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63 : 13;
+ uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list
+ (unpredictable if entry is the tail of the list). */
+ uint64_t grp : 4; /**< The group of the POW entry. */
+ uint64_t wqp : 36; /**< The WQP held in the POW entry. */
+#else
+ uint64_t wqp : 36;
+ uint64_t grp : 4;
+ uint64_t next_index : 11;
+ uint64_t reserved_51_63 : 13;
+#endif
+ } s_smemload1;
+
+ /**
+ * Result For SSO Memory Load (opcode is ML_WQPGRP)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t nosched : 1; /**< The nosched bit for the SSO entry. */
+ uint64_t reserved_46 : 1;
+ uint64_t grp : 6; /**< The group of the SSO entry. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t wqp : 38; /**< The WQP held in the SSO entry. */
+#else
+ uint64_t wqp : 38;
+ uint64_t reserved_38_39 : 2;
+ uint64_t grp : 6;
+ uint64_t reserved_46 : 1;
+ uint64_t nosched : 1;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s_smemload1_cn68xx;
+
+ /**
+ * Result For POW Memory Load (get_des == 1)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63 : 13;
+ uint64_t fwd_index : 11; /**< The next entry in the tag list connected to the descheduled head. */
+ uint64_t grp : 4; /**< The group of the POW entry. */
+ uint64_t nosched : 1; /**< The nosched bit for the POW entry. */
+ uint64_t pend_switch : 1; /**< There is a pending tag switch */
+ uint64_t pend_type : 2; /**< The next tag type for the new tag list when pend_switch is set. */
+ uint64_t pend_tag : 32; /**< The next tag for the new tag list when pend_switch is set. */
+#else
+ uint64_t pend_tag : 32;
+ uint64_t pend_type : 2;
+ uint64_t pend_switch : 1;
+ uint64_t nosched : 1;
+ uint64_t grp : 4;
+ uint64_t fwd_index : 11;
+ uint64_t reserved_51_63 : 13;
+#endif
+ } s_smemload2;
+
+ /**
+ * Result For SSO Memory Load (opcode is ML_PENDTAG)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t pend_switch : 1; /**< Set when there is a pending non-UNSCHEDULED SWTAG or
+ SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
+ uint64_t reserved_34_36 : 3;
+ uint64_t pend_type : 2; /**< The next tag type for the new tag list when pend_switch is set. */
+ uint64_t pend_tag : 32; /**< The next tag for the new tag list when pend_switch is set. */
+#else
+ uint64_t pend_tag : 32;
+ uint64_t pend_type : 2;
+ uint64_t reserved_34_36 : 3;
+ uint64_t pend_switch : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s_smemload2_cn68xx;
+
+ /**
+ * Result For SSO Memory Load (opcode is ML_LINKS)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t fwd_index : 11; /**< The next entry in the tag list connected to the descheduled head. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list
+ (unpredictable if entry is the tail of the list). */
+#else
+ uint64_t next_index : 11;
+ uint64_t reserved_11_12 : 2;
+ uint64_t fwd_index : 11;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s_smemload3_cn68xx;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t free_val : 1; /**< - set when there is one or more POW entries on the free list. */
+ uint64_t free_one : 1; /**< - set when there is exactly one POW entry on the free list. */
+ uint64_t reserved_49 : 1;
+ uint64_t free_head : 11; /**< - when free_val is set, indicates the first entry on the free list. */
+ uint64_t reserved_37 : 1;
+ uint64_t free_tail : 11; /**< - when free_val is set, indicates the last entry on the free list. */
+ uint64_t loc_val : 1; /**< - set when there is one or more POW entries on the input Q list selected by qosgrp. */
+ uint64_t loc_one : 1; /**< - set when there is exactly one POW entry on the input Q list selected by qosgrp. */
+ uint64_t reserved_23 : 1;
+ uint64_t loc_head : 11; /**< - when loc_val is set, indicates the first entry on the input Q list selected by qosgrp. */
+ uint64_t reserved_11 : 1;
+ uint64_t loc_tail : 11; /**< - when loc_val is set, indicates the last entry on the input Q list selected by qosgrp. */
+#else
+ uint64_t loc_tail : 11;
+ uint64_t reserved_11 : 1;
+ uint64_t loc_head : 11;
+ uint64_t reserved_23 : 1;
+ uint64_t loc_one : 1;
+ uint64_t loc_val : 1;
+ uint64_t free_tail : 11;
+ uint64_t reserved_37 : 1;
+ uint64_t free_head : 11;
+ uint64_t reserved_49 : 1;
+ uint64_t free_one : 1;
+ uint64_t free_val : 1;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } sindexload0;
+
+ /**
+ * Result for SSO Index/Pointer Load(opcode == IPL_IQ/IPL_DESCHED/IPL_NOSCHED)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t queue_val : 1; /**< - If set, one or more valid entries are in the queue. */
+ uint64_t queue_one : 1; /**< - If set, exactly one valid entry is in the queue. */
+ uint64_t reserved_24_25 : 2;
+ uint64_t queue_head : 11; /**< - Index of entry at the head of the queue. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t queue_tail : 11; /**< - Index of entry at the tail of the queue. */
+#else
+ uint64_t queue_tail : 11;
+ uint64_t reserved_11_12 : 2;
+ uint64_t queue_head : 11;
+ uint64_t reserved_24_25 : 2;
+ uint64_t queue_one : 1;
+ uint64_t queue_val : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } sindexload0_cn68xx;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t nosched_val : 1; /**< - set when there is one or more POW entries on the nosched list. */
+ uint64_t nosched_one : 1; /**< - set when there is exactly one POW entry on the nosched list. */
+ uint64_t reserved_49 : 1;
+ uint64_t nosched_head : 11; /**< - when nosched_val is set, indicates the first entry on the nosched list. */
+ uint64_t reserved_37 : 1;
+ uint64_t nosched_tail : 11; /**< - when nosched_val is set, indicates the last entry on the nosched list. */
+ uint64_t des_val : 1; /**< - set when there is one or more descheduled heads on the descheduled list selected by qosgrp. */
+ uint64_t des_one : 1; /**< - set when there is exactly one descheduled head on the descheduled list selected by qosgrp. */
+ uint64_t reserved_23 : 1;
+ uint64_t des_head : 11; /**< - when des_val is set, indicates the first descheduled head on the descheduled list selected by qosgrp. */
+ uint64_t reserved_11 : 1;
+ uint64_t des_tail : 11; /**< - when des_val is set, indicates the last descheduled head on the descheduled list selected by qosgrp. */
+#else
+ uint64_t des_tail : 11;
+ uint64_t reserved_11 : 1;
+ uint64_t des_head : 11;
+ uint64_t reserved_23 : 1;
+ uint64_t des_one : 1;
+ uint64_t des_val : 1;
+ uint64_t nosched_tail : 11;
+ uint64_t reserved_37 : 1;
+ uint64_t nosched_head : 11;
+ uint64_t reserved_49 : 1;
+ uint64_t nosched_one : 1;
+ uint64_t nosched_val : 1;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } sindexload1;
+
+ /**
+ * Result for SSO Index/Pointer Load(opcode == IPL_FREE0/IPL_FREE1/IPL_FREE2)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t qnum_head : 2; /**< - Subqueue with current head */
+ uint64_t qnum_tail : 2; /**< - Subqueue with current tail */
+ uint64_t reserved_28_55 : 28;
+ uint64_t queue_val : 1; /**< - If set, one or more valid entries are in the queue. */
+ uint64_t queue_one : 1; /**< - If set, exactly one valid entry is in the queue. */
+ uint64_t reserved_24_25 : 2;
+ uint64_t queue_head : 11; /**< - Index of entry at the head of the queue. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t queue_tail : 11; /**< - Index of entry at the tail of the queue. */
+#else
+ uint64_t queue_tail : 11;
+ uint64_t reserved_11_12 : 2;
+ uint64_t queue_head : 11;
+ uint64_t reserved_24_25 : 2;
+ uint64_t queue_one : 1;
+ uint64_t queue_val : 1;
+ uint64_t reserved_28_55 : 28;
+ uint64_t qnum_tail : 2;
+ uint64_t qnum_head : 2;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } sindexload1_cn68xx;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t rmt_is_head : 1; /**< Set when this DRAM list is the current head (i.e. is the next to
+ be reloaded when the POW hardware reloads a POW entry from DRAM). The
+ POW hardware alternates between the two DRAM lists associated with a QOS
+ level when it reloads work from DRAM into the POW unit. */
+ uint64_t rmt_val : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp
+ contains one or more pieces of work. */
+ uint64_t rmt_one : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp
+ contains exactly one piece of work. */
+ uint64_t rmt_head : 36; /**< When rmt_val is set, indicates the first piece of work on the
+ DRAM input Q list selected by qosgrp. */
+#else
+ uint64_t rmt_head : 36;
+ uint64_t rmt_one : 1;
+ uint64_t rmt_val : 1;
+ uint64_t rmt_is_head : 1;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } sindexload2;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 1)
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63 : 25;
+ uint64_t rmt_is_head : 1; /**< - set when this DRAM list is the current head (i.e. is the next to
+ be reloaded when the POW hardware reloads a POW entry from DRAM). The
+ POW hardware alternates between the two DRAM lists associated with a QOS
+ level when it reloads work from DRAM into the POW unit. */
+ uint64_t rmt_val : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp
+ contains one or more pieces of work. */
+ uint64_t rmt_one : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp
+ contains exactly one piece of work. */
+ uint64_t rmt_tail : 36; /**< - when rmt_val is set, indicates the last piece of work on the DRAM
+ input Q list selected by qosgrp. */
+#else
+ uint64_t rmt_tail : 36;
+ uint64_t rmt_one : 1;
+ uint64_t rmt_val : 1;
+ uint64_t rmt_is_head : 1;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } sindexload3;
+
+ /**
+ * Response to NULL_RD request loads
+ */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused : 62;
+ uint64_t state : 2; /**< of type cvmx_pow_tag_type_t. state is one of the following:
+ - CVMX_POW_TAG_TYPE_ORDERED
+ - CVMX_POW_TAG_TYPE_ATOMIC
+ - CVMX_POW_TAG_TYPE_NULL
+ - CVMX_POW_TAG_TYPE_NULL_NULL */
+#else
+ uint64_t state : 2;
+ uint64_t unused : 62;
+#endif
+ } s_null_rd;
+
+} cvmx_pow_tag_load_resp_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_57_63 : 7;
+ uint64_t index : 11;
+ uint64_t reserved_45 : 1;
+ uint64_t grp : 6;
+ uint64_t head : 1;
+ uint64_t tail : 1;
+ uint64_t reserved_34_36 : 3;
+ uint64_t tag_type : 2;
+ uint64_t tag : 32;
+#else
+ uint64_t tag : 32;
+ uint64_t tag_type : 2;
+ uint64_t reserved_34_36 : 3;
+ uint64_t tail : 1;
+ uint64_t head : 1;
+ uint64_t grp : 6;
+ uint64_t reserved_45 : 1;
+ uint64_t index : 11;
+ uint64_t reserved_57_63 : 7;
+#endif
+ } s;
+} cvmx_pow_sl_tag_resp_t;
+
+/**
+ * This structure describes the address used for stores to the POW.
+ * The store address is meaningful on stores to the POW. The hardware assumes that an aligned
+ * 64-bit store was used for all these stores.
+ * Note the assumption that the work queue entry is aligned on an 8-byte
+ * boundary (since the low-order 3 address bits must be zero).
+ * Note that not all fields are used by all operations.
+ *
+ * NOTE: The following is the behavior of the pending switch bit at the PP
+ * for POW stores (i.e. when did<7:3> == 0xc)
+ * - did<2:0> == 0 => pending switch bit is set
+ * - did<2:0> == 1 => no effect on the pending switch bit
+ * - did<2:0> == 3 => pending switch bit is cleared
+ * - did<2:0> == 7 => no effect on the pending switch bit
+ * - did<2:0> == others => must not be used
+ * - No other loads/stores have an effect on the pending switch bit
+ * - The switch bus from POW can clear the pending switch bit
+ *
+ * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle ADDWQ command
+ * that only contains the pointer. SW must never use did<2:0> == 2.
+ */
+typedef union
+{
+ /**
+ * Unsigned 64 bit integer representation of store address
+ */
+ uint64_t u64;
+
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mem_reg : 2; /**< Memory region. Should be CVMX_IO_SEG in most cases */
+ uint64_t reserved_49_61 : 13; /**< Must be zero */
+ uint64_t is_io : 1; /**< Must be one */
+ uint64_t did : 8; /**< Device ID of POW. Note that different sub-dids are used. */
+ uint64_t reserved_36_39 : 4; /**< Must be zero */
+ uint64_t addr : 36; /**< Address field. addr<2:0> must be zero */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_39 : 4;
+ uint64_t did : 8;
+ uint64_t is_io : 1;
+ uint64_t reserved_49_61 : 13;
+ uint64_t mem_reg : 2;
+#endif
+ } stag;
+} cvmx_pow_tag_store_addr_t;
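A sketch tying this layout to the NOTE above (illustrative only; the sub-did shown is the SWTAG did used elsewhere in this header, and the caller is assumed to pass a physical WQE address where the operation needs one):

    /* Illustrative only: form the IO address for a POW tag store. */
    static inline uint64_t example_pow_store_addr(uint64_t wqe_phys)
    {
        cvmx_pow_tag_store_addr_t ptr;
        ptr.u64 = 0;
        ptr.stag.mem_reg = CVMX_IO_SEG;
        ptr.stag.is_io = 1;
        ptr.stag.did = CVMX_OCT_DID_TAG_SWTAG;
        ptr.stag.addr = wqe_phys;   /* addr<2:0> must be zero */
        return ptr.u64;
    }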
+
+/**
+ * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
+ */
+typedef union
+{
+ uint64_t u64;
+
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t scraddr : 8; /**< the (64-bit word) location in scratchpad to write to (if len != 0) */
+ uint64_t len : 8; /**< the number of words in the response (0 => no response) */
+ uint64_t did : 8; /**< the ID of the device on the non-coherent bus */
+ uint64_t unused :36;
+ uint64_t wait : 1; /**< if set, don't return load response until work is available */
+ uint64_t unused2 : 3;
+#else
+ uint64_t unused2 : 3;
+ uint64_t wait : 1;
+ uint64_t unused :36;
+ uint64_t did : 8;
+ uint64_t len : 8;
+ uint64_t scraddr : 8;
+#endif
+ } s;
+
+} cvmx_pow_iobdma_store_t;
+
+
+/* CSR typedefs have been moved to cvmx-pow-defs.h */
+
+/**
+ * Get the POW tag for this core. This returns the current
+ * tag type, tag, group, and POW entry index associated with
+ * this core. Index is only valid if the tag type isn't NULL_NULL.
+ * If a tag switch is pending this routine returns the tag before
+ * the tag switch, not after.
+ *
+ * @return Current tag
+ */
+static inline cvmx_pow_tag_info_t cvmx_pow_get_current_tag(void)
+{
+ cvmx_pow_load_addr_t load_addr;
+ cvmx_pow_tag_info_t result;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
+ cvmx_pow_sl_tag_resp_t load_resp;
+ load_addr.u64 = 0;
+ load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
+ load_addr.sstatus_cn68xx.is_io = 1;
+ load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
+ load_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();
+ load_addr.sstatus_cn68xx.opcode = 3;
+ load_resp.u64 = cvmx_read_csr(load_addr.u64);
+ result.grp = load_resp.s.grp;
+ result.index = load_resp.s.index;
+ result.tag_type = load_resp.s.tag_type;
+ result.tag = load_resp.s.tag;
+ } else {
+ cvmx_pow_tag_load_resp_t load_resp;
+ load_addr.u64 = 0;
+ load_addr.sstatus.mem_region = CVMX_IO_SEG;
+ load_addr.sstatus.is_io = 1;
+ load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
+ load_addr.sstatus.coreid = cvmx_get_core_num();
+ load_addr.sstatus.get_cur = 1;
+ load_resp.u64 = cvmx_read_csr(load_addr.u64);
+ result.grp = load_resp.s_sstatus2.grp;
+ result.index = load_resp.s_sstatus2.index;
+ result.tag_type = load_resp.s_sstatus2.tag_type;
+ result.tag = load_resp.s_sstatus2.tag;
+ }
+ return result;
+}
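A usage sketch (illustrative only; cvmx_dprintf() is the logging call already used later in this header):

    /* Illustrative only: log this core's current POW tag state. */
    static inline void example_show_my_tag(void)
    {
        cvmx_pow_tag_info_t info = cvmx_pow_get_current_tag();
        cvmx_dprintf("core %d: tag=0x%08x type=%d grp=%d index=%d\n",
                     (int)cvmx_get_core_num(), (unsigned int)info.tag,
                     (int)info.tag_type, (int)info.grp, (int)info.index);
    }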
+
+/**
+ * Get the POW WQE for this core. This returns the work queue
+ * entry currently associated with this core.
+ *
+ * @return WQE pointer
+ */
+static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
+{
+ cvmx_pow_load_addr_t load_addr;
+ cvmx_pow_tag_load_resp_t load_resp;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
+ load_addr.u64 = 0;
+ load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
+ load_addr.sstatus_cn68xx.is_io = 1;
+ load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
+ load_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();
+ load_addr.sstatus_cn68xx.opcode = 3;
+ load_resp.u64 = cvmx_read_csr(load_addr.u64);
+ if (load_resp.s_sstatus3_cn68xx.wqp)
+ return (cvmx_wqe_t*)cvmx_phys_to_ptr(load_resp.s_sstatus3_cn68xx.wqp);
+ else
+ return (cvmx_wqe_t*)0;
+ } else {
+ load_addr.u64 = 0;
+ load_addr.sstatus.mem_region = CVMX_IO_SEG;
+ load_addr.sstatus.is_io = 1;
+ load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
+ load_addr.sstatus.coreid = cvmx_get_core_num();
+ load_addr.sstatus.get_cur = 1;
+ load_addr.sstatus.get_wqp = 1;
+ load_resp.u64 = cvmx_read_csr(load_addr.u64);
+ return (cvmx_wqe_t*)cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
+ }
+}
+
+
+/**
+ * @INTERNAL
+ * Print a warning if a tag switch is pending for this core
+ *
+ * @param function Function name checking for a pending tag switch
+ */
+static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
+{
+ uint64_t switch_complete;
+ CVMX_MF_CHORD(switch_complete);
+ cvmx_warn_if(!switch_complete, "%s called with tag switch in progress\n", function);
+}
+
+
+/**
+ * Waits for a tag switch to complete by polling the completion bit.
+ * Note that switches to NULL complete immediately and do not need
+ * to be waited for.
+ */
+static inline void cvmx_pow_tag_sw_wait(void)
+{
+ const uint64_t MAX_CYCLES = 1ull<<31;
+ uint64_t switch_complete;
+ uint64_t start_cycle = cvmx_get_cycle();
+ while (1)
+ {
+ CVMX_MF_CHORD(switch_complete);
+ if (cvmx_unlikely(switch_complete))
+ break;
+ if (cvmx_unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES))
+ {
+ cvmx_dprintf("WARNING: Tag switch is taking a long time, possible deadlock\n");
+ start_cycle = -MAX_CYCLES-1;
+ }
+ }
+}
+
+
+/**
+ * Synchronous work request. Requests work from the POW.
+ * This function does NOT wait for previous tag switches to complete,
+ * so the caller must ensure that there is not a pending tag switch.
+ *
+ * @param wait When set, call stalls until work becomes available, or times out.
+ * If not set, returns immediately.
+ *
+ * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
+ */
+static inline cvmx_wqe_t * cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t wait)
+{
+ cvmx_pow_load_addr_t ptr;
+ cvmx_pow_tag_load_resp_t result;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+
+ ptr.u64 = 0;
+ ptr.swork.mem_region = CVMX_IO_SEG;
+ ptr.swork.is_io = 1;
+ ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
+ ptr.swork.wait = wait;
+
+ result.u64 = cvmx_read_csr(ptr.u64);
+
+ if (result.s_work.no_work)
+ return NULL;
+ else
+ return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
+}
+
+
+/**
+ * Synchronous work request. Requests work from the POW.
+ * This function waits for any previous tag switch to complete before
+ * requesting the new work.
+ *
+ * @param wait When set, call stalls until work becomes available, or times out.
+ * If not set, returns immediately.
+ *
+ * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
+ */
+static inline cvmx_wqe_t * cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+
+ /* Must not have a switch pending when requesting work */
+ cvmx_pow_tag_sw_wait();
+ return(cvmx_pow_work_request_sync_nocheck(wait));
+
+}
+
+
+/**
+ * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
+ * This function waits for any previous tag switch to complete before
+ * requesting the null_rd.
+ *
+ * @return Returns the POW state of type cvmx_pow_tag_type_t.
+ */
+static inline cvmx_pow_tag_type_t cvmx_pow_work_request_null_rd(void)
+{
+ cvmx_pow_load_addr_t ptr;
+ cvmx_pow_tag_load_resp_t result;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+
+ /* Must not have a switch pending when requesting work */
+ cvmx_pow_tag_sw_wait();
+
+ ptr.u64 = 0;
+ ptr.snull_rd.mem_region = CVMX_IO_SEG;
+ ptr.snull_rd.is_io = 1;
+ ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
+
+ result.u64 = cvmx_read_csr(ptr.u64);
+
+ return (cvmx_pow_tag_type_t)result.s_null_rd.state;
+}
+
+
+/**
+ * Asynchronous work request. Work is requested from the POW unit, and should later
+ * be checked with function cvmx_pow_work_response_async.
+ * This function does NOT wait for previous tag switches to complete,
+ * so the caller must ensure that there is not a pending tag switch.
+ *
+ * @param scr_addr Scratch memory address that response will be returned to,
+ * which is either a valid WQE, or a response with the invalid bit set.
+ * Byte address, must be 8 byte aligned.
+ * @param wait 1 to cause response to wait for work to become available (or timeout)
+ * 0 to cause response to return immediately
+ */
+static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, cvmx_pow_wait_t wait)
+{
+ cvmx_pow_iobdma_store_t data;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+
+ /* scr_addr must be 8 byte aligned */
+ data.u64 = 0;
+ data.s.scraddr = scr_addr >> 3;
+ data.s.len = 1;
+ data.s.did = CVMX_OCT_DID_TAG_SWTAG;
+ data.s.wait = wait;
+ cvmx_send_single(data.u64);
+}
+/**
+ * Asynchronous work request. Work is requested from the POW unit, and should later
+ * be checked with function cvmx_pow_work_response_async.
+ * This function waits for any previous tag switch to complete before
+ * requesting the new work.
+ *
+ * @param scr_addr Scratch memory address that response will be returned to,
+ * which is either a valid WQE, or a response with the invalid bit set.
+ * Byte address, must be 8 byte aligned.
+ * @param wait 1 to cause response to wait for work to become available (or timeout)
+ * 0 to cause response to return immediately
+ */
+static inline void cvmx_pow_work_request_async(int scr_addr, cvmx_pow_wait_t wait)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+
+ /* Must not have a switch pending when requesting work */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_work_request_async_nocheck(scr_addr, wait);
+}
+
+
+/**
+ * Gets the result of an asynchronous work request. Performs an IOBDMA sync
+ * to wait for the response.
+ *
+ * @param scr_addr Scratch memory address to get result from
+ * Byte address, must be 8 byte aligned.
+ * @return Returns the WQE from the scratch register, or NULL if no work was available.
+ */
+static inline cvmx_wqe_t * cvmx_pow_work_response_async(int scr_addr)
+{
+ cvmx_pow_tag_load_resp_t result;
+
+ CVMX_SYNCIOBDMA;
+ result.u64 = cvmx_scratch_read64(scr_addr);
+
+ if (result.s_work.no_work)
+ return NULL;
+ else
+ return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
+}
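+/* Editor's illustrative sketch (not part of the SDK): the async pair above is
+ * typically used to overlap a work request with processing of the previous
+ * WQE. MY_SCR_WORK is a hypothetical application-chosen scratch offset
+ * (8 byte aligned):
+ *
+ *    #define MY_SCR_WORK 0
+ *    cvmx_pow_work_request_async(MY_SCR_WORK, CVMX_POW_WAIT);
+ *    ... process the previous work queue entry here ...
+ *    cvmx_wqe_t *wqe = cvmx_pow_work_response_async(MY_SCR_WORK);
+ */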
+
+
+/**
+ * Checks if a work queue entry pointer returned by a work
+ * request is valid. It may be invalid due to no work
+ * being available or due to a timeout.
+ *
+ * @param wqe_ptr pointer to a work queue entry returned by the POW
+ *
+ * @return 0 if pointer is valid
+ * 1 if invalid (no work was returned)
+ */
+static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
+{
+ return (wqe_ptr == NULL);
+}
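+/* Editor's illustrative sketch (not part of the SDK): a minimal synchronous
+ * dispatch loop built from the helpers above, where CVMX_POW_WAIT is the wait
+ * flag from cvmx_pow_wait_t and my_handle_packet() is a hypothetical
+ * application handler:
+ *
+ *    for (;;) {
+ *        cvmx_wqe_t *wqe = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
+ *        if (cvmx_pow_work_invalid(wqe))
+ *            continue;   // request timed out; try again
+ *        my_handle_packet(wqe);
+ *    }
+ */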
+
+
+
+/**
+ * Starts a tag switch to the provided tag value and tag type. Completion for
+ * the tag switch must be checked for separately.
+ * This function does NOT update the
+ * work queue entry in dram to match tag value and type, so the application must
+ * keep track of these if they are important to the application.
+ * This tag switch command must not be used for switches to NULL, as the tag
+ * switch pending bit will be set by the switch request, but never cleared by the
+ * hardware.
+ *
+ * NOTE: This should not be used when switching from a NULL tag. Use
+ * cvmx_pow_tag_sw_full() instead.
+ *
+ * This function does no checks, so the caller must ensure that any previous tag
+ * switch has completed.
+ *
+ * @param tag new tag value
+ * @param tag_type new tag type (ordered or atomic)
+ */
+static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ {
+ cvmx_pow_tag_info_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+ current_tag = cvmx_pow_get_current_tag();
+ cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
+ cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag\n", __FUNCTION__);
+ cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__);
+ cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__);
+ }
+
+ /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
+ ** once the WQE is in flight. See hardware manual for complete details.
+ ** It is the application's responsibility to keep track of the current tag
+ ** value if that is important.
+ */
+
+ tag_req.u64 = 0;
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
+ tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;
+ tag_req.s_cn68xx_other.tag = tag;
+ tag_req.s_cn68xx_other.type = tag_type;
+ } else {
+ tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;
+ tag_req.s_cn38xx.tag = tag;
+ tag_req.s_cn38xx.type = tag_type;
+ }
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
+
+ /* once this store arrives at POW, it will attempt the switch
+ software must wait for the switch to complete separately */
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
+
+
+/**
+ * Starts a tag switch to the provided tag value and tag type. Completion for
+ * the tag switch must be checked for separately.
+ * This function does NOT update the
+ * work queue entry in dram to match tag value and type, so the application must
+ * keep track of these if they are important to the application.
+ * This tag switch command must not be used for switches to NULL, as the tag
+ * switch pending bit will be set by the switch request, but never cleared by the
+ * hardware.
+ *
+ * NOTE: This should not be used when switching from a NULL tag. Use
+ * cvmx_pow_tag_sw_full() instead.
+ *
+ * This function waits for any previous tag switch to complete, and also
+ * displays an error on tag switches to NULL.
+ *
+ * @param tag new tag value
+ * @param tag_type new tag type (ordered or atomic)
+ */
+static inline void cvmx_pow_tag_sw(uint32_t tag, cvmx_pow_tag_type_t tag_type)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+
+ /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
+ ** once the WQE is in flight. See hardware manual for complete details.
+ ** It is the application's responsibility to keep track of the current tag
+ ** value if that is important.
+ */
+
+ /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
+ ** if a previous switch is still pending. */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_nocheck(tag, tag_type);
+}
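+/* Editor's illustrative sketch (not part of the SDK): a common pattern is to
+ * switch to an ATOMIC tag before touching per-flow state, wait for the switch
+ * to complete, and release ordering with a NULL switch afterwards
+ * (cvmx_pow_tag_sw_null() is defined later in this file):
+ *
+ *    cvmx_pow_tag_sw(tag, CVMX_POW_TAG_TYPE_ATOMIC);
+ *    cvmx_pow_tag_sw_wait();   // now exclusive for this tag value
+ *    ... update per-flow state ...
+ *    cvmx_pow_tag_sw_null();
+ */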
+
+
+/**
+ * Starts a tag switch to the provided tag value and tag type. Completion for
+ * the tag switch must be checked for separately.
+ * This function does NOT update the
+ * work queue entry in dram to match tag value and type, so the application must
+ * keep track of these if they are important to the application.
+ * This tag switch command must not be used for switches to NULL, as the tag
+ * switch pending bit will be set by the switch request, but never cleared by the
+ * hardware.
+ *
+ * This function must be used for tag switches from NULL.
+ *
+ * This function does no checks, so the caller must ensure that any previous tag
+ * switch has completed.
+ *
+ * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters
+ * @param tag tag value to be assigned to work queue entry
+ * @param tag_type type of tag
+ * @param group group value for the work queue entry.
+ */
+static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ {
+ cvmx_pow_tag_info_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+ current_tag = cvmx_pow_get_current_tag();
+ cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
+ cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__);
+ cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__);
+ if ((wqp != cvmx_phys_to_ptr(0x80)) && cvmx_pow_get_current_wqp())
+ cvmx_warn_if(wqp != cvmx_pow_get_current_wqp(), "%s passed WQE(%p) doesn't match the address in the POW(%p)\n", __FUNCTION__, wqp, cvmx_pow_get_current_wqp());
+ }
+
+ /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
+ ** once the WQE is in flight. See hardware manual for complete details.
+ ** It is the application's responsibility to keep track of the current tag
+ ** value if that is important.
+ */
+
+ tag_req.u64 = 0;
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
+ tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_FULL;
+ tag_req.s_cn68xx_other.tag = tag;
+ tag_req.s_cn68xx_other.type = tag_type;
+ tag_req.s_cn68xx_other.grp = group;
+ } else {
+ tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_FULL;
+ tag_req.s_cn38xx.tag = tag;
+ tag_req.s_cn38xx.type = tag_type;
+ tag_req.s_cn38xx.grp = group;
+ }
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
+ ptr.sio.offset = CAST64(wqp);
+
+ /* once this store arrives at POW, it will attempt the switch
+ software must wait for the switch to complete separately */
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
+
+
+/**
+ * Starts a tag switch to the provided tag value and tag type. Completion for
+ * the tag switch must be checked for separately.
+ * This function does NOT update the
+ * work queue entry in dram to match tag value and type, so the application must
+ * keep track of these if they are important to the application.
+ * This tag switch command must not be used for switches to NULL, as the tag
+ * switch pending bit will be set by the switch request, but never cleared by the
+ * hardware.
+ *
+ * This function must be used for tag switches from NULL.
+ *
+ * This function waits for any pending tag switches to complete
+ * before requesting the tag switch.
+ *
+ * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters
+ * @param tag tag value to be assigned to work queue entry
+ * @param tag_type type of tag
+ * @param group group value for the work queue entry.
+ */
+static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+
+ /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
+ ** if a previous switch is still pending. */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
+}
+
+
+/**
+ * Switch to a NULL tag, which ends any ordering or
+ * synchronization provided by the POW for the current
+ * work queue entry. This operation completes immediately,
+ * so completion should not be waited for.
+ * This function does NOT wait for previous tag switches to complete,
+ * so the caller must ensure that any previous tag switches have completed.
+ */
+static inline void cvmx_pow_tag_sw_null_nocheck(void)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ {
+ cvmx_pow_tag_info_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+ current_tag = cvmx_pow_get_current_tag();
+ cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
+ cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called when we already have a NULL tag\n", __FUNCTION__);
+ }
+
+ tag_req.u64 = 0;
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
+ tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;
+ tag_req.s_cn68xx_other.type = CVMX_POW_TAG_TYPE_NULL;
+ } else {
+ tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;
+ tag_req.s_cn38xx.type = CVMX_POW_TAG_TYPE_NULL;
+ }
+
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
+
+
+ cvmx_write_io(ptr.u64, tag_req.u64);
+
+ /* switch to NULL completes immediately */
+}
+
+/**
+ * Switch to a NULL tag, which ends any ordering or
+ * synchronization provided by the POW for the current
+ * work queue entry. This operation completes immediately,
+ * so completion should not be waited for.
+ * This function waits for any pending tag switches to complete
+ * before requesting the switch to NULL.
+ */
+static inline void cvmx_pow_tag_sw_null(void)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+
+ /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
+ ** if a previous switch is still pending. */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_null_nocheck();
+
+ /* switch to NULL completes immediately */
+}
+
+
+
+/**
+ * Submits work to an input queue. This function updates the work queue entry in DRAM to match
+ * the arguments given.
+ * Note that the tag provided is for the work queue entry submitted, and is unrelated to the tag that
+ * the core currently holds.
+ *
+ * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters
+ * @param tag tag value to be assigned to work queue entry
+ * @param tag_type type of tag
+ * @param qos Input queue to add to.
+ * @param grp group value for the work queue entry.
+ */
+static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t qos, uint64_t grp)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ tag_req.u64 = 0;
+
+ wqp->word1.s.tag = tag;
+ wqp->word1.s.tag_type = tag_type;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
+ /* Reset all reserved bits */
+ wqp->word1.cn68xx.zero_0 = 0;
+ wqp->word1.cn68xx.zero_1 = 0;
+ wqp->word1.cn68xx.zero_2 = 0;
+ wqp->word1.cn68xx.qos = qos;
+ wqp->word1.cn68xx.grp = grp;
+
+ tag_req.s_cn68xx_add.op = CVMX_POW_TAG_OP_ADDWQ;
+ tag_req.s_cn68xx_add.type = tag_type;
+ tag_req.s_cn68xx_add.tag = tag;
+ tag_req.s_cn68xx_add.qos = qos;
+ tag_req.s_cn68xx_add.grp = grp;
+ } else {
+ /* Reset all reserved bits */
+ wqp->word1.cn38xx.zero_2 = 0;
+ wqp->word1.cn38xx.qos = qos;
+ wqp->word1.cn38xx.grp = grp;
+
+ tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_ADDWQ;
+ tag_req.s_cn38xx.type = tag_type;
+ tag_req.s_cn38xx.tag = tag;
+ tag_req.s_cn38xx.qos = qos;
+ tag_req.s_cn38xx.grp = grp;
+ }
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
+ ptr.sio.offset = cvmx_ptr_to_phys(wqp);
+
+ /* SYNC write to memory before the work submit. This is necessary
+ ** as POW may read values from DRAM at this time */
+ CVMX_SYNCWS;
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
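+/* Editor's illustrative sketch (not part of the SDK): injecting new work.
+ * CVMX_FPA_WQE_POOL is the conventional config name for the WQE pool and is
+ * an assumption here; my_fill_payload() is a hypothetical helper:
+ *
+ *    cvmx_wqe_t *wqe = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
+ *    if (wqe) {
+ *        my_fill_payload(wqe);
+ *        cvmx_pow_work_submit(wqe, tag, CVMX_POW_TAG_TYPE_ORDERED,
+ *                             0, 0);   // qos 0, group 0
+ *    }
+ */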
+
+
+
+/**
+ * This function sets the group mask for a core. The group mask
+ * indicates which groups each core will accept work from. There are
+ * 16 groups.
+ *
+ * @param core_num core to apply mask to
+ * @param mask Group mask. There are 16 groups, so only bits 0-15 are valid,
+ * representing groups 0-15.
+ * Each 1 bit in the mask enables the core to accept work from
+ * the corresponding group.
+ */
+static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
+{
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ {
+ cvmx_sso_ppx_grp_msk_t grp_msk;
+ grp_msk.s.grp_msk = mask;
+ cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(core_num), grp_msk.u64);
+ }
+ else
+ {
+ cvmx_pow_pp_grp_mskx_t grp_msk;
+ grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
+ grp_msk.s.grp_msk = mask;
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
+ }
+}
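+/* Editor's illustrative sketch (not part of the SDK): allow the current core
+ * to accept work only from groups 0 and 2:
+ *
+ *    cvmx_pow_set_group_mask(cvmx_get_core_num(), (1 << 0) | (1 << 2));
+ */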
+
+/**
+ * This function sets POW static priorities for a core. Each input queue has
+ * an associated priority value.
+ *
+ * @param core_num core to apply priorities to
+ * @param priority Vector of 8 priorities, one per POW Input Queue (0-7).
+ * Highest priority is 0 and lowest is 7. A priority value
+ * of 0xF instructs POW to skip the Input Queue when
+ * scheduling to this specific core.
+ * NOTE: priorities should not have gaps in values, meaning
+ * {0,1,1,1,1,1,1,1} is a valid configuration while
+ * {0,2,2,2,2,2,2,2} is not.
+ */
+static inline void cvmx_pow_set_priority(uint64_t core_num, const uint8_t priority[])
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return;
+
+ /* Detect gaps between priorities and flag error */
+ {
+ int i;
+ uint32_t prio_mask = 0;
+
+ for(i=0; i<8; i++)
+ if (priority[i] != 0xF)
+ prio_mask |= 1<<priority[i];
+
+ if ( prio_mask ^ ((1<<cvmx_pop(prio_mask)) - 1))
+ {
+ cvmx_dprintf("ERROR: POW static priorities should be contiguous (0x%llx)\n", (unsigned long long)prio_mask);
+ return;
+ }
+ }
+
+ /* POW priorities are supported on CN5xxx and later */
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ {
+ cvmx_sso_ppx_qos_pri_t qos_pri;
+
+ qos_pri.u64 = cvmx_read_csr(CVMX_SSO_PPX_QOS_PRI(core_num));
+ qos_pri.s.qos0_pri = priority[0];
+ qos_pri.s.qos1_pri = priority[1];
+ qos_pri.s.qos2_pri = priority[2];
+ qos_pri.s.qos3_pri = priority[3];
+ qos_pri.s.qos4_pri = priority[4];
+ qos_pri.s.qos5_pri = priority[5];
+ qos_pri.s.qos6_pri = priority[6];
+ qos_pri.s.qos7_pri = priority[7];
+ cvmx_write_csr(CVMX_SSO_PPX_QOS_PRI(core_num), qos_pri.u64);
+ }
+ else
+ {
+ cvmx_pow_pp_grp_mskx_t grp_msk;
+
+ grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
+ grp_msk.s.qos0_pri = priority[0];
+ grp_msk.s.qos1_pri = priority[1];
+ grp_msk.s.qos2_pri = priority[2];
+ grp_msk.s.qos3_pri = priority[3];
+ grp_msk.s.qos4_pri = priority[4];
+ grp_msk.s.qos5_pri = priority[5];
+ grp_msk.s.qos6_pri = priority[6];
+ grp_msk.s.qos7_pri = priority[7];
+
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
+ }
+}
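+/* Editor's illustrative sketch (not part of the SDK): queues 0-1 at the
+ * highest priority, 2-3 next, 4-5 third, with queues 6-7 skipped. The values
+ * 0, 1, 2 are contiguous, so the gap check above passes:
+ *
+ *    const uint8_t prio[8] = {0, 0, 1, 1, 2, 2, 0xF, 0xF};
+ *    cvmx_pow_set_priority(cvmx_get_core_num(), prio);
+ */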
+
+/**
+ * Performs a tag switch and then an immediate deschedule. This completes
+ * immediately, so completion must not be waited for. This function does NOT
+ * update the wqe in DRAM to match arguments.
+ *
+ * This function does NOT wait for any prior tag switches to complete, so the
+ * calling code must do this.
+ *
+ * Note the following CAVEAT of the Octeon HW behavior when
+ * re-scheduling DE-SCHEDULEd items whose (next) state is
+ * ORDERED:
+ * - If there are no switches pending at the time that the
+ * HW executes the de-schedule, the HW will only re-schedule
+ * the head of the FIFO associated with the given tag. This
+ * means that in many respects, the HW treats this ORDERED
+ * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
+ * case (to an ORDERED tag), the HW will do the switch
+ * before the deschedule whenever it is possible to do
+ * the switch immediately, so it may often look like
+ * this case.
+ * - If there is a pending switch to ORDERED at the time
+ * the HW executes the de-schedule, the HW will perform
+ * the switch at the time it re-schedules, and will be
+ * able to reschedule any/all of the entries with the
+ * same tag.
+ * Due to this behavior, the RECOMMENDATION to software is
+ * that they have a (next) state of ATOMIC when they
+ * DE-SCHEDULE. If an ORDERED tag is what was really desired,
+ * SW can choose to immediately switch to an ORDERED tag
+ * after the work (that has an ATOMIC tag) is re-scheduled.
+ * Note that since there are never any tag switches pending
+ * when the HW re-schedules, this switch can be IMMEDIATE upon
+ * the reception of the pointer during the re-schedule.
+ *
+ * @param tag New tag value
+ * @param tag_type New tag type
+ * @param group New group value
+ * @param no_sched Control whether this work queue entry will be rescheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
+ */
+static inline void cvmx_pow_tag_sw_desched_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ {
+ cvmx_pow_tag_info_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+ current_tag = cvmx_pow_get_current_tag();
+ cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
+ cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not allowed from NULL state\n", __FUNCTION__);
+ cvmx_warn_if((current_tag.tag_type != CVMX_POW_TAG_TYPE_ATOMIC) && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC), "%s called where neither the before nor the after tag is ATOMIC\n", __FUNCTION__);
+ }
+
+ tag_req.u64 = 0;
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
+ tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
+ tag_req.s_cn68xx_other.tag = tag;
+ tag_req.s_cn68xx_other.type = tag_type;
+ tag_req.s_cn68xx_other.grp = group;
+ tag_req.s_cn68xx_other.no_sched = no_sched;
+ } else {
+ tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
+ tag_req.s_cn38xx.tag = tag;
+ tag_req.s_cn38xx.type = tag_type;
+ tag_req.s_cn38xx.grp = group;
+ tag_req.s_cn38xx.no_sched = no_sched;
+ }
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
+
+ cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */
+}
+/**
+ * Performs a tag switch and then an immediate deschedule. This completes
+ * immediately, so completion must not be waited for. This function does NOT
+ * update the wqe in DRAM to match arguments.
+ *
+ * This function waits for any prior tag switches to complete, so the
+ * calling code may call this function with a pending tag switch.
+ *
+ * Note the following CAVEAT of the Octeon HW behavior when
+ * re-scheduling DE-SCHEDULEd items whose (next) state is
+ * ORDERED:
+ * - If there are no switches pending at the time that the
+ * HW executes the de-schedule, the HW will only re-schedule
+ * the head of the FIFO associated with the given tag. This
+ * means that in many respects, the HW treats this ORDERED
+ * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
+ * case (to an ORDERED tag), the HW will do the switch
+ * before the deschedule whenever it is possible to do
+ * the switch immediately, so it may often look like
+ * this case.
+ * - If there is a pending switch to ORDERED at the time
+ * the HW executes the de-schedule, the HW will perform
+ * the switch at the time it re-schedules, and will be
+ * able to reschedule any/all of the entries with the
+ * same tag.
+ * Due to this behavior, the RECOMMENDATION to software is
+ * that they have a (next) state of ATOMIC when they
+ * DE-SCHEDULE. If an ORDERED tag is what was really desired,
+ * SW can choose to immediately switch to an ORDERED tag
+ * after the work (that has an ATOMIC tag) is re-scheduled.
+ * Note that since there are never any tag switches pending
+ * when the HW re-schedules, this switch can be IMMEDIATE upon
+ * the reception of the pointer during the re-schedule.
+ *
+ * @param tag New tag value
+ * @param tag_type New tag type
+ * @param group New group value
+ * @param no_sched Control whether this work queue entry will be rescheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
+ */
+static inline void cvmx_pow_tag_sw_desched(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+
+ /* Need to make sure any writes to the work queue entry are complete */
+ CVMX_SYNCWS;
+ /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
+ ** if a previous switch is still pending. */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
+}
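+/* Editor's illustrative sketch (not part of the SDK): per the caveat above,
+ * deschedule with an ATOMIC next state, then switch to ORDERED (if that is
+ * what was really desired) once the work has been re-scheduled; tag and grp
+ * are hypothetical application values:
+ *
+ *    cvmx_pow_tag_sw_desched(tag, CVMX_POW_TAG_TYPE_ATOMIC, grp, 0);
+ *    ... the work is later re-scheduled to some core ...
+ *    cvmx_pow_tag_sw(tag, CVMX_POW_TAG_TYPE_ORDERED);
+ */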
+
+
+
+
+
+/**
+ * Deschedules the current work queue entry.
+ *
+ * @param no_sched no schedule flag value to be set on the work queue entry. If this is set
+ * the entry will not be rescheduled.
+ */
+static inline void cvmx_pow_desched(uint64_t no_sched)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ {
+ cvmx_pow_tag_info_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
+ current_tag = cvmx_pow_get_current_tag();
+ cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
+ cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not expected from NULL state\n", __FUNCTION__);
+ }
+
+ /* Need to make sure any writes to the work queue entry are complete */
+ CVMX_SYNCWS;
+
+ tag_req.u64 = 0;
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
+ tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_DESCH;
+ tag_req.s_cn68xx_other.no_sched = no_sched;
+ } else {
+ tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_DESCH;
+ tag_req.s_cn38xx.no_sched = no_sched;
+ }
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
+
+ cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */
+}
+
+
+
+
+
+
+
+/***********************************************************************************************
+** Define usage of bits within the 32 bit tag values.
+***********************************************************************************************/
+
+/*
+ * Number of bits of the tag used by software. The SW bits
+ * are always a contiguous block of the high bits, starting at bit 31.
+ * The hardware bits are always the low bits. By default, the top 8 bits
+ * of the tag are reserved for software, and the low 24 are set by the IPD unit.
+ */
+#define CVMX_TAG_SW_BITS (8)
+#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
+
+/* Below is the list of values for the top 8 bits of the tag. */
+#define CVMX_TAG_SW_BITS_INTERNAL 0x1 /* Tag values with top byte of this value are reserved for internal executive uses */
+/* The executive divides the remaining 24 bits as follows:
+** * the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
+** * the lower 16 bits (bits 15 - 0 of the tag) are the value within the subgroup
+** Note that this section describes the format of tags generated by software - refer to the
+** hardware documentation for a description of the tags values generated by the packet input
+** hardware.
+** Subgroups are defined here */
+#define CVMX_TAG_SUBGROUP_MASK 0xFFFF /* Mask for the value portion of the tag */
+#define CVMX_TAG_SUBGROUP_SHIFT 16
+#define CVMX_TAG_SUBGROUP_PKO 0x1
+
+
+/* End of executive tag subgroup definitions */
+
+/* The remaining software bit values 0x2 - 0xff are available for application use */
+
+
+
+/**
+ * This function creates a 32 bit tag value from the two values provided.
+ *
+ * @param sw_bits The upper bits (number depends on configuration) are set to this value. The remainder of
+ * bits are set by the hw_bits parameter.
+ * @param hw_bits The lower bits (number depends on configuration) are set to this value. The remainder of
+ * bits are set by the sw_bits parameter.
+ *
+ * @return 32 bit value of the combined hw and sw bits.
+ */
+static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
+{
+ return((((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) << CVMX_TAG_SW_SHIFT) | (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS))));
+}
+/**
+ * Extracts the bits allocated for software use from the tag
+ *
+ * @param tag 32 bit tag value
+ *
+ * @return N bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define
+ */
+static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
+{
+ return((tag >> (32 - CVMX_TAG_SW_BITS)) & cvmx_build_mask(CVMX_TAG_SW_BITS));
+}
+/**
+ *
+ * Extracts the bits allocated for hardware use from the tag
+ *
+ * @param tag 32 bit tag value
+ *
+ * @return (32 - N) bit hardware tag value, where N is configurable with the CVMX_TAG_SW_BITS define
+ */
+static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
+{
+ return(tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
+}
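+/* Editor's illustrative sketch (not part of the SDK): composing an internal
+ * executive tag for the PKO subgroup with a 16 bit value, then splitting it
+ * back apart ('value' is a hypothetical application value):
+ *
+ *    uint32_t tag = cvmx_pow_tag_compose(CVMX_TAG_SW_BITS_INTERNAL,
+ *        (CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT) |
+ *        (value & CVMX_TAG_SUBGROUP_MASK));
+ *    uint32_t sw = cvmx_pow_tag_get_sw_bits(tag);   // 0x1
+ *    uint32_t hw = cvmx_pow_tag_get_hw_bits(tag);   // subgroup | value
+ */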
+
+/**
+ * Store the current POW internal state into the supplied
+ * buffer. It is recommended that you pass a buffer of at least
+ * 128KB. The format of the capture may change based on SDK
+ * version and Octeon chip.
+ *
+ * @param buffer Buffer to store capture into
+ * @param buffer_size
+ * The size of the supplied buffer
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int cvmx_pow_capture(void *buffer, int buffer_size);
+
+/**
+ * Dump a POW capture to the console in a human readable format.
+ *
+ * @param buffer POW capture from cvmx_pow_capture()
+ * @param buffer_size
+ * Size of the buffer
+ */
+extern void cvmx_pow_display(void *buffer, int buffer_size);
+
+/**
+ * Return the number of POW entries supported by this chip
+ *
+ * @return Number of POW entries
+ */
+extern int cvmx_pow_get_num_entries(void);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_POW_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-pow.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-power-throttle.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-power-throttle.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-power-throttle.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,291 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to power-throttle control, measurement, and debugging
+ * facilities.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#include "cvmx.h"
+#include "cvmx-asm.h"
+#include "cvmx-coremask.h"
+#include "cvmx-power-throttle.h"
+
+
+#define CVMX_PTH_GET_MASK(len, pos) \
+ ((((uint64_t)1 << (len)) - 1) << (pos))
+
+#define CVMX_PTH_AVAILABLE \
+ (cvmx_power_throttle_get_register(0) != (uint64_t)-1)
+
+/**
+ * a field of the POWTHROTTLE register
+ */
+static struct cvmx_power_throttle_rfield_t {
+ char name[16]; /* the field's name */
+ int32_t pos; /* position of the field's LSb */
+ int32_t len; /* the field's length */
+ int present; /* 1 for present */
+} cvmx_power_throttle_rfield[] = {
+ {"MAXPOW", 56, 8, 0},
+ {"POWER" , 48, 8, 0},
+ {"THROTT", 40, 8, 0},
+ {"Reserved", 28, 12, 0},
+ {"DISTAG", 27, 1, 0},
+ {"PERIOD", 24, 3, 0},
+ {"POWLIM", 16, 8, 0},
+ {"MAXTHR", 8, 8, 0},
+ {"MINTHR", 0, 8, 0},
+ {"HRMPOWADJ",32, 8, 0},
+ {"OVRRD", 28, 1, 0}
+};
+
+static uint64_t cvmx_power_throttle_csr_addr(int ppid);
+
+static int cvmx_power_throttle_initialized;
+
+/**
+ * @INTERNAL
+ * Initialize cvmx_power_throttle_rfield[] based on model.
+ */
+static void cvmx_power_throttle_init(void)
+{
+ /*
+ * Turn on the fields for a model
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ int i;
+ struct cvmx_power_throttle_rfield_t *p;
+
+ for (i = 0; i < CVMX_PTH_INDEX_MAX; i++)
+ cvmx_power_throttle_rfield[i].present = 1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ {
+ /*
+ * These fields are not present on the CN63XX
+ */
+ p = &cvmx_power_throttle_rfield[CVMX_PTH_INDEX_HRMPOWADJ];
+ p->present = 0;
+ p = &cvmx_power_throttle_rfield[CVMX_PTH_INDEX_OVRRD];
+ p->present = 0;
+ }
+ else
+ {
+ /*
+ * The reserved field shrinks on models newer than the CN63XX
+ */
+ p = &cvmx_power_throttle_rfield[CVMX_PTH_INDEX_RESERVED];
+ p->pos = 29;
+ p->len = 3;
+ }
+ }
+}
+
+uint64_t cvmx_power_throttle_get_field(uint64_t r,
+ cvmx_power_throttle_field_index_t i)
+{
+ uint64_t m;
+ struct cvmx_power_throttle_rfield_t *p;
+
+ assert(i < CVMX_PTH_INDEX_MAX);
+ p = &cvmx_power_throttle_rfield[i];
+ if (!p->present)
+ return (uint64_t) -1;
+ m = CVMX_PTH_GET_MASK(p->len, p->pos);
+
+ return((r & m) >> p->pos);
+}
+
+/**
+ * @INTERNAL
+ * Set the i'th field of power-throttle register r to v.
+ */
+static uint64_t cvmx_power_throttle_set_field(int i, uint64_t r, uint64_t v)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ uint64_t m;
+ struct cvmx_power_throttle_rfield_t *p;
+
+ assert(i < CVMX_PTH_INDEX_MAX);
+
+ p = &cvmx_power_throttle_rfield[i];
+ m = CVMX_PTH_GET_MASK(p->len, p->pos);
+
+ return((~m & r) | ((v << p->pos) & m));
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Set the POWLIM field as percentage% of the MAXPOW field in r.
+ */
+static uint64_t cvmx_power_throttle_set_powlim(int ppid,
+ uint8_t percentage)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ uint64_t t, csr_addr, r;
+
+ assert(percentage < 101);
+ csr_addr = cvmx_power_throttle_csr_addr(ppid);
+ r = cvmx_read_csr(csr_addr);
+
+ t = cvmx_power_throttle_get_field(r, CVMX_PTH_INDEX_MAXPOW);
+ if (!OCTEON_IS_MODEL(OCTEON_CN63XX))
+ {
+ uint64_t s;
+
+ s = cvmx_power_throttle_get_field(r, CVMX_PTH_INDEX_HRMPOWADJ);
+ assert(t > s);
+ t = t - s;
+ }
+
+ t = percentage * t / 100;
+ r = cvmx_power_throttle_set_field(CVMX_PTH_INDEX_POWLIM, r, t);
+
+ cvmx_write_csr(csr_addr, r);
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Given ppid, calculate its PowThrottle register's L2C_COP0_MAP CSR
+ * address. (ppid == PTH_PPID_BCAST is for broadcasting)
+ */
+static uint64_t cvmx_power_throttle_csr_addr(int ppid)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ uint64_t csr_addr, reg_num, reg_reg, reg_sel;
+
+ assert(ppid < CVMX_MAX_CORES);
+
+ /*
+ * register 11 selection 6
+ */
+ reg_reg = 11;
+ reg_sel = 6;
+ reg_num = (ppid << 8) + (reg_reg << 3) + reg_sel;
+ csr_addr = CVMX_L2C_COP0_MAPX(0) + ((reg_num) << 3);
+
+ return csr_addr;
+ }
+ return 0;
+}
+
+int cvmx_power_throttle_self(uint8_t percentage)
+{
+ if (!CVMX_PTH_AVAILABLE)
+ return -1;
+
+ if (cvmx_power_throttle_set_powlim(cvmx_get_core_num(),
+ percentage) == 0)
+ return -1;
+
+ return 0;
+}
+
+int cvmx_power_throttle(uint8_t percentage, uint64_t coremask)
+{
+ int ppid;
+ int ret;
+
+ if (!CVMX_PTH_AVAILABLE)
+ return -1;
+
+ ret = 0;
+ for (ppid = 0; ppid < CVMX_MAX_CORES; ppid++)
+ {
+ if ((((uint64_t) 1) << ppid) & coremask)
+ {
+ if (cvmx_power_throttle_set_powlim(ppid, percentage) == 0)
+ ret = -2;
+ }
+ }
+
+ return ret;
+}
+
+int cvmx_power_throttle_bmp(uint8_t percentage, struct cvmx_coremask *pcm)
+{
+ int ppid;
+ int ret;
+
+ if (!CVMX_PTH_AVAILABLE)
+ return -1;
+
+ ret = 0;
+ CVMX_COREMASK_FOR_EACH_CORE_BEGIN(pcm, ppid)
+ {
+ if (cvmx_power_throttle_set_powlim(ppid, percentage) == 0)
+ ret = -2;
+ } CVMX_COREMASK_FOR_EACH_CORE_END;
+
+ return ret;
+}
+
+uint64_t cvmx_power_throttle_get_register(int ppid)
+{
+ uint64_t csr_addr;
+
+ if (!cvmx_power_throttle_initialized)
+ {
+ cvmx_power_throttle_init();
+ cvmx_power_throttle_initialized = 1;
+ }
+
+ csr_addr = cvmx_power_throttle_csr_addr(ppid);
+
+ if (csr_addr == 0)
+ return -1;
+
+ return cvmx_read_csr(csr_addr);
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-power-throttle.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-power-throttle.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-power-throttle.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-power-throttle.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,121 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to power-throttle control, measurement, and debugging
+ * facilities.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifndef __CVMX_POWER_THROTTLE_H__
+#define __CVMX_POWER_THROTTLE_H__
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum cvmx_power_throttle_field_index {
+ CVMX_PTH_INDEX_MAXPOW,
+ CVMX_PTH_INDEX_POWER,
+ CVMX_PTH_INDEX_THROTT,
+ CVMX_PTH_INDEX_RESERVED,
+ CVMX_PTH_INDEX_DISTAG,
+ CVMX_PTH_INDEX_PERIOD,
+ CVMX_PTH_INDEX_POWLIM,
+ CVMX_PTH_INDEX_MAXTHR,
+ CVMX_PTH_INDEX_MINTHR,
+ CVMX_PTH_INDEX_HRMPOWADJ,
+ CVMX_PTH_INDEX_OVRRD,
+ CVMX_PTH_INDEX_MAX
+};
+typedef enum cvmx_power_throttle_field_index cvmx_power_throttle_field_index_t;
+
+/**
+ * Throttle power to percentage% of configured maximum (MAXPOW).
+ *
+ * @param percentage 0 to 100
+ * @return 0 for success and -1 for error.
+ */
+extern int cvmx_power_throttle_self(uint8_t percentage);
+
+/**
+ * Throttle power to percentage% of configured maximum (MAXPOW)
+ * for the cores identified in coremask.
+ *
+ * @param percentage 0 to 100
+ * @param coremask bit mask where each bit identifies a core.
+ * @return 0 for success and a negative value for error.
+ */
+extern int cvmx_power_throttle(uint8_t percentage, uint64_t coremask);
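+/* Editor's illustrative sketch (not part of the SDK): throttle every core in
+ * the running application to 75% of MAXPOW, assuming the conventional
+ * cvmx_sysinfo_get()->core_mask holds the active cores:
+ *
+ *    if (cvmx_power_throttle(75, cvmx_sysinfo_get()->core_mask) != 0)
+ *        cvmx_dprintf("power throttling not available\n");
+ */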
+
+/**
+ * The same functionality as cvmx_power_throttle() but it takes a
+ * bitmap-based coremask as a parameter.
+ */
+extern int cvmx_power_throttle_bmp(uint8_t percentage,
+ struct cvmx_coremask *pcm);
+
+/**
+ * Get the i'th field of the power throttle register
+ *
+ * @param r is the value of the power throttle register
+ * @param i is the index of the field
+ *
+ * @return (uint64_t)-1 on failure.
+ */
+extern uint64_t cvmx_power_throttle_get_field(uint64_t r,
+ cvmx_power_throttle_field_index_t i);
+
+/**
+ * Retrieve the content of the power throttle register of a core
+ *
+ * @param ppid is the core id
+ *
+ * @return (uint64_t)-1 on failure.
+ */
+extern uint64_t cvmx_power_throttle_get_register(int ppid);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __CVMX_POWER_THROTTLE_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-power-throttle.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-profiler.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-profiler.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-profiler.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,240 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2011 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ************************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to event profiler.
+ *
+ */
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-interrupt.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-coremask.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-atomic.h"
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#include "cvmx-error.h"
+#endif
+#include "cvmx-asm.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-profiler.h"
+
+#ifdef PROFILER_DEBUG
+#define PRINTF(fmt, args...) cvmx_safe_printf(fmt, ##args)
+#else
+#define PRINTF(fmt, args...)
+#endif
+
+CVMX_SHARED static event_counter_control_block_t eccb;
+cvmx_config_block_t *pcpu_cfg_blk;
+
+int read_percpu_block = 1;
+
+/**
+ * Set Interrupt IRQ line for Performance Counter
+ *
+ */
+void cvmx_update_perfcnt_irq(void)
+{
+ uint64_t cvmctl;
+
+ /* Clear CvmCtl[IPPCI] bit and move the Performance Counter
+ * interrupt to IRQ 6
+ */
+ CVMX_MF_COP0(cvmctl, COP0_CVMCTL);
+ cvmctl &= ~(7 << 7);
+ cvmctl |= 6 << 7;
+ CVMX_MT_COP0(cvmctl, COP0_CVMCTL);
+}
+
+/**
+ * @INTERNAL
+ * Return the base address of the named block.
+ * @param buf_name Name of the named block
+ *
+ * @return base address of the block on success, NULL on failure.
+ */
+static
+void *cvmx_get_memory_addr(const char* buf_name)
+{
+ void *buffer_ptr = NULL;
+ const struct cvmx_bootmem_named_block_desc *block_desc = cvmx_bootmem_find_named_block(buf_name);
+ if (block_desc)
+ buffer_ptr = cvmx_phys_to_ptr(block_desc->base_addr);
+ assert (buffer_ptr != NULL);
+
+ return buffer_ptr;
+}
+
+/**
+ * @INTERNAL
+ * Initialize the cpu block metadata.
+ *
+ * @param cpu core no
+ * @param size size of per cpu memory in named block
+ *
+ */
+static
+void cvmx_init_pcpu_block(int cpu, int size)
+{
+ eccb.cfg_blk.pcpu_base_addr[cpu] = (char *)cvmx_get_memory_addr(EVENT_BUFFER_BLOCK) + (size * cpu);
+ assert (eccb.cfg_blk.pcpu_base_addr[cpu] != NULL);
+
+ cvmx_ringbuf_t *cpu_buf = (cvmx_ringbuf_t *) eccb.cfg_blk.pcpu_base_addr[cpu];
+
+ cpu_buf->pcpu_blk_info.size = size;
+ cpu_buf->pcpu_blk_info.max_samples = ((size - sizeof(cvmx_cpu_event_block_t)) / sizeof(cvmx_sample_entry_t));
+ cpu_buf->pcpu_blk_info.sample_count = 0;
+ cpu_buf->pcpu_blk_info.sample_read = 0;
+ cpu_buf->pcpu_blk_info.data = eccb.cfg_blk.pcpu_base_addr[cpu] + sizeof(cvmx_cpu_event_block_t) + PADBYTES;
+ cpu_buf->pcpu_blk_info.head = cpu_buf->pcpu_blk_info.tail = \
+ cpu_buf->pcpu_data = cpu_buf->pcpu_blk_info.data;
+ cpu_buf->pcpu_blk_info.end = eccb.cfg_blk.pcpu_base_addr[cpu] + size;
+
+ cvmx_atomic_set32(&read_percpu_block, 0);
+
+ /*
+ * Write per cpu mem base address info in to 'event config' named block,
+ * This info is needed by oct-remote-profile to get Per cpu memory
+ * base address of each core of the named block.
+ */
+ pcpu_cfg_blk = (cvmx_config_block_t *) eccb.config_blk_base_addr;
+ pcpu_cfg_blk->pcpu_base_addr[cpu] = eccb.cfg_blk.pcpu_base_addr[cpu];
+}
+
+/**
+ * @INTERNAL
+ * Retrieve the info from the 'event_config' named block.
+ *
+ * Here events value is read(as passed to oct-remote-profile) to reset perf
+ * counters on every Perf counter overflow.
+ *
+ */
+static
+void cvmx_read_config_blk(void)
+{
+ eccb.config_blk_base_addr = (char *)cvmx_get_memory_addr(EVENT_BUFFER_CONFIG_BLOCK);
+ memcpy(&(eccb.cfg_blk.events), eccb.config_blk_base_addr + \
+ offsetof(cvmx_config_block_t, events), sizeof(int64_t));
+
+ cvmx_atomic_set32(&eccb.read_cfg_blk,1);
+ PRINTF("cfg_blk.events=%lu, sample_count=%ld\n", eccb.cfg_blk.events, eccb.cfg_blk.sample_count);
+}
+
+/**
+ * @INTERNAL
+ * Add new sample to the buffer and increment the head pointer and
+ * global sample count (i.e. the sum total of samples collected on all cores).
+ *
+ */
+static
+void cvmx_add_sample_to_buffer(void)
+{
+ uint32_t epc;
+ int cpu = cvmx_get_core_num();
+ CVMX_MF_COP0(epc, COP0_EPC);
+
+ cvmx_ringbuf_t *cpu_buf = (cvmx_ringbuf_t *) eccb.cfg_blk.pcpu_base_addr[cpu];
+
+ /*
+ * The head/tail pointers can be NULL; this arises when oct-remote-profile is
+ * invoked afresh. To keep memory sane for the current instance, the named block
+ * is cleared of previous data, accomplished by octeon_remote_write_mem from the host.
+ */
+ if (cvmx_unlikely(!cpu_buf->pcpu_blk_info.head && !cpu_buf->pcpu_blk_info.end)) {
+ /* Reread the event count as a different threshold val could be
+ * passed with profiler alongside --events flag */
+ cvmx_read_config_blk();
+ cvmx_init_pcpu_block(cpu, EVENT_PERCPU_BUFFER_SIZE);
+ }
+
+ /* In case of hitting end of buffer, reset head,data ptr to start */
+ if (cpu_buf->pcpu_blk_info.head == cpu_buf->pcpu_blk_info.end)
+ cpu_buf->pcpu_blk_info.head = cpu_buf->pcpu_blk_info.data = cpu_buf->pcpu_data;
+
+ /* Store the pc, respective core no.*/
+ cvmx_sample_entry_t *sample = (cvmx_sample_entry_t *) cpu_buf->pcpu_blk_info.data;
+ sample->pc = epc;
+ sample->core = cpu;
+
+ /* Update Per CPU stats */
+ cpu_buf->pcpu_blk_info.sample_count++;
+ cpu_buf->pcpu_blk_info.data += sizeof(cvmx_sample_entry_t);
+ cpu_buf->pcpu_blk_info.head = cpu_buf->pcpu_blk_info.data;
+
+ /* Increment the global sample count, i.e. the sum total of samples on all cores */
+ cvmx_atomic_add64(&(pcpu_cfg_blk->sample_count), 1);
+
+ PRINTF("the core%d:pc 0x%016lx, sample_count=%ld\n", cpu, sample->pc, cpu_buf->pcpu_blk_info.sample_count);
+}
+
+/**
+ * @INTERNAL
+ * Reset performance counters
+ *
+ * @param pf The performance counter Number (0, 1)
+ * @param events The threshold value for which interrupt has to be asserted
+ */
+static
+void cvmx_reset_perf_counter(int pf, uint64_t events)
+{
+ uint64_t pfc;
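+ /* Editor's note: preloading the counter with 2^63 - events makes it reach
+ ** the interrupt threshold (bit 63 set) after 'events' more increments. */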
+ pfc = (1ull << 63) - events;
+
+ if (!pf) {
+ CVMX_MT_COP0(pfc, COP0_PERFVALUE0);
+ } else
+ CVMX_MT_COP0(pfc, COP0_PERFVALUE1);
+}
+
+void cvmx_collect_sample(void)
+{
+ if (!eccb.read_cfg_blk)
+ cvmx_read_config_blk();
+
+ if (read_percpu_block)
+ cvmx_init_pcpu_block(cvmx_get_core_num(), EVENT_PERCPU_BUFFER_SIZE);
+
+ cvmx_add_sample_to_buffer();
+ cvmx_reset_perf_counter(0, eccb.cfg_blk.events);
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-profiler.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-profiler.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-profiler.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-profiler.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,104 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2011 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *************************license end**************************************/
+
+/**
+ * @file
+ *
+ * Header file for the event Profiler.
+ *
+ */
+
+#ifndef __CVMX_PROFILER_H__
+#define __CVMX_PROFILER_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EVENT_PERCPU_BUFFER_SIZE 8192
+#define PADBYTES 24
+
+#define EVENT_BUFFER_BLOCK "event_block"
+#define EVENT_BUFFER_SIZE (EVENT_PERCPU_BUFFER_SIZE * (cvmx_octeon_num_cores() + 1))
+
+#define EVENT_BUFFER_CONFIG_BLOCK "event_config_block"
+#define EBC_BLOCK_SIZE 256
+
+typedef struct {
+ int core;
+ uint32_t pc;
+} cvmx_sample_entry_t;
+
+typedef struct cpu_event_block {
+ int size;
+ int sample_read;
+ int64_t max_samples;
+ int64_t sample_count;
+ char *head;
+ char *tail;
+ char *end;
+ char *data;
+} cvmx_cpu_event_block_t;
+
+typedef struct {
+ cvmx_cpu_event_block_t pcpu_blk_info;
+ char *pcpu_data;
+} cvmx_ringbuf_t;
+
+typedef struct config_block {
+ int64_t sample_count;
+ uint64_t events;
+ char *pcpu_base_addr[CVMX_MAX_CORES];
+} cvmx_config_block_t;
+
+typedef struct event_counter_control_block {
+ int32_t read_cfg_blk;
+ char *config_blk_base_addr;
+ cvmx_config_block_t cfg_blk;
+} event_counter_control_block_t;
+
+extern void cvmx_update_perfcnt_irq(void);
+extern void cvmx_collect_sample(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_PROFILER_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-profiler.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-qlm-tables.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-qlm-tables.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-qlm-tables.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,446 @@
+/* $MidnightBSD$ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-qlm.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include <cvmx.h>
+#include <cvmx-qlm.h>
+#else
+#include "cvmx.h"
+#include "cvmx-qlm.h"
+#endif
+#endif
+
+const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn56xx[] =
+{
+ {"prbs_error_count", 267, 220}, // BIST/PRBS error count (only valid if pbrs_lock asserted)
+ {"prbs_unlock_count", 219, 212}, // BIST/PRBS unlock count (only valid if pbrs_lock asserted)
+ {"prbs_locked", 211, 211}, // BIST/PRBS lock (asserted after QLM achieves lock)
+ {"reset_prbs", 210, 210}, // BIST/PRBS reset (write 0 to reset)
+ {"run_prbs", 209, 209}, // run PRBS test pattern
+ {"run_bist", 208, 208}, // run bist (May only work for PCIe ?)
+ {"unknown", 207, 202}, //
+ {"biasdrvsel", 201, 199}, // assign biasdrvsel = fus_cfg_reg[201:199] ^ jtg_cfg_reg[201:199] ^ ((pi_qlm_cfg == 2'h0) ? 3'h4 : (pi_qlm_cfg == 2'h2) ? 3'h7 : 3'h2);
+ {"biasbuffsel", 198, 196}, // assign biasbuffsel = fus_cfg_reg[198:196] ^ jtg_cfg_reg[198:196] ^ 3'h4;
+ {"tcoeff", 195, 192}, // assign tcoeff = fus_cfg_reg[195:192] ^ jtg_cfg_reg[195:192] ^ (pi_qlm_cfg[1] ? 4'h0 : 4'hc);
+ {"mb5000", 181, 181}, // assign mb5000 = fus_cfg_reg[181] ^ jtg_cfg_reg[181] ^ 1'h0;
+ {"interpbw", 180, 176}, // assign interpbw = fus_cfg_reg[180:176] ^ jtg_cfg_reg[180:176] ^ ((qlm_spd == 2'h0) ? 5'h1f : (qlm_spd == 2'h1) ? 5'h10 : 5'h0);
+ {"mb", 175, 172}, // assign mb = fus_cfg_reg[175:172] ^ jtg_cfg_reg[175:172] ^ 4'h0;
+ {"bwoff", 171, 160}, // assign bwoff = fus_cfg_reg[171:160] ^ jtg_cfg_reg[171:160] ^ 12'h0;
+ {"bg_ref_sel", 153, 153}, // assign bg_ref_sel = fus_cfg_reg[153] ^ jtg_cfg_reg[153] ^ 1'h0;
+ {"div2en", 152, 152}, // assign div2en = fus_cfg_reg[152] ^ jtg_cfg_reg[152] ^ 1'h0;
+ {"trimen", 151, 150}, // assign trimen = fus_cfg_reg[151:150] ^ jtg_cfg_reg[151:150] ^ 2'h0;
+ {"clkr", 149, 144}, // assign clkr = fus_cfg_reg[149:144] ^ jtg_cfg_reg[149:144] ^ 6'h0;
+ {"clkf", 143, 132}, // assign clkf = fus_cfg_reg[143:132] ^ jtg_cfg_reg[143:132] ^ 12'h18;
+ {"bwadj", 131, 120}, // assign bwadj = fus_cfg_reg[131:120] ^ jtg_cfg_reg[131:120] ^ 12'h30;
+ {"shlpbck", 119, 118}, // assign shlpbck = fus_cfg_reg[119:118] ^ jtg_cfg_reg[119:118] ^ 2'h0;
+ {"serdes_pll_byp", 117, 117}, // assign serdes_pll_byp = fus_cfg_reg[117] ^ jtg_cfg_reg[117] ^ 1'h0;
+ {"ic50dac", 116, 112}, // assign ic50dac = fus_cfg_reg[116:112] ^ jtg_cfg_reg[116:112] ^ 5'h11;
+ {"sl_posedge_sample", 111, 111}, // assign sl_posedge_sample = fus_cfg_reg[111] ^ jtg_cfg_reg[111] ^ 1'h0;
+ {"sl_enable", 110, 110}, // assign sl_enable = fus_cfg_reg[110] ^ jtg_cfg_reg[110] ^ 1'h0;
+ {"rx_rout_comp_bypass", 109, 109}, // assign rx_rout_comp_bypass = fus_cfg_reg[109] ^ jtg_cfg_reg[109] ^ 1'h0;
+ {"ir50dac", 108, 104}, // assign ir50dac = fus_cfg_reg[108:104] ^ jtg_cfg_reg[108:104] ^ 5'h11;
+ {"rx_res_offset", 103, 100}, // assign rx_res_offset = fus_cfg_reg[103:100] ^ jtg_cfg_reg[103:100] ^ 4'h2;
+ {"rx_rout_comp_value", 99, 96}, // assign rx_rout_comp_value = fus_cfg_reg[99:96] ^ jtg_cfg_reg[99:96] ^ 4'h7;
+ {"tx_rout_comp_value", 95, 92}, // assign tx_rout_comp_value = fus_cfg_reg[95:92] ^ jtg_cfg_reg[95:92] ^ 4'h7;
+ {"tx_res_offset", 91, 88}, // assign tx_res_offset = fus_cfg_reg[91:88] ^ jtg_cfg_reg[91:88] ^ 4'h1;
+ {"tx_rout_comp_bypass", 87, 87}, // assign tx_rout_comp_bypass = fus_cfg_reg[87] ^ jtg_cfg_reg[87] ^ 1'h0;
+ {"idle_dac", 86, 84}, // assign idle_dac = fus_cfg_reg[86:84] ^ jtg_cfg_reg[86:84] ^ 3'h4;
+ {"hyst_en", 83, 83}, // assign hyst_en = fus_cfg_reg[83] ^ jtg_cfg_reg[83] ^ 1'h1;
+ {"rndt", 82, 82}, // assign rndt = fus_cfg_reg[82] ^ jtg_cfg_reg[82] ^ 1'h0;
+ {"cfg_tx_com", 79, 79}, // CN52XX cfg_tx_com = fus_cfg_reg[79] ^ jtg_cfg_reg[79] ^ 1'h0;
+ {"cfg_cdr_errcor", 78, 78}, // CN52XX cfg_cdr_errcor = fus_cfg_reg[78] ^ jtg_cfg_reg[78] ^ 1'h0;
+ {"cfg_cdr_secord", 77, 77}, // CN52XX cfg_cdr_secord = fus_cfg_reg[77] ^ jtg_cfg_reg[77] ^ 1'h1;
+ {"cfg_cdr_rotate", 76, 76}, // assign cfg_cdr_rotate = fus_cfg_reg[76] ^ jtg_cfg_reg[76] ^ 1'h0;
+ {"cfg_cdr_rqoffs", 75, 68}, // assign cfg_cdr_rqoffs = fus_cfg_reg[75:68] ^ jtg_cfg_reg[75:68] ^ 8'h40;
+ {"cfg_cdr_incx", 67, 64}, // assign cfg_cdr_incx = fus_cfg_reg[67:64] ^ jtg_cfg_reg[67:64] ^ 4'h2;
+ {"cfg_cdr_state", 63, 56}, // assign cfg_cdr_state = fus_cfg_reg[63:56] ^ jtg_cfg_reg[63:56] ^ 8'h0;
+ {"cfg_cdr_bypass", 55, 55}, // assign cfg_cdr_bypass = fus_cfg_reg[55] ^ jtg_cfg_reg[55] ^ 1'h0;
+ {"cfg_tx_byp", 54, 54}, // assign cfg_tx_byp = fus_cfg_reg[54] ^ jtg_cfg_reg[54] ^ 1'h0;
+ {"cfg_tx_val", 53, 44}, // assign cfg_tx_val = fus_cfg_reg[53:44] ^ jtg_cfg_reg[53:44] ^ 10'h0;
+ {"cfg_rx_pol_set", 43, 43}, // assign cfg_rx_pol_set = fus_cfg_reg[43] ^ jtg_cfg_reg[43] ^ 1'h0;
+ {"cfg_rx_pol_clr", 42, 42}, // assign cfg_rx_pol_clr = fus_cfg_reg[42] ^ jtg_cfg_reg[42] ^ 1'h0;
+ {"cfg_cdr_bw_ctl", 41, 40}, // assign cfg_cdr_bw_ctl = fus_cfg_reg[41:40] ^ jtg_cfg_reg[41:40] ^ 2'h0;
+ {"cfg_rst_n_set", 39, 39}, // assign cfg_rst_n_set = fus_cfg_reg[39] ^ jtg_cfg_reg[39] ^ 1'h0;
+ {"cfg_rst_n_clr", 38, 38}, // assign cfg_rst_n_clr = fus_cfg_reg[38] ^ jtg_cfg_reg[38] ^ 1'h0;
+ {"cfg_tx_clk2", 37, 37}, // assign cfg_tx_clk2 = fus_cfg_reg[37] ^ jtg_cfg_reg[37] ^ 1'h0;
+ {"cfg_tx_clk1", 36, 36}, // assign cfg_tx_clk1 = fus_cfg_reg[36] ^ jtg_cfg_reg[36] ^ 1'h0;
+ {"cfg_tx_pol_set", 35, 35}, // assign cfg_tx_pol_set = fus_cfg_reg[35] ^ jtg_cfg_reg[35] ^ 1'h0;
+ {"cfg_tx_pol_clr", 34, 34}, // assign cfg_tx_pol_clr = fus_cfg_reg[34] ^ jtg_cfg_reg[34] ^ 1'h0;
+ {"cfg_tx_one", 33, 33}, // assign cfg_tx_one = fus_cfg_reg[33] ^ jtg_cfg_reg[33] ^ 1'h0;
+ {"cfg_tx_zero", 32, 32}, // assign cfg_tx_zero = fus_cfg_reg[32] ^ jtg_cfg_reg[32] ^ 1'h0;
+ {"cfg_rxd_wait", 31, 28}, // assign cfg_rxd_wait = fus_cfg_reg[31:28] ^ jtg_cfg_reg[31:28] ^ 4'h3;
+ {"cfg_rxd_short", 27, 27}, // assign cfg_rxd_short = fus_cfg_reg[27] ^ jtg_cfg_reg[27] ^ 1'h0;
+ {"cfg_rxd_set", 26, 26}, // assign cfg_rxd_set = fus_cfg_reg[26] ^ jtg_cfg_reg[26] ^ 1'h0;
+ {"cfg_rxd_clr", 25, 25}, // assign cfg_rxd_clr = fus_cfg_reg[25] ^ jtg_cfg_reg[25] ^ 1'h0;
+ {"cfg_loopback", 24, 24}, // assign cfg_loopback = fus_cfg_reg[24] ^ jtg_cfg_reg[24] ^ 1'h0;
+ {"cfg_tx_idle_set", 23, 23}, // assign cfg_tx_idle_set = fus_cfg_reg[23] ^ jtg_cfg_reg[23] ^ 1'h0;
+ {"cfg_tx_idle_clr", 22, 22}, // assign cfg_tx_idle_clr = fus_cfg_reg[22] ^ jtg_cfg_reg[22] ^ 1'h0;
+ {"cfg_rx_idle_set", 21, 21}, // assign cfg_rx_idle_set = fus_cfg_reg[21] ^ jtg_cfg_reg[21] ^ 1'h0;
+ {"cfg_rx_idle_clr", 20, 20}, // assign cfg_rx_idle_clr = fus_cfg_reg[20] ^ jtg_cfg_reg[20] ^ 1'h0;
+ {"cfg_rx_idle_thr", 19, 16}, // assign cfg_rx_idle_thr = fus_cfg_reg[19:16] ^ jtg_cfg_reg[19:16] ^ 4'h0;
+ {"cfg_com_thr", 15, 12}, // assign cfg_com_thr = fus_cfg_reg[15:12] ^ jtg_cfg_reg[15:12] ^ 4'h3;
+ {"cfg_rx_offset", 11, 8}, // assign cfg_rx_offset = fus_cfg_reg[11:8] ^ jtg_cfg_reg[11:8] ^ 4'h4;
+ {"cfg_skp_max", 7, 4}, // assign cfg_skp_max = fus_cfg_reg[7:4] ^ jtg_cfg_reg[7:4] ^ 4'hc;
+ {"cfg_skp_min", 3, 0}, // assign cfg_skp_min = fus_cfg_reg[3:0] ^ jtg_cfg_reg[3:0] ^ 4'h4;
+ {NULL, -1, -1}
+};
+
+const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn52xx[] =
+{
+ {"prbs_error_count", 267, 220}, // BIST/PRBS error count (only valid if pbrs_lock asserted)
+ {"prbs_unlock_count", 219, 212}, // BIST/PRBS unlock count (only valid if pbrs_lock asserted)
+ {"prbs_locked", 211, 211}, // BIST/PRBS lock (asserted after QLM achieves lock)
+ {"reset_prbs", 210, 210}, // BIST/PRBS reset (write 0 to reset)
+ {"run_prbs", 209, 209}, // run PRBS test pattern
+ {"run_bist", 208, 208}, // run bist (May only work for PCIe ?)
+ {"unknown", 207, 202}, //
+
+ {"biasdrvsel", 201, 199}, // assign biasdrvsel = fus_cfg_reg[201:199] ^ jtg_cfg_reg[201:199] ^ ((pi_qlm_cfg == 2'h0) ? 3'h4 : (pi_qlm_cfg == 2'h2) ? 3'h7 : 3'h2);
+ {"biasbuffsel", 198, 196}, // assign biasbuffsel = fus_cfg_reg[198:196] ^ jtg_cfg_reg[198:196] ^ 3'h4;
+ {"tcoeff", 195, 192}, // assign tcoeff = fus_cfg_reg[195:192] ^ jtg_cfg_reg[195:192] ^ (pi_qlm_cfg[1] ? 4'h0 : 4'hc);
+ {"mb5000", 181, 181}, // assign mb5000 = fus_cfg_reg[181] ^ jtg_cfg_reg[181] ^ 1'h0;
+ {"interpbw", 180, 176}, // assign interpbw = fus_cfg_reg[180:176] ^ jtg_cfg_reg[180:176] ^ ((qlm_spd == 2'h0) ? 5'h1f : (qlm_spd == 2'h1) ? 5'h10 : 5'h0);
+ {"mb", 175, 172}, // assign mb = fus_cfg_reg[175:172] ^ jtg_cfg_reg[175:172] ^ 4'h0;
+ {"bwoff", 171, 160}, // assign bwoff = fus_cfg_reg[171:160] ^ jtg_cfg_reg[171:160] ^ 12'h0;
+ {"bg_ref_sel", 153, 153}, // assign bg_ref_sel = fus_cfg_reg[153] ^ jtg_cfg_reg[153] ^ 1'h0;
+ {"div2en", 152, 152}, // assign div2en = fus_cfg_reg[152] ^ jtg_cfg_reg[152] ^ 1'h0;
+ {"trimen", 151, 150}, // assign trimen = fus_cfg_reg[151:150] ^ jtg_cfg_reg[151:150] ^ 2'h0;
+ {"clkr", 149, 144}, // assign clkr = fus_cfg_reg[149:144] ^ jtg_cfg_reg[149:144] ^ 6'h0;
+ {"clkf", 143, 132}, // assign clkf = fus_cfg_reg[143:132] ^ jtg_cfg_reg[143:132] ^ 12'h18;
+ {"bwadj", 131, 120}, // assign bwadj = fus_cfg_reg[131:120] ^ jtg_cfg_reg[131:120] ^ 12'h30;
+ {"shlpbck", 119, 118}, // assign shlpbck = fus_cfg_reg[119:118] ^ jtg_cfg_reg[119:118] ^ 2'h0;
+ {"serdes_pll_byp", 117, 117}, // assign serdes_pll_byp = fus_cfg_reg[117] ^ jtg_cfg_reg[117] ^ 1'h0;
+ {"ic50dac", 116, 112}, // assign ic50dac = fus_cfg_reg[116:112] ^ jtg_cfg_reg[116:112] ^ 5'h11;
+ {"sl_posedge_sample", 111, 111}, // assign sl_posedge_sample = fus_cfg_reg[111] ^ jtg_cfg_reg[111] ^ 1'h0;
+ {"sl_enable", 110, 110}, // assign sl_enable = fus_cfg_reg[110] ^ jtg_cfg_reg[110] ^ 1'h0;
+ {"rx_rout_comp_bypass", 109, 109}, // assign rx_rout_comp_bypass = fus_cfg_reg[109] ^ jtg_cfg_reg[109] ^ 1'h0;
+ {"ir50dac", 108, 104}, // assign ir50dac = fus_cfg_reg[108:104] ^ jtg_cfg_reg[108:104] ^ 5'h11;
+ {"rx_res_offset", 103, 100}, // assign rx_res_offset = fus_cfg_reg[103:100] ^ jtg_cfg_reg[103:100] ^ 4'h2;
+ {"rx_rout_comp_value", 99, 96}, // assign rx_rout_comp_value = fus_cfg_reg[99:96] ^ jtg_cfg_reg[99:96] ^ 4'h7;
+ {"tx_rout_comp_value", 95, 92}, // assign tx_rout_comp_value = fus_cfg_reg[95:92] ^ jtg_cfg_reg[95:92] ^ 4'h7;
+ {"tx_res_offset", 91, 88}, // assign tx_res_offset = fus_cfg_reg[91:88] ^ jtg_cfg_reg[91:88] ^ 4'h1;
+ {"tx_rout_comp_bypass", 87, 87}, // assign tx_rout_comp_bypass = fus_cfg_reg[87] ^ jtg_cfg_reg[87] ^ 1'h0;
+ {"idle_dac", 86, 84}, // assign idle_dac = fus_cfg_reg[86:84] ^ jtg_cfg_reg[86:84] ^ 3'h4;
+ {"hyst_en", 83, 83}, // assign hyst_en = fus_cfg_reg[83] ^ jtg_cfg_reg[83] ^ 1'h1;
+ {"rndt", 82, 82}, // assign rndt = fus_cfg_reg[82] ^ jtg_cfg_reg[82] ^ 1'h0;
+ {"cfg_tx_com", 79, 79}, // CN52XX cfg_tx_com = fus_cfg_reg[79] ^ jtg_cfg_reg[79] ^ 1'h0;
+ {"cfg_cdr_errcor", 78, 78}, // CN52XX cfg_cdr_errcor = fus_cfg_reg[78] ^ jtg_cfg_reg[78] ^ 1'h0;
+ {"cfg_cdr_secord", 77, 77}, // CN52XX cfg_cdr_secord = fus_cfg_reg[77] ^ jtg_cfg_reg[77] ^ 1'h1;
+ {"cfg_cdr_rotate", 76, 76}, // assign cfg_cdr_rotate = fus_cfg_reg[76] ^ jtg_cfg_reg[76] ^ 1'h0;
+ {"cfg_cdr_rqoffs", 75, 68}, // assign cfg_cdr_rqoffs = fus_cfg_reg[75:68] ^ jtg_cfg_reg[75:68] ^ 8'h40;
+ {"cfg_cdr_incx", 67, 64}, // assign cfg_cdr_incx = fus_cfg_reg[67:64] ^ jtg_cfg_reg[67:64] ^ 4'h2;
+ {"cfg_cdr_state", 63, 56}, // assign cfg_cdr_state = fus_cfg_reg[63:56] ^ jtg_cfg_reg[63:56] ^ 8'h0;
+ {"cfg_cdr_bypass", 55, 55}, // assign cfg_cdr_bypass = fus_cfg_reg[55] ^ jtg_cfg_reg[55] ^ 1'h0;
+ {"cfg_tx_byp", 54, 54}, // assign cfg_tx_byp = fus_cfg_reg[54] ^ jtg_cfg_reg[54] ^ 1'h0;
+ {"cfg_tx_val", 53, 44}, // assign cfg_tx_val = fus_cfg_reg[53:44] ^ jtg_cfg_reg[53:44] ^ 10'h0;
+ {"cfg_rx_pol_set", 43, 43}, // assign cfg_rx_pol_set = fus_cfg_reg[43] ^ jtg_cfg_reg[43] ^ 1'h0;
+ {"cfg_rx_pol_clr", 42, 42}, // assign cfg_rx_pol_clr = fus_cfg_reg[42] ^ jtg_cfg_reg[42] ^ 1'h0;
+ {"cfg_cdr_bw_ctl", 41, 40}, // assign cfg_cdr_bw_ctl = fus_cfg_reg[41:40] ^ jtg_cfg_reg[41:40] ^ 2'h0;
+ {"cfg_rst_n_set", 39, 39}, // assign cfg_rst_n_set = fus_cfg_reg[39] ^ jtg_cfg_reg[39] ^ 1'h0;
+ {"cfg_rst_n_clr", 38, 38}, // assign cfg_rst_n_clr = fus_cfg_reg[38] ^ jtg_cfg_reg[38] ^ 1'h0;
+ {"cfg_tx_clk2", 37, 37}, // assign cfg_tx_clk2 = fus_cfg_reg[37] ^ jtg_cfg_reg[37] ^ 1'h0;
+ {"cfg_tx_clk1", 36, 36}, // assign cfg_tx_clk1 = fus_cfg_reg[36] ^ jtg_cfg_reg[36] ^ 1'h0;
+ {"cfg_tx_pol_set", 35, 35}, // assign cfg_tx_pol_set = fus_cfg_reg[35] ^ jtg_cfg_reg[35] ^ 1'h0;
+ {"cfg_tx_pol_clr", 34, 34}, // assign cfg_tx_pol_clr = fus_cfg_reg[34] ^ jtg_cfg_reg[34] ^ 1'h0;
+ {"cfg_tx_one", 33, 33}, // assign cfg_tx_one = fus_cfg_reg[33] ^ jtg_cfg_reg[33] ^ 1'h0;
+ {"cfg_tx_zero", 32, 32}, // assign cfg_tx_zero = fus_cfg_reg[32] ^ jtg_cfg_reg[32] ^ 1'h0;
+ {"cfg_rxd_wait", 31, 28}, // assign cfg_rxd_wait = fus_cfg_reg[31:28] ^ jtg_cfg_reg[31:28] ^ 4'h3;
+ {"cfg_rxd_short", 27, 27}, // assign cfg_rxd_short = fus_cfg_reg[27] ^ jtg_cfg_reg[27] ^ 1'h0;
+ {"cfg_rxd_set", 26, 26}, // assign cfg_rxd_set = fus_cfg_reg[26] ^ jtg_cfg_reg[26] ^ 1'h0;
+ {"cfg_rxd_clr", 25, 25}, // assign cfg_rxd_clr = fus_cfg_reg[25] ^ jtg_cfg_reg[25] ^ 1'h0;
+ {"cfg_loopback", 24, 24}, // assign cfg_loopback = fus_cfg_reg[24] ^ jtg_cfg_reg[24] ^ 1'h0;
+ {"cfg_tx_idle_set", 23, 23}, // assign cfg_tx_idle_set = fus_cfg_reg[23] ^ jtg_cfg_reg[23] ^ 1'h0;
+ {"cfg_tx_idle_clr", 22, 22}, // assign cfg_tx_idle_clr = fus_cfg_reg[22] ^ jtg_cfg_reg[22] ^ 1'h0;
+ {"cfg_rx_idle_set", 21, 21}, // assign cfg_rx_idle_set = fus_cfg_reg[21] ^ jtg_cfg_reg[21] ^ 1'h0;
+ {"cfg_rx_idle_clr", 20, 20}, // assign cfg_rx_idle_clr = fus_cfg_reg[20] ^ jtg_cfg_reg[20] ^ 1'h0;
+ {"cfg_rx_idle_thr", 19, 16}, // assign cfg_rx_idle_thr = fus_cfg_reg[19:16] ^ jtg_cfg_reg[19:16] ^ 4'h0;
+ {"cfg_com_thr", 15, 12}, // assign cfg_com_thr = fus_cfg_reg[15:12] ^ jtg_cfg_reg[15:12] ^ 4'h3;
+ {"cfg_rx_offset", 11, 8}, // assign cfg_rx_offset = fus_cfg_reg[11:8] ^ jtg_cfg_reg[11:8] ^ 4'h4;
+ {"cfg_skp_max", 7, 4}, // assign cfg_skp_max = fus_cfg_reg[7:4] ^ jtg_cfg_reg[7:4] ^ 4'hc;
+ {"cfg_skp_min", 3, 0}, // assign cfg_skp_min = fus_cfg_reg[3:0] ^ jtg_cfg_reg[3:0] ^ 4'h4;
+ {NULL, -1, -1}
+};
+
+
+const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn63xx[] =
+{
+ {"prbs_err_cnt", 299, 252}, // prbs_err_cnt[47..0]
+ {"prbs_lock", 251, 251}, // prbs_lock
+ {"jtg_prbs_rst_n", 250, 250}, // jtg_prbs_rst_n
+ {"jtg_run_prbs31", 249, 249}, // jtg_run_prbs31
+ {"jtg_run_prbs7", 248, 248}, // jtg_run_prbs7
+ {"Unused1", 247, 245}, // 0
+ {"cfg_pwrup_set", 244, 244}, // cfg_pwrup_set
+ {"cfg_pwrup_clr", 243, 243}, // cfg_pwrup_clr
+ {"cfg_rst_n_set", 242, 242}, // cfg_rst_n_set
+ {"cfg_rst_n_clr", 241, 241}, // cfg_rst_n_clr
+ {"cfg_tx_idle_set", 240, 240}, // cfg_tx_idle_set
+ {"cfg_tx_idle_clr", 239, 239}, // cfg_tx_idle_clr
+ {"cfg_tx_byp", 238, 238}, // cfg_tx_byp
+ {"cfg_tx_byp_inv", 237, 237}, // cfg_tx_byp_inv
+ {"cfg_tx_byp_val", 236, 227}, // cfg_tx_byp_val[9..0]
+ {"cfg_loopback", 226, 226}, // cfg_loopback
+ {"shlpbck", 225, 224}, // shlpbck[1..0]
+ {"sl_enable", 223, 223}, // sl_enable
+ {"sl_posedge_sample", 222, 222}, // sl_posedge_sample
+ {"trimen", 221, 220}, // trimen[1..0]
+ {"serdes_tx_byp", 219, 219}, // serdes_tx_byp
+ {"serdes_pll_byp", 218, 218}, // serdes_pll_byp
+ {"lowf_byp", 217, 217}, // lowf_byp
+ {"spdsel_byp", 216, 216}, // spdsel_byp
+ {"div4_byp", 215, 215}, // div4_byp
+ {"clkf_byp", 214, 208}, // clkf_byp[6..0]
+ {"Unused2", 207, 206}, // 0
+ {"biasdrv_hs_ls_byp", 205, 201}, // biasdrv_hs_ls_byp[4..0]
+ {"tcoeff_hf_ls_byp", 200, 197}, // tcoeff_hf_ls_byp[3..0]
+ {"biasdrv_hf_byp", 196, 192}, // biasdrv_hf_byp[4..0]
+ {"tcoeff_hf_byp", 191, 188}, // tcoeff_hf_byp[3..0]
+ {"Unused3", 187, 186}, // 0
+ {"biasdrv_lf_ls_byp", 185, 181}, // biasdrv_lf_ls_byp[4..0]
+ {"tcoeff_lf_ls_byp", 180, 177}, // tcoeff_lf_ls_byp[3..0]
+ {"biasdrv_lf_byp", 176, 172}, // biasdrv_lf_byp[4..0]
+ {"tcoeff_lf_byp", 171, 168}, // tcoeff_lf_byp[3..0]
+ {"Unused4", 167, 167}, // 0
+ {"interpbw", 166, 162}, // interpbw[4..0]
+ {"pll_cpb", 161, 159}, // pll_cpb[2..0]
+ {"pll_cps", 158, 156}, // pll_cps[2..0]
+ {"pll_diffamp", 155, 152}, // pll_diffamp[3..0]
+ {"Unused5", 151, 150}, // 0
+ {"cfg_rx_idle_set", 149, 149}, // cfg_rx_idle_set
+ {"cfg_rx_idle_clr", 148, 148}, // cfg_rx_idle_clr
+ {"cfg_rx_idle_thr", 147, 144}, // cfg_rx_idle_thr[3..0]
+ {"cfg_com_thr", 143, 140}, // cfg_com_thr[3..0]
+ {"cfg_rx_offset", 139, 136}, // cfg_rx_offset[3..0]
+ {"cfg_skp_max", 135, 132}, // cfg_skp_max[3..0]
+ {"cfg_skp_min", 131, 128}, // cfg_skp_min[3..0]
+ {"cfg_fast_pwrup", 127, 127}, // cfg_fast_pwrup
+ {"Unused6", 126, 100}, // 0
+ {"detected_n", 99, 99}, // detected_n
+ {"detected_p", 98, 98}, // detected_p
+ {"dbg_res_rx", 97, 94}, // dbg_res_rx[3..0]
+ {"dbg_res_tx", 93, 90}, // dbg_res_tx[3..0]
+ {"cfg_tx_pol_set", 89, 89}, // cfg_tx_pol_set
+ {"cfg_tx_pol_clr", 88, 88}, // cfg_tx_pol_clr
+ {"cfg_rx_pol_set", 87, 87}, // cfg_rx_pol_set
+ {"cfg_rx_pol_clr", 86, 86}, // cfg_rx_pol_clr
+ {"cfg_rxd_set", 85, 85}, // cfg_rxd_set
+ {"cfg_rxd_clr", 84, 84}, // cfg_rxd_clr
+ {"cfg_rxd_wait", 83, 80}, // cfg_rxd_wait[3..0]
+ {"cfg_cdr_limit", 79, 79}, // cfg_cdr_limit
+ {"cfg_cdr_rotate", 78, 78}, // cfg_cdr_rotate
+ {"cfg_cdr_bw_ctl", 77, 76}, // cfg_cdr_bw_ctl[1..0]
+ {"cfg_cdr_trunc", 75, 74}, // cfg_cdr_trunc[1..0]
+ {"cfg_cdr_rqoffs", 73, 64}, // cfg_cdr_rqoffs[9..0]
+ {"cfg_cdr_inc2", 63, 58}, // cfg_cdr_inc2[5..0]
+ {"cfg_cdr_inc1", 57, 52}, // cfg_cdr_inc1[5..0]
+ {"fusopt_voter_sync", 51, 51}, // fusopt_voter_sync
+ {"rndt", 50, 50}, // rndt
+ {"hcya", 49, 49}, // hcya
+ {"hyst", 48, 48}, // hyst
+ {"idle_dac", 47, 45}, // idle_dac[2..0]
+ {"bg_ref_sel", 44, 44}, // bg_ref_sel
+ {"ic50dac", 43, 39}, // ic50dac[4..0]
+ {"ir50dac", 38, 34}, // ir50dac[4..0]
+ {"tx_rout_comp_bypass", 33, 33}, // tx_rout_comp_bypass
+ {"tx_rout_comp_value", 32, 29}, // tx_rout_comp_value[3..0]
+ {"tx_res_offset", 28, 25}, // tx_res_offset[3..0]
+ {"rx_rout_comp_bypass", 24, 24}, // rx_rout_comp_bypass
+ {"rx_rout_comp_value", 23, 20}, // rx_rout_comp_value[3..0]
+ {"rx_res_offset", 19, 16}, // rx_res_offset[3..0]
+ {"rx_cap_gen2", 15, 12}, // rx_cap_gen2[3..0]
+ {"rx_eq_gen2", 11, 8}, // rx_eq_gen2[3..0]
+ {"rx_cap_gen1", 7, 4}, // rx_cap_gen1[3..0]
+ {"rx_eq_gen1", 3, 0}, // rx_eq_gen1[3..0]
+ {NULL, -1, -1}
+};
+
+const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn66xx[] =
+{
+ {"prbs_err_cnt", 303, 256}, // prbs_err_cnt[47..0]
+ {"prbs_lock", 255, 255}, // prbs_lock
+ {"jtg_prbs_rx_rst_n", 254, 254}, // jtg_prbs_rx_rst_n
+ {"jtg_prbs_tx_rst_n", 253, 253}, // jtg_prbs_tx_rst_n
+ {"jtg_prbs_mode", 252, 251}, // jtg_prbs_mode[252:251]
+ {"jtg_prbs_rst_n", 250, 250}, // jtg_prbs_rst_n
+ {"jtg_run_prbs31", 249, 249}, // jtg_run_prbs31 - Use jtg_prbs_mode instead
+ {"jtg_run_prbs7", 248, 248}, // jtg_run_prbs7 - Use jtg_prbs_mode instead
+ {"Unused1", 247, 246}, // 0
+ {"div5_byp", 245, 245}, // div5_byp
+ {"cfg_pwrup_set", 244, 244}, // cfg_pwrup_set
+ {"cfg_pwrup_clr", 243, 243}, // cfg_pwrup_clr
+ {"cfg_rst_n_set", 242, 242}, // cfg_rst_n_set
+ {"cfg_rst_n_clr", 241, 241}, // cfg_rst_n_clr
+ {"cfg_tx_idle_set", 240, 240}, // cfg_tx_idle_set
+ {"cfg_tx_idle_clr", 239, 239}, // cfg_tx_idle_clr
+ {"cfg_tx_byp", 238, 238}, // cfg_tx_byp
+ {"cfg_tx_byp_inv", 237, 237}, // cfg_tx_byp_inv
+ {"cfg_tx_byp_val", 236, 227}, // cfg_tx_byp_val[9..0]
+ {"cfg_loopback", 226, 226}, // cfg_loopback
+ {"shlpbck", 225, 224}, // shlpbck[1..0]
+ {"sl_enable", 223, 223}, // sl_enable
+ {"sl_posedge_sample", 222, 222}, // sl_posedge_sample
+ {"trimen", 221, 220}, // trimen[1..0]
+ {"serdes_tx_byp", 219, 219}, // serdes_tx_byp
+ {"serdes_pll_byp", 218, 218}, // serdes_pll_byp
+ {"lowf_byp", 217, 217}, // lowf_byp
+ {"spdsel_byp", 216, 216}, // spdsel_byp
+ {"div4_byp", 215, 215}, // div4_byp
+ {"clkf_byp", 214, 208}, // clkf_byp[6..0]
+ {"biasdrv_hs_ls_byp", 207, 203}, // biasdrv_hs_ls_byp[4..0]
+ {"tcoeff_hf_ls_byp", 202, 198}, // tcoeff_hf_ls_byp[4..0]
+ {"biasdrv_hf_byp", 197, 193}, // biasdrv_hf_byp[4..0]
+ {"tcoeff_hf_byp", 192, 188}, // tcoeff_hf_byp[4..0]
+ {"biasdrv_lf_ls_byp", 187, 183}, // biasdrv_lf_ls_byp[4..0]
+ {"tcoeff_lf_ls_byp", 182, 178}, // tcoeff_lf_ls_byp[4..0]
+ {"biasdrv_lf_byp", 177, 173}, // biasdrv_lf_byp[4..0]
+ {"tcoeff_lf_byp", 172, 168}, // tcoeff_lf_byp[4..0]
+ {"Unused4", 167, 167}, // 0
+ {"interpbw", 166, 162}, // interpbw[4..0]
+ {"pll_cpb", 161, 159}, // pll_cpb[2..0]
+ {"pll_cps", 158, 156}, // pll_cps[2..0]
+ {"pll_diffamp", 155, 152}, // pll_diffamp[3..0]
+ {"cfg_err_thr", 151, 150}, // cfg_err_thr
+ {"cfg_rx_idle_set", 149, 149}, // cfg_rx_idle_set
+ {"cfg_rx_idle_clr", 148, 148}, // cfg_rx_idle_clr
+ {"cfg_rx_idle_thr", 147, 144}, // cfg_rx_idle_thr[3..0]
+ {"cfg_com_thr", 143, 140}, // cfg_com_thr[3..0]
+ {"cfg_rx_offset", 139, 136}, // cfg_rx_offset[3..0]
+ {"cfg_skp_max", 135, 132}, // cfg_skp_max[3..0]
+ {"cfg_skp_min", 131, 128}, // cfg_skp_min[3..0]
+ {"cfg_fast_pwrup", 127, 127}, // cfg_fast_pwrup
+ {"Unused6", 126, 101}, // 0
+ {"cfg_indep_dis", 100, 100}, // cfg_indep_dis
+ {"detected_n", 99, 99}, // detected_n
+ {"detected_p", 98, 98}, // detected_p
+ {"dbg_res_rx", 97, 94}, // dbg_res_rx[3..0]
+ {"dbg_res_tx", 93, 90}, // dbg_res_tx[3..0]
+ {"cfg_tx_pol_set", 89, 89}, // cfg_tx_pol_set
+ {"cfg_tx_pol_clr", 88, 88}, // cfg_tx_pol_clr
+ {"cfg_rx_pol_set", 87, 87}, // cfg_rx_pol_set
+ {"cfg_rx_pol_clr", 86, 86}, // cfg_rx_pol_clr
+ {"cfg_rxd_set", 85, 85}, // cfg_rxd_set
+ {"cfg_rxd_clr", 84, 84}, // cfg_rxd_clr
+ {"cfg_rxd_wait", 83, 80}, // cfg_rxd_wait[3..0]
+ {"cfg_cdr_limit", 79, 79}, // cfg_cdr_limit
+ {"cfg_cdr_rotate", 78, 78}, // cfg_cdr_rotate
+ {"cfg_cdr_bw_ctl", 77, 76}, // cfg_cdr_bw_ctl[1..0]
+ {"cfg_cdr_trunc", 75, 74}, // cfg_cdr_trunc[1..0]
+ {"cfg_cdr_rqoffs", 73, 64}, // cfg_cdr_rqoffs[9..0]
+ {"cfg_cdr_inc2", 63, 58}, // cfg_cdr_inc2[5..0]
+ {"cfg_cdr_inc1", 57, 52}, // cfg_cdr_inc1[5..0]
+ {"fusopt_voter_sync", 51, 51}, // fusopt_voter_sync
+ {"rndt", 50, 50}, // rndt
+ {"hcya", 49, 49}, // hcya
+ {"hyst", 48, 48}, // hyst
+ {"idle_dac", 47, 45}, // idle_dac[2..0]
+ {"bg_ref_sel", 44, 44}, // bg_ref_sel
+ {"ic50dac", 43, 39}, // ic50dac[4..0]
+ {"ir50dac", 38, 34}, // ir50dac[4..0]
+ {"tx_rout_comp_bypass", 33, 33}, // tx_rout_comp_bypass
+ {"tx_rout_comp_value", 32, 29}, // tx_rout_comp_value[3..0]
+ {"tx_res_offset", 28, 25}, // tx_res_offset[3..0]
+ {"rx_rout_comp_bypass", 24, 24}, // rx_rout_comp_bypass
+ {"rx_rout_comp_value", 23, 20}, // rx_rout_comp_value[3..0]
+ {"rx_res_offset", 19, 16}, // rx_res_offset[3..0]
+ {"rx_cap_gen2", 15, 12}, // rx_cap_gen2[3..0]
+ {"rx_eq_gen2", 11, 8}, // rx_eq_gen2[3..0]
+ {"rx_cap_gen1", 7, 4}, // rx_cap_gen1[3..0]
+ {"rx_eq_gen1", 3, 0}, // rx_eq_gen1[3..0]
+ {NULL, -1, -1}
+};
+
+const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn68xx[] =
+{
+ {"prbs_err_cnt", 303, 256}, // prbs_err_cnt[47..0]
+ {"prbs_lock", 255, 255}, // prbs_lock
+ {"jtg_prbs_rx_rst_n", 254, 254}, // jtg_prbs_rx_rst_n
+ {"jtg_prbs_tx_rst_n", 253, 253}, // jtg_prbs_tx_rst_n
+ {"jtg_prbs_mode", 252, 251}, // jtg_prbs_mode[252:251]
+ {"jtg_prbs_rst_n", 250, 250}, // jtg_prbs_rst_n
+ {"jtg_run_prbs31", 249, 249}, // jtg_run_prbs31 - Use jtg_prbs_mode instead
+ {"jtg_run_prbs7", 248, 248}, // jtg_run_prbs7 - Use jtg_prbs_mode instead
+ {"Unused1", 247, 245}, // 0
+ {"cfg_pwrup_set", 244, 244}, // cfg_pwrup_set
+ {"cfg_pwrup_clr", 243, 243}, // cfg_pwrup_clr
+ {"cfg_rst_n_set", 242, 242}, // cfg_rst_n_set
+ {"cfg_rst_n_clr", 241, 241}, // cfg_rst_n_clr
+ {"cfg_tx_idle_set", 240, 240}, // cfg_tx_idle_set
+ {"cfg_tx_idle_clr", 239, 239}, // cfg_tx_idle_clr
+ {"cfg_tx_byp", 238, 238}, // cfg_tx_byp
+ {"cfg_tx_byp_inv", 237, 237}, // cfg_tx_byp_inv
+ {"cfg_tx_byp_val", 236, 227}, // cfg_tx_byp_val[9..0]
+ {"cfg_loopback", 226, 226}, // cfg_loopback
+ {"shlpbck", 225, 224}, // shlpbck[1..0]
+ {"sl_enable", 223, 223}, // sl_enable
+ {"sl_posedge_sample", 222, 222}, // sl_posedge_sample
+ {"trimen", 221, 220}, // trimen[1..0]
+ {"serdes_tx_byp", 219, 219}, // serdes_tx_byp
+ {"serdes_pll_byp", 218, 218}, // serdes_pll_byp
+ {"lowf_byp", 217, 217}, // lowf_byp
+ {"spdsel_byp", 216, 216}, // spdsel_byp
+ {"div4_byp", 215, 215}, // div4_byp
+ {"clkf_byp", 214, 208}, // clkf_byp[6..0]
+ {"biasdrv_hs_ls_byp", 207, 203}, // biasdrv_hs_ls_byp[4..0]
+ {"tcoeff_hf_ls_byp", 202, 198}, // tcoeff_hf_ls_byp[4..0]
+ {"biasdrv_hf_byp", 197, 193}, // biasdrv_hf_byp[4..0]
+ {"tcoeff_hf_byp", 192, 188}, // tcoeff_hf_byp[4..0]
+ {"biasdrv_lf_ls_byp", 187, 183}, // biasdrv_lf_ls_byp[4..0]
+ {"tcoeff_lf_ls_byp", 182, 178}, // tcoeff_lf_ls_byp[4..0]
+ {"biasdrv_lf_byp", 177, 173}, // biasdrv_lf_byp[4..0]
+ {"tcoeff_lf_byp", 172, 168}, // tcoeff_lf_byp[4..0]
+ {"Unused4", 167, 167}, // 0
+ {"interpbw", 166, 162}, // interpbw[4..0]
+ {"pll_cpb", 161, 159}, // pll_cpb[2..0]
+ {"pll_cps", 158, 156}, // pll_cps[2..0]
+ {"pll_diffamp", 155, 152}, // pll_diffamp[3..0]
+ {"cfg_err_thr", 151, 150}, // cfg_err_thr
+ {"cfg_rx_idle_set", 149, 149}, // cfg_rx_idle_set
+ {"cfg_rx_idle_clr", 148, 148}, // cfg_rx_idle_clr
+ {"cfg_rx_idle_thr", 147, 144}, // cfg_rx_idle_thr[3..0]
+ {"cfg_com_thr", 143, 140}, // cfg_com_thr[3..0]
+ {"cfg_rx_offset", 139, 136}, // cfg_rx_offset[3..0]
+ {"cfg_skp_max", 135, 132}, // cfg_skp_max[3..0]
+ {"cfg_skp_min", 131, 128}, // cfg_skp_min[3..0]
+ {"cfg_fast_pwrup", 127, 127}, // cfg_fast_pwrup
+ {"Unused6", 126, 100}, // 0
+ {"detected_n", 99, 99}, // detected_n
+ {"detected_p", 98, 98}, // detected_p
+ {"dbg_res_rx", 97, 94}, // dbg_res_rx[3..0]
+ {"dbg_res_tx", 93, 90}, // dbg_res_tx[3..0]
+ {"cfg_tx_pol_set", 89, 89}, // cfg_tx_pol_set
+ {"cfg_tx_pol_clr", 88, 88}, // cfg_tx_pol_clr
+ {"cfg_rx_pol_set", 87, 87}, // cfg_rx_pol_set
+ {"cfg_rx_pol_clr", 86, 86}, // cfg_rx_pol_clr
+ {"cfg_rxd_set", 85, 85}, // cfg_rxd_set
+ {"cfg_rxd_clr", 84, 84}, // cfg_rxd_clr
+ {"cfg_rxd_wait", 83, 80}, // cfg_rxd_wait[3..0]
+ {"cfg_cdr_limit", 79, 79}, // cfg_cdr_limit
+ {"cfg_cdr_rotate", 78, 78}, // cfg_cdr_rotate
+ {"cfg_cdr_bw_ctl", 77, 76}, // cfg_cdr_bw_ctl[1..0]
+ {"cfg_cdr_trunc", 75, 74}, // cfg_cdr_trunc[1..0]
+ {"cfg_cdr_rqoffs", 73, 64}, // cfg_cdr_rqoffs[9..0]
+ {"cfg_cdr_inc2", 63, 58}, // cfg_cdr_inc2[5..0]
+ {"cfg_cdr_inc1", 57, 52}, // cfg_cdr_inc1[5..0]
+ {"fusopt_voter_sync", 51, 51}, // fusopt_voter_sync
+ {"rndt", 50, 50}, // rndt
+ {"hcya", 49, 49}, // hcya
+ {"hyst", 48, 48}, // hyst
+ {"idle_dac", 47, 45}, // idle_dac[2..0]
+ {"bg_ref_sel", 44, 44}, // bg_ref_sel
+ {"ic50dac", 43, 39}, // ic50dac[4..0]
+ {"ir50dac", 38, 34}, // ir50dac[4..0]
+ {"tx_rout_comp_bypass", 33, 33}, // tx_rout_comp_bypass
+ {"tx_rout_comp_value", 32, 29}, // tx_rout_comp_value[3..0]
+ {"tx_res_offset", 28, 25}, // tx_res_offset[3..0]
+ {"rx_rout_comp_bypass", 24, 24}, // rx_rout_comp_bypass
+ {"rx_rout_comp_value", 23, 20}, // rx_rout_comp_value[3..0]
+ {"rx_res_offset", 19, 16}, // rx_res_offset[3..0]
+ {"rx_cap_gen2", 15, 12}, // rx_cap_gen2[3..0]
+ {"rx_eq_gen2", 11, 8}, // rx_eq_gen2[3..0]
+ {"rx_cap_gen1", 7, 4}, // rx_cap_gen1[3..0]
+ {"rx_eq_gen1", 3, 0}, // rx_eq_gen1[3..0]
+ {NULL, -1, -1}
+};
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-qlm-tables.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-qlm.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-qlm.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-qlm.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,741 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2011 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Helper utilities for QLM.
+ *
+ * <hr>$Revision: 70129 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#include <asm/octeon/cvmx-helper-jtag.h>
+#include <asm/octeon/cvmx-qlm.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-sriox-defs.h>
+#include <asm/octeon/cvmx-sriomaintx-defs.h>
+#include <asm/octeon/cvmx-pciercx-defs.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "executive-config.h"
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-helper-jtag.h"
+#include "cvmx-qlm.h"
+#else
+#include "cvmx.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-helper-jtag.h"
+#include "cvmx-qlm.h"
+#endif
+
+#endif
+
+/**
+ * The JTAG chain for CN52XX and CN56XX is 4 * 268 bits long, or 1072.
+ * CN5XXX full chain shift is:
+ * new data => lane 3 => lane 2 => lane 1 => lane 0 => data out
+ * The JTAG chain for CN63XX is 4 * 300 bits long, or 1200.
+ * The JTAG chain for CN68XX is 4 * 304 bits long, or 1216.
+ * The JTAG chain for CN66XX/CN61XX/CNF71XX is 4 * 304 bits long, or 1216.
+ * CN6XXX full chain shift is:
+ * new data => lane 0 => lane 1 => lane 2 => lane 3 => data out
+ * Shift LSB first, get LSB out
+ */
+extern const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn52xx[];
+extern const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn56xx[];
+extern const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn63xx[];
+extern const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn66xx[];
+extern const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn68xx[];
+
+#define CVMX_QLM_JTAG_UINT32 40
+#ifdef CVMX_BUILD_FOR_LINUX_HOST
+extern void octeon_remote_read_mem(void *buffer, uint64_t physical_address, int length);
+extern void octeon_remote_write_mem(uint64_t physical_address, const void *buffer, int length);
+uint32_t __cvmx_qlm_jtag_xor_ref[5][CVMX_QLM_JTAG_UINT32];
+#else
+typedef uint32_t qlm_jtag_uint32_t[CVMX_QLM_JTAG_UINT32];
+CVMX_SHARED qlm_jtag_uint32_t *__cvmx_qlm_jtag_xor_ref;
+#endif
+
+
+/**
+ * Return the number of QLMs supported by the chip
+ *
+ * @return Number of QLMs
+ */
+int cvmx_qlm_get_num(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return 5;
+ else if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+ return 3;
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return 3;
+ else if (OCTEON_IS_MODEL(OCTEON_CN61XX))
+ return 3;
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
+ return 4;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ return 2;
+
+ //cvmx_dprintf("Warning: cvmx_qlm_get_num: This chip does not have QLMs\n");
+ return 0;
+}
+
+/**
+ * Return the qlm number based on the interface
+ *
+ * @param interface Interface to look up
+ */
+int cvmx_qlm_interface(int interface)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
+ return (interface == 0) ? 2 : 0;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)) {
+ return 2 - interface;
+ } else {
+ /* Must be CN68XX */
+ switch(interface) {
+ case 1:
+ return 0;
+ default:
+ return interface;
+ }
+ }
+}
+
+/**
+ * Return number of lanes for a given qlm
+ *
+ * @return Number of lanes
+ */
+int cvmx_qlm_get_lanes(int qlm)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN61XX) && qlm == 1)
+ return 2;
+ else if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ return 2;
+
+ return 4;
+}
+
+/**
+ * Get the QLM JTAG field table for the current Octeon model.
+ *
+ * @return __cvmx_qlm_jtag_field_t table, or NULL on unsupported chips
+ */
+const __cvmx_qlm_jtag_field_t *cvmx_qlm_jtag_get_field(void)
+{
+ /* Figure out which JTAG chain description we're using */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return __cvmx_qlm_jtag_field_cn68xx;
+ else if (OCTEON_IS_MODEL(OCTEON_CN66XX)
+ || OCTEON_IS_MODEL(OCTEON_CN61XX)
+ || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ return __cvmx_qlm_jtag_field_cn66xx;
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return __cvmx_qlm_jtag_field_cn63xx;
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
+ return __cvmx_qlm_jtag_field_cn56xx;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+ return __cvmx_qlm_jtag_field_cn52xx;
+ else
+ {
+ //cvmx_dprintf("cvmx_qlm_jtag_get_field: Needs update for this chip\n");
+ return NULL;
+ }
+}
+
+/**
+ * Get the QLM JTAG chain length (in bits) by walking the field table
+ * for the current Octeon model.
+ *
+ * @return Chain length in bits
+ */
+int cvmx_qlm_jtag_get_length(void)
+{
+ const __cvmx_qlm_jtag_field_t *qlm_ptr = cvmx_qlm_jtag_get_field();
+ int length = 0;
+
+ /* Figure out how many bits are in the JTAG chain */
+ while (qlm_ptr != NULL && qlm_ptr->name)
+ {
+ if (qlm_ptr->stop_bit > length)
+ length = qlm_ptr->stop_bit + 1;
+ qlm_ptr++;
+ }
+ return length;
+}
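As a concrete check of the loop above (a sketch using only values from the CN63XX field table earlier in this commit; the print helper is an assumption): the largest stop_bit for CN63XX is 299 (prbs_err_cnt), so the function returns 300, and a four-lane chain is 1200 bits, matching the chain-length comment at the top of this file.

    /* Sketch: derive the full CN63XX chain length from the helpers above. */
    static void show_chain_length(void)
    {
        int length = cvmx_qlm_jtag_get_length();  /* 299 + 1 = 300 on CN63XX */
        int lanes = cvmx_qlm_get_lanes(0);        /* 4 lanes on CN63XX */
        cvmx_dprintf("QLM JTAG chain: %d bits\n", length * lanes); /* 1200 */
    }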
+
+/**
+ * Initialize the QLM layer
+ */
+void cvmx_qlm_init(void)
+{
+ int qlm;
+ int qlm_jtag_length;
+ char *qlm_jtag_name = "cvmx_qlm_jtag";
+ int qlm_jtag_size = CVMX_QLM_JTAG_UINT32 * 8 * 4;
+ static uint64_t qlm_base = 0;
+ const cvmx_bootmem_named_block_desc_t *desc;
+
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+ /* Skip actual JTAG accesses on simulator */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ return;
+#endif
+
+ qlm_jtag_length = cvmx_qlm_jtag_get_length();
+
+ if (4 * qlm_jtag_length > (int)sizeof(__cvmx_qlm_jtag_xor_ref[0]) * 8)
+ {
+ cvmx_dprintf("ERROR: cvmx_qlm_init: JTAG chain larger than XOR ref size\n");
+ return;
+ }
+
+ /* No need to initialize the initial JTAG state if cvmx_qlm_jtag
+ named block is already created. */
+ if ((desc = cvmx_bootmem_find_named_block(qlm_jtag_name)) != NULL)
+ {
+#ifdef CVMX_BUILD_FOR_LINUX_HOST
+ char buffer[qlm_jtag_size];
+
+ octeon_remote_read_mem(buffer, desc->base_addr, qlm_jtag_size);
+ memcpy(__cvmx_qlm_jtag_xor_ref, buffer, qlm_jtag_size);
+#else
+ __cvmx_qlm_jtag_xor_ref = cvmx_phys_to_ptr(desc->base_addr);
+#endif
+ /* Initialize the internal JTAG */
+ cvmx_helper_qlm_jtag_init();
+ return;
+ }
+
+ /* Create named block to store the initial JTAG state. */
+ qlm_base = cvmx_bootmem_phy_named_block_alloc(qlm_jtag_size, 0, 0, 128, qlm_jtag_name, CVMX_BOOTMEM_FLAG_END_ALLOC);
+
+ if (qlm_base == -1ull)
+ {
+ cvmx_dprintf("ERROR: cvmx_qlm_init: Error in creating %s named block\n", qlm_jtag_name);
+ return;
+ }
+
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+ __cvmx_qlm_jtag_xor_ref = cvmx_phys_to_ptr(qlm_base);
+#endif
+ memset(__cvmx_qlm_jtag_xor_ref, 0, qlm_jtag_size);
+
+ /* Initialize the internal JTAG */
+ cvmx_helper_qlm_jtag_init();
+
+ /* Read the XOR defaults for the JTAG chain */
+ for (qlm=0; qlm<cvmx_qlm_get_num(); qlm++)
+ {
+ int i;
+ /* Capture the reset defaults */
+ cvmx_helper_qlm_jtag_capture(qlm);
+ /* Save the reset defaults. This will shift out too much data, but
+ the extra zeros don't hurt anything */
+ for (i=0; i<CVMX_QLM_JTAG_UINT32; i++)
+ __cvmx_qlm_jtag_xor_ref[qlm][i] = cvmx_helper_qlm_jtag_shift(qlm, 32, 0);
+ }
+
+#ifdef CVMX_BUILD_FOR_LINUX_HOST
+ /* Update the initial state for oct-remote utils. */
+ {
+ char buffer[qlm_jtag_size];
+
+ memcpy(buffer, &__cvmx_qlm_jtag_xor_ref, qlm_jtag_size);
+ octeon_remote_write_mem(qlm_base, buffer, qlm_jtag_size);
+ }
+#endif
+
+ /* Apply speed tweak as a workaround for errata G-16094. */
+ __cvmx_qlm_speed_tweak();
+ __cvmx_qlm_pcie_idle_dac_tweak();
+}
+
+/**
+ * Lookup the bit information for a JTAG field name
+ *
+ * @param name Name to lookup
+ *
+ * @return Field info, or NULL on failure
+ */
+static const __cvmx_qlm_jtag_field_t *__cvmx_qlm_lookup_field(const char *name)
+{
+ const __cvmx_qlm_jtag_field_t *ptr = cvmx_qlm_jtag_get_field();
+ while (ptr->name)
+ {
+ if (strcmp(name, ptr->name) == 0)
+ return ptr;
+ ptr++;
+ }
+ cvmx_dprintf("__cvmx_qlm_lookup_field: Illegal field name %s\n", name);
+ return NULL;
+}
+
+/**
+ * Get a field in a QLM JTAG chain
+ *
+ * @param qlm QLM to get
+ * @param lane Lane in QLM to get
+ * @param name String name of field
+ *
+ * @return JTAG field value
+ */
+uint64_t cvmx_qlm_jtag_get(int qlm, int lane, const char *name)
+{
+ const __cvmx_qlm_jtag_field_t *field = __cvmx_qlm_lookup_field(name);
+ int qlm_jtag_length = cvmx_qlm_jtag_get_length();
+ int num_lanes = cvmx_qlm_get_lanes(qlm);
+
+ if (!field)
+ return 0;
+
+ /* Capture the current settings */
+ cvmx_helper_qlm_jtag_capture(qlm);
+ /* Shift past lanes we don't care about. CN6XXX shifts lane 3 first */
+ cvmx_helper_qlm_jtag_shift_zeros(qlm, qlm_jtag_length * (num_lanes-1-lane));
+ /* Shift to the start of the field */
+ cvmx_helper_qlm_jtag_shift_zeros(qlm, field->start_bit);
+ /* Shift out the value and return it */
+ return cvmx_helper_qlm_jtag_shift(qlm, field->stop_bit - field->start_bit + 1, 0);
+}
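A hedged usage sketch for the getter above, reading the 1-bit prbs_lock field defined in the CN63XX/CN66XX/CN68XX tables earlier in this commit; the wrapper itself is an assumption:

    /* Sketch: poll PRBS lock on lane 0 of QLM 0.  The field name comes
       from the JTAG field tables; cvmx_qlm_jtag_get() returns the raw
       field value, so a 1-bit field yields 0 or 1. */
    static int qlm0_prbs_locked(void)
    {
        return (int)cvmx_qlm_jtag_get(0, 0, "prbs_lock");
    }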
+
+/**
+ * Set a field in a QLM JTAG chain
+ *
+ * @param qlm QLM to set
+ * @param lane Lane in QLM to set, or -1 for all lanes
+ * @param name String name of field
+ * @param value Value of the field
+ */
+void cvmx_qlm_jtag_set(int qlm, int lane, const char *name, uint64_t value)
+{
+ int i, l;
+ uint32_t shift_values[CVMX_QLM_JTAG_UINT32];
+ int num_lanes = cvmx_qlm_get_lanes(qlm);
+ const __cvmx_qlm_jtag_field_t *field = __cvmx_qlm_lookup_field(name);
+ int qlm_jtag_length = cvmx_qlm_jtag_get_length();
+ int total_length = qlm_jtag_length * num_lanes;
+ int bits = 0;
+
+ if (!field)
+ return;
+
+ /* Get the current state */
+ cvmx_helper_qlm_jtag_capture(qlm);
+ for (i=0; i<CVMX_QLM_JTAG_UINT32; i++)
+ shift_values[i] = cvmx_helper_qlm_jtag_shift(qlm, 32, 0);
+
+ /* Put new data in our local array */
+ for (l=0; l<num_lanes; l++)
+ {
+ uint64_t new_value = value;
+ int bits; /* shadows the outer 'bits', which stays 0 for the full-chain shift below */
+ if ((l != lane) && (lane != -1))
+ continue;
+ for (bits = field->start_bit + (num_lanes-1-l)*qlm_jtag_length;
+ bits <= field->stop_bit + (num_lanes-1-l)*qlm_jtag_length;
+ bits++)
+ {
+ if (new_value & 1)
+ shift_values[bits/32] |= 1<<(bits&31);
+ else
+ shift_values[bits/32] &= ~(1<<(bits&31));
+ new_value>>=1;
+ }
+ }
+
+ /* Shift out data and xor with reference */
+ while (bits < total_length)
+ {
+ uint32_t shift = shift_values[bits/32] ^ __cvmx_qlm_jtag_xor_ref[qlm][bits/32];
+ int width = total_length - bits;
+ if (width > 32)
+ width = 32;
+ cvmx_helper_qlm_jtag_shift(qlm, width, shift);
+ bits += 32;
+ }
+
+ /* Update the new data */
+ cvmx_helper_qlm_jtag_update(qlm);
+ /* Always give the QLM 1ms to settle after every update. This may not
+ always be needed, but some of the options make significant
+ electrical changes */
+ cvmx_wait_usec(1000);
+}
+
+/**
+ * Errata G-16094: QLM Gen2 Equalizer Default Setting Change.
+ * CN68XX pass 1.x and CN66XX pass 1.x QLM tweak. This function tweaks the
+ * JTAG settings for the QLMs to run better at 5 and 6.25 GHz.
+ */
+void __cvmx_qlm_speed_tweak(void)
+{
+ cvmx_mio_qlmx_cfg_t qlm_cfg;
+ int num_qlms = 0;
+ int qlm;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X))
+ num_qlms = 5;
+ else if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X))
+ num_qlms = 3;
+ else
+ return;
+
+ /* Loop through the QLMs */
+ for (qlm = 0; qlm < num_qlms; qlm++)
+ {
+ /* Read the QLM speed */
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
+
+ /* If the QLM is at 6.25 GHz or 5 GHz then program JTAG */
+ if ((qlm_cfg.s.qlm_spd == 5) || (qlm_cfg.s.qlm_spd == 12) ||
+ (qlm_cfg.s.qlm_spd == 0) || (qlm_cfg.s.qlm_spd == 6) ||
+ (qlm_cfg.s.qlm_spd == 11))
+ {
+ cvmx_qlm_jtag_set(qlm, -1, "rx_cap_gen2", 0x1);
+ cvmx_qlm_jtag_set(qlm, -1, "rx_eq_gen2", 0x8);
+ }
+ }
+}
+
+/**
+ * Errata G-16174: QLM Gen2 PCIe IDLE DAC change.
+ * CN68XX pass 1.x, CN66XX pass 1.x and CN63XX pass 1.0-2.2 QLM tweak.
+ * This function tweaks the JTAG settings for the QLMs so that PCIe runs better.
+ */
+void __cvmx_qlm_pcie_idle_dac_tweak(void)
+{
+ int num_qlms = 0;
+ int qlm;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X))
+ num_qlms = 5;
+ else if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X))
+ num_qlms = 3;
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_X))
+ num_qlms = 3;
+ else
+ return;
+
+ /* Loop through the QLMs */
+ for (qlm = 0; qlm < num_qlms; qlm++)
+ cvmx_qlm_jtag_set(qlm, -1, "idle_dac", 0x2);
+}
+
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+/**
+ * Get the speed (Gbaud) of the QLM, reported in MHz.
+ *
+ * @param qlm QLM to examine
+ *
+ * @return Speed in MHz
+ */
+int cvmx_qlm_get_gbaud_mhz(int qlm)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ {
+ if (qlm == 2)
+ {
+ cvmx_gmxx_inf_mode_t inf_mode;
+ inf_mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(0));
+ switch (inf_mode.s.speed)
+ {
+ case 0: return 5000; /* 5 Gbaud */
+ case 1: return 2500; /* 2.5 Gbaud */
+ case 2: return 2500; /* 2.5 Gbaud */
+ case 3: return 1250; /* 1.25 Gbaud */
+ case 4: return 1250; /* 1.25 Gbaud */
+ case 5: return 6250; /* 6.25 Gbaud */
+ case 6: return 5000; /* 5 Gbaud */
+ case 7: return 2500; /* 2.5 Gbaud */
+ case 8: return 3125; /* 3.125 Gbaud */
+ case 9: return 2500; /* 2.5 Gbaud */
+ case 10: return 1250; /* 1.25 Gbaud */
+ case 11: return 5000; /* 5 Gbaud */
+ case 12: return 6250; /* 6.25 Gbaud */
+ case 13: return 3750; /* 3.75 Gbaud */
+ case 14: return 3125; /* 3.125 Gbaud */
+ default: return 0; /* Disabled */
+ }
+ }
+ else
+ {
+ cvmx_sriox_status_reg_t status_reg;
+ status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(qlm));
+ if (status_reg.s.srio)
+ {
+ cvmx_sriomaintx_port_0_ctl2_t sriomaintx_port_0_ctl2;
+ sriomaintx_port_0_ctl2.u32 = cvmx_read_csr(CVMX_SRIOMAINTX_PORT_0_CTL2(qlm));
+ switch (sriomaintx_port_0_ctl2.s.sel_baud)
+ {
+ case 1: return 1250; /* 1.25 Gbaud */
+ case 2: return 2500; /* 2.5 Gbaud */
+ case 3: return 3125; /* 3.125 Gbaud */
+ case 4: return 5000; /* 5 Gbaud */
+ case 5: return 6250; /* 6.250 Gbaud */
+ default: return 0; /* Disabled */
+ }
+ }
+ else
+ {
+ cvmx_pciercx_cfg032_t pciercx_cfg032;
+ pciercx_cfg032.u32 = cvmx_read_csr(CVMX_PCIERCX_CFG032(qlm));
+ switch (pciercx_cfg032.s.ls)
+ {
+ case 1:
+ return 2500;
+ case 2:
+ return 5000;
+ case 4:
+ return 8000;
+ default:
+ {
+ cvmx_mio_rst_boot_t mio_rst_boot;
+ mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+ if ((qlm == 0) && mio_rst_boot.s.qlm0_spd == 0xf)
+ return 0;
+ if ((qlm == 1) && mio_rst_boot.s.qlm1_spd == 0xf)
+ return 0;
+ return 5000; /* Best guess I can make */
+ }
+ }
+ }
+ }
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ {
+ cvmx_mio_qlmx_cfg_t qlm_cfg;
+
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
+ switch (qlm_cfg.s.qlm_spd)
+ {
+ case 0: return 5000; /* 5 Gbaud */
+ case 1: return 2500; /* 2.5 Gbaud */
+ case 2: return 2500; /* 2.5 Gbaud */
+ case 3: return 1250; /* 1.25 Gbaud */
+ case 4: return 1250; /* 1.25 Gbaud */
+ case 5: return 6250; /* 6.25 Gbaud */
+ case 6: return 5000; /* 5 Gbaud */
+ case 7: return 2500; /* 2.5 Gbaud */
+ case 8: return 3125; /* 3.125 Gbaud */
+ case 9: return 2500; /* 2.5 Gbaud */
+ case 10: return 1250; /* 1.25 Gbaud */
+ case 11: return 5000; /* 5 Gbaud */
+ case 12: return 6250; /* 6.25 Gbaud */
+ case 13: return 3750; /* 3.75 Gbaud */
+ case 14: return 3125; /* 3.125 Gbaud */
+ default: return 0; /* Disabled */
+ }
+ }
+ return 0;
+}
+#endif
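A small sketch of consuming the return value above; the helper is an assumption and only restates that the function reports Mbaud-as-MHz (e.g. 6250 for 6.25 Gbaud):

    /* Sketch: print a QLM's speed in Gbaud using the helper above. */
    static void print_qlm_speed(int qlm)
    {
        int mbaud = cvmx_qlm_get_gbaud_mhz(qlm);  /* e.g. 6250 */
        if (mbaud == 0)
            cvmx_dprintf("QLM%d: disabled\n", qlm);
        else
            cvmx_dprintf("QLM%d: %d.%03d Gbaud\n", qlm,
                         mbaud / 1000, mbaud % 1000);
    }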
+
+/*
+ * Read QLM and return status based on CN66XX.
+ * @return Return 1 if QLM is SGMII
+ * 2 if QLM is XAUI
+ * 3 if QLM is PCIe gen2 / gen1
+ * 4 if QLM is SRIO 1x4 short / long
+ * 5 if QLM is SRIO 2x2 short / long
+ * 6 if QLM is SRIO 4x1 short / long
+ * 7 if QLM is PCIe 1x2 gen2 / gen1
+ * 8 if QLM is PCIe 2x1 gen2 / gen1
+ * 9 if QLM is ILK
+ * 10 if QLM is RXAUI
+ * -1 otherwise
+ */
+int cvmx_qlm_get_status(int qlm)
+{
+ cvmx_mio_qlmx_cfg_t qlmx_cfg;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlmx_cfg.s.qlm_spd == 15)
+ return -1;
+
+ switch (qlmx_cfg.s.qlm_cfg)
+ {
+ case 0: /* PCIE */
+ return 3;
+ case 1: /* ILK */
+ return 9;
+ case 2: /* SGMII */
+ return 1;
+ case 3: /* XAUI */
+ return 2;
+ case 7: /* RXAUI */
+ return 10;
+ default: return -1;
+ }
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+ {
+ qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlmx_cfg.s.qlm_spd == 15)
+ return -1;
+
+ switch (qlmx_cfg.s.qlm_cfg)
+ {
+ case 0x9: /* SGMII */
+ return 1;
+ case 0xb: /* XAUI */
+ return 2;
+ case 0x0: /* PCIE gen2 */
+ case 0x8: /* PCIE gen2 (alias) */
+ case 0x2: /* PCIE gen1 */
+ case 0xa: /* PCIE gen1 (alias) */
+ return 3;
+ case 0x1: /* SRIO 1x4 short */
+ case 0x3: /* SRIO 1x4 long */
+ return 4;
+ case 0x4: /* SRIO 2x2 short */
+ case 0x6: /* SRIO 2x2 long */
+ return 5;
+ case 0x5: /* SRIO 4x1 short */
+ case 0x7: /* SRIO 4x1 long */
+ if (!OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
+ return 6;
+ /* CN66XX pass 1.0 does not support SRIO 4x1; fall through */
+ default:
+ return -1;
+ }
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ {
+ cvmx_sriox_status_reg_t status_reg;
+ /* For now skip qlm2 */
+ if (qlm == 2)
+ {
+ cvmx_gmxx_inf_mode_t inf_mode;
+ inf_mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(0));
+ if (inf_mode.s.speed == 15)
+ return -1;
+ else if(inf_mode.s.mode == 0)
+ return 1;
+ else
+ return 2;
+ }
+ status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(qlm));
+ if (status_reg.s.srio)
+ return 4;
+ else
+ return 3;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN61XX))
+ {
+ qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlmx_cfg.s.qlm_spd == 15)
+ return -1;
+
+ switch(qlm)
+ {
+ case 0:
+ switch (qlmx_cfg.s.qlm_cfg)
+ {
+ case 0: /* PCIe 1x4 gen2 / gen1 */
+ return 3;
+ case 2: /* SGMII */
+ return 1;
+ case 3: /* XAUI */
+ return 2;
+ default: return -1;
+ }
+ break;
+ case 1:
+ switch (qlmx_cfg.s.qlm_cfg)
+ {
+ case 0: /* PCIe 1x2 gen2 / gen1 */
+ return 7;
+ case 1: /* PCIe 2x1 gen2 / gen1 */
+ return 8;
+ default: return -1;
+ }
+ break;
+ case 2:
+ switch (qlmx_cfg.s.qlm_cfg)
+ {
+ case 2: /* SGMII */
+ return 1;
+ case 3: /* XAUI */
+ return 2;
+ default: return -1;
+ }
+ break;
+ }
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ {
+ qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlmx_cfg.s.qlm_spd == 15)
+ return -1;
+
+ switch(qlm)
+ {
+ case 0:
+ if (qlmx_cfg.s.qlm_cfg == 2) /* SGMII */
+ return 1;
+ break;
+ case 1:
+ switch (qlmx_cfg.s.qlm_cfg)
+ {
+ case 0: /* PCIe 1x2 gen2 / gen1 */
+ return 7;
+ case 1: /* PCIe 2x1 gen2 / gen1 */
+ return 8;
+ default: return -1;
+ }
+ break;
+ }
+ }
+ return -1;
+}
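A minimal decoding sketch for the status codes returned above; the mapping is transcribed from the function's comment block, while the helper itself is an assumption:

    /* Sketch: map cvmx_qlm_get_status() codes to human-readable names. */
    static const char *qlm_status_name(int status)
    {
        switch (status)
        {
            case 1: return "SGMII";
            case 2: return "XAUI";
            case 3: return "PCIe gen2 / gen1";
            case 4: return "SRIO 1x4";
            case 5: return "SRIO 2x2";
            case 6: return "SRIO 4x1";
            case 7: return "PCIe 1x2";
            case 8: return "PCIe 2x1";
            case 9: return "ILK";
            case 10: return "RXAUI";
            default: return "disabled / unknown";
        }
    }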
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-qlm.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-qlm.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-qlm.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-qlm.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,166 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2011 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Helper utilities for QLM.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+
+#ifndef __CVMX_QLM_H__
+#define __CVMX_QLM_H__
+
+#include "cvmx.h"
+
+typedef struct
+{
+ const char *name;
+ int stop_bit;
+ int start_bit;
+} __cvmx_qlm_jtag_field_t;
+
+/**
+ * Return the number of QLMs supported by the chip
+ *
+ * @return Number of QLMs
+ */
+extern int cvmx_qlm_get_num(void);
+
+/**
+ * Return the qlm number based on the interface
+ *
+ * @param interface Interface to look up
+ */
+extern int cvmx_qlm_interface(int interface);
+
+/**
+ * Return number of lanes for a given qlm
+ *
+ * @return Number of lanes
+ */
+extern int cvmx_qlm_get_lanes(int qlm);
+
+/**
+ * Get the QLM JTAG field table for the current Octeon model.
+ *
+ * @return __cvmx_qlm_jtag_field_t table, or NULL on unsupported chips
+ */
+extern const __cvmx_qlm_jtag_field_t *cvmx_qlm_jtag_get_field(void);
+
+/**
+ * Get the QLM JTAG chain length (in bits) by walking the field table
+ * for the current Octeon model.
+ *
+ * @return Chain length in bits
+ */
+extern int cvmx_qlm_jtag_get_length(void);
+
+/**
+ * Initialize the QLM layer
+ */
+extern void cvmx_qlm_init(void);
+
+/**
+ * Get a field in a QLM JTAG chain
+ *
+ * @param qlm QLM to get
+ * @param lane Lane in QLM to get
+ * @param name String name of field
+ *
+ * @return JTAG field value
+ */
+extern uint64_t cvmx_qlm_jtag_get(int qlm, int lane, const char *name);
+
+/**
+ * Set a field in a QLM JTAG chain
+ *
+ * @param qlm QLM to set
+ * @param lane Lane in QLM to set, or -1 for all lanes
+ * @param name String name of field
+ * @param value Value of the field
+ */
+extern void cvmx_qlm_jtag_set(int qlm, int lane, const char *name, uint64_t value);
+
+/**
+ * Errata G-16094: QLM Gen2 Equalizer Default Setting Change.
+ * CN68XX pass 1.x and CN66XX pass 1.x QLM tweak. This function tweaks the
+ * JTAG settings for the QLMs to run better at 5 and 6.25 GHz.
+ */
+extern void __cvmx_qlm_speed_tweak(void);
+
+/**
+ * Errata G-16174: QLM Gen2 PCIe IDLE DAC change.
+ * CN68XX pass 1.x, CN66XX pass 1.x and CN63XX pass 1.0-2.2 QLM tweak.
+ * This function tweaks the JTAG settings for the QLMs so that PCIe runs better.
+ */
+extern void __cvmx_qlm_pcie_idle_dac_tweak(void);
+
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+/**
+ * Get the speed (Gbaud) of the QLM, reported in MHz.
+ *
+ * @param qlm QLM to examine
+ *
+ * @return Speed in MHz
+ */
+extern int cvmx_qlm_get_gbaud_mhz(int qlm);
+#endif
+
+/*
+ * Read QLM and return status based on CN66XX.
+ * @return Return 1 if QLM is SGMII
+ * 2 if QLM is XAUI
+ * 3 if QLM is PCIe gen2 / gen1
+ * 4 if QLM is SRIO 1x4 short / long
+ * 5 if QLM is SRIO 2x2 short / long
+ * 6 if QLM is SRIO 4x1 short / long
+ * 7 if QLM is PCIe 1x2 gen2 / gen1
+ * 8 if QLM is PCIe 2x1 gen2 / gen1
+ * 9 if QLM is ILK
+ * 10 if QLM is RXAUI
+ * -1 otherwise
+ */
+extern int cvmx_qlm_get_status(int qlm);
+
+#endif /* __CVMX_QLM_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-qlm.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-rad-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-rad-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-rad-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1076 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-rad-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon rad.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_RAD_DEFS_H__
+#define __CVMX_RAD_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_MEM_DEBUG0 CVMX_RAD_MEM_DEBUG0_FUNC()
+static inline uint64_t CVMX_RAD_MEM_DEBUG0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_MEM_DEBUG0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070001000ull);
+}
+#else
+#define CVMX_RAD_MEM_DEBUG0 (CVMX_ADD_IO_SEG(0x0001180070001000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_MEM_DEBUG1 CVMX_RAD_MEM_DEBUG1_FUNC()
+static inline uint64_t CVMX_RAD_MEM_DEBUG1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_MEM_DEBUG1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070001008ull);
+}
+#else
+#define CVMX_RAD_MEM_DEBUG1 (CVMX_ADD_IO_SEG(0x0001180070001008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_MEM_DEBUG2 CVMX_RAD_MEM_DEBUG2_FUNC()
+static inline uint64_t CVMX_RAD_MEM_DEBUG2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_MEM_DEBUG2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070001010ull);
+}
+#else
+#define CVMX_RAD_MEM_DEBUG2 (CVMX_ADD_IO_SEG(0x0001180070001010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_BIST_RESULT CVMX_RAD_REG_BIST_RESULT_FUNC()
+static inline uint64_t CVMX_RAD_REG_BIST_RESULT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_BIST_RESULT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000080ull);
+}
+#else
+#define CVMX_RAD_REG_BIST_RESULT (CVMX_ADD_IO_SEG(0x0001180070000080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_CMD_BUF CVMX_RAD_REG_CMD_BUF_FUNC()
+static inline uint64_t CVMX_RAD_REG_CMD_BUF_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_CMD_BUF not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000008ull);
+}
+#else
+#define CVMX_RAD_REG_CMD_BUF (CVMX_ADD_IO_SEG(0x0001180070000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_CTL CVMX_RAD_REG_CTL_FUNC()
+static inline uint64_t CVMX_RAD_REG_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000000ull);
+}
+#else
+#define CVMX_RAD_REG_CTL (CVMX_ADD_IO_SEG(0x0001180070000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG0 CVMX_RAD_REG_DEBUG0_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000100ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG0 (CVMX_ADD_IO_SEG(0x0001180070000100ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG1 CVMX_RAD_REG_DEBUG1_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000108ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG1 (CVMX_ADD_IO_SEG(0x0001180070000108ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG10 CVMX_RAD_REG_DEBUG10_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG10_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG10 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000150ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG10 (CVMX_ADD_IO_SEG(0x0001180070000150ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG11 CVMX_RAD_REG_DEBUG11_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG11_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG11 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000158ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG11 (CVMX_ADD_IO_SEG(0x0001180070000158ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG12 CVMX_RAD_REG_DEBUG12_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG12_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG12 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000160ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG12 (CVMX_ADD_IO_SEG(0x0001180070000160ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG2 CVMX_RAD_REG_DEBUG2_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000110ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG2 (CVMX_ADD_IO_SEG(0x0001180070000110ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG3 CVMX_RAD_REG_DEBUG3_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000118ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG3 (CVMX_ADD_IO_SEG(0x0001180070000118ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG4 CVMX_RAD_REG_DEBUG4_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG4_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG4 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000120ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG4 (CVMX_ADD_IO_SEG(0x0001180070000120ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG5 CVMX_RAD_REG_DEBUG5_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG5_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG5 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000128ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG5 (CVMX_ADD_IO_SEG(0x0001180070000128ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG6 CVMX_RAD_REG_DEBUG6_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG6_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG6 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000130ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG6 (CVMX_ADD_IO_SEG(0x0001180070000130ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG7 CVMX_RAD_REG_DEBUG7_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG7_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG7 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000138ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG7 (CVMX_ADD_IO_SEG(0x0001180070000138ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG8 CVMX_RAD_REG_DEBUG8_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG8_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG8 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000140ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG8 (CVMX_ADD_IO_SEG(0x0001180070000140ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_DEBUG9 CVMX_RAD_REG_DEBUG9_FUNC()
+static inline uint64_t CVMX_RAD_REG_DEBUG9_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_DEBUG9 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000148ull);
+}
+#else
+#define CVMX_RAD_REG_DEBUG9 (CVMX_ADD_IO_SEG(0x0001180070000148ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_ERROR CVMX_RAD_REG_ERROR_FUNC()
+static inline uint64_t CVMX_RAD_REG_ERROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_ERROR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000088ull);
+}
+#else
+#define CVMX_RAD_REG_ERROR (CVMX_ADD_IO_SEG(0x0001180070000088ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_INT_MASK CVMX_RAD_REG_INT_MASK_FUNC()
+static inline uint64_t CVMX_RAD_REG_INT_MASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_INT_MASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000090ull);
+}
+#else
+#define CVMX_RAD_REG_INT_MASK (CVMX_ADD_IO_SEG(0x0001180070000090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_POLYNOMIAL CVMX_RAD_REG_POLYNOMIAL_FUNC()
+static inline uint64_t CVMX_RAD_REG_POLYNOMIAL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_POLYNOMIAL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000010ull);
+}
+#else
+#define CVMX_RAD_REG_POLYNOMIAL (CVMX_ADD_IO_SEG(0x0001180070000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RAD_REG_READ_IDX CVMX_RAD_REG_READ_IDX_FUNC()
+static inline uint64_t CVMX_RAD_REG_READ_IDX_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RAD_REG_READ_IDX not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180070000018ull);
+}
+#else
+#define CVMX_RAD_REG_READ_IDX (CVMX_ADD_IO_SEG(0x0001180070000018ull))
+#endif
+
+/**
+ * cvmx_rad_mem_debug0
+ *
+ * Notes:
+ * This CSR is a memory of 32 entries, and thus, the RAD_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. A read of any entry that has not been
+ * previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_rad_mem_debug0 {
+ uint64_t u64;
+ struct cvmx_rad_mem_debug0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t iword : 64; /**< IWord */
+#else
+ uint64_t iword : 64;
+#endif
+ } s;
+ struct cvmx_rad_mem_debug0_s cn52xx;
+ struct cvmx_rad_mem_debug0_s cn52xxp1;
+ struct cvmx_rad_mem_debug0_s cn56xx;
+ struct cvmx_rad_mem_debug0_s cn56xxp1;
+ struct cvmx_rad_mem_debug0_s cn61xx;
+ struct cvmx_rad_mem_debug0_s cn63xx;
+ struct cvmx_rad_mem_debug0_s cn63xxp1;
+ struct cvmx_rad_mem_debug0_s cn66xx;
+ struct cvmx_rad_mem_debug0_s cn68xx;
+ struct cvmx_rad_mem_debug0_s cn68xxp1;
+ struct cvmx_rad_mem_debug0_s cnf71xx;
+};
+typedef union cvmx_rad_mem_debug0 cvmx_rad_mem_debug0_t;
+
+/**
+ * cvmx_rad_mem_debug1
+ *
+ * Notes:
+ * This CSR is a memory of 256 entries, and thus, the RAD_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. A read of any entry that has not been
+ * previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_rad_mem_debug1 {
+ uint64_t u64;
+ struct cvmx_rad_mem_debug1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t p_dat : 64; /**< P data */
+#else
+ uint64_t p_dat : 64;
+#endif
+ } s;
+ struct cvmx_rad_mem_debug1_s cn52xx;
+ struct cvmx_rad_mem_debug1_s cn52xxp1;
+ struct cvmx_rad_mem_debug1_s cn56xx;
+ struct cvmx_rad_mem_debug1_s cn56xxp1;
+ struct cvmx_rad_mem_debug1_s cn61xx;
+ struct cvmx_rad_mem_debug1_s cn63xx;
+ struct cvmx_rad_mem_debug1_s cn63xxp1;
+ struct cvmx_rad_mem_debug1_s cn66xx;
+ struct cvmx_rad_mem_debug1_s cn68xx;
+ struct cvmx_rad_mem_debug1_s cn68xxp1;
+ struct cvmx_rad_mem_debug1_s cnf71xx;
+};
+typedef union cvmx_rad_mem_debug1 cvmx_rad_mem_debug1_t;
+
+/**
+ * cvmx_rad_mem_debug2
+ *
+ * Notes:
+ * This CSR is a memory of 256 entries, and thus, the RAD_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed. A read of any entry that has not been
+ * previously written is illegal and will result in unpredictable CSR read data.
+ */
+union cvmx_rad_mem_debug2 {
+ uint64_t u64;
+ struct cvmx_rad_mem_debug2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t q_dat : 64; /**< Q data */
+#else
+ uint64_t q_dat : 64;
+#endif
+ } s;
+ struct cvmx_rad_mem_debug2_s cn52xx;
+ struct cvmx_rad_mem_debug2_s cn52xxp1;
+ struct cvmx_rad_mem_debug2_s cn56xx;
+ struct cvmx_rad_mem_debug2_s cn56xxp1;
+ struct cvmx_rad_mem_debug2_s cn61xx;
+ struct cvmx_rad_mem_debug2_s cn63xx;
+ struct cvmx_rad_mem_debug2_s cn63xxp1;
+ struct cvmx_rad_mem_debug2_s cn66xx;
+ struct cvmx_rad_mem_debug2_s cn68xx;
+ struct cvmx_rad_mem_debug2_s cn68xxp1;
+ struct cvmx_rad_mem_debug2_s cnf71xx;
+};
+typedef union cvmx_rad_mem_debug2 cvmx_rad_mem_debug2_t;
+
+/**
+ * cvmx_rad_reg_bist_result
+ *
+ * Notes:
+ * Access to the internal BiST results
+ * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail).
+ */
+union cvmx_rad_reg_bist_result {
+ uint64_t u64;
+ struct cvmx_rad_reg_bist_result_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t sta : 1; /**< BiST result of the STA memories */
+ uint64_t ncb_oub : 1; /**< BiST result of the NCB_OUB memories */
+ uint64_t ncb_inb : 2; /**< BiST result of the NCB_INB memories */
+ uint64_t dat : 2; /**< BiST result of the DAT memories */
+#else
+ uint64_t dat : 2;
+ uint64_t ncb_inb : 2;
+ uint64_t ncb_oub : 1;
+ uint64_t sta : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_rad_reg_bist_result_s cn52xx;
+ struct cvmx_rad_reg_bist_result_s cn52xxp1;
+ struct cvmx_rad_reg_bist_result_s cn56xx;
+ struct cvmx_rad_reg_bist_result_s cn56xxp1;
+ struct cvmx_rad_reg_bist_result_s cn61xx;
+ struct cvmx_rad_reg_bist_result_s cn63xx;
+ struct cvmx_rad_reg_bist_result_s cn63xxp1;
+ struct cvmx_rad_reg_bist_result_s cn66xx;
+ struct cvmx_rad_reg_bist_result_s cn68xx;
+ struct cvmx_rad_reg_bist_result_s cn68xxp1;
+ struct cvmx_rad_reg_bist_result_s cnf71xx;
+};
+typedef union cvmx_rad_reg_bist_result cvmx_rad_reg_bist_result_t;
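/* A minimal sketch, not from the SDK sources: decode RAD_REG_BIST_RESULT per
 * the note above (per bit, 0=pass and 1=fail). rad_bist_ok() is a hypothetical
 * helper built on the cvmx_read_csr()/cvmx_dprintf() accessors used elsewhere
 * in this commit. */
static int rad_bist_ok(void)
{
    cvmx_rad_reg_bist_result_t bist;

    bist.u64 = cvmx_read_csr(CVMX_RAD_REG_BIST_RESULT);
    if (bist.s.sta)
        cvmx_dprintf("RAD BiST: STA memory failed\n");
    if (bist.s.ncb_oub)
        cvmx_dprintf("RAD BiST: NCB_OUB memory failed\n");
    if (bist.s.ncb_inb)
        cvmx_dprintf("RAD BiST: NCB_INB memories failed, mask 0x%x\n",
                     (unsigned int)bist.s.ncb_inb);
    if (bist.s.dat)
        cvmx_dprintf("RAD BiST: DAT memories failed, mask 0x%x\n",
                     (unsigned int)bist.s.dat);
    /* The low 6 bits cover DAT[1:0], NCB_INB[1:0], NCB_OUB and STA. */
    return (bist.u64 & 0x3full) == 0;
}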
+
+/**
+ * cvmx_rad_reg_cmd_buf
+ *
+ * Notes:
+ * Sets the command buffer parameters
+ * The size of the command buffer segments is measured in uint64s. The pool specifies 1 of 8 free
+ * lists to be used when freeing command buffer segments. The PTR field is overwritten with the next
+ * pointer each time that the command buffer segment is exhausted.
+ */
+union cvmx_rad_reg_cmd_buf {
+ uint64_t u64;
+ struct cvmx_rad_reg_cmd_buf_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t dwb : 9; /**< Number of DontWriteBacks */
+ uint64_t pool : 3; /**< Free list used to free command buffer segments */
+ uint64_t size : 13; /**< Number of uint64s per command buffer segment */
+ uint64_t ptr : 33; /**< Initial command buffer pointer[39:7] (128B-aligned) */
+#else
+ uint64_t ptr : 33;
+ uint64_t size : 13;
+ uint64_t pool : 3;
+ uint64_t dwb : 9;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } s;
+ struct cvmx_rad_reg_cmd_buf_s cn52xx;
+ struct cvmx_rad_reg_cmd_buf_s cn52xxp1;
+ struct cvmx_rad_reg_cmd_buf_s cn56xx;
+ struct cvmx_rad_reg_cmd_buf_s cn56xxp1;
+ struct cvmx_rad_reg_cmd_buf_s cn61xx;
+ struct cvmx_rad_reg_cmd_buf_s cn63xx;
+ struct cvmx_rad_reg_cmd_buf_s cn63xxp1;
+ struct cvmx_rad_reg_cmd_buf_s cn66xx;
+ struct cvmx_rad_reg_cmd_buf_s cn68xx;
+ struct cvmx_rad_reg_cmd_buf_s cn68xxp1;
+ struct cvmx_rad_reg_cmd_buf_s cnf71xx;
+};
+typedef union cvmx_rad_reg_cmd_buf cvmx_rad_reg_cmd_buf_t;
+
+/**
+ * cvmx_rad_reg_ctl
+ *
+ * Notes:
+ * MAX_READ is a throttle to control NCB usage. Values >8 are illegal.
+ *
+ */
+union cvmx_rad_reg_ctl {
+ uint64_t u64;
+ struct cvmx_rad_reg_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t max_read : 4; /**< Maximum number of outstanding data read commands */
+ uint64_t store_le : 1; /**< Force STORE0 byte write address to little endian */
+ uint64_t reset : 1; /**< Reset oneshot pulse (lasts for 4 cycles) */
+#else
+ uint64_t reset : 1;
+ uint64_t store_le : 1;
+ uint64_t max_read : 4;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_rad_reg_ctl_s cn52xx;
+ struct cvmx_rad_reg_ctl_s cn52xxp1;
+ struct cvmx_rad_reg_ctl_s cn56xx;
+ struct cvmx_rad_reg_ctl_s cn56xxp1;
+ struct cvmx_rad_reg_ctl_s cn61xx;
+ struct cvmx_rad_reg_ctl_s cn63xx;
+ struct cvmx_rad_reg_ctl_s cn63xxp1;
+ struct cvmx_rad_reg_ctl_s cn66xx;
+ struct cvmx_rad_reg_ctl_s cn68xx;
+ struct cvmx_rad_reg_ctl_s cn68xxp1;
+ struct cvmx_rad_reg_ctl_s cnf71xx;
+};
+typedef union cvmx_rad_reg_ctl cvmx_rad_reg_ctl_t;
+
+/**
+ * cvmx_rad_reg_debug0
+ */
+union cvmx_rad_reg_debug0 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_57_63 : 7;
+ uint64_t loop : 25; /**< Loop offset */
+ uint64_t reserved_22_31 : 10;
+ uint64_t iridx : 6; /**< IWords read index */
+ uint64_t reserved_14_15 : 2;
+ uint64_t iwidx : 6; /**< IWords write index */
+ uint64_t owordqv : 1; /**< Valid for OWORDQ */
+ uint64_t owordpv : 1; /**< Valid for OWORDP */
+ uint64_t commit : 1; /**< Waiting for write commit */
+ uint64_t state : 5; /**< Main state */
+#else
+ uint64_t state : 5;
+ uint64_t commit : 1;
+ uint64_t owordpv : 1;
+ uint64_t owordqv : 1;
+ uint64_t iwidx : 6;
+ uint64_t reserved_14_15 : 2;
+ uint64_t iridx : 6;
+ uint64_t reserved_22_31 : 10;
+ uint64_t loop : 25;
+ uint64_t reserved_57_63 : 7;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug0_s cn52xx;
+ struct cvmx_rad_reg_debug0_s cn52xxp1;
+ struct cvmx_rad_reg_debug0_s cn56xx;
+ struct cvmx_rad_reg_debug0_s cn56xxp1;
+ struct cvmx_rad_reg_debug0_s cn61xx;
+ struct cvmx_rad_reg_debug0_s cn63xx;
+ struct cvmx_rad_reg_debug0_s cn63xxp1;
+ struct cvmx_rad_reg_debug0_s cn66xx;
+ struct cvmx_rad_reg_debug0_s cn68xx;
+ struct cvmx_rad_reg_debug0_s cn68xxp1;
+ struct cvmx_rad_reg_debug0_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug0 cvmx_rad_reg_debug0_t;
+
+/**
+ * cvmx_rad_reg_debug1
+ */
+union cvmx_rad_reg_debug1 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cword : 64; /**< CWord */
+#else
+ uint64_t cword : 64;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug1_s cn52xx;
+ struct cvmx_rad_reg_debug1_s cn52xxp1;
+ struct cvmx_rad_reg_debug1_s cn56xx;
+ struct cvmx_rad_reg_debug1_s cn56xxp1;
+ struct cvmx_rad_reg_debug1_s cn61xx;
+ struct cvmx_rad_reg_debug1_s cn63xx;
+ struct cvmx_rad_reg_debug1_s cn63xxp1;
+ struct cvmx_rad_reg_debug1_s cn66xx;
+ struct cvmx_rad_reg_debug1_s cn68xx;
+ struct cvmx_rad_reg_debug1_s cn68xxp1;
+ struct cvmx_rad_reg_debug1_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug1 cvmx_rad_reg_debug1_t;
+
+/**
+ * cvmx_rad_reg_debug10
+ */
+union cvmx_rad_reg_debug10 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug10_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t flags : 8; /**< OCTL flags */
+ uint64_t size : 16; /**< OCTL size (bytes) */
+ uint64_t ptr : 40; /**< OCTL pointer */
+#else
+ uint64_t ptr : 40;
+ uint64_t size : 16;
+ uint64_t flags : 8;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug10_s cn52xx;
+ struct cvmx_rad_reg_debug10_s cn52xxp1;
+ struct cvmx_rad_reg_debug10_s cn56xx;
+ struct cvmx_rad_reg_debug10_s cn56xxp1;
+ struct cvmx_rad_reg_debug10_s cn61xx;
+ struct cvmx_rad_reg_debug10_s cn63xx;
+ struct cvmx_rad_reg_debug10_s cn63xxp1;
+ struct cvmx_rad_reg_debug10_s cn66xx;
+ struct cvmx_rad_reg_debug10_s cn68xx;
+ struct cvmx_rad_reg_debug10_s cn68xxp1;
+ struct cvmx_rad_reg_debug10_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug10 cvmx_rad_reg_debug10_t;
+
+/**
+ * cvmx_rad_reg_debug11
+ */
+union cvmx_rad_reg_debug11 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug11_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t q : 1; /**< OCTL q flag */
+ uint64_t p : 1; /**< OCTL p flag */
+ uint64_t wc : 1; /**< OCTL write commit flag */
+ uint64_t eod : 1; /**< OCTL eod flag */
+ uint64_t sod : 1; /**< OCTL sod flag */
+ uint64_t index : 8; /**< OCTL index */
+#else
+ uint64_t index : 8;
+ uint64_t sod : 1;
+ uint64_t eod : 1;
+ uint64_t wc : 1;
+ uint64_t p : 1;
+ uint64_t q : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug11_s cn52xx;
+ struct cvmx_rad_reg_debug11_s cn52xxp1;
+ struct cvmx_rad_reg_debug11_s cn56xx;
+ struct cvmx_rad_reg_debug11_s cn56xxp1;
+ struct cvmx_rad_reg_debug11_s cn61xx;
+ struct cvmx_rad_reg_debug11_s cn63xx;
+ struct cvmx_rad_reg_debug11_s cn63xxp1;
+ struct cvmx_rad_reg_debug11_s cn66xx;
+ struct cvmx_rad_reg_debug11_s cn68xx;
+ struct cvmx_rad_reg_debug11_s cn68xxp1;
+ struct cvmx_rad_reg_debug11_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug11 cvmx_rad_reg_debug11_t;
+
+/**
+ * cvmx_rad_reg_debug12
+ */
+union cvmx_rad_reg_debug12 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug12_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t asserts : 15; /**< Various assertion checks */
+#else
+ uint64_t asserts : 15;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug12_s cn52xx;
+ struct cvmx_rad_reg_debug12_s cn52xxp1;
+ struct cvmx_rad_reg_debug12_s cn56xx;
+ struct cvmx_rad_reg_debug12_s cn56xxp1;
+ struct cvmx_rad_reg_debug12_s cn61xx;
+ struct cvmx_rad_reg_debug12_s cn63xx;
+ struct cvmx_rad_reg_debug12_s cn63xxp1;
+ struct cvmx_rad_reg_debug12_s cn66xx;
+ struct cvmx_rad_reg_debug12_s cn68xx;
+ struct cvmx_rad_reg_debug12_s cn68xxp1;
+ struct cvmx_rad_reg_debug12_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug12 cvmx_rad_reg_debug12_t;
+
+/**
+ * cvmx_rad_reg_debug2
+ */
+union cvmx_rad_reg_debug2 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t owordp : 64; /**< OWordP */
+#else
+ uint64_t owordp : 64;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug2_s cn52xx;
+ struct cvmx_rad_reg_debug2_s cn52xxp1;
+ struct cvmx_rad_reg_debug2_s cn56xx;
+ struct cvmx_rad_reg_debug2_s cn56xxp1;
+ struct cvmx_rad_reg_debug2_s cn61xx;
+ struct cvmx_rad_reg_debug2_s cn63xx;
+ struct cvmx_rad_reg_debug2_s cn63xxp1;
+ struct cvmx_rad_reg_debug2_s cn66xx;
+ struct cvmx_rad_reg_debug2_s cn68xx;
+ struct cvmx_rad_reg_debug2_s cn68xxp1;
+ struct cvmx_rad_reg_debug2_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug2 cvmx_rad_reg_debug2_t;
+
+/**
+ * cvmx_rad_reg_debug3
+ */
+union cvmx_rad_reg_debug3 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t owordq : 64; /**< OWordQ */
+#else
+ uint64_t owordq : 64;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug3_s cn52xx;
+ struct cvmx_rad_reg_debug3_s cn52xxp1;
+ struct cvmx_rad_reg_debug3_s cn56xx;
+ struct cvmx_rad_reg_debug3_s cn56xxp1;
+ struct cvmx_rad_reg_debug3_s cn61xx;
+ struct cvmx_rad_reg_debug3_s cn63xx;
+ struct cvmx_rad_reg_debug3_s cn63xxp1;
+ struct cvmx_rad_reg_debug3_s cn66xx;
+ struct cvmx_rad_reg_debug3_s cn68xx;
+ struct cvmx_rad_reg_debug3_s cn68xxp1;
+ struct cvmx_rad_reg_debug3_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug3 cvmx_rad_reg_debug3_t;
+
+/**
+ * cvmx_rad_reg_debug4
+ */
+union cvmx_rad_reg_debug4 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rword : 64; /**< RWord */
+#else
+ uint64_t rword : 64;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug4_s cn52xx;
+ struct cvmx_rad_reg_debug4_s cn52xxp1;
+ struct cvmx_rad_reg_debug4_s cn56xx;
+ struct cvmx_rad_reg_debug4_s cn56xxp1;
+ struct cvmx_rad_reg_debug4_s cn61xx;
+ struct cvmx_rad_reg_debug4_s cn63xx;
+ struct cvmx_rad_reg_debug4_s cn63xxp1;
+ struct cvmx_rad_reg_debug4_s cn66xx;
+ struct cvmx_rad_reg_debug4_s cn68xx;
+ struct cvmx_rad_reg_debug4_s cn68xxp1;
+ struct cvmx_rad_reg_debug4_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug4 cvmx_rad_reg_debug4_t;
+
+/**
+ * cvmx_rad_reg_debug5
+ */
+union cvmx_rad_reg_debug5 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_53_63 : 11;
+ uint64_t niropc7 : 3; /**< NCBI ropc (stage7 grant) */
+ uint64_t nirque7 : 2; /**< NCBI rque (stage7 grant) */
+ uint64_t nirval7 : 5; /**< NCBI rval (stage7 grant) */
+ uint64_t niropc6 : 3; /**< NCBI ropc (stage6 arb) */
+ uint64_t nirque6 : 2; /**< NCBI rque (stage6 arb) */
+ uint64_t nirarb6 : 1; /**< NCBI rarb (stage6 arb) */
+ uint64_t nirval6 : 5; /**< NCBI rval (stage6 arb) */
+ uint64_t niridx1 : 4; /**< NCBI ridx1 */
+ uint64_t niwidx1 : 4; /**< NCBI widx1 */
+ uint64_t niridx0 : 4; /**< NCBI ridx0 */
+ uint64_t niwidx0 : 4; /**< NCBI widx0 */
+ uint64_t wccreds : 2; /**< WC credits */
+ uint64_t fpacreds : 2; /**< FPA credits */
+ uint64_t reserved_10_11 : 2;
+ uint64_t powcreds : 2; /**< POW credits */
+ uint64_t n1creds : 4; /**< NCBI1 credits */
+ uint64_t n0creds : 4; /**< NCBI0 credits */
+#else
+ uint64_t n0creds : 4;
+ uint64_t n1creds : 4;
+ uint64_t powcreds : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t fpacreds : 2;
+ uint64_t wccreds : 2;
+ uint64_t niwidx0 : 4;
+ uint64_t niridx0 : 4;
+ uint64_t niwidx1 : 4;
+ uint64_t niridx1 : 4;
+ uint64_t nirval6 : 5;
+ uint64_t nirarb6 : 1;
+ uint64_t nirque6 : 2;
+ uint64_t niropc6 : 3;
+ uint64_t nirval7 : 5;
+ uint64_t nirque7 : 2;
+ uint64_t niropc7 : 3;
+ uint64_t reserved_53_63 : 11;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug5_s cn52xx;
+ struct cvmx_rad_reg_debug5_s cn52xxp1;
+ struct cvmx_rad_reg_debug5_s cn56xx;
+ struct cvmx_rad_reg_debug5_s cn56xxp1;
+ struct cvmx_rad_reg_debug5_s cn61xx;
+ struct cvmx_rad_reg_debug5_s cn63xx;
+ struct cvmx_rad_reg_debug5_s cn63xxp1;
+ struct cvmx_rad_reg_debug5_s cn66xx;
+ struct cvmx_rad_reg_debug5_s cn68xx;
+ struct cvmx_rad_reg_debug5_s cn68xxp1;
+ struct cvmx_rad_reg_debug5_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug5 cvmx_rad_reg_debug5_t;
+
+/**
+ * cvmx_rad_reg_debug6
+ */
+union cvmx_rad_reg_debug6 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cnt : 8; /**< CCTL count[7:0] (bytes) */
+ uint64_t size : 16; /**< CCTL size (bytes) */
+ uint64_t ptr : 40; /**< CCTL pointer */
+#else
+ uint64_t ptr : 40;
+ uint64_t size : 16;
+ uint64_t cnt : 8;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug6_s cn52xx;
+ struct cvmx_rad_reg_debug6_s cn52xxp1;
+ struct cvmx_rad_reg_debug6_s cn56xx;
+ struct cvmx_rad_reg_debug6_s cn56xxp1;
+ struct cvmx_rad_reg_debug6_s cn61xx;
+ struct cvmx_rad_reg_debug6_s cn63xx;
+ struct cvmx_rad_reg_debug6_s cn63xxp1;
+ struct cvmx_rad_reg_debug6_s cn66xx;
+ struct cvmx_rad_reg_debug6_s cn68xx;
+ struct cvmx_rad_reg_debug6_s cn68xxp1;
+ struct cvmx_rad_reg_debug6_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug6 cvmx_rad_reg_debug6_t;
+
+/**
+ * cvmx_rad_reg_debug7
+ */
+union cvmx_rad_reg_debug7 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t cnt : 15; /**< CCTL count[22:8] (bytes) */
+#else
+ uint64_t cnt : 15;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug7_s cn52xx;
+ struct cvmx_rad_reg_debug7_s cn52xxp1;
+ struct cvmx_rad_reg_debug7_s cn56xx;
+ struct cvmx_rad_reg_debug7_s cn56xxp1;
+ struct cvmx_rad_reg_debug7_s cn61xx;
+ struct cvmx_rad_reg_debug7_s cn63xx;
+ struct cvmx_rad_reg_debug7_s cn63xxp1;
+ struct cvmx_rad_reg_debug7_s cn66xx;
+ struct cvmx_rad_reg_debug7_s cn68xx;
+ struct cvmx_rad_reg_debug7_s cn68xxp1;
+ struct cvmx_rad_reg_debug7_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug7 cvmx_rad_reg_debug7_t;
+
+/**
+ * cvmx_rad_reg_debug8
+ */
+union cvmx_rad_reg_debug8 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug8_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t flags : 8; /**< ICTL flags */
+ uint64_t size : 16; /**< ICTL size (bytes) */
+ uint64_t ptr : 40; /**< ICTL pointer */
+#else
+ uint64_t ptr : 40;
+ uint64_t size : 16;
+ uint64_t flags : 8;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug8_s cn52xx;
+ struct cvmx_rad_reg_debug8_s cn52xxp1;
+ struct cvmx_rad_reg_debug8_s cn56xx;
+ struct cvmx_rad_reg_debug8_s cn56xxp1;
+ struct cvmx_rad_reg_debug8_s cn61xx;
+ struct cvmx_rad_reg_debug8_s cn63xx;
+ struct cvmx_rad_reg_debug8_s cn63xxp1;
+ struct cvmx_rad_reg_debug8_s cn66xx;
+ struct cvmx_rad_reg_debug8_s cn68xx;
+ struct cvmx_rad_reg_debug8_s cn68xxp1;
+ struct cvmx_rad_reg_debug8_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug8 cvmx_rad_reg_debug8_t;
+
+/**
+ * cvmx_rad_reg_debug9
+ */
+union cvmx_rad_reg_debug9 {
+ uint64_t u64;
+ struct cvmx_rad_reg_debug9_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t eod : 1; /**< ICTL eod flag */
+ uint64_t ini : 1; /**< ICTL init flag */
+ uint64_t q : 1; /**< ICTL q enable */
+ uint64_t p : 1; /**< ICTL p enable */
+ uint64_t mul : 8; /**< ICTL multiplier */
+ uint64_t index : 8; /**< ICTL index */
+#else
+ uint64_t index : 8;
+ uint64_t mul : 8;
+ uint64_t p : 1;
+ uint64_t q : 1;
+ uint64_t ini : 1;
+ uint64_t eod : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_rad_reg_debug9_s cn52xx;
+ struct cvmx_rad_reg_debug9_s cn52xxp1;
+ struct cvmx_rad_reg_debug9_s cn56xx;
+ struct cvmx_rad_reg_debug9_s cn56xxp1;
+ struct cvmx_rad_reg_debug9_s cn61xx;
+ struct cvmx_rad_reg_debug9_s cn63xx;
+ struct cvmx_rad_reg_debug9_s cn63xxp1;
+ struct cvmx_rad_reg_debug9_s cn66xx;
+ struct cvmx_rad_reg_debug9_s cn68xx;
+ struct cvmx_rad_reg_debug9_s cn68xxp1;
+ struct cvmx_rad_reg_debug9_s cnf71xx;
+};
+typedef union cvmx_rad_reg_debug9 cvmx_rad_reg_debug9_t;
+
+/**
+ * cvmx_rad_reg_error
+ */
+union cvmx_rad_reg_error {
+ uint64_t u64;
+ struct cvmx_rad_reg_error_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t doorbell : 1; /**< A doorbell count has overflowed */
+#else
+ uint64_t doorbell : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_rad_reg_error_s cn52xx;
+ struct cvmx_rad_reg_error_s cn52xxp1;
+ struct cvmx_rad_reg_error_s cn56xx;
+ struct cvmx_rad_reg_error_s cn56xxp1;
+ struct cvmx_rad_reg_error_s cn61xx;
+ struct cvmx_rad_reg_error_s cn63xx;
+ struct cvmx_rad_reg_error_s cn63xxp1;
+ struct cvmx_rad_reg_error_s cn66xx;
+ struct cvmx_rad_reg_error_s cn68xx;
+ struct cvmx_rad_reg_error_s cn68xxp1;
+ struct cvmx_rad_reg_error_s cnf71xx;
+};
+typedef union cvmx_rad_reg_error cvmx_rad_reg_error_t;
+
+/**
+ * cvmx_rad_reg_int_mask
+ *
+ * Notes:
+ * When a mask bit is set, the corresponding interrupt is enabled.
+ *
+ */
+union cvmx_rad_reg_int_mask {
+ uint64_t u64;
+ struct cvmx_rad_reg_int_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t doorbell : 1; /**< Bit mask corresponding to RAD_REG_ERROR[0] above */
+#else
+ uint64_t doorbell : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_rad_reg_int_mask_s cn52xx;
+ struct cvmx_rad_reg_int_mask_s cn52xxp1;
+ struct cvmx_rad_reg_int_mask_s cn56xx;
+ struct cvmx_rad_reg_int_mask_s cn56xxp1;
+ struct cvmx_rad_reg_int_mask_s cn61xx;
+ struct cvmx_rad_reg_int_mask_s cn63xx;
+ struct cvmx_rad_reg_int_mask_s cn63xxp1;
+ struct cvmx_rad_reg_int_mask_s cn66xx;
+ struct cvmx_rad_reg_int_mask_s cn68xx;
+ struct cvmx_rad_reg_int_mask_s cn68xxp1;
+ struct cvmx_rad_reg_int_mask_s cnf71xx;
+};
+typedef union cvmx_rad_reg_int_mask cvmx_rad_reg_int_mask_t;
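/* A small sketch, assuming the error/interrupt pairing described in the notes
 * above: setting a mask bit enables the corresponding RAD_REG_ERROR interrupt.
 * rad_enable_doorbell_irq() is a hypothetical helper. */
static void rad_enable_doorbell_irq(void)
{
    cvmx_rad_reg_int_mask_t mask;

    mask.u64 = 0;
    mask.s.doorbell = 1;    /* enable reporting of RAD_REG_ERROR[DOORBELL] */
    cvmx_write_csr(CVMX_RAD_REG_INT_MASK, mask.u64);
}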
+
+/**
+ * cvmx_rad_reg_polynomial
+ *
+ * Notes:
+ * The polynomial is x^8 + C7*x^7 + C6*x^6 + C5*x^5 + C4*x^4 + C3*x^3 + C2*x^2 + C1*x^1 + C0.
+ *
+ */
+union cvmx_rad_reg_polynomial {
+ uint64_t u64;
+ struct cvmx_rad_reg_polynomial_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t coeffs : 8; /**< coefficients of GF(2^8) irreducible polynomial */
+#else
+ uint64_t coeffs : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_rad_reg_polynomial_s cn52xx;
+ struct cvmx_rad_reg_polynomial_s cn52xxp1;
+ struct cvmx_rad_reg_polynomial_s cn56xx;
+ struct cvmx_rad_reg_polynomial_s cn56xxp1;
+ struct cvmx_rad_reg_polynomial_s cn61xx;
+ struct cvmx_rad_reg_polynomial_s cn63xx;
+ struct cvmx_rad_reg_polynomial_s cn63xxp1;
+ struct cvmx_rad_reg_polynomial_s cn66xx;
+ struct cvmx_rad_reg_polynomial_s cn68xx;
+ struct cvmx_rad_reg_polynomial_s cn68xxp1;
+ struct cvmx_rad_reg_polynomial_s cnf71xx;
+};
+typedef union cvmx_rad_reg_polynomial cvmx_rad_reg_polynomial_t;
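/* A software model, not the hardware implementation: the bit-serial GF(2^8)
 * multiply implied by the polynomial described above, with `poly` holding
 * coefficients C7..C0. poly = 0x1d corresponds to the common RAID-6 choice
 * x^8 + x^4 + x^3 + x^2 + 1. */
#include <stdint.h>

static uint8_t gf256_mul(uint8_t a, uint8_t b, uint8_t poly)
{
    uint8_t product = 0;

    while (b) {
        if (b & 1)
            product ^= a;                   /* addition in GF(2) is XOR */
        b >>= 1;
        if (a & 0x80)
            a = (uint8_t)((a << 1) ^ poly); /* reduce modulo the polynomial */
        else
            a <<= 1;
    }
    return product;
}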
+
+/**
+ * cvmx_rad_reg_read_idx
+ *
+ * Notes:
+ * Provides the read index during a CSR read operation to any of the CSRs that are physically stored
+ * as memories. The names of these CSRs begin with the prefix "RAD_MEM_".
+ * IDX[15:0] is the read index. INC[15:0] is an increment that is added to IDX[15:0] after any CSR read.
+ * The intended use is to initially write this CSR such that IDX=0 and INC=1. Then, the entire
+ * contents of a CSR memory can be read with consecutive CSR read commands.
+ */
+union cvmx_rad_reg_read_idx {
+ uint64_t u64;
+ struct cvmx_rad_reg_read_idx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t inc : 16; /**< Increment to add to current index for next index */
+ uint64_t index : 16; /**< Index to use for next memory CSR read */
+#else
+ uint64_t index : 16;
+ uint64_t inc : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_rad_reg_read_idx_s cn52xx;
+ struct cvmx_rad_reg_read_idx_s cn52xxp1;
+ struct cvmx_rad_reg_read_idx_s cn56xx;
+ struct cvmx_rad_reg_read_idx_s cn56xxp1;
+ struct cvmx_rad_reg_read_idx_s cn61xx;
+ struct cvmx_rad_reg_read_idx_s cn63xx;
+ struct cvmx_rad_reg_read_idx_s cn63xxp1;
+ struct cvmx_rad_reg_read_idx_s cn66xx;
+ struct cvmx_rad_reg_read_idx_s cn68xx;
+ struct cvmx_rad_reg_read_idx_s cn68xxp1;
+ struct cvmx_rad_reg_read_idx_s cnf71xx;
+};
+typedef union cvmx_rad_reg_read_idx cvmx_rad_reg_read_idx_t;
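/* A minimal sketch of the indexed-read pattern described in the notes above:
 * write IDX=0 and INC=1, then walk a RAD_MEM_ CSR with consecutive reads. The
 * entry count (32 for RAD_MEM_DEBUG0) comes from that register's notes, and
 * entries never written return unpredictable data. rad_dump_mem_debug0() is a
 * hypothetical helper. */
static void rad_dump_mem_debug0(void)
{
    cvmx_rad_reg_read_idx_t read_idx;
    cvmx_rad_mem_debug0_t entry;
    int i;

    read_idx.u64 = 0;
    read_idx.s.index = 0;   /* start at entry 0 */
    read_idx.s.inc = 1;     /* auto-advance after every CSR read */
    cvmx_write_csr(CVMX_RAD_REG_READ_IDX, read_idx.u64);

    for (i = 0; i < 32; i++) {
        entry.u64 = cvmx_read_csr(CVMX_RAD_MEM_DEBUG0);
        cvmx_dprintf("RAD_MEM_DEBUG0[%d] = 0x%016llx\n", i,
                     (unsigned long long)entry.u64);
    }
}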
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-rad-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-raid.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-raid.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-raid.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,149 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the RAID block. This is not available on all chips.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-cmd-queue.h>
+#include <asm/octeon/cvmx-raid.h>
+#else
+#include "executive-config.h"
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-cmd-queue.h"
+#include "cvmx-raid.h"
+#endif
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+/**
+ * Initialize the RAID block
+ *
+ * @param polynomial Coefficients for the RAID polynomial
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_raid_initialize(cvmx_rad_reg_polynomial_t polynomial)
+{
+ cvmx_cmd_queue_result_t result;
+ cvmx_rad_reg_cmd_buf_t rad_reg_cmd_buf;
+
+ cvmx_write_csr(CVMX_RAD_REG_POLYNOMIAL, polynomial.u64);
+
+ result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_RAID, 0,
+ CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
+ if (result != CVMX_CMD_QUEUE_SUCCESS)
+ return -1;
+
+ rad_reg_cmd_buf.u64 = 0;
+ rad_reg_cmd_buf.s.dwb = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
+ rad_reg_cmd_buf.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
+ rad_reg_cmd_buf.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
+ rad_reg_cmd_buf.s.ptr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_RAID))>>7;
+ cvmx_write_csr(CVMX_RAD_REG_CMD_BUF, rad_reg_cmd_buf.u64);
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_raid_initialize);
+#endif
+
+/**
+ * Shutdown the RAID block. RAID must be idle when
+ * this function is called.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_raid_shutdown(void)
+{
+ cvmx_rad_reg_ctl_t rad_reg_ctl;
+
+ if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_RAID))
+ {
+ cvmx_dprintf("ERROR: cvmx_raid_shutdown: RAID not idle.\n");
+ return -1;
+ }
+
+ rad_reg_ctl.u64 = cvmx_read_csr(CVMX_RAD_REG_CTL);
+ rad_reg_ctl.s.reset = 1;
+ cvmx_write_csr(CVMX_RAD_REG_CTL, rad_reg_ctl.u64);
+ cvmx_wait(100);
+
+ cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_RAID);
+ cvmx_write_csr(CVMX_RAD_REG_CMD_BUF, 0);
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_raid_shutdown);
+#endif
+
+/**
+ * Submit a command to the RAID block
+ *
+ * @param num_words Number of command words to submit
+ * @param words Command words
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_raid_submit(int num_words, cvmx_raid_word_t words[])
+{
+ cvmx_cmd_queue_result_t result = cvmx_cmd_queue_write(CVMX_CMD_QUEUE_RAID, 1, num_words, (uint64_t *)words);
+ if (result == CVMX_CMD_QUEUE_SUCCESS)
+ cvmx_write_csr(CVMX_ADDR_DID(CVMX_FULL_DID(14, 0)), num_words);
+ return result;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_raid_submit);
+#endif
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-raid.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-raid.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-raid.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-raid.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,209 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the RAID block. This is not available on all chips.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_RAID_H__
+#define __CVMX_RAID_H__
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx-rad-defs.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * This structure defines the type of command words the RAID block
+ * will accept.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t reserved_37_63 : 27; /**< Must be zero */
+ uint64_t q_cmp : 1; /**< Indicates whether the Q pipe is in normal mode (CWORD[Q_CMP]=0) or in non-zero
+ byte detect mode (CWORD[Q_CMP]=1).
+ In non-zero byte detect mode, the Q OWORD[PTR] result is the non-zero detect
+ result, which indicates the position of the first non-zero byte in the pipe result bytes.
+ CWORD[Q_CMP] must not be set when CWORD[QOUT]=0, and must not be set
+ when CWORD[Q_XOR] is set. */
+ uint64_t p_cmp : 1; /**< Indicates whether the P pipe is in normal mode (CWORD[P_CMP]=0) or in non-zero
+ byte detect mode (CWORD[P_CMP]=1).
+ In non-zero byte detect mode, the P OWORD[PTR] result is the non-zero detect
+ result, which indicates the position of the first non-zero byte in the pipe result bytes.
+ CWORD[P_CMP] must not be set when CWORD[POUT]=0, and must not be set
+ when CWORD[P_XOR] is set. */
+ uint64_t q_xor : 1; /**< Indicates whether the Q output buffer bytes are the normal Q pipe result or the
+ normal Q pipe result exclusive-OR'ed with the P pipe result.
+ When CWORD[Q_XOR]=0 (and CWORD[Q_CMP]=0), the Q output buffer bytes are
+ the normal Q pipe result, which does not include the P pipe result in any way.
+ When CWORD[Q_XOR]=1, the Q output buffer bytes are the normal Q pipe result
+ exclusive-OR'ed with the P pipe result, as if the P pipe result were another Q IWORD
+ for the Q pipe with QMULT=1.
+ CWORD[Q_XOR] must not be set unless both CWORD[POUT,QOUT] are set, and
+ must not be set when CWORD[Q_CMP] is set. */
+ uint64_t p_xor : 1; /**< Indicates whether the P output buffer bytes are the normal P pipe result or the
+ normal P pipe result exclusive-OR'ed with the Q pipe result.
+ When CWORD[P_XOR]=0 (and CWORD[P_CMP]=0), the P output buffer bytes are
+ the normal P pipe result, which does not include the Q pipe result in any way.
+ When CWORD[P_XOR]=1, the P output buffer bytes are the normal P pipe result
+ exclusive-OR'ed with the Q pipe result, as if the Q pipe result were another P
+ IWORD for the P pipe.
+ CWORD[P_XOR] must not be set unless both CWORD[POUT,QOUT] are set, and
+ must not be set when CWORD[P_CMP] is set. */
+ uint64_t wqe : 1; /**< Indicates whether RAD submits a work queue entry or writes an L2/DRAM byte to
+ zero after completing the instruction.
+ When CWORD[WQE] is set and RESP[PTR]!=0, RAD adds the work queue entry
+ indicated by RESP[PTR] to the selected POW input queue after completing the
+ instruction.
+ When CWORD[WQE] is clear and RESP[PTR]!=0, RAD writes the L2/DRAM byte
+ indicated by RESP[PTR] to zero after completing the instruction. */
+ uint64_t qout : 1; /**< Indicates whether the Q pipe is used by this instruction.
+ If CWORD[QOUT] is set, IWORD[QEN] must be set for at least one IWORD.
+ At least one of CWORD[QOUT,POUT] must be set. */
+ uint64_t pout : 1; /**< Indicates whether the P pipe is used by this instruction.
+ If CWORD[POUT] is set, IWORD[PEN] must be set for at least one IWORD.
+ At least one of CWORD[QOUT,POUT] must be set. */
+ uint64_t iword : 6; /**< Indicates the number of input buffers used.
+ 1 <= CWORD[IWORD] <= 32. */
+ uint64_t size : 24; /**< Indicates the size in bytes of all input buffers. When CWORD[Q_CMP,P_CMP]=0,
+ also indicates the size of the Q/P output buffers.
+ CWORD[SIZE] must be a multiple of 8B (i.e. <2:0> must be zero). */
+ } cword;
+ struct
+ {
+ uint64_t reserved_58_63 : 6; /**< Must be zero */
+ uint64_t fw : 1; /**< When set, indicates that RAD can modify any byte in any (128B) cache line touched
+ * by L2/DRAM addresses OWORD[PTR] through OWORD[PTR]+CWORD[SIZE]-1.
+ Setting OWORD[FW] can improve hardware performance, as some DRAM loads can
+ be avoided on L2 cache misses. The Q OWORD[FW] must not be set when
+ CWORD[Q_CMP] is set, and the P OWORD[FW] must not be set when
+ CWORD[P_CMP] is set. */
+ uint64_t nc : 1; /**< When set, indicates that RAD should not allocate L2 cache space for the P/Q data on
+ L2 cache misses.
+ OWORD[NC] should typically be clear, though setting OWORD[NC] can improve
+ performance in some circumstances, as the L2 cache will not be polluted by P/Q data.
+ The Q OWORD[NC] must not be set when CWORD[Q_CMP] is set, and the P
+ OWORD[NC] must not be set when CWORD[P_CMP] is set. */
+ uint64_t reserved_40_55 : 16; /**< Must be zero */
+ uint64_t addr : 40; /**< When CWORD[P_CMP,Q_CMP]=0, OWORD[PTR] indicates the starting address of
+ the L2/DRAM buffer that will receive the P/Q data. In the non-compare mode, the
+ output buffer receives all of the output buffer bytes.
+ When CWORD[P_CMP,Q_CMP]=1, the corresponding P/Q pipe is in compare mode,
+ and the only output of the pipe is the non-zero detect result. In this case,
+ OWORD[PTR] indicates the 8-byte location of the non-zero detect result. */
+ } oword;
+ struct
+ {
+ uint64_t reserved_57_63 : 7; /**< Must be zero */
+ uint64_t nc : 1; /**< When set, indicates that RAD should not allocate L2 cache space for this input buffer
+ data on L2 cache misses.
+ Setting IWORD[NC] may improve performance in some circumstances, as the L2
+ cache may not be polluted with input buffer data. */
+ uint64_t reserved_50_55 : 6; /**< Must be zero */
+ uint64_t qen : 1; /**< Indicates that this input buffer data should participate in the Q pipe result.
+ The Q pipe hardware multiplies each participating input byte by IWORD[QMULT]
+ before accumulating them by exclusive-OR'ing.
+ IWORD[QEN] must not be set when CWORD[QOUT] is not set.
+ If CWORD[QOUT] is set, IWORD[QEN] must be set for at least one IWORD. */
+ uint64_t pen : 1; /**< Indicates that this input buffer data should participate in the P pipe result.
+ The P pipe hardware accumulates each participating input byte by bit-wise
+ exclusive-OR'ing it.
+ IWORD[PEN] must not be set when CWORD[POUT] is not set.
+ If CWORD[POUT] is set, IWORD[PEN] must be set for at least one IWORD. */
+ uint64_t qmult : 8; /**< The Q pipe multiplier for the input buffer. Section 26.1 above describes the GF(2^8)
+ multiplication algorithm.
+ IWORD[QMULT] must be zero when IWORD[QEN] is not set.
+ IWORD[QMULT] must not be zero when IWORD[QEN] is set.
+ When IWORD[QMULT] is 1, the multiplication simplifies to the identity function,
+ and the Q pipe performs the same XOR function as the P pipe. */
+ uint64_t addr : 40; /**< The starting address of the input buffer in L2/DRAM.
+ IWORD[PTR] must be naturally-aligned on an 8 byte boundary (i.e. <2:0> must be
+ zero). */
+ } iword;
+} cvmx_raid_word_t;
+
+/**
+ * Initialize the RAID block
+ *
+ * @param polynomial Coefficients for the RAID polynomial
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_raid_initialize(cvmx_rad_reg_polynomial_t polynomial);
+
+/**
+ * Shutdown the RAID block. RAID must be idle when
+ * this function is called.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_raid_shutdown(void);
+
+/**
+ * Submit a command to the RAID block
+ *
+ * @param num_words Number of command words to submit
+ * @param words Command words
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_raid_submit(int num_words, cvmx_raid_word_t words[]);
+
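/* A hedged usage sketch, not from the SDK: build a two-source P-pipe (XOR)
 * command from the words defined above and hand it to cvmx_raid_submit().
 * cvmx_raid_xor_sketch(), src0, src1 and parity are hypothetical; the
 * addresses would come from cvmx_ptr_to_phys() on 8-byte-aligned L2/DRAM
 * buffers. The word ordering assumed here (CWORD, P OWORD, then IWORDs) and
 * the omitted response word should be checked against the Octeon HRM
 * instruction format before use. */
static inline int cvmx_raid_xor_sketch(uint64_t src0, uint64_t src1,
                                       uint64_t parity, unsigned bytes)
{
    cvmx_raid_word_t cmd[4];

    cmd[0].u64 = 0;             /* CWORD: P pipe only, two input buffers */
    cmd[0].cword.pout = 1;
    cmd[0].cword.iword = 2;
    cmd[0].cword.size = bytes;  /* must be a multiple of 8 bytes */

    cmd[1].u64 = 0;             /* P OWORD: destination of the XOR result */
    cmd[1].oword.addr = parity;

    cmd[2].u64 = 0;             /* IWORD 0: participates in the P pipe */
    cmd[2].iword.pen = 1;
    cmd[2].iword.addr = src0;

    cmd[3].u64 = 0;             /* IWORD 1 */
    cmd[3].iword.pen = 1;
    cmd[3].iword.addr = src1;

    return cvmx_raid_submit(4, cmd);
}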
+#ifdef __cplusplus
+}
+#endif
+
+#endif // __CVMX_RAID_H__
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-raid.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-resources.config
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-resources.config (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-resources.config 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,197 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/*
+ * File version info: $Id: cvmx-resources.config 70030 2012-02-16 04:23:43Z cchavva $
+ *
+ */
+#ifndef __CVMX_RESOURCES_CONFIG__
+#define __CVMX_RESOURCES_CONFIG__
+
+
+#if (CVMX_HELPER_FIRST_MBUFF_SKIP > 256)
+#error CVMX_HELPER_FIRST_MBUFF_SKIP is greater than the maximum of 256
+#endif
+
+#if (CVMX_HELPER_NOT_FIRST_MBUFF_SKIP > 256)
+#error CVMX_HELPER_NOT_FIRST_MBUFF_SKIP is greater than the maximum of 256
+#endif
+
+
+/* Content below this point is only used by the cvmx-config tool, and is
+** not used by any C files as CAVIUM_COMPONENT_REQUIREMENT is never
+** defined.
+*/
+ #ifdef CAVIUM_COMPONENT_REQUIREMENT
+ /* Define the number of LLM ports (interfaces), can be 1 or 2 */
+ cvmxconfig
+ {
+ #if CVMX_LLM_CONFIG_NUM_PORTS == 2
+ define CVMX_LLM_NUM_PORTS value = 2;
+ #else
+ define CVMX_LLM_NUM_PORTS value = 1;
+ #endif
+ }
+ /* Control the setting of Null pointer detection, default to enabled */
+ cvmxconfig {
+ #ifdef CVMX_CONFIG_NULL_POINTER_PROTECT
+ define CVMX_NULL_POINTER_PROTECT value = CVMX_CONFIG_NULL_POINTER_PROTECT;
+ #else
+ define CVMX_NULL_POINTER_PROTECT value = 1;
+ #endif
+ }
+ /* Control Debug prints, default to enabled */
+ cvmxconfig {
+ #ifdef CVMX_CONFIG_ENABLE_DEBUG_PRINTS
+ define CVMX_ENABLE_DEBUG_PRINTS value = CVMX_CONFIG_ENABLE_DEBUG_PRINTS;
+ #else
+ define CVMX_ENABLE_DEBUG_PRINTS value = 1;
+ #endif
+ }
+
+ /* Define CVMX_ENABLE_DFA_FUNCTIONS to allocate resources for the DFA functions */
+ #ifdef CVMX_ENABLE_DFA_FUNCTIONS
+ cvmxconfig
+ {
+ fpa CVMX_FPA_DFA_POOL
+ size = 2
+ protected = 1
+ description = "DFA command buffers";
+ fau CVMX_FAU_DFA_STATE
+ size = 8
+ count = 1
+ description = "FAU registers for the state of the DFA command queue";
+ }
+ #endif
+
+ /* Define CVMX_ENABLE_PKO_FUNCTIONS to allocate resources for the PKO functions */
+ #ifdef CVMX_ENABLE_PKO_FUNCTIONS
+ cvmxconfig
+ {
+ define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0
+ value = CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE0
+ description = "PKO queues per port for interface 0 (ports 0-15)";
+ define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1
+ value = CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE1
+ description = "PKO queues per port for interface 1 (ports 16-31)";
+ define CVMX_PKO_QUEUES_PER_PORT_INTERFACE2
+ value = CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE2
+ description = "PKO queues per port for interface 2";
+ define CVMX_PKO_QUEUES_PER_PORT_INTERFACE3
+ value = CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE3
+ description = "PKO queues per port for interface 3";
+ define CVMX_PKO_QUEUES_PER_PORT_INTERFACE4
+ value = CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE4
+ description = "PKO queues per port for interface 4";
+ define CVMX_PKO_MAX_PORTS_INTERFACE0
+ value = CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
+ description = "Limit on the number of PKO ports enabled for interface 0";
+ define CVMX_PKO_MAX_PORTS_INTERFACE1
+ value = CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
+ description = "Limit on the number of PKO ports enabled for interface 1";
+ define CVMX_PKO_QUEUES_PER_PORT_PCI
+ value = 1
+ description = "PKO queues per port for PCI (ports 32-35)";
+ define CVMX_PKO_QUEUES_PER_PORT_LOOP
+ value = 1
+ description = "PKO queues per port for Loop devices (ports 36-39)";
+ /* We use two queues per port for SRIO0. Having two queues per
+ port with two ports gives us four queues, one for each mailbox */
+ define CVMX_PKO_QUEUES_PER_PORT_SRIO0
+ value = 2
+ description = "PKO queues per port for SRIO0 devices (ports 40-41)";
+ /* We use two queues per port for SRIO1. Having two queues per
+ port with two ports gives us four queues, one for each mailbox */
+ define CVMX_PKO_QUEUES_PER_PORT_SRIO1
+ value = 2
+ description = "PKO queues per port for SRIO1 devices (ports 42-43)";
+ /* Set the IPD cache mode, select from cvmx_ipd_mode_t. */
+ define CVMX_IPD_DRAM_MODE
+ value = CVMX_HELPER_IPD_DRAM_MODE
+ description = "set the IPD cache mode to CVMX_IPD_OPC_MODE_STT";
+ fpa CVMX_FPA_PACKET_POOL
+ pool = 0
+ size = 16
+ priority = 1
+ protected = 1
+ description = "Packet buffers";
+ fpa CVMX_FPA_OUTPUT_BUFFER_POOL
+ size = 8
+ protected = 1
+ description = "PKO queue command buffers";
+ scratch CVMX_SCR_SCRATCH
+ size = 8
+ iobdma = true
+ permanent = false
+ description = "Generic scratch iobdma area";
+ }
+ #endif
+
+ /* Define CVMX_ENABLE_HELPER_FUNCTIONS to allocate resources for the helper functions */
+ #ifdef CVMX_ENABLE_HELPER_FUNCTIONS
+ cvmxconfig
+ {
+ fpa CVMX_FPA_WQE_POOL
+ size = 1
+ priority = 1
+ protected = 1
+ description = "Work queue entrys";
+ }
+ #endif
+
+ /* Define CVMX_ENABLE_TIMER_FUNCTIONS to allocate resources for the timer functions */
+ #ifdef CVMX_ENABLE_TIMER_FUNCTIONS
+ cvmxconfig
+ {
+ fpa CVMX_FPA_TIMER_POOL
+ size = 8
+ protected = 1
+ description = "TIM command buffers";
+ }
+ #endif
+
+#endif
+
+
+#endif /* __CVMX_RESOURCES_CONFIG__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-resources.config
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
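The two #error guards near the top of cvmx-resources.config turn an out-of-range buffer-skip setting into a build failure instead of silent hardware misbehavior. A minimal standalone sketch of the same pattern follows; the MY_* names are hypothetical, not part of the SDK.

    /* Hypothetical sketch of the compile-time range guard used in
     * cvmx-resources.config: an out-of-range value stops the build. */
    #define MY_FIRST_MBUFF_SKIP 184   /* must not exceed 256 */

    #if (MY_FIRST_MBUFF_SKIP > 256)
    #error MY_FIRST_MBUFF_SKIP is greater than the maximum of 256
    #endif

    int main(void) { return 0; }      /* compiles only if the guard passes */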
Added: trunk/sys/contrib/octeon-sdk/cvmx-rng.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-rng.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-rng.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,166 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Function and structure definitions for random number generator hardware
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+
+#ifndef __CVMX_RNG_H__
+#define __CVMX_RNG_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_RNG_LOAD_ADDRESS CVMX_ADD_IO_SEG(cvmx_build_io_address(CVMX_OCT_DID_RNG, 0))
+
+/**
+ * Structure describing the data format used for IOBDMA stores to the RNG.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct {
+ uint64_t scraddr : 8; /**< the (64-bit word) location in scratchpad to write to (if len != 0) */
+ uint64_t len : 8; /**< the number of words in the response (0 => no response) */
+ uint64_t did : 5; /**< the ID of the device on the non-coherent bus */
+ uint64_t subdid : 3; /**< the sub ID of the device on the non-coherent bus */
+ uint64_t addr :40; /**< the address that will appear in the first tick on the NCB bus */
+ } s;
+} cvmx_rng_iobdma_data_t;
+
+/**
+ * Enables the random number generator. Must be called before the RNG is used.
+ */
+static inline void cvmx_rng_enable(void)
+{
+ cvmx_rnm_ctl_status_t rnm_ctl_status;
+ rnm_ctl_status.u64 = cvmx_read_csr(CVMX_RNM_CTL_STATUS);
+ rnm_ctl_status.s.ent_en = 1;
+ rnm_ctl_status.s.rng_en = 1;
+ cvmx_write_csr(CVMX_RNM_CTL_STATUS, rnm_ctl_status.u64);
+}
+/**
+ * Reads 8 bits of random data from the random number generator.
+ *
+ * @return random data
+ */
+static inline uint8_t cvmx_rng_get_random8(void)
+{
+ return cvmx_read64_uint8(CVMX_RNG_LOAD_ADDRESS);
+}
+
+/**
+ * Reads 16 bits of random data from the random number generator.
+ *
+ * @return random data
+ */
+static inline uint16_t cvmx_rng_get_random16(void)
+{
+ return cvmx_read64_uint16(CVMX_RNG_LOAD_ADDRESS);
+}
+
+/**
+ * Reads 32 bits of random data from the random number generator.
+ *
+ * @return random data
+ */
+static inline uint32_t cvmx_rng_get_random32(void)
+{
+ return cvmx_read64_uint32(CVMX_RNG_LOAD_ADDRESS);
+}
+
+/**
+ * Reads 64 bits of random data from the random number generator.
+ *
+ * @return random data
+ */
+static inline uint64_t cvmx_rng_get_random64(void)
+{
+ return cvmx_read64_uint64(CVMX_RNG_LOAD_ADDRESS);
+}
+
+/**
+ * Requests random data from the RNG block asynchronously using an IOBDMA operation.
+ * The random data will be written into the core's
+ * local scratchpad memory at the specified address. A SYNCIOBDMA
+ * operation should be issued to stall for completion of the write.
+ *
+ * @param scr_addr Address in scratch memory to put the result
+ * MUST be a multiple of 8 bytes
+ * @param num_bytes Number of bytes of random data to write at
+ * scr_addr
+ * MUST be a multiple of 8 bytes
+ *
+ * @return 0 on success
+ * 1 on error
+ */
+static inline int cvmx_rng_request_random_async(uint64_t scr_addr, uint64_t num_bytes)
+{
+ cvmx_rng_iobdma_data_t data;
+
+ if (num_bytes & 0x7 || scr_addr & 0x7)
+ return(1);
+
+ data.u64 = 0;
+ /* scr_addr must be 8 byte aligned */
+ data.s.scraddr = scr_addr >> 3;
+ data.s.len = num_bytes >> 3;
+ data.s.did = CVMX_OCT_DID_RNG;
+ cvmx_send_single(data.u64);
+ return(0);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_RNG_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-rng.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
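A minimal usage sketch for the RNG API above, assuming an Octeon simple-executive build where cvmx.h and cvmx-rng.h are on the include path; everything shown is declared in the header added by this commit.

    #include "cvmx.h"
    #include "cvmx-rng.h"

    /* Enable the RNG block once, then read random data of various widths. */
    uint64_t example_seed(void)
    {
        cvmx_rng_enable();                     /* must precede any read */
        uint8_t  b = cvmx_rng_get_random8();   /* 8 random bits */
        uint64_t w = cvmx_rng_get_random64();  /* 64 random bits */
        return w ^ b;
    }

For bulk data, cvmx_rng_request_random_async() avoids stalling the core on each read; see the scratchpad example further below.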
Added: trunk/sys/contrib/octeon-sdk/cvmx-rnm-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-rnm-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-rnm-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,329 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-rnm-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon rnm.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_RNM_DEFS_H__
+#define __CVMX_RNM_DEFS_H__
+
+#define CVMX_RNM_BIST_STATUS (CVMX_ADD_IO_SEG(0x0001180040000008ull))
+#define CVMX_RNM_CTL_STATUS (CVMX_ADD_IO_SEG(0x0001180040000000ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RNM_EER_DBG CVMX_RNM_EER_DBG_FUNC()
+static inline uint64_t CVMX_RNM_EER_DBG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RNM_EER_DBG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180040000018ull);
+}
+#else
+#define CVMX_RNM_EER_DBG (CVMX_ADD_IO_SEG(0x0001180040000018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RNM_EER_KEY CVMX_RNM_EER_KEY_FUNC()
+static inline uint64_t CVMX_RNM_EER_KEY_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RNM_EER_KEY not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180040000010ull);
+}
+#else
+#define CVMX_RNM_EER_KEY (CVMX_ADD_IO_SEG(0x0001180040000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_RNM_SERIAL_NUM CVMX_RNM_SERIAL_NUM_FUNC()
+static inline uint64_t CVMX_RNM_SERIAL_NUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_RNM_SERIAL_NUM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180040000020ull);
+}
+#else
+#define CVMX_RNM_SERIAL_NUM (CVMX_ADD_IO_SEG(0x0001180040000020ull))
+#endif
+
+/**
+ * cvmx_rnm_bist_status
+ *
+ * RNM_BIST_STATUS = RNM's BIST Status Register
+ *
+ * The RNM's Memory Bist Status register.
+ */
+union cvmx_rnm_bist_status {
+ uint64_t u64;
+ struct cvmx_rnm_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t rrc : 1; /**< Status of RRC block bist. */
+ uint64_t mem : 1; /**< Status of MEM block bist. */
+#else
+ uint64_t mem : 1;
+ uint64_t rrc : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_rnm_bist_status_s cn30xx;
+ struct cvmx_rnm_bist_status_s cn31xx;
+ struct cvmx_rnm_bist_status_s cn38xx;
+ struct cvmx_rnm_bist_status_s cn38xxp2;
+ struct cvmx_rnm_bist_status_s cn50xx;
+ struct cvmx_rnm_bist_status_s cn52xx;
+ struct cvmx_rnm_bist_status_s cn52xxp1;
+ struct cvmx_rnm_bist_status_s cn56xx;
+ struct cvmx_rnm_bist_status_s cn56xxp1;
+ struct cvmx_rnm_bist_status_s cn58xx;
+ struct cvmx_rnm_bist_status_s cn58xxp1;
+ struct cvmx_rnm_bist_status_s cn61xx;
+ struct cvmx_rnm_bist_status_s cn63xx;
+ struct cvmx_rnm_bist_status_s cn63xxp1;
+ struct cvmx_rnm_bist_status_s cn66xx;
+ struct cvmx_rnm_bist_status_s cn68xx;
+ struct cvmx_rnm_bist_status_s cn68xxp1;
+ struct cvmx_rnm_bist_status_s cnf71xx;
+};
+typedef union cvmx_rnm_bist_status cvmx_rnm_bist_status_t;
+
+/**
+ * cvmx_rnm_ctl_status
+ *
+ * RNM_CTL_STATUS = RNM's Control/Status Register
+ *
+ * The RNM's interrupt enable register.
+ */
+union cvmx_rnm_ctl_status {
+ uint64_t u64;
+ struct cvmx_rnm_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t dis_mak : 1; /**< Disable use of Master AES KEY */
+ uint64_t eer_lck : 1; /**< Encryption enable register locked */
+ uint64_t eer_val : 1; /**< Dormant encryption key match */
+ uint64_t ent_sel : 4; /**< ? */
+ uint64_t exp_ent : 1; /**< Exported entropy enable for random number generator */
+ uint64_t rng_rst : 1; /**< Reset RNG as core reset. */
+ uint64_t rnm_rst : 1; /**< Reset the RNM as core reset except for register
+ logic. */
+ uint64_t rng_en : 1; /**< Enable the output of the RNG. */
+ uint64_t ent_en : 1; /**< Entropy enable for random number generator. */
+#else
+ uint64_t ent_en : 1;
+ uint64_t rng_en : 1;
+ uint64_t rnm_rst : 1;
+ uint64_t rng_rst : 1;
+ uint64_t exp_ent : 1;
+ uint64_t ent_sel : 4;
+ uint64_t eer_val : 1;
+ uint64_t eer_lck : 1;
+ uint64_t dis_mak : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_rnm_ctl_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t rng_rst : 1; /**< Reset RNG as core reset. */
+ uint64_t rnm_rst : 1; /**< Reset the RNM as core reset except for register
+ logic. */
+ uint64_t rng_en : 1; /**< Enable the output of the RNG. */
+ uint64_t ent_en : 1; /**< Entropy enable for random number generator. */
+#else
+ uint64_t ent_en : 1;
+ uint64_t rng_en : 1;
+ uint64_t rnm_rst : 1;
+ uint64_t rng_rst : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn30xx;
+ struct cvmx_rnm_ctl_status_cn30xx cn31xx;
+ struct cvmx_rnm_ctl_status_cn30xx cn38xx;
+ struct cvmx_rnm_ctl_status_cn30xx cn38xxp2;
+ struct cvmx_rnm_ctl_status_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t ent_sel : 4; /**< ? */
+ uint64_t exp_ent : 1; /**< Exported entropy enable for random number generator */
+ uint64_t rng_rst : 1; /**< Reset RNG as core reset. */
+ uint64_t rnm_rst : 1; /**< Reset the RNM as core reset except for register
+ logic. */
+ uint64_t rng_en : 1; /**< Enable the output of the RNG. */
+ uint64_t ent_en : 1; /**< Entropy enable for random number generator. */
+#else
+ uint64_t ent_en : 1;
+ uint64_t rng_en : 1;
+ uint64_t rnm_rst : 1;
+ uint64_t rng_rst : 1;
+ uint64_t exp_ent : 1;
+ uint64_t ent_sel : 4;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn50xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn52xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn52xxp1;
+ struct cvmx_rnm_ctl_status_cn50xx cn56xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn56xxp1;
+ struct cvmx_rnm_ctl_status_cn50xx cn58xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn58xxp1;
+ struct cvmx_rnm_ctl_status_s cn61xx;
+ struct cvmx_rnm_ctl_status_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t eer_lck : 1; /**< Encryption enable register locked */
+ uint64_t eer_val : 1; /**< Dormant encryption key match */
+ uint64_t ent_sel : 4; /**< ? */
+ uint64_t exp_ent : 1; /**< Exported entropy enable for random number generator */
+ uint64_t rng_rst : 1; /**< Reset RNG as core reset. */
+ uint64_t rnm_rst : 1; /**< Reset the RNM as core reset except for register
+ logic. */
+ uint64_t rng_en : 1; /**< Enable the output of the RNG. */
+ uint64_t ent_en : 1; /**< Entropy enable for random number generator. */
+#else
+ uint64_t ent_en : 1;
+ uint64_t rng_en : 1;
+ uint64_t rnm_rst : 1;
+ uint64_t rng_rst : 1;
+ uint64_t exp_ent : 1;
+ uint64_t ent_sel : 4;
+ uint64_t eer_val : 1;
+ uint64_t eer_lck : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn63xx;
+ struct cvmx_rnm_ctl_status_cn63xx cn63xxp1;
+ struct cvmx_rnm_ctl_status_s cn66xx;
+ struct cvmx_rnm_ctl_status_cn63xx cn68xx;
+ struct cvmx_rnm_ctl_status_cn63xx cn68xxp1;
+ struct cvmx_rnm_ctl_status_s cnf71xx;
+};
+typedef union cvmx_rnm_ctl_status cvmx_rnm_ctl_status_t;
+
+/**
+ * cvmx_rnm_eer_dbg
+ *
+ * RNM_EER_DBG = RNM's Encryption enable debug register
+ *
+ * The RNM's Encryption enable debug register
+ */
+union cvmx_rnm_eer_dbg {
+ uint64_t u64;
+ struct cvmx_rnm_eer_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat : 64; /**< Dormant encryption debug info. */
+#else
+ uint64_t dat : 64;
+#endif
+ } s;
+ struct cvmx_rnm_eer_dbg_s cn61xx;
+ struct cvmx_rnm_eer_dbg_s cn63xx;
+ struct cvmx_rnm_eer_dbg_s cn63xxp1;
+ struct cvmx_rnm_eer_dbg_s cn66xx;
+ struct cvmx_rnm_eer_dbg_s cn68xx;
+ struct cvmx_rnm_eer_dbg_s cn68xxp1;
+ struct cvmx_rnm_eer_dbg_s cnf71xx;
+};
+typedef union cvmx_rnm_eer_dbg cvmx_rnm_eer_dbg_t;
+
+/**
+ * cvmx_rnm_eer_key
+ *
+ * RNM_EER_KEY = RNM's Encryption enable register
+ *
+ * The RNM's Encryption enable register
+ */
+union cvmx_rnm_eer_key {
+ uint64_t u64;
+ struct cvmx_rnm_eer_key_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t key : 64; /**< Dormant encryption key. If dormant crypto is fuse
+ enabled, crypto can be enable by writing this
+ register with the correct key. */
+#else
+ uint64_t key : 64;
+#endif
+ } s;
+ struct cvmx_rnm_eer_key_s cn61xx;
+ struct cvmx_rnm_eer_key_s cn63xx;
+ struct cvmx_rnm_eer_key_s cn63xxp1;
+ struct cvmx_rnm_eer_key_s cn66xx;
+ struct cvmx_rnm_eer_key_s cn68xx;
+ struct cvmx_rnm_eer_key_s cn68xxp1;
+ struct cvmx_rnm_eer_key_s cnf71xx;
+};
+typedef union cvmx_rnm_eer_key cvmx_rnm_eer_key_t;
+
+/**
+ * cvmx_rnm_serial_num
+ *
+ * RNM_SERIAL_NUM = RNM's fuse serial number register
+ *
+ * The RNM's fuse serial number register
+ *
+ * Notes:
+ * Added RNM_SERIAL_NUM in pass 2.0
+ *
+ */
+union cvmx_rnm_serial_num {
+ uint64_t u64;
+ struct cvmx_rnm_serial_num_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat : 64; /**< Dormant encryption serial number */
+#else
+ uint64_t dat : 64;
+#endif
+ } s;
+ struct cvmx_rnm_serial_num_s cn61xx;
+ struct cvmx_rnm_serial_num_s cn63xx;
+ struct cvmx_rnm_serial_num_s cn66xx;
+ struct cvmx_rnm_serial_num_s cn68xx;
+ struct cvmx_rnm_serial_num_s cn68xxp1;
+ struct cvmx_rnm_serial_num_s cnf71xx;
+};
+typedef union cvmx_rnm_serial_num cvmx_rnm_serial_num_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-rnm-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
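The auto-generated unions above support a read-modify-write idiom against the raw CSR: read the 64-bit register, update fields by name, write it back. A short sketch, mirroring what cvmx_rng_enable() in cvmx-rng.h does with this same register:

    #include "cvmx.h"
    #include "cvmx-rnm-defs.h"

    /* Set the entropy and RNG output enables in RNM_CTL_STATUS. */
    void example_rnm_enable(void)
    {
        cvmx_rnm_ctl_status_t ctl;
        ctl.u64 = cvmx_read_csr(CVMX_RNM_CTL_STATUS);
        ctl.s.ent_en = 1;    /* entropy enable */
        ctl.s.rng_en = 1;    /* enable RNG output */
        cvmx_write_csr(CVMX_RNM_CTL_STATUS, ctl.u64);
    }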
Added: trunk/sys/contrib/octeon-sdk/cvmx-rtc.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-rtc.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-rtc.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,154 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file provides support for real time clocks on some boards
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+
+#ifndef __CVMX_RTC_H__
+#define __CVMX_RTC_H__
+
+#include "cvmx-sysinfo.h"
+#include "cvmx-thunder.h"
+#include "cvmx-cn3010-evb-hs5.h"
+
+/**
+ * Supported RTC options
+ */
+typedef enum
+{
+ CVMX_RTC_READ = 0x1, /**< Device supports read access */
+ CVMX_RTC_WRITE = 0x2, /**< Device supports write access */
+ CVMX_RTC_TIME_EPOCH = 0x10, /**< Time stored as seconds from epoch */
+ CVMX_RTC_TIME_CAL = 0x20, /**< Time stored as calendar */
+} cvmx_rtc_options_t;
+
+/**
+ * Return options supported by the RTC device
+ *
+ * @return Supported options, or 0 if RTC is not supported
+ */
+static inline cvmx_rtc_options_t cvmx_rtc_supported(void)
+{
+ static int supported = -1;
+
+ if (supported < 0) {
+ switch (cvmx_sysinfo_get()->board_type)
+ {
+ case CVMX_BOARD_TYPE_THUNDER:
+ supported = CVMX_RTC_READ | CVMX_RTC_WRITE | CVMX_RTC_TIME_EPOCH;
+ break;
+
+ case CVMX_BOARD_TYPE_EBH3000:
+ case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+ case CVMX_BOARD_TYPE_EBH5200:
+ supported = CVMX_RTC_READ | CVMX_RTC_WRITE | CVMX_RTC_TIME_CAL;
+ break;
+
+ default:
+ supported = 0;
+ break;
+ }
+
+#ifdef CVMX_RTC_DEBUG
+ cvmx_dprintf("Board type: %s, RTC support: 0x%x\n",
+ cvmx_board_type_to_string(cvmx_sysinfo_get()->board_type),
+ supported);
+#endif
+ }
+
+ return (cvmx_rtc_options_t) supported;
+}
+
+/**
+ * Read time from RTC device.
+ *
+ * Time is expressed in seconds from epoch (Jan 1 1970 at 00:00:00 UTC)
+ *
+ * @return Time in seconds or 0 if RTC is not supported
+ */
+static inline uint32_t cvmx_rtc_read(void)
+{
+ switch (cvmx_sysinfo_get()->board_type)
+ {
+ case CVMX_BOARD_TYPE_THUNDER:
+ return cvmx_rtc_ds1374_read();
+ break;
+
+ default:
+ return cvmx_rtc_ds1337_read();
+ break;
+ }
+}
+
+/**
+ * Write time to the RTC device
+ *
+ * @param time Number of seconds from epoch (Jan 1 1970 at 00:00:00 UTC)
+ *
+ * @return Zero on success or device-specific error on failure.
+ */
+static inline uint32_t cvmx_rtc_write(uint32_t time)
+{
+ switch (cvmx_sysinfo_get()->board_type)
+ {
+ case CVMX_BOARD_TYPE_THUNDER:
+ return cvmx_rtc_ds1374_write(time);
+ break;
+
+ default:
+ return cvmx_rtc_ds1337_write(time);
+ break;
+ }
+}
+
+#endif /* __CVMX_RTC_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-rtc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
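Board detection is hidden inside cvmx_rtc_supported(), so callers only need to test the option flags before reading or writing. A brief sketch:

    #include "cvmx-rtc.h"

    /* Return seconds since the epoch, or 0 if this board has no readable RTC. */
    uint32_t example_rtc_time(void)
    {
        if (cvmx_rtc_supported() & CVMX_RTC_READ)
            return cvmx_rtc_read();
        return 0;
    }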
Added: trunk/sys/contrib/octeon-sdk/cvmx-rwlock.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-rwlock.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-rwlock.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,172 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file provides reader/writer locks.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ *
+ */
+
+
+#ifndef __CVMX_RWLOCK_H__
+#define __CVMX_RWLOCK_H__
+
+/* include to get atomic compare and store */
+#include "cvmx-atomic.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Flags for lock value in rw lock structure */
+#define CVMX_RWLOCK_WRITE_FLAG 0x1
+#define CVMX_RWLOCK_READ_INC 0x2
+
+
+/* Writer preference locks (wp). Can be starved by writers. When a writer
+ * is waiting, no readers are given the lock until all writers are done.
+ */
+typedef struct
+{
+ volatile uint32_t lock;
+ volatile uint32_t write_req;
+ volatile uint32_t write_comp;
+} cvmx_rwlock_wp_lock_t;
+
+/**
+ * Initialize a reader/writer lock. This must be done
+ * by a single core before the lock is used.
+ *
+ * @param lock pointer to rwlock structure
+ */
+static inline void cvmx_rwlock_wp_init(cvmx_rwlock_wp_lock_t *lock)
+{
+ lock->lock = 0;
+ lock->write_req = 0;
+ lock->write_comp = 0;
+}
+
+/**
+ * Perform a reader lock. If a writer is pending, this
+ * will wait for that writer to complete before locking.
+ *
+ * NOTE: Each thread/process must only lock any rwlock
+ * once, or else a deadlock may result.
+ *
+ * @param lock pointer to rwlock structure
+ */
+static inline void cvmx_rwlock_wp_read_lock(cvmx_rwlock_wp_lock_t *lock)
+{
+
+ /* Wait for outstanding write requests to be serviced */
+ while (lock->write_req != lock->write_comp)
+ ;
+ /* Add ourselves to the interested reader count */
+ cvmx_atomic_add32_nosync((int32_t *)&(lock->lock), CVMX_RWLOCK_READ_INC);
+ /* Wait for writer to finish. No writer will start again
+ ** until after we are done since we have already incremented
+ ** the reader count
+ */
+ while (lock->lock & CVMX_RWLOCK_WRITE_FLAG)
+ ;
+
+}
+
+/**
+ * Perform a reader unlock.
+ *
+ * @param lock pointer to rwlock structure
+ */
+static inline void cvmx_rwlock_wp_read_unlock(cvmx_rwlock_wp_lock_t *lock)
+{
+ /* Remove ourselves from the reader count */
+ cvmx_atomic_add32_nosync((int32_t *)&(lock->lock), -CVMX_RWLOCK_READ_INC);
+}
+
+/**
+ * Perform a writer lock. Any readers that attempt
+ * to get a lock while there are any pending write locks
+ * will wait until all writers have completed. Starvation
+ * of readers by writers is possible and must be avoided
+ * by the application.
+ *
+ * @param lock pointer to rwlock structure
+ */
+static inline void cvmx_rwlock_wp_write_lock(cvmx_rwlock_wp_lock_t *lock)
+{
+ /* Get previous value of write requests */
+ uint32_t prev_writers = ((uint32_t)cvmx_atomic_fetch_and_add32((int32_t *)&(lock->write_req), 1));
+ /* Spin until our turn */
+ while (prev_writers != lock->write_comp)
+ ;
+ /* Spin until no other readers or writers, then set write flag */
+ while (!cvmx_atomic_compare_and_store32((uint32_t *)&(lock->lock), 0, CVMX_RWLOCK_WRITE_FLAG))
+ ;
+
+}
+/**
+ * Perform a writer unlock.
+ *
+ * @param lock pointer to rwlock structure
+ */
+static inline void cvmx_rwlock_wp_write_unlock(cvmx_rwlock_wp_lock_t *lock)
+{
+ /* Remove our writer flag */
+ CVMX_SYNCWS; /* Make sure all writes in protected region are visible before unlock */
+ cvmx_atomic_add32_nosync((int32_t *)&(lock->lock), -CVMX_RWLOCK_WRITE_FLAG);
+ cvmx_atomic_add32_nosync((int32_t *)&(lock->write_comp), 1);
+ CVMX_SYNCWS; /* push unlock writes out, but don't stall */
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_RWLOCK_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-rwlock.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
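A minimal sketch of the intended locking discipline for the writer-preference lock above: one core initializes the lock, readers may overlap with each other, and writers get exclusive access. The shared counter here is illustrative only; in a real application the lock and data would live in memory visible to all cores.

    #include "cvmx-rwlock.h"

    static cvmx_rwlock_wp_lock_t table_lock;  /* must be shared across cores */
    static int table_value;

    void example_setup(void)                  /* call from a single core */
    {
        cvmx_rwlock_wp_init(&table_lock);
    }

    int example_reader(void)
    {
        cvmx_rwlock_wp_read_lock(&table_lock);
        int v = table_value;                  /* concurrent readers allowed */
        cvmx_rwlock_wp_read_unlock(&table_lock);
        return v;
    }

    void example_writer(int v)
    {
        cvmx_rwlock_wp_write_lock(&table_lock);
        table_value = v;                      /* exclusive access */
        cvmx_rwlock_wp_write_unlock(&table_lock);
    }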
Added: trunk/sys/contrib/octeon-sdk/cvmx-scratch.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-scratch.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-scratch.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,164 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file provides support for the processor local scratch memory.
+ * Scratch memory is byte addressable - all addresses are byte addresses.
+ *
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ *
+ */
+
+
+#ifndef __CVMX_SCRATCH_H__
+#define __CVMX_SCRATCH_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Note: This define must be a long, not a long long in order to compile
+ without warnings for both 32bit and 64bit. */
+#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
+
+
+/**
+ * Reads an 8 bit value from the processor local scratchpad memory.
+ *
+ * @param address byte address to read from
+ *
+ * @return value read
+ */
+static inline uint8_t cvmx_scratch_read8(uint64_t address)
+{
+ return *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address);
+}
+/**
+ * Reads a 16 bit value from the processor local scratchpad memory.
+ *
+ * @param address byte address to read from
+ *
+ * @return value read
+ */
+static inline uint16_t cvmx_scratch_read16(uint64_t address)
+{
+ return *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address);
+}
+/**
+ * Reads a 32 bit value from the processor local scratchpad memory.
+ *
+ * @param address byte address to read from
+ *
+ * @return value read
+ */
+static inline uint32_t cvmx_scratch_read32(uint64_t address)
+{
+ return *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address);
+}
+/**
+ * Reads a 64 bit value from the processor local scratchpad memory.
+ *
+ * @param address byte address to read from
+ *
+ * @return value read
+ */
+static inline uint64_t cvmx_scratch_read64(uint64_t address)
+{
+ return *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address);
+}
+
+
+
+/**
+ * Writes an 8 bit value to the processor local scratchpad memory.
+ *
+ * @param address byte address to write to
+ * @param value value to write
+ */
+static inline void cvmx_scratch_write8(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address) = (uint8_t)value;
+}
+/**
+ * Writes a 16 bit value to the processor local scratchpad memory.
+ *
+ * @param address byte address to write to
+ * @param value value to write
+ */
+static inline void cvmx_scratch_write16(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address) = (uint16_t)value;
+}
+/**
+ * Writes a 32 bit value to the processor local scratchpad memory.
+ *
+ * @param address byte address to write to
+ * @param value value to write
+ */
+static inline void cvmx_scratch_write32(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address) = (uint32_t)value;
+}
+/**
+ * Writes a 64 bit value to the processor local scratchpad memory.
+ *
+ * @param address byte address to write to
+ * @param value value to write
+ */
+static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address) = value;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_SCRATCH_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-scratch.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
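The scratchpad is the landing area for IOBDMA responses, which ties this header to cvmx-rng.h above. A sketch of the asynchronous path: request random data into scratch, stall with CVMX_SYNCIOBDMA (from cvmx-asm.h), then read the result. The scratch offset 0 is arbitrary for illustration; real applications reserve offsets through cvmx-config.

    #include "cvmx.h"
    #include "cvmx-rng.h"
    #include "cvmx-scratch.h"

    /* Fetch 8 bytes of random data via IOBDMA into scratch offset 0. */
    uint64_t example_random_iobdma(void)
    {
        cvmx_scratch_write64(0, 0);               /* clear the landing slot */
        if (cvmx_rng_request_random_async(0, 8))  /* both must be 8-byte multiples */
            return 0;                             /* alignment error */
        CVMX_SYNCIOBDMA;                          /* stall until the write lands */
        return cvmx_scratch_read64(0);
    }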
Added: trunk/sys/contrib/octeon-sdk/cvmx-shared-linux-n32.ld
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-shared-linux-n32.ld (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-shared-linux-n32.ld 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,307 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/*
+ * This was created from a template supplied by GNU binutils.
+ * Copyright (C) 2005 Cavium Inc.
+ */
+
+/**
+ * @file
+ * This linker script is for use in building simple executive applications to run
+ * under Linux in userspace. The important difference from a standard Linux
+ * binary is the addition of the ".cvmx_shared" memory section. This script
+ * adds two symbols __cvmx_shared_start and __cvmx_shared_end before and after
+ * the CVMX_SHARED data. These are used by cvmx-app-init-linux.c to create a
+ * shared region across all application processes.
+ *
+ * The original template for this file was:
+ * ${OCTEON_ROOT}/tools/mips64-octeon-linux-gnu/lib/ldscripts/elf32btsmipn32.x
+ */
+OUTPUT_FORMAT("elf32-ntradbigmips", "elf32-ntradbigmips",
+ "elf32-ntradlittlemips")
+OUTPUT_ARCH(mips)
+ENTRY(__start)
+SEARCH_DIR("${OCTEON_ROOT}/tools/mips64-octeon-linux-gnu/lib");
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x10000000); . = 0x10000000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .reginfo : { *(.reginfo) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .dynamic : { *(.dynamic) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.dyn : { *(.rel.dyn) }
+ .rel.sdata : { *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*) }
+ .rela.sdata : { *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*) }
+ .rel.sbss : { *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*) }
+ .rela.sbss : { *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*) }
+ .rel.sdata2 : { *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*) }
+ .rela.sdata2 : { *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*) }
+ .rel.sbss2 : { *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*) }
+ .rela.sbss2 : { *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .init :
+ {
+ KEEP (*(.init))
+ } =0
+ .plt : { *(.plt) }
+ .text :
+ {
+ _ftext = . ;
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.mips16.fn.*) *(.mips16.call.*)
+ } =0
+ .fini :
+ {
+ KEEP (*(.fini))
+ } =0
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .sdata2 :
+ {
+ *(.sdata2 .sdata2.* .gnu.linkonce.s2.*)
+ }
+ .sbss2 : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ .preinit_array :
+ {
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ KEEP (*(.preinit_array))
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ }
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(.fini_array))
+ KEEP (*(SORT(.fini_array.*)))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin?.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin?.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ _fdata = . ;
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ .got.plt : { *(.got.plt) }
+ . = .;
+ _gp = ALIGN(16) + 0x7ff0;
+ .got : { *(.got) }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata :
+ {
+ *(.sdata .sdata.* .gnu.linkonce.s.*)
+ }
+ .lit8 : { *(.lit8) }
+ .lit4 : { *(.lit4) }
+ .srdata : { *(.srdata) }
+
+ . = ALIGN (0x10000);
+ __cvmx_shared_start = .;
+ .cvmx_shared : {*(.cvmx_shared .cvmx_shared.linkonce.*)}
+ .cvmx_shared_bss : { *(.cvmx_shared_bss .cvmx_shared_bss.linkonce.*) }
+ . = ALIGN (0x10000);
+ __cvmx_shared_end = .;
+
+ _edata = .; PROVIDE (edata = .);
+ __bss_start = .;
+ _fbss = .;
+ .sbss :
+ {
+ PROVIDE (__sbss_start = .);
+ PROVIDE (___sbss_start = .);
+ *(.dynsbss)
+ *(.sbss .sbss.* .gnu.linkonce.sb.*)
+ *(.scommon)
+ PROVIDE (__sbss_end = .);
+ PROVIDE (___sbss_end = .);
+ }
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections.
+ FIXME: Why do we need it? When there is no .bss section, we don't
+ pad the .data section. */
+ . = ALIGN(. != 0 ? 32 / 8 : 1);
+ }
+ . = ALIGN(32 / 8);
+ . = ALIGN(32M); /* RBF added alignment of data */
+ .cvmx_shared : { *(.cvmx_shared) }
+ _end = .; PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) }
+ .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) }
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-shared-linux-n32.ld
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
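For context, a sketch of what actually lands in the .cvmx_shared output section this script carves out. The CVMX_SHARED attribute macro (provided by the SDK headers, assumed here) places a variable in that section, so every process that maps the region sees one copy:

    #include "cvmx.h"

    /* One instance shared by all application processes mapping the region. */
    CVMX_SHARED volatile uint64_t example_packet_count = 0;

    void example_bump(void)
    {
        example_packet_count++;
    }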
Added: trunk/sys/contrib/octeon-sdk/cvmx-shared-linux-o32.ld
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-shared-linux-o32.ld (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-shared-linux-o32.ld 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,279 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/*
+ * This was created from a template supplied by GNU binutils.
+ * Copyright (C) 2004 Cavium Inc.
+ */
+
+/**
+ * @file
+ * This linker script is for use in building simple executive applications to run
+ * under Linux in userspace. The important difference from a standard Linux
+ * binary is the addition of the ".cvmx_shared" memory section. This script
+ * adds two symbols __cvmx_shared_start and __cvmx_shared_end before and after
+ * the CVMX_SHARED data. These are used by cvmx-app-init-linux.c to create a
+ * shared region across all application processes.
+ *
+ * The original template for this file was:
+ * ${OCTEON_ROOT}/tools/mips64-octeon-linux-gnu/lib/ldscripts/elf32btsmip.x
+ */
+OUTPUT_FORMAT("elf32-tradbigmips", "elf32-tradbigmips",
+ "elf32-tradlittlemips")
+OUTPUT_ARCH(mips)
+ENTRY(__start)
+SEARCH_DIR("${OCTEON_ROOT}/tools/mips64-octeon-linux-gnu/lib");
+/* Do we need any of these for elf?
+ __DYNAMIC = 0; */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x10000000); . = 0x10000000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .reginfo : { *(.reginfo) }
+ .dynamic : { *(.dynamic) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.sdata : { *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*) }
+ .rela.sdata : { *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*) }
+ .rel.sbss : { *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*) }
+ .rela.sbss : { *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*) }
+ .rel.sdata2 : { *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*) }
+ .rela.sdata2 : { *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*) }
+ .rel.sbss2 : { *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*) }
+ .rela.sbss2 : { *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .init :
+ {
+ KEEP (*(.init))
+ } =0
+ .plt : { *(.plt) }
+ .text :
+ {
+ _ftext = . ;
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.mips16.fn.*) *(.mips16.call.*)
+ } =0
+ .fini :
+ {
+ KEEP (*(.fini))
+ } =0
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .sdata2 : { *(.sdata2 .sdata2.* .gnu.linkonce.s2.*) }
+ .sbss2 : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = 0x10000000;
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE (__preinit_array_start = .);
+ .preinit_array : { *(.preinit_array) }
+ PROVIDE (__preinit_array_end = .);
+ PROVIDE (__init_array_start = .);
+ .init_array : { *(.init_array) }
+ PROVIDE (__init_array_end = .);
+ PROVIDE (__fini_array_start = .);
+ .fini_array : { *(.fini_array) }
+ PROVIDE (__fini_array_end = .);
+ .data :
+ {
+ _fdata = . ;
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ .eh_frame : { KEEP (*(.eh_frame)) }
+ .gcc_except_table : { *(.gcc_except_table) }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ _gp = ALIGN(16) + 0x7ff0;
+ .got : { *(.got.plt) *(.got) }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata :
+ {
+ *(.sdata .sdata.* .gnu.linkonce.s.*)
+ }
+ .lit8 : { *(.lit8) }
+ .lit4 : { *(.lit4) }
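+ /* Illustration (symbol name hypothetical): with _gp loaded in $gp,
+ anything in these small-data sections is reachable with a single
+ gp-relative instruction, e.g. "lw $2, %gp_rel(counter)($gp)",
+ instead of a two-instruction lui/lw absolute sequence. */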
+
+ . = ALIGN (0x10000);
+ __cvmx_shared_start = .;
+ .cvmx_shared : { *(.cvmx_shared .cvmx_shared.linkonce.*) }
+ .cvmx_shared_bss : { *(.cvmx_shared_bss .cvmx_shared_bss.linkonce.*) }
+ . = ALIGN (0x10000);
+ __cvmx_shared_end = .;
+
+ _edata = .;
+ PROVIDE (edata = .);
+ __bss_start = .;
+ _fbss = .;
+ .sbss :
+ {
+ PROVIDE (__sbss_start = .);
+ PROVIDE (___sbss_start = .);
+ *(.dynsbss)
+ *(.sbss .sbss.* .gnu.linkonce.sb.*)
+ *(.scommon)
+ PROVIDE (__sbss_end = .);
+ PROVIDE (___sbss_end = .);
+ }
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ . = ALIGN(32 / 8);
+ . = ALIGN(32M); /* RBF added alignment of data */
+ .cvmx_shared : { *(.cvmx_shared) }
+ _end = .;
+ PROVIDE (end = .);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) }
+ .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) }
+ /DISCARD/ : { *(.note.GNU-stack) }
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-shared-linux-o32.ld
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-shared-linux.ld
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-shared-linux.ld (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-shared-linux.ld 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,306 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/*
+ * This was created from a template supplied by GNU binutils.
+ * Copyright (C) 2004 Cavium Inc.
+ */
+
+/**
+ * @file
+ * This linker script is for use in building a simple executive application to
+ * run under Linux in user space. The important difference from a standard
+ * Linux binary is the addition of the ".cvmx_shared" memory section. This
+ * script adds two symbols, __cvmx_shared_start and __cvmx_shared_end, before
+ * and after the CVMX_SHARED data. These are used by cvmx-app-init-linux.c to
+ * create a shared region across all application processes.
+ *
+ * The original template for this file was:
+ * ${OCTEON_ROOT}/tools/mips64-octeon-linux-gnu/lib/ldscripts/elf64btsmip.x
+ */
+OUTPUT_FORMAT("elf64-tradbigmips", "elf64-tradbigmips",
+ "elf64-tradlittlemips")
+OUTPUT_ARCH(mips)
+ENTRY(__start)
+SEARCH_DIR("${OCTEON_ROOT}/tools/mips64-octeon-linux-gnu/lib");
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x120000000); . = 0x120000000 + SIZEOF_HEADERS;
+ .MIPS.options : { *(.MIPS.options) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .dynamic : { *(.dynamic) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.dyn : { *(.rel.dyn) }
+ .rel.sdata : { *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*) }
+ .rela.sdata : { *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*) }
+ .rel.sbss : { *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*) }
+ .rela.sbss : { *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*) }
+ .rel.sdata2 : { *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*) }
+ .rela.sdata2 : { *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*) }
+ .rel.sbss2 : { *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*) }
+ .rela.sbss2 : { *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .init :
+ {
+ KEEP (*(.init))
+ } =0
+ .plt : { *(.plt) }
+ .text :
+ {
+ _ftext = . ;
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.mips16.fn.*) *(.mips16.call.*)
+ } =0
+ .fini :
+ {
+ KEEP (*(.fini))
+ } =0
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .sdata2 :
+ {
+ *(.sdata2 .sdata2.* .gnu.linkonce.s2.*)
+ }
+ .sbss2 : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ .preinit_array :
+ {
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ KEEP (*(.preinit_array))
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ }
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(.fini_array))
+ KEEP (*(SORT(.fini_array.*)))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin?.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin?.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ _fdata = . ;
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ .got.plt : { *(.got.plt) }
+ . = .;
+ _gp = ALIGN(16) + 0x7ff0;
+ .got : { *(.got) }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata :
+ {
+ *(.sdata .sdata.* .gnu.linkonce.s.*)
+ }
+ .lit8 : { *(.lit8) }
+ .lit4 : { *(.lit4) }
+ .srdata : { *(.srdata) }
+
+ . = ALIGN (0x10000);
+ __cvmx_shared_start = .;
+ .cvmx_shared : { *(.cvmx_shared .cvmx_shared.linkonce.*) }
+ .cvmx_shared_bss : { *(.cvmx_shared_bss .cvmx_shared_bss.linkonce.*) }
+ . = ALIGN (0x10000);
+ __cvmx_shared_end = .;
+
+ _edata = .; PROVIDE (edata = .);
+ __bss_start = .;
+ _fbss = .;
+ .sbss :
+ {
+ PROVIDE (__sbss_start = .);
+ PROVIDE (___sbss_start = .);
+ *(.dynsbss)
+ *(.sbss .sbss.* .gnu.linkonce.sb.*)
+ *(.scommon)
+ PROVIDE (__sbss_end = .);
+ PROVIDE (___sbss_end = .);
+ }
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections.
+ FIXME: Why do we need it? When there is no .bss section, we don't
+ pad the .data section. */
+ . = ALIGN(. != 0 ? 64 / 8 : 1);
+ }
+ . = ALIGN(64 / 8);
+ . = ALIGN(32M); /* RBF added alignment of data */
+ .cvmx_shared : { *(.cvmx_shared) }
+ _end = .; PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) }
+ .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) }
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-shared-linux.ld
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
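
Both shared-linux scripts define the pair __cvmx_shared_start/__cvmx_shared_end
around the CVMX_SHARED region. A minimal sketch of how an application could
size the region (the extern declarations and the function are assumptions;
only the symbol names come from the scripts):

    /* Linker-provided symbols; their addresses delimit the region. */
    extern char __cvmx_shared_start[];
    extern char __cvmx_shared_end[];

    static unsigned long cvmx_shared_region_size(void)
    {
        /* Both symbols are ALIGN (0x10000)-aligned by the scripts, so the
           difference is the padded size of all CVMX_SHARED data. */
        return (unsigned long)(__cvmx_shared_end - __cvmx_shared_start);
    }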
Added: trunk/sys/contrib/octeon-sdk/cvmx-shmem.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-shmem.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-shmem.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,749 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+/**
+ * @file
+ * cvmx-shmem supplies the cross-application shared memory implementation
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+#include "cvmx.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-tlb.h"
+#include "cvmx-shmem.h"
+
+//#define DEBUG
+
+struct cvmx_shmem_smdr *__smdr = NULL;
+
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+static int __cvmx_shmem_devmemfd = 0; /* fd for /dev/mem */
+#endif
+
+#define __CHECK_APP_SMDR do { \
+ if (__smdr == NULL) { \
+ cvmx_dprintf("cvmx_shmem: %s is not set up, Quit line %d \n", \
+ CVMX_SHMEM_DSCPTR_NAME, __LINE__ ); \
+ exit(-1); \
+ } \
+ }while(0)
+
+
+
+/**
+ * @INTERNAL
+ * Virtual sbrk: assigns a virtual address range from the global shared
+ * virtual address space.
+ *
+ * @param alignment alignment requirement in bytes (a power of two)
+ * @param size size in bytes
+ *
+ * @return base address of the range, or NULL if the space is exhausted
+ */
+static inline void *__cvmx_shmem_vsbrk_64(uint64_t alignment, uint64_t size)
+{
+ uint64_t nbase_64 = CAST64(__smdr->break64);
+ void *nbase = NULL;
+
+ /* Round the break up to the next 'alignment' boundary (power of two) */
+ if (nbase_64 & (alignment - 1))
+ nbase_64 += alignment - (nbase_64 & (alignment - 1));
+
+ if (nbase_64 + size < CVMX_SHMEM_VADDR64_END)
+ {
+ nbase = CASTPTR(void *, nbase_64);
+ __smdr->break64 = nbase + size;
+ }
+
+ return nbase;
+}
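+
+/* Worked example (illustrative values): with break64 == 0x500012345 and
+ * alignment == size == 0x10000, the break is first rounded up to
+ * 0x500020000, that address is returned, and break64 advances to
+ * 0x500030000.
+ */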
+
+/**
+ * @INTERNAL
+ * Initialize all SMDR entries; this only needs to be called once.
+ *
+ * @param smdr pointer to the SMDR
+ */
+static inline void __smdr_new(struct cvmx_shmem_smdr *smdr)
+{
+
+ if (smdr != NULL)
+ {
+ int i;
+
+ cvmx_spinlock_init (&smdr->lock);
+ cvmx_spinlock_lock (&smdr->lock);
+
+ for (i = 0; i < CVMX_SHMEM_NUM_DSCPTR; i++)
+ {
+ smdr->shmd[i].owner = CVMX_SHMEM_OWNER_NONE;
+ smdr->shmd[i].is_named_block = 0;
+ smdr->shmd[i].use_count = 0;
+ smdr->shmd[i].name = NULL;
+ smdr->shmd[i].vaddr = NULL;
+ smdr->shmd[i].paddr = 0;
+ smdr->shmd[i].size = 0;
+ smdr->shmd[i].alignment = 0;
+ }
+
+ /* Init vaddr */
+ smdr->break64 = (void *)CVMX_SHMEM_VADDR64_START;
+ cvmx_spinlock_unlock (&smdr->lock);
+ }
+
+ /* Make sure the shmem descriptor region is created */
+ __CHECK_APP_SMDR;
+}
+
+
+
+/**
+ * @INTERNAL
+ * Initialize the __smdr pointer from the SMDR if it exists already. If not,
+ * create a new one. Once the SMDR is created (as a bootmem named block), it
+ * is persistent.
+ */
+static inline struct cvmx_shmem_smdr *__smdr_init()
+{
+ const cvmx_bootmem_named_block_desc_t *smdr_nblk = NULL;
+ size_t smdr_size = sizeof(*__smdr);
+ char *smdr_name = CVMX_SHMEM_DSCPTR_NAME;
+
+ __smdr = (struct cvmx_shmem_smdr *) cvmx_bootmem_alloc_named(smdr_size, 0x10000, smdr_name);
+
+ if (__smdr)
+ __smdr_new (__smdr);
+ else
+ {
+ /* Check if SMDR exists already */
+ smdr_nblk = cvmx_bootmem_find_named_block(smdr_name);
+ if (smdr_nblk)
+ {
+ __smdr = (struct cvmx_shmem_smdr *)
+ (cvmx_phys_to_ptr(smdr_nblk->base_addr));
+
+ cvmx_spinlock_lock (&__smdr->lock);
+ if (smdr_nblk->size != smdr_size)
+ {
+ cvmx_dprintf("SMDR named block is created by another "
+ "application with different size %lu, "
+ "expecting %lu \n",
+ (long unsigned int)smdr_nblk->size, (long unsigned int)smdr_size);
+ __smdr = NULL;
+ }
+ cvmx_spinlock_unlock (&__smdr->lock);
+ }
+ }
+
+ if (!__smdr)
+ cvmx_dprintf("cvmx_shmem: Failed to allocate or find SMDR from bootmem \n");
+
+ return __smdr;
+}
+
+
+/**
+ * @INTERNAL
+ * Generic iterator over all SMDR entries
+ *
+ * @param f functor invoked for every descriptor; iteration stops as
+ * soon as it returns non-NULL
+ * @param param opaque pointer passed through to the functor
+ *
+ * @return the descriptor the iteration stopped at, or NULL
+ */
+static struct cvmx_shmem_dscptr *__smdr_iterator(struct cvmx_shmem_dscptr *(*f)(struct cvmx_shmem_dscptr *dscptr, void *p), void *param )
+{
+ struct cvmx_shmem_dscptr *d, *dscptr = NULL;
+ int i;
+
+ __CHECK_APP_SMDR;
+
+ for (i = 0; i < CVMX_SHMEM_NUM_DSCPTR; i++)
+ {
+ d = &__smdr->shmd[i];
+ if ((dscptr = (*f)(d, param)) != NULL)
+ break; /* stop iteration */
+ }
+
+ return dscptr;
+}
+
+
+/**
+ * @INTERNAL
+ * SMDR name-match functor, to be used with the iterator.
+ *
+ * @param dscptr descriptor passed in by the iterator
+ * @param name string to match against
+ *
+ * @return !NULL descriptor matched
+ * NULL no match
+ */
+static struct cvmx_shmem_dscptr *__cvmx_shmem_smdr_match_name(struct cvmx_shmem_dscptr *dscptr, void *name)
+{
+ char *name_to_match = (char *) name;
+ struct cvmx_shmem_dscptr *ret = NULL;
+
+ if (dscptr->owner == CVMX_SHMEM_OWNER_NONE)
+ return NULL;
+
+ if (strcmp(dscptr->name, name_to_match) == 0)
+ ret = dscptr;
+
+ return ret;
+}
+
+/**
+ * @INTERNAL
+ * Find a descriptor by name
+ *
+ * @param name string to match against
+ *
+ * @return !NULL descriptor matched
+ * NULL no match
+ */
+static struct cvmx_shmem_dscptr *__cvmx_shmem_smdr_find_by_name(char *name)
+{
+ return __smdr_iterator( __cvmx_shmem_smdr_match_name, name);
+}
+
+/**
+ * @INTERNAL
+ * SMDR is-free functor, to be used with the iterator.
+ *
+ * @param dscptr descriptor passed in by the iterator
+ * @param nouse unused
+ *
+ * @return !NULL descriptor is free
+ * NULL descriptor is not free
+ */
+static struct cvmx_shmem_dscptr *__cvmx_shmem_smdr_is_free(struct cvmx_shmem_dscptr* dscptr, void *nouse)
+{
+ if (dscptr->owner == CVMX_SHMEM_OWNER_NONE)
+ return dscptr;
+ else
+ return NULL;
+}
+
+/**
+ * @INTERNAL
+ * Search SMDR to find the first free descriptor
+ *
+ * @return !NULL free descriptor found
+ * NULL nothing found
+ */
+struct cvmx_shmem_dscptr *__cvmx_shmem_smdr_find_free_dscptr(void)
+{
+ return __smdr_iterator(__cvmx_shmem_smdr_is_free, NULL);
+}
+
+/**
+ * @INTERNAL
+ * free a descriptor
+ *
+ * @param dscptr descriptor to be freed
+ */
+static void __cvmx_shmem_smdr_free(struct cvmx_shmem_dscptr *dscptr)
+{
+ dscptr->owner = CVMX_SHMEM_OWNER_NONE;
+}
+
+
+/**
+ * Per-core shmem init function
+ *
+ * @return cvmx_shmem_smdr* pointer to __smdr
+ */
+struct cvmx_shmem_smdr *cvmx_shmem_init()
+{
+ return __smdr_init();
+}
+
+/**
+ * Open shared memory backed by a bootmem named block
+ *
+ * @param name named block name
+ * @param size size in bytes (clamped to a minimum of 4 KB and rounded
+ * up to a power of two)
+ * @param oflag open flags (CVMX_SHMEM_O_*)
+ *
+ * @return descriptor of the opened named block, or NULL on failure
+ */
+struct cvmx_shmem_dscptr *cvmx_shmem_named_block_open(char *name, uint32_t size, int oflag)
+{
+ const cvmx_bootmem_named_block_desc_t *shmem_nblk = NULL;
+ struct cvmx_shmem_dscptr *dscptr = NULL;
+ int nblk_allocated = 0; /* Assume we don't need to allocate a new
+ bootmem block */
+ void *vaddr = NULL;
+ const uint64_t size_4k = 4*1024, size_512mb = 512*1024*1024;
+
+ __CHECK_APP_SMDR;
+
+ /* Check the size: it must be at least 4 KB and no bigger than 512 MB */
+ if (size > size_512mb) {
+ cvmx_dprintf("Shared memory size cannot be bigger than 512MB\n");
+ return NULL;
+ }
+ if (size < size_4k)
+ size = size_4k;
+
+ size = __upper_power_of_two(size);
+
+ cvmx_spinlock_lock(&__smdr->lock);
+
+ shmem_nblk = cvmx_bootmem_find_named_block(name);
+ if ((shmem_nblk == NULL) && (oflag & CVMX_SHMEM_O_CREAT))
+ {
+ void *p;
+ /* The named block does not exist, create it if caller specifies
+ the O_CREAT flag */
+ nblk_allocated = 1;
+ p = cvmx_bootmem_alloc_named(size, size, name);
+ if (p)
+ shmem_nblk = cvmx_bootmem_find_named_block(name);
+#ifdef DEBUG
+ cvmx_dprintf("cvmx-shmem-dbg:"
+ "creating a new block %s: blk %p, shmem_nblk %p \n",
+ name, p, shmem_nblk);
+#endif
+ }
+
+ if (shmem_nblk == NULL)
+ goto err;
+
+ /* We are now holding a valid named block */
+
+ dscptr = __cvmx_shmem_smdr_find_by_name(name);
+ if (dscptr)
+ {
+ if (nblk_allocated)
+ {
+ /* name conflict between bootmem name space and SMDR name space */
+ cvmx_dprintf("cvmx-shmem: SMDR descriptor name conflict, %s \n", name);
+ goto err;
+ }
+ /* Make sure size and alignment match the existing descriptor */
+ if ((size != dscptr->size) || (size != dscptr->alignment))
+ goto err;
+ }
+ else
+ {
+ /* Create a new descriptor */
+ dscptr = __cvmx_shmem_smdr_find_free_dscptr();
+ if (dscptr)
+ goto init;
+ else
+ {
+ cvmx_dprintf("cvmx-shmem: SMDR out of descriptors \n");
+ goto err;
+ }
+ }
+
+ /* Maintain the reference count */
+ if (dscptr != NULL)
+ dscptr->use_count += 1;
+
+ cvmx_spinlock_unlock(&__smdr->lock);
+ return dscptr;
+
+err:
+#ifdef DEBUG
+ cvmx_dprintf("cvmx-shmem-dbg: named block open failed \n");
+#endif
+
+ if (dscptr)
+ __cvmx_shmem_smdr_free(dscptr);
+ if (shmem_nblk && nblk_allocated)
+ cvmx_bootmem_free_named(name);
+ cvmx_spinlock_unlock(&__smdr->lock);
+
+ return NULL;
+
+init:
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx-shmem-dbg: init SMDR descriptor %p \n", dscptr);
+#endif
+
+ /* Assign vaddr for single address space mapping */
+ vaddr = __cvmx_shmem_vsbrk_64(size, size);
+ if (vaddr == NULL) {
+ /* Failed to allocate virtual address, clean up */
+ goto err;
+ }
+
+#ifdef DEBUG
+ cvmx_dprintf("cmvx-shmem-dbg: allocated vaddr %p \n", vaddr);
+#endif
+ dscptr->vaddr = vaddr;
+
+ /* Store descriptor information, name, alignment,size... */
+ dscptr->owner = cvmx_get_core_num();
+ dscptr->is_named_block = 1;
+ dscptr->use_count = 1;
+ dscptr->name = shmem_nblk->name;
+ dscptr->paddr = shmem_nblk->base_addr;
+ dscptr->size = size;
+ dscptr->alignment = size;
+
+ /* Store permission bits */
+ if (oflag & CVMX_SHMEM_O_WRONLY)
+ dscptr->p_wronly = 1;
+ if (oflag & CVMX_SHMEM_O_RDWR)
+ dscptr->p_rdwr = 1;
+
+ cvmx_spinlock_unlock(&__smdr->lock);
+ return dscptr;
+}
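+
+/* Usage sketch (illustrative; the block name and size are hypothetical,
+ * while the cvmx_shmem_* calls and CVMX_SHMEM_* flags are the API above):
+ *
+ * struct cvmx_shmem_dscptr *d;
+ * void *va;
+ *
+ * cvmx_shmem_init();
+ * d = cvmx_shmem_named_block_open("my_shm", 1 << 20,
+ * CVMX_SHMEM_O_RDWR | CVMX_SHMEM_O_CREAT);
+ * va = cvmx_shmem_map(d, CVMX_SHMEM_MAP_PROT_READ | CVMX_SHMEM_MAP_PROT_WRITE);
+ * ... use the shared region at va ...
+ * cvmx_shmem_unmap(d);
+ * cvmx_shmem_close(d, 1);
+ */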
+
+/**
+ * @INTERNAL
+ *
+ * For stand-alone SE applications only.
+ *
+ * Add TLB mapping to map the shared memory
+ *
+ * @param dscptr shared memory descriptor
+ * @param pflag protection flags
+ *
+ * @return vaddr the virtual address mapped for the shared memory
+ */
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+void *__cvmx_shmem_map_standalone(struct cvmx_shmem_dscptr *dscptr, int pflag)
+{
+ int free_index;
+
+ /* Find a free tlb entry */
+ free_index = cvmx_tlb_allocate_runtime_entry();
+
+ if (free_index < 0)
+ {
+ cvmx_dprintf("cvmx-shmem: shmem_map failed, out of TLB entries\n");
+ return NULL;
+ }
+
+#ifdef DEBUG
+ cvmx_dprintf("cmvx-shmem-dbg:"
+ "shmem_map TLB %d: vaddr %p paddr %lx, size %x \n",
+ free_index, dscptr->vaddr, dscptr->paddr, dscptr->size );
+#endif
+
+ cvmx_tlb_write_runtime_entry(free_index, CAST64(dscptr->vaddr),
+ dscptr->paddr, dscptr->size,
+ TLB_DIRTY | TLB_VALID | TLB_GLOBAL);
+
+ return dscptr->vaddr;
+}
+#endif
+
+/**
+ * @INTERNAL
+ *
+ * For Linux user application only
+ *
+ * mmap the shared memory
+ *
+ * @param dscptr shared memory descriptor
+ * @param pflag protection flags
+ *
+ * @return vaddr the virtual address mapped for the shared memory
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+static inline void *__cvmx_shmem_map_linux(struct cvmx_shmem_dscptr *dscptr, int pflag)
+{
+ void *vaddr = NULL;
+
+ if(__cvmx_shmem_devmemfd == 0)
+ {
+ __cvmx_shmem_devmemfd = open("/dev/mem", O_RDWR);
+ if (__cvmx_shmem_devmemfd < 0)
+ {
+ cvmx_dprintf("Failed to open /dev/mem\n");
+ exit(-1);
+ }
+ }
+
+ /* The offset into /dev/mem is the physical address of the named block */
+ vaddr = mmap(dscptr->vaddr, dscptr->size, PROT_READ|PROT_WRITE,
+ MAP_SHARED, __cvmx_shmem_devmemfd, dscptr->paddr);
+ if (vaddr == MAP_FAILED)
+ return NULL;
+
+ /* Make sure the mmap maps to the same virtual address specified in
+ * the descriptor
+ */
+ if (vaddr != dscptr->vaddr)
+ {
+ munmap(vaddr, dscptr->size);
+ vaddr = NULL;
+ }
+ return vaddr;
+}
+#endif
+
+/**
+ * cvmx_shmem API
+ *
+ * Add mapping for the shared memory
+ *
+ * @param dscptr shared memory descriptor
+ * @param pflag protection flags
+ *
+ * @return vaddr the virtual address mapped for the shared memory
+ */
+void *cvmx_shmem_map(struct cvmx_shmem_dscptr *dscptr, int pflag)
+{
+ void *vaddr = NULL;
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+ vaddr = __cvmx_shmem_map_linux(dscptr, pflag);
+#else
+ vaddr = __cvmx_shmem_map_standalone(dscptr, pflag);
+#endif
+ return vaddr;
+}
+
+
+/**
+ * @INTERNAL
+ *
+ * For Linux user application only
+ *
+ * unmap the shared memory
+ *
+ * @param dscptr shared memory descriptor
+ *
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+static inline void __cvmx_shmem_unmap_linux(struct cvmx_shmem_dscptr* dscptr)
+{
+ if (__cvmx_shmem_devmemfd && dscptr)
+ munmap(dscptr->vaddr, dscptr->size);
+}
+#endif
+
+
+/**
+ * @INTERNAL
+ *
+ * For stand-alone SE applications only.
+ *
+ * unmap the shared memory
+ *
+ * @param dscptr shared memory descriptor
+ *
+ */
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+static inline void
+__cvmx_shmem_unmap_standalone(struct cvmx_shmem_dscptr *dscptr)
+{
+ int index;
+
+ index = cvmx_tlb_lookup(CAST64(dscptr->vaddr));
+
+#ifdef DEBUG
+ cvmx_dprintf("cmvx-shmem-dbg:"
+ "shmem_unmap TLB %d \n", index);
+#endif
+ cvmx_tlb_free_runtime_entry(index);
+}
+#endif
+
+/**
+ * unmap the shared memory
+ *
+ * @param dscptr shared memory descriptor
+ *
+ */
+void cvmx_shmem_unmap(struct cvmx_shmem_dscptr *dscptr)
+{
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+ __cvmx_shmem_unmap_linux(dscptr);
+#else
+ __cvmx_shmem_unmap_standalone(dscptr);
+#endif
+}
+
+/**
+ * @INTERNAL
+ *
+ * Common implementation of closing a descriptor.
+ *
+ * @param dscptr shared memory descriptor
+ * @param remove 1: remove the descriptor and named block if this
+ * is the last user of the descriptor
+ * 0: do not remove
+ * @return 0: Success
+ * !0: Failed
+ *
+ */
+static inline int __cvmx_shmem_close_dscptr(struct cvmx_shmem_dscptr *dscptr, int remove)
+{
+ cvmx_spinlock_lock(&dscptr->lock);
+
+ if (dscptr->use_count > 0)
+ dscptr->use_count -= 1;
+
+ if ((dscptr->use_count == 0) && remove)
+ {
+ /* Free this descriptor */
+ __cvmx_shmem_smdr_free(dscptr);
+
+ /* Free the named block if this is the last user and the block
+ was created by the application */
+ if (dscptr->is_named_block)
+ {
+#ifdef DEBUG
+ cvmx_dprintf("cvmx-shmem-dbg: remove named block %s \n", dscptr->name);
+#endif
+ cvmx_bootmem_phy_named_block_free(dscptr->name, 0);
+ }
+ }
+ cvmx_spinlock_unlock(&dscptr->lock);
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ *
+ * For stand-alone SE applications only.
+ *
+ * close a descriptor.
+ *
+ * @param dscptr shared memory descriptor
+ * @param remove 1: remove the descriptor and named block if this
+ * is the last user of the descriptor
+ * 0: do not remove
+ * @return 0: Success
+ * !0: Failed
+ *
+ */
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+static inline int __cvmx_shmem_close_standalone(struct cvmx_shmem_dscptr *dscptr, int remove)
+{
+ return __cvmx_shmem_close_dscptr(dscptr, remove);
+}
+#endif
+
+/**
+ * @INTERNAL
+ *
+ * For Linux user application only.
+ *
+ * close a descriptor.
+ *
+ * @param dscptr shared memory descriptor
+ * @param remove 1: remove the descriptor and named block if this
+ * is the last user of the descriptor
+ * 0: do not remove
+ * @return 0: Success
+ * !0: Failed
+ *
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+int __cvmx_shmem_close_linux(struct cvmx_shmem_dscptr *dscptr, int remove)
+{
+ int ret;
+ ret = __cvmx_shmem_close_dscptr(dscptr, remove);
+
+ if (ret && __cvmx_shmem_devmemfd)
+ {
+ close(__cvmx_shmem_devmemfd);
+ __cvmx_shmem_devmemfd = 0;
+ }
+
+ return ret;
+
+}
+#endif
+
+/**
+ *
+ * close a descriptor.
+ *
+ * @param dscptr shared memory descriptor
+ * @param remove 1: remove the descriptor and named block if this
+ * is the last user of the descriptor
+ * 0: do not remove
+ * @return 0: Success
+ * !0: Failed
+ *
+ */
+int cvmx_shmem_close(struct cvmx_shmem_dscptr *dscptr, int remove)
+{
+ int ret;
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+ ret = __cvmx_shmem_close_linux(dscptr, remove);
+#else
+ ret = __cvmx_shmem_close_standalone(dscptr, remove);
+#endif
+ return ret;
+}
+
+#ifdef DEBUG
+/**
+ * @INTERNAL
+ * SMDR non-free descriptor dump functor, to be used with the iterator.
+ *
+ * @param dscptr descriptor passed in by the iterator
+ * @param nouse unused
+ *
+ * @return NULL always
+ */
+static struct cvmx_shmem_dscptr *__cvmx_shmem_smdr_display_dscptr(struct cvmx_shmem_dscptr *dscptr, void *nouse)
+{
+ if ((dscptr != NULL) && (dscptr->owner != CVMX_SHMEM_OWNER_NONE))
+ {
+ cvmx_dprintf(" %s: phy: %lx, size %d, alignment %lx, virt %p use_count %d\n",
+ dscptr->name, dscptr->paddr,
+ dscptr->size, dscptr->alignment,
+ dscptr->vaddr, dscptr->use_count);
+ }
+
+ return NULL;
+}
+#endif
+
+/**
+ * SMDR descriptor show
+ *
+ * List all non-free descriptors (effective only when DEBUG is defined)
+ */
+void cvmx_shmem_show(void)
+{
+ __CHECK_APP_SMDR;
+
+#ifdef DEBUG
+ cvmx_dprintf("SMDR descriptor list: \n");
+ cvmx_spinlock_lock(&__smdr->lock);
+ __smdr_iterator(__cvmx_shmem_smdr_display_dscptr, NULL);
+ cvmx_spinlock_unlock(&__smdr->lock);
+ cvmx_dprintf("\n\n");
+#endif
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-shmem.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-shmem.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-shmem.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-shmem.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,140 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+#ifndef __CVMX_SHMEM_H__
+#define __CVMX_SHMEM_H__
+
+/**
+ * @file
+ *
+ * cvmx-shmem provides APIs for setting up shared memory between Linux
+ * and simple executive applications.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "cvmx-spinlock.h"
+
+#define CVMX_SHMEM_NUM_DSCPTR 8
+#define CVMX_SHMEM_DSCPTR_NAME "SMDR"
+
+#define CVMX_SHMEM_O_RDONLY 0x00
+#define CVMX_SHMEM_O_WRONLY 0x01
+#define CVMX_SHMEM_O_RDWR 0x02
+#define CVMX_SHMEM_O_CREAT 0x04
+
+#define CVMX_SHMEM_MAP_PROT_READ 0x01
+#define CVMX_SHMEM_MAP_PROT_WRITE 0x02
+#define CVMX_SHMEM_MAP_EXEC 0x04
+
+#define CVMX_SHMEM_OWNER_NONE 0xff
+
+#define CVMX_SHMEM_VADDR64_START 0x500000000ULL
+#define CVMX_SHMEM_VADDR64_END 0x600000000ULL
+
+#define CVMX_SHMEM_VADDR32_START 0x10000000
+#define CVMX_SHMEM_VADDR32_END 0x18000000
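+
+/* Size note: the 64-bit window above spans 0x100000000 bytes (4 GB) and is
+ * the pool __cvmx_shmem_vsbrk_64() assigns ranges from; the 32-bit window
+ * spans 0x8000000 bytes (128 MB). */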
+
+struct cvmx_shmem_dscptr {
+ cvmx_spinlock_t lock;
+ uint64_t owner: 8;
+ uint64_t is_named_block: 1;
+ uint64_t p_wronly: 1;
+ uint64_t p_rdwr: 1;
+ int32_t use_count; /* must use atomic operation to maintain count */
+ const char *name;
+ void *vaddr;
+ uint64_t paddr;
+ uint32_t size;
+ uint64_t alignment;
+};
+
+struct cvmx_shmem_smdr {
+ cvmx_spinlock_t lock;
+ struct cvmx_shmem_dscptr shmd[CVMX_SHMEM_NUM_DSCPTR];
+ void *break64; /* Keep track of unused 64 bit virtual address space */
+};
+
+
+struct cvmx_shmem_smdr *cvmx_shmem_init(void);
+
+/**
+ * Create a piece of shared memory out of a bootmem named block
+ *
+ * @param name Named block name
+ * @param size Size in bytes
+ * @param oflag Create/open flags (CVMX_SHMEM_O_*)
+ */
+struct cvmx_shmem_dscptr *cvmx_shmem_named_block_open(char *name, uint32_t size, int oflag);
+
+/**
+ * Update TLB mapping based on the descriptor
+ */
+void* cvmx_shmem_map(struct cvmx_shmem_dscptr *desc, int pflag);
+
+/**
+ * Remove the TLB mapping created for the descriptor
+ */
+void cvmx_shmem_unmap(struct cvmx_shmem_dscptr *desc);
+
+
+/**
+ * Close the shared memory
+ *
+ * @param remove Remove the named block if it was created by the application
+ */
+int cvmx_shmem_close(struct cvmx_shmem_dscptr *desc, int remove);
+
+/**
+ * Debug function, dump all SMDR descriptors
+ */
+void cvmx_shmem_show(void);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-shmem.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-sim-magic.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-sim-magic.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-sim-magic.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,199 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * This file defines ASM primitives for SIM magic functions.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ *
+ */
+#ifndef __CVMX_SIM_MAGIC_H__
+#define __CVMX_SIM_MAGIC_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Note: the following magic functions are only useful in the simulator
+ * environment. A typical simple executive application should not use
+ * these functions. Their access functions are defined and implemented in
+ * newlib:
+ * SIM_MAGIC_PUTS
+ * SIM_MAGIC_WRITE
+ * SIM_MAGIC_READ
+ * SIM_MAGIC_OPEN
+ * SIM_MAGIC_CLOSE
+ * SIM_MAGIC_STAT
+ * SIM_MAGIC_FSTAT
+ * SIM_MAGIC_LSEEK
+ * SIM_MAGIC_ALLOC_MEM
+ */
+
+/* Assembler macros for accessing simulator magic functions */
+#define OCTEON_SIM_MAGIC_TRAP_ADDRESS 0x8000000feffe0000ull
+
+/* Reg t9 (r25) specifies the actual magic function */
+#define SIM_MAGIC_PUTS 0x05
+#define SIM_MAGIC_SIMPRINTF 0x06
+#define SIM_MAGIC_WRITE 0x07
+#define SIM_MAGIC_READ 0x08
+#define SIM_MAGIC_OPEN 0x09
+#define SIM_MAGIC_CLOSE 0x0A
+#define SIM_MAGIC_STAT 0x0B
+#define SIM_MAGIC_FSTAT 0x0C
+#define SIM_MAGIC_LSEEK 0x0D
+#define SIM_MAGIC_ALLOC_MEM 0x20
+#define SIM_MAGIC_GET_CPUFREQ 0x31 /* SDK 1.9 release and after */
+#define SIM_MAGIC_GET_MEMFREQ 0x32 /* SDK 1.9 release and after */
+#define SIM_MAGIC_GET_IOFREQ 0x33 /* SDK 2.0 release and after, only set in Octeon2 */
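+
+/* Calling convention used by the helpers below (see their asm bodies): the
+ * magic trap address is placed in $15, the function number in t9 ($25), and
+ * a result, when there is one, comes back in $2 (v0). */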
+
+/**
+ * @INTERNAL
+ * SIM magic implementation function with a return code.
+ *
+ * @param func_no SIM magic function to invoke
+ *
+ * @return Result of the SIM magic function
+ */
+static inline int __cvmx_sim_magic_return(unsigned long long func_no)
+{
+ register unsigned long long magic_addr asm ("$15");
+ register unsigned long long magic_func asm ("$25"); /* t9 */
+ int ret;
+
+ magic_addr = OCTEON_SIM_MAGIC_TRAP_ADDRESS;
+ magic_func = func_no;
+ asm volatile (
+ "dadd $24, $31, $0 \n"
+ "jalr $15 \n"
+ "dadd $31, $24, $0 \n"
+ "move %0, $2"
+ : "=r" (ret)
+ : "r" (magic_addr), "r" (magic_func)
+ : "$2", "$24" );
+
+
+ return ret;
+}
+
+/**
+ * @INTERNAL
+ * SIM magic implementation function without a return code.
+ *
+ * @param func_no SIM magic function to invoke
+ */
+static inline void __cvmx_sim_magic_no_return(unsigned long long func_no)
+{
+ register unsigned long long magic_addr asm ("$15");
+ register unsigned long long magic_func asm ("$25"); /* t9 */
+
+ magic_addr = OCTEON_SIM_MAGIC_TRAP_ADDRESS;
+ magic_func = func_no;
+ asm volatile (
+ "dadd $24, $31, $0 \n"
+ "jalr $15 \n"
+ "dadd $31, $24, $0 \n"
+ :
+ : "r" (magic_addr), "r" (magic_func)
+ : "$24" );
+
+}
+
+/**
+ * @INTERNAL
+ * SIM magic printf function; only supports up to 8 parameters
+ *
+ * @param format
+ */
+static inline void __cvmx_sim_magic_simprintf(const char *format, ...)
+{
+ CVMX_SYNC;
+
+ __cvmx_sim_magic_no_return( SIM_MAGIC_SIMPRINTF);
+}
+
+/**
+ * Retrieve the CPU core clock frequency from the simulator
+ *
+ * @return simulated core frequency
+ */
+static inline int cvmx_sim_magic_get_cpufreq(void)
+{
+ CVMX_SYNC;
+
+ return __cvmx_sim_magic_return(SIM_MAGIC_GET_CPUFREQ);
+}
+
+/**
+ * Retrieve the DDR clock frequency from the simulator
+ *
+ * @return simulated DDR frequency
+ */
+static inline int cvmx_sim_magic_get_memfreq(void)
+{
+ CVMX_SYNC;
+
+ return __cvmx_sim_magic_return(SIM_MAGIC_GET_MEMFREQ);
+}
+
+/**
+ * Retrieve the I/O clock frequency from the simulator
+ *
+ * @return simulated I/O frequency
+ */
+static inline int cvmx_sim_magic_get_iofreq(void)
+{
+ CVMX_SYNC;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ return __cvmx_sim_magic_return(SIM_MAGIC_GET_IOFREQ);
+ else
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_SIM_MAGIC_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-sim-magic.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
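
As an aside on use: a simulator-only test could read these clocks directly.
The wrapper function below is hypothetical; the cvmx_sim_magic_* accessors
come from the header above and cvmx_dprintf() from the SDK:

    #include "cvmx.h"
    #include "cvmx-sim-magic.h"

    static void print_sim_clocks(void)
    {
        /* Values are only meaningful when running under the Octeon simulator. */
        cvmx_dprintf("cpu %d, mem %d, io %d\n",
                     cvmx_sim_magic_get_cpufreq(),
                     cvmx_sim_magic_get_memfreq(),
                     cvmx_sim_magic_get_iofreq());
    }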
Added: trunk/sys/contrib/octeon-sdk/cvmx-sli-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-sli-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-sli-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,6331 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-sli-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon sli.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_SLI_DEFS_H__
+#define __CVMX_SLI_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_BIST_STATUS CVMX_SLI_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_SLI_BIST_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_BIST_STATUS not supported on this chip\n");
+ return 0x0000000000000580ull;
+}
+#else
+#define CVMX_SLI_BIST_STATUS (0x0000000000000580ull)
+#endif
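+/* Usage note (illustrative): these macros supply CSR addresses for the
+ * generic accessors, e.g. uint64_t bist = cvmx_read_csr(CVMX_SLI_BIST_STATUS);
+ * with CVMX_ENABLE_CSR_ADDRESS_CHECKING set, the _FUNC() wrappers also warn
+ * when the register is not present on the running chip model. */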
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_CTL_PORTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_SLI_CTL_PORTX(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000050ull + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_SLI_CTL_PORTX(offset) (0x0000000000000050ull + ((offset) & 3) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_CTL_STATUS CVMX_SLI_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_SLI_CTL_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_CTL_STATUS not supported on this chip\n");
+ return 0x0000000000000570ull;
+}
+#else
+#define CVMX_SLI_CTL_STATUS (0x0000000000000570ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_DATA_OUT_CNT CVMX_SLI_DATA_OUT_CNT_FUNC()
+static inline uint64_t CVMX_SLI_DATA_OUT_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_DATA_OUT_CNT not supported on this chip\n");
+ return 0x00000000000005F0ull;
+}
+#else
+#define CVMX_SLI_DATA_OUT_CNT (0x00000000000005F0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_DBG_DATA CVMX_SLI_DBG_DATA_FUNC()
+static inline uint64_t CVMX_SLI_DBG_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_DBG_DATA not supported on this chip\n");
+ return 0x0000000000000310ull;
+}
+#else
+#define CVMX_SLI_DBG_DATA (0x0000000000000310ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_DBG_SELECT CVMX_SLI_DBG_SELECT_FUNC()
+static inline uint64_t CVMX_SLI_DBG_SELECT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_DBG_SELECT not supported on this chip\n");
+ return 0x0000000000000300ull;
+}
+#else
+#define CVMX_SLI_DBG_SELECT (0x0000000000000300ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_DMAX_CNT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_SLI_DMAX_CNT(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000400ull + ((offset) & 1) * 16;
+}
+#else
+#define CVMX_SLI_DMAX_CNT(offset) (0x0000000000000400ull + ((offset) & 1) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_DMAX_INT_LEVEL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_SLI_DMAX_INT_LEVEL(%lu) is invalid on this chip\n", offset);
+ return 0x00000000000003E0ull + ((offset) & 1) * 16;
+}
+#else
+#define CVMX_SLI_DMAX_INT_LEVEL(offset) (0x00000000000003E0ull + ((offset) & 1) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_DMAX_TIM(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_SLI_DMAX_TIM(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000420ull + ((offset) & 1) * 16;
+}
+#else
+#define CVMX_SLI_DMAX_TIM(offset) (0x0000000000000420ull + ((offset) & 1) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_INT_ENB_CIU CVMX_SLI_INT_ENB_CIU_FUNC()
+static inline uint64_t CVMX_SLI_INT_ENB_CIU_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_INT_ENB_CIU not supported on this chip\n");
+ return 0x0000000000003CD0ull;
+}
+#else
+#define CVMX_SLI_INT_ENB_CIU (0x0000000000003CD0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_INT_ENB_PORTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_SLI_INT_ENB_PORTX(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000340ull + ((offset) & 1) * 16;
+}
+#else
+#define CVMX_SLI_INT_ENB_PORTX(offset) (0x0000000000000340ull + ((offset) & 1) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_INT_SUM CVMX_SLI_INT_SUM_FUNC()
+static inline uint64_t CVMX_SLI_INT_SUM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_INT_SUM not supported on this chip\n");
+ return 0x0000000000000330ull;
+}
+#else
+#define CVMX_SLI_INT_SUM (0x0000000000000330ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_LAST_WIN_RDATA0 CVMX_SLI_LAST_WIN_RDATA0_FUNC()
+static inline uint64_t CVMX_SLI_LAST_WIN_RDATA0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_LAST_WIN_RDATA0 not supported on this chip\n");
+ return 0x0000000000000600ull;
+}
+#else
+#define CVMX_SLI_LAST_WIN_RDATA0 (0x0000000000000600ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_LAST_WIN_RDATA1 CVMX_SLI_LAST_WIN_RDATA1_FUNC()
+static inline uint64_t CVMX_SLI_LAST_WIN_RDATA1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_LAST_WIN_RDATA1 not supported on this chip\n");
+ return 0x0000000000000610ull;
+}
+#else
+#define CVMX_SLI_LAST_WIN_RDATA1 (0x0000000000000610ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_LAST_WIN_RDATA2 CVMX_SLI_LAST_WIN_RDATA2_FUNC()
+static inline uint64_t CVMX_SLI_LAST_WIN_RDATA2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_LAST_WIN_RDATA2 not supported on this chip\n");
+ return 0x00000000000006C0ull;
+}
+#else
+#define CVMX_SLI_LAST_WIN_RDATA2 (0x00000000000006C0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_LAST_WIN_RDATA3 CVMX_SLI_LAST_WIN_RDATA3_FUNC()
+static inline uint64_t CVMX_SLI_LAST_WIN_RDATA3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_LAST_WIN_RDATA3 not supported on this chip\n");
+ return 0x00000000000006D0ull;
+}
+#else
+#define CVMX_SLI_LAST_WIN_RDATA3 (0x00000000000006D0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MAC_CREDIT_CNT CVMX_SLI_MAC_CREDIT_CNT_FUNC()
+static inline uint64_t CVMX_SLI_MAC_CREDIT_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MAC_CREDIT_CNT not supported on this chip\n");
+ return 0x0000000000003D70ull;
+}
+#else
+#define CVMX_SLI_MAC_CREDIT_CNT (0x0000000000003D70ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MAC_CREDIT_CNT2 CVMX_SLI_MAC_CREDIT_CNT2_FUNC()
+static inline uint64_t CVMX_SLI_MAC_CREDIT_CNT2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MAC_CREDIT_CNT2 not supported on this chip\n");
+ return 0x0000000000003E10ull;
+}
+#else
+#define CVMX_SLI_MAC_CREDIT_CNT2 (0x0000000000003E10ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MAC_NUMBER CVMX_SLI_MAC_NUMBER_FUNC()
+static inline uint64_t CVMX_SLI_MAC_NUMBER_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MAC_NUMBER not supported on this chip\n");
+ return 0x0000000000003E00ull;
+}
+#else
+#define CVMX_SLI_MAC_NUMBER (0x0000000000003E00ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MEM_ACCESS_CTL CVMX_SLI_MEM_ACCESS_CTL_FUNC()
+static inline uint64_t CVMX_SLI_MEM_ACCESS_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MEM_ACCESS_CTL not supported on this chip\n");
+ return 0x00000000000002F0ull;
+}
+#else
+#define CVMX_SLI_MEM_ACCESS_CTL (0x00000000000002F0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_MEM_ACCESS_SUBIDX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset >= 12) && (offset <= 27)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 12) && (offset <= 27)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 12) && (offset <= 27)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset >= 12) && (offset <= 27)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset >= 12) && (offset <= 27))))))
+ cvmx_warn("CVMX_SLI_MEM_ACCESS_SUBIDX(%lu) is invalid on this chip\n", offset);
+ return 0x00000000000000E0ull + ((offset) & 31) * 16 - 16*12;
+}
+#else
+#define CVMX_SLI_MEM_ACCESS_SUBIDX(offset) (0x00000000000000E0ull + ((offset) & 31) * 16 - 16*12)
+#endif
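+
+/* Editorial note (not part of the original SDK sources): the SUBIDX address
+ * math above re-bases the legal index range 12..27 so that the first legal
+ * index lands on the register's base offset:
+ *   addr(12) = 0xE0 + (12 & 31)*16 - 16*12 = 0xE0
+ *   addr(27) = 0xE0 + (27 & 31)*16 - 16*12 = 0xE0 + 15*16 = 0x1D0
+ */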
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_ENB0 CVMX_SLI_MSI_ENB0_FUNC()
+static inline uint64_t CVMX_SLI_MSI_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_ENB0 not supported on this chip\n");
+ return 0x0000000000003C50ull;
+}
+#else
+#define CVMX_SLI_MSI_ENB0 (0x0000000000003C50ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_ENB1 CVMX_SLI_MSI_ENB1_FUNC()
+static inline uint64_t CVMX_SLI_MSI_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_ENB1 not supported on this chip\n");
+ return 0x0000000000003C60ull;
+}
+#else
+#define CVMX_SLI_MSI_ENB1 (0x0000000000003C60ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_ENB2 CVMX_SLI_MSI_ENB2_FUNC()
+static inline uint64_t CVMX_SLI_MSI_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_ENB2 not supported on this chip\n");
+ return 0x0000000000003C70ull;
+}
+#else
+#define CVMX_SLI_MSI_ENB2 (0x0000000000003C70ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_ENB3 CVMX_SLI_MSI_ENB3_FUNC()
+static inline uint64_t CVMX_SLI_MSI_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_ENB3 not supported on this chip\n");
+ return 0x0000000000003C80ull;
+}
+#else
+#define CVMX_SLI_MSI_ENB3 (0x0000000000003C80ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_RCV0 CVMX_SLI_MSI_RCV0_FUNC()
+static inline uint64_t CVMX_SLI_MSI_RCV0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_RCV0 not supported on this chip\n");
+ return 0x0000000000003C10ull;
+}
+#else
+#define CVMX_SLI_MSI_RCV0 (0x0000000000003C10ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_RCV1 CVMX_SLI_MSI_RCV1_FUNC()
+static inline uint64_t CVMX_SLI_MSI_RCV1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_RCV1 not supported on this chip\n");
+ return 0x0000000000003C20ull;
+}
+#else
+#define CVMX_SLI_MSI_RCV1 (0x0000000000003C20ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_RCV2 CVMX_SLI_MSI_RCV2_FUNC()
+static inline uint64_t CVMX_SLI_MSI_RCV2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_RCV2 not supported on this chip\n");
+ return 0x0000000000003C30ull;
+}
+#else
+#define CVMX_SLI_MSI_RCV2 (0x0000000000003C30ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_RCV3 CVMX_SLI_MSI_RCV3_FUNC()
+static inline uint64_t CVMX_SLI_MSI_RCV3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_RCV3 not supported on this chip\n");
+ return 0x0000000000003C40ull;
+}
+#else
+#define CVMX_SLI_MSI_RCV3 (0x0000000000003C40ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_RD_MAP CVMX_SLI_MSI_RD_MAP_FUNC()
+static inline uint64_t CVMX_SLI_MSI_RD_MAP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_RD_MAP not supported on this chip\n");
+ return 0x0000000000003CA0ull;
+}
+#else
+#define CVMX_SLI_MSI_RD_MAP (0x0000000000003CA0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_W1C_ENB0 CVMX_SLI_MSI_W1C_ENB0_FUNC()
+static inline uint64_t CVMX_SLI_MSI_W1C_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_W1C_ENB0 not supported on this chip\n");
+ return 0x0000000000003CF0ull;
+}
+#else
+#define CVMX_SLI_MSI_W1C_ENB0 (0x0000000000003CF0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_W1C_ENB1 CVMX_SLI_MSI_W1C_ENB1_FUNC()
+static inline uint64_t CVMX_SLI_MSI_W1C_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_W1C_ENB1 not supported on this chip\n");
+ return 0x0000000000003D00ull;
+}
+#else
+#define CVMX_SLI_MSI_W1C_ENB1 (0x0000000000003D00ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_W1C_ENB2 CVMX_SLI_MSI_W1C_ENB2_FUNC()
+static inline uint64_t CVMX_SLI_MSI_W1C_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_W1C_ENB2 not supported on this chip\n");
+ return 0x0000000000003D10ull;
+}
+#else
+#define CVMX_SLI_MSI_W1C_ENB2 (0x0000000000003D10ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_W1C_ENB3 CVMX_SLI_MSI_W1C_ENB3_FUNC()
+static inline uint64_t CVMX_SLI_MSI_W1C_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_W1C_ENB3 not supported on this chip\n");
+ return 0x0000000000003D20ull;
+}
+#else
+#define CVMX_SLI_MSI_W1C_ENB3 (0x0000000000003D20ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_W1S_ENB0 CVMX_SLI_MSI_W1S_ENB0_FUNC()
+static inline uint64_t CVMX_SLI_MSI_W1S_ENB0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_W1S_ENB0 not supported on this chip\n");
+ return 0x0000000000003D30ull;
+}
+#else
+#define CVMX_SLI_MSI_W1S_ENB0 (0x0000000000003D30ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_W1S_ENB1 CVMX_SLI_MSI_W1S_ENB1_FUNC()
+static inline uint64_t CVMX_SLI_MSI_W1S_ENB1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_W1S_ENB1 not supported on this chip\n");
+ return 0x0000000000003D40ull;
+}
+#else
+#define CVMX_SLI_MSI_W1S_ENB1 (0x0000000000003D40ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_W1S_ENB2 CVMX_SLI_MSI_W1S_ENB2_FUNC()
+static inline uint64_t CVMX_SLI_MSI_W1S_ENB2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_W1S_ENB2 not supported on this chip\n");
+ return 0x0000000000003D50ull;
+}
+#else
+#define CVMX_SLI_MSI_W1S_ENB2 (0x0000000000003D50ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_W1S_ENB3 CVMX_SLI_MSI_W1S_ENB3_FUNC()
+static inline uint64_t CVMX_SLI_MSI_W1S_ENB3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_W1S_ENB3 not supported on this chip\n");
+ return 0x0000000000003D60ull;
+}
+#else
+#define CVMX_SLI_MSI_W1S_ENB3 (0x0000000000003D60ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_MSI_WR_MAP CVMX_SLI_MSI_WR_MAP_FUNC()
+static inline uint64_t CVMX_SLI_MSI_WR_MAP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_MSI_WR_MAP not supported on this chip\n");
+ return 0x0000000000003C90ull;
+}
+#else
+#define CVMX_SLI_MSI_WR_MAP (0x0000000000003C90ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PCIE_MSI_RCV CVMX_SLI_PCIE_MSI_RCV_FUNC()
+static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PCIE_MSI_RCV not supported on this chip\n");
+ return 0x0000000000003CB0ull;
+}
+#else
+#define CVMX_SLI_PCIE_MSI_RCV (0x0000000000003CB0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PCIE_MSI_RCV_B1 CVMX_SLI_PCIE_MSI_RCV_B1_FUNC()
+static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_B1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PCIE_MSI_RCV_B1 not supported on this chip\n");
+ return 0x0000000000000650ull;
+}
+#else
+#define CVMX_SLI_PCIE_MSI_RCV_B1 (0x0000000000000650ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PCIE_MSI_RCV_B2 CVMX_SLI_PCIE_MSI_RCV_B2_FUNC()
+static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_B2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PCIE_MSI_RCV_B2 not supported on this chip\n");
+ return 0x0000000000000660ull;
+}
+#else
+#define CVMX_SLI_PCIE_MSI_RCV_B2 (0x0000000000000660ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PCIE_MSI_RCV_B3 CVMX_SLI_PCIE_MSI_RCV_B3_FUNC()
+static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_B3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PCIE_MSI_RCV_B3 not supported on this chip\n");
+ return 0x0000000000000670ull;
+}
+#else
+#define CVMX_SLI_PCIE_MSI_RCV_B3 (0x0000000000000670ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PKTX_CNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PKTX_CNTS(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000002400ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PKTX_CNTS(offset) (0x0000000000002400ull + ((offset) & 31) * 16)
+#endif
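+
+/* Editorial example (not part of the original SDK sources): polling the
+ * per-ring packet counters defined above. A minimal sketch, assuming the
+ * usual cvmx_read_csr() helper from cvmx.h and a caller-supplied base
+ * (hypothetical 'sli_base') that maps these register offsets into the CSR
+ * space in use.
+ */
+static inline uint64_t example_sli_pkt_cnts(uint64_t sli_base, unsigned long ring)
+{
+ /* CVMX_SLI_PKTX_CNTS(ring) yields the byte offset of ring's counter */
+ return cvmx_read_csr(sli_base + CVMX_SLI_PKTX_CNTS(ring));
+}
+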
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PKTX_INSTR_BADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PKTX_INSTR_BADDR(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000002800ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PKTX_INSTR_BADDR(offset) (0x0000000000002800ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PKTX_INSTR_BAOFF_DBELL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PKTX_INSTR_BAOFF_DBELL(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000002C00ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PKTX_INSTR_BAOFF_DBELL(offset) (0x0000000000002C00ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PKTX_INSTR_FIFO_RSIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PKTX_INSTR_FIFO_RSIZE(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000003000ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PKTX_INSTR_FIFO_RSIZE(offset) (0x0000000000003000ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PKTX_INSTR_HEADER(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PKTX_INSTR_HEADER(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000003400ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PKTX_INSTR_HEADER(offset) (0x0000000000003400ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PKTX_IN_BP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PKTX_IN_BP(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000003800ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PKTX_IN_BP(offset) (0x0000000000003800ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PKTX_OUT_SIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PKTX_OUT_SIZE(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000C00ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PKTX_OUT_SIZE(offset) (0x0000000000000C00ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PKTX_SLIST_BADDR(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PKTX_SLIST_BADDR(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000001400ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PKTX_SLIST_BADDR(offset) (0x0000000000001400ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PKTX_SLIST_BAOFF_DBELL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PKTX_SLIST_BAOFF_DBELL(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000001800ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PKTX_SLIST_BAOFF_DBELL(offset) (0x0000000000001800ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PKTX_SLIST_FIFO_RSIZE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PKTX_SLIST_FIFO_RSIZE(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000001C00ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PKTX_SLIST_FIFO_RSIZE(offset) (0x0000000000001C00ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_CNT_INT CVMX_SLI_PKT_CNT_INT_FUNC()
+static inline uint64_t CVMX_SLI_PKT_CNT_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_CNT_INT not supported on this chip\n");
+ return 0x0000000000001130ull;
+}
+#else
+#define CVMX_SLI_PKT_CNT_INT (0x0000000000001130ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_CNT_INT_ENB CVMX_SLI_PKT_CNT_INT_ENB_FUNC()
+static inline uint64_t CVMX_SLI_PKT_CNT_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_CNT_INT_ENB not supported on this chip\n");
+ return 0x0000000000001150ull;
+}
+#else
+#define CVMX_SLI_PKT_CNT_INT_ENB (0x0000000000001150ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_CTL CVMX_SLI_PKT_CTL_FUNC()
+static inline uint64_t CVMX_SLI_PKT_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_CTL not supported on this chip\n");
+ return 0x0000000000001220ull;
+}
+#else
+#define CVMX_SLI_PKT_CTL (0x0000000000001220ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_DATA_OUT_ES CVMX_SLI_PKT_DATA_OUT_ES_FUNC()
+static inline uint64_t CVMX_SLI_PKT_DATA_OUT_ES_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_DATA_OUT_ES not supported on this chip\n");
+ return 0x00000000000010B0ull;
+}
+#else
+#define CVMX_SLI_PKT_DATA_OUT_ES (0x00000000000010B0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_DATA_OUT_NS CVMX_SLI_PKT_DATA_OUT_NS_FUNC()
+static inline uint64_t CVMX_SLI_PKT_DATA_OUT_NS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_DATA_OUT_NS not supported on this chip\n");
+ return 0x00000000000010A0ull;
+}
+#else
+#define CVMX_SLI_PKT_DATA_OUT_NS (0x00000000000010A0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_DATA_OUT_ROR CVMX_SLI_PKT_DATA_OUT_ROR_FUNC()
+static inline uint64_t CVMX_SLI_PKT_DATA_OUT_ROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_DATA_OUT_ROR not supported on this chip\n");
+ return 0x0000000000001090ull;
+}
+#else
+#define CVMX_SLI_PKT_DATA_OUT_ROR (0x0000000000001090ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_DPADDR CVMX_SLI_PKT_DPADDR_FUNC()
+static inline uint64_t CVMX_SLI_PKT_DPADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_DPADDR not supported on this chip\n");
+ return 0x0000000000001080ull;
+}
+#else
+#define CVMX_SLI_PKT_DPADDR (0x0000000000001080ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_INPUT_CONTROL CVMX_SLI_PKT_INPUT_CONTROL_FUNC()
+static inline uint64_t CVMX_SLI_PKT_INPUT_CONTROL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_INPUT_CONTROL not supported on this chip\n");
+ return 0x0000000000001170ull;
+}
+#else
+#define CVMX_SLI_PKT_INPUT_CONTROL (0x0000000000001170ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_INSTR_ENB CVMX_SLI_PKT_INSTR_ENB_FUNC()
+static inline uint64_t CVMX_SLI_PKT_INSTR_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_INSTR_ENB not supported on this chip\n");
+ return 0x0000000000001000ull;
+}
+#else
+#define CVMX_SLI_PKT_INSTR_ENB (0x0000000000001000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_INSTR_RD_SIZE CVMX_SLI_PKT_INSTR_RD_SIZE_FUNC()
+static inline uint64_t CVMX_SLI_PKT_INSTR_RD_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_INSTR_RD_SIZE not supported on this chip\n");
+ return 0x00000000000011A0ull;
+}
+#else
+#define CVMX_SLI_PKT_INSTR_RD_SIZE (0x00000000000011A0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_INSTR_SIZE CVMX_SLI_PKT_INSTR_SIZE_FUNC()
+static inline uint64_t CVMX_SLI_PKT_INSTR_SIZE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_INSTR_SIZE not supported on this chip\n");
+ return 0x0000000000001020ull;
+}
+#else
+#define CVMX_SLI_PKT_INSTR_SIZE (0x0000000000001020ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_INT_LEVELS CVMX_SLI_PKT_INT_LEVELS_FUNC()
+static inline uint64_t CVMX_SLI_PKT_INT_LEVELS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_INT_LEVELS not supported on this chip\n");
+ return 0x0000000000001120ull;
+}
+#else
+#define CVMX_SLI_PKT_INT_LEVELS (0x0000000000001120ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_IN_BP CVMX_SLI_PKT_IN_BP_FUNC()
+static inline uint64_t CVMX_SLI_PKT_IN_BP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_IN_BP not supported on this chip\n");
+ return 0x0000000000001210ull;
+}
+#else
+#define CVMX_SLI_PKT_IN_BP (0x0000000000001210ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PKT_IN_DONEX_CNTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PKT_IN_DONEX_CNTS(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000002000ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PKT_IN_DONEX_CNTS(offset) (0x0000000000002000ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_IN_INSTR_COUNTS CVMX_SLI_PKT_IN_INSTR_COUNTS_FUNC()
+static inline uint64_t CVMX_SLI_PKT_IN_INSTR_COUNTS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_IN_INSTR_COUNTS not supported on this chip\n");
+ return 0x0000000000001200ull;
+}
+#else
+#define CVMX_SLI_PKT_IN_INSTR_COUNTS (0x0000000000001200ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_IN_PCIE_PORT CVMX_SLI_PKT_IN_PCIE_PORT_FUNC()
+static inline uint64_t CVMX_SLI_PKT_IN_PCIE_PORT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_IN_PCIE_PORT not supported on this chip\n");
+ return 0x00000000000011B0ull;
+}
+#else
+#define CVMX_SLI_PKT_IN_PCIE_PORT (0x00000000000011B0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_IPTR CVMX_SLI_PKT_IPTR_FUNC()
+static inline uint64_t CVMX_SLI_PKT_IPTR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_IPTR not supported on this chip\n");
+ return 0x0000000000001070ull;
+}
+#else
+#define CVMX_SLI_PKT_IPTR (0x0000000000001070ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_OUTPUT_WMARK CVMX_SLI_PKT_OUTPUT_WMARK_FUNC()
+static inline uint64_t CVMX_SLI_PKT_OUTPUT_WMARK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_OUTPUT_WMARK not supported on this chip\n");
+ return 0x0000000000001180ull;
+}
+#else
+#define CVMX_SLI_PKT_OUTPUT_WMARK (0x0000000000001180ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_OUT_BMODE CVMX_SLI_PKT_OUT_BMODE_FUNC()
+static inline uint64_t CVMX_SLI_PKT_OUT_BMODE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_OUT_BMODE not supported on this chip\n");
+ return 0x00000000000010D0ull;
+}
+#else
+#define CVMX_SLI_PKT_OUT_BMODE (0x00000000000010D0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_OUT_BP_EN CVMX_SLI_PKT_OUT_BP_EN_FUNC()
+static inline uint64_t CVMX_SLI_PKT_OUT_BP_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SLI_PKT_OUT_BP_EN not supported on this chip\n");
+ return 0x0000000000001240ull;
+}
+#else
+#define CVMX_SLI_PKT_OUT_BP_EN (0x0000000000001240ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_OUT_ENB CVMX_SLI_PKT_OUT_ENB_FUNC()
+static inline uint64_t CVMX_SLI_PKT_OUT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_OUT_ENB not supported on this chip\n");
+ return 0x0000000000001010ull;
+}
+#else
+#define CVMX_SLI_PKT_OUT_ENB (0x0000000000001010ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_PCIE_PORT CVMX_SLI_PKT_PCIE_PORT_FUNC()
+static inline uint64_t CVMX_SLI_PKT_PCIE_PORT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_PCIE_PORT not supported on this chip\n");
+ return 0x00000000000010E0ull;
+}
+#else
+#define CVMX_SLI_PKT_PCIE_PORT (0x00000000000010E0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_PORT_IN_RST CVMX_SLI_PKT_PORT_IN_RST_FUNC()
+static inline uint64_t CVMX_SLI_PKT_PORT_IN_RST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_PORT_IN_RST not supported on this chip\n");
+ return 0x00000000000011F0ull;
+}
+#else
+#define CVMX_SLI_PKT_PORT_IN_RST (0x00000000000011F0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_SLIST_ES CVMX_SLI_PKT_SLIST_ES_FUNC()
+static inline uint64_t CVMX_SLI_PKT_SLIST_ES_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_SLIST_ES not supported on this chip\n");
+ return 0x0000000000001050ull;
+}
+#else
+#define CVMX_SLI_PKT_SLIST_ES (0x0000000000001050ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_SLIST_NS CVMX_SLI_PKT_SLIST_NS_FUNC()
+static inline uint64_t CVMX_SLI_PKT_SLIST_NS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_SLIST_NS not supported on this chip\n");
+ return 0x0000000000001040ull;
+}
+#else
+#define CVMX_SLI_PKT_SLIST_NS (0x0000000000001040ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_SLIST_ROR CVMX_SLI_PKT_SLIST_ROR_FUNC()
+static inline uint64_t CVMX_SLI_PKT_SLIST_ROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_SLIST_ROR not supported on this chip\n");
+ return 0x0000000000001030ull;
+}
+#else
+#define CVMX_SLI_PKT_SLIST_ROR (0x0000000000001030ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_TIME_INT CVMX_SLI_PKT_TIME_INT_FUNC()
+static inline uint64_t CVMX_SLI_PKT_TIME_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_TIME_INT not supported on this chip\n");
+ return 0x0000000000001140ull;
+}
+#else
+#define CVMX_SLI_PKT_TIME_INT (0x0000000000001140ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_PKT_TIME_INT_ENB CVMX_SLI_PKT_TIME_INT_ENB_FUNC()
+static inline uint64_t CVMX_SLI_PKT_TIME_INT_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_PKT_TIME_INT_ENB not supported on this chip\n");
+ return 0x0000000000001160ull;
+}
+#else
+#define CVMX_SLI_PKT_TIME_INT_ENB (0x0000000000001160ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_PORTX_PKIND(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SLI_PORTX_PKIND(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000000800ull + ((offset) & 31) * 16;
+}
+#else
+#define CVMX_SLI_PORTX_PKIND(offset) (0x0000000000000800ull + ((offset) & 31) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SLI_S2M_PORTX_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_SLI_S2M_PORTX_CTL(%lu) is invalid on this chip\n", offset);
+ return 0x0000000000003D80ull + ((offset) & 3) * 16;
+}
+#else
+#define CVMX_SLI_S2M_PORTX_CTL(offset) (0x0000000000003D80ull + ((offset) & 3) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_SCRATCH_1 CVMX_SLI_SCRATCH_1_FUNC()
+static inline uint64_t CVMX_SLI_SCRATCH_1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_SCRATCH_1 not supported on this chip\n");
+ return 0x00000000000003C0ull;
+}
+#else
+#define CVMX_SLI_SCRATCH_1 (0x00000000000003C0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_SCRATCH_2 CVMX_SLI_SCRATCH_2_FUNC()
+static inline uint64_t CVMX_SLI_SCRATCH_2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_SCRATCH_2 not supported on this chip\n");
+ return 0x00000000000003D0ull;
+}
+#else
+#define CVMX_SLI_SCRATCH_2 (0x00000000000003D0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_STATE1 CVMX_SLI_STATE1_FUNC()
+static inline uint64_t CVMX_SLI_STATE1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_STATE1 not supported on this chip\n");
+ return 0x0000000000000620ull;
+}
+#else
+#define CVMX_SLI_STATE1 (0x0000000000000620ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_STATE2 CVMX_SLI_STATE2_FUNC()
+static inline uint64_t CVMX_SLI_STATE2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_STATE2 not supported on this chip\n");
+ return 0x0000000000000630ull;
+}
+#else
+#define CVMX_SLI_STATE2 (0x0000000000000630ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_STATE3 CVMX_SLI_STATE3_FUNC()
+static inline uint64_t CVMX_SLI_STATE3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_STATE3 not supported on this chip\n");
+ return 0x0000000000000640ull;
+}
+#else
+#define CVMX_SLI_STATE3 (0x0000000000000640ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_TX_PIPE CVMX_SLI_TX_PIPE_FUNC()
+static inline uint64_t CVMX_SLI_TX_PIPE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SLI_TX_PIPE not supported on this chip\n");
+ return 0x0000000000001230ull;
+}
+#else
+#define CVMX_SLI_TX_PIPE (0x0000000000001230ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_WINDOW_CTL CVMX_SLI_WINDOW_CTL_FUNC()
+static inline uint64_t CVMX_SLI_WINDOW_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_WINDOW_CTL not supported on this chip\n");
+ return 0x00000000000002E0ull;
+}
+#else
+#define CVMX_SLI_WINDOW_CTL (0x00000000000002E0ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_WIN_RD_ADDR CVMX_SLI_WIN_RD_ADDR_FUNC()
+static inline uint64_t CVMX_SLI_WIN_RD_ADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_WIN_RD_ADDR not supported on this chip\n");
+ return 0x0000000000000010ull;
+}
+#else
+#define CVMX_SLI_WIN_RD_ADDR (0x0000000000000010ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_WIN_RD_DATA CVMX_SLI_WIN_RD_DATA_FUNC()
+static inline uint64_t CVMX_SLI_WIN_RD_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_WIN_RD_DATA not supported on this chip\n");
+ return 0x0000000000000040ull;
+}
+#else
+#define CVMX_SLI_WIN_RD_DATA (0x0000000000000040ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_WIN_WR_ADDR CVMX_SLI_WIN_WR_ADDR_FUNC()
+static inline uint64_t CVMX_SLI_WIN_WR_ADDR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_WIN_WR_ADDR not supported on this chip\n");
+ return 0x0000000000000000ull;
+}
+#else
+#define CVMX_SLI_WIN_WR_ADDR (0x0000000000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_WIN_WR_DATA CVMX_SLI_WIN_WR_DATA_FUNC()
+static inline uint64_t CVMX_SLI_WIN_WR_DATA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_WIN_WR_DATA not supported on this chip\n");
+ return 0x0000000000000020ull;
+}
+#else
+#define CVMX_SLI_WIN_WR_DATA (0x0000000000000020ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SLI_WIN_WR_MASK CVMX_SLI_WIN_WR_MASK_FUNC()
+static inline uint64_t CVMX_SLI_WIN_WR_MASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SLI_WIN_WR_MASK not supported on this chip\n");
+ return 0x0000000000000030ull;
+}
+#else
+#define CVMX_SLI_WIN_WR_MASK (0x0000000000000030ull)
+#endif
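+
+/* Editorial example (not part of the original SDK sources): the window
+ * registers defined above implement a simple indirect-access handshake --
+ * software writes a target address to SLI_WIN_RD_ADDR, then reads the
+ * returned data from SLI_WIN_RD_DATA (writes use SLI_WIN_WR_ADDR,
+ * SLI_WIN_WR_DATA and SLI_WIN_WR_MASK). A minimal sketch, assuming the
+ * usual cvmx_read_csr()/cvmx_write_csr() helpers from cvmx.h and a
+ * caller-supplied base (hypothetical 'sli_base').
+ */
+static inline uint64_t example_sli_window_read(uint64_t sli_base, uint64_t target_addr)
+{
+ cvmx_write_csr(sli_base + CVMX_SLI_WIN_RD_ADDR, target_addr);
+ /* The read of WIN_RD_DATA returns the data fetched from target_addr */
+ return cvmx_read_csr(sli_base + CVMX_SLI_WIN_RD_DATA);
+}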
+
+/**
+ * cvmx_sli_bist_status
+ *
+ * SLI_BIST_STATUS = SLI's BIST Status Register
+ *
+ * Results from BIST runs of SLI's memories.
+ */
+union cvmx_sli_bist_status {
+ uint64_t u64;
+ struct cvmx_sli_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ncb_req : 1; /**< BIST Status for NCB Request FIFO */
+ uint64_t n2p0_c : 1; /**< BIST Status for N2P Port0 Cmd */
+ uint64_t n2p0_o : 1; /**< BIST Status for N2P Port0 Data */
+ uint64_t n2p1_c : 1; /**< BIST Status for N2P Port1 Cmd */
+ uint64_t n2p1_o : 1; /**< BIST Status for N2P Port1 Data */
+ uint64_t cpl_p0 : 1; /**< BIST Status for CPL Port 0 */
+ uint64_t cpl_p1 : 1; /**< BIST Status for CPL Port 1 */
+ uint64_t reserved_19_24 : 6;
+ uint64_t p2n0_c0 : 1; /**< BIST Status for P2N Port0 C0 */
+ uint64_t p2n0_c1 : 1; /**< BIST Status for P2N Port0 C1 */
+ uint64_t p2n0_n : 1; /**< BIST Status for P2N Port0 N */
+ uint64_t p2n0_p0 : 1; /**< BIST Status for P2N Port0 P0 */
+ uint64_t p2n0_p1 : 1; /**< BIST Status for P2N Port0 P1 */
+ uint64_t p2n1_c0 : 1; /**< BIST Status for P2N Port1 C0 */
+ uint64_t p2n1_c1 : 1; /**< BIST Status for P2N Port1 C1 */
+ uint64_t p2n1_n : 1; /**< BIST Status for P2N Port1 N */
+ uint64_t p2n1_p0 : 1; /**< BIST Status for P2N Port1 P0 */
+ uint64_t p2n1_p1 : 1; /**< BIST Status for P2N Port1 P1 */
+ uint64_t reserved_6_8 : 3;
+ uint64_t dsi1_1 : 1; /**< BIST Status for DSI1 Memory 1 */
+ uint64_t dsi1_0 : 1; /**< BIST Status for DSI1 Memory 0 */
+ uint64_t dsi0_1 : 1; /**< BIST Status for DSI0 Memory 1 */
+ uint64_t dsi0_0 : 1; /**< BIST Status for DSI0 Memory 0 */
+ uint64_t msi : 1; /**< BIST Status for MSI Memory Map */
+ uint64_t ncb_cmd : 1; /**< BIST Status for NCB Outbound Commands */
+#else
+ uint64_t ncb_cmd : 1;
+ uint64_t msi : 1;
+ uint64_t dsi0_0 : 1;
+ uint64_t dsi0_1 : 1;
+ uint64_t dsi1_0 : 1;
+ uint64_t dsi1_1 : 1;
+ uint64_t reserved_6_8 : 3;
+ uint64_t p2n1_p1 : 1;
+ uint64_t p2n1_p0 : 1;
+ uint64_t p2n1_n : 1;
+ uint64_t p2n1_c1 : 1;
+ uint64_t p2n1_c0 : 1;
+ uint64_t p2n0_p1 : 1;
+ uint64_t p2n0_p0 : 1;
+ uint64_t p2n0_n : 1;
+ uint64_t p2n0_c1 : 1;
+ uint64_t p2n0_c0 : 1;
+ uint64_t reserved_19_24 : 6;
+ uint64_t cpl_p1 : 1;
+ uint64_t cpl_p0 : 1;
+ uint64_t n2p1_o : 1;
+ uint64_t n2p1_c : 1;
+ uint64_t n2p0_o : 1;
+ uint64_t n2p0_c : 1;
+ uint64_t ncb_req : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_bist_status_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t n2p0_c : 1; /**< BIST Status for N2P Port0 Cmd */
+ uint64_t n2p0_o : 1; /**< BIST Status for N2P Port0 Data */
+ uint64_t reserved_27_28 : 2;
+ uint64_t cpl_p0 : 1; /**< BIST Status for CPL Port 0 */
+ uint64_t cpl_p1 : 1; /**< BIST Status for CPL Port 1 */
+ uint64_t reserved_19_24 : 6;
+ uint64_t p2n0_c0 : 1; /**< BIST Status for P2N Port0 C0 */
+ uint64_t p2n0_c1 : 1; /**< BIST Status for P2N Port0 C1 */
+ uint64_t p2n0_n : 1; /**< BIST Status for P2N Port0 N */
+ uint64_t p2n0_p0 : 1; /**< BIST Status for P2N Port0 P0 */
+ uint64_t p2n0_p1 : 1; /**< BIST Status for P2N Port0 P1 */
+ uint64_t p2n1_c0 : 1; /**< BIST Status for P2N Port1 C0 */
+ uint64_t p2n1_c1 : 1; /**< BIST Status for P2N Port1 C1 */
+ uint64_t p2n1_n : 1; /**< BIST Status for P2N Port1 N */
+ uint64_t p2n1_p0 : 1; /**< BIST Status for P2N Port1 P0 */
+ uint64_t p2n1_p1 : 1; /**< BIST Status for P2N Port1 P1 */
+ uint64_t reserved_6_8 : 3;
+ uint64_t dsi1_1 : 1; /**< BIST Status for DSI1 Memory 1 */
+ uint64_t dsi1_0 : 1; /**< BIST Status for DSI1 Memory 0 */
+ uint64_t dsi0_1 : 1; /**< BIST Status for DSI0 Memory 1 */
+ uint64_t dsi0_0 : 1; /**< BIST Status for DSI0 Memory 0 */
+ uint64_t msi : 1; /**< BIST Status for MSI Memory Map */
+ uint64_t ncb_cmd : 1; /**< BIST Status for NCB Outbound Commands */
+#else
+ uint64_t ncb_cmd : 1;
+ uint64_t msi : 1;
+ uint64_t dsi0_0 : 1;
+ uint64_t dsi0_1 : 1;
+ uint64_t dsi1_0 : 1;
+ uint64_t dsi1_1 : 1;
+ uint64_t reserved_6_8 : 3;
+ uint64_t p2n1_p1 : 1;
+ uint64_t p2n1_p0 : 1;
+ uint64_t p2n1_n : 1;
+ uint64_t p2n1_c1 : 1;
+ uint64_t p2n1_c0 : 1;
+ uint64_t p2n0_p1 : 1;
+ uint64_t p2n0_p0 : 1;
+ uint64_t p2n0_n : 1;
+ uint64_t p2n0_c1 : 1;
+ uint64_t p2n0_c0 : 1;
+ uint64_t reserved_19_24 : 6;
+ uint64_t cpl_p1 : 1;
+ uint64_t cpl_p0 : 1;
+ uint64_t reserved_27_28 : 2;
+ uint64_t n2p0_o : 1;
+ uint64_t n2p0_c : 1;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn61xx;
+ struct cvmx_sli_bist_status_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t n2p0_c : 1; /**< BIST Status for N2P Port0 Cmd */
+ uint64_t n2p0_o : 1; /**< BIST Status for N2P Port0 Data */
+ uint64_t n2p1_c : 1; /**< BIST Status for N2P Port1 Cmd */
+ uint64_t n2p1_o : 1; /**< BIST Status for N2P Port1 Data */
+ uint64_t cpl_p0 : 1; /**< BIST Status for CPL Port 0 */
+ uint64_t cpl_p1 : 1; /**< BIST Status for CPL Port 1 */
+ uint64_t reserved_19_24 : 6;
+ uint64_t p2n0_c0 : 1; /**< BIST Status for P2N Port0 C0 */
+ uint64_t p2n0_c1 : 1; /**< BIST Status for P2N Port0 C1 */
+ uint64_t p2n0_n : 1; /**< BIST Status for P2N Port0 N */
+ uint64_t p2n0_p0 : 1; /**< BIST Status for P2N Port0 P0 */
+ uint64_t p2n0_p1 : 1; /**< BIST Status for P2N Port0 P1 */
+ uint64_t p2n1_c0 : 1; /**< BIST Status for P2N Port1 C0 */
+ uint64_t p2n1_c1 : 1; /**< BIST Status for P2N Port1 C1 */
+ uint64_t p2n1_n : 1; /**< BIST Status for P2N Port1 N */
+ uint64_t p2n1_p0 : 1; /**< BIST Status for P2N Port1 P0 */
+ uint64_t p2n1_p1 : 1; /**< BIST Status for P2N Port1 P1 */
+ uint64_t reserved_6_8 : 3;
+ uint64_t dsi1_1 : 1; /**< BIST Status for DSI1 Memory 1 */
+ uint64_t dsi1_0 : 1; /**< BIST Status for DSI1 Memory 0 */
+ uint64_t dsi0_1 : 1; /**< BIST Status for DSI0 Memory 1 */
+ uint64_t dsi0_0 : 1; /**< BIST Status for DSI0 Memory 0 */
+ uint64_t msi : 1; /**< BIST Status for MSI Memory Map */
+ uint64_t ncb_cmd : 1; /**< BIST Status for NCB Outbound Commands */
+#else
+ uint64_t ncb_cmd : 1;
+ uint64_t msi : 1;
+ uint64_t dsi0_0 : 1;
+ uint64_t dsi0_1 : 1;
+ uint64_t dsi1_0 : 1;
+ uint64_t dsi1_1 : 1;
+ uint64_t reserved_6_8 : 3;
+ uint64_t p2n1_p1 : 1;
+ uint64_t p2n1_p0 : 1;
+ uint64_t p2n1_n : 1;
+ uint64_t p2n1_c1 : 1;
+ uint64_t p2n1_c0 : 1;
+ uint64_t p2n0_p1 : 1;
+ uint64_t p2n0_p0 : 1;
+ uint64_t p2n0_n : 1;
+ uint64_t p2n0_c1 : 1;
+ uint64_t p2n0_c0 : 1;
+ uint64_t reserved_19_24 : 6;
+ uint64_t cpl_p1 : 1;
+ uint64_t cpl_p0 : 1;
+ uint64_t n2p1_o : 1;
+ uint64_t n2p1_c : 1;
+ uint64_t n2p0_o : 1;
+ uint64_t n2p0_c : 1;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn63xx;
+ struct cvmx_sli_bist_status_cn63xx cn63xxp1;
+ struct cvmx_sli_bist_status_cn61xx cn66xx;
+ struct cvmx_sli_bist_status_s cn68xx;
+ struct cvmx_sli_bist_status_s cn68xxp1;
+ struct cvmx_sli_bist_status_cn61xx cnf71xx;
+};
+typedef union cvmx_sli_bist_status cvmx_sli_bist_status_t;
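+
+/* Editorial example (not part of the original SDK sources): decoding
+ * SLI_BIST_STATUS through the union above. Assumes cvmx_read_csr() from
+ * cvmx.h, a caller-supplied base (hypothetical 'sli_base'), the
+ * CVMX_SLI_BIST_STATUS offset defined earlier in this header, and that a
+ * set status bit reports a failed memory BIST.
+ */
+static inline int example_sli_bist_failed(uint64_t sli_base)
+{
+ cvmx_sli_bist_status_t bist;
+ bist.u64 = cvmx_read_csr(sli_base + CVMX_SLI_BIST_STATUS);
+ /* Any nonzero bit flags a memory that did not pass BIST */
+ return bist.u64 != 0;
+}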
+
+/**
+ * cvmx_sli_ctl_port#
+ *
+ * SLI_CTL_PORTX = SLI's Control Port X
+ *
+ * Contains access control for Port X.
+ */
+union cvmx_sli_ctl_portx {
+ uint64_t u64;
+ struct cvmx_sli_ctl_portx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t intd : 1; /**< When '0' Intd wire asserted. Before mapping. */
+ uint64_t intc : 1; /**< When '0' Intc wire asserted. Before mapping. */
+ uint64_t intb : 1; /**< When '0' Intb wire asserted. Before mapping. */
+ uint64_t inta : 1; /**< When '0' Inta wire asserted. Before mapping. */
+ uint64_t dis_port : 1; /**< When set the output to the MAC is disabled. This
+ occurs when the MAC reset line transitions from
+ de-asserted to asserted. Writing a '1' to this
+ location will clear this condition when the MAC is
+ no longer in reset and the output to the MAC is at
+ the beginning of a transfer. */
+ uint64_t waitl_com : 1; /**< When set to '1', causes the SLI to wait for a commit
+ from the L2C before sending additional completions
+ to the L2C from a MAC.
+ Set this for more conservative behavior. Clear
+ this for more aggressive, higher-performance
+ behavior */
+ uint64_t intd_map : 2; /**< Maps INTD to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t intc_map : 2; /**< Maps INTC to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t intb_map : 2; /**< Maps INTB to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t inta_map : 2; /**< Maps INTA to INTA(00), INTB(01), INTC(10) or
+ INTD (11). */
+ uint64_t ctlp_ro : 1; /**< Relaxed ordering enable for Completion TLPS. */
+ uint64_t reserved_6_6 : 1;
+ uint64_t ptlp_ro : 1; /**< Relaxed ordering enable for Posted TLPS. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t wait_com : 1; /**< When set to '1', causes the SLI to wait for a commit
+ from the L2C before sending additional stores to
+ the L2C from a MAC.
+ The SLI will request a commit on the last store
+ if more than one STORE operation is required on
+ the NCB.
+ Most applications will not notice a difference and
+ should not set this bit. Setting the bit gives
+ more conservative ordering at lower performance. */
+#else
+ uint64_t wait_com : 1;
+ uint64_t reserved_1_4 : 4;
+ uint64_t ptlp_ro : 1;
+ uint64_t reserved_6_6 : 1;
+ uint64_t ctlp_ro : 1;
+ uint64_t inta_map : 2;
+ uint64_t intb_map : 2;
+ uint64_t intc_map : 2;
+ uint64_t intd_map : 2;
+ uint64_t waitl_com : 1;
+ uint64_t dis_port : 1;
+ uint64_t inta : 1;
+ uint64_t intb : 1;
+ uint64_t intc : 1;
+ uint64_t intd : 1;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_sli_ctl_portx_s cn61xx;
+ struct cvmx_sli_ctl_portx_s cn63xx;
+ struct cvmx_sli_ctl_portx_s cn63xxp1;
+ struct cvmx_sli_ctl_portx_s cn66xx;
+ struct cvmx_sli_ctl_portx_s cn68xx;
+ struct cvmx_sli_ctl_portx_s cn68xxp1;
+ struct cvmx_sli_ctl_portx_s cnf71xx;
+};
+typedef union cvmx_sli_ctl_portx cvmx_sli_ctl_portx_t;
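+
+/* Editorial example (not part of the original SDK sources): routing all four
+ * legacy INTx wires of a port to INTA via the *_MAP fields above (encoding
+ * 00 selects INTA). A read-modify-write sketch, assuming cvmx_read_csr()/
+ * cvmx_write_csr() from cvmx.h, a caller-supplied base (hypothetical
+ * 'sli_base'), and the CVMX_SLI_CTL_PORTX(port) offset defined earlier in
+ * this header.
+ */
+static inline void example_sli_route_intx_to_inta(uint64_t sli_base, unsigned long port)
+{
+ cvmx_sli_ctl_portx_t ctl;
+ ctl.u64 = cvmx_read_csr(sli_base + CVMX_SLI_CTL_PORTX(port));
+ ctl.s.inta_map = 0; /* 00 = INTA */
+ ctl.s.intb_map = 0;
+ ctl.s.intc_map = 0;
+ ctl.s.intd_map = 0;
+ cvmx_write_csr(sli_base + CVMX_SLI_CTL_PORTX(port), ctl.u64);
+}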
+
+/**
+ * cvmx_sli_ctl_status
+ *
+ * SLI_CTL_STATUS = SLI Control Status Register
+ *
+ * Contains control and status for SLI. Writes to this register are not ordered with writes/reads to the MAC Memory space.
+ * To ensure that a write has completed, the user must read the register before making an access (i.e. to MAC memory space)
+ * that requires the value of this register to be updated.
+ */
+union cvmx_sli_ctl_status {
+ uint64_t u64;
+ struct cvmx_sli_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t p1_ntags : 6; /**< Number of tags available for MAC Port1.
+ In RC mode 1 tag is needed for each outbound TLP
+ that requires a CPL TLP. In Endpoint mode the
+ number of tags required for a TLP request is
+ 1 per 64-bytes of CPL data + 1.
+ This field should only be written as part of
+ reset sequence, before issuing any reads, CFGs, or
+ IO transactions from the core(s). */
+ uint64_t p0_ntags : 6; /**< Number of tags available for outbound TLPs to the
+ MACS. One tag is needed for each outbound TLP that
+ requires a CPL TLP.
+ This field should only be written as part of
+ reset sequence, before issuing any reads, CFGs, or
+ IO transactions from the core(s). */
+ uint64_t chip_rev : 8; /**< The chip revision. */
+#else
+ uint64_t chip_rev : 8;
+ uint64_t p0_ntags : 6;
+ uint64_t p1_ntags : 6;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_sli_ctl_status_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t p0_ntags : 6; /**< Number of tags available for outbound TLPs to the
+ MACS. One tag is needed for each outbound TLP that
+ requires a CPL TLP.
+ This field should only be written as part of
+ reset sequence, before issuing any reads, CFGs, or
+ IO transactions from the core(s). */
+ uint64_t chip_rev : 8; /**< The chip revision. */
+#else
+ uint64_t chip_rev : 8;
+ uint64_t p0_ntags : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn61xx;
+ struct cvmx_sli_ctl_status_s cn63xx;
+ struct cvmx_sli_ctl_status_s cn63xxp1;
+ struct cvmx_sli_ctl_status_cn61xx cn66xx;
+ struct cvmx_sli_ctl_status_s cn68xx;
+ struct cvmx_sli_ctl_status_s cn68xxp1;
+ struct cvmx_sli_ctl_status_cn61xx cnf71xx;
+};
+typedef union cvmx_sli_ctl_status cvmx_sli_ctl_status_t;
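+
+/* Editorial example (not part of the original SDK sources): per the ordering
+ * note above, a write to SLI_CTL_STATUS is not ordered against MAC memory
+ * space traffic, so read the register back before any access that depends
+ * on the new value. Sketch assumes cvmx_read_csr()/cvmx_write_csr() from
+ * cvmx.h, a caller-supplied base (hypothetical 'sli_base'), and the
+ * CVMX_SLI_CTL_STATUS offset defined earlier in this header.
+ */
+static inline void example_sli_ctl_status_write_sync(uint64_t sli_base, uint64_t value)
+{
+ cvmx_write_csr(sli_base + CVMX_SLI_CTL_STATUS, value);
+ /* Read back to force completion of the write before dependent accesses */
+ (void)cvmx_read_csr(sli_base + CVMX_SLI_CTL_STATUS);
+}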
+
+/**
+ * cvmx_sli_data_out_cnt
+ *
+ * SLI_DATA_OUT_CNT = SLI DATA OUT COUNT
+ *
+ * The EXEC data out fifo-count and the data unload counter.
+ */
+union cvmx_sli_data_out_cnt {
+ uint64_t u64;
+ struct cvmx_sli_data_out_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t p1_ucnt : 16; /**< SLI Order-FIFO1 Fifo Unload Count. This counter is
+ incremented by '1' every time a word is removed
+ from the Data Out FIFO, whose count is shown in
+ P1_FCNT. */
+ uint64_t p1_fcnt : 6; /**< SLI Order-FIFO1 Data Out Fifo Count. Number of
+ address/data words presently buffered in the FIFO
+ waiting to be sent out the Order-FIFO. */
+ uint64_t p0_ucnt : 16; /**< SLI Order-FIFO0 Fifo Unload Count. This counter is
+ incremented by '1' every time a word is removed
+ from the Data Out FIFO, whose count is shown in
+ P0_FCNT. */
+ uint64_t p0_fcnt : 6; /**< SLI Order-FIFO0 Data Out Fifo Count. Number of
+ address/data words presently buffered in the FIFO
+ waiting to be sent out the Order-FIFO. */
+#else
+ uint64_t p0_fcnt : 6;
+ uint64_t p0_ucnt : 16;
+ uint64_t p1_fcnt : 6;
+ uint64_t p1_ucnt : 16;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_sli_data_out_cnt_s cn61xx;
+ struct cvmx_sli_data_out_cnt_s cn63xx;
+ struct cvmx_sli_data_out_cnt_s cn63xxp1;
+ struct cvmx_sli_data_out_cnt_s cn66xx;
+ struct cvmx_sli_data_out_cnt_s cn68xx;
+ struct cvmx_sli_data_out_cnt_s cn68xxp1;
+ struct cvmx_sli_data_out_cnt_s cnf71xx;
+};
+typedef union cvmx_sli_data_out_cnt cvmx_sli_data_out_cnt_t;
+
+/**
+ * cvmx_sli_dbg_data
+ *
+ * SLI_DBG_DATA = SLI Debug Data Register
+ *
+ * Value returned on the debug-data lines from the RSLs
+ */
+union cvmx_sli_dbg_data {
+ uint64_t u64;
+ struct cvmx_sli_dbg_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_sli_dbg_data_s cn61xx;
+ struct cvmx_sli_dbg_data_s cn63xx;
+ struct cvmx_sli_dbg_data_s cn63xxp1;
+ struct cvmx_sli_dbg_data_s cn66xx;
+ struct cvmx_sli_dbg_data_s cn68xx;
+ struct cvmx_sli_dbg_data_s cn68xxp1;
+ struct cvmx_sli_dbg_data_s cnf71xx;
+};
+typedef union cvmx_sli_dbg_data cvmx_sli_dbg_data_t;
+
+/**
+ * cvmx_sli_dbg_select
+ *
+ * SLI_DBG_SELECT = Debug Select Register
+ *
+ * Contains the debug select value last written to the RSLs.
+ */
+union cvmx_sli_dbg_select {
+ uint64_t u64;
+ struct cvmx_sli_dbg_select_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t adbg_sel : 1; /**< When set to '1', SLI_DBG_DATA[DATA] will only be
+ loaded when SLI_DBG_DATA[DATA] bit [16] is a '1'.
+ When the debug data comes from an Async-RSL bit
+ 16 is used to tell that the data present is valid. */
+ uint64_t dbg_sel : 32; /**< When this register is written the RML will write
+ all "F"s to the previous RTL to disable it from
+ sending Debug-Data. The RML will then send a write
+ to the new RSL with the supplied Debug-Select
+ value. Because it takes time for the new Debug
+ Select value to take effect and the requested
+ Debug-Data to return, time is needed to the new
+ Debug-Data to arrive. The inititator of the Debug
+ Select should issue a read to a CSR before reading
+ the Debug Data (this read could also be to the
+ SLI_DBG_DATA but the returned value for the first
+ read will return NS data. */
+#else
+ uint64_t dbg_sel : 32;
+ uint64_t adbg_sel : 1;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_sli_dbg_select_s cn61xx;
+ struct cvmx_sli_dbg_select_s cn63xx;
+ struct cvmx_sli_dbg_select_s cn63xxp1;
+ struct cvmx_sli_dbg_select_s cn66xx;
+ struct cvmx_sli_dbg_select_s cn68xx;
+ struct cvmx_sli_dbg_select_s cn68xxp1;
+ struct cvmx_sli_dbg_select_s cnf71xx;
+};
+typedef union cvmx_sli_dbg_select cvmx_sli_dbg_select_t;
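+
+/* Editorial example (not part of the original SDK sources): the select/read
+ * sequence the DBG_SEL description above calls for -- write the new select
+ * value, issue one throw-away CSR read to absorb the propagation delay,
+ * then read the debug data. Sketch assumes cvmx_read_csr()/cvmx_write_csr()
+ * from cvmx.h, a caller-supplied base (hypothetical 'sli_base'), and the
+ * CVMX_SLI_DBG_SELECT/CVMX_SLI_DBG_DATA offsets defined earlier in this
+ * header.
+ */
+static inline uint64_t example_sli_debug_read(uint64_t sli_base, uint32_t select)
+{
+ cvmx_sli_dbg_select_t sel;
+ sel.u64 = 0;
+ sel.s.dbg_sel = select;
+ cvmx_write_csr(sli_base + CVMX_SLI_DBG_SELECT, sel.u64);
+ /* First read only flushes the select change; its data may be stale */
+ (void)cvmx_read_csr(sli_base + CVMX_SLI_DBG_DATA);
+ return cvmx_read_csr(sli_base + CVMX_SLI_DBG_DATA);
+}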
+
+/**
+ * cvmx_sli_dma#_cnt
+ *
+ * SLI_DMAx_CNT = SLI DMA Count
+ *
+ * The DMA Count value.
+ */
+union cvmx_sli_dmax_cnt {
+ uint64_t u64;
+ struct cvmx_sli_dmax_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< The DMA counter.
+ Writing this field will cause the written value
+ to be subtracted from DMA. HW will optionally
+ increment this field after it completes an
+ OUTBOUND or EXTERNAL-ONLY DMA instruction. These
+ increments may cause interrupts. Refer to
+ SLI_DMAx_INT_LEVEL and SLI_INT_SUM[DCNT,DTIME]. */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_dmax_cnt_s cn61xx;
+ struct cvmx_sli_dmax_cnt_s cn63xx;
+ struct cvmx_sli_dmax_cnt_s cn63xxp1;
+ struct cvmx_sli_dmax_cnt_s cn66xx;
+ struct cvmx_sli_dmax_cnt_s cn68xx;
+ struct cvmx_sli_dmax_cnt_s cn68xxp1;
+ struct cvmx_sli_dmax_cnt_s cnf71xx;
+};
+typedef union cvmx_sli_dmax_cnt cvmx_sli_dmax_cnt_t;
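
Because a write to CNT subtracts the written value, the usual acknowledge pattern is read-then-write-back. A short sketch under the same accessor assumptions as above, with CVMX_SLI_DMAX_CNT(engine) assumed as the per-engine address macro:

    /* Acknowledge completions on one DMA engine without losing any
       that arrive between the read and the write. */
    static void sli_dma_ack_count(int engine)
    {
        cvmx_sli_dmax_cnt_t cnt;

        cnt.u64 = cvmx_read_csr(CVMX_SLI_DMAX_CNT(engine));
        /* Writing back the value just read subtracts exactly what was
           observed, leaving any newer increments in place. */
        cvmx_write_csr(CVMX_SLI_DMAX_CNT(engine), cnt.u64);
    }
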
+
+/**
+ * cvmx_sli_dma#_int_level
+ *
+ * SLI_DMAx_INT_LEVEL = SLI DMAx Interrupt Level
+ *
+ * Thresholds for DMA count and timer interrupts.
+ */
+union cvmx_sli_dmax_int_level {
+ uint64_t u64;
+ struct cvmx_sli_dmax_int_level_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t time : 32; /**< Whenever the SLI_DMAx_TIM[TIM] timer exceeds
+ this value, SLI_INT_SUM[DTIME<x>] is set.
+ The SLI_DMAx_TIM[TIM] timer increments every SLI
+ clock whenever SLI_DMAx_CNT[CNT]!=0, and is
+ cleared when SLI_INT_SUM[DTIME<x>] is written with
+ one. */
+ uint64_t cnt : 32; /**< Whenever SLI_DMAx_CNT[CNT] exceeds this value,
+ SLI_INT_SUM[DCNT<x>] is set. */
+#else
+ uint64_t cnt : 32;
+ uint64_t time : 32;
+#endif
+ } s;
+ struct cvmx_sli_dmax_int_level_s cn61xx;
+ struct cvmx_sli_dmax_int_level_s cn63xx;
+ struct cvmx_sli_dmax_int_level_s cn63xxp1;
+ struct cvmx_sli_dmax_int_level_s cn66xx;
+ struct cvmx_sli_dmax_int_level_s cn68xx;
+ struct cvmx_sli_dmax_int_level_s cn68xxp1;
+ struct cvmx_sli_dmax_int_level_s cnf71xx;
+};
+typedef union cvmx_sli_dmax_int_level cvmx_sli_dmax_int_level_t;
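
The CNT and TIME fields are plain thresholds against SLI_DMAx_CNT and SLI_DMAx_TIM. A sketch of programming both, under the same assumed accessors and with CVMX_SLI_DMAX_INT_LEVEL(engine) assumed as the address macro:

    /* Raise DCNT<engine> after cnt_thresh completions, or DTIME<engine>
       once work has been pending for time_thresh SLI clocks. */
    static void sli_dma_set_int_levels(int engine, uint32_t cnt_thresh,
                                       uint32_t time_thresh)
    {
        cvmx_sli_dmax_int_level_t level;

        level.u64 = 0;
        level.s.cnt = cnt_thresh;
        level.s.time = time_thresh;
        cvmx_write_csr(CVMX_SLI_DMAX_INT_LEVEL(engine), level.u64);
    }
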
+
+/**
+ * cvmx_sli_dma#_tim
+ *
+ * SLI_DMAx_TIM = SLI DMA Timer
+ *
+ * The DMA Timer value.
+ */
+union cvmx_sli_dmax_tim {
+ uint64_t u64;
+ struct cvmx_sli_dmax_tim_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t tim : 32; /**< The DMA timer value.
+ The timer will increment when SLI_DMAx_CNT[CNT]!=0
+ and will clear when SLI_DMAx_CNT[CNT]==0 */
+#else
+ uint64_t tim : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_dmax_tim_s cn61xx;
+ struct cvmx_sli_dmax_tim_s cn63xx;
+ struct cvmx_sli_dmax_tim_s cn63xxp1;
+ struct cvmx_sli_dmax_tim_s cn66xx;
+ struct cvmx_sli_dmax_tim_s cn68xx;
+ struct cvmx_sli_dmax_tim_s cn68xxp1;
+ struct cvmx_sli_dmax_tim_s cnf71xx;
+};
+typedef union cvmx_sli_dmax_tim cvmx_sli_dmax_tim_t;
+
+/**
+ * cvmx_sli_int_enb_ciu
+ *
+ * SLI_INT_ENB_CIU = SLI's Interrupt Enable CIU Register
+ *
+ * Used to enable the various interrupting conditions of SLI
+ */
+union cvmx_sli_int_enb_ciu {
+ uint64_t u64;
+ struct cvmx_sli_int_enb_ciu_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t pipe_err : 1; /**< Out of range PIPE value. */
+ uint64_t ill_pad : 1; /**< Illegal packet csr address. */
+ uint64_t sprt3_err : 1; /**< Error Response received on SLI port 3. */
+ uint64_t sprt2_err : 1; /**< Error Response received on SLI port 2. */
+ uint64_t sprt1_err : 1; /**< Error Response received on SLI port 1. */
+ uint64_t sprt0_err : 1; /**< Error Response received on SLI port 0. */
+ uint64_t pins_err : 1; /**< Read Error during packet instruction fetch. */
+ uint64_t pop_err : 1; /**< Read Error during packet scatter pointer fetch. */
+ uint64_t pdi_err : 1; /**< Read Error during packet data fetch. */
+ uint64_t pgl_err : 1; /**< Read Error during gather list fetch. */
+ uint64_t pin_bp : 1; /**< Packet Input Count exceeded WMARK. */
+ uint64_t pout_err : 1; /**< Packet Out Interrupt, Error From PKO. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell Count Overflow. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell Count Overflow. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< DMA Timer Interrupts */
+ uint64_t dcnt : 2; /**< DMA Count Interrupts */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts */
+ uint64_t reserved_28_31 : 4;
+ uint64_t m3_un_wi : 1; /**< Reserved. */
+ uint64_t m3_un_b0 : 1; /**< Reserved. */
+ uint64_t m3_up_wi : 1; /**< Reserved. */
+ uint64_t m3_up_b0 : 1; /**< Reserved. */
+ uint64_t m2_un_wi : 1; /**< Reserved. */
+ uint64_t m2_un_b0 : 1; /**< Reserved. */
+ uint64_t m2_up_wi : 1; /**< Reserved. */
+ uint64_t m2_up_b0 : 1; /**< Reserved. */
+ uint64_t reserved_18_19 : 2;
+ uint64_t mio_int1 : 1; /**< Enables SLI_INT_SUM[17] to generate an
+ interrupt on the RSL.
+ THIS SHOULD NEVER BE SET */
+ uint64_t mio_int0 : 1; /**< Enables SLI_INT_SUM[16] to generate an
+ interrupt on the RSL.
+ THIS SHOULD NEVER BE SET */
+ uint64_t m1_un_wi : 1; /**< Enables SLI_INT_SUM[15] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_un_b0 : 1; /**< Enables SLI_INT_SUM[14] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_up_wi : 1; /**< Enables SLI_INT_SUM[13] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_up_b0 : 1; /**< Enables SLI_INT_SUM[12] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_un_wi : 1; /**< Enables SLI_INT_SUM[11] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_un_b0 : 1; /**< Enables SLI_INT_SUM[10] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_up_wi : 1; /**< Enables SLI_INT_SUM[9] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_up_b0 : 1; /**< Enables SLI_INT_SUM[8] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Enables SLI_INT_SUM[5] to generate an
+ interrupt on the RSL. */
+ uint64_t pcnt : 1; /**< Enables SLI_INT_SUM[4] to generate an
+ interrupt on the RSL. */
+ uint64_t iob2big : 1; /**< Enables SLI_INT_SUM[3] to generate an
+ interrupt on the RSL. */
+ uint64_t bar0_to : 1; /**< Enables SLI_INT_SUM[2] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< Enables SLI_INT_SUM[0] to generate an
+ interrupt on the RSL. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t reserved_18_19 : 2;
+ uint64_t m2_up_b0 : 1;
+ uint64_t m2_up_wi : 1;
+ uint64_t m2_un_b0 : 1;
+ uint64_t m2_un_wi : 1;
+ uint64_t m3_up_b0 : 1;
+ uint64_t m3_up_wi : 1;
+ uint64_t m3_un_b0 : 1;
+ uint64_t m3_un_wi : 1;
+ uint64_t reserved_28_31 : 4;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t sprt2_err : 1;
+ uint64_t sprt3_err : 1;
+ uint64_t ill_pad : 1;
+ uint64_t pipe_err : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_sli_int_enb_ciu_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t ill_pad : 1; /**< Illegal packet csr address. */
+ uint64_t sprt3_err : 1; /**< Error Response received on SLI port 3. */
+ uint64_t sprt2_err : 1; /**< Error Response received on SLI port 2. */
+ uint64_t sprt1_err : 1; /**< Error Response received on SLI port 1. */
+ uint64_t sprt0_err : 1; /**< Error Response received on SLI port 0. */
+ uint64_t pins_err : 1; /**< Read Error during packet instruction fetch. */
+ uint64_t pop_err : 1; /**< Read Error during packet scatter pointer fetch. */
+ uint64_t pdi_err : 1; /**< Read Error during packet data fetch. */
+ uint64_t pgl_err : 1; /**< Read Error during gather list fetch. */
+ uint64_t pin_bp : 1; /**< Packet Input Count exceeded WMARK. */
+ uint64_t pout_err : 1; /**< Packet Out Interrupt, Error From PKO. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell Count Overflow. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell Count Overflow. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< DMA Timer Interrupts */
+ uint64_t dcnt : 2; /**< DMA Count Interrupts */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts */
+ uint64_t reserved_28_31 : 4;
+ uint64_t m3_un_wi : 1; /**< Reserved. */
+ uint64_t m3_un_b0 : 1; /**< Reserved. */
+ uint64_t m3_up_wi : 1; /**< Reserved. */
+ uint64_t m3_up_b0 : 1; /**< Reserved. */
+ uint64_t m2_un_wi : 1; /**< Reserved. */
+ uint64_t m2_un_b0 : 1; /**< Reserved. */
+ uint64_t m2_up_wi : 1; /**< Reserved. */
+ uint64_t m2_up_b0 : 1; /**< Reserved. */
+ uint64_t reserved_18_19 : 2;
+ uint64_t mio_int1 : 1; /**< Enables SLI_INT_SUM[17] to generate an
+ interrupt on the RSL.
+ THIS SHOULD NEVER BE SET */
+ uint64_t mio_int0 : 1; /**< Enables SLI_INT_SUM[16] to generate an
+ interrupt on the RSL.
+ THIS SHOULD NEVER BE SET */
+ uint64_t m1_un_wi : 1; /**< Enables SLI_INT_SUM[15] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_un_b0 : 1; /**< Enables SLI_INT_SUM[14] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_up_wi : 1; /**< Enables SLI_INT_SUM[13] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_up_b0 : 1; /**< Enables SLI_INT_SUM[12] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_un_wi : 1; /**< Enables SLI_INT_SUM[11] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_un_b0 : 1; /**< Enables SLI_INT_SUM[10] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_up_wi : 1; /**< Enables SLI_INT_SUM[9] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_up_b0 : 1; /**< Enables SLI_INT_SUM[8] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Enables SLI_INT_SUM[5] to generate an
+ interrupt on the RSL. */
+ uint64_t pcnt : 1; /**< Enables SLI_INT_SUM[4] to generate an
+ interrupt on the RSL. */
+ uint64_t iob2big : 1; /**< Enables SLI_INT_SUM[3] to generate an
+ interrupt on the RSL. */
+ uint64_t bar0_to : 1; /**< Enables SLI_INT_SUM[2] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< Enables SLI_INT_SUM[0] to generate an
+ interrupt on the RSL. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t reserved_18_19 : 2;
+ uint64_t m2_up_b0 : 1;
+ uint64_t m2_up_wi : 1;
+ uint64_t m2_un_b0 : 1;
+ uint64_t m2_un_wi : 1;
+ uint64_t m3_up_b0 : 1;
+ uint64_t m3_up_wi : 1;
+ uint64_t m3_un_b0 : 1;
+ uint64_t m3_un_wi : 1;
+ uint64_t reserved_28_31 : 4;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t sprt2_err : 1;
+ uint64_t sprt3_err : 1;
+ uint64_t ill_pad : 1;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } cn61xx;
+ struct cvmx_sli_int_enb_ciu_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t ill_pad : 1; /**< Illegal packet csr address. */
+ uint64_t reserved_58_59 : 2;
+ uint64_t sprt1_err : 1; /**< Error Response received on SLI port 1. */
+ uint64_t sprt0_err : 1; /**< Error Response received on SLI port 0. */
+ uint64_t pins_err : 1; /**< Read Error during packet instruction fetch. */
+ uint64_t pop_err : 1; /**< Read Error during packet scatter pointer fetch. */
+ uint64_t pdi_err : 1; /**< Read Error during packet data fetch. */
+ uint64_t pgl_err : 1; /**< Read Error during gather list fetch. */
+ uint64_t pin_bp : 1; /**< Packet Input Count exceeded WMARK. */
+ uint64_t pout_err : 1; /**< Packet Out Interrupt, Error From PKO. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell Count Overflow. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell Count Overflow. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< DMA Timer Interrupts */
+ uint64_t dcnt : 2; /**< DMA Count Interrupts */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts */
+ uint64_t reserved_18_31 : 14;
+ uint64_t mio_int1 : 1; /**< Enables SLI_INT_SUM[17] to generate an
+ interrupt on the RSL.
+ THIS SHOULD NEVER BE SET */
+ uint64_t mio_int0 : 1; /**< Enables SLI_INT_SUM[16] to generate an
+ interrupt on the RSL.
+ THIS SHOULD NEVER BE SET */
+ uint64_t m1_un_wi : 1; /**< Enables SLI_INT_SUM[15] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_un_b0 : 1; /**< Enables SLI_INT_SUM[14] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_up_wi : 1; /**< Enables SLI_INT_SUM[13] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_up_b0 : 1; /**< Enables SLI_INT_SUM[12] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_un_wi : 1; /**< Enables SLI_INT_SUM[11] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_un_b0 : 1; /**< Enables SLI_INT_SUM[10] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_up_wi : 1; /**< Enables SLI_INT_SUM[9] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_up_b0 : 1; /**< Enables SLI_INT_SUM[8] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Enables SLI_INT_SUM[5] to generate an
+ interrupt on the RSL. */
+ uint64_t pcnt : 1; /**< Enables SLI_INT_SUM[4] to generate an
+ interrupt on the RSL. */
+ uint64_t iob2big : 1; /**< Enables SLI_INT_SUM[3] to generate an
+ interrupt on the RSL. */
+ uint64_t bar0_to : 1; /**< Enables SLI_INT_SUM[2] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< Enables SLI_INT_SUM[0] to generate an
+ interrupt on the RSL. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t reserved_18_31 : 14;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t reserved_58_59 : 2;
+ uint64_t ill_pad : 1;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } cn63xx;
+ struct cvmx_sli_int_enb_ciu_cn63xx cn63xxp1;
+ struct cvmx_sli_int_enb_ciu_cn61xx cn66xx;
+ struct cvmx_sli_int_enb_ciu_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t pipe_err : 1; /**< Out of range PIPE value. */
+ uint64_t ill_pad : 1; /**< Illegal packet csr address. */
+ uint64_t reserved_58_59 : 2;
+ uint64_t sprt1_err : 1; /**< Error Response received on SLI port 1. */
+ uint64_t sprt0_err : 1; /**< Error Response received on SLI port 0. */
+ uint64_t pins_err : 1; /**< Read Error during packet instruction fetch. */
+ uint64_t pop_err : 1; /**< Read Error during packet scatter pointer fetch. */
+ uint64_t pdi_err : 1; /**< Read Error during packet data fetch. */
+ uint64_t pgl_err : 1; /**< Read Error during gather list fetch. */
+ uint64_t reserved_51_51 : 1;
+ uint64_t pout_err : 1; /**< Packet Out Interrupt, Error From PKO. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell Count Overflow. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell Count Overflow. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< DMA Timer Interrupts */
+ uint64_t dcnt : 2; /**< DMA Count Interrupts */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts */
+ uint64_t reserved_18_31 : 14;
+ uint64_t mio_int1 : 1; /**< Enables SLI_INT_SUM[17] to generate an
+ interrupt on the RSL.
+ THIS SHOULD NEVER BE SET */
+ uint64_t mio_int0 : 1; /**< Enables SLI_INT_SUM[16] to generate an
+ interrupt on the RSL.
+ THIS SHOULD NEVER BE SET */
+ uint64_t m1_un_wi : 1; /**< Enables SLI_INT_SUM[15] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_un_b0 : 1; /**< Enables SLI_INT_SUM[14] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_up_wi : 1; /**< Enables SLI_INT_SUM[13] to generate an
+ interrupt on the RSL. */
+ uint64_t m1_up_b0 : 1; /**< Enables SLI_INT_SUM[12] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_un_wi : 1; /**< Enables SLI_INT_SUM[11] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_un_b0 : 1; /**< Enables SLI_INT_SUM[10] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_up_wi : 1; /**< Enables SLI_INT_SUM[9] to generate an
+ interrupt on the RSL. */
+ uint64_t m0_up_b0 : 1; /**< Enables SLI_INT_SUM[8] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Enables SLI_INT_SUM[5] to generate an
+ interrupt on the RSL. */
+ uint64_t pcnt : 1; /**< Enables SLI_INT_SUM[4] to generate an
+ interrupt on the RSL. */
+ uint64_t iob2big : 1; /**< Enables SLI_INT_SUM[3] to generate an
+ interrupt on the RSL. */
+ uint64_t bar0_to : 1; /**< Enables SLI_INT_SUM[2] to generate an
+ interrupt on the RSL. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< Enables SLI_INT_SUM[0] to generate an
+ interrupt on the RSL. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t reserved_18_31 : 14;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t reserved_58_59 : 2;
+ uint64_t ill_pad : 1;
+ uint64_t pipe_err : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } cn68xx;
+ struct cvmx_sli_int_enb_ciu_cn68xx cn68xxp1;
+ struct cvmx_sli_int_enb_ciu_cn61xx cnf71xx;
+};
+typedef union cvmx_sli_int_enb_ciu cvmx_sli_int_enb_ciu_t;
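
The union layout lets an enable mask be built field-by-field rather than by hand-computing bit positions. A sketch (CVMX_SLI_INT_ENB_CIU assumed as the register's address macro); the MIO_INT0/MIO_INT1 bits are documented above as never-to-be-set, so they stay clear:

    /* Enable a representative set of SLI interrupts toward the CIU. */
    static void sli_enable_ciu_ints(void)
    {
        cvmx_sli_int_enb_ciu_t enb;

        enb.u64 = 0;
        enb.s.rml_to = 1;  /* RML read/write timeout */
        enb.s.bar0_to = 1; /* BAR0-to-NCB timeout */
        enb.s.dcnt = 0x3;  /* both DMA count interrupts */
        enb.s.dtime = 0x3; /* both DMA timer interrupts */
        /* mio_int0/mio_int1 deliberately left 0 per the notes above. */
        cvmx_write_csr(CVMX_SLI_INT_ENB_CIU, enb.u64);
    }
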
+
+/**
+ * cvmx_sli_int_enb_port#
+ *
+ * SLI_INT_ENB_PORTX = SLI's Interrupt Enable Register per MAC port
+ *
+ * Used to allow the generation of interrupts (MSI/INTA) to PORT X
+ *
+ * Notes:
+ * This CSR is not used when the corresponding MAC is sRIO.
+ *
+ */
+union cvmx_sli_int_enb_portx {
+ uint64_t u64;
+ struct cvmx_sli_int_enb_portx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t pipe_err : 1; /**< Out of range PIPE value. */
+ uint64_t ill_pad : 1; /**< Illegal packet csr address. */
+ uint64_t sprt3_err : 1; /**< Error Response received on SLI port 3. */
+ uint64_t sprt2_err : 1; /**< Error Response received on SLI port 2. */
+ uint64_t sprt1_err : 1; /**< Error Response received on SLI port 1. */
+ uint64_t sprt0_err : 1; /**< Error Response received on SLI port 0. */
+ uint64_t pins_err : 1; /**< Read Error during packet instruction fetch. */
+ uint64_t pop_err : 1; /**< Read Error during packet scatter pointer fetch. */
+ uint64_t pdi_err : 1; /**< Read Error during packet data fetch. */
+ uint64_t pgl_err : 1; /**< Read Error during gather list fetch. */
+ uint64_t pin_bp : 1; /**< Packet Input Count exceeded WMARK. */
+ uint64_t pout_err : 1; /**< Packet Out Interrupt, Error From PKO. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell Count Overflow. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell Count Overflow. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< DMA Timer Interrupts */
+ uint64_t dcnt : 2; /**< DMA Count Interrupts */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts */
+ uint64_t reserved_28_31 : 4;
+ uint64_t m3_un_wi : 1; /**< Reserved. */
+ uint64_t m3_un_b0 : 1; /**< Reserved. */
+ uint64_t m3_up_wi : 1; /**< Reserved. */
+ uint64_t m3_up_b0 : 1; /**< Reserved. */
+ uint64_t m2_un_wi : 1; /**< Reserved. */
+ uint64_t m2_un_b0 : 1; /**< Reserved. */
+ uint64_t m2_up_wi : 1; /**< Reserved. */
+ uint64_t m2_up_b0 : 1; /**< Reserved. */
+ uint64_t mac1_int : 1; /**< Enables SLI_INT_SUM[19] to generate an
+ interrupt to the PCIE-Port1 for MSI/inta.
+ The value of this bit has NO effect on PCIE Port0.
+ SLI_INT_ENB_PORT0[MAC1_INT] should NEVER be set. */
+ uint64_t mac0_int : 1; /**< Enables SLI_INT_SUM[18] to generate an
+ interrupt to the PCIE-Port0 for MSI/inta.
+ The value of this bit has NO effect on PCIE Port1.
+ SLI_INT_ENB_PORT1[MAC0_INT] should NEVER be set. */
+ uint64_t mio_int1 : 1; /**< Enables SLI_INT_SUM[17] to generate an
+ interrupt to the PCIE core for MSI/inta.
+ SLI_INT_ENB_PORT0[MIO_INT1] should NEVER be set. */
+ uint64_t mio_int0 : 1; /**< Enables SLI_INT_SUM[16] to generate an
+ interrupt to the PCIE core for MSI/inta.
+ SLI_INT_ENB_PORT1[MIO_INT0] should NEVER be set. */
+ uint64_t m1_un_wi : 1; /**< Enables SLI_INT_SUM[15] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_un_b0 : 1; /**< Enables SLI_INT_SUM[14] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_up_wi : 1; /**< Enables SLI_INT_SUM[13] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_up_b0 : 1; /**< Enables SLI_INT_SUM[12] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_un_wi : 1; /**< Enables SLI_INT_SUM[11] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_un_b0 : 1; /**< Enables SLI_INT_SUM[10] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_up_wi : 1; /**< Enables SLI_INT_SUM[9] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_up_b0 : 1; /**< Enables SLI_INT_SUM[8] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Enables SLI_INT_SUM[5] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pcnt : 1; /**< Enables SLI_INT_SUM[4] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t iob2big : 1; /**< Enables SLI_INT_SUM[3] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t bar0_to : 1; /**< Enables SLI_INT_SUM[2] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< Enables SLI_INT_SUM[0] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t mac0_int : 1;
+ uint64_t mac1_int : 1;
+ uint64_t m2_up_b0 : 1;
+ uint64_t m2_up_wi : 1;
+ uint64_t m2_un_b0 : 1;
+ uint64_t m2_un_wi : 1;
+ uint64_t m3_up_b0 : 1;
+ uint64_t m3_up_wi : 1;
+ uint64_t m3_un_b0 : 1;
+ uint64_t m3_un_wi : 1;
+ uint64_t reserved_28_31 : 4;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t sprt2_err : 1;
+ uint64_t sprt3_err : 1;
+ uint64_t ill_pad : 1;
+ uint64_t pipe_err : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_sli_int_enb_portx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t ill_pad : 1; /**< Illegal packet csr address. */
+ uint64_t sprt3_err : 1; /**< Error Response received on SLI port 3. */
+ uint64_t sprt2_err : 1; /**< Error Response received on SLI port 2. */
+ uint64_t sprt1_err : 1; /**< Error Response received on SLI port 1. */
+ uint64_t sprt0_err : 1; /**< Error Response received on SLI port 0. */
+ uint64_t pins_err : 1; /**< Read Error during packet instruction fetch. */
+ uint64_t pop_err : 1; /**< Read Error during packet scatter pointer fetch. */
+ uint64_t pdi_err : 1; /**< Read Error during packet data fetch. */
+ uint64_t pgl_err : 1; /**< Read Error during gather list fetch. */
+ uint64_t pin_bp : 1; /**< Packet Input Count exceeded WMARK. */
+ uint64_t pout_err : 1; /**< Packet Out Interrupt, Error From PKO. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell Count Overflow. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell Count Overflow. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< DMA Timer Interrupts */
+ uint64_t dcnt : 2; /**< DMA Count Interrupts */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts */
+ uint64_t reserved_28_31 : 4;
+ uint64_t m3_un_wi : 1; /**< Reserved. */
+ uint64_t m3_un_b0 : 1; /**< Reserved. */
+ uint64_t m3_up_wi : 1; /**< Reserved. */
+ uint64_t m3_up_b0 : 1; /**< Reserved. */
+ uint64_t m2_un_wi : 1; /**< Reserved. */
+ uint64_t m2_un_b0 : 1; /**< Reserved. */
+ uint64_t m2_up_wi : 1; /**< Reserved. */
+ uint64_t m2_up_b0 : 1; /**< Reserved. */
+ uint64_t mac1_int : 1; /**< Enables SLI_INT_SUM[19] to generate an
+ interrupt to the PCIE-Port1 for MSI/inta.
+ The value of this bit has NO effect on PCIE Port0.
+ SLI_INT_ENB_PORT0[MAC1_INT] should NEVER be set. */
+ uint64_t mac0_int : 1; /**< Enables SLI_INT_SUM[18] to generate an
+ interrupt to the PCIE-Port0 for MSI/inta.
+ The value of this bit has NO effect on PCIE Port1.
+ SLI_INT_ENB_PORT1[MAC0_INT] should NEVER be set. */
+ uint64_t mio_int1 : 1; /**< Enables SLI_INT_SUM[17] to generate an
+ interrupt to the PCIE core for MSI/inta.
+ SLI_INT_ENB_PORT0[MIO_INT1] should NEVER be set. */
+ uint64_t mio_int0 : 1; /**< Enables SLI_INT_SUM[16] to generate an
+ interrupt to the PCIE core for MSI/inta.
+ SLI_INT_ENB_PORT1[MIO_INT0] should NEVER be set. */
+ uint64_t m1_un_wi : 1; /**< Enables SLI_INT_SUM[15] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_un_b0 : 1; /**< Enables SLI_INT_SUM[14] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_up_wi : 1; /**< Enables SLI_INT_SUM[13] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_up_b0 : 1; /**< Enables SLI_INT_SUM[12] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_un_wi : 1; /**< Enables SLI_INT_SUM[11] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_un_b0 : 1; /**< Enables SLI_INT_SUM[10] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_up_wi : 1; /**< Enables SLI_INT_SUM[9] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_up_b0 : 1; /**< Enables SLI_INT_SUM[8] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Enables SLI_INT_SUM[5] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pcnt : 1; /**< Enables SLI_INT_SUM[4] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t iob2big : 1; /**< Enables SLI_INT_SUM[3] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t bar0_to : 1; /**< Enables SLI_INT_SUM[2] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< Enables SLI_INT_SUM[0] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t mac0_int : 1;
+ uint64_t mac1_int : 1;
+ uint64_t m2_up_b0 : 1;
+ uint64_t m2_up_wi : 1;
+ uint64_t m2_un_b0 : 1;
+ uint64_t m2_un_wi : 1;
+ uint64_t m3_up_b0 : 1;
+ uint64_t m3_up_wi : 1;
+ uint64_t m3_un_b0 : 1;
+ uint64_t m3_un_wi : 1;
+ uint64_t reserved_28_31 : 4;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t sprt2_err : 1;
+ uint64_t sprt3_err : 1;
+ uint64_t ill_pad : 1;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } cn61xx;
+ struct cvmx_sli_int_enb_portx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t ill_pad : 1; /**< Illegal packet csr address. */
+ uint64_t reserved_58_59 : 2;
+ uint64_t sprt1_err : 1; /**< Error Response received on SLI port 1. */
+ uint64_t sprt0_err : 1; /**< Error Response received on SLI port 0. */
+ uint64_t pins_err : 1; /**< Read Error during packet instruction fetch. */
+ uint64_t pop_err : 1; /**< Read Error during packet scatter pointer fetch. */
+ uint64_t pdi_err : 1; /**< Read Error during packet data fetch. */
+ uint64_t pgl_err : 1; /**< Read Error during gather list fetch. */
+ uint64_t pin_bp : 1; /**< Packet Input Count exceeded WMARK. */
+ uint64_t pout_err : 1; /**< Packet Out Interrupt, Error From PKO. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell Count Overflow. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell Count Overflow. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< DMA Timer Interrupts */
+ uint64_t dcnt : 2; /**< DMA Count Interrupts */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mac1_int : 1; /**< Enables SLI_INT_SUM[19] to generate an
+ interrupt to the PCIE-Port1 for MSI/inta.
+ The value of this bit has NO effect on PCIE Port0.
+ SLI_INT_ENB_PORT0[MAC1_INT] should NEVER be set. */
+ uint64_t mac0_int : 1; /**< Enables SLI_INT_SUM[18] to generate an
+ interrupt to the PCIE-Port0 for MSI/inta.
+ The value of this bit has NO effect on PCIE Port1.
+ SLI_INT_ENB_PORT1[MAC0_INT] should NEVER be set. */
+ uint64_t mio_int1 : 1; /**< Enables SLI_INT_SUM[17] to generate an
+ interrupt to the PCIE core for MSI/inta.
+ SLI_INT_ENB_PORT0[MIO_INT1] should NEVER be set. */
+ uint64_t mio_int0 : 1; /**< Enables SLI_INT_SUM[16] to generate an
+ interrupt to the PCIE core for MSI/inta.
+ SLI_INT_ENB_PORT1[MIO_INT0] should NEVER be set. */
+ uint64_t m1_un_wi : 1; /**< Enables SLI_INT_SUM[15] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_un_b0 : 1; /**< Enables SLI_INT_SUM[14] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_up_wi : 1; /**< Enables SLI_INT_SUM[13] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_up_b0 : 1; /**< Enables SLI_INT_SUM[12] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_un_wi : 1; /**< Enables SLI_INT_SUM[11] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_un_b0 : 1; /**< Enables SLI_INT_SUM[10] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_up_wi : 1; /**< Enables SLI_INT_SUM[9] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_up_b0 : 1; /**< Enables SLI_INT_SUM[8] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Enables SLI_INT_SUM[5] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pcnt : 1; /**< Enables SLI_INT_SUM[4] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t iob2big : 1; /**< Enables SLI_INT_SUM[3] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t bar0_to : 1; /**< Enables SLI_INT_SUM[2] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< Enables SLI_INT_SUM[0] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t mac0_int : 1;
+ uint64_t mac1_int : 1;
+ uint64_t reserved_20_31 : 12;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t reserved_58_59 : 2;
+ uint64_t ill_pad : 1;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } cn63xx;
+ struct cvmx_sli_int_enb_portx_cn63xx cn63xxp1;
+ struct cvmx_sli_int_enb_portx_cn61xx cn66xx;
+ struct cvmx_sli_int_enb_portx_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t pipe_err : 1; /**< Out of range PIPE value. */
+ uint64_t ill_pad : 1; /**< Illegal packet csr address. */
+ uint64_t reserved_58_59 : 2;
+ uint64_t sprt1_err : 1; /**< Error Response received on SLI port 1. */
+ uint64_t sprt0_err : 1; /**< Error Response received on SLI port 0. */
+ uint64_t pins_err : 1; /**< Read Error during packet instruction fetch. */
+ uint64_t pop_err : 1; /**< Read Error during packet scatter pointer fetch. */
+ uint64_t pdi_err : 1; /**< Read Error during packet data fetch. */
+ uint64_t pgl_err : 1; /**< Read Error during gather list fetch. */
+ uint64_t reserved_51_51 : 1;
+ uint64_t pout_err : 1; /**< Packet Out Interrupt, Error From PKO. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell Count Overflow. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell Count Overflow. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< DMA Timer Interrupts */
+ uint64_t dcnt : 2; /**< DMA Count Interrupts */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mac1_int : 1; /**< Enables SLI_INT_SUM[19] to generate an
+ interrupt to the PCIE-Port1 for MSI/inta.
+ The value of this bit has NO effect on PCIE Port0.
+ SLI_INT_ENB_PORT0[MAC1_INT] should NEVER be set. */
+ uint64_t mac0_int : 1; /**< Enables SLI_INT_SUM[18] to generate an
+ interrupt to the PCIE-Port0 for MSI/inta.
+ The value of this bit has NO effect on PCIE Port1.
+ SLI_INT_ENB_PORT1[MAC0_INT] should NEVER be set. */
+ uint64_t mio_int1 : 1; /**< Enables SLI_INT_SUM[17] to generate an
+ interrupt to the PCIE core for MSI/inta.
+ SLI_INT_ENB_PORT0[MIO_INT1] should NEVER be set. */
+ uint64_t mio_int0 : 1; /**< Enables SLI_INT_SUM[16] to generate an
+ interrupt to the PCIE core for MSI/inta.
+ SLI_INT_ENB_PORT1[MIO_INT0] should NEVER be set. */
+ uint64_t m1_un_wi : 1; /**< Enables SLI_INT_SUM[15] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_un_b0 : 1; /**< Enables SLI_INT_SUM[14] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_up_wi : 1; /**< Enables SLI_INT_SUM[13] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m1_up_b0 : 1; /**< Enables SLI_INT_SUM[12] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_un_wi : 1; /**< Enables SLI_INT_SUM[11] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_un_b0 : 1; /**< Enables SLI_INT_SUM[10] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_up_wi : 1; /**< Enables SLI_INT_SUM[9] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t m0_up_b0 : 1; /**< Enables SLI_INT_SUM[8] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Enables SLI_INT_SUM[5] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t pcnt : 1; /**< Enables SLI_INT_SUM[4] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t iob2big : 1; /**< Enables SLI_INT_SUM[3] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t bar0_to : 1; /**< Enables SLI_INT_SUM[2] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< Enables SLI_INT_SUM[0] to generate an
+ interrupt to the PCIE core for MSI/inta. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t mac0_int : 1;
+ uint64_t mac1_int : 1;
+ uint64_t reserved_20_31 : 12;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t reserved_58_59 : 2;
+ uint64_t ill_pad : 1;
+ uint64_t pipe_err : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } cn68xx;
+ struct cvmx_sli_int_enb_portx_cn68xx cn68xxp1;
+ struct cvmx_sli_int_enb_portx_cn61xx cnf71xx;
+};
+typedef union cvmx_sli_int_enb_portx cvmx_sli_int_enb_portx_t;
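
The per-port variant follows the same pattern, with the caveat from the field notes that a port must only enable its own MAC/MIO bits. A sketch, assuming CVMX_SLI_INT_ENB_PORTX(port) as the per-port address macro:

    /* Enable MSI/INTA generation for one PCIe port, honoring the rule
       that a port never enables the other port's MAC/MIO bits. */
    static void sli_enable_port_ints(int port)
    {
        cvmx_sli_int_enb_portx_t enb;

        enb.u64 = 0;
        enb.s.ptime = 1; /* packet timer interrupt */
        enb.s.pcnt = 1;  /* packet counter interrupt */
        if (port == 0) {
            enb.s.mac0_int = 1; /* never set MAC1_INT on port 0 */
            enb.s.mio_int0 = 1;
        } else {
            enb.s.mac1_int = 1; /* never set MAC0_INT on port 1 */
            enb.s.mio_int1 = 1;
        }
        cvmx_write_csr(CVMX_SLI_INT_ENB_PORTX(port), enb.u64);
    }
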
+
+/**
+ * cvmx_sli_int_sum
+ *
+ * SLI_INT_SUM = SLI Interrupt Summary Register
+ *
+ * Set when an interrupt condition occurs; write '1' to clear.
+ */
+union cvmx_sli_int_sum {
+ uint64_t u64;
+ struct cvmx_sli_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t pipe_err : 1; /**< Set when a PIPE value outside range is received. */
+ uint64_t ill_pad : 1; /**< Set when a BAR0 address R/W falls into the
+ address range of the Packet-CSR, but for an
+ unused address. */
+ uint64_t sprt3_err : 1; /**< Reserved. */
+ uint64_t sprt2_err : 1; /**< Reserved. */
+ uint64_t sprt1_err : 1; /**< When an error response is received on SLI
+ port 1, this bit is set. */
+ uint64_t sprt0_err : 1; /**< When an error response is received on SLI
+ port 0, this bit is set. */
+ uint64_t pins_err : 1; /**< When a read error occurs on a packet instruction
+ this bit is set. */
+ uint64_t pop_err : 1; /**< When a read error occurs on a packet scatter
+ pointer pair this bit is set. */
+ uint64_t pdi_err : 1; /**< When a read error occurs on a packet data read
+ this bit is set. */
+ uint64_t pgl_err : 1; /**< When a read error occurs on a packet gather list
+ read this bit is set. */
+ uint64_t pin_bp : 1; /**< Packet input count has exceeded the WMARK.
+ See SLI_PKT_IN_BP */
+ uint64_t pout_err : 1; /**< Set when PKO sends packet data with the error bit
+ set. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell count overflowed. The
+ doorbell that overflowed can be found in
+ DPI_PINT_INFO[PSLDBOF]. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell count overflowed. The
+ doorbell that overflowed can be found in
+ DPI_PINT_INFO[PIDBOF]. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< Whenever SLI_DMAx_CNT[CNT] is not 0, the
+ SLI_DMAx_TIM[TIM] timer increments every SLI
+ clock.
+ DTIME[x] is set whenever SLI_DMAx_TIM[TIM] >
+ SLI_DMAx_INT_LEVEL[TIME].
+ DTIME[x] is normally cleared by clearing
+ SLI_DMAx_CNT[CNT] (which also clears
+ SLI_DMAx_TIM[TIM]). */
+ uint64_t dcnt : 2; /**< DCNT[x] is set whenever SLI_DMAx_CNT[CNT] >
+ SLI_DMAx_INT_LEVEL[CNT].
+ DCNT[x] is normally cleared by decreasing
+ SLI_DMAx_CNT[CNT]. */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t m3_un_wi : 1; /**< Reserved. */
+ uint64_t m3_un_b0 : 1; /**< Reserved. */
+ uint64_t m3_up_wi : 1; /**< Reserved. */
+ uint64_t m3_up_b0 : 1; /**< Reserved. */
+ uint64_t m2_un_wi : 1; /**< Reserved. */
+ uint64_t m2_un_b0 : 1; /**< Reserved. */
+ uint64_t m2_up_wi : 1; /**< Reserved. */
+ uint64_t m2_up_b0 : 1; /**< Reserved. */
+ uint64_t mac1_int : 1; /**< Interrupt from MAC1.
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB_INT) */
+ uint64_t mac0_int : 1; /**< Interrupt from MAC0.
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB_INT) */
+ uint64_t mio_int1 : 1; /**< Interrupt from MIO for PORT 1.
+ See CIU_INT33_SUM0, CIU_INT_SUM1
+ (enabled by CIU_INT33_EN0, CIU_INT33_EN1) */
+ uint64_t mio_int0 : 1; /**< Interrupt from MIO for PORT 0.
+ See CIU_INT32_SUM0, CIU_INT_SUM1
+ (enabled by CIU_INT32_EN0, CIU_INT32_EN1) */
+ uint64_t m1_un_wi : 1; /**< Received Unsupported N-TLP for Window Register
+ from MAC 1. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m1_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0 from MAC 1.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m1_up_wi : 1; /**< Received Unsupported P-TLP for Window Register
+ from MAC 1. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m1_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0 from MAC 1.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m0_un_wi : 1; /**< Received Unsupported N-TLP for Window Register
+ from MAC 0. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m0_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0 from MAC 0.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m0_up_wi : 1; /**< Received Unsupported P-TLP for Window Register
+ from MAC 0. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m0_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0 from MAC 0.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Packet Timer has an interrupt. The rings involved
+ can be found in SLI_PKT_TIME_INT. */
+ uint64_t pcnt : 1; /**< Packet Counter has an interrupt. The rings involved
+ can be found in SLI_PKT_CNT_INT. */
+ uint64_t iob2big : 1; /**< A requested IOBDMA is too large. */
+ uint64_t bar0_to : 1; /**< BAR0 R/W to an NCB device did not receive
+ read-data/commit in 0xffff core clocks. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< A read or write transfer did not complete
+ within 0xffff core clocks. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t mac0_int : 1;
+ uint64_t mac1_int : 1;
+ uint64_t m2_up_b0 : 1;
+ uint64_t m2_up_wi : 1;
+ uint64_t m2_un_b0 : 1;
+ uint64_t m2_un_wi : 1;
+ uint64_t m3_up_b0 : 1;
+ uint64_t m3_up_wi : 1;
+ uint64_t m3_un_b0 : 1;
+ uint64_t m3_un_wi : 1;
+ uint64_t reserved_28_31 : 4;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t sprt2_err : 1;
+ uint64_t sprt3_err : 1;
+ uint64_t ill_pad : 1;
+ uint64_t pipe_err : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_sli_int_sum_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t ill_pad : 1; /**< Set when a BAR0 address R/W falls into the
+ address range of the Packet-CSR, but for an
+ unused address. */
+ uint64_t sprt3_err : 1; /**< Reserved. */
+ uint64_t sprt2_err : 1; /**< Reserved. */
+ uint64_t sprt1_err : 1; /**< When an error response is received on SLI
+ port 1, this bit is set. */
+ uint64_t sprt0_err : 1; /**< When an error response is received on SLI
+ port 0, this bit is set. */
+ uint64_t pins_err : 1; /**< When a read error occurs on a packet instruction
+ this bit is set. */
+ uint64_t pop_err : 1; /**< When a read error occurs on a packet scatter
+ pointer pair this bit is set. */
+ uint64_t pdi_err : 1; /**< When a read error occurs on a packet data read
+ this bit is set. */
+ uint64_t pgl_err : 1; /**< When a read error occurs on a packet gather list
+ read this bit is set. */
+ uint64_t pin_bp : 1; /**< Packet input count has exceeded the WMARK.
+ See SLI_PKT_IN_BP */
+ uint64_t pout_err : 1; /**< Set when PKO sends packet data with the error bit
+ set. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell count overflowed. The
+ doorbell that overflowed can be found in
+ DPI_PINT_INFO[PSLDBOF]. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell count overflowed. The
+ doorbell that overflowed can be found in
+ DPI_PINT_INFO[PIDBOF]. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< Whenever SLI_DMAx_CNT[CNT] is not 0, the
+ SLI_DMAx_TIM[TIM] timer increments every SLI
+ clock.
+ DTIME[x] is set whenever SLI_DMAx_TIM[TIM] >
+ SLI_DMAx_INT_LEVEL[TIME].
+ DTIME[x] is normally cleared by clearing
+ SLI_DMAx_CNT[CNT] (which also clears
+ SLI_DMAx_TIM[TIM]). */
+ uint64_t dcnt : 2; /**< DCNT[x] is set whenever SLI_DMAx_CNT[CNT] >
+ SLI_DMAx_INT_LEVEL[CNT].
+ DCNT[x] is normally cleared by decreasing
+ SLI_DMAx_CNT[CNT]. */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t m3_un_wi : 1; /**< Reserved. */
+ uint64_t m3_un_b0 : 1; /**< Reserved. */
+ uint64_t m3_up_wi : 1; /**< Reserved. */
+ uint64_t m3_up_b0 : 1; /**< Reserved. */
+ uint64_t m2_un_wi : 1; /**< Reserved. */
+ uint64_t m2_un_b0 : 1; /**< Reserved. */
+ uint64_t m2_up_wi : 1; /**< Reserved. */
+ uint64_t m2_up_b0 : 1; /**< Reserved. */
+ uint64_t mac1_int : 1; /**< Interrupt from MAC1.
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB_INT) */
+ uint64_t mac0_int : 1; /**< Interrupt from MAC0.
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB_INT) */
+ uint64_t mio_int1 : 1; /**< Interrupt from MIO for PORT 1.
+ See CIU_INT33_SUM0, CIU_INT_SUM1
+ (enabled by CIU_INT33_EN0, CIU_INT33_EN1) */
+ uint64_t mio_int0 : 1; /**< Interrupt from MIO for PORT 0.
+ See CIU_INT32_SUM0, CIU_INT_SUM1
+ (enabled by CIU_INT32_EN0, CIU_INT32_EN1) */
+ uint64_t m1_un_wi : 1; /**< Received Unsupported N-TLP for Window Register
+ from MAC 1. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m1_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0 from MAC 1.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m1_up_wi : 1; /**< Received Unsupported P-TLP for Window Register
+ from MAC 1. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m1_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0 from MAC 1.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m0_un_wi : 1; /**< Received Unsupported N-TLP for Window Register
+ from MAC 0. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m0_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0 from MAC 0.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m0_up_wi : 1; /**< Received Unsupported P-TLP for Window Register
+ from MAC 0. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m0_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0 from MAC 0.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Packet Timer has an interrupt. The rings involved
+ can be found in SLI_PKT_TIME_INT. */
+ uint64_t pcnt : 1; /**< Packet Counter has an interrupt. The rings involved
+ can be found in SLI_PKT_CNT_INT. */
+ uint64_t iob2big : 1; /**< A requested IOBDMA is too large. */
+ uint64_t bar0_to : 1; /**< BAR0 R/W to an NCB device did not receive
+ read-data/commit in 0xffff core clocks. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< A read or write transfer did not complete
+ within 0xffff core clocks. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t mac0_int : 1;
+ uint64_t mac1_int : 1;
+ uint64_t m2_up_b0 : 1;
+ uint64_t m2_up_wi : 1;
+ uint64_t m2_un_b0 : 1;
+ uint64_t m2_un_wi : 1;
+ uint64_t m3_up_b0 : 1;
+ uint64_t m3_up_wi : 1;
+ uint64_t m3_un_b0 : 1;
+ uint64_t m3_un_wi : 1;
+ uint64_t reserved_28_31 : 4;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t sprt2_err : 1;
+ uint64_t sprt3_err : 1;
+ uint64_t ill_pad : 1;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } cn61xx;
+ struct cvmx_sli_int_sum_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63 : 3;
+ uint64_t ill_pad : 1; /**< Set when a BAR0 address R/W falls into the
+ address range of the Packet-CSR, but for an
+ unused address. */
+ uint64_t reserved_58_59 : 2;
+ uint64_t sprt1_err : 1; /**< When an error response is received on SLI
+ port 1, this bit is set. */
+ uint64_t sprt0_err : 1; /**< When an error response is received on SLI
+ port 0, this bit is set. */
+ uint64_t pins_err : 1; /**< When a read error occurs on a packet instruction
+ this bit is set. */
+ uint64_t pop_err : 1; /**< When a read error occurs on a packet scatter
+ pointer pair this bit is set. */
+ uint64_t pdi_err : 1; /**< When a read error occurs on a packet data read
+ this bit is set. */
+ uint64_t pgl_err : 1; /**< When a read error occurs on a packet gather list
+ read this bit is set. */
+ uint64_t pin_bp : 1; /**< Packet input count has exceeded the WMARK.
+ See SLI_PKT_IN_BP */
+ uint64_t pout_err : 1; /**< Set when PKO sends packet data with the error bit
+ set. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell count overflowed. The
+ doorbell that overflowed can be found in
+ DPI_PINT_INFO[PSLDBOF]. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell count overflowed. The
+ doorbell that overflowed can be found in
+ DPI_PINT_INFO[PIDBOF]. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< Whenever SLI_DMAx_CNT[CNT] is not 0, the
+ SLI_DMAx_TIM[TIM] timer increments every SLI
+ clock.
+ DTIME[x] is set whenever SLI_DMAx_TIM[TIM] >
+ SLI_DMAx_INT_LEVEL[TIME].
+ DTIME[x] is normally cleared by clearing
+ SLI_DMAx_CNT[CNT] (which also clears
+ SLI_DMAx_TIM[TIM]). */
+ uint64_t dcnt : 2; /**< DCNT[x] is set whenever SLI_DMAx_CNT[CNT] >
+ SLI_DMAx_INT_LEVEL[CNT].
+ DCNT[x] is normally cleared by decreasing
+ SLI_DMAx_CNT[CNT]. */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mac1_int : 1; /**< Interrupt from MAC1.
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB_INT) */
+ uint64_t mac0_int : 1; /**< Interrupt from MAC0.
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB_INT) */
+ uint64_t mio_int1 : 1; /**< Interrupt from MIO for PORT 1.
+ See CIU_INT33_SUM0, CIU_INT_SUM1
+ (enabled by CIU_INT33_EN0, CIU_INT33_EN1) */
+ uint64_t mio_int0 : 1; /**< Interrupt from MIO for PORT 0.
+ See CIU_INT32_SUM0, CIU_INT_SUM1
+ (enabled by CIU_INT32_EN0, CIU_INT32_EN1) */
+ uint64_t m1_un_wi : 1; /**< Received Unsupported N-TLP for Window Register
+ from MAC 1. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m1_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0 from MAC 1.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m1_up_wi : 1; /**< Received Unsupported P-TLP for Window Register
+ from MAC 1. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m1_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0 from MAC 1.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m0_un_wi : 1; /**< Received Unsupported N-TLP for Window Register
+ from MAC 0. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m0_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0 from MAC 0.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m0_up_wi : 1; /**< Received Unsupported P-TLP for Window Register
+ from MAC 0. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m0_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0 from MAC 0.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Packet Timer has an interrupt. The rings involved
+ can be found in SLI_PKT_TIME_INT. */
+ uint64_t pcnt : 1; /**< Packet Counter has an interrupt. The rings involved
+ can be found in SLI_PKT_CNT_INT. */
+ uint64_t iob2big : 1; /**< A requested IOBDMA is too large. */
+ uint64_t bar0_to : 1; /**< BAR0 R/W to an NCB device did not receive
+ read-data/commit in 0xffff core clocks. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< A read or write transfer did not complete
+ within 0xffff core clocks. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t mac0_int : 1;
+ uint64_t mac1_int : 1;
+ uint64_t reserved_20_31 : 12;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t pin_bp : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t reserved_58_59 : 2;
+ uint64_t ill_pad : 1;
+ uint64_t reserved_61_63 : 3;
+#endif
+ } cn63xx;
+ struct cvmx_sli_int_sum_cn63xx cn63xxp1;
+ struct cvmx_sli_int_sum_cn61xx cn66xx;
+ struct cvmx_sli_int_sum_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t pipe_err : 1; /**< Set when a PIPE value outside range is received. */
+ uint64_t ill_pad : 1; /**< Set when a BAR0 address R/W falls into the
+ address range of the Packet-CSR, but for an
+ unused address. */
+ uint64_t reserved_58_59 : 2;
+ uint64_t sprt1_err : 1; /**< When an error response is received on SLI
+ port 1, this bit is set. */
+ uint64_t sprt0_err : 1; /**< When an error response is received on SLI
+ port 0, this bit is set. */
+ uint64_t pins_err : 1; /**< When a read error occurs on a packet instruction
+ this bit is set. */
+ uint64_t pop_err : 1; /**< When a read error occurs on a packet scatter
+ pointer pair this bit is set. */
+ uint64_t pdi_err : 1; /**< When a read error occurs on a packet data read
+ this bit is set. */
+ uint64_t pgl_err : 1; /**< When a read error occurs on a packet gather list
+ read this bit is set. */
+ uint64_t reserved_51_51 : 1;
+ uint64_t pout_err : 1; /**< Set when PKO sends packet data with the error bit
+ set. */
+ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell count overflowed. The
+ doorbell that overflowed can be found in
+ DPI_PINT_INFO[PSLDBOF]. */
+ uint64_t pidbof : 1; /**< Packet Instruction Doorbell count overflowed. The
+ doorbell that overflowed can be found in
+ DPI_PINT_INFO[PIDBOF]. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t dtime : 2; /**< Whenever SLI_DMAx_CNT[CNT] is not 0, the
+ SLI_DMAx_TIM[TIM] timer increments every SLI
+ clock.
+ DTIME[x] is set whenever SLI_DMAx_TIM[TIM] >
+ SLI_DMAx_INT_LEVEL[TIME].
+ DTIME[x] is normally cleared by clearing
+ SLI_DMAx_CNT[CNT] (which also clears
+ SLI_DMAx_TIM[TIM]). */
+ uint64_t dcnt : 2; /**< DCNT[x] is set whenever SLI_DMAx_CNT[CNT] >
+ SLI_DMAx_INT_LEVEL[CNT].
+ DCNT[x] is normally cleared by decreasing
+ SLI_DMAx_CNT[CNT]. */
+ uint64_t dmafi : 2; /**< DMA set Forced Interrupts. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mac1_int : 1; /**< Interrupt from MAC1.
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB_INT) */
+ uint64_t mac0_int : 1; /**< Interrupt from MAC0.
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB_INT) */
+ uint64_t mio_int1 : 1; /**< Interrupt from MIO for PORT 1.
+ See CIU_INT33_SUM0, CIU_INT_SUM1
+ (enabled by CIU_INT33_EN0, CIU_INT33_EN1) */
+ uint64_t mio_int0 : 1; /**< Interrupt from MIO for PORT 0.
+ See CIU_INT32_SUM0, CIU_INT_SUM1
+ (enabled by CIU_INT32_EN0, CIU_INT32_EN1) */
+ uint64_t m1_un_wi : 1; /**< Received Unsupported N-TLP for Window Register
+ from MAC 1. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m1_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0 from MAC 1.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m1_up_wi : 1; /**< Received Unsupported P-TLP for Window Register
+ from MAC 1. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m1_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0 from MAC 1.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m0_un_wi : 1; /**< Received Unsupported N-TLP for Window Register
+ from MAC 0. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m0_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0 from MAC 0.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t m0_up_wi : 1; /**< Received Unsupported P-TLP for Window Register
+ from MAC 0. This occurs when the window registers
+ are disabled and a window register access occurs. */
+ uint64_t m0_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0 from MAC 0.
+ This occurs when the BAR 0 address space is
+ disabled. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ptime : 1; /**< Packet Timer has an interrupt. The rings involved
+ can be found in SLI_PKT_TIME_INT. */
+ uint64_t pcnt : 1; /**< Packet Counter has an interrupt. The rings involved
+ can be found in SLI_PKT_CNT_INT. */
+ uint64_t iob2big : 1; /**< A requested IOBDMA is too large. */
+ uint64_t bar0_to : 1; /**< BAR0 R/W to a NCB device did not receive
+ read-data/commit in 0xffff core clocks. */
+ uint64_t reserved_1_1 : 1;
+ uint64_t rml_to : 1; /**< A read or write transfer did not complete
+ within 0xffff core clocks. */
+#else
+ uint64_t rml_to : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t bar0_to : 1;
+ uint64_t iob2big : 1;
+ uint64_t pcnt : 1;
+ uint64_t ptime : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t m0_up_b0 : 1;
+ uint64_t m0_up_wi : 1;
+ uint64_t m0_un_b0 : 1;
+ uint64_t m0_un_wi : 1;
+ uint64_t m1_up_b0 : 1;
+ uint64_t m1_up_wi : 1;
+ uint64_t m1_un_b0 : 1;
+ uint64_t m1_un_wi : 1;
+ uint64_t mio_int0 : 1;
+ uint64_t mio_int1 : 1;
+ uint64_t mac0_int : 1;
+ uint64_t mac1_int : 1;
+ uint64_t reserved_20_31 : 12;
+ uint64_t dmafi : 2;
+ uint64_t dcnt : 2;
+ uint64_t dtime : 2;
+ uint64_t reserved_38_47 : 10;
+ uint64_t pidbof : 1;
+ uint64_t psldbof : 1;
+ uint64_t pout_err : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t pgl_err : 1;
+ uint64_t pdi_err : 1;
+ uint64_t pop_err : 1;
+ uint64_t pins_err : 1;
+ uint64_t sprt0_err : 1;
+ uint64_t sprt1_err : 1;
+ uint64_t reserved_58_59 : 2;
+ uint64_t ill_pad : 1;
+ uint64_t pipe_err : 1;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } cn68xx;
+ struct cvmx_sli_int_sum_cn68xx cn68xxp1;
+ struct cvmx_sli_int_sum_cn61xx cnf71xx;
+};
+typedef union cvmx_sli_int_sum cvmx_sli_int_sum_t;
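+
+/*
+ * Usage sketch (editor's addition, not part of the vendor header): poll the
+ * interrupt summary and acknowledge one cause bit. Assumes the
+ * CVMX_SLI_INT_SUM address macro defined elsewhere in this header,
+ * cvmx_read_csr()/cvmx_write_csr() from cvmx.h, and the usual
+ * write-1-to-clear behavior of interrupt summary CSRs.
+ *
+ *     cvmx_sli_int_sum_t sum;
+ *     sum.u64 = cvmx_read_csr(CVMX_SLI_INT_SUM);
+ *     if (sum.s.rml_to) {              // RML read/write timed out
+ *         cvmx_sli_int_sum_t clr;
+ *         clr.u64 = 0;
+ *         clr.s.rml_to = 1;            // write 1 back to clear the bit
+ *         cvmx_write_csr(CVMX_SLI_INT_SUM, clr.u64);
+ *     }
+ */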
+
+/**
+ * cvmx_sli_last_win_rdata0
+ *
+ * SLI_LAST_WIN_RDATA0 = SLI Last Window Read Data
+ *
+ * The data from the last initiated window read by MAC 0.
+ */
+union cvmx_sli_last_win_rdata0 {
+ uint64_t u64;
+ struct cvmx_sli_last_win_rdata0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Last window read data. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_sli_last_win_rdata0_s cn61xx;
+ struct cvmx_sli_last_win_rdata0_s cn63xx;
+ struct cvmx_sli_last_win_rdata0_s cn63xxp1;
+ struct cvmx_sli_last_win_rdata0_s cn66xx;
+ struct cvmx_sli_last_win_rdata0_s cn68xx;
+ struct cvmx_sli_last_win_rdata0_s cn68xxp1;
+ struct cvmx_sli_last_win_rdata0_s cnf71xx;
+};
+typedef union cvmx_sli_last_win_rdata0 cvmx_sli_last_win_rdata0_t;
+
+/**
+ * cvmx_sli_last_win_rdata1
+ *
+ * SLI_LAST_WIN_RDATA1 = SLI Last Window Read Data
+ *
+ * The data from the last initiated window read by MAC 1.
+ */
+union cvmx_sli_last_win_rdata1 {
+ uint64_t u64;
+ struct cvmx_sli_last_win_rdata1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Last window read data. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_sli_last_win_rdata1_s cn61xx;
+ struct cvmx_sli_last_win_rdata1_s cn63xx;
+ struct cvmx_sli_last_win_rdata1_s cn63xxp1;
+ struct cvmx_sli_last_win_rdata1_s cn66xx;
+ struct cvmx_sli_last_win_rdata1_s cn68xx;
+ struct cvmx_sli_last_win_rdata1_s cn68xxp1;
+ struct cvmx_sli_last_win_rdata1_s cnf71xx;
+};
+typedef union cvmx_sli_last_win_rdata1 cvmx_sli_last_win_rdata1_t;
+
+/**
+ * cvmx_sli_last_win_rdata2
+ *
+ * SLI_LAST_WIN_RDATA2 = SLI Last Window Read Data
+ *
+ * The data from the last initiated window read by MAC 2.
+ */
+union cvmx_sli_last_win_rdata2 {
+ uint64_t u64;
+ struct cvmx_sli_last_win_rdata2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Last window read data. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_sli_last_win_rdata2_s cn61xx;
+ struct cvmx_sli_last_win_rdata2_s cn66xx;
+ struct cvmx_sli_last_win_rdata2_s cnf71xx;
+};
+typedef union cvmx_sli_last_win_rdata2 cvmx_sli_last_win_rdata2_t;
+
+/**
+ * cvmx_sli_last_win_rdata3
+ *
+ * SLI_LAST_WIN_RDATA3 = SLI Last Window Read Data
+ *
+ * The data from the last initiated window read by MAC 3.
+ */
+union cvmx_sli_last_win_rdata3 {
+ uint64_t u64;
+ struct cvmx_sli_last_win_rdata3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Last window read data. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_sli_last_win_rdata3_s cn61xx;
+ struct cvmx_sli_last_win_rdata3_s cn66xx;
+ struct cvmx_sli_last_win_rdata3_s cnf71xx;
+};
+typedef union cvmx_sli_last_win_rdata3 cvmx_sli_last_win_rdata3_t;
+
+/**
+ * cvmx_sli_mac_credit_cnt
+ *
+ * SLI_MAC_CREDIT_CNT = SLI MAC Credit Count
+ *
+ * Contains the number of credits for the MAC port FIFOs used by the SLI. This value needs to be set BEFORE S2M traffic
+ * flow starts. A write to this register will cause the credit counts in the SLI for the MAC ports to be reset to the value
+ * in this register.
+ */
+union cvmx_sli_mac_credit_cnt {
+ uint64_t u64;
+ struct cvmx_sli_mac_credit_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t p1_c_d : 1; /**< When set does not allow writing of P1_CCNT. */
+ uint64_t p1_n_d : 1; /**< When set does not allow writing of P1_NCNT. */
+ uint64_t p1_p_d : 1; /**< When set does not allow writing of P1_PCNT. */
+ uint64_t p0_c_d : 1; /**< When set does not allow writing of P0_CCNT. */
+ uint64_t p0_n_d : 1; /**< When set does not allow writing of P0_NCNT. */
+ uint64_t p0_p_d : 1; /**< When set does not allow writing of P0_PCNT. */
+ uint64_t p1_ccnt : 8; /**< Port1 C-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p1_ncnt : 8; /**< Port1 N-TLP FIFO Credits.
+ Legal values are 0x5 to 0x10. */
+ uint64_t p1_pcnt : 8; /**< Port1 P-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p0_ccnt : 8; /**< Port0 C-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p0_ncnt : 8; /**< Port0 N-TLP FIFO Credits.
+ Legal values are 0x5 to 0x10. */
+ uint64_t p0_pcnt : 8; /**< Port0 P-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+#else
+ uint64_t p0_pcnt : 8;
+ uint64_t p0_ncnt : 8;
+ uint64_t p0_ccnt : 8;
+ uint64_t p1_pcnt : 8;
+ uint64_t p1_ncnt : 8;
+ uint64_t p1_ccnt : 8;
+ uint64_t p0_p_d : 1;
+ uint64_t p0_n_d : 1;
+ uint64_t p0_c_d : 1;
+ uint64_t p1_p_d : 1;
+ uint64_t p1_n_d : 1;
+ uint64_t p1_c_d : 1;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_sli_mac_credit_cnt_s cn61xx;
+ struct cvmx_sli_mac_credit_cnt_s cn63xx;
+ struct cvmx_sli_mac_credit_cnt_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t p1_ccnt : 8; /**< Port1 C-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p1_ncnt : 8; /**< Port1 N-TLP FIFO Credits.
+ Legal values are 0x5 to 0x10. */
+ uint64_t p1_pcnt : 8; /**< Port1 P-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p0_ccnt : 8; /**< Port0 C-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p0_ncnt : 8; /**< Port0 N-TLP FIFO Credits.
+ Legal values are 0x5 to 0x10. */
+ uint64_t p0_pcnt : 8; /**< Port0 P-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+#else
+ uint64_t p0_pcnt : 8;
+ uint64_t p0_ncnt : 8;
+ uint64_t p0_ccnt : 8;
+ uint64_t p1_pcnt : 8;
+ uint64_t p1_ncnt : 8;
+ uint64_t p1_ccnt : 8;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } cn63xxp1;
+ struct cvmx_sli_mac_credit_cnt_s cn66xx;
+ struct cvmx_sli_mac_credit_cnt_s cn68xx;
+ struct cvmx_sli_mac_credit_cnt_s cn68xxp1;
+ struct cvmx_sli_mac_credit_cnt_s cnf71xx;
+};
+typedef union cvmx_sli_mac_credit_cnt cvmx_sli_mac_credit_cnt_t;
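+
+/*
+ * Usage sketch (editor's addition): program the MAC 0 credit counts before
+ * S2M traffic starts. Assumes the CVMX_SLI_MAC_CREDIT_CNT address macro
+ * defined elsewhere in this header and cvmx_read_csr()/cvmx_write_csr()
+ * from cvmx.h; the values shown are simply the documented legal maximums.
+ *
+ *     cvmx_sli_mac_credit_cnt_t credit;
+ *     credit.u64 = cvmx_read_csr(CVMX_SLI_MAC_CREDIT_CNT);
+ *     credit.s.p0_pcnt = 0x80;    // P-TLP credits, legal range 0x25-0x80
+ *     credit.s.p0_ncnt = 0x10;    // N-TLP credits, legal range 0x5-0x10
+ *     credit.s.p0_ccnt = 0x80;    // C-TLP credits, legal range 0x25-0x80
+ *     cvmx_write_csr(CVMX_SLI_MAC_CREDIT_CNT, credit.u64);  // resets counts
+ */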
+
+/**
+ * cvmx_sli_mac_credit_cnt2
+ *
+ * SLI_MAC_CREDIT_CNT2 = SLI MAC Credit Count2
+ *
+ * Contains the number of credits for the MAC port FIFOs (for MACs 2 and 3) used by the SLI. This value needs to be set BEFORE S2M traffic
+ * flow starts. A write to this register will cause the credit counts in the SLI for the MAC ports to be reset to the value
+ * in this register.
+ */
+union cvmx_sli_mac_credit_cnt2 {
+ uint64_t u64;
+ struct cvmx_sli_mac_credit_cnt2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t p3_c_d : 1; /**< When set does not allow writing of P3_CCNT. */
+ uint64_t p3_n_d : 1; /**< When set does not allow writing of P3_NCNT. */
+ uint64_t p3_p_d : 1; /**< When set does not allow writing of P3_PCNT. */
+ uint64_t p2_c_d : 1; /**< When set does not allow writing of P2_CCNT. */
+ uint64_t p2_n_d : 1; /**< When set does not allow writing of P2_NCNT. */
+ uint64_t p2_p_d : 1; /**< When set does not allow writing of P2_PCNT. */
+ uint64_t p3_ccnt : 8; /**< Port3 C-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p3_ncnt : 8; /**< Port3 N-TLP FIFO Credits.
+ Legal values are 0x5 to 0x10. */
+ uint64_t p3_pcnt : 8; /**< Port3 P-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p2_ccnt : 8; /**< Port2 C-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+ uint64_t p2_ncnt : 8; /**< Port2 N-TLP FIFO Credits.
+ Legal values are 0x5 to 0x10. */
+ uint64_t p2_pcnt : 8; /**< Port2 P-TLP FIFO Credits.
+ Legal values are 0x25 to 0x80. */
+#else
+ uint64_t p2_pcnt : 8;
+ uint64_t p2_ncnt : 8;
+ uint64_t p2_ccnt : 8;
+ uint64_t p3_pcnt : 8;
+ uint64_t p3_ncnt : 8;
+ uint64_t p3_ccnt : 8;
+ uint64_t p2_p_d : 1;
+ uint64_t p2_n_d : 1;
+ uint64_t p2_c_d : 1;
+ uint64_t p3_p_d : 1;
+ uint64_t p3_n_d : 1;
+ uint64_t p3_c_d : 1;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_sli_mac_credit_cnt2_s cn61xx;
+ struct cvmx_sli_mac_credit_cnt2_s cn66xx;
+ struct cvmx_sli_mac_credit_cnt2_s cnf71xx;
+};
+typedef union cvmx_sli_mac_credit_cnt2 cvmx_sli_mac_credit_cnt2_t;
+
+/**
+ * cvmx_sli_mac_number
+ *
+ * 0x13DA0 - 0x13DF0 reserved for ports 2 - 7
+ *
+ * SLI_MAC_NUMBER = SLI MAC Number
+ *
+ * When read from a MAC port, it returns the MAC's port number.
+ */
+union cvmx_sli_mac_number {
+ uint64_t u64;
+ struct cvmx_sli_mac_number_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t a_mode : 1; /**< SLI in Authenticate Mode. */
+ uint64_t num : 8; /**< The mac number. */
+#else
+ uint64_t num : 8;
+ uint64_t a_mode : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_sli_mac_number_s cn61xx;
+ struct cvmx_sli_mac_number_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t num : 8; /**< The mac number. */
+#else
+ uint64_t num : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn63xx;
+ struct cvmx_sli_mac_number_s cn66xx;
+ struct cvmx_sli_mac_number_cn63xx cn68xx;
+ struct cvmx_sli_mac_number_cn63xx cn68xxp1;
+ struct cvmx_sli_mac_number_s cnf71xx;
+};
+typedef union cvmx_sli_mac_number cvmx_sli_mac_number_t;
+
+/**
+ * cvmx_sli_mem_access_ctl
+ *
+ * SLI_MEM_ACCESS_CTL = SLI's Memory Access Control
+ *
+ * Contains control for access to the MAC address space.
+ */
+union cvmx_sli_mem_access_ctl {
+ uint64_t u64;
+ struct cvmx_sli_mem_access_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t max_word : 4; /**< The maximum number of words to merge into a single
+ write operation from the PPs to the MAC. Legal
+ values are 1 to 16, where a '0' is treated as 16. */
+ uint64_t timer : 10; /**< When the SLI starts a PP to MAC write, it waits
+ no longer than the value of TIMER in eclks to
+ merge additional writes from the PPs into 1
+ large write. Legal values for this field are 1 to
+ 1024, where a value of '0' is treated as 1024. */
+#else
+ uint64_t timer : 10;
+ uint64_t max_word : 4;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_sli_mem_access_ctl_s cn61xx;
+ struct cvmx_sli_mem_access_ctl_s cn63xx;
+ struct cvmx_sli_mem_access_ctl_s cn63xxp1;
+ struct cvmx_sli_mem_access_ctl_s cn66xx;
+ struct cvmx_sli_mem_access_ctl_s cn68xx;
+ struct cvmx_sli_mem_access_ctl_s cn68xxp1;
+ struct cvmx_sli_mem_access_ctl_s cnf71xx;
+};
+typedef union cvmx_sli_mem_access_ctl cvmx_sli_mem_access_ctl_t;
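+
+/*
+ * Usage sketch (editor's addition): tune the PP-to-MAC write merging
+ * described above. Note the zero encodings: MAX_WORD=0 means 16 words and
+ * TIMER=0 means 1024 eclks. Assumes the CVMX_SLI_MEM_ACCESS_CTL address
+ * macro defined elsewhere in this header.
+ *
+ *     cvmx_sli_mem_access_ctl_t ctl;
+ *     ctl.u64 = cvmx_read_csr(CVMX_SLI_MEM_ACCESS_CTL);
+ *     ctl.s.max_word = 0;     // merge up to 16 words ('0' is treated as 16)
+ *     ctl.s.timer = 50;       // wait at most 50 eclks for merge candidates
+ *     cvmx_write_csr(CVMX_SLI_MEM_ACCESS_CTL, ctl.u64);
+ */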
+
+/**
+ * cvmx_sli_mem_access_subid#
+ *
+ * 8070 - 80C0 saved for ports 2 through 7
+ * 0x80d0 free
+ *
+ * SLI_MEM_ACCESS_SUBIDX = SLI Memory Access SubidX Register
+ *
+ * Contains address index and control bits for access to memory from Core PPs.
+ */
+union cvmx_sli_mem_access_subidx {
+ uint64_t u64;
+ struct cvmx_sli_mem_access_subidx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63 : 21;
+ uint64_t zero : 1; /**< Causes all byte reads to be zero length reads.
+ Returns to the EXEC a zero for all read data.
+ This must be zero for sRIO ports. */
+ uint64_t port : 3; /**< Physical MAC port to which reads/writes through
+ this subid are sent. Must be <= 1, as there are
+ only two ports present. */
+ uint64_t nmerge : 1; /**< When set, no merging is allowed in this window. */
+ uint64_t esr : 2; /**< ES<1:0> for reads to this subid.
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space reads. */
+ uint64_t esw : 2; /**< ES<1:0> for writes to this subid.
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space writes. */
+ uint64_t wtype : 2; /**< ADDRTYPE<1:0> for writes to this subid
+ For PCIe:
+ - ADDRTYPE<0> is the relaxed-order attribute
+ - ADDRTYPE<1> is the no-snoop attribute
+ For sRIO:
+ - ADDRTYPE<1:0> help select an SRIO*_S2M_TYPE*
+ entry */
+ uint64_t rtype : 2; /**< ADDRTYPE<1:0> for reads to this subid
+ For PCIe:
+ - ADDRTYPE<0> is the relaxed-order attribute
+ - ADDRTYPE<1> is the no-snoop attribute
+ For sRIO:
+ - ADDRTYPE<1:0> help select an SRIO*_S2M_TYPE*
+ entry */
+ uint64_t reserved_0_29 : 30;
+#else
+ uint64_t reserved_0_29 : 30;
+ uint64_t rtype : 2;
+ uint64_t wtype : 2;
+ uint64_t esw : 2;
+ uint64_t esr : 2;
+ uint64_t nmerge : 1;
+ uint64_t port : 3;
+ uint64_t zero : 1;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } s;
+ struct cvmx_sli_mem_access_subidx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63 : 21;
+ uint64_t zero : 1; /**< Causes all byte reads to be zero length reads.
+ Returns to the EXEC a zero for all read data.
+ This must be zero for sRIO ports. */
+ uint64_t port : 3; /**< Physical MAC port to which reads/writes through
+ this subid are sent. Must be <= 1, as there are
+ only two ports present. */
+ uint64_t nmerge : 1; /**< When set, no merging is allowed in this window. */
+ uint64_t esr : 2; /**< ES<1:0> for reads to this subid.
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space reads. */
+ uint64_t esw : 2; /**< ES<1:0> for writes to this subid.
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space writes. */
+ uint64_t wtype : 2; /**< ADDRTYPE<1:0> for writes to this subid
+ For PCIe:
+ - ADDRTYPE<0> is the relaxed-order attribute
+ - ADDRTYPE<1> is the no-snoop attribute
+ For sRIO:
+ - ADDRTYPE<1:0> help select an SRIO*_S2M_TYPE*
+ entry */
+ uint64_t rtype : 2; /**< ADDRTYPE<1:0> for reads to this subid
+ For PCIe:
+ - ADDRTYPE<0> is the relaxed-order attribute
+ - ADDRTYPE<1> is the no-snoop attribute
+ For sRIO:
+ - ADDRTYPE<1:0> help select an SRIO*_S2M_TYPE*
+ entry */
+ uint64_t ba : 30; /**< Address Bits <63:34> for reads/writes that use
+ this subid. */
+#else
+ uint64_t ba : 30;
+ uint64_t rtype : 2;
+ uint64_t wtype : 2;
+ uint64_t esw : 2;
+ uint64_t esr : 2;
+ uint64_t nmerge : 1;
+ uint64_t port : 3;
+ uint64_t zero : 1;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } cn61xx;
+ struct cvmx_sli_mem_access_subidx_cn61xx cn63xx;
+ struct cvmx_sli_mem_access_subidx_cn61xx cn63xxp1;
+ struct cvmx_sli_mem_access_subidx_cn61xx cn66xx;
+ struct cvmx_sli_mem_access_subidx_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63 : 21;
+ uint64_t zero : 1; /**< Causes all byte reads to be zero length reads.
+ Returns to the EXEC a zero for all read data.
+ This must be zero for sRIO ports. */
+ uint64_t port : 3; /**< Physical MAC port to which reads/writes through
+ this subid are sent. Must be <= 1, as there are
+ only two ports present. */
+ uint64_t nmerge : 1; /**< When set, no merging is allowed in this window. */
+ uint64_t esr : 2; /**< ES<1:0> for reads to this subid.
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space reads. */
+ uint64_t esw : 2; /**< ES<1:0> for writes to this subid.
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space writes. */
+ uint64_t wtype : 2; /**< ADDRTYPE<1:0> for writes to this subid
+ For PCIe:
+ - ADDRTYPE<0> is the relaxed-order attribute
+ - ADDRTYPE<1> is the no-snoop attribute */
+ uint64_t rtype : 2; /**< ADDRTYPE<1:0> for reads to this subid
+ For PCIe:
+ - ADDRTYPE<0> is the relaxed-order attribute
+ - ADDRTYPE<1> is the no-snoop attribute */
+ uint64_t ba : 28; /**< Address Bits <63:36> for reads/writes that use
+ this subid. */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t ba : 28;
+ uint64_t rtype : 2;
+ uint64_t wtype : 2;
+ uint64_t esw : 2;
+ uint64_t esr : 2;
+ uint64_t nmerge : 1;
+ uint64_t port : 3;
+ uint64_t zero : 1;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } cn68xx;
+ struct cvmx_sli_mem_access_subidx_cn68xx cn68xxp1;
+ struct cvmx_sli_mem_access_subidx_cn61xx cnf71xx;
+};
+typedef union cvmx_sli_mem_access_subidx cvmx_sli_mem_access_subidx_t;
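+
+/*
+ * Usage sketch (editor's addition): point one subid window at a MAC memory
+ * region on a CN61XX-style part, where BA holds address bits <63:34>.
+ * CVMX_SLI_MEM_ACCESS_SUBIDX(i) is assumed to be the per-index address
+ * macro defined elsewhere in this header; the index and base address below
+ * are hypothetical.
+ *
+ *     uint64_t base_addr = 0x800000000ull;    // must be 2^34-aligned
+ *     cvmx_sli_mem_access_subidx_t sub;
+ *     sub.u64 = 0;
+ *     sub.cn61xx.port = 0;                    // route through MAC port 0
+ *     sub.cn61xx.nmerge = 1;                  // no merging in this window
+ *     sub.cn61xx.ba = base_addr >> 34;        // address bits <63:34>
+ *     cvmx_write_csr(CVMX_SLI_MEM_ACCESS_SUBIDX(12), sub.u64);
+ */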
+
+/**
+ * cvmx_sli_msi_enb0
+ *
+ * SLI_MSI_ENB0 = SLI MSI Enable0
+ *
+ * Used to enable the interrupt generation for the bits in the SLI_MSI_RCV0.
+ */
+union cvmx_sli_msi_enb0 {
+ uint64_t u64;
+ struct cvmx_sli_msi_enb0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb : 64; /**< Enables bit [63:0] of SLI_MSI_RCV0. */
+#else
+ uint64_t enb : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_enb0_s cn61xx;
+ struct cvmx_sli_msi_enb0_s cn63xx;
+ struct cvmx_sli_msi_enb0_s cn63xxp1;
+ struct cvmx_sli_msi_enb0_s cn66xx;
+ struct cvmx_sli_msi_enb0_s cn68xx;
+ struct cvmx_sli_msi_enb0_s cn68xxp1;
+ struct cvmx_sli_msi_enb0_s cnf71xx;
+};
+typedef union cvmx_sli_msi_enb0 cvmx_sli_msi_enb0_t;
+
+/**
+ * cvmx_sli_msi_enb1
+ *
+ * SLI_MSI_ENB1 = SLI MSI Enable1
+ *
+ * Used to enable the interrupt generation for the bits in the SLI_MSI_RCV1.
+ */
+union cvmx_sli_msi_enb1 {
+ uint64_t u64;
+ struct cvmx_sli_msi_enb1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb : 64; /**< Enables bit [63:0] of SLI_MSI_RCV1. */
+#else
+ uint64_t enb : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_enb1_s cn61xx;
+ struct cvmx_sli_msi_enb1_s cn63xx;
+ struct cvmx_sli_msi_enb1_s cn63xxp1;
+ struct cvmx_sli_msi_enb1_s cn66xx;
+ struct cvmx_sli_msi_enb1_s cn68xx;
+ struct cvmx_sli_msi_enb1_s cn68xxp1;
+ struct cvmx_sli_msi_enb1_s cnf71xx;
+};
+typedef union cvmx_sli_msi_enb1 cvmx_sli_msi_enb1_t;
+
+/**
+ * cvmx_sli_msi_enb2
+ *
+ * SLI_MSI_ENB2 = SLI MSI Enable2
+ *
+ * Used to enable the interrupt generation for the bits in the SLI_MSI_RCV2.
+ */
+union cvmx_sli_msi_enb2 {
+ uint64_t u64;
+ struct cvmx_sli_msi_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb : 64; /**< Enables bit [63:0] of SLI_MSI_RCV2. */
+#else
+ uint64_t enb : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_enb2_s cn61xx;
+ struct cvmx_sli_msi_enb2_s cn63xx;
+ struct cvmx_sli_msi_enb2_s cn63xxp1;
+ struct cvmx_sli_msi_enb2_s cn66xx;
+ struct cvmx_sli_msi_enb2_s cn68xx;
+ struct cvmx_sli_msi_enb2_s cn68xxp1;
+ struct cvmx_sli_msi_enb2_s cnf71xx;
+};
+typedef union cvmx_sli_msi_enb2 cvmx_sli_msi_enb2_t;
+
+/**
+ * cvmx_sli_msi_enb3
+ *
+ * SLI_MSI_ENB3 = SLI MSI Enable3
+ *
+ * Used to enable the interrupt generation for the bits in the SLI_MSI_RCV3.
+ */
+union cvmx_sli_msi_enb3 {
+ uint64_t u64;
+ struct cvmx_sli_msi_enb3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb : 64; /**< Enables bit [63:0] of SLI_MSI_RCV3. */
+#else
+ uint64_t enb : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_enb3_s cn61xx;
+ struct cvmx_sli_msi_enb3_s cn63xx;
+ struct cvmx_sli_msi_enb3_s cn63xxp1;
+ struct cvmx_sli_msi_enb3_s cn66xx;
+ struct cvmx_sli_msi_enb3_s cn68xx;
+ struct cvmx_sli_msi_enb3_s cn68xxp1;
+ struct cvmx_sli_msi_enb3_s cnf71xx;
+};
+typedef union cvmx_sli_msi_enb3 cvmx_sli_msi_enb3_t;
+
+/**
+ * cvmx_sli_msi_rcv0
+ *
+ * SLI_MSI_RCV0 = SLI MSI Receive0
+ *
+ * Contains bits [63:0] of the 256 bits of MSI interrupts.
+ */
+union cvmx_sli_msi_rcv0 {
+ uint64_t u64;
+ struct cvmx_sli_msi_rcv0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr : 64; /**< Bits 63-0 of the 256 bits of MSI interrupt. */
+#else
+ uint64_t intr : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_rcv0_s cn61xx;
+ struct cvmx_sli_msi_rcv0_s cn63xx;
+ struct cvmx_sli_msi_rcv0_s cn63xxp1;
+ struct cvmx_sli_msi_rcv0_s cn66xx;
+ struct cvmx_sli_msi_rcv0_s cn68xx;
+ struct cvmx_sli_msi_rcv0_s cn68xxp1;
+ struct cvmx_sli_msi_rcv0_s cnf71xx;
+};
+typedef union cvmx_sli_msi_rcv0 cvmx_sli_msi_rcv0_t;
+
+/**
+ * cvmx_sli_msi_rcv1
+ *
+ * SLI_MSI_RCV1 = SLI MSI Receive1
+ *
+ * Contains bits [127:64] of the 256 bits of MSI interrupts.
+ */
+union cvmx_sli_msi_rcv1 {
+ uint64_t u64;
+ struct cvmx_sli_msi_rcv1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr : 64; /**< Bits 127-64 of the 256 bits of MSI interrupt. */
+#else
+ uint64_t intr : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_rcv1_s cn61xx;
+ struct cvmx_sli_msi_rcv1_s cn63xx;
+ struct cvmx_sli_msi_rcv1_s cn63xxp1;
+ struct cvmx_sli_msi_rcv1_s cn66xx;
+ struct cvmx_sli_msi_rcv1_s cn68xx;
+ struct cvmx_sli_msi_rcv1_s cn68xxp1;
+ struct cvmx_sli_msi_rcv1_s cnf71xx;
+};
+typedef union cvmx_sli_msi_rcv1 cvmx_sli_msi_rcv1_t;
+
+/**
+ * cvmx_sli_msi_rcv2
+ *
+ * SLI_MSI_RCV2 = SLI MSI Receive2
+ *
+ * Contains bits [191:128] of the 256 bits of MSI interrupts.
+ */
+union cvmx_sli_msi_rcv2 {
+ uint64_t u64;
+ struct cvmx_sli_msi_rcv2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr : 64; /**< Bits 191-128 of the 256 bits of MSI interrupt. */
+#else
+ uint64_t intr : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_rcv2_s cn61xx;
+ struct cvmx_sli_msi_rcv2_s cn63xx;
+ struct cvmx_sli_msi_rcv2_s cn63xxp1;
+ struct cvmx_sli_msi_rcv2_s cn66xx;
+ struct cvmx_sli_msi_rcv2_s cn68xx;
+ struct cvmx_sli_msi_rcv2_s cn68xxp1;
+ struct cvmx_sli_msi_rcv2_s cnf71xx;
+};
+typedef union cvmx_sli_msi_rcv2 cvmx_sli_msi_rcv2_t;
+
+/**
+ * cvmx_sli_msi_rcv3
+ *
+ * SLI_MSI_RCV3 = SLI MSI Receive3
+ *
+ * Contains bits [255:192] of the 256 bits of MSI interrupts.
+ */
+union cvmx_sli_msi_rcv3 {
+ uint64_t u64;
+ struct cvmx_sli_msi_rcv3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr : 64; /**< Bits 255-192 of the 256 bits of MSI interrupt. */
+#else
+ uint64_t intr : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_rcv3_s cn61xx;
+ struct cvmx_sli_msi_rcv3_s cn63xx;
+ struct cvmx_sli_msi_rcv3_s cn63xxp1;
+ struct cvmx_sli_msi_rcv3_s cn66xx;
+ struct cvmx_sli_msi_rcv3_s cn68xx;
+ struct cvmx_sli_msi_rcv3_s cn68xxp1;
+ struct cvmx_sli_msi_rcv3_s cnf71xx;
+};
+typedef union cvmx_sli_msi_rcv3 cvmx_sli_msi_rcv3_t;
+
+/**
+ * cvmx_sli_msi_rd_map
+ *
+ * SLI_MSI_RD_MAP = SLI MSI Read MAP
+ *
+ * Used to read the mapping function of the SLI_PCIE_MSI_RCV to SLI_MSI_RCV registers.
+ */
+union cvmx_sli_msi_rd_map {
+ uint64_t u64;
+ struct cvmx_sli_msi_rd_map_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t rd_int : 8; /**< The value of the map at the location PREVIOUSLY
+ written to the MSI_INT field of this register. */
+ uint64_t msi_int : 8; /**< Selects the value that would be received when the
+ SLI_PCIE_MSI_RCV register is written. */
+#else
+ uint64_t msi_int : 8;
+ uint64_t rd_int : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_sli_msi_rd_map_s cn61xx;
+ struct cvmx_sli_msi_rd_map_s cn63xx;
+ struct cvmx_sli_msi_rd_map_s cn63xxp1;
+ struct cvmx_sli_msi_rd_map_s cn66xx;
+ struct cvmx_sli_msi_rd_map_s cn68xx;
+ struct cvmx_sli_msi_rd_map_s cn68xxp1;
+ struct cvmx_sli_msi_rd_map_s cnf71xx;
+};
+typedef union cvmx_sli_msi_rd_map cvmx_sli_msi_rd_map_t;
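+
+/*
+ * Usage sketch (editor's addition): read back the mapping for one incoming
+ * MSI value. Writing MSI_INT selects the entry; a subsequent read reports
+ * in RD_INT which SLI_MSI_RCV# bit that value maps to. CVMX_SLI_MSI_RD_MAP
+ * is assumed to be defined elsewhere in this header.
+ *
+ *     cvmx_sli_msi_rd_map_t map;
+ *     map.u64 = 0;
+ *     map.s.msi_int = 0x42;       // hypothetical MSI value to look up
+ *     cvmx_write_csr(CVMX_SLI_MSI_RD_MAP, map.u64);
+ *     map.u64 = cvmx_read_csr(CVMX_SLI_MSI_RD_MAP);
+ *     // map.s.rd_int now holds the mapped SLI_MSI_RCV# bit position
+ */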
+
+/**
+ * cvmx_sli_msi_w1c_enb0
+ *
+ * SLI_MSI_W1C_ENB0 = SLI MSI Write 1 To Clear Enable0
+ *
+ * Used to clear bits in SLI_MSI_ENB0.
+ */
+union cvmx_sli_msi_w1c_enb0 {
+ uint64_t u64;
+ struct cvmx_sli_msi_w1c_enb0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr : 64; /**< A write of '1' to a vector will clear the
+ corresponding bit in SLI_MSI_ENB0.
+ A read to this address will return 0. */
+#else
+ uint64_t clr : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_w1c_enb0_s cn61xx;
+ struct cvmx_sli_msi_w1c_enb0_s cn63xx;
+ struct cvmx_sli_msi_w1c_enb0_s cn63xxp1;
+ struct cvmx_sli_msi_w1c_enb0_s cn66xx;
+ struct cvmx_sli_msi_w1c_enb0_s cn68xx;
+ struct cvmx_sli_msi_w1c_enb0_s cn68xxp1;
+ struct cvmx_sli_msi_w1c_enb0_s cnf71xx;
+};
+typedef union cvmx_sli_msi_w1c_enb0 cvmx_sli_msi_w1c_enb0_t;
+
+/**
+ * cvmx_sli_msi_w1c_enb1
+ *
+ * SLI_MSI_W1C_ENB1 = SLI MSI Write 1 To Clear Enable1
+ *
+ * Used to clear bits in SLI_MSI_ENB1.
+ */
+union cvmx_sli_msi_w1c_enb1 {
+ uint64_t u64;
+ struct cvmx_sli_msi_w1c_enb1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr : 64; /**< A write of '1' to a vector will clear the
+ corresponding bit in SLI_MSI_ENB1.
+ A read to this address will return 0. */
+#else
+ uint64_t clr : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_w1c_enb1_s cn61xx;
+ struct cvmx_sli_msi_w1c_enb1_s cn63xx;
+ struct cvmx_sli_msi_w1c_enb1_s cn63xxp1;
+ struct cvmx_sli_msi_w1c_enb1_s cn66xx;
+ struct cvmx_sli_msi_w1c_enb1_s cn68xx;
+ struct cvmx_sli_msi_w1c_enb1_s cn68xxp1;
+ struct cvmx_sli_msi_w1c_enb1_s cnf71xx;
+};
+typedef union cvmx_sli_msi_w1c_enb1 cvmx_sli_msi_w1c_enb1_t;
+
+/**
+ * cvmx_sli_msi_w1c_enb2
+ *
+ * SLI_MSI_W1C_ENB2 = SLI MSI Write 1 To Clear Enable2
+ *
+ * Used to clear bits in SLI_MSI_ENB2.
+ */
+union cvmx_sli_msi_w1c_enb2 {
+ uint64_t u64;
+ struct cvmx_sli_msi_w1c_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr : 64; /**< A write of '1' to a vector will clear the
+ corresponding bit in SLI_MSI_ENB2.
+ A read to this address will return 0. */
+#else
+ uint64_t clr : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_w1c_enb2_s cn61xx;
+ struct cvmx_sli_msi_w1c_enb2_s cn63xx;
+ struct cvmx_sli_msi_w1c_enb2_s cn63xxp1;
+ struct cvmx_sli_msi_w1c_enb2_s cn66xx;
+ struct cvmx_sli_msi_w1c_enb2_s cn68xx;
+ struct cvmx_sli_msi_w1c_enb2_s cn68xxp1;
+ struct cvmx_sli_msi_w1c_enb2_s cnf71xx;
+};
+typedef union cvmx_sli_msi_w1c_enb2 cvmx_sli_msi_w1c_enb2_t;
+
+/**
+ * cvmx_sli_msi_w1c_enb3
+ *
+ * SLI_MSI_W1C_ENB3 = SLI MSI Write 1 To Clear Enable3
+ *
+ * Used to clear bits in SLI_MSI_ENB3.
+ */
+union cvmx_sli_msi_w1c_enb3 {
+ uint64_t u64;
+ struct cvmx_sli_msi_w1c_enb3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr : 64; /**< A write of '1' to a vector will clear the
+ corresponding bit in SLI_MSI_ENB3.
+ A read to this address will return 0. */
+#else
+ uint64_t clr : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_w1c_enb3_s cn61xx;
+ struct cvmx_sli_msi_w1c_enb3_s cn63xx;
+ struct cvmx_sli_msi_w1c_enb3_s cn63xxp1;
+ struct cvmx_sli_msi_w1c_enb3_s cn66xx;
+ struct cvmx_sli_msi_w1c_enb3_s cn68xx;
+ struct cvmx_sli_msi_w1c_enb3_s cn68xxp1;
+ struct cvmx_sli_msi_w1c_enb3_s cnf71xx;
+};
+typedef union cvmx_sli_msi_w1c_enb3 cvmx_sli_msi_w1c_enb3_t;
+
+/**
+ * cvmx_sli_msi_w1s_enb0
+ *
+ * SLI_MSI_W1S_ENB0 = SLI MSI Write 1 To Set Enable0
+ *
+ * Used to set bits in SLI_MSI_ENB0.
+ */
+union cvmx_sli_msi_w1s_enb0 {
+ uint64_t u64;
+ struct cvmx_sli_msi_w1s_enb0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set : 64; /**< A write of '1' to a vector will set the
+ corresponding bit in SLI_MSI_ENB0.
+ A read to this address will return 0. */
+#else
+ uint64_t set : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_w1s_enb0_s cn61xx;
+ struct cvmx_sli_msi_w1s_enb0_s cn63xx;
+ struct cvmx_sli_msi_w1s_enb0_s cn63xxp1;
+ struct cvmx_sli_msi_w1s_enb0_s cn66xx;
+ struct cvmx_sli_msi_w1s_enb0_s cn68xx;
+ struct cvmx_sli_msi_w1s_enb0_s cn68xxp1;
+ struct cvmx_sli_msi_w1s_enb0_s cnf71xx;
+};
+typedef union cvmx_sli_msi_w1s_enb0 cvmx_sli_msi_w1s_enb0_t;
+
+/**
+ * cvmx_sli_msi_w1s_enb1
+ *
+ * SLI_MSI_W1S_ENB1 = SLI MSI Write 1 To Set Enable1
+ *
+ * Used to set bits in SLI_MSI_ENB1.
+ */
+union cvmx_sli_msi_w1s_enb1 {
+ uint64_t u64;
+ struct cvmx_sli_msi_w1s_enb1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set : 64; /**< A write of '1' to a vector will set the
+ corresponding bit in SLI_MSI_ENB1.
+ A read to this address will return 0. */
+#else
+ uint64_t set : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_w1s_enb1_s cn61xx;
+ struct cvmx_sli_msi_w1s_enb1_s cn63xx;
+ struct cvmx_sli_msi_w1s_enb1_s cn63xxp1;
+ struct cvmx_sli_msi_w1s_enb1_s cn66xx;
+ struct cvmx_sli_msi_w1s_enb1_s cn68xx;
+ struct cvmx_sli_msi_w1s_enb1_s cn68xxp1;
+ struct cvmx_sli_msi_w1s_enb1_s cnf71xx;
+};
+typedef union cvmx_sli_msi_w1s_enb1 cvmx_sli_msi_w1s_enb1_t;
+
+/**
+ * cvmx_sli_msi_w1s_enb2
+ *
+ * SLI_MSI_W1S_ENB2 = SLI MSI Write 1 To Set Enable2
+ *
+ * Used to set bits in SLI_MSI_ENB2.
+ */
+union cvmx_sli_msi_w1s_enb2 {
+ uint64_t u64;
+ struct cvmx_sli_msi_w1s_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set : 64; /**< A write of '1' to a vector will set the
+ corresponding bit in SLI_MSI_ENB2.
+ A read to this address will return 0. */
+#else
+ uint64_t set : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_w1s_enb2_s cn61xx;
+ struct cvmx_sli_msi_w1s_enb2_s cn63xx;
+ struct cvmx_sli_msi_w1s_enb2_s cn63xxp1;
+ struct cvmx_sli_msi_w1s_enb2_s cn66xx;
+ struct cvmx_sli_msi_w1s_enb2_s cn68xx;
+ struct cvmx_sli_msi_w1s_enb2_s cn68xxp1;
+ struct cvmx_sli_msi_w1s_enb2_s cnf71xx;
+};
+typedef union cvmx_sli_msi_w1s_enb2 cvmx_sli_msi_w1s_enb2_t;
+
+/**
+ * cvmx_sli_msi_w1s_enb3
+ *
+ * SLI_MSI_W1S_ENB3 = SLI MSI Write 1 To Set Enable3
+ *
+ * Used to set bits in SLI_MSI_ENB3.
+ */
+union cvmx_sli_msi_w1s_enb3 {
+ uint64_t u64;
+ struct cvmx_sli_msi_w1s_enb3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set : 64; /**< A write of '1' to a vector will set the
+ corresponding bit in SLI_MSI_ENB3.
+ A read to this address will return 0. */
+#else
+ uint64_t set : 64;
+#endif
+ } s;
+ struct cvmx_sli_msi_w1s_enb3_s cn61xx;
+ struct cvmx_sli_msi_w1s_enb3_s cn63xx;
+ struct cvmx_sli_msi_w1s_enb3_s cn63xxp1;
+ struct cvmx_sli_msi_w1s_enb3_s cn66xx;
+ struct cvmx_sli_msi_w1s_enb3_s cn68xx;
+ struct cvmx_sli_msi_w1s_enb3_s cn68xxp1;
+ struct cvmx_sli_msi_w1s_enb3_s cnf71xx;
+};
+typedef union cvmx_sli_msi_w1s_enb3 cvmx_sli_msi_w1s_enb3_t;
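+
+/*
+ * Usage sketch (editor's addition): the W1C/W1S register pairs above let
+ * software set or clear individual SLI_MSI_ENB bits without a
+ * read-modify-write of the enable register. Assumes the
+ * CVMX_SLI_MSI_W1S_ENB0/CVMX_SLI_MSI_W1C_ENB0 address macros defined
+ * elsewhere in this header.
+ *
+ *     cvmx_write_csr(CVMX_SLI_MSI_W1S_ENB0, 1ull << 5);   // enable MSI 5
+ *     cvmx_write_csr(CVMX_SLI_MSI_W1C_ENB0, 1ull << 5);   // disable MSI 5
+ */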
+
+/**
+ * cvmx_sli_msi_wr_map
+ *
+ * SLI_MSI_WR_MAP = SLI MSI Write MAP
+ *
+ * Used to write the mapping function of the SLI_PCIE_MSI_RCV to SLI_MSI_RCV registers.
+ */
+union cvmx_sli_msi_wr_map {
+ uint64_t u64;
+ struct cvmx_sli_msi_wr_map_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t ciu_int : 8; /**< Selects which bit in the SLI_MSI_RCV# (0-255)
+ will be set when the value specified in the
+ MSI_INT of this register is received during a
+ write to the SLI_PCIE_MSI_RCV register. */
+ uint64_t msi_int : 8; /**< Selects the value that would be received when the
+ SLI_PCIE_MSI_RCV register is written. */
+#else
+ uint64_t msi_int : 8;
+ uint64_t ciu_int : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_sli_msi_wr_map_s cn61xx;
+ struct cvmx_sli_msi_wr_map_s cn63xx;
+ struct cvmx_sli_msi_wr_map_s cn63xxp1;
+ struct cvmx_sli_msi_wr_map_s cn66xx;
+ struct cvmx_sli_msi_wr_map_s cn68xx;
+ struct cvmx_sli_msi_wr_map_s cn68xxp1;
+ struct cvmx_sli_msi_wr_map_s cnf71xx;
+};
+typedef union cvmx_sli_msi_wr_map cvmx_sli_msi_wr_map_t;
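+
+/*
+ * Usage sketch (editor's addition): route incoming MSI value 0x42 to bit 7
+ * of the SLI_MSI_RCV# registers (both numbers hypothetical).
+ * CVMX_SLI_MSI_WR_MAP is assumed to be defined elsewhere in this header.
+ *
+ *     cvmx_sli_msi_wr_map_t map;
+ *     map.u64 = 0;
+ *     map.s.msi_int = 0x42;   // value arriving via SLI_PCIE_MSI_RCV
+ *     map.s.ciu_int = 7;      // SLI_MSI_RCV# bit to set for that value
+ *     cvmx_write_csr(CVMX_SLI_MSI_WR_MAP, map.u64);
+ */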
+
+/**
+ * cvmx_sli_pcie_msi_rcv
+ *
+ * SLI_PCIE_MSI_RCV = SLI MAC MSI Receive
+ *
+ * Register where MSI writes are directed from the MAC.
+ */
+union cvmx_sli_pcie_msi_rcv {
+ uint64_t u64;
+ struct cvmx_sli_pcie_msi_rcv_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t intr : 8; /**< A write to this register will result in a bit in
+ one of the SLI_MSI_RCV# registers being set.
+ Which bit is set depends on the mapping previously
+ written using the SLI_MSI_WR_MAP register, or on
+ the reset value of the map if never written. */
+#else
+ uint64_t intr : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_sli_pcie_msi_rcv_s cn61xx;
+ struct cvmx_sli_pcie_msi_rcv_s cn63xx;
+ struct cvmx_sli_pcie_msi_rcv_s cn63xxp1;
+ struct cvmx_sli_pcie_msi_rcv_s cn66xx;
+ struct cvmx_sli_pcie_msi_rcv_s cn68xx;
+ struct cvmx_sli_pcie_msi_rcv_s cn68xxp1;
+ struct cvmx_sli_pcie_msi_rcv_s cnf71xx;
+};
+typedef union cvmx_sli_pcie_msi_rcv cvmx_sli_pcie_msi_rcv_t;
+
+/**
+ * cvmx_sli_pcie_msi_rcv_b1
+ *
+ * SLI_PCIE_MSI_RCV_B1 = SLI MAC MSI Receive Byte 1
+ *
+ * Register where MSI writes are directed from the MAC.
+ *
+ * Notes:
+ * This CSR can be used by PCIe and sRIO MACs.
+ *
+ */
+union cvmx_sli_pcie_msi_rcv_b1 {
+ uint64_t u64;
+ struct cvmx_sli_pcie_msi_rcv_b1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t intr : 8; /**< A write to this register will result in a bit in
+ one of the SLI_MSI_RCV# registers being set.
+ Which bit is set depends on the mapping previously
+ written using the SLI_MSI_WR_MAP register, or on
+ the reset value of the map if never written. */
+ uint64_t reserved_0_7 : 8;
+#else
+ uint64_t reserved_0_7 : 8;
+ uint64_t intr : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_sli_pcie_msi_rcv_b1_s cn61xx;
+ struct cvmx_sli_pcie_msi_rcv_b1_s cn63xx;
+ struct cvmx_sli_pcie_msi_rcv_b1_s cn63xxp1;
+ struct cvmx_sli_pcie_msi_rcv_b1_s cn66xx;
+ struct cvmx_sli_pcie_msi_rcv_b1_s cn68xx;
+ struct cvmx_sli_pcie_msi_rcv_b1_s cn68xxp1;
+ struct cvmx_sli_pcie_msi_rcv_b1_s cnf71xx;
+};
+typedef union cvmx_sli_pcie_msi_rcv_b1 cvmx_sli_pcie_msi_rcv_b1_t;
+
+/**
+ * cvmx_sli_pcie_msi_rcv_b2
+ *
+ * SLI_PCIE_MSI_RCV_B2 = SLI MAC MSI Receive Byte 2
+ *
+ * Register where MSI writes are directed from the MAC.
+ *
+ * Notes:
+ * This CSR can be used by PCIe and sRIO MACs.
+ *
+ */
+union cvmx_sli_pcie_msi_rcv_b2 {
+ uint64_t u64;
+ struct cvmx_sli_pcie_msi_rcv_b2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t intr : 8; /**< A write to this register will result in a bit in
+ one of the SLI_MSI_RCV# registers being set.
+ Which bit is set depends on the mapping previously
+ written using the SLI_MSI_WR_MAP register, or on
+ the reset value of the map if never written. */
+ uint64_t reserved_0_15 : 16;
+#else
+ uint64_t reserved_0_15 : 16;
+ uint64_t intr : 8;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_sli_pcie_msi_rcv_b2_s cn61xx;
+ struct cvmx_sli_pcie_msi_rcv_b2_s cn63xx;
+ struct cvmx_sli_pcie_msi_rcv_b2_s cn63xxp1;
+ struct cvmx_sli_pcie_msi_rcv_b2_s cn66xx;
+ struct cvmx_sli_pcie_msi_rcv_b2_s cn68xx;
+ struct cvmx_sli_pcie_msi_rcv_b2_s cn68xxp1;
+ struct cvmx_sli_pcie_msi_rcv_b2_s cnf71xx;
+};
+typedef union cvmx_sli_pcie_msi_rcv_b2 cvmx_sli_pcie_msi_rcv_b2_t;
+
+/**
+ * cvmx_sli_pcie_msi_rcv_b3
+ *
+ * SLI_PCIE_MSI_RCV_B3 = SLI MAC MSI Receive Byte 3
+ *
+ * Register where MSI writes are directed from the MAC.
+ *
+ * Notes:
+ * This CSR can be used by PCIe and sRIO MACs.
+ *
+ */
+union cvmx_sli_pcie_msi_rcv_b3 {
+ uint64_t u64;
+ struct cvmx_sli_pcie_msi_rcv_b3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t intr : 8; /**< A write to this register will result in a bit in
+ one of the SLI_MSI_RCV# registers being set.
+ Which bit is set depends on the mapping previously
+ written using the SLI_MSI_WR_MAP register, or on
+ the reset value of the map if never written. */
+ uint64_t reserved_0_23 : 24;
+#else
+ uint64_t reserved_0_23 : 24;
+ uint64_t intr : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pcie_msi_rcv_b3_s cn61xx;
+ struct cvmx_sli_pcie_msi_rcv_b3_s cn63xx;
+ struct cvmx_sli_pcie_msi_rcv_b3_s cn63xxp1;
+ struct cvmx_sli_pcie_msi_rcv_b3_s cn66xx;
+ struct cvmx_sli_pcie_msi_rcv_b3_s cn68xx;
+ struct cvmx_sli_pcie_msi_rcv_b3_s cn68xxp1;
+ struct cvmx_sli_pcie_msi_rcv_b3_s cnf71xx;
+};
+typedef union cvmx_sli_pcie_msi_rcv_b3 cvmx_sli_pcie_msi_rcv_b3_t;
+
+/**
+ * cvmx_sli_pkt#_cnts
+ *
+ * SLI_PKT[0..31]_CNTS = SLI Packet ring# Counts
+ *
+ * The counters for output rings.
+ */
+union cvmx_sli_pktx_cnts {
+ uint64_t u64;
+ struct cvmx_sli_pktx_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t timer : 22; /**< Timer incremented every 1024 core clocks
+ when SLI_PKT#_CNTS[CNT] is non-zero. Field
+ cleared when SLI_PKT#_CNTS[CNT] goes to 0.
+ Field is also cleared when SLI_PKT_TIME_INT is
+ cleared.
+ The first increment of this count can occur
+ between 0 to 1023 core clocks. */
+ uint64_t cnt : 32; /**< Ring counter. This field is incremented as
+ packets are sent out and decremented in response to
+ writes to this field.
+ When SLI_PKT_OUT_BMODE is '0' a value of 1 is
+ added to the register for each packet, when '1'
+ and the info-pointer is NOT used the length of the
+ packet plus 8 is added, when '1' and info-pointer
+ mode IS used the packet length is added to this
+ field. */
+#else
+ uint64_t cnt : 32;
+ uint64_t timer : 22;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_sli_pktx_cnts_s cn61xx;
+ struct cvmx_sli_pktx_cnts_s cn63xx;
+ struct cvmx_sli_pktx_cnts_s cn63xxp1;
+ struct cvmx_sli_pktx_cnts_s cn66xx;
+ struct cvmx_sli_pktx_cnts_s cn68xx;
+ struct cvmx_sli_pktx_cnts_s cn68xxp1;
+ struct cvmx_sli_pktx_cnts_s cnf71xx;
+};
+typedef union cvmx_sli_pktx_cnts cvmx_sli_pktx_cnts_t;
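+
+/*
+ * Usage sketch (editor's addition): service output ring 0 and subtract the
+ * packets just handled; per the CNT description above, writes decrement the
+ * counter rather than replace it. CVMX_SLI_PKTX_CNTS(ring) is assumed to be
+ * the per-ring address macro defined elsewhere in this header.
+ *
+ *     cvmx_sli_pktx_cnts_t cnts;
+ *     cnts.u64 = cvmx_read_csr(CVMX_SLI_PKTX_CNTS(0));
+ *     uint32_t handled = cnts.s.cnt;      // packets seen this pass
+ *     cnts.u64 = 0;
+ *     cnts.s.cnt = handled;               // write back to decrement CNT
+ *     cvmx_write_csr(CVMX_SLI_PKTX_CNTS(0), cnts.u64);
+ */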
+
+/**
+ * cvmx_sli_pkt#_in_bp
+ *
+ * SLI_PKT[0..31]_IN_BP = SLI Packet ring# Input Backpressure
+ *
+ * The counters and thresholds for input packets to apply backpressure to processing of the packets.
+ */
+union cvmx_sli_pktx_in_bp {
+ uint64_t u64;
+ struct cvmx_sli_pktx_in_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wmark : 32; /**< When CNT is greater than this threshold, no more
+ packets will be processed for this ring.
+ When writing this field of the SLI_PKT#_IN_BP
+ register, use a 4-byte write so as to not write
+ any other field of this register. */
+ uint64_t cnt : 32; /**< Ring counter. This field is incremented by one
+ whenever OCTEON receives, buffers, and creates a
+ work queue entry for a packet that arrives on the
+ corresponding input ring. A write to this field
+ will be subtracted from the field value.
+ When writing this field of the SLI_PKT#_IN_BP
+ register, use a 4-byte write so as to not write
+ any other field of this register. */
+#else
+ uint64_t cnt : 32;
+ uint64_t wmark : 32;
+#endif
+ } s;
+ struct cvmx_sli_pktx_in_bp_s cn61xx;
+ struct cvmx_sli_pktx_in_bp_s cn63xx;
+ struct cvmx_sli_pktx_in_bp_s cn63xxp1;
+ struct cvmx_sli_pktx_in_bp_s cn66xx;
+ struct cvmx_sli_pktx_in_bp_s cnf71xx;
+};
+typedef union cvmx_sli_pktx_in_bp cvmx_sli_pktx_in_bp_t;
+
+/**
+ * cvmx_sli_pkt#_instr_baddr
+ *
+ * SLI_PKT[0..31]_INSTR_BADDR = SLI Packet ring# Instruction Base Address
+ *
+ * Start of Instruction for input packets.
+ */
+union cvmx_sli_pktx_instr_baddr {
+ uint64_t u64;
+ struct cvmx_sli_pktx_instr_baddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 61; /**< Base address for Instructions. */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t addr : 61;
+#endif
+ } s;
+ struct cvmx_sli_pktx_instr_baddr_s cn61xx;
+ struct cvmx_sli_pktx_instr_baddr_s cn63xx;
+ struct cvmx_sli_pktx_instr_baddr_s cn63xxp1;
+ struct cvmx_sli_pktx_instr_baddr_s cn66xx;
+ struct cvmx_sli_pktx_instr_baddr_s cn68xx;
+ struct cvmx_sli_pktx_instr_baddr_s cn68xxp1;
+ struct cvmx_sli_pktx_instr_baddr_s cnf71xx;
+};
+typedef union cvmx_sli_pktx_instr_baddr cvmx_sli_pktx_instr_baddr_t;
+
+/**
+ * cvmx_sli_pkt#_instr_baoff_dbell
+ *
+ * SLI_PKT[0..31]_INSTR_BAOFF_DBELL = SLI Packet ring# Instruction Base Address Offset and Doorbell
+ *
+ * The doorbell and base address offset for next read.
+ */
+union cvmx_sli_pktx_instr_baoff_dbell {
+ uint64_t u64;
+ struct cvmx_sli_pktx_instr_baoff_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t aoff : 32; /**< The offset from the SLI_PKT[0..31]_INSTR_BADDR
+ where the next instruction will be read. */
+ uint64_t dbell : 32; /**< Instruction doorbell count. Writes to this field
+ will increment the value here. Reads will return
+ the present value. A write of 0xffffffff will
+ clear the DBELL and AOFF fields. */
+#else
+ uint64_t dbell : 32;
+ uint64_t aoff : 32;
+#endif
+ } s;
+ struct cvmx_sli_pktx_instr_baoff_dbell_s cn61xx;
+ struct cvmx_sli_pktx_instr_baoff_dbell_s cn63xx;
+ struct cvmx_sli_pktx_instr_baoff_dbell_s cn63xxp1;
+ struct cvmx_sli_pktx_instr_baoff_dbell_s cn66xx;
+ struct cvmx_sli_pktx_instr_baoff_dbell_s cn68xx;
+ struct cvmx_sli_pktx_instr_baoff_dbell_s cn68xxp1;
+ struct cvmx_sli_pktx_instr_baoff_dbell_s cnf71xx;
+};
+typedef union cvmx_sli_pktx_instr_baoff_dbell cvmx_sli_pktx_instr_baoff_dbell_t;
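+
+/*
+ * Usage sketch (editor's addition): after queueing new instructions on ring
+ * 0, ring the doorbell with the number added; DBELL writes accumulate, and
+ * a write of 0xffffffff instead resets DBELL and AOFF to '0'.
+ * CVMX_SLI_PKTX_INSTR_BAOFF_DBELL(ring) is assumed to be the per-ring
+ * address macro defined elsewhere in this header.
+ *
+ *     cvmx_sli_pktx_instr_baoff_dbell_t db;
+ *     db.u64 = 0;
+ *     db.s.dbell = 4;     // four new instructions were queued
+ *     cvmx_write_csr(CVMX_SLI_PKTX_INSTR_BAOFF_DBELL(0), db.u64);
+ */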
+
+/**
+ * cvmx_sli_pkt#_instr_fifo_rsize
+ *
+ * SLI_PKT[0..31]_INSTR_FIFO_RSIZE = SLI Packet ring# Instruction FIFO and Ring Size.
+ *
+ * FIFO fields and ring size for instructions.
+ */
+union cvmx_sli_pktx_instr_fifo_rsize {
+ uint64_t u64;
+ struct cvmx_sli_pktx_instr_fifo_rsize_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t max : 9; /**< Maximum FIFO size. */
+ uint64_t rrp : 9; /**< FIFO read pointer. */
+ uint64_t wrp : 9; /**< FIFO write pointer. */
+ uint64_t fcnt : 5; /**< FIFO count. */
+ uint64_t rsize : 32; /**< Instruction ring size. */
+#else
+ uint64_t rsize : 32;
+ uint64_t fcnt : 5;
+ uint64_t wrp : 9;
+ uint64_t rrp : 9;
+ uint64_t max : 9;
+#endif
+ } s;
+ struct cvmx_sli_pktx_instr_fifo_rsize_s cn61xx;
+ struct cvmx_sli_pktx_instr_fifo_rsize_s cn63xx;
+ struct cvmx_sli_pktx_instr_fifo_rsize_s cn63xxp1;
+ struct cvmx_sli_pktx_instr_fifo_rsize_s cn66xx;
+ struct cvmx_sli_pktx_instr_fifo_rsize_s cn68xx;
+ struct cvmx_sli_pktx_instr_fifo_rsize_s cn68xxp1;
+ struct cvmx_sli_pktx_instr_fifo_rsize_s cnf71xx;
+};
+typedef union cvmx_sli_pktx_instr_fifo_rsize cvmx_sli_pktx_instr_fifo_rsize_t;
+
+/**
+ * cvmx_sli_pkt#_instr_header
+ *
+ * SLI_PKT[0..31]_INSTR_HEADER = SLI Packet ring# Instruction Header.
+ *
+ * Values used to build the input packet header.
+ */
+union cvmx_sli_pktx_instr_header {
+ uint64_t u64;
+ struct cvmx_sli_pktx_instr_header_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t pbp : 1; /**< Enable Packet-by-packet mode.
+ Allows DPI to generate PKT_INST_HDR[PM,SL]
+ differently per DPI instruction.
+ USE_IHDR must be set whenever PBP is set. */
+ uint64_t reserved_38_42 : 5;
+ uint64_t rparmode : 2; /**< Parse Mode. Becomes PKT_INST_HDR[PM]
+ when DPI_INST_HDR[R]==1 and PBP==0 */
+ uint64_t reserved_35_35 : 1;
+ uint64_t rskp_len : 7; /**< Skip Length. Becomes PKT_INST_HDR[SL]
+ when DPI_INST_HDR[R]==1 and PBP==0 */
+ uint64_t rngrpext : 2; /**< Becomes PKT_INST_HDR[GRPEXT]
+ when DPI_INST_HDR[R]==1 */
+ uint64_t rnqos : 1; /**< Becomes PKT_INST_HDR[NQOS]
+ when DPI_INST_HDR[R]==1 */
+ uint64_t rngrp : 1; /**< Becomes PKT_INST_HDR[NGRP]
+ when DPI_INST_HDR[R]==1 */
+ uint64_t rntt : 1; /**< Becomes PKT_INST_HDR[NTT]
+ when DPI_INST_HDR[R]==1 */
+ uint64_t rntag : 1; /**< Becomes PKT_INST_HDR[NTAG]
+ when DPI_INST_HDR[R]==1 */
+ uint64_t use_ihdr : 1; /**< When set '1' DPI always prepends a PKT_INST_HDR
+ as part of the packet data sent to PIP/IPD,
+ regardless of DPI_INST_HDR[R]. (DPI also always
+ prepends a PKT_INST_HDR when DPI_INST_HDR[R]=1.)
+ USE_IHDR must be set whenever PBP is set. */
+ uint64_t reserved_16_20 : 5;
+ uint64_t par_mode : 2; /**< Parse Mode. Becomes PKT_INST_HDR[PM]
+ when DPI_INST_HDR[R]==0 and USE_IHDR==1 and PBP==0 */
+ uint64_t reserved_13_13 : 1;
+ uint64_t skp_len : 7; /**< Skip Length. Becomes PKT_INST_HDR[SL]
+ when DPI_INST_HDR[R]==0 and USE_IHDR==1 and PBP==0 */
+ uint64_t ngrpext : 2; /**< Becomes PKT_INST_HDR[GRPEXT]
+ when DPI_INST_HDR[R]==0 (and USE_IHDR==1) */
+ uint64_t nqos : 1; /**< Becomes PKT_INST_HDR[NQOS]
+ when DPI_INST_HDR[R]==0 (and USE_IHDR==1) */
+ uint64_t ngrp : 1; /**< Becomes PKT_INST_HDR[NGRP]
+ when DPI_INST_HDR[R]==0 (and USE_IHDR==1) */
+ uint64_t ntt : 1; /**< Becomes PKT_INST_HDR[NTT]
+ when DPI_INST_HDR[R]==0 (and USE_IHDR==1) */
+ uint64_t ntag : 1; /**< Becomes PKT_INST_HDR[NTAG]
+ when DPI_INST_HDR[R]==0 (and USE_IHDR==1) */
+#else
+ uint64_t ntag : 1;
+ uint64_t ntt : 1;
+ uint64_t ngrp : 1;
+ uint64_t nqos : 1;
+ uint64_t ngrpext : 2;
+ uint64_t skp_len : 7;
+ uint64_t reserved_13_13 : 1;
+ uint64_t par_mode : 2;
+ uint64_t reserved_16_20 : 5;
+ uint64_t use_ihdr : 1;
+ uint64_t rntag : 1;
+ uint64_t rntt : 1;
+ uint64_t rngrp : 1;
+ uint64_t rnqos : 1;
+ uint64_t rngrpext : 2;
+ uint64_t rskp_len : 7;
+ uint64_t reserved_35_35 : 1;
+ uint64_t rparmode : 2;
+ uint64_t reserved_38_42 : 5;
+ uint64_t pbp : 1;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } s;
+ struct cvmx_sli_pktx_instr_header_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t pbp : 1; /**< Enable Packet-by-packet mode.
+ Allows DPI to generate PKT_INST_HDR[PM,SL]
+ differently per DPI instruction.
+ USE_IHDR must be set whenever PBP is set. */
+ uint64_t reserved_38_42 : 5;
+ uint64_t rparmode : 2; /**< Parse Mode. Becomes PKT_INST_HDR[PM]
+ when DPI_INST_HDR[R]==1 and PBP==0 */
+ uint64_t reserved_35_35 : 1;
+ uint64_t rskp_len : 7; /**< Skip Length. Becomes PKT_INST_HDR[SL]
+ when DPI_INST_HDR[R]==1 and PBP==0 */
+ uint64_t reserved_26_27 : 2;
+ uint64_t rnqos : 1; /**< Becomes PKT_INST_HDR[NQOS]
+ when DPI_INST_HDR[R]==1 */
+ uint64_t rngrp : 1; /**< Becomes PKT_INST_HDR[NGRP]
+ when DPI_INST_HDR[R]==1 */
+ uint64_t rntt : 1; /**< Becomes PKT_INST_HDR[NTT]
+ when DPI_INST_HDR[R]==1 */
+ uint64_t rntag : 1; /**< Becomes PKT_INST_HDR[NTAG]
+ when DPI_INST_HDR[R]==1 */
+ uint64_t use_ihdr : 1; /**< When set '1' DPI always prepends a PKT_INST_HDR
+ as part of the packet data sent to PIP/IPD,
+ regardless of DPI_INST_HDR[R]. (DPI also always
+ prepends a PKT_INST_HDR when DPI_INST_HDR[R]=1.)
+ USE_IHDR must be set whenever PBP is set. */
+ uint64_t reserved_16_20 : 5;
+ uint64_t par_mode : 2; /**< Parse Mode. Becomes PKT_INST_HDR[PM]
+ when DPI_INST_HDR[R]==0 and USE_IHDR==1 and PBP==0 */
+ uint64_t reserved_13_13 : 1;
+ uint64_t skp_len : 7; /**< Skip Length. Becomes PKT_INST_HDR[SL]
+ when DPI_INST_HDR[R]==0 and USE_IHDR==1 and PBP==0 */
+ uint64_t reserved_4_5 : 2;
+ uint64_t nqos : 1; /**< Becomes PKT_INST_HDR[NQOS]
+ when DPI_INST_HDR[R]==0 (and USE_IHDR==1) */
+ uint64_t ngrp : 1; /**< Becomes PKT_INST_HDR[NGRP]
+ when DPI_INST_HDR[R]==0 (and USE_IHDR==1) */
+ uint64_t ntt : 1; /**< Becomes PKT_INST_HDR[NTT]
+ when DPI_INST_HDR[R]==0 (and USE_IHDR==1) */
+ uint64_t ntag : 1; /**< Becomes PKT_INST_HDR[NTAG]
+ when DPI_INST_HDR[R]==0 (and USE_IHDR==1) */
+#else
+ uint64_t ntag : 1;
+ uint64_t ntt : 1;
+ uint64_t ngrp : 1;
+ uint64_t nqos : 1;
+ uint64_t reserved_4_5 : 2;
+ uint64_t skp_len : 7;
+ uint64_t reserved_13_13 : 1;
+ uint64_t par_mode : 2;
+ uint64_t reserved_16_20 : 5;
+ uint64_t use_ihdr : 1;
+ uint64_t rntag : 1;
+ uint64_t rntt : 1;
+ uint64_t rngrp : 1;
+ uint64_t rnqos : 1;
+ uint64_t reserved_26_27 : 2;
+ uint64_t rskp_len : 7;
+ uint64_t reserved_35_35 : 1;
+ uint64_t rparmode : 2;
+ uint64_t reserved_38_42 : 5;
+ uint64_t pbp : 1;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } cn61xx;
+ struct cvmx_sli_pktx_instr_header_cn61xx cn63xx;
+ struct cvmx_sli_pktx_instr_header_cn61xx cn63xxp1;
+ struct cvmx_sli_pktx_instr_header_cn61xx cn66xx;
+ struct cvmx_sli_pktx_instr_header_s cn68xx;
+ struct cvmx_sli_pktx_instr_header_cn61xx cn68xxp1;
+ struct cvmx_sli_pktx_instr_header_cn61xx cnf71xx;
+};
+typedef union cvmx_sli_pktx_instr_header cvmx_sli_pktx_instr_header_t;
+
+/**
+ * cvmx_sli_pkt#_out_size
+ *
+ * SLI_PKT[0..31]_OUT_SIZE = SLI Packet Out Size
+ *
+ * Contains the BSIZE and ISIZE for output packet ports.
+ */
+union cvmx_sli_pktx_out_size {
+ uint64_t u64;
+ struct cvmx_sli_pktx_out_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t isize : 7; /**< INFO BYTES size (bytes) for ring X. Legal sizes
+ are 0 to 120. Not used in buffer-pointer-only mode. */
+ uint64_t bsize : 16; /**< BUFFER SIZE (bytes) for ring X. */
+#else
+ uint64_t bsize : 16;
+ uint64_t isize : 7;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_sli_pktx_out_size_s cn61xx;
+ struct cvmx_sli_pktx_out_size_s cn63xx;
+ struct cvmx_sli_pktx_out_size_s cn63xxp1;
+ struct cvmx_sli_pktx_out_size_s cn66xx;
+ struct cvmx_sli_pktx_out_size_s cn68xx;
+ struct cvmx_sli_pktx_out_size_s cn68xxp1;
+ struct cvmx_sli_pktx_out_size_s cnf71xx;
+};
+typedef union cvmx_sli_pktx_out_size cvmx_sli_pktx_out_size_t;
+
+/**
+ * cvmx_sli_pkt#_slist_baddr
+ *
+ * SLI_PKT[0..31]_SLIST_BADDR = SLI Packet ring# Scatter List Base Address
+ *
+ * Start of Scatter List for output packet pointers - MUST be 16-byte aligned.
+ */
+union cvmx_sli_pktx_slist_baddr {
+ uint64_t u64;
+ struct cvmx_sli_pktx_slist_baddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr : 60; /**< Base address for scatter list pointers. */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t addr : 60;
+#endif
+ } s;
+ struct cvmx_sli_pktx_slist_baddr_s cn61xx;
+ struct cvmx_sli_pktx_slist_baddr_s cn63xx;
+ struct cvmx_sli_pktx_slist_baddr_s cn63xxp1;
+ struct cvmx_sli_pktx_slist_baddr_s cn66xx;
+ struct cvmx_sli_pktx_slist_baddr_s cn68xx;
+ struct cvmx_sli_pktx_slist_baddr_s cn68xxp1;
+ struct cvmx_sli_pktx_slist_baddr_s cnf71xx;
+};
+typedef union cvmx_sli_pktx_slist_baddr cvmx_sli_pktx_slist_baddr_t;
+
+/**
+ * cvmx_sli_pkt#_slist_baoff_dbell
+ *
+ * SLI_PKT[0..31]_SLIST_BAOFF_DBELL = SLI Packet ring# Scatter List Base Address Offset and Doorbell
+ *
+ * The doorbell and base address offset for next read.
+ */
+union cvmx_sli_pktx_slist_baoff_dbell {
+ uint64_t u64;
+ struct cvmx_sli_pktx_slist_baoff_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t aoff : 32; /**< The offset from the SLI_PKT[0..31]_SLIST_BADDR
+ where the next SList pointer will be read.
+ A write of 0xFFFFFFFF to the DBELL field will
+ clear DBELL and AOFF */
+ uint64_t dbell : 32; /**< Scatter list doorbell count. Writes to this field
+ will increment the value here. Reads will return
+ present value. The value of this field is
+ decremented as read operations are ISSUED for
+ scatter pointers.
+ A write of 0xFFFFFFFF will clear DBELL and AOFF */
+#else
+ uint64_t dbell : 32;
+ uint64_t aoff : 32;
+#endif
+ } s;
+ struct cvmx_sli_pktx_slist_baoff_dbell_s cn61xx;
+ struct cvmx_sli_pktx_slist_baoff_dbell_s cn63xx;
+ struct cvmx_sli_pktx_slist_baoff_dbell_s cn63xxp1;
+ struct cvmx_sli_pktx_slist_baoff_dbell_s cn66xx;
+ struct cvmx_sli_pktx_slist_baoff_dbell_s cn68xx;
+ struct cvmx_sli_pktx_slist_baoff_dbell_s cn68xxp1;
+ struct cvmx_sli_pktx_slist_baoff_dbell_s cnf71xx;
+};
+typedef union cvmx_sli_pktx_slist_baoff_dbell cvmx_sli_pktx_slist_baoff_dbell_t;
+
+/**
+ * cvmx_sli_pkt#_slist_fifo_rsize
+ *
+ * SLI_PKT[0..31]_SLIST_FIFO_RSIZE = SLI Packet ring# Scatter List FIFO and Ring Size.
+ *
+ * The number of scatter pointer pairs in the scatter list.
+ */
+union cvmx_sli_pktx_slist_fifo_rsize {
+ uint64_t u64;
+ struct cvmx_sli_pktx_slist_fifo_rsize_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t rsize : 32; /**< The number of scatter pointer pairs contained in
+ the scatter list ring. */
+#else
+ uint64_t rsize : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pktx_slist_fifo_rsize_s cn61xx;
+ struct cvmx_sli_pktx_slist_fifo_rsize_s cn63xx;
+ struct cvmx_sli_pktx_slist_fifo_rsize_s cn63xxp1;
+ struct cvmx_sli_pktx_slist_fifo_rsize_s cn66xx;
+ struct cvmx_sli_pktx_slist_fifo_rsize_s cn68xx;
+ struct cvmx_sli_pktx_slist_fifo_rsize_s cn68xxp1;
+ struct cvmx_sli_pktx_slist_fifo_rsize_s cnf71xx;
+};
+typedef union cvmx_sli_pktx_slist_fifo_rsize cvmx_sli_pktx_slist_fifo_rsize_t;
+
+/**
+ * cvmx_sli_pkt_cnt_int
+ *
+ * SLI_PKT_CNT_INT = SLI Packet Counter Interrupt
+ *
+ * The packet rings that are interrupting because of packet counters.
+ */
+union cvmx_sli_pkt_cnt_int {
+ uint64_t u64;
+ struct cvmx_sli_pkt_cnt_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t port : 32; /**< Output ring packet counter interrupt bits
+ SLI sets PORT<i> whenever
+ SLI_PKTi_CNTS[CNT] > SLI_PKT_INT_LEVELS[CNT].
+ SLI_PKT_CNT_INT_ENB[PORT<i>] is the corresponding
+ enable. */
+#else
+ uint64_t port : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_cnt_int_s cn61xx;
+ struct cvmx_sli_pkt_cnt_int_s cn63xx;
+ struct cvmx_sli_pkt_cnt_int_s cn63xxp1;
+ struct cvmx_sli_pkt_cnt_int_s cn66xx;
+ struct cvmx_sli_pkt_cnt_int_s cn68xx;
+ struct cvmx_sli_pkt_cnt_int_s cn68xxp1;
+ struct cvmx_sli_pkt_cnt_int_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_cnt_int cvmx_sli_pkt_cnt_int_t;
+
+/**
+ * cvmx_sli_pkt_cnt_int_enb
+ *
+ * SLI_PKT_CNT_INT_ENB = SLI Packet Counter Interrupt Enable
+ *
+ * Enables for the packet rings that are interrupting because of packet counters.
+ */
+union cvmx_sli_pkt_cnt_int_enb {
+ uint64_t u64;
+ struct cvmx_sli_pkt_cnt_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t port : 32; /**< Output ring packet counter interrupt enables
+ When both PORT<i> and corresponding
+ SLI_PKT_CNT_INT[PORT<i>] are set, for any i,
+ then SLI_INT_SUM[PCNT] is set, which can cause
+ an interrupt. */
+#else
+ uint64_t port : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_cnt_int_enb_s cn61xx;
+ struct cvmx_sli_pkt_cnt_int_enb_s cn63xx;
+ struct cvmx_sli_pkt_cnt_int_enb_s cn63xxp1;
+ struct cvmx_sli_pkt_cnt_int_enb_s cn66xx;
+ struct cvmx_sli_pkt_cnt_int_enb_s cn68xx;
+ struct cvmx_sli_pkt_cnt_int_enb_s cn68xxp1;
+ struct cvmx_sli_pkt_cnt_int_enb_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_cnt_int_enb cvmx_sli_pkt_cnt_int_enb_t;
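+
+/*
+ * Usage sketch (editor's addition): enable the packet-count interrupt for
+ * rings 0-3; SLI_INT_SUM[PCNT] then asserts when any of those rings crosses
+ * its count threshold. CVMX_SLI_PKT_CNT_INT_ENB is assumed to be defined
+ * elsewhere in this header.
+ *
+ *     cvmx_sli_pkt_cnt_int_enb_t enb;
+ *     enb.u64 = cvmx_read_csr(CVMX_SLI_PKT_CNT_INT_ENB);
+ *     enb.s.port |= 0xf;      // rings 0-3
+ *     cvmx_write_csr(CVMX_SLI_PKT_CNT_INT_ENB, enb.u64);
+ */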
+
+/**
+ * cvmx_sli_pkt_ctl
+ *
+ * SLI_PKT_CTL = SLI Packet Control
+ *
+ * Control for packets.
+ */
+union cvmx_sli_pkt_ctl {
+ uint64_t u64;
+ struct cvmx_sli_pkt_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t ring_en : 1; /**< When '0', forces the "relative Q position" received
+ from PKO to be zero, and replicates the back-
+ pressure indication for the first ring attached
+ to a PKO port across all the rings attached to a
+ PKO port. When '1', backpressure is applied on a
+ per-port/ring basis. */
+ uint64_t pkt_bp : 4; /**< When set to '1', enables port-level backpressure
+ for the PKO ports associated with the bit. */
+#else
+ uint64_t pkt_bp : 4;
+ uint64_t ring_en : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_sli_pkt_ctl_s cn61xx;
+ struct cvmx_sli_pkt_ctl_s cn63xx;
+ struct cvmx_sli_pkt_ctl_s cn63xxp1;
+ struct cvmx_sli_pkt_ctl_s cn66xx;
+ struct cvmx_sli_pkt_ctl_s cn68xx;
+ struct cvmx_sli_pkt_ctl_s cn68xxp1;
+ struct cvmx_sli_pkt_ctl_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_ctl cvmx_sli_pkt_ctl_t;
+
+/**
+ * cvmx_sli_pkt_data_out_es
+ *
+ * SLI_PKT_DATA_OUT_ES = SLI's Packet Data Out Endian Swap
+ *
+ * The Endian Swap for writing Data Out.
+ */
+union cvmx_sli_pkt_data_out_es {
+ uint64_t u64;
+ struct cvmx_sli_pkt_data_out_es_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t es : 64; /**< ES<1:0> or MACADD<63:62> for buffer/info writes.
+ ES<2i+1:2i> becomes either ES<1:0> or
+ MACADD<63:62> for writes to buffer/info pair
+ MAC memory space addresses fetched from packet
+ output ring i. ES<1:0> if SLI_PKT_DPADDR[DPTR<i>]=1,
+ else MACADD<63:62>.
+ In the latter case, ES<1:0> comes from DPTR<63:62>.
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space writes. */
+#else
+ uint64_t es : 64;
+#endif
+ } s;
+ struct cvmx_sli_pkt_data_out_es_s cn61xx;
+ struct cvmx_sli_pkt_data_out_es_s cn63xx;
+ struct cvmx_sli_pkt_data_out_es_s cn63xxp1;
+ struct cvmx_sli_pkt_data_out_es_s cn66xx;
+ struct cvmx_sli_pkt_data_out_es_s cn68xx;
+ struct cvmx_sli_pkt_data_out_es_s cn68xxp1;
+ struct cvmx_sli_pkt_data_out_es_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_data_out_es cvmx_sli_pkt_data_out_es_t;
+
+/**
+ * cvmx_sli_pkt_data_out_ns
+ *
+ * SLI_PKT_DATA_OUT_NS = SLI's Packet Data Out No Snoop
+ *
+ * The NS field for the TLP when writing packet data.
+ */
+union cvmx_sli_pkt_data_out_ns {
+ uint64_t u64;
+ struct cvmx_sli_pkt_data_out_ns_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t nsr : 32; /**< ADDRTYPE<1> or MACADD<61> for buffer/info writes.
+ NSR<i> becomes either ADDRTYPE<1> or MACADD<61>
+ for writes to buffer/info pair MAC memory space
+ addresses fetched from packet output ring i.
+ ADDRTYPE<1> if SLI_PKT_DPADDR[DPTR<i>]=1, else
+ MACADD<61>.
+ In the latter case, ADDRTYPE<1> comes from DPTR<61>.
+ ADDRTYPE<1> is the no-snoop attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+#else
+ uint64_t nsr : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_data_out_ns_s cn61xx;
+ struct cvmx_sli_pkt_data_out_ns_s cn63xx;
+ struct cvmx_sli_pkt_data_out_ns_s cn63xxp1;
+ struct cvmx_sli_pkt_data_out_ns_s cn66xx;
+ struct cvmx_sli_pkt_data_out_ns_s cn68xx;
+ struct cvmx_sli_pkt_data_out_ns_s cn68xxp1;
+ struct cvmx_sli_pkt_data_out_ns_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_data_out_ns cvmx_sli_pkt_data_out_ns_t;
+
+/**
+ * cvmx_sli_pkt_data_out_ror
+ *
+ * SLI_PKT_DATA_OUT_ROR = SLI's Packet Data Out Relaxed Ordering
+ *
+ * The ROR field for the TLP when writing Packet Data.
+ */
+union cvmx_sli_pkt_data_out_ror {
+ uint64_t u64;
+ struct cvmx_sli_pkt_data_out_ror_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ror : 32; /**< ADDRTYPE<0> or MACADD<60> for buffer/info writes.
+ ROR<i> becomes either ADDRTYPE<0> or MACADD<60>
+ for writes to buffer/info pair MAC memory space
+ addresses fetched from packet output ring i.
+ ADDRTYPE<0> if SLI_PKT_DPADDR[DPTR<i>]=1, else
+ MACADD<60>.
+ In the latter case, ADDRTYPE<0> comes from DPTR<60>.
+ ADDRTYPE<0> is the relaxed-order attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+#else
+ uint64_t ror : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_data_out_ror_s cn61xx;
+ struct cvmx_sli_pkt_data_out_ror_s cn63xx;
+ struct cvmx_sli_pkt_data_out_ror_s cn63xxp1;
+ struct cvmx_sli_pkt_data_out_ror_s cn66xx;
+ struct cvmx_sli_pkt_data_out_ror_s cn68xx;
+ struct cvmx_sli_pkt_data_out_ror_s cn68xxp1;
+ struct cvmx_sli_pkt_data_out_ror_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_data_out_ror cvmx_sli_pkt_data_out_ror_t;
+
+/**
+ * cvmx_sli_pkt_dpaddr
+ *
+ * SLI_PKT_DPADDR = SLI's Packet Data Pointer Addr
+ *
+ * Used to determine address and attributes for packet data writes.
+ */
+union cvmx_sli_pkt_dpaddr {
+ uint64_t u64;
+ struct cvmx_sli_pkt_dpaddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t dptr : 32; /**< Determines whether buffer/info pointers are
+ DPTR format 0 or DPTR format 1.
+ When DPTR<i>=1, the buffer/info pointers fetched
+ from packet output ring i are DPTR format 0.
+ When DPTR<i>=0, the buffer/info pointers fetched
+ from packet output ring i are DPTR format 1.
+ (Replace SLI_PKT_INPUT_CONTROL[D_ESR,D_NSR,D_ROR]
+ in the HRM descriptions of DPTR format 0/1 with
+ SLI_PKT_DATA_OUT_ES[ES<2i+1:2i>],
+ SLI_PKT_DATA_OUT_NS[NSR<i>], and
+ SLI_PKT_DATA_OUT_ROR[ROR<i>], respectively,
+ though.) */
+#else
+ uint64_t dptr : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_dpaddr_s cn61xx;
+ struct cvmx_sli_pkt_dpaddr_s cn63xx;
+ struct cvmx_sli_pkt_dpaddr_s cn63xxp1;
+ struct cvmx_sli_pkt_dpaddr_s cn66xx;
+ struct cvmx_sli_pkt_dpaddr_s cn68xx;
+ struct cvmx_sli_pkt_dpaddr_s cn68xxp1;
+ struct cvmx_sli_pkt_dpaddr_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_dpaddr cvmx_sli_pkt_dpaddr_t;
+
+/**
+ * cvmx_sli_pkt_in_bp
+ *
+ * SLI_PKT_IN_BP = SLI Packet Input Backpressure
+ *
+ * Which input rings have backpressure applied.
+ */
+union cvmx_sli_pkt_in_bp {
+ uint64_t u64;
+ struct cvmx_sli_pkt_in_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bp : 32; /**< A packet input ring that has its count greater
+ than its WMARK will have backpressure applied.
+ Each of the 32 bits corresponds to an input ring.
+ When '1', that ring has backpressure applied and
+ will fetch no more instructions, but will process
+ any previously fetched instructions. */
+#else
+ uint64_t bp : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_in_bp_s cn61xx;
+ struct cvmx_sli_pkt_in_bp_s cn63xx;
+ struct cvmx_sli_pkt_in_bp_s cn63xxp1;
+ struct cvmx_sli_pkt_in_bp_s cn66xx;
+ struct cvmx_sli_pkt_in_bp_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_in_bp cvmx_sli_pkt_in_bp_t;
+
+/**
+ * cvmx_sli_pkt_in_done#_cnts
+ *
+ * SLI_PKT_IN_DONE[0..31]_CNTS = SLI Instruction Done ring# Counts
+ *
+ * Counters for instructions completed on Input rings.
+ */
+union cvmx_sli_pkt_in_donex_cnts {
+ uint64_t u64;
+ struct cvmx_sli_pkt_in_donex_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< This field is incremented by '1' when an instruction
+ is completed. This field is incremented as the
+ last of the data is read from the MAC. */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_in_donex_cnts_s cn61xx;
+ struct cvmx_sli_pkt_in_donex_cnts_s cn63xx;
+ struct cvmx_sli_pkt_in_donex_cnts_s cn63xxp1;
+ struct cvmx_sli_pkt_in_donex_cnts_s cn66xx;
+ struct cvmx_sli_pkt_in_donex_cnts_s cn68xx;
+ struct cvmx_sli_pkt_in_donex_cnts_s cn68xxp1;
+ struct cvmx_sli_pkt_in_donex_cnts_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_in_donex_cnts cvmx_sli_pkt_in_donex_cnts_t;
+
+/**
+ * cvmx_sli_pkt_in_instr_counts
+ *
+ * SLI_PKT_IN_INSTR_COUNTS = SLI Packet Input Instruction Counts
+ *
+ * Keeps track of the number of instructions read into the FIFO and Packets sent to IPD.
+ */
+union cvmx_sli_pkt_in_instr_counts {
+ uint64_t u64;
+ struct cvmx_sli_pkt_in_instr_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wr_cnt : 32; /**< Shows the number of packets sent to the IPD. */
+ uint64_t rd_cnt : 32; /**< Shows the number of instructions that have had reads
+ issued for them. The count is reset when
+ the Packet-ring is in reset. */
+#else
+ uint64_t rd_cnt : 32;
+ uint64_t wr_cnt : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_in_instr_counts_s cn61xx;
+ struct cvmx_sli_pkt_in_instr_counts_s cn63xx;
+ struct cvmx_sli_pkt_in_instr_counts_s cn63xxp1;
+ struct cvmx_sli_pkt_in_instr_counts_s cn66xx;
+ struct cvmx_sli_pkt_in_instr_counts_s cn68xx;
+ struct cvmx_sli_pkt_in_instr_counts_s cn68xxp1;
+ struct cvmx_sli_pkt_in_instr_counts_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_in_instr_counts cvmx_sli_pkt_in_instr_counts_t;
+
+/**
+ * cvmx_sli_pkt_in_pcie_port
+ *
+ * SLI_PKT_IN_PCIE_PORT = SLI's Packet In To MAC Port Assignment
+ *
+ * Assigns Packet Input rings to MAC ports.
+ */
+union cvmx_sli_pkt_in_pcie_port {
+ uint64_t u64;
+ struct cvmx_sli_pkt_in_pcie_port_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pp : 64; /**< The MAC port to which each packet ring is
+ assigned. Two bits are used per ring (i.e. ring 0
+ [1:0], ring 1 [3:2], ....). A value of '0' means
+ that the packet ring is assigned to MAC Port 0, a '1'
+ MAC Port 1, a '2' MAC Port 2, and a '3' MAC Port 3. */
+#else
+ uint64_t pp : 64;
+#endif
+ } s;
+ struct cvmx_sli_pkt_in_pcie_port_s cn61xx;
+ struct cvmx_sli_pkt_in_pcie_port_s cn63xx;
+ struct cvmx_sli_pkt_in_pcie_port_s cn63xxp1;
+ struct cvmx_sli_pkt_in_pcie_port_s cn66xx;
+ struct cvmx_sli_pkt_in_pcie_port_s cn68xx;
+ struct cvmx_sli_pkt_in_pcie_port_s cn68xxp1;
+ struct cvmx_sli_pkt_in_pcie_port_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_in_pcie_port cvmx_sli_pkt_in_pcie_port_t;
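+
+/* Editor's sketch (illustrative, not part of the generated file): selecting
+ * the MAC port for one input ring using the two-bit-per-ring encoding
+ * described above. Assumes cvmx_read_csr()/cvmx_write_csr() from
+ * cvmx-access.h and a CVMX_SLI_PKT_IN_PCIE_PORT address macro defined
+ * earlier in this header. */
+static inline void cvmx_sli_example_in_ring_to_mac(unsigned int ring, uint64_t mac_port)
+{
+ cvmx_sli_pkt_in_pcie_port_t pp;
+ pp.u64 = cvmx_read_csr(CVMX_SLI_PKT_IN_PCIE_PORT);
+ pp.s.pp &= ~(3ull << (2 * ring)); /* clear the two bits for this ring */
+ pp.s.pp |= (mac_port & 3) << (2 * ring); /* 0..3 selects MAC port 0..3 */
+ cvmx_write_csr(CVMX_SLI_PKT_IN_PCIE_PORT, pp.u64);
+}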
+
+/**
+ * cvmx_sli_pkt_input_control
+ *
+ * SLI_PKT_INPUT_CONTROL = SLI's Packet Input Control
+ *
+ * Controls reads of gather lists and instructions.
+ */
+union cvmx_sli_pkt_input_control {
+ uint64_t u64;
+ struct cvmx_sli_pkt_input_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prd_erst : 1; /**< PRD Error Reset */
+ uint64_t prd_rds : 7; /**< PRD Reads Out */
+ uint64_t gii_erst : 1; /**< GII Error Reset */
+ uint64_t gii_rds : 7; /**< GII Reads Out */
+ uint64_t reserved_41_47 : 7;
+ uint64_t prc_idle : 1; /**< PRC In IDLE */
+ uint64_t reserved_24_39 : 16;
+ uint64_t pin_rst : 1; /**< Packet In Reset. When a gather-list read receives
+ an error this bit (along with SLI_INT_SUM[PGL_ERR])
+ is set. When receiving a PGL_ERR interrupt, the SW
+ should:
+ 1. Wait 2ms to allow any outstanding reads to return
+ or be timed out.
+ 2. Write a '0' to this bit.
+ 3. Start up the packet input again (all previous
+ CSR settings of the packet-input will be lost). */
+ uint64_t pkt_rr : 1; /**< When set to '1', input packet selection will be
+ made with round-robin arbitration. When '0',
+ the input packet ring priority is fixed,
+ where the lower ring number has higher priority. */
+ uint64_t pbp_dhi : 13; /**< PBP_DHI replaces address bits that are used
+ for parse mode and skip-length when
+ SLI_PKTi_INSTR_HEADER[PBP]=1.
+ PBP_DHI becomes either MACADD<63:55> or MACADD<59:51>
+ for the instruction DPTR reads in this case.
+ The instruction DPTR reads are called
+ "First Direct" or "First Indirect" in the HRM.
+ When PBP=1, if "First Direct" and USE_CSR=0, PBP_DHI
+ becomes MACADD<59:51>, else MACADD<63:55>. */
+ uint64_t d_nsr : 1; /**< ADDRTYPE<1> or MACADD<61> for packet input data
+ reads.
+ D_NSR becomes either ADDRTYPE<1> or MACADD<61>
+ for MAC memory space reads of packet input data
+ fetched for any packet input ring.
+ ADDRTYPE<1> if USE_CSR=1, else MACADD<61>.
+ In the latter case, ADDRTYPE<1> comes from DPTR<61>.
+ ADDRTYPE<1> is the no-snoop attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+ uint64_t d_esr : 2; /**< ES<1:0> or MACADD<63:62> for packet input data
+ reads.
+ D_ESR becomes either ES<1:0> or MACADD<63:62>
+ for MAC memory space reads of packet input data
+ fetched for any packet input ring.
+ ES<1:0> if USE_CSR=1, else MACADD<63:62>.
+ In the latter case, ES<1:0> comes from DPTR<63:62>.
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space reads. */
+ uint64_t d_ror : 1; /**< ADDRTYPE<0> or MACADD<60> for packet input data
+ reads.
+ D_ROR becomes either ADDRTYPE<0> or MACADD<60>
+ for MAC memory space reads of packet input data
+ fetched for any packet input ring.
+ ADDRTYPE<0> if USE_CSR=1, else MACADD<60>.
+ In the latter case, ADDRTYPE<0> comes from DPTR<60>.
+ ADDRTYPE<0> is the relaxed-order attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+ uint64_t use_csr : 1; /**< When set to '1', the CSR value will be used for
+ ROR, ESR, and NSR. When clear '0', the value in
+ DPTR will be used. In turn, the bits not used for
+ ROR, ESR, and NSR will be used for bits [63:60]
+ of the address used to fetch packet data. */
+ uint64_t nsr : 1; /**< ADDRTYPE<1> for packet input instruction reads and
+ gather list (i.e. DPI component) reads from MAC
+ memory space.
+ ADDRTYPE<1> is the no-snoop attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+ uint64_t esr : 2; /**< ES<1:0> for packet input instruction reads and
+ gather list (i.e. DPI component) reads from MAC
+ memory space.
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space reads. */
+ uint64_t ror : 1; /**< ADDRTYPE<0> for packet input instruction reads and
+ gather list (i.e. DPI component) reads from MAC
+ memory space.
+ ADDRTYPE<0> is the relaxed-order attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+#else
+ uint64_t ror : 1;
+ uint64_t esr : 2;
+ uint64_t nsr : 1;
+ uint64_t use_csr : 1;
+ uint64_t d_ror : 1;
+ uint64_t d_esr : 2;
+ uint64_t d_nsr : 1;
+ uint64_t pbp_dhi : 13;
+ uint64_t pkt_rr : 1;
+ uint64_t pin_rst : 1;
+ uint64_t reserved_24_39 : 16;
+ uint64_t prc_idle : 1;
+ uint64_t reserved_41_47 : 7;
+ uint64_t gii_rds : 7;
+ uint64_t gii_erst : 1;
+ uint64_t prd_rds : 7;
+ uint64_t prd_erst : 1;
+#endif
+ } s;
+ struct cvmx_sli_pkt_input_control_s cn61xx;
+ struct cvmx_sli_pkt_input_control_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63 : 41;
+ uint64_t pkt_rr : 1; /**< When set to '1', input packet selection will be
+ made with round-robin arbitration. When '0',
+ the input packet ring priority is fixed,
+ where the lower ring number has higher priority. */
+ uint64_t pbp_dhi : 13; /**< PBP_DHI replaces address bits that are used
+ for parse mode and skip-length when
+ SLI_PKTi_INSTR_HEADER[PBP]=1.
+ PBP_DHI becomes either MACADD<63:55> or MACADD<59:51>
+ for the instruction DPTR reads in this case.
+ The instruction DPTR reads are called
+ "First Direct" or "First Indirect" in the HRM.
+ When PBP=1, if "First Direct" and USE_CSR=0, PBP_DHI
+ becomes MACADD<59:51>, else MACADD<63:55>. */
+ uint64_t d_nsr : 1; /**< ADDRTYPE<1> or MACADD<61> for packet input data
+ reads.
+ D_NSR becomes either ADDRTYPE<1> or MACADD<61>
+ for MAC memory space reads of packet input data
+ fetched for any packet input ring.
+ ADDRTYPE<1> if USE_CSR=1, else MACADD<61>.
+ In the latter case, ADDRTYPE<1> comes from DPTR<61>.
+ ADDRTYPE<1> is the no-snoop attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+ uint64_t d_esr : 2; /**< ES<1:0> or MACADD<63:62> for packet input data
+ reads.
+ D_ESR becomes either ES<1:0> or MACADD<63:62>
+ for MAC memory space reads of packet input data
+ fetched for any packet input ring.
+ ES<1:0> if USE_CSR=1, else MACADD<63:62>.
+ In the latter case, ES<1:0> comes from DPTR<63:62>.
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space reads. */
+ uint64_t d_ror : 1; /**< ADDRTYPE<0> or MACADD<60> for packet input data
+ reads.
+ D_ROR becomes either ADDRTYPE<0> or MACADD<60>
+ for MAC memory space reads of packet input data
+ fetched for any packet input ring.
+ ADDRTYPE<0> if USE_CSR=1, else MACADD<60>.
+ In the latter case, ADDRTYPE<0> comes from DPTR<60>.
+ ADDRTYPE<0> is the relaxed-order attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+ uint64_t use_csr : 1; /**< When set to '1', the CSR value will be used for
+ ROR, ESR, and NSR. When clear '0', the value in
+ DPTR will be used. In turn, the bits not used for
+ ROR, ESR, and NSR will be used for bits [63:60]
+ of the address used to fetch packet data. */
+ uint64_t nsr : 1; /**< ADDRTYPE<1> for packet input instruction reads and
+ gather list (i.e. DPI component) reads from MAC
+ memory space.
+ ADDRTYPE<1> is the no-snoop attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+ uint64_t esr : 2; /**< ES<1:0> for packet input instruction reads and
+ gather list (i.e. DPI component) reads from MAC
+ memory space.
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space reads. */
+ uint64_t ror : 1; /**< ADDRTYPE<0> for packet input instruction reads and
+ gather list (i.e. DPI component) reads from MAC
+ memory space.
+ ADDRTYPE<0> is the relaxed-order attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+#else
+ uint64_t ror : 1;
+ uint64_t esr : 2;
+ uint64_t nsr : 1;
+ uint64_t use_csr : 1;
+ uint64_t d_ror : 1;
+ uint64_t d_esr : 2;
+ uint64_t d_nsr : 1;
+ uint64_t pbp_dhi : 13;
+ uint64_t pkt_rr : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } cn63xx;
+ struct cvmx_sli_pkt_input_control_cn63xx cn63xxp1;
+ struct cvmx_sli_pkt_input_control_s cn66xx;
+ struct cvmx_sli_pkt_input_control_s cn68xx;
+ struct cvmx_sli_pkt_input_control_s cn68xxp1;
+ struct cvmx_sli_pkt_input_control_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_input_control cvmx_sli_pkt_input_control_t;
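+
+/* Editor's sketch (illustrative, not part of the generated file): the
+ * PGL_ERR recovery sequence described for PIN_RST above. Assumes
+ * cvmx_wait_usec() from cvmx.h plus cvmx_read_csr()/cvmx_write_csr() and a
+ * CVMX_SLI_PKT_INPUT_CONTROL address macro defined earlier in this header. */
+static inline void cvmx_sli_example_pgl_err_recover(void)
+{
+ cvmx_sli_pkt_input_control_t ctl;
+ cvmx_wait_usec(2000); /* 1. wait 2ms for outstanding reads to return or time out */
+ ctl.u64 = cvmx_read_csr(CVMX_SLI_PKT_INPUT_CONTROL);
+ ctl.s.pin_rst = 0; /* 2. write '0' to PIN_RST */
+ cvmx_write_csr(CVMX_SLI_PKT_INPUT_CONTROL, ctl.u64);
+ /* 3. the caller must now reprogram all packet-input CSRs before restarting */
+}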
+
+/**
+ * cvmx_sli_pkt_instr_enb
+ *
+ * SLI_PKT_INSTR_ENB = SLI's Packet Instruction Enable
+ *
+ * Enables the instruction fetch for a Packet-ring.
+ */
+union cvmx_sli_pkt_instr_enb {
+ uint64_t u64;
+ struct cvmx_sli_pkt_instr_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t enb : 32; /**< When ENB<i>=1, instruction input ring i is enabled. */
+#else
+ uint64_t enb : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_instr_enb_s cn61xx;
+ struct cvmx_sli_pkt_instr_enb_s cn63xx;
+ struct cvmx_sli_pkt_instr_enb_s cn63xxp1;
+ struct cvmx_sli_pkt_instr_enb_s cn66xx;
+ struct cvmx_sli_pkt_instr_enb_s cn68xx;
+ struct cvmx_sli_pkt_instr_enb_s cn68xxp1;
+ struct cvmx_sli_pkt_instr_enb_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_instr_enb cvmx_sli_pkt_instr_enb_t;
+
+/**
+ * cvmx_sli_pkt_instr_rd_size
+ *
+ * SLI_PKT_INSTR_RD_SIZE = SLI Instruction Read Size
+ *
+ * The number of instructions allowed to be read at one time.
+ */
+union cvmx_sli_pkt_instr_rd_size {
+ uint64_t u64;
+ struct cvmx_sli_pkt_instr_rd_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rdsize : 64; /**< Number of instructions to be read in one MAC read
+ request for the 4 ports - 8 rings. Every two bits
+ (i.e. 1:0, 3:2, 5:4..) are assigned to the port/ring
+ combinations.
+ - 15:0 PKIPort0,Ring 7..0 31:16 PKIPort1,Ring 7..0
+ - 47:32 PKIPort2,Ring 7..0 63:48 PKIPort3,Ring 7..0
+ The two-bit values are:
+ 0 - 1 Instruction
+ 1 - 2 Instructions
+ 2 - 3 Instructions
+ 3 - 4 Instructions */
+#else
+ uint64_t rdsize : 64;
+#endif
+ } s;
+ struct cvmx_sli_pkt_instr_rd_size_s cn61xx;
+ struct cvmx_sli_pkt_instr_rd_size_s cn63xx;
+ struct cvmx_sli_pkt_instr_rd_size_s cn63xxp1;
+ struct cvmx_sli_pkt_instr_rd_size_s cn66xx;
+ struct cvmx_sli_pkt_instr_rd_size_s cn68xx;
+ struct cvmx_sli_pkt_instr_rd_size_s cn68xxp1;
+ struct cvmx_sli_pkt_instr_rd_size_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_instr_rd_size cvmx_sli_pkt_instr_rd_size_t;
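+
+/* Editor's sketch (illustrative, not part of the generated file): encoding
+ * the per-port/ring read size using the two-bit fields described above
+ * (value 0..3 meaning 1..4 instructions; each port owns 16 bits, each ring
+ * two of them). Assumes a CVMX_SLI_PKT_INSTR_RD_SIZE address macro defined
+ * earlier in this header. */
+static inline void cvmx_sli_example_set_rd_size(unsigned int port, unsigned int ring,
+ unsigned int num_instr)
+{
+ cvmx_sli_pkt_instr_rd_size_t rd;
+ unsigned int shift = port * 16 + ring * 2; /* 2 bits per port/ring pair */
+ rd.u64 = cvmx_read_csr(CVMX_SLI_PKT_INSTR_RD_SIZE);
+ rd.s.rdsize &= ~(3ull << shift);
+ rd.s.rdsize |= (uint64_t)((num_instr - 1) & 3) << shift; /* 0..3 -> 1..4 */
+ cvmx_write_csr(CVMX_SLI_PKT_INSTR_RD_SIZE, rd.u64);
+}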
+
+/**
+ * cvmx_sli_pkt_instr_size
+ *
+ * SLI_PKT_INSTR_SIZE = SLI's Packet Instruction Size
+ *
+ * Determines if instructions are 64 or 32 bytes in size for a Packet-ring.
+ */
+union cvmx_sli_pkt_instr_size {
+ uint64_t u64;
+ struct cvmx_sli_pkt_instr_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t is_64b : 32; /**< When IS_64B<i>=1, instruction input ring i uses 64B
+ instructions, else 32B instructions. */
+#else
+ uint64_t is_64b : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_instr_size_s cn61xx;
+ struct cvmx_sli_pkt_instr_size_s cn63xx;
+ struct cvmx_sli_pkt_instr_size_s cn63xxp1;
+ struct cvmx_sli_pkt_instr_size_s cn66xx;
+ struct cvmx_sli_pkt_instr_size_s cn68xx;
+ struct cvmx_sli_pkt_instr_size_s cn68xxp1;
+ struct cvmx_sli_pkt_instr_size_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_instr_size cvmx_sli_pkt_instr_size_t;
+
+/**
+ * cvmx_sli_pkt_int_levels
+ *
+ * 0x90F0 reserved SLI_PKT_PCIE_PORT2
+ *
+ *
+ * SLI_PKT_INT_LEVELS = SLI's Packet Interrupt Levels
+ *
+ * Output packet interrupt levels.
+ */
+union cvmx_sli_pkt_int_levels {
+ uint64_t u64;
+ struct cvmx_sli_pkt_int_levels_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t time : 22; /**< Output ring counter time interrupt threshold
+ SLI sets SLI_PKT_TIME_INT[PORT<i>] whenever
+ SLI_PKTi_CNTS[TIMER] > TIME */
+ uint64_t cnt : 32; /**< Output ring counter interrupt threshold
+ SLI sets SLI_PKT_CNT_INT[PORT<i>] whenever
+ SLI_PKTi_CNTS[CNT] > CNT */
+#else
+ uint64_t cnt : 32;
+ uint64_t time : 22;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_sli_pkt_int_levels_s cn61xx;
+ struct cvmx_sli_pkt_int_levels_s cn63xx;
+ struct cvmx_sli_pkt_int_levels_s cn63xxp1;
+ struct cvmx_sli_pkt_int_levels_s cn66xx;
+ struct cvmx_sli_pkt_int_levels_s cn68xx;
+ struct cvmx_sli_pkt_int_levels_s cn68xxp1;
+ struct cvmx_sli_pkt_int_levels_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_int_levels cvmx_sli_pkt_int_levels_t;
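+
+/* Editor's sketch (illustrative, not part of the generated file): arming the
+ * ring-0 packet-count interrupt by combining SLI_PKT_INT_LEVELS with
+ * SLI_PKT_CNT_INT_ENB as described above. Assumes CVMX_SLI_PKT_INT_LEVELS
+ * and CVMX_SLI_PKT_CNT_INT_ENB address macros defined earlier in this
+ * header. */
+static inline void cvmx_sli_example_arm_ring0_cnt_int(uint32_t threshold)
+{
+ cvmx_sli_pkt_int_levels_t levels;
+ cvmx_sli_pkt_cnt_int_enb_t enb;
+ levels.u64 = cvmx_read_csr(CVMX_SLI_PKT_INT_LEVELS);
+ levels.s.cnt = threshold; /* interrupt once SLI_PKT0_CNTS[CNT] > threshold */
+ cvmx_write_csr(CVMX_SLI_PKT_INT_LEVELS, levels.u64);
+ enb.u64 = cvmx_read_csr(CVMX_SLI_PKT_CNT_INT_ENB);
+ enb.s.port |= 1; /* enable the count interrupt for ring 0 */
+ cvmx_write_csr(CVMX_SLI_PKT_CNT_INT_ENB, enb.u64);
+}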
+
+/**
+ * cvmx_sli_pkt_iptr
+ *
+ * SLI_PKT_IPTR = SLI's Packet Info Pointer
+ *
+ * Controls using the Info-Pointer to store length and data.
+ */
+union cvmx_sli_pkt_iptr {
+ uint64_t u64;
+ struct cvmx_sli_pkt_iptr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t iptr : 32; /**< When IPTR<i>=1, packet output ring i is in info-
+ pointer mode, else buffer-pointer-only mode. */
+#else
+ uint64_t iptr : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_iptr_s cn61xx;
+ struct cvmx_sli_pkt_iptr_s cn63xx;
+ struct cvmx_sli_pkt_iptr_s cn63xxp1;
+ struct cvmx_sli_pkt_iptr_s cn66xx;
+ struct cvmx_sli_pkt_iptr_s cn68xx;
+ struct cvmx_sli_pkt_iptr_s cn68xxp1;
+ struct cvmx_sli_pkt_iptr_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_iptr cvmx_sli_pkt_iptr_t;
+
+/**
+ * cvmx_sli_pkt_out_bmode
+ *
+ * SLI_PKT_OUT_BMODE = SLI's Packet Out Byte Mode
+ *
+ * Controls the updating of the SLI_PKT#_CNT register.
+ */
+union cvmx_sli_pkt_out_bmode {
+ uint64_t u64;
+ struct cvmx_sli_pkt_out_bmode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bmode : 32; /**< Determines whether SLI_PKTi_CNTS[CNT] is a byte or
+ packet counter.
+ When BMODE<i>=1, SLI_PKTi_CNTS[CNT] is a byte
+ counter, else SLI_PKTi_CNTS[CNT] is a packet
+ counter. */
+#else
+ uint64_t bmode : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_out_bmode_s cn61xx;
+ struct cvmx_sli_pkt_out_bmode_s cn63xx;
+ struct cvmx_sli_pkt_out_bmode_s cn63xxp1;
+ struct cvmx_sli_pkt_out_bmode_s cn66xx;
+ struct cvmx_sli_pkt_out_bmode_s cn68xx;
+ struct cvmx_sli_pkt_out_bmode_s cn68xxp1;
+ struct cvmx_sli_pkt_out_bmode_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_out_bmode cvmx_sli_pkt_out_bmode_t;
+
+/**
+ * cvmx_sli_pkt_out_bp_en
+ *
+ * SLI_PKT_OUT_BP_EN = SLI Packet Output Backpressure Enable
+ *
+ * Enables sending backpressure to the PKO.
+ */
+union cvmx_sli_pkt_out_bp_en {
+ uint64_t u64;
+ struct cvmx_sli_pkt_out_bp_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bp_en : 32; /**< When set to '1', enables ring-level backpressure
+ to be sent to the PKO. Backpressure is sent to the
+ PKO on the PIPE number associated with the ring.
+ (See SLI_TX_PIPE for ring to pipe associations). */
+#else
+ uint64_t bp_en : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_out_bp_en_s cn68xx;
+ struct cvmx_sli_pkt_out_bp_en_s cn68xxp1;
+};
+typedef union cvmx_sli_pkt_out_bp_en cvmx_sli_pkt_out_bp_en_t;
+
+/**
+ * cvmx_sli_pkt_out_enb
+ *
+ * SLI_PKT_OUT_ENB = SLI's Packet Output Enable
+ *
+ * Enables the output packet engines.
+ */
+union cvmx_sli_pkt_out_enb {
+ uint64_t u64;
+ struct cvmx_sli_pkt_out_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t enb : 32; /**< When ENB<i>=1, packet output ring i is enabled.
+ If an error occurs on reading pointers for an
+ output ring, the ring will be disabled by clearing
+ the bit associated with the ring to '0'. */
+#else
+ uint64_t enb : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_out_enb_s cn61xx;
+ struct cvmx_sli_pkt_out_enb_s cn63xx;
+ struct cvmx_sli_pkt_out_enb_s cn63xxp1;
+ struct cvmx_sli_pkt_out_enb_s cn66xx;
+ struct cvmx_sli_pkt_out_enb_s cn68xx;
+ struct cvmx_sli_pkt_out_enb_s cn68xxp1;
+ struct cvmx_sli_pkt_out_enb_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_out_enb cvmx_sli_pkt_out_enb_t;
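+
+/* Editor's sketch (illustrative, not part of the generated file): since the
+ * hardware clears ENB<i> when a pointer-read error disables a ring, software
+ * can diff the current enables against the mask it programmed. Assumes a
+ * CVMX_SLI_PKT_OUT_ENB address macro defined earlier in this header. */
+static inline uint32_t cvmx_sli_example_disabled_out_rings(uint32_t programmed_mask)
+{
+ cvmx_sli_pkt_out_enb_t enb;
+ enb.u64 = cvmx_read_csr(CVMX_SLI_PKT_OUT_ENB);
+ return programmed_mask & ~(uint32_t)enb.s.enb; /* rings HW has shut off */
+}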
+
+/**
+ * cvmx_sli_pkt_output_wmark
+ *
+ * SLI_PKT_OUTPUT_WMARK = SLI's Packet Output Water Mark
+ *
+ * When the SLI_PKT#_SLIST_BAOFF_DBELL[DBELL] value is less than this value, backpressure will be applied to the rings.
+ */
+union cvmx_sli_pkt_output_wmark {
+ uint64_t u64;
+ struct cvmx_sli_pkt_output_wmark_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t wmark : 32; /**< When the DBELL count drops below this value,
+ backpressure for the ring will be applied to the PKO. */
+#else
+ uint64_t wmark : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_output_wmark_s cn61xx;
+ struct cvmx_sli_pkt_output_wmark_s cn63xx;
+ struct cvmx_sli_pkt_output_wmark_s cn63xxp1;
+ struct cvmx_sli_pkt_output_wmark_s cn66xx;
+ struct cvmx_sli_pkt_output_wmark_s cn68xx;
+ struct cvmx_sli_pkt_output_wmark_s cn68xxp1;
+ struct cvmx_sli_pkt_output_wmark_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_output_wmark cvmx_sli_pkt_output_wmark_t;
+
+/**
+ * cvmx_sli_pkt_pcie_port
+ *
+ * SLI_PKT_PCIE_PORT = SLI's Packet To MAC Port Assignment
+ *
+ * Assigns Packet Ports to MAC ports.
+ */
+union cvmx_sli_pkt_pcie_port {
+ uint64_t u64;
+ struct cvmx_sli_pkt_pcie_port_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pp : 64; /**< The physical MAC port that the output ring uses.
+ Two bits are used per ring (i.e. ring 0 [1:0],
+ ring 1 [3:2], ....). A value of '0' means
+ that the packet ring is assigned to MAC Port 0, a '1'
+ MAC Port 1; '2' and '3' are reserved. */
+#else
+ uint64_t pp : 64;
+#endif
+ } s;
+ struct cvmx_sli_pkt_pcie_port_s cn61xx;
+ struct cvmx_sli_pkt_pcie_port_s cn63xx;
+ struct cvmx_sli_pkt_pcie_port_s cn63xxp1;
+ struct cvmx_sli_pkt_pcie_port_s cn66xx;
+ struct cvmx_sli_pkt_pcie_port_s cn68xx;
+ struct cvmx_sli_pkt_pcie_port_s cn68xxp1;
+ struct cvmx_sli_pkt_pcie_port_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_pcie_port cvmx_sli_pkt_pcie_port_t;
+
+/**
+ * cvmx_sli_pkt_port_in_rst
+ *
+ * 91c0 reserved
+ * 91d0 reserved
+ * 91e0 reserved
+ *
+ *
+ * SLI_PKT_PORT_IN_RST = SLI Packet Port In Reset
+ *
+ * Vector bits indicating which ring-ports are in reset.
+ */
+union cvmx_sli_pkt_port_in_rst {
+ uint64_t u64;
+ struct cvmx_sli_pkt_port_in_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t in_rst : 32; /**< When asserted '1', the corresponding
+ inbound Packet-ring is in reset. */
+ uint64_t out_rst : 32; /**< When asserted '1', the corresponding
+ outbound Packet-ring is in reset. */
+#else
+ uint64_t out_rst : 32;
+ uint64_t in_rst : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_port_in_rst_s cn61xx;
+ struct cvmx_sli_pkt_port_in_rst_s cn63xx;
+ struct cvmx_sli_pkt_port_in_rst_s cn63xxp1;
+ struct cvmx_sli_pkt_port_in_rst_s cn66xx;
+ struct cvmx_sli_pkt_port_in_rst_s cn68xx;
+ struct cvmx_sli_pkt_port_in_rst_s cn68xxp1;
+ struct cvmx_sli_pkt_port_in_rst_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_port_in_rst cvmx_sli_pkt_port_in_rst_t;
+
+/**
+ * cvmx_sli_pkt_slist_es
+ *
+ * SLI_PKT_SLIST_ES = SLI's Packet Scatter List Endian Swap
+ *
+ * The Endian Swap for Scatter List Read.
+ */
+union cvmx_sli_pkt_slist_es {
+ uint64_t u64;
+ struct cvmx_sli_pkt_slist_es_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t es : 64; /**< ES<1:0> for the packet output ring reads that
+ fetch buffer/info pointer pairs.
+ ES<2i+1:2i> becomes ES<1:0> in DPI/SLI reads that
+ fetch buffer/info pairs from packet output ring i
+ (from address SLI_PKTi_SLIST_BADDR+ in MAC memory
+ space.)
+ ES<1:0> is the endian-swap attribute for these MAC
+ memory space reads. */
+#else
+ uint64_t es : 64;
+#endif
+ } s;
+ struct cvmx_sli_pkt_slist_es_s cn61xx;
+ struct cvmx_sli_pkt_slist_es_s cn63xx;
+ struct cvmx_sli_pkt_slist_es_s cn63xxp1;
+ struct cvmx_sli_pkt_slist_es_s cn66xx;
+ struct cvmx_sli_pkt_slist_es_s cn68xx;
+ struct cvmx_sli_pkt_slist_es_s cn68xxp1;
+ struct cvmx_sli_pkt_slist_es_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_slist_es cvmx_sli_pkt_slist_es_t;
+
+/**
+ * cvmx_sli_pkt_slist_ns
+ *
+ * SLI_PKT_SLIST_NS = SLI's Packet Scatter List No Snoop
+ *
+ * The NS field for the TLP when fetching Scatter List.
+ */
+union cvmx_sli_pkt_slist_ns {
+ uint64_t u64;
+ struct cvmx_sli_pkt_slist_ns_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t nsr : 32; /**< ADDRTYPE<1> for the packet output ring reads that
+ fetch buffer/info pointer pairs.
+ NSR<i> becomes ADDRTYPE<1> in DPI/SLI reads that
+ fetch buffer/info pairs from packet output ring i
+ (from address SLI_PKTi_SLIST_BADDR+ in MAC memory
+ space.)
+ ADDRTYPE<1> is the no-snoop attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+#else
+ uint64_t nsr : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_slist_ns_s cn61xx;
+ struct cvmx_sli_pkt_slist_ns_s cn63xx;
+ struct cvmx_sli_pkt_slist_ns_s cn63xxp1;
+ struct cvmx_sli_pkt_slist_ns_s cn66xx;
+ struct cvmx_sli_pkt_slist_ns_s cn68xx;
+ struct cvmx_sli_pkt_slist_ns_s cn68xxp1;
+ struct cvmx_sli_pkt_slist_ns_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_slist_ns cvmx_sli_pkt_slist_ns_t;
+
+/**
+ * cvmx_sli_pkt_slist_ror
+ *
+ * SLI_PKT_SLIST_ROR = SLI's Packet Scatter List Relaxed Ordering
+ *
+ * The ROR field for the TLP when fetching Scatter List.
+ */
+union cvmx_sli_pkt_slist_ror {
+ uint64_t u64;
+ struct cvmx_sli_pkt_slist_ror_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t ror : 32; /**< ADDRTYPE<0> for the packet output ring reads that
+ fetch buffer/info pointer pairs.
+ ROR<i> becomes ADDRTYPE<0> in DPI/SLI reads that
+ fetch buffer/info pairs from packet output ring i
+ (from address SLI_PKTi_SLIST_BADDR+ in MAC memory
+ space.)
+ ADDRTYPE<0> is the relaxed-order attribute for PCIe,
+ and helps select an SRIO*_S2M_TYPE* entry with sRIO. */
+#else
+ uint64_t ror : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_slist_ror_s cn61xx;
+ struct cvmx_sli_pkt_slist_ror_s cn63xx;
+ struct cvmx_sli_pkt_slist_ror_s cn63xxp1;
+ struct cvmx_sli_pkt_slist_ror_s cn66xx;
+ struct cvmx_sli_pkt_slist_ror_s cn68xx;
+ struct cvmx_sli_pkt_slist_ror_s cn68xxp1;
+ struct cvmx_sli_pkt_slist_ror_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_slist_ror cvmx_sli_pkt_slist_ror_t;
+
+/**
+ * cvmx_sli_pkt_time_int
+ *
+ * SLI_PKT_TIME_INT = SLI Packet Timer Interrupt
+ *
+ * The packet rings that are interrupting because of Packet Timers.
+ */
+union cvmx_sli_pkt_time_int {
+ uint64_t u64;
+ struct cvmx_sli_pkt_time_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t port : 32; /**< Output ring packet timer interrupt bits
+ SLI sets PORT<i> whenever
+ SLI_PKTi_CNTS[TIMER] > SLI_PKT_INT_LEVELS[TIME].
+ SLI_PKT_TIME_INT_ENB[PORT<i>] is the corresponding
+ enable. */
+#else
+ uint64_t port : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_time_int_s cn61xx;
+ struct cvmx_sli_pkt_time_int_s cn63xx;
+ struct cvmx_sli_pkt_time_int_s cn63xxp1;
+ struct cvmx_sli_pkt_time_int_s cn66xx;
+ struct cvmx_sli_pkt_time_int_s cn68xx;
+ struct cvmx_sli_pkt_time_int_s cn68xxp1;
+ struct cvmx_sli_pkt_time_int_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_time_int cvmx_sli_pkt_time_int_t;
+
+/**
+ * cvmx_sli_pkt_time_int_enb
+ *
+ * SLI_PKT_TIME_INT_ENB = SLI Packet Timer Interrupt Enable
+ *
+ * Enable bits for the packet rings that interrupt because of Packet Timers.
+ */
+union cvmx_sli_pkt_time_int_enb {
+ uint64_t u64;
+ struct cvmx_sli_pkt_time_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t port : 32; /**< Output ring packet timer interrupt enables
+ When both PORT<i> and corresponding
+ SLI_PKT_TIME_INT[PORT<i>] are set, for any i,
+ then SLI_INT_SUM[PTIME] is set, which can cause
+ an interrupt. */
+#else
+ uint64_t port : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_pkt_time_int_enb_s cn61xx;
+ struct cvmx_sli_pkt_time_int_enb_s cn63xx;
+ struct cvmx_sli_pkt_time_int_enb_s cn63xxp1;
+ struct cvmx_sli_pkt_time_int_enb_s cn66xx;
+ struct cvmx_sli_pkt_time_int_enb_s cn68xx;
+ struct cvmx_sli_pkt_time_int_enb_s cn68xxp1;
+ struct cvmx_sli_pkt_time_int_enb_s cnf71xx;
+};
+typedef union cvmx_sli_pkt_time_int_enb cvmx_sli_pkt_time_int_enb_t;
+
+/**
+ * cvmx_sli_port#_pkind
+ *
+ * SLI_PORT[0..31]_PKIND = SLI Port Pkind
+ *
+ * The SLI/DPI supports 32 input rings for fetching input packets. This register maps the input-rings (0-31) to a PKIND.
+ */
+union cvmx_sli_portx_pkind {
+ uint64_t u64;
+ struct cvmx_sli_portx_pkind_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t rpk_enb : 1; /**< Alternate PKT_INST_HDR PKind Enable for this ring.
+ When RPK_ENB==1 and DPI prepends
+ a PKT_INST_HDR to a packet, the pkind for the
+ packet is PKINDR (rather than PKIND), and any
+ special PIP/IPD processing of the DPI packet is
+ disabled (see PIP_PRT_CFG*[INST_HDR,HIGIG_EN]).
+ (DPI prepends a PKT_INST_HDR when either
+ DPI_INST_HDR[R]==1 for the packet or
+ SLI_PKT*_INSTR_HEADER[USE_IHDR]==1 for the ring.)
+ When RPK_ENB==0, PKIND is the pkind for all
+ packets through the input ring, and
+ PIP/IPD will process a DPI packet that has a
+ PKT_INST_HDR specially. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t pkindr : 6; /**< Port Kind For this Ring used with packets
+ that include a DPI-prepended PKT_INST_HDR
+ when RPK_ENB is set. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t bpkind : 6; /**< Back-pressure pkind for this Ring. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t pkind : 6; /**< Port Kind For this Ring. */
+#else
+ uint64_t pkind : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t bpkind : 6;
+ uint64_t reserved_14_15 : 2;
+ uint64_t pkindr : 6;
+ uint64_t reserved_22_23 : 2;
+ uint64_t rpk_enb : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_sli_portx_pkind_s cn68xx;
+ struct cvmx_sli_portx_pkind_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t bpkind : 6; /**< Back-pressure pkind for this Ring. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t pkind : 6; /**< Port Kind For this Ring. */
+#else
+ uint64_t pkind : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t bpkind : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_sli_portx_pkind cvmx_sli_portx_pkind_t;
+
+/**
+ * cvmx_sli_s2m_port#_ctl
+ *
+ * SLI_S2M_PORTX_CTL = SLI's S2M Port X Control
+ *
+ * Contains control for access from SLI to a MAC port.
+ * Writes to this register are not ordered with writes/reads to the MAC Memory space.
+ * To ensure that a write has completed, the user must read the register back before
+ * making an access (i.e. to MAC memory space) that requires the value of this register to be updated.
+ */
+union cvmx_sli_s2m_portx_ctl {
+ uint64_t u64;
+ struct cvmx_sli_s2m_portx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t wind_d : 1; /**< When set to '1', disables access to the Window
+ Registers from the MAC-Port.
+ When Authenticate-Mode is set, the reset value of
+ this field is '1', else '0'. */
+ uint64_t bar0_d : 1; /**< When set to '1', disables access from the MAC to
+ BAR-0 address offsets: less than 0x330,
+ 0x3CD0, and greater than 0x3D70, excluding
+ 0x3e00.
+ When Authenticate-Mode is set, the reset value of
+ this field is '1', else '0'. */
+ uint64_t mrrs : 3; /**< Max Read Request Size
+ 0 = 128B
+ 1 = 256B
+ 2 = 512B
+ 3 = 1024B
+ 4 = 2048B
+ 5-7 = Reserved
+ This field should not exceed the desired
+ max read request size. This field is used to
+ determine if an IOBDMA is too large.
+ For a PCIe MAC, this field should not exceed
+ PCIE*_CFG030[MRRS].
+ For a sRIO MAC, this field should indicate a size
+ of 256B or smaller. */
+#else
+ uint64_t mrrs : 3;
+ uint64_t bar0_d : 1;
+ uint64_t wind_d : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_sli_s2m_portx_ctl_s cn61xx;
+ struct cvmx_sli_s2m_portx_ctl_s cn63xx;
+ struct cvmx_sli_s2m_portx_ctl_s cn63xxp1;
+ struct cvmx_sli_s2m_portx_ctl_s cn66xx;
+ struct cvmx_sli_s2m_portx_ctl_s cn68xx;
+ struct cvmx_sli_s2m_portx_ctl_s cn68xxp1;
+ struct cvmx_sli_s2m_portx_ctl_s cnf71xx;
+};
+typedef union cvmx_sli_s2m_portx_ctl cvmx_sli_s2m_portx_ctl_t;
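+
+/* Editor's sketch (illustrative, not part of the generated file): programming
+ * MRRS and then reading the register back, per the ordering note above, so
+ * the update is guaranteed to land before any MAC memory-space access.
+ * Assumes a CVMX_SLI_S2M_PORTX_CTL(port) address macro defined earlier in
+ * this header. */
+static inline void cvmx_sli_example_set_mrrs(unsigned int mac_port, unsigned int mrrs_code)
+{
+ cvmx_sli_s2m_portx_ctl_t ctl;
+ ctl.u64 = cvmx_read_csr(CVMX_SLI_S2M_PORTX_CTL(mac_port));
+ ctl.s.mrrs = mrrs_code; /* e.g. 1 == 256B; must not exceed PCIE*_CFG030[MRRS] */
+ cvmx_write_csr(CVMX_SLI_S2M_PORTX_CTL(mac_port), ctl.u64);
+ cvmx_read_csr(CVMX_SLI_S2M_PORTX_CTL(mac_port)); /* read back to flush the write */
+}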
+
+/**
+ * cvmx_sli_scratch_1
+ *
+ * SLI_SCRATCH_1 = SLI's Scratch 1
+ *
+ * A general purpose 64 bit register for SW use.
+ */
+union cvmx_sli_scratch_1 {
+ uint64_t u64;
+ struct cvmx_sli_scratch_1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< The value in this register is totally SW dependent. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_sli_scratch_1_s cn61xx;
+ struct cvmx_sli_scratch_1_s cn63xx;
+ struct cvmx_sli_scratch_1_s cn63xxp1;
+ struct cvmx_sli_scratch_1_s cn66xx;
+ struct cvmx_sli_scratch_1_s cn68xx;
+ struct cvmx_sli_scratch_1_s cn68xxp1;
+ struct cvmx_sli_scratch_1_s cnf71xx;
+};
+typedef union cvmx_sli_scratch_1 cvmx_sli_scratch_1_t;
+
+/**
+ * cvmx_sli_scratch_2
+ *
+ * SLI_SCRATCH_2 = SLI's Scratch 2
+ *
+ * A general purpose 64 bit register for SW use.
+ */
+union cvmx_sli_scratch_2 {
+ uint64_t u64;
+ struct cvmx_sli_scratch_2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< The value in this register is totally SW dependent. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_sli_scratch_2_s cn61xx;
+ struct cvmx_sli_scratch_2_s cn63xx;
+ struct cvmx_sli_scratch_2_s cn63xxp1;
+ struct cvmx_sli_scratch_2_s cn66xx;
+ struct cvmx_sli_scratch_2_s cn68xx;
+ struct cvmx_sli_scratch_2_s cn68xxp1;
+ struct cvmx_sli_scratch_2_s cnf71xx;
+};
+typedef union cvmx_sli_scratch_2 cvmx_sli_scratch_2_t;
+
+/**
+ * cvmx_sli_state1
+ *
+ * SLI_STATE1 = SLI State 1
+ *
+ * State machines in SLI. For debug.
+ */
+union cvmx_sli_state1 {
+ uint64_t u64;
+ struct cvmx_sli_state1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cpl1 : 12; /**< CPL1 State */
+ uint64_t cpl0 : 12; /**< CPL0 State */
+ uint64_t arb : 1; /**< ARB State */
+ uint64_t csr : 39; /**< CSR State */
+#else
+ uint64_t csr : 39;
+ uint64_t arb : 1;
+ uint64_t cpl0 : 12;
+ uint64_t cpl1 : 12;
+#endif
+ } s;
+ struct cvmx_sli_state1_s cn61xx;
+ struct cvmx_sli_state1_s cn63xx;
+ struct cvmx_sli_state1_s cn63xxp1;
+ struct cvmx_sli_state1_s cn66xx;
+ struct cvmx_sli_state1_s cn68xx;
+ struct cvmx_sli_state1_s cn68xxp1;
+ struct cvmx_sli_state1_s cnf71xx;
+};
+typedef union cvmx_sli_state1 cvmx_sli_state1_t;
+
+/**
+ * cvmx_sli_state2
+ *
+ * SLI_STATE2 = SLI State 2
+ *
+ * State machines in SLI. For debug.
+ */
+union cvmx_sli_state2 {
+ uint64_t u64;
+ struct cvmx_sli_state2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t nnp1 : 8; /**< NNP1 State */
+ uint64_t reserved_47_47 : 1;
+ uint64_t rac : 1; /**< RAC State */
+ uint64_t csm1 : 15; /**< CSM1 State */
+ uint64_t csm0 : 15; /**< CSM0 State */
+ uint64_t nnp0 : 8; /**< NNP0 State */
+ uint64_t nnd : 8; /**< NND State */
+#else
+ uint64_t nnd : 8;
+ uint64_t nnp0 : 8;
+ uint64_t csm0 : 15;
+ uint64_t csm1 : 15;
+ uint64_t rac : 1;
+ uint64_t reserved_47_47 : 1;
+ uint64_t nnp1 : 8;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_sli_state2_s cn61xx;
+ struct cvmx_sli_state2_s cn63xx;
+ struct cvmx_sli_state2_s cn63xxp1;
+ struct cvmx_sli_state2_s cn66xx;
+ struct cvmx_sli_state2_s cn68xx;
+ struct cvmx_sli_state2_s cn68xxp1;
+ struct cvmx_sli_state2_s cnf71xx;
+};
+typedef union cvmx_sli_state2 cvmx_sli_state2_t;
+
+/**
+ * cvmx_sli_state3
+ *
+ * SLI_STATE3 = SLI State 3
+ *
+ * State machines in SLI. For debug.
+ */
+union cvmx_sli_state3 {
+ uint64_t u64;
+ struct cvmx_sli_state3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t psm1 : 15; /**< PSM1 State */
+ uint64_t psm0 : 15; /**< PSM0 State */
+ uint64_t nsm1 : 13; /**< NSM1 State */
+ uint64_t nsm0 : 13; /**< NSM0 State */
+#else
+ uint64_t nsm0 : 13;
+ uint64_t nsm1 : 13;
+ uint64_t psm0 : 15;
+ uint64_t psm1 : 15;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_sli_state3_s cn61xx;
+ struct cvmx_sli_state3_s cn63xx;
+ struct cvmx_sli_state3_s cn63xxp1;
+ struct cvmx_sli_state3_s cn66xx;
+ struct cvmx_sli_state3_s cn68xx;
+ struct cvmx_sli_state3_s cn68xxp1;
+ struct cvmx_sli_state3_s cnf71xx;
+};
+typedef union cvmx_sli_state3 cvmx_sli_state3_t;
+
+/**
+ * cvmx_sli_tx_pipe
+ *
+ * SLI_TX_PIPE = SLI Packet TX Pipe
+ *
+ * Contains the starting pipe number and number of pipes used by the SLI packet Output.
+ * If a packet is received from PKO with an out-of-range PIPE number, the following occurs:
+ * - SLI_INT_SUM[PIPE_ERR] is set.
+ * - the out of range pipe value is used for returning credits to the PKO.
+ * - the PCIe packet engine will treat the PIPE value as equal to [BASE].
+ */
+union cvmx_sli_tx_pipe {
+ uint64_t u64;
+ struct cvmx_sli_tx_pipe_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t nump : 8; /**< Number of pipes the SLI/DPI supports.
+ When this value is 4 or less there is a performance
+ advantage for output packets.
+ The SLI/DPI can support up to 32 pipes assigned to
+ packet-rings 0 - 31. */
+ uint64_t reserved_7_15 : 9;
+ uint64_t base : 7; /**< When NUMP is non-zero, indicates the base pipe
+ number the SLI/DPI will accept.
+ The SLI/DPI will accept pko packets from pipes in
+ the range of:
+ BASE .. (BASE+(NUMP-1))
+ BASE and NUMP must be constrained such that
+ 1) BASE+(NUMP-1) < 127
+ 2) Each used PKO pipe must map to exactly
+ one ring, where BASE == ring 0, BASE+1 ==
+ ring 1, etc.
+ 3) The pipe ranges must be consistent with
+ the PKO configuration. */
+#else
+ uint64_t base : 7;
+ uint64_t reserved_7_15 : 9;
+ uint64_t nump : 8;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_sli_tx_pipe_s cn68xx;
+ struct cvmx_sli_tx_pipe_s cn68xxp1;
+};
+typedef union cvmx_sli_tx_pipe cvmx_sli_tx_pipe_t;
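+
+/* Editor's sketch (illustrative, not part of the generated file): programming
+ * BASE/NUMP while honoring constraint 1 above (BASE+(NUMP-1) < 127). Assumes
+ * a CVMX_SLI_TX_PIPE address macro defined earlier in this header; the PKO
+ * side must be configured consistently (constraints 2 and 3). */
+static inline int cvmx_sli_example_set_tx_pipe(unsigned int base, unsigned int nump)
+{
+ cvmx_sli_tx_pipe_t tx;
+ if (nump && base + (nump - 1) >= 127)
+ return -1; /* violates BASE+(NUMP-1) < 127 */
+ tx.u64 = cvmx_read_csr(CVMX_SLI_TX_PIPE);
+ tx.s.base = base; /* pipe BASE -> ring 0, BASE+1 -> ring 1, ... */
+ tx.s.nump = nump;
+ cvmx_write_csr(CVMX_SLI_TX_PIPE, tx.u64);
+ return 0;
+}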
+
+/**
+ * cvmx_sli_win_rd_addr
+ *
+ * SLI_WIN_RD_ADDR = SLI Window Read Address Register
+ *
+ * The address to be read when the SLI_WIN_RD_DATA register is read.
+ * This register should NOT be used to read SLI_* registers.
+ */
+union cvmx_sli_win_rd_addr {
+ uint64_t u64;
+ struct cvmx_sli_win_rd_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63 : 13;
+ uint64_t ld_cmd : 2; /**< The load command sent with the read.
+ 0x3 == Load 8 bytes, 0x2 == Load 4 bytes,
+ 0x1 == Load 2 bytes, 0x0 == Load 1 byte. */
+ uint64_t iobit : 1; /**< A 1 or 0 can be written here but will not be used
+ in address generation. */
+ uint64_t rd_addr : 48; /**< The address to be read from.
+ [47:40] = NCB_ID
+ [39:0] = Address
+ When [47:43] == SLI & [42:40] == 0 bits [39:0] are:
+ [39:32] == x, Not Used
+ [31:24] == RSL_ID
+ [23:0] == RSL Register Offset */
+#else
+ uint64_t rd_addr : 48;
+ uint64_t iobit : 1;
+ uint64_t ld_cmd : 2;
+ uint64_t reserved_51_63 : 13;
+#endif
+ } s;
+ struct cvmx_sli_win_rd_addr_s cn61xx;
+ struct cvmx_sli_win_rd_addr_s cn63xx;
+ struct cvmx_sli_win_rd_addr_s cn63xxp1;
+ struct cvmx_sli_win_rd_addr_s cn66xx;
+ struct cvmx_sli_win_rd_addr_s cn68xx;
+ struct cvmx_sli_win_rd_addr_s cn68xxp1;
+ struct cvmx_sli_win_rd_addr_s cnf71xx;
+};
+typedef union cvmx_sli_win_rd_addr cvmx_sli_win_rd_addr_t;
+
+/**
+ * cvmx_sli_win_rd_data
+ *
+ * SLI_WIN_RD_DATA = SLI Window Read Data Register
+ *
+ * Reading this register causes a window read operation to take place. The address read is the one contained in the SLI_WIN_RD_ADDR
+ * register.
+ */
+union cvmx_sli_win_rd_data {
+ uint64_t u64;
+ struct cvmx_sli_win_rd_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rd_data : 64; /**< The read data. */
+#else
+ uint64_t rd_data : 64;
+#endif
+ } s;
+ struct cvmx_sli_win_rd_data_s cn61xx;
+ struct cvmx_sli_win_rd_data_s cn63xx;
+ struct cvmx_sli_win_rd_data_s cn63xxp1;
+ struct cvmx_sli_win_rd_data_s cn66xx;
+ struct cvmx_sli_win_rd_data_s cn68xx;
+ struct cvmx_sli_win_rd_data_s cn68xxp1;
+ struct cvmx_sli_win_rd_data_s cnf71xx;
+};
+typedef union cvmx_sli_win_rd_data cvmx_sli_win_rd_data_t;
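+
+/* Editor's sketch (illustrative, not part of the generated file): an 8-byte
+ * window read, pairing SLI_WIN_RD_ADDR with SLI_WIN_RD_DATA as described
+ * above. Shown with local CSR accessors for brevity; a remote host would
+ * perform the equivalent accesses through BAR0. Assumes CVMX_SLI_WIN_RD_ADDR
+ * and CVMX_SLI_WIN_RD_DATA address macros defined earlier in this header. */
+static inline uint64_t cvmx_sli_example_window_read8(uint64_t ncb_addr)
+{
+ cvmx_sli_win_rd_addr_t rd;
+ rd.u64 = 0;
+ rd.s.rd_addr = ncb_addr; /* [47:40] NCB_ID, [39:0] address */
+ rd.s.ld_cmd = 0x3; /* load 8 bytes */
+ cvmx_write_csr(CVMX_SLI_WIN_RD_ADDR, rd.u64);
+ return cvmx_read_csr(CVMX_SLI_WIN_RD_DATA); /* this read triggers the window op */
+}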
+
+/**
+ * cvmx_sli_win_wr_addr
+ *
+ * Add Lock Register (Set on Read, Clear on Write); SW uses it to control access to BAR0 space.
+ *
+ * Total Address is 16Kb; 0x0000 - 0x3fff, 0x000 - 0x7fe(Reg, every other 8B)
+ *
+ * General 5kb; 0x0000 - 0x13ff, 0x000 - 0x27e(Reg-General)
+ * PktMem 10Kb; 0x1400 - 0x3bff, 0x280 - 0x77e(Reg-General-Packet)
+ * Rsvd 1Kb; 0x3c00 - 0x3fff, 0x780 - 0x7fe(Reg-NCB Only Mode)
+ *
+ * SLI_WIN_WR_ADDR = SLI Window Write Address Register
+ *
+ * Contains the address to be written to when a write operation is started by writing the
+ * SLI_WIN_WR_DATA register (see below).
+ *
+ * This register should NOT be used to write SLI_* registers.
+ */
+union cvmx_sli_win_wr_addr {
+ uint64_t u64;
+ struct cvmx_sli_win_wr_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63 : 15;
+ uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always
+ read as '0'. */
+ uint64_t wr_addr : 45; /**< The address that will be written to when the
+ SLI_WIN_WR_DATA register is written.
+ [47:40] = NCB_ID
+ [39:3] = Address
+ When [47:43] == SLI & [42:40] == 0 bits [39:0] are:
+ [39:32] == x, Not Used
+ [31:24] == RSL_ID
+ [23:3] == RSL Register Offset */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t wr_addr : 45;
+ uint64_t iobit : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } s;
+ struct cvmx_sli_win_wr_addr_s cn61xx;
+ struct cvmx_sli_win_wr_addr_s cn63xx;
+ struct cvmx_sli_win_wr_addr_s cn63xxp1;
+ struct cvmx_sli_win_wr_addr_s cn66xx;
+ struct cvmx_sli_win_wr_addr_s cn68xx;
+ struct cvmx_sli_win_wr_addr_s cn68xxp1;
+ struct cvmx_sli_win_wr_addr_s cnf71xx;
+};
+typedef union cvmx_sli_win_wr_addr cvmx_sli_win_wr_addr_t;
+
+/**
+ * cvmx_sli_win_wr_data
+ *
+ * SLI_WIN_WR_DATA = SLI Window Write Data Register
+ *
+ * Contains the data to write to the address located in the SLI_WIN_WR_ADDR Register.
+ * Writing the least-significant-byte of this register will cause a write operation to take place.
+ */
+union cvmx_sli_win_wr_data {
+ uint64_t u64;
+ struct cvmx_sli_win_wr_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wr_data : 64; /**< The data to be written. Whenever the LSB of this
+ register is written, the Window Write will take
+ place. */
+#else
+ uint64_t wr_data : 64;
+#endif
+ } s;
+ struct cvmx_sli_win_wr_data_s cn61xx;
+ struct cvmx_sli_win_wr_data_s cn63xx;
+ struct cvmx_sli_win_wr_data_s cn63xxp1;
+ struct cvmx_sli_win_wr_data_s cn66xx;
+ struct cvmx_sli_win_wr_data_s cn68xx;
+ struct cvmx_sli_win_wr_data_s cn68xxp1;
+ struct cvmx_sli_win_wr_data_s cnf71xx;
+};
+typedef union cvmx_sli_win_wr_data cvmx_sli_win_wr_data_t;
+
+/**
+ * cvmx_sli_win_wr_mask
+ *
+ * SLI_WIN_WR_MASK = SLI Window Write Mask Register
+ *
+ * Contains the mask for the data in the SLI_WIN_WR_DATA Register.
+ */
+union cvmx_sli_win_wr_mask {
+ uint64_t u64;
+ struct cvmx_sli_win_wr_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t wr_mask : 8; /**< The write mask. When a bit is '1',
+ the corresponding byte will be written. The values
+ of this field must be contiguous, for 1, 2, 4, or
+ 8 byte operations, and aligned to the operation size.
+ A value of 0 will produce unpredictable results. */
+#else
+ uint64_t wr_mask : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_sli_win_wr_mask_s cn61xx;
+ struct cvmx_sli_win_wr_mask_s cn63xx;
+ struct cvmx_sli_win_wr_mask_s cn63xxp1;
+ struct cvmx_sli_win_wr_mask_s cn66xx;
+ struct cvmx_sli_win_wr_mask_s cn68xx;
+ struct cvmx_sli_win_wr_mask_s cn68xxp1;
+ struct cvmx_sli_win_wr_mask_s cnf71xx;
+};
+typedef union cvmx_sli_win_wr_mask cvmx_sli_win_wr_mask_t;
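+
+/* Editor's sketch (illustrative, not part of the generated file): a full
+ * 8-byte window write combining SLI_WIN_WR_ADDR, SLI_WIN_WR_MASK and
+ * SLI_WIN_WR_DATA; writing the data register's LSB is what launches the
+ * write. Assumes the corresponding CVMX_SLI_WIN_WR_* address macros defined
+ * earlier in this header. */
+static inline void cvmx_sli_example_window_write8(uint64_t ncb_addr, uint64_t data)
+{
+ cvmx_sli_win_wr_addr_t wr;
+ cvmx_sli_win_wr_mask_t mask;
+ wr.u64 = 0;
+ wr.s.wr_addr = ncb_addr >> 3; /* [47:40] NCB_ID, [39:3] address */
+ cvmx_write_csr(CVMX_SLI_WIN_WR_ADDR, wr.u64);
+ mask.u64 = 0;
+ mask.s.wr_mask = 0xff; /* write all 8 bytes */
+ cvmx_write_csr(CVMX_SLI_WIN_WR_MASK, mask.u64);
+ cvmx_write_csr(CVMX_SLI_WIN_WR_DATA, data); /* LSB write starts the operation */
+}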
+
+/**
+ * cvmx_sli_window_ctl
+ *
+ * 81e0 - 82d0 Reserved for future subids
+ *
+ * SLI_WINDOW_CTL = SLI's Window Control
+ *
+ * Access to register space on the NCB (caused by Window Reads/Writes) will wait for a period of time specified
+ * by this register before timing out. Because a Window Access can access the RML, which has a fixed timeout of 0xFFFF
+ * core clocks, the value of this register should be set to a minimum of 0x200000 to ensure that a timeout to an RML register
+ * occurs on the RML 0xFFFF timer before the timeout for a BAR0 access from the MAC.
+ */
+union cvmx_sli_window_ctl {
+ uint64_t u64;
+ struct cvmx_sli_window_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t time : 32; /**< Time to wait in core clocks for a
+ BAR0 access to complete on the NCB
+ before timing out. A value of 0 will cause no
+ timeouts. A minimum value of 0x200000 should be
+ used when this register is not set to 0x0. */
+#else
+ uint64_t time : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sli_window_ctl_s cn61xx;
+ struct cvmx_sli_window_ctl_s cn63xx;
+ struct cvmx_sli_window_ctl_s cn63xxp1;
+ struct cvmx_sli_window_ctl_s cn66xx;
+ struct cvmx_sli_window_ctl_s cn68xx;
+ struct cvmx_sli_window_ctl_s cn68xxp1;
+ struct cvmx_sli_window_ctl_s cnf71xx;
+};
+typedef union cvmx_sli_window_ctl cvmx_sli_window_ctl_t;
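+
+/* Editor's sketch (illustrative, not part of the generated file): applying
+ * the recommended minimum timeout from the note above so RML accesses time
+ * out on their own 0xFFFF timer first. Assumes a CVMX_SLI_WINDOW_CTL address
+ * macro defined earlier in this header. */
+static inline void cvmx_sli_example_set_window_timeout(void)
+{
+ cvmx_sli_window_ctl_t ctl;
+ ctl.u64 = cvmx_read_csr(CVMX_SLI_WINDOW_CTL);
+ ctl.s.time = 0x200000; /* recommended minimum when timeouts are not disabled (0) */
+ cvmx_write_csr(CVMX_SLI_WINDOW_CTL, ctl.u64);
+}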
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-sli-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-smi-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-smi-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-smi-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,105 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-smi-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon smi.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_SMI_DEFS_H__
+#define __CVMX_SMI_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SMI_DRV_CTL CVMX_SMI_DRV_CTL_FUNC()
+static inline uint64_t CVMX_SMI_DRV_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_SMI_DRV_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180000001828ull);
+}
+#else
+#define CVMX_SMI_DRV_CTL (CVMX_ADD_IO_SEG(0x0001180000001828ull))
+#endif
+
+/**
+ * cvmx_smi_drv_ctl
+ *
+ * SMI_DRV_CTL = SMI Drive Strength Control
+ *
+ */
+union cvmx_smi_drv_ctl {
+ uint64_t u64;
+ struct cvmx_smi_drv_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t pctl : 6; /**< PCTL Drive strength control bits
+ Assuming a 50ohm termination
+ 3.3v supply = 19
+ 2.5v supply = TBD */
+ uint64_t reserved_6_7 : 2;
+ uint64_t nctl : 6; /**< NCTL Drive strength control bits
+ Assuming a 50ohm termination
+ 3.3v supply = 15
+ 2.5v supply = TBD */
+#else
+ uint64_t nctl : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t pctl : 6;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_smi_drv_ctl_s cn61xx;
+ struct cvmx_smi_drv_ctl_s cn63xx;
+ struct cvmx_smi_drv_ctl_s cn63xxp1;
+ struct cvmx_smi_drv_ctl_s cn66xx;
+ struct cvmx_smi_drv_ctl_s cn68xx;
+ struct cvmx_smi_drv_ctl_s cn68xxp1;
+ struct cvmx_smi_drv_ctl_s cnf71xx;
+};
+typedef union cvmx_smi_drv_ctl cvmx_smi_drv_ctl_t;
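+
+/* Editor's sketch (illustrative, not part of the generated file): programming
+ * the 3.3V drive-strength values suggested in the field descriptions above,
+ * using the CVMX_SMI_DRV_CTL address defined at the top of this header.
+ * Assumes cvmx_read_csr()/cvmx_write_csr() from cvmx-access.h. */
+static inline void cvmx_smi_example_drv_3v3(void)
+{
+ cvmx_smi_drv_ctl_t drv;
+ drv.u64 = cvmx_read_csr(CVMX_SMI_DRV_CTL);
+ drv.s.pctl = 19; /* 3.3V supply, 50 ohm termination */
+ drv.s.nctl = 15;
+ cvmx_write_csr(CVMX_SMI_DRV_CTL, drv.u64);
+}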
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-smi-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-smix-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-smix-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-smix-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,514 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-smix-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon smix.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_SMIX_DEFS_H__
+#define __CVMX_SMIX_DEFS_H__
+
+static inline uint64_t CVMX_SMIX_CLK(unsigned long offset)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if ((offset == 0))
+ return CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 0) * 256;
+ break;
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 1) * 256;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 3))
+ return CVMX_ADD_IO_SEG(0x0001180000003818ull) + ((offset) & 3) * 128;
+ break;
+ }
+ cvmx_warn("CVMX_SMIX_CLK (offset = %lu) not supported on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 1) * 256;
+}
+static inline uint64_t CVMX_SMIX_CMD(unsigned long offset)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if ((offset == 0))
+ return CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 0) * 256;
+ break;
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 1) * 256;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 3))
+ return CVMX_ADD_IO_SEG(0x0001180000003800ull) + ((offset) & 3) * 128;
+ break;
+ }
+ cvmx_warn("CVMX_SMIX_CMD (offset = %lu) not supported on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 1) * 256;
+}
+static inline uint64_t CVMX_SMIX_EN(unsigned long offset)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if ((offset == 0))
+ return CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 0) * 256;
+ break;
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 1) * 256;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 3))
+ return CVMX_ADD_IO_SEG(0x0001180000003820ull) + ((offset) & 3) * 128;
+ break;
+ }
+ cvmx_warn("CVMX_SMIX_EN (offset = %lu) not supported on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 1) * 256;
+}
+static inline uint64_t CVMX_SMIX_RD_DAT(unsigned long offset)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if ((offset == 0))
+ return CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 0) * 256;
+ break;
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 1) * 256;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 3))
+ return CVMX_ADD_IO_SEG(0x0001180000003810ull) + ((offset) & 3) * 128;
+ break;
+ }
+ cvmx_warn("CVMX_SMIX_RD_DAT (offset = %lu) not supported on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 1) * 256;
+}
+static inline uint64_t CVMX_SMIX_WR_DAT(unsigned long offset)
+{
+ switch(cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ if ((offset == 0))
+ return CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 0) * 256;
+ break;
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 1))
+ return CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 1) * 256;
+ break;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ if ((offset <= 3))
+ return CVMX_ADD_IO_SEG(0x0001180000003808ull) + ((offset) & 3) * 128;
+ break;
+ }
+ cvmx_warn("CVMX_SMIX_WR_DAT (offset = %lu) not supported on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 1) * 256;
+}
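+
+/* The address helpers above return full CSR addresses for use with
+   cvmx_read_csr()/cvmx_write_csr(). For an offset that is out of range on
+   the running chip they warn via cvmx_warn() and fall back to a default
+   address rather than failing. */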
+
+/**
+ * cvmx_smi#_clk
+ *
+ * SMI_CLK = Clock Control Register
+ *
+ */
+union cvmx_smix_clk {
+ uint64_t u64;
+ struct cvmx_smix_clk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t mode : 1; /**< IEEE operating mode
+ 0=Clause 22 compliant
+ 1=Clause 45 compliant */
+ uint64_t reserved_21_23 : 3;
+ uint64_t sample_hi : 5; /**< When to sample read data (extended bits) */
+ uint64_t sample_mode : 1; /**< Read Data sampling mode
+ According to the 802.3 spec, on reads, the STA
+ transitions MDC and the PHY drives MDIO with
+ some delay relative to that edge. This is edge1.
+ The STA then samples MDIO on the next rising edge
+ of MDC. This is edge2. Octeon can sample the
+ read data relative to either edge.
+ 0=[SAMPLE_HI,SAMPLE] specify the sample time
+ relative to edge2
+ 1=[SAMPLE_HI,SAMPLE] specify the sample time
+ relative to edge1 */
+ uint64_t reserved_14_14 : 1;
+ uint64_t clk_idle : 1; /**< Do not toggle MDC on idle cycles */
+ uint64_t preamble : 1; /**< Send PREAMBLE on SMI transaction
+ PREAMBLE must be set 1 when MODE=1 in order
+ for the receiving PHY to correctly frame the
+ transaction. */
+ uint64_t sample : 4; /**< When to sample read data
+ (number of eclks after the rising edge of mdc)
+ ( [SAMPLE_HI,SAMPLE] > 1 )
+ ( [SAMPLE_HI, SAMPLE] + 3 <= 2*PHASE ) */
+ uint64_t phase : 8; /**< MDC Clock Phase
+ (number of eclks that make up an mdc phase)
+ (PHASE > 2) */
+#else
+ uint64_t phase : 8;
+ uint64_t sample : 4;
+ uint64_t preamble : 1;
+ uint64_t clk_idle : 1;
+ uint64_t reserved_14_14 : 1;
+ uint64_t sample_mode : 1;
+ uint64_t sample_hi : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t mode : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_smix_clk_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63 : 43;
+ uint64_t sample_hi : 5; /**< When to sample read data (extended bits) */
+ uint64_t sample_mode : 1; /**< Read Data sampling mode
+ According to the 802.3 spec, on reads, the STA
+ transitions MDC and the PHY drives MDIO with
+ some delay relative to that edge. This is edge1.
+ The STA then samples MDIO on the next rising edge
+ of MDC. This is edge2. Octeon can sample the
+ read data relative to either edge.
+ 0=[SAMPLE_HI,SAMPLE] specify the sample time
+ relative to edge2
+ 1=[SAMPLE_HI,SAMPLE] specify the sample time
+ relative to edge1 */
+ uint64_t reserved_14_14 : 1;
+ uint64_t clk_idle : 1; /**< Do not toggle MDC on idle cycles */
+ uint64_t preamble : 1; /**< Send PREAMBLE on SMI transaction */
+ uint64_t sample : 4; /**< When to sample read data
+ (number of eclks after the rising edge of mdc)
+ ( [SAMPLE_HI,SAMPLE] > 1 )
+ ( [SAMPLE_HI, SAMPLE] + 3 <= 2*PHASE ) */
+ uint64_t phase : 8; /**< MDC Clock Phase
+ (number of eclks that make up an mdc phase)
+ (PHASE > 2) */
+#else
+ uint64_t phase : 8;
+ uint64_t sample : 4;
+ uint64_t preamble : 1;
+ uint64_t clk_idle : 1;
+ uint64_t reserved_14_14 : 1;
+ uint64_t sample_mode : 1;
+ uint64_t sample_hi : 5;
+ uint64_t reserved_21_63 : 43;
+#endif
+ } cn30xx;
+ struct cvmx_smix_clk_cn30xx cn31xx;
+ struct cvmx_smix_clk_cn30xx cn38xx;
+ struct cvmx_smix_clk_cn30xx cn38xxp2;
+ struct cvmx_smix_clk_s cn50xx;
+ struct cvmx_smix_clk_s cn52xx;
+ struct cvmx_smix_clk_s cn52xxp1;
+ struct cvmx_smix_clk_s cn56xx;
+ struct cvmx_smix_clk_s cn56xxp1;
+ struct cvmx_smix_clk_cn30xx cn58xx;
+ struct cvmx_smix_clk_cn30xx cn58xxp1;
+ struct cvmx_smix_clk_s cn61xx;
+ struct cvmx_smix_clk_s cn63xx;
+ struct cvmx_smix_clk_s cn63xxp1;
+ struct cvmx_smix_clk_s cn66xx;
+ struct cvmx_smix_clk_s cn68xx;
+ struct cvmx_smix_clk_s cn68xxp1;
+ struct cvmx_smix_clk_s cnf71xx;
+};
+typedef union cvmx_smix_clk cvmx_smix_clk_t;
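+
+/* Usage sketch (illustrative, not part of the generated file): programming
+ * the SMI clock for bus 0 within the field constraints quoted above
+ * (PHASE > 2, [SAMPLE_HI,SAMPLE] > 1, [SAMPLE_HI,SAMPLE] + 3 <= 2*PHASE).
+ * The particular values are assumptions for the example, not recommendations.
+ *
+ *     cvmx_smix_clk_t smi_clk;
+ *     smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(0));
+ *     smi_clk.s.phase = 8;      // 8 eclks per MDC phase (PHASE > 2)
+ *     smi_clk.s.sample_hi = 0;
+ *     smi_clk.s.sample = 2;     // 2 > 1 and 2 + 3 <= 2*8
+ *     cvmx_write_csr(CVMX_SMIX_CLK(0), smi_clk.u64);
+ */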
+
+/**
+ * cvmx_smi#_cmd
+ *
+ * SMI_CMD = Force a Read/Write command to the PHY
+ *
+ *
+ * Notes:
+ * Writes to this register will create SMI xactions. Software will then poll SMI_RD_DAT[PENDING] or SMI_WR_DAT[PENDING] (depending on the xaction type) for completion.
+ *
+ */
+union cvmx_smix_cmd {
+ uint64_t u64;
+ struct cvmx_smix_cmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t phy_op : 2; /**< PHY Opcode depending on SMI_CLK[MODE]
+ SMI_CLK[MODE] == 0 (<=1Gbs / Clause 22)
+ x0=write
+ x1=read
+ SMI_CLK[MODE] == 1 (>1Gbs / Clause 45)
+ 00=address
+ 01=write
+ 11=read
+ 10=post-read-increment-address */
+ uint64_t reserved_13_15 : 3;
+ uint64_t phy_adr : 5; /**< PHY Address */
+ uint64_t reserved_5_7 : 3;
+ uint64_t reg_adr : 5; /**< PHY Register Offset */
+#else
+ uint64_t reg_adr : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t phy_adr : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t phy_op : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_smix_cmd_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t phy_op : 1; /**< PHY Opcode
+ 0=write
+ 1=read */
+ uint64_t reserved_13_15 : 3;
+ uint64_t phy_adr : 5; /**< PHY Address */
+ uint64_t reserved_5_7 : 3;
+ uint64_t reg_adr : 5; /**< PHY Register Offset */
+#else
+ uint64_t reg_adr : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t phy_adr : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t phy_op : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn30xx;
+ struct cvmx_smix_cmd_cn30xx cn31xx;
+ struct cvmx_smix_cmd_cn30xx cn38xx;
+ struct cvmx_smix_cmd_cn30xx cn38xxp2;
+ struct cvmx_smix_cmd_s cn50xx;
+ struct cvmx_smix_cmd_s cn52xx;
+ struct cvmx_smix_cmd_s cn52xxp1;
+ struct cvmx_smix_cmd_s cn56xx;
+ struct cvmx_smix_cmd_s cn56xxp1;
+ struct cvmx_smix_cmd_cn30xx cn58xx;
+ struct cvmx_smix_cmd_cn30xx cn58xxp1;
+ struct cvmx_smix_cmd_s cn61xx;
+ struct cvmx_smix_cmd_s cn63xx;
+ struct cvmx_smix_cmd_s cn63xxp1;
+ struct cvmx_smix_cmd_s cn66xx;
+ struct cvmx_smix_cmd_s cn68xx;
+ struct cvmx_smix_cmd_s cn68xxp1;
+ struct cvmx_smix_cmd_s cnf71xx;
+};
+typedef union cvmx_smix_cmd cvmx_smix_cmd_t;
+
+/**
+ * cvmx_smi#_en
+ *
+ * SMI_EN = Enable the SMI interface
+ *
+ */
+union cvmx_smix_en {
+ uint64_t u64;
+ struct cvmx_smix_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t en : 1; /**< Interface enable
+ 0=SMI Interface is down / no transactions, no MDC
+ 1=SMI Interface is up */
+#else
+ uint64_t en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_smix_en_s cn30xx;
+ struct cvmx_smix_en_s cn31xx;
+ struct cvmx_smix_en_s cn38xx;
+ struct cvmx_smix_en_s cn38xxp2;
+ struct cvmx_smix_en_s cn50xx;
+ struct cvmx_smix_en_s cn52xx;
+ struct cvmx_smix_en_s cn52xxp1;
+ struct cvmx_smix_en_s cn56xx;
+ struct cvmx_smix_en_s cn56xxp1;
+ struct cvmx_smix_en_s cn58xx;
+ struct cvmx_smix_en_s cn58xxp1;
+ struct cvmx_smix_en_s cn61xx;
+ struct cvmx_smix_en_s cn63xx;
+ struct cvmx_smix_en_s cn63xxp1;
+ struct cvmx_smix_en_s cn66xx;
+ struct cvmx_smix_en_s cn68xx;
+ struct cvmx_smix_en_s cn68xxp1;
+ struct cvmx_smix_en_s cnf71xx;
+};
+typedef union cvmx_smix_en cvmx_smix_en_t;
+
+/**
+ * cvmx_smi#_rd_dat
+ *
+ * SMI_RD_DAT = SMI Read Data
+ *
+ *
+ * Notes:
+ * VAL will assert when the read xaction completes. A read to this register
+ * will clear VAL. PENDING indicates that an SMI RD transaction is in flight.
+ */
+union cvmx_smix_rd_dat {
+ uint64_t u64;
+ struct cvmx_smix_rd_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t pending : 1; /**< Read Xaction Pending */
+ uint64_t val : 1; /**< Read Data Valid */
+ uint64_t dat : 16; /**< Read Data */
+#else
+ uint64_t dat : 16;
+ uint64_t val : 1;
+ uint64_t pending : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_smix_rd_dat_s cn30xx;
+ struct cvmx_smix_rd_dat_s cn31xx;
+ struct cvmx_smix_rd_dat_s cn38xx;
+ struct cvmx_smix_rd_dat_s cn38xxp2;
+ struct cvmx_smix_rd_dat_s cn50xx;
+ struct cvmx_smix_rd_dat_s cn52xx;
+ struct cvmx_smix_rd_dat_s cn52xxp1;
+ struct cvmx_smix_rd_dat_s cn56xx;
+ struct cvmx_smix_rd_dat_s cn56xxp1;
+ struct cvmx_smix_rd_dat_s cn58xx;
+ struct cvmx_smix_rd_dat_s cn58xxp1;
+ struct cvmx_smix_rd_dat_s cn61xx;
+ struct cvmx_smix_rd_dat_s cn63xx;
+ struct cvmx_smix_rd_dat_s cn63xxp1;
+ struct cvmx_smix_rd_dat_s cn66xx;
+ struct cvmx_smix_rd_dat_s cn68xx;
+ struct cvmx_smix_rd_dat_s cn68xxp1;
+ struct cvmx_smix_rd_dat_s cnf71xx;
+};
+typedef union cvmx_smix_rd_dat cvmx_smix_rd_dat_t;
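+
+/* Usage sketch (illustrative, not part of the generated file): a clause-22
+ * MDIO read issued through these CSRs. Writing SMI_CMD starts the
+ * transaction; per the notes above, software then polls SMI_RD_DAT until the
+ * transaction is no longer pending (reading SMI_RD_DAT also clears VAL).
+ * phy and reg are caller-supplied addresses; timeout handling is omitted.
+ *
+ *     cvmx_smix_cmd_t cmd;
+ *     cvmx_smix_rd_dat_t rd;
+ *     cmd.u64 = 0;
+ *     cmd.s.phy_op = 1;         // x1 = read when SMI_CLK[MODE] == 0
+ *     cmd.s.phy_adr = phy;
+ *     cmd.s.reg_adr = reg;
+ *     cvmx_write_csr(CVMX_SMIX_CMD(0), cmd.u64);
+ *     do {
+ *         rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0));
+ *     } while (rd.s.pending);
+ *     // rd.s.val indicates success; rd.s.dat holds the 16-bit result
+ */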
+
+/**
+ * cvmx_smi#_wr_dat
+ *
+ * SMI_WR_DAT = SMI Write Data
+ *
+ *
+ * Notes:
+ * VAL will assert when the write xaction completes. A read to this register
+ * will clear VAL. PENDING indicates that an SMI WR transaction is in flight.
+ */
+union cvmx_smix_wr_dat {
+ uint64_t u64;
+ struct cvmx_smix_wr_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t pending : 1; /**< Write Xaction Pending */
+ uint64_t val : 1; /**< Write Data Valid */
+ uint64_t dat : 16; /**< Write Data */
+#else
+ uint64_t dat : 16;
+ uint64_t val : 1;
+ uint64_t pending : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_smix_wr_dat_s cn30xx;
+ struct cvmx_smix_wr_dat_s cn31xx;
+ struct cvmx_smix_wr_dat_s cn38xx;
+ struct cvmx_smix_wr_dat_s cn38xxp2;
+ struct cvmx_smix_wr_dat_s cn50xx;
+ struct cvmx_smix_wr_dat_s cn52xx;
+ struct cvmx_smix_wr_dat_s cn52xxp1;
+ struct cvmx_smix_wr_dat_s cn56xx;
+ struct cvmx_smix_wr_dat_s cn56xxp1;
+ struct cvmx_smix_wr_dat_s cn58xx;
+ struct cvmx_smix_wr_dat_s cn58xxp1;
+ struct cvmx_smix_wr_dat_s cn61xx;
+ struct cvmx_smix_wr_dat_s cn63xx;
+ struct cvmx_smix_wr_dat_s cn63xxp1;
+ struct cvmx_smix_wr_dat_s cn66xx;
+ struct cvmx_smix_wr_dat_s cn68xx;
+ struct cvmx_smix_wr_dat_s cn68xxp1;
+ struct cvmx_smix_wr_dat_s cnf71xx;
+};
+typedef union cvmx_smix_wr_dat cvmx_smix_wr_dat_t;
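+
+/* Usage sketch (illustrative, not part of the generated file): the matching
+ * clause-22 MDIO write. The data is staged in SMI_WR_DAT before SMI_CMD
+ * starts the transaction, then software polls PENDING as described in the
+ * notes above. phy, reg and value are caller-supplied.
+ *
+ *     cvmx_smix_wr_dat_t wr;
+ *     cvmx_smix_cmd_t cmd;
+ *     wr.u64 = 0;
+ *     wr.s.dat = value;         // 16-bit write data
+ *     cvmx_write_csr(CVMX_SMIX_WR_DAT(0), wr.u64);
+ *     cmd.u64 = 0;
+ *     cmd.s.phy_op = 0;         // x0 = write when SMI_CLK[MODE] == 0
+ *     cmd.s.phy_adr = phy;
+ *     cmd.s.reg_adr = reg;
+ *     cvmx_write_csr(CVMX_SMIX_CMD(0), cmd.u64);
+ *     do {
+ *         wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(0));
+ *     } while (wr.s.pending);
+ */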
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-smix-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-spi.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-spi.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-spi.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,671 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support library for the SPI
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
+#include <asm/octeon/cvmx-srxx-defs.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-spi.h>
+#include <asm/octeon/cvmx-clock.h>
+#else
+#include "cvmx.h"
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include "cvmx-config.h"
+#endif
+#include "cvmx-sysinfo.h"
+#include "cvmx-pko.h"
+#include "cvmx-spi.h"
+#include "cvmx-clock.h"
+#endif
+
+
+#define INVOKE_CB(function_p, args...) \
+ do { \
+ if (function_p) { \
+ res = function_p(args); \
+ if (res) \
+ return res; \
+ } \
+ } while (0)
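+/* Note: INVOKE_CB expands in a context where an "int res" is in scope. It
+   invokes the callback only when the pointer is non-NULL, and returns res
+   from the enclosing function on the first non-zero (failing) callback. */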
+
+#if CVMX_ENABLE_DEBUG_PRINTS
+static const char *modes[] = {"UNKNOWN", "TX Halfplex", "Rx Halfplex", "Duplex"};
+#endif
+
+/* Default callbacks, can be overridden
+ * using cvmx_spi_get_callbacks/cvmx_spi_set_callbacks
+ */
+static cvmx_spi_callbacks_t cvmx_spi_callbacks = {
+ .reset_cb = cvmx_spi_reset_cb,
+ .calendar_setup_cb = cvmx_spi_calendar_setup_cb,
+ .clock_detect_cb = cvmx_spi_clock_detect_cb,
+ .training_cb = cvmx_spi_training_cb,
+ .calendar_sync_cb = cvmx_spi_calendar_sync_cb,
+ .interface_up_cb = cvmx_spi_interface_up_cb
+};
+
+/**
+ * Get current SPI4 initialization callbacks
+ *
+ * @param callbacks Pointer to the callbacks structure to fill
+ *
+ * @return Pointer to cvmx_spi_callbacks_t structure.
+ */
+void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t * callbacks)
+{
+ memcpy(callbacks, &cvmx_spi_callbacks, sizeof(cvmx_spi_callbacks));
+}
+
+/**
+ * Set new SPI4 initialization callbacks
+ *
+ * @param new_callbacks Pointer to an updated callbacks structure.
+ */
+void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t * new_callbacks)
+{
+ memcpy(&cvmx_spi_callbacks, new_callbacks, sizeof(cvmx_spi_callbacks));
+}
+
+/**
+ * Initialize and start the SPI interface.
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param timeout Timeout to wait for clock synchronization in seconds
+ * @param num_ports Number of SPI ports to configure
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout, int num_ports)
+{
+ int res = -1;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ return res;
+
+ // Callback to perform SPI4 reset
+ INVOKE_CB( cvmx_spi_callbacks.reset_cb, interface, mode);
+
+ // Callback to perform calendar setup
+ INVOKE_CB(cvmx_spi_callbacks.calendar_setup_cb, interface, mode, num_ports);
+
+ // Callback to perform clock detection
+ INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
+
+ // Callback to perform SPI4 link training
+ INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
+
+ // Callback to perform calendar sync
+ INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode, timeout);
+
+ // Callback to handle interface coming up
+ INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
+
+ return res;
+}
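+
+/* Usage sketch (illustrative only): bringing up a full-duplex SPI4
+ * interface. The 10 port / 10 second values are assumptions for the
+ * example; callers should use values appropriate to their board.
+ *
+ *     if (cvmx_spi_start_interface(0, CVMX_SPI_MODE_DUPLEX, 10, 10) != 0)
+ *         cvmx_dprintf("SPI0: initialization failed\n");
+ */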
+
+/**
+ * This routine restarts the SPI interface after it has lost synchronization
+ * with its correspondent system.
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param timeout Timeout to wait for clock synchronization in seconds
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ int res = -1;
+
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ return res;
+
+ cvmx_dprintf ("SPI%d: Restart %s\n", interface, modes[mode]);
+
+ // Callback to perform SPI4 reset
+ INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface,mode);
+
+ // NOTE: Calendar setup is not performed during restart
+ // Refer to cvmx_spi_start_interface() for the full sequence
+
+ // Callback to perform clock detection
+ INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
+
+ // Callback to perform SPI4 link training
+ INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
+
+ // Callback to perform calendar sync
+ INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode, timeout);
+
+ // Callback to handle interface coming up
+ INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
+
+ return res;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_spi_restart_interface);
+#endif
+
+/**
+ * Callback to perform SPI4 reset
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
+{
+ cvmx_spxx_dbg_deskew_ctl_t spxx_dbg_deskew_ctl;
+ cvmx_spxx_clk_ctl_t spxx_clk_ctl;
+ cvmx_spxx_bist_stat_t spxx_bist_stat;
+ cvmx_spxx_int_msk_t spxx_int_msk;
+ cvmx_stxx_int_msk_t stxx_int_msk;
+ cvmx_spxx_trn4_ctl_t spxx_trn4_ctl;
+ int index;
+ uint64_t MS = cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000;
+
+ /* Disable SPI error events while we run BIST */
+ spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
+ stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
+ cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
+
+ /* Run BIST in the SPI interface */
+ cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
+ cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
+ spxx_clk_ctl.u64 = 0;
+ spxx_clk_ctl.s.runbist = 1;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+ cvmx_wait (10 * MS);
+ spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
+ if (spxx_bist_stat.s.stat0)
+ cvmx_dprintf("ERROR SPI%d: BIST failed on receive datapath FIFO\n", interface);
+ if (spxx_bist_stat.s.stat1)
+ cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n", interface);
+ if (spxx_bist_stat.s.stat2)
+ cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n", interface);
+
+ /* Clear the calendar table after BIST to fix parity errors */
+ for (index=0; index<32; index++)
+ {
+ cvmx_srxx_spi4_calx_t srxx_spi4_calx;
+ cvmx_stxx_spi4_calx_t stxx_spi4_calx;
+
+ srxx_spi4_calx.u64 = 0;
+ srxx_spi4_calx.s.oddpar = 1;
+ cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface), srxx_spi4_calx.u64);
+
+ stxx_spi4_calx.u64 = 0;
+ stxx_spi4_calx.s.oddpar = 1;
+ cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface), stxx_spi4_calx.u64);
+ }
+
+ /* Re-enable reporting of error interrupts */
+ cvmx_write_csr(CVMX_SPXX_INT_REG(interface), cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
+ cvmx_write_csr(CVMX_STXX_INT_REG(interface), cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
+ cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
+
+ // Setup the CLKDLY right in the middle
+ spxx_clk_ctl.u64 = 0;
+ spxx_clk_ctl.s.seetrn = 0;
+ spxx_clk_ctl.s.clkdly = 0x10;
+ spxx_clk_ctl.s.runbist = 0;
+ spxx_clk_ctl.s.statdrv = 0;
+ spxx_clk_ctl.s.statrcv = 1; /* This should always be on the opposite edge from statdrv */
+ spxx_clk_ctl.s.sndtrn = 0;
+ spxx_clk_ctl.s.drptrn = 0;
+ spxx_clk_ctl.s.rcvtrn = 0;
+ spxx_clk_ctl.s.srxdlck = 0;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+ cvmx_wait (100 * MS);
+
+ // Reset SRX0 DLL
+ spxx_clk_ctl.s.srxdlck = 1;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+
+ // Waiting for Inf0 Spi4 RX DLL to lock
+ cvmx_wait (100 * MS);
+
+ // Enable dynamic alignment
+ spxx_trn4_ctl.u64 = 0; /* zero first: the union is otherwise uninitialized and fields not assigned below would be indeterminate */
+ spxx_trn4_ctl.s.trntest = 0;
+ spxx_trn4_ctl.s.jitter = 1;
+ spxx_trn4_ctl.s.clr_boot = 1;
+ spxx_trn4_ctl.s.set_boot = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX))
+ spxx_trn4_ctl.s.maxdist = 3;
+ else
+ spxx_trn4_ctl.s.maxdist = 8;
+ spxx_trn4_ctl.s.macro_en = 1;
+ spxx_trn4_ctl.s.mux_en = 1;
+ cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
+
+ spxx_dbg_deskew_ctl.u64 = 0;
+ cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), spxx_dbg_deskew_ctl.u64);
+
+ return 0;
+}
+
+/**
+ * Callback to setup calendar and miscellaneous settings before clock detection
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param num_ports Number of ports to configure on SPI
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode, int num_ports)
+{
+ int port;
+ int index;
+ if (mode & CVMX_SPI_MODE_RX_HALFPLEX)
+ {
+ cvmx_srxx_com_ctl_t srxx_com_ctl;
+ cvmx_srxx_spi4_stat_t srxx_spi4_stat;
+
+ // SRX0 number of Ports
+ srxx_com_ctl.u64 = 0;
+ srxx_com_ctl.s.prts = num_ports - 1;
+ srxx_com_ctl.s.st_en = 0;
+ srxx_com_ctl.s.inf_en = 0;
+ cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+
+ // SRX0 Calendar Table. This round-robins through all ports
+ port = 0;
+ index = 0;
+ while (port < num_ports)
+ {
+ cvmx_srxx_spi4_calx_t srxx_spi4_calx;
+ srxx_spi4_calx.u64 = 0;
+ srxx_spi4_calx.s.prt0 = port++;
+ srxx_spi4_calx.s.prt1 = port++;
+ srxx_spi4_calx.s.prt2 = port++;
+ srxx_spi4_calx.s.prt3 = port++;
+ srxx_spi4_calx.s.oddpar = ~(cvmx_dpop(srxx_spi4_calx.u64) & 1);
+ cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface), srxx_spi4_calx.u64);
+ index++;
+ }
+ srxx_spi4_stat.u64 = 0;
+ srxx_spi4_stat.s.len = num_ports;
+ srxx_spi4_stat.s.m = 1;
+ cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface), srxx_spi4_stat.u64);
+ }
+
+ if (mode & CVMX_SPI_MODE_TX_HALFPLEX)
+ {
+ cvmx_stxx_arb_ctl_t stxx_arb_ctl;
+ cvmx_gmxx_tx_spi_max_t gmxx_tx_spi_max;
+ cvmx_gmxx_tx_spi_thresh_t gmxx_tx_spi_thresh;
+ cvmx_gmxx_tx_spi_ctl_t gmxx_tx_spi_ctl;
+ cvmx_stxx_spi4_stat_t stxx_spi4_stat;
+ cvmx_stxx_spi4_dat_t stxx_spi4_dat;
+
+ // STX0 Config
+ stxx_arb_ctl.u64 = 0;
+ stxx_arb_ctl.s.igntpa = 0;
+ stxx_arb_ctl.s.mintrn = 0;
+ cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64);
+
+ gmxx_tx_spi_max.u64 = 0;
+ gmxx_tx_spi_max.s.max1 = 8;
+ gmxx_tx_spi_max.s.max2 = 4;
+ gmxx_tx_spi_max.s.slice = 0;
+ cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface), gmxx_tx_spi_max.u64);
+
+ gmxx_tx_spi_thresh.u64 = 0;
+ gmxx_tx_spi_thresh.s.thresh = 4;
+ cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface), gmxx_tx_spi_thresh.u64);
+
+ gmxx_tx_spi_ctl.u64 = 0;
+ gmxx_tx_spi_ctl.s.tpa_clr = 0;
+ gmxx_tx_spi_ctl.s.cont_pkt = 0;
+ cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface), gmxx_tx_spi_ctl.u64);
+
+ // STX0 Training Control
+ stxx_spi4_dat.u64 = 0;
+ stxx_spi4_dat.s.alpha = 32; /*Minimum needed by dynamic alignment*/
+ stxx_spi4_dat.s.max_t = 0xFFFF; /*Minimum interval is 0x20*/
+ cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface), stxx_spi4_dat.u64);
+
+ // STX0 Calendar Table. This round-robins through all ports
+ port = 0;
+ index = 0;
+ while (port < num_ports)
+ {
+ cvmx_stxx_spi4_calx_t stxx_spi4_calx;
+ stxx_spi4_calx.u64 = 0;
+ stxx_spi4_calx.s.prt0 = port++;
+ stxx_spi4_calx.s.prt1 = port++;
+ stxx_spi4_calx.s.prt2 = port++;
+ stxx_spi4_calx.s.prt3 = port++;
+ stxx_spi4_calx.s.oddpar = ~(cvmx_dpop(stxx_spi4_calx.u64) & 1);
+ cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface), stxx_spi4_calx.u64);
+ index++;
+ }
+ stxx_spi4_stat.u64 = 0;
+ stxx_spi4_stat.s.len = num_ports;
+ stxx_spi4_stat.s.m = 1;
+ cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface), stxx_spi4_stat.u64);
+ }
+
+ return 0;
+}
+
+/**
+ * Callback to perform clock detection
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param timeout Timeout to wait for clock synchronization in seconds
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ int clock_transitions;
+ cvmx_spxx_clk_stat_t stat;
+ uint64_t timeout_time;
+ uint64_t MS = cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000;
+
+ /* Regardless of operating mode, both Tx and Rx clocks must be present
+ for the SPI interface to operate. */
+ cvmx_dprintf ("SPI%d: Waiting to see TsClk...\n", interface);
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ /* Require 100 clock transitions in order to avoid any noise in the
+ beginning */
+ clock_transitions = 100;
+ do
+ {
+ stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+ if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions)
+ {
+ /* We've seen a clock transition, so decrement the number we still
+ need */
+ clock_transitions--;
+ cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+ stat.s.s4clk0 = 0;
+ stat.s.s4clk1 = 0;
+ }
+ if (cvmx_get_cycle() > timeout_time)
+ {
+ cvmx_dprintf ("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);
+
+ cvmx_dprintf ("SPI%d: Waiting to see RsClk...\n", interface);
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ /* Require 100 clock transitions in order to avoid any noise in the
+ beginning */
+ clock_transitions = 100;
+ do
+ {
+ stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
+ if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions)
+ {
+ /* We've seen a clock transition, so decrement the number we still
+ need */
+ clock_transitions--;
+ cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+ stat.s.d4clk0 = 0;
+ stat.s.d4clk1 = 0;
+ }
+ if (cvmx_get_cycle() > timeout_time)
+ {
+ cvmx_dprintf ("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);
+
+ return 0;
+}
+
+/**
+ * Callback to perform link training
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param timeout Timeout to wait for link to be trained (in seconds)
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ cvmx_spxx_trn4_ctl_t spxx_trn4_ctl;
+ cvmx_spxx_clk_stat_t stat;
+ uint64_t MS = cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000;
+ uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ int rx_training_needed;
+
+ // SRX0 & STX0 Inf0 Links are configured - begin training
+ cvmx_spxx_clk_ctl_t spxx_clk_ctl;
+ spxx_clk_ctl.u64 = 0;
+ spxx_clk_ctl.s.seetrn = 0;
+ spxx_clk_ctl.s.clkdly = 0x10;
+ spxx_clk_ctl.s.runbist = 0;
+ spxx_clk_ctl.s.statdrv = 0;
+ spxx_clk_ctl.s.statrcv = 1; /* This should always be on the opposite edge from statdrv */
+ spxx_clk_ctl.s.sndtrn = 1;
+ spxx_clk_ctl.s.drptrn = 1;
+ spxx_clk_ctl.s.rcvtrn = 1;
+ spxx_clk_ctl.s.srxdlck = 1;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+ cvmx_wait (1000 * MS);
+
+ // SRX0 clear the boot bit
+ spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
+ spxx_trn4_ctl.s.clr_boot = 1;
+ cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
+
+ // Wait for the training sequence to complete
+ cvmx_dprintf ("SPI%d: Waiting for training\n", interface);
+ cvmx_wait (1000 * MS);
+#if !defined(OCTEON_VENDOR_LANNER)
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * 600; /* Wait a really long time here */
+#else
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * 10;
+#endif
+ /* The HRM says we must wait for 34 + 16 * MAXDIST training sequences.
+ We'll be pessimistic and wait for a lot more */
+ rx_training_needed = 500;
+ do {
+ stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
+ if (stat.s.srxtrn && rx_training_needed)
+ {
+ rx_training_needed--;
+ cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+ stat.s.srxtrn = 0;
+ }
+ if (cvmx_get_cycle() > timeout_time)
+ {
+ cvmx_dprintf ("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.srxtrn == 0);
+
+ return 0;
+}
+
+/**
+ * Callback to perform calendar data synchronization
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param timeout Timeout to wait for calendar data in seconds
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ uint64_t MS = cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000;
+ if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
+ // SRX0 interface should be good, send calendar data
+ cvmx_srxx_com_ctl_t srxx_com_ctl;
+ cvmx_dprintf ("SPI%d: Rx is synchronized, start sending calendar data\n", interface);
+ srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
+ srxx_com_ctl.s.inf_en = 1;
+ srxx_com_ctl.s.st_en = 1;
+ cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+ }
+
+ if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
+ // STX0 has achieved sync
+ // The correspondent board should be sending calendar data
+ // Enable the STX0 STAT receiver.
+ cvmx_spxx_clk_stat_t stat;
+ uint64_t timeout_time;
+ cvmx_stxx_com_ctl_t stxx_com_ctl;
+ stxx_com_ctl.u64 = 0;
+ stxx_com_ctl.s.st_en = 1;
+ cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
+
+ // Waiting for calendar sync on STX0 STAT
+ cvmx_dprintf ("SPI%d: Waiting to sync on STX[%d] STAT\n", interface, interface);
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ // Poll SPX0_CLK_STAT[STXCAL] (bit 10); it reads 1 once calendar sync is achieved
+ do {
+ stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT (interface));
+ if (cvmx_get_cycle() > timeout_time)
+ {
+ cvmx_dprintf ("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.stxcal == 0);
+ }
+
+ return 0;
+}
+
+/**
+ * Callback to handle interface up
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode)
+{
+ cvmx_gmxx_rxx_frm_min_t gmxx_rxx_frm_min;
+ cvmx_gmxx_rxx_frm_max_t gmxx_rxx_frm_max;
+ cvmx_gmxx_rxx_jabber_t gmxx_rxx_jabber;
+
+ if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
+ cvmx_srxx_com_ctl_t srxx_com_ctl;
+ srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
+ srxx_com_ctl.s.inf_en = 1;
+ cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+ cvmx_dprintf ("SPI%d: Rx is now up\n", interface);
+ }
+
+ if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
+ cvmx_stxx_com_ctl_t stxx_com_ctl;
+ stxx_com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface));
+ stxx_com_ctl.s.inf_en = 1;
+ cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
+ cvmx_dprintf ("SPI%d: Tx is now up\n", interface);
+ }
+
+ gmxx_rxx_frm_min.u64 = 0;
+ gmxx_rxx_frm_min.s.len = 64;
+#ifdef OCTEON_VENDOR_RADISYS
+ /*
+ * Incoming packets on the RSYS4GBE have the FCS stripped.
+ */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE)
+ gmxx_rxx_frm_min.s.len -= 4;
+#endif
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(0,interface), gmxx_rxx_frm_min.u64);
+ gmxx_rxx_frm_max.u64 = 0;
+ gmxx_rxx_frm_max.s.len = 64*1024 - 4;
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(0,interface), gmxx_rxx_frm_max.u64);
+ gmxx_rxx_jabber.u64 = 0;
+ gmxx_rxx_jabber.s.cnt = 64*1024 - 4;
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER(0,interface), gmxx_rxx_jabber.u64);
+
+ return 0;
+}
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-spi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-spi.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-spi.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-spi.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,272 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file contains defines for the SPI interface
+
+ * <hr>$Revision: 70030 $<hr>
+ *
+ *
+ */
+#ifndef __CVMX_SPI_H__
+#define __CVMX_SPI_H__
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include "cvmx-gmxx-defs.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* CSR typedefs have been moved to cvmx-spi-defs.h */
+
+typedef enum
+{
+ CVMX_SPI_MODE_UNKNOWN = 0,
+ CVMX_SPI_MODE_TX_HALFPLEX = 1,
+ CVMX_SPI_MODE_RX_HALFPLEX = 2,
+ CVMX_SPI_MODE_DUPLEX = 3
+} cvmx_spi_mode_t;
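+
+/* The Tx and Rx halfplex values above are individual bits: cvmx-spi.c tests
+   them with (mode & CVMX_SPI_MODE_TX_HALFPLEX) and friends, and
+   CVMX_SPI_MODE_DUPLEX is simply both bits set. */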
+
+/** Callbacks structure to customize SPI4 initialization sequence */
+typedef struct
+{
+ /** Called to reset SPI4 DLL */
+ int (*reset_cb)(int interface, cvmx_spi_mode_t mode);
+
+ /** Called to setup calendar */
+ int (*calendar_setup_cb)(int interface, cvmx_spi_mode_t mode, int num_ports);
+
+ /** Called for Tx and Rx clock detection */
+ int (*clock_detect_cb)(int interface, cvmx_spi_mode_t mode, int timeout);
+
+ /** Called to perform link training */
+ int (*training_cb)(int interface, cvmx_spi_mode_t mode, int timeout);
+
+ /** Called for calendar data synchronization */
+ int (*calendar_sync_cb)(int interface, cvmx_spi_mode_t mode, int timeout);
+
+ /** Called when interface is up */
+ int (*interface_up_cb)(int interface, cvmx_spi_mode_t mode);
+
+} cvmx_spi_callbacks_t;
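+
+/* Usage sketch (illustrative only): replacing one step of the initialization
+ * sequence while keeping the defaults for the rest, via the get/set
+ * accessors declared below. my_reset_cb is a hypothetical user function
+ * matching the reset_cb signature.
+ *
+ *     cvmx_spi_callbacks_t cb;
+ *     cvmx_spi_get_callbacks(&cb);
+ *     cb.reset_cb = my_reset_cb;
+ *     cvmx_spi_set_callbacks(&cb);
+ */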
+
+
+/**
+ * Return true if the supplied interface is configured for SPI
+ *
+ * @param interface Interface to check
+ * @return True if interface is SPI
+ */
+static inline int cvmx_spi_is_spi_interface(int interface)
+{
+ uint64_t gmxState = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
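+ /* GMX_INF_MODE[EN] is bit 0 and GMX_INF_MODE[TYPE] is bit 1 (1 = SPI) on
+ SPI-capable chips, so both must be set for a SPI interface */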
+ return ((gmxState & 0x2) && (gmxState & 0x1));
+}
+
+/**
+ * Initialize and start the SPI interface.
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param timeout Timeout to wait for clock synchronization in seconds
+ * @param num_ports Number of SPI ports to configure
+ *
+ * @return Zero on success, negative on failure.
+ */
+extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout, int num_ports);
+
+/**
+ * This routine restarts the SPI interface after it has lost synchronization
+ * with its correspondent system.
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param timeout Timeout to wait for clock synchronization in seconds
+ * @return Zero on success, negative on failure.
+ */
+extern int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout);
+
+/**
+ * Return non-zero if the SPI interface has a SPI4000 attached
+ *
+ * @param interface SPI interface the SPI4000 is connected to
+ *
+ * @return Non-zero if a SPI4000 is present, zero otherwise.
+ */
+extern int cvmx_spi4000_is_present(int interface);
+
+/**
+ * Initialize the SPI4000 for use
+ *
+ * @param interface SPI interface the SPI4000 is connected to
+ */
+extern int cvmx_spi4000_initialize(int interface);
+
+/**
+ * Poll the given SPI4000 port and check its speed
+ *
+ * @param interface Interface the SPI4000 is on
+ * @param port Port to poll (0-9)
+ * @return Status of the port. 0=down. Any other value means the port is up.
+ */
+extern cvmx_gmxx_rxx_rx_inbnd_t cvmx_spi4000_check_speed(int interface, int port);
+
+/**
+ * Get current SPI4 initialization callbacks
+ *
+ * @param callbacks Pointer to the callbacks structure to fill
+ *
+ * @return Pointer to cvmx_spi_callbacks_t structure.
+ */
+extern void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t * callbacks);
+
+/**
+ * Set new SPI4 initialization callbacks
+ *
+ * @param new_callbacks Pointer to an updated callbacks structure.
+ */
+extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t * new_callbacks);
+
+/**
+ * Callback to perform SPI4 reset
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode);
+
+/**
+ * Callback to setup calendar and miscellaneous settings before clock detection
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param num_ports Number of ports to configure on SPI
+ *
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode, int num_ports);
+
+/**
+ * Callback to perform clock detection
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param timeout Timeout to wait for clock synchronization in seconds
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout);
+
+/**
+ * Callback to perform link training
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param timeout Timeout to wait for link to be trained (in seconds)
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout);
+
+/**
+ * Callback to perform calendar data synchronization
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @param timeout Timeout to wait for calendar data in seconds
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout);
+
+/**
+ * Callback to handle interface up
+ *
+ * @param interface The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @param mode The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @return Zero on success, non-zero error code on failure (will cause SPI initialization to abort)
+ */
+extern int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_SPI_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-spi.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-spi4000.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-spi4000.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-spi4000.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,532 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support library for the SPI4000 card
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-spi.h>
+#include <asm/octeon/cvmx-twsi.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#else
+#include "cvmx.h"
+#include "cvmx-spi.h"
+#include "cvmx-twsi.h"
+#endif
+
+/* If someone is using an old config, make the SPI4000 act like RGMII for backpressure */
+#ifndef CVMX_HELPER_DISABLE_SPI4000_BACKPRESSURE
+#ifndef CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
+#define CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE 0
+#endif
+#define CVMX_HELPER_DISABLE_SPI4000_BACKPRESSURE CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
+#endif
+
+#define SPI4000_READ_ADDRESS_HIGH 0xf0
+#define SPI4000_READ_ADDRESS_LOW 0xf1
+#define SPI4000_WRITE_ADDRESS_HIGH 0xf2
+#define SPI4000_WRITE_ADDRESS_LOW 0xf3
+#define SPI4000_READ_DATA0 0xf4 /* High byte */
+#define SPI4000_READ_DATA1 0xf5
+#define SPI4000_READ_DATA2 0xf6
+#define SPI4000_READ_DATA3 0xf7 /* Low byte */
+#define SPI4000_WRITE_DATA0 0xf8 /* High byte */
+#define SPI4000_WRITE_DATA1 0xf9
+#define SPI4000_WRITE_DATA2 0xfa
+#define SPI4000_WRITE_DATA3 0xfb /* Low byte */
+#define SPI4000_DO_READ 0xfc /* Issue a read, returns read status */
+#define SPI4000_GET_READ_STATUS 0xfd /* 0xff: initial state, 2: Read failed, 1: Read pending, 0: Read success */
+#define SPI4000_DO_WRITE 0xfe /* Issue a write, returns write status */
+#define SPI4000_GET_WRITE_STATUS 0xff /* 0xff: initial state, 6: Write failed, 5: Write pending, 4: Write success */
+#define SPI4000_TWSI_ID(interface) (0x66 + interface)
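+
+/* The registers above implement an indirect access protocol over TWSI: stage
+   the target address (and, for writes, the write data), start the transfer
+   with DO_READ/DO_WRITE, then poll GET_READ_STATUS/GET_WRITE_STATUS until the
+   pending status clears. __cvmx_spi4000_read()/__cvmx_spi4000_write() below
+   follow exactly this sequence. */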
+
+/* MDI Single Command (register 0x680) */
+typedef union
+{
+ uint32_t u32;
+ struct
+ {
+ uint32_t reserved_21_31 : 11;
+ uint32_t mdi_command : 1; /**< Performs an MDIO access. When set, this bit
+ self clears upon completion of the access. */
+ uint32_t reserved_18_19 : 2;
+ uint32_t op_code : 2; /**< MDIO Op Code
+ 00 = Reserved
+ 01 = Write Access
+ 10 = Read Access
+ 11 = Reserved */
+ uint32_t reserved_13_15 : 3;
+ uint32_t phy_address : 5; /**< Address of external PHY device */
+ uint32_t reserved_5_7 : 3;
+ uint32_t reg_address : 5; /**< Address of register within external PHY */
+ } s;
+} mdio_single_command_t;
+
+
+static CVMX_SHARED int interface_is_spi4000[2] = {0,0};
+
+
+/**
+ * @INTERNAL
+ * Write data to the specified SPI4000 address
+ *
+ * @param interface Interface the SPI4000 is on. (0 or 1)
+ * @param address Address to write to
+ * @param data Data to write
+ */
+static void __cvmx_spi4000_write(int interface, int address, uint32_t data)
+{
+ int status;
+ cvmx_twsix_write_ia(0, SPI4000_TWSI_ID(interface), SPI4000_WRITE_ADDRESS_HIGH, 2, 1, address);
+ cvmx_twsix_write_ia(0, SPI4000_TWSI_ID(interface), SPI4000_WRITE_DATA0, 4, 1, data);
+
+ status = cvmx_twsi_read8(SPI4000_TWSI_ID(interface), SPI4000_DO_WRITE);
+ while ((status == 5) || (status == 0xff))
+ status = cvmx_twsi_read8(SPI4000_TWSI_ID(interface), SPI4000_GET_WRITE_STATUS);
+
+ if (status != 4)
+ cvmx_dprintf("SPI4000: write failed with status=0x%x\n", status);
+}
+
+
+/**
+ * @INTERNAL
+ * Read data from the SPI4000.
+ *
+ * @param interface Interface the SPI4000 is on. (0 or 1)
+ * @param address Address to read from
+ *
+ * @return Value at the specified address
+ */
+static uint32_t __cvmx_spi4000_read(int interface, int address)
+{
+ int status;
+ uint64_t data;
+
+ cvmx_twsix_write_ia(0, SPI4000_TWSI_ID(interface), SPI4000_READ_ADDRESS_HIGH, 2, 1, address);
+
+ status = cvmx_twsi_read8(SPI4000_TWSI_ID(interface), SPI4000_DO_READ);
+ while ((status == 1) || (status == 0xff))
+ status = cvmx_twsi_read8(SPI4000_TWSI_ID(interface), SPI4000_GET_READ_STATUS);
+
+ if (status)
+ {
+ cvmx_dprintf("SPI4000: read failed with %d\n", status);
+ return 0;
+ }
+
+ status = cvmx_twsix_read_ia(0, SPI4000_TWSI_ID(interface), SPI4000_READ_DATA0, 4, 1, &data);
+ if (status != 4)
+ {
+ cvmx_dprintf("SPI4000: read failed with %d\n", status);
+ return 0;
+ }
+
+ return data;
+}
+
+
+/**
+ * @INTERNAL
+ * Write to a PHY using MDIO on the SPI4000
+ *
+ * @param interface Interface the SPI4000 is on. (0 or 1)
+ * @param port SPI4000 RGMII port to write to. (0-9)
+ * @param location MDIO register to write
+ * @param val Value to write
+ */
+static void __cvmx_spi4000_mdio_write(int interface, int port, int location, int val)
+{
+ static int last_value=-1;
+ mdio_single_command_t mdio;
+
+ mdio.u32 = 0;
+ mdio.s.mdi_command = 1;
+ mdio.s.op_code = 1;
+ mdio.s.phy_address = port;
+ mdio.s.reg_address = location;
+
+ /* Since the TWSI accesses are very slow, don't update the write value
+ if it is the same as the last value */
+ if (val != last_value)
+ {
+ last_value = val;
+ __cvmx_spi4000_write(interface, 0x0681, val);
+ }
+
+ __cvmx_spi4000_write(interface, 0x0680, mdio.u32);
+}
+
+
+/**
+ * @INTERNAL
+ * Read from a PHY using MDIO on the SPI4000
+ *
+ * @param interface Interface the SPI4000 is on. (0 or 1)
+ * @param port SPI4000 RGMII port to read from. (0-9)
+ * @param location MDIO register to read
+ * @return The MDI read result
+ */
+static int __cvmx_spi4000_mdio_read(int interface, int port, int location)
+{
+ mdio_single_command_t mdio;
+
+ mdio.u32 = 0;
+ mdio.s.mdi_command = 1;
+ mdio.s.op_code = 2;
+ mdio.s.phy_address = port;
+ mdio.s.reg_address = location;
+ __cvmx_spi4000_write(interface, 0x0680, mdio.u32);
+
+ do
+ {
+ mdio.u32 = __cvmx_spi4000_read(interface, 0x0680);
+ } while (mdio.s.mdi_command);
+
+ return __cvmx_spi4000_read(interface, 0x0681) >> 16;
+}
+
+
+/**
+ * @INTERNAL
+ * Configure the SPI4000 MACs
+ */
+static void __cvmx_spi4000_configure_mac(int interface)
+{
+ int port;
+ // IXF1010 configuration
+ // ---------------------
+ //
+ // Step 1: Apply soft reset to TxFIFO and MAC
+ // MAC soft reset register. address=0x505
+ // TxFIFO soft reset. address=0x620
+ __cvmx_spi4000_write(interface, 0x0505, 0x3ff); // reset all the MACs
+ __cvmx_spi4000_write(interface, 0x0620, 0x3ff); // reset the TX FIFOs
+
+ // Global address and Configuration Register. address=0x500
+ //
+ // Step 2: Apply soft reset to RxFIFO and SPI.
+ __cvmx_spi4000_write(interface, 0x059e, 0x3ff); // reset the RX FIFOs
+
+ // Step 3a: Take the MAC out of softreset
+ // MAC soft reset register. address=0x505
+ __cvmx_spi4000_write(interface, 0x0505, 0x0); // take the MACs out of reset
+
+ // Step 3b: De-assert port enables.
+ // Global address and Configuration Register. address=0x500
+ __cvmx_spi4000_write(interface, 0x0500, 0x0); // disable all ports
+
+ // Step 4: Assert Clock mode change En.
+ // Clock and interface mode Change En. address=Serdes base + 0x14
+ // Serdes (Serializer/de-serializer). address=0x780
+ // [Can't find this one]
+
+ for (port=0; port < 10; port++)
+ {
+ int port_offset = port << 7;
+
+ // Step 5: Set MAC interface mode GMII speed.
+ // MAC interface mode and RGMII speed register.
+ // address=port_index+0x10
+ //
+ // OUT port_index+0x10, 0x07 //RGMII 1000 Mbps operation.
+ __cvmx_spi4000_write(interface, port_offset | 0x0010, 0x3);
+
+ // Set the max packet size to 16383 bytes, including the CRC
+ __cvmx_spi4000_write(interface, port_offset | 0x000f, 0x3fff);
+
+ // Step 6: Change Interface to Copper mode
+ // Interface mode register. address=0x501
+ // [Can't find this]
+
+ // Step 7: MAC configuration
+ // Station address configuration.
+ // Source MAC address low register. Source MAC address 31-0.
+ // address=port_index+0x00
+ // Source MAC address high register. Source MAC address 47-32.
+ // address=port_index+0x01
+ // where Port index is 0x0 to 0x5.
+ // This address is inserted in the source address filed when
+ // transmitting pause frames, and is also used to compare against
+ // unicast pause frames at the receiving side.
+ //
+ // OUT port_index+0x00, source MAC address low.
+ __cvmx_spi4000_write(interface, port_offset | 0x0000, 0x0000);
+ // OUT port_index+0x01, source MAC address high.
+ __cvmx_spi4000_write(interface, port_offset | 0x0001, 0x0000);
+
+ // Step 8: Set desired duplex mode
+ // Desired duplex register. address=port_index+0x02
+ // [Reserved]
+
+ // Step 9: Other configuration.
+ // FC Enable Register. address=port_index+0x12
+ // Discard Unknown Control Frame. address=port_index+0x15
+ // Diverse config write register. address=port_index+0x18
+ // RX Packet Filter register. address=port_index+0x19
+ //
+ // Step 9a: Tx FD FC Enabled / Rx FD FC Enabled
+ if (CVMX_HELPER_DISABLE_SPI4000_BACKPRESSURE)
+ __cvmx_spi4000_write(interface, port_offset | 0x0012, 0);
+ else
+ __cvmx_spi4000_write(interface, port_offset | 0x0012, 0x7);
+
+ // Step 9b: Discard unknown control frames
+ __cvmx_spi4000_write(interface, port_offset | 0x0015, 0x1);
+
+ // Step 9c: Enable auto-CRC and auto-padding
+ __cvmx_spi4000_write(interface, port_offset | 0x0018, 0x11cd); //??
+
+ // Step 9d: Drop bad CRC / Drop Pause / No DAF
+ __cvmx_spi4000_write(interface, port_offset | 0x0019, 0x00);
+ }
+
+ // Step 9d: Drop frames
+ __cvmx_spi4000_write(interface, 0x059f, 0x03ff);
+
+ for (port=0; port < 10; port++)
+ {
+ // Step 9e: Set the TX FIFO marks
+ __cvmx_spi4000_write(interface, port + 0x0600, 0x0900); // TXFIFO High watermark
+ __cvmx_spi4000_write(interface, port + 0x060a, 0x0800); // TXFIFO Low watermark
+ __cvmx_spi4000_write(interface, port + 0x0614, 0x0380); // TXFIFO threshold
+ }
+
+ // Step 12: De-assert RxFIFO and SPI Rx/Tx reset
+ __cvmx_spi4000_write(interface, 0x059e, 0x0); // take the RX FIFOs out of reset
+
+ // Step 13: De-assert TxFIFO and MAC reset
+ __cvmx_spi4000_write(interface, 0x0620, 0x0); // take the TX FIFOs out of reset
+
+ // Step 14: Assert port enable
+ // Global address and Configuration Register. address=0x500
+ __cvmx_spi4000_write(interface, 0x0500, 0x03ff); // enable all ports
+
+ // Step 15: Disable loopback
+ // [Can't find this one]
+}
+
+
+/**
+ * @INTERNAL
+ * Configure the SPI4000 PHYs
+ */
+static void __cvmx_spi4000_configure_phy(int interface)
+{
+ int port;
+
+ /* We use separate loops below since it allows us to save a write
+ to the SPI4000 for each repeated value. This adds up to a couple
+ of seconds */
+
+ /* Update the link state before resets. It takes a while for the links to
+ come back after the resets. Most likely they'll come back the same as
+ they are now */
+ for (port=0; port < 10; port++)
+ cvmx_spi4000_check_speed(interface, port);
+ /* Enable RGMII DELAYS for TX_CLK and RX_CLK (see spec) */
+ for (port=0; port < 10; port++)
+ __cvmx_spi4000_mdio_write(interface, port, 0x14, 0x00e2);
+ /* Advertise pause and 100 Mbps full duplex. Don't advertise half duplex or 10 Mbps */
+ for (port=0; port < 10; port++)
+ __cvmx_spi4000_mdio_write(interface, port, 0x4, 0x0d01);
+ /* Soft-reset the PHYs (the 0x9140 write to register 0 also re-enables autonegotiation) */
+ for (port=0; port < 10; port++)
+ __cvmx_spi4000_mdio_write(interface, port, 0x0, 0x9140);
+}
+
+
+/**
+ * Poll one SPI4000 port and check its speed
+ *
+ * @param interface Interface the SPI4000 is on
+ * @param port Port to poll (0-9)
+ * @return Status of the port. 0=down. Any other value means the port is up.
+ */
+cvmx_gmxx_rxx_rx_inbnd_t cvmx_spi4000_check_speed(int interface, int port)
+{
+ static int phy_status[10] = {0,};
+ cvmx_gmxx_rxx_rx_inbnd_t link;
+ int read_status;
+
+ link.u64 = 0;
+
+ if (!interface_is_spi4000[interface])
+ return link;
+ if (port>=10)
+ return link;
+
+    /* Register 0x11: PHY Specific Status Register
+       Bit(s)    Function              Setting                       Mode    HW Rst  SW Rst  Notes
+       17.15:14  Speed                 11 = Reserved                 RO      00      Retain  17.a
+                                       10 = 1000 Mbps
+                                       01 = 100 Mbps
+                                       00 = 10 Mbps
+       17.13     Duplex                1 = Full-duplex               RO      0       Retain  17.a
+                                       0 = Half-duplex
+       17.12     Page Received         1 = Page received             RO, LH  0       0
+                                       0 = Page not received
+       17.11     Speed and Duplex      1 = Resolved                  RO      0       0       17.a
+                 Resolved              0 = Not resolved
+       17.10     Link (real time)      1 = Link up                   RO      0       0
+                                       0 = Link down
+       17.9:7    Cable Length          000 = < 50m                   RO      000     000     17.b
+                 (100/1000 modes only) 001 = 50 - 80m
+                                       010 = 80 - 110m
+                                       011 = 110 - 140m
+                                       100 = > 140m
+       17.6      MDI Crossover Status  1 = MDIX                      RO      0       0       17.a
+                                       0 = MDI
+       17.5      Downshift Status      1 = Downshift                 RO      0       0
+                                       0 = No Downshift
+       17.4      Energy Detect Status  1 = Sleep                     RO      0       0
+                                       0 = Active
+       17.3      Transmit Pause        1 = Transmit pause enabled    RO      0       0       17.a, 17.c
+                 Enabled               0 = Transmit pause disabled
+       17.2      Receive Pause         1 = Receive pause enabled     RO      0       0       17.a, 17.c
+                 Enabled               0 = Receive pause disabled
+       17.1      Polarity (real time)  1 = Reversed                  RO      0       0
+                                       0 = Normal
+       17.0      Jabber (real time)    1 = Jabber                    RO      0       Retain
+                                       0 = No jabber
+    */
+ read_status = __cvmx_spi4000_mdio_read(interface, port, 0x11);
+ if ((read_status & (1<<10)) == 0)
+ read_status = 0; /* If the link is down, force zero */
+ else
+ read_status &= 0xe400; /* Strip off all the don't care bits */
+ if (read_status != phy_status[port])
+ {
+ phy_status[port] = read_status;
+ if (read_status & (1<<10))
+ {
+ /* If the link is up, we need to set the speed based on the PHY status */
+ if (read_status & (1<<15))
+ __cvmx_spi4000_write(interface, (port<<7) | 0x0010, 0x3); /* 1Gbps */
+ else
+ __cvmx_spi4000_write(interface, (port<<7) | 0x0010, 0x1); /* 100Mbps */
+ }
+ else
+ {
+ /* If the link is down, force 1Gbps so TX traffic dumps fast */
+ __cvmx_spi4000_write(interface, (port<<7) | 0x0010, 0x3); /* 1Gbps */
+ }
+ }
+
+ if (read_status & (1<<10))
+ {
+ link.s.status = 1; /* Link up */
+ if (read_status & (1<<15))
+ link.s.speed = 2;
+ else
+ link.s.speed = 1;
+ }
+ else
+ {
+ link.s.speed = 2; /* Use 1Gbps when down */
+ link.s.status = 0; /* Link Down */
+ }
+ link.s.duplex = ((read_status & (1<<13)) != 0);
+
+ return link;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_spi4000_check_speed);
+#endif
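
A caller can poll each port and act on the decoded in-band status fields; a
minimal sketch (the wrapper name is hypothetical; cvmx_dprintf is the SDK
debug printf used throughout this file):

    /* Sketch: log link state for all ten SPI4000 ports on one interface. */
    void example_poll_spi4000_links(int interface)
    {
        int port;
        for (port = 0; port < 10; port++)
        {
            cvmx_gmxx_rxx_rx_inbnd_t link = cvmx_spi4000_check_speed(interface, port);
            if (link.s.status)
                cvmx_dprintf("port %d up, speed code %d, %s duplex\n",
                             port, (int)link.s.speed,
                             link.s.duplex ? "full" : "half");
        }
    }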
+
+
+/**
+ * Return non-zero if the SPI interface has a SPI4000 attached
+ *
+ * @param interface SPI interface the SPI4000 is connected to
+ *
+ * @return Non-zero if a SPI4000 is present, zero otherwise
+ */
+int cvmx_spi4000_is_present(int interface)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ return 0;
+ // Check for the presence of a SPI4000. If it isn't there,
+ // these writes will time out.
+ if (cvmx_twsi_write8(SPI4000_TWSI_ID(interface), SPI4000_WRITE_ADDRESS_HIGH, 0))
+ return 0;
+ if (cvmx_twsi_write8(SPI4000_TWSI_ID(interface), SPI4000_WRITE_ADDRESS_LOW, 0))
+ return 0;
+ interface_is_spi4000[interface] = 1;
+ return 1;
+}
+
+
+/**
+ * Initialize the SPI4000 for use
+ *
+ * @param interface SPI interface the SPI4000 is connected to
+ */
+int cvmx_spi4000_initialize(int interface)
+{
+ if (!cvmx_spi4000_is_present(interface))
+ return -1;
+
+ __cvmx_spi4000_configure_mac(interface);
+ __cvmx_spi4000_configure_phy(interface);
+ return 0;
+}
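
Taken together, the two public entry points give the whole bring-up flow:
cvmx_spi4000_initialize() already calls cvmx_spi4000_is_present() and returns
-1 when no card answers on the TWSI bus. A hedged sketch of a board-support
probe loop (the function name is hypothetical):

    /* Sketch: probe both SPI interfaces for SPI4000 daughtercards. */
    void example_spi4000_bringup(void)
    {
        int interface;
        for (interface = 0; interface < 2; interface++)
        {
            if (cvmx_spi4000_initialize(interface) == 0)
                cvmx_dprintf("SPI4000 on interface %d initialized\n", interface);
        }
    }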
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-spi4000.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-spinlock.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-spinlock.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-spinlock.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,433 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Implementation of spinlocks.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+
+#ifndef __CVMX_SPINLOCK_H__
+#define __CVMX_SPINLOCK_H__
+
+#include "cvmx-asm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Spinlocks for Octeon */
+
+
+// define these to enable recursive spinlock debugging
+//#define CVMX_SPINLOCK_DEBUG
+
+
+/**
+ * Spinlocks for Octeon
+ */
+typedef struct {
+ volatile uint32_t value;
+} cvmx_spinlock_t;
+
+// note - macros not expanded in inline ASM, so values hardcoded
+#define CVMX_SPINLOCK_UNLOCKED_VAL 0
+#define CVMX_SPINLOCK_LOCKED_VAL 1
+
+
+#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER {CVMX_SPINLOCK_UNLOCKED_VAL}
+
+
+/**
+ * Initialize a spinlock
+ *
+ * @param lock Lock to initialize
+ */
+static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock)
+{
+ lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
+}
+
+
+/**
+ * Return non-zero if the spinlock is currently locked
+ *
+ * @param lock Lock to check
+ * @return Non-zero if locked
+ */
+static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock)
+{
+ return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
+}
+
+
+/**
+ * Releases lock
+ *
+ * @param lock pointer to lock structure
+ */
+static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
+{
+ CVMX_SYNCWS;
+ lock->value = 0;
+ CVMX_SYNCWS;
+}
+
+
+/**
+ * Attempts to take the lock, but does not spin if lock is not available.
+ * May take some time to acquire the lock even if it is available
+ * due to the ll/sc not succeeding.
+ *
+ * @param lock pointer to lock structure
+ *
+ * @return 0: lock successfully taken
+ * 1: lock not taken, held by someone else
+ * These return values match the Linux semantics.
+ */
+
+static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[tmp], %[val] \n"
+ " bnez %[tmp], 2f \n" // if lock held, fail immediately
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set reorder \n"
+ : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
+ :
+ : "memory");
+
+ return (!!tmp); /* normalize to 0 or 1 */
+}
+
+/**
+ * Gets lock, spins until lock is taken
+ *
+ * @param lock pointer to lock structure
+ */
+static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[tmp], %[val] \n"
+ " bnez %[tmp], 1b \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
+ :
+ : "memory");
+
+}
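
Usage is the usual lock/unlock bracket, with trylock returning the
Linux-style 0-on-success described above. A minimal sketch (the shared
counter and function names are hypothetical):

    static CVMX_SHARED cvmx_spinlock_t counter_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
    static CVMX_SHARED uint64_t shared_counter = 0;

    static inline void example_increment(void)
    {
        cvmx_spinlock_lock(&counter_lock);      /* spins until acquired */
        shared_counter++;
        cvmx_spinlock_unlock(&counter_lock);
    }

    static inline int example_try_increment(void)
    {
        if (cvmx_spinlock_trylock(&counter_lock) != 0)
            return -1;                          /* 1 means held by someone else */
        shared_counter++;
        cvmx_spinlock_unlock(&counter_lock);
        return 0;
    }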
+
+
+
+/** ********************************************************************
+ * Bit spinlocks
+ * These spinlocks use a single bit (bit 31) of a 32 bit word for locking.
+ * The rest of the bits in the word are left undisturbed. This enables more
+ * compact data structures as only 1 bit is consumed for the lock.
+ *
+ */
+
+/**
+ * Gets lock, spins until lock is taken
+ * Preserves the low 31 bits of the 32 bit
+ * word used for the lock.
+ *
+ *
+ * @param word word to lock bit 31 of
+ */
+static inline void cvmx_spinlock_bit_lock(uint32_t *word)
+{
+ unsigned int tmp;
+ unsigned int sav;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ ".set noat \n"
+ "1: ll %[tmp], %[val] \n"
+ " bbit1 %[tmp], 31, 1b \n"
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set at \n"
+ ".set reorder \n"
+ : [val] "+m" (*word), [tmp] "=&r" (tmp), [sav] "=&r" (sav)
+ :
+ : "memory");
+
+}
+
+/**
+ * Attempts to get lock, returns immediately with success/failure
+ * Preserves the low 31 bits of the 32 bit
+ * word used for the lock.
+ *
+ *
+ * @param word word to lock bit 31 of
+ * @return 0: lock successfully taken
+ * 1: lock not taken, held by someone else
+ * These return values match the Linux semantics.
+ */
+static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ ".set noat \n"
+ "1: ll %[tmp], %[val] \n"
+ " bbit1 %[tmp], 31, 2f \n" // if lock held, fail immediately
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set at \n"
+ ".set reorder \n"
+ : [val] "+m" (*word), [tmp] "=&r" (tmp)
+ :
+ : "memory");
+
+ return (!!tmp); /* normalize to 0 or 1 */
+}
+/**
+ * Releases bit lock
+ *
+ * Unconditionally clears bit 31 of the lock word. Note that this is
+ * done non-atomically, as this implementation assumes that the rest
+ * of the bits in the word are protected by the lock.
+ *
+ * @param word word to unlock bit 31 in
+ */
+static inline void cvmx_spinlock_bit_unlock(uint32_t *word)
+{
+ CVMX_SYNCWS;
+ *word &= ~(1UL << 31) ;
+ CVMX_SYNCWS;
+}
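
Because only bit 31 is consumed, the remaining 31 bits can carry the very
state the lock protects. A sketch under that assumption (the packed counter
is hypothetical):

    static CVMX_SHARED uint32_t locked_count = 0;   /* bit 31 = lock, bits 30:0 = count */

    static inline void example_bit_locked_increment(void)
    {
        uint32_t count;
        cvmx_spinlock_bit_lock(&locked_count);
        count = locked_count & 0x7fffffff;          /* strip the lock bit */
        count = (count + 1) & 0x7fffffff;           /* stay within 31 bits */
        locked_count = (1UL << 31) | count;         /* keep the lock bit asserted */
        cvmx_spinlock_bit_unlock(&locked_count);    /* clears bit 31 */
    }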
+
+
+
+/** ********************************************************************
+ * Recursive spinlocks
+ */
+typedef struct {
+ volatile unsigned int value;
+ volatile unsigned int core_num;
+} cvmx_spinlock_rec_t;
+
+
+/**
+ * Initialize a recursive spinlock
+ *
+ * @param lock Lock to initialize
+ */
+static inline void cvmx_spinlock_rec_init(cvmx_spinlock_rec_t *lock)
+{
+ lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
+}
+
+
+/**
+ * Return non-zero if the recursive spinlock is currently locked
+ *
+ * @param lock Lock to check
+ * @return Non-zero if locked
+ */
+static inline int cvmx_spinlock_rec_locked(cvmx_spinlock_rec_t *lock)
+{
+ return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
+}
+
+
+/**
+* Unlocks one level of recursive spinlock. Lock is not unlocked
+* unless this is the final unlock call for that spinlock
+*
+* @param lock ptr to recursive spinlock structure
+*/
+static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock);
+
+#ifdef CVMX_SPINLOCK_DEBUG
+#define cvmx_spinlock_rec_unlock(x) _int_cvmx_spinlock_rec_unlock((x), __FILE__, __LINE__)
+static inline void _int_cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock, char *filename, int linenum)
+#else
+static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock)
+#endif
+{
+
+ unsigned int temp, result;
+ int core_num;
+ core_num = cvmx_get_core_num();
+
+#ifdef CVMX_SPINLOCK_DEBUG
+ {
+ if (lock->core_num != core_num)
+ {
+ cvmx_dprintf("ERROR: Recursive spinlock release attemped by non-owner! file: %s, line: %d\n", filename, linenum);
+ return;
+ }
+ }
+#endif
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ " addi %[tmp], %[pid], 0x80 \n"
+ " sw %[tmp], %[lid] # set lid to invalid value\n"
+ CVMX_SYNCWS_STR
+ "1: ll %[tmp], %[val] \n"
+ " addu %[res], %[tmp], -1 # decrement lock count\n"
+ " sc %[res], %[val] \n"
+ " beqz %[res], 1b \n"
+ " nop \n"
+ " beq %[tmp], %[res], 2f # res is 1 on successful sc \n"
+ " nop \n"
+ " sw %[pid], %[lid] # set lid to pid, only if lock still held\n"
+ "2: \n"
+ CVMX_SYNCWS_STR
+ ".set reorder \n"
+ : [res] "=&r" (result), [tmp] "=&r" (temp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
+ : [pid] "r" (core_num)
+ : "memory");
+
+
+#ifdef CVMX_SPINLOCK_DEBUG
+ {
+ if (lock->value == ~0UL)
+ {
+ cvmx_dprintf("ERROR: Recursive spinlock released too many times! file: %s, line: %d\n", filename, linenum);
+ }
+ }
+#endif
+
+
+}
+
+/**
+ * Takes recursive spinlock for a given core. A core can take the lock multiple
+ * times, and the lock is released only when the corresponding number of
+ * unlocks have taken place.
+ *
+ * NOTE: This assumes only one thread per core, and that the core ID is used as
+ * the lock 'key'. (This implementation cannot be generalized to allow
+ * multiple threads to use the same key (core id).)
+ *
+ * @param lock address of recursive spinlock structure. Note that this is
+ * distinct from the standard spinlock
+ */
+static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock);
+
+#ifdef CVMX_SPINLOCK_DEBUG
+#define cvmx_spinlock_rec_lock(x) _int_cvmx_spinlock_rec_lock((x), __FILE__, __LINE__)
+static inline void _int_cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock, char *filename, int linenum)
+#else
+static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock)
+#endif
+{
+
+
+ volatile unsigned int tmp;
+ volatile int core_num;
+
+ core_num = cvmx_get_core_num();
+
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[tmp], %[val] # load the count\n"
+ " bnez %[tmp], 2f # if count!=zero branch to 2\n"
+ " addu %[tmp], %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b # go back if not success\n"
+ " nop \n"
+ " j 3f # go to write core_num \n"
+ "2: lw %[tmp], %[lid] # load the core_num \n"
+ " bne %[tmp], %[pid], 1b # core_num no match, restart\n"
+ " nop \n"
+ " lw %[tmp], %[val] \n"
+ " addu %[tmp], %[tmp], 1 \n"
+ " sw %[tmp], %[val] # update the count\n"
+ "3: sw %[pid], %[lid] # store the core_num\n"
+ CVMX_SYNCWS_STR
+ ".set reorder \n"
+ : [tmp] "=&r" (tmp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
+ : [pid] "r" (core_num)
+ : "memory");
+
+#ifdef CVMX_SPINLOCK_DEBUG
+ if (lock->core_num != core_num)
+ {
+ cvmx_dprintf("cvmx_spinlock_rec_lock: lock taken, but core_num is incorrect. file: %s, line: %d\n", filename, linenum);
+ }
+#endif
+
+
+}
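
The recursion makes the lock safe to take from helpers that may be reached
either directly or with the lock already held by the same core. A
hypothetical sketch:

    static CVMX_SHARED cvmx_spinlock_rec_t table_lock;  /* cvmx_spinlock_rec_init() at startup */

    static void example_update_entry(int index)
    {
        cvmx_spinlock_rec_lock(&table_lock);    /* fine if this core already holds it */
        /* ... modify table entry 'index' ... */
        cvmx_spinlock_rec_unlock(&table_lock);
    }

    static void example_update_all(void)
    {
        int i;
        cvmx_spinlock_rec_lock(&table_lock);    /* outer hold */
        for (i = 0; i < 16; i++)
            example_update_entry(i);            /* nested lock/unlock, freed at depth 0 */
        cvmx_spinlock_rec_unlock(&table_lock);
    }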
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_SPINLOCK_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-spinlock.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-spx0-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-spx0-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-spx0-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,117 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-spx0-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon spx0.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_SPX0_DEFS_H__
+#define __CVMX_SPX0_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SPX0_PLL_BW_CTL CVMX_SPX0_PLL_BW_CTL_FUNC()
+static inline uint64_t CVMX_SPX0_PLL_BW_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX)))
+ cvmx_warn("CVMX_SPX0_PLL_BW_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180090000388ull);
+}
+#else
+#define CVMX_SPX0_PLL_BW_CTL (CVMX_ADD_IO_SEG(0x0001180090000388ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SPX0_PLL_SETTING CVMX_SPX0_PLL_SETTING_FUNC()
+static inline uint64_t CVMX_SPX0_PLL_SETTING_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX)))
+ cvmx_warn("CVMX_SPX0_PLL_SETTING not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180090000380ull);
+}
+#else
+#define CVMX_SPX0_PLL_SETTING (CVMX_ADD_IO_SEG(0x0001180090000380ull))
+#endif
+
+/**
+ * cvmx_spx0_pll_bw_ctl
+ */
+union cvmx_spx0_pll_bw_ctl {
+ uint64_t u64;
+ struct cvmx_spx0_pll_bw_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t bw_ctl : 5; /**< Core PLL bandwidth control */
+#else
+ uint64_t bw_ctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_spx0_pll_bw_ctl_s cn38xx;
+ struct cvmx_spx0_pll_bw_ctl_s cn38xxp2;
+};
+typedef union cvmx_spx0_pll_bw_ctl cvmx_spx0_pll_bw_ctl_t;
+
+/**
+ * cvmx_spx0_pll_setting
+ */
+union cvmx_spx0_pll_setting {
+ uint64_t u64;
+ struct cvmx_spx0_pll_setting_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t setting : 17; /**< Core PLL setting */
+#else
+ uint64_t setting : 17;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_spx0_pll_setting_s cn38xx;
+ struct cvmx_spx0_pll_setting_s cn38xxp2;
+};
+typedef union cvmx_spx0_pll_setting cvmx_spx0_pll_setting_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-spx0-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-spxx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-spxx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-spxx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1407 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-spxx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon spxx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_SPXX_DEFS_H__
+#define __CVMX_SPXX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_BCKPRS_CNT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_BCKPRS_CNT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000340ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_BCKPRS_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000340ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_BIST_STAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_BIST_STAT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800900007F8ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_BIST_STAT(block_id) (CVMX_ADD_IO_SEG(0x00011800900007F8ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_CLK_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_CLK_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000348ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_CLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000348ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_CLK_STAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_CLK_STAT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000350ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_CLK_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000350ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_DBG_DESKEW_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_DBG_DESKEW_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000368ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_DBG_DESKEW_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000368ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_DBG_DESKEW_STATE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_DBG_DESKEW_STATE(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000370ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_DBG_DESKEW_STATE(block_id) (CVMX_ADD_IO_SEG(0x0001180090000370ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_DRV_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_DRV_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000358ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_DRV_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000358ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_ERR_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_ERR_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000320ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_ERR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000320ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_INT_DAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_INT_DAT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000318ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_INT_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000318ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_INT_MSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_INT_MSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000308ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_INT_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180090000308ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_INT_REG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_INT_REG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000300ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180090000300ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_INT_SYNC(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_INT_SYNC(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000310ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_INT_SYNC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000310ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_TPA_ACC(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_TPA_ACC(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000338ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_TPA_ACC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000338ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_TPA_MAX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_TPA_MAX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000330ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_TPA_MAX(block_id) (CVMX_ADD_IO_SEG(0x0001180090000330ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_TPA_SEL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_TPA_SEL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_TPA_SEL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SPXX_TRN4_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SPXX_TRN4_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SPXX_TRN4_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
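
These per-block address macros are meant to be handed to the SDK's generic
64-bit CSR accessors. A minimal sketch, assuming the usual cvmx_read_csr()/
cvmx_write_csr() from cvmx.h and the common write-1-to-clear behavior of the
interrupt register (an assumption here):

    /* Sketch: acknowledge all pending SPX interrupts on one block. */
    static void example_spx_ack_interrupts(int block)
    {
        uint64_t pending = cvmx_read_csr(CVMX_SPXX_INT_REG(block));
        cvmx_write_csr(CVMX_SPXX_INT_REG(block), pending);  /* W1C, assumption */
    }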
+
+/**
+ * cvmx_spx#_bckprs_cnt
+ */
+union cvmx_spxx_bckprs_cnt {
+ uint64_t u64;
+ struct cvmx_spxx_bckprs_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Counts the number of core clock cycles in which
+ the SPI-4.2 receiver receives data once the TPA
+ for a particular port has been deasserted. The
+ desired port to watch can be selected with the
+ SPX_TPA_SEL[PRTSEL] field. CNT can be cleared by
+ writing all 1s to it. */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_spxx_bckprs_cnt_s cn38xx;
+ struct cvmx_spxx_bckprs_cnt_s cn38xxp2;
+ struct cvmx_spxx_bckprs_cnt_s cn58xx;
+ struct cvmx_spxx_bckprs_cnt_s cn58xxp1;
+};
+typedef union cvmx_spxx_bckprs_cnt cvmx_spxx_bckprs_cnt_t;
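
Per the CNT field note, the counter is cleared by writing all ones; a sketch
(function name hypothetical, CSR accessors as assumed above):

    /* Sketch: sample, report, and reset the backpressure cycle counter. */
    static void example_sample_backpressure(int block)
    {
        cvmx_spxx_bckprs_cnt_t bp;
        bp.u64 = cvmx_read_csr(CVMX_SPXX_BCKPRS_CNT(block));
        cvmx_dprintf("SPI%d backpressured for %u core clocks\n",
                     block, (unsigned)bp.s.cnt);
        cvmx_write_csr(CVMX_SPXX_BCKPRS_CNT(block), 0xffffffffull); /* all 1s clears CNT */
    }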
+
+/**
+ * cvmx_spx#_bist_stat
+ *
+ * Notes:
+ * Bist results encoding
+ * - 0: good (or bist in progress/never run)
+ * - 1: bad
+ */
+union cvmx_spxx_bist_stat {
+ uint64_t u64;
+ struct cvmx_spxx_bist_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t stat2 : 1; /**< Bist Results/No Repair (Tx calendar table)
+ (spx.stx.cal.calendar) */
+ uint64_t stat1 : 1; /**< Bist Results/No Repair (Rx calendar table)
+ (spx.srx.spi4.cal.calendar) */
+ uint64_t stat0 : 1; /**< Bist Results/No Repair (Spi4 receive datapath FIFO)
+ (spx.srx.spi4.dat.dpr) */
+#else
+ uint64_t stat0 : 1;
+ uint64_t stat1 : 1;
+ uint64_t stat2 : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_spxx_bist_stat_s cn38xx;
+ struct cvmx_spxx_bist_stat_s cn38xxp2;
+ struct cvmx_spxx_bist_stat_s cn58xx;
+ struct cvmx_spxx_bist_stat_s cn58xxp1;
+};
+typedef union cvmx_spxx_bist_stat cvmx_spxx_bist_stat_t;
+
+/**
+ * cvmx_spx#_clk_ctl
+ *
+ * Notes:
+ * * SRXDLCK
+ * When asserted, this bit locks the Spi4 receive DLLs. This bit also
+ * acts as the Spi4 receiver reset and must be asserted before the
+ * training sequences are used to initialize the interface. This bit
+ * only applies to the receiver interface.
+ *
+ * * RCVTRN
+ * Once the SRXDLCK bit is asserted and the DLLs have locked and the
+ * system has been programmed, software should assert this bit in order
+ * to start looking for valid training sequence and synchronize the
+ * interface. This bit only applies to the receiver interface.
+ *
+ * * DRPTRN
+ * The Spi4 receiver can either convert training packets into NOPs or
+ * drop them entirely. Dropping ticks allows the interface to deskew
+ * periodically if the dclk and eclk ratios are close. This bit only
+ * applies to the receiver interface.
+ *
+ * * SNDTRN
+ * When software sets this bit, it indicates that the Spi4 transmit
+ * interface has been set up and has seen the calendar status. Once the
+ * transmitter begins sending training data, the receiving device is free
+ * to start traversing the calendar table to sync the link.
+ *
+ * * STATRCV
+ * This bit determines which status clock edge to sample the status
+ * channel in Spi4 mode. Since the status channel is in the opposite
+ * direction to the datapath, the STATRCV actually affects the
+ * transmitter/TX block.
+ *
+ * * STATDRV
+ * This bit determines which status clock edge to drive the status
+ * channel in Spi4 mode. Since the status channel is in the opposite
+ * direction to the datapath, the STATDRV actually affects the
+ * receiver/RX block.
+ *
+ * * RUNBIST
+ * RUNBIST will begin BIST/BISR in all the SPX compiled memories.
+ * These memories are...
+ *
+ * * spx.srx.spi4.dat.dpr // FIFO Spi4 to IMX
+ * * spx.stx.cal.calendar // Spi4 TX calendar table
+ * * spx.srx.spi4.cal.calendar // Spi4 RX calendar table
+ *
+ * RUNBIST must never be asserted when the interface is enabled.
+ * Furthermore, setting RUNBIST at any other time is destructive and can
+ * cause data and configuration corruption. The entire interface must be
+ * reconfigured when this bit is set.
+ *
+ * * CLKDLY
+ * CLKDLY should be kept at its reset value during normal operation. This
+ * register controls the SPI4.2 static clock positioning which normally only is
+ * set to the non-reset value in quarter clocking schemes. In this mode, the
+ * delay window is not large enough for slow clock freq, therefore clock and
+ * data must be statically positioned with CSRs. By changing the clock position
+ * relative to the data bits, we give the system a wider window.
+ *
+ * * SEETRN
+ * In systems in which no training data is sent to N2 or N2 cannot
+ * correctly sample the training data, software may pulse this bit by
+ * writing a '1' followed by a '0' in order to correctly set the
+ * receivers state. The receive data bus should be idle at this time
+ * (only NOPs on the bus). If N2 cannot see at least one training
+ * sequence, the data bus will not send any data to the core. The
+ * interface will hang.
+ */
+union cvmx_spxx_clk_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_clk_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t seetrn : 1; /**< Force the Spi4 receiver into seeing a training
+ sequence */
+ uint64_t reserved_12_15 : 4;
+ uint64_t clkdly : 5; /**< Set the spx__clkdly lines to this value to
+ control the delay on the incoming dclk
+ (spx__clkdly) */
+ uint64_t runbist : 1; /**< Write this bit to begin BIST testing in SPX */
+ uint64_t statdrv : 1; /**< Spi4 status channel drive mode
+ - 1: Drive STAT on posedge of SCLK
+ - 0: Drive STAT on negedge of SCLK */
+ uint64_t statrcv : 1; /**< Spi4 status channel sample mode
+ - 1: Sample STAT on posedge of SCLK
+ - 0: Sample STAT on negedge of SCLK */
+ uint64_t sndtrn : 1; /**< Start sending training patterns on the Spi4
+ Tx Interface */
+ uint64_t drptrn : 1; /**< Drop blocks of training packets */
+ uint64_t rcvtrn : 1; /**< Write this bit once the DLL is locked to sync
+ on the training sequence */
+ uint64_t srxdlck : 1; /**< Write this bit to lock the Spi4 receive DLL */
+#else
+ uint64_t srxdlck : 1;
+ uint64_t rcvtrn : 1;
+ uint64_t drptrn : 1;
+ uint64_t sndtrn : 1;
+ uint64_t statrcv : 1;
+ uint64_t statdrv : 1;
+ uint64_t runbist : 1;
+ uint64_t clkdly : 5;
+ uint64_t reserved_12_15 : 4;
+ uint64_t seetrn : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_spxx_clk_ctl_s cn38xx;
+ struct cvmx_spxx_clk_ctl_s cn38xxp2;
+ struct cvmx_spxx_clk_ctl_s cn58xx;
+ struct cvmx_spxx_clk_ctl_s cn58xxp1;
+};
+typedef union cvmx_spxx_clk_ctl cvmx_spxx_clk_ctl_t;
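
The notes above imply an ordering on the receive side: SRXDLCK must be
asserted (DLL lock / RX reset) before RCVTRN starts the hunt for training
sequences. A hedged sketch of that ordering only, with DLL-lock polling and
the rest of the interface programming elided:

    /* Sketch: Spi4 RX bring-up order implied by the SRXDLCK/RCVTRN notes. */
    static void example_spx_rx_bringup(int block)
    {
        cvmx_spxx_clk_ctl_t clk;

        clk.u64 = cvmx_read_csr(CVMX_SPXX_CLK_CTL(block));
        clk.s.srxdlck = 1;                  /* lock the receive DLLs / reset RX */
        cvmx_write_csr(CVMX_SPXX_CLK_CTL(block), clk.u64);

        /* ... wait for DLL lock and program the interface here ... */

        clk.s.rcvtrn = 1;                   /* start syncing on training sequences */
        cvmx_write_csr(CVMX_SPXX_CLK_CTL(block), clk.u64);
    }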
+
+/**
+ * cvmx_spx#_clk_stat
+ */
+union cvmx_spxx_clk_stat {
+ uint64_t u64;
+ struct cvmx_spxx_clk_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63 : 53;
+ uint64_t stxcal : 1; /**< The transition from Sync to Calendar on status
+ channel */
+ uint64_t reserved_9_9 : 1;
+ uint64_t srxtrn : 1; /**< Saw a good data training sequence */
+ uint64_t s4clk1 : 1; /**< Saw '1' on Spi4 transmit status forward clk input */
+ uint64_t s4clk0 : 1; /**< Saw '0' on Spi4 transmit status forward clk input */
+ uint64_t d4clk1 : 1; /**< Saw '1' on Spi4 receive data forward clk input */
+ uint64_t d4clk0 : 1; /**< Saw '0' on Spi4 receive data forward clk input */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t d4clk0 : 1;
+ uint64_t d4clk1 : 1;
+ uint64_t s4clk0 : 1;
+ uint64_t s4clk1 : 1;
+ uint64_t srxtrn : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t stxcal : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_spxx_clk_stat_s cn38xx;
+ struct cvmx_spxx_clk_stat_s cn38xxp2;
+ struct cvmx_spxx_clk_stat_s cn58xx;
+ struct cvmx_spxx_clk_stat_s cn58xxp1;
+};
+typedef union cvmx_spxx_clk_stat cvmx_spxx_clk_stat_t;
+
+/**
+ * cvmx_spx#_dbg_deskew_ctl
+ *
+ * Notes:
+ * These bits are meant as a backdoor to control Spi4 per-bit deskew. See
+ * that Spec for more details.
+ *
+ * The basic idea is to allow software to disable the auto-deskew widgets
+ * and make any adjustments by hand. These steps should only be taken
+ * once the RCVTRN bit is set and before any real traffic is sent on the
+ * Spi4 bus. Great care should be taken when messing with these bits as
+ * improper programming can cause catastrophic or intermittent problems.
+ *
+ * The params we have to test are the MUX tap selects and the XCV delay
+ * tap selects.
+ *
+ * For the muxes, we can set each tap to a random value and then read
+ * back the taps. To write...
+ *
+ * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set
+ * SPXX_DBG_DESKEW_CTL[OFFSET] = mux tap value (2-bits)
+ * SPXX_DBG_DESKEW_CTL[MUX] = go bit
+ *
+ * Notice this can all happen with a single CSR write. To read, first
+ * set the bit you want to look at with the SPXX_DBG_DESKEW_CTL[BITSEL], then
+ * simply read SPXX_DBG_DESKEW_STATE[MUXSEL]...
+ *
+ * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set
+ * SPXX_DBG_DESKEW_STATE[MUXSEL] = 2-bit value
+ *
+ * For the xcv delay taps, the CSR controls increment and decrement the
+ * 5-bit count value in the XCV. This is a saturating counter, so it
+ * will not wrap when decrementing below zero or incrementing above 31.
+ *
+ * To write...
+ *
+ * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set
+ * SPXX_DBG_DESKEW_CTL[OFFSET] = tap value increment or decrement amount (5-bits)
+ * SPXX_DBG_DESKEW_CTL[INC|DEC] = go bit
+ *
+ * These values are copied in SPX, so that they can be read back by
+ * software by a similar mechanism to the MUX selects...
+ *
+ * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set
+ * SPXX_DBG_DESKEW_STATE[OFFSET] = 5-bit value
+ *
+ * In addition, there is a reset bit that sets all the state back to the
+ * default/starting value of 0x10.
+ *
+ * SPXX_DBG_DESKEW_CTL[CLRDLY] = 1
+ *
+ * SINGLE STEP TRAINING MODE (WILMA)
+ * Debug feature that will enable the user to single-step the debug
+ * logic to watch initial movement and trends by putting the training
+ * machine in single step mode.
+ *
+ * * SPX*_DBG_DESKEW_CTL[SSTEP]
+ * This will put the training control logic into single step mode. We
+ * will not deskew in this scenario and will require the TX device to
+ * send continuous training sequences.
+ *
+ * It is required that SRX*_COM_CTL[INF_EN] be clear so that suspect
+ * data does not flow into the chip.
+ *
+ * Deasserting SPX*_DBG_DESKEW_CTL[SSTEP] will attempt to deskew as per
+ * the normal definition. Single step mode is for debug only. Special
+ * care must be given to correctly deskew the interface if normal
+ * operation is desired.
+ *
+ * * SPX*_DBG_DESKEW_CTL[SSTEP_GO]
+ * Each write of '1' to SSTEP_GO will go through a single training
+ * iteration and will perform...
+ *
+ * - DLL update, if SPX*_DBG_DESKEW_CTL[DLLDIS] is clear
+ * - coarse update, if SPX*_TRN4_CTL[MUX_EN] is set
+ * - single fine update, if SPX*_TRN4_CTL[MACRO_EN] is set and an edge
+ * was detected after walking +/- SPX*_TRN4_CTL[MAXDIST] taps.
+ *
+ * Writes to this register have no effect if the interface is not in
+ * SSTEP mode (SPX*_DBG_DESKEW_CTL[SSTEP]).
+ *
+ * The WILMA mode will be cleared at the final state transition, so
+ * that software can set SPX*_DBG_DESKEW_CTL[SSTEP] and
+ * SPX*_DBG_DESKEW_CTL[SSTEP_GO] before setting SPX*_CLK_CTL[RCVTRN]
+ * and the machine will go through the initial iteration and stop -
+ * waiting for another SPX*_DBG_DESKEW_CTL[SSTEP_GO] or an interface
+ * enable.
+ *
+ * * SPX*_DBG_DESKEW_CTL[FALL8]
+ * Determines how many pattern matches are required during training
+ * operations to fall out of training and begin processing the normal data
+ * stream. The default value is 10 pattern matches. The pattern that is
+ * used is dependent on the SPX*_DBG_DESKEW_CTL[FALLNOP] CSR which
+ * determines between non-training packets (the default) and NOPs.
+ *
+ * * SPX*_DBG_DESKEW_CTL[FALLNOP]
+ * Determines the pattern that is required during training operations to
+ * fall out of training and begin processing the normal data stream. The
+ * default value is to match against non-training data. Setting this
+ * bit changes the behavior to watch for NOP packets instead.
+ *
+ * This bit should not be changed dynamically while the link is
+ * operational.
+ */
+union cvmx_spxx_dbg_deskew_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_dbg_deskew_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_30_63 : 34;
+ uint64_t fallnop : 1; /**< Training fallout on NOP matches instead of
+ non-training matches.
+ (spx_csr__spi4_fallout_nop) */
+ uint64_t fall8 : 1; /**< Training fallout at 8 pattern matches instead of 10
+ (spx_csr__spi4_fallout_8_match) */
+ uint64_t reserved_26_27 : 2;
+ uint64_t sstep_go : 1; /**< Single Step Training Sequence
+ (spx_csr__spi4_single_step_go) */
+ uint64_t sstep : 1; /**< Single Step Training Mode
+ (spx_csr__spi4_single_step_mode) */
+ uint64_t reserved_22_23 : 2;
+ uint64_t clrdly : 1; /**< Resets the offset control in the XCV
+ (spx_csr__spi4_dll_clr_dly) */
+ uint64_t dec : 1; /**< Decrement the offset by OFFSET for the Spi4
+ bit selected by BITSEL
+ (spx_csr__spi4_dbg_trn_dec) */
+ uint64_t inc : 1; /**< Increment the offset by OFFSET for the Spi4
+ bit selected by BITSEL
+ (spx_csr__spi4_dbg_trn_inc) */
+ uint64_t mux : 1; /**< Set the mux select tap for the Spi4 bit
+ selected by BITSEL
+ (spx_csr__spi4_dbg_trn_mux) */
+ uint64_t offset : 5; /**< Adds or subtracts (Based on INC or DEC) the
+ offset to Spi4 bit BITSEL.
+ (spx_csr__spi4_dbg_trn_offset) */
+ uint64_t bitsel : 5; /**< Select the Spi4 CTL or DAT bit
+ 15-0 : Spi4 DAT[15:0]
+ 16 : Spi4 CTL
+ - 31-17: Invalid
+ (spx_csr__spi4_dbg_trn_bitsel) */
+ uint64_t offdly : 6; /**< Set the spx__offset lines to this value when
+ not in macro sequence
+ (spx_csr__spi4_mac_offdly) */
+ uint64_t dllfrc : 1; /**< Force the Spi4 RX DLL to update
+ (spx_csr__spi4_dll_force) */
+ uint64_t dlldis : 1; /**< Disable sending the update signal to the Spi4
+ RX DLL when set
+ (spx_csr__spi4_dll_trn_en) */
+#else
+ uint64_t dlldis : 1;
+ uint64_t dllfrc : 1;
+ uint64_t offdly : 6;
+ uint64_t bitsel : 5;
+ uint64_t offset : 5;
+ uint64_t mux : 1;
+ uint64_t inc : 1;
+ uint64_t dec : 1;
+ uint64_t clrdly : 1;
+ uint64_t reserved_22_23 : 2;
+ uint64_t sstep : 1;
+ uint64_t sstep_go : 1;
+ uint64_t reserved_26_27 : 2;
+ uint64_t fall8 : 1;
+ uint64_t fallnop : 1;
+ uint64_t reserved_30_63 : 34;
+#endif
+ } s;
+ struct cvmx_spxx_dbg_deskew_ctl_s cn38xx;
+ struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2;
+ struct cvmx_spxx_dbg_deskew_ctl_s cn58xx;
+ struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1;
+};
+typedef union cvmx_spxx_dbg_deskew_ctl cvmx_spxx_dbg_deskew_ctl_t;
+
+/**
+ * cvmx_spx#_dbg_deskew_state
+ *
+ * Notes:
+ * These bits are meant as a backdoor to control Spi4 per-bit deskew. See
+ * that Spec for more details.
+ */
+union cvmx_spxx_dbg_deskew_state {
+ uint64_t u64;
+ struct cvmx_spxx_dbg_deskew_state_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t testres : 1; /**< Training Test Mode Result
+ (srx_spi4__test_mode_result) */
+ uint64_t unxterm : 1; /**< Unexpected training termination
+ (srx_spi4__top_unxexp_trn_term) */
+ uint64_t muxsel : 2; /**< The mux select value of the bit selected by
+ SPX_DBG_DESKEW_CTL[BITSEL]
+ (srx_spi4__trn_mux_sel) */
+ uint64_t offset : 5; /**< The counter value of the bit selected by
+ SPX_DBG_DESKEW_CTL[BITSEL]
+ (srx_spi4__xcv_tap_select) */
+#else
+ uint64_t offset : 5;
+ uint64_t muxsel : 2;
+ uint64_t unxterm : 1;
+ uint64_t testres : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_spxx_dbg_deskew_state_s cn38xx;
+ struct cvmx_spxx_dbg_deskew_state_s cn38xxp2;
+ struct cvmx_spxx_dbg_deskew_state_s cn58xx;
+ struct cvmx_spxx_dbg_deskew_state_s cn58xxp1;
+};
+typedef union cvmx_spxx_dbg_deskew_state cvmx_spxx_dbg_deskew_state_t;
+
+/**
+ * cvmx_spx#_drv_ctl
+ *
+ * Notes:
+ * These bits all come from Duke - he will provide documentation and
+ * explanation. I'll just butcher it.
+ */
+union cvmx_spxx_drv_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_drv_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_spxx_drv_ctl_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t stx4ncmp : 4; /**< Duke (spx__spi4_tx_nctl_comp) */
+ uint64_t stx4pcmp : 4; /**< Duke (spx__spi4_tx_pctl_comp) */
+ uint64_t srx4cmp : 8; /**< Duke (spx__spi4_rx_rctl_comp) */
+#else
+ uint64_t srx4cmp : 8;
+ uint64_t stx4pcmp : 4;
+ uint64_t stx4ncmp : 4;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2;
+ struct cvmx_spxx_drv_ctl_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t stx4ncmp : 4; /**< Not used in CN58XX (spx__spi4_tx_nctl_comp) */
+ uint64_t stx4pcmp : 4; /**< Not used in CN58XX (spx__spi4_tx_pctl_comp) */
+ uint64_t reserved_10_15 : 6;
+ uint64_t srx4cmp : 10; /**< Suresh (spx__spi4_rx_rctl_comp)
+ Can be used to bypass the RX termination resistor
+ value. We have an on-chip RX termination resistor
+ compensation control block, which adjusts the
+ resistor value to a nominal 100 ohms. This
+ register can be used to bypass this automatically
+ computed value. */
+#else
+ uint64_t srx4cmp : 10;
+ uint64_t reserved_10_15 : 6;
+ uint64_t stx4pcmp : 4;
+ uint64_t stx4ncmp : 4;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } cn58xx;
+ struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1;
+};
+typedef union cvmx_spxx_drv_ctl cvmx_spxx_drv_ctl_t;
+
+/**
+ * cvmx_spx#_err_ctl
+ *
+ * SPX_ERR_CTL - Spi error control register
+ *
+ *
+ * Notes:
+ * * DIPPAY, DIPCLS, PRTNXA
+ * These bits control whether or not the packet's ERR bit is set when any of
+ * these errors is detected. If the corresponding error's bit is clear,
+ * the packet ERR will be set. If the error bit is set, the SPX will simply
+ * pass through the ERR bit without modifying it in any way - the error bit
+ * may or may not have been set by the transmitter device.
+ */
+union cvmx_spxx_err_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_err_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t prtnxa : 1; /**< Spi4 - set the ERR bit on packets in which the
+ port is out-of-range */
+ uint64_t dipcls : 1; /**< Spi4 DIPERR on closing control words cause the
+ ERR bit to be set */
+ uint64_t dippay : 1; /**< Spi4 DIPERR on payload control words cause the
+ ERR bit to be set */
+ uint64_t reserved_4_5 : 2;
+ uint64_t errcnt : 4; /**< Number of Dip4 errors before bringing down the
+ interface */
+#else
+ uint64_t errcnt : 4;
+ uint64_t reserved_4_5 : 2;
+ uint64_t dippay : 1;
+ uint64_t dipcls : 1;
+ uint64_t prtnxa : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_spxx_err_ctl_s cn38xx;
+ struct cvmx_spxx_err_ctl_s cn38xxp2;
+ struct cvmx_spxx_err_ctl_s cn58xx;
+ struct cvmx_spxx_err_ctl_s cn58xxp1;
+};
+typedef union cvmx_spxx_err_ctl cvmx_spxx_err_ctl_t;
+
+/**
+ * cvmx_spx#_int_dat
+ *
+ * SPX_INT_DAT - Interrupt Data Register
+ *
+ *
+ * Notes:
+ * Note: The SPX_INT_DAT[MUL] bit is set when multiple errors have been
+ * detected that would set any of the data fields: PRT, RSVOP, and CALBNK.
+ *
+ * The following errors will cause MUL to assert for PRT conflicts.
+ * - ABNORM
+ * - APERR
+ * - DPERR
+ *
+ * The following errors will cause MUL to assert for RSVOP conflicts.
+ * - RSVERR
+ *
+ * The following errors will cause MUL to assert for CALBNK conflicts.
+ * - CALERR
+ *
+ * The following errors will cause MUL to assert if multiple interrupts are
+ * asserted.
+ * - TPAOVR
+ *
+ * The MUL bit will be cleared once all outstanding errors have been
+ * cleared by software (not just MUL errors - all errors).
+ */
+union cvmx_spxx_int_dat {
+ uint64_t u64;
+ struct cvmx_spxx_int_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t mul : 1; /**< Multiple errors have occurred */
+ uint64_t reserved_14_30 : 17;
+ uint64_t calbnk : 2; /**< Spi4 Calendar table parity error bank */
+ uint64_t rsvop : 4; /**< Spi4 reserved control word */
+ uint64_t prt : 8; /**< Port associated with error */
+#else
+ uint64_t prt : 8;
+ uint64_t rsvop : 4;
+ uint64_t calbnk : 2;
+ uint64_t reserved_14_30 : 17;
+ uint64_t mul : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_spxx_int_dat_s cn38xx;
+ struct cvmx_spxx_int_dat_s cn38xxp2;
+ struct cvmx_spxx_int_dat_s cn58xx;
+ struct cvmx_spxx_int_dat_s cn58xxp1;
+};
+typedef union cvmx_spxx_int_dat cvmx_spxx_int_dat_t;
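+
+/* Sketch: decode SPX_INT_DAT after an interrupt, per the MUL notes above.
+ * CVMX_SPXX_INT_DAT(block) is the assumed address macro.
+ *
+ * cvmx_spxx_int_dat_t int_dat;
+ * int_dat.u64 = cvmx_read_csr(CVMX_SPXX_INT_DAT(interface));
+ * if (int_dat.s.mul)
+ *     ; // PRT/RSVOP/CALBNK reflect only the first of multiple errors
+ * else
+ *     ; // int_dat.s.prt identifies the single offending port
+ */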
+
+/**
+ * cvmx_spx#_int_msk
+ *
+ * SPX_INT_MSK - Interrupt Mask Register
+ *
+ */
+union cvmx_spxx_int_msk {
+ uint64_t u64;
+ struct cvmx_spxx_int_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t calerr : 1; /**< Spi4 Calendar table parity error */
+ uint64_t syncerr : 1; /**< Consecutive Spi4 DIP4 errors have exceeded
+ SPX_ERR_CTL[ERRCNT] */
+ uint64_t diperr : 1; /**< Spi4 DIP4 error */
+ uint64_t tpaovr : 1; /**< Selected port has hit TPA overflow */
+ uint64_t rsverr : 1; /**< Spi4 reserved control word detected */
+ uint64_t drwnng : 1; /**< Spi4 receive FIFO drowning/overflow */
+ uint64_t clserr : 1; /**< Spi4 packet closed on non-16B alignment without EOP */
+ uint64_t spiovr : 1; /**< Spi async FIFO overflow (Spi3 or Spi4) */
+ uint64_t reserved_2_3 : 2;
+ uint64_t abnorm : 1; /**< Abnormal packet termination (ERR bit) */
+ uint64_t prtnxa : 1; /**< Port out of range */
+#else
+ uint64_t prtnxa : 1;
+ uint64_t abnorm : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t spiovr : 1;
+ uint64_t clserr : 1;
+ uint64_t drwnng : 1;
+ uint64_t rsverr : 1;
+ uint64_t tpaovr : 1;
+ uint64_t diperr : 1;
+ uint64_t syncerr : 1;
+ uint64_t calerr : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_spxx_int_msk_s cn38xx;
+ struct cvmx_spxx_int_msk_s cn38xxp2;
+ struct cvmx_spxx_int_msk_s cn58xx;
+ struct cvmx_spxx_int_msk_s cn58xxp1;
+};
+typedef union cvmx_spxx_int_msk cvmx_spxx_int_msk_t;
+
+/**
+ * cvmx_spx#_int_reg
+ *
+ * SPX_INT_REG - Interrupt Register
+ *
+ *
+ * Notes:
+ * * PRTNXA
+ * This error indicates that the port on the Spi bus was not a valid port
+ * for the system. Spi4 accesses occur on payload control bit-times. The
+ * SRX can be configured with the exact number of ports available (by
+ * SRX_COM_CTL[PRTS] register). Any Spi access to anything outside the range
+ * of 0 .. (SRX_COM_CTL[PRTS] - 1) is considered an error. The offending
+ * port is logged in SPX_INT_DAT[PRT] if there are no pending interrupts in
+ * SPX_INT_REG that require SPX_INT_DAT[PRT].
+ *
+ * SRX will not drop the packet with the bogus port address. Instead, the
+ * port will be mapped into the supported port range. The remapped address
+ * is simply...
+ *
+ * Address = [ interfaceId, ADR[3:0] ]
+ *
+ * If the SPX detects that a PRTNXA error has occurred, the packet will
+ * have its ERR bit set (or'ed in with the ERR bit from the transmitter)
+ * if the SPX_ERR_CTL[PRTNXA] bit is clear.
+ *
+ * In Spi4 mode, SPX will generate an interrupt for every 8B data burst
+ * associated with the invalid address. The SPX_INT_DAT[MUL] bit will never
+ * be set.
+ *
+ * * ABNORM
+ * This bit simply indicates that a given packet had abnormal termination.
+ * In Spi4 mode, this means that the packet completed with an EOPS[1:0] code of
+ * 2'b01. This error can also be thought of as the application specific
+ * error (as mentioned in the Spi4 spec). The offending port is logged in
+ * SPX_INT_DAT[PRT] if there are no pending interrupts in SPX_INT_REG that
+ * require SPX_INT_DAT[PRT].
+ *
+ * The ABNORM error is only raised when the ERR bit that comes from the
+ * Spi interface is set. It will never assert if any internal condition
+ * causes the ERR bit to assert (e.g. PRTNXA or DPERR).
+ *
+ * * SPIOVR
+ * This error indicates that the FIFOs that manage the async crossing from
+ * the Spi clocks to the core clock domains have overflowed. This is a
+ * fatal error and can cause much data/control corruption since ticks will
+ * be dropped and reordered. This is purely a function of clock ratios and
+ * correct system ratios should make this an impossible condition.
+ *
+ * * CLSERR
+ * This is a Spi4 error that indicates that a given data transfer burst
+ * that did not terminate with an EOP, did not end with the 16B alignment
+ * as per the Spi4 spec. The offending port cannot be logged since the
+ * block does not know the stream has terminated until the port switches.
+ * At that time, that packet has already been pushed down the pipe.
+ *
+ * The CLSERR bit does not actually check the Spi4 burst - just how data
+ * is accumulated for the downstream logic. Bursts that are separated by
+ * idles or training will still be merged into accumulated transfers and
+ * will not fire the CLSERR condition. The checker is really checking
+ * non-8B aligned, non-EOP data ticks that are sent downstream. These
+ * ticks are what will really mess up the core.
+ *
+ * This is an expensive fix, so we'll probably let it ride. We never
+ * claim to check Spi4 protocol anyway.
+ *
+ * * DRWNNG
+ * This error indicates that the Spi4 FIFO that services the GMX has
+ * overflowed. Like the SPIOVR error condition, correct system ratios
+ * should make this an impossible condition.
+ *
+ * * RSVERR
+ * This Spi4 error indicates that the Spi4 receiver has seen a reserve
+ * control packet. A reserve control packet is an invalid combination
+ * of bits on DAT[15:12]. Basically this is DAT[15] == 1'b0 and DAT[12]
+ * == 1'b1 (an SOP without a payload command). The RSVERR indicates an
+ * error has occurred and SPX_INT_DAT[RSVOP] holds the first reserved
+ * opcode and will be set if there are no pending interrupts in
+ * SPX_INT_REG that require SPX_INT_DAT[RSVOP].
+ *
+ * * TPAOVR
+ * This bit indicates that the TPA Watcher has flagged an event. See the
+ * TPA Watcher for a more detailed discussion.
+ *
+ * * DIPERR
+ * This bit indicates that the Spi4 receiver has encountered a DIP4
+ * miscompare on the datapath. A DIPERR can occur in an IDLE or a
+ * control word that frames a data burst. If the DIPERR occurs on a
+ * framing word there are three cases.
+ *
+ * 1) DIPERR occurs at the end of a data burst. The previous packet is
+ * marked with the ERR bit to be processed later if
+ * SPX_ERR_CTL[DIPCLS] is clear.
+ * 2) DIPERR occurs on a payload word. The subsequent packet is marked
+ * with the ERR bit to be processed later if SPX_ERR_CTL[DIPPAY] is
+ * clear.
+ * 3) DIPERR occurs on a control word that closes one packet and is a
+ * payload for another packet. In this case, both packets will have
+ * their ERR bit marked depending on the respective values of
+ * SPX_ERR_CTL[DIPCLS] and SPX_ERR_CTL[DIPPAY] as discussed above.
+ *
+ * * SYNCERR
+ * This bit indicates that the Spi4 receiver has encountered
+ * SPX_ERR_CTL[ERRCNT] consecutive Spi4 DIP4 errors and the interface
+ * should be synched.
+ *
+ * * CALERR
+ * This bit indicates that the Spi4 calendar table encountered a parity
+ * error. This error bit is associated with the calendar table on the RX
+ * interface - the interface that receives the Spi databus. Parity errors
+ * can occur during normal operation when the calendar table is constantly
+ * being read for the port information, or during initialization time, when
+ * the user has access. Since the calendar table is split into two banks,
+ * SPX_INT_DAT[CALBNK] indicates which banks have taken a parity error.
+ * CALBNK[1] indicates the error occurred in the upper bank, while CALBNK[0]
+ * indicates that the error occurred in the lower bank. SPX_INT_DAT[CALBNK]
+ * will be set if there are no pending interrupts in SPX_INT_REG that
+ * require SPX_INT_DAT[CALBNK].
+ *
+ * * SPF
+ * This bit indicates that a Spi fatal error has occurred. A fatal error
+ * is defined as any error condition for which the corresponding
+ * SPX_INT_SYNC bit is set. Therefore, conservative systems can halt the
+ * interface on any error condition although this is not strictly
+ * necessary. Some errors are much more fatal in nature than others.
+ *
+ * PRTNXA, SPIOVR, CLSERR, DRWNNG, DIPERR, CALERR, and SYNCERR are examples
+ * of fatal errors for different reasons - usually because multiple port
+ * streams could be affected. ABNORM, RSVERR, and TPAOVR are conditions
+ * that are contained to a single packet which allows the interface to drop
+ * a single packet and remain up and stable.
+ */
+union cvmx_spxx_int_reg {
+ uint64_t u64;
+ struct cvmx_spxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t spf : 1; /**< Spi interface down */
+ uint64_t reserved_12_30 : 19;
+ uint64_t calerr : 1; /**< Spi4 Calendar table parity error */
+ uint64_t syncerr : 1; /**< Consecutive Spi4 DIP4 errors have exceeded
+ SPX_ERR_CTL[ERRCNT] */
+ uint64_t diperr : 1; /**< Spi4 DIP4 error */
+ uint64_t tpaovr : 1; /**< Selected port has hit TPA overflow */
+ uint64_t rsverr : 1; /**< Spi4 reserved control word detected */
+ uint64_t drwnng : 1; /**< Spi4 receive FIFO drowning/overflow */
+ uint64_t clserr : 1; /**< Spi4 packet closed on non-16B alignment without EOP */
+ uint64_t spiovr : 1; /**< Spi async FIFO overflow */
+ uint64_t reserved_2_3 : 2;
+ uint64_t abnorm : 1; /**< Abnormal packet termination (ERR bit) */
+ uint64_t prtnxa : 1; /**< Port out of range */
+#else
+ uint64_t prtnxa : 1;
+ uint64_t abnorm : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t spiovr : 1;
+ uint64_t clserr : 1;
+ uint64_t drwnng : 1;
+ uint64_t rsverr : 1;
+ uint64_t tpaovr : 1;
+ uint64_t diperr : 1;
+ uint64_t syncerr : 1;
+ uint64_t calerr : 1;
+ uint64_t reserved_12_30 : 19;
+ uint64_t spf : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_spxx_int_reg_s cn38xx;
+ struct cvmx_spxx_int_reg_s cn38xxp2;
+ struct cvmx_spxx_int_reg_s cn58xx;
+ struct cvmx_spxx_int_reg_s cn58xxp1;
+};
+typedef union cvmx_spxx_int_reg cvmx_spxx_int_reg_t;
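+
+/* Sketch of a simple SPX interrupt service path based on the notes above:
+ * read SPX_INT_REG, treat the fatal set (PRTNXA, SPIOVR, CLSERR, DRWNNG,
+ * DIPERR, CALERR, SYNCERR) as grounds for resynchronizing the interface,
+ * and the per-packet set (ABNORM, RSVERR, TPAOVR) as droppable. The address
+ * macro CVMX_SPXX_INT_REG(block) and the write-1-to-clear acknowledgment
+ * are assumptions, not confirmed by this header.
+ *
+ * cvmx_spxx_int_reg_t int_reg;
+ * int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(interface));
+ * if (int_reg.s.spf)
+ *     ; // a sync-marked (fatal) condition occurred; resync the bus
+ * cvmx_write_csr(CVMX_SPXX_INT_REG(interface), int_reg.u64); // ack (assumed W1C)
+ */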
+
+/**
+ * cvmx_spx#_int_sync
+ *
+ * SPX_INT_SYNC - Interrupt Sync Register
+ *
+ *
+ * Notes:
+ * This mask set indicates which exception conditions should cause the
+ * SPX_INT_REG[SPF] bit to assert.
+ *
+ * It is recommended that software set the PRTNXA, SPIOVR, CLSERR, DRWNNG,
+ * DIPERR, CALERR, and SYNCERR errors as synchronization events. Software is
+ * free to synchronize the bus on other conditions, but this is the minimum
+ * recommended set.
+ */
+union cvmx_spxx_int_sync {
+ uint64_t u64;
+ struct cvmx_spxx_int_sync_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t calerr : 1; /**< Spi4 Calendar table parity error */
+ uint64_t syncerr : 1; /**< Consecutive Spi4 DIP4 errors have exceeded
+ SPX_ERR_CTL[ERRCNT] */
+ uint64_t diperr : 1; /**< Spi4 DIP4 error */
+ uint64_t tpaovr : 1; /**< Selected port has hit TPA overflow */
+ uint64_t rsverr : 1; /**< Spi4 reserved control word detected */
+ uint64_t drwnng : 1; /**< Spi4 receive FIFO drowning/overflow */
+ uint64_t clserr : 1; /**< Spi4 packet closed on non-16B alignment without EOP */
+ uint64_t spiovr : 1; /**< Spi async FIFO overflow (Spi3 or Spi4) */
+ uint64_t reserved_2_3 : 2;
+ uint64_t abnorm : 1; /**< Abnormal packet termination (ERR bit) */
+ uint64_t prtnxa : 1; /**< Port out of range */
+#else
+ uint64_t prtnxa : 1;
+ uint64_t abnorm : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t spiovr : 1;
+ uint64_t clserr : 1;
+ uint64_t drwnng : 1;
+ uint64_t rsverr : 1;
+ uint64_t tpaovr : 1;
+ uint64_t diperr : 1;
+ uint64_t syncerr : 1;
+ uint64_t calerr : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_spxx_int_sync_s cn38xx;
+ struct cvmx_spxx_int_sync_s cn38xxp2;
+ struct cvmx_spxx_int_sync_s cn58xx;
+ struct cvmx_spxx_int_sync_s cn58xxp1;
+};
+typedef union cvmx_spxx_int_sync cvmx_spxx_int_sync_t;
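+
+/* Sketch: program the minimum recommended synchronization set from the
+ * notes above so that any of those errors raises SPX_INT_REG[SPF].
+ * CVMX_SPXX_INT_SYNC(block) is the assumed address macro.
+ *
+ * cvmx_spxx_int_sync_t int_sync;
+ * int_sync.u64 = 0;
+ * int_sync.s.prtnxa = 1;
+ * int_sync.s.spiovr = 1;
+ * int_sync.s.clserr = 1;
+ * int_sync.s.drwnng = 1;
+ * int_sync.s.diperr = 1;
+ * int_sync.s.calerr = 1;
+ * int_sync.s.syncerr = 1;
+ * cvmx_write_csr(CVMX_SPXX_INT_SYNC(interface), int_sync.u64);
+ */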
+
+/**
+ * cvmx_spx#_tpa_acc
+ *
+ * SPX_TPA_ACC - TPA watcher byte accumulator
+ *
+ *
+ * Notes:
+ * This field allows the user to access the TPA watcher accumulator counter.
+ * This register reflects the number of bytes sent to IMX once the port
+ * specified by SPX_TPA_SEL[PRTSEL] has lost its TPA. The SPX_INT_REG[TPAOVR]
+ * bit is asserted when CNT >= SPX_TPA_MAX[MAX]. The CNT will continue to
+ * increment until the TPA for the port is asserted. At that point the CNT
+ * value is frozen until software clears the interrupt bit.
+ */
+union cvmx_spxx_tpa_acc {
+ uint64_t u64;
+ struct cvmx_spxx_tpa_acc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< TPA watcher accumulate count */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_spxx_tpa_acc_s cn38xx;
+ struct cvmx_spxx_tpa_acc_s cn38xxp2;
+ struct cvmx_spxx_tpa_acc_s cn58xx;
+ struct cvmx_spxx_tpa_acc_s cn58xxp1;
+};
+typedef union cvmx_spxx_tpa_acc cvmx_spxx_tpa_acc_t;
+
+/**
+ * cvmx_spx#_tpa_max
+ *
+ * SPX_TPA_MAX - TPA watcher assertion threshold
+ *
+ *
+ * Notes:
+ * The TPA watcher has the ability to notify the system with an interrupt when
+ * too much data has been received on loss of TPA. The user sets the
+ * SPX_TPA_MAX[MAX] register; when the watcher has accumulated that many
+ * ticks, the interrupt is conditionally raised (based on interrupt mask
+ * bits). This feature will be disabled if the programmed count is zero.
+ */
+union cvmx_spxx_tpa_max {
+ uint64_t u64;
+ struct cvmx_spxx_tpa_max_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t max : 32; /**< TPA watcher TPA threshold */
+#else
+ uint64_t max : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_spxx_tpa_max_s cn38xx;
+ struct cvmx_spxx_tpa_max_s cn38xxp2;
+ struct cvmx_spxx_tpa_max_s cn58xx;
+ struct cvmx_spxx_tpa_max_s cn58xxp1;
+};
+typedef union cvmx_spxx_tpa_max cvmx_spxx_tpa_max_t;
+
+/**
+ * cvmx_spx#_tpa_sel
+ *
+ * SPX_TPA_SEL - TPA watcher port selector
+ *
+ *
+ * Notes:
+ * The TPA Watcher is primarily a debug vehicle used to help initial bringup
+ * of a system. The TPA watcher counts bytes that roll in from the Spi
+ * interface. The user programs the Spi port to watch using
+ * SPX_TPA_SEL[PRTSEL]. Once the TPA is deasserted for that port, the watcher
+ * begins to count the data ticks that have been delivered to the inbound
+ * datapath (and eventually to the IOB). The result is that we can derive
+ * turn-around times of the other device by watching how much data was sent
+ * after a loss of TPA through the SPX_TPA_ACC[CNT] register. An optional
+ * interrupt may be raised as well. See SPX_TPA_MAX for further information.
+ *
+ * TPA's can be deasserted for a number of reasons...
+ *
+ * 1) IPD indicates backpressure
+ * 2) The GMX inbound FIFO is filling up and should BP
+ * 3) The user has put an override on the TPA wires
+ */
+union cvmx_spxx_tpa_sel {
+ uint64_t u64;
+ struct cvmx_spxx_tpa_sel_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t prtsel : 4; /**< TPA watcher port select */
+#else
+ uint64_t prtsel : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_spxx_tpa_sel_s cn38xx;
+ struct cvmx_spxx_tpa_sel_s cn38xxp2;
+ struct cvmx_spxx_tpa_sel_s cn58xx;
+ struct cvmx_spxx_tpa_sel_s cn58xxp1;
+};
+typedef union cvmx_spxx_tpa_sel cvmx_spxx_tpa_sel_t;
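+
+/* Sketch tying the three TPA-watcher CSRs together: select the port to
+ * watch, arm the byte threshold, and read back the frozen count after a
+ * TPAOVR interrupt. The CVMX_SPXX_TPA_*(block) address macros are assumed,
+ * and the threshold value is hypothetical.
+ *
+ * cvmx_spxx_tpa_sel_t sel = { .u64 = 0 };
+ * sel.s.prtsel = port;                 // Spi port to watch
+ * cvmx_write_csr(CVMX_SPXX_TPA_SEL(interface), sel.u64);
+ * cvmx_spxx_tpa_max_t max = { .u64 = 0 };
+ * max.s.max = 4096;                    // hypothetical byte threshold
+ * cvmx_write_csr(CVMX_SPXX_TPA_MAX(interface), max.u64);
+ * // ...after SPX_INT_REG[TPAOVR] fires...
+ * cvmx_spxx_tpa_acc_t acc;
+ * acc.u64 = cvmx_read_csr(CVMX_SPXX_TPA_ACC(interface));
+ * // acc.s.cnt holds the bytes received after loss of TPA
+ */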
+
+/**
+ * cvmx_spx#_trn4_ctl
+ *
+ * Notes:
+ * These bits are controls for the Spi4 RX bit deskew logic. See that Spec
+ * for further details.
+ *
+ * * BOOT_BIT
+ * On the initial training synchronization sequence, the hardware has the
+ * BOOT_BIT set, which means that it will continuously perform macro
+ * operations. Once the BOOT_BIT is cleared, the macro machine will finish
+ * the macro operation it is working on and then return to the idle state.
+ * Subsequent training sequences will only go through a single macro
+ * operation in order to do slight deskews.
+ *
+ * * JITTER
+ * Minimum value is 1. This parameter must be set for Spi4 mode using
+ * auto-bit deskew. Regardless of the original intent, this field must be
+ * set non-zero for deskew to function correctly.
+ *
+ * The thought is the JITTER range is no longer required since the macro
+ * machine was enhanced to understand about edge direction. Originally
+ * these bits were intended to compensate for clock jitter.
+ *
+ * dly: the intrinsic delay of each delay element tap;
+ * currently, it is 70ps-110ps.
+ * jitter: amount of jitter we expect in the system (~200ps)
+ * j: number of taps to account for jitter
+ *
+ * j = ((jitter / dly) + 1)
+ *
+ * * TRNTEST
+ * This mode is used to test systems to make sure that the bit deskew
+ * parameters have been correctly setup. After configuration, software can
+ * set the TRNTEST mode bit. This should be done before SRX_COM_CTL[ST_EN]
+ * is set such that we can be sure that the TX device is simply sending
+ * continuous training patterns.
+ *
+ * The test mode samples every incoming bit-time and makes sure that it is
+ * either a training control or a training data packet. If any other data
+ * is observed, then SPX_DBG_DESKEW_STATE[TESTRES] will assert signaling a
+ * test failure.
+ *
+ * Software must clear TRNTEST before training is terminated.
+ *
+ * * Example Spi4 RX init flow...
+ *
+ * 1) set the CLKDLY lines (SPXX_CLK_CTL[CLKDLY])
+ * - these bits must be set before the DLL can successfully lock
+ *
+ * 2) set the SRXDLCK (SPXX_CLK_CTL[SRXDLCK])
+ * - this is the DLL lock bit which also acts as a block reset
+ *
+ * 3) wait for the DLLs lock
+ *
+ * 4) set any desired fields in SPXX_DBG_DESKEW_CTL
+ * - This register has only one field that most users will care about.
+ * When set, DLLDIS will disable sending update pulses to the Spi4 RX
+ * DLLs. This pulse allows the DLL to adjust to clock variations over
+ * time. In general, it is desired behavior.
+ *
+ * 5) set fields in SPXX_TRN4_CTL
+ * - These fields deal with the MUX training sequence
+ * * MUX_EN
+ * This is the enable bit for the mux select. The MUX select will
+ * run in the training sequence between the DLL and the Macro
+ * sequence when enabled. Once the MUX selects are selected, the
+ * entire macro sequence must be rerun. The expectation is that
+ * this is only run at boot time and this bit is cleared at/around
+ * step \#8.
+ * - These fields deal with the Macro training sequence
+ * * MACRO_EN
+ * This is the enable bit for the macro sequence. Macro sequences
+ * will run after the DLL and MUX training sequences. Each macro
+ * sequence can move the offset by one value.
+ * * MAXDIST
+ * This is how far we will search for an edge. Example...
+ *
+ * dly: the intrinsic delay of each delay element tap;
+ * currently, it is 70ps-110ps.
+ * U: bit time period in time units.
+ *
+ * MAXDIST = MIN(16, ((bit_time / 2) / dly))
+ *
+ * Each MAXDIST iteration consists of an edge detect in the early
+ * and late (+/-) directions in an attempt to center the data. This
+ * requires two training transitions, the control/data and
+ * data/control transitions which comprise a training sequence.
+ * Therefore, the number of training sequences required for a single
+ * macro operation is simply MAXDIST.
+ *
+ * 6) set the RCVTRN go bit (SPXX_CLK_CTL[RCVTRN])
+ * - this bit synchs on the first valid complete training cycle and
+ * starts to process the training packets
+ *
+ * 6b) This is where software could manually set the controls as opposed to
+ * letting the hardware do it. See the SPXX_DBG_DESKEW_CTL register
+ * description for more detail.
+ *
+ * 7) the TX device must continue to send training packets for the initial
+ * time period.
+ * - this can be determined by...
+ *
+ * DLL: one training sequence for the DLL adjustment (regardless of enable/disable)
+ * MUX: one training sequence for the Flop MUX taps (regardless of enable/disable)
+ * INIT_SEQUENCES: max number of taps that we must move
+ *
+ * INIT_SEQUENCES = MIN(16, ((bit_time / 2) / dly))
+ *
+ * INIT_TRN = DLL + MUX + ROUNDUP((INIT_SEQUENCES * (MAXDIST + 2)))
+ *
+ *
+ * - software can either wait a fixed amount of time based on the clock
+ * frequencies or poll the SPXX_CLK_STAT[SRXTRN] register. Each
+ * assertion of SRXTRN means that at least one training sequence has
+ * been received. Software can poll, clear, and repeat on this bit to
+ * eventually count all required transitions.
+ *
+ * int cnt = 0;
+ * while (cnt < INIT_TRN) {
+ *     if (SPXX_CLK_STAT[SRXTRN]) {
+ *         cnt++;
+ *         SPXX_CLK_STAT[SRXTRN] = 0;
+ *     }
+ * }
+ *
+ * - subsequent training sequences will normally move the taps only
+ * one position, so the ALPHA equation becomes...
+ *
+ * MAC = (MAXDIST == 0) ? 1 : ROUNDUP((1 * (MAXDIST + 2))) + 1
+ *
+ * ALPHA = DLL + MUX + MAC
+ *
+ * ergo, ALPHA simplifies to...
+ *
+ * ALPHA = (MAXDIST == 0) ? 3 : MAXDIST + 5
+ *
+ * DLL and MUX and MAC will always require at least a training sequence
+ * each - even if disabled. If the macro sequence is enabled, an
+ * additional training sequence at the end is necessary. The extra
+ * sequence allows for all training state to be cleared before resuming
+ * normal operation.
+ *
+ * 8) after the receiver gets enough training sequences to achieve
+ * deskew lock, set SPXX_TRN4_CTL[CLR_BOOT]
+ * - this disables the continuous macro sequences and puts it into one
+ * macro sequence per training operation
+ * - optionally, the machine can choose to fall out of training if
+ * enough NOPs follow the training operation (require at least 32 NOPs
+ * to follow the training sequence).
+ *
+ * There must be at least MAXDIST + 3 training sequences after the
+ * SPXX_TRN4_CTL[CLR_BOOT] is set or sufficient NOPs from the TX device.
+ *
+ * 9) the TX device continues to send training sequences until the RX
+ * device sends a calendar transition. This is controlled by
+ * SRXX_COM_CTL[ST_EN]. Other restrictions require other Spi parameters
+ * (e.g. the calendar table) to be setup before this bit can be enabled.
+ * Once the entire interface is properly programmed, software writes
+ * SRXX_COM_CTL[INF_EN]. At this point, the Spi4 packets will begin to
+ * be sent into the N2K core and processed by the chip.
+ */
+union cvmx_spxx_trn4_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_trn4_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t trntest : 1; /**< Training Test Mode
+ This bit is only for initial bringup
+ (spx_csr__spi4_trn_test_mode) */
+ uint64_t jitter : 3; /**< Accounts for jitter when the macro sequence is
+ locking. The value is how many consecutive
+ transitions before declaring an edge. Minimum
+ value is 1. This parameter must be set for Spi4
+ mode using auto-bit deskew.
+ (spx_csr__spi4_mac_jitter) */
+ uint64_t clr_boot : 1; /**< Clear the macro boot sequence mode bit
+ (spx_csr__spi4_mac_clr_boot) */
+ uint64_t set_boot : 1; /**< Enable the macro boot sequence mode bit
+ (spx_csr__spi4_mac_set_boot) */
+ uint64_t maxdist : 5; /**< This field defines how far from center the
+ deskew logic will search in a single macro
+ sequence (spx_csr__spi4_mac_iters) */
+ uint64_t macro_en : 1; /**< Allow the macro sequence to center the sample
+ point in the data window through hardware
+ (spx_csr__spi4_mac_trn_en) */
+ uint64_t mux_en : 1; /**< Enable the hardware machine that selects the
+ proper coarse FLOP selects
+ (spx_csr__spi4_mux_trn_en) */
+#else
+ uint64_t mux_en : 1;
+ uint64_t macro_en : 1;
+ uint64_t maxdist : 5;
+ uint64_t set_boot : 1;
+ uint64_t clr_boot : 1;
+ uint64_t jitter : 3;
+ uint64_t trntest : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_spxx_trn4_ctl_s cn38xx;
+ struct cvmx_spxx_trn4_ctl_s cn38xxp2;
+ struct cvmx_spxx_trn4_ctl_s cn58xx;
+ struct cvmx_spxx_trn4_ctl_s cn58xxp1;
+};
+typedef union cvmx_spxx_trn4_ctl cvmx_spxx_trn4_ctl_t;
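+
+/* A C rendering of the SRXTRN polling loop from step 7 of the notes above,
+ * offered as a sketch. CVMX_SPXX_CLK_STAT(block), the cvmx_spxx_clk_stat_t
+ * type, its srxtrn field, and the write-1-to-clear acknowledgment are all
+ * assumed from the naming conventions of this file; init_trn comes from the
+ * INIT_TRN formula in the notes.
+ *
+ * int cnt = 0;
+ * while (cnt < init_trn) {
+ *     cvmx_spxx_clk_stat_t stat;
+ *     stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+ *     if (stat.s.srxtrn) {
+ *         cnt++;
+ *         stat.u64 = 0;
+ *         stat.s.srxtrn = 1; // assumed write-1-to-clear
+ *         cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+ *     }
+ * }
+ */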
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-spxx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-srio.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-srio.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-srio.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1623 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+/**
+ * @file
+ *
+ * Interface to SRIO
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-srio.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-atomic.h>
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+#include <asm/octeon/cvmx-error.h>
+#endif
+#include <asm/octeon/cvmx-sriox-defs.h>
+#include <asm/octeon/cvmx-sriomaintx-defs.h>
+#include <asm/octeon/cvmx-sli-defs.h>
+#include <asm/octeon/cvmx-dpi-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-qlm.h>
+#else
+#include "cvmx.h"
+#include "cvmx-srio.h"
+#include "cvmx-clock.h"
+#include "cvmx-helper.h"
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+#include "cvmx-atomic.h"
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#include "cvmx-error.h"
+#endif
+#include "cvmx-helper-errata.h"
+#endif
+#include "cvmx-qlm.h"
+#include "cvmx-helper.h"
+#endif
+
+#define CVMX_SRIO_CONFIG_TIMEOUT 10000 /* 10ms */
+#define CVMX_SRIO_DOORBELL_TIMEOUT 10000 /* 10ms */
+#define CVMX_SRIO_CONFIG_PRIORITY 0
+#define ULL unsigned long long
+
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t upper : 2; /* Normally 2 for XKPHYS */
+ uint64_t reserved_49_61 : 13; /* Must be zero */
+ uint64_t io : 1; /* 1 for IO space access */
+ uint64_t did : 5; /* DID = 3 */
+ uint64_t subdid : 3; /* SubDID = 3-6 */
+ uint64_t reserved_36_39 : 4; /* Must be zero */
+ uint64_t se : 2; /* SubDID extender */
+ uint64_t reserved_32_33 : 2; /* Must be zero */
+ uint64_t hopcount : 8; /* Hopcount */
+ uint64_t address : 24; /* Mem address */
+#else
+ uint64_t address : 24;
+ uint64_t hopcount : 8;
+ uint64_t reserved_32_33 : 2;
+ uint64_t se : 2;
+ uint64_t reserved_36_39 : 4;
+ uint64_t subdid : 3;
+ uint64_t did : 5;
+ uint64_t io : 1;
+ uint64_t reserved_49_61 : 13;
+ uint64_t upper : 2;
+#endif
+ } config;
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t upper : 2; /* Normally 2 for XKPHYS */
+ uint64_t reserved_49_61 : 13; /* Must be zero */
+ uint64_t io : 1; /* 1 for IO space access */
+ uint64_t did : 5; /* DID = 3 */
+ uint64_t subdid : 3; /* SubDID = 3-6 */
+ uint64_t reserved_36_39 : 4; /* Must be zero */
+ uint64_t se : 2; /* SubDID extender */
+ uint64_t address : 34; /* Mem address */
+#else
+ uint64_t address : 34;
+ uint64_t se : 2;
+ uint64_t reserved_36_39 : 4;
+ uint64_t subdid : 3;
+ uint64_t did : 5;
+ uint64_t io : 1;
+ uint64_t reserved_49_61 : 13;
+ uint64_t upper : 2;
+#endif
+ } mem;
+} cvmx_sli_address_t;
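+
+/* Sketch: compose a 64-bit SLI config-space address from the bitfields
+ * above. The constants follow the inline comments (upper=2 for XKPHYS,
+ * did=3, subdid in 3-6); the exact subdid/se encoding for a given S2M index
+ * is an assumption here, not taken from this file.
+ *
+ * cvmx_sli_address_t addr;
+ * addr.u64 = 0;
+ * addr.config.upper = 2;          // XKPHYS
+ * addr.config.io = 1;             // IO space access
+ * addr.config.did = 3;
+ * addr.config.subdid = 3;         // hypothetical; depends on S2M index
+ * addr.config.hopcount = hopcount;
+ * addr.config.address = offset;   // 24-bit config offset
+ */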
+
+typedef struct
+{
+ cvmx_srio_initialize_flags_t flags;
+ int32_t subidx_ref_count[16]; /* Reference count for SLI_MEM_ACCESS_SUBID[12-27]. Index=X-12 */
+ int32_t s2m_ref_count[16]; /* Reference count for SRIOX_S2M_TYPE[0-15]. */
+} __cvmx_srio_state_t;
+
+static CVMX_SHARED __cvmx_srio_state_t __cvmx_srio_state[4];
+
+
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+/**
+ * @INTERNAL
+ * Allocate a SRIOX_S2M_TYPEX register for mapping a remote SRIO
+ * device's address range into Octeons SLI address space. Reference
+ * counting is used to allow sharing of duplicate setups. The current
+ * implementation treats reads and writes as paired, but this could be
+ * changed if we have trouble running out of indexes.
+ *
+ * @param srio_port SRIO port device is on
+ * @param s2m SRIOX_S2M_TYPEX setup required
+ *
+ * @return Index of CSR, or negative on failure
+ */
+static int __cvmx_srio_alloc_s2m(int srio_port, cvmx_sriox_s2m_typex_t s2m)
+{
+ int s2m_index;
+ /* Search through the S2M_TYPE registers looking for an unused one or one
+ set up the way we need it */
+ for (s2m_index=0; s2m_index<16; s2m_index++)
+ {
+ /* Increment ref count by 2 since we count read and write
+ independently. We might need a more complicated search in the
+ future */
+ int ref_count = cvmx_atomic_fetch_and_add32(&__cvmx_srio_state[srio_port].s2m_ref_count[s2m_index], 2);
+ if (ref_count == 0)
+ {
+ /* Unused location. Write our value */
+ cvmx_write_csr(CVMX_SRIOX_S2M_TYPEX(s2m_index, srio_port), s2m.u64);
+ /* Read back to make sure the update is complete */
+ cvmx_read_csr(CVMX_SRIOX_S2M_TYPEX(s2m_index, srio_port));
+ return s2m_index;
+ }
+ else
+ {
+ /* In use, see if we can use it */
+ if (cvmx_read_csr(CVMX_SRIOX_S2M_TYPEX(s2m_index, srio_port)) == s2m.u64)
+ return s2m_index;
+ else
+ cvmx_atomic_add32(&__cvmx_srio_state[srio_port].s2m_ref_count[s2m_index], -2);
+ }
+ }
+ cvmx_dprintf("SRIO%d: Unable to find free SRIOX_S2M_TYPEX\n", srio_port);
+ return -1;
+}
+
+
+/**
+ * @INTERNAL
+ * Free a handle allocated by __cvmx_srio_alloc_s2m
+ *
+ * @param srio_port SRIO port
+ * @param index Index to free
+ */
+static void __cvmx_srio_free_s2m(int srio_port, int index)
+{
+ /* Read to force pending transactions to complete */
+ cvmx_read_csr(CVMX_SRIOX_S2M_TYPEX(index, srio_port));
+ cvmx_atomic_add32(&__cvmx_srio_state[srio_port].s2m_ref_count[index], -2);
+}
+
+
+/**
+ * @INTERNAL
+ * Allocate a SLI SubID to map a region of memory. Reference
+ * counting is used to allow sharing of duplicate setups.
+ *
+ * @param subid SLI_MEM_ACCESS_SUBIDX we need an index for
+ *
+ * @return Index of CSR, or negative on failure
+ */
+static int __cvmx_srio_alloc_subid(cvmx_sli_mem_access_subidx_t subid)
+{
+ int mem_index;
+ /* Search through the mem access subid registers looking for an unused one
+ or one set up the way we need it. PCIe uses the low indexes, so search
+ backwards */
+ for (mem_index=27; mem_index>=12; mem_index--)
+ {
+ int ref_count = cvmx_atomic_fetch_and_add32(&__cvmx_srio_state[0].subidx_ref_count[mem_index-12], 1);
+ if (ref_count == 0)
+ {
+ /* Unused location. Write our value */
+ cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(mem_index), subid.u64);
+ /* Read back the value to make sure the update is complete */
+ cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(mem_index));
+ return mem_index;
+ }
+ else
+ {
+ /* In use, see if we can use it */
+ if (cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(mem_index)) == subid.u64)
+ return mem_index;
+ else
+ cvmx_atomic_add32(&__cvmx_srio_state[0].subidx_ref_count[mem_index-12], -1);
+ }
+ }
+ cvmx_dprintf("SRIO: Unable to find free SLI_MEM_ACCESS_SUBIDX\n");
+ return -1;
+}
+
+
+/**
+ * @INTERNAL
+ * Free a handle allocated by __cvmx_srio_alloc_subid
+ *
+ * @param index Index to free
+ */
+static void __cvmx_srio_free_subid(int index)
+{
+ /* Read to force pending transactions to complete */
+ cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(index));
+ cvmx_atomic_add32(&__cvmx_srio_state[0].subidx_ref_count[index-12], -1);
+}
+#endif
+
+
+/**
+ * @INTERNAL
+ * Read 32bits from a local port
+ *
+ * @param srio_port SRIO port the device is on
+ * @param offset Offset in config space. This must be a multiple of 32 bits.
+ * @param result Result of the read. This will be unmodified on failure.
+ *
+ * @return Zero on success, negative on failure.
+ */
+static int __cvmx_srio_local_read32(int srio_port, uint32_t offset, uint32_t *result)
+{
+ cvmx_sriox_maint_op_t maint_op;
+ cvmx_sriox_maint_rd_data_t maint_rd_data;
+ maint_op.u64 = 0;
+ maint_op.s.op = 0; /* Read */
+ maint_op.s.addr = offset;
+
+ /* Make sure SRIO isn't already busy */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SRIOX_MAINT_OP(srio_port), cvmx_sriox_maint_op_t, pending, ==, 0, CVMX_SRIO_CONFIG_TIMEOUT))
+ {
+ cvmx_dprintf("SRIO%d: Pending bit stuck before config read\n", srio_port);
+ return -1;
+ }
+
+ /* Issue the read to the hardware */
+ cvmx_write_csr(CVMX_SRIOX_MAINT_OP(srio_port), maint_op.u64);
+
+ /* Wait for the hardware to complete the operation */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SRIOX_MAINT_OP(srio_port), cvmx_sriox_maint_op_t, pending, ==, 0, CVMX_SRIO_CONFIG_TIMEOUT))
+ {
+ cvmx_dprintf("SRIO%d: Config read timeout\n", srio_port);
+ return -1;
+ }
+
+ /* Display an error and return if the operation failed to issue */
+ maint_op.u64 = cvmx_read_csr(CVMX_SRIOX_MAINT_OP(srio_port));
+ if (maint_op.s.fail)
+ {
+ cvmx_dprintf("SRIO%d: Config read addressing error (offset=0x%x)\n", srio_port, (unsigned int)offset);
+ return -1;
+ }
+
+ /* Wait for the read data to become valid */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SRIOX_MAINT_RD_DATA(srio_port), cvmx_sriox_maint_rd_data_t, valid, ==, 1, CVMX_SRIO_CONFIG_TIMEOUT))
+ {
+ cvmx_dprintf("SRIO%d: Config read data timeout\n", srio_port);
+ return -1;
+ }
+
+ /* Get the read data */
+ maint_rd_data.u64 = cvmx_read_csr(CVMX_SRIOX_MAINT_RD_DATA(srio_port));
+ *result = maint_rd_data.s.rd_data;
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Write 32bits to a local port
+ * @param srio_port SRIO port the device is on
+ * @param offset Offset in config space. This must be a multiple of 32 bits.
+ * @param data Data to write.
+ *
+ * @return Zero on success, negative on failure.
+ */
+static int __cvmx_srio_local_write32(int srio_port, uint32_t offset, uint32_t data)
+{
+ cvmx_sriox_maint_op_t maint_op;
+ maint_op.u64 = 0;
+ maint_op.s.wr_data = data;
+ maint_op.s.op = 1; /* Write */
+ maint_op.s.addr = offset;
+
+ /* Make sure SRIO isn't already busy */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SRIOX_MAINT_OP(srio_port), cvmx_sriox_maint_op_t, pending, ==, 0, CVMX_SRIO_CONFIG_TIMEOUT))
+ {
+ cvmx_dprintf("SRIO%d: Pending bit stuck before config write\n", srio_port);
+ return -1;
+ }
+
+ /* Issue the write to the hardware */
+ cvmx_write_csr(CVMX_SRIOX_MAINT_OP(srio_port), maint_op.u64);
+
+ /* Wait for the hardware to complete the operation */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SRIOX_MAINT_OP(srio_port), cvmx_sriox_maint_op_t, pending, ==, 0, CVMX_SRIO_CONFIG_TIMEOUT))
+ {
+ cvmx_dprintf("SRIO%d: Config write timeout\n", srio_port);
+ return -1;
+ }
+
+ /* Display an error and return if the operation failed to issue */
+ maint_op.u64 = cvmx_read_csr(CVMX_SRIOX_MAINT_OP(srio_port));
+ if (maint_op.s.fail)
+ {
+ cvmx_dprintf("SRIO%d: Config write addressing error (offset=0x%x)\n", srio_port, (unsigned int)offset);
+ return -1;
+ }
+ return 0;
+}
+
+
+/**
+ * Reset SRIO to link partner
+ *
+ * @param srio_port SRIO port to initialize
+ *
+ * @return Zero on success
+ */
+int cvmx_srio_link_rst(int srio_port)
+{
+ cvmx_sriomaintx_port_0_link_resp_t link_resp;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ return -1;
+
+ /* Generate a symbol reset to the link partner by writing 0x3. */
+ if (cvmx_srio_config_write32(srio_port, 0, -1, 0, 0,
+ CVMX_SRIOMAINTX_PORT_0_LINK_REQ(srio_port), 3))
+ return -1;
+
+ if (cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
+ CVMX_SRIOMAINTX_PORT_0_LINK_RESP(srio_port), &link_resp.u32))
+ return -1;
+
+ /* Poll until link partner has received the reset. */
+ while (link_resp.s.valid == 0)
+ {
+ //cvmx_dprintf("Waiting for Link Response\n");
+ if (cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
+ CVMX_SRIOMAINTX_PORT_0_LINK_RESP(srio_port), &link_resp.u32))
+ return -1;
+ }
+
+ /* Valid response, Asserting MAC reset */
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1);
+
+ cvmx_wait(10);
+
+ /* De-asserting MAC Reset */
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x0);
+
+ return 0;
+}
+
+/**
+ * Initialize a SRIO port for use.
+ *
+ * @param srio_port SRIO port to initialize
+ * @param flags Optional flags
+ *
+ * @return Zero on success
+ */
+int cvmx_srio_initialize(int srio_port, cvmx_srio_initialize_flags_t flags)
+{
+ cvmx_sriomaintx_port_lt_ctl_t port_lt_ctl;
+ cvmx_sriomaintx_port_rt_ctl_t port_rt_ctl;
+ cvmx_sriomaintx_port_0_ctl_t port_0_ctl;
+ cvmx_sriomaintx_core_enables_t core_enables;
+ cvmx_sriomaintx_port_gen_ctl_t port_gen_ctl;
+ cvmx_sriox_status_reg_t sriox_status_reg;
+ cvmx_mio_rst_ctlx_t mio_rst_ctl;
+ cvmx_sriox_imsg_vport_thr_t sriox_imsg_vport_thr;
+ cvmx_dpi_sli_prtx_cfg_t prt_cfg;
+ cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
+ cvmx_sli_mem_access_ctl_t sli_mem_access_ctl;
+ cvmx_sriomaintx_port_0_ctl2_t port_0_ctl2;
+
+ sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(srio_port));
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+ {
+ /* All SRIO ports are connected to QLM0 */
+ int status = cvmx_qlm_get_status(0);
+ if (status < 4 || status > 6)
+ {
+ cvmx_dprintf("SRIO%d: Initialization called on a port not in SRIO mode\n", srio_port);
+ return -1;
+ }
+ }
+ else if (!sriox_status_reg.s.srio)
+ {
+ cvmx_dprintf("SRIO%d: Initialization called on a port not in SRIO mode\n", srio_port);
+ return -1;
+ }
+
+ __cvmx_srio_state[srio_port].flags = flags;
+
+ /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be
+ programmed */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
+ {
+ if (srio_port)
+ {
+ cvmx_ciu_qlm1_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
+ }
+ else
+ {
+ cvmx_ciu_qlm0_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
+ }
+ }
+
+ /* Don't receive or drive reset signals for the SRIO QLM */
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+ {
+ /* The reset signals are available only for srio_port == 0. */
+ if (srio_port == 0 || (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2) && srio_port == 1))
+ {
+ cvmx_mio_rst_cntlx_t mio_rst_cntl;
+ mio_rst_cntl.u64 = cvmx_read_csr(CVMX_MIO_RST_CNTLX(srio_port));
+ mio_rst_cntl.s.rst_drv = 0;
+ mio_rst_cntl.s.rst_rcv = 0;
+ mio_rst_cntl.s.rst_chip = 0;
+ cvmx_write_csr(CVMX_MIO_RST_CNTLX(srio_port), mio_rst_cntl.u64);
+ }
+ /* MIO_RST_CNTL2<prtmode> is initialized to 0 on cold reset */
+ mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CNTLX(srio_port));
+ }
+ else
+ {
+ mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(srio_port));
+ mio_rst_ctl.s.rst_drv = 0;
+ mio_rst_ctl.s.rst_rcv = 0;
+ mio_rst_ctl.s.rst_chip = 0;
+ cvmx_write_csr(CVMX_MIO_RST_CTLX(srio_port), mio_rst_ctl.u64);
+
+ mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(srio_port));
+ }
+
+ cvmx_dprintf("SRIO%d: Port in %s mode\n", srio_port,
+ (mio_rst_ctl.s.prtmode) ? "host" : "endpoint");
+
+ /* Bring the port out of reset if necessary */
+ switch (srio_port)
+ {
+ case 0:
+ {
+ cvmx_ciu_soft_prst_t prst;
+ prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ if (prst.s.soft_prst)
+ {
+ prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, prst.u64);
+ /* Wait up to 250ms for the port to come out of reset */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SRIOX_STATUS_REG(srio_port), cvmx_sriox_status_reg_t, access, ==, 1, 250000))
+ return -1;
+ }
+ break;
+ }
+ case 1:
+ {
+ cvmx_ciu_soft_prst1_t prst;
+ prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ if (prst.s.soft_prst)
+ {
+ prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, prst.u64);
+ /* Wait up to 250ms for the port to come out of reset */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SRIOX_STATUS_REG(srio_port), cvmx_sriox_status_reg_t, access, ==, 1, 250000))
+ return -1;
+ }
+ break;
+ }
+ case 2:
+ {
+ cvmx_ciu_soft_prst2_t prst;
+ prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST2);
+ if (prst.s.soft_prst)
+ {
+ prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST2, prst.u64);
+ /* Wait up to 250ms for the port to come out of reset */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SRIOX_STATUS_REG(srio_port), cvmx_sriox_status_reg_t, access, ==, 1, 250000))
+ return -1;
+ }
+ break;
+ }
+ }
+
+ /* Disable the link while we make changes */
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_PORT_0_CTL(srio_port), &port_0_ctl.u32))
+ return -1;
+ port_0_ctl.s.o_enable = 0;
+ port_0_ctl.s.i_enable = 0;
+ port_0_ctl.s.prt_lock = 1;
+ port_0_ctl.s.disable = 0;
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_PORT_0_CTL(srio_port), port_0_ctl.u32))
+ return -1;
+
+ /* CN63XX Pass 2.0 and 2.1 errata G-15273 requires the QLM De-emphasis be
+ programmed when using a 156.25Mhz ref clock */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_1))
+ {
+ cvmx_mio_rst_boot_t mio_rst_boot;
+ cvmx_sriomaintx_lane_x_status_0_t lane_x_status;
+
+ /* Read the QLM config and speed pins */
+ mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_LANE_X_STATUS_0(0, srio_port), &lane_x_status.u32))
+ return -1;
+
+ if (srio_port)
+ {
+ cvmx_ciu_qlm1_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
+ switch (mio_rst_boot.cn63xx.qlm1_spd)
+ {
+ case 0x4: /* 1.25 Gbaud, 156.25MHz */
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 0x0;
+ ciu_qlm.s.txmargin = (lane_x_status.s.rx_type == 0) ? 0x11 : 0x1c; /* short or med/long */
+ break;
+ case 0xb: /* 5.0 Gbaud, 156.25MHz */
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = (lane_x_status.s.rx_type == 0) ? 0xa : 0xf; /* short or med/long */
+ ciu_qlm.s.txmargin = (lane_x_status.s.rx_type == 0) ? 0xf : 0x1a; /* short or med/long */
+ break;
+ }
+ cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
+ }
+ else
+ {
+ cvmx_ciu_qlm0_t ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
+ switch (mio_rst_boot.cn63xx.qlm0_spd)
+ {
+ case 0x4: /* 1.25 Gbaud, 156.25MHz */
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 0x0;
+ ciu_qlm.s.txmargin = (lane_x_status.s.rx_type == 0) ? 0x11 : 0x1c; /* short or med/long */
+ break;
+ case 0xb: /* 5.0 Gbaud, 156.25MHz */
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = (lane_x_status.s.rx_type == 0) ? 0xa : 0xf; /* short or med/long */
+ ciu_qlm.s.txmargin = (lane_x_status.s.rx_type == 0) ? 0xf : 0x1a; /* short or med/long */
+ break;
+ }
+ cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
+ }
+ }
+
+ /* Errata SRIO-14485: Link speed is reported incorrectly in CN63XX
+ pass 1.x */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ {
+ cvmx_sriomaintx_port_0_ctl2_t port_0_ctl2;
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_PORT_0_CTL2(srio_port), &port_0_ctl2.u32))
+ return -1;
+ if (port_0_ctl2.s.enb_500g)
+ {
+ port_0_ctl2.u32 = 0;
+ port_0_ctl2.s.enb_625g = 1;
+ }
+ else if (port_0_ctl2.s.enb_312g)
+ {
+ port_0_ctl2.u32 = 0;
+ port_0_ctl2.s.enb_500g = 1;
+ }
+ else if (port_0_ctl2.s.enb_250g)
+ {
+ port_0_ctl2.u32 = 0;
+ port_0_ctl2.s.enb_312g = 1;
+ }
+ else if (port_0_ctl2.s.enb_125g)
+ {
+ port_0_ctl2.u32 = 0;
+ port_0_ctl2.s.enb_250g = 1;
+ }
+ else
+ {
+ port_0_ctl2.u32 = 0;
+ port_0_ctl2.s.enb_125g = 1;
+ }
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_PORT_0_CTL2(srio_port), port_0_ctl2.u32))
+ return -1;
+ }
+
+ /* Errata SRIO-15351: Turn off SRIOMAINTX_MAC_CTRL[TYPE_MRG] as it may
+ cause packet ACCEPT to be lost */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0) || OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_1))
+ {
+ cvmx_sriomaintx_mac_ctrl_t mac_ctrl;
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_MAC_CTRL(srio_port), &mac_ctrl.u32))
+ return -1;
+ mac_ctrl.s.type_mrg = 0;
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_MAC_CTRL(srio_port), mac_ctrl.u32))
+ return -1;
+ }
+
+ /* Set the link layer timeout to 1ms. The default is too high and causes
+ core bus errors */
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_PORT_LT_CTL(srio_port), &port_lt_ctl.u32))
+ return -1;
+ port_lt_ctl.s.timeout = 1000000 / 200; /* 1ms = 1000000ns / 200ns */
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_PORT_LT_CTL(srio_port), port_lt_ctl.u32))
+ return -1;
+
+ /* Set the logical layer timeout to 100ms. The default is too high and causes
+ core bus errors */
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_PORT_RT_CTL(srio_port), &port_rt_ctl.u32))
+ return -1;
+ port_rt_ctl.s.timeout = 100000000 / 200; /* 100ms = 100000000ns / 200ns */
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_PORT_RT_CTL(srio_port), port_rt_ctl.u32))
+ return -1;
+
+ /* Allow memory and doorbells. Messaging is enabled later */
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_CORE_ENABLES(srio_port), &core_enables.u32))
+ return -1;
+ core_enables.s.doorbell = 1;
+ core_enables.s.memory = 1;
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_CORE_ENABLES(srio_port), core_enables.u32))
+ return -1;
+
+ /* Allow us to master transactions */
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_PORT_GEN_CTL(srio_port), &port_gen_ctl.u32))
+ return -1;
+ port_gen_ctl.s.menable = 1;
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_PORT_GEN_CTL(srio_port), port_gen_ctl.u32))
+ return -1;
+
+ /* Set the MRRS and MPS for optimal SRIO performance */
+ prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(srio_port));
+ prt_cfg.s.mps = 1;
+ prt_cfg.s.mrrs = 1;
+ prt_cfg.s.molr = 32;
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+ prt_cfg.s.molr = ((prt_cfg.s.qlm_cfg == 1 || prt_cfg.s.qlm_cfg == 3) ? 8
+ : (prt_cfg.s.qlm_cfg == 4 || prt_cfg.s.qlm_cfg == 6) ? 16
+ : 32);
+ cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(srio_port), prt_cfg.u64);
+
+ sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(srio_port));
+ sli_s2m_portx_ctl.s.mrrs = 1;
+ cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(srio_port), sli_s2m_portx_ctl.u64);
+
+ /* Setup RX messaging thresholds */
+ sriox_imsg_vport_thr.u64 = cvmx_read_csr(CVMX_SRIOX_IMSG_VPORT_THR(srio_port));
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+ sriox_imsg_vport_thr.s.max_tot = ((prt_cfg.s.qlm_cfg == 1 || prt_cfg.s.qlm_cfg == 3) ? 44 : 46);
+ else
+ sriox_imsg_vport_thr.s.max_tot = 48;
+ sriox_imsg_vport_thr.s.max_s1 = 24;
+ sriox_imsg_vport_thr.s.max_s0 = 24;
+ sriox_imsg_vport_thr.s.sp_vport = 1;
+ sriox_imsg_vport_thr.s.buf_thr = 4;
+ sriox_imsg_vport_thr.s.max_p1 = 12;
+ sriox_imsg_vport_thr.s.max_p0 = 12;
+ cvmx_write_csr(CVMX_SRIOX_IMSG_VPORT_THR(srio_port), sriox_imsg_vport_thr.u64);
+
+ /* Setup RX messaging thresholds for other virtual ports. */
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX))
+ {
+ cvmx_sriox_imsg_vport_thr2_t sriox_imsg_vport_thr2;
+ sriox_imsg_vport_thr2.u64 = cvmx_read_csr(CVMX_SRIOX_IMSG_VPORT_THR2(srio_port));
+ sriox_imsg_vport_thr2.s.max_s2 = 24;
+ sriox_imsg_vport_thr2.s.max_s3 = 24;
+ cvmx_write_csr(CVMX_SRIOX_IMSG_VPORT_THR2(srio_port), sriox_imsg_vport_thr2.u64);
+ }
+
+ /* Errata SRIO-X: SRIO error behavior may not be optimal in CN63XX pass 1.x */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ {
+ cvmx_sriox_tx_ctrl_t sriox_tx_ctrl;
+ sriox_tx_ctrl.u64 = cvmx_read_csr(CVMX_SRIOX_TX_CTRL(srio_port));
+ sriox_tx_ctrl.s.tag_th2 = 2;
+ sriox_tx_ctrl.s.tag_th1 = 3;
+ sriox_tx_ctrl.s.tag_th0 = 4;
+ cvmx_write_csr(CVMX_SRIOX_TX_CTRL(srio_port), sriox_tx_ctrl.u64);
+ }
+
+ /* Errata SLI-15954: SLI relaxed order issues */
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X))
+ {
+ cvmx_sli_ctl_portx_t sli_ctl_portx;
+ sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(srio_port));
+ sli_ctl_portx.s.ptlp_ro = 1; /* Set to same value for all MACs. */
+ sli_ctl_portx.s.ctlp_ro = 1; /* Set to same value for all MACs. */
+ sli_ctl_portx.s.wait_com = 0; /* So that no inbound stores wait for a commit */
+ sli_ctl_portx.s.waitl_com = 0;
+ cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(srio_port), sli_ctl_portx.u64);
+ }
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ {
+ /* Clear the ACK state */
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_PORT_0_LOCAL_ACKID(srio_port), 0))
+ return -1;
+ }
+
+ /* Bring the link down, then up, by writing to the SRIO port's
+ PORT_0_CTL2 CSR. */
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_PORT_0_CTL2(srio_port), &port_0_ctl2.u32))
+ return -1;
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_PORT_0_CTL2(srio_port), port_0_ctl2.u32))
+ return -1;
+
+ /* Clear any pending interrupts */
+ cvmx_write_csr(CVMX_SRIOX_INT_REG(srio_port), cvmx_read_csr(CVMX_SRIOX_INT_REG(srio_port)));
+
+ /* Enable error reporting */
+#if (!defined(CVMX_BUILD_FOR_LINUX_HOST) && !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)) || defined(CONFIG_CAVIUM_DECODE_RSL)
+ cvmx_error_enable_group(CVMX_ERROR_GROUP_SRIO, srio_port);
+#endif
+
+ /* Finally enable the link */
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_PORT_0_CTL(srio_port), &port_0_ctl.u32))
+ return -1;
+ port_0_ctl.s.o_enable = 1;
+ port_0_ctl.s.i_enable = 1;
+ port_0_ctl.s.disable = 0;
+ port_0_ctl.s.prt_lock = 0;
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_PORT_0_CTL(srio_port), port_0_ctl.u32))
+ return -1;
+
+ /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
+ sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
+ sli_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
+ sli_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
+ cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
+
+ /* FIXME: Disable sending a link request when the SRIO link is
+ brought up. For unknown reasons this code causes issues with some SRIO
+ devices. As we currently don't support hotplug in software, this code
+ should never be needed. Without link down/up events, the ACKs should
+ start off and stay synchronized */
+#if 0
+ /* Ask for a link and align our ACK state. CN63XXp1 didn't support this */
+ if (!OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ {
+ uint64_t stop_cycle;
+ cvmx_sriomaintx_port_0_err_stat_t sriomaintx_port_0_err_stat;
+
+ /* Clear the SLI_CTL_PORTX[DIS_PORT] bit to re-enable traffic-flow
+ to the SRIO MACs. */
+ cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(srio_port), cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(srio_port)));
+
+ /* Wait a little to see if the link comes up */
+ stop_cycle = cvmx_clock_get_rate(CVMX_CLOCK_CORE)/4 + cvmx_clock_get_count(CVMX_CLOCK_CORE);
+ do
+ {
+ /* Read the port link status */
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_PORT_0_ERR_STAT(srio_port), &sriomaintx_port_0_err_stat.u32))
+ return -1;
+ } while (!sriomaintx_port_0_err_stat.s.pt_ok && (cvmx_clock_get_count(CVMX_CLOCK_CORE) < stop_cycle));
+
+ /* Send link request if link is up */
+ if (sriomaintx_port_0_err_stat.s.pt_ok)
+ {
+ cvmx_sriomaintx_port_0_link_req_t link_req;
+ cvmx_sriomaintx_port_0_link_resp_t link_resp;
+ link_req.u32 = 0;
+ link_req.s.cmd = 4;
+
+ /* Send the request */
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_PORT_0_LINK_REQ(srio_port), link_req.u32))
+ return -1;
+
+ /* Wait for the response */
+ stop_cycle = cvmx_clock_get_rate(CVMX_CLOCK_CORE)/8 + cvmx_clock_get_count(CVMX_CLOCK_CORE);
+ do
+ {
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_PORT_0_LINK_RESP(srio_port), &link_resp.u32))
+ return -1;
+ } while (!link_resp.s.valid && (cvmx_clock_get_count(CVMX_CLOCK_CORE) < stop_cycle));
+
+ /* Set our ACK state if we got a response */
+ if (link_resp.s.valid)
+ {
+ cvmx_sriomaintx_port_0_local_ackid_t local_ackid;
+ local_ackid.u32 = 0;
+ local_ackid.s.i_ackid = 0;
+ local_ackid.s.e_ackid = link_resp.s.ackid;
+ local_ackid.s.o_ackid = link_resp.s.ackid;
+ if (__cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_PORT_0_LOCAL_ACKID(srio_port), local_ackid.u32))
+ return -1;
+ }
+ else
+ return -1;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+
+/**
+ * Read 32bits from a Device's config space
+ *
+ * @param srio_port SRIO port the device is on
+ * @param srcid_index
+ * Which SRIO source ID to use. 0 = Primary, 1 = Secondary
+ * @param destid RapidIO device ID, or -1 for the local Octeon.
+ * @param is16bit Non zero if the transactions should use 16bit device IDs. Zero
+ * if transactions should use 8bit device IDs.
+ * @param hopcount Number of hops to the remote device. Use 0 for the local Octeon.
+ * @param offset Offset in config space. This must be a multiple of 32 bits.
+ * @param result Result of the read. This will be unmodified on failure.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_srio_config_read32(int srio_port, int srcid_index, int destid,
+ int is16bit, uint8_t hopcount, uint32_t offset,
+ uint32_t *result)
+{
+ if (destid == -1)
+ {
+ int status = __cvmx_srio_local_read32(srio_port, offset, result);
+
+ if ((status == 0) && (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG))
+ cvmx_dprintf("SRIO%d: Local read [0x%06x] <= 0x%08x\n", srio_port, (unsigned int)offset, (unsigned int)*result);
+
+ return status;
+ }
+ else
+ {
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ {
+ int return_code;
+ uint32_t pkt = 0;
+ uint32_t sourceid;
+ uint64_t stop_cycle;
+ char rx_buffer[64];
+
+ /* Tell the user */
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("SRIO%d: Remote read [id=0x%04x hop=%3d offset=0x%06x] <= ", srio_port, destid, hopcount, (unsigned int)offset);
+
+ /* Read the proper source ID */
+ if (srcid_index)
+ __cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_SEC_DEV_ID(srio_port), &sourceid);
+ else
+ __cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_PRI_DEV_ID(srio_port), &sourceid);
+
+ if (is16bit)
+ {
+ /* Use the 16bit source ID */
+ sourceid &= 0xffff;
+
+ /* MAINT Reads are 11 bytes */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_CTRL(srio_port), 11<<16);
+
+ pkt |= CVMX_SRIO_CONFIG_PRIORITY << 30; /* priority [31:30] */
+ pkt |= 1 << 28; /* tt [29:28] */
+ pkt |= 0x8 << 24; /* ftype [27:24] */
+ pkt |= destid << 8; /* destID [23:8] */
+ pkt |= sourceid >> 8; /* sourceID [7:0] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = 0;
+ pkt |= sourceid << 24; /* sourceID [31:24] */
+ pkt |= 0 << 20; /* transaction [23:20] */
+ pkt |= 8 << 16; /* rdsize [19:16] */
+ pkt |= 0xc0 << 8; /* srcTID [15:8] */
+ pkt |= hopcount; /* hopcount [7:0] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = 0;
+ pkt |= offset << 8; /* offset [31:11, wdptr[10], reserved[9:8] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ }
+ else
+ {
+ /* Use the 8bit source ID */
+ sourceid = (sourceid >> 16) & 0xff;
+
+ /* MAINT Reads are 9 bytes */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_CTRL(srio_port), 9<<16);
+
+ pkt |= CVMX_SRIO_CONFIG_PRIORITY << 30; /* priority [31:30] */
+ pkt |= 0 << 28; /* tt [29:28] */
+ pkt |= 0x8 << 24; /* ftype [27:24] */
+ pkt |= destid << 16; /* destID [23:16] */
+ pkt |= sourceid << 8; /* sourceID [15:8] */
+ pkt |= 0 << 4; /* transaction [7:4] */
+ pkt |= 8 << 0; /* rdsize [3:0] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = 0;
+ pkt |= 0xc0 << 24; /* srcTID [31:24] */
+ pkt |= hopcount << 16; /* hopcount [23:16] */
+ pkt |= offset >> 8; /* offset [15:0] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = 0;
+ pkt |= offset << 24; /* offset [31:27, wdptr[26], reserved[25:24] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ }
+
+ stop_cycle = cvmx_clock_get_rate(CVMX_CLOCK_CORE)/10 + cvmx_clock_get_count(CVMX_CLOCK_CORE);
+ do
+ {
+ return_code = cvmx_srio_receive_spf(srio_port, rx_buffer, sizeof(rx_buffer));
+ if ((return_code == 0) && (cvmx_clock_get_count(CVMX_CLOCK_CORE) > stop_cycle))
+ {
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("timeout\n");
+ return_code = -1;
+ }
+ } while (return_code == 0);
+
+ /* A valid read response is 23 bytes with 16bit IDs, 19 with 8bit IDs.
+ The payload offset below depends on the header size and which word
+ (wdptr) of the doubleword was requested */
+ if (return_code == ((is16bit) ? 23 : 19))
+ {
+ if (is16bit)
+ {
+ if (offset & 4)
+ *result = *(uint32_t*)(rx_buffer + 15);
+ else
+ *result = *(uint32_t*)(rx_buffer + 11);
+ }
+ else
+ {
+ if (offset & 4)
+ *result = *(uint32_t*)(rx_buffer + 13);
+ else
+ *result = *(uint32_t*)(rx_buffer + 9);
+ }
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("0x%08x\n", (unsigned int)*result);
+ return_code = 0;
+ }
+ else
+ {
+ *result = 0xffffffff;
+ return_code = -1;
+ }
+
+ return return_code;
+ }
+ else
+ {
+#if !defined(CVMX_BUILD_FOR_LINUX_HOST)
+ uint64_t physical;
+ physical = cvmx_srio_physical_map(srio_port,
+ CVMX_SRIO_WRITE_MODE_MAINTENANCE, CVMX_SRIO_CONFIG_PRIORITY,
+ CVMX_SRIO_READ_MODE_MAINTENANCE, CVMX_SRIO_CONFIG_PRIORITY,
+ srcid_index, destid, is16bit, offset + (hopcount<<24), 4);
+ if (!physical)
+ return -1;
+
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("SRIO%d: Remote read [id=0x%04x hop=%3d offset=0x%06x] <= ", srio_port, destid, hopcount, (unsigned int)offset);
+
+ /* Finally do the maintenance read to complete the config request */
+ *result = cvmx_read64_uint32(CVMX_ADD_IO_SEG(physical));
+ cvmx_srio_physical_unmap(physical, 4);
+
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("0x%08x\n", (unsigned int)*result);
+
+ return 0;
+#else
+ return -1;
+#endif
+ }
+ }
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_srio_config_read32);
+#endif
+
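A minimal usage sketch for cvmx_srio_config_read32() above (editorial illustration, not part of the committed source). The port, destination ID, and hopcount are assumptions; offset 0x00 is the standard RapidIO Device Identity CAR.

    /* Probe the device one hop out on SRIO port 0 using 8bit IDs and the
       primary source ID; 0xff is the conventional default destination ID */
    uint32_t dev_identity;
    if (cvmx_srio_config_read32(0, 0, 0xff, 0, 1, 0x00, &dev_identity) == 0)
        cvmx_dprintf("Device identity CAR: 0x%08x\n", (unsigned int)dev_identity);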
+
+/**
+ * Write 32bits to a Device's config space
+ *
+ * @param srio_port SRIO port the device is on
+ * @param srcid_index
+ * Which SRIO source ID to use. 0 = Primary, 1 = Secondary
+ * @param destid RapidIO device ID, or -1 for the local Octeon.
+ * @param is16bit Non zero if the transactions should use 16bit device IDs. Zero
+ * if transactions should use 8bit device IDs.
+ * @param hopcount Number of hops to the remote device. Use 0 for the local Octeon.
+ * @param offset Offset in config space. This must be a multiple of 32 bits.
+ * @param data Data to write.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_srio_config_write32(int srio_port, int srcid_index, int destid,
+ int is16bit, uint8_t hopcount, uint32_t offset,
+ uint32_t data)
+{
+ if (destid == -1)
+ {
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("SRIO%d: Local write[0x%06x] => 0x%08x\n", srio_port, (unsigned int)offset, (unsigned int)data);
+
+ return __cvmx_srio_local_write32(srio_port, offset, data);
+ }
+ else
+ {
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ {
+ int return_code;
+ uint32_t pkt = 0;
+ uint32_t sourceid;
+ uint64_t stop_cycle;
+ char rx_buffer[64];
+
+ /* Tell the user */
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("SRIO%d: Remote write[id=0x%04x hop=%3d offset=0x%06x] => 0x%08x\n", srio_port, destid, hopcount, (unsigned int)offset, (unsigned int)data);
+
+ /* Read the proper source ID */
+ if (srcid_index)
+ __cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_SEC_DEV_ID(srio_port), &sourceid);
+ else
+ __cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_PRI_DEV_ID(srio_port), &sourceid);
+
+ if (is16bit)
+ {
+ /* Use the 16bit source ID */
+ sourceid &= 0xffff;
+
+ /* MAINT Writes are 19 bytes */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_CTRL(srio_port), 19<<16);
+
+ pkt |= CVMX_SRIO_CONFIG_PRIORITY << 30; /* priority [31:30] */
+ pkt |= 1 << 28; /* tt [29:28] */
+ pkt |= 0x8 << 24; /* ftype [27:24] */
+ pkt |= destid << 8; /* destID [23:8] */
+ pkt |= sourceid >> 8; /* sourceID [7:0] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = 0;
+ pkt |= sourceid << 24; /* sourceID [31:24] */
+ pkt |= 1 << 20; /* transaction [23:20] */
+ pkt |= 8 << 16; /* wrsize [19:16] */
+ pkt |= 0xc0 << 8; /* srcTID [15:8] */
+ pkt |= hopcount; /* hopcount [7:0] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = 0;
+ pkt |= offset << 8; /* offset [31:11], wdptr [10], reserved [9:8] */
+ if ((offset & 4) == 0)
+ pkt |= 0xff & (data >> 24); /* data [7:0] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ if (offset & 4)
+ {
+ pkt = 0xff & (data >> 24);
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = data << 8;
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ }
+ else
+ {
+ pkt = data << 8;
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), 0);
+ }
+ }
+ else
+ {
+ /* Use the 8bit source ID */
+ sourceid = (sourceid >> 16) & 0xff;
+
+ /* MAINT Writes are 17 bytes */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_CTRL(srio_port), 17<<16);
+
+ pkt |= CVMX_SRIO_CONFIG_PRIORITY << 30; /* priority [31:30] */
+ pkt |= 0 << 28; /* tt [29:28] */
+ pkt |= 0x8 << 24; /* ftype [27:24] */
+ pkt |= destid << 16; /* destID [23:16] */
+ pkt |= sourceid << 8; /* sourceID [15:8] */
+ pkt |= 1 << 4; /* transaction [7:4] */
+ pkt |= 8 << 0; /* wrsize [3:0] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = 0;
+ pkt |= 0xc0 << 24; /* srcTID [31:24] */
+ pkt |= hopcount << 16; /* hopcount [23:16] */
+ pkt |= offset >> 8; /* offset [15:0] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = 0;
+ pkt |= offset << 24; /* offset [31:27], wdptr [26], reserved [25:24] */
+ if (offset & 4)
+ {
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = data >> 8;
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = data << 24;
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ }
+ else
+ {
+ pkt |= data >> 8; /* data [23:0] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ pkt = data << 24; /* data [31:24] */
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), pkt);
+ __cvmx_srio_local_write32(srio_port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(srio_port), 0);
+ }
+ }
+
+ stop_cycle = cvmx_clock_get_rate(CVMX_CLOCK_CORE)/10 + cvmx_clock_get_count(CVMX_CLOCK_CORE);
+ do
+ {
+ return_code = cvmx_srio_receive_spf(srio_port, rx_buffer, sizeof(rx_buffer));
+ if ((return_code == 0) && (cvmx_clock_get_count(CVMX_CLOCK_CORE) > stop_cycle))
+ {
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("timeout\n");
+ return_code = -1;
+ }
+ } while (return_code == 0);
+
+ /* A valid write response is 15 bytes with 16bit IDs, 11 with 8bit IDs */
+ if (return_code == ((is16bit) ? 15 : 11))
+ return_code = 0;
+ else
+ {
+ cvmx_dprintf("SRIO%d: Remote write failed\n", srio_port);
+ return_code = -1;
+ }
+
+ return return_code;
+ }
+ else
+ {
+#if !defined(CVMX_BUILD_FOR_LINUX_HOST)
+ uint64_t physical = cvmx_srio_physical_map(srio_port,
+ CVMX_SRIO_WRITE_MODE_MAINTENANCE, CVMX_SRIO_CONFIG_PRIORITY,
+ CVMX_SRIO_READ_MODE_MAINTENANCE, CVMX_SRIO_CONFIG_PRIORITY,
+ srcid_index, destid, is16bit, offset + (hopcount<<24), 4);
+ if (!physical)
+ return -1;
+
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("SRIO%d: Remote write[id=0x%04x hop=%3d offset=0x%06x] => 0x%08x\n", srio_port, destid, hopcount, (unsigned int)offset, (unsigned int)data);
+
+ /* Finally do the maintenance write to complete the config request */
+ cvmx_write64_uint32(CVMX_ADD_IO_SEG(physical), data);
+ return cvmx_srio_physical_unmap(physical, 4);
+#else
+ return -1;
+#endif
+ }
+ }
+}
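The write path pairs with the read above for read-modify-write of remote config registers. A hedged sketch follows; the offset 0x60 is assumed here to be the RapidIO Base Device ID CSR, and the field position is likewise an assumption.

    /* Assign an assumed 8bit base device ID of 0x05 to the remote device */
    uint32_t csr;
    if (cvmx_srio_config_read32(0, 0, 0xff, 0, 1, 0x60, &csr) == 0)
    {
        csr = (csr & ~0x00ff0000) | (0x05 << 16); /* assumed base ID field */
        cvmx_srio_config_write32(0, 0, 0xff, 0, 1, 0x60, csr);
    }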
+
+
+/**
+ * Send a RapidIO doorbell to a remote device
+ *
+ * @param srio_port SRIO port the device is on
+ * @param srcid_index
+ * Which SRIO source ID to use. 0 = Primary, 1 = Secondary
+ * @param destid RapidIO device ID.
+ * @param is16bit Non zero if the transactions should use 16bit device IDs. Zero
+ * if transactions should use 8bit device IDs.
+ * @param priority Doorbell priority (0-3)
+ * @param data Data for doorbell.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_srio_send_doorbell(int srio_port, int srcid_index, int destid, int is16bit, int priority, uint16_t data)
+{
+ cvmx_sriox_tx_bell_t tx_bell;
+ tx_bell.u64 = 0;
+ tx_bell.s.data = data;
+ tx_bell.s.dest_id = destid;
+ tx_bell.s.src_id = srcid_index;
+ tx_bell.s.id16 = !!is16bit;
+ tx_bell.s.priority = priority;
+
+ /* Make sure the previous doorbell has completed */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_SRIOX_TX_BELL(srio_port), cvmx_sriox_tx_bell_t, pending, ==, 0, CVMX_SRIO_DOORBELL_TIMEOUT))
+ {
+ cvmx_dprintf("SRIO%d: Pending bit stuck before doorbell\n", srio_port);
+ return -1;
+ }
+
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("SRIO%d: Send doorbell destid=0x%x, priority=%d, data=0x%x\n", srio_port, destid, priority, 0xffff & data);
+
+ /* Send the doorbell. We don't wait for it to complete. The next doorbell
+ may stall on the pending bit, but this lets the caller do other work
+ while the doorbell is processed */
+ cvmx_write_csr(CVMX_SRIOX_TX_BELL(srio_port), tx_bell.u64);
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_srio_send_doorbell);
+#endif
+
+/**
+ * Get the status of the last doorbell sent. If the doorbell
+ * hardware is done, then the status is cleared to get ready for
+ * the next doorbell (or retry).
+ *
+ * @param srio_port SRIO port to check doorbell on
+ *
+ * @return Doorbell status
+ */
+cvmx_srio_doorbell_status_t cvmx_srio_send_doorbell_status(int srio_port)
+{
+ cvmx_sriox_tx_bell_t tx_bell;
+ cvmx_sriox_tx_bell_info_t tx_bell_info;
+ cvmx_sriox_int_reg_t int_reg;
+ cvmx_sriox_int_reg_t int_reg_clear;
+
+ /* Return busy if the doorbell is still processing */
+ tx_bell.u64 = cvmx_read_csr(CVMX_SRIOX_TX_BELL(srio_port));
+ if (tx_bell.s.pending)
+ return CVMX_SRIO_DOORBELL_BUSY;
+
+ /* Read and clear the TX doorbell interrupts */
+ int_reg.u64 = cvmx_read_csr(CVMX_SRIOX_INT_REG(srio_port));
+ int_reg_clear.u64 = 0;
+ int_reg_clear.s.bell_err = int_reg.s.bell_err;
+ int_reg_clear.s.txbell = int_reg.s.txbell;
+ cvmx_write_csr(CVMX_SRIOX_INT_REG(srio_port), int_reg_clear.u64);
+
+ /* Check for errors */
+ if (int_reg.s.bell_err)
+ {
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("SRIO%d: Send doorbell failed\n", srio_port);
+ tx_bell_info.u64 = cvmx_read_csr(CVMX_SRIOX_TX_BELL_INFO(srio_port));
+ if (tx_bell_info.s.timeout)
+ return CVMX_SRIO_DOORBELL_TMOUT;
+ if (tx_bell_info.s.error)
+ return CVMX_SRIO_DOORBELL_ERROR;
+ if (tx_bell_info.s.retry)
+ return CVMX_SRIO_DOORBELL_RETRY;
+ }
+
+ /* Check if we're done */
+ if (int_reg.s.txbell)
+ return CVMX_SRIO_DOORBELL_DONE;
+
+ /* No doorbell found */
+ return CVMX_SRIO_DOORBELL_NONE;
+}
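Together, cvmx_srio_send_doorbell() and cvmx_srio_send_doorbell_status() suggest the polling pattern sketched below. The port, destination ID, and payload values are illustrative assumptions.

    cvmx_srio_doorbell_status_t status;
    if (cvmx_srio_send_doorbell(0, 0, 0x5, 0, 0, 0x1234) == 0)
    {
        /* Wait for the hardware to finish processing the doorbell */
        do
        {
            status = cvmx_srio_send_doorbell_status(0);
        } while (status == CVMX_SRIO_DOORBELL_BUSY);
        if (status == CVMX_SRIO_DOORBELL_RETRY)
            cvmx_srio_send_doorbell(0, 0, 0x5, 0, 0, 0x1234); /* resend once */
    }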
+
+
+/**
+ * Read a received doorbell and report data about it.
+ *
+ * @param srio_port SRIO port to check for the received doorbell
+ * @param destid_index
+ * Which Octeon destination ID was the doorbell for
+ * @param sequence_num
+ * Sequence number of doorbell (32bits)
+ * @param srcid RapidIO source ID of the doorbell sender
+ * @param priority Priority of the doorbell (0-3)
+ * @param is16bit Set to non zero if the doorbell used 16bit device IDs,
+ * zero if it used 8bit device IDs.
+ * @param data Data in the doorbell (16 bits)
+ *
+ * @return Doorbell status. Either DONE, NONE, or ERROR.
+ */
+cvmx_srio_doorbell_status_t cvmx_srio_receive_doorbell(int srio_port,
+ int *destid_index, uint32_t *sequence_num, int *srcid, int *priority,
+ int *is16bit, uint16_t *data)
+{
+ cvmx_sriox_rx_bell_seq_t rx_bell_seq;
+ cvmx_sriox_rx_bell_t rx_bell;
+
+ /* Check if there are any pending doorbells */
+ rx_bell_seq.u64 = cvmx_read_csr(CVMX_SRIOX_RX_BELL_SEQ(srio_port));
+ if (!rx_bell_seq.s.count)
+ return CVMX_SRIO_DOORBELL_NONE;
+
+ /* Read the doorbell and write our return parameters */
+ rx_bell.u64 = cvmx_read_csr(CVMX_SRIOX_RX_BELL(srio_port));
+ *sequence_num = rx_bell_seq.s.seq;
+ *srcid = rx_bell.s.src_id;
+ *priority = rx_bell.s.priority;
+ *is16bit = rx_bell.s.id16;
+ *data = rx_bell.s.data;
+ *destid_index = rx_bell.s.dest_id;
+
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("SRIO%d: Receive doorbell sequence=0x%x, srcid=0x%x, priority=%d, data=0x%x\n",
+ srio_port, rx_bell_seq.s.seq, rx_bell.s.src_id, rx_bell.s.priority, rx_bell.s.data);
+
+ return CVMX_SRIO_DOORBELL_DONE;
+}
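A polling consumer for inbound doorbells might look like this sketch (SRIO port 0 assumed); the loop drains every doorbell currently queued.

    int destid_index, srcid, priority, is16bit;
    uint32_t seq;
    uint16_t bell_data;
    while (cvmx_srio_receive_doorbell(0, &destid_index, &seq, &srcid,
               &priority, &is16bit, &bell_data) == CVMX_SRIO_DOORBELL_DONE)
        cvmx_dprintf("Doorbell 0x%04x from 0x%x (seq %u)\n",
                     bell_data, srcid, (unsigned int)seq);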
+
+
+/**
+ * Receive a packet from the Soft Packet FIFO (SPF).
+ *
+ * @param srio_port SRIO port to read the packet from.
+ * @param buffer Buffer to receive the packet.
+ * @param buffer_length
+ * Length of the buffer in bytes.
+ *
+ * @return Returns the length of the packet read. Negative on failure.
+ * Zero if no packets are available.
+ */
+int cvmx_srio_receive_spf(int srio_port, void *buffer, int buffer_length)
+{
+ uint32_t *ptr = (uint32_t *)buffer;
+ cvmx_sriomaintx_ir_sp_rx_stat_t sriomaintx_ir_sp_rx_stat;
+
+ /* Read the SPF status */
+ if (__cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_IR_SP_RX_STAT(srio_port), &sriomaintx_ir_sp_rx_stat.u32))
+ return -1;
+
+ /* Return zero if there isn't a packet available */
+ if (sriomaintx_ir_sp_rx_stat.s.buffers < 1)
+ return 0;
+
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("SRIO%d: Soft packet FIFO received %d bytes", srio_port, sriomaintx_ir_sp_rx_stat.s.octets);
+
+ /* Return error if the packet is larger than our buffer */
+ if (sriomaintx_ir_sp_rx_stat.s.octets > buffer_length)
+ return -1;
+
+ /* Read out the packet four bytes at a time */
+ buffer_length = sriomaintx_ir_sp_rx_stat.s.octets;
+ while (buffer_length > 0)
+ {
+ __cvmx_srio_local_read32(srio_port, CVMX_SRIOMAINTX_IR_SP_RX_DATA(srio_port), ptr);
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf(" %08x", (unsigned int)*ptr);
+ ptr++;
+ buffer_length-=4;
+ }
+
+ if (__cvmx_srio_state[srio_port].flags & CVMX_SRIO_INITIALIZE_DEBUG)
+ cvmx_dprintf("\n");
+
+ /* Return the number of bytes in the buffer */
+ return sriomaintx_ir_sp_rx_stat.s.octets;
+}
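Because the return value distinguishes empty (zero) from failure (negative), a caller can drain the FIFO as in this sketch; the 64-byte buffer matches the size the driver itself uses for maintenance responses.

    char spf_pkt[64];
    int len;
    while ((len = cvmx_srio_receive_spf(0, spf_pkt, sizeof(spf_pkt))) > 0)
    {
        /* process len bytes of soft packet FIFO data */
    }
    if (len < 0)
        cvmx_dprintf("SRIO0: Soft packet FIFO read error\n");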
+
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+/**
+ * Map a remote device's memory region into Octeon's physical
+ * address area. The caller can then map this into a core using
+ * the TLB or XKPHYS.
+ *
+ * @param srio_port SRIO port to map the device on
+ * @param write_op Type of operation to perform on a write to the device.
+ * Normally should be CVMX_SRIO_WRITE_MODE_AUTO.
+ * @param write_priority
+ * SRIO priority of writes (0-3)
+ * @param read_op Type of operation to perform on reads to the device.
+ * Normally should be CVMX_SRIO_READ_MODE_NORMAL.
+ * @param read_priority
+ * SRIO priority of reads (0-3)
+ * @param srcid_index
+ * Which SRIO source ID to use. 0 = Primary, 1 = Secondary
+ * @param destid RapidIO device ID.
+ * @param is16bit Non zero if the transactions should use 16bit device IDs. Zero
+ * if transactions should use 8bit device IDs.
+ * @param base Device base address to start the mapping
+ * @param size Size of the mapping in bytes
+ *
+ * @return Octeon 64bit physical address that accesses the remote device,
+ * or zero on failure.
+ */
+uint64_t cvmx_srio_physical_map(int srio_port, cvmx_srio_write_mode_t write_op,
+ int write_priority, cvmx_srio_read_mode_t read_op, int read_priority,
+ int srcid_index, int destid, int is16bit, uint64_t base, uint64_t size)
+{
+ cvmx_sriox_s2m_typex_t needed_s2m_type;
+ cvmx_sli_mem_access_subidx_t needed_subid;
+ int s2m_index;
+ int subdid;
+ cvmx_sli_address_t sli_address;
+
+ /* We currently don't support mapping regions that span a 34 bit boundary.
+ Tracking the multiple regions needed to cross 34 bits adds complexity
+ that is unlikely to be needed */
+ if (((base+size-1)>>34) != (base>>34))
+ {
+ cvmx_dprintf("SRIO%d: Failed to map range 0x%llx-0x%llx spanning a 34bit boundary\n",
+ srio_port, (ULL)base, (ULL)base+size-1);
+ return 0;
+ }
+
+ /* Build the S2M_TYPE we are going to need */
+ needed_s2m_type.u64 = 0;
+ needed_s2m_type.s.wr_op = write_op;
+ needed_s2m_type.s.rd_op = read_op;
+ needed_s2m_type.s.wr_prior = write_priority;
+ needed_s2m_type.s.rd_prior = read_priority;
+ needed_s2m_type.s.src_id = srcid_index;
+ needed_s2m_type.s.id16 = !!is16bit;
+
+ /* Build the needed SubID config */
+ needed_subid.u64 = 0;
+ needed_subid.s.port = srio_port;
+ needed_subid.s.nmerge = 0;
+
+ /* FIXME: We might want to use the device ID swapping modes so the device
+ ID is part of the lower address bits. This would allow many more
+ devices to share S2M_TYPE indexes. This would require "base+size-1"
+ to fit in bits [17:0] or bits[25:0] for 8 bits of device ID */
+ if (base < (1ull<<34))
+ {
+ needed_subid.cn63xx.ba = destid;
+ needed_s2m_type.s.iaow_sel = 0;
+ }
+ else if (base < (1ull<<42))
+ {
+ needed_subid.cn63xx.ba = (base>>34) & 0xff;
+ needed_subid.cn63xx.ba |= ((uint64_t)destid & 0xff) << (42-34);
+ needed_subid.cn63xx.ba |= (((uint64_t)destid>>8) & 0xff) << (51-34);
+ needed_s2m_type.s.iaow_sel = 1;
+ }
+ else
+ {
+ if (destid>>8)
+ {
+ cvmx_dprintf("SRIO%d: Attempt to map 16bit device ID 0x%x using 66bit addressing\n", srio_port, destid);
+ return 0;
+ }
+ if (base>>50)
+ {
+ cvmx_dprintf("SRIO%d: Attempt to map address 0x%llx using 66bit addressing\n", srio_port, (ULL)base);
+ return 0;
+ }
+ needed_subid.cn63xx.ba = (base>>34) & 0xffff;
+ needed_subid.cn63xx.ba |= ((uint64_t)destid & 0xff) << (51-34);
+ needed_s2m_type.s.iaow_sel = 2;
+ }
+
+ /* Find a S2M_TYPE index to use. If this fails return 0 */
+ s2m_index = __cvmx_srio_alloc_s2m(srio_port, needed_s2m_type);
+ if (s2m_index == -1)
+ return 0;
+
+ /* Attach the SubID to the S2M_TYPE index */
+ needed_subid.s.rtype = s2m_index & 3;
+ needed_subid.s.wtype = s2m_index & 3;
+ needed_subid.cn63xx.ba |= (((uint64_t)s2m_index >> 2) & 1) << (50-34);
+ needed_subid.cn63xx.ba |= (((uint64_t)s2m_index >> 3) & 1) << (59-34);
+
+ /* Allocate a SubID for use */
+ subdid = __cvmx_srio_alloc_subid(needed_subid);
+ if (subdid == -1)
+ {
+ /* Free the s2m_index as we aren't using it */
+ __cvmx_srio_free_s2m(srio_port, s2m_index);
+ return 0;
+ }
+
+ /* Build the final core physical address */
+ sli_address.u64 = 0;
+ sli_address.mem.io = 1;
+ sli_address.mem.did = 3;
+ sli_address.mem.subdid = subdid>>2;
+ sli_address.mem.se = subdid & 3;
+ sli_address.mem.address = base; /* Bits[33:0] of full address */
+ return sli_address.u64;
+}
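An end-to-end sketch of the mapping API above; the destination ID, remote base address, and window size are assumptions, and the access uses the same cvmx_read64_uint32()/CVMX_ADD_IO_SEG() pattern the maintenance path uses.

    uint64_t phys = cvmx_srio_physical_map(0,
        CVMX_SRIO_WRITE_MODE_AUTO, 0,   /* write op and priority */
        CVMX_SRIO_READ_MODE_NORMAL, 0,  /* read op and priority */
        0, 0x5, 0,                      /* primary srcid, destid, 8bit IDs */
        0x10000000ull, 0x1000);         /* assumed remote base and size */
    if (phys)
    {
        uint32_t value = cvmx_read64_uint32(CVMX_ADD_IO_SEG(phys));
        cvmx_dprintf("Remote word: 0x%08x\n", (unsigned int)value);
        cvmx_srio_physical_unmap(phys, 0x1000);
    }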
+
+
+/**
+ * Unmap a physical address window created by cvmx_srio_phys_map().
+ *
+ * @param physical_address
+ * Physical address returned by cvmx_srio_phys_map().
+ * @param size Size used on original call.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_srio_physical_unmap(uint64_t physical_address, uint64_t size)
+{
+ cvmx_sli_mem_access_subidx_t subid;
+ int subdid = (physical_address >> 40) & 7;
+ int extender = (physical_address >> 34) & 3;
+ int mem_index = subdid * 4 + extender;
+ int read_s2m_type;
+
+ /* Get the subid setup so we can figure out where this mapping was for */
+ subid.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(mem_index));
+ /* Type[0] is mapped to the Relaxed Ordering
+ Type[1] is mapped to the No Snoop
+ Type[2] is mapped directly to bit 50 of the SLI address
+ Type[3] is mapped directly to bit 59 of the SLI address */
+ /* Extract each index bit before shifting it into position */
+ read_s2m_type = (((subid.cn63xx.ba>>(50-34)) & 1) << 2) | (((subid.cn63xx.ba>>(59-34)) & 1) << 3);
+ read_s2m_type |= subid.s.rtype;
+ __cvmx_srio_free_subid(mem_index);
+ __cvmx_srio_free_s2m(subid.s.port, read_s2m_type);
+ return 0;
+}
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+/**
+ * Fill out an outbound message descriptor.
+ *
+ * @param port pip/ipd port number
+ * @param buf_ptr pointer to a buffer pointer. the buffer pointer points
+ * to a chain of buffers that hold an outbound srio packet.
+ * the packet can take the format of (1) a pip/ipd inbound
+ * message or (2) an application-generated outbound message
+ * @param desc_ptr pointer to an outbound message descriptor. should be null
+ * if *buf_ptr is in the format (1)
+ *
+ * @return 0 on success; negative on failure.
+ */
+int cvmx_srio_omsg_desc (uint64_t port, cvmx_buf_ptr_t *buf_ptr,
+ cvmx_srio_tx_message_header_t *desc_ptr)
+{
+ int ret_val = -1;
+ int intf_num;
+ cvmx_helper_interface_mode_t imode;
+
+ uint64_t *desc_addr, *hdr_addr;
+ cvmx_srio_rx_message_header_t rx_msg_hdr;
+ cvmx_srio_tx_message_header_t *tx_msg_hdr_ptr;
+
+ if (buf_ptr == NULL)
+ return ret_val;
+
+ /* check if port is an srio port */
+ intf_num = cvmx_helper_get_interface_num (port);
+ imode = cvmx_helper_interface_get_mode (intf_num);
+ if (imode != CVMX_HELPER_INTERFACE_MODE_SRIO)
+ return ret_val;
+
+ /* app-generated outbound message. descriptor space pre-allocated */
+ if (desc_ptr != NULL)
+ {
+ desc_addr = (uint64_t *) cvmx_phys_to_ptr ((*buf_ptr).s.addr);
+ *desc_addr = *(uint64_t *) desc_ptr;
+ ret_val = 0;
+ return ret_val;
+ }
+
+ /* pip/ipd inbound message. 16-byte srio message header is present */
+ hdr_addr = (uint64_t *) cvmx_phys_to_ptr ((*buf_ptr).s.addr);
+ rx_msg_hdr.word0.u64 = *hdr_addr;
+
+ /* adjust buffer pointer to get rid of srio message header word 0 */
+ (*buf_ptr).s.addr += 8;
+ (*buf_ptr).s.size -= 8; /* last buffer or not */
+ if ((*buf_ptr).s.addr >> 7 > ((*buf_ptr).s.addr - 8) >> 7)
+ (*buf_ptr).s.back++;
+ tx_msg_hdr_ptr = (cvmx_srio_tx_message_header_t *)
+ cvmx_phys_to_ptr ((*buf_ptr).s.addr);
+
+ /* transfer values from rx to tx */
+ tx_msg_hdr_ptr->s.prio = rx_msg_hdr.word0.s.prio;
+ tx_msg_hdr_ptr->s.tt = rx_msg_hdr.word0.s.tt; /* called ID in the HRM */
+ tx_msg_hdr_ptr->s.sis = rx_msg_hdr.word0.s.dis;
+ tx_msg_hdr_ptr->s.ssize = rx_msg_hdr.word0.s.ssize;
+ tx_msg_hdr_ptr->s.did = rx_msg_hdr.word0.s.sid;
+ tx_msg_hdr_ptr->s.mbox = rx_msg_hdr.word0.s.mbox;
+
+ /* other values we have to decide */
+ tx_msg_hdr_ptr->s.xmbox = 0; /* must be zero for multi-segment messages */
+ tx_msg_hdr_ptr->s.letter = 0; /* fake like traffic gen */
+ tx_msg_hdr_ptr->s.lns = 0; /* not use sriox_omsg_ctrly[] */
+ tx_msg_hdr_ptr->s.intr = 1; /* get status */
+
+ ret_val = 0;
+ return ret_val;
+}
+#endif
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-srio.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-srio.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-srio.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-srio.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,554 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+/**
+ * @file
+ *
+ * Interface to SRIO
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+
+#ifndef __CVMX_SRIO_H__
+#define __CVMX_SRIO_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Enumeration of the type of operations that can be performed
+ * by a mapped write operation.
+ */
+typedef enum
+{
+ CVMX_SRIO_WRITE_MODE_NWRITE = 0, /**< Only create NWrite operations */
+ CVMX_SRIO_WRITE_MODE_NWRITE_RESP = 1, /**< Create NWrite with response */
+ CVMX_SRIO_WRITE_MODE_AUTO = 2, /**< Intelligently breaks writes into multiple transactions based on alignment */
+ CVMX_SRIO_WRITE_MODE_AUTO_RESP = 3, /**< CVMX_SRIO_WRITE_MODE_AUTO followed by a response */
+ CVMX_SRIO_WRITE_MODE_MAINTENANCE = 6, /**< Create a MAINTENANCE transaction. Use cvmx_srio_config_write32() instead */
+ CVMX_SRIO_WRITE_MODE_PORT = 7 /**< Port Write? */
+} cvmx_srio_write_mode_t;
+
+/**
+ * Enumeration of the type of operations that can be performed
+ * by a mapped read operation.
+ */
+typedef enum
+{
+ CVMX_SRIO_READ_MODE_NORMAL = 0, /**< Perform a normal read */
+ CVMX_SRIO_READ_MODE_ATOMIC_SET = 2, /**< Atomically sets bits in data on remote device */
+ CVMX_SRIO_READ_MODE_ATOMIC_CLEAR = 3, /**< Atomically clears bits in data on remote device */
+ CVMX_SRIO_READ_MODE_ATOMIC_INCREMENT = 4,/**< Atomically increments data on remote device */
+ CVMX_SRIO_READ_MODE_ATOMIC_DECREMENT = 5,/**< Atomically decrements data on remote device */
+ CVMX_SRIO_READ_MODE_MAINTENANCE = 6 /**< Create a MAINTENANCE transaction. Use cvmx_srio_config_read32() instead */
+} cvmx_srio_read_mode_t;
+
+/**
+ * Initialization flags for SRIO
+ */
+typedef enum
+{
+ CVMX_SRIO_INITIALIZE_DEBUG = 1,
+} cvmx_srio_initialize_flags_t;
+
+/**
+ * The possible results from a doorbell operation
+ */
+typedef enum
+{
+ CVMX_SRIO_DOORBELL_DONE, /**< The doorbell is complete */
+ CVMX_SRIO_DOORBELL_NONE, /**< There wasn't an outstanding doorbell */
+ CVMX_SRIO_DOORBELL_BUSY, /**< The doorbell is still processing */
+ CVMX_SRIO_DOORBELL_RETRY, /**< The doorbell needs to be retried */
+ CVMX_SRIO_DOORBELL_ERROR, /**< The doorbell failed with an error */
+ CVMX_SRIO_DOORBELL_TMOUT /**< The doorbell failed due to timeout */
+} cvmx_srio_doorbell_status_t;
+
+/**
+ * This structure represents the SRIO header received from SRIO on
+ * the top of every received message. This header passes through
+ * IPD/PIP unmodified.
+ */
+typedef struct
+{
+ union
+ {
+ uint64_t u64;
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prio : 2; /**< The sRIO prio (priority) field in the
+ first sRIO message segment received for the
+ message. */
+ uint64_t tt : 1; /**< When set, indicates that the first sRIO
+ message segment received for the message had
+ 16-bit source and destination ID's. When
+ clear, indicates 8-bit IDs were present. */
+ uint64_t dis : 1; /**< When set, indicates that the destination
+ ID in the first sRIO message segment received
+ for the message matched the 63xx's secondary
+ ID. When clear, indicates that the destination
+ ID in the first sRIO message segment
+ received for the message matched the 63xx's
+ primary ID. Note that the full destination
+ ID in the received sRIO message can be
+ determined via the combination of
+ WORD0[DIS] in the sRIO inbound message
+ header and WORD1[iprt] in the work queue
+ entry created by PIP/IPD. */
+ uint64_t ssize : 4; /**< The RIO ssize (standard message packet data
+ size) field used for the message. */
+ uint64_t sid : 16; /**< The source ID in the first sRIO message
+ segment received for the message. When TT is
+ clear, the most-significant 8 bits are zero. */
+ uint64_t xmbox : 4; /**< The RIO xmbox (recipient mailbox extension)
+ field in the first sRIO message segment
+ received for the message. Always zero for
+ multi-segment messages. */
+ uint64_t mbox : 2; /**< The RIO mbox (recipient mailbox) field in
+ the first sRIO message segment received for
+ the message. */
+ uint64_t letter : 2; /**< The RIO letter (slot within a mailbox)
+ field in the first sRIO message segment
+ received for the message. */
+ uint64_t seq : 32; /**< A sequence number. Whenever the OCTEON
+ 63xx sRIO hardware accepts the first sRIO
+ segment of either a message or doorbell, it
+ samples the current value of a counter
+ register and increments the counter
+ register. SEQ is the value sampled for the
+ message. The counter increments once per
+ message/doorbell. SEQ can be used to
+ determine the relative order of
+ packets/doorbells. Note that the SEQ-implied
+ order may differ from the order that the
+ WQE's are received by software for a number
+ of reasons, including the fact that the WQE
+ is not created until the end of the message,
+ while SEQ is sampled when the first segment arrives. */
+#else
+ uint64_t seq : 32;
+ uint64_t letter : 2;
+ uint64_t mbox : 2;
+ uint64_t xmbox : 4;
+ uint64_t sid : 16;
+ uint64_t ssize : 4;
+ uint64_t dis : 1;
+ uint64_t tt : 1;
+ uint64_t prio : 2;
+#endif
+ } s;
+ } word0;
+ union
+ {
+ uint64_t u64;
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t r : 1; /**< When set, WORD1[R]/PKT_INST_HDR[R] selects
+ either RAWFULL or RAWSCHED special PIP
+ instruction form. WORD1[R] may commonly be
+ set so that WORD1[QOS,GRP] will be directly
+ used by the PIP hardware. */
+ uint64_t reserved_62_58 : 5;
+ uint64_t pm : 2; /**< WORD1[PM]/PKT_INST_HDR[PM] selects the PIP
+ parse mode (uninterpreted, skip-to-L2,
+ skip-to-IP), and chooses between
+ RAWFULL/RAWSCHED when WORD1[R] is set. */
+ uint64_t reserved_55 : 1;
+ uint64_t sl : 7; /**< WORD1[SL]/PKT_INST_HDR[SL] selects the
+ skip II length. WORD1[SL] may typically be
+ set to 8 (or larger) so that PIP skips this
+ WORD1. */
+ uint64_t reserved_47_46 : 2;
+ uint64_t nqos : 1; /**< WORD1[NQOS] must not be set when WORD1[R]
+ is clear and PIP interprets WORD1 as a
+ PKT_INST_HDR. When set, WORD1[NQOS]/PKT_INST_HDR[NQOS]
+ prevents PIP from directly using
+ WORD1[QOS]/PKT_INST_HDR[QOS] for the QOS
+ value in the work queue entry created by
+ PIP. WORD1[NQOS] may commonly be clear so
+ that WORD1[QOS] will be directly used by the
+ PIP hardware. PKT_INST_HDR[NQOS] is new to
+ 63xx - this functionality did not exist in
+ prior OCTEONs. */
+ uint64_t ngrp : 1; /**< WORD1[NGRP] must not be set when WORD1[R]
+ is clear and PIP interprets WORD1 as a
+ PKT_INST_HDR. When set, WORD1[NGRP]/PKT_INST_HDR[NGRP]
+ prevents PIP from directly using
+ WORD1[GRP]/PKT_INST_HDR[GRP] for the GRP
+ value in the work queue entry created by
+ PIP. WORD1[NGRP] may commonly be clear so
+ that WORD1[GRP] will be directly used by the
+ PIP hardware. PKT_INST_HDR[NGRP] is new to
+ 63xx - this functionality did not exist in
+ prior OCTEONs. */
+ uint64_t ntt : 1; /**< WORD1[NTT] must not be set when WORD1[R]
+ is clear and PIP interprets WORD1 as a
+ PKT_INST_HDR. When set, WORD1[NTT]/PKT_INST_HDR[NTT]
+ prevents PIP from directly using
+ WORD1[TT]/PKT_INST_HDR[TT] for the TT value
+ in the work queue entry created by PIP.
+ PKT_INST_HDR[NTT] is new to 63xx - this
+ functionality did not exist in prior OCTEONs. */
+ uint64_t ntag : 1; /**< WORD1[NTAG] must not be set when WORD1[R]
+ is clear and PIP interprets WORD1 as a
+ PKT_INST_HDR. When set, WORD1[NTAG]/PKT_INST_HDR[NTAG]
+ prevents PIP from directly using
+ WORD1[TAG]/PKT_INST_HDR[TAG] for the TAG
+ value in the work queue entry created by PIP.
+ PKT_INST_HDR[NTAG] is new to 63xx - this
+ functionality did not exist in prior OCTEONs. */
+ uint64_t qos : 3; /**< Created by the hardware from an entry in a
+ 256-entry table. The 8-bit value
+ WORD0[PRIO,TT,DIS,MBOX,LETTER] selects the
+ table entry. When WORD1[R] is set and WORD1[NQOS]
+ is clear, WORD1[QOS] becomes the QOS value
+ in the work queue entry created by PIP. The
+ QOS value in the work queue entry determines
+ the priority that SSO/POW will schedule the
+ work, and can also control how/if the sRIO
+ message gets dropped by PIP/IPD. The 256-entry
+ table is unique to each sRIO core, but
+ shared by the two controllers associated
+ with the sRIO core. */
+ uint64_t grp : 4; /**< Created by the hardware from an entry in a
+ 256-entry table. The 8-bit value
+ WORD0[PRIO,TT,DIS,MBOX,LETTER] selects the
+ table entry. When WORD1[R] is set and WORD1[NGRP]
+ is clear, WORD1[GRP] becomes the GRP value
+ in the work queue entry created by PIP. The
+ GRP value in the work queue entry can direct
+ the work to particular cores or particular
+ groups of cores. The 256-entry table is
+ unique to each sRIO core, but shared by the
+ two controllers associated with the sRIO core. */
+ uint64_t rs : 1; /**< In some configurations, enables the sRIO
+ message to be buffered solely in the work
+ queue entry, and not otherwise in L2/DRAM. */
+ uint64_t tt : 2; /**< When WORD1[R] is set and WORD1[NTT] is
+ clear, WORD1[TT]/PKT_INST_HDR[TT] becomes
+ the TT value in the work queue entry created
+ by PIP. The TT and TAG values in the work
+ queue entry determine the scheduling/synchronization
+ constraints for the work (no constraints,
+ tag order, atomic tag order). */
+ uint64_t tag : 32; /**< Created by the hardware from a CSR
+ associated with the sRIO inbound message
+ controller. When WORD1[R] is set and WORD1[NTAG]
+ is clear, WORD1[TAG]/PKT_INST_HDR[TAG]
+ becomes the TAG value in the work queue
+ entry created by PIP. The TT and TAG values
+ in the work queue entry determine the
+ scheduling/synchronization constraints for
+ the work (no constraints, tag order, atomic
+ tag order). */
+#else
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t rs : 1;
+ uint64_t grp : 4;
+ uint64_t qos : 3;
+ uint64_t ntag : 1;
+ uint64_t ntt : 1;
+ uint64_t ngrp : 1;
+ uint64_t nqos : 1;
+ uint64_t reserved_47_46 : 2;
+ uint64_t sl : 7;
+ uint64_t reserved_55 : 1;
+ uint64_t pm : 2;
+ uint64_t reserved_62_58 : 5;
+ uint64_t r : 1;
+#endif
+ } s;
+ } word1;
+} cvmx_srio_rx_message_header_t;
+
+/**
+ * This structure represents the SRIO header required on the front
+ * of PKO packets destined for SRIO message queues.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prio : 2; /**< The sRIO prio (priority) field for all
+ segments in the message. */
+ uint64_t tt : 1; /**< When set, the sRIO message segments use a
+ 16-bit source and destination ID for all the
+ segments in the message. When clear, the
+ message segments use an 8-bit ID. */
+ uint64_t sis : 1; /**< When set, the sRIO message segments use the
+ 63xx's secondary ID as the source ID. When
+ clear, the sRIO message segments use the
+ primary ID as the source ID. */
+ uint64_t ssize : 4; /**< The RIO ssize (standard message segment
+ data size) field used for the message. */
+ uint64_t did : 16; /**< The destination ID in the sRIO message
+ segments of the message. When TT is clear,
+ the most-significant 8 bits must be zero. */
+ uint64_t xmbox : 4; /**< The RIO xmbox (recipient mailbox extension)
+ field in the sRIO message segment for a
+ single-segment message. Must be zero for
+ multi-segment messages. */
+ uint64_t mbox : 2; /**< The RIO mbox (recipient mailbox) field in
+ the sRIO message segments of the message. */
+ uint64_t letter : 2; /**< The RIO letter (slot within mailbox) field
+ in the sRIO message segments of the message
+ when LNS is clear. When LNS is set, this
+ LETTER field is not used and must be zero. */
+ uint64_t reserved_31_2 : 30;
+ uint64_t lns : 1; /**< When set, the outbound message controller
+ will dynamically select an sRIO letter
+ field for the message (based on LETTER_SP or
+ LETTER_MP - see appendix A), and the LETTER
+ field in this sRIO outbound message
+ descriptor is unused. When clear, the LETTER
+ field in this sRIO outbound message
+ descriptor selects the sRIO letter used for
+ the message. */
+ uint64_t intr : 1; /**< When set, the outbound message controller
+ will set an interrupt bit after all sRIO
+ segments of the message receive a message
+ DONE response. If the message transfer has
+ errors, the interrupt bit is not set (but
+ others are). */
+#else
+ uint64_t intr : 1;
+ uint64_t lns : 1;
+ uint64_t reserved_31_2 : 30;
+ uint64_t letter : 2;
+ uint64_t mbox : 2;
+ uint64_t xmbox : 4;
+ uint64_t did : 16;
+ uint64_t ssize : 4;
+ uint64_t sis : 1;
+ uint64_t tt : 1;
+ uint64_t prio : 2;
+#endif
+ } s;
+} cvmx_srio_tx_message_header_t;
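As a worked example of the layout just defined, this sketch fills in a header for a single-mailbox message. The destination ID is an assumption, and the ssize encoding of 0xe for 256-byte segments follows the usual RapidIO convention.

    cvmx_srio_tx_message_header_t tx_hdr;
    tx_hdr.u64 = 0;
    tx_hdr.s.prio   = 0;   /* lowest sRIO priority */
    tx_hdr.s.tt     = 0;   /* 8bit device IDs */
    tx_hdr.s.sis    = 0;   /* send from the primary ID */
    tx_hdr.s.ssize  = 0xe; /* 256-byte segments (assumed encoding) */
    tx_hdr.s.did    = 0x5; /* assumed destination ID */
    tx_hdr.s.mbox   = 0;
    tx_hdr.s.letter = 0;
    tx_hdr.s.lns    = 0;   /* use the LETTER field above */
    tx_hdr.s.intr   = 1;   /* interrupt when all segments are DONE */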
+
+/**
+ * Reset SRIO to link partner
+ *
+ * @param srio_port SRIO port to initialize
+ *
+ * @return Zero on success
+ */
+int cvmx_srio_link_rst(int srio_port);
+
+/**
+ * Initialize a SRIO port for use.
+ *
+ * @param srio_port SRIO port to initialize
+ * @param flags Optional flags
+ *
+ * @return Zero on success
+ */
+int cvmx_srio_initialize(int srio_port, cvmx_srio_initialize_flags_t flags);
+
+/**
+ * Read 32bits from a Device's config space
+ *
+ * @param srio_port SRIO port the device is on
+ * @param srcid_index
+ * Which SRIO source ID to use. 0 = Primary, 1 = Secondary
+ * @param destid RapidIO device ID, or -1 for the local Octeon.
+ * @param is16bit Non zero if the transactions should use 16bit device IDs. Zero
+ * if transactions should use 8bit device IDs.
+ * @param hopcount Number of hops to the remote device. Use 0 for the local Octeon.
+ * @param offset Offset in config space. This must be a multiple of 32 bits.
+ * @param result Result of the read. This will be unmodified on failure.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_srio_config_read32(int srio_port, int srcid_index, int destid,
+ int is16bit, uint8_t hopcount, uint32_t offset,
+ uint32_t *result);
+
+/**
+ * Write 32bits to a Device's config space
+ *
+ * @param srio_port SRIO port the device is on
+ * @param srcid_index
+ * Which SRIO source ID to use. 0 = Primary, 1 = Secondary
+ * @param destid RapidIO device ID, or -1 for the local Octeon.
+ * @param is16bit Non zero if the transactions should use 16bit device IDs. Zero
+ * if transactions should use 8bit device IDs.
+ * @param hopcount Number of hops to the remote device. Use 0 for the local Octeon.
+ * @param offset Offset in config space. This must be a multiple of 32 bits.
+ * @param data Data to write.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_srio_config_write32(int srio_port, int srcid_index, int destid,
+ int is16bit, uint8_t hopcount, uint32_t offset,
+ uint32_t data);
+
+/**
+ * Send a RapidIO doorbell to a remote device
+ *
+ * @param srio_port SRIO port the device is on
+ * @param srcid_index
+ * Which SRIO source ID to use. 0 = Primary, 1 = Secondary
+ * @param destid RapidIO device ID.
+ * @param is16bit Non zero if the transactions should use 16bit device IDs. Zero
+ * if transactions should use 8bit device IDs.
+ * @param priority Doorbell priority (0-3)
+ * @param data Data for doorbell.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_srio_send_doorbell(int srio_port, int srcid_index, int destid,
+ int is16bit, int priority, uint16_t data);
+
+/**
+ * Get the status of the last doorbell sent. If the doorbell
+ * hardware is done, then the status is cleared to get ready for
+ * the next doorbell (or retry).
+ *
+ * @param srio_port SRIO port to check doorbell on
+ *
+ * @return Doorbell status
+ */
+cvmx_srio_doorbell_status_t cvmx_srio_send_doorbell_status(int srio_port);
+
+/**
+ * Read a received doorbell and report data about it.
+ *
+ * @param srio_port SRIO port to check for the received doorbell
+ * @param destid_index
+ * Which Octeon destination ID was the doorbell for
+ * @param sequence_num
+ * Sequence number of doorbell (32bits)
+ * @param srcid RapidIO source ID of the doorbell sender
+ * @param priority Priority of the doorbell (0-3)
+ * @param is16bit Set to non zero if the doorbell used 16bit device IDs,
+ * zero if it used 8bit device IDs.
+ * @param data Data in the doorbell (16 bits)
+ *
+ * @return Doorbell status. Either DONE, NONE, or ERROR.
+ */
+cvmx_srio_doorbell_status_t cvmx_srio_receive_doorbell(int srio_port,
+ int *destid_index, uint32_t *sequence_num, int *srcid, int *priority,
+ int *is16bit, uint16_t *data);
+
+/**
+ * Receive a packet from the Soft Packet FIFO (SPF).
+ *
+ * @param srio_port SRIO port to read the packet from.
+ * @param buffer Buffer to receive the packet.
+ * @param buffer_length
+ * Length of the buffer in bytes.
+ *
+ * @return Returns the length of the packet read. Negative on failure.
+ * Zero if no packets are available.
+ */
+int cvmx_srio_receive_spf(int srio_port, void *buffer, int buffer_length);
+
+/**
+ * Map a remote device's memory region into Octeon's physical
+ * address area. The caller can then map this into a core using
+ * the TLB or XKPHYS.
+ *
+ * @param srio_port SRIO port to map the device on
+ * @param write_op Type of operation to perform on a write to the device.
+ * Normally should be CVMX_SRIO_WRITE_MODE_AUTO.
+ * @param write_priority
+ * SRIO priority of writes (0-3)
+ * @param read_op Type of operation to perform on reads to the device.
+ * Normally should be CVMX_SRIO_READ_MODE_NORMAL.
+ * @param read_priority
+ * SRIO priority of reads (0-3)
+ * @param srcid_index
+ * Which SRIO source ID to use. 0 = Primary, 1 = Secondary
+ * @param destid RapidIO device ID.
+ * @param is16bit Non zero if the transactions should use 16bit device IDs. Zero
+ * if transactions should use 8bit device IDs.
+ * @param base Device base address to start the mapping
+ * @param size Size of the mapping in bytes
+ *
+ * @return Octeon 64bit physical address that accesses the remote device,
+ * or zero on failure.
+ */
+uint64_t cvmx_srio_physical_map(int srio_port, cvmx_srio_write_mode_t write_op,
+ int write_priority, cvmx_srio_read_mode_t read_op, int read_priority,
+ int srcid_index, int destid, int is16bit, uint64_t base, uint64_t size);
+
+/**
+ * Unmap a physical address window created by cvmx_srio_phys_map().
+ *
+ * @param physical_address
+ * Physical address returned by cvmx_srio_phys_map().
+ * @param size Size used on original call.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_srio_physical_unmap(uint64_t physical_address, uint64_t size);
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+/**
+ * Fill out an outbound message descriptor.
+ *
+ * @param port pip/ipd port number
+ * @param buf_ptr pointer to a buffer pointer. the buffer pointer points
+ * to a chain of buffers that hold an outbound srio packet.
+ * the packet can take the format of (1) a pip/ipd inbound
+ * message or (2) an application-generated outbound message
+ * @param desc_ptr pointer to an outbound message descriptor. should be null
+ * if *buf_ptr is in the format (1)
+ *
+ * @return 0 on success; negative on failure.
+ */
+int cvmx_srio_omsg_desc (uint64_t port, cvmx_buf_ptr_t *buf_ptr,
+ cvmx_srio_tx_message_header_t *desc_ptr);
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-srio.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-sriomaintx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-sriomaintx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-sriomaintx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,4400 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-sriomaintx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon sriomaintx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_SRIOMAINTX_DEFS_H__
+#define __CVMX_SRIOMAINTX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ASMBLY_ID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ASMBLY_ID(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000008ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ASMBLY_ID(block_id) (0x0000000000000008ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ASMBLY_INFO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ASMBLY_INFO(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000000Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_ASMBLY_INFO(block_id) (0x000000000000000Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_BAR1_IDXX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 15)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_BAR1_IDXX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return 0x0000000000200010ull + (((offset) & 15) + ((block_id) & 3) * 0x0ull) * 4;
+}
+#else
+#define CVMX_SRIOMAINTX_BAR1_IDXX(offset, block_id) (0x0000000000200010ull + (((offset) & 15) + ((block_id) & 3) * 0x0ull) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_BELL_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_BELL_STATUS(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000200080ull;
+}
+#else
+#define CVMX_SRIOMAINTX_BELL_STATUS(block_id) (0x0000000000200080ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_COMP_TAG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_COMP_TAG(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000006Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_COMP_TAG(block_id) (0x000000000000006Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_CORE_ENABLES(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_CORE_ENABLES(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000200070ull;
+}
+#else
+#define CVMX_SRIOMAINTX_CORE_ENABLES(block_id) (0x0000000000200070ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_DEV_ID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_DEV_ID(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000000ull;
+}
+#else
+#define CVMX_SRIOMAINTX_DEV_ID(block_id) (0x0000000000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_DEV_REV(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_DEV_REV(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000004ull;
+}
+#else
+#define CVMX_SRIOMAINTX_DEV_REV(block_id) (0x0000000000000004ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_DST_OPS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_DST_OPS(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000001Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_DST_OPS(block_id) (0x000000000000001Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_ATTR_CAPT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_ATTR_CAPT(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002048ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_ATTR_CAPT(block_id) (0x0000000000002048ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_ERR_DET(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_ERR_DET(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002040ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_ERR_DET(block_id) (0x0000000000002040ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_ERR_RATE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_ERR_RATE(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002068ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_ERR_RATE(block_id) (0x0000000000002068ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_ERR_RATE_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_ERR_RATE_EN(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002044ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_ERR_RATE_EN(block_id) (0x0000000000002044ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_ERR_RATE_THR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_ERR_RATE_THR(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000206Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_ERR_RATE_THR(block_id) (0x000000000000206Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_HDR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_HDR(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002000ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_HDR(block_id) (0x0000000000002000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_LT_ADDR_CAPT_H(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_LT_ADDR_CAPT_H(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002010ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_LT_ADDR_CAPT_H(block_id) (0x0000000000002010ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_LT_ADDR_CAPT_L(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_LT_ADDR_CAPT_L(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002014ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_LT_ADDR_CAPT_L(block_id) (0x0000000000002014ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_LT_CTRL_CAPT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_LT_CTRL_CAPT(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000201Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_LT_CTRL_CAPT(block_id) (0x000000000000201Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_LT_DEV_ID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_LT_DEV_ID(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002028ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_LT_DEV_ID(block_id) (0x0000000000002028ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_LT_DEV_ID_CAPT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_LT_DEV_ID_CAPT(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002018ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_LT_DEV_ID_CAPT(block_id) (0x0000000000002018ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_LT_ERR_DET(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_LT_ERR_DET(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002008ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_LT_ERR_DET(block_id) (0x0000000000002008ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_LT_ERR_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_LT_ERR_EN(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000200Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_LT_ERR_EN(block_id) (0x000000000000200Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_PACK_CAPT_1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_PACK_CAPT_1(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002050ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_PACK_CAPT_1(block_id) (0x0000000000002050ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_PACK_CAPT_2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_PACK_CAPT_2(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002054ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_PACK_CAPT_2(block_id) (0x0000000000002054ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_PACK_CAPT_3(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_PACK_CAPT_3(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000002058ull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_PACK_CAPT_3(block_id) (0x0000000000002058ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_ERB_PACK_SYM_CAPT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_ERB_PACK_SYM_CAPT(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000204Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_ERB_PACK_SYM_CAPT(block_id) (0x000000000000204Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_HB_DEV_ID_LOCK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_HB_DEV_ID_LOCK(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000068ull;
+}
+#else
+#define CVMX_SRIOMAINTX_HB_DEV_ID_LOCK(block_id) (0x0000000000000068ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_BUFFER_CONFIG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_BUFFER_CONFIG(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000102000ull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_BUFFER_CONFIG(block_id) (0x0000000000102000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_BUFFER_CONFIG2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_BUFFER_CONFIG2(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000102004ull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_BUFFER_CONFIG2(block_id) (0x0000000000102004ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_PD_PHY_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_PD_PHY_CTRL(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000107028ull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_PD_PHY_CTRL(block_id) (0x0000000000107028ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_PD_PHY_STAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_PD_PHY_STAT(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000010702Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_PD_PHY_STAT(block_id) (0x000000000010702Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_PI_PHY_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_PI_PHY_CTRL(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000107020ull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_PI_PHY_CTRL(block_id) (0x0000000000107020ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_PI_PHY_STAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_PI_PHY_STAT(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000107024ull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_PI_PHY_STAT(block_id) (0x0000000000107024ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_SP_RX_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_SP_RX_CTRL(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000010700Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_SP_RX_CTRL(block_id) (0x000000000010700Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_SP_RX_DATA(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_SP_RX_DATA(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000107014ull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_SP_RX_DATA(block_id) (0x0000000000107014ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_SP_RX_STAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_SP_RX_STAT(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000107010ull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_SP_RX_STAT(block_id) (0x0000000000107010ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_SP_TX_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_SP_TX_CTRL(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000107000ull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_SP_TX_CTRL(block_id) (0x0000000000107000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_SP_TX_DATA(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_SP_TX_DATA(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000107008ull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_SP_TX_DATA(block_id) (0x0000000000107008ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_IR_SP_TX_STAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_IR_SP_TX_STAT(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000107004ull;
+}
+#else
+#define CVMX_SRIOMAINTX_IR_SP_TX_STAT(block_id) (0x0000000000107004ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_LANE_X_STATUS_0(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 3)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_LANE_X_STATUS_0(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return 0x0000000000001010ull + (((offset) & 3) + ((block_id) & 3) * 0x0ull) * 32;
+}
+#else
+#define CVMX_SRIOMAINTX_LANE_X_STATUS_0(offset, block_id) (0x0000000000001010ull + (((offset) & 3) + ((block_id) & 3) * 0x0ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_LCS_BA0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_LCS_BA0(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000058ull;
+}
+#else
+#define CVMX_SRIOMAINTX_LCS_BA0(block_id) (0x0000000000000058ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_LCS_BA1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_LCS_BA1(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000005Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_LCS_BA1(block_id) (0x000000000000005Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_M2S_BAR0_START0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_M2S_BAR0_START0(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000200000ull;
+}
+#else
+#define CVMX_SRIOMAINTX_M2S_BAR0_START0(block_id) (0x0000000000200000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_M2S_BAR0_START1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_M2S_BAR0_START1(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000200004ull;
+}
+#else
+#define CVMX_SRIOMAINTX_M2S_BAR0_START1(block_id) (0x0000000000200004ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_M2S_BAR1_START0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_M2S_BAR1_START0(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000200008ull;
+}
+#else
+#define CVMX_SRIOMAINTX_M2S_BAR1_START0(block_id) (0x0000000000200008ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_M2S_BAR1_START1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_M2S_BAR1_START1(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000020000Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_M2S_BAR1_START1(block_id) (0x000000000020000Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_M2S_BAR2_START(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_M2S_BAR2_START(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000200050ull;
+}
+#else
+#define CVMX_SRIOMAINTX_M2S_BAR2_START(block_id) (0x0000000000200050ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_MAC_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_MAC_CTRL(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000200068ull;
+}
+#else
+#define CVMX_SRIOMAINTX_MAC_CTRL(block_id) (0x0000000000200068ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PE_FEAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PE_FEAT(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000010ull;
+}
+#else
+#define CVMX_SRIOMAINTX_PE_FEAT(block_id) (0x0000000000000010ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PE_LLC(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PE_LLC(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000004Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_PE_LLC(block_id) (0x000000000000004Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PORT_0_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PORT_0_CTL(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000015Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_PORT_0_CTL(block_id) (0x000000000000015Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PORT_0_CTL2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PORT_0_CTL2(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000154ull;
+}
+#else
+#define CVMX_SRIOMAINTX_PORT_0_CTL2(block_id) (0x0000000000000154ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PORT_0_ERR_STAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PORT_0_ERR_STAT(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000158ull;
+}
+#else
+#define CVMX_SRIOMAINTX_PORT_0_ERR_STAT(block_id) (0x0000000000000158ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PORT_0_LINK_REQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PORT_0_LINK_REQ(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000140ull;
+}
+#else
+#define CVMX_SRIOMAINTX_PORT_0_LINK_REQ(block_id) (0x0000000000000140ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PORT_0_LINK_RESP(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PORT_0_LINK_RESP(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000144ull;
+}
+#else
+#define CVMX_SRIOMAINTX_PORT_0_LINK_RESP(block_id) (0x0000000000000144ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PORT_0_LOCAL_ACKID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PORT_0_LOCAL_ACKID(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000148ull;
+}
+#else
+#define CVMX_SRIOMAINTX_PORT_0_LOCAL_ACKID(block_id) (0x0000000000000148ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PORT_GEN_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PORT_GEN_CTL(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000013Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_PORT_GEN_CTL(block_id) (0x000000000000013Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PORT_LT_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PORT_LT_CTL(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000120ull;
+}
+#else
+#define CVMX_SRIOMAINTX_PORT_LT_CTL(block_id) (0x0000000000000120ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PORT_MBH0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PORT_MBH0(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000100ull;
+}
+#else
+#define CVMX_SRIOMAINTX_PORT_MBH0(block_id) (0x0000000000000100ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PORT_RT_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PORT_RT_CTL(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000124ull;
+}
+#else
+#define CVMX_SRIOMAINTX_PORT_RT_CTL(block_id) (0x0000000000000124ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PORT_TTL_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PORT_TTL_CTL(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000000012Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_PORT_TTL_CTL(block_id) (0x000000000000012Cull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_PRI_DEV_ID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_PRI_DEV_ID(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000060ull;
+}
+#else
+#define CVMX_SRIOMAINTX_PRI_DEV_ID(block_id) (0x0000000000000060ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_SEC_DEV_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_SEC_DEV_CTRL(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000200064ull;
+}
+#else
+#define CVMX_SRIOMAINTX_SEC_DEV_CTRL(block_id) (0x0000000000200064ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_SEC_DEV_ID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_SEC_DEV_ID(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000200060ull;
+}
+#else
+#define CVMX_SRIOMAINTX_SEC_DEV_ID(block_id) (0x0000000000200060ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_SERIAL_LANE_HDR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_SERIAL_LANE_HDR(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000001000ull;
+}
+#else
+#define CVMX_SRIOMAINTX_SERIAL_LANE_HDR(block_id) (0x0000000000001000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_SRC_OPS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_SRC_OPS(%lu) is invalid on this chip\n", block_id);
+ return 0x0000000000000018ull;
+}
+#else
+#define CVMX_SRIOMAINTX_SRC_OPS(block_id) (0x0000000000000018ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOMAINTX_TX_DROP(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOMAINTX_TX_DROP(%lu) is invalid on this chip\n", block_id);
+ return 0x000000000020006Cull;
+}
+#else
+#define CVMX_SRIOMAINTX_TX_DROP(block_id) (0x000000000020006Cull)
+#endif
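+
+/* Illustrative usage sketch, not part of the SDK: the CVMX_SRIOMAINTX_*
+ * accessors above only compute register offsets within the SRIO
+ * maintenance space; a real access still goes through a maintenance
+ * read/write primitive. read_sriomaint32() and write_sriomaint32() are
+ * hypothetical stand-ins for whatever transport the caller provides.
+ */
+extern uint32_t read_sriomaint32(int srio_port, uint64_t offset); /* hypothetical */
+extern void write_sriomaint32(int srio_port, uint64_t offset, uint32_t value); /* hypothetical */
+
+static inline void example_show_tx_drop(int srio_port)
+{
+ /* With CVMX_ENABLE_CSR_ADDRESS_CHECKING, the accessor also warns when
+ srio_port is not a valid SRIO block for the running chip model. */
+ uint32_t drops = read_sriomaint32(srio_port, CVMX_SRIOMAINTX_TX_DROP(srio_port));
+ cvmx_dprintf("SRIO%d TX_DROP = 0x%08x\n", srio_port, drops);
+}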
+
+/**
+ * cvmx_sriomaint#_asmbly_id
+ *
+ * SRIOMAINT_ASMBLY_ID = SRIO Assembly ID
+ *
+ * The Assembly ID register shows the Assembly ID and Vendor
+ *
+ * Notes:
+ * The Assembly ID register shows the Assembly ID and Vendor specified in $SRIO_ASMBLY_ID.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ASMBLY_ID hclk hrst_n
+ */
+union cvmx_sriomaintx_asmbly_id {
+ uint32_t u32;
+ struct cvmx_sriomaintx_asmbly_id_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t assy_id : 16; /**< Assembly Identifier */
+ uint32_t assy_ven : 16; /**< Assembly Vendor Identifier */
+#else
+ uint32_t assy_ven : 16;
+ uint32_t assy_id : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_asmbly_id_s cn63xx;
+ struct cvmx_sriomaintx_asmbly_id_s cn63xxp1;
+ struct cvmx_sriomaintx_asmbly_id_s cn66xx;
+};
+typedef union cvmx_sriomaintx_asmbly_id cvmx_sriomaintx_asmbly_id_t;
+
+/**
+ * cvmx_sriomaint#_asmbly_info
+ *
+ * SRIOMAINT_ASMBLY_INFO = SRIO Assembly Information
+ *
+ * The Assembly Info register shows the Assembly Revision specified in $SRIO_ASMBLY_INFO
+ *
+ * Notes:
+ * The Assembly Info register shows the Assembly Revision specified in $SRIO_ASMBLY_INFO and Extended
+ * Feature Pointer.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ASMBLY_INFO hclk hrst_n
+ */
+union cvmx_sriomaintx_asmbly_info {
+ uint32_t u32;
+ struct cvmx_sriomaintx_asmbly_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t assy_rev : 16; /**< Assembly Revision */
+ uint32_t ext_fptr : 16; /**< Pointer to the first entry in the extended feature
+ list. */
+#else
+ uint32_t ext_fptr : 16;
+ uint32_t assy_rev : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_asmbly_info_s cn63xx;
+ struct cvmx_sriomaintx_asmbly_info_s cn63xxp1;
+ struct cvmx_sriomaintx_asmbly_info_s cn66xx;
+};
+typedef union cvmx_sriomaintx_asmbly_info cvmx_sriomaintx_asmbly_info_t;
+
+/**
+ * cvmx_sriomaint#_bar1_idx#
+ *
+ * SRIOMAINT_BAR1_IDXX = SRIO BAR1 IndexX Register
+ *
+ * Contains address index and control bits for access to memory ranges of BAR1.
+ *
+ * Notes:
+ * This register specifies the Octeon address, endian swap and cache status associated with each of
+ * the 16 BAR1 entries. The local address bits used are based on the BARSIZE field located in the
+ * SRIOMAINT(0,2..3)_M2S_BAR1_START0 register. This register is only writeable over SRIO if the
+ * SRIO(0,2..3)_ACC_CTRL.DENY_BAR1 bit is zero.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_BAR1_IDX[0:15] hclk hrst_n
+ */
+union cvmx_sriomaintx_bar1_idxx {
+ uint32_t u32;
+ struct cvmx_sriomaintx_bar1_idxx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_30_31 : 2;
+ uint32_t la : 22; /**< L2/DRAM Address bits [37:16]
+ Not all LA[21:0] bits are used by SRIO hardware,
+ depending on SRIOMAINT(0,2..3)_M2S_BAR1_START1[BARSIZE].
+
+ Becomes L2/DRAM
+ BARSIZE LA Bits Used Address Bits Entry Size
+ 0 LA[21:0] [37:16] 64KB
+ 1 LA[21:1] [37:17] 128KB
+ 2 LA[21:2] [37:18] 256KB
+ 3 LA[21:3] [37:19] 512KB
+ 4 LA[21:4] [37:20] 1MB
+ 5 LA[21:5] [37:21] 2MB
+ 6 LA[21:6] [37:22] 4MB
+ 7 LA[21:7] [37:23] 8MB
+ 8 LA[21:8] [37:24] 16MB
+ 9 LA[21:9] [37:25] 32MB
+ 10 LA[21:10] [37:26] 64MB
+ 11 LA[21:11] [37:27] 128MB
+ 12 LA[21:12] [37:28] 256MB
+ 13 LA[21:13] [37:29] 512MB */
+ uint32_t reserved_6_7 : 2;
+ uint32_t es : 2; /**< Endian Swap Mode.
+ 0 = No Swap
+ 1 = 64-bit Swap Bytes [ABCD_EFGH] -> [HGFE_DCBA]
+ 2 = 32-bit Swap Words [ABCD_EFGH] -> [DCBA_HGFE]
+ 3 = 32-bit Word Exch [ABCD_EFGH] -> [EFGH_ABCD] */
+ uint32_t nca : 1; /**< Non-Cacheable Access Mode. When set, transfers
+ through this window are not cacheable. */
+ uint32_t reserved_1_2 : 2;
+ uint32_t enable : 1; /**< When set the selected index address is valid. */
+#else
+ uint32_t enable : 1;
+ uint32_t reserved_1_2 : 2;
+ uint32_t nca : 1;
+ uint32_t es : 2;
+ uint32_t reserved_6_7 : 2;
+ uint32_t la : 22;
+ uint32_t reserved_30_31 : 2;
+#endif
+ } s;
+ struct cvmx_sriomaintx_bar1_idxx_s cn63xx;
+ struct cvmx_sriomaintx_bar1_idxx_s cn63xxp1;
+ struct cvmx_sriomaintx_bar1_idxx_s cn66xx;
+};
+typedef union cvmx_sriomaintx_bar1_idxx cvmx_sriomaintx_bar1_idxx_t;
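+
+/* Illustrative sketch, not part of the SDK: packing one BAR1 index entry
+ * with the union above. With BARSIZE = 0 each of the 16 entries maps a
+ * 64KB window, so LA carries local address bits [37:16]. The
+ * CVMX_SRIOMAINTX_BAR1_IDXX() offset accessor is defined earlier in this
+ * file; write_sriomaint32() is the hypothetical primitive declared above.
+ */
+static inline void example_map_bar1_entry(int srio_port, unsigned long idx, uint64_t local_addr)
+{
+ cvmx_sriomaintx_bar1_idxx_t entry;
+
+ entry.u32 = 0;
+ entry.s.la = (local_addr >> 16) & 0x3fffff; /* address bits [37:16] for BARSIZE = 0 */
+ entry.s.es = 1; /* 64-bit byte swap */
+ entry.s.nca = 0; /* cacheable */
+ entry.s.enable = 1; /* mark the index valid */
+ write_sriomaint32(srio_port, CVMX_SRIOMAINTX_BAR1_IDXX(idx, srio_port), entry.u32);
+}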
+
+/**
+ * cvmx_sriomaint#_bell_status
+ *
+ * SRIOMAINT_BELL_STATUS = SRIO Incoming Doorbell Status
+ *
+ * The SRIO Incoming (RX) Doorbell Status
+ *
+ * Notes:
+ * This register displays the status of the doorbells received. If FULL is set the SRIO device will
+ * retry incoming transactions.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_BELL_STATUS hclk hrst_n
+ */
+union cvmx_sriomaintx_bell_status {
+ uint32_t u32;
+ struct cvmx_sriomaintx_bell_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t full : 1; /**< Not able to receive Doorbell Transactions */
+#else
+ uint32_t full : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_sriomaintx_bell_status_s cn63xx;
+ struct cvmx_sriomaintx_bell_status_s cn63xxp1;
+ struct cvmx_sriomaintx_bell_status_s cn66xx;
+};
+typedef union cvmx_sriomaintx_bell_status cvmx_sriomaintx_bell_status_t;
+
+/**
+ * cvmx_sriomaint#_comp_tag
+ *
+ * SRIOMAINT_COMP_TAG = SRIO Component Tag
+ *
+ * Component Tag
+ *
+ * Notes:
+ * This register contains a component tag value for the processing element and the value can be
+ * assigned by software when the device is initialized.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_COMP_TAG hclk hrst_n
+ */
+union cvmx_sriomaintx_comp_tag {
+ uint32_t u32;
+ struct cvmx_sriomaintx_comp_tag_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t comp_tag : 32; /**< Component Tag for Firmware Use */
+#else
+ uint32_t comp_tag : 32;
+#endif
+ } s;
+ struct cvmx_sriomaintx_comp_tag_s cn63xx;
+ struct cvmx_sriomaintx_comp_tag_s cn63xxp1;
+ struct cvmx_sriomaintx_comp_tag_s cn66xx;
+};
+typedef union cvmx_sriomaintx_comp_tag cvmx_sriomaintx_comp_tag_t;
+
+/**
+ * cvmx_sriomaint#_core_enables
+ *
+ * SRIOMAINT_CORE_ENABLES = SRIO Core Control
+ *
+ * Core Control
+ *
+ * Notes:
+ * This register displays the reset state of the Octeon Core Logic while the SRIO Link is running.
+ * The bit should be set after the software has initialized the chip to allow memory operations.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_CORE_ENABLES hclk hrst_n, srst_n
+ */
+union cvmx_sriomaintx_core_enables {
+ uint32_t u32;
+ struct cvmx_sriomaintx_core_enables_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_5_31 : 27;
+ uint32_t halt : 1; /**< OCTEON currently in Reset
+ 0 = All OCTEON resources are available.
+ 1 = The OCTEON is in reset. When this bit is set,
+ SRIO maintenance registers can be accessed,
+ but BAR0, BAR1, and BAR2 cannot be. */
+ uint32_t imsg1 : 1; /**< Allow Incoming Message Unit 1 Operations
+ Note: This bit is cleared when the CN63XX is reset
+ 0 = SRIO Incoming Messages to Unit 1 ignored and
+ return error response
+ 1 = SRIO Incoming Messages to Unit 1 */
+ uint32_t imsg0 : 1; /**< Allow Incoming Message Unit 0 Operations
+ Note: This bit is cleared when the CN63XX is reset
+ 0 = SRIO Incoming Messages to Unit 0 ignored and
+ return error response
+ 1 = SRIO Incoming Messages to Unit 0 */
+ uint32_t doorbell : 1; /**< Allow Inbound Doorbell Operations
+ Note: This bit is cleared when the CN63XX is reset
+ 0 = SRIO Doorbell OPs ignored and return error
+ response
+ 1 = SRIO Doorbell OPs Allowed */
+ uint32_t memory : 1; /**< Allow Inbound/Outbound Memory Operations
+ Note: This bit is cleared when the CN63XX is reset
+ 0 = SRIO Incoming Nwrites and Swrites are
+ dropped. Incoming Nreads, Atomics and
+ NwriteRs return responses with ERROR status.
+ SRIO Incoming Maintenance BAR Memory Accesses
+ are processed normally.
+ Outgoing Store Operations are Dropped
+ Outgoing Load Operations are not issued and
+ return all 1's with an ERROR status.
+ In Flight Operations started while the bit is
+ set in both directions will complete normally.
+ 1 = SRIO Memory Read/Write OPs Allowed */
+#else
+ uint32_t memory : 1;
+ uint32_t doorbell : 1;
+ uint32_t imsg0 : 1;
+ uint32_t imsg1 : 1;
+ uint32_t halt : 1;
+ uint32_t reserved_5_31 : 27;
+#endif
+ } s;
+ struct cvmx_sriomaintx_core_enables_s cn63xx;
+ struct cvmx_sriomaintx_core_enables_s cn63xxp1;
+ struct cvmx_sriomaintx_core_enables_s cn66xx;
+};
+typedef union cvmx_sriomaintx_core_enables cvmx_sriomaintx_core_enables_t;
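+
+/* Illustrative sketch, not part of the SDK: per the notes above, software
+ * sets these enables once local initialization is done so remote SRIO
+ * operations stop receiving error responses. The offset accessor is
+ * defined earlier in this file; the primitives are the hypothetical ones
+ * declared near the top of the register list.
+ */
+static inline void example_enable_core_ops(int srio_port)
+{
+ cvmx_sriomaintx_core_enables_t en;
+
+ en.u32 = read_sriomaint32(srio_port, CVMX_SRIOMAINTX_CORE_ENABLES(srio_port));
+ en.s.memory = 1; /* inbound/outbound memory operations */
+ en.s.doorbell = 1; /* inbound doorbells */
+ en.s.imsg0 = 1; /* incoming messages to unit 0 */
+ en.s.imsg1 = 1; /* incoming messages to unit 1 */
+ write_sriomaint32(srio_port, CVMX_SRIOMAINTX_CORE_ENABLES(srio_port), en.u32);
+}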
+
+/**
+ * cvmx_sriomaint#_dev_id
+ *
+ * SRIOMAINT_DEV_ID = SRIO Device ID
+ *
+ * The Device/Vendor Identity field identifies the vendor that manufactured the device.
+ *
+ * Notes:
+ * This register identifies Cavium Inc. and the Product ID.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_DEV_ID hclk hrst_n
+ */
+union cvmx_sriomaintx_dev_id {
+ uint32_t u32;
+ struct cvmx_sriomaintx_dev_id_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t device : 16; /**< Product Identity */
+ uint32_t vendor : 16; /**< Cavium Vendor Identity */
+#else
+ uint32_t vendor : 16;
+ uint32_t device : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_dev_id_s cn63xx;
+ struct cvmx_sriomaintx_dev_id_s cn63xxp1;
+ struct cvmx_sriomaintx_dev_id_s cn66xx;
+};
+typedef union cvmx_sriomaintx_dev_id cvmx_sriomaintx_dev_id_t;
+
+/**
+ * cvmx_sriomaint#_dev_rev
+ *
+ * SRIOMAINT_DEV_REV = SRIO Device Revision
+ *
+ * The Device Revision register identifies the chip pass and revision
+ *
+ * Notes:
+ * This register identifies the chip pass and revision derived from the fuses.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_DEV_REV hclk hrst_n
+ */
+union cvmx_sriomaintx_dev_rev {
+ uint32_t u32;
+ struct cvmx_sriomaintx_dev_rev_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t revision : 8; /**< Chip Pass/Revision */
+#else
+ uint32_t revision : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_sriomaintx_dev_rev_s cn63xx;
+ struct cvmx_sriomaintx_dev_rev_s cn63xxp1;
+ struct cvmx_sriomaintx_dev_rev_s cn66xx;
+};
+typedef union cvmx_sriomaintx_dev_rev cvmx_sriomaintx_dev_rev_t;
+
+/**
+ * cvmx_sriomaint#_dst_ops
+ *
+ * SRIOMAINT_DST_OPS = SRIO Destination Operations
+ *
+ * The logical operations supported from external devices.
+ *
+ * Notes:
+ * The logical operations supported from external devices. The Destination OPs register shows the
+ * operations specified in the SRIO(0,2..3)_IP_FEATURE.OPS register.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_DST_OPS hclk hrst_n
+ */
+union cvmx_sriomaintx_dst_ops {
+ uint32_t u32;
+ struct cvmx_sriomaintx_dst_ops_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t gsm_read : 1; /**< PE does not support Read Home operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<31>] */
+ uint32_t i_read : 1; /**< PE does not support Instruction Read.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<30>] */
+ uint32_t rd_own : 1; /**< PE does not support Read for Ownership.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<29>] */
+ uint32_t d_invald : 1; /**< PE does not support Data Cache Invalidate.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<28>] */
+ uint32_t castout : 1; /**< PE does not support Castout Operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<27>] */
+ uint32_t d_flush : 1; /**< PE does not support Data Cache Flush.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<26>] */
+ uint32_t io_read : 1; /**< PE does not support IO Read.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<25>] */
+ uint32_t i_invald : 1; /**< PE does not support Instruction Cache Invalidate.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<24>] */
+ uint32_t tlb_inv : 1; /**< PE does not support TLB Entry Invalidate.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<23>] */
+ uint32_t tlb_invs : 1; /**< PE does not support TLB Entry Invalidate Sync.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<22>] */
+ uint32_t reserved_16_21 : 6;
+ uint32_t read : 1; /**< PE can support Nread operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<15>] */
+ uint32_t write : 1; /**< PE can support Nwrite operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<14>] */
+ uint32_t swrite : 1; /**< PE can support Swrite operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<13>] */
+ uint32_t write_r : 1; /**< PE can support Write with Response operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<12>] */
+ uint32_t msg : 1; /**< PE can support Data Message operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<11>] */
+ uint32_t doorbell : 1; /**< PE can support Doorbell operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<10>] */
+ uint32_t compswap : 1; /**< PE does not support Atomic Compare and Swap.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<9>] */
+ uint32_t testswap : 1; /**< PE does not support Atomic Test and Swap.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<8>] */
+ uint32_t atom_inc : 1; /**< PE can support Atomic increment operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<7>] */
+ uint32_t atom_dec : 1; /**< PE can support Atomic decrement operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<6>] */
+ uint32_t atom_set : 1; /**< PE can support Atomic set operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<5>] */
+ uint32_t atom_clr : 1; /**< PE can support Atomic clear operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<4>] */
+ uint32_t atom_swp : 1; /**< PE does not support Atomic Swap.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<3>] */
+ uint32_t port_wr : 1; /**< PE can support Port Write operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<2>] */
+ uint32_t reserved_0_1 : 2;
+#else
+ uint32_t reserved_0_1 : 2;
+ uint32_t port_wr : 1;
+ uint32_t atom_swp : 1;
+ uint32_t atom_clr : 1;
+ uint32_t atom_set : 1;
+ uint32_t atom_dec : 1;
+ uint32_t atom_inc : 1;
+ uint32_t testswap : 1;
+ uint32_t compswap : 1;
+ uint32_t doorbell : 1;
+ uint32_t msg : 1;
+ uint32_t write_r : 1;
+ uint32_t swrite : 1;
+ uint32_t write : 1;
+ uint32_t read : 1;
+ uint32_t reserved_16_21 : 6;
+ uint32_t tlb_invs : 1;
+ uint32_t tlb_inv : 1;
+ uint32_t i_invald : 1;
+ uint32_t io_read : 1;
+ uint32_t d_flush : 1;
+ uint32_t castout : 1;
+ uint32_t d_invald : 1;
+ uint32_t rd_own : 1;
+ uint32_t i_read : 1;
+ uint32_t gsm_read : 1;
+#endif
+ } s;
+ struct cvmx_sriomaintx_dst_ops_s cn63xx;
+ struct cvmx_sriomaintx_dst_ops_s cn63xxp1;
+ struct cvmx_sriomaintx_dst_ops_s cn66xx;
+};
+typedef union cvmx_sriomaintx_dst_ops cvmx_sriomaintx_dst_ops_t;
+
+/**
+ * cvmx_sriomaint#_erb_attr_capt
+ *
+ * SRIOMAINT_ERB_ATTR_CAPT = SRIO Attributes Capture
+ *
+ * Attributes Capture
+ *
+ * Notes:
+ * This register contains the information captured during the error.
+ * The HW will not update this register (i.e. this register is locked) while
+ * VALID is set in this CSR.
+ * The HW sets SRIO_INT_REG[PHY_ERB] every time it sets VALID in this CSR.
+ * To handle the interrupt, the following procedure may be best:
+ * (1) clear SRIO_INT_REG[PHY_ERB],
+ * (2) read this CSR, corresponding SRIOMAINT*_ERB_ERR_DET, SRIOMAINT*_ERB_PACK_SYM_CAPT,
+ * SRIOMAINT*_ERB_PACK_CAPT_1, SRIOMAINT*_ERB_PACK_CAPT_2, and SRIOMAINT*_ERB_PACK_CAPT_3
+ * (3) Write VALID in this CSR to 0.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_ATTR_CAPT hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_attr_capt {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_attr_capt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t inf_type : 3; /**< Type of Information Logged.
+ 000 - Packet
+ 010 - Short Control Symbol
+ (use only first capture register)
+ 100 - Implementation Specific Error Reporting
+ All Others Reserved */
+ uint32_t err_type : 5; /**< The encoded value of 31 minus the bit position in
+ SRIOMAINT(0,2..3)_ERB_ERR_DET that describes the error
+ captured in SRIOMAINT(0,2..3)_ERB_*CAPT Registers.
+ (For example a value of 5 indicates 31-5 = bit 26) */
+ uint32_t err_info : 20; /**< Error Info.
+ ERR_TYPE Bits Description
+ 0 23 TX Protocol Error
+ 22 RX Protocol Error
+ 21 TX Link Response Timeout
+ 20 TX ACKID Timeout
+ - 19:16 Reserved
+ - 15:12 TX Protocol ID
+ 1 = Rcvd Unexpected Link Response
+ 2 = Rcvd Link Response before Req
+ 3 = Rcvd NACK servicing NACK
+ 4 = Rcvd NACK
+ 5 = Rcvd RETRY servicing RETRY
+ 6 = Rcvd RETRY servicing NACK
+ 7 = Rcvd ACK servicing RETRY
+ 8 = Rcvd ACK servicing NACK
+ 9 = Unexp ACKID on ACK or RETRY
+ 10 = Unexp ACK or RETRY
+ - 11:8 Reserved
+ - 7:4 RX Protocol ID
+ 1 = Rcvd EOP w/o Prev SOP
+ 2 = Rcvd STOMP w/o Prev SOP
+ 3 = Unexp RESTART
+ 4 = Redundant Status from LinkReq
+ 9-16 23:20 RX K Bits
+ - 19:0 Reserved
+ 26 23:20 RX K Bits
+ - 19:0 Reserved
+ 27 23:12 Type
+ 0x000 TX
+ 0x010 RX
+ - 11:8 RX or TX Protocol ID (see above)
+ - 7:4 Reserved
+ 30 23:20 RX K Bits
+ - 19:0 Reserved
+ 31 23:16 ACKID Timeout 0x2
+ - 15:14 Reserved
+ - 13:8 AckID
+ - 7:4 Reserved
+ All others ERR_TYPEs are reserved. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t valid : 1; /**< This bit is set by hardware to indicate that the
+ Packet/control symbol capture registers contain
+ valid information. For control symbols, only
+ capture register 0 will contain meaningful
+ information. This bit must be cleared by software
+ to allow capture of other errors. */
+#else
+ uint32_t valid : 1;
+ uint32_t reserved_1_3 : 3;
+ uint32_t err_info : 20;
+ uint32_t err_type : 5;
+ uint32_t inf_type : 3;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_attr_capt_s cn63xx;
+ struct cvmx_sriomaintx_erb_attr_capt_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t inf_type : 3; /**< Type of Information Logged.
+ 000 - Packet
+ 010 - Short Control Symbol
+ (use only first capture register)
+ All Others Reserved */
+ uint32_t err_type : 5; /**< The encoded value of 31 minus the bit position in
+ SRIOMAINT(0..1)_ERB_ERR_DET that describes the error
+ captured in SRIOMAINT(0..1)_ERB_*CAPT Registers.
+ (For example a value of 5 indicates 31-5 = bit 26) */
+ uint32_t reserved_1_23 : 23;
+ uint32_t valid : 1; /**< This bit is set by hardware to indicate that the
+ Packet/control symbol capture registers contain
+ valid information. For control symbols, only
+ capture register 0 will contain meaningful
+ information. This bit must be cleared by software
+ to allow capture of other errors. */
+#else
+ uint32_t valid : 1;
+ uint32_t reserved_1_23 : 23;
+ uint32_t err_type : 5;
+ uint32_t inf_type : 3;
+#endif
+ } cn63xxp1;
+ struct cvmx_sriomaintx_erb_attr_capt_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_attr_capt cvmx_sriomaintx_erb_attr_capt_t;
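+
+/* Illustrative sketch, not part of the SDK: the handling order described
+ * in the notes above. Step (1), clearing SRIO_INT_REG[PHY_ERB], goes
+ * through the SRIO CSR space rather than the maintenance space and is
+ * omitted here. read/write_sriomaint32() are the hypothetical primitives
+ * declared near the top of the register list.
+ */
+static inline void example_handle_phy_erb(int srio_port)
+{
+ cvmx_sriomaintx_erb_attr_capt_t attr;
+ uint32_t det, sym, cap1, cap2, cap3;
+
+ /* (2) While VALID is set the capture registers are locked, so they
+ can be read coherently. */
+ attr.u32 = read_sriomaint32(srio_port, CVMX_SRIOMAINTX_ERB_ATTR_CAPT(srio_port));
+ if (!attr.s.valid)
+ return;
+ det = read_sriomaint32(srio_port, CVMX_SRIOMAINTX_ERB_ERR_DET(srio_port));
+ sym = read_sriomaint32(srio_port, CVMX_SRIOMAINTX_ERB_PACK_SYM_CAPT(srio_port));
+ cap1 = read_sriomaint32(srio_port, CVMX_SRIOMAINTX_ERB_PACK_CAPT_1(srio_port));
+ cap2 = read_sriomaint32(srio_port, CVMX_SRIOMAINTX_ERB_PACK_CAPT_2(srio_port));
+ cap3 = read_sriomaint32(srio_port, CVMX_SRIOMAINTX_ERB_PACK_CAPT_3(srio_port));
+ cvmx_dprintf("SRIO%d ERB capture: inf_type %u err_type %u det 0x%08x\n",
+ srio_port, (unsigned)attr.s.inf_type, (unsigned)attr.s.err_type, det);
+ cvmx_dprintf("SRIO%d ERB capture: sym 0x%08x pkt 0x%08x 0x%08x 0x%08x\n",
+ srio_port, sym, cap1, cap2, cap3);
+
+ /* (3) Clear VALID so hardware can log the next error. */
+ attr.s.valid = 0;
+ write_sriomaint32(srio_port, CVMX_SRIOMAINTX_ERB_ATTR_CAPT(srio_port), attr.u32);
+}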
+
+/**
+ * cvmx_sriomaint#_erb_err_det
+ *
+ * SRIOMAINT_ERB_ERR_DET = SRIO Error Detect
+ *
+ * Error Detect
+ *
+ * Notes:
+ * The Error Detect Register indicates physical layer transmission errors detected by the hardware.
+ * The HW will not update this register (i.e. this register is locked) while
+ * SRIOMAINT*_ERB_ATTR_CAPT[VALID] is set.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_ERR_DET hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_err_det {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_err_det_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t imp_err : 1; /**< Implementation Specific Error. */
+ uint32_t reserved_23_30 : 8;
+ uint32_t ctl_crc : 1; /**< Received a control symbol with a bad CRC value
+ Complete Symbol in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t uns_id : 1; /**< Received an acknowledge control symbol with an
+ unexpected ackID (packet-accepted or packet_retry)
+ Partial Symbol in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t nack : 1; /**< Received packet-not-accepted acknowledge control
+ symbols.
+ Partial Symbol in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t out_ack : 1; /**< Received packet with unexpected ackID value
+ Header in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t pkt_crc : 1; /**< Received a packet with a bad CRC value
+ Header in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t size : 1; /**< Received packet which exceeds the maximum allowed
+ size of 276 bytes.
+ Header in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t inv_char : 1; /**< Received illegal, 8B/10B error or undefined
+ codegroup within a packet.
+ Header in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t inv_data : 1; /**< Received data codegroup or 8B/10B error within an
+ IDLE sequence.
+ Header in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t reserved_6_14 : 9;
+ uint32_t bad_ack : 1; /**< Link_response received with an ackID that is not
+ outstanding.
+ Partial Symbol in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t proterr : 1; /**< An unexpected packet or control symbol was
+ received.
+ Partial Symbol in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t f_toggle : 1; /**< Reserved. */
+ uint32_t del_err : 1; /**< Received illegal or undefined codegroup.
+ (either INV_DATA or INV_CHAR)
+ Complete Symbol in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t uns_ack : 1; /**< An unexpected acknowledge control symbol was
+ received.
+ Partial Symbol in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+ uint32_t lnk_tout : 1; /**< An acknowledge or link-response control symbol is
+ not received within the specified timeout interval
+ Partial Header in SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT */
+#else
+ uint32_t lnk_tout : 1;
+ uint32_t uns_ack : 1;
+ uint32_t del_err : 1;
+ uint32_t f_toggle : 1;
+ uint32_t proterr : 1;
+ uint32_t bad_ack : 1;
+ uint32_t reserved_6_14 : 9;
+ uint32_t inv_data : 1;
+ uint32_t inv_char : 1;
+ uint32_t size : 1;
+ uint32_t pkt_crc : 1;
+ uint32_t out_ack : 1;
+ uint32_t nack : 1;
+ uint32_t uns_id : 1;
+ uint32_t ctl_crc : 1;
+ uint32_t reserved_23_30 : 8;
+ uint32_t imp_err : 1;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_err_det_s cn63xx;
+ struct cvmx_sriomaintx_erb_err_det_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t ctl_crc : 1; /**< Received a control symbol with a bad CRC value
+ Complete Symbol in SRIOMAINT(0..1)_ERB_PACK_SYM_CAPT */
+ uint32_t uns_id : 1; /**< Received an acknowledge control symbol with an
+ unexpected ackID (packet-accepted or packet_retry)
+ Partial Symbol in SRIOMAINT(0..1)_ERB_PACK_SYM_CAPT */
+ uint32_t nack : 1; /**< Received packet-not-accepted acknowledge control
+ symbols.
+ Partial Symbol in SRIOMAINT(0..1)_ERB_PACK_SYM_CAPT */
+ uint32_t out_ack : 1; /**< Received packet with unexpected ackID value
+ Header in SRIOMAINT(0..1)_ERB_PACK_SYM_CAPT */
+ uint32_t pkt_crc : 1; /**< Received a packet with a bad CRC value
+ Header in SRIOMAINT(0..1)_ERB_PACK_SYM_CAPT */
+ uint32_t size : 1; /**< Received packet which exceeds the maximum allowed
+ size of 276 bytes.
+ Header in SRIOMAINT(0..1)_ERB_PACK_SYM_CAPT */
+ uint32_t reserved_6_16 : 11;
+ uint32_t bad_ack : 1; /**< Link_response received with an ackID that is not
+ outstanding.
+ Partial Symbol in SRIOMAINT(0..1)_ERB_PACK_SYM_CAPT */
+ uint32_t proterr : 1; /**< An unexpected packet or control symbol was
+ received.
+ Partial Symbol in SRIOMAINT(0..1)_ERB_PACK_SYM_CAPT */
+ uint32_t f_toggle : 1; /**< Reserved. */
+ uint32_t del_err : 1; /**< Received illegal or undefined codegroup.
+ (either INV_DATA or INV_CHAR) (Pass 2)
+ Complete Symbol in SRIOMAINT(0..1)_ERB_PACK_SYM_CAPT */
+ uint32_t uns_ack : 1; /**< An unexpected acknowledge control symbol was
+ received.
+ Partial Symbol in SRIOMAINT(0..1)_ERB_PACK_SYM_CAPT */
+ uint32_t lnk_tout : 1; /**< An acknowledge or link-response control symbol is
+ not received within the specified timeout interval
+ Partial Header in SRIOMAINT(0..1)_ERB_PACK_SYM_CAPT */
+#else
+ uint32_t lnk_tout : 1;
+ uint32_t uns_ack : 1;
+ uint32_t del_err : 1;
+ uint32_t f_toggle : 1;
+ uint32_t proterr : 1;
+ uint32_t bad_ack : 1;
+ uint32_t reserved_6_16 : 11;
+ uint32_t size : 1;
+ uint32_t pkt_crc : 1;
+ uint32_t out_ack : 1;
+ uint32_t nack : 1;
+ uint32_t uns_id : 1;
+ uint32_t ctl_crc : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } cn63xxp1;
+ struct cvmx_sriomaintx_erb_err_det_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_err_det cvmx_sriomaintx_erb_err_det_t;
+
+/**
+ * cvmx_sriomaint#_erb_err_rate
+ *
+ * SRIOMAINT_ERB_ERR_RATE = SRIO Error Rate
+ *
+ * Error Rate
+ *
+ * Notes:
+ * The Error Rate register is used with the Error Rate Threshold register to monitor and control the
+ * reporting of transmission errors.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_ERR_RATE hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_err_rate {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_err_rate_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t err_bias : 8; /**< These bits provide the error rate bias value.
+ 0x00 - do not decrement the error rate counter
+ 0x01 - decrement every 1ms (+/-34%)
+ 0x02 - decrement every 10ms (+/-34%)
+ 0x04 - decrement every 100ms (+/-34%)
+ 0x08 - decrement every 1s (+/-34%)
+ 0x10 - decrement every 10s (+/-34%)
+ 0x20 - decrement every 100s (+/-34%)
+ 0x40 - decrement every 1000s (+/-34%)
+ 0x80 - decrement every 10000s (+/-34%)
+ All other values are reserved */
+ uint32_t reserved_18_23 : 6;
+ uint32_t rate_lim : 2; /**< These bits limit the incrementing of the error
+ rate counter above the failed threshold trigger.
+ 00 - only count 2 errors above
+ 01 - only count 4 errors above
+ 10 - only count 16 errors above
+ 11 - do not limit incrementing the error rate count */
+ uint32_t pk_rate : 8; /**< Peak Value attained by the error rate counter */
+ uint32_t rate_cnt : 8; /**< These bits maintain a count of the number of
+ transmission errors that have been detected by the
+ port, decremented by the Error Rate Bias
+ mechanism, to create an indication of the link
+ error rate. */
+#else
+ uint32_t rate_cnt : 8;
+ uint32_t pk_rate : 8;
+ uint32_t rate_lim : 2;
+ uint32_t reserved_18_23 : 6;
+ uint32_t err_bias : 8;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_err_rate_s cn63xx;
+ struct cvmx_sriomaintx_erb_err_rate_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_err_rate_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_err_rate cvmx_sriomaintx_erb_err_rate_t;
+
+/**
+ * cvmx_sriomaint#_erb_err_rate_en
+ *
+ * SRIOMAINT_ERB_ERR_RATE_EN = SRIO Error Rate Enable
+ *
+ * Error Rate Enable
+ *
+ * Notes:
+ * This register contains the bits that control when an error condition is allowed to increment the
+ * error rate counter in the Error Rate Threshold Register and lock the Error Capture registers.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_ERR_RATE_EN hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_err_rate_en {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_err_rate_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t imp_err : 1; /**< Enable Implementation Specific Error. */
+ uint32_t reserved_23_30 : 8;
+ uint32_t ctl_crc : 1; /**< Enable error rate counting of control symbols with
+ bad CRC values */
+ uint32_t uns_id : 1; /**< Enable error rate counting of acknowledge control
+ symbol with unexpected ackIDs
+ (packet-accepted or packet_retry) */
+ uint32_t nack : 1; /**< Enable error rate counting of packet-not-accepted
+ acknowledge control symbols. */
+ uint32_t out_ack : 1; /**< Enable error rate counting of received packet with
+ unexpected ackID value */
+ uint32_t pkt_crc : 1; /**< Enable error rate counting of received a packet
+ with a bad CRC value */
+ uint32_t size : 1; /**< Enable error rate counting of received packet
+ which exceeds the maximum size of 276 bytes. */
+ uint32_t inv_char : 1; /**< Enable error rate counting of received
+ illegal, 8B/10B error or undefined codegroup
+ within a packet. */
+ uint32_t inv_data : 1; /**< Enable error rate counting of received data
+ codegroup or 8B/10B error within IDLE sequence. */
+ uint32_t reserved_6_14 : 9;
+ uint32_t bad_ack : 1; /**< Enable error rate counting of link_responses with
+ an ackID that is not outstanding. */
+ uint32_t proterr : 1; /**< Enable error rate counting of unexpected packet or
+ control symbols received. */
+ uint32_t f_toggle : 1; /**< Reserved. */
+ uint32_t del_err : 1; /**< Enable error rate counting of illegal or undefined
+ codegroups (either INV_DATA or INV_CHAR). */
+ uint32_t uns_ack : 1; /**< Enable error rate counting of unexpected
+ acknowledge control symbols received. */
+ uint32_t lnk_tout : 1; /**< Enable error rate counting of acknowledge or
+ link-response control symbols not received within
+ the specified timeout interval */
+#else
+ uint32_t lnk_tout : 1;
+ uint32_t uns_ack : 1;
+ uint32_t del_err : 1;
+ uint32_t f_toggle : 1;
+ uint32_t proterr : 1;
+ uint32_t bad_ack : 1;
+ uint32_t reserved_6_14 : 9;
+ uint32_t inv_data : 1;
+ uint32_t inv_char : 1;
+ uint32_t size : 1;
+ uint32_t pkt_crc : 1;
+ uint32_t out_ack : 1;
+ uint32_t nack : 1;
+ uint32_t uns_id : 1;
+ uint32_t ctl_crc : 1;
+ uint32_t reserved_23_30 : 8;
+ uint32_t imp_err : 1;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_err_rate_en_s cn63xx;
+ struct cvmx_sriomaintx_erb_err_rate_en_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t ctl_crc : 1; /**< Enable error rate counting of control symbols with
+ bad CRC values */
+ uint32_t uns_id : 1; /**< Enable error rate counting of acknowledge control
+ symbol with unexpected ackIDs
+ (packet-accepted or packet_retry) */
+ uint32_t nack : 1; /**< Enable error rate counting of packet-not-accepted
+ acknowledge control symbols. */
+ uint32_t out_ack : 1; /**< Enable error rate counting of received packet with
+ unexpected ackID value */
+ uint32_t pkt_crc : 1; /**< Enable error rate counting of received a packet
+ with a bad CRC value */
+ uint32_t size : 1; /**< Enable error rate counting of received packet
+ which exceeds the maximum size of 276 bytes. */
+ uint32_t reserved_6_16 : 11;
+ uint32_t bad_ack : 1; /**< Enable error rate counting of link_responses with
+ an ackID that is not outstanding. */
+ uint32_t proterr : 1; /**< Enable error rate counting of unexpected packet or
+ control symbols received. */
+ uint32_t f_toggle : 1; /**< Reserved. */
+ uint32_t del_err : 1; /**< Enable error rate counting of illegal or undefined
+ codegroups (either INV_DATA or INV_CHAR). (Pass 2) */
+ uint32_t uns_ack : 1; /**< Enable error rate counting of unexpected
+ acknowledge control symbols received. */
+ uint32_t lnk_tout : 1; /**< Enable error rate counting of acknowledge or
+ link-response control symbols not received within
+ the specified timeout interval */
+#else
+ uint32_t lnk_tout : 1;
+ uint32_t uns_ack : 1;
+ uint32_t del_err : 1;
+ uint32_t f_toggle : 1;
+ uint32_t proterr : 1;
+ uint32_t bad_ack : 1;
+ uint32_t reserved_6_16 : 11;
+ uint32_t size : 1;
+ uint32_t pkt_crc : 1;
+ uint32_t out_ack : 1;
+ uint32_t nack : 1;
+ uint32_t uns_id : 1;
+ uint32_t ctl_crc : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } cn63xxp1;
+ struct cvmx_sriomaintx_erb_err_rate_en_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_err_rate_en cvmx_sriomaintx_erb_err_rate_en_t;
+
+/**
+ * cvmx_sriomaint#_erb_err_rate_thr
+ *
+ * SRIOMAINT_ERB_ERR_RATE_THR = SRIO Error Rate Threshold
+ *
+ * Error Rate Threshold
+ *
+ * Notes:
+ * The Error Rate Threshold register is used to control the reporting of errors to the link status.
+ * Typically the Degraded Threshold is less than the Fail Threshold.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_ERR_RATE_THR hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_err_rate_thr {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_err_rate_thr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t fail_th : 8; /**< These bits provide the threshold value for
+ reporting an error condition due to a possibly
+ broken link.
+ 0x00 - Disable the Error Rate Failed Threshold
+ Trigger
+ 0x01 - Set the error reporting threshold to 1
+ 0x02 - Set the error reporting threshold to 2
+ - ...
+ 0xFF - Set the error reporting threshold to 255 */
+ uint32_t dgrad_th : 8; /**< These bits provide the threshold value for
+ reporting an error condition due to a possibly
+ degrading link.
+ 0x00 - Disable the Degrade Rate Failed Threshold
+ Trigger
+ 0x01 - Set the error reporting threshold to 1
+ 0x02 - Set the error reporting threshold to 2
+ - ...
+ 0xFF - Set the error reporting threshold to 255 */
+ uint32_t reserved_0_15 : 16;
+#else
+ uint32_t reserved_0_15 : 16;
+ uint32_t dgrad_th : 8;
+ uint32_t fail_th : 8;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_err_rate_thr_s cn63xx;
+ struct cvmx_sriomaintx_erb_err_rate_thr_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_err_rate_thr_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_err_rate_thr cvmx_sriomaintx_erb_err_rate_thr_t;
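+
+/* Illustrative sketch, not part of the SDK: one conservative leaky-bucket
+ * configuration for the two registers above -- decrement roughly once per
+ * second, keep counting above the failed threshold, degrade at 4 errors
+ * and fail at 16. The values are examples only; the primitives are the
+ * hypothetical ones declared near the top of the register list.
+ */
+static inline void example_set_error_rate(int srio_port)
+{
+ cvmx_sriomaintx_erb_err_rate_t rate;
+ cvmx_sriomaintx_erb_err_rate_thr_t thr;
+
+ rate.u32 = 0;
+ rate.s.err_bias = 0x08; /* decrement the counter every 1s (+/-34%) */
+ rate.s.rate_lim = 0x3; /* do not limit counting above the failed threshold */
+ write_sriomaint32(srio_port, CVMX_SRIOMAINTX_ERB_ERR_RATE(srio_port), rate.u32);
+
+ thr.u32 = 0;
+ thr.s.dgrad_th = 4; /* degraded-link threshold */
+ thr.s.fail_th = 16; /* failed-link threshold */
+ write_sriomaint32(srio_port, CVMX_SRIOMAINTX_ERB_ERR_RATE_THR(srio_port), thr.u32);
+}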
+
+/**
+ * cvmx_sriomaint#_erb_hdr
+ *
+ * SRIOMAINT_ERB_HDR = SRIO Error Reporting Block Header
+ *
+ * Error Reporting Block Header
+ *
+ * Notes:
+ * The error management extensions block header register contains the EF_PTR to the next EF_BLK and
+ * the EF_ID that identifies this as the error management extensions block header. In this
+ * implementation this is the last block and therefore the EF_PTR is a NULL pointer.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_HDR hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_hdr {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_hdr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ef_ptr : 16; /**< Pointer to the next block in the extended features
+ data structure. */
+ uint32_t ef_id : 16; /**< Single Port ID */
+#else
+ uint32_t ef_id : 16;
+ uint32_t ef_ptr : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_hdr_s cn63xx;
+ struct cvmx_sriomaintx_erb_hdr_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_hdr_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_hdr cvmx_sriomaintx_erb_hdr_t;
+
+/**
+ * cvmx_sriomaint#_erb_lt_addr_capt_h
+ *
+ * SRIOMAINT_ERB_LT_ADDR_CAPT_H = SRIO Logical/Transport Layer High Address Capture
+ *
+ * Logical/Transport Layer High Address Capture
+ *
+ * Notes:
+ * This register contains error information. It is locked when a Logical/Transport error is detected
+ * and unlocked when the SRIOMAINT(0,2..3)_ERB_LT_ERR_DET is written to zero. This register should be
+ * written only when error detection is disabled. This register is only required for end point
+ transactions using 50- or 66-bit addresses.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_LT_ADDR_CAPT_H hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_lt_addr_capt_h {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_lt_addr_capt_h_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t addr : 32; /**< Most significant 32 bits of the address associated
+ with the error. Information supplied for requests
+ and responses if available. */
+#else
+ uint32_t addr : 32;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_lt_addr_capt_h_s cn63xx;
+ struct cvmx_sriomaintx_erb_lt_addr_capt_h_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_lt_addr_capt_h_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_lt_addr_capt_h cvmx_sriomaintx_erb_lt_addr_capt_h_t;
+
+/**
+ * cvmx_sriomaint#_erb_lt_addr_capt_l
+ *
+ * SRIOMAINT_ERB_LT_ADDR_CAPT_L = SRIO Logical/Transport Layer Low Address Capture
+ *
+ * Logical/Transport Layer Low Address Capture
+ *
+ * Notes:
+ * This register contains error information. It is locked when a Logical/Transport error is detected
+ * and unlocked when the SRIOMAINT(0,2..3)_ERB_LT_ERR_DET is written to zero. This register should be
+ * written only when error detection is disabled.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_LT_ADDR_CAPT_L hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_lt_addr_capt_l {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_lt_addr_capt_l_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t addr : 29; /**< Least significant 29 bits of the address
+ associated with the error. Bits 31:24 specify the
+ request HOP count for Maintenance Operations.
+ Information supplied for requests and responses if
+ available. */
+ uint32_t reserved_2_2 : 1;
+ uint32_t xaddr : 2; /**< Extended address bits of the address associated
+ with the error. Information supplied for requests
+ and responses if available. */
+#else
+ uint32_t xaddr : 2;
+ uint32_t reserved_2_2 : 1;
+ uint32_t addr : 29;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_lt_addr_capt_l_s cn63xx;
+ struct cvmx_sriomaintx_erb_lt_addr_capt_l_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_lt_addr_capt_l_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_lt_addr_capt_l cvmx_sriomaintx_erb_lt_addr_capt_l_t;
+
+/**
+ * cvmx_sriomaint#_erb_lt_ctrl_capt
+ *
+ * SRIOMAINT_ERB_LT_CTRL_CAPT = SRIO Logical/Transport Layer Control Capture
+ *
+ * Logical/Transport Layer Control Capture
+ *
+ * Notes:
+ * This register contains error information. It is locked when a Logical/Transport error is detected
+ * and unlocked when the SRIOMAINT(0,2..3)_ERB_LT_ERR_DET is written to zero. This register should be
+ * written only when error detection is disabled.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_LT_CTRL_CAPT hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_lt_ctrl_capt {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_lt_ctrl_capt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ftype : 4; /**< Format Type associated with the error */
+ uint32_t ttype : 4; /**< Transaction Type associated with the error
+ (For Messages)
+ Message Length */
+ uint32_t extra : 8; /**< Additional Information
+ (For Messages)
+ - 23:22 Letter
+ - 21:20 Mbox
+ - 19:16 Msgseg/xmbox
+ Information for the last message request sent
+ for the mailbox that had an error
+ (For Responses)
+ - 23:20 Response Request FTYPE
+ - 19:16 Response Request TTYPE
+ (For all other types)
+ Reserved. */
+ uint32_t status : 4; /**< Response Status.
+ (For all other Requests)
+ Reserved. */
+ uint32_t size : 4; /**< Size associated with the transaction. */
+ uint32_t tt : 1; /**< Transfer Type 0=ID8, 1=ID16. */
+ uint32_t wdptr : 1; /**< Word Pointer associated with the error. */
+ uint32_t reserved_5_5 : 1;
+ uint32_t capt_idx : 5; /**< Capture Index: 31 minus the bit number set
+ in SRIOMAINT(0,2..3)_ERB_LT_ERR_DET. */
+#else
+ uint32_t capt_idx : 5;
+ uint32_t reserved_5_5 : 1;
+ uint32_t wdptr : 1;
+ uint32_t tt : 1;
+ uint32_t size : 4;
+ uint32_t status : 4;
+ uint32_t extra : 8;
+ uint32_t ttype : 4;
+ uint32_t ftype : 4;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_lt_ctrl_capt_s cn63xx;
+ struct cvmx_sriomaintx_erb_lt_ctrl_capt_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_lt_ctrl_capt_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_lt_ctrl_capt cvmx_sriomaintx_erb_lt_ctrl_capt_t;
+
+/**
+ * cvmx_sriomaint#_erb_lt_dev_id
+ *
+ * SRIOMAINT_ERB_LT_DEV_ID = SRIO Port-write Target deviceID
+ *
+ * Port-write Target deviceID
+ *
+ * Notes:
+ * This SRIO interface does not support generating Port-Writes based on ERB Errors. This register is
+ * currently unused and should be treated as reserved.
+ *
+ * Clk_Rst: SRIOMAINT_ERB_LT_DEV_ID hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_lt_dev_id {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_lt_dev_id_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t id16 : 8; /**< Most significant byte of the port-write
+ destination deviceID (large transport systems
+ only); the destination ID used for Port-Write
+ errors */
+ uint32_t id8 : 8; /**< This is the port-write destination deviceID */
+ uint32_t tt : 1; /**< Transport Type used for Port Write
+ 0 = Small Transport, ID8 Only
+ 1 = Large Transport, ID16 and ID8 */
+ uint32_t reserved_0_14 : 15;
+#else
+ uint32_t reserved_0_14 : 15;
+ uint32_t tt : 1;
+ uint32_t id8 : 8;
+ uint32_t id16 : 8;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_lt_dev_id_s cn63xx;
+ struct cvmx_sriomaintx_erb_lt_dev_id_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_lt_dev_id_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_lt_dev_id cvmx_sriomaintx_erb_lt_dev_id_t;
+
+/**
+ * cvmx_sriomaint#_erb_lt_dev_id_capt
+ *
+ * SRIOMAINT_ERB_LT_DEV_ID_CAPT = SRIO Logical/Transport Layer Device ID Capture
+ *
+ * Logical/Transport Layer Device ID Capture
+ *
+ * Notes:
+ * This register contains error information. It is locked when a Logical/Transport error is detected
+ * and unlocked when the SRIOMAINT(0,2..3)_ERB_LT_ERR_DET is written to zero. This register should be
+ * written only when error detection is disabled.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_LT_DEV_ID_CAPT hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_lt_dev_id_capt {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_lt_dev_id_capt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dst_id16 : 8; /**< Most significant byte of the large transport
+ destination ID associated with the error */
+ uint32_t dst_id8 : 8; /**< Least significant byte of the large transport
+ destination ID or the 8-bit small transport
+ destination ID associated with the error */
+ uint32_t src_id16 : 8; /**< Most significant byte of the large transport
+ source ID associated with the error */
+ uint32_t src_id8 : 8; /**< Least significant byte of the large transport
+ source ID or the 8-bit small transport source ID
+ associated with the error */
+#else
+ uint32_t src_id8 : 8;
+ uint32_t src_id16 : 8;
+ uint32_t dst_id8 : 8;
+ uint32_t dst_id16 : 8;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_lt_dev_id_capt_s cn63xx;
+ struct cvmx_sriomaintx_erb_lt_dev_id_capt_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_lt_dev_id_capt_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_lt_dev_id_capt cvmx_sriomaintx_erb_lt_dev_id_capt_t;
+
+/**
+ * cvmx_sriomaint#_erb_lt_err_det
+ *
+ * SRIOMAINT_ERB_LT_ERR_DET = SRIO Logical/Transport Layer Error Detect
+ *
+ * SRIO Logical/Transport Layer Error Detect
+ *
+ * Notes:
+ * This register indicates the error that was detected by the Logical or Transport logic layer.
+ * Once a bit is set in this CSR, HW will lock the register until SW writes a zero to clear all the
+ * fields. The HW sets SRIO_INT_REG[LOG_ERB] every time it sets one of the bits.
+ * To handle the interrupt, the following procedure may be best:
+ * (1) clear SRIO_INT_REG[LOG_ERB],
+ * (2) read this CSR, corresponding SRIOMAINT*_ERB_LT_ADDR_CAPT_H, SRIOMAINT*_ERB_LT_ADDR_CAPT_L,
+ * SRIOMAINT*_ERB_LT_DEV_ID_CAPT, and SRIOMAINT*_ERB_LT_CTRL_CAPT
+ * (3) Write this CSR to 0.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_LT_ERR_DET hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_lt_err_det {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_lt_err_det_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t io_err : 1; /**< Received a response of ERROR for an IO Logical
+ Layer Request. This includes all Maintenance and
+ Memory Responses not destined for the RX Soft
+ Packet FIFO. When SRIO receives an ERROR response
+ for a read, the issuing core or DPI DMA engine
+ receives result bytes with all bits set. In the
+ case of writes with response, this bit is the only
+ indication of failure. */
+ uint32_t msg_err : 1; /**< Received a response of ERROR for an outgoing
+ message segment. This bit is the only direct
+ indication of a MSG_ERR. When a MSG_ERR occurs,
+ SRIO drops the message segment and will not set
+ SRIO*_INT_REG[OMSG*] after the message
+ "transfer". NOTE: SRIO can continue to send or
+ retry other segments from the same message after
+ a MSG_ERR. */
+ uint32_t gsm_err : 1; /**< Received a response of ERROR for a GSM Logical
+ Request. SRIO hardware never sets this bit. GSM
+ operations are not supported (outside of the Soft
+ Packet FIFO). */
+ uint32_t msg_fmt : 1; /**< Received an incoming Message Segment with a
+ formatting error. A MSG_FMT error occurs when SRIO
+ receives a message segment with a reserved SSIZE,
+ or an illegal data payload size, or a MSGSEG greater
+ than MSGLEN, or a MSGSEG that is the duplicate of
+ one already received by an inflight message.
+ When a non-duplicate MSG_FMT error occurs, SRIO
+ drops the segment and sends an ERROR response.
+ When a duplicate MSG_FMT error occurs, SRIO
+ (internally) terminates the currently-inflight
+ message with an error and processes the duplicate,
+ which may result in a new message being generated
+ internally for the duplicate. */
+ uint32_t ill_tran : 1; /**< Received illegal fields in the request/response
+ packet for a supported transaction or any packet
+ with a reserved transaction type. When an ILL_TRAN
+ error occurs, SRIO ignores the packet. ILL_TRAN
+ errors are 2nd priority after ILL_TGT and may mask
+ other problems. Packets with ILL_TRAN errors cannot
+ enter the RX Soft Packet FIFO.
+ There are two things that can set ILL_TRAN:
+ (1) SRIO received a packet with a tt value that
+ is not 0 or 1, or (2) SRIO received a response to an
+ outstanding message segment whose status was not
+ DONE, RETRY, or ERROR. */
+ uint32_t ill_tgt : 1; /**< Received a packet that contained a destination ID
+ other than SRIOMAINT*_PRI_DEV_ID or
+ SRIOMAINT*_SEC_DEV_ID. When an ILL_TGT error
+ occurs, SRIO drops the packet. ILL_TGT errors are
+ highest priority, so may mask other problems.
+ Packets with ILL_TGT errors cannot enter the RX
+ Soft Packet FIFO. */
+ uint32_t msg_tout : 1; /**< An expected incoming message request has not been
+ received within the time-out interval specified in
+ SRIOMAINT(0,2..3)_PORT_RT_CTL. When a MSG_TOUT occurs,
+ SRIO (internally) terminates the inflight message
+ with an error. */
+ uint32_t pkt_tout : 1; /**< A required response has not been received to an
+ outgoing memory, maintenance or message request
+ before the time-out interval specified in
+ SRIOMAINT(0,2..3)_PORT_RT_CTL. When an IO or maintenance
+ read request operation has a PKT_TOUT, the issuing
+ core load or DPI DMA engine receives all ones for
+ the result. When an IO NWRITE_R has a PKT_TOUT,
+ this bit is the only indication of failure. When a
+ message request operation has a PKT_TOUT, SRIO
+ discards the outgoing message segment, and
+ this bit is the only direct indication of failure.
+ NOTE: SRIO may continue to send or retry other
+ segments from the same message. When one or more of
+ the segments in an outgoing message have a
+ PKT_TOUT, SRIO will not set SRIO*_INT_REG[OMSG*]
+ after the message "transfer". */
+ uint32_t uns_resp : 1; /**< An unsolicited/unexpected memory, maintenance or
+ message response packet was received that was not
+ destined for the RX Soft Packet FIFO. When this
+ condition is detected, the packet is dropped. */
+ uint32_t uns_tran : 1; /**< A transaction is received that is not supported.
+ SRIO HW will never set this bit - SRIO routes all
+ unsupported transactions to the RX soft packet
+ FIFO. */
+ uint32_t reserved_1_21 : 21;
+ uint32_t resp_sz : 1; /**< Received an incoming Memory or Maintenance
+ Read response packet with a DONE status and less
+ data than expected. This condition causes the
+ Read to be completed and an error response to be
+ returned with all the data bits set to the issuing
+ Core or DMA Engine. */
+#else
+ uint32_t resp_sz : 1;
+ uint32_t reserved_1_21 : 21;
+ uint32_t uns_tran : 1;
+ uint32_t uns_resp : 1;
+ uint32_t pkt_tout : 1;
+ uint32_t msg_tout : 1;
+ uint32_t ill_tgt : 1;
+ uint32_t ill_tran : 1;
+ uint32_t msg_fmt : 1;
+ uint32_t gsm_err : 1;
+ uint32_t msg_err : 1;
+ uint32_t io_err : 1;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_lt_err_det_s cn63xx;
+ struct cvmx_sriomaintx_erb_lt_err_det_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_lt_err_det_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_lt_err_det cvmx_sriomaintx_erb_lt_err_det_t;
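+
+/* Usage sketch of the three-step interrupt procedure from the Notes above
+   (illustration only).  my_sriomaint_read32(), my_sriomaint_write32() and
+   my_srio_clear_log_erb() are hypothetical helpers, not SDK functions; the
+   CVMX_SRIOMAINTX_* offset macros are assumed to be the ones defined near
+   the top of this file. */
+extern uint32_t my_sriomaint_read32(int port, uint32_t offset);
+extern void my_sriomaint_write32(int port, uint32_t offset, uint32_t value);
+extern void my_srio_clear_log_erb(int port); /* clears SRIO_INT_REG[LOG_ERB] */
+
+static inline void example_handle_log_erb(int port)
+{
+    cvmx_sriomaintx_erb_lt_err_det_t det;
+
+    my_srio_clear_log_erb(port); /* step (1) */
+    det.u32 = my_sriomaint_read32(port, CVMX_SRIOMAINTX_ERB_LT_ERR_DET(port));
+    if (det.u32) {
+        /* step (2): read the capture CSRs while they are still locked */
+        uint32_t addr_h = my_sriomaint_read32(port, CVMX_SRIOMAINTX_ERB_LT_ADDR_CAPT_H(port));
+        uint32_t addr_l = my_sriomaint_read32(port, CVMX_SRIOMAINTX_ERB_LT_ADDR_CAPT_L(port));
+        uint32_t devid  = my_sriomaint_read32(port, CVMX_SRIOMAINTX_ERB_LT_DEV_ID_CAPT(port));
+        uint32_t ctrl   = my_sriomaint_read32(port, CVMX_SRIOMAINTX_ERB_LT_CTRL_CAPT(port));
+        (void)addr_h; (void)addr_l; (void)devid; (void)ctrl; /* decode as needed */
+        /* step (3): clear all fields, unlocking the capture registers */
+        my_sriomaint_write32(port, CVMX_SRIOMAINTX_ERB_LT_ERR_DET(port), 0);
+    }
+}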
+
+/**
+ * cvmx_sriomaint#_erb_lt_err_en
+ *
+ * SRIOMAINT_ERB_LT_ERR_EN = SRIO Logical/Transport Layer Error Enable
+ *
+ * SRIO Logical/Transport Layer Error Enable
+ *
+ * Notes:
+ * This register contains the bits that control if an error condition locks the Logical/Transport
+ * Layer Error Detect and Capture registers and is reported to the system host.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_LT_ERR_EN hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_lt_err_en {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_lt_err_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t io_err : 1; /**< Enable reporting of an IO error response. Save and
+ lock original request transaction information in
+ all Logical/Transport Layer Capture CSRs. */
+ uint32_t msg_err : 1; /**< Enable reporting of a Message error response. Save
+ and lock original request transaction information
+ in all Logical/Transport Layer Capture CSRs. */
+ uint32_t gsm_err : 1; /**< Enable reporting of a GSM error response. Save and
+ lock original request transaction capture
+ information in all Logical/Transport Layer Capture
+ CSRs. */
+ uint32_t msg_fmt : 1; /**< Enable reporting of a message format error. Save
+ and lock transaction capture information in
+ Logical/Transport Layer Device ID and Control
+ Capture CSRs. */
+ uint32_t ill_tran : 1; /**< Enable reporting of an illegal transaction decode
+ error. Save and lock transaction capture
+ information in Logical/Transport Layer Device ID
+ and Control Capture CSRs. */
+ uint32_t ill_tgt : 1; /**< Enable reporting of an illegal transaction target
+ error. Save and lock transaction capture
+ information in Logical/Transport Layer Device ID
+ and Control Capture CSRs. */
+ uint32_t msg_tout : 1; /**< Enable reporting of a Message Request time-out
+ error. Save and lock transaction capture
+ information in Logical/Transport Layer Device ID
+ and Control Capture CSRs for the last Message
+ request segment packet received. */
+ uint32_t pkt_tout : 1; /**< Enable reporting of a packet response time-out
+ error. Save and lock original request address in
+ Logical/Transport Layer Address Capture CSRs.
+ Save and lock original request Destination ID in
+ Logical/Transport Layer Device ID Capture CSR. */
+ uint32_t uns_resp : 1; /**< Enable reporting of an unsolicited response error.
+ Save and lock transaction capture information in
+ Logical/Transport Layer Device ID and Control
+ Capture CSRs. */
+ uint32_t uns_tran : 1; /**< Enable reporting of an unsupported transaction
+ error. Save and lock transaction capture
+ information in Logical/Transport Layer Device ID
+ and Control Capture CSRs. */
+ uint32_t reserved_1_21 : 21;
+ uint32_t resp_sz : 1; /**< Enable reporting of an incoming response with
+ unexpected data size */
+#else
+ uint32_t resp_sz : 1;
+ uint32_t reserved_1_21 : 21;
+ uint32_t uns_tran : 1;
+ uint32_t uns_resp : 1;
+ uint32_t pkt_tout : 1;
+ uint32_t msg_tout : 1;
+ uint32_t ill_tgt : 1;
+ uint32_t ill_tran : 1;
+ uint32_t msg_fmt : 1;
+ uint32_t gsm_err : 1;
+ uint32_t msg_err : 1;
+ uint32_t io_err : 1;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_lt_err_en_s cn63xx;
+ struct cvmx_sriomaintx_erb_lt_err_en_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_lt_err_en_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_lt_err_en cvmx_sriomaintx_erb_lt_err_en_t;
+
+/**
+ * cvmx_sriomaint#_erb_pack_capt_1
+ *
+ * SRIOMAINT_ERB_PACK_CAPT_1 = SRIO Packet Capture 1
+ *
+ * Packet Capture 1
+ *
+ * Notes:
+ * Error capture register 1 contains either long symbol capture information or bytes 4 through 7 of
+ * the packet header.
+ * The HW will not update this register (i.e. this register is locked) while
+ * SRIOMAINT*_ERB_ATTR_CAPT[VALID] is set. This register should only be read while this bit is set.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_PACK_CAPT_1 hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_pack_capt_1 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_pack_capt_1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t capture : 32; /**< Bytes 4 thru 7 of the packet header. */
+#else
+ uint32_t capture : 32;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_pack_capt_1_s cn63xx;
+ struct cvmx_sriomaintx_erb_pack_capt_1_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_pack_capt_1_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_pack_capt_1 cvmx_sriomaintx_erb_pack_capt_1_t;
+
+/**
+ * cvmx_sriomaint#_erb_pack_capt_2
+ *
+ * SRIOMAINT_ERB_PACK_CAPT_2 = SRIO Packet Capture 2
+ *
+ * Packet Capture 2
+ *
+ * Notes:
+ * Error capture register 2 contains bytes 8 through 11 of the packet header.
+ * The HW will not update this register (i.e. this register is locked) while
+ * SRIOMAINT*_ERB_ATTR_CAPT[VALID] is set. This register should only be read while this bit is set.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_PACK_CAPT_2 hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_pack_capt_2 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_pack_capt_2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t capture : 32; /**< Bytes 8 thru 11 of the packet header. */
+#else
+ uint32_t capture : 32;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_pack_capt_2_s cn63xx;
+ struct cvmx_sriomaintx_erb_pack_capt_2_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_pack_capt_2_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_pack_capt_2 cvmx_sriomaintx_erb_pack_capt_2_t;
+
+/**
+ * cvmx_sriomaint#_erb_pack_capt_3
+ *
+ * SRIOMAINT_ERB_PACK_CAPT_3 = SRIO Packet Capture 3
+ *
+ * Packet Capture 3
+ *
+ * Notes:
+ * Error capture register 3 contains bytes 12 through 15 of the packet header.
+ * The HW will not update this register (i.e. this register is locked) while
+ * SRIOMAINT*_ERB_ATTR_CAPT[VALID] is set. This register should only be read while this bit is set.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_PACK_CAPT_3 hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_pack_capt_3 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_pack_capt_3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t capture : 32; /**< Bytes 12 thru 15 of the packet header. */
+#else
+ uint32_t capture : 32;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_pack_capt_3_s cn63xx;
+ struct cvmx_sriomaintx_erb_pack_capt_3_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_pack_capt_3_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_pack_capt_3 cvmx_sriomaintx_erb_pack_capt_3_t;
+
+/**
+ * cvmx_sriomaint#_erb_pack_sym_capt
+ *
+ * SRIOMAINT_ERB_PACK_SYM_CAPT = SRIO Packet/Control Symbol Capture
+ *
+ * Packet/Control Symbol Capture
+ *
+ * Notes:
+ * This register contains either captured control symbol information or the first 4 bytes of captured
+ * packet information. The Errors that generate Partial Control Symbols can be found in
+ * SRIOMAINT*_ERB_ERR_DET. The HW will not update this register (i.e. this register is locked) while
+ * SRIOMAINT*_ERB_ATTR_CAPT[VALID] is set. This register should only be read while this bit is set.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_ERB_PACK_SYM_CAPT hclk hrst_n
+ */
+union cvmx_sriomaintx_erb_pack_sym_capt {
+ uint32_t u32;
+ struct cvmx_sriomaintx_erb_pack_sym_capt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t capture : 32; /**< Control Character and Control Symbol or Bytes 0 to
+ 3 of Packet Header
+ The Control Symbol consists of
+ - 31:24 - SC Character (0 in Partial Symbol)
+ - 23:21 - Stype 0
+ - 20:16 - Parameter 0
+ - 15:11 - Parameter 1
+ - 10: 8 - Stype 1 (0 in Partial Symbol)
+ - 7: 5 - Command (0 in Partial Symbol)
+ - 4: 0 - CRC5 (0 in Partial Symbol) */
+#else
+ uint32_t capture : 32;
+#endif
+ } s;
+ struct cvmx_sriomaintx_erb_pack_sym_capt_s cn63xx;
+ struct cvmx_sriomaintx_erb_pack_sym_capt_s cn63xxp1;
+ struct cvmx_sriomaintx_erb_pack_sym_capt_s cn66xx;
+};
+typedef union cvmx_sriomaintx_erb_pack_sym_capt cvmx_sriomaintx_erb_pack_sym_capt_t;
+
+/**
+ * cvmx_sriomaint#_hb_dev_id_lock
+ *
+ * SRIOMAINT_HB_DEV_ID_LOCK = SRIO Host Device ID Lock
+ *
+ * The Host Base Device ID
+ *
+ * Notes:
+ * This register contains the Device ID of the Host responsible for initializing this SRIO device.
+ * The register contains a special write-once function that captures the first HOSTID written to it
+ * after reset. The function allows several potential hosts to write to this register and then read
+ * it to see if they have responsibility for initialization. The register can be unlocked by
+ * rewriting the current host value. This will reset the lock and restore the value to 0xFFFF.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_HB_DEV_ID_LOCK hclk hrst_n
+ */
+union cvmx_sriomaintx_hb_dev_id_lock {
+ uint32_t u32;
+ struct cvmx_sriomaintx_hb_dev_id_lock_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t hostid : 16; /**< Primary 16-bit Device ID */
+#else
+ uint32_t hostid : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_hb_dev_id_lock_s cn63xx;
+ struct cvmx_sriomaintx_hb_dev_id_lock_s cn63xxp1;
+ struct cvmx_sriomaintx_hb_dev_id_lock_s cn66xx;
+};
+typedef union cvmx_sriomaintx_hb_dev_id_lock cvmx_sriomaintx_hb_dev_id_lock_t;
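+
+/* Usage sketch of the write-once lock described above (illustration only;
+   the helpers are hypothetical).  A prospective host writes its own ID and
+   reads the register back: if HOSTID matches, this host owns initialization.
+   Writing the current value a second time unlocks the register and restores
+   0xFFFF. */
+extern uint32_t my_sriomaint_read32(int port, uint32_t offset);
+extern void my_sriomaint_write32(int port, uint32_t offset, uint32_t value);
+
+static inline int example_try_host_lock(int port, uint16_t my_id)
+{
+    cvmx_sriomaintx_hb_dev_id_lock_t lock;
+
+    lock.u32 = 0;
+    lock.s.hostid = my_id;
+    my_sriomaint_write32(port, CVMX_SRIOMAINTX_HB_DEV_ID_LOCK(port), lock.u32);
+    lock.u32 = my_sriomaint_read32(port, CVMX_SRIOMAINTX_HB_DEV_ID_LOCK(port));
+    return lock.s.hostid == my_id; /* nonzero: we are the initializing host */
+}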
+
+/**
+ * cvmx_sriomaint#_ir_buffer_config
+ *
+ * SRIOMAINT_IR_BUFFER_CONFIG = SRIO Buffer Configuration
+ *
+ * Buffer Configuration
+ *
+ * Notes:
+ * This register controls the operation of the SRIO Core buffer mux logic.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_IR_BUFFER_CONFIG hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_buffer_config {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_buffer_config_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t tx_wm0 : 4; /**< Reserved. (See SRIOMAINT(0,2..3)_IR_BUFFER_CONFIG2) */
+ uint32_t tx_wm1 : 4; /**< Reserved. (See SRIOMAINT(0,2..3)_IR_BUFFER_CONFIG2) */
+ uint32_t tx_wm2 : 4; /**< Reserved. (See SRIOMAINT(0,2..3)_IR_BUFFER_CONFIG2) */
+ uint32_t reserved_3_19 : 17;
+ uint32_t tx_flow : 1; /**< Controls whether Transmitter Flow Control is
+ permitted on this device.
+ 0 - Disabled
+ 1 - Permitted
+ The reset value of this field is
+ SRIO*_IP_FEATURE[TX_FLOW]. */
+ uint32_t tx_sync : 1; /**< Reserved. */
+ uint32_t rx_sync : 1; /**< Reserved. */
+#else
+ uint32_t rx_sync : 1;
+ uint32_t tx_sync : 1;
+ uint32_t tx_flow : 1;
+ uint32_t reserved_3_19 : 17;
+ uint32_t tx_wm2 : 4;
+ uint32_t tx_wm1 : 4;
+ uint32_t tx_wm0 : 4;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_buffer_config_s cn63xx;
+ struct cvmx_sriomaintx_ir_buffer_config_s cn63xxp1;
+ struct cvmx_sriomaintx_ir_buffer_config_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_buffer_config cvmx_sriomaintx_ir_buffer_config_t;
+
+/**
+ * cvmx_sriomaint#_ir_buffer_config2
+ *
+ * SRIOMAINT_IR_BUFFER_CONFIG2 = SRIO Buffer Configuration 2
+ *
+ * Buffer Configuration 2
+ *
+ * Notes:
+ * This register controls the RX and TX Buffer availability by priority. The typical values are
+ * optimized for normal operation. Care must be taken when changing these values to avoid values
+ * which can result in deadlocks. Disabling a priority is not recommended and can result in system
+ * level failures.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_IR_BUFFER_CONFIG2 hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_buffer_config2 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_buffer_config2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t tx_wm3 : 4; /**< Number of buffers free before a priority 3 packet
+ will be transmitted. A value of 9 will disable
+ this priority. */
+ uint32_t tx_wm2 : 4; /**< Number of buffers free before a priority 2 packet
+ will be transmitted. A value of 9 will disable
+ this priority. */
+ uint32_t tx_wm1 : 4; /**< Number of buffers free before a priority 1 packet
+ will be transmitted. A value of 9 will disable
+ this priority. */
+ uint32_t tx_wm0 : 4; /**< Number of buffers free before a priority 0 packet
+ will be transmitted. A value of 9 will disable
+ this priority. */
+ uint32_t rx_wm3 : 4; /**< Number of buffers free before a priority 3 packet
+ will be accepted. A value of 9 will disable this
+ priority and always cause a physical layer RETRY. */
+ uint32_t rx_wm2 : 4; /**< Number of buffers free before a priority 2 packet
+ will be accepted. A value of 9 will disable this
+ priority and always cause a physical layer RETRY. */
+ uint32_t rx_wm1 : 4; /**< Number of buffers free before a priority 1 packet
+ will be accepted. A value of 9 will disable this
+ priority and always cause a physical layer RETRY. */
+ uint32_t rx_wm0 : 4; /**< Number of buffers free before a priority 0 packet
+ will be accepted. A value of 9 will disable this
+ priority and always cause a physical layer RETRY. */
+#else
+ uint32_t rx_wm0 : 4;
+ uint32_t rx_wm1 : 4;
+ uint32_t rx_wm2 : 4;
+ uint32_t rx_wm3 : 4;
+ uint32_t tx_wm0 : 4;
+ uint32_t tx_wm1 : 4;
+ uint32_t tx_wm2 : 4;
+ uint32_t tx_wm3 : 4;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_buffer_config2_s cn63xx;
+ struct cvmx_sriomaintx_ir_buffer_config2_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_buffer_config2 cvmx_sriomaintx_ir_buffer_config2_t;
+
+/**
+ * cvmx_sriomaint#_ir_pd_phy_ctrl
+ *
+ * SRIOMAINT_IR_PD_PHY_CTRL = SRIO Platform Dependent PHY Control
+ *
+ * Platform Dependent PHY Control
+ *
+ * Notes:
+ * This register can be used for testing. The register is otherwise unused by the hardware.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_IR_PD_PHY_CTRL hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_pd_phy_ctrl {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_pd_phy_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pd_ctrl : 32; /**< Unused Register available for testing */
+#else
+ uint32_t pd_ctrl : 32;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_pd_phy_ctrl_s cn63xx;
+ struct cvmx_sriomaintx_ir_pd_phy_ctrl_s cn63xxp1;
+ struct cvmx_sriomaintx_ir_pd_phy_ctrl_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_pd_phy_ctrl cvmx_sriomaintx_ir_pd_phy_ctrl_t;
+
+/**
+ * cvmx_sriomaint#_ir_pd_phy_stat
+ *
+ * SRIOMAINT_IR_PD_PHY_STAT = SRIO Platform Dependent PHY Status
+ *
+ * Platform Dependent PHY Status
+ *
+ * Notes:
+ * This register is used to monitor PHY status on each lane. The fields are documented here to
+ * assist in debugging only. The lane numbers take into account the lane swap pin.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_IR_PD_PHY_STAT hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_pd_phy_stat {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_pd_phy_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t ln3_rx : 3; /**< Phy Lane 3 RX Status
+ 0XX = Normal Operation
+ 100 = 8B/10B Error
+ 101 = Elastic Buffer Overflow (Data Lost)
+ 110 = Elastic Buffer Underflow (Data Corrupted)
+ 111 = Disparity Error */
+ uint32_t ln3_dis : 1; /**< Lane 3 Phy Clock Disabled
+ 0 = Phy Clock Valid
+ 1 = Phy Clock InValid */
+ uint32_t ln2_rx : 3; /**< Phy Lane 2 RX Status
+ 0XX = Normal Operation
+ 100 = 8B/10B Error
+ 101 = Elastic Buffer Overflow (Data Lost)
+ 110 = Elastic Buffer Underflow (Data Corrupted)
+ 111 = Disparity Error */
+ uint32_t ln2_dis : 1; /**< Lane 2 Phy Clock Disabled
+ 0 = Phy Clock Valid
+ 1 = Phy Clock InValid */
+ uint32_t ln1_rx : 3; /**< Phy Lane 1 RX Status
+ 0XX = Normal Operation
+ 100 = 8B/10B Error
+ 101 = Elastic Buffer Overflow (Data Lost)
+ 110 = Elastic Buffer Underflow (Data Corrupted)
+ 111 = Disparity Error */
+ uint32_t ln1_dis : 1; /**< Lane 1 Phy Clock Disabled
+ 0 = Phy Clock Valid
+ 1 = Phy Clock InValid */
+ uint32_t ln0_rx : 3; /**< Phy Lane 0 RX Status
+ 0XX = Normal Operation
+ 100 = 8B/10B Error
+ 101 = Elastic Buffer Overflow (Data Lost)
+ 110 = Elastic Buffer Underflow (Data Corrupted)
+ 111 = Disparity Error */
+ uint32_t ln0_dis : 1; /**< Lane 0 Phy Clock Disabled
+ 0 = Phy Clock Valid
+ 1 = Phy Clock InValid */
+#else
+ uint32_t ln0_dis : 1;
+ uint32_t ln0_rx : 3;
+ uint32_t ln1_dis : 1;
+ uint32_t ln1_rx : 3;
+ uint32_t ln2_dis : 1;
+ uint32_t ln2_rx : 3;
+ uint32_t ln3_dis : 1;
+ uint32_t ln3_rx : 3;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_pd_phy_stat_s cn63xx;
+ struct cvmx_sriomaintx_ir_pd_phy_stat_s cn63xxp1;
+ struct cvmx_sriomaintx_ir_pd_phy_stat_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_pd_phy_stat cvmx_sriomaintx_ir_pd_phy_stat_t;
+
+/**
+ * cvmx_sriomaint#_ir_pi_phy_ctrl
+ *
+ * SRIOMAINT_IR_PI_PHY_CTRL = SRIO Platform Independent PHY Control
+ *
+ * Platform Independent PHY Control
+ *
+ * Notes:
+ * This register is used to control platform independent operating modes of the transceivers. These
+ * control bits are uniform across all platforms.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_IR_PI_PHY_CTRL hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_pi_phy_ctrl {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_pi_phy_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t tx_reset : 1; /**< Outgoing PHY Logic Reset. 0=Reset, 1=Normal Op */
+ uint32_t rx_reset : 1; /**< Incoming PHY Logic Reset. 0=Reset, 1=Normal Op */
+ uint32_t reserved_29_29 : 1;
+ uint32_t loopback : 2; /**< These bits control the state of the loopback
+ control vector on the transceiver interface. The
+ loopback modes are enumerated as follows:
+ 00 - No Loopback
+ 01 - Near End PCS Loopback
+ 10 - Far End PCS Loopback
+ 11 - Both Near and Far End PCS Loopback */
+ uint32_t reserved_0_26 : 27;
+#else
+ uint32_t reserved_0_26 : 27;
+ uint32_t loopback : 2;
+ uint32_t reserved_29_29 : 1;
+ uint32_t rx_reset : 1;
+ uint32_t tx_reset : 1;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_pi_phy_ctrl_s cn63xx;
+ struct cvmx_sriomaintx_ir_pi_phy_ctrl_s cn63xxp1;
+ struct cvmx_sriomaintx_ir_pi_phy_ctrl_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_pi_phy_ctrl cvmx_sriomaintx_ir_pi_phy_ctrl_t;
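+
+/* Usage sketch (illustration only; helpers hypothetical): select near-end
+   PCS loopback while keeping both PHY logic resets deasserted (1 = normal
+   operation), per the field descriptions above. */
+extern uint32_t my_sriomaint_read32(int port, uint32_t offset);
+extern void my_sriomaint_write32(int port, uint32_t offset, uint32_t value);
+
+static inline void example_near_end_loopback(int port)
+{
+    cvmx_sriomaintx_ir_pi_phy_ctrl_t ctrl;
+
+    ctrl.u32 = my_sriomaint_read32(port, CVMX_SRIOMAINTX_IR_PI_PHY_CTRL(port));
+    ctrl.s.tx_reset = 1; /* 1 = normal operation */
+    ctrl.s.rx_reset = 1; /* 1 = normal operation */
+    ctrl.s.loopback = 1; /* 01 = Near End PCS Loopback */
+    my_sriomaint_write32(port, CVMX_SRIOMAINTX_IR_PI_PHY_CTRL(port), ctrl.u32);
+}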
+
+/**
+ * cvmx_sriomaint#_ir_pi_phy_stat
+ *
+ * SRIOMAINT_IR_PI_PHY_STAT = SRIO Platform Independent PHY Status
+ *
+ * Platform Independent PHY Status
+ *
+ * Notes:
+ * This register displays the status of the link initialization state machine. Changes to this state
+ * cause the SRIO(0,2..3)_INT_REG.LINK_UP or SRIO(0,2..3)_INT_REG.LINK_DOWN interrupts.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_IR_PI_PHY_STAT hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_pi_phy_stat {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_pi_phy_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_12_31 : 20;
+ uint32_t tx_rdy : 1; /**< Minimum number of Status Transmitted */
+ uint32_t rx_rdy : 1; /**< Minimum number of Good Status Received */
+ uint32_t init_sm : 10; /**< Initialization State Machine
+ 001 - Silent
+ 002 - Seek
+ 004 - Discovery
+ 008 - 1x_Mode_Lane0
+ 010 - 1x_Mode_Lane1
+ 020 - 1x_Mode_Lane2
+ 040 - 1x_Recovery
+ 080 - 2x_Mode
+ 100 - 2x_Recovery
+ 200 - 4x_Mode
+ All others are reserved */
+#else
+ uint32_t init_sm : 10;
+ uint32_t rx_rdy : 1;
+ uint32_t tx_rdy : 1;
+ uint32_t reserved_12_31 : 20;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_pi_phy_stat_s cn63xx;
+ struct cvmx_sriomaintx_ir_pi_phy_stat_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_10_31 : 22;
+ uint32_t init_sm : 10; /**< Initialization State Machine
+ 001 - Silent
+ 002 - Seek
+ 004 - Discovery
+ 008 - 1x_Mode_Lane0
+ 010 - 1x_Mode_Lane1
+ 020 - 1x_Mode_Lane2
+ 040 - 1x_Recovery
+ 080 - 2x_Mode
+ 100 - 2x_Recovery
+ 200 - 4x_Mode
+ All others are reserved */
+#else
+ uint32_t init_sm : 10;
+ uint32_t reserved_10_31 : 22;
+#endif
+ } cn63xxp1;
+ struct cvmx_sriomaintx_ir_pi_phy_stat_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_pi_phy_stat cvmx_sriomaintx_ir_pi_phy_stat_t;
+
+/**
+ * cvmx_sriomaint#_ir_sp_rx_ctrl
+ *
+ * SRIOMAINT_IR_SP_RX_CTRL = SRIO Soft Packet FIFO Receive Control
+ *
+ * Soft Packet FIFO Receive Control
+ *
+ * Notes:
+ * This register is used to configure events generated by the reception of packets using the soft
+ * packet FIFO.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_IR_SP_RX_CTRL hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_sp_rx_ctrl {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_sp_rx_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t overwrt : 1; /**< When clear, SRIO drops received packets that should
+ enter the soft packet FIFO when the FIFO is full.
+ In this case, SRIO also increments
+ SRIOMAINT(0,2..3)_IR_SP_RX_STAT.DROP_CNT. When set, SRIO
+ stalls received packets that should enter the soft
+ packet FIFO when the FIFO is full. SRIO may stop
+ receiving any packets in this stall case if
+ software does not drain the receive soft packet
+ FIFO. */
+#else
+ uint32_t overwrt : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_sp_rx_ctrl_s cn63xx;
+ struct cvmx_sriomaintx_ir_sp_rx_ctrl_s cn63xxp1;
+ struct cvmx_sriomaintx_ir_sp_rx_ctrl_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_sp_rx_ctrl cvmx_sriomaintx_ir_sp_rx_ctrl_t;
+
+/**
+ * cvmx_sriomaint#_ir_sp_rx_data
+ *
+ * SRIOMAINT_IR_SP_RX_DATA = SRIO Soft Packet FIFO Receive Data
+ *
+ * Soft Packet FIFO Receive Data
+ *
+ * Notes:
+ * This register is used to read data from the soft packet FIFO. The Soft Packet FIFO contains the
+ * majority of the packet data received from the SRIO link. The packet does not include the Control
+ * Symbols or the initial byte containing AckId, 2 Reserved Bits and the CRF. In the case of packets
+ * with less than 80 bytes (including AckId byte) both the trailing CRC and Pad (if present) are
+ * included in the FIFO and Octet Count. In the case of a packet with exactly 80 bytes (including
+ * the AckId byte) the CRC is removed and the Pad is maintained so the Octet Count will read 81 bytes
+ * instead of the expected 83. In cases over 80 bytes the CRC at 80 bytes is removed but the
+ * trailing CRC and Pad (if necessary) are present.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_IR_SP_RX_DATA hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_sp_rx_data {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_sp_rx_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_data : 32; /**< This register is used to read packet data from the
+ RX FIFO. */
+#else
+ uint32_t pkt_data : 32;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_sp_rx_data_s cn63xx;
+ struct cvmx_sriomaintx_ir_sp_rx_data_s cn63xxp1;
+ struct cvmx_sriomaintx_ir_sp_rx_data_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_sp_rx_data cvmx_sriomaintx_ir_sp_rx_data_t;
+
+/**
+ * cvmx_sriomaint#_ir_sp_rx_stat
+ *
+ * SRIOMAINT_IR_SP_RX_STAT = SRIO Soft Packet FIFO Receive Status
+ *
+ * Soft Packet FIFO Receive Status
+ *
+ * Notes:
+ * This register is used to monitor the reception of packets using the soft packet FIFO.
+ * The HW sets SRIO_INT_REG[SOFT_RX] every time a packet arrives in the soft packet FIFO. To read
+ * out (one or more) packets, the following procedure may be best:
+ * (1) clear SRIO_INT_REG[SOFT_RX],
+ * (2) read this CSR to determine how many packets there are,
+ * (3) read the packets out (via SRIOMAINT*_IR_SP_RX_DATA).
+ * This procedure could lead to situations where SOFT_RX will be set even though there are currently
+ * no packets - the SW interrupt handler would need to properly handle this case.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_IR_SP_RX_STAT hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_sp_rx_stat {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_sp_rx_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t octets : 16; /**< This field shows how many octets are remaining
+ in the current packet in the RX FIFO. */
+ uint32_t buffers : 4; /**< This field indicates how many complete packets are
+ stored in the Rx FIFO. */
+ uint32_t drop_cnt : 7; /**< Number of Packets Received when the RX FIFO was
+ full and then discarded. */
+ uint32_t full : 1; /**< This bit is set when the value of Buffers Filled
+ equals the number of available reception buffers. */
+ uint32_t fifo_st : 4; /**< These bits display the state of the state machine
+ that controls loading of packet data into the RX
+ FIFO. The enumeration of states is as follows:
+ 0000 - Idle
+ 0001 - Armed
+ 0010 - Active
+ All other states are reserved. */
+#else
+ uint32_t fifo_st : 4;
+ uint32_t full : 1;
+ uint32_t drop_cnt : 7;
+ uint32_t buffers : 4;
+ uint32_t octets : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_sp_rx_stat_s cn63xx;
+ struct cvmx_sriomaintx_ir_sp_rx_stat_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t octets : 16; /**< This field shows how many octets are remaining
+ in the current packet in the RX FIFO. */
+ uint32_t buffers : 4; /**< This field indicates how many complete packets are
+ stored in the Rx FIFO. */
+ uint32_t reserved_5_11 : 7;
+ uint32_t full : 1; /**< This bit is set when the value of Buffers Filled
+ equals the number of available reception buffers.
+ This bit always reads zero in Pass 1 */
+ uint32_t fifo_st : 4; /**< These bits display the state of the state machine
+ that controls loading of packet data into the RX
+ FIFO. The enumeration of states is as follows:
+ 0000 - Idle
+ 0001 - Armed
+ 0010 - Active
+ All other states are reserved. */
+#else
+ uint32_t fifo_st : 4;
+ uint32_t full : 1;
+ uint32_t reserved_5_11 : 7;
+ uint32_t buffers : 4;
+ uint32_t octets : 16;
+#endif
+ } cn63xxp1;
+ struct cvmx_sriomaintx_ir_sp_rx_stat_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_sp_rx_stat cvmx_sriomaintx_ir_sp_rx_stat_t;
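+
+/* Usage sketch of the receive procedure from the Notes above (illustration
+   only; the helper is hypothetical).  OCTETS reports the bytes remaining in
+   the current packet, so each packet is drained 32 bits at a time before
+   BUFFERS is re-checked.  Clearing SRIO_INT_REG[SOFT_RX] (step 1) is omitted
+   here. */
+extern uint32_t my_sriomaint_read32(int port, uint32_t offset);
+
+static inline void example_drain_soft_rx_fifo(int port)
+{
+    cvmx_sriomaintx_ir_sp_rx_stat_t stat;
+
+    stat.u32 = my_sriomaint_read32(port, CVMX_SRIOMAINTX_IR_SP_RX_STAT(port));
+    while (stat.s.buffers) { /* step (2): complete packets are present */
+        int words = (stat.s.octets + 3) / 4;
+        while (words-- > 0) /* step (3): read the packet out */
+            (void)my_sriomaint_read32(port, CVMX_SRIOMAINTX_IR_SP_RX_DATA(port));
+        stat.u32 = my_sriomaint_read32(port, CVMX_SRIOMAINTX_IR_SP_RX_STAT(port));
+    }
+}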
+
+/**
+ * cvmx_sriomaint#_ir_sp_tx_ctrl
+ *
+ * SRIOMAINT_IR_SP_TX_CTRL = SRIO Soft Packet FIFO Transmit Control
+ *
+ * Soft Packet FIFO Transmit Control
+ *
+ * Notes:
+ * This register is used to configure and control the transmission of packets using the soft packet
+ * FIFO.
+ *
+ * Clk_Rst: SRIOMAINT_IR_SP_TX_CTRL hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_sp_tx_ctrl {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_sp_tx_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t octets : 16; /**< Writing a non-zero value (N) to this field arms
+ the packet FIFO for packet transmission. The FIFO
+ control logic will transmit the next N bytes
+ written 4-bytes at a time to the
+ SRIOMAINT(0,2..3)_IR_SP_TX_DATA Register and create a
+ single RapidIO packet. */
+ uint32_t reserved_0_15 : 16;
+#else
+ uint32_t reserved_0_15 : 16;
+ uint32_t octets : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_sp_tx_ctrl_s cn63xx;
+ struct cvmx_sriomaintx_ir_sp_tx_ctrl_s cn63xxp1;
+ struct cvmx_sriomaintx_ir_sp_tx_ctrl_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_sp_tx_ctrl cvmx_sriomaintx_ir_sp_tx_ctrl_t;
+
+/**
+ * cvmx_sriomaint#_ir_sp_tx_data
+ *
+ * SRIOMAINT_IR_SP_TX_DATA = SRIO Soft Packet FIFO Transmit Data
+ *
+ * Soft Packet FIFO Transmit Data
+ *
+ * Notes:
+ * This register is used to write data to the soft packet FIFO. The format of the packet follows the
+ * Internal Packet Format (add link here). Care must be taken on creating TIDs for the packets which
+ * generate a response. Bits [7:6] of the 8 bit TID must be set for all Soft Packet FIFO generated
+ * packets. TID values of 0x00 - 0xBF are reserved for hardware generated Tags. The remainder of the
+ * TID[5:0] must be unique for each packet in flight and cannot be reused until a response is received
+ * in the SRIOMAINT(0,2..3)_IR_SP_RX_DATA register.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_IR_SP_TX_DATA hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_sp_tx_data {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_sp_tx_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_data : 32; /**< This register is used to write packet data to the
+ Tx FIFO. Reads of this register will return zero. */
+#else
+ uint32_t pkt_data : 32;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_sp_tx_data_s cn63xx;
+ struct cvmx_sriomaintx_ir_sp_tx_data_s cn63xxp1;
+ struct cvmx_sriomaintx_ir_sp_tx_data_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_sp_tx_data cvmx_sriomaintx_ir_sp_tx_data_t;
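+
+/* Usage sketch of a soft packet FIFO transmit (illustration only; the helper
+   is hypothetical).  Per the Notes above: arm the FIFO by writing the byte
+   count to SRIOMAINT*_IR_SP_TX_CTRL[OCTETS], then feed the packet four bytes
+   at a time.  The caller must build the packet so that any request TID has
+   bits [7:6] set and a TID[5:0] unique among packets in flight. */
+extern void my_sriomaint_write32(int port, uint32_t offset, uint32_t value);
+
+static inline void example_send_soft_packet(int port, const uint32_t *words,
+                                            uint16_t octets)
+{
+    cvmx_sriomaintx_ir_sp_tx_ctrl_t ctrl;
+    int i;
+
+    ctrl.u32 = 0;
+    ctrl.s.octets = octets; /* arms the FIFO for one packet of this size */
+    my_sriomaint_write32(port, CVMX_SRIOMAINTX_IR_SP_TX_CTRL(port), ctrl.u32);
+    for (i = 0; i < (octets + 3) / 4; i++)
+        my_sriomaint_write32(port, CVMX_SRIOMAINTX_IR_SP_TX_DATA(port), words[i]);
+}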
+
+/**
+ * cvmx_sriomaint#_ir_sp_tx_stat
+ *
+ * SRIOMAINT_IR_SP_TX_STAT = SRIO Soft Packet FIFO Transmit Status
+ *
+ * Soft Packet FIFO Transmit Status
+ *
+ * Notes:
+ * This register is used to monitor the transmission of packets using the soft packet FIFO.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_IR_SP_TX_STAT hclk hrst_n
+ */
+union cvmx_sriomaintx_ir_sp_tx_stat {
+ uint32_t u32;
+ struct cvmx_sriomaintx_ir_sp_tx_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t octets : 16; /**< This field shows how many octets are still to be
+ loaded in the current packet. */
+ uint32_t buffers : 4; /**< This field indicates how many complete packets are
+ stored in the Tx FIFO. The field always reads
+ zero in the current hardware. */
+ uint32_t reserved_5_11 : 7;
+ uint32_t full : 1; /**< This bit is set when the value of Buffers Filled
+ equals the number of available transmission
+ buffers. */
+ uint32_t fifo_st : 4; /**< These bits display the state of the state machine
+ that controls loading of packet data into the TX
+ FIFO. The enumeration of states is as follows:
+ 0000 - Idle
+ 0001 - Armed
+ 0010 - Active
+ All other states are reserved. */
+#else
+ uint32_t fifo_st : 4;
+ uint32_t full : 1;
+ uint32_t reserved_5_11 : 7;
+ uint32_t buffers : 4;
+ uint32_t octets : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_ir_sp_tx_stat_s cn63xx;
+ struct cvmx_sriomaintx_ir_sp_tx_stat_s cn63xxp1;
+ struct cvmx_sriomaintx_ir_sp_tx_stat_s cn66xx;
+};
+typedef union cvmx_sriomaintx_ir_sp_tx_stat cvmx_sriomaintx_ir_sp_tx_stat_t;
+
+/**
+ * cvmx_sriomaint#_lane_#_status_0
+ *
+ * SRIOMAINT_LANE_X_STATUS_0 = SRIO Lane X Status 0
+ *
+ * SRIO Lane Status 0
+ *
+ * Notes:
+ * This register contains status information about the local lane transceiver.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_LANE_[0:3]_STATUS_0 hclk hrst_n
+ */
+union cvmx_sriomaintx_lane_x_status_0 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_lane_x_status_0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t port : 8; /**< The number of the port within the device to which
+ the lane is assigned. */
+ uint32_t lane : 4; /**< Lane Number within the port. */
+ uint32_t tx_type : 1; /**< Transmitter Type
+ 0 = Short Run
+ 1 = Long Run */
+ uint32_t tx_mode : 1; /**< Transmitter Operating Mode
+ 0 = Short Run
+ 1 = Long Run */
+ uint32_t rx_type : 2; /**< Receiver Type
+ 0 = Short Run
+ 1 = Medium Run
+ 2 = Long Run
+ 3 = Reserved */
+ uint32_t rx_inv : 1; /**< Receiver Input Inverted
+ 0 = No Inversion
+ 1 = Input Inverted */
+ uint32_t rx_adapt : 1; /**< Receiver Trained
+ 0 = One or more adaptive equalizers are
+ controlled by the lane receiver and at least
+ one is not trained.
+ 1 = The lane receiver controls no adaptive
+ equalizers or all the equalizers are trained. */
+ uint32_t rx_sync : 1; /**< Receiver Lane Sync'd */
+ uint32_t rx_train : 1; /**< Receiver Lane Trained */
+ uint32_t dec_err : 4; /**< 8Bit/10Bit Decoding Errors
+ 0 = No Errors since last read
+ 1-14 = Number of Errors since last read
+ 15 = Fifteen or more Errors since last read */
+ uint32_t xsync : 1; /**< Receiver Lane Sync Change
+ 0 = Lane Sync has not changed since last read
+ 1 = Lane Sync has changed since last read */
+ uint32_t xtrain : 1; /**< Receiver Training Change
+ 0 = Training has not changed since last read
+ 1 = Training has changed since last read */
+ uint32_t reserved_4_5 : 2;
+ uint32_t status1 : 1; /**< Status 1 CSR Implemented */
+ uint32_t statusn : 3; /**< Status 2-7 Not Implemented */
+#else
+ uint32_t statusn : 3;
+ uint32_t status1 : 1;
+ uint32_t reserved_4_5 : 2;
+ uint32_t xtrain : 1;
+ uint32_t xsync : 1;
+ uint32_t dec_err : 4;
+ uint32_t rx_train : 1;
+ uint32_t rx_sync : 1;
+ uint32_t rx_adapt : 1;
+ uint32_t rx_inv : 1;
+ uint32_t rx_type : 2;
+ uint32_t tx_mode : 1;
+ uint32_t tx_type : 1;
+ uint32_t lane : 4;
+ uint32_t port : 8;
+#endif
+ } s;
+ struct cvmx_sriomaintx_lane_x_status_0_s cn63xx;
+ struct cvmx_sriomaintx_lane_x_status_0_s cn63xxp1;
+ struct cvmx_sriomaintx_lane_x_status_0_s cn66xx;
+};
+typedef union cvmx_sriomaintx_lane_x_status_0 cvmx_sriomaintx_lane_x_status_0_t;
+
+/**
+ * cvmx_sriomaint#_lcs_ba0
+ *
+ * SRIOMAINT_LCS_BA0 = SRIO Local Configuration Space MSB Base Address
+ *
+ * MSBs of SRIO Address Space mapped to Maintenance BAR.
+ *
+ * Notes:
+ * The double word aligned SRIO address window mapped to the SRIO Maintenance BAR. This window has
+ * the highest priority and eclipses matches to the BAR0, BAR1 and BAR2 windows. Note: Address bits
+ * not supplied in the transfer are considered zero. For example, SRIO Address 65:35 must be set to
+ * zero to match in a 34-bit access. SRIO Address 65:50 must be set to zero to match in a 50-bit
+ * access. This coding allows the Maintenance Bar window to appear in specific address spaces. The
+ * remaining bits are located in SRIOMAINT(0,2..3)_LCS_BA1. This SRIO maintenance BAR is effectively
+ * disabled when LCSBA[30] is set with 34 or 50-bit addressing.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_LCS_BA0 hclk hrst_n
+ */
+union cvmx_sriomaintx_lcs_ba0 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_lcs_ba0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_31_31 : 1;
+ uint32_t lcsba : 31; /**< SRIO Address 65:35 */
+#else
+ uint32_t lcsba : 31;
+ uint32_t reserved_31_31 : 1;
+#endif
+ } s;
+ struct cvmx_sriomaintx_lcs_ba0_s cn63xx;
+ struct cvmx_sriomaintx_lcs_ba0_s cn63xxp1;
+ struct cvmx_sriomaintx_lcs_ba0_s cn66xx;
+};
+typedef union cvmx_sriomaintx_lcs_ba0 cvmx_sriomaintx_lcs_ba0_t;
+
+/**
+ * cvmx_sriomaint#_lcs_ba1
+ *
+ * SRIOMAINT_LCS_BA1 = SRIO Local Configuration Space LSB Base Address
+ *
+ * LSBs of SRIO Address Space mapped to Maintenance BAR.
+ *
+ * Notes:
+ * The double word aligned SRIO address window mapped to the SRIO Maintenance BAR. This window has
+ * the highest priority and eclipses matches to the BAR0, BAR1 and BAR2 windows. Address bits not
+ * supplied in the transfer are considered zero. For example, SRIO Address 65:35 must be set to zero
+ * to match in a 34-bit access and SRIO Address 65:50 must be set to zero to match in a 50-bit access.
+ * This coding allows the Maintenance Bar window to appear in specific address spaces. Accesses
+ * through this BAR are limited to single word (32-bit) aligned transfers of one to four bytes.
+ * Accesses which violate this rule will return an error response if possible and be otherwise
+ * ignored. The remaining bits are located in SRIOMAINT(0,2..3)_LCS_BA0.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_LCS_BA1 hclk hrst_n
+ */
+union cvmx_sriomaintx_lcs_ba1 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_lcs_ba1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lcsba : 11; /**< SRIO Address 34:24 */
+ uint32_t reserved_0_20 : 21;
+#else
+ uint32_t reserved_0_20 : 21;
+ uint32_t lcsba : 11;
+#endif
+ } s;
+ struct cvmx_sriomaintx_lcs_ba1_s cn63xx;
+ struct cvmx_sriomaintx_lcs_ba1_s cn63xxp1;
+ struct cvmx_sriomaintx_lcs_ba1_s cn66xx;
+};
+typedef union cvmx_sriomaintx_lcs_ba1 cvmx_sriomaintx_lcs_ba1_t;
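+
+/* Worked example (illustration only): LCS_BA0[LCSBA] holds SRIO address bits
+   65:35 and LCS_BA1[LCSBA] holds bits 34:24, so the double-word-aligned
+   window base can be assembled as below.  Bits 65:64 do not fit in a
+   uint64_t and are dropped in this sketch. */
+static inline uint64_t example_lcs_window_base(uint32_t ba0, uint32_t ba1)
+{
+    uint64_t hi = ba0 & 0x7fffffffull;    /* SRIO address 65:35 */
+    uint64_t lo = (ba1 >> 21) & 0x7ffull; /* SRIO address 34:24 */
+    return (hi << 35) | (lo << 24);       /* low 64 bits of the window base */
+}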
+
+/**
+ * cvmx_sriomaint#_m2s_bar0_start0
+ *
+ * SRIOMAINT_M2S_BAR0_START0 = SRIO Device Access BAR0 MSB Start
+ *
+ * The starting SRIO address to be forwarded to the NPEI Configuration Space.
+ *
+ * Notes:
+ * This register specifies the 50-bit and 66-bit SRIO Address mapped to the BAR0 Space. See
+ * SRIOMAINT(0,2..3)_M2S_BAR0_START1 for more details. This register is only writeable over SRIO if the
+ * SRIO(0,2..3)_ACC_CTRL.DENY_BAR0 bit is zero.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_M2S_BAR0_START0 hclk hrst_n
+ */
+union cvmx_sriomaintx_m2s_bar0_start0 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_m2s_bar0_start0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t addr64 : 16; /**< SRIO Address 63:48 */
+ uint32_t addr48 : 16; /**< SRIO Address 47:32 */
+#else
+ uint32_t addr48 : 16;
+ uint32_t addr64 : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_m2s_bar0_start0_s cn63xx;
+ struct cvmx_sriomaintx_m2s_bar0_start0_s cn63xxp1;
+ struct cvmx_sriomaintx_m2s_bar0_start0_s cn66xx;
+};
+typedef union cvmx_sriomaintx_m2s_bar0_start0 cvmx_sriomaintx_m2s_bar0_start0_t;
+
+/**
+ * cvmx_sriomaint#_m2s_bar0_start1
+ *
+ * SRIOMAINT_M2S_BAR0_START1 = SRIO Device Access BAR0 LSB Start
+ *
+ * The starting SRIO address to be forwarded to the NPEI Configuration Space.
+ *
+ * Notes:
+ * This register specifies the SRIO Address mapped to the BAR0 RSL Space. If the transaction has not
+ * already been mapped to SRIO Maintenance Space through the SRIOMAINT_LCS_BA[1:0] registers, and if
+ * ENABLE is set and the address bits match, then the SRIO Memory transactions will map to Octeon SLI
+ * Registers. 34-bit address transactions require a match in SRIO Address 33:14 and require all the
+ * other bits in ADDR48, ADDR64 and ADDR66 fields to be zero. 50-bit address transactions require a
+ * match of SRIO Address 49:14 and require all the other bits of ADDR64 and ADDR66 to be zero. 66-bit
+ * address transactions require matches of all valid address field bits. Reads and Writes through Bar0
+ * have a size limit of 8 bytes and cannot cross a 64-bit boundary. All accesses with sizes greater
+ * than this limit will be ignored and return an error on any SRIO responses. Note: ADDR48 and
+ * ADDR64 fields are located in SRIOMAINT(0,2..3)_M2S_BAR0_START0. The ADDR32/66 fields of this register
+ * are writeable over SRIO if the SRIO(0,2..3)_ACC_CTRL.DENY_ADR0 bit is zero. The ENABLE field is
+ * writeable over SRIO if the SRIO(0,2..3)_ACC_CTRL.DENY_BAR0 bit is zero.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_M2S_BAR0_START1 hclk hrst_n
+ */
+union cvmx_sriomaintx_m2s_bar0_start1 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_m2s_bar0_start1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t addr32 : 18; /**< SRIO Address 31:14 */
+ uint32_t reserved_3_13 : 11;
+ uint32_t addr66 : 2; /**< SRIO Address 65:64 */
+ uint32_t enable : 1; /**< Enable BAR0 Access */
+#else
+ uint32_t enable : 1;
+ uint32_t addr66 : 2;
+ uint32_t reserved_3_13 : 11;
+ uint32_t addr32 : 18;
+#endif
+ } s;
+ struct cvmx_sriomaintx_m2s_bar0_start1_s cn63xx;
+ struct cvmx_sriomaintx_m2s_bar0_start1_s cn63xxp1;
+ struct cvmx_sriomaintx_m2s_bar0_start1_s cn66xx;
+};
+typedef union cvmx_sriomaintx_m2s_bar0_start1 cvmx_sriomaintx_m2s_bar0_start1_t;
+
+/**
+ * cvmx_sriomaint#_m2s_bar1_start0
+ *
+ * SRIOMAINT_M2S_BAR1_START0 = SRIO Device Access BAR1 MSB Start
+ *
+ * The starting SRIO address to be forwarded to the BAR1 Memory Space.
+ *
+ * Notes:
+ * This register specifies the 50-bit and 66-bit SRIO Address mapped to the BAR1 Space. See
+ * SRIOMAINT(0,2..3)_M2S_BAR1_START1 for more details. This register is only writeable over SRIO if the
+ * SRIO(0,2..3)_ACC_CTRL.DENY_ADR1 bit is zero.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_M2S_BAR1_START0 hclk hrst_n
+ */
+union cvmx_sriomaintx_m2s_bar1_start0 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_m2s_bar1_start0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t addr64 : 16; /**< SRIO Address 63:48 */
+ uint32_t addr48 : 16; /**< SRIO Address 47:32
+ The SRIO hardware does not use the low order
+ one or two bits of this field when BARSIZE is 12
+ or 13, respectively.
+ (BARSIZE is SRIOMAINT(0,2..3)_M2S_BAR1_START1[BARSIZE].) */
+#else
+ uint32_t addr48 : 16;
+ uint32_t addr64 : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_m2s_bar1_start0_s cn63xx;
+ struct cvmx_sriomaintx_m2s_bar1_start0_s cn63xxp1;
+ struct cvmx_sriomaintx_m2s_bar1_start0_s cn66xx;
+};
+typedef union cvmx_sriomaintx_m2s_bar1_start0 cvmx_sriomaintx_m2s_bar1_start0_t;
+
+/**
+ * cvmx_sriomaint#_m2s_bar1_start1
+ *
+ * SRIOMAINT_M2S_BAR1_START1 = SRIO Device to BAR1 Start
+ *
+ * The starting SRIO address to be forwarded to the BAR1 Memory Space.
+ *
+ * Notes:
+ * This register specifies the SRIO Address mapped to the BAR1 Space. If the transaction has not
+ * already been mapped to SRIO Maintenance Space through the SRIOMAINT_LCS_BA[1:0] registers and the
+ * address bits do not match enabled BAR0 addresses, and if ENABLE is set and the addresses match the
+ * BAR1 addresses, then SRIO Memory transactions will map to the Octeon Memory Space specified by the
+ * SRIOMAINT(0,2..3)_BAR1_IDX[31:0] registers. The BARSIZE field determines the size of BAR1, the entry
+ * select bits, and the size of each entry. A 34-bit address matches BAR1 when it matches
+ * SRIO_Address[33:20+BARSIZE] while all the other bits in ADDR48, ADDR64 and ADDR66 are zero.
+ * A 50-bit address matches BAR1 when it matches SRIO_Address[49:20+BARSIZE] while all the
+ * other bits of ADDR64 and ADDR66 are zero. A 66-bit address matches BAR1 when all of
+ * SRIO_Address[65:20+BARSIZE] match all corresponding address CSR field bits. Note: ADDR48 and
+ * ADDR64 fields are located in SRIOMAINT(0,2..3)_M2S_BAR1_START0. The ADDR32/66 fields of this register
+ * are writeable over SRIO if the SRIO(0,2..3)_ACC_CTRL.DENY_ADR1 bit is zero. The remaining fields are
+ * writeable over SRIO if the SRIO(0,2..3)_ACC_CTRL.DENY_BAR1 bit is zero.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_M2S_BAR1_START1 hclk hrst_n
+ */
+union cvmx_sriomaintx_m2s_bar1_start1 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_m2s_bar1_start1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t addr32 : 12; /**< SRIO Address 31:20
+ This field is not used by the SRIO hardware for
+ BARSIZE values 12 or 13.
+ With BARSIZE < 12, the upper 12-BARSIZE
+ bits of this field are used, and the lower BARSIZE
+ bits of this field are unused by the SRIO hardware. */
+ uint32_t reserved_7_19 : 13;
+ uint32_t barsize : 4; /**< Bar Size.
+ BARSIZE BAR BAR compare Entry Select Entry Offset Entry
+ Value Size bits* bits* bits* Size
+ 0 1MB 65:20 19:16 15:0 64KB
+ 1 2MB 65:21 20:17 16:0 128KB
+ 2 4MB 65:22 21:18 17:0 256KB
+ 3 8MB 65:23 22:19 18:0 512KB
+ 4 16MB 65:24 23:20 19:0 1MB
+ 5 32MB 65:25 24:21 20:0 2MB
+ 6 64MB 65:26 25:22 21:0 4MB
+ 7 128MB 65:27 26:23 22:0 8MB
+ 8 256MB 65:28 27:24 23:0 16MB
+ 9 512MB 65:29 28:25 24:0 32MB
+ 10 1024MB 65:30 29:26 25:0 64MB
+ 11 2048MB 65:31 30:27 26:0 128MB
+ 12 4096MB 65:32 31:28 27:0 256MB
+ 13 8192MB 65:33 32:29 28:0 512MB
+
+ *Bit ranges within the SRIO Transaction Address.
+ The Entry Select bits form the index X that
+ selects an SRIOMAINT(0,2..3)_BAR1_IDXX entry. */
+ uint32_t addr66 : 2; /**< SRIO Address 65:64 */
+ uint32_t enable : 1; /**< Enable BAR1 Access */
+#else
+ uint32_t enable : 1;
+ uint32_t addr66 : 2;
+ uint32_t barsize : 4;
+ uint32_t reserved_7_19 : 13;
+ uint32_t addr32 : 12;
+#endif
+ } s;
+ struct cvmx_sriomaintx_m2s_bar1_start1_s cn63xx;
+ struct cvmx_sriomaintx_m2s_bar1_start1_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t addr32 : 12; /**< SRIO Address 31:20
+ With BARSIZE < 12, the upper 12-BARSIZE
+ bits of this field are used, and the lower BARSIZE
+ bits of this field are unused by the SRIO hardware. */
+ uint32_t reserved_6_19 : 14;
+ uint32_t barsize : 3; /**< Bar Size.
+ BARSIZE BAR BAR compare Entry Select Entry Offset Entry
+ Value Size bits* bits* bits* Size
+ 0 1MB 65:20 19:16 15:0 64KB
+ 1 2MB 65:21 20:17 16:0 128KB
+ 2 4MB 65:22 21:18 17:0 256KB
+ 3 8MB 65:23 22:19 18:0 512KB
+ 4 16MB 65:24 23:20 19:0 1MB
+ 5 32MB 65:25 24:21 20:0 2MB
+ 6 64MB 65:26 25:22 21:0 4MB
+ 7 128MB 65:27 26:23 22:0 8MB
+ 8 256MB ** not in pass 1
+ 9 512MB ** not in pass 1
+ 10 1GB ** not in pass 1
+ 11 2GB ** not in pass 1
+ 12 4GB ** not in pass 1
+ 13 8GB ** not in pass 1
+
+ *Bit ranges within the SRIO Transaction Address.
+ The Entry Select bits form the index X that
+ selects an SRIOMAINT(0..1)_BAR1_IDXX entry.
+
+ In O63 pass 2, BARSIZE is 4 bits (6:3 in this
+ CSR), and BARSIZE values 8-13 are implemented,
+ providing a total possible BAR1 size range from
+ 1MB up to 8GB. */
+ uint32_t addr66 : 2; /**< SRIO Address 65:64 */
+ uint32_t enable : 1; /**< Enable BAR1 Access */
+#else
+ uint32_t enable : 1;
+ uint32_t addr66 : 2;
+ uint32_t barsize : 3;
+ uint32_t reserved_6_19 : 14;
+ uint32_t addr32 : 12;
+#endif
+ } cn63xxp1;
+ struct cvmx_sriomaintx_m2s_bar1_start1_s cn66xx;
+};
+typedef union cvmx_sriomaintx_m2s_bar1_start1 cvmx_sriomaintx_m2s_bar1_start1_t;
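+
+/* Worked example of the BARSIZE table above (illustration only): BAR1 spans
+   (1MB << BARSIZE), each of the 16 entries covers (64KB << BARSIZE), and the
+   entry select field is SRIO_Address[19+BARSIZE:16+BARSIZE]. */
+static inline unsigned example_bar1_entry_select(uint64_t srio_addr,
+                                                 unsigned barsize)
+{
+    return (unsigned)((srio_addr >> (16 + barsize)) & 0xf);
+}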
+
+/**
+ * cvmx_sriomaint#_m2s_bar2_start
+ *
+ * SRIOMAINT_M2S_BAR2_START = SRIO Device to BAR2 Start
+ *
+ * The starting SRIO address to be forwarded to the BAR2 Memory Space.
+ *
+ * Notes:
+ * This register specifies the SRIO Address mapped to the BAR2 Space. If ENABLE is set and the
+ * address bits do not match any other enabled BAR address but do match the BAR2 addresses, then the SRIO
+ * Memory transactions will map to Octeon BAR2 Memory Space. 34-bit address transactions require the
+ * ADDR66, ADDR64 and ADDR48 fields to be set to zero and supply zeros for unused address bits 40:34.
+ * 50-bit address transactions require a match of SRIO Address 49:41 and require all the other bits of ADDR64
+ * and ADDR66 to be zero. 66-bit address transactions require matches of all valid address field
+ * bits. The ADDR32/48/64/66 fields of this register are writeable over SRIO if the
+ * SRIO(0,2..3)_ACC_CTRL.DENY_ADR2 bit is zero. The remaining fields are writeable over SRIO if the
+ * SRIO(0,2..3)_ACC_CTRL.DENY_BAR2 bit is zero.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_M2S_BAR2_START hclk hrst_n
+ */
+union cvmx_sriomaintx_m2s_bar2_start {
+ uint32_t u32;
+ struct cvmx_sriomaintx_m2s_bar2_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t addr64 : 16; /**< SRIO Address 63:48 */
+ uint32_t addr48 : 7; /**< SRIO Address 47:41 */
+ uint32_t reserved_6_8 : 3;
+ uint32_t esx : 2; /**< Endian Swap Mode used for SRIO 34-bit access.
+                                         For 50/66-bit accesses the Endian Swap is determined
+                                         by ESX XOR'd with SRIO Addr 39:38.
+ 0 = No Swap
+ 1 = 64-bit Swap Bytes [ABCD_EFGH] -> [HGFE_DCBA]
+ 2 = 32-bit Swap Words [ABCD_EFGH] -> [DCBA_HGFE]
+ 3 = 32-bit Word Exch [ABCD_EFGH] -> [EFGH_ABCD] */
+	uint32_t cax                          : 1;  /**< Cacheable Access Mode. When set, the transfer is
+                                         cached. This bit is used for SRIO 34-bit access.
+                                         For 50/66-bit accesses NCA is determined by CAX
+                                         XOR'd with SRIO Addr 40. */
+ uint32_t addr66 : 2; /**< SRIO Address 65:64 */
+ uint32_t enable : 1; /**< Enable BAR2 Access */
+#else
+ uint32_t enable : 1;
+ uint32_t addr66 : 2;
+ uint32_t cax : 1;
+ uint32_t esx : 2;
+ uint32_t reserved_6_8 : 3;
+ uint32_t addr48 : 7;
+ uint32_t addr64 : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_m2s_bar2_start_s cn63xx;
+ struct cvmx_sriomaintx_m2s_bar2_start_s cn63xxp1;
+ struct cvmx_sriomaintx_m2s_bar2_start_s cn66xx;
+};
+typedef union cvmx_sriomaintx_m2s_bar2_start cvmx_sriomaintx_m2s_bar2_start_t;
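+
+/* Illustrative read-modify-write of a 32-bit maintenance CSR through
+ * its union type.  cvmx_example_cfg_read32()/cvmx_example_cfg_write32()
+ * are hypothetical stand-ins for whatever maintenance-register accessor
+ * the caller uses; the bitfield manipulation is the point of the sketch. */
+extern uint32_t cvmx_example_cfg_read32(int srio_port, uint64_t offset);
+extern void cvmx_example_cfg_write32(int srio_port, uint64_t offset, uint32_t val);
+
+static inline void cvmx_example_bar2_enable(int srio_port, uint64_t bar2_off)
+{
+	cvmx_sriomaintx_m2s_bar2_start_t bar2;
+
+	bar2.u32 = cvmx_example_cfg_read32(srio_port, bar2_off);
+	bar2.s.esx = 0;		/* no endian swap for 34-bit accesses */
+	bar2.s.cax = 0;		/* uncached 34-bit accesses */
+	bar2.s.enable = 1;	/* turn the BAR2 window on */
+	cvmx_example_cfg_write32(srio_port, bar2_off, bar2.u32);
+}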
+
+/**
+ * cvmx_sriomaint#_mac_ctrl
+ *
+ * SRIOMAINT_MAC_CTRL = SRIO MAC Control
+ *
+ * Control for MAC Features
+ *
+ * Notes:
+ * This register enables MAC optimizations that may not be supported by all SRIO devices. The
+ * default values should be supported. This register can be changed at any time while the MAC is
+ * out of reset.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_MAC_CTRL hclk hrst_n
+ */
+union cvmx_sriomaintx_mac_ctrl {
+ uint32_t u32;
+ struct cvmx_sriomaintx_mac_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t sec_spf : 1; /**< Send all Incoming Packets matching Secondary ID to
+ RX Soft Packet FIFO. This bit is ignored if
+ RX_SPF is set. */
+	uint32_t ack_zero                     : 1;  /**< Generate ACKs for all incoming Zero Byte packets.
+                                         Default behavior is to issue a NACK. Regardless
+                                         of this setting, the SRIO(0,2..3)_INT_REG.ZERO_PKT
+                                         interrupt is generated. */
+ uint32_t rx_spf : 1; /**< Route all received packets to RX Soft Packet FIFO.
+ No logical layer ERB Errors will be reported.
+ Used for Diagnostics Only. */
+ uint32_t eop_mrg : 1; /**< Transmitted Packets can eliminate EOP Symbol on
+ back to back packets. */
+ uint32_t type_mrg : 1; /**< Allow STYPE Merging on Transmit. */
+ uint32_t lnk_rtry : 16; /**< Number of times MAC will reissue Link Request
+ after timeout. If retry count is exceeded Fatal
+ Port Error will occur (see SRIO(0,2..3)_INT_REG.F_ERROR) */
+#else
+ uint32_t lnk_rtry : 16;
+ uint32_t type_mrg : 1;
+ uint32_t eop_mrg : 1;
+ uint32_t rx_spf : 1;
+ uint32_t ack_zero : 1;
+ uint32_t sec_spf : 1;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } s;
+ struct cvmx_sriomaintx_mac_ctrl_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+	uint32_t ack_zero                     : 1;  /**< Generate ACKs for all incoming Zero Byte packets.
+                                         Default behavior is to issue a NACK. Regardless
+                                         of this setting, the SRIO(0..1)_INT_REG.ZERO_PKT
+                                         interrupt is generated. */
+ uint32_t rx_spf : 1; /**< Route all received packets to RX Soft Packet FIFO.
+ No logical layer ERB Errors will be reported.
+ Used for Diagnostics Only. */
+ uint32_t eop_mrg : 1; /**< Transmitted Packets can eliminate EOP Symbol on
+ back to back packets. */
+ uint32_t type_mrg : 1; /**< Allow STYPE Merging on Transmit. */
+ uint32_t lnk_rtry : 16; /**< Number of times MAC will reissue Link Request
+ after timeout. If retry count is exceeded Fatal
+ Port Error will occur (see SRIO(0..1)_INT_REG.F_ERROR) */
+#else
+ uint32_t lnk_rtry : 16;
+ uint32_t type_mrg : 1;
+ uint32_t eop_mrg : 1;
+ uint32_t rx_spf : 1;
+ uint32_t ack_zero : 1;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } cn63xx;
+ struct cvmx_sriomaintx_mac_ctrl_s cn66xx;
+};
+typedef union cvmx_sriomaintx_mac_ctrl cvmx_sriomaintx_mac_ctrl_t;
+
+/**
+ * cvmx_sriomaint#_pe_feat
+ *
+ * SRIOMAINT_PE_FEAT = SRIO Processing Element Features
+ *
+ * The Supported Processing Element Features.
+ *
+ * Notes:
+ * The Processing Element Feature register describes the major functionality provided by the SRIO
+ * device.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PE_FEAT hclk hrst_n
+ */
+union cvmx_sriomaintx_pe_feat {
+ uint32_t u32;
+ struct cvmx_sriomaintx_pe_feat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bridge : 1; /**< Bridge Functions not supported. */
+ uint32_t memory : 1; /**< PE contains addressable memory. */
+ uint32_t proc : 1; /**< PE contains a local processor. */
+ uint32_t switchf : 1; /**< Switch Functions not supported. */
+ uint32_t mult_prt : 1; /**< Multiport Functions not supported. */
+ uint32_t reserved_7_26 : 20;
+ uint32_t suppress : 1; /**< Error Recovery Suppression not supported. */
+ uint32_t crf : 1; /**< Critical Request Flow not supported. */
+ uint32_t lg_tran : 1; /**< Large Transport (16-bit Device IDs) supported. */
+ uint32_t ex_feat : 1; /**< Extended Feature Pointer is valid. */
+ uint32_t ex_addr : 3; /**< PE supports 66, 50 and 34-bit addresses.
+ [2:1] are a RO copy of SRIO*_IP_FEATURE[A66,A50]. */
+#else
+ uint32_t ex_addr : 3;
+ uint32_t ex_feat : 1;
+ uint32_t lg_tran : 1;
+ uint32_t crf : 1;
+ uint32_t suppress : 1;
+ uint32_t reserved_7_26 : 20;
+ uint32_t mult_prt : 1;
+ uint32_t switchf : 1;
+ uint32_t proc : 1;
+ uint32_t memory : 1;
+ uint32_t bridge : 1;
+#endif
+ } s;
+ struct cvmx_sriomaintx_pe_feat_s cn63xx;
+ struct cvmx_sriomaintx_pe_feat_s cn63xxp1;
+ struct cvmx_sriomaintx_pe_feat_s cn66xx;
+};
+typedef union cvmx_sriomaintx_pe_feat cvmx_sriomaintx_pe_feat_t;
+
+/**
+ * cvmx_sriomaint#_pe_llc
+ *
+ * SRIOMAINT_PE_LLC = SRIO Processing Element Logical Layer Control
+ *
+ * Addresses supported by the SRIO Device.
+ *
+ * Notes:
+ * The Processing Element Logical Layer is used for general configuration for the logical interface.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PE_LLC hclk hrst_n
+ */
+union cvmx_sriomaintx_pe_llc {
+ uint32_t u32;
+ struct cvmx_sriomaintx_pe_llc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_3_31 : 29;
+ uint32_t ex_addr : 3; /**< Controls the number of address bits generated by
+ PE as a source and processed by the PE as a
+ target of an operation.
+ 001 = 34-bit Addresses
+ 010 = 50-bit Addresses
+ 100 = 66-bit Addresses
+ All other encodings are reserved. */
+#else
+ uint32_t ex_addr : 3;
+ uint32_t reserved_3_31 : 29;
+#endif
+ } s;
+ struct cvmx_sriomaintx_pe_llc_s cn63xx;
+ struct cvmx_sriomaintx_pe_llc_s cn63xxp1;
+ struct cvmx_sriomaintx_pe_llc_s cn66xx;
+};
+typedef union cvmx_sriomaintx_pe_llc cvmx_sriomaintx_pe_llc_t;
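+
+/* A small sketch decoding the one-hot EX_ADDR encoding documented above
+ * into an address width in bits; the helper name is hypothetical and
+ * reserved encodings return 0. */
+static inline int cvmx_example_pe_llc_addr_bits(cvmx_sriomaintx_pe_llc_t llc)
+{
+	switch (llc.s.ex_addr) {
+	case 1: return 34;	/* 001 = 34-bit addresses */
+	case 2: return 50;	/* 010 = 50-bit addresses */
+	case 4: return 66;	/* 100 = 66-bit addresses */
+	default: return 0;	/* reserved encoding */
+	}
+}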
+
+/**
+ * cvmx_sriomaint#_port_0_ctl
+ *
+ * SRIOMAINT_PORT_0_CTL = SRIO Port 0 Control
+ *
+ * Port 0 Control
+ *
+ * Notes:
+ * This register contains assorted control bits.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PORT_0_CTL hclk hrst_n
+ */
+union cvmx_sriomaintx_port_0_ctl {
+ uint32_t u32;
+ struct cvmx_sriomaintx_port_0_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pt_width : 2; /**< Hardware Port Width.
+ 00 = One Lane supported.
+ 01 = One/Four Lanes supported.
+ 10 = One/Two Lanes supported.
+ 11 = One/Two/Four Lanes supported.
+ This value is a copy of SRIO*_IP_FEATURE[PT_WIDTH]
+ limited by the number of lanes the MAC has. */
+ uint32_t it_width : 3; /**< Initialized Port Width
+ 000 = Single-lane, Lane 0
+ 001 = Single-lane, Lane 1 or 2
+ 010 = Four-lane
+ 011 = Two-lane
+ 111 = Link Uninitialized
+ Others = Reserved */
+ uint32_t ov_width : 3; /**< Override Port Width. Writing this register causes
+ the port to reinitialize.
+ 000 = No Override all lanes possible
+ 001 = Reserved
+ 010 = Force Single-lane, Lane 0
+ If Ln 0 is unavailable try Ln 2 then Ln 1
+ 011 = Force Single-lane, Lane 2
+ If Ln 2 is unavailable try Ln 1 then Ln 0
+ 100 = Reserved
+ 101 = Enable Two-lane, Disable Four-Lane
+ 110 = Enable Four-lane, Disable Two-Lane
+                                         111 = All lane sizes enabled */
+ uint32_t disable : 1; /**< Port Disable. Setting this bit disables both
+ drivers and receivers. */
+ uint32_t o_enable : 1; /**< Port Output Enable. When cleared, port will
+ generate control symbols and respond to
+ maintenance transactions only. When set, all
+ transactions are allowed. */
+ uint32_t i_enable : 1; /**< Port Input Enable. When cleared, port will
+ generate control symbols and respond to
+ maintenance packets only. All other packets will
+ not be accepted. */
+ uint32_t dis_err : 1; /**< Disable Error Checking. Diagnostic Only. */
+ uint32_t mcast : 1; /**< Reserved. */
+ uint32_t reserved_18_18 : 1;
+	uint32_t enumb                        : 1;  /**< Enumeration Boundary. SW can use this bit to
+ determine port enumeration. */
+ uint32_t reserved_16_16 : 1;
+ uint32_t ex_width : 2; /**< Extended Port Width not supported. */
+ uint32_t ex_stat : 2; /**< Extended Port Width Status. 00 = not supported */
+ uint32_t suppress : 8; /**< Retransmit Suppression Mask. CRF not Supported. */
+ uint32_t stp_port : 1; /**< Stop on Failed Port. This bit is used with the
+ DROP_PKT bit to force certain behavior when the
+ Error Rate Failed Threshold has been met or
+ exceeded. */
+ uint32_t drop_pkt : 1; /**< Drop on Failed Port. This bit is used with the
+ STP_PORT bit to force certain behavior when the
+ Error Rate Failed Threshold has been met or
+ exceeded. */
+ uint32_t prt_lock : 1; /**< When this bit is cleared, the packets that may be
+ received and issued are controlled by the state of
+ the O_ENABLE and I_ENABLE bits. When this bit is
+ set, this port is stopped and is not enabled to
+ issue or receive any packets; the input port can
+ still follow the training procedure and can still
+ send and respond to link-requests; all received
+ packets return packet-not-accepted control symbols
+ to force an error condition to be signaled by the
+ sending device. */
+ uint32_t pt_type : 1; /**< Port Type. 1 = Serial port. */
+#else
+ uint32_t pt_type : 1;
+ uint32_t prt_lock : 1;
+ uint32_t drop_pkt : 1;
+ uint32_t stp_port : 1;
+ uint32_t suppress : 8;
+ uint32_t ex_stat : 2;
+ uint32_t ex_width : 2;
+ uint32_t reserved_16_16 : 1;
+ uint32_t enumb : 1;
+ uint32_t reserved_18_18 : 1;
+ uint32_t mcast : 1;
+ uint32_t dis_err : 1;
+ uint32_t i_enable : 1;
+ uint32_t o_enable : 1;
+ uint32_t disable : 1;
+ uint32_t ov_width : 3;
+ uint32_t it_width : 3;
+ uint32_t pt_width : 2;
+#endif
+ } s;
+ struct cvmx_sriomaintx_port_0_ctl_s cn63xx;
+ struct cvmx_sriomaintx_port_0_ctl_s cn63xxp1;
+ struct cvmx_sriomaintx_port_0_ctl_s cn66xx;
+};
+typedef union cvmx_sriomaintx_port_0_ctl cvmx_sriomaintx_port_0_ctl_t;
+
+/**
+ * cvmx_sriomaint#_port_0_ctl2
+ *
+ * SRIOMAINT_PORT_0_CTL2 = SRIO Port 0 Control 2
+ *
+ * Port 0 Control 2
+ *
+ * Notes:
+ * These registers are accessed when a local processor or an external device wishes to examine the
+ * port baudrate information. The Automatic Baud Rate Feature is not available on this device. The
+ * SUP_* and ENB_* fields are set directly by the QLM_SPD bits as a reference but otherwise have
+ * no effect. WARNING: Writes to this register will reinitialize the SRIO link.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PORT_0_CTL2 hclk hrst_n
+ */
+union cvmx_sriomaintx_port_0_ctl2 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_port_0_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t sel_baud : 4; /**< Link Baud Rate Selected.
+ 0000 - No rate selected
+ 0001 - 1.25 GBaud
+ 0010 - 2.5 GBaud
+ 0011 - 3.125 GBaud
+ 0100 - 5.0 GBaud
+ 0101 - 6.25 GBaud (reserved)
+                                         0110 through 1111 - Reserved
+ Indicates the speed of the interface SERDES lanes
+ (selected by the QLM*_SPD straps). */
+ uint32_t baud_sup : 1; /**< Automatic Baud Rate Discovery not supported. */
+ uint32_t baud_enb : 1; /**< Auto Baud Rate Discovery Enable. */
+ uint32_t sup_125g : 1; /**< 1.25GB Rate Operation supported.
+ Set when the interface SERDES lanes are operating
+ at 1.25 Gbaud (as selected by QLM*_SPD straps). */
+ uint32_t enb_125g : 1; /**< 1.25GB Rate Operation enable.
+ Reset to 1 when the interface SERDES lanes are
+ operating at 1.25 Gbaud (as selected by QLM*_SPD
+ straps). Reset to 0 otherwise. */
+ uint32_t sup_250g : 1; /**< 2.50GB Rate Operation supported.
+ Set when the interface SERDES lanes are operating
+ at 2.5 Gbaud (as selected by QLM*_SPD straps). */
+ uint32_t enb_250g : 1; /**< 2.50GB Rate Operation enable.
+ Reset to 1 when the interface SERDES lanes are
+ operating at 2.5 Gbaud (as selected by QLM*_SPD
+ straps). Reset to 0 otherwise. */
+ uint32_t sup_312g : 1; /**< 3.125GB Rate Operation supported.
+ Set when the interface SERDES lanes are operating
+ at 3.125 Gbaud (as selected by QLM*_SPD straps). */
+ uint32_t enb_312g : 1; /**< 3.125GB Rate Operation enable.
+ Reset to 1 when the interface SERDES lanes are
+ operating at 3.125 Gbaud (as selected by QLM*_SPD
+ straps). Reset to 0 otherwise. */
+ uint32_t sub_500g : 1; /**< 5.0GB Rate Operation supported.
+ Set when the interface SERDES lanes are operating
+ at 5.0 Gbaud (as selected by QLM*_SPD straps). */
+ uint32_t enb_500g : 1; /**< 5.0GB Rate Operation enable.
+ Reset to 1 when the interface SERDES lanes are
+ operating at 5.0 Gbaud (as selected by QLM*_SPD
+ straps). Reset to 0 otherwise. */
+ uint32_t sup_625g : 1; /**< 6.25GB Rate Operation (not supported). */
+ uint32_t enb_625g : 1; /**< 6.25GB Rate Operation enable. */
+ uint32_t reserved_2_15 : 14;
+	uint32_t tx_emph                      : 1;  /**< Indicates whether this port is able to transmit
+ commands to control the transmit emphasis in the
+ connected port. */
+ uint32_t emph_en : 1; /**< Controls whether a port may adjust the
+ transmit emphasis in the connected port. This bit
+ should be cleared for normal operation. */
+#else
+ uint32_t emph_en : 1;
+ uint32_t tx_emph : 1;
+ uint32_t reserved_2_15 : 14;
+ uint32_t enb_625g : 1;
+ uint32_t sup_625g : 1;
+ uint32_t enb_500g : 1;
+ uint32_t sub_500g : 1;
+ uint32_t enb_312g : 1;
+ uint32_t sup_312g : 1;
+ uint32_t enb_250g : 1;
+ uint32_t sup_250g : 1;
+ uint32_t enb_125g : 1;
+ uint32_t sup_125g : 1;
+ uint32_t baud_enb : 1;
+ uint32_t baud_sup : 1;
+ uint32_t sel_baud : 4;
+#endif
+ } s;
+ struct cvmx_sriomaintx_port_0_ctl2_s cn63xx;
+ struct cvmx_sriomaintx_port_0_ctl2_s cn63xxp1;
+ struct cvmx_sriomaintx_port_0_ctl2_s cn66xx;
+};
+typedef union cvmx_sriomaintx_port_0_ctl2 cvmx_sriomaintx_port_0_ctl2_t;
+
+/**
+ * cvmx_sriomaint#_port_0_err_stat
+ *
+ * SRIOMAINT_PORT_0_ERR_STAT = SRIO Port 0 Error and Status
+ *
+ * Port 0 Error and Status
+ *
+ * Notes:
+ * This register displays port error and status information. Several port error conditions are
+ * captured here and must be cleared by writing 1's to the individual bits.
+ * Bits are R/W on 65/66xx pass 1 and R/W1C on pass 1.2
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PORT_0_ERR_STAT hclk hrst_n
+ */
+union cvmx_sriomaintx_port_0_err_stat {
+ uint32_t u32;
+ struct cvmx_sriomaintx_port_0_err_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_27_31 : 5;
+ uint32_t pkt_drop : 1; /**< Output Packet Dropped. */
+ uint32_t o_fail : 1; /**< Output Port has encountered a failure condition,
+ meaning the port's failed error threshold has
+ reached SRIOMAINT(0,2..3)_ERB_ERR_RATE_THR.ER_FAIL value. */
+ uint32_t o_dgrad : 1; /**< Output Port has encountered a degraded condition,
+ meaning the port's degraded threshold has
+ reached SRIOMAINT(0,2..3)_ERB_ERR_RATE_THR.ER_DGRAD
+ value. */
+ uint32_t reserved_21_23 : 3;
+ uint32_t o_retry : 1; /**< Output Retry Encountered. This bit is set when
+ bit 18 is set. */
+ uint32_t o_rtried : 1; /**< Output Port has received a packet-retry condition
+ and cannot make forward progress. This bit is set
+ when bit 18 is set and is cleared when a packet-
+ accepted or a packet-not-accepted control symbol
+ is received. */
+ uint32_t o_sm_ret : 1; /**< Output Port State Machine has received a
+ packet-retry control symbol and is retrying the
+ packet. */
+ uint32_t o_error : 1; /**< Output Error Encountered and possibly recovered
+ from. This sticky bit is set with bit 16. */
+ uint32_t o_sm_err : 1; /**< Output Port State Machine has encountered an
+ error. */
+ uint32_t reserved_11_15 : 5;
+ uint32_t i_sm_ret : 1; /**< Input Port State Machine has received a
+ packet-retry control symbol and is retrying the
+ packet. */
+ uint32_t i_error : 1; /**< Input Error Encountered and possibly recovered
+ from. This sticky bit is set with bit 8. */
+ uint32_t i_sm_err : 1; /**< Input Port State Machine has encountered an
+ error. */
+ uint32_t reserved_5_7 : 3;
+	uint32_t pt_write                     : 1;  /**< Port has encountered a condition which required it
+                                         to initiate a Maintenance Port-Write Operation.
+ Never set by hardware. */
+ uint32_t reserved_3_3 : 1;
+ uint32_t pt_error : 1; /**< Input or Output Port has encountered an
+ unrecoverable error condition. */
+	uint32_t pt_ok                        : 1;  /**< Input and Output Ports are initialized and the port
+                                         is exchanging error-free control symbols with the
+                                         attached device. */
+ uint32_t pt_uinit : 1; /**< Port is uninitialized. This bit and bit 1 are
+ mutually exclusive. */
+#else
+ uint32_t pt_uinit : 1;
+ uint32_t pt_ok : 1;
+ uint32_t pt_error : 1;
+ uint32_t reserved_3_3 : 1;
+ uint32_t pt_write : 1;
+ uint32_t reserved_5_7 : 3;
+ uint32_t i_sm_err : 1;
+ uint32_t i_error : 1;
+ uint32_t i_sm_ret : 1;
+ uint32_t reserved_11_15 : 5;
+ uint32_t o_sm_err : 1;
+ uint32_t o_error : 1;
+ uint32_t o_sm_ret : 1;
+ uint32_t o_rtried : 1;
+ uint32_t o_retry : 1;
+ uint32_t reserved_21_23 : 3;
+ uint32_t o_dgrad : 1;
+ uint32_t o_fail : 1;
+ uint32_t pkt_drop : 1;
+ uint32_t reserved_27_31 : 5;
+#endif
+ } s;
+ struct cvmx_sriomaintx_port_0_err_stat_s cn63xx;
+ struct cvmx_sriomaintx_port_0_err_stat_s cn63xxp1;
+ struct cvmx_sriomaintx_port_0_err_stat_s cn66xx;
+};
+typedef union cvmx_sriomaintx_port_0_err_stat cvmx_sriomaintx_port_0_err_stat_t;
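+
+/* Sketch of clearing the sticky error bits described in the notes
+ * above, assuming the hypothetical accessors declared earlier.  Where
+ * the bits are W1C, writing back the value just read clears every set
+ * error bit without disturbing bits that read as zero. */
+static inline void cvmx_example_clear_port0_errors(int srio_port, uint64_t err_off)
+{
+	cvmx_sriomaintx_port_0_err_stat_t st;
+
+	st.u32 = cvmx_example_cfg_read32(srio_port, err_off);
+	cvmx_example_cfg_write32(srio_port, err_off, st.u32);	/* write 1's to clear */
+}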
+
+/**
+ * cvmx_sriomaint#_port_0_link_req
+ *
+ * SRIOMAINT_PORT_0_LINK_REQ = SRIO Port 0 Link Request
+ *
+ * Port 0 Manual Link Request
+ *
+ * Notes:
+ * Writing this register generates the link request symbol or eight device reset symbols. The
+ * progress of the request can be determined by reading SRIOMAINT(0,2..3)_PORT_0_LINK_RESP. Only a single
+ * request should be generated at a time.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PORT_0_LINK_REQ hclk hrst_n
+ */
+union cvmx_sriomaintx_port_0_link_req {
+ uint32_t u32;
+ struct cvmx_sriomaintx_port_0_link_req_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_3_31 : 29;
+ uint32_t cmd : 3; /**< Link Request Command.
+ 011 - Reset Device
+ 100 - Link Request
+ All other values reserved. */
+#else
+ uint32_t cmd : 3;
+ uint32_t reserved_3_31 : 29;
+#endif
+ } s;
+ struct cvmx_sriomaintx_port_0_link_req_s cn63xx;
+ struct cvmx_sriomaintx_port_0_link_req_s cn66xx;
+};
+typedef union cvmx_sriomaintx_port_0_link_req cvmx_sriomaintx_port_0_link_req_t;
+
+/**
+ * cvmx_sriomaint#_port_0_link_resp
+ *
+ * SRIOMAINT_PORT_0_LINK_RESP = SRIO Port 0 Link Response
+ *
+ * Port 0 Manual Link Response
+ *
+ * Notes:
+ * This register only returns responses generated by writes to SRIOMAINT(0,2..3)_PORT_0_LINK_REQ.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PORT_0_LINK_RESP hclk hrst_n
+ */
+union cvmx_sriomaintx_port_0_link_resp {
+ uint32_t u32;
+ struct cvmx_sriomaintx_port_0_link_resp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t valid : 1; /**< Link Response Valid.
+ 1 = Link Response Received or Reset Device
+ Symbols Transmitted. Value cleared on read.
+ 0 = No response received. */
+ uint32_t reserved_11_30 : 20;
+ uint32_t ackid : 6; /**< AckID received from link response.
+ Reset Device symbol response is always zero.
+ Bit 10 is used for IDLE2 and always reads zero. */
+ uint32_t status : 5; /**< Link Response Status.
+ Status supplied by link response.
+ Reset Device symbol response is always zero. */
+#else
+ uint32_t status : 5;
+ uint32_t ackid : 6;
+ uint32_t reserved_11_30 : 20;
+ uint32_t valid : 1;
+#endif
+ } s;
+ struct cvmx_sriomaintx_port_0_link_resp_s cn63xx;
+ struct cvmx_sriomaintx_port_0_link_resp_s cn66xx;
+};
+typedef union cvmx_sriomaintx_port_0_link_resp cvmx_sriomaintx_port_0_link_resp_t;
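+
+/* Sketch of the manual link-request handshake described in the two
+ * notes above: write a command to PORT_0_LINK_REQ, then poll
+ * PORT_0_LINK_RESP until VALID is set.  The offsets, accessors and
+ * helper name are hypothetical; returns the response STATUS, or -1 if
+ * the poll budget is exhausted. */
+static inline int cvmx_example_link_request(int srio_port, uint64_t req_off,
+					    uint64_t resp_off)
+{
+	cvmx_sriomaintx_port_0_link_req_t req;
+	cvmx_sriomaintx_port_0_link_resp_t resp;
+	int spins = 10000;
+
+	req.u32 = 0;
+	req.s.cmd = 4;	/* 100 = Link Request (011 would reset the device) */
+	cvmx_example_cfg_write32(srio_port, req_off, req.u32);
+
+	do {	/* VALID clears on read, so poll until it is observed set */
+		resp.u32 = cvmx_example_cfg_read32(srio_port, resp_off);
+	} while (!resp.s.valid && --spins);
+
+	return resp.s.valid ? (int)resp.s.status : -1;
+}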
+
+/**
+ * cvmx_sriomaint#_port_0_local_ackid
+ *
+ * SRIOMAINT_PORT_0_LOCAL_ACKID = SRIO Port 0 Local AckID
+ *
+ * Port 0 Local AckID Control
+ *
+ * Notes:
+ * This register is typically only written when recovering from a failed link. It may be read at any
+ * time the MAC is out of reset. Writes to the O_ACKID field will be used for both the O_ACKID and
+ * E_ACKID. Care must be taken to ensure that no packets are pending at the time of a write. The
+ * number of pending packets can be read in the TX_INUSE field of SRIO(0,2..3)_MAC_BUFFERS.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PORT_0_LOCAL_ACKID hclk hrst_n
+ */
+union cvmx_sriomaintx_port_0_local_ackid {
+ uint32_t u32;
+ struct cvmx_sriomaintx_port_0_local_ackid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_30_31 : 2;
+ uint32_t i_ackid : 6; /**< Next Expected Inbound AckID.
+ Bit 29 is used for IDLE2 and should be zero. */
+ uint32_t reserved_14_23 : 10;
+ uint32_t e_ackid : 6; /**< Next Expected Unacknowledged AckID.
+ Bit 13 is used for IDLE2 and should be zero. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t o_ackid : 6; /**< Next Outgoing Packet AckID.
+ Bit 5 is used for IDLE2 and should be zero. */
+#else
+ uint32_t o_ackid : 6;
+ uint32_t reserved_6_7 : 2;
+ uint32_t e_ackid : 6;
+ uint32_t reserved_14_23 : 10;
+ uint32_t i_ackid : 6;
+ uint32_t reserved_30_31 : 2;
+#endif
+ } s;
+ struct cvmx_sriomaintx_port_0_local_ackid_s cn63xx;
+ struct cvmx_sriomaintx_port_0_local_ackid_s cn66xx;
+};
+typedef union cvmx_sriomaintx_port_0_local_ackid cvmx_sriomaintx_port_0_local_ackid_t;
+
+/**
+ * cvmx_sriomaint#_port_gen_ctl
+ *
+ * SRIOMAINT_PORT_GEN_CTL = SRIO Port General Control
+ *
+ * Port General Control
+ *
+ * Notes:
+ * Clk_Rst: SRIOMAINT(0,2..3)_PORT_GEN_CTL hclk hrst_n
+ *
+ */
+union cvmx_sriomaintx_port_gen_ctl {
+ uint32_t u32;
+ struct cvmx_sriomaintx_port_gen_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t host : 1; /**< Host Device.
+ The HOST reset value is based on corresponding
+ MIO_RST_CTL*[PRTMODE]. HOST resets to 1 when
+ this field selects RC (i.e. host) mode, else 0. */
+ uint32_t menable : 1; /**< Master Enable. Must be set for device to issue
+ read, write, doorbell, message requests. */
+ uint32_t discover : 1; /**< Discovered. The device has been discovered by the
+ host responsible for initialization. */
+ uint32_t reserved_0_28 : 29;
+#else
+ uint32_t reserved_0_28 : 29;
+ uint32_t discover : 1;
+ uint32_t menable : 1;
+ uint32_t host : 1;
+#endif
+ } s;
+ struct cvmx_sriomaintx_port_gen_ctl_s cn63xx;
+ struct cvmx_sriomaintx_port_gen_ctl_s cn63xxp1;
+ struct cvmx_sriomaintx_port_gen_ctl_s cn66xx;
+};
+typedef union cvmx_sriomaintx_port_gen_ctl cvmx_sriomaintx_port_gen_ctl_t;
+
+/**
+ * cvmx_sriomaint#_port_lt_ctl
+ *
+ * SRIOMAINT_PORT_LT_CTL = SRIO Link Layer Timeout Control
+ *
+ * Link Layer Timeout Control
+ *
+ * Notes:
+ * This register controls the timeout for link layer transactions. It is used as the timeout between
+ * sending a packet (of any type) or link request to receiving the corresponding link acknowledge or
+ * link-response. Each count represents 200ns. The minimum timeout period is the TIMEOUT x 200nS
+ * and the maximum is twice that number. A value less than 32 may not guarantee that all timeout
+ * errors will be reported correctly. When the timeout period expires the packet or link request is
+ * dropped and the error is logged in the LNK_TOUT field of the SRIOMAINT(0,2..3)_ERB_ERR_DET register. A
+ * value of 0 in this register will allow the packet or link request to be issued but it will timeout
+ * immediately. This value is not recommended for normal operation.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PORT_LT_CTL hclk hrst_n
+ */
+union cvmx_sriomaintx_port_lt_ctl {
+ uint32_t u32;
+ struct cvmx_sriomaintx_port_lt_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t timeout : 24; /**< Timeout Value */
+ uint32_t reserved_0_7 : 8;
+#else
+ uint32_t reserved_0_7 : 8;
+ uint32_t timeout : 24;
+#endif
+ } s;
+ struct cvmx_sriomaintx_port_lt_ctl_s cn63xx;
+ struct cvmx_sriomaintx_port_lt_ctl_s cn63xxp1;
+ struct cvmx_sriomaintx_port_lt_ctl_s cn66xx;
+};
+typedef union cvmx_sriomaintx_port_lt_ctl cvmx_sriomaintx_port_lt_ctl_t;
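+
+/* The notes above define each TIMEOUT count as 200ns, with the actual
+ * timeout falling between TIMEOUT x 200ns and twice that.  A sketch of
+ * the conversion (helper name hypothetical), clamping to the documented
+ * minimum of 32 and to the 24-bit field width. */
+static inline uint32_t cvmx_example_lt_timeout_counts(uint64_t nanoseconds)
+{
+	uint64_t counts = (nanoseconds + 199) / 200;	/* round up to whole counts */
+
+	if (counts < 32)
+		counts = 32;		/* below 32 may miss timeout errors */
+	if (counts > 0xFFFFFF)
+		counts = 0xFFFFFF;	/* TIMEOUT is a 24-bit field */
+	return (uint32_t)counts;
+}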
+
+/**
+ * cvmx_sriomaint#_port_mbh0
+ *
+ * SRIOMAINT_PORT_MBH0 = SRIO Port Maintenance Block Header 0
+ *
+ * Port Maintenance Block Header 0
+ *
+ * Notes:
+ * Clk_Rst: SRIOMAINT(0,2..3)_PORT_MBH0 hclk hrst_n
+ *
+ */
+union cvmx_sriomaintx_port_mbh0 {
+ uint32_t u32;
+ struct cvmx_sriomaintx_port_mbh0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ef_ptr : 16; /**< Pointer to Error Management Block. */
+ uint32_t ef_id : 16; /**< Extended Feature ID (Generic Endpoint Device) */
+#else
+ uint32_t ef_id : 16;
+ uint32_t ef_ptr : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_port_mbh0_s cn63xx;
+ struct cvmx_sriomaintx_port_mbh0_s cn63xxp1;
+ struct cvmx_sriomaintx_port_mbh0_s cn66xx;
+};
+typedef union cvmx_sriomaintx_port_mbh0 cvmx_sriomaintx_port_mbh0_t;
+
+/**
+ * cvmx_sriomaint#_port_rt_ctl
+ *
+ * SRIOMAINT_PORT_RT_CTL = SRIO Logical Layer Timeout Control
+ *
+ * Logical Layer Timeout Control
+ *
+ * Notes:
+ * This register controls the timeout for logical layer transactions. It is used under two
+ * conditions. First, it is used as the timeout period between sending a packet requiring a packet
+ * response being sent to receiving the corresponding response. This is used for all outgoing packet
+ * types including memory, maintenance, doorbells and message operations. When the timeout period
+ * expires the packet is discarded and the error is logged in the PKT_TOUT field of the
+ * SRIOMAINT(0,2..3)_ERB_LT_ERR_DET register. The second use of this register is as a timeout period
+ * between incoming message segments of the same message. If a message segment is received then the
+ * MSG_TOUT field of the SRIOMAINT(0,2..3)_ERB_LT_ERR_DET register is set if the next segment has not been
+ * received before the time expires. In both cases, each count represents 200ns. The minimum
+ * timeout period is TIMEOUT x 200ns and the maximum is twice that number. A value less than 32
+ * may not guarantee that all timeout errors will be reported correctly. A value of 0 disables the
+ * logical layer timeouts and is not recommended for normal operation.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PORT_RT_CTL hclk hrst_n
+ */
+union cvmx_sriomaintx_port_rt_ctl {
+ uint32_t u32;
+ struct cvmx_sriomaintx_port_rt_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t timeout : 24; /**< Timeout Value */
+ uint32_t reserved_0_7 : 8;
+#else
+ uint32_t reserved_0_7 : 8;
+ uint32_t timeout : 24;
+#endif
+ } s;
+ struct cvmx_sriomaintx_port_rt_ctl_s cn63xx;
+ struct cvmx_sriomaintx_port_rt_ctl_s cn63xxp1;
+ struct cvmx_sriomaintx_port_rt_ctl_s cn66xx;
+};
+typedef union cvmx_sriomaintx_port_rt_ctl cvmx_sriomaintx_port_rt_ctl_t;
+
+/**
+ * cvmx_sriomaint#_port_ttl_ctl
+ *
+ * SRIOMAINT_PORT_TTL_CTL = SRIO Packet Time to Live Control
+ *
+ * Packet Time to Live
+ *
+ * Notes:
+ * This register controls the timeout for outgoing packets. It is used to make sure packets are
+ * being transmitted and acknowledged within a reasonable period of time. The timeout value
+ * corresponds to TIMEOUT x 200ns and a value of 0 disables the timer. The actual value used
+ * should be greater than the physical layer timeout specified in SRIOMAINT(0,2..3)_PORT_LT_CTL and is
+ * typically less than the response timeout specified in
+ * SRIOMAINT(0,2..3)_PORT_RT_CTL. A second application of this timer is to remove all the packets waiting
+ * to be transmitted, including those already in flight. This may be necessary in the case of a link
+ * going down (see SRIO(0,2..3)_INT_REG.LINK_DWN). This can be accomplished by setting the TIMEOUT to a
+ * small value so that all TX packets are dropped.
+ * interrupt is asserted, any packets currently being transmitted are dropped, the
+ * SRIOMAINT(0,2..3)_TX_DROP.DROP bit is set (causing any scheduled packets to be dropped), the
+ * SRIOMAINT(0,2..3)_TX_DROP.DROP_CNT is incremented for each packet and the SRIO output state is set to
+ * IDLE (all errors are cleared). Software must clear the SRIOMAINT(0,2..3)_TX_DROP.DROP bit to resume
+ * transmitting packets.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PORT_TTL_CTL hclk hrst_n
+ */
+union cvmx_sriomaintx_port_ttl_ctl {
+ uint32_t u32;
+ struct cvmx_sriomaintx_port_ttl_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t timeout : 24; /**< Timeout Value */
+ uint32_t reserved_0_7 : 8;
+#else
+ uint32_t reserved_0_7 : 8;
+ uint32_t timeout : 24;
+#endif
+ } s;
+ struct cvmx_sriomaintx_port_ttl_ctl_s cn63xx;
+ struct cvmx_sriomaintx_port_ttl_ctl_s cn66xx;
+};
+typedef union cvmx_sriomaintx_port_ttl_ctl cvmx_sriomaintx_port_ttl_ctl_t;
+
+/**
+ * cvmx_sriomaint#_pri_dev_id
+ *
+ * SRIOMAINT_PRI_DEV_ID = SRIO Primary Device ID
+ *
+ * Primary 8 and 16 bit Device IDs
+ *
+ * Notes:
+ * This register defines the primary 8 and 16 bit device IDs used for large and small transport. An
+ * optional secondary set of device IDs are located in SRIOMAINT(0,2..3)_SEC_DEV_ID.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_PRI_DEV_ID hclk hrst_n
+ */
+union cvmx_sriomaintx_pri_dev_id {
+ uint32_t u32;
+ struct cvmx_sriomaintx_pri_dev_id_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t id8 : 8; /**< Primary 8-bit Device ID */
+ uint32_t id16 : 16; /**< Primary 16-bit Device ID */
+#else
+ uint32_t id16 : 16;
+ uint32_t id8 : 8;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_sriomaintx_pri_dev_id_s cn63xx;
+ struct cvmx_sriomaintx_pri_dev_id_s cn63xxp1;
+ struct cvmx_sriomaintx_pri_dev_id_s cn66xx;
+};
+typedef union cvmx_sriomaintx_pri_dev_id cvmx_sriomaintx_pri_dev_id_t;
+
+/**
+ * cvmx_sriomaint#_sec_dev_ctrl
+ *
+ * SRIOMAINT_SEC_DEV_CTRL = SRIO Secondary Device ID Control
+ *
+ * Control for Secondary Device IDs
+ *
+ * Notes:
+ * This register enables the secondary 8 and 16 bit device IDs used for large and small transport.
+ * The corresponding secondary ID must be written before the ID is enabled. The secondary IDs should
+ * not be enabled if the values of the primary and secondary IDs are identical.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_SEC_DEV_CTRL hclk hrst_n
+ */
+union cvmx_sriomaintx_sec_dev_ctrl {
+ uint32_t u32;
+ struct cvmx_sriomaintx_sec_dev_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_2_31 : 30;
+ uint32_t enable8 : 1; /**< Enable matches to secondary 8-bit Device ID */
+ uint32_t enable16 : 1; /**< Enable matches to secondary 16-bit Device ID */
+#else
+ uint32_t enable16 : 1;
+ uint32_t enable8 : 1;
+ uint32_t reserved_2_31 : 30;
+#endif
+ } s;
+ struct cvmx_sriomaintx_sec_dev_ctrl_s cn63xx;
+ struct cvmx_sriomaintx_sec_dev_ctrl_s cn63xxp1;
+ struct cvmx_sriomaintx_sec_dev_ctrl_s cn66xx;
+};
+typedef union cvmx_sriomaintx_sec_dev_ctrl cvmx_sriomaintx_sec_dev_ctrl_t;
+
+/**
+ * cvmx_sriomaint#_sec_dev_id
+ *
+ * SRIOMAINT_SEC_DEV_ID = SRIO Secondary Device ID
+ *
+ * Secondary 8 and 16 bit Device IDs
+ *
+ * Notes:
+ * This register defines the secondary 8 and 16 bit device IDs used for large and small transport.
+ * The corresponding secondary ID must be written before the ID is enabled in the
+ * SRIOMAINT(0,2..3)_SEC_DEV_CTRL register. The primary set of device IDs are located in
+ * SRIOMAINT(0,2..3)_PRI_DEV_ID register. The secondary IDs should not be written to the same values as the
+ * corresponding primary IDs.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_SEC_DEV_ID hclk hrst_n
+ */
+union cvmx_sriomaintx_sec_dev_id {
+ uint32_t u32;
+ struct cvmx_sriomaintx_sec_dev_id_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t id8 : 8; /**< Secondary 8-bit Device ID */
+ uint32_t id16 : 16; /**< Secondary 16-bit Device ID */
+#else
+ uint32_t id16 : 16;
+ uint32_t id8 : 8;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_sriomaintx_sec_dev_id_s cn63xx;
+ struct cvmx_sriomaintx_sec_dev_id_s cn63xxp1;
+ struct cvmx_sriomaintx_sec_dev_id_s cn66xx;
+};
+typedef union cvmx_sriomaintx_sec_dev_id cvmx_sriomaintx_sec_dev_id_t;
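+
+/* Sketch of the ordering rule from the two notes above: program the
+ * secondary IDs first, then set the enable bits in SEC_DEV_CTRL.
+ * Offsets, accessors and the helper name are hypothetical. */
+static inline void cvmx_example_set_secondary_ids(int srio_port, uint64_t id_off,
+						  uint64_t ctrl_off,
+						  uint8_t id8, uint16_t id16)
+{
+	cvmx_sriomaintx_sec_dev_id_t id;
+	cvmx_sriomaintx_sec_dev_ctrl_t ctrl;
+
+	id.u32 = 0;
+	id.s.id8 = id8;
+	id.s.id16 = id16;
+	cvmx_example_cfg_write32(srio_port, id_off, id.u32);	/* IDs first... */
+
+	ctrl.u32 = 0;
+	ctrl.s.enable8 = 1;
+	ctrl.s.enable16 = 1;
+	cvmx_example_cfg_write32(srio_port, ctrl_off, ctrl.u32);	/* ...then enable */
+}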
+
+/**
+ * cvmx_sriomaint#_serial_lane_hdr
+ *
+ * SRIOMAINT_SERIAL_LANE_HDR = SRIO Serial Lane Header
+ *
+ * SRIO Serial Lane Header
+ *
+ * Notes:
+ * The error management extensions block header register contains the EF_PTR to the next EF_BLK and
+ * the EF_ID that identifies this as the Serial Lane Status Block.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_SERIAL_LANE_HDR hclk hrst_n
+ */
+union cvmx_sriomaintx_serial_lane_hdr {
+ uint32_t u32;
+ struct cvmx_sriomaintx_serial_lane_hdr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ef_ptr : 16; /**< Pointer to the next block in the extended features
+ data structure. */
+ uint32_t ef_id : 16;
+#else
+ uint32_t ef_id : 16;
+ uint32_t ef_ptr : 16;
+#endif
+ } s;
+ struct cvmx_sriomaintx_serial_lane_hdr_s cn63xx;
+ struct cvmx_sriomaintx_serial_lane_hdr_s cn63xxp1;
+ struct cvmx_sriomaintx_serial_lane_hdr_s cn66xx;
+};
+typedef union cvmx_sriomaintx_serial_lane_hdr cvmx_sriomaintx_serial_lane_hdr_t;
+
+/**
+ * cvmx_sriomaint#_src_ops
+ *
+ * SRIOMAINT_SRC_OPS = SRIO Source Operations
+ *
+ * The logical operations initiated by the Octeon.
+ *
+ * Notes:
+ * The logical operations initiated by the Cores. The Source OPs register shows the operations
+ * specified in the SRIO(0,2..3)_IP_FEATURE.OPS register.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_SRC_OPS hclk hrst_n
+ */
+union cvmx_sriomaintx_src_ops {
+ uint32_t u32;
+ struct cvmx_sriomaintx_src_ops_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t gsm_read : 1; /**< PE does not support Read Home operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<31>] */
+ uint32_t i_read : 1; /**< PE does not support Instruction Read.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<30>] */
+ uint32_t rd_own : 1; /**< PE does not support Read for Ownership.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<29>] */
+ uint32_t d_invald : 1; /**< PE does not support Data Cache Invalidate.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<28>] */
+ uint32_t castout : 1; /**< PE does not support Castout Operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<27>] */
+ uint32_t d_flush : 1; /**< PE does not support Data Cache Flush.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<26>] */
+ uint32_t io_read : 1; /**< PE does not support IO Read.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<25>] */
+ uint32_t i_invald : 1; /**< PE does not support Instruction Cache Invalidate.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<24>] */
+ uint32_t tlb_inv : 1; /**< PE does not support TLB Entry Invalidate.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<23>] */
+ uint32_t tlb_invs : 1; /**< PE does not support TLB Entry Invalidate Sync.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<22>] */
+ uint32_t reserved_16_21 : 6;
+ uint32_t read : 1; /**< PE can support Nread operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<15>] */
+ uint32_t write : 1; /**< PE can support Nwrite operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<14>] */
+ uint32_t swrite : 1; /**< PE can support Swrite operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<13>] */
+ uint32_t write_r : 1; /**< PE can support Write with Response operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<12>] */
+ uint32_t msg : 1; /**< PE can support Data Message operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<11>] */
+ uint32_t doorbell : 1; /**< PE can support Doorbell operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<10>] */
+ uint32_t compswap : 1; /**< PE does not support Atomic Compare and Swap.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<9>] */
+ uint32_t testswap : 1; /**< PE does not support Atomic Test and Swap.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<8>] */
+ uint32_t atom_inc : 1; /**< PE can support Atomic increment operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<7>] */
+ uint32_t atom_dec : 1; /**< PE can support Atomic decrement operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<6>] */
+ uint32_t atom_set : 1; /**< PE can support Atomic set operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<5>] */
+ uint32_t atom_clr : 1; /**< PE can support Atomic clear operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<4>] */
+ uint32_t atom_swp : 1; /**< PE does not support Atomic Swap.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<3>] */
+	uint32_t port_wr                      : 1;  /**< PE can support Port Write operations.
+ This is a RO copy of SRIO*_IP_FEATURE[OPS<2>] */
+ uint32_t reserved_0_1 : 2;
+#else
+ uint32_t reserved_0_1 : 2;
+ uint32_t port_wr : 1;
+ uint32_t atom_swp : 1;
+ uint32_t atom_clr : 1;
+ uint32_t atom_set : 1;
+ uint32_t atom_dec : 1;
+ uint32_t atom_inc : 1;
+ uint32_t testswap : 1;
+ uint32_t compswap : 1;
+ uint32_t doorbell : 1;
+ uint32_t msg : 1;
+ uint32_t write_r : 1;
+ uint32_t swrite : 1;
+ uint32_t write : 1;
+ uint32_t read : 1;
+ uint32_t reserved_16_21 : 6;
+ uint32_t tlb_invs : 1;
+ uint32_t tlb_inv : 1;
+ uint32_t i_invald : 1;
+ uint32_t io_read : 1;
+ uint32_t d_flush : 1;
+ uint32_t castout : 1;
+ uint32_t d_invald : 1;
+ uint32_t rd_own : 1;
+ uint32_t i_read : 1;
+ uint32_t gsm_read : 1;
+#endif
+ } s;
+ struct cvmx_sriomaintx_src_ops_s cn63xx;
+ struct cvmx_sriomaintx_src_ops_s cn63xxp1;
+ struct cvmx_sriomaintx_src_ops_s cn66xx;
+};
+typedef union cvmx_sriomaintx_src_ops cvmx_sriomaintx_src_ops_t;
+
+/**
+ * cvmx_sriomaint#_tx_drop
+ *
+ * SRIOMAINT_TX_DROP = SRIO MAC Outgoing Packet Drop
+ *
+ * Outgoing SRIO Packet Drop Control/Status
+ *
+ * Notes:
+ * This register controls and provides status for dropping outgoing SRIO packets. The DROP bit
+ * should only be cleared when no packets are currently being dropped. This can be guaranteed by
+ * clearing the SRIOMAINT(0,2..3)_PORT_0_CTL.O_ENABLE bit before changing the DROP bit and restoring the
+ * O_ENABLE afterwards.
+ *
+ * Clk_Rst: SRIOMAINT(0,2..3)_TX_DROP hclk hrst_n
+ */
+union cvmx_sriomaintx_tx_drop {
+ uint32_t u32;
+ struct cvmx_sriomaintx_tx_drop_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_17_31 : 15;
+ uint32_t drop : 1; /**< All outgoing packets are dropped. Any packets
+ requiring a response will return 1's after the
+ SRIOMAINT(0,2..3)_PORT_RT_CTL Timeout expires. This bit
+ is set automatically when the TTL Timeout occurs
+ or can be set by software and must always be
+ cleared by software. */
+ uint32_t drop_cnt : 16; /**< Number of packets dropped by transmit logic.
+ Packets are dropped whenever a packet is ready to
+                                         be transmitted and a TTL Timeout occurs, the DROP
+                                         bit is set, or the SRIOMAINT(0,2..3)_ERB_ERR_RATE_THR
+ FAIL_TH has been reached and the DROP_PKT bit is
+ set in SRIOMAINT(0,2..3)_PORT_0_CTL. This counter wraps
+ on overflow and is cleared only on reset. */
+#else
+ uint32_t drop_cnt : 16;
+ uint32_t drop : 1;
+ uint32_t reserved_17_31 : 15;
+#endif
+ } s;
+ struct cvmx_sriomaintx_tx_drop_s cn63xx;
+ struct cvmx_sriomaintx_tx_drop_s cn66xx;
+};
+typedef union cvmx_sriomaintx_tx_drop cvmx_sriomaintx_tx_drop_t;
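+
+/* Sketch of the DROP-clearing procedure from the notes above: quiesce
+ * the output port, clear DROP, then restore O_ENABLE.  Offsets,
+ * accessors and the helper name are hypothetical. */
+static inline void cvmx_example_clear_tx_drop(int srio_port, uint64_t ctl_off,
+					      uint64_t drop_off)
+{
+	cvmx_sriomaintx_port_0_ctl_t ctl;
+	cvmx_sriomaintx_tx_drop_t drop;
+
+	ctl.u32 = cvmx_example_cfg_read32(srio_port, ctl_off);
+	ctl.s.o_enable = 0;	/* stop issuing packets */
+	cvmx_example_cfg_write32(srio_port, ctl_off, ctl.u32);
+
+	drop.u32 = cvmx_example_cfg_read32(srio_port, drop_off);
+	drop.s.drop = 0;	/* software must clear DROP */
+	cvmx_example_cfg_write32(srio_port, drop_off, drop.u32);
+
+	ctl.s.o_enable = 1;	/* resume transmission */
+	cvmx_example_cfg_write32(srio_port, ctl_off, ctl.u32);
+}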
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-sriomaintx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-sriox-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-sriox-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-sriox-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,3889 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-sriox-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon sriox.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_SRIOX_DEFS_H__
+#define __CVMX_SRIOX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_ACC_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_ACC_CTRL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000148ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_ACC_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000148ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
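+
+/* Usage sketch (helper name hypothetical): each SRIO block's CSRs sit
+ * in their own 0x1000000-byte window, so consecutive block IDs differ
+ * by one 16MB stride.  With CVMX_ENABLE_CSR_ADDRESS_CHECKING set, block
+ * IDs invalid for the current model (e.g. block 1 on CN66XX) trigger a
+ * cvmx_warn() at run time. */
+static inline uint64_t cvmx_example_acc_ctrl_stride(void)
+{
+	return CVMX_SRIOX_ACC_CTRL(2) - CVMX_SRIOX_ACC_CTRL(0);	/* 2 * 0x1000000 */
+}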
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_ASMBLY_ID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_ASMBLY_ID(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000200ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_ASMBLY_ID(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000200ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_ASMBLY_INFO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_ASMBLY_INFO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000208ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_ASMBLY_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000208ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_BELL_RESP_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_BELL_RESP_CTRL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000310ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_BELL_RESP_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000310ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_BIST_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_BIST_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000108ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000108ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_IMSG_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_IMSG_CTRL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000508ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_IMSG_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000508ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_IMSG_INST_HDRX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 1)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOX_IMSG_INST_HDRX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000510ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8;
+}
+#else
+#define CVMX_SRIOX_IMSG_INST_HDRX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000510ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_IMSG_QOS_GRPX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 31)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 31)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOX_IMSG_QOS_GRPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000600ull) + (((offset) & 31) + ((block_id) & 3) * 0x200000ull) * 8;
+}
+#else
+#define CVMX_SRIOX_IMSG_QOS_GRPX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000600ull) + (((offset) & 31) + ((block_id) & 3) * 0x200000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_IMSG_STATUSX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 23)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 23)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOX_IMSG_STATUSX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000700ull) + (((offset) & 31) + ((block_id) & 3) * 0x200000ull) * 8;
+}
+#else
+#define CVMX_SRIOX_IMSG_STATUSX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000700ull) + (((offset) & 31) + ((block_id) & 3) * 0x200000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_IMSG_VPORT_THR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_IMSG_VPORT_THR(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000500ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_IMSG_VPORT_THR(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000500ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_IMSG_VPORT_THR2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_IMSG_VPORT_THR2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000528ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_IMSG_VPORT_THR2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000528ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_INT2_ENABLE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_INT2_ENABLE(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C80003E0ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_INT2_ENABLE(block_id) (CVMX_ADD_IO_SEG(0x00011800C80003E0ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_INT2_REG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_INT2_REG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C80003E8ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_INT2_REG(block_id) (CVMX_ADD_IO_SEG(0x00011800C80003E8ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_INT_ENABLE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_INT_ENABLE(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000110ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_INT_ENABLE(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000110ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_INT_INFO0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_INT_INFO0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000120ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_INT_INFO0(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000120ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_INT_INFO1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_INT_INFO1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000128ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_INT_INFO1(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000128ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_INT_INFO2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_INT_INFO2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000130ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_INT_INFO2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000130ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_INT_INFO3(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_INT_INFO3(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000138ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_INT_INFO3(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000138ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_INT_REG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_INT_REG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000118ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000118ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_IP_FEATURE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_IP_FEATURE(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C80003F8ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_IP_FEATURE(block_id) (CVMX_ADD_IO_SEG(0x00011800C80003F8ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_MAC_BUFFERS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_MAC_BUFFERS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000390ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_MAC_BUFFERS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000390ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_MAINT_OP(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_MAINT_OP(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000158ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_MAINT_OP(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000158ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_MAINT_RD_DATA(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_MAINT_RD_DATA(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000160ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_MAINT_RD_DATA(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000160ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_MCE_TX_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_MCE_TX_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000240ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_MCE_TX_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000240ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_MEM_OP_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_MEM_OP_CTRL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000168ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_MEM_OP_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000168ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_OMSG_CTRLX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 1)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOX_OMSG_CTRLX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000488ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64;
+}
+#else
+#define CVMX_SRIOX_OMSG_CTRLX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000488ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_OMSG_DONE_COUNTSX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 1)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOX_OMSG_DONE_COUNTSX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C80004B0ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64;
+}
+#else
+#define CVMX_SRIOX_OMSG_DONE_COUNTSX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C80004B0ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_OMSG_FMP_MRX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 1)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOX_OMSG_FMP_MRX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000498ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64;
+}
+#else
+#define CVMX_SRIOX_OMSG_FMP_MRX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000498ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_OMSG_NMP_MRX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 1)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOX_OMSG_NMP_MRX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C80004A0ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64;
+}
+#else
+#define CVMX_SRIOX_OMSG_NMP_MRX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C80004A0ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_OMSG_PORTX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 1)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOX_OMSG_PORTX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000480ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64;
+}
+#else
+#define CVMX_SRIOX_OMSG_PORTX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000480ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_OMSG_SILO_THR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_OMSG_SILO_THR(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C80004F8ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_OMSG_SILO_THR(block_id) (CVMX_ADD_IO_SEG(0x00011800C80004F8ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_OMSG_SP_MRX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 1)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOX_OMSG_SP_MRX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000490ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64;
+}
+#else
+#define CVMX_SRIOX_OMSG_SP_MRX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000490ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_PRIOX_IN_USE(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 3)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOX_PRIOX_IN_USE(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C80003C0ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8;
+}
+#else
+#define CVMX_SRIOX_PRIOX_IN_USE(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C80003C0ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_RX_BELL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_RX_BELL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000308ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_RX_BELL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000308ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_RX_BELL_SEQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_RX_BELL_SEQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000300ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_RX_BELL_SEQ(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000300ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_RX_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_RX_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000380ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_RX_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000380ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_S2M_TYPEX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 15)) && ((block_id == 0) || (block_id == 2) || (block_id == 3))))))
+ cvmx_warn("CVMX_SRIOX_S2M_TYPEX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000180ull) + (((offset) & 15) + ((block_id) & 3) * 0x200000ull) * 8;
+}
+#else
+#define CVMX_SRIOX_S2M_TYPEX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000180ull) + (((offset) & 15) + ((block_id) & 3) * 0x200000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_SEQ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_SEQ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000278ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_SEQ(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000278ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_STATUS_REG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_STATUS_REG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000100ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_STATUS_REG(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000100ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_TAG_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_TAG_CTRL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000178ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_TAG_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000178ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_TLP_CREDITS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_TLP_CREDITS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000150ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_TLP_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000150ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_TX_BELL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_TX_BELL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000280ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_TX_BELL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000280ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_TX_BELL_INFO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_TX_BELL_INFO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000288ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_TX_BELL_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000288ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_TX_CTRL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_TX_CTRL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000170ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_TX_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000170ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_TX_EMPHASIS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_TX_EMPHASIS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C80003F0ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_TX_EMPHASIS(block_id) (CVMX_ADD_IO_SEG(0x00011800C80003F0ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_TX_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_TX_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000388ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_TX_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000388ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRIOX_WR_DONE_COUNTS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0) || ((block_id >= 2) && (block_id <= 3))))))
+ cvmx_warn("CVMX_SRIOX_WR_DONE_COUNTS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800C8000340ull) + ((block_id) & 3) * 0x1000000ull;
+}
+#else
+#define CVMX_SRIOX_WR_DONE_COUNTS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000340ull) + ((block_id) & 3) * 0x1000000ull)
+#endif
+
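+/* Illustrative sketch (not part of the original SDK vendor header): the
+ * accessors above all compute a CSR address as base + (block_id & 3) *
+ * 0x1000000, so e.g. CVMX_SRIOX_STATUS_REG(2) is
+ * CVMX_ADD_IO_SEG(0x00011800C8000100ull) + 0x2000000. A raw read might look
+ * as follows, assuming cvmx_read_csr() from cvmx.h is in scope.
+ */
+static inline uint64_t cvmx_sriox_example_read_status(unsigned long block_id)
+{
+ /* Hypothetical helper: fetch the raw 64-bit STATUS_REG of one SRIO MAC. */
+ return cvmx_read_csr(CVMX_SRIOX_STATUS_REG(block_id));
+}
+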
+/**
+ * cvmx_srio#_acc_ctrl
+ *
+ * SRIO_ACC_CTRL = SRIO Access Control
+ *
+ * General access control of the incoming BAR registers.
+ *
+ * Notes:
+ * This register controls write access to the BAR registers via SRIO Maintenance Operations. At
+ * powerup the BAR registers can be accessed via RSL and Maintenance Operations. If the DENY_BAR*
+ * bits or DENY_ADR* bits are set then Maintenance Writes to the corresponding BAR fields are
+ * ignored. Setting both the DENY_BAR and DENY_ADR for a corresponding BAR is compatible with the
+ * operation of the DENY_BAR bit found in 63xx Pass 2 and earlier. This register does not affect
+ * read operations. Reset values for DENY_BAR[2:0] are typically clear but they are set if
+ * the chip is operating in Authentik Mode.
+ *
+ * Clk_Rst: SRIO(0,2..3)_ACC_CTRL hclk hrst_n
+ */
+union cvmx_sriox_acc_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_acc_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t deny_adr2 : 1; /**< Deny SRIO Write Access to SRIO Address Fields in
+ SRIOMAINT(0,2..3)_BAR2* Registers */
+ uint64_t deny_adr1 : 1; /**< Deny SRIO Write Access to SRIO Address Fields in
+ SRIOMAINT(0,2..3)_BAR1* Registers */
+ uint64_t deny_adr0 : 1; /**< Deny SRIO Write Access to SRIO Address Fields in
+ SRIOMAINT(0,2..3)_BAR0* Registers */
+ uint64_t reserved_3_3 : 1;
+ uint64_t deny_bar2 : 1; /**< Deny SRIO Write Access to non-SRIO Address Fields
+ in the SRIOMAINT_BAR2 Registers */
+ uint64_t deny_bar1 : 1; /**< Deny SRIO Write Access to non-SRIO Address Fields
+ in the SRIOMAINT_BAR1 Registers */
+ uint64_t deny_bar0 : 1; /**< Deny SRIO Write Access to non-SRIO Address Fields
+ in the SRIOMAINT_BAR0 Registers */
+#else
+ uint64_t deny_bar0 : 1;
+ uint64_t deny_bar1 : 1;
+ uint64_t deny_bar2 : 1;
+ uint64_t reserved_3_3 : 1;
+ uint64_t deny_adr0 : 1;
+ uint64_t deny_adr1 : 1;
+ uint64_t deny_adr2 : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_sriox_acc_ctrl_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t deny_bar2 : 1; /**< Deny SRIO Write Access to BAR2 Registers */
+ uint64_t deny_bar1 : 1; /**< Deny SRIO Write Access to BAR1 Registers */
+ uint64_t deny_bar0 : 1; /**< Deny SRIO Write Access to BAR0 Registers */
+#else
+ uint64_t deny_bar0 : 1;
+ uint64_t deny_bar1 : 1;
+ uint64_t deny_bar2 : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn63xx;
+ struct cvmx_sriox_acc_ctrl_cn63xx cn63xxp1;
+ struct cvmx_sriox_acc_ctrl_s cn66xx;
+};
+typedef union cvmx_sriox_acc_ctrl cvmx_sriox_acc_ctrl_t;
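+
+/* Illustrative sketch (not part of the original SDK vendor header): a
+ * read-modify-write that sets the DENY_BAR* bits so SRIO Maintenance Writes
+ * to all three BARs are ignored, assuming the CVMX_SRIOX_ACC_CTRL(block_id)
+ * accessor defined earlier in this file and cvmx_read_csr()/cvmx_write_csr()
+ * from cvmx.h.
+ */
+static inline void cvmx_sriox_example_deny_bar_writes(unsigned long block_id)
+{
+ cvmx_sriox_acc_ctrl_t acc;
+ acc.u64 = cvmx_read_csr(CVMX_SRIOX_ACC_CTRL(block_id));
+ acc.s.deny_bar0 = 1; /* ignore Maintenance Writes to SRIOMAINT_BAR0 */
+ acc.s.deny_bar1 = 1; /* ignore Maintenance Writes to SRIOMAINT_BAR1 */
+ acc.s.deny_bar2 = 1; /* ignore Maintenance Writes to SRIOMAINT_BAR2 */
+ cvmx_write_csr(CVMX_SRIOX_ACC_CTRL(block_id), acc.u64);
+}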
+
+/**
+ * cvmx_srio#_asmbly_id
+ *
+ * SRIO_ASMBLY_ID = SRIO Assembly ID
+ *
+ * The Assembly ID register controls the Assembly ID and Vendor
+ *
+ * Notes:
+ * This register specifies the Assembly ID and Vendor visible in SRIOMAINT(0,2..3)_ASMBLY_ID register. The
+ * Assembly Vendor ID is typically supplied by the RapidIO Trade Association. This register is only
+ * reset during COLD boot and may only be modified while SRIO(0,2..3)_STATUS_REG.ACCESS is zero.
+ *
+ * Clk_Rst: SRIO(0,2..3)_ASMBLY_ID sclk srst_cold_n
+ */
+union cvmx_sriox_asmbly_id {
+ uint64_t u64;
+ struct cvmx_sriox_asmbly_id_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t assy_id : 16; /**< Assembly Identifier */
+ uint64_t assy_ven : 16; /**< Assembly Vendor Identifier */
+#else
+ uint64_t assy_ven : 16;
+ uint64_t assy_id : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sriox_asmbly_id_s cn63xx;
+ struct cvmx_sriox_asmbly_id_s cn63xxp1;
+ struct cvmx_sriox_asmbly_id_s cn66xx;
+};
+typedef union cvmx_sriox_asmbly_id cvmx_sriox_asmbly_id_t;
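+
+/* Illustrative sketch (not part of the original SDK vendor header): program
+ * the assembly identifiers, which per the note above is only legal while
+ * SRIO(0,2..3)_STATUS_REG.ACCESS is zero. Assumes the
+ * CVMX_SRIOX_ASMBLY_ID(block_id) accessor defined earlier in this file and
+ * cvmx_write_csr() from cvmx.h.
+ */
+static inline void cvmx_sriox_example_set_asmbly_id(unsigned long block_id,
+ uint16_t id, uint16_t vendor)
+{
+ cvmx_sriox_asmbly_id_t asmbly;
+ asmbly.u64 = 0;
+ asmbly.s.assy_id = id; /* Assembly Identifier */
+ asmbly.s.assy_ven = vendor; /* Assembly Vendor ID from the RapidIO TA */
+ cvmx_write_csr(CVMX_SRIOX_ASMBLY_ID(block_id), asmbly.u64);
+}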
+
+/**
+ * cvmx_srio#_asmbly_info
+ *
+ * SRIO_ASMBLY_INFO = SRIO Assembly Information
+ *
+ * The Assembly Info register controls the Assembly Revision
+ *
+ * Notes:
+ * The Assembly Info register controls the Assembly Revision visible in the ASSY_REV field of the
+ * SRIOMAINT(0,2..3)_ASMBLY_INFO register. This register is only reset during COLD boot and may only be
+ * modified while SRIO(0,2..3)_STATUS_REG.ACCESS is zero.
+ *
+ * Clk_Rst: SRIO(0,2..3)_ASMBLY_INFO sclk srst_cold_n
+ */
+union cvmx_sriox_asmbly_info {
+ uint64_t u64;
+ struct cvmx_sriox_asmbly_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t assy_rev : 16; /**< Assembly Revision */
+ uint64_t reserved_0_15 : 16;
+#else
+ uint64_t reserved_0_15 : 16;
+ uint64_t assy_rev : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sriox_asmbly_info_s cn63xx;
+ struct cvmx_sriox_asmbly_info_s cn63xxp1;
+ struct cvmx_sriox_asmbly_info_s cn66xx;
+};
+typedef union cvmx_sriox_asmbly_info cvmx_sriox_asmbly_info_t;
+
+/**
+ * cvmx_srio#_bell_resp_ctrl
+ *
+ * SRIO_BELL_RESP_CTRL = SRIO Doorbell Response Control
+ *
+ * The SRIO Doorbell Response Control Register
+ *
+ * Notes:
+ * This register is used to override the response priority of the outgoing doorbell responses.
+ *
+ * Clk_Rst: SRIO(0,2..3)_BELL_RESP_CTRL hclk hrst_n
+ */
+union cvmx_sriox_bell_resp_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_bell_resp_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t rp1_sid : 1; /**< Sets response priority for incoming doorbells
+ of priority 1 on the secondary ID (0=2, 1=3) */
+ uint64_t rp0_sid : 2; /**< Sets response priority for incoming doorbells
+ of priority 0 on the secondary ID (0,1=1, 2=2, 3=3) */
+ uint64_t rp1_pid : 1; /**< Sets response priority for incoming doorbells
+ of priority 1 on the primary ID (0=2, 1=3) */
+ uint64_t rp0_pid : 2; /**< Sets response priority for incoming doorbells
+ of priority 0 on the primary ID (0,1=1, 2=2, 3=3) */
+#else
+ uint64_t rp0_pid : 2;
+ uint64_t rp1_pid : 1;
+ uint64_t rp0_sid : 2;
+ uint64_t rp1_sid : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_sriox_bell_resp_ctrl_s cn63xx;
+ struct cvmx_sriox_bell_resp_ctrl_s cn63xxp1;
+ struct cvmx_sriox_bell_resp_ctrl_s cn66xx;
+};
+typedef union cvmx_sriox_bell_resp_ctrl cvmx_sriox_bell_resp_ctrl_t;
+
+/**
+ * cvmx_srio#_bist_status
+ *
+ * SRIO_BIST_STATUS = SRIO Bist Status
+ *
+ * Results from BIST runs of SRIO's memories.
+ *
+ * Notes:
+ * BIST Results.
+ *
+ * Clk_Rst: SRIO(0,2..3)_BIST_STATUS hclk hrst_n
+ */
+union cvmx_sriox_bist_status {
+ uint64_t u64;
+ struct cvmx_sriox_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63 : 19;
+ uint64_t lram : 1; /**< Incoming Doorbell Lookup RAM. */
+ uint64_t mram : 2; /**< Incoming Message SLI FIFO. */
+ uint64_t cram : 2; /**< Incoming Rd/Wr/Response Command FIFO. */
+ uint64_t bell : 2; /**< Incoming Doorbell FIFO. */
+ uint64_t otag : 2; /**< Outgoing Tag Data. */
+ uint64_t itag : 1; /**< Incoming TAG Data. */
+ uint64_t ofree : 1; /**< Outgoing Free Pointer RAM (OFIFO) */
+ uint64_t rtn : 2; /**< Outgoing Response Return FIFO. */
+ uint64_t obulk : 4; /**< Outgoing Bulk Data RAMs (OFIFO) */
+ uint64_t optrs : 4; /**< Outgoing Priority Pointer RAMs (OFIFO) */
+ uint64_t oarb2 : 2; /**< Additional Outgoing Priority RAMs. */
+ uint64_t rxbuf2 : 2; /**< Additional Incoming SRIO MAC Buffers. */
+ uint64_t oarb : 2; /**< Outgoing Priority RAMs (OARB) */
+ uint64_t ispf : 1; /**< Incoming Soft Packet FIFO */
+ uint64_t ospf : 1; /**< Outgoing Soft Packet FIFO */
+ uint64_t txbuf : 2; /**< Outgoing SRIO MAC Buffer. */
+ uint64_t rxbuf : 2; /**< Incoming SRIO MAC Buffer. */
+ uint64_t imsg : 5; /**< Incoming Message RAMs. */
+ uint64_t omsg : 7; /**< Outgoing Message RAMs. */
+#else
+ uint64_t omsg : 7;
+ uint64_t imsg : 5;
+ uint64_t rxbuf : 2;
+ uint64_t txbuf : 2;
+ uint64_t ospf : 1;
+ uint64_t ispf : 1;
+ uint64_t oarb : 2;
+ uint64_t rxbuf2 : 2;
+ uint64_t oarb2 : 2;
+ uint64_t optrs : 4;
+ uint64_t obulk : 4;
+ uint64_t rtn : 2;
+ uint64_t ofree : 1;
+ uint64_t itag : 1;
+ uint64_t otag : 2;
+ uint64_t bell : 2;
+ uint64_t cram : 2;
+ uint64_t mram : 2;
+ uint64_t lram : 1;
+ uint64_t reserved_45_63 : 19;
+#endif
+ } s;
+ struct cvmx_sriox_bist_status_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t mram : 2; /**< Incoming Message SLI FIFO. */
+ uint64_t cram : 2; /**< Incoming Rd/Wr/Response Command FIFO. */
+ uint64_t bell : 2; /**< Incoming Doorbell FIFO. */
+ uint64_t otag : 2; /**< Outgoing Tag Data. */
+ uint64_t itag : 1; /**< Incoming TAG Data. */
+ uint64_t ofree : 1; /**< Outgoing Free Pointer RAM (OFIFO) */
+ uint64_t rtn : 2; /**< Outgoing Response Return FIFO. */
+ uint64_t obulk : 4; /**< Outgoing Bulk Data RAMs (OFIFO) */
+ uint64_t optrs : 4; /**< Outgoing Priority Pointer RAMs (OFIFO) */
+ uint64_t oarb2 : 2; /**< Additional Outgoing Priority RAMs (Pass 2). */
+ uint64_t rxbuf2 : 2; /**< Additional Incoming SRIO MAC Buffers (Pass 2). */
+ uint64_t oarb : 2; /**< Outgoing Priority RAMs (OARB) */
+ uint64_t ispf : 1; /**< Incoming Soft Packet FIFO */
+ uint64_t ospf : 1; /**< Outgoing Soft Packet FIFO */
+ uint64_t txbuf : 2; /**< Outgoing SRIO MAC Buffer. */
+ uint64_t rxbuf : 2; /**< Incoming SRIO MAC Buffer. */
+ uint64_t imsg : 5; /**< Incoming Message RAMs.
+ IMSG<0> (i.e. <7>) unused in Pass 2 */
+ uint64_t omsg : 7; /**< Outgoing Message RAMs. */
+#else
+ uint64_t omsg : 7;
+ uint64_t imsg : 5;
+ uint64_t rxbuf : 2;
+ uint64_t txbuf : 2;
+ uint64_t ospf : 1;
+ uint64_t ispf : 1;
+ uint64_t oarb : 2;
+ uint64_t rxbuf2 : 2;
+ uint64_t oarb2 : 2;
+ uint64_t optrs : 4;
+ uint64_t obulk : 4;
+ uint64_t rtn : 2;
+ uint64_t ofree : 1;
+ uint64_t itag : 1;
+ uint64_t otag : 2;
+ uint64_t bell : 2;
+ uint64_t cram : 2;
+ uint64_t mram : 2;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } cn63xx;
+ struct cvmx_sriox_bist_status_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63 : 20;
+ uint64_t mram : 2; /**< Incoming Message SLI FIFO. */
+ uint64_t cram : 2; /**< Incoming Rd/Wr/Response Command FIFO. */
+ uint64_t bell : 2; /**< Incoming Doorbell FIFO. */
+ uint64_t otag : 2; /**< Outgoing Tag Data. */
+ uint64_t itag : 1; /**< Incoming TAG Data. */
+ uint64_t ofree : 1; /**< Outgoing Free Pointer RAM (OFIFO) */
+ uint64_t rtn : 2; /**< Outgoing Response Return FIFO. */
+ uint64_t obulk : 4; /**< Outgoing Bulk Data RAMs (OFIFO) */
+ uint64_t optrs : 4; /**< Outgoing Priority Pointer RAMs (OFIFO) */
+ uint64_t reserved_20_23 : 4;
+ uint64_t oarb : 2; /**< Outgoing Priority RAMs (OARB) */
+ uint64_t ispf : 1; /**< Incoming Soft Packet FIFO */
+ uint64_t ospf : 1; /**< Outgoing Soft Packet FIFO */
+ uint64_t txbuf : 2; /**< Outgoing SRIO MAC Buffer. */
+ uint64_t rxbuf : 2; /**< Incoming SRIO MAC Buffer. */
+ uint64_t imsg : 5; /**< Incoming Message RAMs. */
+ uint64_t omsg : 7; /**< Outgoing Message RAMs. */
+#else
+ uint64_t omsg : 7;
+ uint64_t imsg : 5;
+ uint64_t rxbuf : 2;
+ uint64_t txbuf : 2;
+ uint64_t ospf : 1;
+ uint64_t ispf : 1;
+ uint64_t oarb : 2;
+ uint64_t reserved_20_23 : 4;
+ uint64_t optrs : 4;
+ uint64_t obulk : 4;
+ uint64_t rtn : 2;
+ uint64_t ofree : 1;
+ uint64_t itag : 1;
+ uint64_t otag : 2;
+ uint64_t bell : 2;
+ uint64_t cram : 2;
+ uint64_t mram : 2;
+ uint64_t reserved_44_63 : 20;
+#endif
+ } cn63xxp1;
+ struct cvmx_sriox_bist_status_s cn66xx;
+};
+typedef union cvmx_sriox_bist_status cvmx_sriox_bist_status_t;
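+
+/* Illustrative sketch (not part of the original SDK vendor header): check
+ * whether any SRIO memory failed BIST, assuming the usual Octeon convention
+ * that a set status bit indicates a failing RAM, plus the
+ * CVMX_SRIOX_BIST_STATUS(block_id) accessor defined earlier in this file and
+ * cvmx_read_csr() from cvmx.h. The mask keeps only the defined bits <44:0>.
+ */
+static inline int cvmx_sriox_example_bist_failed(unsigned long block_id)
+{
+ uint64_t bist = cvmx_read_csr(CVMX_SRIOX_BIST_STATUS(block_id));
+ return (bist & ((1ull << 45) - 1)) != 0; /* nonzero => some RAM failed */
+}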
+
+/**
+ * cvmx_srio#_imsg_ctrl
+ *
+ * SRIO_IMSG_CTRL = SRIO Incoming Message Control
+ *
+ * The SRIO Incoming Message Control Register
+ *
+ * Notes:
+ * RSP_THR should not typically be modified from reset value.
+ *
+ * Clk_Rst: SRIO(0,2..3)_IMSG_CTRL hclk hrst_n
+ */
+union cvmx_sriox_imsg_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t to_mode : 1; /**< MP message timeout mode:
+ - 0: The timeout counter gets reset whenever the
+ next sequential segment is received, regardless
+ of whether it is accepted
+ - 1: The timeout counter gets reset only when the
+ next sequential segment is received and
+ accepted */
+ uint64_t reserved_30_30 : 1;
+ uint64_t rsp_thr : 6; /**< Reserved */
+ uint64_t reserved_22_23 : 2;
+ uint64_t rp1_sid : 1; /**< Sets msg response priority for incoming messages
+ of priority 1 on the secondary ID (0=2, 1=3) */
+ uint64_t rp0_sid : 2; /**< Sets msg response priority for incoming messages
+ of priority 0 on the secondary ID (0,1=1, 2=2, 3=3) */
+ uint64_t rp1_pid : 1; /**< Sets msg response priority for incoming messages
+ of priority 1 on the primary ID (0=2, 1=3) */
+ uint64_t rp0_pid : 2; /**< Sets msg response priority for incoming messages
+ of priority 0 on the primary ID (0,1=1, 2=2, 3=3) */
+ uint64_t reserved_15_15 : 1;
+ uint64_t prt_sel : 3; /**< Port/Controller selection method:
+ - 0: Table lookup based on mailbox
+ - 1: Table lookup based on priority
+ - 2: Table lookup based on letter
+ - 3: Size-based (SP to port 0, MP to port 1)
+ - 4: ID-based (pri ID to port 0, sec ID to port 1) */
+ uint64_t lttr : 4; /**< Port/Controller selection letter table */
+ uint64_t prio : 4; /**< Port/Controller selection priority table */
+ uint64_t mbox : 4; /**< Port/Controller selection mailbox table */
+#else
+ uint64_t mbox : 4;
+ uint64_t prio : 4;
+ uint64_t lttr : 4;
+ uint64_t prt_sel : 3;
+ uint64_t reserved_15_15 : 1;
+ uint64_t rp0_pid : 2;
+ uint64_t rp1_pid : 1;
+ uint64_t rp0_sid : 2;
+ uint64_t rp1_sid : 1;
+ uint64_t reserved_22_23 : 2;
+ uint64_t rsp_thr : 6;
+ uint64_t reserved_30_30 : 1;
+ uint64_t to_mode : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sriox_imsg_ctrl_s cn63xx;
+ struct cvmx_sriox_imsg_ctrl_s cn63xxp1;
+ struct cvmx_sriox_imsg_ctrl_s cn66xx;
+};
+typedef union cvmx_sriox_imsg_ctrl cvmx_sriox_imsg_ctrl_t;
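+
+/* Illustrative sketch (not part of the original SDK vendor header): route
+ * incoming messages to a port/controller by mailbox. PRT_SEL=0 selects the
+ * MBOX lookup table; the value 0xC below assumes one selection bit per
+ * mailbox, steering mailboxes 2-3 to port 1 and 0-1 to port 0. Assumes the
+ * CVMX_SRIOX_IMSG_CTRL(block_id) accessor defined earlier in this file.
+ */
+static inline void cvmx_sriox_example_route_by_mbox(unsigned long block_id)
+{
+ cvmx_sriox_imsg_ctrl_t ctrl;
+ ctrl.u64 = cvmx_read_csr(CVMX_SRIOX_IMSG_CTRL(block_id));
+ ctrl.s.prt_sel = 0; /* table lookup based on mailbox */
+ ctrl.s.mbox = 0xC; /* assumed: one port-select bit per mailbox 0..3 */
+ cvmx_write_csr(CVMX_SRIOX_IMSG_CTRL(block_id), ctrl.u64);
+}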
+
+/**
+ * cvmx_srio#_imsg_inst_hdr#
+ *
+ * SRIO_IMSG_INST_HDRX = SRIO Incoming Message Packet Instruction Header
+ *
+ * The SRIO Port/Controller X Incoming Message Packet Instruction Header Register
+ *
+ * Notes:
+ * SRIO HW generates most of the SRIO_WORD1 fields from these values. SRIO_WORD1 is the 2nd of two
+ * header words that SRIO inserts in front of all received messages. SRIO_WORD1 may commonly be used
+ * as a PIP/IPD PKT_INST_HDR. This CSR matches the PIP/IPD PKT_INST_HDR format except for the QOS
+ * and GRP fields. SRIO*_IMSG_QOS_GRP*[QOS*,GRP*] supply the QOS and GRP fields.
+ *
+ * Clk_Rst: SRIO(0,2..3)_IMSG_INST_HDR[0:1] hclk hrst_n
+ */
+union cvmx_sriox_imsg_inst_hdrx {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_inst_hdrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t r : 1; /**< Port/Controller X R */
+ uint64_t reserved_58_62 : 5;
+ uint64_t pm : 2; /**< Port/Controller X PM */
+ uint64_t reserved_55_55 : 1;
+ uint64_t sl : 7; /**< Port/Controller X SL */
+ uint64_t reserved_46_47 : 2;
+ uint64_t nqos : 1; /**< Port/Controller X NQOS */
+ uint64_t ngrp : 1; /**< Port/Controller X NGRP */
+ uint64_t ntt : 1; /**< Port/Controller X NTT */
+ uint64_t ntag : 1; /**< Port/Controller X NTAG */
+ uint64_t reserved_35_41 : 7;
+ uint64_t rs : 1; /**< Port/Controller X RS */
+ uint64_t tt : 2; /**< Port/Controller X TT */
+ uint64_t tag : 32; /**< Port/Controller X TAG */
+#else
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t rs : 1;
+ uint64_t reserved_35_41 : 7;
+ uint64_t ntag : 1;
+ uint64_t ntt : 1;
+ uint64_t ngrp : 1;
+ uint64_t nqos : 1;
+ uint64_t reserved_46_47 : 2;
+ uint64_t sl : 7;
+ uint64_t reserved_55_55 : 1;
+ uint64_t pm : 2;
+ uint64_t reserved_58_62 : 5;
+ uint64_t r : 1;
+#endif
+ } s;
+ struct cvmx_sriox_imsg_inst_hdrx_s cn63xx;
+ struct cvmx_sriox_imsg_inst_hdrx_s cn63xxp1;
+ struct cvmx_sriox_imsg_inst_hdrx_s cn66xx;
+};
+typedef union cvmx_sriox_imsg_inst_hdrx cvmx_sriox_imsg_inst_hdrx_t;
+
+/**
+ * cvmx_srio#_imsg_qos_grp#
+ *
+ * SRIO_IMSG_QOS_GRPX = SRIO Incoming Message QOS/GRP Table
+ *
+ * The SRIO Incoming Message QOS/GRP Table Entry X
+ *
+ * Notes:
+ * The QOS/GRP table contains 32 entries with 8 QOS/GRP pairs per entry - 256 pairs total. HW
+ * selects the table entry by the concatenation of SRIO_WORD0[PRIO,DIS,MBOX], thus entry 0 is used
+ * for messages with PRIO=0,DIS=0,MBOX=0, entry 1 is for PRIO=0,DIS=0,MBOX=1, etc. HW selects the
+ * QOS/GRP pair from the table entry by the concatenation of SRIO_WORD0[ID,LETTER] as shown above. HW
+ * then inserts the QOS/GRP pair into SRIO_WORD1[QOS,GRP], which may commonly be used for the PIP/IPD
+ * PKT_INST_HDR[QOS,GRP] fields.
+ *
+ * Clk_Rst: SRIO(0,2..3)_IMSG_QOS_GRP[0:1] hclk hrst_n
+ */
+union cvmx_sriox_imsg_qos_grpx {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_qos_grpx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63 : 1;
+ uint64_t qos7 : 3; /**< Entry X:7 QOS (ID=1, LETTER=3) */
+ uint64_t grp7 : 4; /**< Entry X:7 GRP (ID=1, LETTER=3) */
+ uint64_t reserved_55_55 : 1;
+ uint64_t qos6 : 3; /**< Entry X:6 QOS (ID=1, LETTER=2) */
+ uint64_t grp6 : 4; /**< Entry X:6 GRP (ID=1, LETTER=2) */
+ uint64_t reserved_47_47 : 1;
+ uint64_t qos5 : 3; /**< Entry X:5 QOS (ID=1, LETTER=1) */
+ uint64_t grp5 : 4; /**< Entry X:5 GRP (ID=1, LETTER=1) */
+ uint64_t reserved_39_39 : 1;
+ uint64_t qos4 : 3; /**< Entry X:4 QOS (ID=1, LETTER=0) */
+ uint64_t grp4 : 4; /**< Entry X:4 GRP (ID=1, LETTER=0) */
+ uint64_t reserved_31_31 : 1;
+ uint64_t qos3 : 3; /**< Entry X:3 QOS (ID=0, LETTER=3) */
+ uint64_t grp3 : 4; /**< Entry X:3 GRP (ID=0, LETTER=3) */
+ uint64_t reserved_23_23 : 1;
+ uint64_t qos2 : 3; /**< Entry X:2 QOS (ID=0, LETTER=2) */
+ uint64_t grp2 : 4; /**< Entry X:2 GRP (ID=0, LETTER=2) */
+ uint64_t reserved_15_15 : 1;
+ uint64_t qos1 : 3; /**< Entry X:1 QOS (ID=0, LETTER=1) */
+ uint64_t grp1 : 4; /**< Entry X:1 GRP (ID=0, LETTER=1) */
+ uint64_t reserved_7_7 : 1;
+ uint64_t qos0 : 3; /**< Entry X:0 QOS (ID=0, LETTER=0) */
+ uint64_t grp0 : 4; /**< Entry X:0 GRP (ID=0, LETTER=0) */
+#else
+ uint64_t grp0 : 4;
+ uint64_t qos0 : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t grp1 : 4;
+ uint64_t qos1 : 3;
+ uint64_t reserved_15_15 : 1;
+ uint64_t grp2 : 4;
+ uint64_t qos2 : 3;
+ uint64_t reserved_23_23 : 1;
+ uint64_t grp3 : 4;
+ uint64_t qos3 : 3;
+ uint64_t reserved_31_31 : 1;
+ uint64_t grp4 : 4;
+ uint64_t qos4 : 3;
+ uint64_t reserved_39_39 : 1;
+ uint64_t grp5 : 4;
+ uint64_t qos5 : 3;
+ uint64_t reserved_47_47 : 1;
+ uint64_t grp6 : 4;
+ uint64_t qos6 : 3;
+ uint64_t reserved_55_55 : 1;
+ uint64_t grp7 : 4;
+ uint64_t qos7 : 3;
+ uint64_t reserved_63_63 : 1;
+#endif
+ } s;
+ struct cvmx_sriox_imsg_qos_grpx_s cn63xx;
+ struct cvmx_sriox_imsg_qos_grpx_s cn63xxp1;
+ struct cvmx_sriox_imsg_qos_grpx_s cn66xx;
+};
+typedef union cvmx_sriox_imsg_qos_grpx cvmx_sriox_imsg_qos_grpx_t;
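+
+/* Illustrative sketch (not part of the original SDK vendor header): the
+ * index arithmetic from the note above. PRIO(2b):DIS(1b):MBOX(2b) select one
+ * of the 32 table entries (so PRIO=0,DIS=0,MBOX=1 is entry 1), and
+ * ID(1b):LETTER(2b) select one of the 8 QOS/GRP pairs within that entry.
+ */
+static inline unsigned cvmx_sriox_example_qos_grp_entry(unsigned prio,
+ unsigned dis, unsigned mbox)
+{
+ return ((prio & 3) << 3) | ((dis & 1) << 2) | (mbox & 3); /* 0..31 */
+}
+static inline unsigned cvmx_sriox_example_qos_grp_pair(unsigned id,
+ unsigned letter)
+{
+ return ((id & 1) << 2) | (letter & 3); /* 0..7, matches QOS0..QOS7 */
+}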
+
+/**
+ * cvmx_srio#_imsg_status#
+ *
+ * SRIO_IMSG_STATUSX = SRIO Incoming Message Status Table
+ *
+ * The SRIO Incoming Message Status Table Entry X
+ *
+ * Notes:
+ * Clk_Rst: SRIO(0,2..3)_IMSG_STATUS[0:1] hclk hrst_n
+ *
+ */
+union cvmx_sriox_imsg_statusx {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_statusx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t val1 : 1; /**< Entry X:1 Valid */
+ uint64_t err1 : 1; /**< Entry X:1 Error */
+ uint64_t toe1 : 1; /**< Entry X:1 Timeout Error */
+ uint64_t toc1 : 1; /**< Entry X:1 Timeout Count */
+ uint64_t prt1 : 1; /**< Entry X:1 Port */
+ uint64_t reserved_58_58 : 1;
+ uint64_t tt1 : 1; /**< Entry X:1 TT ID */
+ uint64_t dis1 : 1; /**< Entry X:1 Dest ID */
+ uint64_t seg1 : 4; /**< Entry X:1 Next Segment */
+ uint64_t mbox1 : 2; /**< Entry X:1 Mailbox */
+ uint64_t lttr1 : 2; /**< Entry X:1 Letter */
+ uint64_t sid1 : 16; /**< Entry X:1 Source ID */
+ uint64_t val0 : 1; /**< Entry X:0 Valid */
+ uint64_t err0 : 1; /**< Entry X:0 Error */
+ uint64_t toe0 : 1; /**< Entry X:0 Timeout Error */
+ uint64_t toc0 : 1; /**< Entry X:0 Timeout Count */
+ uint64_t prt0 : 1; /**< Entry X:0 Port */
+ uint64_t reserved_26_26 : 1;
+ uint64_t tt0 : 1; /**< Entry X:0 TT ID */
+ uint64_t dis0 : 1; /**< Entry X:0 Dest ID */
+ uint64_t seg0 : 4; /**< Entry X:0 Next Segment */
+ uint64_t mbox0 : 2; /**< Entry X:0 Mailbox */
+ uint64_t lttr0 : 2; /**< Entry X:0 Letter */
+ uint64_t sid0 : 16; /**< Entry X:0 Source ID */
+#else
+ uint64_t sid0 : 16;
+ uint64_t lttr0 : 2;
+ uint64_t mbox0 : 2;
+ uint64_t seg0 : 4;
+ uint64_t dis0 : 1;
+ uint64_t tt0 : 1;
+ uint64_t reserved_26_26 : 1;
+ uint64_t prt0 : 1;
+ uint64_t toc0 : 1;
+ uint64_t toe0 : 1;
+ uint64_t err0 : 1;
+ uint64_t val0 : 1;
+ uint64_t sid1 : 16;
+ uint64_t lttr1 : 2;
+ uint64_t mbox1 : 2;
+ uint64_t seg1 : 4;
+ uint64_t dis1 : 1;
+ uint64_t tt1 : 1;
+ uint64_t reserved_58_58 : 1;
+ uint64_t prt1 : 1;
+ uint64_t toc1 : 1;
+ uint64_t toe1 : 1;
+ uint64_t err1 : 1;
+ uint64_t val1 : 1;
+#endif
+ } s;
+ struct cvmx_sriox_imsg_statusx_s cn63xx;
+ struct cvmx_sriox_imsg_statusx_s cn63xxp1;
+ struct cvmx_sriox_imsg_statusx_s cn66xx;
+};
+typedef union cvmx_sriox_imsg_statusx cvmx_sriox_imsg_statusx_t;
+
+/**
+ * cvmx_srio#_imsg_vport_thr
+ *
+ * SRIO_IMSG_VPORT_THR = SRIO Incoming Message Virtual Port Threshold
+ *
+ * The SRIO Incoming Message Virtual Port Threshold Register
+ *
+ * Notes:
+ * SRIO0_IMSG_VPORT_THR.MAX_TOT must be >= SRIO0_IMSG_VPORT_THR.BUF_THR
+ * + SRIO2_IMSG_VPORT_THR.BUF_THR + SRIO3_IMSG_VPORT_THR.BUF_THR. This register can be accessed
+ * regardless of the value in SRIO(0,2..3)_STATUS_REG.ACCESS and is not affected by MAC reset. The maximum
+ * number of VPORTs allocated to a MAC is limited to 46 if QLM0 is configured to x2 or x4 mode and 44
+ * if configured in x1 mode.
+ *
+ * Clk_Rst: SRIO(0,2..3)_IMSG_VPORT_THR sclk srst_n
+ */
+union cvmx_sriox_imsg_vport_thr {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_vport_thr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t max_tot : 6; /**< Sets max number of vports available to the chip
+ This field is only used in SRIO0. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t max_s1 : 6; /**< Reserved
+ This field is only used in SRIO0. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t max_s0 : 6; /**< Sets max number of vports available to SRIO0
+ This field is only used in SRIO0. */
+ uint64_t sp_vport : 1; /**< Single-segment vport pre-allocation.
+ When set, single-segment messages use pre-allocated
+ vport slots (that do not count toward thresholds).
+ When clear, single-segment messages must allocate
+ vport slots just like multi-segment messages do. */
+ uint64_t reserved_20_30 : 11;
+ uint64_t buf_thr : 4; /**< Sets number of vports to be buffered by this
+ interface. BUF_THR must not be zero when receiving
+ messages. The max BUF_THR value is 8.
+ Recommend BUF_THR values 1-4. If the 46 available
+ vports are not statically-allocated across the two
+ SRIO's, smaller BUF_THR values may leave more
+ vports available for the other SRIO. Lack of a
+ buffered vport can force a retry for a received
+ first segment, so, particularly if SP_VPORT=0
+ (which is not recommended) or the segment size is
+ small, larger BUF_THR values may improve
+ performance. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t max_p1 : 6; /**< Sets max number of open vports in port 1 */
+ uint64_t reserved_6_7 : 2;
+ uint64_t max_p0 : 6; /**< Sets max number of open vports in port 0 */
+#else
+ uint64_t max_p0 : 6;
+ uint64_t reserved_6_7 : 2;
+ uint64_t max_p1 : 6;
+ uint64_t reserved_14_15 : 2;
+ uint64_t buf_thr : 4;
+ uint64_t reserved_20_30 : 11;
+ uint64_t sp_vport : 1;
+ uint64_t max_s0 : 6;
+ uint64_t reserved_38_39 : 2;
+ uint64_t max_s1 : 6;
+ uint64_t reserved_46_47 : 2;
+ uint64_t max_tot : 6;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_sriox_imsg_vport_thr_s cn63xx;
+ struct cvmx_sriox_imsg_vport_thr_s cn63xxp1;
+ struct cvmx_sriox_imsg_vport_thr_s cn66xx;
+};
+typedef union cvmx_sriox_imsg_vport_thr cvmx_sriox_imsg_vport_thr_t;
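+
+/* Illustrative sketch (not part of the original SDK vendor header): a worked
+ * check of the constraint above. With BUF_THR=4 on each of SRIO0, SRIO2 and
+ * SRIO3, SRIO0's MAX_TOT must be at least 4+4+4 = 12. Assumes the
+ * CVMX_SRIOX_IMSG_VPORT_THR(block_id) accessor defined earlier in this file
+ * and cvmx_read_csr() from cvmx.h.
+ */
+static inline int cvmx_sriox_example_vport_thr_ok(void)
+{
+ cvmx_sriox_imsg_vport_thr_t t0, t2, t3;
+ t0.u64 = cvmx_read_csr(CVMX_SRIOX_IMSG_VPORT_THR(0));
+ t2.u64 = cvmx_read_csr(CVMX_SRIOX_IMSG_VPORT_THR(2));
+ t3.u64 = cvmx_read_csr(CVMX_SRIOX_IMSG_VPORT_THR(3));
+ return t0.s.max_tot >= t0.s.buf_thr + t2.s.buf_thr + t3.s.buf_thr;
+}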
+
+/**
+ * cvmx_srio#_imsg_vport_thr2
+ *
+ * SRIO_IMSG_VPORT_THR2 = SRIO Incoming Message Virtual Port Additional Threshold
+ *
+ * The SRIO Incoming Message Virtual Port Additional Threshold Register
+ *
+ * Notes:
+ * Additional vport thresholds for SRIO MACs 2 and 3. This register is only used in SRIO0 and is only
+ * used when the QLM0 is configured as x1 lanes or x2 lanes. In the x1 case the maximum number of
+ * VPORTs is limited to 44. In the x2 case the maximum number of VPORTs is limited to 46. These
+ * values are ignored in the x4 configuration. This register can be accessed regardless of the value
+ * in SRIO(0,2..3)_STATUS_REG.ACCESS and is not affected by MAC reset.
+ *
+ * Clk_Rst: SRIO(0,2..3)_IMSG_VPORT_THR sclk srst_n
+ */
+union cvmx_sriox_imsg_vport_thr2 {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_vport_thr2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t max_s3 : 6; /**< Sets max number of vports available to SRIO3
+ This field is only used in SRIO0. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t max_s2 : 6; /**< Sets max number of vports available to SRIO2
+ This field is only used in SRIO0. */
+ uint64_t reserved_0_31 : 32;
+#else
+ uint64_t reserved_0_31 : 32;
+ uint64_t max_s2 : 6;
+ uint64_t reserved_38_39 : 2;
+ uint64_t max_s3 : 6;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } s;
+ struct cvmx_sriox_imsg_vport_thr2_s cn66xx;
+};
+typedef union cvmx_sriox_imsg_vport_thr2 cvmx_sriox_imsg_vport_thr2_t;
+
+/**
+ * cvmx_srio#_int2_enable
+ *
+ * SRIO_INT2_ENABLE = SRIO Interrupt 2 Enable
+ *
+ * Allows SRIO to generate additional interrupts when the corresponding enable bit is set.
+ *
+ * Notes:
+ * This register enables interrupts in SRIO(0,2..3)_INT2_REG that can be asserted while the MAC is in reset.
+ * The register can be accessed/modified regardless of the value of SRIO(0,2..3)_STATUS_REG.ACCESS.
+ *
+ * Clk_Rst: SRIO(0,2..3)_INT2_ENABLE sclk srst_n
+ */
+union cvmx_sriox_int2_enable {
+ uint64_t u64;
+ struct cvmx_sriox_int2_enable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t pko_rst : 1; /**< PKO Reset Error Enable */
+#else
+ uint64_t pko_rst : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_sriox_int2_enable_s cn63xx;
+ struct cvmx_sriox_int2_enable_s cn66xx;
+};
+typedef union cvmx_sriox_int2_enable cvmx_sriox_int2_enable_t;
+
+/**
+ * cvmx_srio#_int2_reg
+ *
+ * SRIO_INT2_REG = SRIO Interrupt 2 Register
+ *
+ * Displays and clears which enabled interrupts have occurred
+ *
+ * Notes:
+ * This register provides interrupt status. Unlike SRIO*_INT_REG, SRIO*_INT2_REG can be accessed
+ * whenever the SRIO is present, regardless of whether the corresponding SRIO is in reset or not.
+ * INT_SUM shows the status of the interrupts in SRIO(0,2..3)_INT_REG. Any set bits written to this
+ * register clear the corresponding interrupt. The register can be accessed/modified regardless of
+ * the value of SRIO(0,2..3)_STATUS_REG.ACCESS and probably should be the first register read when an SRIO
+ * interrupt occurs.
+ *
+ * Clk_Rst: SRIO(0,2..3)_INT2_REG sclk srst_n
+ */
+union cvmx_sriox_int2_reg {
+ uint64_t u64;
+ struct cvmx_sriox_int2_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t int_sum : 1; /**< Interrupt Set and Enabled in SRIO(0,2..3)_INT_REG */
+ uint64_t reserved_1_30 : 30;
+ uint64_t pko_rst : 1; /**< PKO Reset Error - Message Received from PKO while
+ MAC in reset. */
+#else
+ uint64_t pko_rst : 1;
+ uint64_t reserved_1_30 : 30;
+ uint64_t int_sum : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sriox_int2_reg_s cn63xx;
+ struct cvmx_sriox_int2_reg_s cn66xx;
+};
+typedef union cvmx_sriox_int2_reg cvmx_sriox_int2_reg_t;
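+
+/* Illustrative sketch (not part of the original SDK vendor header): per the
+ * note above, SRIO*_INT2_REG is readable even while the MAC is in reset and
+ * is a sensible first read in an interrupt handler; INT_SUM then indicates
+ * whether SRIO*_INT_REG holds further detail. Assumes the
+ * CVMX_SRIOX_INT2_REG(block_id) accessor defined earlier in this file.
+ */
+static inline void cvmx_sriox_example_ack_int2(unsigned long block_id)
+{
+ cvmx_sriox_int2_reg_t int2;
+ int2.u64 = cvmx_read_csr(CVMX_SRIOX_INT2_REG(block_id));
+ if (int2.s.pko_rst) /* write-1-to-clear, per the note above */
+ cvmx_write_csr(CVMX_SRIOX_INT2_REG(block_id), int2.u64);
+ /* if (int2.s.int_sum) ... service SRIO*_INT_REG next */
+}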
+
+/**
+ * cvmx_srio#_int_enable
+ *
+ * SRIO_INT_ENABLE = SRIO Interrupt Enable
+ *
+ * Allows SRIO to generate interrupts when the corresponding enable bit is set.
+ *
+ * Notes:
+ * This register enables interrupts.
+ *
+ * Clk_Rst: SRIO(0,2..3)_INT_ENABLE hclk hrst_n
+ */
+union cvmx_sriox_int_enable {
+ uint64_t u64;
+ struct cvmx_sriox_int_enable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t zero_pkt : 1; /**< Received Incoming SRIO Zero byte packet */
+ uint64_t ttl_tout : 1; /**< Outgoing Packet Time to Live Timeout */
+ uint64_t fail : 1; /**< ERB Error Rate reached Fail Count */
+ uint64_t degrade : 1; /**< ERB Error Rate reached Degrade Count */
+ uint64_t mac_buf : 1; /**< SRIO MAC Buffer CRC Error */
+ uint64_t f_error : 1; /**< SRIO Fatal Port Error (MAC reset required) */
+ uint64_t rtry_err : 1; /**< Outbound Message Retry Threshold Exceeded */
+ uint64_t pko_err : 1; /**< Outbound Message Received PKO Error */
+ uint64_t omsg_err : 1; /**< Outbound Message Invalid Descriptor Error */
+ uint64_t omsg1 : 1; /**< Controller 1 Outbound Message Complete */
+ uint64_t omsg0 : 1; /**< Controller 0 Outbound Message Complete */
+ uint64_t link_up : 1; /**< Serial Link going from Inactive to Active */
+ uint64_t link_dwn : 1; /**< Serial Link going from Active to Inactive */
+ uint64_t phy_erb : 1; /**< Physical Layer Error detected in ERB */
+ uint64_t log_erb : 1; /**< Logical/Transport Layer Error detected in ERB */
+ uint64_t soft_rx : 1; /**< Incoming Packet received by Soft Packet FIFO */
+ uint64_t soft_tx : 1; /**< Outgoing Packet sent by Soft Packet FIFO */
+ uint64_t mce_rx : 1; /**< Incoming Multicast Event Symbol */
+ uint64_t mce_tx : 1; /**< Outgoing Multicast Event Transmit Complete */
+ uint64_t wr_done : 1; /**< Outgoing Last Nwrite_R DONE Response Received. */
+ uint64_t sli_err : 1; /**< Unsupported S2M Transaction Received. */
+ uint64_t deny_wr : 1; /**< Incoming Maint_Wr Access to Denied Bar Registers. */
+ uint64_t bar_err : 1; /**< Incoming Access Crossing/Missing BAR Address */
+ uint64_t maint_op : 1; /**< Internal Maintenance Operation Complete. */
+ uint64_t rxbell : 1; /**< One or more Incoming Doorbells Received. */
+ uint64_t bell_err : 1; /**< Outgoing Doorbell Timeout, Retry or Error. */
+ uint64_t txbell : 1; /**< Outgoing Doorbell Complete. */
+#else
+ uint64_t txbell : 1;
+ uint64_t bell_err : 1;
+ uint64_t rxbell : 1;
+ uint64_t maint_op : 1;
+ uint64_t bar_err : 1;
+ uint64_t deny_wr : 1;
+ uint64_t sli_err : 1;
+ uint64_t wr_done : 1;
+ uint64_t mce_tx : 1;
+ uint64_t mce_rx : 1;
+ uint64_t soft_tx : 1;
+ uint64_t soft_rx : 1;
+ uint64_t log_erb : 1;
+ uint64_t phy_erb : 1;
+ uint64_t link_dwn : 1;
+ uint64_t link_up : 1;
+ uint64_t omsg0 : 1;
+ uint64_t omsg1 : 1;
+ uint64_t omsg_err : 1;
+ uint64_t pko_err : 1;
+ uint64_t rtry_err : 1;
+ uint64_t f_error : 1;
+ uint64_t mac_buf : 1;
+ uint64_t degrade : 1;
+ uint64_t fail : 1;
+ uint64_t ttl_tout : 1;
+ uint64_t zero_pkt : 1;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } s;
+ struct cvmx_sriox_int_enable_s cn63xx;
+ struct cvmx_sriox_int_enable_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t f_error : 1; /**< SRIO Fatal Port Error (MAC reset required) */
+ uint64_t rtry_err : 1; /**< Outbound Message Retry Threshold Exceeded */
+ uint64_t pko_err : 1; /**< Outbound Message Received PKO Error */
+ uint64_t omsg_err : 1; /**< Outbound Message Invalid Descriptor Error */
+ uint64_t omsg1 : 1; /**< Controller 1 Outbound Message Complete */
+ uint64_t omsg0 : 1; /**< Controller 0 Outbound Message Complete */
+ uint64_t link_up : 1; /**< Serial Link going from Inactive to Active */
+ uint64_t link_dwn : 1; /**< Serial Link going from Active to Inactive */
+ uint64_t phy_erb : 1; /**< Physical Layer Error detected in ERB */
+ uint64_t log_erb : 1; /**< Logical/Transport Layer Error detected in ERB */
+ uint64_t soft_rx : 1; /**< Incoming Packet received by Soft Packet FIFO */
+ uint64_t soft_tx : 1; /**< Outgoing Packet sent by Soft Packet FIFO */
+ uint64_t mce_rx : 1; /**< Incoming Multicast Event Symbol */
+ uint64_t mce_tx : 1; /**< Outgoing Multicast Event Transmit Complete */
+ uint64_t wr_done : 1; /**< Outgoing Last Nwrite_R DONE Response Received. */
+ uint64_t sli_err : 1; /**< Unsupported S2M Transaction Received. */
+ uint64_t deny_wr : 1; /**< Incoming Maint_Wr Access to Denied Bar Registers. */
+ uint64_t bar_err : 1; /**< Incoming Access Crossing/Missing BAR Address */
+ uint64_t maint_op : 1; /**< Internal Maintenance Operation Complete. */
+ uint64_t rxbell : 1; /**< One or more Incoming Doorbells Received. */
+ uint64_t bell_err : 1; /**< Outgoing Doorbell Timeout, Retry or Error. */
+ uint64_t txbell : 1; /**< Outgoing Doorbell Complete. */
+#else
+ uint64_t txbell : 1;
+ uint64_t bell_err : 1;
+ uint64_t rxbell : 1;
+ uint64_t maint_op : 1;
+ uint64_t bar_err : 1;
+ uint64_t deny_wr : 1;
+ uint64_t sli_err : 1;
+ uint64_t wr_done : 1;
+ uint64_t mce_tx : 1;
+ uint64_t mce_rx : 1;
+ uint64_t soft_tx : 1;
+ uint64_t soft_rx : 1;
+ uint64_t log_erb : 1;
+ uint64_t phy_erb : 1;
+ uint64_t link_dwn : 1;
+ uint64_t link_up : 1;
+ uint64_t omsg0 : 1;
+ uint64_t omsg1 : 1;
+ uint64_t omsg_err : 1;
+ uint64_t pko_err : 1;
+ uint64_t rtry_err : 1;
+ uint64_t f_error : 1;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } cn63xxp1;
+ struct cvmx_sriox_int_enable_s cn66xx;
+};
+typedef union cvmx_sriox_int_enable cvmx_sriox_int_enable_t;
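+
+/* Illustrative sketch (not part of the original SDK vendor header): enable
+ * just the doorbell-related interrupts. Assumes the
+ * CVMX_SRIOX_INT_ENABLE(block_id) accessor defined earlier in this file and
+ * that the MAC is out of reset (this CSR is on hclk/hrst_n, unlike
+ * SRIO*_INT2_ENABLE).
+ */
+static inline void cvmx_sriox_example_enable_bell_ints(unsigned long block_id)
+{
+ cvmx_sriox_int_enable_t en;
+ en.u64 = cvmx_read_csr(CVMX_SRIOX_INT_ENABLE(block_id));
+ en.s.rxbell = 1; /* incoming doorbell(s) received */
+ en.s.txbell = 1; /* outgoing doorbell complete */
+ en.s.bell_err = 1; /* outgoing doorbell timeout/retry/error */
+ cvmx_write_csr(CVMX_SRIOX_INT_ENABLE(block_id), en.u64);
+}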
+
+/**
+ * cvmx_srio#_int_info0
+ *
+ * SRIO_INT_INFO0 = SRIO Interrupt Information
+ *
+ * The SRIO Interrupt Information
+ *
+ * Notes:
+ * This register contains the first header word of the illegal S2M transaction associated with the
+ * SLI_ERR interrupt. The remaining information is located in SRIO(0,2..3)_INT_INFO1. This register is
+ * only updated when the SLI_ERR is initially detected. Once the interrupt is cleared then
+ * additional information can be captured.
+ * Common Errors Include:
+ * 1. Load/Stores with Length over 32
+ * 2. Load/Stores that translate to Maintenance Ops with a length over 8
+ * 3. Load Ops that translate to Atomic Ops with other than 1, 2 and 4 byte accesses
+ * 4. Load/Store Ops with a Length 0
+ * 5. Unexpected Responses
+ *
+ * Clk_Rst: SRIO(0,2..3)_INT_REG hclk hrst_n
+ */
+union cvmx_sriox_int_info0 {
+ uint64_t u64;
+ struct cvmx_sriox_int_info0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cmd : 4; /**< Command
+ 0 = Load, Outgoing Read Request
+ 4 = Store, Outgoing Write Request
+ 8 = Response, Outgoing Read Response
+ All Others are reserved and generate errors */
+ uint64_t type : 4; /**< Command Type
+ Load/Store SRIO_S2M_TYPE used
+ Response (Reserved) */
+ uint64_t tag : 8; /**< Internal Transaction Number */
+ uint64_t reserved_42_47 : 6;
+ uint64_t length : 10; /**< Data Length in 64-bit Words (Load/Store Only) */
+ uint64_t status : 3; /**< Response Status
+ 0 = Success
+ 1 = Error
+ All others reserved */
+ uint64_t reserved_16_28 : 13;
+ uint64_t be0 : 8; /**< First 64-bit Word Byte Enables (Load/Store Only) */
+ uint64_t be1 : 8; /**< Last 64-bit Word Byte Enables (Load/Store Only) */
+#else
+ uint64_t be1 : 8;
+ uint64_t be0 : 8;
+ uint64_t reserved_16_28 : 13;
+ uint64_t status : 3;
+ uint64_t length : 10;
+ uint64_t reserved_42_47 : 6;
+ uint64_t tag : 8;
+ uint64_t type : 4;
+ uint64_t cmd : 4;
+#endif
+ } s;
+ struct cvmx_sriox_int_info0_s cn63xx;
+ struct cvmx_sriox_int_info0_s cn63xxp1;
+ struct cvmx_sriox_int_info0_s cn66xx;
+};
+typedef union cvmx_sriox_int_info0 cvmx_sriox_int_info0_t;
+
+/**
+ * cvmx_srio#_int_info1
+ *
+ * SRIO_INT_INFO1 = SRIO Interrupt Information
+ *
+ * The SRIO Interrupt Information
+ *
+ * Notes:
+ * This register contains the second header word of the illegal S2M transaction associated with the
+ * SLI_ERR interrupt. The remaining information is located in SRIO(0,2..3)_INT_INFO0. This register is
+ * only updated when the SLI_ERR is initially detected. Once the interrupt is cleared then
+ * additional information can be captured.
+ *
+ * Clk_Rst: SRIO(0,2..3)_INT_REG hclk hrst_n
+ */
+union cvmx_sriox_int_info1 {
+ uint64_t u64;
+ struct cvmx_sriox_int_info1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t info1 : 64; /**< Address (Load/Store) or First 64-bit Word of
+ Response Data Associated with Interrupt */
+#else
+ uint64_t info1 : 64;
+#endif
+ } s;
+ struct cvmx_sriox_int_info1_s cn63xx;
+ struct cvmx_sriox_int_info1_s cn63xxp1;
+ struct cvmx_sriox_int_info1_s cn66xx;
+};
+typedef union cvmx_sriox_int_info1 cvmx_sriox_int_info1_t;
+
+/**
+ * cvmx_srio#_int_info2
+ *
+ * SRIO_INT_INFO2 = SRIO Interrupt Information
+ *
+ * The SRIO Interrupt Information
+ *
+ * Notes:
+ * This register contains the invalid outbound message descriptor associated with the OMSG_ERR
+ * interrupt. This register is only updated when the OMSG_ERR is initially detected. Once the
+ * interrupt is cleared then additional information can be captured.
+ *
+ * Clk_Rst: SRIO(0,2..3)_INT_REG hclk hrst_n
+ */
+union cvmx_sriox_int_info2 {
+ uint64_t u64;
+ struct cvmx_sriox_int_info2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prio : 2; /**< PRIO field of outbound message descriptor
+ associated with the OMSG_ERR interrupt */
+ uint64_t tt : 1; /**< TT field of outbound message descriptor
+ associated with the OMSG_ERR interrupt */
+ uint64_t sis : 1; /**< SIS field of outbound message descriptor
+ associated with the OMSG_ERR interrupt */
+ uint64_t ssize : 4; /**< SSIZE field of outbound message descriptor
+ associated with the OMSG_ERR interrupt */
+ uint64_t did : 16; /**< DID field of outbound message descriptor
+ associated with the OMSG_ERR interrupt */
+ uint64_t xmbox : 4; /**< XMBOX field of outbound message descriptor
+ associated with the OMSG_ERR interrupt */
+ uint64_t mbox : 2; /**< MBOX field of outbound message descriptor
+ associated with the OMSG_ERR interrupt */
+ uint64_t letter : 2; /**< LETTER field of outbound message descriptor
+ associated with the OMSG_ERR interrupt */
+ uint64_t rsrvd : 30; /**< RSRVD field of outbound message descriptor
+ associated with the OMSG_ERR interrupt */
+ uint64_t lns : 1; /**< LNS field of outbound message descriptor
+ associated with the OMSG_ERR interrupt */
+ uint64_t intr : 1; /**< INT field of outbound message descriptor
+ associated with the OMSG_ERR interrupt */
+#else
+ uint64_t intr : 1;
+ uint64_t lns : 1;
+ uint64_t rsrvd : 30;
+ uint64_t letter : 2;
+ uint64_t mbox : 2;
+ uint64_t xmbox : 4;
+ uint64_t did : 16;
+ uint64_t ssize : 4;
+ uint64_t sis : 1;
+ uint64_t tt : 1;
+ uint64_t prio : 2;
+#endif
+ } s;
+ struct cvmx_sriox_int_info2_s cn63xx;
+ struct cvmx_sriox_int_info2_s cn63xxp1;
+ struct cvmx_sriox_int_info2_s cn66xx;
+};
+typedef union cvmx_sriox_int_info2 cvmx_sriox_int_info2_t;
+
+/**
+ * cvmx_srio#_int_info3
+ *
+ * SRIO_INT_INFO3 = SRIO Interrupt Information
+ *
+ * The SRIO Interrupt Information
+ *
+ * Notes:
+ * This register contains the retry response associated with the RTRY_ERR interrupt. This register
+ * is only updated when the RTRY_ERR is initially detected. Once the interrupt is cleared then
+ * additional information can be captured.
+ *
+ * Clk_Rst: SRIO(0,2..3)_INT_REG hclk hrst_n
+ */
+union cvmx_sriox_int_info3 {
+ uint64_t u64;
+ struct cvmx_sriox_int_info3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prio : 2; /**< Priority of received retry response message */
+ uint64_t tt : 2; /**< TT of received retry response message */
+ uint64_t type : 4; /**< Type of received retry response message
+ (should be 13) */
+ uint64_t other : 48; /**< Other fields of received retry response message
+ If TT==0 (8-bit ID's)
+ OTHER<47:40> => destination ID
+ OTHER<39:32> => source ID
+ OTHER<31:28> => transaction (should be 1 - msg)
+ OTHER<27:24> => status (should be 3 - retry)
+ OTHER<23:22> => letter
+ OTHER<21:20> => mbox
+ OTHER<19:16> => msgseg
+ OTHER<15:0> => unused
+ If TT==1 (16-bit ID's)
+ OTHER<47:32> => destination ID
+ OTHER<31:16> => source ID
+ OTHER<15:12> => transaction (should be 1 - msg)
+ OTHER<11:8> => status (should be 3 - retry)
+ OTHER<7:6> => letter
+ OTHER<5:4> => mbox
+ OTHER<3:0> => msgseg */
+ uint64_t reserved_0_7 : 8;
+#else
+ uint64_t reserved_0_7 : 8;
+ uint64_t other : 48;
+ uint64_t type : 4;
+ uint64_t tt : 2;
+ uint64_t prio : 2;
+#endif
+ } s;
+ struct cvmx_sriox_int_info3_s cn63xx;
+ struct cvmx_sriox_int_info3_s cn63xxp1;
+ struct cvmx_sriox_int_info3_s cn66xx;
+};
+typedef union cvmx_sriox_int_info3 cvmx_sriox_int_info3_t;
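+
+/* Illustrative sketch (not part of the original SDK vendor header): extract
+ * the source ID of the captured retry response using the TT-dependent layout
+ * listed above (8-bit IDs sit in OTHER<39:32> when TT==0, 16-bit IDs in
+ * OTHER<31:16> when TT==1).
+ */
+static inline unsigned cvmx_sriox_example_retry_src_id(cvmx_sriox_int_info3_t info)
+{
+ if (info.s.tt == 0)
+ return (info.s.other >> 32) & 0xFF; /* 8-bit source ID */
+ else
+ return (info.s.other >> 16) & 0xFFFF; /* 16-bit source ID */
+}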
+
+/**
+ * cvmx_srio#_int_reg
+ *
+ * SRIO_INT_REG = SRIO Interrupt Register
+ *
+ * Displays and clears which enabled interrupts have occurred
+ *
+ * Notes:
+ * This register provides interrupt status. Like most SRIO CSRs, this register can only
+ * be read/written when the corresponding SRIO is both present and not in reset. (SRIO*_INT2_REG
+ * can be accessed when SRIO is in reset.) Any set bits written to this register clear the
+ * corresponding interrupt. The RXBELL interrupt is cleared by reading all the entries in the
+ * incoming Doorbell FIFO. The LOG_ERB interrupt must be cleared before writing zeroes
+ * to clear the bits in the SRIOMAINT*_ERB_LT_ERR_DET register. Otherwise a new interrupt may be
+ * lost. The PHY_ERB interrupt must be cleared before writing a zero to
+ * SRIOMAINT*_ERB_ATTR_CAPT[VALID]. Otherwise, a new interrupt may be lost. OMSG_ERR is set when an
+ * invalid outbound message descriptor is received. The descriptor is deemed to be invalid if the
+ * SSIZE field is set to a reserved value, the SSIZE field combined with the packet length would
+ * result in more than 16 message segments, or the packet only contains a descriptor (no data).
+ *
+ * Clk_Rst: SRIO(0,2..3)_INT_REG hclk hrst_n
+ */
+union cvmx_sriox_int_reg {
+ uint64_t u64;
+ struct cvmx_sriox_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t int2_sum : 1; /**< Interrupt Set and Enabled in SRIO(0,2..3)_INT2_REG */
+ uint64_t reserved_27_30 : 4;
+ uint64_t zero_pkt : 1; /**< Received Incoming SRIO Zero byte packet */
+ uint64_t ttl_tout : 1; /**< Outgoing Packet Time to Live Timeout
+ See SRIOMAINT(0,2..3)_DROP_PACKET */
+ uint64_t fail : 1; /**< ERB Error Rate reached Fail Count
+ See SRIOMAINT(0,2..3)_ERB_ERR_RATE */
+ uint64_t degrad : 1; /**< ERB Error Rate reached Degrade Count
+ See SRIOMAINT(0,2..3)_ERB_ERR_RATE */
+ uint64_t mac_buf : 1; /**< SRIO MAC Buffer CRC Error
+ See SRIO(0,2..3)_MAC_BUFFERS */
+ uint64_t f_error : 1; /**< SRIO Fatal Port Error (MAC reset required) */
+ uint64_t rtry_err : 1; /**< Outbound Message Retry Threshold Exceeded
+ See SRIO(0,2..3)_INT_INFO3
+ When one or more of the segments in an outgoing
+ message have a RTRY_ERR, SRIO will not set
+ OMSG* after the message "transfer". */
+ uint64_t pko_err : 1; /**< Outbound Message Received PKO Error */
+ uint64_t omsg_err : 1; /**< Outbound Message Invalid Descriptor Error
+ See SRIO(0,2..3)_INT_INFO2 */
+ uint64_t omsg1 : 1; /**< Controller 1 Outbound Message Complete
+ See SRIO(0,2..3)_OMSG_DONE_COUNTS1 */
+ uint64_t omsg0 : 1; /**< Controller 0 Outbound Message Complete
+ See SRIO(0,2..3)_OMSG_DONE_COUNTS0 */
+ uint64_t link_up : 1; /**< Serial Link going from Inactive to Active */
+ uint64_t link_dwn : 1; /**< Serial Link going from Active to Inactive */
+ uint64_t phy_erb : 1; /**< Physical Layer Error detected in ERB
+ See SRIOMAINT*_ERB_ATTR_CAPT */
+ uint64_t log_erb : 1; /**< Logical/Transport Layer Error detected in ERB
+ See SRIOMAINT(0,2..3)_ERB_LT_ERR_DET */
+ uint64_t soft_rx : 1; /**< Incoming Packet received by Soft Packet FIFO */
+ uint64_t soft_tx : 1; /**< Outgoing Packet sent by Soft Packet FIFO */
+ uint64_t mce_rx : 1; /**< Incoming Multicast Event Symbol */
+ uint64_t mce_tx : 1; /**< Outgoing Multicast Event Transmit Complete */
+ uint64_t wr_done : 1; /**< Outgoing Last Nwrite_R DONE Response Received.
+ See SRIO(0,2..3)_WR_DONE_COUNTS */
+ uint64_t sli_err : 1; /**< Unsupported S2M Transaction Received.
+ See SRIO(0,2..3)_INT_INFO[1:0] */
+ uint64_t deny_wr : 1; /**< Incoming Maint_Wr Access to Denied Bar Registers. */
+ uint64_t bar_err : 1; /**< Incoming Access Crossing/Missing BAR Address */
+ uint64_t maint_op : 1; /**< Internal Maintenance Operation Complete.
+ See SRIO(0,2..3)_MAINT_OP and SRIO(0,2..3)_MAINT_RD_DATA */
+ uint64_t rxbell : 1; /**< One or more Incoming Doorbells Received.
+ Read SRIO(0,2..3)_RX_BELL to empty FIFO */
+ uint64_t bell_err : 1; /**< Outgoing Doorbell Timeout, Retry or Error.
+ See SRIO(0,2..3)_TX_BELL_INFO */
+ uint64_t txbell : 1; /**< Outgoing Doorbell Complete.
+ TXBELL will not be asserted if a Timeout, Retry or
+ Error occurs. */
+#else
+ uint64_t txbell : 1;
+ uint64_t bell_err : 1;
+ uint64_t rxbell : 1;
+ uint64_t maint_op : 1;
+ uint64_t bar_err : 1;
+ uint64_t deny_wr : 1;
+ uint64_t sli_err : 1;
+ uint64_t wr_done : 1;
+ uint64_t mce_tx : 1;
+ uint64_t mce_rx : 1;
+ uint64_t soft_tx : 1;
+ uint64_t soft_rx : 1;
+ uint64_t log_erb : 1;
+ uint64_t phy_erb : 1;
+ uint64_t link_dwn : 1;
+ uint64_t link_up : 1;
+ uint64_t omsg0 : 1;
+ uint64_t omsg1 : 1;
+ uint64_t omsg_err : 1;
+ uint64_t pko_err : 1;
+ uint64_t rtry_err : 1;
+ uint64_t f_error : 1;
+ uint64_t mac_buf : 1;
+ uint64_t degrad : 1;
+ uint64_t fail : 1;
+ uint64_t ttl_tout : 1;
+ uint64_t zero_pkt : 1;
+ uint64_t reserved_27_30 : 4;
+ uint64_t int2_sum : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sriox_int_reg_s cn63xx;
+ struct cvmx_sriox_int_reg_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t f_error : 1; /**< SRIO Fatal Port Error (MAC reset required) */
+ uint64_t rtry_err : 1; /**< Outbound Message Retry Threshold Exceeded
+ See SRIO(0..1)_INT_INFO3
+ When one or more of the segments in an outgoing
+ message have a RTRY_ERR, SRIO will not set
+ OMSG* after the message "transfer". */
+ uint64_t pko_err : 1; /**< Outbound Message Received PKO Error */
+ uint64_t omsg_err : 1; /**< Outbound Message Invalid Descriptor Error
+ See SRIO(0..1)_INT_INFO2 */
+ uint64_t omsg1 : 1; /**< Controller 1 Outbound Message Complete */
+ uint64_t omsg0 : 1; /**< Controller 0 Outbound Message Complete */
+ uint64_t link_up : 1; /**< Serial Link going from Inactive to Active */
+ uint64_t link_dwn : 1; /**< Serial Link going from Active to Inactive */
+ uint64_t phy_erb : 1; /**< Physical Layer Error detected in ERB
+ See SRIOMAINT*_ERB_ATTR_CAPT */
+ uint64_t log_erb : 1; /**< Logical/Transport Layer Error detected in ERB
+ See SRIOMAINT(0..1)_ERB_LT_ERR_DET */
+ uint64_t soft_rx : 1; /**< Incoming Packet received by Soft Packet FIFO */
+ uint64_t soft_tx : 1; /**< Outgoing Packet sent by Soft Packet FIFO */
+ uint64_t mce_rx : 1; /**< Incoming Multicast Event Symbol */
+ uint64_t mce_tx : 1; /**< Outgoing Multicast Event Transmit Complete */
+ uint64_t wr_done : 1; /**< Outgoing Last Nwrite_R DONE Response Received. */
+ uint64_t sli_err : 1; /**< Unsupported S2M Transaction Received.
+ See SRIO(0..1)_INT_INFO[1:0] */
+ uint64_t deny_wr : 1; /**< Incoming Maint_Wr Access to Denied Bar Registers. */
+ uint64_t bar_err : 1; /**< Incoming Access Crossing/Missing BAR Address */
+ uint64_t maint_op : 1; /**< Internal Maintenance Operation Complete.
+ See SRIO(0..1)_MAINT_OP and SRIO(0..1)_MAINT_RD_DATA */
+ uint64_t rxbell : 1; /**< One or more Incoming Doorbells Received.
+ Read SRIO(0..1)_RX_BELL to empty FIFO */
+ uint64_t bell_err : 1; /**< Outgoing Doorbell Timeout, Retry or Error.
+ See SRIO(0..1)_TX_BELL_INFO */
+ uint64_t txbell : 1; /**< Outgoing Doorbell Complete.
+ TXBELL will not be asserted if a Timeout, Retry or
+ Error occurs. */
+#else
+ uint64_t txbell : 1;
+ uint64_t bell_err : 1;
+ uint64_t rxbell : 1;
+ uint64_t maint_op : 1;
+ uint64_t bar_err : 1;
+ uint64_t deny_wr : 1;
+ uint64_t sli_err : 1;
+ uint64_t wr_done : 1;
+ uint64_t mce_tx : 1;
+ uint64_t mce_rx : 1;
+ uint64_t soft_tx : 1;
+ uint64_t soft_rx : 1;
+ uint64_t log_erb : 1;
+ uint64_t phy_erb : 1;
+ uint64_t link_dwn : 1;
+ uint64_t link_up : 1;
+ uint64_t omsg0 : 1;
+ uint64_t omsg1 : 1;
+ uint64_t omsg_err : 1;
+ uint64_t pko_err : 1;
+ uint64_t rtry_err : 1;
+ uint64_t f_error : 1;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } cn63xxp1;
+ struct cvmx_sriox_int_reg_s cn66xx;
+};
+typedef union cvmx_sriox_int_reg cvmx_sriox_int_reg_t;
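+
+/* Usage sketch (editor's illustration, not part of the original SDK header):
+ * because SRIO*_INT_REG is write-one-to-clear, a handler can read the
+ * register and write the same value back to acknowledge everything it saw.
+ * Assumes cvmx_read_csr()/cvmx_write_csr() from cvmx.h and the
+ * CVMX_SRIOX_INT_REG(block) address macro defined elsewhere in this file. */
+static inline uint64_t cvmx_sriox_example_ack_ints(int block)
+{
+ cvmx_sriox_int_reg_t int_reg;
+
+ int_reg.u64 = cvmx_read_csr(CVMX_SRIOX_INT_REG(block));
+ /* Writing the set bits back clears the latched interrupts. Note that
+ * RXBELL is cleared by draining the doorbell FIFO, not by this write. */
+ cvmx_write_csr(CVMX_SRIOX_INT_REG(block), int_reg.u64);
+ return int_reg.u64; /* caller inspects which sources fired */
+}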
+
+/**
+ * cvmx_srio#_ip_feature
+ *
+ * SRIO_IP_FEATURE = SRIO IP Feature Select
+ *
+ * Debug Register used to enable IP Core Features
+ *
+ * Notes:
+ * This register is used to override powerup values used by the SRIOMAINT Registers and QLM
+ * configuration. The register is only reset during COLD boot. It should be modified only
+ * while SRIO(0,2..3)_STATUS_REG.ACCESS is zero.
+ *
+ * Clk_Rst: SRIO(0,2..3)_IP_FEATURE sclk srst_cold_n
+ */
+union cvmx_sriox_ip_feature {
+ uint64_t u64;
+ struct cvmx_sriox_ip_feature_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ops : 32; /**< Reset Value for the OPs fields in both the
+ SRIOMAINT(0,2..3)_SRC_OPS and SRIOMAINT(0,2..3)_DST_OPS
+ registers. */
+ uint64_t reserved_15_31 : 17;
+ uint64_t no_vmin : 1; /**< Lane Sync Valid Minimum Count Disable. (Pass 3)
+ 0 = Wait for 2^12 valid codewords and at least
+ 127 comma characters before starting
+ alignment.
+ 1 = Wait only for 127 comma characters before
+ starting alignment. (SRIO V1.3 Compatible) */
+ uint64_t a66 : 1; /**< 66-bit Address Support. Value for bit 2 of the
+ EX_ADDR field in the SRIOMAINT(0,2..3)_PE_FEAT register. */
+ uint64_t a50 : 1; /**< 50-bit Address Support. Value for bit 1 of the
+ EX_ADDR field in the SRIOMAINT(0,2..3)_PE_FEAT register. */
+ uint64_t reserved_11_11 : 1;
+ uint64_t tx_flow : 1; /**< Reset Value for the TX_FLOW field in the
+ SRIOMAINT(0,2..3)_IR_BUFFER_CONFIG register. */
+ uint64_t pt_width : 2; /**< Value for the PT_WIDTH field in the
+ SRIOMAINT(0,2..3)_PORT_0_CTL register. */
+ uint64_t tx_pol : 4; /**< TX Serdes Polarity Lanes 3-0
+ 0 = Normal Operation
+ 1 = Invert, Swap +/- Tx SERDES Pins */
+ uint64_t rx_pol : 4; /**< RX Serdes Polarity Lanes 3-0
+ 0 = Normal Operation
+ 1 = Invert, Swap +/- Rx SERDES Pins */
+#else
+ uint64_t rx_pol : 4;
+ uint64_t tx_pol : 4;
+ uint64_t pt_width : 2;
+ uint64_t tx_flow : 1;
+ uint64_t reserved_11_11 : 1;
+ uint64_t a50 : 1;
+ uint64_t a66 : 1;
+ uint64_t no_vmin : 1;
+ uint64_t reserved_15_31 : 17;
+ uint64_t ops : 32;
+#endif
+ } s;
+ struct cvmx_sriox_ip_feature_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ops : 32; /**< Reset Value for the OPs fields in both the
+ SRIOMAINT(0..1)_SRC_OPS and SRIOMAINT(0..1)_DST_OPS
+ registers. */
+ uint64_t reserved_14_31 : 18;
+ uint64_t a66 : 1; /**< 66-bit Address Support. Value for bit 2 of the
+ EX_ADDR field in the SRIOMAINT(0..1)_PE_FEAT register. */
+ uint64_t a50 : 1; /**< 50-bit Address Support. Value for bit 1 of the
+ EX_ADDR field in the SRIOMAINT(0..1)_PE_FEAT register. */
+ uint64_t reserved_11_11 : 1;
+ uint64_t tx_flow : 1; /**< Reset Value for the TX_FLOW field in the
+ SRIOMAINT(0..1)_IR_BUFFER_CONFIG register.
+ Pass 2 will Reset to 1 when RTL ready.
+ (TX flow control not supported in pass 1) */
+ uint64_t pt_width : 2; /**< Value for the PT_WIDTH field in the
+ SRIOMAINT(0..1)_PORT_0_CTL register.
+ Reset to 0x2 rather than 0x3 in pass 1 (2 lane
+ interface supported in pass 1). */
+ uint64_t tx_pol : 4; /**< TX Serdes Polarity Lanes 3-0
+ 0 = Normal Operation
+ 1 = Invert, Swap +/- Tx SERDES Pins */
+ uint64_t rx_pol : 4; /**< RX Serdes Polarity Lanes 3-0
+ 0 = Normal Operation
+ 1 = Invert, Swap +/- Rx SERDES Pins */
+#else
+ uint64_t rx_pol : 4;
+ uint64_t tx_pol : 4;
+ uint64_t pt_width : 2;
+ uint64_t tx_flow : 1;
+ uint64_t reserved_11_11 : 1;
+ uint64_t a50 : 1;
+ uint64_t a66 : 1;
+ uint64_t reserved_14_31 : 18;
+ uint64_t ops : 32;
+#endif
+ } cn63xx;
+ struct cvmx_sriox_ip_feature_cn63xx cn63xxp1;
+ struct cvmx_sriox_ip_feature_s cn66xx;
+};
+typedef union cvmx_sriox_ip_feature cvmx_sriox_ip_feature_t;
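+
+/* Usage sketch (illustration only): inverting the polarity of RX lane 0, as
+ * a board with swapped +/- SERDES pins might require. Per the notes above,
+ * this should only be done while SRIO*_STATUS_REG[ACCESS] is zero. The
+ * CVMX_SRIOX_IP_FEATURE(block) address macro name is assumed. */
+static inline void cvmx_sriox_example_invert_rx0(int block)
+{
+ cvmx_sriox_ip_feature_t ipf;
+
+ ipf.u64 = cvmx_read_csr(CVMX_SRIOX_IP_FEATURE(block));
+ ipf.s.rx_pol |= 1; /* one bit per lane: lane 0 = bit 0 */
+ cvmx_write_csr(CVMX_SRIOX_IP_FEATURE(block), ipf.u64);
+}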
+
+/**
+ * cvmx_srio#_mac_buffers
+ *
+ * SRIO_MAC_BUFFERS = SRIO MAC Buffer Control
+ *
+ * Reports errors and controls buffer usage on the main MAC buffers
+ *
+ * Notes:
+ * Register displays errors status for each of the eight RX and TX buffers and controls use of the
+ * buffer in future operations. It also displays the number of RX and TX buffers currently used by
+ * the MAC.
+ *
+ * Clk_Rst: SRIO(0,2..3)_MAC_BUFFERS hclk hrst_n
+ */
+union cvmx_sriox_mac_buffers {
+ uint64_t u64;
+ struct cvmx_sriox_mac_buffers_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63 : 8;
+ uint64_t tx_enb : 8; /**< TX Buffer Enable. Each bit enables a specific TX
+ Buffer. At least 2 of these bits must be set for
+ proper operation. These bits must be cleared
+ and then set again to reuse the buffer after an
+ error occurs. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t tx_inuse : 4; /**< Number of TX buffers containing packets waiting
+ to be transmitted or to be acknowledged. */
+ uint64_t tx_stat : 8; /**< Errors detected in main SRIO Transmit Buffers.
+ A CRC error detected in a buffer sets that buffer's bit
+ until the corresponding TX_ENB is disabled. Each
+ bit set causes the SRIO(0,2..3)_INT_REG.MAC_BUF
+ interrupt. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t rx_enb : 8; /**< RX Buffer Enable. Each bit enables a specific RX
+ Buffer. At least 2 of these bits must be set for
+ proper operation. These bits must be cleared
+ and then set again to reuse the buffer after an
+ error occurs. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rx_inuse : 4; /**< Number of RX buffers containing valid packets
+ waiting to be processed by the logical layer. */
+ uint64_t rx_stat : 8; /**< Errors detected in main SRIO Receive Buffers. A
+ CRC error detected in a buffer sets that buffer's bit
+ until the corresponding RX_ENB is disabled. Each
+ bit set causes the SRIO(0,2..3)_INT_REG.MAC_BUF
+ interrupt. */
+#else
+ uint64_t rx_stat : 8;
+ uint64_t rx_inuse : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t rx_enb : 8;
+ uint64_t reserved_24_31 : 8;
+ uint64_t tx_stat : 8;
+ uint64_t tx_inuse : 4;
+ uint64_t reserved_44_47 : 4;
+ uint64_t tx_enb : 8;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } s;
+ struct cvmx_sriox_mac_buffers_s cn63xx;
+ struct cvmx_sriox_mac_buffers_s cn66xx;
+};
+typedef union cvmx_sriox_mac_buffers cvmx_sriox_mac_buffers_t;
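+
+/* Usage sketch (illustration only): per the notes above, a TX buffer that
+ * latched a CRC error (TX_STAT bit set) is taken out of service by clearing
+ * its TX_ENB bit and re-enabled by setting the bit again. A minimal recovery
+ * pass might look like this; the CVMX_SRIOX_MAC_BUFFERS(block) macro name is
+ * assumed. */
+static inline void cvmx_sriox_example_recycle_tx_buffers(int block)
+{
+ cvmx_sriox_mac_buffers_t buf;
+ uint64_t bad;
+
+ buf.u64 = cvmx_read_csr(CVMX_SRIOX_MAC_BUFFERS(block));
+ bad = buf.s.tx_stat; /* buffers with latched CRC errors */
+ if (!bad)
+ return;
+ buf.s.tx_enb &= ~bad; /* disable the failed buffers... */
+ cvmx_write_csr(CVMX_SRIOX_MAC_BUFFERS(block), buf.u64);
+ buf.s.tx_enb |= bad; /* ...then re-enable them for reuse */
+ cvmx_write_csr(CVMX_SRIOX_MAC_BUFFERS(block), buf.u64);
+}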
+
+/**
+ * cvmx_srio#_maint_op
+ *
+ * SRIO_MAINT_OP = SRIO Maintenance Operation
+ *
+ * Allows access to maintenance registers.
+ *
+ * Notes:
+ * This register allows write access to the local SRIOMAINT registers. A write to this register
+ * posts a read or write operation selected by the OP bit to the local SRIOMAINT register selected by
+ * ADDR. This write also sets the PENDING bit. The PENDING bit is cleared by hardware when the
+ * operation is complete. The MAINT_OP Interrupt is also set as the PENDING bit is cleared. While
+ * this bit is set, additional writes to this register stall the RSL. The FAIL bit is set with the
+ * clearing of the PENDING bit when an illegal address is selected. WR_DATA is used only during write
+ * operations. Only 32-bit Maintenance Operations are supported.
+ *
+ * Clk_Rst: SRIO(0,2..3)_MAINT_OP hclk hrst_n
+ */
+union cvmx_sriox_maint_op {
+ uint64_t u64;
+ struct cvmx_sriox_maint_op_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wr_data : 32; /**< Write Data[31:0]. */
+ uint64_t reserved_27_31 : 5;
+ uint64_t fail : 1; /**< Maintenance Operation Address Error */
+ uint64_t pending : 1; /**< Maintenance Operation Pending */
+ uint64_t op : 1; /**< Operation. 0=Read, 1=Write */
+ uint64_t addr : 24; /**< Address. Addr[1:0] are ignored. */
+#else
+ uint64_t addr : 24;
+ uint64_t op : 1;
+ uint64_t pending : 1;
+ uint64_t fail : 1;
+ uint64_t reserved_27_31 : 5;
+ uint64_t wr_data : 32;
+#endif
+ } s;
+ struct cvmx_sriox_maint_op_s cn63xx;
+ struct cvmx_sriox_maint_op_s cn63xxp1;
+ struct cvmx_sriox_maint_op_s cn66xx;
+};
+typedef union cvmx_sriox_maint_op cvmx_sriox_maint_op_t;
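+
+/* Usage sketch (illustration only): a 32-bit local maintenance write follows
+ * the sequence in the notes above: post the operation, poll until hardware
+ * clears PENDING, then check FAIL. Assumes cvmx_read_csr()/cvmx_write_csr()
+ * and the CVMX_SRIOX_MAINT_OP(block) address macro from this file. */
+static inline int cvmx_sriox_example_local_maint_write(int block,
+ uint32_t offset, uint32_t data)
+{
+ cvmx_sriox_maint_op_t op;
+
+ op.u64 = 0;
+ op.s.op = 1; /* 1 = write */
+ op.s.addr = offset; /* SRIOMAINT register offset; Addr[1:0] ignored */
+ op.s.wr_data = data;
+ cvmx_write_csr(CVMX_SRIOX_MAINT_OP(block), op.u64);
+ do {
+ op.u64 = cvmx_read_csr(CVMX_SRIOX_MAINT_OP(block));
+ } while (op.s.pending); /* hardware clears PENDING when complete */
+ return op.s.fail ? -1 : 0; /* FAIL set means an illegal address */
+}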
+
+/**
+ * cvmx_srio#_maint_rd_data
+ *
+ * SRIO_MAINT_RD_DATA = SRIO Maintenance Read Data
+ *
+ * Allows read access of maintenance registers.
+ *
+ * Notes:
+ * This register allows read access of the local SRIOMAINT registers. A write to the SRIO(0,2..3)_MAINT_OP
+ * register with the OP bit set to zero initiates a read request and clears the VALID bit. The
+ * resulting read is returned here and the VALID bit is set. Accesses to this register do not stall
+ * the RSL, but software should check the VALID bit before using the returned data.
+ *
+ * Clk_Rst: SRIO(0,2..3)_MAINT_RD_DATA hclk hrst_n
+ */
+union cvmx_sriox_maint_rd_data {
+ uint64_t u64;
+ struct cvmx_sriox_maint_rd_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t valid : 1; /**< Read Data Valid. */
+ uint64_t rd_data : 32; /**< Read Data[31:0]. */
+#else
+ uint64_t rd_data : 32;
+ uint64_t valid : 1;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_sriox_maint_rd_data_s cn63xx;
+ struct cvmx_sriox_maint_rd_data_s cn63xxp1;
+ struct cvmx_sriox_maint_rd_data_s cn66xx;
+};
+typedef union cvmx_sriox_maint_rd_data cvmx_sriox_maint_rd_data_t;
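+
+/* Usage sketch (illustration only): the companion read sequence. A write to
+ * SRIO*_MAINT_OP with OP=0 clears VALID here; software then polls VALID
+ * before consuming RD_DATA. Macro names are assumed as in the write sketch
+ * above. */
+static inline uint32_t cvmx_sriox_example_local_maint_read(int block, uint32_t offset)
+{
+ cvmx_sriox_maint_op_t op;
+ cvmx_sriox_maint_rd_data_t rd;
+
+ op.u64 = 0;
+ op.s.op = 0; /* 0 = read */
+ op.s.addr = offset;
+ cvmx_write_csr(CVMX_SRIOX_MAINT_OP(block), op.u64);
+ do {
+ rd.u64 = cvmx_read_csr(CVMX_SRIOX_MAINT_RD_DATA(block));
+ } while (!rd.s.valid); /* set once the read response lands */
+ return rd.s.rd_data;
+}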
+
+/**
+ * cvmx_srio#_mce_tx_ctl
+ *
+ * SRIO_MCE_TX_CTL = SRIO Multicast Event Transmit Control
+ *
+ * Multicast Event TX Control
+ *
+ * Notes:
+ * Writes to this register cause the SRIO device to generate a Multicast Event. Setting the MCE bit
+ * requests the logic to generate the Multicast Event Symbol. Reading the MCE bit shows the status
+ * of the transmit event. The hardware will clear the bit when the event has been transmitted and
+ * set the MCE_TX Interrupt.
+ *
+ * Clk_Rst: SRIO(0,2..3)_MCE_TX_CTL hclk hrst_n
+ */
+union cvmx_sriox_mce_tx_ctl {
+ uint64_t u64;
+ struct cvmx_sriox_mce_tx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t mce : 1; /**< Multicast Event Transmit. */
+#else
+ uint64_t mce : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_sriox_mce_tx_ctl_s cn63xx;
+ struct cvmx_sriox_mce_tx_ctl_s cn63xxp1;
+ struct cvmx_sriox_mce_tx_ctl_s cn66xx;
+};
+typedef union cvmx_sriox_mce_tx_ctl cvmx_sriox_mce_tx_ctl_t;
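+
+/* Usage sketch (illustration only): requesting a Multicast Event Symbol and
+ * waiting for the MAC to send it, per the notes above. The macro name
+ * CVMX_SRIOX_MCE_TX_CTL(block) is assumed. */
+static inline void cvmx_sriox_example_send_mce(int block)
+{
+ cvmx_sriox_mce_tx_ctl_t mce;
+
+ mce.u64 = 0;
+ mce.s.mce = 1; /* request the Multicast Event Symbol */
+ cvmx_write_csr(CVMX_SRIOX_MCE_TX_CTL(block), mce.u64);
+ do {
+ mce.u64 = cvmx_read_csr(CVMX_SRIOX_MCE_TX_CTL(block));
+ } while (mce.s.mce); /* cleared (and MCE_TX raised) once sent */
+}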
+
+/**
+ * cvmx_srio#_mem_op_ctrl
+ *
+ * SRIO_MEM_OP_CTRL = SRIO Memory Operation Control
+ *
+ * The SRIO Memory Operation Control
+ *
+ * Notes:
+ * This register is used to control memory operations. Bits are provided to override the priority of
+ * the outgoing responses to memory operations. The memory operations with responses include NREAD,
+ * NWRITE_R, ATOMIC_INC, ATOMIC_DEC, ATOMIC_SET and ATOMIC_CLR.
+ *
+ * Clk_Rst: SRIO(0,2..3)_MEM_OP_CTRL hclk hrst_n
+ */
+union cvmx_sriox_mem_op_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_mem_op_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t rr_ro : 1; /**< Read Response Relaxed Ordering. Controls ordering
+ rules for incoming memory operations
+ 0 = Normal Ordering
+ 1 = Relaxed Ordering */
+ uint64_t w_ro : 1; /**< Write Relaxed Ordering. Controls ordering rules
+ for incoming memory operations
+ 0 = Normal Ordering
+ 1 = Relaxed Ordering */
+ uint64_t reserved_6_7 : 2;
+ uint64_t rp1_sid : 1; /**< Sets response priority for incoming memory ops
+ of priority 1 on the secondary ID (0=2, 1=3) */
+ uint64_t rp0_sid : 2; /**< Sets response priority for incoming memory ops
+ of priority 0 on the secondary ID (0,1=1, 2=2, 3=3) */
+ uint64_t rp1_pid : 1; /**< Sets response priority for incoming memory ops
+ of priority 1 on the primary ID (0=2, 1=3) */
+ uint64_t rp0_pid : 2; /**< Sets response priority for incoming memory ops
+ of priority 0 on the primary ID (0,1=1, 2=2, 3=3) */
+#else
+ uint64_t rp0_pid : 2;
+ uint64_t rp1_pid : 1;
+ uint64_t rp0_sid : 2;
+ uint64_t rp1_sid : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t w_ro : 1;
+ uint64_t rr_ro : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_sriox_mem_op_ctrl_s cn63xx;
+ struct cvmx_sriox_mem_op_ctrl_s cn63xxp1;
+ struct cvmx_sriox_mem_op_ctrl_s cn66xx;
+};
+typedef union cvmx_sriox_mem_op_ctrl cvmx_sriox_mem_op_ctrl_t;
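+
+/* Usage sketch (illustration only): enabling relaxed ordering for both
+ * incoming writes and read responses while leaving the response-priority
+ * overrides untouched. The CVMX_SRIOX_MEM_OP_CTRL(block) macro name is
+ * assumed. */
+static inline void cvmx_sriox_example_relax_ordering(int block)
+{
+ cvmx_sriox_mem_op_ctrl_t ctl;
+
+ ctl.u64 = cvmx_read_csr(CVMX_SRIOX_MEM_OP_CTRL(block));
+ ctl.s.w_ro = 1; /* relaxed ordering for incoming writes */
+ ctl.s.rr_ro = 1; /* relaxed ordering for read responses */
+ cvmx_write_csr(CVMX_SRIOX_MEM_OP_CTRL(block), ctl.u64);
+}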
+
+/**
+ * cvmx_srio#_omsg_ctrl#
+ *
+ * SRIO_OMSG_CTRLX = SRIO Outbound Message Control
+ *
+ * The SRIO Controller X Outbound Message Control Register
+ *
+ * Notes:
+ * 1) If IDM_TT, IDM_SIS, and IDM_DID are all clear, then the "ID match" will always be false.
+ * 2) LTTR_SP and LTTR_MP must be non-zero at all times, otherwise the message output queue can
+ * get blocked
+ * 3) TESTMODE has no function on controller 1
+ * 4) When IDM_TT=0, it is possible for an ID match to match an 8-bit DID with a 16-bit DID - SRIO
+ * zero-extends all 8-bit DID's, and the DID comparisons are always 16-bits.
+ *
+ * Clk_Rst: SRIO(0,2..3)_OMSG_CTRL[0:1] hclk hrst_n
+ */
+union cvmx_sriox_omsg_ctrlx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_ctrlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t testmode : 1; /**< Controller X test mode (keep as RSVD in HRM) */
+ uint64_t reserved_37_62 : 26;
+ uint64_t silo_max : 5; /**< Sets max number outgoing segments for controller X
+ Valid range is 0x01 .. 0x10. Note that lower
+ values will reduce bandwidth. */
+ uint64_t rtry_thr : 16; /**< Controller X Retry threshold */
+ uint64_t rtry_en : 1; /**< Controller X Retry threshold enable */
+ uint64_t reserved_11_14 : 4;
+ uint64_t idm_tt : 1; /**< Controller X ID match includes TT ID */
+ uint64_t idm_sis : 1; /**< Controller X ID match includes SIS */
+ uint64_t idm_did : 1; /**< Controller X ID match includes DID */
+ uint64_t lttr_sp : 4; /**< Controller X SP allowable letters in dynamic
+ letter select mode (LNS) */
+ uint64_t lttr_mp : 4; /**< Controller X MP allowable letters in dynamic
+ letter select mode (LNS) */
+#else
+ uint64_t lttr_mp : 4;
+ uint64_t lttr_sp : 4;
+ uint64_t idm_did : 1;
+ uint64_t idm_sis : 1;
+ uint64_t idm_tt : 1;
+ uint64_t reserved_11_14 : 4;
+ uint64_t rtry_en : 1;
+ uint64_t rtry_thr : 16;
+ uint64_t silo_max : 5;
+ uint64_t reserved_37_62 : 26;
+ uint64_t testmode : 1;
+#endif
+ } s;
+ struct cvmx_sriox_omsg_ctrlx_s cn63xx;
+ struct cvmx_sriox_omsg_ctrlx_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t testmode : 1; /**< Controller X test mode (keep as RSVD in HRM) */
+ uint64_t reserved_32_62 : 31;
+ uint64_t rtry_thr : 16; /**< Controller X Retry threshold */
+ uint64_t rtry_en : 1; /**< Controller X Retry threshold enable */
+ uint64_t reserved_11_14 : 4;
+ uint64_t idm_tt : 1; /**< Controller X ID match includes TT ID */
+ uint64_t idm_sis : 1; /**< Controller X ID match includes SIS */
+ uint64_t idm_did : 1; /**< Controller X ID match includes DID */
+ uint64_t lttr_sp : 4; /**< Controller X SP allowable letters in dynamic
+ letter select mode (LNS) */
+ uint64_t lttr_mp : 4; /**< Controller X MP allowable letters in dynamic
+ letter select mode (LNS) */
+#else
+ uint64_t lttr_mp : 4;
+ uint64_t lttr_sp : 4;
+ uint64_t idm_did : 1;
+ uint64_t idm_sis : 1;
+ uint64_t idm_tt : 1;
+ uint64_t reserved_11_14 : 4;
+ uint64_t rtry_en : 1;
+ uint64_t rtry_thr : 16;
+ uint64_t reserved_32_62 : 31;
+ uint64_t testmode : 1;
+#endif
+ } cn63xxp1;
+ struct cvmx_sriox_omsg_ctrlx_s cn66xx;
+};
+typedef union cvmx_sriox_omsg_ctrlx cvmx_sriox_omsg_ctrlx_t;
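+
+/* Usage sketch (illustration only): note 2 above requires LTTR_SP and
+ * LTTR_MP to stay non-zero or the message output queue can get blocked, so
+ * a setup routine might allow all four letters for both classes. The
+ * CVMX_SRIOX_OMSG_CTRLX(index, block) macro name is assumed. */
+static inline void cvmx_sriox_example_omsg_letters(int block, int ctlr)
+{
+ cvmx_sriox_omsg_ctrlx_t ctl;
+
+ ctl.u64 = cvmx_read_csr(CVMX_SRIOX_OMSG_CTRLX(ctlr, block));
+ ctl.s.lttr_sp = 0xf; /* allow letters 0-3 for single-segment messages */
+ ctl.s.lttr_mp = 0xf; /* allow letters 0-3 for multi-segment messages */
+ cvmx_write_csr(CVMX_SRIOX_OMSG_CTRLX(ctlr, block), ctl.u64);
+}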
+
+/**
+ * cvmx_srio#_omsg_done_counts#
+ *
+ * SRIO_OMSG_DONE_COUNTSX = SRIO Outbound Message Complete Counts
+ *
+ * The SRIO Controller X Outbound Message Complete Counts Register
+ *
+ * Notes:
+ * This register shows the number of successful and unsuccessful Outgoing Messages issued through
+ * this controller. The only messages considered are the ones with the INT field set in the PKO
+ * message header. This register is typically not written while Outbound SRIO Memory traffic is
+ * enabled. The sum of the GOOD and BAD counts should equal the number of messages sent unless
+ * the MAC has been reset.
+ *
+ * Clk_Rst: SRIO(0,2..3)_OMSG_DONE_COUNTS[0:1] hclk hrst_n
+ */
+union cvmx_sriox_omsg_done_countsx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_done_countsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bad : 16; /**< Number of Outbound Messages requesting an INT that
+ did not increment GOOD. (One or more segment of the
+ message either timed out, reached the retry limit,
+ or received an ERROR response.) */
+ uint64_t good : 16; /**< Number of Outbound Messages requesting an INT that
+ received a DONE response for every segment. */
+#else
+ uint64_t good : 16;
+ uint64_t bad : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sriox_omsg_done_countsx_s cn63xx;
+ struct cvmx_sriox_omsg_done_countsx_s cn66xx;
+};
+typedef union cvmx_sriox_omsg_done_countsx cvmx_sriox_omsg_done_countsx_t;
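+
+/* Usage sketch (illustration only): sampling the completion counters for one
+ * controller. Only messages whose PKO header requested an INT are counted,
+ * so GOOD + BAD tracks the number of such messages sent. The macro name
+ * CVMX_SRIOX_OMSG_DONE_COUNTSX(index, block) is assumed. */
+static inline void cvmx_sriox_example_omsg_counts(int block, int ctlr,
+ unsigned *good, unsigned *bad)
+{
+ cvmx_sriox_omsg_done_countsx_t cnt;
+
+ cnt.u64 = cvmx_read_csr(CVMX_SRIOX_OMSG_DONE_COUNTSX(ctlr, block));
+ *good = cnt.s.good; /* every segment got a DONE response */
+ *bad = cnt.s.bad; /* timed out, hit the retry limit, or got ERROR */
+}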
+
+/**
+ * cvmx_srio#_omsg_fmp_mr#
+ *
+ * SRIO_OMSG_FMP_MRX = SRIO Outbound Message FIRSTMP Message Restriction
+ *
+ * The SRIO Controller X Outbound Message FIRSTMP Message Restriction Register
+ *
+ * Notes:
+ * This CSR controls when FMP candidate message segments (from the two different controllers) can enter
+ * the message segment silo to be sent out. A segment remains in the silo until after it has
+ * been transmitted and either acknowledged or errored out.
+ *
+ * Candidates and silo entries are one of 4 types:
+ * SP - a single-segment message
+ * FMP - the first segment of a multi-segment message
+ * NMP - the other segments in a multi-segment message
+ * PSD - the silo pseudo-entry that is valid only while a controller is in the middle of pushing
+ * a multi-segment message into the silo and can match against segments generated by
+ * the other controller
+ *
+ * When a candidate "matches" against a silo entry or pseudo entry, it cannot enter the silo.
+ * By default (i.e. zeroes in this CSR), the FMP candidate matches against all entries in the
+ * silo. When fields in this CSR are set, FMP candidate segments will match fewer silo entries and
+ * can enter the silo more freely, probably providing better performance.
+ *
+ * Clk_Rst: SRIO(0,2..3)_OMSG_FMP_MR[0:1] hclk hrst_n
+ */
+union cvmx_sriox_omsg_fmp_mrx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_fmp_mrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t ctlr_sp : 1; /**< Controller X FIRSTMP enable controller SP
+ When set, the FMP candidate message segment can
+ only match siloed SP segments that were created
+ by the same controller. When clear, this FMP-SP
+ match can also occur when the segments were
+ created by the other controller.
+ Not used by the hardware when ALL_SP is set. */
+ uint64_t ctlr_fmp : 1; /**< Controller X FIRSTMP enable controller FIRSTMP
+ When set, the FMP candidate message segment can
+ only match siloed FMP segments that were created
+ by the same controller. When clear, this FMP-FMP
+ match can also occur when the segments were
+ created by the other controller.
+ Not used by the hardware when ALL_FMP is set. */
+ uint64_t ctlr_nmp : 1; /**< Controller X FIRSTMP enable controller NFIRSTMP
+ When set, the FMP candidate message segment can
+ only match siloed NMP segments that were created
+ by the same controller. When clear, this FMP-NMP
+ match can also occur when the segments were
+ created by the other controller.
+ Not used by the hardware when ALL_NMP is set. */
+ uint64_t id_sp : 1; /**< Controller X FIRSTMP enable ID SP
+ When set, the FMP candidate message segment can
+ only match siloed SP segments that "ID match" the
+ candidate. When clear, this FMP-SP match can occur
+ with any ID values.
+ Not used by the hardware when ALL_SP is set. */
+ uint64_t id_fmp : 1; /**< Controller X FIRSTMP enable ID FIRSTMP
+ When set, the FMP candidate message segment can
+ only match siloed FMP segments that "ID match" the
+ candidate. When clear, this FMP-FMP match can occur
+ with any ID values.
+ Not used by the hardware when ALL_FMP is set. */
+ uint64_t id_nmp : 1; /**< Controller X FIRSTMP enable ID NFIRSTMP
+ When set, the FMP candidate message segment can
+ only match siloed NMP segments that "ID match" the
+ candidate. When clear, this FMP-NMP match can occur
+ with any ID values.
+ Not used by the hardware when ALL_NMP is set. */
+ uint64_t id_psd : 1; /**< Controller X FIRSTMP enable ID PSEUDO
+ When set, the FMP candidate message segment can
+ only match the silo pseudo (for the other
+ controller) when it is an "ID match". When clear,
+ this FMP-PSD match can occur with any ID values.
+ Not used by the hardware when ALL_PSD is set. */
+ uint64_t mbox_sp : 1; /**< Controller X FIRSTMP enable MBOX SP
+ When set, the FMP candidate message segment can
+ only match siloed SP segments with the same 2-bit
+ mbox value as the candidate. When clear, this
+ FMP-SP match can occur with any mbox values.
+ Not used by the hardware when ALL_SP is set. */
+ uint64_t mbox_fmp : 1; /**< Controller X FIRSTMP enable MBOX FIRSTMP
+ When set, the FMP candidate message segment can
+ only match siloed FMP segments with the same 2-bit
+ mbox value as the candidate. When clear, this
+ FMP-FMP match can occur with any mbox values.
+ Not used by the hardware when ALL_FMP is set. */
+ uint64_t mbox_nmp : 1; /**< Controller X FIRSTMP enable MBOX NFIRSTMP
+ When set, the FMP candidate message segment can
+ only match siloed NMP segments with the same 2-bit
+ mbox value as the candidate. When clear, this
+ FMP-NMP match can occur with any mbox values.
+ Not used by the hardware when ALL_NMP is set. */
+ uint64_t mbox_psd : 1; /**< Controller X FIRSTMP enable MBOX PSEUDO
+ When set, the FMP candidate message segment can
+ only match the silo pseudo (for the other
+ controller) if the pseudo has the same 2-bit mbox
+ value as the candidate. When clear, this FMP-PSD
+ match can occur with any mbox values.
+ Not used by the hardware when ALL_PSD is set. */
+ uint64_t all_sp : 1; /**< Controller X FIRSTMP enable all SP
+ When set, no FMP candidate message segments ever
+ match siloed SP segments and ID_SP
+ and MBOX_SP are not used. When clear, FMP-SP
+ matches can occur. */
+ uint64_t all_fmp : 1; /**< Controller X FIRSTMP enable all FIRSTMP
+ When set, no FMP candidate message segments ever
+ match siloed FMP segments and ID_FMP and MBOX_FMP
+ are not used. When clear, FMP-FMP matches can
+ occur. */
+ uint64_t all_nmp : 1; /**< Controller X FIRSTMP enable all NFIRSTMP
+ When set, no FMP candidate message segments ever
+ match siloed NMP segments and ID_NMP and MBOX_NMP
+ are not used. When clear, FMP-NMP matches can
+ occur. */
+ uint64_t all_psd : 1; /**< Controller X FIRSTMP enable all PSEUDO
+ When set, no FMP candidate message segments ever
+ match the silo pseudo (for the other controller)
+ and ID_PSD and MBOX_PSD are not used. When clear,
+ FMP-PSD matches can occur. */
+#else
+ uint64_t all_psd : 1;
+ uint64_t all_nmp : 1;
+ uint64_t all_fmp : 1;
+ uint64_t all_sp : 1;
+ uint64_t mbox_psd : 1;
+ uint64_t mbox_nmp : 1;
+ uint64_t mbox_fmp : 1;
+ uint64_t mbox_sp : 1;
+ uint64_t id_psd : 1;
+ uint64_t id_nmp : 1;
+ uint64_t id_fmp : 1;
+ uint64_t id_sp : 1;
+ uint64_t ctlr_nmp : 1;
+ uint64_t ctlr_fmp : 1;
+ uint64_t ctlr_sp : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_sriox_omsg_fmp_mrx_s cn63xx;
+ struct cvmx_sriox_omsg_fmp_mrx_s cn63xxp1;
+ struct cvmx_sriox_omsg_fmp_mrx_s cn66xx;
+};
+typedef union cvmx_sriox_omsg_fmp_mrx cvmx_sriox_omsg_fmp_mrx_t;
+
+/**
+ * cvmx_srio#_omsg_nmp_mr#
+ *
+ * SRIO_OMSG_NMP_MRX = SRIO Outbound Message NFIRSTMP Message Restriction
+ *
+ * The SRIO Controller X Outbound Message NFIRSTMP Message Restriction Register
+ *
+ * Notes:
+ * This CSR controls when NMP candidate message segments (from the two different controllers) can enter
+ * the message segment silo to be sent out. A segment remains in the silo until after it has
+ * been transmitted and either acknowledged or errored out.
+ *
+ * Candidates and silo entries are one of 4 types:
+ * SP - a single-segment message
+ * FMP - the first segment of a multi-segment message
+ * NMP - the other segments in a multi-segment message
+ * PSD - the silo pseudo-entry that is valid only while a controller is in the middle of pushing
+ * a multi-segment message into the silo and can match against segments generated by
+ * the other controller
+ *
+ * When a candidate "matches" against a silo entry or pseudo entry, it cannot enter the silo.
+ * By default (i.e. zeroes in this CSR), the NMP candidate matches against all entries in the
+ * silo. When fields in this CSR are set, NMP candidate segments will match fewer silo entries and
+ * can enter the silo more freely, probably providing better performance.
+ *
+ * Clk_Rst: SRIO(0,2..3)_OMSG_NMP_MR[0:1] hclk hrst_n
+ */
+union cvmx_sriox_omsg_nmp_mrx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_nmp_mrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t ctlr_sp : 1; /**< Controller X NFIRSTMP enable controller SP
+ When set, the NMP candidate message segment can
+ only match siloed SP segments that were created
+ by the same controller. When clear, this NMP-SP
+ match can also occur when the segments were
+ created by the other controller.
+ Not used by the hardware when ALL_SP is set. */
+ uint64_t ctlr_fmp : 1; /**< Controller X NFIRSTMP enable controller FIRSTMP
+ When set, the NMP candidate message segment can
+ only match siloed FMP segments that were created
+ by the same controller. When clear, this NMP-FMP
+ match can also occur when the segments were
+ created by the other controller.
+ Not used by the hardware when ALL_FMP is set. */
+ uint64_t ctlr_nmp : 1; /**< Controller X NFIRSTMP enable controller NFIRSTMP
+ When set, the NMP candidate message segment can
+ only match siloed NMP segments that were created
+ by the same controller. When clear, this NMP-NMP
+ match can also occur when the segments were
+ created by the other controller.
+ Not used by the hardware when ALL_NMP is set. */
+ uint64_t id_sp : 1; /**< Controller X NFIRSTMP enable ID SP
+ When set, the NMP candidate message segment can
+ only match siloed SP segments that "ID match" the
+ candidate. When clear, this NMP-SP match can occur
+ with any ID values.
+ Not used by the hardware when ALL_SP is set. */
+ uint64_t id_fmp : 1; /**< Controller X NFIRSTMP enable ID FIRSTMP
+ When set, the NMP candidate message segment can
+ only match siloed FMP segments that "ID match" the
+ candidate. When clear, this NMP-FMP match can occur
+ with any ID values.
+ Not used by the hardware when ALL_FMP is set. */
+ uint64_t id_nmp : 1; /**< Controller X NFIRSTMP enable ID NFIRSTMP
+ When set, the NMP candidate message segment can
+ only match siloed NMP segments that "ID match" the
+ candidate. When clear, this NMP-NMP match can occur
+ with any ID values.
+ Not used by the hardware when ALL_NMP is set. */
+ uint64_t reserved_8_8 : 1;
+ uint64_t mbox_sp : 1; /**< Controller X NFIRSTMP enable MBOX SP
+ When set, the NMP candidate message segment can
+ only match siloed SP segments with the same 2-bit
+ mbox value as the candidate. When clear, this
+ NMP-SP match can occur with any mbox values.
+ Not used by the hardware when ALL_SP is set. */
+ uint64_t mbox_fmp : 1; /**< Controller X NFIRSTMP enable MBOX FIRSTMP
+ When set, the NMP candidate message segment can
+ only match siloed FMP segments with the same 2-bit
+ mbox value as the candidate. When clear, this
+ NMP-FMP match can occur with any mbox values.
+ Not used by the hardware when ALL_FMP is set. */
+ uint64_t mbox_nmp : 1; /**< Controller X NFIRSTMP enable MBOX NFIRSTMP
+ When set, the NMP candidate message segment can
+ only match siloed NMP segments with the same 2-bit
+ mbox value as the candidate. When clear, this
+ NMP-NMP match can occur with any mbox values.
+ Not used by the hardware when ALL_NMP is set. */
+ uint64_t reserved_4_4 : 1;
+ uint64_t all_sp : 1; /**< Controller X NFIRSTMP enable all SP
+ When set, no NMP candidate message segments ever
+ match siloed SP segments and ID_SP
+ and MBOX_SP are not used. When clear, NMP-SP
+ matches can occur. */
+ uint64_t all_fmp : 1; /**< Controller X NFIRSTMP enable all FIRSTMP
+ When set, no NMP candidate message segments ever
+ match siloed FMP segments and ID_FMP and MBOX_FMP
+ are not used. When clear, NMP-FMP matches can
+ occur. */
+ uint64_t all_nmp : 1; /**< Controller X NFIRSTMP enable all NFIRSTMP
+ When set, no NMP candidate message segments ever
+ match siloed NMP segments and ID_NMP and MBOX_NMP
+ are not used. When clear, NMP-NMP matches can
+ occur. */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t all_nmp : 1;
+ uint64_t all_fmp : 1;
+ uint64_t all_sp : 1;
+ uint64_t reserved_4_4 : 1;
+ uint64_t mbox_nmp : 1;
+ uint64_t mbox_fmp : 1;
+ uint64_t mbox_sp : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t id_nmp : 1;
+ uint64_t id_fmp : 1;
+ uint64_t id_sp : 1;
+ uint64_t ctlr_nmp : 1;
+ uint64_t ctlr_fmp : 1;
+ uint64_t ctlr_sp : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_sriox_omsg_nmp_mrx_s cn63xx;
+ struct cvmx_sriox_omsg_nmp_mrx_s cn63xxp1;
+ struct cvmx_sriox_omsg_nmp_mrx_s cn66xx;
+};
+typedef union cvmx_sriox_omsg_nmp_mrx cvmx_sriox_omsg_nmp_mrx_t;
+
+/**
+ * cvmx_srio#_omsg_port#
+ *
+ * SRIO_OMSG_PORTX = SRIO Outbound Message Port
+ *
+ * The SRIO Controller X Outbound Message Port Register
+ *
+ * Notes:
+ * PORT maps the PKO port to SRIO interface \# / controller X as follows:
+ *
+ * 000 == PKO port 40
+ * 001 == PKO port 41
+ * 010 == PKO port 42
+ * 011 == PKO port 43
+ * 100 == PKO port 44
+ * 101 == PKO port 45
+ * 110 == PKO port 46
+ * 111 == PKO port 47
+ *
+ * No two PORT fields among the enabled controllers (ENABLE == 1) may be set to the same value.
+ * The register is only reset during COLD boot. The register can be accessed/modified regardless of
+ * the value in SRIO(0,2..3)_STATUS_REG.ACCESS.
+ *
+ * Clk_Rst: SRIO(0,2..3)_OMSG_PORT[0:1] sclk srst_n
+ */
+union cvmx_sriox_omsg_portx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_portx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t enable : 1; /**< Controller X enable */
+ uint64_t reserved_3_30 : 28;
+ uint64_t port : 3; /**< Controller X PKO port */
+#else
+ uint64_t port : 3;
+ uint64_t reserved_3_30 : 28;
+ uint64_t enable : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sriox_omsg_portx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t enable : 1; /**< Controller X enable */
+ uint64_t reserved_2_30 : 29;
+ uint64_t port : 2; /**< Controller X PKO port */
+#else
+ uint64_t port : 2;
+ uint64_t reserved_2_30 : 29;
+ uint64_t enable : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn63xx;
+ struct cvmx_sriox_omsg_portx_cn63xx cn63xxp1;
+ struct cvmx_sriox_omsg_portx_s cn66xx;
+};
+typedef union cvmx_sriox_omsg_portx cvmx_sriox_omsg_portx_t;
+
+/**
+ * cvmx_srio#_omsg_silo_thr
+ *
+ * SRIO_OMSG_SILO_THR = SRIO Outgoing Message SILO Thresholds
+ *
+ * The SRIO Outgoing Message SILO Thresholds
+ *
+ * Notes:
+ * Limits the number of Outgoing Message Segments in flight at a time.
+ *
+ * Clk_Rst: SRIO(0,2..3)_OMSG_SILO_THR hclk hrst_n
+ */
+union cvmx_sriox_omsg_silo_thr {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_silo_thr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t tot_silo : 5; /**< Sets max number segments in flight for all
+ controllers. Valid range is 0x01 .. 0x10 but
+ lower values reduce bandwidth. */
+#else
+ uint64_t tot_silo : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_sriox_omsg_silo_thr_s cn63xx;
+ struct cvmx_sriox_omsg_silo_thr_s cn66xx;
+};
+typedef union cvmx_sriox_omsg_silo_thr cvmx_sriox_omsg_silo_thr_t;
+
+/**
+ * cvmx_srio#_omsg_sp_mr#
+ *
+ * SRIO_OMSG_SP_MRX = SRIO Outbound Message SP Message Restriction
+ *
+ * The SRIO Controller X Outbound Message SP Message Restriction Register
+ *
+ * Notes:
+ * This CSR controls when SP candidate message segments (from the two different controllers) can enter
+ * the message segment silo to be sent out. A segment remains in the silo until after it has
+ * been transmitted and either acknowledged or errored out.
+ *
+ * Candidates and silo entries are one of 4 types:
+ * SP - a single-segment message
+ * FMP - the first segment of a multi-segment message
+ * NMP - the other segments in a multi-segment message
+ * PSD - the silo pseudo-entry that is valid only while a controller is in the middle of pushing
+ * a multi-segment message into the silo and can match against segments generated by
+ * the other controller
+ *
+ * When a candidate "matches" against a silo entry or pseudo entry, it cannot enter the silo.
+ * By default (i.e. zeroes in this CSR), the SP candidate matches against all entries in the
+ * silo. When fields in this CSR are set, SP candidate segments will match fewer silo entries and
+ * can enter the silo more freely, probably providing better performance.
+ *
+ * Clk_Rst: SRIO(0,2..3)_OMSG_SP_MR[0:1] hclk hrst_n
+ */
+union cvmx_sriox_omsg_sp_mrx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_sp_mrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t xmbox_sp : 1; /**< Controller X SP enable XMBOX SP
+ When set, the SP candidate message can only
+ match siloed SP segments with the same 4-bit xmbox
+ value as the candidate. When clear, this SP-SP
+ match can occur with any xmbox values.
+ When XMBOX_SP is set, MBOX_SP will commonly be set.
+ Not used by the hardware when ALL_SP is set. */
+ uint64_t ctlr_sp : 1; /**< Controller X SP enable controller SP
+ When set, the SP candidate message can
+ only match siloed SP segments that were created
+ by the same controller. When clear, this SP-SP
+ match can also occur when the segments were
+ created by the other controller.
+ Not used by the hardware when ALL_SP is set. */
+ uint64_t ctlr_fmp : 1; /**< Controller X SP enable controller FIRSTMP
+ When set, the SP candidate message can
+ only match siloed FMP segments that were created
+ by the same controller. When clear, this SP-FMP
+ match can also occur when the segments were
+ created by the other controller.
+ Not used by the hardware when ALL_FMP is set. */
+ uint64_t ctlr_nmp : 1; /**< Controller X SP enable controller NFIRSTMP
+ When set, the SP candidate message can
+ only match siloed NMP segments that were created
+ by the same controller. When clear, this SP-NMP
+ match can also occur when the segments were
+ created by the other controller.
+ Not used by the hardware when ALL_NMP is set. */
+ uint64_t id_sp : 1; /**< Controller X SP enable ID SP
+ When set, the SP candidate message can
+ only match siloed SP segments that "ID match" the
+ candidate. When clear, this SP-SP match can occur
+ with any ID values.
+ Not used by the hardware when ALL_SP is set. */
+ uint64_t id_fmp : 1; /**< Controller X SP enable ID FIRSTMP
+ When set, the SP candidate message can
+ only match siloed FMP segments that "ID match" the
+ candidate. When clear, this SP-FMP match can occur
+ with any ID values.
+ Not used by the hardware when ALL_FMP is set. */
+ uint64_t id_nmp : 1; /**< Controller X SP enable ID NFIRSTMP
+ When set, the SP candidate message can
+ only match siloed NMP segments that "ID match" the
+ candidate. When clear, this SP-NMP match can occur
+ with any ID values.
+ Not used by the hardware when ALL_NMP is set. */
+ uint64_t id_psd : 1; /**< Controller X SP enable ID PSEUDO
+ When set, the SP candidate message can
+ only match the silo pseudo (for the other
+ controller) when it is an "ID match". When clear,
+ this SP-PSD match can occur with any ID values.
+ Not used by the hardware when ALL_PSD is set. */
+ uint64_t mbox_sp : 1; /**< Controller X SP enable MBOX SP
+ When set, the SP candidate message can only
+ match siloed SP segments with the same 2-bit mbox
+ value as the candidate. When clear, this SP-SP
+ match can occur with any mbox values.
+ Not used by the hardware when ALL_SP is set. */
+ uint64_t mbox_fmp : 1; /**< Controller X SP enable MBOX FIRSTMP
+ When set, the SP candidate message can only
+ match siloed FMP segments with the same 2-bit mbox
+ value as the candidate. When clear, this SP-FMP
+ match can occur with any mbox values.
+ Not used by the hardware when ALL_FMP is set. */
+ uint64_t mbox_nmp : 1; /**< Controller X SP enable MBOX NFIRSTMP
+ When set, the SP candidate message can only
+ match siloed NMP segments with the same 2-bit mbox
+ value as the candidate. When clear, this SP-NMP
+ match can occur with any mbox values.
+ Not used by the hardware when ALL_NMP is set. */
+ uint64_t mbox_psd : 1; /**< Controller X SP enable MBOX PSEUDO
+ When set, the SP candidate message can only
+ match the silo pseudo (for the other controller)
+ if the pseudo has the same 2-bit mbox value as the
+ candidate. When clear, this SP-PSD match can occur
+ with any mbox values.
+ Not used by the hardware when ALL_PSD is set. */
+ uint64_t all_sp : 1; /**< Controller X SP enable all SP
+ When set, no SP candidate messages ever
+ match siloed SP segments, and XMBOX_SP, ID_SP,
+ and MBOX_SP are not used. When clear, SP-SP
+ matches can occur. */
+ uint64_t all_fmp : 1; /**< Controller X SP enable all FIRSTMP
+ When set, no SP candidate messages ever
+ match siloed FMP segments and ID_FMP and MBOX_FMP
+ are not used. When clear, SP-FMP matches can
+ occur. */
+ uint64_t all_nmp : 1; /**< Controller X SP enable all NFIRSTMP
+ When set, no SP candidate messages ever
+ match siloed NMP segments and ID_NMP and MBOX_NMP
+ are not used. When clear, SP-NMP matches can
+ occur. */
+ uint64_t all_psd : 1; /**< Controller X SP enable all PSEUDO
+ When set, no SP candidate messages ever
+ match the silo pseudo (for the other controller)
+ and ID_PSD and MBOX_PSD are not used. When clear,
+ SP-PSD matches can occur. */
+#else
+ uint64_t all_psd : 1;
+ uint64_t all_nmp : 1;
+ uint64_t all_fmp : 1;
+ uint64_t all_sp : 1;
+ uint64_t mbox_psd : 1;
+ uint64_t mbox_nmp : 1;
+ uint64_t mbox_fmp : 1;
+ uint64_t mbox_sp : 1;
+ uint64_t id_psd : 1;
+ uint64_t id_nmp : 1;
+ uint64_t id_fmp : 1;
+ uint64_t id_sp : 1;
+ uint64_t ctlr_nmp : 1;
+ uint64_t ctlr_fmp : 1;
+ uint64_t ctlr_sp : 1;
+ uint64_t xmbox_sp : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_sriox_omsg_sp_mrx_s cn63xx;
+ struct cvmx_sriox_omsg_sp_mrx_s cn63xxp1;
+ struct cvmx_sriox_omsg_sp_mrx_s cn66xx;
+};
+typedef union cvmx_sriox_omsg_sp_mrx cvmx_sriox_omsg_sp_mrx_t;
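+
+/* Usage sketch (illustration only): per the notes above, an all-zero
+ * restriction register makes a candidate match (and thus wait behind) every
+ * siloed segment. Setting the ALL_* bits disables whole match classes so
+ * candidates enter the silo more freely. The macro name
+ * CVMX_SRIOX_OMSG_SP_MRX(index, block) is assumed; the FMP/NMP restriction
+ * registers above take the same treatment. */
+static inline void cvmx_sriox_example_sp_no_restrict(int block, int ctlr)
+{
+ cvmx_sriox_omsg_sp_mrx_t mr;
+
+ mr.u64 = 0;
+ mr.s.all_sp = 1; /* never match siloed SP segments */
+ mr.s.all_fmp = 1; /* never match siloed FMP segments */
+ mr.s.all_nmp = 1; /* never match siloed NMP segments */
+ mr.s.all_psd = 1; /* never match the pseudo entry */
+ cvmx_write_csr(CVMX_SRIOX_OMSG_SP_MRX(ctlr, block), mr.u64);
+}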
+
+/**
+ * cvmx_srio#_prio#_in_use
+ *
+ * SRIO_PRIO[0:3]_IN_USE = S2M PRIORITY FIFO IN USE COUNTS
+ *
+ * SRIO S2M Priority X FIFO Inuse counts
+ *
+ * Notes:
+ * These registers provide status information on the number of read/write requests pending in the S2M
+ * Priority FIFOs. The information can be used to help determine when an S2M_TYPE register can be
+ * reallocated. For example, if an S2M_TYPE is used N times in a DMA write operation and the DMA has
+ * completed, the register corresponding to the RD/WR_PRIOR of the S2M_TYPE can be read to determine
+ * the START_CNT and then polled to see if the END_CNT equals the START_CNT or at least
+ * START_CNT+N. These registers can be accessed regardless of the value of SRIO(0,2..3)_STATUS_REG.ACCESS
+ * but are reset by either the MAC or Core being reset.
+ *
+ * Clk_Rst: SRIO(0,2..3)_PRIO[0:3]_IN_USE sclk srst_n, hrst_n
+ */
+union cvmx_sriox_priox_in_use {
+ uint64_t u64;
+ struct cvmx_sriox_priox_in_use_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t end_cnt : 16; /**< Count of Packets with S2M_TYPES completed for this
+ Priority X FIFO */
+ uint64_t start_cnt : 16; /**< Count of Packets with S2M_TYPES started for this
+ Priority X FIFO */
+#else
+ uint64_t start_cnt : 16;
+ uint64_t end_cnt : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sriox_priox_in_use_s cn63xx;
+ struct cvmx_sriox_priox_in_use_s cn66xx;
+};
+typedef union cvmx_sriox_priox_in_use cvmx_sriox_priox_in_use_t;
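+
+/* Usage sketch (illustration only): the polling pattern from the notes
+ * above. Capture START_CNT when the requests are issued on a priority FIFO,
+ * then wait for END_CNT to advance by N before reallocating the S2M_TYPE
+ * entry. The CVMX_SRIOX_PRIOX_IN_USE(prio, block) macro name is assumed. */
+static inline void cvmx_sriox_example_wait_fifo(int block, int prio, unsigned n)
+{
+ cvmx_sriox_priox_in_use_t use;
+ uint16_t start;
+
+ use.u64 = cvmx_read_csr(CVMX_SRIOX_PRIOX_IN_USE(prio, block));
+ start = use.s.start_cnt;
+ do {
+ use.u64 = cvmx_read_csr(CVMX_SRIOX_PRIOX_IN_USE(prio, block));
+ } while ((uint16_t)(use.s.end_cnt - start) < n); /* 16-bit wrap safe */
+}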
+
+/**
+ * cvmx_srio#_rx_bell
+ *
+ * SRIO_RX_BELL = SRIO Receive Doorbell
+ *
+ * The SRIO Incoming (RX) Doorbell
+ *
+ * Notes:
+ * This register contains the SRIO Information, Device ID, Transaction Type and Priority of the
+ * incoming Doorbell Transaction as well as the number of transactions waiting to be read. Reading
+ * this register causes a Doorbell to be removed from the RX Bell FIFO and the COUNT to be
+ * decremented. If the COUNT is zero then the FIFO is empty and the other fields should be
+ * considered invalid. When the FIFO is full, an ERROR is automatically issued. The RXBELL Interrupt
+ * can be used to detect posts to this FIFO.
+ *
+ * Clk_Rst: SRIO(0,2..3)_RX_BELL hclk hrst_n
+ */
+union cvmx_sriox_rx_bell {
+ uint64_t u64;
+ struct cvmx_sriox_rx_bell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t data : 16; /**< Information field from received doorbell */
+ uint64_t src_id : 16; /**< Doorbell Source Device ID[15:0] */
+ uint64_t count : 8; /**< RX Bell FIFO Count
+ Note: Count must be > 0 for entry to be valid. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t dest_id : 1; /**< Destination Device ID 0=Primary, 1=Secondary */
+ uint64_t id16 : 1; /**< Transaction Type, 0=use ID[7:0], 1=use ID[15:0] */
+ uint64_t reserved_2_2 : 1;
+ uint64_t priority : 2; /**< Doorbell Priority */
+#else
+ uint64_t priority : 2;
+ uint64_t reserved_2_2 : 1;
+ uint64_t id16 : 1;
+ uint64_t dest_id : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t count : 8;
+ uint64_t src_id : 16;
+ uint64_t data : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_sriox_rx_bell_s cn63xx;
+ struct cvmx_sriox_rx_bell_s cn63xxp1;
+ struct cvmx_sriox_rx_bell_s cn66xx;
+};
+typedef union cvmx_sriox_rx_bell cvmx_sriox_rx_bell_t;
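+
+/* Usage sketch (illustration only): draining the incoming doorbell FIFO,
+ * which is also what clears the RXBELL interrupt. Each read pops one entry,
+ * and the popped fields are only valid while the returned COUNT is nonzero.
+ * The CVMX_SRIOX_RX_BELL(block) macro name is assumed. */
+static inline void cvmx_sriox_example_drain_bells(int block)
+{
+ cvmx_sriox_rx_bell_t bell;
+
+ for (;;) {
+ bell.u64 = cvmx_read_csr(CVMX_SRIOX_RX_BELL(block));
+ if (bell.s.count == 0)
+ break; /* FIFO empty; fields invalid */
+ /* bell.s.data, bell.s.src_id, and bell.s.priority describe the
+ * doorbell just popped; hand them to the application here. */
+ }
+}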
+
+/**
+ * cvmx_srio#_rx_bell_seq
+ *
+ * SRIO_RX_BELL_SEQ = SRIO Receive Doorbell Sequence Count
+ *
+ * The SRIO Incoming (RX) Doorbell Sequence Count
+ *
+ * Notes:
+ * This register contains the value of the sequence counter when the doorbell was received and a
+ * shadow copy of the Bell FIFO Count that can be read without emptying the FIFO. This register must
+ * be read prior to SRIO(0,2..3)_RX_BELL to guarantee that the information corresponds to the correct
+ * doorbell.
+ *
+ * Clk_Rst: SRIO(0,2..3)_RX_BELL_SEQ hclk hrst_n
+ */
+union cvmx_sriox_rx_bell_seq {
+ uint64_t u64;
+ struct cvmx_sriox_rx_bell_seq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t count : 8; /**< RX Bell FIFO Count
+ Note: Count must be > 0 for entry to be valid. */
+ uint64_t seq : 32; /**< 32-bit Sequence \# associated with Doorbell Message */
+#else
+ uint64_t seq : 32;
+ uint64_t count : 8;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_sriox_rx_bell_seq_s cn63xx;
+ struct cvmx_sriox_rx_bell_seq_s cn63xxp1;
+ struct cvmx_sriox_rx_bell_seq_s cn66xx;
+};
+typedef union cvmx_sriox_rx_bell_seq cvmx_sriox_rx_bell_seq_t;
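+
+/* Usage sketch (illustration only): per the notes above, the sequence
+ * register must be read before SRIO*_RX_BELL so the two reads describe the
+ * same doorbell; reading the bell first would pop the FIFO and desynchronize
+ * them. Macro names are assumed as elsewhere in these sketches. */
+static inline int cvmx_sriox_example_bell_with_seq(int block, uint32_t *seq_out,
+ cvmx_sriox_rx_bell_t *bell)
+{
+ cvmx_sriox_rx_bell_seq_t seq_reg;
+
+ seq_reg.u64 = cvmx_read_csr(CVMX_SRIOX_RX_BELL_SEQ(block));
+ if (seq_reg.s.count == 0)
+ return -1; /* FIFO empty; nothing popped */
+ *seq_out = seq_reg.s.seq;
+ bell->u64 = cvmx_read_csr(CVMX_SRIOX_RX_BELL(block)); /* pops the entry */
+ return 0;
+}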
+
+/**
+ * cvmx_srio#_rx_status
+ *
+ * SRIO_RX_STATUS = SRIO Inbound Credits/Response Status
+ *
+ * Specifies the current number of credits/responses by SRIO for Inbound Traffic
+ *
+ * Notes:
+ * Debug Register specifying the number of credits/responses currently in use for Inbound Traffic.
+ * The maximum value for COMP, N_POST and POST is set in SRIO(0,2..3)_TLP_CREDITS. When all inbound traffic
+ * has stopped the values should eventually return to the maximum values. The RTN_PR[3:1] entry
+ * counts should eventually return to the reset values.
+ *
+ * Clk_Rst: SRIO(0,2..3)_RX_STATUS hclk hrst_n
+ */
+union cvmx_sriox_rx_status {
+ uint64_t u64;
+ struct cvmx_sriox_rx_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rtn_pr3 : 8; /**< Number of pending Priority 3 Response Entries. */
+ uint64_t rtn_pr2 : 8; /**< Number of pending Priority 2 Response Entries. */
+ uint64_t rtn_pr1 : 8; /**< Number of pending Priority 1 Response Entries. */
+ uint64_t reserved_28_39 : 12;
+ uint64_t mbox : 4; /**< Credits for Mailbox Data used in M2S. */
+ uint64_t comp : 8; /**< Credits for Read Completions used in M2S. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t n_post : 5; /**< Credits for Read Requests used in M2S. */
+ uint64_t post : 8; /**< Credits for Write Request Postings used in M2S. */
+#else
+ uint64_t post : 8;
+ uint64_t n_post : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t comp : 8;
+ uint64_t mbox : 4;
+ uint64_t reserved_28_39 : 12;
+ uint64_t rtn_pr1 : 8;
+ uint64_t rtn_pr2 : 8;
+ uint64_t rtn_pr3 : 8;
+#endif
+ } s;
+ struct cvmx_sriox_rx_status_s cn63xx;
+ struct cvmx_sriox_rx_status_s cn63xxp1;
+ struct cvmx_sriox_rx_status_s cn66xx;
+};
+typedef union cvmx_sriox_rx_status cvmx_sriox_rx_status_t;
+
+/**
+ * cvmx_srio#_s2m_type#
+ *
+ * SRIO_S2M_TYPE[0:15] = SLI to SRIO MAC Operation Type
+ *
+ * SRIO Operation Type selected by PP or DMA Accesses
+ *
+ * Notes:
+ * This CSR table specifies how to convert a SLI/DPI MAC read or write into sRIO operations.
+ * Each SLI/DPI read or write access supplies a 64-bit address (MACADD[63:0]), 2-bit ADDRTYPE, and
+ * 2-bit endian-swap. This SRIO*_S2M_TYPE* CSR description specifies a table with 16 CSRs. SRIO
+ * selects one of the table entries with TYPEIDX[3:0], which it creates from the SLI/DPI MAC memory
+ * space read or write as follows:
+ * TYPEIDX[1:0] = ADDRTYPE[1:0] (ADDRTYPE[1] is no-snoop to the PCIe MAC,
+ * ADDRTYPE[0] is relaxed-ordering to the PCIe MAC)
+ * TYPEIDX[2] = MACADD[50]
+ * TYPEIDX[3] = MACADD[59]
+ *
+ * Clk_Rst: SRIO(0,2..3)_S2M_TYPE[0:15] hclk hrst_n
+ */
+union cvmx_sriox_s2m_typex {
+ uint64_t u64;
+ struct cvmx_sriox_s2m_typex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t wr_op : 3; /**< sRIO operation for SLI/DPI writes
+
+ SLI/DPI hardware breaks MAC memory space writes
+ that it generates into pieces of maximum size
+ 256B. For NWRITE/NWRITE_R/SWRITE WR_OP variants
+ below, SRIO will, if necessary to obey sRIO
+ requirements, automatically break the write into
+ even smaller writes. The same is not true for
+ MAINTENANCE writes and port-writes. Additional
+ SW/usage restrictions are required for these
+ MAINTENANCE WR_OP's to work correctly. SW must
+ restrict the alignment and length of DPI pointers,
+ limit the store sizes that the cores issue, and
+ possibly also set SLI_MEM_ACCESS_SUBID*[NMERGE]
+ so that all MAC memory space writes with
+ MAINTENANCE write and port-write WR_OP's can be
+ serviced in a single sRIO operation.
+
+ SRIO always sends the write data (64-bit) words
+ out in order.
+
+ WR_OP = 0 = Normal Write (NWRITE)
+ SRIO breaks a MAC memory space write into
+ the minimum number of required sRIO NWRITE
+ operations. This will be 1-5 total NWRITEs,
+ depending on endian-swap, alignment, and
+ length.
+
+ WR_OP = 1 = Normal Write w/Response (NWRITE_R)
+ SRIO breaks a MAC memory space write into
+ the minimum number of required sRIO
+ NWRITE_R operations. This will be 1-5 total
+ NWRITE_R's, depending on endian-swap,
+ alignment, and length.
+
+ SRIO sets SRIO*_INT_REG[WR_DONE] after it
+ receives the DONE response for the last
+ NWRITE_R sent.
+
+ WR_OP = 2 = NWRITE, Streaming write (SWRITE),
+ NWRITE
+ SRIO attempts to turn the MAC memory space
+ write into an SWRITE operation. There will
+ be 1-5 total sRIO operations (0-2 NWRITE's
+ followed by 0-1 SWRITE's followed by 0-2
+ NWRITE's) generated to complete the MAC
+ memory space write, depending on
+ endian-swap, alignment, and length.
+
+ If the starting address is not 64-bit
+ aligned, SRIO first creates 1-4 NWRITE's to
+ either align it or complete the write. Then
+ SRIO creates a SWRITE including all aligned
+ 64-bit words. (SRIO won't create an SWRITE
+ when there are none.) If store data
+ remains, SRIO finally creates another 1 or
+ 2 NWRITE's.
+
+ WR_OP = 3 = NWRITE, SWRITE, NWRITE_R
+ SRIO attempts to turn the MAC memory space
+ write into an SWRITE operation followed by
+ a NWRITE_R operation. The last operation
+ is always NWRITE_R. There will be 1-5
+ total sRIO operations (0-2 NWRITE's,
+ followed by 0-1 SWRITE, followed by 1-4
+ NWRITE_R's) generated to service the MAC
+ memory space write, depending on
+ endian-swap, alignment, and length.
+
+ If the write is contained in one aligned
+ 64-bit word, SRIO will completely service
+ the MAC memory space write with 1-4
+ NWRITE_R's.
+
+ Otherwise, if the write spans multiple
+ words, SRIO services the write as follows.
+ First, if the start of the write is not
+ word-aligned, SRIO creates 1 or 2 NWRITE's
+ to align it. Then SRIO creates an SWRITE
+ that includes all aligned 64-bit words,
+ leaving data for the final NWRITE_R(s).
+ (SRIO won't create the SWRITE when there is
+ no data for it.) Then SRIO finally creates
+ 1 or 2 NWRITE_R's.
+
+ In any case, SRIO sets
+ SRIO*_INT_REG[WR_DONE] after it receives
+ the DONE response for the last NWRITE_R
+ sent.
+
+ WR_OP = 4 = NWRITE, NWRITE_R
+ SRIO attempts to turn the MAC memory space
+ write into an NWRITE operation followed by
+ a NWRITE_R operation. The last operation
+ is always NWRITE_R. There will be 1-5
+ total sRIO operations (0-3 NWRITE's
+ followed by 1-4 NWRITE_R's) generated to
+ service the MAC memory space write,
+ depending on endian-swap, alignment, and
+ length.
+
+ If the write is contained in one aligned
+ 64-bit word, SRIO will completely service
+ the MAC memory space write with 1-4
+ NWRITE_R's.
+
+ Otherwise, if the write spans multiple
+ words, SRIO services the write as follows.
+ First, if the start of the write is not
+ word-aligned, SRIO creates 1 or 2 NWRITE's
+ to align it. Then SRIO creates an NWRITE
+ that includes all aligned 64-bit words,
+ leaving data for the final NWRITE_R(s).
+ (SRIO won't create this NWRITE when there
+ is no data for it.) Then SRIO finally
+ creates 1 or 2 NWRITE_R's.
+
+ In any case, SRIO sets
+ SRIO*_INT_REG[WR_DONE] after it receives
+ the DONE response for the last NWRITE_R
+ sent.
+
+ WR_OP = 5 = Reserved
+
+ WR_OP = 6 = Maintenance Write
+ - SRIO will create one sRIO MAINTENANCE write
+ operation to service the MAC memory space
+ write
+ - IAOW_SEL must be zero. (see description
+ below.)
+ - MDS must be zero. (MDS is MACADD[63:62] -
+ see IAOW_SEL description below.)
+ - Hop Cnt is MACADD[31:24]/SRIOAddress[31:24]
+ - MACADD[23:0]/SRIOAddress[23:0] selects
+ maintenance register (i.e. config_offset)
+ - sRIODestID[15:0] is MACADD[49:34].
+ (MACADD[49:42] unused when ID16=0)
+ - Write size/alignment must obey sRIO rules
+ (4, 8, 16, 24, 32, 40, 48, 56 and 64 byte
+ lengths allowed)
+
+ WR_OP = 7 = Maintenance Port Write
+ - SRIO will create one sRIO MAINTENANCE port
+ write operation to service the MAC memory
+ space write
+ - IAOW_SEL must be zero. (see description
+ below.)
+ - MDS must be zero. (MDS is MACADD[63:62] -
+ see IAOW_SEL description below.)
+ - Hop Cnt is MACADD[31:24]/sRIOAddress[31:24]
+ - MACADD[23:0]/sRIOAddress[23:0] MBZ
+ (config_offset field reserved by sRIO)
+ - sRIODestID[15:0] is MACADD[49:34].
+ (MACADD[49:42] unused when ID16=0)
+ - Write size/alignment must obey sRIO rules
+ (4, 8, 16, 24, 32, 40, 48, 56 and 64 byte
+ lengths allowed) */
+ uint64_t reserved_15_15 : 1;
+ uint64_t rd_op : 3; /**< sRIO operation for SLI/DPI reads
+
+ SLI/DPI hardware and sRIO configuration
+ restrictions guarantee that SRIO can service any
+ MAC memory space read that it receives from SLI/DPI
+ with a single NREAD, assuming that RD_OP selects
+ NREAD. DPI will break a read into multiple MAC
+ memory space reads to ensure this holds. The same
+ is not true for the ATOMIC and MAINTENANCE RD_OP
+ values. Additional SW/usage restrictions are
+ required for ATOMIC and MAINTENANCE RD_OP to work
+ correctly. SW must restrict the alignment and
+ length of DPI pointers and limit the load sizes
+ that the cores issue such that all MAC memory space
+ reads with ATOMIC and MAINTENANCE RD_OP's can be
+ serviced in a single sRIO operation.
+
+ RD_OP = 0 = Normal Read (NREAD)
+ - SRIO will create one sRIO NREAD
+ operation to service the MAC memory
+ space read
+ - Read size/alignment must obey sRIO rules
+ (up to 256 byte lengths). (This requirement
+ is guaranteed by SLI/DPI usage restrictions
+ and configuration.)
+
+ RD_OP = 1 = Reserved
+
+ RD_OP = 2 = Atomic Set
+ - SRIO will create one sRIO ATOMIC set
+ operation to service the MAC memory
+ space read
+ - Read size/alignment must obey sRIO rules
+ (1, 2, and 4 byte lengths allowed)
+
+ RD_OP = 3 = Atomic Clear
+ - SRIO will create one sRIO ATOMIC clr
+ operation to service the MAC memory
+ space read
+ - Read size/alignment must obey sRIO rules
+ (1, 2, and 4 byte lengths allowed)
+
+ RD_OP = 4 = Atomic Increment
+ - SRIO will create one sRIO ATOMIC inc
+ operation to service the MAC memory
+ space read
+ - Read size/alignment must obey sRIO rules
+ (1, 2, and 4 byte lengths allowed)
+
+ RD_OP = 5 = Atomic Decrement
+ - SRIO will create one sRIO ATOMIC dec
+ operation to service the MAC memory
+ space read
+ - Read size/alignment must obey sRIO rules
+ (1, 2, and 4 byte lengths allowed)
+
+ RD_OP = 6 = Maintenance Read
+ - SRIO will create one sRIO MAINTENANCE read
+ operation to service the MAC memory
+ space read
+ - IAOW_SEL must be zero. (see description
+ below.)
+ - MDS must be zero. (MDS is MACADD[63:62] -
+ see IAOW_SEL description below.)
+ - Hop Cnt is MACADD[31:24]/sRIOAddress[31:24]
+ - MACADD[23:0]/sRIOAddress[23:0] selects
+ maintenance register (i.e. config_offset)
+ - sRIODestID[15:0] is MACADD[49:34].
+ (MACADD[49:42] unused when ID16=0)
+ - Read size/alignment must obey sRIO rules
+ (4, 8, 16, 32 and 64 byte lengths allowed)
+
+ RD_OP = 7 = Reserved */
+ uint64_t wr_prior : 2; /**< Transaction Priority 0-3 used for writes */
+ uint64_t rd_prior : 2; /**< Transaction Priority 0-3 used for reads/ATOMICs */
+ uint64_t reserved_6_7 : 2;
+ uint64_t src_id : 1; /**< Source ID
+
+ 0 = Use Primary ID as Source ID
+ (SRIOMAINT*_PRI_DEV_ID[ID16 or ID8], depending
+ on SRIO TT ID (i.e. ID16 below))
+
+ 1 = Use Secondary ID as Source ID
+ (SRIOMAINT*_SEC_DEV_ID[ID16 or ID8], depending
+ on SRIO TT ID (i.e. ID16 below)) */
+ uint64_t id16 : 1; /**< SRIO TT ID 0=8bit, 1=16-bit
+ IAOW_SEL must not be 2 when ID16=1. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t iaow_sel : 2; /**< Internal Address Offset Width Select
+
+ IAOW_SEL determines how to convert the
+                                                         MACADD[63:62,58:51,49:0] received from SLI/DPI with
+ read/write into an sRIO address (sRIOAddress[...])
+ and sRIO destination ID (sRIODestID[...]). The sRIO
+ address width mode (SRIOMAINT_PE_LLC[EX_ADDR]) and
+ ID16, determine the width of the sRIO address and
+ ID in the outgoing request(s), respectively.
+
+ MACADD[61:60] is always unused.
+
+ MACADD[59] is always TYPEIDX[3]
+ MACADD[50] is always TYPEIDX[2]
+ (TYPEIDX[3:0] selects one of these
+ SRIO*_S2M_TYPE* table entries.)
+
+ MACADD[17:0] always becomes sRIOAddress[17:0].
+
+ IAOW_SEL = 0 = 34-bit Address Offset
+
+ Must be used when sRIO link is in 34-bit
+ address width mode.
+ When sRIO is in 50-bit address width mode,
+ sRIOAddress[49:34]=0 in the outgoing request.
+ When sRIO is in 66-bit address width mode,
+ sRIOAddress[65:34]=0 in the outgoing request.
+
+ Usage of the SLI/DPI MAC address when
+ IAOW_SEL = 0:
+ MACADD[63:62] = Multi-Device Swap (MDS)
+ MDS value affects MACADD[49:18] usage
+ MACADD[58:51] => unused
+ MACADD[49:18] usage depends on MDS value
+ MDS = 0
+ MACADD[49:34] => sRIODestID[15:0]
+ (MACADD[49:42] unused when ID16=0)
+ MACADD[33:18] => sRIOAddress[33:18]
+ MDS = 1
+ MACADD[49:42] => sRIODestID[15:8]
+ (MACADD[49:42] unused when ID16 = 0)
+ MACADD[41:34] => sRIOAddress[33:26]
+ MACADD[33:26] => sRIODestID[7:0]
+ MACADD[25:18] => sRIOAddress[25:18]
+ MDS = 2
+ ID16 must be one.
+ MACADD[49:34] => sRIOAddress[33:18]
+ MACADD[33:18] => sRIODestID[15:0]
+ MDS = 3 = Reserved
+
+ IAOW_SEL = 1 = 42-bit Address Offset
+
+ Must not be used when sRIO link is in 34-bit
+ address width mode.
+ When sRIO is in 50-bit address width mode,
+ sRIOAddress[49:42]=0 in the outgoing request.
+ When sRIO is in 66-bit address width mode,
+ sRIOAddress[65:42]=0 in the outgoing request.
+
+ Usage of the SLI/DPI MAC address when
+ IAOW_SEL = 1:
+ MACADD[63:62] => Multi-Device Swap (MDS)
+ MDS value affects MACADD[58:51,49:42,33:18]
+ use
+ MACADD[41:34] => sRIOAddress[41:34]
+ MACADD[58:51,49:42,33:18] usage depends on
+ MDS value:
+ MDS = 0
+ MACADD[58:51] => sRIODestID[15:8]
+ MACADD[49:42] => sRIODestID[7:0]
+ (MACADD[58:51] unused when ID16=0)
+ MACADD[33:18] => sRIOAddress[33:18]
+ MDS = 1
+ MACADD[58:51] => sRIODestID[15:8]
+ (MACADD[58:51] unused when ID16 = 0)
+ MACADD[49:42] => sRIOAddress[33:26]
+ MACADD[33:26] => sRIODestID[7:0]
+ MACADD[25:18] => sRIOAddress[25:18]
+ MDS = 2
+ ID16 must be one.
+ MACADD[58:51] => sRIOAddress[33:26]
+ MACADD[49:42] => sRIOAddress[25:18]
+ MACADD[33:18] => sRIODestID[15:0]
+ MDS = 3 = Reserved
+
+ IAOW_SEL = 2 = 50-bit Address Offset
+
+ Must not be used when sRIO link is in 34-bit
+ address width mode.
+ Must not be used when ID16=1.
+ When sRIO is in 66-bit address width mode,
+ sRIOAddress[65:50]=0 in the outgoing request.
+
+ Usage of the SLI/DPI MAC address when
+ IAOW_SEL = 2:
+ MACADD[63:62] => Multi-Device Swap (MDS)
+ MDS value affects MACADD[58:51,33:26] use
+ MDS value 3 is reserved
+ MACADD[49:34] => sRIOAddress[49:34]
+ MACADD[25:18] => sRIOAddress[25:18]
+ MACADD[58:51,33:26] usage depends on
+ MDS value:
+ MDS = 0
+ MACADD[58:51] => sRIODestID[7:0]
+ MACADD[33:26] => sRIOAddress[33:26]
+ MDS = 1
+ MACADD[58:51] => sRIOAddress[33:26]
+ MACADD[33:26] => sRIODestID[7:0]
+ MDS = 2 = Reserved
+ MDS = 3 = Reserved
+
+ IAOW_SEL = 3 = Reserved */
+#else
+ uint64_t iaow_sel : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t id16 : 1;
+ uint64_t src_id : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t rd_prior : 2;
+ uint64_t wr_prior : 2;
+ uint64_t rd_op : 3;
+ uint64_t reserved_15_15 : 1;
+ uint64_t wr_op : 3;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_sriox_s2m_typex_s cn63xx;
+ struct cvmx_sriox_s2m_typex_s cn63xxp1;
+ struct cvmx_sriox_s2m_typex_s cn66xx;
+};
+typedef union cvmx_sriox_s2m_typex cvmx_sriox_s2m_typex_t;
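+
+/* Example (editor's sketch, not part of the generated SDK sources): program
+ * S2M type entry 0 for plain NREAD/NWRITE transfers using the 34-bit
+ * address offset mapping described above (IAOW_SEL = 0, MDS = 0, so
+ * MACADD[49:34] supplies sRIODestID[15:0] and MACADD[33:0] supplies
+ * sRIOAddress[33:0]). Assumes the CVMX_SRIOX_S2M_TYPEX(index, block_id)
+ * address macro defined earlier in this header and the generic
+ * cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h. */
+static inline void cvmx_example_sriox_s2m_type0(int block_id)
+{
+	cvmx_sriox_s2m_typex_t s2m;
+
+	s2m.u64 = cvmx_read_csr(CVMX_SRIOX_S2M_TYPEX(0, block_id));
+	s2m.s.wr_op = 0;	/* Normal Write (NWRITE) */
+	s2m.s.rd_op = 0;	/* Normal Read (NREAD) */
+	s2m.s.iaow_sel = 0;	/* 34-bit address offset mode */
+	s2m.s.id16 = 1;		/* 16-bit sRIO device IDs */
+	cvmx_write_csr(CVMX_SRIOX_S2M_TYPEX(0, block_id), s2m.u64);
+}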
+
+/**
+ * cvmx_srio#_seq
+ *
+ * SRIO_SEQ = SRIO Sequence Count
+ *
+ * The SRIO Sequence Count
+ *
+ * Notes:
+ * This register contains the current value of the sequence counter. This counter increments every
+ * time a doorbell or the first segment of a message is accepted.
+ *
+ * Clk_Rst: SRIO(0,2..3)_SEQ hclk hrst_n
+ */
+union cvmx_sriox_seq {
+ uint64_t u64;
+ struct cvmx_sriox_seq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+	uint64_t seq                          : 32; /**< 32-bit Sequence # */
+#else
+ uint64_t seq : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sriox_seq_s cn63xx;
+ struct cvmx_sriox_seq_s cn63xxp1;
+ struct cvmx_sriox_seq_s cn66xx;
+};
+typedef union cvmx_sriox_seq cvmx_sriox_seq_t;
+
+/**
+ * cvmx_srio#_status_reg
+ *
+ * 13e20 reserved
+ *
+ *
+ * SRIO_STATUS_REG = SRIO Status Register
+ *
+ * General status of the SRIO.
+ *
+ * Notes:
+ * The SRIO field indicates whether the port has been configured for SRIO operation. This register can be
+ * read regardless of whether the SRIO is selected or being reset. Although some other registers can
+ * be accessed while the ACCESS bit is zero (see individual registers for details), the majority of
+ * SRIO registers and all the SRIOMAINT registers can be used only when the ACCESS bit is asserted.
+ *
+ * Clk_Rst: SRIO(0,2..3)_STATUS_REG sclk srst_n
+ */
+union cvmx_sriox_status_reg {
+ uint64_t u64;
+ struct cvmx_sriox_status_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t access : 1; /**< SRIO and SRIOMAINT Register Access.
+ 0 - Register Access Disabled.
+ 1 - Register Access Enabled. */
+ uint64_t srio : 1; /**< SRIO Port Enabled.
+ 0 - All SRIO functions disabled.
+ 1 - All SRIO Operations permitted. */
+#else
+ uint64_t srio : 1;
+ uint64_t access : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_sriox_status_reg_s cn63xx;
+ struct cvmx_sriox_status_reg_s cn63xxp1;
+ struct cvmx_sriox_status_reg_s cn66xx;
+};
+typedef union cvmx_sriox_status_reg cvmx_sriox_status_reg_t;
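+
+/* Example (editor's sketch, not part of the generated SDK sources): per the
+ * note above, most SRIO registers and all SRIOMAINT registers are usable
+ * only while STATUS_REG[ACCESS] is set, so poll it before touching them.
+ * Assumes the CVMX_SRIOX_STATUS_REG(block_id) address macro defined earlier
+ * in this header and cvmx_read_csr() from cvmx.h. */
+static inline int cvmx_example_sriox_wait_access(int block_id)
+{
+	cvmx_sriox_status_reg_t status;
+	int retries = 1000;	/* arbitrary bound for this sketch */
+
+	do {
+		status.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(block_id));
+		if (status.s.access)
+			return 0;	/* register access enabled */
+	} while (--retries > 0);
+	return -1;	/* ACCESS never asserted */
+}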
+
+/**
+ * cvmx_srio#_tag_ctrl
+ *
+ * SRIO_TAG_CTRL = SRIO TAG Control
+ *
+ * The SRIO TAG Control
+ *
+ * Notes:
+ * This register is used to show the state of the internal transaction tags and provides a manual
+ * reset of the outgoing tags.
+ *
+ * Clk_Rst: SRIO(0,2..3)_TAG_CTRL hclk hrst_n
+ */
+union cvmx_sriox_tag_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_tag_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t o_clr : 1; /**< Manual OTAG Clear. This bit manually resets the
+ number of OTAGs back to 16 and loses track of any
+ outgoing packets. This function is automatically
+ performed when the SRIO MAC is reset but it may be
+ necessary after a chip reset while the MAC is in
+ operation. This bit must be set then cleared to
+                                                         return to normal operation. Typically, outgoing
+                                                         SRIO packets must be halted for 6 seconds before
+                                                         this bit is set, to avoid generating duplicate
+                                                         tags and unexpected response errors. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t otag : 5; /**< Number of Available Outbound Tags. Tags are
+ required for all outgoing memory and maintenance
+ operations that require a response. (Max 16) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t itag : 5; /**< Number of Available Inbound Tags. Tags are
+ required for all incoming memory operations that
+ require a response. (Max 16) */
+#else
+ uint64_t itag : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t otag : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t o_clr : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_sriox_tag_ctrl_s cn63xx;
+ struct cvmx_sriox_tag_ctrl_s cn63xxp1;
+ struct cvmx_sriox_tag_ctrl_s cn66xx;
+};
+typedef union cvmx_sriox_tag_ctrl cvmx_sriox_tag_ctrl_t;
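+
+/* Example (editor's sketch, not part of the generated SDK sources): the
+ * manual OTAG reset sequence described above -- O_CLR must be set and then
+ * cleared, and the caller is expected to have halted outgoing SRIO packets
+ * 6 seconds beforehand to avoid duplicate tags. Assumes the
+ * CVMX_SRIOX_TAG_CTRL(block_id) address macro defined earlier in this
+ * header. */
+static inline void cvmx_example_sriox_clear_otags(int block_id)
+{
+	cvmx_sriox_tag_ctrl_t tag_ctrl;
+
+	tag_ctrl.u64 = cvmx_read_csr(CVMX_SRIOX_TAG_CTRL(block_id));
+	tag_ctrl.s.o_clr = 1;	/* reset available OTAGs back to 16 */
+	cvmx_write_csr(CVMX_SRIOX_TAG_CTRL(block_id), tag_ctrl.u64);
+	tag_ctrl.s.o_clr = 0;	/* must clear to resume normal operation */
+	cvmx_write_csr(CVMX_SRIOX_TAG_CTRL(block_id), tag_ctrl.u64);
+}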
+
+/**
+ * cvmx_srio#_tlp_credits
+ *
+ * SRIO_TLP_CREDITS = SRIO TLP Credits
+ *
+ * Specifies the number of credits the SRIO can use for incoming Commands and Messages.
+ *
+ * Notes:
+ * Specifies the maximum number of credits the SRIO can use for incoming Commands and Messages.
+ * Reset values for COMP, N_POST and POST credits are based on the number of lanes allocated by the
+ * QLM Configuration to the SRIO MAC and whether QLM1 is used by PCIe. If SRIO MACs are unused then
+ * credits may be allocated to other MACs under some circumstances. The following table shows the
+ * reset values for COMP/N_POST/POST:
+ * QLM0_CFG QLM1_CFG SRIO0 SRIO2 SRIO3
+ * ======================================================
+ * PEM Any 0/0/0 0/0/0 0/0/0
+ * SRIO x4 Any 128/16/128 0/0/0 0/0/0
+ * SRIO x2 PEM 64/8/64 64/8/64 0/0/0
+ * SRIO x2 non-PEM 128/16/128 128/16/128 0/0/0
+ * SRIO x1 PEM 42/5/42 42/5/42 42/5/42
+ * SRIO x1 non-PEM 64/8/64 64/8/64 64/8/64
+ *
+ * Clk_Rst: SRIO(0,2..3)_TLP_CREDITS hclk hrst_n
+ */
+union cvmx_sriox_tlp_credits {
+ uint64_t u64;
+ struct cvmx_sriox_tlp_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63 : 36;
+ uint64_t mbox : 4; /**< Credits for Mailbox Data used in M2S.
+ Legal values are 0x2 to 0x8. */
+ uint64_t comp : 8; /**< Credits for Read Completions used in M2S.
+ Legal values are 0x22 to 0x80. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t n_post : 5; /**< Credits for Read Requests used in M2S.
+ Legal values are 0x4 to 0x10. */
+ uint64_t post : 8; /**< Credits for Write Request Postings used in M2S.
+ Legal values are 0x22 to 0x80. */
+#else
+ uint64_t post : 8;
+ uint64_t n_post : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t comp : 8;
+ uint64_t mbox : 4;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_sriox_tlp_credits_s cn63xx;
+ struct cvmx_sriox_tlp_credits_s cn63xxp1;
+ struct cvmx_sriox_tlp_credits_s cn66xx;
+};
+typedef union cvmx_sriox_tlp_credits cvmx_sriox_tlp_credits_t;
+
+/**
+ * cvmx_srio#_tx_bell
+ *
+ * SRIO_TX_BELL = SRIO Transmit Doorbell
+ *
+ * The SRIO Outgoing (TX) Doorbell
+ *
+ * Notes:
+ * This register specifies SRIO Information, Device ID, Transaction Type and Priority of the outgoing
+ * Doorbell Transaction. A write to this register causes the Doorbell to be issued using these bits
+ * and also sets the PENDING bit. The hardware automatically clears the bit when the Doorbell
+ * operation has been acknowledged. Avoid writing to this register while the PENDING bit is set,
+ * as the write will stall the RSL until the first Doorbell has completed.
+ *
+ * Clk_Rst: SRIO(0,2..3)_TX_BELL hclk hrst_n
+ */
+union cvmx_sriox_tx_bell {
+ uint64_t u64;
+ struct cvmx_sriox_tx_bell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t data : 16; /**< Information field for next doorbell operation */
+ uint64_t dest_id : 16; /**< Doorbell Destination Device ID[15:0] */
+ uint64_t reserved_9_15 : 7;
+ uint64_t pending : 1; /**< Doorbell Transmit in Progress */
+ uint64_t reserved_5_7 : 3;
+ uint64_t src_id : 1; /**< Source Device ID 0=Primary, 1=Secondary */
+ uint64_t id16 : 1; /**< Transaction Type, 0=use ID[7:0], 1=use ID[15:0] */
+ uint64_t reserved_2_2 : 1;
+ uint64_t priority : 2; /**< Doorbell Priority */
+#else
+ uint64_t priority : 2;
+ uint64_t reserved_2_2 : 1;
+ uint64_t id16 : 1;
+ uint64_t src_id : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pending : 1;
+ uint64_t reserved_9_15 : 7;
+ uint64_t dest_id : 16;
+ uint64_t data : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_sriox_tx_bell_s cn63xx;
+ struct cvmx_sriox_tx_bell_s cn63xxp1;
+ struct cvmx_sriox_tx_bell_s cn66xx;
+};
+typedef union cvmx_sriox_tx_bell cvmx_sriox_tx_bell_t;
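+
+/* Example (editor's sketch, not part of the generated SDK sources): issue a
+ * doorbell, first waiting for any previous one to finish, since the note
+ * above warns that writing while PENDING is set stalls the RSL. Assumes the
+ * CVMX_SRIOX_TX_BELL(block_id) address macro defined earlier in this
+ * header. */
+static inline void cvmx_example_sriox_send_bell(int block_id, int dest_id,
+						int info)
+{
+	cvmx_sriox_tx_bell_t bell;
+
+	do {
+		bell.u64 = cvmx_read_csr(CVMX_SRIOX_TX_BELL(block_id));
+	} while (bell.s.pending);	/* avoid stalling the RSL */
+
+	bell.u64 = 0;
+	bell.s.data = info;	/* 16-bit doorbell information field */
+	bell.s.dest_id = dest_id;
+	bell.s.id16 = 1;	/* 16-bit device IDs */
+	bell.s.priority = 0;
+	cvmx_write_csr(CVMX_SRIOX_TX_BELL(block_id), bell.u64);
+}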
+
+/**
+ * cvmx_srio#_tx_bell_info
+ *
+ * SRIO_TX_BELL_INFO = SRIO Transmit Doorbell Interrupt Information
+ *
+ * The SRIO Outgoing (TX) Doorbell Interrupt Information
+ *
+ * Notes:
+ * This register is only updated if the BELL_ERR bit is clear in SRIO(0,2..3)_INT_REG. This register
+ * displays SRIO Information, Device ID, Transaction Type and Priority of the Doorbell Transaction
+ * that generated the BELL_ERR Interrupt. The register includes either a RETRY, ERROR or TIMEOUT
+ * Status.
+ *
+ * Clk_Rst: SRIO(0,2..3)_TX_BELL_INFO hclk hrst_n
+ */
+union cvmx_sriox_tx_bell_info {
+ uint64_t u64;
+ struct cvmx_sriox_tx_bell_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t data : 16; /**< Information field from last doorbell operation */
+ uint64_t dest_id : 16; /**< Doorbell Destination Device ID[15:0] */
+ uint64_t reserved_8_15 : 8;
+ uint64_t timeout : 1; /**< Transmit Doorbell Failed with Timeout. */
+ uint64_t error : 1; /**< Transmit Doorbell Destination returned Error. */
+ uint64_t retry : 1; /**< Transmit Doorbell Requests a retransmission. */
+ uint64_t src_id : 1; /**< Source Device ID 0=Primary, 1=Secondary */
+ uint64_t id16 : 1; /**< Transaction Type, 0=use ID[7:0], 1=use ID[15:0] */
+ uint64_t reserved_2_2 : 1;
+ uint64_t priority : 2; /**< Doorbell Priority */
+#else
+ uint64_t priority : 2;
+ uint64_t reserved_2_2 : 1;
+ uint64_t id16 : 1;
+ uint64_t src_id : 1;
+ uint64_t retry : 1;
+ uint64_t error : 1;
+ uint64_t timeout : 1;
+ uint64_t reserved_8_15 : 8;
+ uint64_t dest_id : 16;
+ uint64_t data : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_sriox_tx_bell_info_s cn63xx;
+ struct cvmx_sriox_tx_bell_info_s cn63xxp1;
+ struct cvmx_sriox_tx_bell_info_s cn66xx;
+};
+typedef union cvmx_sriox_tx_bell_info cvmx_sriox_tx_bell_info_t;
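+
+/* Example (editor's sketch, not part of the generated SDK sources): after a
+ * BELL_ERR interrupt, decode which of RETRY/ERROR/TIMEOUT ended the
+ * doorbell, per the note above. Assumes the CVMX_SRIOX_TX_BELL_INFO()
+ * address macro defined earlier in this header. */
+static inline int cvmx_example_sriox_bell_status(int block_id)
+{
+	cvmx_sriox_tx_bell_info_t info;
+
+	info.u64 = cvmx_read_csr(CVMX_SRIOX_TX_BELL_INFO(block_id));
+	if (info.s.timeout)
+		return -2;	/* no response within the timeout period */
+	if (info.s.error)
+		return -1;	/* destination returned ERROR */
+	if (info.s.retry)
+		return 1;	/* destination requested retransmission */
+	return 0;		/* no failure recorded */
+}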
+
+/**
+ * cvmx_srio#_tx_ctrl
+ *
+ * SRIO_TX_CTRL = SRIO Transmit Control
+ *
+ * The SRIO Transmit Control
+ *
+ * Notes:
+ * This register is used to control SRIO Outgoing Packet Allocation. TAG_TH[2:0] set the thresholds
+ * to allow priority traffic requiring responses to be queued based on the number of outgoing tags
+ * (TIDs) available. 16 Tags are available. If a priority is blocked for lack of tags then all
+ * lower priority packets are also blocked, regardless of whether they require tags.
+ *
+ * Clk_Rst: SRIO(0,2..3)_TX_CTRL hclk hrst_n
+ */
+union cvmx_sriox_tx_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_tx_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_53_63 : 11;
+ uint64_t tag_th2 : 5; /**< Sets threshold for minimum number of OTAGs
+ required before a packet of priority 2 requiring a
+ response will be queued for transmission. (Max 16)
+                                                         There generally should be no priority 3 request
+                                                         packets that require a response/tag, so a TAG_TH2
+                                                         value as low as 0 is allowed. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t tag_th1 : 5; /**< Sets threshold for minimum number of OTAGs
+ required before a packet of priority 1 requiring a
+ response will be queued for transmission. (Max 16)
+ Generally, TAG_TH1 must be > TAG_TH2 to leave OTAGs
+ for outgoing priority 2 (or 3) requests. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t tag_th0 : 5; /**< Sets threshold for minimum number of OTAGs
+ required before a packet of priority 0 requiring a
+ response will be queued for transmission. (Max 16)
+ Generally, TAG_TH0 must be > TAG_TH1 to leave OTAGs
+ for outgoing priority 1 or 2 (or 3) requests. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t tx_th2 : 4; /**< Reserved. (See SRIOMAINT(0,2..3)_IR_BUFFER_CONFIG2) */
+ uint64_t reserved_12_15 : 4;
+ uint64_t tx_th1 : 4; /**< Reserved. (See SRIOMAINT(0,2..3)_IR_BUFFER_CONFIG2) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t tx_th0 : 4; /**< Reserved. (See SRIOMAINT(0,2..3)_IR_BUFFER_CONFIG2) */
+#else
+ uint64_t tx_th0 : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t tx_th1 : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t tx_th2 : 4;
+ uint64_t reserved_20_31 : 12;
+ uint64_t tag_th0 : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t tag_th1 : 5;
+ uint64_t reserved_45_47 : 3;
+ uint64_t tag_th2 : 5;
+ uint64_t reserved_53_63 : 11;
+#endif
+ } s;
+ struct cvmx_sriox_tx_ctrl_s cn63xx;
+ struct cvmx_sriox_tx_ctrl_s cn63xxp1;
+ struct cvmx_sriox_tx_ctrl_s cn66xx;
+};
+typedef union cvmx_sriox_tx_ctrl cvmx_sriox_tx_ctrl_t;
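+
+/* Example (editor's sketch, not part of the generated SDK sources): program
+ * the OTAG thresholds with the ordering TAG_TH0 > TAG_TH1 > TAG_TH2 that
+ * the descriptions above recommend, so higher-priority requests always have
+ * tags left. The threshold values are illustrative only. Assumes the
+ * CVMX_SRIOX_TX_CTRL(block_id) address macro defined earlier in this
+ * header. */
+static inline void cvmx_example_sriox_tag_thresholds(int block_id)
+{
+	cvmx_sriox_tx_ctrl_t ctrl;
+
+	ctrl.u64 = cvmx_read_csr(CVMX_SRIOX_TX_CTRL(block_id));
+	ctrl.s.tag_th0 = 8;	/* priority 0 queued only with >= 8 OTAGs free */
+	ctrl.s.tag_th1 = 4;	/* priority 1 queued only with >= 4 OTAGs free */
+	ctrl.s.tag_th2 = 2;	/* priority 2 queued only with >= 2 OTAGs free */
+	cvmx_write_csr(CVMX_SRIOX_TX_CTRL(block_id), ctrl.u64);
+}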
+
+/**
+ * cvmx_srio#_tx_emphasis
+ *
+ * SRIO_TX_EMPHASIS = SRIO TX Lane Emphasis
+ *
+ * Controls TX Emphasis used by the SRIO SERDES
+ *
+ * Notes:
+ * This controls the emphasis value used by the SRIO SERDES. This register is only reset during COLD
+ * boot and may be modified regardless of the value in SRIO(0,2..3)_STATUS_REG.ACCESS. This register is not
+ * connected to the QLM and thus has no effect. It should not be included in the documentation.
+ *
+ * Clk_Rst: SRIO(0,2..3)_TX_EMPHASIS sclk srst_cold_n
+ */
+union cvmx_sriox_tx_emphasis {
+ uint64_t u64;
+ struct cvmx_sriox_tx_emphasis_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t emph : 4; /**< Emphasis Value used for all lanes. Default value
+                                                         is 0x0 for 1.25 Gb/s and 0xA for all other rates. */
+#else
+ uint64_t emph : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_sriox_tx_emphasis_s cn63xx;
+ struct cvmx_sriox_tx_emphasis_s cn66xx;
+};
+typedef union cvmx_sriox_tx_emphasis cvmx_sriox_tx_emphasis_t;
+
+/**
+ * cvmx_srio#_tx_status
+ *
+ * SRIO_TX_STATUS = SRIO Outbound Credits/Ops Status
+ *
+ * Specifies the number of credits/ops currently in use by SRIO for Outbound Traffic
+ *
+ * Notes:
+ * Debug Register specifying the number of credits/ops currently in use for Outbound Traffic.
+ * When all outbound traffic has stopped, the values should eventually return to the reset values.
+ *
+ * Clk_Rst: SRIO(0,2..3)_TX_STATUS hclk hrst_n
+ */
+union cvmx_sriox_tx_status {
+ uint64_t u64;
+ struct cvmx_sriox_tx_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t s2m_pr3 : 8; /**< Number of pending S2M Priority 3 Entries. */
+ uint64_t s2m_pr2 : 8; /**< Number of pending S2M Priority 2 Entries. */
+ uint64_t s2m_pr1 : 8; /**< Number of pending S2M Priority 1 Entries. */
+ uint64_t s2m_pr0 : 8; /**< Number of pending S2M Priority 0 Entries. */
+#else
+ uint64_t s2m_pr0 : 8;
+ uint64_t s2m_pr1 : 8;
+ uint64_t s2m_pr2 : 8;
+ uint64_t s2m_pr3 : 8;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sriox_tx_status_s cn63xx;
+ struct cvmx_sriox_tx_status_s cn63xxp1;
+ struct cvmx_sriox_tx_status_s cn66xx;
+};
+typedef union cvmx_sriox_tx_status cvmx_sriox_tx_status_t;
+
+/**
+ * cvmx_srio#_wr_done_counts
+ *
+ * SRIO_WR_DONE_COUNTS = SRIO Outgoing Write Done Counts
+ *
+ * The SRIO Outbound Write Done Counts
+ *
+ * Notes:
+ * This register shows the number of successful and unsuccessful NwriteRs issued through this MAC.
+ * These counts only consider the last NwriteR generated by each Store Instruction. If any NwriteR
+ * in the series receives an ERROR Status then it is reported in SRIOMAINT(0,2..3)_ERB_LT_ERR_DET.IO_ERR.
+ * If any NwriteR does not receive a response within the timeout period then it is reported in
+ * SRIOMAINT(0,2..3)_ERB_LT_ERR_DET.PKT_TOUT. Only errors on the last NwriteRs are counted as BAD. This
+ * register is typically not written while Outbound SRIO Memory traffic is enabled.
+ *
+ * Clk_Rst: SRIO(0,2..3)_WR_DONE_COUNTS hclk hrst_n
+ */
+union cvmx_sriox_wr_done_counts {
+ uint64_t u64;
+ struct cvmx_sriox_wr_done_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t bad : 16; /**< Count of the final outbound NwriteR in the series
+ associated with a Store Operation that have timed
+ out or received a response with an ERROR status. */
+ uint64_t good : 16; /**< Count of the final outbound NwriteR in the series
+ associated with a Store operation that has
+ received a response with a DONE status. */
+#else
+ uint64_t good : 16;
+ uint64_t bad : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sriox_wr_done_counts_s cn63xx;
+ struct cvmx_sriox_wr_done_counts_s cn66xx;
+};
+typedef union cvmx_sriox_wr_done_counts cvmx_sriox_wr_done_counts_t;
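+
+/* Example (editor's sketch, not part of the generated SDK sources): snapshot
+ * the NwriteR completion counters; only the final NwriteR of each Store
+ * Operation is counted, per the note above. Assumes the
+ * CVMX_SRIOX_WR_DONE_COUNTS(block_id) address macro defined earlier in this
+ * header. */
+static inline void cvmx_example_sriox_wr_counts(int block_id,
+						int *good, int *bad)
+{
+	cvmx_sriox_wr_done_counts_t counts;
+
+	counts.u64 = cvmx_read_csr(CVMX_SRIOX_WR_DONE_COUNTS(block_id));
+	*good = counts.s.good;	/* DONE responses received */
+	*bad = counts.s.bad;	/* timeouts or ERROR responses */
+}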
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-sriox-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-srxx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-srxx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-srxx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,364 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-srxx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon srxx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_SRXX_DEFS_H__
+#define __CVMX_SRXX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRXX_COM_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SRXX_COM_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000200ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SRXX_COM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000200ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRXX_IGN_RX_FULL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SRXX_IGN_RX_FULL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000218ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SRXX_IGN_RX_FULL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000218ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRXX_SPI4_CALX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 31)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 31)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_SRXX_SPI4_CALX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000000ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8;
+}
+#else
+#define CVMX_SRXX_SPI4_CALX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180090000000ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRXX_SPI4_STAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SRXX_SPI4_STAT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000208ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SRXX_SPI4_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000208ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRXX_SW_TICK_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SRXX_SW_TICK_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000220ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SRXX_SW_TICK_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000220ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SRXX_SW_TICK_DAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_SRXX_SW_TICK_DAT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000228ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_SRXX_SW_TICK_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000228ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+
+/**
+ * cvmx_srx#_com_ctl
+ *
+ * SRX_COM_CTL - Spi receive common control
+ *
+ *
+ * Notes:
+ * Restrictions:
+ * Both the calendar table and the LEN and M parameters must be completely
+ * set up before the Interface enable (INF_EN) and Status channel
+ * enable (ST_EN) bits are asserted.
+ */
+union cvmx_srxx_com_ctl {
+ uint64_t u64;
+ struct cvmx_srxx_com_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t prts : 4; /**< Number of ports in the receiver (write: ports - 1)
+ - 0: 1 port
+ - 1: 2 ports
+ - 2: 3 ports
+ - ...
+ - 15: 16 ports */
+ uint64_t st_en : 1; /**< Status channel enabled
+ This is to allow configs without a status channel.
+ This bit should not be modified once the
+ interface is enabled. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t inf_en : 1; /**< Interface enable
+ The master switch that enables the entire
+                                                         interface. SRX will not validate any data until
+ this bit is set. This bit should not be modified
+ once the interface is enabled. */
+#else
+ uint64_t inf_en : 1;
+ uint64_t reserved_1_2 : 2;
+ uint64_t st_en : 1;
+ uint64_t prts : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_srxx_com_ctl_s cn38xx;
+ struct cvmx_srxx_com_ctl_s cn38xxp2;
+ struct cvmx_srxx_com_ctl_s cn58xx;
+ struct cvmx_srxx_com_ctl_s cn58xxp1;
+};
+typedef union cvmx_srxx_com_ctl cvmx_srxx_com_ctl_t;
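+
+/* Example (editor's sketch, not part of the generated SDK sources): enable
+ * the Spi4 receive interface. Per the restriction above, the calendar table
+ * and the LEN and M parameters must already have been programmed (see
+ * SRX_SPI4_CAL and SRX_SPI4_STAT below) before INF_EN and ST_EN are
+ * asserted here. Uses the CVMX_SRXX_COM_CTL(block_id) macro defined earlier
+ * in this header. */
+static inline void cvmx_example_srxx_enable(int interface, int num_ports)
+{
+	cvmx_srxx_com_ctl_t com_ctl;
+
+	com_ctl.u64 = 0;
+	com_ctl.s.prts = num_ports - 1;	/* field holds ports - 1 */
+	com_ctl.s.st_en = 1;	/* status channel in use */
+	com_ctl.s.inf_en = 1;	/* master interface enable */
+	cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), com_ctl.u64);
+}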
+
+/**
+ * cvmx_srx#_ign_rx_full
+ *
+ * SRX_IGN_RX_FULL - Ignore RX FIFO backpressure
+ *
+ *
+ * Notes:
+ * * IGNORE
+ * If a device cannot or should not assert backpressure, then setting DROP
+ * will force STARVING status on the status channel for all ports. This
+ * eliminates any backpressure from N2.
+ *
+ * This implies that it is OK to drop packets when the FIFOs fill up.
+ *
+ * A side effect of this mode is that the TPA Watcher will effectively be
+ * disabled. Since the DROP mode forces all TPA lines asserted, the TPA
+ * Watcher will never find a cycle where the TPA for the selected port is
+ * deasserted in order to increment its count.
+ */
+union cvmx_srxx_ign_rx_full {
+ uint64_t u64;
+ struct cvmx_srxx_ign_rx_full_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t ignore : 16; /**< This port should ignore backpressure hints from
+ GMX when the RX FIFO fills up
+ - 0: Use GMX backpressure
+ - 1: Ignore GMX backpressure */
+#else
+ uint64_t ignore : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_srxx_ign_rx_full_s cn38xx;
+ struct cvmx_srxx_ign_rx_full_s cn38xxp2;
+ struct cvmx_srxx_ign_rx_full_s cn58xx;
+ struct cvmx_srxx_ign_rx_full_s cn58xxp1;
+};
+typedef union cvmx_srxx_ign_rx_full cvmx_srxx_ign_rx_full_t;
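+
+/* Example (editor's sketch, not part of the generated SDK sources): ignore
+ * GMX backpressure on a set of ports. Per the note above this forces
+ * STARVING status for those ports, allows packet drops when the FIFOs fill,
+ * and effectively disables the TPA Watcher for them. Uses the
+ * CVMX_SRXX_IGN_RX_FULL(block_id) macro defined earlier in this header. */
+static inline void cvmx_example_srxx_ignore_bp(int interface,
+					       uint64_t port_mask)
+{
+	cvmx_srxx_ign_rx_full_t ign;
+
+	ign.u64 = 0;
+	ign.s.ignore = port_mask & 0xffff;	/* one bit per port */
+	cvmx_write_csr(CVMX_SRXX_IGN_RX_FULL(interface), ign.u64);
+}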
+
+/**
+ * cvmx_srx#_spi4_cal#
+ *
+ * specify the RSL base addresses for the block
+ * SRX_SPI4_CAL - Spi4 Calendar table
+ * direct_calendar_write / direct_calendar_read
+ *
+ * Notes:
+ * There are 32 calendar table CSR's, each containing 4 entries for a
+ * total of 128 entries. In the above definition...
+ *
+ * n = calendar table offset * 4
+ *
+ * For example, offset 0x00 contains calendar table entries 0, 1, 2, and 3
+ * (with n == 0). Offset 0x10 is the CSR at index 16 in the calendar table
+ * and contains entries 16*4 = 64, 65, 66, and 67.
+ *
+ * Restrictions:
+ * Calendar table entry accesses (read or write) can only occur
+ * if the interface is disabled. All other accesses will be
+ * unpredictable.
+ *
+ * Both the calendar table and the LEN and M parameters must be
+ * completely set up before the Interface enable (INF_EN) and
+ * Status channel enable (ST_EN) bits are asserted.
+ */
+union cvmx_srxx_spi4_calx {
+ uint64_t u64;
+ struct cvmx_srxx_spi4_calx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t oddpar : 1; /**< Odd parity over SRX_SPI4_CAL[15:0]
+                                                         (^SRX_SPI4_CAL[16:0] === 1'b1) */
+ uint64_t prt3 : 4; /**< Status for port n+3 */
+ uint64_t prt2 : 4; /**< Status for port n+2 */
+ uint64_t prt1 : 4; /**< Status for port n+1 */
+ uint64_t prt0 : 4; /**< Status for port n+0 */
+#else
+ uint64_t prt0 : 4;
+ uint64_t prt1 : 4;
+ uint64_t prt2 : 4;
+ uint64_t prt3 : 4;
+ uint64_t oddpar : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_srxx_spi4_calx_s cn38xx;
+ struct cvmx_srxx_spi4_calx_s cn38xxp2;
+ struct cvmx_srxx_spi4_calx_s cn58xx;
+ struct cvmx_srxx_spi4_calx_s cn58xxp1;
+};
+typedef union cvmx_srxx_spi4_calx cvmx_srxx_spi4_calx_t;
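+
+/* Example (editor's sketch, not part of the generated SDK sources): write
+ * one calendar entry using the n = offset * 4 mapping described above (so
+ * entry 65 lands in PRT1 of the CSR at offset 16). Per the restriction
+ * above this may only be done while the interface is disabled. Uses the
+ * CVMX_SRXX_SPI4_CALX(offset, block_id) macro defined earlier in this
+ * header. */
+static inline void cvmx_example_srxx_cal_entry(int interface, int entry,
+					       int status)
+{
+	cvmx_srxx_spi4_calx_t cal;
+	uint64_t low16;
+	int offset = entry / 4;	/* which of the 32 calendar CSRs */
+
+	cal.u64 = cvmx_read_csr(CVMX_SRXX_SPI4_CALX(offset, interface));
+	switch (entry & 3) {	/* which 4-bit slot within the CSR */
+	case 0: cal.s.prt0 = status; break;
+	case 1: cal.s.prt1 = status; break;
+	case 2: cal.s.prt2 = status; break;
+	case 3: cal.s.prt3 = status; break;
+	}
+	/* Recompute ODDPAR so that the XOR of bits [16:0] equals 1. */
+	low16 = cal.u64 & 0xffff;
+	low16 ^= low16 >> 8;
+	low16 ^= low16 >> 4;
+	low16 ^= low16 >> 2;
+	low16 ^= low16 >> 1;
+	cal.s.oddpar = !(low16 & 1);
+	cvmx_write_csr(CVMX_SRXX_SPI4_CALX(offset, interface), cal.u64);
+}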
+
+/**
+ * cvmx_srx#_spi4_stat
+ *
+ * SRX_SPI4_STAT - Spi4 status channel control
+ *
+ *
+ * Notes:
+ * Restrictions:
+ * Both the calendar table and the LEN and M parameters must be
+ * completely set up before the Interface enable (INF_EN) and
+ * Status channel enable (ST_EN) bits are asserted.
+ *
+ * Current rev only supports LVTTL status IO
+ */
+union cvmx_srxx_spi4_stat {
+ uint64_t u64;
+ struct cvmx_srxx_spi4_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t m : 8; /**< CALENDAR_M (from spi4.2 spec) */
+ uint64_t reserved_7_7 : 1;
+ uint64_t len : 7; /**< CALENDAR_LEN (from spi4.2 spec) */
+#else
+ uint64_t len : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t m : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_srxx_spi4_stat_s cn38xx;
+ struct cvmx_srxx_spi4_stat_s cn38xxp2;
+ struct cvmx_srxx_spi4_stat_s cn58xx;
+ struct cvmx_srxx_spi4_stat_s cn58xxp1;
+};
+typedef union cvmx_srxx_spi4_stat cvmx_srxx_spi4_stat_t;
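+
+/* Example (editor's sketch, not part of the generated SDK sources): program
+ * the Spi4.2 status-channel calendar parameters. Per the restriction above
+ * this must happen before INF_EN/ST_EN are asserted in SRX_COM_CTL; the
+ * len/m arguments are illustrative only. Uses the CVMX_SRXX_SPI4_STAT()
+ * macro defined earlier in this header. */
+static inline void cvmx_example_srxx_spi4_stat(int interface, int len, int m)
+{
+	cvmx_srxx_spi4_stat_t stat;
+
+	stat.u64 = 0;
+	stat.s.len = len;	/* CALENDAR_LEN from the spi4.2 spec */
+	stat.s.m = m;		/* CALENDAR_M from the spi4.2 spec */
+	cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface), stat.u64);
+}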
+
+/**
+ * cvmx_srx#_sw_tick_ctl
+ *
+ * SRX_SW_TICK_CTL - Create a software tick of Spi4 data. A write to this register will create a data tick.
+ *
+ */
+union cvmx_srxx_sw_tick_ctl {
+ uint64_t u64;
+ struct cvmx_srxx_sw_tick_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t eop : 1; /**< SW Tick EOP
+ (PASS3 only) */
+ uint64_t sop : 1; /**< SW Tick SOP
+ (PASS3 only) */
+ uint64_t mod : 4; /**< SW Tick MOD - valid byte count
+ (PASS3 only) */
+ uint64_t opc : 4; /**< SW Tick ERR - packet had an error
+ (PASS3 only) */
+ uint64_t adr : 4; /**< SW Tick port address
+ (PASS3 only) */
+#else
+ uint64_t adr : 4;
+ uint64_t opc : 4;
+ uint64_t mod : 4;
+ uint64_t sop : 1;
+ uint64_t eop : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_srxx_sw_tick_ctl_s cn38xx;
+ struct cvmx_srxx_sw_tick_ctl_s cn58xx;
+ struct cvmx_srxx_sw_tick_ctl_s cn58xxp1;
+};
+typedef union cvmx_srxx_sw_tick_ctl cvmx_srxx_sw_tick_ctl_t;
+
+/**
+ * cvmx_srx#_sw_tick_dat
+ *
+ * SRX_SW_TICK_DAT - Create a software tick of Spi4 data
+ *
+ */
+union cvmx_srxx_sw_tick_dat {
+ uint64_t u64;
+ struct cvmx_srxx_sw_tick_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat : 64; /**< Data tick when SRX_SW_TICK_CTL is written
+ (PASS3 only) */
+#else
+ uint64_t dat : 64;
+#endif
+ } s;
+ struct cvmx_srxx_sw_tick_dat_s cn38xx;
+ struct cvmx_srxx_sw_tick_dat_s cn58xx;
+ struct cvmx_srxx_sw_tick_dat_s cn58xxp1;
+};
+typedef union cvmx_srxx_sw_tick_dat cvmx_srxx_sw_tick_dat_t;
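+
+/* Example (editor's sketch, not part of the generated SDK sources): inject
+ * one software tick of Spi4 data. SRX_SW_TICK_DAT must be loaded first; it
+ * is the write to SRX_SW_TICK_CTL that creates the tick (PASS3 only). Uses
+ * the CVMX_SRXX_SW_TICK_DAT()/CVMX_SRXX_SW_TICK_CTL() macros defined
+ * earlier in this header. */
+static inline void cvmx_example_srxx_sw_tick(int interface, uint64_t data,
+					     int port, int sop, int eop)
+{
+	cvmx_srxx_sw_tick_ctl_t ctl;
+
+	cvmx_write_csr(CVMX_SRXX_SW_TICK_DAT(interface), data);
+	ctl.u64 = 0;
+	ctl.s.adr = port;	/* destination port address */
+	ctl.s.sop = sop;	/* start of packet */
+	ctl.s.eop = eop;	/* end of packet */
+	cvmx_write_csr(CVMX_SRXX_SW_TICK_CTL(interface), ctl.u64);
+}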
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-srxx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-sso-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-sso-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-sso-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,2195 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-sso-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon sso.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_SSO_DEFS_H__
+#define __CVMX_SSO_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_ACTIVE_CYCLES CVMX_SSO_ACTIVE_CYCLES_FUNC()
+static inline uint64_t CVMX_SSO_ACTIVE_CYCLES_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_ACTIVE_CYCLES not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000010E8ull);
+}
+#else
+#define CVMX_SSO_ACTIVE_CYCLES (CVMX_ADD_IO_SEG(0x00016700000010E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_BIST_STAT CVMX_SSO_BIST_STAT_FUNC()
+static inline uint64_t CVMX_SSO_BIST_STAT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_BIST_STAT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001078ull);
+}
+#else
+#define CVMX_SSO_BIST_STAT (CVMX_ADD_IO_SEG(0x0001670000001078ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_CFG CVMX_SSO_CFG_FUNC()
+static inline uint64_t CVMX_SSO_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001088ull);
+}
+#else
+#define CVMX_SSO_CFG (CVMX_ADD_IO_SEG(0x0001670000001088ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_DS_PC CVMX_SSO_DS_PC_FUNC()
+static inline uint64_t CVMX_SSO_DS_PC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_DS_PC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001070ull);
+}
+#else
+#define CVMX_SSO_DS_PC (CVMX_ADD_IO_SEG(0x0001670000001070ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_ERR CVMX_SSO_ERR_FUNC()
+static inline uint64_t CVMX_SSO_ERR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_ERR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001038ull);
+}
+#else
+#define CVMX_SSO_ERR (CVMX_ADD_IO_SEG(0x0001670000001038ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_ERR_ENB CVMX_SSO_ERR_ENB_FUNC()
+static inline uint64_t CVMX_SSO_ERR_ENB_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_ERR_ENB not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001030ull);
+}
+#else
+#define CVMX_SSO_ERR_ENB (CVMX_ADD_IO_SEG(0x0001670000001030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_FIDX_ECC_CTL CVMX_SSO_FIDX_ECC_CTL_FUNC()
+static inline uint64_t CVMX_SSO_FIDX_ECC_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_FIDX_ECC_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000010D0ull);
+}
+#else
+#define CVMX_SSO_FIDX_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_FIDX_ECC_ST CVMX_SSO_FIDX_ECC_ST_FUNC()
+static inline uint64_t CVMX_SSO_FIDX_ECC_ST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_FIDX_ECC_ST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000010D8ull);
+}
+#else
+#define CVMX_SSO_FIDX_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010D8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_FPAGE_CNT CVMX_SSO_FPAGE_CNT_FUNC()
+static inline uint64_t CVMX_SSO_FPAGE_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_FPAGE_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001090ull);
+}
+#else
+#define CVMX_SSO_FPAGE_CNT (CVMX_ADD_IO_SEG(0x0001670000001090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_GWE_CFG CVMX_SSO_GWE_CFG_FUNC()
+static inline uint64_t CVMX_SSO_GWE_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_GWE_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001098ull);
+}
+#else
+#define CVMX_SSO_GWE_CFG (CVMX_ADD_IO_SEG(0x0001670000001098ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_IDX_ECC_CTL CVMX_SSO_IDX_ECC_CTL_FUNC()
+static inline uint64_t CVMX_SSO_IDX_ECC_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_IDX_ECC_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000010C0ull);
+}
+#else
+#define CVMX_SSO_IDX_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010C0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_IDX_ECC_ST CVMX_SSO_IDX_ECC_ST_FUNC()
+static inline uint64_t CVMX_SSO_IDX_ECC_ST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_IDX_ECC_ST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000010C8ull);
+}
+#else
+#define CVMX_SSO_IDX_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010C8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_IQ_CNTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_SSO_IQ_CNTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000009000ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_SSO_IQ_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000009000ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_IQ_COM_CNT CVMX_SSO_IQ_COM_CNT_FUNC()
+static inline uint64_t CVMX_SSO_IQ_COM_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_IQ_COM_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001058ull);
+}
+#else
+#define CVMX_SSO_IQ_COM_CNT (CVMX_ADD_IO_SEG(0x0001670000001058ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_IQ_INT CVMX_SSO_IQ_INT_FUNC()
+static inline uint64_t CVMX_SSO_IQ_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_IQ_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001048ull);
+}
+#else
+#define CVMX_SSO_IQ_INT (CVMX_ADD_IO_SEG(0x0001670000001048ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_IQ_INT_EN CVMX_SSO_IQ_INT_EN_FUNC()
+static inline uint64_t CVMX_SSO_IQ_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_IQ_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001050ull);
+}
+#else
+#define CVMX_SSO_IQ_INT_EN (CVMX_ADD_IO_SEG(0x0001670000001050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_IQ_THRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_SSO_IQ_THRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000167000000A000ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_SSO_IQ_THRX(offset) (CVMX_ADD_IO_SEG(0x000167000000A000ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_NOS_CNT CVMX_SSO_NOS_CNT_FUNC()
+static inline uint64_t CVMX_SSO_NOS_CNT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_NOS_CNT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001040ull);
+}
+#else
+#define CVMX_SSO_NOS_CNT (CVMX_ADD_IO_SEG(0x0001670000001040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_NW_TIM CVMX_SSO_NW_TIM_FUNC()
+static inline uint64_t CVMX_SSO_NW_TIM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_NW_TIM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001028ull);
+}
+#else
+#define CVMX_SSO_NW_TIM (CVMX_ADD_IO_SEG(0x0001670000001028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_OTH_ECC_CTL CVMX_SSO_OTH_ECC_CTL_FUNC()
+static inline uint64_t CVMX_SSO_OTH_ECC_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_OTH_ECC_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000010B0ull);
+}
+#else
+#define CVMX_SSO_OTH_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010B0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_OTH_ECC_ST CVMX_SSO_OTH_ECC_ST_FUNC()
+static inline uint64_t CVMX_SSO_OTH_ECC_ST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_OTH_ECC_ST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000010B8ull);
+}
+#else
+#define CVMX_SSO_OTH_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010B8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_PND_ECC_CTL CVMX_SSO_PND_ECC_CTL_FUNC()
+static inline uint64_t CVMX_SSO_PND_ECC_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_PND_ECC_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000010A0ull);
+}
+#else
+#define CVMX_SSO_PND_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_PND_ECC_ST CVMX_SSO_PND_ECC_ST_FUNC()
+static inline uint64_t CVMX_SSO_PND_ECC_ST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_PND_ECC_ST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000010A8ull);
+}
+#else
+#define CVMX_SSO_PND_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_PPX_GRP_MSK(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SSO_PPX_GRP_MSK(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8;
+}
+#else
+#define CVMX_SSO_PPX_GRP_MSK(offset) (CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_PPX_QOS_PRI(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_SSO_PPX_QOS_PRI(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000003000ull) + ((offset) & 31) * 8;
+}
+#else
+#define CVMX_SSO_PPX_QOS_PRI(offset) (CVMX_ADD_IO_SEG(0x0001670000003000ull) + ((offset) & 31) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_PP_STRICT CVMX_SSO_PP_STRICT_FUNC()
+static inline uint64_t CVMX_SSO_PP_STRICT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_PP_STRICT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000010E0ull);
+}
+#else
+#define CVMX_SSO_PP_STRICT (CVMX_ADD_IO_SEG(0x00016700000010E0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_QOSX_RND(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_SSO_QOSX_RND(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000002000ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_SSO_QOSX_RND(offset) (CVMX_ADD_IO_SEG(0x0001670000002000ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_QOS_THRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_SSO_QOS_THRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000167000000B000ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_SSO_QOS_THRX(offset) (CVMX_ADD_IO_SEG(0x000167000000B000ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_QOS_WE CVMX_SSO_QOS_WE_FUNC()
+static inline uint64_t CVMX_SSO_QOS_WE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_QOS_WE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001080ull);
+}
+#else
+#define CVMX_SSO_QOS_WE (CVMX_ADD_IO_SEG(0x0001670000001080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_RESET CVMX_SSO_RESET_FUNC()
+static inline uint64_t CVMX_SSO_RESET_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_RESET not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00016700000010F0ull);
+}
+#else
+#define CVMX_SSO_RESET (CVMX_ADD_IO_SEG(0x00016700000010F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_RWQ_HEAD_PTRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_SSO_RWQ_HEAD_PTRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000167000000C000ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_SSO_RWQ_HEAD_PTRX(offset) (CVMX_ADD_IO_SEG(0x000167000000C000ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_RWQ_POP_FPTR CVMX_SSO_RWQ_POP_FPTR_FUNC()
+static inline uint64_t CVMX_SSO_RWQ_POP_FPTR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_RWQ_POP_FPTR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x000167000000C408ull);
+}
+#else
+#define CVMX_SSO_RWQ_POP_FPTR (CVMX_ADD_IO_SEG(0x000167000000C408ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_RWQ_PSH_FPTR CVMX_SSO_RWQ_PSH_FPTR_FUNC()
+static inline uint64_t CVMX_SSO_RWQ_PSH_FPTR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_RWQ_PSH_FPTR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x000167000000C400ull);
+}
+#else
+#define CVMX_SSO_RWQ_PSH_FPTR (CVMX_ADD_IO_SEG(0x000167000000C400ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_RWQ_TAIL_PTRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_SSO_RWQ_TAIL_PTRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x000167000000C200ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_SSO_RWQ_TAIL_PTRX(offset) (CVMX_ADD_IO_SEG(0x000167000000C200ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_TS_PC CVMX_SSO_TS_PC_FUNC()
+static inline uint64_t CVMX_SSO_TS_PC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_TS_PC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001068ull);
+}
+#else
+#define CVMX_SSO_TS_PC (CVMX_ADD_IO_SEG(0x0001670000001068ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_WA_COM_PC CVMX_SSO_WA_COM_PC_FUNC()
+static inline uint64_t CVMX_SSO_WA_COM_PC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_WA_COM_PC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001060ull);
+}
+#else
+#define CVMX_SSO_WA_COM_PC (CVMX_ADD_IO_SEG(0x0001670000001060ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_WA_PCX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_SSO_WA_PCX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000005000ull) + ((offset) & 7) * 8;
+}
+#else
+#define CVMX_SSO_WA_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000005000ull) + ((offset) & 7) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_WQ_INT CVMX_SSO_WQ_INT_FUNC()
+static inline uint64_t CVMX_SSO_WQ_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_WQ_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001000ull);
+}
+#else
+#define CVMX_SSO_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000001000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_WQ_INT_CNTX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_SSO_WQ_INT_CNTX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000008000ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_SSO_WQ_INT_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000008000ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_WQ_INT_PC CVMX_SSO_WQ_INT_PC_FUNC()
+static inline uint64_t CVMX_SSO_WQ_INT_PC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_WQ_INT_PC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001020ull);
+}
+#else
+#define CVMX_SSO_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000001020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_WQ_INT_THRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_SSO_WQ_INT_THRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_SSO_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_SSO_WQ_IQ_DIS CVMX_SSO_WQ_IQ_DIS_FUNC()
+static inline uint64_t CVMX_SSO_WQ_IQ_DIS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_SSO_WQ_IQ_DIS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001670000001010ull);
+}
+#else
+#define CVMX_SSO_WQ_IQ_DIS (CVMX_ADD_IO_SEG(0x0001670000001010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_SSO_WS_PCX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_SSO_WS_PCX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001670000004000ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_SSO_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000004000ull) + ((offset) & 63) * 8)
+#endif
+
+/**
+ * cvmx_sso_active_cycles
+ *
+ * SSO_ACTIVE_CYCLES = SSO cycles SSO active
+ *
+ * This register counts every sclk cycle that the SSO clocks are active.
+ * **NOTE: Added in pass 2.0
+ */
+union cvmx_sso_active_cycles {
+ uint64_t u64;
+ struct cvmx_sso_active_cycles_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t act_cyc : 64; /**< Counts number of active cycles. */
+#else
+ uint64_t act_cyc : 64;
+#endif
+ } s;
+ struct cvmx_sso_active_cycles_s cn68xx;
+};
+typedef union cvmx_sso_active_cycles cvmx_sso_active_cycles_t;
+
+/**
+ * cvmx_sso_bist_stat
+ *
+ * SSO_BIST_STAT = SSO BIST Status Register
+ *
+ * Contains the BIST status for the SSO memories ('0' = pass, '1' = fail).
+ * Note that PP BIST status is not reported here as it was in previous designs.
+ *
+ * There may be more for DDR interface buffers.
+ * It's possible that a RAM will be used for SSO_PP_QOS_RND.
+ */
+union cvmx_sso_bist_stat {
+ uint64_t u64;
+ struct cvmx_sso_bist_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63 : 2;
+ uint64_t odu_pref : 2; /**< ODU Prefetch memory BIST status */
+ uint64_t reserved_54_59 : 6;
+ uint64_t fptr : 2; /**< FPTR memory BIST status */
+ uint64_t reserved_45_51 : 7;
+ uint64_t rwo_dat : 1; /**< RWO_DAT memory BIST status */
+ uint64_t rwo : 2; /**< RWO memory BIST status */
+ uint64_t reserved_35_41 : 7;
+ uint64_t rwi_dat : 1; /**< RWI_DAT memory BIST status */
+ uint64_t reserved_32_33 : 2;
+ uint64_t soc : 1; /**< SSO CAM BIST status */
+ uint64_t reserved_28_30 : 3;
+ uint64_t ncbo : 4; /**< NCBO transmitter memory BIST status */
+ uint64_t reserved_21_23 : 3;
+ uint64_t index : 1; /**< Index memory BIST status */
+ uint64_t reserved_17_19 : 3;
+ uint64_t fidx : 1; /**< Forward index memory BIST status */
+ uint64_t reserved_10_15 : 6;
+ uint64_t pend : 2; /**< Pending switch memory BIST status */
+ uint64_t reserved_2_7 : 6;
+ uint64_t oth : 2; /**< WQP, GRP memory BIST status */
+#else
+ uint64_t oth : 2;
+ uint64_t reserved_2_7 : 6;
+ uint64_t pend : 2;
+ uint64_t reserved_10_15 : 6;
+ uint64_t fidx : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t index : 1;
+ uint64_t reserved_21_23 : 3;
+ uint64_t ncbo : 4;
+ uint64_t reserved_28_30 : 3;
+ uint64_t soc : 1;
+ uint64_t reserved_32_33 : 2;
+ uint64_t rwi_dat : 1;
+ uint64_t reserved_35_41 : 7;
+ uint64_t rwo : 2;
+ uint64_t rwo_dat : 1;
+ uint64_t reserved_45_51 : 7;
+ uint64_t fptr : 2;
+ uint64_t reserved_54_59 : 6;
+ uint64_t odu_pref : 2;
+ uint64_t reserved_62_63 : 2;
+#endif
+ } s;
+ struct cvmx_sso_bist_stat_s cn68xx;
+ struct cvmx_sso_bist_stat_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t fptr : 2; /**< FPTR memory BIST status */
+ uint64_t reserved_45_51 : 7;
+ uint64_t rwo_dat : 1; /**< RWO_DAT memory BIST status */
+ uint64_t rwo : 2; /**< RWO memory BIST status */
+ uint64_t reserved_35_41 : 7;
+ uint64_t rwi_dat : 1; /**< RWI_DAT memory BIST status */
+ uint64_t reserved_32_33 : 2;
+ uint64_t soc : 1; /**< SSO CAM BIST status */
+ uint64_t reserved_28_30 : 3;
+ uint64_t ncbo : 4; /**< NCBO transmitter memory BIST status */
+ uint64_t reserved_21_23 : 3;
+ uint64_t index : 1; /**< Index memory BIST status */
+ uint64_t reserved_17_19 : 3;
+ uint64_t fidx : 1; /**< Forward index memory BIST status */
+ uint64_t reserved_10_15 : 6;
+ uint64_t pend : 2; /**< Pending switch memory BIST status */
+ uint64_t reserved_2_7 : 6;
+ uint64_t oth : 2; /**< WQP, GRP memory BIST status */
+#else
+ uint64_t oth : 2;
+ uint64_t reserved_2_7 : 6;
+ uint64_t pend : 2;
+ uint64_t reserved_10_15 : 6;
+ uint64_t fidx : 1;
+ uint64_t reserved_17_19 : 3;
+ uint64_t index : 1;
+ uint64_t reserved_21_23 : 3;
+ uint64_t ncbo : 4;
+ uint64_t reserved_28_30 : 3;
+ uint64_t soc : 1;
+ uint64_t reserved_32_33 : 2;
+ uint64_t rwi_dat : 1;
+ uint64_t reserved_35_41 : 7;
+ uint64_t rwo : 2;
+ uint64_t rwo_dat : 1;
+ uint64_t reserved_45_51 : 7;
+ uint64_t fptr : 2;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_sso_bist_stat cvmx_sso_bist_stat_t;
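+
+/* Editor's sketch (not part of the generated file): a quick pass/fail
+ * check of the BIST results above.  Assumes the CVMX_SSO_BIST_STAT
+ * address macro defined earlier in this header and cvmx_read_csr()
+ * from cvmx.h; if reserved fields read as zero (typical for these
+ * CSRs), any nonzero value means some memory failed BIST. */
+static inline int cvmx_sso_example_bist_failed(void)
+{
+ cvmx_sso_bist_stat_t bist;
+ bist.u64 = cvmx_read_csr(CVMX_SSO_BIST_STAT);
+ return bist.u64 != 0;
+}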
+
+/**
+ * cvmx_sso_cfg
+ *
+ * SSO_CFG = SSO Config
+ *
+ * This register is an assortment of various SSO configuration bits.
+ */
+union cvmx_sso_cfg {
+ uint64_t u64;
+ struct cvmx_sso_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t qck_gw_rsp_adj : 3; /**< Fast GET_WORK response fine adjustment
+ Allowed values are 0, 1, and 2 (0 is quickest) */
+ uint64_t qck_gw_rsp_dis : 1; /**< Disable faster response to GET_WORK */
+ uint64_t qck_sw_dis : 1; /**< Disable faster switch to UNSCHEDULED on GET_WORK */
+ uint64_t rwq_alloc_dis : 1; /**< Disable FPA Alloc Requests when SSO_FPAGE_CNT < 16 */
+ uint64_t soc_ccam_dis : 1; /**< Disable power saving SOC conditional CAM
+ (**NOTE: Added in pass 2.0) */
+ uint64_t sso_cclk_dis : 1; /**< Disable power saving SSO conditional clocking
+ (**NOTE: Added in pass 2.0) */
+ uint64_t rwo_flush : 1; /**< Flush RWO engine
+ Allows outbound NCB entries to go immediately rather
+ than waiting for a complete fill packet. This bit
+ is one-shot and clears itself each time it is set. */
+ uint64_t wfe_thr : 1; /**< Use 1 Work-fetch engine (instead of 4) */
+ uint64_t rwio_byp_dis : 1; /**< Disable Bypass path in RWI/RWO Engines */
+ uint64_t rwq_byp_dis : 1; /**< Disable Bypass path in RWQ Engine */
+ uint64_t stt : 1; /**< STT Setting for RW Stores */
+ uint64_t ldt : 1; /**< LDT Setting for RW Loads */
+ uint64_t dwb : 1; /**< DWB Setting for Return Page Requests
+ 1 = 2 128B cache pages to issue DWB for
+ 0 = 0 128B cache pages to issue DWB for */
+ uint64_t rwen : 1; /**< Enable RWI/RWO operations
+ This bit should be set after SSO_RWQ_HEAD_PTRX and
+ SSO_RWQ_TAIL_PTRX have been programmed. */
+#else
+ uint64_t rwen : 1;
+ uint64_t dwb : 1;
+ uint64_t ldt : 1;
+ uint64_t stt : 1;
+ uint64_t rwq_byp_dis : 1;
+ uint64_t rwio_byp_dis : 1;
+ uint64_t wfe_thr : 1;
+ uint64_t rwo_flush : 1;
+ uint64_t sso_cclk_dis : 1;
+ uint64_t soc_ccam_dis : 1;
+ uint64_t rwq_alloc_dis : 1;
+ uint64_t qck_sw_dis : 1;
+ uint64_t qck_gw_rsp_dis : 1;
+ uint64_t qck_gw_rsp_adj : 3;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_sso_cfg_s cn68xx;
+ struct cvmx_sso_cfg_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t rwo_flush : 1; /**< Flush RWO engine
+ Allows outbound NCB entries to go immediately rather
+ than waiting for a complete fill packet. This bit
+ is one-shot and clears itself each time it is set. */
+ uint64_t wfe_thr : 1; /**< Use 1 Work-fetch engine (instead of 4) */
+ uint64_t rwio_byp_dis : 1; /**< Disable Bypass path in RWI/RWO Engines */
+ uint64_t rwq_byp_dis : 1; /**< Disable Bypass path in RWQ Engine */
+ uint64_t stt : 1; /**< STT Setting for RW Stores */
+ uint64_t ldt : 1; /**< LDT Setting for RW Loads */
+ uint64_t dwb : 1; /**< DWB Setting for Return Page Requests
+ 1 = 2 128B cache pages to issue DWB for
+ 0 = 0 128B cache pages to issue DWB for */
+ uint64_t rwen : 1; /**< Enable RWI/RWO operations
+ This bit should be set after SSO_RWQ_HEAD_PTRX and
+ SSO_RWQ_TAIL_PTRX have been programmed. */
+#else
+ uint64_t rwen : 1;
+ uint64_t dwb : 1;
+ uint64_t ldt : 1;
+ uint64_t stt : 1;
+ uint64_t rwq_byp_dis : 1;
+ uint64_t rwio_byp_dis : 1;
+ uint64_t wfe_thr : 1;
+ uint64_t rwo_flush : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_sso_cfg cvmx_sso_cfg_t;
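+
+/* Editor's sketch (not part of the generated file): RWEN above must be
+ * set only after the remote-queue head/tail pointers are programmed.
+ * Assumes the CVMX_SSO_CFG and CVMX_SSO_RWQ_{HEAD,TAIL}_PTRX address
+ * macros from earlier in this header, cvmx_read_csr()/cvmx_write_csr()
+ * from cvmx.h, and one pre-allocated, 128-byte-aligned 256-byte buffer
+ * per QOS level.  An aligned physical address lands in the PTR field
+ * (bits <37:7>) with RCTR zero when written directly. */
+static inline void cvmx_sso_example_enable_rw(const uint64_t bufs[8])
+{
+ cvmx_sso_cfg_t cfg;
+ int qos;
+ for (qos = 0; qos < 8; qos++) {
+  /* Head == tail marks each remote linked list as empty. */
+  cvmx_write_csr(CVMX_SSO_RWQ_HEAD_PTRX(qos), bufs[qos]);
+  cvmx_write_csr(CVMX_SSO_RWQ_TAIL_PTRX(qos), bufs[qos]);
+ }
+ cfg.u64 = cvmx_read_csr(CVMX_SSO_CFG);
+ cfg.s.rwen = 1; /* safe only now that the pointers are programmed */
+ cvmx_write_csr(CVMX_SSO_CFG, cfg.u64);
+}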
+
+/**
+ * cvmx_sso_ds_pc
+ *
+ * SSO_DS_PC = SSO De-Schedule Performance Counter
+ *
+ * Counts the number of de-schedule requests.
+ * Counter rolls over through zero when max value exceeded.
+ */
+union cvmx_sso_ds_pc {
+ uint64_t u64;
+ struct cvmx_sso_ds_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ds_pc : 64; /**< De-schedule performance counter */
+#else
+ uint64_t ds_pc : 64;
+#endif
+ } s;
+ struct cvmx_sso_ds_pc_s cn68xx;
+ struct cvmx_sso_ds_pc_s cn68xxp1;
+};
+typedef union cvmx_sso_ds_pc cvmx_sso_ds_pc_t;
+
+/**
+ * cvmx_sso_err
+ *
+ * SSO_ERR = SSO Error Register
+ *
+ * Contains ECC and other misc error bits.
+ *
+ * <45> The free page error bit will assert when SSO_FPAGE_CNT <= 16 and
+ * SSO_CFG[RWEN] is 1. Software will want to disable the interrupt
+ * associated with this error when recovering SSO pointers from the
+ * FPA and SSO.
+ *
+ * This register also contains the illegal operation error bits:
+ *
+ * <42> Received ADDWQ with tag specified as EMPTY
+ * <41> Received illegal opcode
+ * <40> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE
+ * from WS with CLR_NSCHED pending
+ * <39> Received CLR_NSCHED
+ * from WS with SWTAG_DESCH/DESCH/CLR_NSCHED pending
+ * <38> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE
+ * from WS with ALLOC_WE pending
+ * <37> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE/CLR_NSCHED
+ * from WS with GET_WORK pending
+ * <36> Received SWTAG_FULL/SWTAG_DESCH
+ * with tag specified as UNSCHEDULED
+ * <35> Received SWTAG/SWTAG_FULL/SWTAG_DESCH
+ * with tag specified as EMPTY
+ * <34> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/GET_WORK
+ * from WS with pending tag switch to ORDERED or ATOMIC
+ * <33> Received SWTAG/SWTAG_DESCH/DESCH/UPD_WQP
+ * from WS in UNSCHEDULED state
+ * <32> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP
+ * from WS in EMPTY state
+ */
+union cvmx_sso_err {
+ uint64_t u64;
+ struct cvmx_sso_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t bfp : 1; /**< Bad Fill Packet error
+ Last byte of the fill packet did not match 8'h1a */
+ uint64_t awe : 1; /**< Out-of-memory error (ADDWQ Request is dropped) */
+ uint64_t fpe : 1; /**< Free page error */
+ uint64_t reserved_43_44 : 2;
+ uint64_t iop : 11; /**< Illegal operation errors */
+ uint64_t reserved_12_31 : 20;
+ uint64_t pnd_dbe0 : 1; /**< Double bit error for even PND RAM */
+ uint64_t pnd_sbe0 : 1; /**< Single bit error for even PND RAM */
+ uint64_t pnd_dbe1 : 1; /**< Double bit error for odd PND RAM */
+ uint64_t pnd_sbe1 : 1; /**< Single bit error for odd PND RAM */
+ uint64_t oth_dbe0 : 1; /**< Double bit error for even OTH RAM */
+ uint64_t oth_sbe0 : 1; /**< Single bit error for even OTH RAM */
+ uint64_t oth_dbe1 : 1; /**< Double bit error for odd OTH RAM */
+ uint64_t oth_sbe1 : 1; /**< Single bit error for odd OTH RAM */
+ uint64_t idx_dbe : 1; /**< Double bit error for IDX RAM */
+ uint64_t idx_sbe : 1; /**< Single bit error for IDX RAM */
+ uint64_t fidx_dbe : 1; /**< Double bit error for FIDX RAM */
+ uint64_t fidx_sbe : 1; /**< Single bit error for FIDX RAM */
+#else
+ uint64_t fidx_sbe : 1;
+ uint64_t fidx_dbe : 1;
+ uint64_t idx_sbe : 1;
+ uint64_t idx_dbe : 1;
+ uint64_t oth_sbe1 : 1;
+ uint64_t oth_dbe1 : 1;
+ uint64_t oth_sbe0 : 1;
+ uint64_t oth_dbe0 : 1;
+ uint64_t pnd_sbe1 : 1;
+ uint64_t pnd_dbe1 : 1;
+ uint64_t pnd_sbe0 : 1;
+ uint64_t pnd_dbe0 : 1;
+ uint64_t reserved_12_31 : 20;
+ uint64_t iop : 11;
+ uint64_t reserved_43_44 : 2;
+ uint64_t fpe : 1;
+ uint64_t awe : 1;
+ uint64_t bfp : 1;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_sso_err_s cn68xx;
+ struct cvmx_sso_err_s cn68xxp1;
+};
+typedef union cvmx_sso_err cvmx_sso_err_t;
+
+/**
+ * cvmx_sso_err_enb
+ *
+ * SSO_ERR_ENB = SSO Error Enable Register
+ *
+ * Contains the interrupt enables corresponding to SSO_ERR.
+ */
+union cvmx_sso_err_enb {
+ uint64_t u64;
+ struct cvmx_sso_err_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t bfp_ie : 1; /**< Bad Fill Packet error interrupt enable */
+ uint64_t awe_ie : 1; /**< Add-work error interrupt enable */
+ uint64_t fpe_ie : 1; /**< Free Page error interrupt enable */
+ uint64_t reserved_43_44 : 2;
+ uint64_t iop_ie : 11; /**< Illegal operation interrupt enables */
+ uint64_t reserved_12_31 : 20;
+ uint64_t pnd_dbe0_ie : 1; /**< Double bit error interrupt enable for even PND RAM */
+ uint64_t pnd_sbe0_ie : 1; /**< Single bit error interrupt enable for even PND RAM */
+ uint64_t pnd_dbe1_ie : 1; /**< Double bit error interrupt enable for odd PND RAM */
+ uint64_t pnd_sbe1_ie : 1; /**< Single bit error interrupt enable for odd PND RAM */
+ uint64_t oth_dbe0_ie : 1; /**< Double bit error interrupt enable for even OTH RAM */
+ uint64_t oth_sbe0_ie : 1; /**< Single bit error interrupt enable for even OTH RAM */
+ uint64_t oth_dbe1_ie : 1; /**< Double bit error interrupt enable for odd OTH RAM */
+ uint64_t oth_sbe1_ie : 1; /**< Single bit error interrupt enable for odd OTH RAM */
+ uint64_t idx_dbe_ie : 1; /**< Double bit error interrupt enable for IDX RAM */
+ uint64_t idx_sbe_ie : 1; /**< Single bit error interrupt enable for IDX RAM */
+ uint64_t fidx_dbe_ie : 1; /**< Double bit error interrupt enable for FIDX RAM */
+ uint64_t fidx_sbe_ie : 1; /**< Single bit error interrupt enable for FIDX RAM */
+#else
+ uint64_t fidx_sbe_ie : 1;
+ uint64_t fidx_dbe_ie : 1;
+ uint64_t idx_sbe_ie : 1;
+ uint64_t idx_dbe_ie : 1;
+ uint64_t oth_sbe1_ie : 1;
+ uint64_t oth_dbe1_ie : 1;
+ uint64_t oth_sbe0_ie : 1;
+ uint64_t oth_dbe0_ie : 1;
+ uint64_t pnd_sbe1_ie : 1;
+ uint64_t pnd_dbe1_ie : 1;
+ uint64_t pnd_sbe0_ie : 1;
+ uint64_t pnd_dbe0_ie : 1;
+ uint64_t reserved_12_31 : 20;
+ uint64_t iop_ie : 11;
+ uint64_t reserved_43_44 : 2;
+ uint64_t fpe_ie : 1;
+ uint64_t awe_ie : 1;
+ uint64_t bfp_ie : 1;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_sso_err_enb_s cn68xx;
+ struct cvmx_sso_err_enb_s cn68xxp1;
+};
+typedef union cvmx_sso_err_enb cvmx_sso_err_enb_t;
+
+/**
+ * cvmx_sso_fidx_ecc_ctl
+ *
+ * SSO_FIDX_ECC_CTL = SSO FIDX ECC Control
+ *
+ */
+union cvmx_sso_fidx_ecc_ctl {
+ uint64_t u64;
+ struct cvmx_sso_fidx_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t flip_synd : 2; /**< Testing feature. Flip Syndrome to generate a single or
+ double bit error for the FIDX RAM. */
+ uint64_t ecc_ena : 1; /**< ECC Enable: When set, enables the 5-bit ECC
+ correction logic for the FIDX RAM. */
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t flip_synd : 2;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_sso_fidx_ecc_ctl_s cn68xx;
+ struct cvmx_sso_fidx_ecc_ctl_s cn68xxp1;
+};
+typedef union cvmx_sso_fidx_ecc_ctl cvmx_sso_fidx_ecc_ctl_t;
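+
+/* Editor's sketch: enable the FIDX ECC correction logic described above
+ * and leave the FLIP_SYND error-injection field clear.  The
+ * CVMX_SSO_FIDX_ECC_CTL address macro is assumed from earlier in this
+ * header. */
+static inline void cvmx_sso_example_fidx_ecc_enable(void)
+{
+ cvmx_sso_fidx_ecc_ctl_t ctl;
+ ctl.u64 = 0;
+ ctl.s.ecc_ena = 1;
+ cvmx_write_csr(CVMX_SSO_FIDX_ECC_CTL, ctl.u64);
+}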
+
+/**
+ * cvmx_sso_fidx_ecc_st
+ *
+ * SSO_FIDX_ECC_ST = SSO FIDX ECC Status
+ *
+ */
+union cvmx_sso_fidx_ecc_st {
+ uint64_t u64;
+ struct cvmx_sso_fidx_ecc_st_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t addr : 11; /**< Latches the address of the latest sbe/dbe that occurred
+ for the FIDX RAM */
+ uint64_t reserved_9_15 : 7;
+ uint64_t syndrom : 5; /**< Reports the latest error syndrome for the
+ FIDX RAM */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t syndrom : 5;
+ uint64_t reserved_9_15 : 7;
+ uint64_t addr : 11;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } s;
+ struct cvmx_sso_fidx_ecc_st_s cn68xx;
+ struct cvmx_sso_fidx_ecc_st_s cn68xxp1;
+};
+typedef union cvmx_sso_fidx_ecc_st cvmx_sso_fidx_ecc_st_t;
+
+/**
+ * cvmx_sso_fpage_cnt
+ *
+ * SSO_FPAGE_CNT = SSO Free Page Cnt
+ *
+ * This register keeps track of the number of free page pointers available for use in external memory.
+ */
+union cvmx_sso_fpage_cnt {
+ uint64_t u64;
+ struct cvmx_sso_fpage_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t fpage_cnt : 32; /**< Free Page Cnt
+ HW updates this register. Writes to this register
+ are only for diagnostic purposes */
+#else
+ uint64_t fpage_cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sso_fpage_cnt_s cn68xx;
+ struct cvmx_sso_fpage_cnt_s cn68xxp1;
+};
+typedef union cvmx_sso_fpage_cnt cvmx_sso_fpage_cnt_t;
+
+/**
+ * cvmx_sso_gwe_cfg
+ *
+ * SSO_GWE_CFG = SSO Get-Work Examiner Configuration
+ *
+ * This register controls the operation of the Get-Work Examiner (GWE)
+ */
+union cvmx_sso_gwe_cfg {
+ uint64_t u64;
+ struct cvmx_sso_gwe_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t odu_ffpgw_dis : 1; /**< Disable flushing ODU on periodic restart of GWE */
+ uint64_t gwe_rfpgw_dis : 1; /**< Disable periodic restart of GWE for pending get_work */
+ uint64_t odu_prf_dis : 1; /**< Disable ODU-initiated prefetches of WQEs into L2C
+ For diagnostic use only. */
+ uint64_t odu_bmp_dis : 1; /**< Disable ODU bumps.
+ If SSO_PP_STRICT is true, could
+ prevent forward progress under some circumstances.
+ For diagnostic use only. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t gwe_hvy_dis : 1; /**< Disable GWE automatic, proportional weight-increase
+ mechanism and use SSO_QOSX_RND values as-is.
+ For diagnostic use only. */
+ uint64_t gwe_poe : 1; /**< Pause GWE on extracts
+ For diagnostic use only. */
+ uint64_t gwe_fpor : 1; /**< Flush GWE pipeline when restarting GWE.
+ For diagnostic use only. */
+ uint64_t gwe_rah : 1; /**< Begin at head of input queues when restarting GWE.
+ For diagnostic use only. */
+ uint64_t gwe_dis : 1; /**< Disable Get-Work Examiner */
+#else
+ uint64_t gwe_dis : 1;
+ uint64_t gwe_rah : 1;
+ uint64_t gwe_fpor : 1;
+ uint64_t gwe_poe : 1;
+ uint64_t gwe_hvy_dis : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t odu_bmp_dis : 1;
+ uint64_t odu_prf_dis : 1;
+ uint64_t gwe_rfpgw_dis : 1;
+ uint64_t odu_ffpgw_dis : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_sso_gwe_cfg_s cn68xx;
+ struct cvmx_sso_gwe_cfg_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t gwe_poe : 1; /**< Pause GWE on extracts
+ For diagnostic use only. */
+ uint64_t gwe_fpor : 1; /**< Flush GWE pipeline when restarting GWE.
+ For diagnostic use only. */
+ uint64_t gwe_rah : 1; /**< Begin at head of input queues when restarting GWE.
+ For diagnostic use only. */
+ uint64_t gwe_dis : 1; /**< Disable Get-Work Examiner */
+#else
+ uint64_t gwe_dis : 1;
+ uint64_t gwe_rah : 1;
+ uint64_t gwe_fpor : 1;
+ uint64_t gwe_poe : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_sso_gwe_cfg cvmx_sso_gwe_cfg_t;
+
+/**
+ * cvmx_sso_idx_ecc_ctl
+ *
+ * SSO_IDX_ECC_CTL = SSO IDX ECC Control
+ *
+ */
+union cvmx_sso_idx_ecc_ctl {
+ uint64_t u64;
+ struct cvmx_sso_idx_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t flip_synd : 2; /**< Testing feature. Flip Syndrome to generate a single or
+ double bit error for the IDX RAM. */
+ uint64_t ecc_ena : 1; /**< ECC Enable: When set, enables the 5-bit ECC
+ correction logic for the IDX RAM. */
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t flip_synd : 2;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_sso_idx_ecc_ctl_s cn68xx;
+ struct cvmx_sso_idx_ecc_ctl_s cn68xxp1;
+};
+typedef union cvmx_sso_idx_ecc_ctl cvmx_sso_idx_ecc_ctl_t;
+
+/**
+ * cvmx_sso_idx_ecc_st
+ *
+ * SSO_IDX_ECC_ST = SSO IDX ECC Status
+ *
+ */
+union cvmx_sso_idx_ecc_st {
+ uint64_t u64;
+ struct cvmx_sso_idx_ecc_st_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t addr : 11; /**< Latches the address of the latest sbe/dbe that occurred
+ for the IDX RAM */
+ uint64_t reserved_9_15 : 7;
+ uint64_t syndrom : 5; /**< Reports the latest error syndrome for the
+ IDX RAM */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t syndrom : 5;
+ uint64_t reserved_9_15 : 7;
+ uint64_t addr : 11;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } s;
+ struct cvmx_sso_idx_ecc_st_s cn68xx;
+ struct cvmx_sso_idx_ecc_st_s cn68xxp1;
+};
+typedef union cvmx_sso_idx_ecc_st cvmx_sso_idx_ecc_st_t;
+
+/**
+ * cvmx_sso_iq_cnt#
+ *
+ * CSR reserved addresses: (64): 0x8200..0x83f8
+ * CSR align addresses: ===========================================================================================================
+ * SSO_IQ_CNTX = SSO Input Queue Count Register
+ * (one per QOS level)
+ *
+ * Contains a read-only count of the number of work queue entries for each QOS
+ * level. Counts both in-unit and in-memory entries.
+ */
+union cvmx_sso_iq_cntx {
+ uint64_t u64;
+ struct cvmx_sso_iq_cntx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t iq_cnt : 32; /**< Input queue count for QOS level X */
+#else
+ uint64_t iq_cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sso_iq_cntx_s cn68xx;
+ struct cvmx_sso_iq_cntx_s cn68xxp1;
+};
+typedef union cvmx_sso_iq_cntx cvmx_sso_iq_cntx_t;
+
+/**
+ * cvmx_sso_iq_com_cnt
+ *
+ * SSO_IQ_COM_CNT = SSO Input Queue Combined Count Register
+ *
+ * Contains a read-only count of the total number of work queue entries in all
+ * QOS levels. Counts both in-unit and in-memory entries.
+ */
+union cvmx_sso_iq_com_cnt {
+ uint64_t u64;
+ struct cvmx_sso_iq_com_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t iq_cnt : 32; /**< Input queue combined count */
+#else
+ uint64_t iq_cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sso_iq_com_cnt_s cn68xx;
+ struct cvmx_sso_iq_com_cnt_s cn68xxp1;
+};
+typedef union cvmx_sso_iq_com_cnt cvmx_sso_iq_com_cnt_t;
+
+/**
+ * cvmx_sso_iq_int
+ *
+ * SSO_IQ_INT = SSO Input Queue Interrupt Register
+ *
+ * Contains the bits (one per QOS level) that can trigger the input queue
+ * interrupt. An IQ_INT bit will be set if SSO_IQ_CNT#QOS# changes and the
+ * resulting value is equal to SSO_IQ_THR#QOS#.
+ */
+union cvmx_sso_iq_int {
+ uint64_t u64;
+ struct cvmx_sso_iq_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t iq_int : 8; /**< Input queue interrupt bits */
+#else
+ uint64_t iq_int : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_sso_iq_int_s cn68xx;
+ struct cvmx_sso_iq_int_s cn68xxp1;
+};
+typedef union cvmx_sso_iq_int cvmx_sso_iq_int_t;
+
+/**
+ * cvmx_sso_iq_int_en
+ *
+ * SSO_IQ_INT_EN = SSO Input Queue Interrupt Enable Register
+ *
+ * Contains the bits (one per QOS level) that enable the input queue interrupt.
+ */
+union cvmx_sso_iq_int_en {
+ uint64_t u64;
+ struct cvmx_sso_iq_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t int_en : 8; /**< Input queue interrupt enable bits */
+#else
+ uint64_t int_en : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_sso_iq_int_en_s cn68xx;
+ struct cvmx_sso_iq_int_en_s cn68xxp1;
+};
+typedef union cvmx_sso_iq_int_en cvmx_sso_iq_int_en_t;
+
+/**
+ * cvmx_sso_iq_thr#
+ *
+ * CSR reserved addresses: (24): 0x9040..0x90f8
+ * CSR align addresses: ===========================================================================================================
+ * SSO_IQ_THRX = SSO Input Queue Threshold Register
+ * (one per QOS level)
+ *
+ * Threshold value for triggering input queue interrupts.
+ */
+union cvmx_sso_iq_thrx {
+ uint64_t u64;
+ struct cvmx_sso_iq_thrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t iq_thr : 32; /**< Input queue threshold for QOS level X */
+#else
+ uint64_t iq_thr : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sso_iq_thrx_s cn68xx;
+ struct cvmx_sso_iq_thrx_s cn68xxp1;
+};
+typedef union cvmx_sso_iq_thrx cvmx_sso_iq_thrx_t;
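+
+/* Editor's sketch tying SSO_IQ_THRX and SSO_IQ_INT_EN together: set the
+ * per-QOS threshold, then enable that QOS level's interrupt bit.  The
+ * address macros are assumed from earlier in this header. */
+static inline void cvmx_sso_example_arm_iq_int(int qos, uint32_t threshold)
+{
+ cvmx_sso_iq_thrx_t thr;
+ cvmx_sso_iq_int_en_t en;
+ thr.u64 = 0;
+ thr.s.iq_thr = threshold;
+ cvmx_write_csr(CVMX_SSO_IQ_THRX(qos), thr.u64);
+ en.u64 = cvmx_read_csr(CVMX_SSO_IQ_INT_EN);
+ en.s.int_en |= 1u << qos;
+ cvmx_write_csr(CVMX_SSO_IQ_INT_EN, en.u64);
+}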
+
+/**
+ * cvmx_sso_nos_cnt
+ *
+ * SSO_NOS_CNT = SSO No-schedule Count Register
+ *
+ * Contains the number of work queue entries on the no-schedule list.
+ */
+union cvmx_sso_nos_cnt {
+ uint64_t u64;
+ struct cvmx_sso_nos_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63 : 52;
+ uint64_t nos_cnt : 12; /**< Number of work queue entries on the no-schedule list */
+#else
+ uint64_t nos_cnt : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_sso_nos_cnt_s cn68xx;
+ struct cvmx_sso_nos_cnt_s cn68xxp1;
+};
+typedef union cvmx_sso_nos_cnt cvmx_sso_nos_cnt_t;
+
+/**
+ * cvmx_sso_nw_tim
+ *
+ * SSO_NW_TIM = SSO New Work Timer Period Register
+ *
+ * Sets the minimum period for a new work request timeout. Period is specified
+ * in n-1 notation where the increment value is 1024 clock cycles. Thus, a
+ * value of 0x0 in this register translates to 1024 cycles, 0x1 translates to
+ * 2048 cycles, 0x2 translates to 3072 cycles, etc... Note: the maximum period
+ * for a new work request timeout is 2 times the minimum period. Note: the new
+ * work request timeout counter is reset when this register is written.
+ *
+ * There are two new work request timeout cases:
+ *
+ * - WAIT bit clear. The new work request can timeout if the timer expires
+ * before the pre-fetch engine has reached the end of all work queues. This
+ * can occur if the executable work queue entry is deep in the queue and the
+ * pre-fetch engine is subject to many resets (i.e. high switch, de-schedule,
+ * or new workload from other PPs). Thus, it is possible for a PP to
+ * receive a work response with the NO_WORK bit set even though there was at
+ * least one executable entry in the work queues. The other (and typical)
+ * scenario for receiving a NO_WORK response with the WAIT bit clear is that
+ * the pre-fetch engine has reached the end of all work queues without
+ * finding executable work.
+ *
+ * - WAIT bit set. The new work request can timeout if the timer expires
+ * before the pre-fetch engine has found executable work. In this case, the
+ * only scenario where the PP will receive a work response with the NO_WORK
+ * bit set is if the timer expires. Note: it is still possible for a PP to
+ * receive a NO_WORK response even though there was at least one executable
+ * entry in the work queues.
+ *
+ * In either case, it's important to note that switches and de-schedules are
+ * higher priority operations that can cause the pre-fetch engine to reset.
+ * Thus in a system with many switches or de-schedules occurring, it's possible
+ * for the new work timer to expire (resulting in NO_WORK responses) before the
+ * pre-fetch engine is able to get very deep into the work queues.
+ */
+union cvmx_sso_nw_tim {
+ uint64_t u64;
+ struct cvmx_sso_nw_tim_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t nw_tim : 10; /**< New work timer period */
+#else
+ uint64_t nw_tim : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_sso_nw_tim_s cn68xx;
+ struct cvmx_sso_nw_tim_s cn68xxp1;
+};
+typedef union cvmx_sso_nw_tim cvmx_sso_nw_tim_t;
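+
+/* Editor's sketch of the n-1 encoding above: round the requested cycle
+ * count up to 1024-cycle units, subtract one, and clamp to the 10-bit
+ * field.  CVMX_SSO_NW_TIM is assumed from earlier in this header. */
+static inline void cvmx_sso_example_set_nw_tim(uint64_t cycles)
+{
+ cvmx_sso_nw_tim_t tim;
+ uint64_t periods = (cycles + 1023) / 1024; /* round up */
+ if (periods == 0)
+  periods = 1; /* value 0x0 already means 1024 cycles */
+ if (periods > 1024)
+  periods = 1024; /* NW_TIM is a 10-bit field */
+ tim.u64 = 0;
+ tim.s.nw_tim = periods - 1; /* n-1 notation per the description */
+ cvmx_write_csr(CVMX_SSO_NW_TIM, tim.u64);
+}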
+
+/**
+ * cvmx_sso_oth_ecc_ctl
+ *
+ * SSO_OTH_ECC_CTL = SSO OTH ECC Control
+ *
+ */
+union cvmx_sso_oth_ecc_ctl {
+ uint64_t u64;
+ struct cvmx_sso_oth_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t flip_synd1 : 2; /**< Testing feature. Flip Syndrome to generate a single or
+ double bit error for the odd OTH RAM. */
+ uint64_t ecc_ena1 : 1; /**< ECC Enable: When set, enables the 7-bit ECC
+ correction logic for the odd OTH RAM. */
+ uint64_t flip_synd0 : 2; /**< Testing feature. Flip Syndrome to generate a single or
+ double bit error for the even OTH RAM. */
+ uint64_t ecc_ena0 : 1; /**< ECC Enable: When set, enables the 7-bit ECC
+ correction logic for the even OTH RAM. */
+#else
+ uint64_t ecc_ena0 : 1;
+ uint64_t flip_synd0 : 2;
+ uint64_t ecc_ena1 : 1;
+ uint64_t flip_synd1 : 2;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_sso_oth_ecc_ctl_s cn68xx;
+ struct cvmx_sso_oth_ecc_ctl_s cn68xxp1;
+};
+typedef union cvmx_sso_oth_ecc_ctl cvmx_sso_oth_ecc_ctl_t;
+
+/**
+ * cvmx_sso_oth_ecc_st
+ *
+ * SSO_OTH_ECC_ST = SSO OTH ECC Status
+ *
+ */
+union cvmx_sso_oth_ecc_st {
+ uint64_t u64;
+ struct cvmx_sso_oth_ecc_st_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t addr1 : 11; /**< Latches the address of the latest sbe/dbe that occurred
+ for the odd OTH RAM */
+ uint64_t reserved_43_47 : 5;
+ uint64_t syndrom1 : 7; /**< Reports the latest error syndrome for the odd
+ OTH RAM */
+ uint64_t reserved_27_35 : 9;
+ uint64_t addr0 : 11; /**< Latches the address of the latest sbe/dbe that occurred
+ for the even OTH RAM */
+ uint64_t reserved_11_15 : 5;
+ uint64_t syndrom0 : 7; /**< Reports the latest error syndrome for the even
+ OTH RAM */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t syndrom0 : 7;
+ uint64_t reserved_11_15 : 5;
+ uint64_t addr0 : 11;
+ uint64_t reserved_27_35 : 9;
+ uint64_t syndrom1 : 7;
+ uint64_t reserved_43_47 : 5;
+ uint64_t addr1 : 11;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } s;
+ struct cvmx_sso_oth_ecc_st_s cn68xx;
+ struct cvmx_sso_oth_ecc_st_s cn68xxp1;
+};
+typedef union cvmx_sso_oth_ecc_st cvmx_sso_oth_ecc_st_t;
+
+/**
+ * cvmx_sso_pnd_ecc_ctl
+ *
+ * SSO_PND_ECC_CTL = SSO PND ECC Control
+ *
+ */
+union cvmx_sso_pnd_ecc_ctl {
+ uint64_t u64;
+ struct cvmx_sso_pnd_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t flip_synd1 : 2; /**< Testing feature. Flip Syndrome to generate a single or
+ double bit error for the odd PND RAM. */
+ uint64_t ecc_ena1 : 1; /**< ECC Enable: When set, enables the 7-bit ECC
+ correction logic for the odd PND RAM. */
+ uint64_t flip_synd0 : 2; /**< Testing feature. Flip Syndrome to generate a single or
+ double bit error for the even PND RAM. */
+ uint64_t ecc_ena0 : 1; /**< ECC Enable: When set, enables the 7-bit ECC
+ correction logic for the even PND RAM. */
+#else
+ uint64_t ecc_ena0 : 1;
+ uint64_t flip_synd0 : 2;
+ uint64_t ecc_ena1 : 1;
+ uint64_t flip_synd1 : 2;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_sso_pnd_ecc_ctl_s cn68xx;
+ struct cvmx_sso_pnd_ecc_ctl_s cn68xxp1;
+};
+typedef union cvmx_sso_pnd_ecc_ctl cvmx_sso_pnd_ecc_ctl_t;
+
+/**
+ * cvmx_sso_pnd_ecc_st
+ *
+ * SSO_PND_ECC_ST = SSO PND ECC Status
+ *
+ */
+union cvmx_sso_pnd_ecc_st {
+ uint64_t u64;
+ struct cvmx_sso_pnd_ecc_st_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63 : 5;
+ uint64_t addr1 : 11; /**< Latches the address of the latest sbe/dbe that occurred
+ for the odd PND RAM */
+ uint64_t reserved_43_47 : 5;
+ uint64_t syndrom1 : 7; /**< Reports the latest error syndrome for the odd
+ PND RAM */
+ uint64_t reserved_27_35 : 9;
+ uint64_t addr0 : 11; /**< Latches the address of the latest sbe/dbe that occurred
+ for the even PND RAM */
+ uint64_t reserved_11_15 : 5;
+ uint64_t syndrom0 : 7; /**< Reports the latest error syndrome for the even
+ PND RAM */
+ uint64_t reserved_0_3 : 4;
+#else
+ uint64_t reserved_0_3 : 4;
+ uint64_t syndrom0 : 7;
+ uint64_t reserved_11_15 : 5;
+ uint64_t addr0 : 11;
+ uint64_t reserved_27_35 : 9;
+ uint64_t syndrom1 : 7;
+ uint64_t reserved_43_47 : 5;
+ uint64_t addr1 : 11;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } s;
+ struct cvmx_sso_pnd_ecc_st_s cn68xx;
+ struct cvmx_sso_pnd_ecc_st_s cn68xxp1;
+};
+typedef union cvmx_sso_pnd_ecc_st cvmx_sso_pnd_ecc_st_t;
+
+/**
+ * cvmx_sso_pp#_grp_msk
+ *
+ * CSR reserved addresses: (24): 0x5040..0x50f8
+ * CSR align addresses: ===========================================================================================================
+ * SSO_PPX_GRP_MSK = SSO PP Group Mask Register
+ * (one bit per group per PP)
+ *
+ * Selects which group(s) a PP belongs to. A '1' in any bit position sets the
+ * PP's membership in the corresponding group. A value of 0x0 will prevent the
+ * PP from receiving new work.
+ *
+ * Note that these do not contain QOS level priorities for each PP. This is a
+ * change from previous POW designs.
+ */
+union cvmx_sso_ppx_grp_msk {
+ uint64_t u64;
+ struct cvmx_sso_ppx_grp_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t grp_msk : 64; /**< PPX group mask */
+#else
+ uint64_t grp_msk : 64;
+#endif
+ } s;
+ struct cvmx_sso_ppx_grp_msk_s cn68xx;
+ struct cvmx_sso_ppx_grp_msk_s cn68xxp1;
+};
+typedef union cvmx_sso_ppx_grp_msk cvmx_sso_ppx_grp_msk_t;
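+
+/* Editor's sketch: group membership is a plain 64-bit mask (one bit per
+ * group); writing 0 stops the PP from receiving new work, per the note
+ * above.  CVMX_SSO_PPX_GRP_MSK(pp) is assumed from earlier in this
+ * header. */
+static inline void cvmx_sso_example_set_grp_msk(int pp, uint64_t groups)
+{
+ cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(pp), groups);
+}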
+
+/**
+ * cvmx_sso_pp#_qos_pri
+ *
+ * CSR reserved addresses: (56): 0x2040..0x21f8
+ * CSR align addresses: ===========================================================================================================
+ * SSO_PP(0..31)_QOS_PRI = SSO PP QOS Priority Register
+ * (one field per IQ per PP)
+ *
+ * Contains the QOS level priorities for each PP.
+ * 0x0 is the highest priority
+ * 0x7 is the lowest priority
+ * 0xf prevents the PP from receiving work from that QOS level
+ * 0x8-0xe Reserved
+ *
+ * For a given PP, priorities should begin at 0x0, and remain contiguous
+ * throughout the range. Failure to do so may result in severe
+ * performance degradation.
+ *
+ *
+ * Priorities for IQs 0..7
+ */
+union cvmx_sso_ppx_qos_pri {
+ uint64_t u64;
+ struct cvmx_sso_ppx_qos_pri_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t qos7_pri : 4; /**< QOS7 priority for PPX */
+ uint64_t reserved_52_55 : 4;
+ uint64_t qos6_pri : 4; /**< QOS6 priority for PPX */
+ uint64_t reserved_44_47 : 4;
+ uint64_t qos5_pri : 4; /**< QOS5 priority for PPX */
+ uint64_t reserved_36_39 : 4;
+ uint64_t qos4_pri : 4; /**< QOS4 priority for PPX */
+ uint64_t reserved_28_31 : 4;
+ uint64_t qos3_pri : 4; /**< QOS3 priority for PPX */
+ uint64_t reserved_20_23 : 4;
+ uint64_t qos2_pri : 4; /**< QOS2 priority for PPX */
+ uint64_t reserved_12_15 : 4;
+ uint64_t qos1_pri : 4; /**< QOS1 priority for PPX */
+ uint64_t reserved_4_7 : 4;
+ uint64_t qos0_pri : 4; /**< QOS0 priority for PPX */
+#else
+ uint64_t qos0_pri : 4;
+ uint64_t reserved_4_7 : 4;
+ uint64_t qos1_pri : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t qos2_pri : 4;
+ uint64_t reserved_20_23 : 4;
+ uint64_t qos3_pri : 4;
+ uint64_t reserved_28_31 : 4;
+ uint64_t qos4_pri : 4;
+ uint64_t reserved_36_39 : 4;
+ uint64_t qos5_pri : 4;
+ uint64_t reserved_44_47 : 4;
+ uint64_t qos6_pri : 4;
+ uint64_t reserved_52_55 : 4;
+ uint64_t qos7_pri : 4;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } s;
+ struct cvmx_sso_ppx_qos_pri_s cn68xx;
+ struct cvmx_sso_ppx_qos_pri_s cn68xxp1;
+};
+typedef union cvmx_sso_ppx_qos_pri cvmx_sso_ppx_qos_pri_t;
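+
+/* Editor's sketch: per the contiguity rule above, give the enabled QOS
+ * levels priorities 0x0, 0x1, ... in order and mark skipped levels 0xF.
+ * Each 4-bit priority field sits at bit 8*level, so the shift below
+ * matches the layout.  CVMX_SSO_PPX_QOS_PRI(pp) is assumed from earlier
+ * in this header. */
+static inline void cvmx_sso_example_set_qos_pri(int pp, uint8_t level_mask)
+{
+ cvmx_sso_ppx_qos_pri_t pri;
+ int level, next = 0;
+ pri.u64 = 0;
+ for (level = 0; level < 8; level++) {
+  uint64_t val = (level_mask & (1 << level)) ? (uint64_t)next++ : 0xF;
+  pri.u64 |= val << (8 * level);
+ }
+ cvmx_write_csr(CVMX_SSO_PPX_QOS_PRI(pp), pri.u64);
+}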
+
+/**
+ * cvmx_sso_pp_strict
+ *
+ * SSO_PP_STRICT = SSO Strict Priority
+ *
+ * This register controls getting work from the input queues. If the bit
+ * corresponding to a PP is set, that PP will not take work off the input
+ * queues until it is known that there is no higher-priority work available.
+ *
+ * Setting SSO_PP_STRICT may incur a performance penalty if highest-priority
+ * work is not found early.
+ *
+ * It is possible to starve a PP of work with SSO_PP_STRICT. If the
+ * SSO_PPX_GRP_MSK for a PP masks-out much of the work added to the input
+ * queues that are higher-priority for that PP, and if there is a constant
+ * stream of work through one or more of those higher-priority input queues,
+ * then that PP may not accept work from lower-priority input queues. This can
+ * be alleviated by ensuring that most or all the work added to the
+ * higher-priority input queues for a PP with SSO_PP_STRICT set are in a group
+ * acceptable to that PP.
+ *
+ * It is also possible to neglect work in an input queue if SSO_PP_STRICT is
+ * used. If an input queue is a lower-priority queue for all PPs, and if all
+ * the PPs have their corresponding bit in SSO_PP_STRICT set, then work may
+ * never be taken (or be seldom taken) from that queue. This can be alleviated
+ * by ensuring that work in all input queues can be serviced by one or more PPs
+ * that do not have SSO_PP_STRICT set, or that the input queue is the
+ * highest-priority input queue for one or more PPs that do have SSO_PP_STRICT
+ * set.
+ */
+union cvmx_sso_pp_strict {
+ uint64_t u64;
+ struct cvmx_sso_pp_strict_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t pp_strict : 32; /**< Corresponding PP operates in strict mode. */
+#else
+ uint64_t pp_strict : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sso_pp_strict_s cn68xx;
+ struct cvmx_sso_pp_strict_s cn68xxp1;
+};
+typedef union cvmx_sso_pp_strict cvmx_sso_pp_strict_t;
+
+/**
+ * cvmx_sso_qos#_rnd
+ *
+ * CSR align addresses: ===========================================================================================================
+ * SSO_QOS(0..7)_RND = SSO QOS Issue Round Register
+ * (one per IQ)
+ *
+ * The number of arbitration rounds each QOS level participates in.
+ */
+union cvmx_sso_qosx_rnd {
+ uint64_t u64;
+ struct cvmx_sso_qosx_rnd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t rnds_qos : 8; /**< Number of rounds to participate in for IQ(X). */
+#else
+ uint64_t rnds_qos : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_sso_qosx_rnd_s cn68xx;
+ struct cvmx_sso_qosx_rnd_s cn68xxp1;
+};
+typedef union cvmx_sso_qosx_rnd cvmx_sso_qosx_rnd_t;
+
+/**
+ * cvmx_sso_qos_thr#
+ *
+ * CSR reserved addresses: (24): 0xa040..0xa0f8
+ * CSR align addresses: ===========================================================================================================
+ * SSO_QOS_THRX = SSO QOS Threshold Register
+ * (one per QOS level)
+ *
+ * Contains the thresholds for allocating SSO internal storage buffers. If the
+ * number of remaining free buffers drops below the minimum threshold (MIN_THR)
+ * or the number of allocated buffers for this QOS level rises above the
+ * maximum threshold (MAX_THR), future incoming work queue entries will be
+ * buffered externally rather than internally. This register also contains the
+ * number of internal buffers currently allocated to this QOS level (BUF_CNT).
+ */
+union cvmx_sso_qos_thrx {
+ uint64_t u64;
+ struct cvmx_sso_qos_thrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t buf_cnt : 12; /**< # of internal buffers allocated to QOS level X */
+ uint64_t reserved_26_27 : 2;
+ uint64_t max_thr : 12; /**< Max threshold for QOS level X
+ For performance reasons, MAX_THR can have a slop of 4
+ WQEs for QOS level X. */
+ uint64_t reserved_12_13 : 2;
+ uint64_t min_thr : 12; /**< Min threshold for QOS level X
+ For performance reasons, MIN_THR can have a slop of 4
+ WQEs for QOS level X. */
+#else
+ uint64_t min_thr : 12;
+ uint64_t reserved_12_13 : 2;
+ uint64_t max_thr : 12;
+ uint64_t reserved_26_27 : 2;
+ uint64_t buf_cnt : 12;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_sso_qos_thrx_s cn68xx;
+ struct cvmx_sso_qos_thrx_s cn68xxp1;
+};
+typedef union cvmx_sso_qos_thrx cvmx_sso_qos_thrx_t;
+
+/**
+ * cvmx_sso_qos_we
+ *
+ * SSO_QOS_WE = SSO WE Buffers
+ *
+ * This register contains a read-only count of the current number of free
+ * buffers (FREE_CNT) and the total number of tag chain heads on the de-schedule list
+ * (DES_CNT) (which is not the same as the total number of entries on all of the descheduled
+ * tag chains.)
+ */
+union cvmx_sso_qos_we {
+ uint64_t u64;
+ struct cvmx_sso_qos_we_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63 : 38;
+ uint64_t des_cnt : 12; /**< Number of buffers on de-schedule list */
+ uint64_t reserved_12_13 : 2;
+ uint64_t free_cnt : 12; /**< Number of total free buffers */
+#else
+ uint64_t free_cnt : 12;
+ uint64_t reserved_12_13 : 2;
+ uint64_t des_cnt : 12;
+ uint64_t reserved_26_63 : 38;
+#endif
+ } s;
+ struct cvmx_sso_qos_we_s cn68xx;
+ struct cvmx_sso_qos_we_s cn68xxp1;
+};
+typedef union cvmx_sso_qos_we cvmx_sso_qos_we_t;
+
+/**
+ * cvmx_sso_reset
+ *
+ * SSO_RESET = SSO Soft Reset
+ *
+ * Writing a one to SSO_RESET[RESET] will reset the SSO. After receiving a
+ * store to this CSR, the SSO must not be sent any other operations for 2500
+ * sclk cycles.
+ *
+ * Note that the contents of this register are reset along with the rest of the
+ * SSO.
+ *
+ * IMPLEMENTATION NOTES--NOT FOR SPEC:
+ * The SSO must return the bus credit associated with the CSR store used
+ * to write this register before resetting itself, and the RSL tree
+ * that passes through the SSO must continue to work for RSL operations
+ * that do not target the SSO itself.
+ */
+union cvmx_sso_reset {
+ uint64_t u64;
+ struct cvmx_sso_reset_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t reset : 1; /**< Reset the SSO */
+#else
+ uint64_t reset : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_sso_reset_s cn68xx;
+};
+typedef union cvmx_sso_reset cvmx_sso_reset_t;
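+
+/* Editor's sketch: write RESET, then keep the SSO idle for the required
+ * 2500 sclk cycles.  cvmx_wait() (a core-cycle busy-wait) and the
+ * CVMX_SSO_RESET address macro are assumed from elsewhere in the SDK;
+ * the 8x scale factor is a hypothetical bound for the core:sclk clock
+ * ratio, since cvmx_wait() counts core cycles, not sclk cycles. */
+static inline void cvmx_sso_example_soft_reset(void)
+{
+ cvmx_sso_reset_t rst;
+ rst.u64 = 0;
+ rst.s.reset = 1;
+ cvmx_write_csr(CVMX_SSO_RESET, rst.u64);
+ cvmx_wait(2500 * 8); /* no SSO operations (reads included) in this window */
+}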
+
+/**
+ * cvmx_sso_rwq_head_ptr#
+ *
+ * CSR reserved addresses: (24): 0xb040..0xb0f8
+ * CSR align addresses: ===========================================================================================================
+ * SSO_RWQ_HEAD_PTRX = SSO Remote Queue Head Register
+ * (one per QOS level)
+ * Contains the ptr to the first entry of the remote linked list(s) for a particular
+ * QoS level. SW should initialize the remote linked list(s) by programming
+ * SSO_RWQ_HEAD_PTRX and SSO_RWQ_TAIL_PTRX to identical values.
+ */
+union cvmx_sso_rwq_head_ptrx {
+ uint64_t u64;
+ struct cvmx_sso_rwq_head_ptrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t ptr : 31; /**< Head Pointer */
+ uint64_t reserved_5_6 : 2;
+ uint64_t rctr : 5; /**< Index of next WQE entry in fill packet to be
+ processed (inbound queues) */
+#else
+ uint64_t rctr : 5;
+ uint64_t reserved_5_6 : 2;
+ uint64_t ptr : 31;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_sso_rwq_head_ptrx_s cn68xx;
+ struct cvmx_sso_rwq_head_ptrx_s cn68xxp1;
+};
+typedef union cvmx_sso_rwq_head_ptrx cvmx_sso_rwq_head_ptrx_t;
+
+/**
+ * cvmx_sso_rwq_pop_fptr
+ *
+ * SSO_RWQ_POP_FPTR = SSO Pop Free Pointer
+ *
+ * This register is used by SW to remove pointers for buffer-reallocation and diagnostics, and
+ * should only be used when SSO is idle.
+ *
+ * To remove ALL pointers, software must ensure that the number of pointers
+ * in the FPA is a multiple of 16. To do this, SSO_CFG.RWQ_BYP_DIS must be
+ * set, the FPA pointer count read, and enough fake buffers pushed via
+ * SSO_RWQ_PSH_FPTR to bring the FPA pointer count up to a multiple of 16.
+ */
+union cvmx_sso_rwq_pop_fptr {
+ uint64_t u64;
+ struct cvmx_sso_rwq_pop_fptr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t val : 1; /**< Free Pointer Valid */
+ uint64_t cnt : 6; /**< fptr_in count */
+ uint64_t reserved_38_56 : 19;
+ uint64_t fptr : 31; /**< Free Pointer */
+ uint64_t reserved_0_6 : 7;
+#else
+ uint64_t reserved_0_6 : 7;
+ uint64_t fptr : 31;
+ uint64_t reserved_38_56 : 19;
+ uint64_t cnt : 6;
+ uint64_t val : 1;
+#endif
+ } s;
+ struct cvmx_sso_rwq_pop_fptr_s cn68xx;
+ struct cvmx_sso_rwq_pop_fptr_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t val : 1; /**< Free Pointer Valid */
+ uint64_t reserved_38_62 : 25;
+ uint64_t fptr : 31; /**< Free Pointer */
+ uint64_t reserved_0_6 : 7;
+#else
+ uint64_t reserved_0_6 : 7;
+ uint64_t fptr : 31;
+ uint64_t reserved_38_62 : 25;
+ uint64_t val : 1;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_sso_rwq_pop_fptr cvmx_sso_rwq_pop_fptr_t;
+
+/**
+ * cvmx_sso_rwq_psh_fptr
+ *
+ * CSR reserved addresses: (56): 0xc240..0xc3f8
+ * SSO_RWQ_PSH_FPTR = SSO Free Pointer FIFO
+ *
+ * This register is used by SW to initialize the SSO with a pool of free
+ * pointers by writing the FPTR field whenever FULL = 0. Free pointers are
+ * fetched/released from/to the pool when accessing WQE entries stored remotely
+ * (in remote linked lists). Free pointers should be 128-byte aligned, and each
+ * points to a 256-byte buffer. This register should only be used when SSO is idle.
+ *
+ * Software needs to set aside buffering for
+ * 8 + 48 + ROUNDUP(N/26)
+ *
+ * where as many as N DRAM work queue entries may be used. The first 8 buffers
+ * are used to set up the SSO_RWQ_HEAD_PTR and SSO_RWQ_TAIL_PTRs, and the
+ * remainder are pushed via this register.
+ *
+ * IMPLEMENTATION NOTES--NOT FOR SPEC:
+ * 48 avoids false out of buffer error due to (16) FPA and in-sso FPA buffering (32)
+ * 26 is number of WAE's per 256B buffer
+ */
+union cvmx_sso_rwq_psh_fptr {
+ uint64_t u64;
+ struct cvmx_sso_rwq_psh_fptr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t full : 1; /**< FIFO Full. When set, the FPA is busy writing entries
+ and software must wait before adding new entries. */
+ uint64_t cnt : 4; /**< fptr_out count */
+ uint64_t reserved_38_58 : 21;
+ uint64_t fptr : 31; /**< Free Pointer */
+ uint64_t reserved_0_6 : 7;
+#else
+ uint64_t reserved_0_6 : 7;
+ uint64_t fptr : 31;
+ uint64_t reserved_38_58 : 21;
+ uint64_t cnt : 4;
+ uint64_t full : 1;
+#endif
+ } s;
+ struct cvmx_sso_rwq_psh_fptr_s cn68xx;
+ struct cvmx_sso_rwq_psh_fptr_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t full : 1; /**< FIFO Full. When set, the FPA is busy writing entries
+ and software must wait before adding new entries. */
+ uint64_t reserved_38_62 : 25;
+ uint64_t fptr : 31; /**< Free Pointer */
+ uint64_t reserved_0_6 : 7;
+#else
+ uint64_t reserved_0_6 : 7;
+ uint64_t fptr : 31;
+ uint64_t reserved_38_62 : 25;
+ uint64_t full : 1;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_sso_rwq_psh_fptr cvmx_sso_rwq_psh_fptr_t;
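+
+/* Editor's sketch of the sizing rule and FULL handshake above.  The
+ * address macro and cvmx_read_csr()/cvmx_write_csr() are assumed from
+ * elsewhere in the SDK, and packing the pointer as address >> 7 is an
+ * assumption based on the FPTR field spanning bits <37:7> with a
+ * 128-byte alignment requirement. */
+static inline uint64_t cvmx_sso_example_rwq_buffers(uint64_t n_wqe)
+{
+ return 8 + 48 + (n_wqe + 25) / 26; /* 8 + 48 + ROUNDUP(N/26) */
+}
+
+static inline void cvmx_sso_example_push_fptr(uint64_t phys_addr)
+{
+ cvmx_sso_rwq_psh_fptr_t psh;
+ do { /* wait until FULL clears, as the description requires */
+  psh.u64 = cvmx_read_csr(CVMX_SSO_RWQ_PSH_FPTR);
+ } while (psh.s.full);
+ psh.u64 = 0;
+ psh.s.fptr = phys_addr >> 7; /* FPTR holds address bits <37:7> */
+ cvmx_write_csr(CVMX_SSO_RWQ_PSH_FPTR, psh.u64);
+}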
+
+/**
+ * cvmx_sso_rwq_tail_ptr#
+ *
+ * CSR reserved addresses: (56): 0xc040..0xc1f8
+ * SSO_RWQ_TAIL_PTRX = SSO Remote Queue Tail Register
+ * (one per QOS level)
+ * Contains the ptr to the last entry of the remote linked list(s) for a particular
+ * QoS level. SW must initialize the remote linked list(s) by programming
+ * SSO_RWQ_HEAD_PTRX and SSO_RWQ_TAIL_PTRX to identical values.
+ */
+union cvmx_sso_rwq_tail_ptrx {
+ uint64_t u64;
+ struct cvmx_sso_rwq_tail_ptrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t ptr : 31; /**< Tail Pointer */
+ uint64_t reserved_5_6 : 2;
+ uint64_t rctr : 5; /**< Number of entries waiting to be sent out to external
+ RAM (outbound queues) */
+#else
+ uint64_t rctr : 5;
+ uint64_t reserved_5_6 : 2;
+ uint64_t ptr : 31;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_sso_rwq_tail_ptrx_s cn68xx;
+ struct cvmx_sso_rwq_tail_ptrx_s cn68xxp1;
+};
+typedef union cvmx_sso_rwq_tail_ptrx cvmx_sso_rwq_tail_ptrx_t;
+
+/**
+ * cvmx_sso_ts_pc
+ *
+ * SSO_TS_PC = SSO Tag Switch Performance Counter
+ *
+ * Counts the number of tag switch requests.
+ * Counter rolls over through zero when max value exceeded.
+ */
+union cvmx_sso_ts_pc {
+ uint64_t u64;
+ struct cvmx_sso_ts_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ts_pc : 64; /**< Tag switch performance counter */
+#else
+ uint64_t ts_pc : 64;
+#endif
+ } s;
+ struct cvmx_sso_ts_pc_s cn68xx;
+ struct cvmx_sso_ts_pc_s cn68xxp1;
+};
+typedef union cvmx_sso_ts_pc cvmx_sso_ts_pc_t;
+
+/**
+ * cvmx_sso_wa_com_pc
+ *
+ * SSO_WA_COM_PC = SSO Work Add Combined Performance Counter
+ *
+ * Counts the number of add new work requests for all QOS levels.
+ * Counter rolls over through zero when max value exceeded.
+ */
+union cvmx_sso_wa_com_pc {
+ uint64_t u64;
+ struct cvmx_sso_wa_com_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wa_pc : 64; /**< Work add combined performance counter */
+#else
+ uint64_t wa_pc : 64;
+#endif
+ } s;
+ struct cvmx_sso_wa_com_pc_s cn68xx;
+ struct cvmx_sso_wa_com_pc_s cn68xxp1;
+};
+typedef union cvmx_sso_wa_com_pc cvmx_sso_wa_com_pc_t;
+
+/**
+ * cvmx_sso_wa_pc#
+ *
+ * CSR reserved addresses: (64): 0x4200..0x43f8
+ * CSR align addresses: ===========================================================================================================
+ * SSO_WA_PCX = SSO Work Add Performance Counter
+ * (one per QOS level)
+ *
+ * Counts the number of add new work requests for each QOS level.
+ * Counter rolls over through zero when max value exceeded.
+ */
+union cvmx_sso_wa_pcx {
+ uint64_t u64;
+ struct cvmx_sso_wa_pcx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wa_pc : 64; /**< Work add performance counter for QOS level X */
+#else
+ uint64_t wa_pc : 64;
+#endif
+ } s;
+ struct cvmx_sso_wa_pcx_s cn68xx;
+ struct cvmx_sso_wa_pcx_s cn68xxp1;
+};
+typedef union cvmx_sso_wa_pcx cvmx_sso_wa_pcx_t;
+
+/**
+ * cvmx_sso_wq_int
+ *
+ * Note, the old POW offsets ran from 0x0 to 0x3f8, leaving the next available slot at 0x400.
+ * To ensure no overlap, start on 4k boundary: 0x1000.
+ * SSO_WQ_INT = SSO Work Queue Interrupt Register
+ *
+ * Contains the bits (one per group) that set work queue interrupts and are
+ * used to clear these interrupts. For more information regarding this
+ * register, see the interrupt section of the SSO spec.
+ */
+union cvmx_sso_wq_int {
+ uint64_t u64;
+ struct cvmx_sso_wq_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wq_int : 64; /**< Work queue interrupt bits
+ Corresponding WQ_INT bit is set by HW whenever:
+ - SSO_WQ_INT_CNTX[IQ_CNT] >=
+ SSO_WQ_INT_THRX[IQ_THR] and the threshold
+ interrupt is not disabled.
+ SSO_WQ_IQ_DISX[IQ_DIS<X>]==1 disables the interrupt
+ SSO_WQ_INT_THRX[IQ_THR]==0 disables the int.
+ - SSO_WQ_INT_CNTX[DS_CNT] >=
+ SSO_WQ_INT_THRX[DS_THR] and the threshold
+ interrupt is not disabled
+ SSO_WQ_INT_THRX[DS_THR]==0 disables the int.
+ - SSO_WQ_INT_CNTX[TC_CNT]==1 when periodic
+ counter SSO_WQ_INT_PC[PC]==0 and
+ SSO_WQ_INT_THRX[TC_EN]==1 and at least one of:
+ - SSO_WQ_INT_CNTX[IQ_CNT] > 0
+ - SSO_WQ_INT_CNTX[DS_CNT] > 0 */
+#else
+ uint64_t wq_int : 64;
+#endif
+ } s;
+ struct cvmx_sso_wq_int_s cn68xx;
+ struct cvmx_sso_wq_int_s cn68xxp1;
+};
+typedef union cvmx_sso_wq_int cvmx_sso_wq_int_t;
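+
+/* Editor's sketch: assuming the write-1-to-clear behavior implied by
+ * the description ("used to clear these interrupts"), acknowledge one
+ * group's interrupt.  CVMX_SSO_WQ_INT is assumed from earlier in this
+ * header. */
+static inline void cvmx_sso_example_ack_wq_int(int grp)
+{
+ cvmx_write_csr(CVMX_SSO_WQ_INT, 1ull << grp);
+}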
+
+/**
+ * cvmx_sso_wq_int_cnt#
+ *
+ * CSR reserved addresses: (64): 0x7200..0x73f8
+ * CSR align addresses: ===========================================================================================================
+ * SSO_WQ_INT_CNTX = SSO Work Queue Interrupt Count Register
+ * (one per group)
+ *
+ * Contains a read-only copy of the counts used to trigger work queue
+ * interrupts. For more information regarding this register, see the interrupt
+ * section.
+ */
+union cvmx_sso_wq_int_cntx {
+ uint64_t u64;
+ struct cvmx_sso_wq_int_cntx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t tc_cnt : 4; /**< Time counter current value for group X
+ HW sets TC_CNT to SSO_WQ_INT_THRX[TC_THR] whenever:
+ - corresponding SSO_WQ_INT_CNTX[IQ_CNT]==0 and
+ corresponding SSO_WQ_INT_CNTX[DS_CNT]==0
+ - corresponding SSO_WQ_INT[WQ_INT<X>] is written
+ with a 1 by SW
+ - corresponding SSO_WQ_IQ_DIS[IQ_DIS<X>] is written
+ with a 1 by SW
+ - corresponding SSO_WQ_INT_THRX is written by SW
+ - TC_CNT==1 and periodic counter
+ SSO_WQ_INT_PC[PC]==0
+ Otherwise, HW decrements TC_CNT whenever the
+ periodic counter SSO_WQ_INT_PC[PC]==0.
+ TC_CNT is 0 whenever SSO_WQ_INT_THRX[TC_THR]==0. */
+ uint64_t reserved_26_27 : 2;
+ uint64_t ds_cnt : 12; /**< De-schedule executable count for group X */
+ uint64_t reserved_12_13 : 2;
+ uint64_t iq_cnt : 12; /**< Input queue executable count for group X */
+#else
+ uint64_t iq_cnt : 12;
+ uint64_t reserved_12_13 : 2;
+ uint64_t ds_cnt : 12;
+ uint64_t reserved_26_27 : 2;
+ uint64_t tc_cnt : 4;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_sso_wq_int_cntx_s cn68xx;
+ struct cvmx_sso_wq_int_cntx_s cn68xxp1;
+};
+typedef union cvmx_sso_wq_int_cntx cvmx_sso_wq_int_cntx_t;
+
+/**
+ * cvmx_sso_wq_int_pc
+ *
+ * CSR reserved addresses: (1): 0x1018..0x1018
+ * SSO_WQ_INT_PC = SSO Work Queue Interrupt Periodic Counter Register
+ *
+ * Contains the threshold value for the work queue interrupt periodic counter
+ * and also a read-only copy of the periodic counter. For more information
+ * regarding this register, see the interrupt section.
+ */
+union cvmx_sso_wq_int_pc {
+ uint64_t u64;
+ struct cvmx_sso_wq_int_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63 : 4;
+ uint64_t pc : 28; /**< Work queue interrupt periodic counter */
+ uint64_t reserved_28_31 : 4;
+ uint64_t pc_thr : 20; /**< Work queue interrupt periodic counter threshold */
+ uint64_t reserved_0_7 : 8;
+#else
+ uint64_t reserved_0_7 : 8;
+ uint64_t pc_thr : 20;
+ uint64_t reserved_28_31 : 4;
+ uint64_t pc : 28;
+ uint64_t reserved_60_63 : 4;
+#endif
+ } s;
+ struct cvmx_sso_wq_int_pc_s cn68xx;
+ struct cvmx_sso_wq_int_pc_s cn68xxp1;
+};
+typedef union cvmx_sso_wq_int_pc cvmx_sso_wq_int_pc_t;
+
+/**
+ * cvmx_sso_wq_int_thr#
+ *
+ * CSR reserved addresses: (96): 0x6100..0x63f8
+ * CSR align addresses: ===========================================================================================================
+ * SSO_WQ_INT_THR(0..63) = SSO Work Queue Interrupt Threshold Registers
+ * (one per group)
+ *
+ * Contains the thresholds for enabling and setting work queue interrupts. For
+ * more information, see the interrupt section.
+ *
+ * Note: Up to 16 of the SSO's internal storage buffers can be allocated
+ * for hardware use and are therefore not available for incoming work queue
+ * entries. Additionally, any WS that is not in the EMPTY state consumes a
+ * buffer. Thus in a 32 PP system, it is not advisable to set either IQ_THR or
+ * DS_THR to greater than 2048 - 16 - 32*2 = 1968. Doing so may prevent the
+ * interrupt from ever triggering.
+ */
+union cvmx_sso_wq_int_thrx {
+ uint64_t u64;
+ struct cvmx_sso_wq_int_thrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63 : 31;
+ uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
+ TC_EN must be zero when TC_THR==0 */
+ uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
+ When TC_THR==0, SSO_WQ_INT_CNTX[TC_CNT] is zero */
+ uint64_t reserved_26_27 : 2;
+ uint64_t ds_thr : 12; /**< De-schedule count threshold for group X
+ DS_THR==0 disables the threshold interrupt */
+ uint64_t reserved_12_13 : 2;
+ uint64_t iq_thr : 12; /**< Input queue count threshold for group X
+ IQ_THR==0 disables the threshold interrupt */
+#else
+ uint64_t iq_thr : 12;
+ uint64_t reserved_12_13 : 2;
+ uint64_t ds_thr : 12;
+ uint64_t reserved_26_27 : 2;
+ uint64_t tc_thr : 4;
+ uint64_t tc_en : 1;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_sso_wq_int_thrx_s cn68xx;
+ struct cvmx_sso_wq_int_thrx_s cn68xxp1;
+};
+typedef union cvmx_sso_wq_int_thrx cvmx_sso_wq_int_thrx_t;
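+
+/* Editor's sketch: program a group's thresholds, clamping to the
+ * 2048 - 16 - 32*2 = 1968 bound derived above so the interrupt can
+ * still fire.  CVMX_SSO_WQ_INT_THRX(grp) is assumed from earlier in
+ * this header. */
+static inline void cvmx_sso_example_set_wq_thr(int grp, unsigned iq, unsigned ds)
+{
+ cvmx_sso_wq_int_thrx_t thr;
+ thr.u64 = 0;
+ thr.s.iq_thr = iq > 1968 ? 1968 : iq;
+ thr.s.ds_thr = ds > 1968 ? 1968 : ds;
+ cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(grp), thr.u64);
+}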
+
+/**
+ * cvmx_sso_wq_iq_dis
+ *
+ * CSR reserved addresses: (1): 0x1008..0x1008
+ * SSO_WQ_IQ_DIS = SSO Input Queue Interrupt Temporary Disable Mask
+ *
+ * Contains the input queue interrupt temporary disable bits (one per group).
+ * For more information regarding this register, see the interrupt section.
+ */
+union cvmx_sso_wq_iq_dis {
+ uint64_t u64;
+ struct cvmx_sso_wq_iq_dis_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t iq_dis : 64; /**< Input queue interrupt temporary disable mask
+ Corresponding SSO_WQ_INTX[WQ_INT<X>] bit cannot be
+ set due to IQ_CNT/IQ_THR check when this bit is set.
+ Corresponding IQ_DIS bit is cleared by HW whenever:
+ - SSO_WQ_INT_CNTX[IQ_CNT] is zero, or
+ - SSO_WQ_INT_CNTX[TC_CNT]==1 when periodic
+ counter SSO_WQ_INT_PC[PC]==0 */
+#else
+ uint64_t iq_dis : 64;
+#endif
+ } s;
+ struct cvmx_sso_wq_iq_dis_s cn68xx;
+ struct cvmx_sso_wq_iq_dis_s cn68xxp1;
+};
+typedef union cvmx_sso_wq_iq_dis cvmx_sso_wq_iq_dis_t;
+
+/**
+ * cvmx_sso_ws_pc#
+ *
+ * CSR reserved addresses: (225): 0x3100..0x3800
+ * CSR align addresses: ===========================================================================================================
+ * SSO_WS_PCX = SSO Work Schedule Performance Counter
+ * (one per group)
+ *
+ * Counts the number of work schedules for each group.
+ * Counter rolls over through zero when max value exceeded.
+ */
+union cvmx_sso_ws_pcx {
+ uint64_t u64;
+ struct cvmx_sso_ws_pcx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ws_pc : 64; /**< Work schedule performance counter for group X */
+#else
+ uint64_t ws_pc : 64;
+#endif
+ } s;
+ struct cvmx_sso_ws_pcx_s cn68xx;
+ struct cvmx_sso_ws_pcx_s cn68xxp1;
+};
+typedef union cvmx_sso_ws_pcx cvmx_sso_ws_pcx_t;
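+
+/* Editor's sketch: CVMX_SSO_WS_PCX() near the top of this file supplies
+ * the per-group address; the counter rolls over through zero, so
+ * callers should diff successive reads with unsigned arithmetic. */
+static inline uint64_t cvmx_sso_example_read_ws_pc(unsigned long grp)
+{
+ return cvmx_read_csr(CVMX_SSO_WS_PCX(grp));
+}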
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-sso-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-stxx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-stxx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-stxx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,865 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-stxx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon stxx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_STXX_DEFS_H__
+#define __CVMX_STXX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_ARB_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_ARB_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000608ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_ARB_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000608ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_BCKPRS_CNT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_BCKPRS_CNT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000688ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_BCKPRS_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000688ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_COM_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_COM_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000600ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_COM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000600ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_DIP_CNT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_DIP_CNT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000690ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_DIP_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000690ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_IGN_CAL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_IGN_CAL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000610ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_IGN_CAL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000610ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_INT_MSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_INT_MSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800900006A0ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_INT_MSK(block_id) (CVMX_ADD_IO_SEG(0x00011800900006A0ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_INT_REG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_INT_REG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000698ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180090000698ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_INT_SYNC(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_INT_SYNC(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800900006A8ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_INT_SYNC(block_id) (CVMX_ADD_IO_SEG(0x00011800900006A8ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_MIN_BST(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_MIN_BST(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000618ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_MIN_BST(block_id) (CVMX_ADD_IO_SEG(0x0001180090000618ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_SPI4_CALX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 31)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 31)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_STXX_SPI4_CALX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000400ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8;
+}
+#else
+#define CVMX_STXX_SPI4_CALX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180090000400ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_SPI4_DAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_SPI4_DAT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000628ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_SPI4_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000628ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_SPI4_STAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_SPI4_STAT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000630ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_SPI4_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000630ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_STAT_BYTES_HI(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_STAT_BYTES_HI(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000648ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_STAT_BYTES_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180090000648ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_STAT_BYTES_LO(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_STAT_BYTES_LO(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000680ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_STAT_BYTES_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180090000680ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_STAT_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_STAT_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000638ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_STAT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000638ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_STXX_STAT_PKT_XMT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_STXX_STAT_PKT_XMT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180090000640ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_STXX_STAT_PKT_XMT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000640ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+
+/**
+ * cvmx_stx#_arb_ctl
+ *
+ * STX_ARB_CTL - Spi transmit arbitration control
+ *
+ *
+ * Notes:
+ * If STX_ARB_CTL[MINTRN] is set in Spi4 mode, then the data_max_t
+ * parameter will have to be adjusted. Please see the
+ * STX_SPI4_DAT[MAX_T] section for additional information. In
+ * addition, the min_burst can only be guaranteed on the initial data
+ * burst of a given packet (i.e. the first data burst which contains
+ * the SOP tick). All subsequent bursts could be truncated by training
+ * sequences at any point during transmission and could be arbitrarily
+ * small. This mode is only for use in Spi4 mode.
+ */
+union cvmx_stxx_arb_ctl {
+ uint64_t u64;
+ struct cvmx_stxx_arb_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t mintrn : 1; /**< Hold off training cycles until STX_MIN_BST[MINB]
+ is satisfied */
+ uint64_t reserved_4_4 : 1;
+ uint64_t igntpa : 1; /**< User switch to ignore any TPA information from the
+ Spi interface. This CSR forces all TPA terms to
+ be masked out. It is only intended as a backdoor
+ or debug feature. */
+ uint64_t reserved_0_2 : 3;
+#else
+ uint64_t reserved_0_2 : 3;
+ uint64_t igntpa : 1;
+ uint64_t reserved_4_4 : 1;
+ uint64_t mintrn : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_stxx_arb_ctl_s cn38xx;
+ struct cvmx_stxx_arb_ctl_s cn38xxp2;
+ struct cvmx_stxx_arb_ctl_s cn58xx;
+ struct cvmx_stxx_arb_ctl_s cn58xxp1;
+};
+typedef union cvmx_stxx_arb_ctl cvmx_stxx_arb_ctl_t;
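+
+/* A minimal illustrative sketch (the helper name is hypothetical and
+ * cvmx_read_csr()/cvmx_write_csr() are assumed to be in scope via
+ * cvmx.h): set STX_ARB_CTL[MINTRN] with a read-modify-write so that
+ * training cycles wait for the minimum burst described above.
+ */
+static inline void cvmx_stxx_arb_ctl_set_mintrn(unsigned long block_id)
+{
+    cvmx_stxx_arb_ctl_t arb;
+    arb.u64 = cvmx_read_csr(CVMX_STXX_ARB_CTL(block_id));
+    arb.s.mintrn = 1; /* hold off training until STX_MIN_BST[MINB] is satisfied */
+    cvmx_write_csr(CVMX_STXX_ARB_CTL(block_id), arb.u64);
+}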
+
+/**
+ * cvmx_stx#_bckprs_cnt
+ *
+ * Notes:
+ * This register reports the total number of cycles (STX data clks -
+ * stx_clk) in which the port defined in STX_STAT_CTL[BCKPRS] has lost TPA
+ * or is otherwise receiving backpressure.
+ *
+ * In Spi4 mode, this is defined as a loss of TPA which is indicated when
+ * the receiving device reports SATISFIED for the given port. The calendar
+ * status is brought into N2 on the spi4_tx*_sclk and synchronized into the
+ * N2 Spi TX clock domain which is 1/2 the frequency of the spi4_tx*_dclk
+ * clock (internally, this is the stx_clk). The counter will update on the
+ * rising edge in which backpressure is reported.
+ *
+ * This register will be cleared when software writes all '1's to
+ * the STX_BCKPRS_CNT.
+ */
+union cvmx_stxx_bckprs_cnt {
+ uint64_t u64;
+ struct cvmx_stxx_bckprs_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Number of cycles when back-pressure is received
+ for port defined in STX_STAT_CTL[BCKPRS] */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_stxx_bckprs_cnt_s cn38xx;
+ struct cvmx_stxx_bckprs_cnt_s cn38xxp2;
+ struct cvmx_stxx_bckprs_cnt_s cn58xx;
+ struct cvmx_stxx_bckprs_cnt_s cn58xxp1;
+};
+typedef union cvmx_stxx_bckprs_cnt cvmx_stxx_bckprs_cnt_t;
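+
+/* A minimal illustrative sketch (hypothetical helper; assumes
+ * cvmx_read_csr()/cvmx_write_csr() are in scope via cvmx.h): sample the
+ * backpressure cycle count, then clear it by writing all '1's as the
+ * notes above describe.
+ */
+static inline uint32_t cvmx_stxx_bckprs_cnt_sample(unsigned long block_id)
+{
+    cvmx_stxx_bckprs_cnt_t bp;
+    bp.u64 = cvmx_read_csr(CVMX_STXX_BCKPRS_CNT(block_id));
+    cvmx_write_csr(CVMX_STXX_BCKPRS_CNT(block_id), ~0ull); /* write 1s to clear */
+    return bp.s.cnt;
+}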
+
+/**
+ * cvmx_stx#_com_ctl
+ *
+ * STX_COM_CTL - TX Common Control Register
+ *
+ *
+ * Notes:
+ * Restrictions:
+ * Both the calendar table and the LEN and M parameters must be
+ * completely set up before the Interface enable (INF_EN) and Status
+ * channel enable (ST_EN) bits are asserted.
+ */
+union cvmx_stxx_com_ctl {
+ uint64_t u64;
+ struct cvmx_stxx_com_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t st_en : 1; /**< Status channel enabled */
+ uint64_t reserved_1_2 : 2;
+ uint64_t inf_en : 1; /**< Interface enable */
+#else
+ uint64_t inf_en : 1;
+ uint64_t reserved_1_2 : 2;
+ uint64_t st_en : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_stxx_com_ctl_s cn38xx;
+ struct cvmx_stxx_com_ctl_s cn38xxp2;
+ struct cvmx_stxx_com_ctl_s cn58xx;
+ struct cvmx_stxx_com_ctl_s cn58xxp1;
+};
+typedef union cvmx_stxx_com_ctl cvmx_stxx_com_ctl_t;
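+
+/* A minimal illustrative sketch (hypothetical helper): per the
+ * restriction above, assert INF_EN and ST_EN only after the calendar
+ * table and the LEN and M parameters have been fully programmed.
+ */
+static inline void cvmx_stxx_com_ctl_enable(unsigned long block_id)
+{
+    cvmx_stxx_com_ctl_t com;
+    com.u64 = 0;
+    com.s.inf_en = 1; /* interface enable */
+    com.s.st_en = 1;  /* status channel enable */
+    cvmx_write_csr(CVMX_STXX_COM_CTL(block_id), com.u64);
+}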
+
+/**
+ * cvmx_stx#_dip_cnt
+ *
+ * Notes:
+ * * DIPMAX
+ * This counts the number of consecutive DIP2 states in which the
+ * received DIP2 is bad. The expected range is 1-15 cycles with the
+ * value of 0 meaning disabled.
+ *
+ * * FRMMAX
+ * This counts the number of consecutive unexpected framing pattern (11)
+ * states. The expected range is 1-15 cycles with the value of 0 meaning
+ * disabled.
+ */
+union cvmx_stxx_dip_cnt {
+ uint64_t u64;
+ struct cvmx_stxx_dip_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t frmmax : 4; /**< Number of consecutive unexpected framing patterns
+ before loss of sync */
+ uint64_t dipmax : 4; /**< Number of consecutive DIP2 error before loss
+ of sync */
+#else
+ uint64_t dipmax : 4;
+ uint64_t frmmax : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_stxx_dip_cnt_s cn38xx;
+ struct cvmx_stxx_dip_cnt_s cn38xxp2;
+ struct cvmx_stxx_dip_cnt_s cn58xx;
+ struct cvmx_stxx_dip_cnt_s cn58xxp1;
+};
+typedef union cvmx_stxx_dip_cnt cvmx_stxx_dip_cnt_t;
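+
+/* A minimal illustrative sketch (hypothetical helper): program both
+ * loss-of-sync thresholds; a value of 0 disables the corresponding
+ * check, per the notes above.
+ */
+static inline void cvmx_stxx_dip_cnt_setup(unsigned long block_id, int dipmax, int frmmax)
+{
+    cvmx_stxx_dip_cnt_t dip;
+    dip.u64 = 0;
+    dip.s.dipmax = dipmax & 0xf; /* consecutive bad DIP2 before loss of sync */
+    dip.s.frmmax = frmmax & 0xf; /* consecutive bad framing before loss of sync */
+    cvmx_write_csr(CVMX_STXX_DIP_CNT(block_id), dip.u64);
+}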
+
+/**
+ * cvmx_stx#_ign_cal
+ *
+ * STX_IGN_CAL - Ignore Calendar Status from Spi4 Status Channel
+ *
+ */
+union cvmx_stxx_ign_cal {
+ uint64_t u64;
+ struct cvmx_stxx_ign_cal_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t igntpa : 16; /**< Ignore Calendar Status from Spi4 Status Channel
+ per Spi4 port
+ - 0: Use the status channel info
+ - 1: Grant the given port MAX_BURST1 credits */
+#else
+ uint64_t igntpa : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_stxx_ign_cal_s cn38xx;
+ struct cvmx_stxx_ign_cal_s cn38xxp2;
+ struct cvmx_stxx_ign_cal_s cn58xx;
+ struct cvmx_stxx_ign_cal_s cn58xxp1;
+};
+typedef union cvmx_stxx_ign_cal cvmx_stxx_ign_cal_t;
+
+/**
+ * cvmx_stx#_int_msk
+ *
+ * Notes:
+ * If the bit is enabled, then the corresponding exception condition will
+ * result in an interrupt to the system.
+ */
+union cvmx_stxx_int_msk {
+ uint64_t u64;
+ struct cvmx_stxx_int_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t frmerr : 1; /**< FRMCNT has exceeded STX_DIP_CNT[MAXFRM] */
+ uint64_t unxfrm : 1; /**< Unexpected framing sequence */
+ uint64_t nosync : 1; /**< ERRCNT has exceeded STX_DIP_CNT[MAXDIP] */
+ uint64_t diperr : 1; /**< DIP2 error on the Spi4 Status channel */
+ uint64_t datovr : 1; /**< Spi4 FIFO overflow error */
+ uint64_t ovrbst : 1; /**< Transmit packet burst too big */
+ uint64_t calpar1 : 1; /**< STX Calendar Table Parity Error Bank1 */
+ uint64_t calpar0 : 1; /**< STX Calendar Table Parity Error Bank0 */
+#else
+ uint64_t calpar0 : 1;
+ uint64_t calpar1 : 1;
+ uint64_t ovrbst : 1;
+ uint64_t datovr : 1;
+ uint64_t diperr : 1;
+ uint64_t nosync : 1;
+ uint64_t unxfrm : 1;
+ uint64_t frmerr : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_stxx_int_msk_s cn38xx;
+ struct cvmx_stxx_int_msk_s cn38xxp2;
+ struct cvmx_stxx_int_msk_s cn58xx;
+ struct cvmx_stxx_int_msk_s cn58xxp1;
+};
+typedef union cvmx_stxx_int_msk cvmx_stxx_int_msk_t;
+
+/**
+ * cvmx_stx#_int_reg
+ *
+ * Notes:
+ * * CALPAR0
+ * This bit indicates that the Spi4 calendar table encountered a parity
+ * error on bank0 of the calendar table memory. This error bit is
+ * associated with the calendar table on the TX interface - the interface
+ * that drives the Spi databus. The calendar table is used in Spi4 mode
+ * when using the status channel. Parity errors can occur during normal
+ * operation when the calendar table is constantly being read for the port
+ * information, or during initialization time, when the user has access.
+ * These errors will force the status channel to the reset state and
+ * begin driving training sequences. The status channel will also reset.
+ * Software must follow the init sequence to resynch the interface. This
+ * includes toggling INF_EN which will cancel all outstanding accumulated
+ * credits.
+ *
+ * * CALPAR1
+ * Identical to CALPAR0 except that it indicates that the error occurred
+ * on bank1 (instead of bank0).
+ *
+ * * OVRBST
+ * STX can track up to a 512KB data burst. Any packet larger than that is
+ * illegal and will cause confusion in the STX state machine. BMI is
+ * responsible for throwing away these out of control packets from the
+ * input and the Execs should never generate them on the output. This is
+ * a fatal error and should have STX_INT_SYNC[OVRBST] set.
+ *
+ * * DATOVR
+ * The FIFO where the Spi4 data ramps up to its transmit frequency has
+ * overflowed. This is a fatal error and should have
+ * STX_INT_SYNC[DATOVR] set.
+ *
+ * * DIPERR
+ * This bit will fire if any DIP2 error is caught by the Spi4 status
+ * channel.
+ *
+ * * NOSYNC
+ * This bit indicates that the number of consecutive DIP2 errors exceeds
+ * STX_DIP_CNT[MAXDIP] and that the interface should be taken down. The
+ * datapath will be notified and send continuous training sequences until
+ * software resynchronizes the interface. This error condition should
+ * have STX_INT_SYNC[NOSYNC] set.
+ *
+ * * UNXFRM
+ * Unexpected framing data was seen on the status channel.
+ *
+ * * FRMERR
+ * This bit indicates that the number of consecutive unexpected framing
+ * sequences exceeds STX_DIP_CNT[MAXFRM] and that the interface should be taken
+ * down. The datapath will be notified and send continuous training
+ * sequences until software resynchronizes the interface. This error
+ * condition should have STX_INT_SYNC[FRMERR] set.
+ *
+ * * SYNCERR
+ * Indicates that an exception marked in STX_INT_SYNC has occurred and the
+ * TX datapath is disabled. It is recommended that the OVRBST, DATOVR,
+ * NOSYNC, and FRMERR error conditions all have their bits set in the
+ * STX_INT_SYNC register.
+ */
+union cvmx_stxx_int_reg {
+ uint64_t u64;
+ struct cvmx_stxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t syncerr : 1; /**< Interface encountered a fatal error */
+ uint64_t frmerr : 1; /**< FRMCNT has exceeded STX_DIP_CNT[MAXFRM] */
+ uint64_t unxfrm : 1; /**< Unexpected framing sequence */
+ uint64_t nosync : 1; /**< ERRCNT has exceeded STX_DIP_CNT[MAXDIP] */
+ uint64_t diperr : 1; /**< DIP2 error on the Spi4 Status channel */
+ uint64_t datovr : 1; /**< Spi4 FIFO overflow error */
+ uint64_t ovrbst : 1; /**< Transmit packet burst too big */
+ uint64_t calpar1 : 1; /**< STX Calendar Table Parity Error Bank1 */
+ uint64_t calpar0 : 1; /**< STX Calendar Table Parity Error Bank0 */
+#else
+ uint64_t calpar0 : 1;
+ uint64_t calpar1 : 1;
+ uint64_t ovrbst : 1;
+ uint64_t datovr : 1;
+ uint64_t diperr : 1;
+ uint64_t nosync : 1;
+ uint64_t unxfrm : 1;
+ uint64_t frmerr : 1;
+ uint64_t syncerr : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_stxx_int_reg_s cn38xx;
+ struct cvmx_stxx_int_reg_s cn38xxp2;
+ struct cvmx_stxx_int_reg_s cn58xx;
+ struct cvmx_stxx_int_reg_s cn58xxp1;
+};
+typedef union cvmx_stxx_int_reg cvmx_stxx_int_reg_t;
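+
+/* A minimal illustrative sketch (hypothetical helper): decode the fatal
+ * conditions called out above. SYNCERR summarizes any exception that was
+ * also marked in STX_INT_SYNC; the individual bits identify the cause.
+ */
+static inline int cvmx_stxx_int_reg_is_fatal(unsigned long block_id)
+{
+    cvmx_stxx_int_reg_t isr;
+    isr.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(block_id));
+    return isr.s.syncerr || isr.s.ovrbst || isr.s.datovr ||
+           isr.s.nosync || isr.s.frmerr;
+}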
+
+/**
+ * cvmx_stx#_int_sync
+ *
+ * Notes:
+ * If the bit is enabled, then the corresponding exception condition is flagged
+ * to be fatal. In Spi4 mode, the exception condition will result in a loss
+ * of sync condition on the Spi4 interface and the datapath will send
+ * continuous training sequences.
+ *
+ * It is recommended that software set the OVRBST, DATOVR, NOSYNC, and
+ * FRMERR errors as synchronization events. Software is free to
+ * synchronize the bus on other conditions, but this is the minimum
+ * recommended set.
+ */
+union cvmx_stxx_int_sync {
+ uint64_t u64;
+ struct cvmx_stxx_int_sync_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+ uint64_t frmerr : 1; /**< FRMCNT has exceeded STX_DIP_CNT[MAXFRM] */
+ uint64_t unxfrm : 1; /**< Unexpected framing sequence */
+ uint64_t nosync : 1; /**< ERRCNT has exceeded STX_DIP_CNT[MAXDIP] */
+ uint64_t diperr : 1; /**< DIP2 error on the Spi4 Status channel */
+ uint64_t datovr : 1; /**< Spi4 FIFO overflow error */
+ uint64_t ovrbst : 1; /**< Transmit packet burst too big */
+ uint64_t calpar1 : 1; /**< STX Calendar Table Parity Error Bank1 */
+ uint64_t calpar0 : 1; /**< STX Calendar Table Parity Error Bank0 */
+#else
+ uint64_t calpar0 : 1;
+ uint64_t calpar1 : 1;
+ uint64_t ovrbst : 1;
+ uint64_t datovr : 1;
+ uint64_t diperr : 1;
+ uint64_t nosync : 1;
+ uint64_t unxfrm : 1;
+ uint64_t frmerr : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_stxx_int_sync_s cn38xx;
+ struct cvmx_stxx_int_sync_s cn38xxp2;
+ struct cvmx_stxx_int_sync_s cn58xx;
+ struct cvmx_stxx_int_sync_s cn58xxp1;
+};
+typedef union cvmx_stxx_int_sync cvmx_stxx_int_sync_t;
+
+/**
+ * cvmx_stx#_min_bst
+ *
+ * STX_MIN_BST - Min Burst to enforce when inserting training sequence
+ *
+ */
+union cvmx_stxx_min_bst {
+ uint64_t u64;
+ struct cvmx_stxx_min_bst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t minb : 9; /**< When STX_ARB_CTL[MINTRN] is set, MINB indicates
+ the number of 8B blocks to send before inserting
+ a training sequence. Normally MINB will be set
+ to GMX_TX_SPI_THRESH[THRESH]. MINB should always
+ be set to an even number (i.e. a multiple of 16B) */
+#else
+ uint64_t minb : 9;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_stxx_min_bst_s cn38xx;
+ struct cvmx_stxx_min_bst_s cn38xxp2;
+ struct cvmx_stxx_min_bst_s cn58xx;
+ struct cvmx_stxx_min_bst_s cn58xxp1;
+};
+typedef union cvmx_stxx_min_bst cvmx_stxx_min_bst_t;
+
+/**
+ * cvmx_stx#_spi4_cal#
+ *
+ * specify the RSL base addresses for the block
+ * STX_SPI4_CAL - Spi4 Calendar table
+ * direct_calendar_write / direct_calendar_read
+ *
+ * Notes:
+ * There are 32 calendar table CSRs, each containing 4 entries for a
+ * total of 128 entries. In the above definition...
+ *
+ * n = calendar table offset * 4
+ *
+ * For example, offset 0x00 contains the calendar table entries 0, 1, 2,
+ * and 3 (with n == 0). Offset 0x10 is the 16th CSR in the calendar table
+ * and contains entries (16*4) = 64, 65, 66, and 67.
+ *
+ * Restrictions:
+ * Calendar table entry accesses (read or write) can only occur
+ * if the interface is disabled. All other accesses will be
+ * unpredictable.
+ *
+ * Both the calendar table and the LEN and M parameters must be
+ * completely set up before the Interface enable (INF_EN) and Status
+ * channel enable (ST_EN) bits are asserted.
+ */
+union cvmx_stxx_spi4_calx {
+ uint64_t u64;
+ struct cvmx_stxx_spi4_calx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t oddpar : 1; /**< Odd parity over STX_SPI4_CAL[15:0]
+ (^STX_SPI4_CAL[16:0] === 1'b1) | $NS NS */
+ uint64_t prt3 : 4; /**< Status for port n+3 */
+ uint64_t prt2 : 4; /**< Status for port n+2 */
+ uint64_t prt1 : 4; /**< Status for port n+1 */
+ uint64_t prt0 : 4; /**< Status for port n+0 */
+#else
+ uint64_t prt0 : 4;
+ uint64_t prt1 : 4;
+ uint64_t prt2 : 4;
+ uint64_t prt3 : 4;
+ uint64_t oddpar : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_stxx_spi4_calx_s cn38xx;
+ struct cvmx_stxx_spi4_calx_s cn38xxp2;
+ struct cvmx_stxx_spi4_calx_s cn58xx;
+ struct cvmx_stxx_spi4_calx_s cn58xxp1;
+};
+typedef union cvmx_stxx_spi4_calx cvmx_stxx_spi4_calx_t;
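+
+/* A minimal illustrative sketch (hypothetical helper): map a calendar
+ * entry number to its CSR using the n = offset * 4 rule above; entry 66,
+ * for example, lives at offset 16 in field PRT2 (66 % 4 == 2).
+ */
+static inline uint64_t cvmx_stxx_spi4_cal_csr_for_entry(int entry, unsigned long block_id)
+{
+    int offset = entry / 4; /* 32 CSRs, 4 entries each */
+    /* entry % 4 selects prt0..prt3 within the returned CSR */
+    return CVMX_STXX_SPI4_CALX(offset, block_id);
+}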
+
+/**
+ * cvmx_stx#_spi4_dat
+ *
+ * STX_SPI4_DAT - Spi4 datapath channel control register
+ *
+ *
+ * Notes:
+ * Restrictions:
+ * * DATA_MAX_T must be a multiple of 4 cycles
+ *
+ * * DATA_MAX_T must be at least 0x20
+ *
+ * * DATA_MAX_T == 0 or ALPHA == 0 will disable the training sequence
+ *
+ * * If STX_ARB_CTL[MINTRN] is set, then training cycles will stall
+ * waiting for min bursts to complete. In the worst case, this will
+ * add the entire min burst transmission time to the interval between
+ * training sequences. The observed MAX_T on the Spi4 bus will be...
+ *
+ * STX_SPI4_DAT[MAX_T] + (STX_MIN_BST[MINB] * 4)
+ *
+ * If STX_ARB_CTL[MINTRN] is set in Spi4 mode, then the data_max_t
+ * parameter will have to be adjusted. Please see the
+ * STX_SPI4_DAT[MAX_T] section for additional information. In
+ * addition, the min_burst can only be guaranteed on the initial data
+ * burst of a given packet (i.e. the first data burst which contains
+ * the SOP tick). All subsequent bursts could be truncated by training
+ * sequences at any point during transmission and could be arbitrarily
+ * small. This mode is only for use in Spi4 mode.
+ */
+union cvmx_stxx_spi4_dat {
+ uint64_t u64;
+ struct cvmx_stxx_spi4_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t alpha : 16; /**< alpha (from spi4.2 spec) */
+ uint64_t max_t : 16; /**< DATA_MAX_T (from spi4.2 spec) */
+#else
+ uint64_t max_t : 16;
+ uint64_t alpha : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_stxx_spi4_dat_s cn38xx;
+ struct cvmx_stxx_spi4_dat_s cn38xxp2;
+ struct cvmx_stxx_spi4_dat_s cn58xx;
+ struct cvmx_stxx_spi4_dat_s cn58xxp1;
+};
+typedef union cvmx_stxx_spi4_dat cvmx_stxx_spi4_dat_t;
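+
+/* A minimal illustrative sketch (hypothetical helper): the worst-case
+ * training interval observed on the bus when STX_ARB_CTL[MINTRN] is
+ * set, computed with the formula from the notes above.
+ */
+static inline uint64_t cvmx_stxx_spi4_observed_max_t(unsigned long block_id)
+{
+    cvmx_stxx_spi4_dat_t dat;
+    cvmx_stxx_min_bst_t minb;
+    dat.u64 = cvmx_read_csr(CVMX_STXX_SPI4_DAT(block_id));
+    minb.u64 = cvmx_read_csr(CVMX_STXX_MIN_BST(block_id));
+    return (uint64_t)dat.s.max_t + (uint64_t)minb.s.minb * 4;
+}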
+
+/**
+ * cvmx_stx#_spi4_stat
+ *
+ * STX_SPI4_STAT - Spi4 status channel control register
+ *
+ *
+ * Notes:
+ * Restrictions:
+ * Both the calendar table and the LEN and M parameters must be
+ * completely set up before the Interface enable (INF_EN) and Status
+ * channel enable (ST_EN) bits are asserted.
+ *
+ * The calendar table will only be enabled when LEN > 0.
+ *
+ * The current revision only supports LVTTL status IO.
+ */
+union cvmx_stxx_spi4_stat {
+ uint64_t u64;
+ struct cvmx_stxx_spi4_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t m : 8; /**< CALENDAR_M (from spi4.2 spec) */
+ uint64_t reserved_7_7 : 1;
+ uint64_t len : 7; /**< CALENDAR_LEN (from spi4.2 spec) */
+#else
+ uint64_t len : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t m : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_stxx_spi4_stat_s cn38xx;
+ struct cvmx_stxx_spi4_stat_s cn38xxp2;
+ struct cvmx_stxx_spi4_stat_s cn58xx;
+ struct cvmx_stxx_spi4_stat_s cn58xxp1;
+};
+typedef union cvmx_stxx_spi4_stat cvmx_stxx_spi4_stat_t;
+
+/**
+ * cvmx_stx#_stat_bytes_hi
+ */
+union cvmx_stxx_stat_bytes_hi {
+ uint64_t u64;
+ struct cvmx_stxx_stat_bytes_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Number of bytes sent (CNT[63:32]) */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_stxx_stat_bytes_hi_s cn38xx;
+ struct cvmx_stxx_stat_bytes_hi_s cn38xxp2;
+ struct cvmx_stxx_stat_bytes_hi_s cn58xx;
+ struct cvmx_stxx_stat_bytes_hi_s cn58xxp1;
+};
+typedef union cvmx_stxx_stat_bytes_hi cvmx_stxx_stat_bytes_hi_t;
+
+/**
+ * cvmx_stx#_stat_bytes_lo
+ */
+union cvmx_stxx_stat_bytes_lo {
+ uint64_t u64;
+ struct cvmx_stxx_stat_bytes_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Number of bytes sent (CNT[31:0]) */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_stxx_stat_bytes_lo_s cn38xx;
+ struct cvmx_stxx_stat_bytes_lo_s cn38xxp2;
+ struct cvmx_stxx_stat_bytes_lo_s cn58xx;
+ struct cvmx_stxx_stat_bytes_lo_s cn58xxp1;
+};
+typedef union cvmx_stxx_stat_bytes_lo cvmx_stxx_stat_bytes_lo_t;
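+
+/* A minimal illustrative sketch (hypothetical helper): assemble the
+ * 64-bit transmitted byte count from its HI/LO halves. A production
+ * reader would re-read HI to guard against a LO rollover in between.
+ */
+static inline uint64_t cvmx_stxx_stat_bytes(unsigned long block_id)
+{
+    cvmx_stxx_stat_bytes_hi_t hi;
+    cvmx_stxx_stat_bytes_lo_t lo;
+    hi.u64 = cvmx_read_csr(CVMX_STXX_STAT_BYTES_HI(block_id));
+    lo.u64 = cvmx_read_csr(CVMX_STXX_STAT_BYTES_LO(block_id));
+    return ((uint64_t)hi.s.cnt << 32) | lo.s.cnt;
+}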
+
+/**
+ * cvmx_stx#_stat_ctl
+ */
+union cvmx_stxx_stat_ctl {
+ uint64_t u64;
+ struct cvmx_stxx_stat_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t clr : 1; /**< Clear all statistics counters
+ - STX_STAT_PKT_XMT
+ - STX_STAT_BYTES_HI
+ - STX_STAT_BYTES_LO */
+ uint64_t bckprs : 4; /**< The selected port for STX_BCKPRS_CNT */
+#else
+ uint64_t bckprs : 4;
+ uint64_t clr : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_stxx_stat_ctl_s cn38xx;
+ struct cvmx_stxx_stat_ctl_s cn38xxp2;
+ struct cvmx_stxx_stat_ctl_s cn58xx;
+ struct cvmx_stxx_stat_ctl_s cn58xxp1;
+};
+typedef union cvmx_stxx_stat_ctl cvmx_stxx_stat_ctl_t;
+
+/**
+ * cvmx_stx#_stat_pkt_xmt
+ */
+union cvmx_stxx_stat_pkt_xmt {
+ uint64_t u64;
+ struct cvmx_stxx_stat_pkt_xmt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Number of packets sent */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_stxx_stat_pkt_xmt_s cn38xx;
+ struct cvmx_stxx_stat_pkt_xmt_s cn38xxp2;
+ struct cvmx_stxx_stat_pkt_xmt_s cn58xx;
+ struct cvmx_stxx_stat_pkt_xmt_s cn58xxp1;
+};
+typedef union cvmx_stxx_stat_pkt_xmt cvmx_stxx_stat_pkt_xmt_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-stxx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-swap.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-swap.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-swap.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,144 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Utility functions for endian swapping
+ *
+ * <hr>$Revision: 32636 $<hr>
+ */
+
+#ifndef __CVMX_SWAP_H__
+#define __CVMX_SWAP_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Byte swap a 16 bit number
+ *
+ * @param x 16 bit number
+ * @return Byte swapped result
+ */
+static inline uint16_t cvmx_swap16(uint16_t x)
+{
+ return ((uint16_t)((((uint16_t)(x) & (uint16_t)0x00ffU) << 8) |
+ (((uint16_t)(x) & (uint16_t)0xff00U) >> 8) ));
+}
+
+
+/**
+ * Byte swap a 32 bit number
+ *
+ * @param x 32 bit number
+ * @return Byte swapped result
+ */
+static inline uint32_t cvmx_swap32(uint32_t x)
+{
+ return ((uint32_t)((((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) |
+ (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) |
+ (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) |
+ (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24) ));
+}
+
+
+/**
+ * Byte swap a 64 bit number
+ *
+ * @param x 64 bit number
+ * @return Byte swapped result
+ */
+static inline uint64_t cvmx_swap64(uint64_t x)
+{
+ return ((x >> 56) |
+ (((x >> 48) & 0xfful) << 8) |
+ (((x >> 40) & 0xfful) << 16) |
+ (((x >> 32) & 0xfful) << 24) |
+ (((x >> 24) & 0xfful) << 32) |
+ (((x >> 16) & 0xfful) << 40) |
+ (((x >> 8) & 0xfful) << 48) |
+ (((x >> 0) & 0xfful) << 56));
+}
+
+
+#ifdef __BIG_ENDIAN_BITFIELD
+
+#define cvmx_cpu_to_le16(x) cvmx_swap16(x)
+#define cvmx_cpu_to_le32(x) cvmx_swap32(x)
+#define cvmx_cpu_to_le64(x) cvmx_swap64(x)
+
+#define cvmx_cpu_to_be16(x) (x)
+#define cvmx_cpu_to_be32(x) (x)
+#define cvmx_cpu_to_be64(x) (x)
+
+#else
+
+#define cvmx_cpu_to_le16(x) (x)
+#define cvmx_cpu_to_le32(x) (x)
+#define cvmx_cpu_to_le64(x) (x)
+
+#define cvmx_cpu_to_be16(x) cvmx_swap16(x)
+#define cvmx_cpu_to_be32(x) cvmx_swap32(x)
+#define cvmx_cpu_to_be64(x) cvmx_swap64(x)
+
+#endif
+
+#define cvmx_le16_to_cpu(x) cvmx_cpu_to_le16(x)
+#define cvmx_le32_to_cpu(x) cvmx_cpu_to_le32(x)
+#define cvmx_le64_to_cpu(x) cvmx_cpu_to_le64(x)
+
+#define cvmx_be16_to_cpu(x) cvmx_cpu_to_be16(x)
+#define cvmx_be32_to_cpu(x) cvmx_cpu_to_be32(x)
+#define cvmx_be64_to_cpu(x) cvmx_cpu_to_be64(x)
+
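+/* A minimal usage sketch (the helper name is hypothetical): load a
+ * 32-bit value stored in network (big-endian) byte order into host
+ * order using the macros above. On big-endian Octeon this compiles to
+ * a plain load; on a little-endian host it byte swaps.
+ */
+static inline uint32_t cvmx_swap_load_be32(const uint32_t *src)
+{
+    return cvmx_be32_to_cpu(*src);
+}
+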
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_SWAP_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-swap.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-sysinfo.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-sysinfo.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-sysinfo.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,265 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This module provides system/board/application information obtained by the bootloader.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+#else
+#include "cvmx.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-sysinfo.h"
+#endif
+
+
+/**
+ * This structure defines the private state maintained by sysinfo module.
+ *
+ */
+#if defined(CVMX_BUILD_FOR_UBOOT) && CONFIG_OCTEON_NAND_STAGE2
+/* For u-boot, put this in the text section so that we can use this in early
+** boot when running from ram(or L2 cache). This is primarily used for NAND
+** access during NAND boot. The 'data_in_text' section is merged with the
+** text section by the linker script to avoid an assembler warning. */
+static struct {
+
+ cvmx_sysinfo_t sysinfo; /**< system information */
+ cvmx_spinlock_t lock; /**< mutex spinlock */
+
+} state __attribute__ ((section (".data_in_text"))) = {
+ .lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER
+};
+#else
+CVMX_SHARED static struct {
+
+ struct cvmx_sysinfo sysinfo; /**< system information */
+ cvmx_spinlock_t lock; /**< mutex spinlock */
+
+} state = {
+ .lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER
+};
+#endif
+
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+/* Global variable with the processor ID since we can't read it directly */
+CVMX_SHARED uint32_t cvmx_app_init_processor_id;
+#endif
+
+/* Global variables that define the min/max of the memory region set up for 32 bit userspace access */
+uint64_t linux_mem32_min = 0;
+uint64_t linux_mem32_max = 0;
+uint64_t linux_mem32_wired = 0;
+uint64_t linux_mem32_offset = 0;
+
+/**
+ * This function returns the application information as obtained
+ * by the bootloader. This provides the core mask of the cores
+ * running the same application image, as well as the physical
+ * memory regions available to the core.
+ *
+ * @return Pointer to the boot information structure
+ *
+ */
+struct cvmx_sysinfo *cvmx_sysinfo_get(void)
+{
+ return &(state.sysinfo);
+}
+
+void cvmx_sysinfo_add_self_to_core_mask(void)
+{
+ int core = cvmx_get_core_num();
+ uint32_t core_mask = 1 << core;
+
+ cvmx_spinlock_lock(&state.lock);
+ state.sysinfo.core_mask = state.sysinfo.core_mask | core_mask;
+ cvmx_spinlock_unlock(&state.lock);
+}
+
+void cvmx_sysinfo_remove_self_from_core_mask(void)
+{
+ int core = cvmx_get_core_num();
+ uint32_t core_mask = 1 << core;
+
+ cvmx_spinlock_lock(&state.lock);
+ state.sysinfo.core_mask = state.sysinfo.core_mask & ~core_mask;
+ cvmx_spinlock_unlock(&state.lock);
+}
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_sysinfo_get);
+#endif
+
+
+/**
+ * This function is used in non-simple executive environments (such as Linux kernel, u-boot, etc.)
+ * to configure the minimal fields that are required to use
+ * simple executive files directly.
+ *
+ * Locking (if required) must be handled outside of this
+ * function
+ *
+ * @param phy_mem_desc_addr
+ * Address of the global physical memory descriptor (bootmem
+ * descriptor)
+ * @param board_type Octeon board type enumeration
+ *
+ * @param board_rev_major
+ * Board major revision
+ * @param board_rev_minor
+ * Board minor revision
+ * @param cpu_clock_hz
+ * CPU clock frequency in hertz
+ *
+ * @return 0: Failure
+ * 1: success
+ */
+int cvmx_sysinfo_minimal_initialize(uint64_t phy_mem_desc_addr, uint16_t board_type, uint8_t board_rev_major,
+ uint8_t board_rev_minor, uint32_t cpu_clock_hz)
+{
+
+
+ memset(&(state.sysinfo), 0x0, sizeof(state.sysinfo));
+ state.sysinfo.phy_mem_desc_addr = phy_mem_desc_addr;
+ state.sysinfo.board_type = board_type;
+ state.sysinfo.board_rev_major = board_rev_major;
+ state.sysinfo.board_rev_minor = board_rev_minor;
+ state.sysinfo.cpu_clock_hz = cpu_clock_hz;
+
+ return(1);
+}
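+
+/* A minimal usage sketch (the values and board type are illustrative):
+ * a non-SE environment wiring up just enough state to call simple
+ * executive functions:
+ *
+ *    cvmx_sysinfo_minimal_initialize(bootmem_desc_addr,
+ *                                    CVMX_BOARD_TYPE_EBT3000,
+ *                                    1, 0, 500000000);
+ *
+ * where bootmem_desc_addr comes from the bootloader, the board is
+ * assumed to be a rev 1.0 EBT3000, and the core clock is 500 MHz.
+ */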
+
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+/**
+ * Initialize the sysinfo structure when running on
+ * Octeon under Linux userspace
+ */
+void cvmx_sysinfo_linux_userspace_initialize(void)
+{
+ cvmx_sysinfo_t *system_info = cvmx_sysinfo_get();
+ memset(system_info, 0, sizeof(cvmx_sysinfo_t));
+
+ system_info->core_mask = 0;
+ system_info->init_core = -1;
+
+ FILE *infile = fopen("/proc/octeon_info", "r");
+ if (infile == NULL)
+ {
+ perror("Error opening /proc/octeon_info");
+ exit(-1);
+ }
+
+ while (!feof(infile))
+ {
+ char buffer[80];
+ if (fgets(buffer, sizeof(buffer), infile))
+ {
+ const char *field = strtok(buffer, " ");
+ const char *valueS = strtok(NULL, " ");
+ if (field == NULL)
+ continue;
+ if (valueS == NULL)
+ continue;
+ unsigned long long value;
+ sscanf(valueS, "%lli", &value);
+
+ if (strcmp(field, "dram_size:") == 0)
+ system_info->system_dram_size = value << 20;
+ else if (strcmp(field, "phy_mem_desc_addr:") == 0)
+ system_info->phy_mem_desc_addr = value;
+ else if (strcmp(field, "eclock_hz:") == 0)
+ system_info->cpu_clock_hz = value;
+ else if (strcmp(field, "dclock_hz:") == 0)
+ system_info->dram_data_rate_hz = value * 2;
+ else if (strcmp(field, "board_type:") == 0)
+ system_info->board_type = value;
+ else if (strcmp(field, "board_rev_major:") == 0)
+ system_info->board_rev_major = value;
+ else if (strcmp(field, "board_rev_minor:") == 0)
+ system_info->board_rev_minor = value;
+ else if (strcmp(field, "board_serial_number:") == 0)
+ strncpy(system_info->board_serial_number, valueS, sizeof(system_info->board_serial_number));
+ else if (strcmp(field, "mac_addr_base:") == 0)
+ {
+ int i;
+ int m[6];
+ sscanf(valueS, "%02x:%02x:%02x:%02x:%02x:%02x", m+0, m+1, m+2, m+3, m+4, m+5);
+ for (i=0; i<6; i++)
+ system_info->mac_addr_base[i] = m[i];
+ }
+ else if (strcmp(field, "mac_addr_count:") == 0)
+ system_info->mac_addr_count = value;
+ else if (strcmp(field, "fdt_addr:") == 0)
+ system_info->fdt_addr = UNMAPPED_PTR(value);
+ else if (strcmp(field, "32bit_shared_mem_base:") == 0)
+ linux_mem32_min = value;
+ else if (strcmp(field, "32bit_shared_mem_size:") == 0)
+ linux_mem32_max = linux_mem32_min + value - 1;
+ else if (strcmp(field, "processor_id:") == 0)
+ cvmx_app_init_processor_id = value;
+ else if (strcmp(field, "32bit_shared_mem_wired:") == 0)
+ linux_mem32_wired = value;
+ }
+ }
+
+ /*
+ * set up the feature map.
+ */
+ octeon_feature_init();
+
+ system_info->cpu_clock_hz = cvmx_clock_get_rate(CVMX_CLOCK_CORE);
+}
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-sysinfo.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-sysinfo.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-sysinfo.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-sysinfo.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,190 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * This module provides system/board information obtained by the bootloader.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+
+#ifndef __CVMX_SYSINFO_H__
+#define __CVMX_SYSINFO_H__
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+#include "cvmx-app-init.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define OCTEON_SERIAL_LEN 20
+/**
+ * Structure describing application specific information.
+ * __cvmx_app_init() populates this from the cvmx boot descriptor.
+ * This structure is private to simple executive applications, so no
+ * versioning is required.
+ *
+ * This structure must be provided with some fields set in order to
+ * use simple executive functions in other applications (Linux kernel,
+ * u-boot, etc.) The cvmx_sysinfo_minimal_initialize() function is
+ * provided to set the required values in these cases.
+ *
+ */
+struct cvmx_sysinfo {
+ /* System wide variables */
+ uint64_t system_dram_size; /**< installed DRAM in system, in bytes */
+ uint64_t phy_mem_desc_addr; /**< Address of the memory descriptor block */
+
+ /* Application image specific variables */
+ uint64_t stack_top; /**< stack top address (virtual) */
+ uint64_t heap_base; /**< heap base address (virtual) */
+ uint32_t stack_size; /**< stack size in bytes */
+ uint32_t heap_size; /**< heap size in bytes */
+ uint32_t core_mask; /**< coremask defining cores running application */
+ uint32_t init_core; /**< Deprecated, use cvmx_coremask_first_core() to select init core */
+ uint64_t exception_base_addr; /**< exception base address, as set by bootloader */
+ uint32_t cpu_clock_hz; /**< cpu clock speed in hz */
+ uint32_t dram_data_rate_hz; /**< dram data rate in hz (data rate = 2 * clock rate) */
+
+ uint16_t board_type;
+ uint8_t board_rev_major;
+ uint8_t board_rev_minor;
+ uint8_t mac_addr_base[6];
+ uint8_t mac_addr_count;
+ char board_serial_number[OCTEON_SERIAL_LEN];
+ /*
+ * Several boards support compact flash on the Octeon boot
+ * bus. The CF memory spaces may be mapped to different
+ * addresses on different boards. These values will be 0 if
+ * CF is not present. Note that these addresses are physical
+ * addresses, and it is up to the application to use the
+ * proper addressing mode (XKPHYS, KSEG0, etc.)
+ */
+ uint64_t compact_flash_common_base_addr;
+ uint64_t compact_flash_attribute_base_addr;
+ /*
+ * Base address of the LED display (as on EBT3000 board) This
+ * will be 0 if LED display not present. Note that this
+ * address is a physical address, and it is up to the
+ * application to use the proper addressing mode (XKPHYS,
+ * KSEG0, etc.)
+ */
+ uint64_t led_display_base_addr;
+ uint32_t dfa_ref_clock_hz; /**< DFA reference clock in hz (if applicable)*/
+ uint32_t bootloader_config_flags; /**< configuration flags from bootloader */
+ uint8_t console_uart_num; /**< UART number used for console */
+ uint64_t fdt_addr; /**< pointer to device tree */
+};
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+typedef struct cvmx_sysinfo cvmx_sysinfo_t;
+#endif
+
+/**
+ * This function returns the system/board information as obtained
+ * by the bootloader.
+ *
+ *
+ * @return Pointer to the boot information structure
+ *
+ */
+
+extern struct cvmx_sysinfo *cvmx_sysinfo_get(void);
+
+/**
+ * This function adds the current cpu to the sysinfo coremask
+ *
+ */
+
+void cvmx_sysinfo_add_self_to_core_mask(void);
+
+/**
+ * This function removes the current cpu from the sysinfo coremask
+ *
+ */
+void cvmx_sysinfo_remove_self_from_core_mask(void);
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+/**
+ * This function is used in non-simple executive environments (such as Linux kernel, u-boot, etc.)
+ * to configure the minimal fields that are required to use
+ * simple executive files directly.
+ *
+ * Locking (if required) must be handled outside of this
+ * function
+ *
+ * @param phy_mem_desc_addr
+ * Address of the global physical memory descriptor (bootmem
+ * descriptor)
+ * @param board_type Octeon board type enumeration
+ *
+ * @param board_rev_major
+ * Board major revision
+ * @param board_rev_minor
+ * Board minor revision
+ * @param cpu_clock_hz
+ * CPU clock frequency in hertz
+ *
+ * @return 0: Failure
+ * 1: success
+ */
+extern int cvmx_sysinfo_minimal_initialize(uint64_t phy_mem_desc_addr, uint16_t board_type, uint8_t board_rev_major,
+ uint8_t board_rev_minor, uint32_t cpu_clock_hz);
+#endif
+
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+/**
+ * Initialize the sysinfo structure when running on
+ * Octeon under Linux userspace
+ */
+extern void cvmx_sysinfo_linux_userspace_initialize(void);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_SYSINFO_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-sysinfo.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-thunder.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-thunder.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-thunder.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,332 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the Thunder specific devices
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-thunder.h"
+#include "cvmx-gpio.h"
+#include "cvmx-twsi.h"
+
+
+static const int BYPASS_STATUS = 1<<5; /* GPIO 5 */
+static const int BYPASS_EN = 1<<6; /* GPIO 6 */
+static const int WDT_BP_CLR = 1<<7; /* GPIO 7 */
+
+static const int RTC_CTL_ADDR = 0x7;
+static const int RTC_CTL_BIT_EOSC = 0x80;
+static const int RTC_CTL_BIT_WACE = 0x40;
+static const int RTC_CTL_BIT_WD_ALM = 0x20;
+static const int RTC_CTL_BIT_WDSTR = 0x8;
+static const int RTC_CTL_BIT_AIE = 0x1;
+static const int RTC_WD_ALM_CNT_BYTE0_ADDR = 0x4;
+
+#define CVMX_LAN_BYPASS_MSG(...) do {} while(0)
+
+/*
+ * Board-specific RTC read
+ * Time is expressed in seconds from epoch (Jan 1 1970 at 00:00:00 UTC)
+ */
+uint32_t cvmx_rtc_ds1374_read(void)
+{
+ int retry;
+ uint8_t sec;
+ uint32_t time;
+
+ for(retry=0; retry<2; retry++)
+ {
+ time = cvmx_twsi_read8(CVMX_RTC_DS1374_ADDR, 0x0);
+ time |= (cvmx_twsi_read8_cur_addr(CVMX_RTC_DS1374_ADDR) & 0xff) << 8;
+ time |= (cvmx_twsi_read8_cur_addr(CVMX_RTC_DS1374_ADDR) & 0xff) << 16;
+ time |= (cvmx_twsi_read8_cur_addr(CVMX_RTC_DS1374_ADDR) & 0xff) << 24;
+
+ sec = cvmx_twsi_read8(CVMX_RTC_DS1374_ADDR, 0x0);
+ if (sec == (time & 0xff))
+ break; /* Time did not roll-over, value is correct */
+ }
+
+ return time;
+}
+
+/*
+ * Board-specific RTC write
+ * Time is expressed in seconds from epoch (Jan 1 1970 at 00:00:00 UTC)
+ */
+int cvmx_rtc_ds1374_write(uint32_t time)
+{
+ int rc;
+ int retry;
+ uint8_t sec;
+
+ for(retry=0; retry<2; retry++)
+ {
+ rc = cvmx_twsi_write8(CVMX_RTC_DS1374_ADDR, 0x0, time & 0xff);
+ rc |= cvmx_twsi_write8(CVMX_RTC_DS1374_ADDR, 0x1, (time >> 8) & 0xff);
+ rc |= cvmx_twsi_write8(CVMX_RTC_DS1374_ADDR, 0x2, (time >> 16) & 0xff);
+ rc |= cvmx_twsi_write8(CVMX_RTC_DS1374_ADDR, 0x3, (time >> 24) & 0xff);
+ sec = cvmx_twsi_read8(CVMX_RTC_DS1374_ADDR, 0x0);
+ if (sec == (time & 0xff))
+ break; /* Time did not roll-over, value is correct */
+ }
+
+ return (rc ? -1 : 0);
+}
+
+static int cvmx_rtc_ds1374_alarm_config(int WD, int WDSTR, int AIE)
+{
+ int val;
+
+ val = cvmx_twsi_read8(CVMX_RTC_DS1374_ADDR,RTC_CTL_ADDR);
+ val = val & ~RTC_CTL_BIT_EOSC; /* Make sure that oscillator is running */
+ val = WD    ? (val | RTC_CTL_BIT_WD_ALM) : (val & ~RTC_CTL_BIT_WD_ALM);
+ val = WDSTR ? (val | RTC_CTL_BIT_WDSTR)  : (val & ~RTC_CTL_BIT_WDSTR);
+ val = AIE   ? (val | RTC_CTL_BIT_AIE)    : (val & ~RTC_CTL_BIT_AIE);
+ cvmx_twsi_write8(CVMX_RTC_DS1374_ADDR,RTC_CTL_ADDR, val);
+ return 0;
+}
+
+static int cvmx_rtc_ds1374_alarm_set(int alarm_on)
+{
+ uint8_t val;
+
+ if (alarm_on)
+ {
+ val = cvmx_twsi_read8(CVMX_RTC_DS1374_ADDR,RTC_CTL_ADDR);
+ cvmx_twsi_write8(CVMX_RTC_DS1374_ADDR,RTC_CTL_ADDR, val | RTC_CTL_BIT_WACE);
+ }
+ else
+ {
+ val = cvmx_twsi_read8(CVMX_RTC_DS1374_ADDR,RTC_CTL_ADDR);
+ cvmx_twsi_write8(CVMX_RTC_DS1374_ADDR,RTC_CTL_ADDR, val & ~RTC_CTL_BIT_WACE);
+ }
+ return 0;
+}
+
+
+static int cvmx_rtc_ds1374_alarm_counter_set(uint32_t interval)
+{
+ int i;
+ int rc = 0;
+
+ for(i=0;i<3;i++)
+ {
+ rc |= cvmx_twsi_write8(CVMX_RTC_DS1374_ADDR, RTC_WD_ALM_CNT_BYTE0_ADDR+i, interval & 0xFF);
+ interval >>= 8;
+ }
+ return rc;
+}
+
+#if 0 /* XXX unused */
+static uint32_t cvmx_rtc_ds1374_alarm_counter_get(void)
+{
+ int i;
+ uint32_t interval = 0;
+
+ for(i=0;i<3;i++)
+ {
+ interval |= ( cvmx_twsi_read8(CVMX_RTC_DS1374_ADDR,RTC_WD_ALM_CNT_BYTE0_ADDR+i) & 0xff) << (i*8);
+ }
+ return interval;
+}
+#endif
+
+
+#ifdef CVMX_RTC_DEBUG
+
+void cvmx_rtc_ds1374_dump_state(void)
+{
+ int i = 0;
+
+ cvmx_dprintf("RTC:\n");
+ cvmx_dprintf("%d : %02X ", i, cvmx_twsi_read8(CVMX_RTC_DS1374_ADDR, 0x0));
+ for(i=1; i<10; i++)
+ {
+ cvmx_dprintf("%02X ", cvmx_twsi_read8_cur_addr(CVMX_RTC_DS1374_ADDR));
+ }
+ cvmx_dprintf("\n");
+}
+
+#endif /* CVMX_RTC_DEBUG */
+
+
+/*
+ * LAN bypass functionality
+ */
+
+/* Private initialization function */
+static int cvmx_lan_bypass_init(void)
+{
+ const int CLR_PULSE = 100; /* Longer than 100 ns (on CPUs up to 1 GHz) */
+
+ //Clear GPIO 6
+ cvmx_gpio_clear(BYPASS_EN);
+
+ //Disable WDT
+ cvmx_rtc_ds1374_alarm_set(0);
+
+ //GPIO(7) Send a low pulse
+ cvmx_gpio_clear(WDT_BP_CLR);
+ cvmx_wait(CLR_PULSE);
+ cvmx_gpio_set(WDT_BP_CLR);
+ return 0;
+}
+
+/**
+ * Set LAN bypass mode.
+ *
+ * Supported modes are:
+ * - CVMX_LAN_BYPASS_OFF
+ * <br>LAN ports are connected ( port 0 <--> Octeon <--> port 1 )
+ *
+ * - CVMX_LAN_BYPASS_GPIO
+ * <br>LAN bypass is controlled by software using cvmx_lan_bypass_force() function.
+ * When transitioning to this mode, default is LAN bypass enabled
+ * ( port 0 <--> port 1, -- Octeon ).
+ *
+ * - CVMX_LAN_BYPASS_WATCHDOG
+ * <br>LAN bypass is inactive as long as a watchdog is kept alive.
+ * The default expiration time is 1 second and the function to
+ * call periodically to prevent watchdog expiration is
+ * cvmx_lan_bypass_keep_alive().
+ *
+ * @param mode LAN bypass mode
+ *
+ * @return Error code, or 0 in case of success
+ */
+int cvmx_lan_bypass_mode_set(cvmx_lan_bypass_mode_t mode)
+{
+ switch(mode)
+ {
+ case CVMX_LAN_BYPASS_GPIO:
+ /* enable LAN bypass */
+ cvmx_lan_bypass_init();
+ cvmx_gpio_set(BYPASS_EN);
+ CVMX_LAN_BYPASS_MSG("Enable LAN bypass by GPIO. \n");
+ break;
+
+ case CVMX_LAN_BYPASS_WATCHDOG:
+ /* enable LAN bypass */
+ cvmx_lan_bypass_init();
+ /* Set WDT parameters and turn it on */
+ cvmx_rtc_ds1374_alarm_counter_set(0x1000); /* 4096 ticks = 1 sec */
+ cvmx_rtc_ds1374_alarm_config(1,1,1);
+ cvmx_rtc_ds1374_alarm_set(1);
+ CVMX_LAN_BYPASS_MSG("Enable LAN bypass by WDT. \n");
+ break;
+
+ case CVMX_LAN_BYPASS_OFF:
+ /* disable LAN bypass */
+ cvmx_lan_bypass_init();
+ CVMX_LAN_BYPASS_MSG("Disable LAN bypass. \n");
+ break;
+
+ default:
+ CVMX_LAN_BYPASS_MSG("%s: LAN bypass mode %d not supported\n", __FUNCTION__, mode);
+ break;
+ }
+ return 0;
+}
+
+/**
+ * Refresh watchdog timer.
+ *
+ * Call periodically (less than 1 second) to prevent triggering LAN bypass.
+ * The alternative cvmx_lan_bypass_keep_alive_ms() is provided for cases
+ * where a variable interval is required.
+ */
+void cvmx_lan_bypass_keep_alive(void)
+{
+ cvmx_rtc_ds1374_alarm_counter_set(0x1000); /* 4096 ticks = 1 second */
+}
+
+/**
+ * Refresh watchdog timer, setting a specific expiration interval.
+ *
+ * @param interval_ms Interval, in milliseconds, to next watchdog expiration.
+ */
+void cvmx_lan_bypass_keep_alive_ms(uint32_t interval_ms)
+{
+ cvmx_rtc_ds1374_alarm_counter_set((interval_ms * 0x1000) / 1000);
+}
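+
+/* A minimal usage sketch (app_is_healthy() and do_work() are
+ * illustrative): arm the watchdog-driven bypass, then pet the watchdog
+ * from the main loop at a period comfortably under 1 second:
+ *
+ *    cvmx_lan_bypass_mode_set(CVMX_LAN_BYPASS_WATCHDOG);
+ *    while (app_is_healthy()) {
+ *        do_work();
+ *        cvmx_lan_bypass_keep_alive();
+ *    }
+ */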
+
+/**
+ * Control LAN bypass via software.
+ *
+ * @param force_bypass Force LAN bypass to active (1) or inactive (0)
+ *
+ * @return Error code, or 0 in case of success
+ */
+int cvmx_lan_bypass_force(int force_bypass)
+{
+ if (force_bypass)
+ {
+ //Set GPIO 6
+ cvmx_gpio_set(BYPASS_EN);
+ }
+ else
+ {
+ cvmx_lan_bypass_init();
+ }
+ return 0;
+}
+
+/**
+ * Return status of LAN bypass circuit.
+ *
+ * @return 1 if ports are in LAN bypass, or 0 if normally connected
+ */
+int cvmx_lan_bypass_is_active(void)
+{
+ return !!(cvmx_gpio_read() & BYPASS_STATUS);
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-thunder.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-thunder.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-thunder.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-thunder.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,151 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+#ifndef __CVMX_THUNDER_H__
+#define __CVMX_THUNDER_H__
+
+/**
+ * @file
+ *
+ * Interface to the Thunder specific devices
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_RTC_DS1374_ADDR (0x68)
+
+/*
+ * Read time-of-day counter.
+ * This function is called internally by cvmx-rtc functions.
+ */
+uint32_t cvmx_rtc_ds1374_read(void);
+
+/*
+ * Write time-of-day counter.
+ * This function is called internally by cvmx-rtc functions.
+ */
+int cvmx_rtc_ds1374_write(uint32_t time);
+
+
+/**
+ * LAN bypass modes.
+ */
+typedef enum {
+ CVMX_LAN_BYPASS_OFF = 0, /**< LAN bypass is disabled, port 0 and port 1
+ are always connected to Octeon */
+ CVMX_LAN_BYPASS_GPIO, /**< LAN bypass controlled by GPIO only */
+ CVMX_LAN_BYPASS_WATCHDOG, /**< LAN bypass controlled by watchdog (and GPIO) */
+ CVMX_LAN_BYPASS_LAST /* Keep as last entry */
+} cvmx_lan_bypass_mode_t;
+
+
+/**
+ * Set LAN bypass mode.
+ *
+ * Supported modes are:
+ * - CVMX_LAN_BYPASS_OFF
+ * <br>LAN ports are connected ( port 0 <--> Octeon <--> port 1 )
+ *
+ * - CVMX_LAN_BYPASS_GPIO
+ * <br>LAN bypass is controlled by software using the cvmx_lan_bypass_force() function.
+ * When transitioning to this mode, the default is LAN bypass enabled
+ * ( port 0 <--> port 1, disconnected from Octeon ).
+ *
+ * - CVMX_LAN_BYPASS_WATCHDOG
+ * <br>LAN bypass is inactive as long as the watchdog is kept alive.
+ * The default expiration time is 1 second and the function to
+ * call periodically to prevent watchdog expiration is
+ * cvmx_lan_bypass_keep_alive().
+ *
+ * @param mode LAN bypass mode
+ *
+ * @return Error code, or 0 in case of success
+ */
+int cvmx_lan_bypass_mode_set(cvmx_lan_bypass_mode_t mode);
+
+/**
+ * Return status of LAN bypass circuit.
+ *
+ * @return 1 if ports are in LAN bypass, or 0 if normally connected
+ */
+int cvmx_lan_bypass_is_active(void);
+
+/**
+ * Refresh watchdog timer.
+ *
+ * Call periodically (at intervals shorter than 1 second) to prevent triggering LAN bypass.
+ * The alternative cvmx_lan_bypass_keep_alive_ms() is provided for cases
+ * where a variable interval is required.
+ */
+void cvmx_lan_bypass_keep_alive(void);
+
+/**
+ * Refresh watchdog timer, setting a specific expiration interval.
+ *
+ * @param interval_ms Interval, in milliseconds, to next watchdog expiration.
+ */
+void cvmx_lan_bypass_keep_alive_ms(uint32_t interval_ms);
+
+/**
+ * Control LAN bypass via software.
+ *
+ * @param force_bypass Force LAN bypass to active (1) or inactive (0)
+ *
+ * @return Error code, or 0 in case of success
+ */
+int cvmx_lan_bypass_force(int force_bypass);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_THUNDER_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-thunder.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-tim-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-tim-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-tim-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1375 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-tim-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon tim.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_TIM_DEFS_H__
+#define __CVMX_TIM_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_BIST_RESULT CVMX_TIM_BIST_RESULT_FUNC()
+static inline uint64_t CVMX_TIM_BIST_RESULT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_BIST_RESULT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000020ull);
+}
+#else
+#define CVMX_TIM_BIST_RESULT (CVMX_ADD_IO_SEG(0x0001180058000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_DBG2 CVMX_TIM_DBG2_FUNC()
+static inline uint64_t CVMX_TIM_DBG2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_DBG2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800580000A0ull);
+}
+#else
+#define CVMX_TIM_DBG2 (CVMX_ADD_IO_SEG(0x00011800580000A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_DBG3 CVMX_TIM_DBG3_FUNC()
+static inline uint64_t CVMX_TIM_DBG3_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_DBG3 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800580000A8ull);
+}
+#else
+#define CVMX_TIM_DBG3 (CVMX_ADD_IO_SEG(0x00011800580000A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_ECC_CFG CVMX_TIM_ECC_CFG_FUNC()
+static inline uint64_t CVMX_TIM_ECC_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_ECC_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000018ull);
+}
+#else
+#define CVMX_TIM_ECC_CFG (CVMX_ADD_IO_SEG(0x0001180058000018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_FR_RN_TT CVMX_TIM_FR_RN_TT_FUNC()
+static inline uint64_t CVMX_TIM_FR_RN_TT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_FR_RN_TT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000010ull);
+}
+#else
+#define CVMX_TIM_FR_RN_TT (CVMX_ADD_IO_SEG(0x0001180058000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_GPIO_EN CVMX_TIM_GPIO_EN_FUNC()
+static inline uint64_t CVMX_TIM_GPIO_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_GPIO_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000080ull);
+}
+#else
+#define CVMX_TIM_GPIO_EN (CVMX_ADD_IO_SEG(0x0001180058000080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_INT0 CVMX_TIM_INT0_FUNC()
+static inline uint64_t CVMX_TIM_INT0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_INT0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000030ull);
+}
+#else
+#define CVMX_TIM_INT0 (CVMX_ADD_IO_SEG(0x0001180058000030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_INT0_EN CVMX_TIM_INT0_EN_FUNC()
+static inline uint64_t CVMX_TIM_INT0_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_INT0_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000038ull);
+}
+#else
+#define CVMX_TIM_INT0_EN (CVMX_ADD_IO_SEG(0x0001180058000038ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_INT0_EVENT CVMX_TIM_INT0_EVENT_FUNC()
+static inline uint64_t CVMX_TIM_INT0_EVENT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_INT0_EVENT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000040ull);
+}
+#else
+#define CVMX_TIM_INT0_EVENT (CVMX_ADD_IO_SEG(0x0001180058000040ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_INT_ECCERR CVMX_TIM_INT_ECCERR_FUNC()
+static inline uint64_t CVMX_TIM_INT_ECCERR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_INT_ECCERR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000060ull);
+}
+#else
+#define CVMX_TIM_INT_ECCERR (CVMX_ADD_IO_SEG(0x0001180058000060ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_INT_ECCERR_EN CVMX_TIM_INT_ECCERR_EN_FUNC()
+static inline uint64_t CVMX_TIM_INT_ECCERR_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_INT_ECCERR_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000068ull);
+}
+#else
+#define CVMX_TIM_INT_ECCERR_EN (CVMX_ADD_IO_SEG(0x0001180058000068ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_INT_ECCERR_EVENT0 CVMX_TIM_INT_ECCERR_EVENT0_FUNC()
+static inline uint64_t CVMX_TIM_INT_ECCERR_EVENT0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_INT_ECCERR_EVENT0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000070ull);
+}
+#else
+#define CVMX_TIM_INT_ECCERR_EVENT0 (CVMX_ADD_IO_SEG(0x0001180058000070ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_INT_ECCERR_EVENT1 CVMX_TIM_INT_ECCERR_EVENT1_FUNC()
+static inline uint64_t CVMX_TIM_INT_ECCERR_EVENT1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_TIM_INT_ECCERR_EVENT1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000078ull);
+}
+#else
+#define CVMX_TIM_INT_ECCERR_EVENT1 (CVMX_ADD_IO_SEG(0x0001180058000078ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_MEM_DEBUG0 CVMX_TIM_MEM_DEBUG0_FUNC()
+static inline uint64_t CVMX_TIM_MEM_DEBUG0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_TIM_MEM_DEBUG0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058001100ull);
+}
+#else
+#define CVMX_TIM_MEM_DEBUG0 (CVMX_ADD_IO_SEG(0x0001180058001100ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_MEM_DEBUG1 CVMX_TIM_MEM_DEBUG1_FUNC()
+static inline uint64_t CVMX_TIM_MEM_DEBUG1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_TIM_MEM_DEBUG1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058001108ull);
+}
+#else
+#define CVMX_TIM_MEM_DEBUG1 (CVMX_ADD_IO_SEG(0x0001180058001108ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_MEM_DEBUG2 CVMX_TIM_MEM_DEBUG2_FUNC()
+static inline uint64_t CVMX_TIM_MEM_DEBUG2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_TIM_MEM_DEBUG2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058001110ull);
+}
+#else
+#define CVMX_TIM_MEM_DEBUG2 (CVMX_ADD_IO_SEG(0x0001180058001110ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_MEM_RING0 CVMX_TIM_MEM_RING0_FUNC()
+static inline uint64_t CVMX_TIM_MEM_RING0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_TIM_MEM_RING0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058001000ull);
+}
+#else
+#define CVMX_TIM_MEM_RING0 (CVMX_ADD_IO_SEG(0x0001180058001000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_MEM_RING1 CVMX_TIM_MEM_RING1_FUNC()
+static inline uint64_t CVMX_TIM_MEM_RING1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_TIM_MEM_RING1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058001008ull);
+}
+#else
+#define CVMX_TIM_MEM_RING1 (CVMX_ADD_IO_SEG(0x0001180058001008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_REG_BIST_RESULT CVMX_TIM_REG_BIST_RESULT_FUNC()
+static inline uint64_t CVMX_TIM_REG_BIST_RESULT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_TIM_REG_BIST_RESULT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000080ull);
+}
+#else
+#define CVMX_TIM_REG_BIST_RESULT (CVMX_ADD_IO_SEG(0x0001180058000080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_REG_ERROR CVMX_TIM_REG_ERROR_FUNC()
+static inline uint64_t CVMX_TIM_REG_ERROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_TIM_REG_ERROR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000088ull);
+}
+#else
+#define CVMX_TIM_REG_ERROR (CVMX_ADD_IO_SEG(0x0001180058000088ull))
+#endif
+#define CVMX_TIM_REG_FLAGS (CVMX_ADD_IO_SEG(0x0001180058000000ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_REG_INT_MASK CVMX_TIM_REG_INT_MASK_FUNC()
+static inline uint64_t CVMX_TIM_REG_INT_MASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_TIM_REG_INT_MASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000090ull);
+}
+#else
+#define CVMX_TIM_REG_INT_MASK (CVMX_ADD_IO_SEG(0x0001180058000090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_TIM_REG_READ_IDX CVMX_TIM_REG_READ_IDX_FUNC()
+static inline uint64_t CVMX_TIM_REG_READ_IDX_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
+ cvmx_warn("CVMX_TIM_REG_READ_IDX not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180058000008ull);
+}
+#else
+#define CVMX_TIM_REG_READ_IDX (CVMX_ADD_IO_SEG(0x0001180058000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TIM_RINGX_CTL0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_TIM_RINGX_CTL0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180058002000ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_TIM_RINGX_CTL0(offset) (CVMX_ADD_IO_SEG(0x0001180058002000ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TIM_RINGX_CTL1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_TIM_RINGX_CTL1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180058002400ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_TIM_RINGX_CTL1(offset) (CVMX_ADD_IO_SEG(0x0001180058002400ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TIM_RINGX_CTL2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_TIM_RINGX_CTL2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180058002800ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_TIM_RINGX_CTL2(offset) (CVMX_ADD_IO_SEG(0x0001180058002800ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TIM_RINGX_DBG0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_TIM_RINGX_DBG0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180058003000ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_TIM_RINGX_DBG0(offset) (CVMX_ADD_IO_SEG(0x0001180058003000ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TIM_RINGX_DBG1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
+ cvmx_warn("CVMX_TIM_RINGX_DBG1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180058001200ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_TIM_RINGX_DBG1(offset) (CVMX_ADD_IO_SEG(0x0001180058001200ull) + ((offset) & 63) * 8)
+#endif
+
+/**
+ * cvmx_tim_bist_result
+ *
+ * Notes:
+ * Access to the internal BiST results
+ * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail).
+ */
+union cvmx_tim_bist_result {
+ uint64_t u64;
+ struct cvmx_tim_bist_result_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t wqe_fifo : 1; /**< BIST result of the NCB_WQE FIFO (0=pass, !0=fail) */
+ uint64_t lslr_fifo : 1; /**< BIST result of the NCB_LSLR FIFO (0=pass, !0=fail) */
+ uint64_t rds_mem : 1; /**< BIST result of the RDS memory (0=pass, !0=fail) */
+#else
+ uint64_t rds_mem : 1;
+ uint64_t lslr_fifo : 1;
+ uint64_t wqe_fifo : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_tim_bist_result_s cn68xx;
+ struct cvmx_tim_bist_result_s cn68xxp1;
+};
+typedef union cvmx_tim_bist_result cvmx_tim_bist_result_t;
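+
+#if 0 /* Usage sketch -- not compiled. It checks the per-memory BiST bits
+       * described in the notes above; the function name and the -1 error
+       * convention are assumptions for illustration. */
+static int example_tim_bist_check(void)
+{
+    cvmx_tim_bist_result_t bist;
+
+    bist.u64 = cvmx_read_csr(CVMX_TIM_BIST_RESULT);
+    /* Per the notes, any set bit means the corresponding memory failed. */
+    if (bist.s.rds_mem || bist.s.lslr_fifo || bist.s.wqe_fifo)
+        return -1;
+    return 0;
+}
+#endif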
+
+/**
+ * cvmx_tim_dbg2
+ */
+union cvmx_tim_dbg2 {
+ uint64_t u64;
+ struct cvmx_tim_dbg2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mem_alloc_reg : 8; /**< NCB Load Memory Allocation status */
+ uint64_t reserved_51_55 : 5;
+ uint64_t gnt_fifo_level : 3; /**< NCB GRANT FIFO level */
+ uint64_t reserved_45_47 : 3;
+ uint64_t rwf_fifo_level : 5; /**< NCB requests FIFO level */
+ uint64_t wqe_fifo_level : 8; /**< NCB WQE LD FIFO level */
+ uint64_t reserved_16_31 : 16;
+ uint64_t fsm3_state : 4; /**< FSM 3 current state */
+ uint64_t fsm2_state : 4; /**< FSM 2 current state */
+ uint64_t fsm1_state : 4; /**< FSM 1 current state */
+ uint64_t fsm0_state : 4; /**< FSM 0 current state */
+#else
+ uint64_t fsm0_state : 4;
+ uint64_t fsm1_state : 4;
+ uint64_t fsm2_state : 4;
+ uint64_t fsm3_state : 4;
+ uint64_t reserved_16_31 : 16;
+ uint64_t wqe_fifo_level : 8;
+ uint64_t rwf_fifo_level : 5;
+ uint64_t reserved_45_47 : 3;
+ uint64_t gnt_fifo_level : 3;
+ uint64_t reserved_51_55 : 5;
+ uint64_t mem_alloc_reg : 8;
+#endif
+ } s;
+ struct cvmx_tim_dbg2_s cn68xx;
+ struct cvmx_tim_dbg2_s cn68xxp1;
+};
+typedef union cvmx_tim_dbg2 cvmx_tim_dbg2_t;
+
+/**
+ * cvmx_tim_dbg3
+ */
+union cvmx_tim_dbg3 {
+ uint64_t u64;
+ struct cvmx_tim_dbg3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rings_pending_vec : 64; /**< Pending rings vector. Indicates which ring in TIM are
+ pending traversal. Bit 0 represents ring 0 while bit 63
+ represents ring 63. */
+#else
+ uint64_t rings_pending_vec : 64;
+#endif
+ } s;
+ struct cvmx_tim_dbg3_s cn68xx;
+ struct cvmx_tim_dbg3_s cn68xxp1;
+};
+typedef union cvmx_tim_dbg3 cvmx_tim_dbg3_t;
+
+/**
+ * cvmx_tim_ecc_cfg
+ */
+union cvmx_tim_ecc_cfg {
+ uint64_t u64;
+ struct cvmx_tim_ecc_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t ecc_flp_syn : 2; /**< ECC Flip Syndrome. Flip the ECC's syndrome for testing
+ purposes, to test SBE and DBE ECC interrupts. */
+ uint64_t ecc_en : 1; /**< Enable ECC correction of the Ring Data Structure memory.
+ ECC is enabled by default. */
+#else
+ uint64_t ecc_en : 1;
+ uint64_t ecc_flp_syn : 2;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_tim_ecc_cfg_s cn68xx;
+ struct cvmx_tim_ecc_cfg_s cn68xxp1;
+};
+typedef union cvmx_tim_ecc_cfg cvmx_tim_ecc_cfg_t;
+
+/**
+ * cvmx_tim_fr_rn_tt
+ *
+ * Notes:
+ * For every 64 entries in a bucket, the interval should be at
+ * least 1us.
+ * The minimal recommended value for the Threshold register is 1us.
+ */
+union cvmx_tim_fr_rn_tt {
+ uint64_t u64;
+ struct cvmx_tim_fr_rn_tt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63 : 10;
+ uint64_t thld_gp : 22; /**< Free Running Timer Threshold. Defines the reset value
+ for the free running timer when it reaches zero during
+ its countdown. This threshold only applies to the
+ timer that is driven by GPIO edge as defined at
+ TIM_REG_FLAGS.GPIO_EDGE
+ ***NOTE: Added in pass 2.0 */
+ uint64_t reserved_22_31 : 10;
+ uint64_t fr_rn_tt : 22; /**< Free Running Timer Threshold. Defines the reset value
+ for the free running timer when it reaches zero during
+ its countdown.
+ FR_RN_TT will be used in both cases where free running
+ clock is driven externally or internally.
+ Interval programming guidelines:
+ For every 64 entries in a bucket, the interval should be at
+ least 1us.
+ The minimal recommended value for FR_RN_TT is 1us. */
+#else
+ uint64_t fr_rn_tt : 22;
+ uint64_t reserved_22_31 : 10;
+ uint64_t thld_gp : 22;
+ uint64_t reserved_54_63 : 10;
+#endif
+ } s;
+ struct cvmx_tim_fr_rn_tt_s cn68xx;
+ struct cvmx_tim_fr_rn_tt_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63 : 42;
+ uint64_t fr_rn_tt : 22; /**< Free Running Timer Threshold. Defines the reset value
+ for the free running timer when it reaches zero during
+ its countdown.
+ FR_RN_TT will be used in both cases where free running
+ clock is driven externally or internally.
+ Interval programming guidelines:
+ For every 64 entries in a bucket, the interval should be at
+ least 1us.
+ The minimal recommended value for FR_RN_TT is 1us. */
+#else
+ uint64_t fr_rn_tt : 22;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_tim_fr_rn_tt cvmx_tim_fr_rn_tt_t;
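+
+/*
+ * Worked example (illustrative, assuming the threshold is measured in TIM
+ * clock cycles, as the hw_tick_ns computation in cvmx-tim.c suggests): with
+ * an 800 MHz TIM clock, programming FR_RN_TT = 800 yields a timer tick of
+ * 800 * 1000000000 / 800000000 = 1000 ns, i.e. the recommended 1us minimum.
+ */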
+
+/**
+ * cvmx_tim_gpio_en
+ */
+union cvmx_tim_gpio_en {
+ uint64_t u64;
+ struct cvmx_tim_gpio_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t gpio_en : 64; /**< Each bit correspond to rings [63:0] respectively.
+ This register reflects the values written to
+ TIM_RING63..0_CTL1.ENA_GPIO
+ ***NOTE: Added in pass 2.0 for debug only. RESERVED */
+#else
+ uint64_t gpio_en : 64;
+#endif
+ } s;
+ struct cvmx_tim_gpio_en_s cn68xx;
+};
+typedef union cvmx_tim_gpio_en cvmx_tim_gpio_en_t;
+
+/**
+ * cvmx_tim_int0
+ *
+ * Notes:
+ * A ring is in error if its interval has elapsed more than once without having been serviced. This is
+ * usually a programming error where number of entries in the bucket is too large for the interval
+ * specified for the ring.
+ * Any bit in the INT field should be cleared by writing '1' to it.
+ */
+union cvmx_tim_int0 {
+ uint64_t u64;
+ struct cvmx_tim_int0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t int0 : 64; /**< Interrupt bit per ring. Each bit indicates the
+ ring number in error. Each bit in this reg is set
+ regardless of TIM_INT0_EN value. */
+#else
+ uint64_t int0 : 64;
+#endif
+ } s;
+ struct cvmx_tim_int0_s cn68xx;
+ struct cvmx_tim_int0_s cn68xxp1;
+};
+typedef union cvmx_tim_int0 cvmx_tim_int0_t;
+
+/**
+ * cvmx_tim_int0_en
+ *
+ * Notes:
+ * When a bit in TIM_INT0_EN is set, it enables the corresponding TIM_INT0 bit for interrupt generation.
+ * If the enable bit is cleared, the corresponding bit in TIM_INT0 will still be set.
+ * Interrupt to the cores is generated by : |(TIM_INT0 & TIM_INT0_EN)
+ */
+union cvmx_tim_int0_en {
+ uint64_t u64;
+ struct cvmx_tim_int0_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t int0_en : 64; /**< Bit enable corresponding to TIM_INT0. */
+#else
+ uint64_t int0_en : 64;
+#endif
+ } s;
+ struct cvmx_tim_int0_en_s cn68xx;
+ struct cvmx_tim_int0_en_s cn68xxp1;
+};
+typedef union cvmx_tim_int0_en cvmx_tim_int0_en_t;
+
+/**
+ * cvmx_tim_int0_event
+ */
+union cvmx_tim_int0_event {
+ uint64_t u64;
+ struct cvmx_tim_int0_event_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t ring_id : 6; /**< The first Ring ID where an interrupt occurred. */
+#else
+ uint64_t ring_id : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_tim_int0_event_s cn68xx;
+ struct cvmx_tim_int0_event_s cn68xxp1;
+};
+typedef union cvmx_tim_int0_event cvmx_tim_int0_event_t;
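+
+#if 0 /* Usage sketch -- not compiled. It services a ring-error interrupt
+       * using the three registers above; the handler structure is an
+       * assumption for illustration, while the write-one-to-clear behaviour
+       * comes from the TIM_INT0 notes. */
+static void example_tim_int0_handler(void)
+{
+    cvmx_tim_int0_t int0;
+    cvmx_tim_int0_event_t event;
+
+    int0.u64 = cvmx_read_csr(CVMX_TIM_INT0);
+    event.u64 = cvmx_read_csr(CVMX_TIM_INT0_EVENT);
+    cvmx_dprintf("TIM: first ring in error: %d\n", (int)event.s.ring_id);
+    /* Clear every pending bit by writing it back as '1'. */
+    cvmx_write_csr(CVMX_TIM_INT0, int0.u64);
+}
+#endif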
+
+/**
+ * cvmx_tim_int_eccerr
+ *
+ * Notes:
+ * Each bit in this reg is set regardless of TIM_INT_ECCERR_EN value.
+ *
+ */
+union cvmx_tim_int_eccerr {
+ uint64_t u64;
+ struct cvmx_tim_int_eccerr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t dbe : 1; /**< TIM RDS memory had a Double Bit Error */
+ uint64_t sbe : 1; /**< TIM RDS memory had a Single Bit Error */
+#else
+ uint64_t sbe : 1;
+ uint64_t dbe : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_tim_int_eccerr_s cn68xx;
+ struct cvmx_tim_int_eccerr_s cn68xxp1;
+};
+typedef union cvmx_tim_int_eccerr cvmx_tim_int_eccerr_t;
+
+/**
+ * cvmx_tim_int_eccerr_en
+ *
+ * Notes:
+ * When a mask bit is set, the corresponding bit in TIM_INT_ECCERR is enabled. If the mask bit is cleared, the
+ * corresponding bit in TIM_INT_ECCERR will still be set, but the interrupt will not be reported.
+ */
+union cvmx_tim_int_eccerr_en {
+ uint64_t u64;
+ struct cvmx_tim_int_eccerr_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t dbe_en : 1; /**< Bit mask corresponding to TIM_REG_ECCERR.DBE */
+ uint64_t sbe_en : 1; /**< Bit mask corresponding to TIM_REG_ECCERR.SBE */
+#else
+ uint64_t sbe_en : 1;
+ uint64_t dbe_en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_tim_int_eccerr_en_s cn68xx;
+ struct cvmx_tim_int_eccerr_en_s cn68xxp1;
+};
+typedef union cvmx_tim_int_eccerr_en cvmx_tim_int_eccerr_en_t;
+
+/**
+ * cvmx_tim_int_eccerr_event0
+ */
+union cvmx_tim_int_eccerr_event0 {
+ uint64_t u64;
+ struct cvmx_tim_int_eccerr_event0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t synd : 7; /**< ECC Syndrome */
+ uint64_t add : 8; /**< Memory address where the Error occurred. */
+#else
+ uint64_t add : 8;
+ uint64_t synd : 7;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } s;
+ struct cvmx_tim_int_eccerr_event0_s cn68xx;
+ struct cvmx_tim_int_eccerr_event0_s cn68xxp1;
+};
+typedef union cvmx_tim_int_eccerr_event0 cvmx_tim_int_eccerr_event0_t;
+
+/**
+ * cvmx_tim_int_eccerr_event1
+ */
+union cvmx_tim_int_eccerr_event1 {
+ uint64_t u64;
+ struct cvmx_tim_int_eccerr_event1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63 : 9;
+ uint64_t org_ecc : 7; /**< Original ECC bits where the error occurred. */
+ uint64_t org_rds_dat : 48; /**< Original memory data where the error occurred. */
+#else
+ uint64_t org_rds_dat : 48;
+ uint64_t org_ecc : 7;
+ uint64_t reserved_55_63 : 9;
+#endif
+ } s;
+ struct cvmx_tim_int_eccerr_event1_s cn68xx;
+ struct cvmx_tim_int_eccerr_event1_s cn68xxp1;
+};
+typedef union cvmx_tim_int_eccerr_event1 cvmx_tim_int_eccerr_event1_t;
+
+/**
+ * cvmx_tim_mem_debug0
+ *
+ * Notes:
+ * Internal per-ring state intended for debug use only - tim.ctl[47:0]
+ * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_tim_mem_debug0 {
+ uint64_t u64;
+ struct cvmx_tim_mem_debug0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t ena : 1; /**< Ring timer enable */
+ uint64_t reserved_46_46 : 1;
+ uint64_t count : 22; /**< Time offset for the ring
+ Set to INTERVAL and counts down by 1 every 1024
+ cycles when ENA==1. The HW forces a bucket
+ traversal (and resets COUNT to INTERVAL) whenever
+ the decrement would cause COUNT to go negative.
+ COUNT is unpredictable whenever ENA==0.
+ COUNT is reset to INTERVAL whenever TIM_MEM_RING1
+ is written for the ring. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t interval : 22; /**< Timer interval - 1 */
+#else
+ uint64_t interval : 22;
+ uint64_t reserved_22_23 : 2;
+ uint64_t count : 22;
+ uint64_t reserved_46_46 : 1;
+ uint64_t ena : 1;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_tim_mem_debug0_s cn30xx;
+ struct cvmx_tim_mem_debug0_s cn31xx;
+ struct cvmx_tim_mem_debug0_s cn38xx;
+ struct cvmx_tim_mem_debug0_s cn38xxp2;
+ struct cvmx_tim_mem_debug0_s cn50xx;
+ struct cvmx_tim_mem_debug0_s cn52xx;
+ struct cvmx_tim_mem_debug0_s cn52xxp1;
+ struct cvmx_tim_mem_debug0_s cn56xx;
+ struct cvmx_tim_mem_debug0_s cn56xxp1;
+ struct cvmx_tim_mem_debug0_s cn58xx;
+ struct cvmx_tim_mem_debug0_s cn58xxp1;
+ struct cvmx_tim_mem_debug0_s cn61xx;
+ struct cvmx_tim_mem_debug0_s cn63xx;
+ struct cvmx_tim_mem_debug0_s cn63xxp1;
+ struct cvmx_tim_mem_debug0_s cn66xx;
+ struct cvmx_tim_mem_debug0_s cnf71xx;
+};
+typedef union cvmx_tim_mem_debug0 cvmx_tim_mem_debug0_t;
+
+/**
+ * cvmx_tim_mem_debug1
+ *
+ * Notes:
+ * Internal per-ring state intended for debug use only - tim.sta[63:0]
+ * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_tim_mem_debug1 {
+ uint64_t u64;
+ struct cvmx_tim_mem_debug1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bucket : 13; /**< Current bucket[12:0]
+ Reset to 0 whenever TIM_MEM_RING0 is written for
+ the ring. Incremented (modulo BSIZE) once per
+ bucket traversal.
+ See TIM_MEM_DEBUG2[BUCKET]. */
+ uint64_t base : 31; /**< Pointer[35:5] to bucket[0] */
+ uint64_t bsize : 20; /**< Number of buckets - 1 */
+#else
+ uint64_t bsize : 20;
+ uint64_t base : 31;
+ uint64_t bucket : 13;
+#endif
+ } s;
+ struct cvmx_tim_mem_debug1_s cn30xx;
+ struct cvmx_tim_mem_debug1_s cn31xx;
+ struct cvmx_tim_mem_debug1_s cn38xx;
+ struct cvmx_tim_mem_debug1_s cn38xxp2;
+ struct cvmx_tim_mem_debug1_s cn50xx;
+ struct cvmx_tim_mem_debug1_s cn52xx;
+ struct cvmx_tim_mem_debug1_s cn52xxp1;
+ struct cvmx_tim_mem_debug1_s cn56xx;
+ struct cvmx_tim_mem_debug1_s cn56xxp1;
+ struct cvmx_tim_mem_debug1_s cn58xx;
+ struct cvmx_tim_mem_debug1_s cn58xxp1;
+ struct cvmx_tim_mem_debug1_s cn61xx;
+ struct cvmx_tim_mem_debug1_s cn63xx;
+ struct cvmx_tim_mem_debug1_s cn63xxp1;
+ struct cvmx_tim_mem_debug1_s cn66xx;
+ struct cvmx_tim_mem_debug1_s cnf71xx;
+};
+typedef union cvmx_tim_mem_debug1 cvmx_tim_mem_debug1_t;
+
+/**
+ * cvmx_tim_mem_debug2
+ *
+ * Notes:
+ * Internal per-ring state intended for debug use only - tim.sta[95:64]
+ * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_tim_mem_debug2 {
+ uint64_t u64;
+ struct cvmx_tim_mem_debug2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t cpool : 3; /**< Free list used to free chunks */
+ uint64_t csize : 13; /**< Number of words per chunk */
+ uint64_t reserved_7_7 : 1;
+ uint64_t bucket : 7; /**< Current bucket[19:13]
+ See TIM_MEM_DEBUG1[BUCKET]. */
+#else
+ uint64_t bucket : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t csize : 13;
+ uint64_t cpool : 3;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_tim_mem_debug2_s cn30xx;
+ struct cvmx_tim_mem_debug2_s cn31xx;
+ struct cvmx_tim_mem_debug2_s cn38xx;
+ struct cvmx_tim_mem_debug2_s cn38xxp2;
+ struct cvmx_tim_mem_debug2_s cn50xx;
+ struct cvmx_tim_mem_debug2_s cn52xx;
+ struct cvmx_tim_mem_debug2_s cn52xxp1;
+ struct cvmx_tim_mem_debug2_s cn56xx;
+ struct cvmx_tim_mem_debug2_s cn56xxp1;
+ struct cvmx_tim_mem_debug2_s cn58xx;
+ struct cvmx_tim_mem_debug2_s cn58xxp1;
+ struct cvmx_tim_mem_debug2_s cn61xx;
+ struct cvmx_tim_mem_debug2_s cn63xx;
+ struct cvmx_tim_mem_debug2_s cn63xxp1;
+ struct cvmx_tim_mem_debug2_s cn66xx;
+ struct cvmx_tim_mem_debug2_s cnf71xx;
+};
+typedef union cvmx_tim_mem_debug2 cvmx_tim_mem_debug2_t;
+
+/**
+ * cvmx_tim_mem_ring0
+ *
+ * Notes:
+ * TIM_MEM_RING0 must not be written for a ring when TIM_MEM_RING1[ENA] is set for the ring.
+ * Every write to TIM_MEM_RING0 clears the current bucket for the ring. (The current bucket is
+ * readable via TIM_MEM_DEBUG2[BUCKET],TIM_MEM_DEBUG1[BUCKET].)
+ * BASE is a 32-byte aligned pointer[35:0]. Only pointer[35:5] are stored because pointer[4:0] = 0.
+ * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_tim_mem_ring0 {
+ uint64_t u64;
+ struct cvmx_tim_mem_ring0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63 : 9;
+ uint64_t first_bucket : 31; /**< Pointer[35:5] to bucket[0] */
+ uint64_t num_buckets : 20; /**< Number of buckets - 1 */
+ uint64_t ring : 4; /**< Ring ID */
+#else
+ uint64_t ring : 4;
+ uint64_t num_buckets : 20;
+ uint64_t first_bucket : 31;
+ uint64_t reserved_55_63 : 9;
+#endif
+ } s;
+ struct cvmx_tim_mem_ring0_s cn30xx;
+ struct cvmx_tim_mem_ring0_s cn31xx;
+ struct cvmx_tim_mem_ring0_s cn38xx;
+ struct cvmx_tim_mem_ring0_s cn38xxp2;
+ struct cvmx_tim_mem_ring0_s cn50xx;
+ struct cvmx_tim_mem_ring0_s cn52xx;
+ struct cvmx_tim_mem_ring0_s cn52xxp1;
+ struct cvmx_tim_mem_ring0_s cn56xx;
+ struct cvmx_tim_mem_ring0_s cn56xxp1;
+ struct cvmx_tim_mem_ring0_s cn58xx;
+ struct cvmx_tim_mem_ring0_s cn58xxp1;
+ struct cvmx_tim_mem_ring0_s cn61xx;
+ struct cvmx_tim_mem_ring0_s cn63xx;
+ struct cvmx_tim_mem_ring0_s cn63xxp1;
+ struct cvmx_tim_mem_ring0_s cn66xx;
+ struct cvmx_tim_mem_ring0_s cnf71xx;
+};
+typedef union cvmx_tim_mem_ring0 cvmx_tim_mem_ring0_t;
+
+/**
+ * cvmx_tim_mem_ring1
+ *
+ * Notes:
+ * After a 1->0 transition on ENA, the HW will still complete a bucket traversal for the ring
+ * if it was pending or active prior to the transition. (SW must delay to ensure the completion
+ * of the traversal before reprogramming the ring.)
+ * Every write to TIM_MEM_RING1 resets the current time offset for the ring to the INTERVAL value.
+ * (The current time offset for the ring is readable via TIM_MEM_DEBUG0[COUNT].)
+ * CSIZE must be at least 16. It is illegal to program CSIZE to a value that is less than 16.
+ * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any
+ * CSR read operations to this address can be performed.
+ */
+union cvmx_tim_mem_ring1 {
+ uint64_t u64;
+ struct cvmx_tim_mem_ring1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63 : 21;
+ uint64_t enable : 1; /**< Ring timer enable
+ When clear, the ring is disabled and TIM
+ will not traverse any new buckets for the ring. */
+ uint64_t pool : 3; /**< Free list used to free chunks */
+ uint64_t words_per_chunk : 13; /**< Number of words per chunk */
+ uint64_t interval : 22; /**< Timer interval - 1, measured in 1024 cycle ticks */
+ uint64_t ring : 4; /**< Ring ID */
+#else
+ uint64_t ring : 4;
+ uint64_t interval : 22;
+ uint64_t words_per_chunk : 13;
+ uint64_t pool : 3;
+ uint64_t enable : 1;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } s;
+ struct cvmx_tim_mem_ring1_s cn30xx;
+ struct cvmx_tim_mem_ring1_s cn31xx;
+ struct cvmx_tim_mem_ring1_s cn38xx;
+ struct cvmx_tim_mem_ring1_s cn38xxp2;
+ struct cvmx_tim_mem_ring1_s cn50xx;
+ struct cvmx_tim_mem_ring1_s cn52xx;
+ struct cvmx_tim_mem_ring1_s cn52xxp1;
+ struct cvmx_tim_mem_ring1_s cn56xx;
+ struct cvmx_tim_mem_ring1_s cn56xxp1;
+ struct cvmx_tim_mem_ring1_s cn58xx;
+ struct cvmx_tim_mem_ring1_s cn58xxp1;
+ struct cvmx_tim_mem_ring1_s cn61xx;
+ struct cvmx_tim_mem_ring1_s cn63xx;
+ struct cvmx_tim_mem_ring1_s cn63xxp1;
+ struct cvmx_tim_mem_ring1_s cn66xx;
+ struct cvmx_tim_mem_ring1_s cnf71xx;
+};
+typedef union cvmx_tim_mem_ring1 cvmx_tim_mem_ring1_t;
+
+/**
+ * cvmx_tim_reg_bist_result
+ *
+ * Notes:
+ * Access to the internal BiST results
+ * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail).
+ */
+union cvmx_tim_reg_bist_result {
+ uint64_t u64;
+ struct cvmx_tim_reg_bist_result_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t sta : 2; /**< BiST result of the STA memories (0=pass, !0=fail) */
+ uint64_t ncb : 1; /**< BiST result of the NCB memories (0=pass, !0=fail) */
+ uint64_t ctl : 1; /**< BiST result of the CTL memories (0=pass, !0=fail) */
+#else
+ uint64_t ctl : 1;
+ uint64_t ncb : 1;
+ uint64_t sta : 2;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_tim_reg_bist_result_s cn30xx;
+ struct cvmx_tim_reg_bist_result_s cn31xx;
+ struct cvmx_tim_reg_bist_result_s cn38xx;
+ struct cvmx_tim_reg_bist_result_s cn38xxp2;
+ struct cvmx_tim_reg_bist_result_s cn50xx;
+ struct cvmx_tim_reg_bist_result_s cn52xx;
+ struct cvmx_tim_reg_bist_result_s cn52xxp1;
+ struct cvmx_tim_reg_bist_result_s cn56xx;
+ struct cvmx_tim_reg_bist_result_s cn56xxp1;
+ struct cvmx_tim_reg_bist_result_s cn58xx;
+ struct cvmx_tim_reg_bist_result_s cn58xxp1;
+ struct cvmx_tim_reg_bist_result_s cn61xx;
+ struct cvmx_tim_reg_bist_result_s cn63xx;
+ struct cvmx_tim_reg_bist_result_s cn63xxp1;
+ struct cvmx_tim_reg_bist_result_s cn66xx;
+ struct cvmx_tim_reg_bist_result_s cnf71xx;
+};
+typedef union cvmx_tim_reg_bist_result cvmx_tim_reg_bist_result_t;
+
+/**
+ * cvmx_tim_reg_error
+ *
+ * Notes:
+ * A ring is in error if its interval has elapsed more than once without having been serviced.
+ * During a CSR write to this register, the write data is used as a mask to clear the selected mask
+ * bits (mask'[15:0] = mask[15:0] & ~write_data[15:0]).
+ */
+union cvmx_tim_reg_error {
+ uint64_t u64;
+ struct cvmx_tim_reg_error_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mask : 16; /**< Bit mask indicating the rings in error */
+#else
+ uint64_t mask : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_tim_reg_error_s cn30xx;
+ struct cvmx_tim_reg_error_s cn31xx;
+ struct cvmx_tim_reg_error_s cn38xx;
+ struct cvmx_tim_reg_error_s cn38xxp2;
+ struct cvmx_tim_reg_error_s cn50xx;
+ struct cvmx_tim_reg_error_s cn52xx;
+ struct cvmx_tim_reg_error_s cn52xxp1;
+ struct cvmx_tim_reg_error_s cn56xx;
+ struct cvmx_tim_reg_error_s cn56xxp1;
+ struct cvmx_tim_reg_error_s cn58xx;
+ struct cvmx_tim_reg_error_s cn58xxp1;
+ struct cvmx_tim_reg_error_s cn61xx;
+ struct cvmx_tim_reg_error_s cn63xx;
+ struct cvmx_tim_reg_error_s cn63xxp1;
+ struct cvmx_tim_reg_error_s cn66xx;
+ struct cvmx_tim_reg_error_s cnf71xx;
+};
+typedef union cvmx_tim_reg_error cvmx_tim_reg_error_t;
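+
+/*
+ * Example of the clear-on-write behaviour described above (illustrative):
+ * writing 0x0008 to TIM_REG_ERROR clears only the ring 3 error bit,
+ * since mask'[15:0] = mask[15:0] & ~0x0008.
+ */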
+
+/**
+ * cvmx_tim_reg_flags
+ *
+ * 13e20 reserved
+ *
+ *
+ * Notes:
+ * TIM has a counter that causes a periodic tick every 1024 cycles. This counter is shared by all
+ * rings. (Each tick causes the HW to decrement the time offset (i.e. COUNT) for all enabled rings.)
+ * When ENA_TIM==0, the HW stops this shared periodic counter, so there are no more ticks, and there
+ * are no more new bucket traversals (for any ring).
+ *
+ * If ENA_TIM transitions 1->0, TIM will no longer create new bucket traversals, but there may
+ * have been previous ones. If there are ring bucket traversals that were already pending but
+ * not currently active (i.e. bucket traversals that need to be done by the HW, but haven't been yet)
+ * during this ENA_TIM 1->0 transition, then these bucket traversals will remain pending until
+ * ENA_TIM is later set to one. Bucket traversals that were already in progress will complete
+ * after the 1->0 ENA_TIM transition, though.
+ */
+union cvmx_tim_reg_flags {
+ uint64_t u64;
+ struct cvmx_tim_reg_flags_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t gpio_edge : 2; /**< Edge used for GPIO timing
+ 2'b10 - TIM counts high to low transitions
+ 2'b01 - TIM counts low to high transitions
+ 2'b11 - TIM counts Both low to high and high to low
+ transitions */
+ uint64_t ena_gpio : 1; /**< Enable the external control of GPIO over the free
+ running timer.
+ When set, free running timer will be driven by GPIO.
+ Free running timer will count posedge or negedge of the
+ GPIO pin based on GPIO_EDGE register. */
+ uint64_t ena_dfb : 1; /**< Enable Don't Free Buffer. When set chunk buffer
+ would not be released by the TIM back to FPA. */
+ uint64_t reset : 1; /**< Reset oneshot pulse for free-running structures */
+ uint64_t enable_dwb : 1; /**< Enables non-zero DontWriteBacks when set
+ When set, enables the use of
+ DontWriteBacks during the buffer freeing
+ operations. */
+ uint64_t enable_timers : 1; /**< Enables the TIM section when set
+ When set, TIM is in normal operation.
+ When clear, time is effectively stopped for all
+ rings in TIM. */
+#else
+ uint64_t enable_timers : 1;
+ uint64_t enable_dwb : 1;
+ uint64_t reset : 1;
+ uint64_t ena_dfb : 1;
+ uint64_t ena_gpio : 1;
+ uint64_t gpio_edge : 2;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_tim_reg_flags_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t reset : 1; /**< Reset oneshot pulse for free-running structures */
+ uint64_t enable_dwb : 1; /**< Enables non-zero DontWriteBacks when set
+ When set, enables the use of
+ DontWriteBacks during the buffer freeing
+ operations. */
+ uint64_t enable_timers : 1; /**< Enables the TIM section when set
+ When set, TIM is in normal operation.
+ When clear, time is effectively stopped for all
+ rings in TIM. */
+#else
+ uint64_t enable_timers : 1;
+ uint64_t enable_dwb : 1;
+ uint64_t reset : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_tim_reg_flags_cn30xx cn31xx;
+ struct cvmx_tim_reg_flags_cn30xx cn38xx;
+ struct cvmx_tim_reg_flags_cn30xx cn38xxp2;
+ struct cvmx_tim_reg_flags_cn30xx cn50xx;
+ struct cvmx_tim_reg_flags_cn30xx cn52xx;
+ struct cvmx_tim_reg_flags_cn30xx cn52xxp1;
+ struct cvmx_tim_reg_flags_cn30xx cn56xx;
+ struct cvmx_tim_reg_flags_cn30xx cn56xxp1;
+ struct cvmx_tim_reg_flags_cn30xx cn58xx;
+ struct cvmx_tim_reg_flags_cn30xx cn58xxp1;
+ struct cvmx_tim_reg_flags_cn30xx cn61xx;
+ struct cvmx_tim_reg_flags_cn30xx cn63xx;
+ struct cvmx_tim_reg_flags_cn30xx cn63xxp1;
+ struct cvmx_tim_reg_flags_cn30xx cn66xx;
+ struct cvmx_tim_reg_flags_s cn68xx;
+ struct cvmx_tim_reg_flags_s cn68xxp1;
+ struct cvmx_tim_reg_flags_cn30xx cnf71xx;
+};
+typedef union cvmx_tim_reg_flags cvmx_tim_reg_flags_t;
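+
+#if 0 /* Usage sketch -- not compiled. It stops the shared periodic counter
+       * as described in the notes above; remember that traversals already
+       * in progress still complete after the 1->0 transition. The function
+       * name is an assumption for illustration. */
+static void example_tim_stop_all_rings(void)
+{
+    cvmx_tim_reg_flags_t flags;
+
+    flags.u64 = cvmx_read_csr(CVMX_TIM_REG_FLAGS);
+    flags.s.enable_timers = 0; /* time stops for every ring */
+    cvmx_write_csr(CVMX_TIM_REG_FLAGS, flags.u64);
+}
+#endif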
+
+/**
+ * cvmx_tim_reg_int_mask
+ *
+ * Notes:
+ * Note that this CSR is present only in chip revisions beginning with pass2.
+ * When mask bit is set, the interrupt is enabled.
+ */
+union cvmx_tim_reg_int_mask {
+ uint64_t u64;
+ struct cvmx_tim_reg_int_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t mask : 16; /**< Bit mask corresponding to TIM_REG_ERROR.MASK above */
+#else
+ uint64_t mask : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_tim_reg_int_mask_s cn30xx;
+ struct cvmx_tim_reg_int_mask_s cn31xx;
+ struct cvmx_tim_reg_int_mask_s cn38xx;
+ struct cvmx_tim_reg_int_mask_s cn38xxp2;
+ struct cvmx_tim_reg_int_mask_s cn50xx;
+ struct cvmx_tim_reg_int_mask_s cn52xx;
+ struct cvmx_tim_reg_int_mask_s cn52xxp1;
+ struct cvmx_tim_reg_int_mask_s cn56xx;
+ struct cvmx_tim_reg_int_mask_s cn56xxp1;
+ struct cvmx_tim_reg_int_mask_s cn58xx;
+ struct cvmx_tim_reg_int_mask_s cn58xxp1;
+ struct cvmx_tim_reg_int_mask_s cn61xx;
+ struct cvmx_tim_reg_int_mask_s cn63xx;
+ struct cvmx_tim_reg_int_mask_s cn63xxp1;
+ struct cvmx_tim_reg_int_mask_s cn66xx;
+ struct cvmx_tim_reg_int_mask_s cnf71xx;
+};
+typedef union cvmx_tim_reg_int_mask cvmx_tim_reg_int_mask_t;
+
+/**
+ * cvmx_tim_reg_read_idx
+ *
+ * Notes:
+ * Provides the read index during a CSR read operation to any of the CSRs that are physically stored
+ * as memories. The names of these CSRs begin with the prefix "TIM_MEM_".
+ * IDX[7:0] is the read index. INC[7:0] is an increment that is added to IDX[7:0] after any CSR read.
+ * The intended use is to initially write this CSR such that IDX=0 and INC=1. Then, the entire
+ * contents of a CSR memory can be read with consecutive CSR read commands.
+ */
+union cvmx_tim_reg_read_idx {
+ uint64_t u64;
+ struct cvmx_tim_reg_read_idx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t inc : 8; /**< Increment to add to current index for next index */
+ uint64_t index : 8; /**< Index to use for next memory CSR read */
+#else
+ uint64_t index : 8;
+ uint64_t inc : 8;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_tim_reg_read_idx_s cn30xx;
+ struct cvmx_tim_reg_read_idx_s cn31xx;
+ struct cvmx_tim_reg_read_idx_s cn38xx;
+ struct cvmx_tim_reg_read_idx_s cn38xxp2;
+ struct cvmx_tim_reg_read_idx_s cn50xx;
+ struct cvmx_tim_reg_read_idx_s cn52xx;
+ struct cvmx_tim_reg_read_idx_s cn52xxp1;
+ struct cvmx_tim_reg_read_idx_s cn56xx;
+ struct cvmx_tim_reg_read_idx_s cn56xxp1;
+ struct cvmx_tim_reg_read_idx_s cn58xx;
+ struct cvmx_tim_reg_read_idx_s cn58xxp1;
+ struct cvmx_tim_reg_read_idx_s cn61xx;
+ struct cvmx_tim_reg_read_idx_s cn63xx;
+ struct cvmx_tim_reg_read_idx_s cn63xxp1;
+ struct cvmx_tim_reg_read_idx_s cn66xx;
+ struct cvmx_tim_reg_read_idx_s cnf71xx;
+};
+typedef union cvmx_tim_reg_read_idx cvmx_tim_reg_read_idx_t;
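+
+#if 0 /* Usage sketch -- not compiled. It dumps all 16 entries of a
+       * "TIM_MEM_" CSR using the IDX=0/INC=1 scheme from the notes above;
+       * the choice of TIM_MEM_RING1 and the loop are assumptions for
+       * illustration. */
+static void example_tim_dump_mem_ring1(void)
+{
+    cvmx_tim_reg_read_idx_t idx;
+    int i;
+
+    idx.u64 = 0;
+    idx.s.index = 0; /* start at entry 0 */
+    idx.s.inc = 1;   /* advance one entry per CSR read */
+    cvmx_write_csr(CVMX_TIM_REG_READ_IDX, idx.u64);
+    for (i = 0; i < 16; i++)
+        cvmx_dprintf("TIM_MEM_RING1[%d] = 0x%016llx\n", i,
+                     (unsigned long long)cvmx_read_csr(CVMX_TIM_MEM_RING1));
+}
+#endif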
+
+/**
+ * cvmx_tim_ring#_ctl0
+ *
+ * Notes:
+ * This CSR is a memory of 64 entries
+ * After a 1 to 0 transition on ENA, the HW will still complete a bucket traversal for the ring
+ * if it was pending or active prior to the transition. (SW must delay to ensure the completion
+ * of the traversal before reprogramming the ring.)
+ */
+union cvmx_tim_ringx_ctl0 {
+ uint64_t u64;
+ struct cvmx_tim_ringx_ctl0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t ena : 1; /**< Ring timer enable */
+ uint64_t intc : 2; /**< Interval count for Error. Defines how many intervals
+ could elapse from bucket expiration till actual
+ bucket traversal before HW asserts an error.
+ Typical value is 0,1,2. */
+ uint64_t timercount : 22; /**< Timer Count represents the ring offset: how many timer
+ ticks are left until the interval expires.
+ A typical initialization value is INTERVAL divided by a
+ constant; the constant should be unique per ring,
+ which creates an offset between the rings.
+ Once ENA is set,
+ TIMERCOUNT counts down timer ticks. When TIMERCOUNT
+ reaches zero, the ring's interval has expired and the HW
+ forces a bucket traversal (and resets TIMERCOUNT to
+ INTERVAL).
+ TIMERCOUNT is unpredictable whenever ENA==0.
+ It is SW's responsibility to set TIMERCOUNT before
+ TIM_RINGX_CTL0.ENA transitions from 0 to 1.
+ When the field is set to X it takes X+1 timer ticks
+ for the interval to expire. */
+ uint64_t interval : 22; /**< Timer interval. Measured in Timer Ticks, where timer
+ ticks are defined by TIM_FR_RN_TT.FR_RN_TT. */
+#else
+ uint64_t interval : 22;
+ uint64_t timercount : 22;
+ uint64_t intc : 2;
+ uint64_t ena : 1;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_tim_ringx_ctl0_s cn68xx;
+ struct cvmx_tim_ringx_ctl0_s cn68xxp1;
+};
+typedef union cvmx_tim_ringx_ctl0 cvmx_tim_ringx_ctl0_t;
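+
+#if 0 /* Usage sketch -- not compiled. It disables a ring per the notes
+       * above; the 10 us settle delay is an assumption, the notes only
+       * require that SW wait for any pending or active bucket traversal to
+       * complete before reprogramming the ring. */
+static void example_tim_ring_disable(unsigned long ring)
+{
+    cvmx_tim_ringx_ctl0_t ctl0;
+
+    ctl0.u64 = cvmx_read_csr(CVMX_TIM_RINGX_CTL0(ring));
+    ctl0.s.ena = 0;
+    cvmx_write_csr(CVMX_TIM_RINGX_CTL0(ring), ctl0.u64);
+    cvmx_wait_usec(10); /* let an in-flight traversal finish */
+}
+#endif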
+
+/**
+ * cvmx_tim_ring#_ctl1
+ *
+ * Notes:
+ * This CSR is a memory of 64 entries
+ * ***NOTE: Added fields in pass 2.0
+ */
+union cvmx_tim_ringx_ctl1 {
+ uint64_t u64;
+ struct cvmx_tim_ringx_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t ena_gpio : 1; /**< When set, ring's timer tick will be generated by the
+ GPIO Timer. GPIO edge is defined by
+ TIM_REG_FLAGS.GPIO_EDGE
+ Default value zero means that timer ticks will
+ be generated from the Internal Timer */
+ uint64_t ena_prd : 1; /**< Enable Periodic Mode which would disable the memory
+ write of zeros to num_entries and chunk_remainder
+ when a bucket is traversed. */
+ uint64_t ena_dwb : 1; /**< When set, enables the use of Dont Write Back during
+ FPA buffer freeing operations */
+ uint64_t ena_dfb : 1; /**< Enable Don't Free Buffer. When set chunk buffer
+ would not be released by the TIM back to FPA. */
+ uint64_t cpool : 3; /**< FPA Free list to free chunks to. */
+ uint64_t bucket : 20; /**< Current bucket. Should be set to zero by SW at
+ enable time.
+ Incremented once per bucket traversal. */
+ uint64_t bsize : 20; /**< Number of buckets minus one. If BSIZE==0 there is
+ only one bucket in the ring. */
+#else
+ uint64_t bsize : 20;
+ uint64_t bucket : 20;
+ uint64_t cpool : 3;
+ uint64_t ena_dfb : 1;
+ uint64_t ena_dwb : 1;
+ uint64_t ena_prd : 1;
+ uint64_t ena_gpio : 1;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_tim_ringx_ctl1_s cn68xx;
+ struct cvmx_tim_ringx_ctl1_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63 : 21;
+ uint64_t cpool : 3; /**< FPA Free list to free chunks to. */
+ uint64_t bucket : 20; /**< Current bucket. Should be set to zero by SW at
+ enable time.
+ Incremented once per bucket traversal. */
+ uint64_t bsize : 20; /**< Number of buckets minus one. If BSIZE==0 there is
+ only one bucket in the ring. */
+#else
+ uint64_t bsize : 20;
+ uint64_t bucket : 20;
+ uint64_t cpool : 3;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } cn68xxp1;
+};
+typedef union cvmx_tim_ringx_ctl1 cvmx_tim_ringx_ctl1_t;
+
+/**
+ * cvmx_tim_ring#_ctl2
+ *
+ * Notes:
+ * BASE is a 32-byte aligned pointer[35:0]. Only pointer[35:5] are stored because pointer[4:0] = 0.
+ * This CSR is a memory of 64 entries
+ */
+union cvmx_tim_ringx_ctl2 {
+ uint64_t u64;
+ struct cvmx_tim_ringx_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63 : 17;
+ uint64_t csize : 13; /**< Number of words per chunk. CSIZE mod(16) should be
+ zero. */
+ uint64_t reserved_31_33 : 3;
+ uint64_t base : 31; /**< Pointer[35:5] to bucket[0] */
+#else
+ uint64_t base : 31;
+ uint64_t reserved_31_33 : 3;
+ uint64_t csize : 13;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_tim_ringx_ctl2_s cn68xx;
+ struct cvmx_tim_ringx_ctl2_s cn68xxp1;
+};
+typedef union cvmx_tim_ringx_ctl2 cvmx_tim_ringx_ctl2_t;
+
+/**
+ * cvmx_tim_ring#_dbg0
+ */
+union cvmx_tim_ringx_dbg0 {
+ uint64_t u64;
+ struct cvmx_tim_ringx_dbg0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fr_rn_ht : 22; /**< Free Running Hardware Timer. Shared by all rings and is
+ used to generate the Timer Tick based on
+ FR_RN_TT. */
+ uint64_t timercount : 22; /**< Timer Count represents the ring's offset.
+ Refer to TIM_RINGX_CTL0. */
+ uint64_t cur_bucket : 20; /**< Current bucket. Indicates the ring's current bucket.
+ Refer to TIM_RINGX_CTL1.BUCKET. */
+#else
+ uint64_t cur_bucket : 20;
+ uint64_t timercount : 22;
+ uint64_t fr_rn_ht : 22;
+#endif
+ } s;
+ struct cvmx_tim_ringx_dbg0_s cn68xx;
+ struct cvmx_tim_ringx_dbg0_s cn68xxp1;
+};
+typedef union cvmx_tim_ringx_dbg0 cvmx_tim_ringx_dbg0_t;
+
+/**
+ * cvmx_tim_ring#_dbg1
+ */
+union cvmx_tim_ringx_dbg1 {
+ uint64_t u64;
+ struct cvmx_tim_ringx_dbg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t ring_esr : 2; /**< Ring Expiration Status Register.
+ This register holds the expiration status of the ring.
+ 2'b00 - Ring was recently traversed.
+ 2'b01 - Interval expired. Ring is queued to be traversed.
+ 2'b10 - 1st interval expiration while ring is queued to be
+ traversed.
+ 2'b11 - 2nd interval expiration while ring is queued to be
+ traversed. */
+#else
+ uint64_t ring_esr : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_tim_ringx_dbg1_s cn68xx;
+ struct cvmx_tim_ringx_dbg1_s cn68xxp1;
+};
+typedef union cvmx_tim_ringx_dbg1 cvmx_tim_ringx_dbg1_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-tim-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-tim.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-tim.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-tim.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,314 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support library for the hardware work queue timers.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#include "executive-config.h"
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-tim.h"
+#include "cvmx-bootmem.h"
+
+/* CSR typedefs have been moved to cvmx-tim-defs.h */
+
+/**
+ * Global structure holding the state of all timers.
+ */
+CVMX_SHARED cvmx_tim_t cvmx_tim;
+
+
+#ifdef CVMX_ENABLE_TIMER_FUNCTIONS
+/**
+ * Setup a timer for use. Must be called before the timer
+ * can be used.
+ *
+ * @param tick Time between each bucket in microseconds. This must not be
+ * smaller than 1024/(clock frequency in MHz).
+ * @param max_ticks The maximum number of ticks the timer must be able
+ * to schedule in the future. There are guaranteed to be enough
+ * timer buckets such that:
+ * number of buckets >= max_ticks.
+ * @return Zero on success. Negative on error. Failures are possible
+ * if the number of buckets needed is too large or memory
+ * allocation fails for creating the buckets.
+ */
+int cvmx_tim_setup(uint64_t tick, uint64_t max_ticks)
+{
+ uint64_t timer_id;
+ int error = -1;
+ uint64_t tim_clock_hz = cvmx_clock_get_rate(CVMX_CLOCK_TIM);
+ uint64_t hw_tick_ns;
+ uint64_t hw_tick_ns_allowed;
+ uint64_t tick_ns = 1000 * tick;
+ int i;
+ uint32_t temp;
+ int timer_thr = 1024;
+
+ /* for the simulator */
+ if (tim_clock_hz == 0)
+ tim_clock_hz = 800000000;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ cvmx_tim_fr_rn_tt_t fr_tt;
+ fr_tt.u64 = cvmx_read_csr(CVMX_TIM_FR_RN_TT);
+ timer_thr = fr_tt.s.fr_rn_tt;
+ }
+
+ hw_tick_ns = timer_thr * 1000000000ull / tim_clock_hz;
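+    /* e.g. with the 800 MHz fallback above and timer_thr = 1024 this is
+       1024 * 10^9 / (800 * 10^6) = 1280 ns per hardware tick */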
+    /*
+     * Double the minimal allowed tick to 2 * HW tick. A tick between
+     * hw_tick_ns and 2*hw_tick_ns would set config_ring1.s.interval
+     * to zero, i.e. 1024 cycles. That is not enough time for the timer
+     * unit to fetch the bucket data, so a timer ring error interrupt
+     * would be generated continuously. Avoid such a setting in software.
+     */
+ hw_tick_ns_allowed = hw_tick_ns * 2;
+
+ /* Make sure the timers are stopped */
+ cvmx_tim_stop();
+
+    /* Reinitialize our timer state */
+ memset(&cvmx_tim, 0, sizeof(cvmx_tim));
+
+ if (tick_ns < hw_tick_ns_allowed)
+ {
+ cvmx_dprintf("ERROR: cvmx_tim_setup: Requested tick %lu(ns) is smaller than"
+ " the minimal ticks allowed by hardware %lu(ns)\n",
+ tick_ns, hw_tick_ns_allowed);
+ return error;
+ }
+ else if (tick_ns > 4194304 * hw_tick_ns)
+ {
+ cvmx_dprintf("ERROR: cvmx_tim_setup: Requested tick %lu(ns) is greater than"
+ " the max ticks %lu(ns)\n", tick_ns, hw_tick_ns);
+ return error;
+ }
+
+ for (i=2; i<20; i++)
+ {
+ if (tick_ns < (hw_tick_ns << i))
+ break;
+ }
+
+ cvmx_tim.max_ticks = (uint32_t)max_ticks;
+ cvmx_tim.bucket_shift = (uint32_t)(i - 1 + 10);
+ cvmx_tim.tick_cycles = tick * tim_clock_hz / 1000000;
+
+ temp = (max_ticks * cvmx_tim.tick_cycles) >> cvmx_tim.bucket_shift;
+
+ /* round up to nearest power of 2 */
+ temp -= 1;
+ temp = temp | (temp >> 1);
+ temp = temp | (temp >> 2);
+ temp = temp | (temp >> 4);
+ temp = temp | (temp >> 8);
+ temp = temp | (temp >> 16);
+ cvmx_tim.num_buckets = temp + 1;
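+    /* Worked example of the smear above: a pre-round value of 1000 becomes
+       temp = 999 = 0x3e7, the shifts saturate it to 0x3ff, and num_buckets
+       ends up 0x400 = 1024, the next power of two. */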
+
+ /* ensure input params fall into permitted ranges */
+ if ((cvmx_tim.num_buckets < 3) || cvmx_tim.num_buckets > 1048576)
+ {
+ cvmx_dprintf("ERROR: cvmx_tim_setup: num_buckets out of range\n");
+ return error;
+ }
+
+ /* Allocate the timer buckets from hardware addressable memory */
+ cvmx_tim.bucket = cvmx_bootmem_alloc(CVMX_TIM_NUM_TIMERS * cvmx_tim.num_buckets
+ * sizeof(cvmx_tim_bucket_entry_t), CVMX_CACHE_LINE_SIZE);
+ if (cvmx_tim.bucket == NULL)
+ {
+ cvmx_dprintf("ERROR: cvmx_tim_setup: allocation problem\n");
+ return error;
+ }
+ memset(cvmx_tim.bucket, 0, CVMX_TIM_NUM_TIMERS * cvmx_tim.num_buckets * sizeof(cvmx_tim_bucket_entry_t));
+
+ cvmx_tim.start_time = 0;
+
+ /* Loop through all timers */
+ for (timer_id = 0; timer_id<CVMX_TIM_NUM_TIMERS; timer_id++)
+ {
+ int interval = ((1 << (cvmx_tim.bucket_shift - 10)) - 1);
+ cvmx_tim_bucket_entry_t *bucket = cvmx_tim.bucket + timer_id * cvmx_tim.num_buckets;
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ cvmx_tim_ringx_ctl0_t ring_ctl0;
+ cvmx_tim_ringx_ctl1_t ring_ctl1;
+ cvmx_tim_ringx_ctl2_t ring_ctl2;
+ cvmx_tim_reg_flags_t reg_flags;
+
+            /* Tell the hardware where the bucket array is */
+ ring_ctl2.u64 = 0;
+ ring_ctl2.s.csize = CVMX_FPA_TIMER_POOL_SIZE / 8;
+ ring_ctl2.s.base = cvmx_ptr_to_phys(bucket) >> 5;
+ cvmx_write_csr(CVMX_TIM_RINGX_CTL2(timer_id), ring_ctl2.u64);
+
+ reg_flags.u64 = cvmx_read_csr(CVMX_TIM_REG_FLAGS);
+ ring_ctl1.u64 = 0;
+ ring_ctl1.s.cpool = ((reg_flags.s.ena_dfb == 0) ? CVMX_FPA_TIMER_POOL : 0);
+ ring_ctl1.s.bsize = cvmx_tim.num_buckets - 1;
+ cvmx_write_csr(CVMX_TIM_RINGX_CTL1(timer_id), ring_ctl1.u64);
+
+ ring_ctl0.u64 = 0;
+ ring_ctl0.s.timercount = interval + timer_id * interval / CVMX_TIM_NUM_TIMERS;
+ cvmx_write_csr(CVMX_TIM_RINGX_CTL0(timer_id), ring_ctl0.u64);
+
+ ring_ctl0.u64 = cvmx_read_csr(CVMX_TIM_RINGX_CTL0(timer_id));
+ ring_ctl0.s.ena = 1;
+ ring_ctl0.s.interval = interval;
+ cvmx_write_csr(CVMX_TIM_RINGX_CTL0(timer_id), ring_ctl0.u64);
+ ring_ctl0.u64 = cvmx_read_csr(CVMX_TIM_RINGX_CTL0(timer_id));
+ }
+ else
+ {
+ cvmx_tim_mem_ring0_t config_ring0;
+ cvmx_tim_mem_ring1_t config_ring1;
+            /* Tell the hardware where the bucket array is */
+ config_ring0.u64 = 0;
+ config_ring0.s.first_bucket = cvmx_ptr_to_phys(bucket) >> 5;
+ config_ring0.s.num_buckets = cvmx_tim.num_buckets - 1;
+ config_ring0.s.ring = timer_id;
+ cvmx_write_csr(CVMX_TIM_MEM_RING0, config_ring0.u64);
+
+ /* Tell the hardware the size of each chunk block in pointers */
+ config_ring1.u64 = 0;
+ config_ring1.s.enable = 1;
+ config_ring1.s.pool = CVMX_FPA_TIMER_POOL;
+ config_ring1.s.words_per_chunk = CVMX_FPA_TIMER_POOL_SIZE / 8;
+ config_ring1.s.interval = interval;
+ config_ring1.s.ring = timer_id;
+ cvmx_write_csr(CVMX_TIM_MEM_RING1, config_ring1.u64);
+ }
+ }
+
+ return 0;
+}
+#endif
+
+/**
+ * Start the hardware timer processing
+ */
+void cvmx_tim_start(void)
+{
+ cvmx_tim_control_t control;
+
+ control.u64 = cvmx_read_csr(CVMX_TIM_REG_FLAGS);
+ control.s.enable_dwb = 1;
+ control.s.enable_timers = 1;
+
+ /* Remember when we started the timers */
+ cvmx_tim.start_time = cvmx_clock_get_count(CVMX_CLOCK_TIM);
+ cvmx_write_csr(CVMX_TIM_REG_FLAGS, control.u64);
+}
+
+
+/**
+ * Stop the hardware timer processing. Timers stay configured.
+ */
+void cvmx_tim_stop(void)
+{
+ cvmx_tim_control_t control;
+ control.u64 = cvmx_read_csr(CVMX_TIM_REG_FLAGS);
+ control.s.enable_dwb = 0;
+ control.s.enable_timers = 0;
+ cvmx_write_csr(CVMX_TIM_REG_FLAGS, control.u64);
+}
+
+
+/**
+ * Stop the timer. After this the timer must be setup again
+ * before use.
+ */
+#ifdef CVMX_ENABLE_TIMER_FUNCTIONS
+void cvmx_tim_shutdown(void)
+{
+ uint32_t bucket;
+ uint64_t timer_id;
+ uint64_t entries_per_chunk;
+
+ /* Make sure the timers are stopped */
+ cvmx_tim_stop();
+
+ entries_per_chunk = CVMX_FPA_TIMER_POOL_SIZE/8 - 1;
+
+ /* Now walk all buckets freeing the chunks */
+ for (timer_id = 0; timer_id<CVMX_TIM_NUM_TIMERS; timer_id++)
+ {
+ for (bucket=0; bucket<cvmx_tim.num_buckets; bucket++)
+ {
+ uint64_t chunk_addr;
+ uint64_t next_chunk_addr;
+ cvmx_tim_bucket_entry_t *bucket_ptr = cvmx_tim.bucket + timer_id * cvmx_tim.num_buckets + bucket;
+ CVMX_PREFETCH128(CAST64(bucket_ptr)); /* prefetch the next cacheline for future buckets */
+
+ /* Each bucket contains a list of chunks */
+ chunk_addr = bucket_ptr->first_chunk_addr;
+ while (bucket_ptr->num_entries)
+ {
+#ifdef DEBUG
+ cvmx_dprintf("Freeing Timer Chunk 0x%llx\n", CAST64(chunk_addr));
+#endif
+ /* Read next chunk pointer from end of the current chunk */
+ next_chunk_addr = cvmx_read_csr(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, chunk_addr + CVMX_FPA_TIMER_POOL_SIZE - 8));
+
+ cvmx_fpa_free(cvmx_phys_to_ptr(chunk_addr), CVMX_FPA_TIMER_POOL, 0);
+ chunk_addr = next_chunk_addr;
+ if (bucket_ptr->num_entries > entries_per_chunk)
+ bucket_ptr->num_entries -= entries_per_chunk;
+ else
+ bucket_ptr->num_entries = 0;
+ }
+ }
+ }
+}
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-tim.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
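
For orientation, the cvmx-tim.c functions above compose into a simple lifecycle. A hedged sketch (the tick and max_ticks values are arbitrary illustrations, and CVMX_ENABLE_TIMER_FUNCTIONS must be defined for setup/shutdown):

    if (cvmx_tim_setup(1000 /* us per tick */, 10000 /* max ticks */) == 0)
    {
        cvmx_tim_start();        /* hardware begins walking the rings */
        /* ... schedule work with cvmx_tim_add_entry() from cvmx-tim.h ... */
        cvmx_tim_stop();         /* timers stay configured */
        cvmx_tim_shutdown();     /* frees chunk memory; setup needed again */
    }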
Added: trunk/sys/contrib/octeon-sdk/cvmx-tim.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-tim.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-tim.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,334 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the hardware work queue timers.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_TIM_H__
+#define __CVMX_TIM_H__
+
+#include "cvmx-clock.h"
+#include "cvmx-fpa.h"
+#include "cvmx-wqe.h"
+
+#include "executive-config.h"
+#ifdef CVMX_ENABLE_TIMER_FUNCTIONS
+#include "cvmx-config.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_TIM_NUM_TIMERS (OCTEON_IS_MODEL(OCTEON_CN68XX) ? 64 : 16)
+#define CVMX_TIM_NUM_BUCKETS 2048
+
+typedef enum
+{
+ CVMX_TIM_STATUS_SUCCESS = 0,
+ CVMX_TIM_STATUS_NO_MEMORY = -1,
+ CVMX_TIM_STATUS_TOO_FAR_AWAY = -2,
+ CVMX_TIM_STATUS_BUSY = -3
+} cvmx_tim_status_t;
+
+/**
+ * Each timer bucket contains a list of work queue entries to
+ * schedule when the timer fires. The list is implemented as
+ * a linked list of blocks. Each block contains an array of
+ * work queue entries followed by a next block pointer. Since
+ * these blocks are dynamically allocated from a hardware
+ * memory pool, their actual size isn't known at compile time.
+ * The next block pointer is stored in the last 8 bytes of
+ * the memory block.
+ */
+typedef struct cvmx_tim_entry_chunk
+{
+ volatile uint64_t entries[0];
+} cvmx_tim_entry_chunk_t;
+
+/**
+ * Each timer contains an array of buckets. Each bucket
+ * represents the list of work queue entries that should be
+ * scheduled when the timer fires. The first 3 entries are used
+ * by the hardware.
+ */
+typedef struct
+{
+ volatile uint64_t first_chunk_addr;
+ volatile uint32_t num_entries; /**< Zeroed by HW after traversing list */
+ volatile uint32_t chunk_remainder;/**< Zeroed by HW after traversing list */
+
+ // the remaining 16 bytes are not touched by hardware
+ volatile cvmx_tim_entry_chunk_t *last_chunk;
+ uint64_t pad;
+} cvmx_tim_bucket_entry_t;
+
+/**
+ * Structure representing an individual timer. Each timer has
+ * a timer period, a memory management pool, and a list of
+ * buckets.
+ */
+typedef struct
+{
+    cvmx_tim_bucket_entry_t *bucket;  /**< The timer buckets. Array of [CVMX_TIM_NUM_TIMERS][CVMX_TIM_NUM_BUCKETS] */
+ uint64_t tick_cycles; /**< How long a bucket represents */
+ uint64_t start_time; /**< Time the timer started in cycles */
+    uint32_t bucket_shift;            /**< log2 of a bucket's length in cycles */
+ uint32_t num_buckets; /**< How many buckets per wheel */
+ uint32_t max_ticks; /**< maximum number of ticks allowed for timer */
+} cvmx_tim_t;
+
+/**
+ * Structure used to store state information needed to delete
+ * an already scheduled timer entry. An instance of this
+ * structure must be passed to cvmx_tim_add_entry in order
+ * to be able to delete an entry later with
+ * cvmx_tim_delete_entry.
+ *
+ * NOTE: This structure should be considered opaque by the application,
+ * and the application should not access its members
+ */
+typedef struct
+{
+ uint64_t commit_cycles; /**< After this time the timer can't be changed */
+ uint64_t * timer_entry_ptr;/**< Where the work entry is. Zero this
+ location to delete the entry */
+} cvmx_tim_delete_t;
+
+/**
+ * Global structure holding the state of all timers.
+ */
+extern cvmx_tim_t cvmx_tim;
+
+
+
+
+#ifdef CVMX_ENABLE_TIMER_FUNCTIONS
+/**
+ * Setup a timer for use. Must be called before the timer
+ * can be used.
+ *
+ * @param tick Time between each bucket in microseconds. This must not be
+ * smaller than 1024/(clock frequency in MHz).
+ * @param max_ticks The maximum number of ticks the timer must be able
+ * to schedule in the future. There are guaranteed to be enough
+ * timer buckets such that:
+ * number of buckets >= max_ticks.
+ * @return Zero on success. Negative on error. Failures are possible
+ * if the number of buckets needed is too large or memory
+ * allocation fails for creating the buckets.
+ */
+int cvmx_tim_setup(uint64_t tick, uint64_t max_ticks);
+#endif
+
+/**
+ * Start the hardware timer processing
+ */
+extern void cvmx_tim_start(void);
+
+
+/**
+ * Stop the hardware timer processing. Timers stay configured.
+ */
+extern void cvmx_tim_stop(void);
+
+
+/**
+ * Stop the timer. After this the timer must be setup again
+ * before use.
+ */
+#ifdef CVMX_ENABLE_TIMER_FUNCTIONS
+extern void cvmx_tim_shutdown(void);
+#endif
+
+#ifdef CVMX_ENABLE_TIMER_FUNCTIONS
+/**
+ * Add a work queue entry to the timer.
+ *
+ * @param work_entry Work queue entry to add.
+ * @param ticks_from_now
+ *                   How many ticks in the future the work should be scheduled.
+ * @param delete_info
+ * Optional pointer where to store information needed to
+ * delete the timer entry. If non NULL information needed
+ * to delete the timer entry before it fires is stored here.
+ * If you don't need to be able to delete the timer, pass
+ * NULL.
+ * @return Result return code
+ */
+static inline cvmx_tim_status_t cvmx_tim_add_entry(cvmx_wqe_t *work_entry, uint64_t ticks_from_now, cvmx_tim_delete_t *delete_info)
+{
+ cvmx_tim_bucket_entry_t* work_bucket_ptr;
+ uint64_t work_bucket;
+ volatile uint64_t * tim_entry_ptr; /* pointer to wqe address in timer chunk */
+ uint64_t entries_per_chunk;
+
+ const uint64_t cycles = cvmx_clock_get_count(CVMX_CLOCK_TIM); /* Get our reference time early for accuracy */
+ const uint64_t core_num = cvmx_get_core_num(); /* One timer per processor, so use this to select */
+
+ /* Make sure the specified time won't wrap our bucket list */
+ if (ticks_from_now > cvmx_tim.max_ticks)
+ {
+ cvmx_dprintf("cvmx_tim_add_entry: Tried to schedule work too far away.\n");
+ return CVMX_TIM_STATUS_TOO_FAR_AWAY;
+ }
+
+ /* Since we have no way to synchronize, we can't update a timer that is
+ being used by the hardware. Two buckets forward should be safe */
+ if (ticks_from_now < 2)
+ {
+ cvmx_dprintf("cvmx_tim_add_entry: Tried to schedule work too soon. Delaying it.\n");
+ ticks_from_now = 2;
+ }
+
+ /* Get the bucket this work queue entry should be in. Remember the bucket
+ array is circular */
+ work_bucket = (((ticks_from_now * cvmx_tim.tick_cycles) + cycles - cvmx_tim.start_time)
+ >> cvmx_tim.bucket_shift);
+
+ work_bucket_ptr = cvmx_tim.bucket + core_num * cvmx_tim.num_buckets + (work_bucket & (cvmx_tim.num_buckets - 1));
+ entries_per_chunk = (CVMX_FPA_TIMER_POOL_SIZE/8 - 1);
+
+ /* Check if we have room to add this entry into the existing list */
+ if (work_bucket_ptr->chunk_remainder)
+ {
+ /* Adding the work entry to the end of the existing list */
+ tim_entry_ptr = &(work_bucket_ptr->last_chunk->entries[entries_per_chunk - work_bucket_ptr->chunk_remainder]);
+ *tim_entry_ptr = cvmx_ptr_to_phys(work_entry);
+ work_bucket_ptr->chunk_remainder--;
+ work_bucket_ptr->num_entries++;
+ }
+ else
+ {
+ /* Current list is either completely empty or completely full. We need
+ to allocate a new chunk for storing this work entry */
+ cvmx_tim_entry_chunk_t *new_chunk = (cvmx_tim_entry_chunk_t *)cvmx_fpa_alloc(CVMX_FPA_TIMER_POOL);
+ if (new_chunk == NULL)
+ {
+ cvmx_dprintf("cvmx_tim_add_entry: Failed to allocate memory for new chunk.\n");
+ return CVMX_TIM_STATUS_NO_MEMORY;
+ }
+
+ /* Does a chunk currently exist? We have to check num_entries since
+ the hardware doesn't NULL out the chunk pointers on free */
+ if (work_bucket_ptr->num_entries)
+ {
+ /* This chunk must be appended to an existing list by putting
+ ** its address in the last spot of the existing chunk. */
+ work_bucket_ptr->last_chunk->entries[entries_per_chunk] = cvmx_ptr_to_phys(new_chunk);
+ work_bucket_ptr->num_entries++;
+ }
+ else
+ {
+ /* This is the very first chunk. Add it */
+ work_bucket_ptr->first_chunk_addr = cvmx_ptr_to_phys(new_chunk);
+ work_bucket_ptr->num_entries = 1;
+ }
+ work_bucket_ptr->last_chunk = new_chunk;
+ work_bucket_ptr->chunk_remainder = entries_per_chunk - 1;
+ tim_entry_ptr = &(new_chunk->entries[0]);
+ *tim_entry_ptr = cvmx_ptr_to_phys(work_entry);
+ }
+
+ /* If the user supplied a delete info structure then fill it in */
+ if (delete_info)
+ {
+        /* It would be very bad to delete a timer entry after, or during,
+            the timer's processing. Deleting during processing could yield
+            unpredictable results, and deleting after would always be bad:
+            modifying the entry after processing means changing data in a
+            buffer that has been freed, and possibly allocated again. For
+            this reason we store a commit cycle count in the delete
+            structure. If we are past this count we refuse to delete the
+            timer entry. */
+ delete_info->commit_cycles = cycles + (ticks_from_now - 2) * cvmx_tim.tick_cycles;
+ delete_info->timer_entry_ptr = (uint64_t *)tim_entry_ptr; /* Cast to non-volatile type */
+ }
+
+ CVMX_SYNCWS; /* Make sure the hardware timer unit can access valid data from L2 */
+
+ return CVMX_TIM_STATUS_SUCCESS;
+}
+#endif
+
+
+/**
+ * Delete a timer entry scheduled using cvmx_tim_add_entry.
+ * Deleting a timer will fail if it has already triggered or
+ * might be in progress. The actual state of the work queue
+ * entry isn't changed. You need to dispose of it properly.
+ *
+ * @param delete_info
+ * Structure passed to cvmx_tim_add_entry to store the
+ * information needed to delete a timer entry.
+ * @return CVMX_TIM_STATUS_BUSY if the timer was not deleted, otherwise
+ * CVMX_TIM_STATUS_SUCCESS.
+ */
+static inline cvmx_tim_status_t cvmx_tim_delete_entry(cvmx_tim_delete_t *delete_info)
+{
+ const uint64_t cycles = cvmx_clock_get_count(CVMX_CLOCK_TIM);
+
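+    /* The signed test below is wrap-safe: it asks whether 'cycles' is
+       before 'commit_cycles' modulo 2^64, not whether the raw values
+       compare less-than. */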
+ if ((int64_t)(cycles - delete_info->commit_cycles) < 0)
+ {
+ /* Timer is far enough away. Safe to delete */
+ *delete_info->timer_entry_ptr = 0;
+ return CVMX_TIM_STATUS_SUCCESS;
+ }
+ else
+ {
+        /* Timer is past the commit time. It cannot be stopped */
+ return CVMX_TIM_STATUS_BUSY;
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // __CVMX_TIM_H__
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-tim.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
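
Taken together, cvmx_tim_add_entry and cvmx_tim_delete_entry above give a cancelable one-shot timer. A minimal sketch, assuming the caller already owns a work queue entry 'wqe' (error handling elided):

    cvmx_tim_delete_t del;
    if (cvmx_tim_add_entry(wqe, 50 /* ticks */, &del) == CVMX_TIM_STATUS_SUCCESS)
    {
        /* ... later, if the work is no longer needed ... */
        if (cvmx_tim_delete_entry(&del) == CVMX_TIM_STATUS_BUSY)
        {
            /* Too late: the commit point passed, the hardware owns it now */
        }
    }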
Added: trunk/sys/contrib/octeon-sdk/cvmx-tlb.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-tlb.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-tlb.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,409 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+/**
+ * @file
+ *
+ * cvmx-tlb supplies per core TLB access functions for simple executive
+ * applications.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+#include "cvmx.h"
+#include "cvmx-tlb.h"
+#include "cvmx-core.h"
+#include <math.h>
+
+extern __uint32_t __log2(__uint32_t);
+//#define DEBUG
+
+/**
+ * @INTERNAL
+ * issue the tlb read instruction
+ */
+static inline void __tlb_read(void){
+ CVMX_EHB;
+ CVMX_TLBR;
+ CVMX_EHB;
+}
+
+/**
+ * @INTERNAL
+ * issue the tlb write instruction
+ */
+static inline void __tlb_write(void){
+
+ CVMX_EHB;
+ CVMX_TLBWI;
+ CVMX_EHB;
+}
+
+/**
+ * @INTERNAL
+ * issue the tlb probe instruction
+ */
+static inline int __tlb_probe(uint64_t hi){
+ int index;
+ CVMX_EHB;
+ CVMX_MT_ENTRY_HIGH(hi);
+ CVMX_TLBP;
+ CVMX_EHB;
+
+ CVMX_MF_TLB_INDEX(index);
+
+ if (index < 0) index = -1;
+
+ return index;
+}
+
+/**
+ * @INTERNAL
+ * read a single tlb entry
+ *
+ * return 0: tlb entry is read
+ * -1: index is invalid
+ */
+static inline int __tlb_read_index(uint32_t tlbi){
+
+ if (tlbi >= (uint32_t)cvmx_core_get_tlb_entries()) {
+ return -1;
+ }
+
+ CVMX_MT_TLB_INDEX(tlbi);
+ __tlb_read();
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * write a single tlb entry
+ *
+ * return 0: tlb entry is written
+ * -1: index is invalid
+ */
+static inline int __tlb_write_index(uint32_t tlbi,
+ uint64_t hi, uint64_t lo0,
+ uint64_t lo1, uint64_t pagemask)
+{
+
+ if (tlbi >= (uint32_t)cvmx_core_get_tlb_entries()) {
+ return -1;
+ }
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx-tlb-dbg: "
+ "write TLB %d: hi %lx, lo0 %lx, lo1 %lx, pagemask %lx \n",
+ tlbi, hi, lo0, lo1, pagemask);
+#endif
+
+ CVMX_MT_TLB_INDEX(tlbi);
+ CVMX_MT_ENTRY_HIGH(hi);
+ CVMX_MT_ENTRY_LO_0(lo0);
+ CVMX_MT_ENTRY_LO_1(lo1);
+ CVMX_MT_PAGEMASK(pagemask);
+ __tlb_write();
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Determine if a TLB entry is free to use
+ */
+static inline int __tlb_entry_is_free(uint32_t tlbi) {
+ int ret = 0;
+ uint64_t lo0 = 0, lo1 = 0;
+
+ if (tlbi < (uint32_t)cvmx_core_get_tlb_entries()) {
+
+ __tlb_read_index(tlbi);
+
+ /* Unused entries have neither even nor odd page mapped */
+ CVMX_MF_ENTRY_LO_0(lo0);
+ CVMX_MF_ENTRY_LO_1(lo1);
+
+ if ( !(lo0 & TLB_VALID) && !(lo1 & TLB_VALID)) {
+ ret = 1;
+ }
+ }
+
+ return ret;
+}
+
+
+/**
+ * @INTERNAL
+ * dump a single tlb entry
+ */
+static inline void __tlb_dump_index(uint32_t tlbi)
+{
+ if (tlbi < (uint32_t)cvmx_core_get_tlb_entries()) {
+
+ if (__tlb_entry_is_free(tlbi)) {
+#ifdef DEBUG
+ cvmx_dprintf("Index: %3d Free \n", tlbi);
+#endif
+ } else {
+ uint64_t lo0, lo1, pgmask;
+ uint32_t hi;
+#ifdef DEBUG
+ uint32_t c0, c1;
+ int width = 13;
+#endif
+
+ __tlb_read_index(tlbi);
+
+ CVMX_MF_ENTRY_HIGH(hi);
+ CVMX_MF_ENTRY_LO_0(lo0);
+ CVMX_MF_ENTRY_LO_1(lo1);
+ CVMX_MF_PAGEMASK(pgmask);
+
+#ifdef DEBUG
+ c0 = ( lo0 >> 3 ) & 7;
+ c1 = ( lo1 >> 3 ) & 7;
+
+ cvmx_dprintf("va=%0*lx asid=%02x\n",
+ width, (hi & ~0x1fffUL), hi & 0xff);
+
+ cvmx_dprintf("\t[pa=%0*lx c=%d d=%d v=%d g=%d] ",
+ width,
+ (lo0 << 6) & PAGE_MASK, c0,
+ (lo0 & 4) ? 1 : 0,
+ (lo0 & 2) ? 1 : 0,
+ (lo0 & 1) ? 1 : 0);
+ cvmx_dprintf("[pa=%0*lx c=%d d=%d v=%d g=%d]\n",
+ width,
+ (lo1 << 6) & PAGE_MASK, c1,
+ (lo1 & 4) ? 1 : 0,
+ (lo1 & 2) ? 1 : 0,
+ (lo1 & 1) ? 1 : 0);
+
+#endif
+ }
+ }
+}
+
+/**
+ * @INTERNAL
+ * read the TLB Wired register (index of the first non-wired entry)
+ */
+static inline uint32_t __tlb_wired_index() {
+ uint32_t tlbi;
+
+ CVMX_MF_TLB_WIRED(tlbi);
+ return tlbi;
+}
+
+/**
+ * Find a free entry that can be used for shared memory mapping.
+ *
+ * @return -1: no free entry found
+ * @return : a free entry
+ */
+int cvmx_tlb_allocate_runtime_entry(void)
+{
+ uint32_t i, ret = -1;
+
+ for (i = __tlb_wired_index(); i< (uint32_t)cvmx_core_get_tlb_entries(); i++) {
+
+ /* Check to make sure the index is free to use */
+ if (__tlb_entry_is_free(i)) {
+ /* Found and return */
+ ret = i;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * Invalidate the TLB entry. Remove previous mapping if one was set up
+ */
+void cvmx_tlb_free_runtime_entry(uint32_t tlbi)
+{
+ /* Invalidate an unwired TLB entry */
+ if ((tlbi < (uint32_t)cvmx_core_get_tlb_entries()) && (tlbi >= __tlb_wired_index())) {
+ __tlb_write_index(tlbi, 0xffffffff80000000ULL, 0, 0, 0);
+ }
+}
+
+
+/**
+ * Program a single TLB entry to enable the provided vaddr to paddr mapping.
+ *
+ * @param index Index of the TLB entry
+ * @param vaddr The virtual address for this mapping
+ * @param paddr The physical address for this mapping
+ * @param size Size of the mapping
+ * @param tlb_flags Entry mapping flags
+ */
+
+void cvmx_tlb_write_entry(int index, uint64_t vaddr, uint64_t paddr,
+ uint64_t size, uint64_t tlb_flags) {
+ uint64_t lo0, lo1, hi, pagemask;
+
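+    /* A MIPS TLB entry always maps an even/odd pair of equal-sized pages,
+       so a region is covered either by one half of the pair (when size is
+       itself a page size) or by both halves at size/2 each. */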
+ if ( __is_power_of_two(size) ) {
+ if ( (__log2(size) & 1 ) == 0) {
+            /* size is a power of 4, so we only need to map
+               one page; figure out even or odd page to map */
+ if ((vaddr >> __log2(size) & 1)) {
+ lo0 = 0;
+ lo1 = ((paddr >> 12) << 6) | tlb_flags;
+ hi = ((vaddr - size) >> 12) << 12;
+ }else {
+ lo0 = ((paddr >> 12) << 6) | tlb_flags;
+ lo1 = 0;
+ hi = ((vaddr) >> 12) << 12;
+ }
+ pagemask = (size - 1) & (~1<<11);
+ }else {
+ lo0 = ((paddr >> 12)<< 6) | tlb_flags;
+ lo1 = (((paddr + size /2) >> 12) << 6) | tlb_flags;
+ hi = ((vaddr) >> 12) << 12;
+ pagemask = ((size/2) -1) & (~1<<11);
+ }
+
+
+ __tlb_write_index(index, hi, lo0, lo1, pagemask);
+
+ }
+}
+
+
+/**
+ * Program a single TLB entry to enable the provided vaddr to paddr mapping.
+ * This version adds a wired entry that should not be changed at run time
+ *
+ * @param vaddr The virtual address for this mapping
+ * @param paddr The physical address for this mapping
+ * @param size Size of the mapping
+ * @param tlb_flags Entry mapping flags
+ * @return 0: no free entry available
+ *         1: fixed entry added
+ */
+int cvmx_tlb_add_fixed_entry( uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t tlb_flags) {
+
+ uint64_t index;
+ int ret = 0;
+
+ CVMX_MF_TLB_WIRED(index);
+
+    /* Check to make sure the index is free to use */
+ if (index < (uint32_t)cvmx_core_get_tlb_entries() && __tlb_entry_is_free(index) ) {
+ cvmx_tlb_write_entry(index, vaddr, paddr, size, tlb_flags);
+
+ if (!__tlb_entry_is_free(index)) {
+ /* Bump up the wired register*/
+ CVMX_MT_TLB_WIRED(index + 1);
+ ret = 1;
+ }
+ }
+ return ret;
+}
+
+
+/**
+ * Program a single TLB entry to enable the provided vaddr to paddr mapping.
+ * This version writes a runtime entry. It will check the index to make sure
+ * not to overwrite any fixed entries.
+ *
+ * @param index Index of the TLB entry
+ * @param vaddr The virtual address for this mapping
+ * @param paddr The physical address for this mapping
+ * @param size Size of the mapping
+ * @param tlb_flags Entry mapping flags
+ */
+void cvmx_tlb_write_runtime_entry(int index, uint64_t vaddr, uint64_t paddr,
+ uint64_t size, uint64_t tlb_flags)
+{
+
+ int wired_index;
+ CVMX_MF_TLB_WIRED(wired_index);
+
+ if (index >= wired_index) {
+ cvmx_tlb_write_entry(index, vaddr, paddr, size, tlb_flags);
+ }
+
+}
+
+
+
+/**
+ * Find the TLB index of a given virtual address
+ *
+ * @param vaddr The virtual address to look up
+ * @return -1 not TLB mapped
+ *         >=0 TLB index
+ */
+int cvmx_tlb_lookup(uint64_t vaddr) {
+ uint64_t hi= (vaddr >> 13 ) << 13; /* We always use ASID 0 */
+
+ return __tlb_probe(hi);
+}
+
+/**
+ * Debug routine to show all shared memory mapping
+ */
+void cvmx_tlb_dump_shared_mapping(void) {
+ uint32_t tlbi;
+
+ for ( tlbi = __tlb_wired_index(); tlbi<(uint32_t)cvmx_core_get_tlb_entries(); tlbi++ ) {
+ __tlb_dump_index(tlbi);
+ }
+}
+
+/**
+ * Debug routine to show all TLB entries of this core
+ *
+ */
+void cvmx_tlb_dump_all(void) {
+
+ uint32_t tlbi;
+
+    for (tlbi = 0; tlbi < (uint32_t)cvmx_core_get_tlb_entries(); tlbi++ ) {
+ __tlb_dump_index(tlbi);
+ }
+}
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-tlb.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
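
A hedged usage sketch of the runtime-entry API above (the addresses are placeholders; TLB_VALID is the EntryLo flag referenced in cvmx-tlb.c, and real mappings will usually want the dirty/global bits as well):

    int idx = cvmx_tlb_allocate_runtime_entry();
    if (idx >= 0)
    {
        /* 2 MB region: log2 is odd, so it is split into two 1 MB pages */
        cvmx_tlb_write_runtime_entry(idx, 0x40000000ull, 0x20000000ull,
                                     2 * 1024 * 1024, TLB_VALID);
        /* ... use the mapping ... */
        cvmx_tlb_free_runtime_entry(idx);
    }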
Added: trunk/sys/contrib/octeon-sdk/cvmx-tlb.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-tlb.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-tlb.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,176 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+#ifndef __CVMX_TLB_H__
+#define __CVMX_TLB_H__
+
+/**
+ * @file
+ *
+ * cvmx-tlb provides access functions for setting up TLB entries for simple
+ * executive applications.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Find a free entry that can be used for shared memory mapping.
+ *
+ * @return -1: no free entry found
+ * @return : a free entry
+ */
+int cvmx_tlb_allocate_runtime_entry(void);
+
+/**
+ * Invalidate the TLB entry. Remove previous mapping if one was set up
+ * @param tlbi Index of the TLB entry to invalidate
+ */
+void cvmx_tlb_free_runtime_entry(uint32_t tlbi);
+
+/**
+ * Debug routine to show all shared memory mapping
+ */
+void cvmx_tlb_dump_shared_mapping(void);
+
+/**
+ * Program a single TLB entry to enable the provided vaddr to paddr mapping.
+ *
+ * @param index Index of the TLB entry
+ * @param vaddr The virtual address for this mapping
+ * @param paddr The physical address for this mapping
+ * @param size Size of the mapping
+ * @param tlb_flags Entry mapping flags
+ */
+void cvmx_tlb_write_entry(int index, uint64_t vaddr, uint64_t paddr,
+ uint64_t size, uint64_t tlb_flags);
+
+
+/**
+ * Program a single TLB entry to enable the provided vaddr to paddr mapping.
+ * This version adds a wired entry that should not be changed at run time
+ *
+ * @param index Index of the TLB entry
+ * @param vaddr The virtual address for this mapping
+ * @param paddr The physical address for this mapping
+ * @param size Size of the mapping
+ * @param tlb_flags Entry mapping flags
+ * @return 0: no free entry available
+ *         1: fixed entry added
+ *
+ */
+int cvmx_tlb_add_fixed_entry(uint64_t vaddr, uint64_t paddr,
+ uint64_t size, uint64_t tlb_flags);
+
+/**
+ * Program a single TLB entry to enable the provided vaddr to paddr mapping.
+ * This version writes a runtime entry. It will check the index to make sure
+ * not to overwrite any fixed entries.
+ *
+ * @param index Index of the TLB entry
+ * @param vaddr The virtual address for this mapping
+ * @param paddr The physical address for this mapping
+ * @param size Size of the mapping
+ * @param tlb_flags Entry mapping flags
+ */
+void cvmx_tlb_write_runtime_entry(int index, uint64_t vaddr, uint64_t paddr,
+ uint64_t size, uint64_t tlb_flags);
+
+
+/**
+ * Find the TLB index of a given virtual address
+ *
+ * @param vaddr The virtual address to look up
+ * @return -1 not TLB mapped
+ *         >=0 TLB index
+ */
+int cvmx_tlb_lookup(uint64_t vaddr);
+
+/**
+ * Debug routine to show all TLB entries of this core
+ *
+ */
+void cvmx_tlb_dump_all(void);
+
+/**
+ * @INTERNAL
+ * return the next power of two value for the given input <v>
+ *
+ * @param v input value
+ * @return next power of two value for v
+ */
+static inline uint64_t __upper_power_of_two(uint64_t v)
+{
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+ v++;
+ return v;
+}
+
+/**
+ * @INTERNAL
+ * Check if the given value 'v' is power of two.
+ *
+ * @param v input value
+ * @return 1 yes
+ * 0 no
+ */
+static inline int __is_power_of_two(uint64_t v)
+{
+ int num_of_1s = 0;
+
+ CVMX_DPOP(num_of_1s, v);
+ return (num_of_1s == 1 );
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-tlb.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-tra-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-tra-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-tra-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,105 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-tra-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon tra.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+
+#ifndef __CVMX_TRA_TYPEDEFS_H__
+#define __CVMX_TRA_TYPEDEFS_H__
+
+#include "cvmx-trax-defs.h"
+
+#define CVMX_TRA_BIST_STATUS (CVMX_TRAX_BIST_STATUS(0))
+#define CVMX_TRA_CTL (CVMX_TRAX_CTL(0))
+#define CVMX_TRA_CYCLES_SINCE (CVMX_TRAX_CYCLES_SINCE(0))
+#define CVMX_TRA_CYCLES_SINCE1 (CVMX_TRAX_CYCLES_SINCE1(0))
+#define CVMX_TRA_FILT_ADR_ADR (CVMX_TRAX_FILT_ADR_ADR(0))
+#define CVMX_TRA_FILT_ADR_MSK (CVMX_TRAX_FILT_ADR_MSK(0))
+#define CVMX_TRA_FILT_CMD (CVMX_TRAX_FILT_CMD(0))
+#define CVMX_TRA_FILT_DID (CVMX_TRAX_FILT_DID(0))
+#define CVMX_TRA_FILT_SID (CVMX_TRAX_FILT_SID(0))
+#define CVMX_TRA_INT_STATUS (CVMX_TRAX_INT_STATUS(0))
+#define CVMX_TRA_READ_DAT (CVMX_TRAX_READ_DAT(0))
+#define CVMX_TRA_READ_DAT_HI (CVMX_TRAX_READ_DAT_HI(0))
+#define CVMX_TRA_TRIG0_ADR_ADR (CVMX_TRAX_TRIG0_ADR_ADR(0))
+#define CVMX_TRA_TRIG0_ADR_MSK (CVMX_TRAX_TRIG0_ADR_MSK(0))
+#define CVMX_TRA_TRIG0_CMD (CVMX_TRAX_TRIG0_CMD(0))
+#define CVMX_TRA_TRIG0_DID (CVMX_TRAX_TRIG0_DID(0))
+#define CVMX_TRA_TRIG0_SID (CVMX_TRAX_TRIG0_SID(0))
+#define CVMX_TRA_TRIG1_ADR_ADR (CVMX_TRAX_TRIG1_ADR_ADR(0))
+#define CVMX_TRA_TRIG1_ADR_MSK (CVMX_TRAX_TRIG1_ADR_MSK(0))
+#define CVMX_TRA_TRIG1_CMD (CVMX_TRAX_TRIG1_CMD(0))
+#define CVMX_TRA_TRIG1_DID (CVMX_TRAX_TRIG1_DID(0))
+#define CVMX_TRA_TRIG1_SID (CVMX_TRAX_TRIG1_SID(0))
+
+typedef union cvmx_trax_bist_status cvmx_tra_bist_status_t;
+typedef union cvmx_trax_ctl cvmx_tra_ctl_t;
+typedef union cvmx_trax_cycles_since cvmx_tra_cycles_since_t;
+typedef union cvmx_trax_cycles_since1 cvmx_tra_cycles_since1_t;
+typedef union cvmx_trax_filt_adr_adr cvmx_tra_filt_adr_adr_t;
+typedef union cvmx_trax_filt_adr_msk cvmx_tra_filt_adr_msk_t;
+typedef union cvmx_trax_filt_cmd cvmx_tra_filt_cmd_t;
+typedef union cvmx_trax_filt_did cvmx_tra_filt_did_t;
+typedef union cvmx_trax_filt_sid cvmx_tra_filt_sid_t;
+typedef union cvmx_trax_int_status cvmx_tra_int_status_t;
+typedef union cvmx_trax_read_dat cvmx_tra_read_dat_t;
+typedef union cvmx_trax_read_dat_hi cvmx_tra_read_dat_hi_t;
+typedef union cvmx_trax_trig0_adr_adr cvmx_tra_trig0_adr_adr_t;
+typedef union cvmx_trax_trig0_adr_msk cvmx_tra_trig0_adr_msk_t;
+typedef union cvmx_trax_trig0_cmd cvmx_tra_trig0_cmd_t;
+typedef union cvmx_trax_trig0_did cvmx_tra_trig0_did_t;
+typedef union cvmx_trax_trig0_sid cvmx_tra_trig0_sid_t;
+typedef union cvmx_trax_trig1_adr_adr cvmx_tra_trig1_adr_adr_t;
+typedef union cvmx_trax_trig1_adr_msk cvmx_tra_trig1_adr_msk_t;
+typedef union cvmx_trax_trig1_cmd cvmx_tra_trig1_cmd_t;
+typedef union cvmx_trax_trig1_did cvmx_tra_trig1_did_t;
+typedef union cvmx_trax_trig1_sid cvmx_tra_trig1_sid_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-tra-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
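
The shim above simply pins the legacy single-unit TRA names to unit 0 of the per-unit TRAX registers, so, for example, these two reads are equivalent by construction:

    uint64_t a = cvmx_read_csr(CVMX_TRA_CTL);     /* alias */
    uint64_t b = cvmx_read_csr(CVMX_TRAX_CTL(0)); /* underlying register */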
Added: trunk/sys/contrib/octeon-sdk/cvmx-tra.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-tra.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-tra.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,944 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the Trace buffer hardware.
+ *
+ * <hr>$Revision: 30644 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-tra.h>
+#include <asm/octeon/cvmx-l2c.h>
+#else
+#include "cvmx.h"
+#include "cvmx-tra.h"
+#include "cvmx-l2c.h"
+#endif
+
+static const char *TYPE_ARRAY[] = {
+ "DWB - Don't write back",
+ "PL2 - Prefetch into L2",
+ "PSL1 - Dcache fill, skip L2",
+ "LDD - Dcache fill",
+ "LDI - Icache/IO fill",
+ "LDT - Icache/IO fill, skip L2",
+ "STF - Store full",
+ "STC - Store conditional",
+ "STP - Store partial",
+ "STT - Store full, skip L2",
+ "IOBLD8 - IOB 8bit load",
+ "IOBLD16 - IOB 16bit load",
+ "IOBLD32 - IOB 32bit load",
+ "IOBLD64 - IOB 64bit load",
+ "IOBST - IOB store",
+ "IOBDMA - Async IOB",
+ "SAA - Store atomic add",
+ "RSVD17",
+ "RSVD18",
+ "RSVD19",
+ "RSVD20",
+ "RSVD21",
+ "RSVD22",
+ "RSVD23",
+ "RSVD24",
+ "RSVD25",
+ "RSVD26",
+ "RSVD27",
+ "RSVD28",
+ "RSVD29",
+ "RSVD30",
+ "RSVD31"
+};
+
+static const char *TYPE_ARRAY2[] = {
+ "NOP - None",
+ "LDT - Icache/IO fill, skip L2",
+ "LDI - Icache/IO fill",
+ "PL2 - Prefetch into L2",
+ "RPL2 - Mark for replacement in L2",
+ "DWB - Don't write back",
+ "RSVD6",
+ "RSVD7",
+ "LDD - Dcache fill",
+ "PSL1 - Prefetch L1, skip L2",
+ "RSVD10",
+ "RSVD11",
+ "RSVD12",
+ "RSVD13",
+ "RSVD14",
+ "IOBDMA - Async IOB",
+ "STF - Store full",
+ "STT - Store full, skip L2",
+ "STP - Store partial",
+ "STC - Store conditional",
+ "STFIL1 - Store full, invalidate L1",
+ "STTIL1 - Store full, skip L2, invalidate L1",
+ "FAS32 - Atomic 32bit swap",
+ "FAS64 - Atomic 64bit swap",
+ "WBIL2i - Writeback, invalidate, by index/way",
+ "LTGL2i - Read tag at index/way",
+ "STGL2i - Write tag at index/way",
+ "RSVD27",
+ "INVL2 - Invalidate, by address",
+ "WBIL2 - Writeback, invalidate, by address",
+ "WBL2 - Writeback, by address",
+ "LCKL2 - Allocate, lock, by address",
+ "IOBLD8 - IOB 8bit load",
+ "IOBLD16 - IOB 16bit load",
+ "IOBLD32 - IOB 32bit load",
+ "IOBLD64 - IOB 64bit load",
+ "IOBST8 - IOB 8bit store",
+ "IOBST16 - IOB 16bit store",
+ "IOBST32 - IOB 32bit store",
+ "IOBST64 - IOB 64bit store",
+ "SET8 - 8bit Atomic swap with 1's",
+ "SET16 - 16bit Atomic swap with 1's",
+ "SET32 - 32bit Atomic swap with 1's",
+ "SET64 - 64bit Atomic swap with 1's",
+ "CLR8 - 8bit Atomic swap with 0's",
+ "CLR16 - 16bit Atomic swap with 0's",
+ "CLR32 - 32bit Atomic swap with 0's",
+ "CLR64 - 64bit Atomic swap with 0's",
+ "INCR8 - 8bit Atomic fetch & add by 1",
+ "INCR16 - 16bit Atomic fetch & add by 1",
+ "INCR32 - 32bit Atomic fetch & add by 1",
+ "INCR64 - 64bit Atomic fetch & add by 1",
+ "DECR8 - 8bit Atomic fetch & add by -1",
+ "DECR16 - 16bit Atomic fetch & add by -1",
+ "DECR32 - 32bit Atomic fetch & add by -1",
+ "DECR64 - 64bit Atomic fetch & add by -1",
+ "RSVD56",
+ "RSVD57",
+ "FAA32 - 32bit Atomic fetch and add",
+ "FAA64 - 64bit Atomic fetch and add",
+ "RSVD60",
+ "RSVD61",
+ "SAA32 - 32bit Atomic add",
+ "SAA64 - 64bit Atomic add"
+};
+
+static const char *SOURCE_ARRAY[] = {
+ "PP0",
+ "PP1",
+ "PP2",
+ "PP3",
+ "PP4",
+ "PP5",
+ "PP6",
+ "PP7",
+ "PP8",
+ "PP9",
+ "PP10",
+ "PP11",
+ "PP12",
+ "PP13",
+ "PP14",
+ "PP15",
+ "PIP/IPD",
+ "PKO-R",
+ "FPA/TIM/DFA/PCI/ZIP/POW/PKO-W",
+ "DWB",
+ "RSVD20",
+ "RSVD21",
+ "RSVD22",
+ "RSVD23",
+ "RSVD24",
+ "RSVD25",
+ "RSVD26",
+ "RSVD27",
+ "RSVD28",
+ "RSVD29",
+ "RSVD30",
+ "RSVD31",
+ "PP16",
+ "PP17",
+ "PP18",
+ "PP19",
+ "PP20",
+ "PP21",
+ "PP22",
+ "PP23",
+ "PP24",
+ "PP25",
+ "PP26",
+ "PP27",
+ "PP28",
+ "PP29",
+ "PP30",
+ "PP31"
+};
+
+static const char *DEST_ARRAY[] = {
+ "CIU/GPIO",
+ "RSVD1",
+ "RSVD2",
+ "PCI/PCIe/SLI",
+ "KEY",
+ "FPA",
+ "DFA",
+ "ZIP",
+ "RNG",
+ "IPD",
+ "PKO",
+ "RSVD11",
+ "POW",
+ "USB0",
+ "RAD",
+ "RSVD15",
+ "RSVD16",
+ "RSVD17",
+ "RSVD18",
+ "RSVD19",
+ "RSVD20",
+ "RSVD21",
+ "RSVD22",
+ "RSVD23",
+ "RSVD24",
+ "RSVD25",
+ "RSVD26",
+ "DPI",
+ "RSVD28",
+ "RSVD29",
+ "FAU",
+ "RSVD31"
+};
+
+int _cvmx_tra_unit = 0;
+
+#define CVMX_TRA_SOURCE_MASK (OCTEON_IS_MODEL(OCTEON_CN63XX) ? 0xf00ff : 0xfffff)
+#define CVMX_TRA_DESTINATION_MASK 0xfffffffful
+
+/**
+ * @INTERNAL
+ * Setup the trace buffer filter command mask. The bit position of filter commands
+ * are different for each Octeon model.
+ *
+ * @param filter Which event to log
+ * @return Bitmask of filter command based on the event.
+ */
+static uint64_t __cvmx_tra_set_filter_cmd_mask(cvmx_tra_filt_t filter)
+{
+ cvmx_tra_filt_cmd_t filter_command;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ {
+ /* Bit positions of filter commands are different, map it accordingly */
+ uint64_t cmd = 0;
+ if ((filter & CVMX_TRA_FILT_ALL) == -1ull)
+ {
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ cmd = 0x1ffff;
+ else
+ cmd = 0xffff;
+ }
+ if (filter & CVMX_TRA_FILT_DWB)
+ cmd |= 1ull<<0;
+ if (filter & CVMX_TRA_FILT_PL2)
+ cmd |= 1ull<<1;
+ if (filter & CVMX_TRA_FILT_PSL1)
+ cmd |= 1ull<<2;
+ if (filter & CVMX_TRA_FILT_LDD)
+ cmd |= 1ull<<3;
+ if (filter & CVMX_TRA_FILT_LDI)
+ cmd |= 1ull<<4;
+ if (filter & CVMX_TRA_FILT_LDT)
+ cmd |= 1ull<<5;
+ if (filter & CVMX_TRA_FILT_STF)
+ cmd |= 1ull<<6;
+ if (filter & CVMX_TRA_FILT_STC)
+ cmd |= 1ull<<7;
+ if (filter & CVMX_TRA_FILT_STP)
+ cmd |= 1ull<<8;
+ if (filter & CVMX_TRA_FILT_STT)
+ cmd |= 1ull<<9;
+ if (filter & CVMX_TRA_FILT_IOBLD8)
+ cmd |= 1ull<<10;
+ if (filter & CVMX_TRA_FILT_IOBLD16)
+ cmd |= 1ull<<11;
+ if (filter & CVMX_TRA_FILT_IOBLD32)
+ cmd |= 1ull<<12;
+ if (filter & CVMX_TRA_FILT_IOBLD64)
+ cmd |= 1ull<<13;
+ if (filter & CVMX_TRA_FILT_IOBST)
+ cmd |= 1ull<<14;
+ if (filter & CVMX_TRA_FILT_IOBDMA)
+ cmd |= 1ull<<15;
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) && (filter & CVMX_TRA_FILT_SAA))
+ cmd |= 1ull<<16;
+
+ filter_command.u64 = cmd;
+ }
+ else
+ {
+ if ((filter & CVMX_TRA_FILT_ALL) == -1ull)
+ filter_command.u64 = CVMX_TRA_FILT_ALL;
+ else
+ filter_command.u64 = filter;
+
+ filter_command.cn63xx.reserved_60_61 = 0;
+ filter_command.cn63xx.reserved_56_57 = 0;
+ filter_command.cn63xx.reserved_27_27 = 0;
+ filter_command.cn63xx.reserved_10_14 = 0;
+ filter_command.cn63xx.reserved_6_7 = 0;
+ }
+ return filter_command.u64;
+}
+
+
+/**
+ * Setup the TRA buffer for use
+ *
+ * @param control TRA control setup
+ * @param filter Which events to log
+ * @param source_filter
+ * Source match
+ * @param dest_filter
+ * Destination match
+ * @param address Address compare
+ * @param address_mask
+ * Address mask
+ */
+void cvmx_tra_setup(cvmx_tra_ctl_t control, cvmx_tra_filt_t filter,
+ cvmx_tra_sid_t source_filter, cvmx_tra_did_t dest_filter,
+ uint64_t address, uint64_t address_mask)
+{
+ cvmx_tra_filt_cmd_t filt_cmd;
+ cvmx_tra_filt_sid_t filt_sid;
+ cvmx_tra_filt_did_t filt_did;
+ int tad;
+
+ filt_cmd.u64 = __cvmx_tra_set_filter_cmd_mask(filter);
+ filt_sid.u64 = source_filter & CVMX_TRA_SOURCE_MASK;
+ filt_did.u64 = dest_filter & CVMX_TRA_DESTINATION_MASK;
+
+ /* Address filtering does not work when IOBDMA filter command is enabled
+ because of some caveats. Disable the IOBDMA filter command. */
+ if ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ && ((filt_cmd.u64 & CVMX_TRA_FILT_IOBDMA) == CVMX_TRA_FILT_IOBDMA)
+ && address_mask != 0)
+ {
+ cvmx_dprintf("The address-based filtering does not work with IOBDMAs, disabling the filter command.\n");
+ filt_cmd.u64 &= ~(CVMX_TRA_FILT_IOBDMA);
+ }
+
+ /* In OcteonII pass2, the mode bit is added to enable reading the trace
+       buffer data from different registers for lower and upper 64-bit values.
+ This bit is reserved in other Octeon models. */
+ control.s.rdat_md = 1;
+
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ {
+ cvmx_write_csr(CVMX_TRAX_CTL(tad), control.u64);
+ cvmx_write_csr(CVMX_TRAX_FILT_CMD(tad), filt_cmd.u64);
+ cvmx_write_csr(CVMX_TRAX_FILT_SID(tad), filt_sid.u64);
+ cvmx_write_csr(CVMX_TRAX_FILT_DID(tad), filt_did.u64);
+ cvmx_write_csr(CVMX_TRAX_FILT_ADR_ADR(tad), address);
+ cvmx_write_csr(CVMX_TRAX_FILT_ADR_MSK(tad), address_mask);
+ }
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_tra_setup);
+#endif
+
+/**
+ * Setup each TRA buffer for use
+ *
+ * @param tra Which TRA buffer to use (0-3)
+ * @param control TRA control setup
+ * @param filter Which events to log
+ * @param source_filter
+ * Source match
+ * @param dest_filter
+ * Destination match
+ * @param address Address compare
+ * @param address_mask
+ * Address mask
+ */
+void cvmx_tra_setup_v2(int tra, cvmx_tra_ctl_t control, cvmx_tra_filt_t filter,
+ cvmx_tra_sid_t source_filter, cvmx_tra_did_t dest_filter,
+ uint64_t address, uint64_t address_mask)
+{
+ cvmx_tra_filt_cmd_t filt_cmd;
+ cvmx_tra_filt_sid_t filt_sid;
+ cvmx_tra_filt_did_t filt_did;
+
+ if ((tra + 1) > CVMX_L2C_TADS)
+ {
+ cvmx_dprintf("cvmx_tra_setup_per_tra: Invalid tra(%d), max allowed (%d)\n", tra, CVMX_L2C_TADS - 1);
+ tra = 0;
+ }
+
+ filt_cmd.u64 = __cvmx_tra_set_filter_cmd_mask(filter);
+ filt_sid.u64 = source_filter & CVMX_TRA_SOURCE_MASK;
+ filt_did.u64 = dest_filter & CVMX_TRA_DESTINATION_MASK;
+
+ /* Address filtering does not work when IOBDMA filter command is enabled
+ because of some caveats. Disable the IOBDMA filter command. */
+ if ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ && ((filt_cmd.u64 & CVMX_TRA_FILT_IOBDMA) == CVMX_TRA_FILT_IOBDMA)
+ && address_mask != 0)
+ {
+ cvmx_dprintf("The address-based filtering does not work with IOBDMAs, disabling the filter command.\n");
+ filt_cmd.u64 &= ~(CVMX_TRA_FILT_IOBDMA);
+ }
+
+ /* In OcteonII pass2, the mode bit is added to enable reading the trace
+       buffer data from different registers for lower and upper 64-bit values.
+ This bit is reserved in other Octeon models. */
+ control.s.rdat_md = 1;
+
+ cvmx_write_csr(CVMX_TRAX_CTL(tra), control.u64);
+ cvmx_write_csr(CVMX_TRAX_FILT_CMD(tra), filt_cmd.u64);
+ cvmx_write_csr(CVMX_TRAX_FILT_SID(tra), filt_sid.u64);
+ cvmx_write_csr(CVMX_TRAX_FILT_DID(tra), filt_did.u64);
+ cvmx_write_csr(CVMX_TRAX_FILT_ADR_ADR(tra), address);
+ cvmx_write_csr(CVMX_TRAX_FILT_ADR_MSK(tra), address_mask);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_tra_setup_v2);
+#endif
+
+/**
+ * Setup a TRA trigger. How the triggers are used should be
+ * setup using cvmx_tra_setup.
+ *
+ * @param trigger Trigger to setup (0 or 1)
+ * @param filter Which types of events to trigger on
+ * @param source_filter
+ * Source trigger match
+ * @param dest_filter
+ * Destination trigger match
+ * @param address Trigger address compare
+ * @param address_mask
+ * Trigger address mask
+ */
+void cvmx_tra_trig_setup(uint64_t trigger, cvmx_tra_filt_t filter,
+ cvmx_tra_sid_t source_filter, cvmx_tra_did_t dest_filter,
+ uint64_t address, uint64_t address_mask)
+{
+ cvmx_tra_filt_cmd_t tra_filt_cmd;
+ cvmx_tra_filt_sid_t tra_filt_sid;
+ cvmx_tra_filt_did_t tra_filt_did;
+ int tad;
+
+ tra_filt_cmd.u64 = __cvmx_tra_set_filter_cmd_mask(filter);
+ tra_filt_sid.u64 = source_filter & CVMX_TRA_SOURCE_MASK;
+ tra_filt_did.u64 = dest_filter & CVMX_TRA_DESTINATION_MASK;
+
+ /* Address filtering does not work when IOBDMA filter command is enabled
+ because of some caveats. Disable the IOBDMA filter command. */
+ if ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ && ((tra_filt_cmd.u64 & CVMX_TRA_FILT_IOBDMA) == CVMX_TRA_FILT_IOBDMA)
+ && address_mask != 0)
+ {
+ cvmx_dprintf("The address-based filtering does not work with IOBDMAs, disabling the filter command.\n");
+ tra_filt_cmd.u64 &= ~(CVMX_TRA_FILT_IOBDMA);
+ }
+
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ {
+ cvmx_write_csr(CVMX_TRAX_TRIG0_CMD(tad) + trigger * 64, tra_filt_cmd.u64);
+ cvmx_write_csr(CVMX_TRAX_TRIG0_SID(tad) + trigger * 64, tra_filt_sid.u64);
+ cvmx_write_csr(CVMX_TRAX_TRIG0_DID(tad) + trigger * 64, tra_filt_did.u64);
+ cvmx_write_csr(CVMX_TRAX_TRIG0_ADR_ADR(tad) + trigger * 64, address);
+ cvmx_write_csr(CVMX_TRAX_TRIG0_ADR_MSK(tad) + trigger * 64, address_mask);
+ }
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_tra_trig_setup);
+#endif
+
+/**
+ * Setup each TRA trigger. How the triggers are used should be
+ * set up using cvmx_tra_setup.
+ *
+ * @param tra Which TRA buffer to use (0-3)
+ * @param trigger Trigger to setup (0 or 1)
+ * @param filter Which types of events to trigger on
+ * @param source_filter
+ * Source trigger match
+ * @param dest_filter
+ * Destination trigger match
+ * @param address Trigger address compare
+ * @param address_mask
+ * Trigger address mask
+ */
+void cvmx_tra_trig_setup_v2(int tra, uint64_t trigger, cvmx_tra_filt_t filter,
+ cvmx_tra_sid_t source_filter, cvmx_tra_did_t dest_filter,
+ uint64_t address, uint64_t address_mask)
+{
+ cvmx_tra_filt_cmd_t tra_filt_cmd;
+ cvmx_tra_filt_sid_t tra_filt_sid;
+ cvmx_tra_filt_did_t tra_filt_did;
+
+ tra_filt_cmd.u64 = __cvmx_tra_set_filter_cmd_mask(filter);
+ tra_filt_sid.u64 = source_filter & CVMX_TRA_SOURCE_MASK;
+ tra_filt_did.u64 = dest_filter & CVMX_TRA_DESTINATION_MASK;
+
+ /* Address filtering does not work when the IOBDMA filter command is
+ enabled, due to hardware caveats. Disable the IOBDMA filter command. */
+ if ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ && ((tra_filt_cmd.u64 & CVMX_TRA_FILT_IOBDMA) == CVMX_TRA_FILT_IOBDMA)
+ && address_mask != 0)
+ {
+ cvmx_dprintf("The address-based filtering does not work with IOBDMAs, disabling the filter command.\n");
+ tra_filt_cmd.u64 &= ~(CVMX_TRA_FILT_IOBDMA);
+ }
+
+ cvmx_write_csr(CVMX_TRAX_TRIG0_CMD(tra) + trigger * 64, tra_filt_cmd.u64);
+ cvmx_write_csr(CVMX_TRAX_TRIG0_SID(tra) + trigger * 64, tra_filt_sid.u64);
+ cvmx_write_csr(CVMX_TRAX_TRIG0_DID(tra) + trigger * 64, tra_filt_did.u64);
+ cvmx_write_csr(CVMX_TRAX_TRIG0_ADR_ADR(tra) + trigger * 64, address);
+ cvmx_write_csr(CVMX_TRAX_TRIG0_ADR_MSK(tra) + trigger * 64, address_mask);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_tra_trig_setup_v2);
+#endif
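+
+/* Illustrative sketch -- not part of the original SDK sources. Assuming
+   trigger 0 acts as the start trigger and trigger 1 as the stop trigger
+   (per the start/stop description in cvmx-tra.h), this arms TRA 0 so that
+   tracing runs from the first full-block store to watch_addr until the
+   first IOBDMA. The stop trigger passes an address_mask of 0 to avoid the
+   IOBDMA address-filtering caveat handled above. */
+#if 0
+static void example_arm_triggers(uint64_t watch_addr)
+{
+    /* Start trigger: a full-block store that hits watch_addr exactly. */
+    cvmx_tra_trig_setup_v2(0, 0, CVMX_TRA_FILT_STF,
+                           CVMX_TRA_SID_ALL, CVMX_TRA_DID_ALL,
+                           watch_addr, ~0ull);
+    /* Stop trigger: any IOBDMA, with no address comparison. */
+    cvmx_tra_trig_setup_v2(0, 1, CVMX_TRA_FILT_IOBDMA,
+                           CVMX_TRA_SID_ALL, CVMX_TRA_DID_ALL,
+                           0, 0);
+}
+#endif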
+
+/**
+ * Read an entry from the TRA buffer
+ *
+ * @return Value read. The high (valid) bit will be zero if there wasn't any data
+ */
+cvmx_tra_data_t cvmx_tra_read(void)
+{
+ uint64_t address = CVMX_TRA_READ_DAT;
+ cvmx_tra_data_t result;
+
+ /* The trace buffer format is wider than 64 bits on OcteonII models;
+ read the register again to get the second part of the data. */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ {
+ /* These reads need to be as close as possible to each other */
+ result.u128.data = cvmx_read_csr(address);
+ result.u128.datahi = cvmx_read_csr(address);
+ }
+ else if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) && !OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ {
+ /* OcteonII pass2 uses different trace buffer data registers for reading
+ the lower and upper 64-bit values */
+ result.u128.data = cvmx_read_csr(address);
+ result.u128.datahi = cvmx_read_csr(CVMX_TRA_READ_DAT_HI);
+ }
+ else
+ {
+ result.u128.data = cvmx_read_csr(address);
+ result.u128.datahi = 0;
+ }
+
+ return result;
+}
+
+/**
+ * Read an entry from the TRA buffer from a given TRA unit.
+ *
+ * @param tra_unit Trace buffer unit to read
+ *
+ * @return Value read. The high (valid) bit will be zero if there wasn't any data
+ */
+cvmx_tra_data_t cvmx_tra_read_v2(int tra_unit)
+{
+ cvmx_tra_data_t result;
+
+ result.u128.data = cvmx_read_csr(CVMX_TRAX_READ_DAT(tra_unit));
+ result.u128.datahi = cvmx_read_csr(CVMX_TRAX_READ_DAT_HI(tra_unit));
+
+ return result;
+}
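+
+/* Illustrative sketch -- not part of the original SDK sources. It drains
+   one TRA unit by hand instead of calling cvmx_tra_display_v2(), using
+   the per-entry valid bit to detect an empty buffer; _cvmx_tra_unit must
+   be set so cvmx_tra_decode_text() can recover CN68XX core numbers. */
+#if 0
+static void example_drain_tra(int tra_unit)
+{
+    cvmx_trax_ctl_t tra_ctl;
+    cvmx_tra_data_t data;
+
+    tra_ctl.u64 = cvmx_read_csr(CVMX_TRAX_CTL(tra_unit));
+    for (;;)
+    {
+        data = cvmx_tra_read_v2(tra_unit);
+        if (!data.cmn2.valid)
+            break;                /* no more buffered entries */
+        _cvmx_tra_unit = tra_unit;
+        cvmx_tra_decode_text(tra_ctl, data);
+    }
+}
+#endif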
+
+/**
+ * Decode a TRA entry into human readable output
+ *
+ * @param tra_ctl Trace control setup
+ * @param data Data to decode
+ */
+void cvmx_tra_decode_text(cvmx_tra_ctl_t tra_ctl, cvmx_tra_data_t data)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ {
+ /* The type is a five-bit field for some entries and four bits for others.
+ The four-bit entries can be mis-typed if the top bit is set */
+ int type = data.cmn.type;
+
+ if (type >= 0x1a)
+ type &= 0xf;
+
+ switch (type)
+ {
+ case 0: /* DWB */
+ case 1: /* PL2 */
+ case 2: /* PSL1 */
+ case 3: /* LDD */
+ case 4: /* LDI */
+ case 5: /* LDT */
+ cvmx_dprintf("0x%016llx %c%+10d %s %s 0x%016llx\n",
+ (unsigned long long)data.u128.data,
+ (data.cmn.discontinuity) ? 'D' : ' ',
+ data.cmn.timestamp << (tra_ctl.s.time_grn*3),
+ TYPE_ARRAY[type],
+ SOURCE_ARRAY[data.cmn.source],
+ (unsigned long long)data.cmn.address);
+ break;
+ case 6: /* STF */
+ case 7: /* STC */
+ case 8: /* STP */
+ case 9: /* STT */
+ case 16: /* SAA */
+ cvmx_dprintf("0x%016llx %c%+10d %s %s mask=0x%02x 0x%016llx\n",
+ (unsigned long long)data.u128.data,
+ (data.cmn.discontinuity) ? 'D' : ' ',
+ data.cmn.timestamp << (tra_ctl.s.time_grn*3),
+ TYPE_ARRAY[type],
+ SOURCE_ARRAY[data.store.source],
+ (unsigned int)data.store.mask,
+ (unsigned long long)data.store.address << 3);
+ break;
+ case 10: /* IOBLD8 */
+ case 11: /* IOBLD16 */
+ case 12: /* IOBLD32 */
+ case 13: /* IOBLD64 */
+ case 14: /* IOBST */
+ cvmx_dprintf("0x%016llx %c%+10d %s %s->%s subdid=0x%x 0x%016llx\n",
+ (unsigned long long)data.u128.data,
+ (data.cmn.discontinuity) ? 'D' : ' ',
+ data.cmn.timestamp << (tra_ctl.s.time_grn*3),
+ TYPE_ARRAY[type],
+ SOURCE_ARRAY[data.iobld.source],
+ DEST_ARRAY[data.iobld.dest],
+ (unsigned int)data.iobld.subid,
+ (unsigned long long)data.iobld.address);
+ break;
+ case 15: /* IOBDMA */
+ cvmx_dprintf("0x%016llx %c%+10d %s %s->%s len=0x%x 0x%016llx\n",
+ (unsigned long long)data.u128.data,
+ (data.cmn.discontinuity) ? 'D' : ' ',
+ data.cmn.timestamp << (tra_ctl.s.time_grn*3),
+ TYPE_ARRAY[type],
+ SOURCE_ARRAY[data.iob.source],
+ DEST_ARRAY[data.iob.dest],
+ (unsigned int)data.iob.mask,
+ (unsigned long long)data.iob.address << 3);
+ break;
+ default:
+ cvmx_dprintf("0x%016llx %c%+10d Unknown format\n",
+ (unsigned long long)data.u128.data,
+ (data.cmn.discontinuity) ? 'D' : ' ',
+ data.cmn.timestamp << (tra_ctl.s.time_grn*3));
+ break;
+ }
+ }
+ else
+ {
+ int type;
+ int srcId;
+
+ type = data.cmn2.type;
+
+ switch (1ull<<type)
+ {
+ case CVMX_TRA_FILT_DECR64:
+ case CVMX_TRA_FILT_DECR32:
+ case CVMX_TRA_FILT_DECR16:
+ case CVMX_TRA_FILT_DECR8:
+ case CVMX_TRA_FILT_INCR64:
+ case CVMX_TRA_FILT_INCR32:
+ case CVMX_TRA_FILT_INCR16:
+ case CVMX_TRA_FILT_INCR8:
+ case CVMX_TRA_FILT_CLR64:
+ case CVMX_TRA_FILT_CLR32:
+ case CVMX_TRA_FILT_CLR16:
+ case CVMX_TRA_FILT_CLR8:
+ case CVMX_TRA_FILT_SET64:
+ case CVMX_TRA_FILT_SET32:
+ case CVMX_TRA_FILT_SET16:
+ case CVMX_TRA_FILT_SET8:
+ case CVMX_TRA_FILT_LCKL2:
+ case CVMX_TRA_FILT_WBIL2:
+ case CVMX_TRA_FILT_INVL2:
+ case CVMX_TRA_FILT_STGL2I:
+ case CVMX_TRA_FILT_LTGL2I:
+ case CVMX_TRA_FILT_WBIL2I:
+ case CVMX_TRA_FILT_WBL2:
+ case CVMX_TRA_FILT_DWB:
+ case CVMX_TRA_FILT_RPL2:
+ case CVMX_TRA_FILT_PL2:
+ case CVMX_TRA_FILT_LDI:
+ case CVMX_TRA_FILT_LDT:
+ /* CN68XX has 32 cores distributed across the trace buffers;
+ decode which core produced this entry */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ if (data.cmn2.source <= 7)
+ {
+ srcId = _cvmx_tra_unit + (data.cmn2.source * 4);
+ if (srcId >= 16)
+ srcId += 16;
+ }
+ else
+ srcId = (data.cmn2.source);
+ }
+ else
+ srcId = (data.cmn2.source);
+
+ cvmx_dprintf("0x%016llx%016llx %c%+10d %s %s 0x%016llx%llx\n",
+ (unsigned long long)data.u128.datahi, (unsigned long long)data.u128.data,
+ (data.cmn2.discontinuity) ? 'D' : ' ',
+ data.cmn2.timestamp << (tra_ctl.s.time_grn*3),
+ TYPE_ARRAY2[type],
+ SOURCE_ARRAY[srcId],
+ (unsigned long long)data.cmn2.addresshi,
+ (unsigned long long)data.cmn2.addresslo);
+ break;
+ case CVMX_TRA_FILT_PSL1:
+ case CVMX_TRA_FILT_LDD:
+ case CVMX_TRA_FILT_FAS64:
+ case CVMX_TRA_FILT_FAS32:
+ case CVMX_TRA_FILT_FAA64:
+ case CVMX_TRA_FILT_FAA32:
+ case CVMX_TRA_FILT_SAA64:
+ case CVMX_TRA_FILT_SAA32:
+ case CVMX_TRA_FILT_STC:
+ case CVMX_TRA_FILT_STF:
+ case CVMX_TRA_FILT_STP:
+ case CVMX_TRA_FILT_STT:
+ case CVMX_TRA_FILT_STTIL1:
+ case CVMX_TRA_FILT_STFIL1:
+ /* CN68XX has 32 cores distributed across the trace buffers;
+ decode which core produced this entry */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ if (data.store2.source <= 7)
+ {
+ srcId = _cvmx_tra_unit + (data.store2.source * 4);
+ if (srcId >= 16)
+ srcId += 16;
+ }
+ else
+ srcId = data.store2.source;
+ }
+ else
+ srcId = data.store2.source;
+
+ cvmx_dprintf("0x%016llx%016llx %c%+10d %s %s mask=0x%02x 0x%016llx%llx\n",
+ (unsigned long long)data.u128.datahi, (unsigned long long)data.u128.data,
+ (data.cmn2.discontinuity) ? 'D' : ' ',
+ data.cmn2.timestamp << (tra_ctl.s.time_grn*3),
+ TYPE_ARRAY2[type],
+ SOURCE_ARRAY[srcId],
+ (unsigned int)data.store2.mask,
+ (unsigned long long)data.store2.addresshi,
+ (unsigned long long)data.store2.addresslo);
+ break;
+ case CVMX_TRA_FILT_IOBST64:
+ case CVMX_TRA_FILT_IOBST32:
+ case CVMX_TRA_FILT_IOBST16:
+ case CVMX_TRA_FILT_IOBST8:
+ case CVMX_TRA_FILT_IOBLD64:
+ case CVMX_TRA_FILT_IOBLD32:
+ case CVMX_TRA_FILT_IOBLD16:
+ case CVMX_TRA_FILT_IOBLD8:
+ /* CN68XX has 32 cores distributed across the trace buffers;
+ decode which core produced this entry */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ if (data.iobld2.source <= 7)
+ {
+ srcId = _cvmx_tra_unit + (data.iobld2.source * 4);
+ if (srcId >= 16)
+ srcId += 16;
+ }
+ else
+ srcId = data.iobld2.source;
+ }
+ else
+ srcId = data.iobld2.source;
+
+ cvmx_dprintf("0x%016llx%016llx %c%+10d %s %s->%s subdid=0x%x 0x%016llx%llx\n",
+ (unsigned long long)data.u128.datahi, (unsigned long long)data.u128.data,
+ (data.cmn2.discontinuity) ? 'D' : ' ',
+ data.cmn2.timestamp << (tra_ctl.s.time_grn*3),
+ TYPE_ARRAY2[type],
+ SOURCE_ARRAY[srcId],
+ DEST_ARRAY[data.iobld2.dest],
+ (unsigned int)data.iobld2.subid,
+ (unsigned long long)data.iobld2.addresshi,
+ (unsigned long long)data.iobld2.addresslo);
+ break;
+ case CVMX_TRA_FILT_IOBDMA:
+ /* CN68XX has 32 cores distributed across the trace buffers;
+ decode which core produced this entry */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ {
+ if (data.iob2.source <= 7)
+ {
+ srcId = _cvmx_tra_unit + (data.iob2.source * 4);
+ if (srcId >= 16)
+ srcId += 16;
+ }
+ else
+ srcId = data.iob2.source;
+ }
+ else
+ srcId = data.iob2.source;
+
+ cvmx_dprintf("0x%016llx%016llx %c%+10d %s %s->%s len=0x%x 0x%016llx%llx\n",
+ (unsigned long long)data.u128.datahi, (unsigned long long)data.u128.data,
+ (data.iob2.discontinuity) ? 'D' : ' ',
+ data.iob2.timestamp << (tra_ctl.s.time_grn*3),
+ TYPE_ARRAY2[type],
+ SOURCE_ARRAY[srcId],
+ DEST_ARRAY[data.iob2.dest],
+ (unsigned int)data.iob2.mask,
+ (unsigned long long)data.iob2.addresshi << 3,
+ (unsigned long long)data.iob2.addresslo << 3);
+ break;
+ default:
+ cvmx_dprintf("0x%016llx%016llx %c%+10d Unknown format\n",
+ (unsigned long long)data.u128.datahi, (unsigned long long)data.u128.data,
+ (data.cmn2.discontinuity) ? 'D' : ' ',
+ data.cmn2.timestamp << (tra_ctl.s.time_grn*3));
+ break;
+ }
+ }
+}
+
+/**
+ * Display the entire trace buffer. It is advised that you
+ * disable the trace buffer before calling this routine;
+ * otherwise it could loop forever, displaying trace data
+ * that it created itself.
+ */
+void cvmx_tra_display(void)
+{
+ int valid = 0;
+
+ /* Collect data from each TRA unit for decoding */
+ if (CVMX_L2C_TADS > 1)
+ {
+ cvmx_trax_ctl_t tra_ctl;
+ cvmx_tra_data_t data[4];
+ int tad;
+ do
+ {
+ valid = 0;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ data[tad] = cvmx_tra_read_v2(tad);
+
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ {
+ tra_ctl.u64 = cvmx_read_csr(CVMX_TRAX_CTL(tad));
+
+ if (data[tad].cmn2.valid)
+ {
+ _cvmx_tra_unit = tad;
+ cvmx_tra_decode_text(tra_ctl, data[tad]);
+ valid = 1;
+ }
+ }
+ } while (valid);
+ }
+ else
+ {
+ cvmx_tra_ctl_t tra_ctl;
+ cvmx_tra_data_t data;
+
+ tra_ctl.u64 = cvmx_read_csr(CVMX_TRA_CTL);
+
+ do
+ {
+ data = cvmx_tra_read();
+ if ((OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)) && data.cmn.valid)
+ valid = 1;
+ else if (data.cmn2.valid)
+ valid = 1;
+ else
+ valid = 0;
+
+ if (valid)
+ cvmx_tra_decode_text(tra_ctl, data);
+
+ } while (valid);
+ }
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_tra_display);
+#endif
+
+/**
+ * Display the entire trace buffer. It is advised that you
+ * disable the trace buffer before calling this routine;
+ * otherwise it could loop forever, displaying trace data
+ * that it created itself.
+ *
+ * @param tra_unit Which TRA buffer to use.
+ */
+void cvmx_tra_display_v2(int tra_unit)
+{
+ int valid = 0;
+ cvmx_trax_ctl_t tra_ctl;
+ cvmx_tra_data_t data;
+
+ tra_ctl.u64 = cvmx_read_csr(CVMX_TRAX_CTL(tra_unit));
+
+ do
+ {
+ data = cvmx_tra_read_v2(tra_unit);
+ if (data.cmn2.valid)
+ {
+ _cvmx_tra_unit = tra_unit;
+ cvmx_tra_decode_text(tra_ctl, data);
+ valid = 1;
+ }
+ } while (valid);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_tra_display_v2);
+#endif
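+
+/* Usage sketch -- not part of the original SDK sources. Per the warnings
+   above, tracing is disabled before dumping so the dump's own CSR traffic
+   cannot generate new entries while the buffers are being read. */
+#if 0
+static void example_dump_all(void)
+{
+    cvmx_tra_enable(0);  /* disable every TRA unit */
+    cvmx_tra_display();  /* decode and print all buffered entries */
+}
+#endif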
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-tra.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-tra.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-tra.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-tra.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,749 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the Trace buffer hardware.
+ *
+ * WRITING THE TRACE BUFFER
+ *
+ * When the trace is enabled, commands are traced continuously (wrapping) or until the buffer is filled once
+ * (no wrapping). Additionally, and independent of wrapping, tracing can be temporarily enabled and disabled
+ * by the tracing triggers. All XMC commands can be traced except for IDLE and IOBRSP. The subset of XMC
+ * commands that are traced is determined by the filter and the two triggers, each of which is comprised of
+ * masks for command, sid, did, and address. If triggers are disabled, then only those commands matching
+ * the filter are traced. If triggers are enabled, then only those commands matching the filter, the start
+ * trigger, or the stop trigger are traced during the time between a start trigger and a stop trigger.
+ *
+ * For a given command, its XMC data is written immediately to the buffer. If the command has XMD data,
+ * then that data comes in-order at some later time. The XMD data is accumulated across all valid
+ * XMD cycles and written to the buffer or to a shallow fifo. Data from the fifo is written to the buffer
+ * as soon as it gets access to write the buffer (i.e. the buffer is not currently being written with XMC
+ * data). If the fifo overflows, it simply overwrites itself and the previous XMD data is lost.
+ *
+ *
+ * READING THE TRACE BUFFER
+ *
+ * Each entry of the trace buffer is read by a CSR read command. The trace buffer services each read in order,
+ * as soon as it has access to the (single-ported) trace buffer.
+ *
+ * On Octeon2, each entry of the trace buffer is read by two CSR memory read operations. The first read accesses
+ * bits 63:0 of the buffer entry, and the second read accesses bits 68:64 of the buffer entry. The trace buffer
+ * services each read in order, as soon as it has access to the (single-ported) trace buffer. The buffer's read
+ * pointer increments after two CSR memory read operations.
+ *
+ *
+ * OVERFLOW, UNDERFLOW AND THRESHOLD EVENTS
+ *
+ * The trace buffer maintains a write pointer and a read pointer and detects both the overflow and underflow
+ * conditions. Each time a new trace is enabled, both pointers are reset to entry 0. Normally, each write
+ * (traced event) increments the write pointer and each read increments the read pointer. During the overflow
+ * condition, writing (tracing) is disabled. Tracing will continue as soon as the overflow condition is
+ * resolved. The first entry that is written immediately following the overflow condition may be marked to
+ * indicate that a tracing discontinuity has occurred before this entry. During the underflow condition,
+ * reading does not increment the read pointer and the read data is marked to indicate that no read data is
+ * available.
+ *
+ * The full threshold events are defined to signal an interrupt at certain levels of "fullness" (1/2, 3/4, 4/4).
+ * "fullness" is defined as the relative distance between the write and read pointers (i.e. not defined as the
+ * absolute distance between the write pointer and entry 0). When enabled, the full threshold event occurs
+ * every time the desired level of "fullness" is achieved.
+ *
+ *
+ * Trace buffer entry format
+ * @verbatim
+ * 6 5 4 3 2 1 0
+ * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | 0 | src id | 0 | DWB | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | 0 | src id | 0 | PL2 | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | 0 | src id | 0 | PSL1 | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | 0 | src id | 0 | LDD | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | 0 | src id | 0 | LDI | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | 0 | src id | 0 | LDT | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | * or 16B mask | src id | 0 | STC | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | * or 16B mask | src id | 0 | STF | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | * or 16B mask | src id | 0 | STP | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | * or 16B mask | src id | 0 | STT | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:0] | 0 | src id| dest id |IOBLD8 | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:1] | 0 | src id| dest id |IOBLD16| diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:2] | 0 | src id| dest id |IOBLD32| diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | 0 | src id| dest id |IOBLD64| diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[35:3] | * or 16B mask | src id| dest id |IOBST | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| * or address[35:3] | * or length | src id| dest id |IOBDMA | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *
+ * Trace buffer entry format in Octeon2 is different
+ *
+ * 6 5 4 3 2 1 0
+ * 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[37:0] | 0 | src id | Group 1 | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[37:0] | 0 | xmd mask | src id | Group 2 | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| address[37:0] | 0 |s-did| dest id | src id | Group 3 | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |sta| *address[37:3] | *Length | dest id | src id | Group 4 | diff timestamp|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * notes:
+ * - diff timestamp is the difference in time from the previous trace event to this event, minus 1. The granularity of the timestamp is programmable
+ * - Fields marked as '*' are first filled with '0' at XMC time and may be filled with real data later at XMD time. Note that the
+ * XMD write may be dropped if the shallow FIFO overflows which leaves the '*' fields as '0'.
+ * - 2 bits (sta) are used not to trace, but to return global state information with each read, encoded as follows:
+ * 0x0=not valid
+ * 0x1=valid, no discontinuity
+ * 0x2=not valid, discontinuity
+ * 0x3=valid, discontinuity
+ * - commands are encoded as follows:
+ * 0x0=DWB
+ * 0x1=PL2
+ * 0x2=PSL1
+ * 0x3=LDD
+ * 0x4=LDI
+ * 0x5=LDT
+ * 0x6=STF
+ * 0x7=STC
+ * 0x8=STP
+ * 0x9=STT
+ * 0xa=IOBLD8
+ * 0xb=IOBLD16
+ * 0xc=IOBLD32
+ * 0xd=IOBLD64
+ * 0xe=IOBST
+ * 0xf=IOBDMA
+ * - In Octeon2 the commands are grouped as follows:
+ * Group1:
+ * XMC_LDT, XMC_LDI, XMC_PL2, XMC_RPL2, XMC_DWB, XMC_WBL2,
+ * XMC_SET8, XMC_SET16, XMC_SET32, XMC_SET64,
+ * XMC_CLR8, XMC_CLR16, XMC_CLR32, XMC_CLR64,
+ * XMC_INCR8, XMC_INCR16, XMC_INCR32, XMC_INCR64,
+ * XMC_DECR8, XMC_DECR16, XMC_DECR32, XMC_DECR64
+ * Group2:
+ * XMC_STF, XMC_STT, XMC_STP, XMC_STC,
+ * XMC_LDD, XMC_PSL1
+ * XMC_SAA32, XMC_SAA64,
+ * XMC_FAA32, XMC_FAA64,
+ * XMC_FAS32, XMC_FAS64
+ * Group3:
+ * XMC_IOBLD8, XMC_IOBLD16, XMC_IOBLD32, XMC_IOBLD64,
+ * XMC_IOBST8, XMC_IOBST16, XMC_IOBST32, XMC_IOBST64
+ * Group4:
+ * XMC_IOBDMA
+ * - For non IOB* commands
+ * - source id is encoded as follows:
+ * 0x00-0x0f=PP[n]
+ * 0x10=IOB(Packet)
+ * 0x11=IOB(PKO)
+ * 0x12=IOB(ReqLoad, ReqStore)
+ * 0x13=IOB(DWB)
+ * 0x14-0x1e=illegal
+ * 0x1f=IOB(generic)
+ * - dest id is unused (can only be L2c)
+ * - For IOB* commands
+ * - source id is encoded as follows:
+ * 0x00-0x0f = PP[n]
+ * - dest id is encoded as follows:
+ * 0 = CIU/GPIO (for CSRs)
+ * 1-2 = illegal
+ * 3 = PCIe (access to RSL-type CSRs)
+ * 4 = KEY (read/write operations)
+ * 5 = FPA (free pool allocate/free operations)
+ * 6 = DFA
+ * 7 = ZIP (doorbell operations)
+ * 8 = RNG (load/IOBDMA operations)
+ * 10 = PKO (doorbell operations)
+ * 11 = illegal
+ * 12 = POW (get work, add work, status/memory/index loads, NULLrd load operations, CSR operations)
+ * 13-31 = illegal
+ * @endverbatim
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_TRA_H__
+#define __CVMX_TRA_H__
+
+#include "cvmx.h"
+#include "cvmx-l2c.h"
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include "cvmx-tra-defs.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* CSR typedefs have been moved to cvmx-tra-defs.h */
+
+/* The 'saa' filter command is renamed as 'saa64' */
+#define CVMX_TRA_FILT_SAA CVMX_TRA_FILT_SAA64
+/* The 'iobst' filter command is renamed as 'iobst64' */
+#define CVMX_TRA_FILT_IOBST CVMX_TRA_FILT_IOBST64
+
+/**
+ * Enumeration of the bitmask of all the filter commands. The bit positions
+ * correspond to the Octeon2 model.
+ */
+typedef enum
+{
+ CVMX_TRA_FILT_NOP = 1ull<<0, /**< none */
+ CVMX_TRA_FILT_LDT = 1ull<<1, /**< don't allocate L2 or L1 */
+ CVMX_TRA_FILT_LDI = 1ull<<2, /**< don't allocate L1 */
+ CVMX_TRA_FILT_PL2 = 1ull<<3, /**< pref L2 */
+ CVMX_TRA_FILT_RPL2 = 1ull<<4, /**< mark for replacement in L2 */
+ CVMX_TRA_FILT_DWB = 1ull<<5, /**< clear L2 dirty bit (no writeback) + RPL2 */
+ CVMX_TRA_FILT_LDD = 1ull<<8, /**< normal load */
+ CVMX_TRA_FILT_PSL1 = 1ull<<9, /**< pref L1, bypass L2 */
+ CVMX_TRA_FILT_IOBDMA = 1ull<<15, /**< store reflection by IOB for prior load */
+ CVMX_TRA_FILT_STF = 1ull<<16, /**< full block store to L2, fill 0's */
+ CVMX_TRA_FILT_STT = 1ull<<17, /**< full block store bypass-L2, fill 0's */
+ CVMX_TRA_FILT_STP = 1ull<<18, /**< partial store to L2 */
+ CVMX_TRA_FILT_STC = 1ull<<19, /**< partial store to L2, if duptag valid */
+ CVMX_TRA_FILT_STFIL1 = 1ull<<20, /**< full block store to L2, fill 0's, invalidate L1 */
+ CVMX_TRA_FILT_STTIL1 = 1ull<<21, /**< full block store bypass-L2, fill 0's, invalidate L1 */
+ CVMX_TRA_FILT_FAS32 = 1ull<<22, /**< to load from and write a word of memory atomically */
+ CVMX_TRA_FILT_FAS64 = 1ull<<23, /**< to load from and write a doubleword of memory atomically */
+ CVMX_TRA_FILT_WBIL2I = 1ull<<24, /**< writeback if dirty, invalidate, clear use bit, by index/way */
+ CVMX_TRA_FILT_LTGL2I = 1ull<<25, /**< read tag @ index/way into CSR */
+ CVMX_TRA_FILT_STGL2I = 1ull<<26, /**< write tag @ index/way from CSR */
+ CVMX_TRA_FILT_INVL2 = 1ull<<28, /**< invalidate, clear use bit, by address (dirty data is LOST) */
+ CVMX_TRA_FILT_WBIL2 = 1ull<<29, /**< writeback if dirty, invalidate, clear use bit, by address */
+ CVMX_TRA_FILT_WBL2 = 1ull<<30, /**< writeback if dirty, make clean, clear use bit, by address */
+ CVMX_TRA_FILT_LCKL2 = 1ull<<31, /**< allocate (if miss), set lock bit, set use bit, by address */
+ CVMX_TRA_FILT_IOBLD8 = 1ull<<32, /**< load reflection 8bit */
+ CVMX_TRA_FILT_IOBLD16 = 1ull<<33, /**< load reflection 16bit */
+ CVMX_TRA_FILT_IOBLD32 = 1ull<<34, /**< load reflection 32bit */
+ CVMX_TRA_FILT_IOBLD64 = 1ull<<35, /**< load reflection 64bit */
+ CVMX_TRA_FILT_IOBST8 = 1ull<<36, /**< store reflection 8bit */
+ CVMX_TRA_FILT_IOBST16 = 1ull<<37, /**< store reflection 16bit */
+ CVMX_TRA_FILT_IOBST32 = 1ull<<38, /**< store reflection 32bit */
+ CVMX_TRA_FILT_IOBST64 = 1ull<<39, /**< store reflection 64bit */
+ CVMX_TRA_FILT_SET8 = 1ull<<40, /**< to load from and write 1's to 8bit of memory atomically */
+ CVMX_TRA_FILT_SET16 = 1ull<<41, /**< to load from and write 1's to 16bit of memory atomically */
+ CVMX_TRA_FILT_SET32 = 1ull<<42, /**< to load from and write 1's to 32bit of memory atomically */
+ CVMX_TRA_FILT_SET64 = 1ull<<43, /**< to load from and write 1's to 64bit of memory atomically */
+ CVMX_TRA_FILT_CLR8 = 1ull<<44, /**< to load from and write 0's to 8bit of memory atomically */
+ CVMX_TRA_FILT_CLR16 = 1ull<<45, /**< to load from and write 0's to 16bit of memory atomically */
+ CVMX_TRA_FILT_CLR32 = 1ull<<46, /**< to load from and write 0's to 32bit of memory atomically */
+ CVMX_TRA_FILT_CLR64 = 1ull<<47, /**< to load from and write 0's to 64bit of memory atomically */
+ CVMX_TRA_FILT_INCR8 = 1ull<<48, /**< to load and increment 8bit of memory atomically */
+ CVMX_TRA_FILT_INCR16 = 1ull<<49, /**< to load and increment 16bit of memory atomically */
+ CVMX_TRA_FILT_INCR32 = 1ull<<50, /**< to load and increment 32bit of memory atomically */
+ CVMX_TRA_FILT_INCR64 = 1ull<<51, /**< to load and increment 64bit of memory atomically */
+ CVMX_TRA_FILT_DECR8 = 1ull<<52, /**< to load and decrement 8bit of memory atomically */
+ CVMX_TRA_FILT_DECR16 = 1ull<<53, /**< to load and decrement 16bit of memory atomically */
+ CVMX_TRA_FILT_DECR32 = 1ull<<54, /**< to load and decrement 32bit of memory atomically */
+ CVMX_TRA_FILT_DECR64 = 1ull<<55, /**< to load and decrement 64bit of memory atomically */
+ CVMX_TRA_FILT_FAA32 = 1ull<<58, /**< to load from and add to a word of memory atomically */
+ CVMX_TRA_FILT_FAA64 = 1ull<<59, /**< to load from and add to a doubleword of memory atomically */
+ CVMX_TRA_FILT_SAA32 = 1ull<<62, /**< to atomically add a word to a memory location */
+ CVMX_TRA_FILT_SAA64 = 1ull<<63, /**< to atomically add a doubleword to a memory location */
+ CVMX_TRA_FILT_ALL = -1ull /**< all the above filter commands */
+} cvmx_tra_filt_t;
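+
+/* Example (illustrative, not from the SDK): the filter values are plain
+   64-bit bitmasks and may be OR'ed together before being passed as the
+   filter argument; the legacy SAA/IOBST names alias the 64-bit variants. */
+#if 0
+static const uint64_t example_filter =
+    CVMX_TRA_FILT_LDD | CVMX_TRA_FILT_STF | CVMX_TRA_FILT_SAA;
+#endif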
+
+/*
+ * Enumeration of the bitmask of all source IDs.
+ */
+typedef enum
+{
+ CVMX_TRA_SID_PP0 = 1ull<<0, /**< Enable tracing from PP0 with matching sourceID */
+ CVMX_TRA_SID_PP1 = 1ull<<1, /**< Enable tracing from PP1 with matching sourceID */
+ CVMX_TRA_SID_PP2 = 1ull<<2, /**< Enable tracing from PP2 with matching sourceID */
+ CVMX_TRA_SID_PP3 = 1ull<<3, /**< Enable tracing from PP3 with matching sourceID */
+ CVMX_TRA_SID_PP4 = 1ull<<4, /**< Enable tracing from PP4 with matching sourceID */
+ CVMX_TRA_SID_PP5 = 1ull<<5, /**< Enable tracing from PP5 with matching sourceID */
+ CVMX_TRA_SID_PP6 = 1ull<<6, /**< Enable tracing from PP6 with matching sourceID */
+ CVMX_TRA_SID_PP7 = 1ull<<7, /**< Enable tracing from PP7 with matching sourceID */
+ CVMX_TRA_SID_PP8 = 1ull<<8, /**< Enable tracing from PP8 with matching sourceID */
+ CVMX_TRA_SID_PP9 = 1ull<<9, /**< Enable tracing from PP9 with matching sourceID */
+ CVMX_TRA_SID_PP10 = 1ull<<10, /**< Enable tracing from PP10 with matching sourceID */
+ CVMX_TRA_SID_PP11 = 1ull<<11, /**< Enable tracing from PP11 with matching sourceID */
+ CVMX_TRA_SID_PP12 = 1ull<<12, /**< Enable tracing from PP12 with matching sourceID */
+ CVMX_TRA_SID_PP13 = 1ull<<13, /**< Enable tracing from PP13 with matching sourceID */
+ CVMX_TRA_SID_PP14 = 1ull<<14, /**< Enable tracing from PP14 with matching sourceID */
+ CVMX_TRA_SID_PP15 = 1ull<<15, /**< Enable tracing from PP15 with matching sourceID */
+ CVMX_TRA_SID_PKI = 1ull<<16, /**< Enable tracing of write requests from PIP/IPD */
+ CVMX_TRA_SID_PKO = 1ull<<17, /**< Enable tracing of write requests from PKO */
+ CVMX_TRA_SID_IOBREQ = 1ull<<18, /**< Enable tracing of write requests from FPA, TIM, DFA, PCI, ZIP, POW, and PKO (writes) */
+ CVMX_TRA_SID_DWB = 1ull<<19, /**< Enable tracing of write requests from IOB DWB engine */
+ CVMX_TRA_SID_ALL = -1ull /**< Enable tracing all the above source commands */
+} cvmx_tra_sid_t;
+
+
+#define CVMX_TRA_DID_SLI CVMX_TRA_DID_PCI /**< Enable tracing of requests to SLI and RSL-type CSRs. */
+/*
+ * Enumeration of the bitmask of all destination IDs.
+ */
+typedef enum
+{
+ CVMX_TRA_DID_MIO = 1ull<<0, /**< Enable tracing of CIU and GPIO CSR's */
+ CVMX_TRA_DID_PCI = 1ull<<3, /**< Enable tracing of requests to PCI and RSL type CSR's */
+ CVMX_TRA_DID_KEY = 1ull<<4, /**< Enable tracing of requests to KEY memory */
+ CVMX_TRA_DID_FPA = 1ull<<5, /**< Enable tracing of requests to FPA */
+ CVMX_TRA_DID_DFA = 1ull<<6, /**< Enable tracing of requests to DFA */
+ CVMX_TRA_DID_ZIP = 1ull<<7, /**< Enable tracing of requests to ZIP */
+ CVMX_TRA_DID_RNG = 1ull<<8, /**< Enable tracing of requests to RNG */
+ CVMX_TRA_DID_IPD = 1ull<<9, /**< Enable tracing of IPD CSR accesses */
+ CVMX_TRA_DID_PKO = 1ull<<10, /**< Enable tracing of PKO accesses (doorbells) */
+ CVMX_TRA_DID_POW = 1ull<<12, /**< Enable tracing of requests to POW */
+ CVMX_TRA_DID_USB0 = 1ull<<13, /**< Enable tracing of USB0 accesses (UAHC0 EHCI and OHCI NCB CSRs) */
+ CVMX_TRA_DID_RAD = 1ull<<14, /**< Enable tracing of RAD accesses (doorbells) */
+ CVMX_TRA_DID_DPI = 1ull<<27, /**< Enable tracing of DPI accesses (DPI NCB CSRs) */
+ CVMX_TRA_DID_FAU = 1ull<<30, /**< Enable tracing FAU accesses */
+ CVMX_TRA_DID_ALL = -1ull /**< Enable tracing all the above destination commands */
+} cvmx_tra_did_t;
+
+/**
+ * TRA data format definition. Use the type field to
+ * determine which union element to use.
+ *
+ * In Octeon 2, the trace buffer is 69 bits,
+ * the first read accesses bits 63:0 of the trace buffer entry, and
+ * the second read accesses bits 68:64 of the trace buffer entry.
+ */
+typedef union
+{
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t datahi;
+ uint64_t data;
+#else
+ uint64_t data;
+ uint64_t datahi;
+#endif
+ } u128;
+
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved3 : 64;
+ uint64_t valid : 1;
+ uint64_t discontinuity:1;
+ uint64_t address : 36;
+ uint64_t reserved : 5;
+ uint64_t source : 5;
+ uint64_t reserved2 : 3;
+ uint64_t type : 5;
+ uint64_t timestamp : 8;
+#else
+ uint64_t timestamp : 8;
+ uint64_t type : 5;
+ uint64_t reserved2 : 3;
+ uint64_t source : 5;
+ uint64_t reserved : 5;
+ uint64_t address : 36;
+ uint64_t discontinuity:1;
+ uint64_t valid : 1;
+ uint64_t reserved3 : 64;
+#endif
+ } cmn; /**< for DWB, PL2, PSL1, LDD, LDI, LDT */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved3 : 64;
+ uint64_t valid : 1;
+ uint64_t discontinuity:1;
+ uint64_t address : 33;
+ uint64_t mask : 8;
+ uint64_t source : 5;
+ uint64_t reserved2 : 3;
+ uint64_t type : 5;
+ uint64_t timestamp : 8;
+#else
+ uint64_t timestamp : 8;
+ uint64_t type : 5;
+ uint64_t reserved2 : 3;
+ uint64_t source : 5;
+ uint64_t mask : 8;
+ uint64_t address : 33;
+ uint64_t discontinuity:1;
+ uint64_t valid : 1;
+ uint64_t reserved3 : 64;
+#endif
+ } store; /**< STC, STF, STP, STT */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved3 : 64;
+ uint64_t valid : 1;
+ uint64_t discontinuity:1;
+ uint64_t address : 36;
+ uint64_t reserved : 2;
+ uint64_t subid : 3;
+ uint64_t source : 4;
+ uint64_t dest : 5;
+ uint64_t type : 4;
+ uint64_t timestamp : 8;
+#else
+ uint64_t timestamp : 8;
+ uint64_t type : 4;
+ uint64_t dest : 5;
+ uint64_t source : 4;
+ uint64_t subid : 3;
+ uint64_t reserved : 2;
+ uint64_t address : 36;
+ uint64_t discontinuity:1;
+ uint64_t valid : 1;
+ uint64_t reserved3 : 64;
+#endif
+ } iobld; /**< for IOBLD8, IOBLD16, IOBLD32, IOBLD64, IOBST, SAA */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved3 : 64;
+ uint64_t valid : 1;
+ uint64_t discontinuity:1;
+ uint64_t address : 33;
+ uint64_t mask : 8;
+ uint64_t source : 4;
+ uint64_t dest : 5;
+ uint64_t type : 4;
+ uint64_t timestamp : 8;
+#else
+ uint64_t timestamp : 8;
+ uint64_t type : 4;
+ uint64_t dest : 5;
+ uint64_t source : 4;
+ uint64_t mask : 8;
+ uint64_t address : 33;
+ uint64_t discontinuity:1;
+ uint64_t valid : 1;
+ uint64_t reserved3 : 64;
+#endif
+ } iob; /**< for IOBDMA */
+
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved1 : 59;
+ uint64_t discontinuity:1;
+ uint64_t valid : 1;
+ uint64_t addresshi : 3; /* Split the address to fit in upper 64 bits */
+ uint64_t addresslo : 35; /* and lower 64-bits. */
+ uint64_t reserved : 10;
+ uint64_t source : 5;
+ uint64_t type : 6;
+ uint64_t timestamp : 8;
+#else
+ uint64_t timestamp : 8;
+ uint64_t type : 6;
+ uint64_t source : 5;
+ uint64_t reserved : 10;
+ uint64_t addresslo : 35;
+ uint64_t addresshi : 3;
+ uint64_t valid : 1;
+ uint64_t discontinuity:1;
+ uint64_t reserved1 : 59;
+#endif
+ } cmn2; /**< for LDT, LDI, PL2, RPL2, DWB, WBL2, WBIL2i, LTGL2i, STGL2i, INVL2, WBIL2, LCKL2, SET*, CLR*, INCR*, DECR* */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved1 : 59;
+ uint64_t discontinuity:1;
+ uint64_t valid : 1;
+ uint64_t addresshi : 3; /* Split the address to fit in upper 64 bits */
+ uint64_t addresslo : 35; /* and lower 64-bits */
+ uint64_t reserved : 2;
+ uint64_t mask : 8;
+ uint64_t source : 5;
+ uint64_t type : 6;
+ uint64_t timestamp : 8;
+#else
+ uint64_t timestamp : 8;
+ uint64_t type : 6;
+ uint64_t source : 5;
+ uint64_t mask : 8;
+ uint64_t reserved : 2;
+ uint64_t addresslo : 35;
+ uint64_t addresshi : 3;
+ uint64_t valid : 1;
+ uint64_t discontinuity:1;
+ uint64_t reserved1 : 59;
+#endif
+ } store2; /**< for STC, STF, STP, STT, LDD, PSL1, SAA32, SAA64, FAA32, FAA64, FAS32, FAS64, STTIL1, STFIL1 */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved1 : 59;
+ uint64_t discontinuity:1;
+ uint64_t valid : 1;
+ uint64_t addresshi : 3; /* Split the address to fit in upper 64 bits */
+ uint64_t addresslo : 35; /* and lower 64-bits */
+ uint64_t reserved : 2;
+ uint64_t subid : 3;
+ uint64_t dest : 5;
+ uint64_t source : 5;
+ uint64_t type : 6;
+ uint64_t timestamp : 8;
+#else
+ uint64_t timestamp : 8;
+ uint64_t type : 6;
+ uint64_t source : 5;
+ uint64_t dest : 5;
+ uint64_t subid : 3;
+ uint64_t reserved : 2;
+ uint64_t addresslo : 35;
+ uint64_t addresshi : 3;
+ uint64_t valid : 1;
+ uint64_t discontinuity:1;
+ uint64_t reserved1 : 59;
+#endif
+ } iobld2; /**< for IOBLD8, IOBLD16, IOBLD32, IOBLD64, IOBST64, IOBST32, IOBST16, IOBST8 */
+ struct
+ {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved1 : 59;
+ uint64_t discontinuity:1;
+ uint64_t valid : 1;
+ uint64_t addresshi : 3; /* Split the address to fit in upper 64 bits */
+ uint64_t addresslo : 32; /* and lower 64-bits */
+ uint64_t mask : 8;
+ uint64_t dest : 5;
+ uint64_t source : 5;
+ uint64_t type : 6;
+ uint64_t timestamp : 8;
+#else
+ uint64_t timestamp : 8;
+ uint64_t type : 6;
+ uint64_t source : 5;
+ uint64_t dest : 5;
+ uint64_t mask : 8;
+ uint64_t addresslo : 32;
+ uint64_t addresshi : 3;
+ uint64_t valid : 1;
+ uint64_t discontinuity:1;
+ uint64_t reserved1 : 59;
+#endif
+ } iob2; /**< for IOBDMA */
+} cvmx_tra_data_t;
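+
+/* Illustrative sketch -- not part of the SDK API. The two "sta" bits in
+   the entry diagrams above land in the valid and discontinuity fields of
+   each view, so an Octeon2 entry can be classified like this. */
+#if 0
+static inline int example_entry_state(cvmx_tra_data_t data, int *discontinuity)
+{
+    if (discontinuity)
+        *discontinuity = data.cmn2.discontinuity; /* sta bit 1 */
+    return data.cmn2.valid;                       /* sta bit 0 */
+}
+#endif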
+
+/* The trace buffer number to use. */
+extern int _cvmx_tra_unit;
+
+/**
+ * Setup the TRA buffer for use
+ *
+ * @param control TRA control setup
+ * @param filter Which events to log
+ * @param source_filter
+ * Source match
+ * @param dest_filter
+ * Destination match
+ * @param address Address compare
+ * @param address_mask
+ * Address mask
+ */
+extern void cvmx_tra_setup(cvmx_tra_ctl_t control, cvmx_tra_filt_t filter,
+ cvmx_tra_sid_t source_filter, cvmx_tra_did_t dest_filter,
+ uint64_t address, uint64_t address_mask);
+
+/**
+ * Setup each TRA buffer for use
+ *
+ * @param tra Which TRA buffer to use (0-3)
+ * @param control TRA control setup
+ * @param filter Which events to log
+ * @param source_filter
+ * Source match
+ * @param dest_filter
+ * Destination match
+ * @param address Address compare
+ * @param address_mask
+ * Address mask
+ */
+extern void cvmx_tra_setup_v2(int tra, cvmx_tra_ctl_t control, cvmx_tra_filt_t filter,
+ cvmx_tra_sid_t source_filter, cvmx_tra_did_t dest_filter,
+ uint64_t address, uint64_t address_mask);
+
+/**
+ * Setup a TRA trigger. How the triggers are used should be
+ * set up using cvmx_tra_setup.
+ *
+ * @param trigger Trigger to setup (0 or 1)
+ * @param filter Which types of events to trigger on
+ * @param source_filter
+ * Source trigger match
+ * @param dest_filter
+ * Destination trigger match
+ * @param address Trigger address compare
+ * @param address_mask
+ * Trigger address mask
+ */
+extern void cvmx_tra_trig_setup(uint64_t trigger, cvmx_tra_filt_t filter,
+ cvmx_tra_sid_t source_filter, cvmx_tra_did_t dest_filter,
+ uint64_t address, uint64_t address_mask);
+
+/**
+ * Setup each TRA trigger. How the triggers are used should be
+ * set up using cvmx_tra_setup.
+ *
+ * @param tra Which TRA buffer to use (0-3)
+ * @param trigger Trigger to setup (0 or 1)
+ * @param filter Which types of events to trigger on
+ * @param source_filter
+ * Source trigger match
+ * @param dest_filter
+ * Destination trigger match
+ * @param address Trigger address compare
+ * @param address_mask
+ * Trigger address mask
+ */
+extern void cvmx_tra_trig_setup_v2(int tra, uint64_t trigger, cvmx_tra_filt_t filter,
+ cvmx_tra_sid_t source_filter, cvmx_tra_did_t dest_filter,
+ uint64_t address, uint64_t address_mask);
+
+/**
+ * Read an entry from the TRA buffer. The trace buffer format is
+ * different on Octeon2, so TRA_READ_DAT must be read twice per entry.
+ *
+ * @return Value read. The high (valid) bit will be zero if there wasn't any data
+ */
+extern cvmx_tra_data_t cvmx_tra_read(void);
+
+/**
+ * Read an entry from the TRA buffer from a given TRA unit.
+ *
+ * @param tra_unit Trace buffer unit to read
+ *
+ * @return Value read. The high (valid) bit will be zero if there wasn't any data
+ */
+cvmx_tra_data_t cvmx_tra_read_v2(int tra_unit);
+
+/**
+ * Decode a TRA entry into human readable output
+ *
+ * @param tra_ctl Trace control setup
+ * @param data Data to decode
+ */
+extern void cvmx_tra_decode_text(cvmx_tra_ctl_t tra_ctl, cvmx_tra_data_t data);
+
+/**
+ * Display the entire trace buffer. It is advised that you
+ * disable the trace buffer before calling this routine;
+ * otherwise it could loop forever, displaying trace data
+ * that it created itself.
+ */
+extern void cvmx_tra_display(void);
+
+/**
+ * Display the entire trace buffer. It is advised that you
+ * disable the trace buffer before calling this routine;
+ * otherwise it could loop forever, displaying trace data
+ * that it created itself.
+ *
+ * @param tra_unit Which TRA buffer to use.
+ */
+extern void cvmx_tra_display_v2(int tra_unit);
+
+/**
+ * Enable or disable the TRA hardware. This applies to all TRA units.
+ *
+ * @param enable 1=enable, 0=disable
+ */
+static inline void cvmx_tra_enable(int enable)
+{
+ cvmx_tra_ctl_t control;
+ int tad;
+
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ {
+ control.u64 = cvmx_read_csr(CVMX_TRAX_CTL(tad));
+ control.s.ena = enable;
+ cvmx_write_csr(CVMX_TRAX_CTL(tad), control.u64);
+ cvmx_read_csr(CVMX_TRAX_CTL(tad));
+ }
+}
+
+/**
+ * Enable or disable a particular TRA hardware
+ *
+ * @param enable 1=enable, 0=disable
+ * @param tra which TRA to enable; CN68XX has 4.
+ */
+static inline void cvmx_tra_enable_v2(int enable, int tra)
+{
+ cvmx_tra_ctl_t control;
+
+ if ((tra + 1) > CVMX_L2C_TADS)
+ {
+ cvmx_dprintf("cvmx_tra_enable: Invalid TRA(%d), max allowed are %d\n", tra, CVMX_L2C_TADS - 1);
+ tra = 0;
+ }
+ control.u64 = cvmx_read_csr(CVMX_TRAX_CTL(tra));
+ control.s.ena = enable;
+ cvmx_write_csr(CVMX_TRAX_CTL(tra), control.u64);
+ cvmx_read_csr(CVMX_TRAX_CTL(tra));
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-tra.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-trax-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-trax-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-trax-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,3591 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-trax-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon trax.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_TRAX_DEFS_H__
+#define __CVMX_TRAX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_BIST_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_BIST_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000010ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000010ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000000ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000000ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_CYCLES_SINCE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_CYCLES_SINCE(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000018ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_CYCLES_SINCE(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000018ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_CYCLES_SINCE1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_CYCLES_SINCE1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000028ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_CYCLES_SINCE1(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000028ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_FILT_ADR_ADR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_FILT_ADR_ADR(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000058ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_FILT_ADR_ADR(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000058ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_FILT_ADR_MSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_FILT_ADR_MSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000060ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_FILT_ADR_MSK(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000060ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_FILT_CMD(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_FILT_CMD(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000040ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_FILT_CMD(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000040ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_FILT_DID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_FILT_DID(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000050ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_FILT_DID(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000050ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_FILT_SID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_FILT_SID(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000048ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_FILT_SID(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000048ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_INT_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_INT_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000008ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_INT_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000008ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_READ_DAT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_READ_DAT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000020ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_READ_DAT(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000020ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_READ_DAT_HI(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_READ_DAT_HI(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000030ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_READ_DAT_HI(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000030ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_TRIG0_ADR_ADR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_TRIG0_ADR_ADR(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000098ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_TRIG0_ADR_ADR(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000098ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_TRIG0_ADR_MSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_TRIG0_ADR_MSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A80000A0ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_TRIG0_ADR_MSK(block_id) (CVMX_ADD_IO_SEG(0x00011800A80000A0ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_TRIG0_CMD(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_TRIG0_CMD(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000080ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_TRIG0_CMD(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000080ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_TRIG0_DID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_TRIG0_DID(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000090ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_TRIG0_DID(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000090ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_TRIG0_SID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_TRIG0_SID(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A8000088ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_TRIG0_SID(block_id) (CVMX_ADD_IO_SEG(0x00011800A8000088ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_TRIG1_ADR_ADR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_TRIG1_ADR_ADR(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A80000D8ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_TRIG1_ADR_ADR(block_id) (CVMX_ADD_IO_SEG(0x00011800A80000D8ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_TRIG1_ADR_MSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_TRIG1_ADR_MSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A80000E0ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_TRIG1_ADR_MSK(block_id) (CVMX_ADD_IO_SEG(0x00011800A80000E0ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_TRIG1_CMD(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_TRIG1_CMD(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A80000C0ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_TRIG1_CMD(block_id) (CVMX_ADD_IO_SEG(0x00011800A80000C0ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_TRIG1_DID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_TRIG1_DID(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A80000D0ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_TRIG1_DID(block_id) (CVMX_ADD_IO_SEG(0x00011800A80000D0ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_TRAX_TRIG1_SID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_TRAX_TRIG1_SID(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800A80000C8ull) + ((block_id) & 3) * 0x100000ull;
+}
+#else
+#define CVMX_TRAX_TRIG1_SID(block_id) (CVMX_ADD_IO_SEG(0x00011800A80000C8ull) + ((block_id) & 3) * 0x100000ull)
+#endif
+
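+/*
+ * Usage sketch, not part of the generated definitions: each accessor above
+ * either expands to a plain address computation or, when
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING is set, first validates block_id against
+ * the running chip model (only CN68XX has TRA blocks 1-3; the (block_id & 3)
+ * masking keeps even an invalid id inside the 0x100000-byte block stride).
+ * Assumes cvmx_read_csr() from cvmx.h is available.
+ */
+static inline uint64_t example_read_trax_filt_cmd(unsigned long block_id)
+{
+    /* Warns via cvmx_warn() on an invalid block_id when checking is on. */
+    return cvmx_read_csr(CVMX_TRAX_FILT_CMD(block_id));
+}
+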
+/**
+ * cvmx_tra#_bist_status
+ *
+ * TRA_BIST_STATUS = Trace Buffer BiST Status
+ *
+ * Description:
+ */
+union cvmx_trax_bist_status {
+ uint64_t u64;
+ struct cvmx_trax_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t tcf : 1; /**< Bist Results for TCF memory
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t tdf1 : 1; /**< Bist Results for TDF memory 1
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t tdf1 : 1;
+ uint64_t tcf : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_trax_bist_status_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t tcf : 1; /**< Bist Results for TCF memory
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t tdf1 : 1; /**< Bist Results for TDF memory 1
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t tdf0 : 1; /**< Bist Results for TDF memory 0
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t tdf0 : 1;
+ uint64_t tdf1 : 1;
+ uint64_t tcf : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn31xx;
+ struct cvmx_trax_bist_status_cn31xx cn38xx;
+ struct cvmx_trax_bist_status_cn31xx cn38xxp2;
+ struct cvmx_trax_bist_status_cn31xx cn52xx;
+ struct cvmx_trax_bist_status_cn31xx cn52xxp1;
+ struct cvmx_trax_bist_status_cn31xx cn56xx;
+ struct cvmx_trax_bist_status_cn31xx cn56xxp1;
+ struct cvmx_trax_bist_status_cn31xx cn58xx;
+ struct cvmx_trax_bist_status_cn31xx cn58xxp1;
+ struct cvmx_trax_bist_status_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t tdf : 1; /**< Bist Results for TDF memory
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t tdf : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn61xx;
+ struct cvmx_trax_bist_status_cn61xx cn63xx;
+ struct cvmx_trax_bist_status_cn61xx cn63xxp1;
+ struct cvmx_trax_bist_status_cn61xx cn66xx;
+ struct cvmx_trax_bist_status_cn61xx cn68xx;
+ struct cvmx_trax_bist_status_cn61xx cn68xxp1;
+ struct cvmx_trax_bist_status_cn61xx cnf71xx;
+};
+typedef union cvmx_trax_bist_status cvmx_trax_bist_status_t;
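+
+/*
+ * Sketch: checking trace-buffer BiST results through the union above.
+ * Illustrative only; assumes cvmx_read_csr() from cvmx.h and the
+ * CVMX_TRAX_BIST_STATUS accessor defined earlier in this header. A set
+ * field means the corresponding memory failed BiST.
+ */
+static inline int example_trax_bist_failed(unsigned long block_id)
+{
+    cvmx_trax_bist_status_t bist;
+    bist.u64 = cvmx_read_csr(CVMX_TRAX_BIST_STATUS(block_id));
+    return bist.s.tcf || bist.s.tdf1;  /* 1=BAD per the field encodings */
+}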
+
+/**
+ * cvmx_tra#_ctl
+ *
+ * TRA_CTL = Trace Buffer Control
+ *
+ * Description:
+ *
+ * Notes:
+ * It is illegal to change the values of WRAP, TRIG_CTL, IGNORE_O while tracing (i.e. when ENA=1).
+ * Note that the following fields are present only in chip revisions beginning with pass2: IGNORE_O
+ */
+union cvmx_trax_ctl {
+ uint64_t u64;
+ struct cvmx_trax_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t rdat_md : 1; /**< TRA_READ_DAT mode bit
+ If set, the TRA_READ_DAT reads will return the lower
+ 64 bits of the TRA entry and the upper bits must be
+ read through TRA_READ_DAT_HI. If not set, the return
+ value from TRA_READ_DAT accesses alternates between
+ the lower and upper bits of the TRA entry. */
+ uint64_t clkalways : 1; /**< Conditional clock enable
+ If set, the TRA clock is never disabled. */
+ uint64_t ignore_o : 1; /**< Ignore overflow during wrap mode
+ If set and wrapping mode is enabled, then tracing
+ will not stop at the overflow condition. Each
+ write during an overflow will overwrite the
+ oldest, unread entry and the read pointer is
+ incremented by one entry. This bit has no effect
+ if WRAP=0. */
+ uint64_t mcd0_ena : 1; /**< MCD0 enable
+ If set and any PP sends the MCD0 signal, the
+ tracing is disabled. */
+ uint64_t mcd0_thr : 1; /**< MCD0_threshold
+ At a fill threshold event, sends an MCD0
+ wire pulse that can cause cores to enter debug
+ mode, if enabled. This MCD0 wire pulse will not
+ occur while (TRA_INT_STATUS.MCD0_THR == 1). */
+ uint64_t mcd0_trg : 1; /**< MCD0_trigger
+ At an end trigger event, sends an MCD0
+ wire pulse that can cause cores to enter debug
+ mode, if enabled. This MCD0 wire pulse will not
+ occur while (TRA_INT_STATUS.MCD0_TRG == 1). */
+ uint64_t ciu_thr : 1; /**< CIU_threshold
+ When set during a fill threshold event,
+ TRA_INT_STATUS[CIU_THR] is set, which can cause
+ core interrupts, if enabled. */
+ uint64_t ciu_trg : 1; /**< CIU_trigger
+ When set during an end trigger event,
+ TRA_INT_STATUS[CIU_TRG] is set, which can cause
+ core interrupts, if enabled. */
+ uint64_t full_thr : 2; /**< Full Threshold
+ 0=none
+ 1=1/2 full
+ 2=3/4 full
+ 3=4/4 full */
+ uint64_t time_grn : 3; /**< Timestamp granularity
+ granularity=8^n cycles, n=0,1,2,3,4,5,6,7 */
+ uint64_t trig_ctl : 2; /**< Trigger Control
+ Note: trigger events are written to the trace
+ 0=no triggers
+ 1=trigger0=start trigger, trigger1=stop trigger
+ 2=(trigger0 || trigger1)=start trigger
+ 3=(trigger0 || trigger1)=stop trigger */
+ uint64_t wrap : 1; /**< Wrap mode
+ When WRAP=0, the trace buffer will disable itself
+ after having logged 1024 entries. When WRAP=1,
+ the trace buffer will never disable itself.
+ In this case, tracing may or may not be
+ temporarily suspended during the overflow
+ condition (see IGNORE_O above).
+ 0=do not wrap
+ 1=wrap */
+ uint64_t ena : 1; /**< Enable Trace
+ Master enable. Tracing only happens when ENA=1.
+ When ENA changes from 0 to 1, the read and write
+ pointers are reset to 0x00 to begin a new trace.
+ The MCD0 event may set ENA=0 (see MCD0_ENA
+ above). When using triggers, tracing occurs only
+ between start and stop triggers (including the
+ triggers themselves).
+ 0=disable
+ 1=enable */
+#else
+ uint64_t ena : 1;
+ uint64_t wrap : 1;
+ uint64_t trig_ctl : 2;
+ uint64_t time_grn : 3;
+ uint64_t full_thr : 2;
+ uint64_t ciu_trg : 1;
+ uint64_t ciu_thr : 1;
+ uint64_t mcd0_trg : 1;
+ uint64_t mcd0_thr : 1;
+ uint64_t mcd0_ena : 1;
+ uint64_t ignore_o : 1;
+ uint64_t clkalways : 1;
+ uint64_t rdat_md : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_trax_ctl_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63 : 49;
+ uint64_t ignore_o : 1; /**< Ignore overflow during wrap mode
+ If set and wrapping mode is enabled, then tracing
+ will not stop at the overflow condition. Each
+ write during an overflow will overwrite the
+ oldest, unread entry and the read pointer is
+ incremented by one entry. This bit has no effect
+ if WRAP=0. */
+ uint64_t mcd0_ena : 1; /**< MCD0 enable
+ If set and any PP sends the MCD0 signal, the
+ tracing is disabled. */
+ uint64_t mcd0_thr : 1; /**< MCD0_threshold
+ At a fill threshold event, sends an MCD0
+ wire pulse that can cause cores to enter debug
+ mode, if enabled. This MCD0 wire pulse will not
+ occur while (TRA(0..0)_INT_STATUS.MCD0_THR == 1). */
+ uint64_t mcd0_trg : 1; /**< MCD0_trigger
+ At an end trigger event, sends an MCD0
+ wire pulse that can cause cores to enter debug
+ mode, if enabled. This MCD0 wire pulse will not
+ occur while (TRA(0..0)_INT_STATUS.MCD0_TRG == 1). */
+ uint64_t ciu_thr : 1; /**< CIU_threshold
+ When set during a fill threshold event,
+ TRA(0..0)_INT_STATUS[CIU_THR] is set, which can cause
+ core interrupts, if enabled. */
+ uint64_t ciu_trg : 1; /**< CIU_trigger
+ When set during an end trigger event,
+ TRA(0..0)_INT_STATUS[CIU_TRG] is set, which can cause
+ core interrupts, if enabled. */
+ uint64_t full_thr : 2; /**< Full Threshold
+ 0=none
+ 1=1/2 full
+ 2=3/4 full
+ 3=4/4 full */
+ uint64_t time_grn : 3; /**< Timestamp granularity
+ granularity=8^n cycles, n=0,1,2,3,4,5,6,7 */
+ uint64_t trig_ctl : 2; /**< Trigger Control
+ Note: trigger events are written to the trace
+ 0=no triggers
+ 1=trigger0=start trigger, trigger1=stop trigger
+ 2=(trigger0 || trigger1)=start trigger
+ 3=(trigger0 || trigger1)=stop trigger */
+ uint64_t wrap : 1; /**< Wrap mode
+ When WRAP=0, the trace buffer will disable itself
+ after having logged 256 entries. When WRAP=1,
+ the trace buffer will never disable itself.
+ In this case, tracing may or may not be
+ temporarily suspended during the overflow
+ condition (see IGNORE_O above).
+ 0=do not wrap
+ 1=wrap */
+ uint64_t ena : 1; /**< Enable Trace
+ Master enable. Tracing only happens when ENA=1.
+ When ENA changes from 0 to 1, the read and write
+ pointers are reset to 0x00 to begin a new trace.
+ The MCD0 event may set ENA=0 (see MCD0_ENA
+ above). When using triggers, tracing occurs only
+ between start and stop triggers (including the
+ triggers themselves).
+ 0=disable
+ 1=enable */
+#else
+ uint64_t ena : 1;
+ uint64_t wrap : 1;
+ uint64_t trig_ctl : 2;
+ uint64_t time_grn : 3;
+ uint64_t full_thr : 2;
+ uint64_t ciu_trg : 1;
+ uint64_t ciu_thr : 1;
+ uint64_t mcd0_trg : 1;
+ uint64_t mcd0_thr : 1;
+ uint64_t mcd0_ena : 1;
+ uint64_t ignore_o : 1;
+ uint64_t reserved_15_63 : 49;
+#endif
+ } cn31xx;
+ struct cvmx_trax_ctl_cn31xx cn38xx;
+ struct cvmx_trax_ctl_cn31xx cn38xxp2;
+ struct cvmx_trax_ctl_cn31xx cn52xx;
+ struct cvmx_trax_ctl_cn31xx cn52xxp1;
+ struct cvmx_trax_ctl_cn31xx cn56xx;
+ struct cvmx_trax_ctl_cn31xx cn56xxp1;
+ struct cvmx_trax_ctl_cn31xx cn58xx;
+ struct cvmx_trax_ctl_cn31xx cn58xxp1;
+ struct cvmx_trax_ctl_s cn61xx;
+ struct cvmx_trax_ctl_s cn63xx;
+ struct cvmx_trax_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t clkalways : 1; /**< Conditional clock enable
+ If set, the TRA clock is never disabled. */
+ uint64_t ignore_o : 1; /**< Ignore overflow during wrap mode
+ If set and wrapping mode is enabled, then tracing
+ will not stop at the overflow condition. Each
+ write during an overflow will overwrite the
+ oldest, unread entry and the read pointer is
+ incremented by one entry. This bit has no effect
+ if WRAP=0. */
+ uint64_t mcd0_ena : 1; /**< MCD0 enable
+ If set and any PP sends the MCD0 signal, the
+ tracing is disabled. */
+ uint64_t mcd0_thr : 1; /**< MCD0_threshold
+ At a fill threshold event, sends an MCD0
+ wire pulse that can cause cores to enter debug
+ mode, if enabled. This MCD0 wire pulse will not
+ occur while (TRA_INT_STATUS.MCD0_THR == 1). */
+ uint64_t mcd0_trg : 1; /**< MCD0_trigger
+ At an end trigger event, sends an MCD0
+ wire pulse that can cause cores to enter debug
+ mode, if enabled. This MCD0 wire pulse will not
+ occur while (TRA_INT_STATUS.MCD0_TRG == 1). */
+ uint64_t ciu_thr : 1; /**< CIU_threshold
+ When set during a fill threshold event,
+ TRA_INT_STATUS[CIU_THR] is set, which can cause
+ core interrupts, if enabled. */
+ uint64_t ciu_trg : 1; /**< CIU_trigger
+ When set during an end trigger event,
+ TRA_INT_STATUS[CIU_TRG] is set, which can cause
+ core interrupts, if enabled. */
+ uint64_t full_thr : 2; /**< Full Threshold
+ 0=none
+ 1=1/2 full
+ 2=3/4 full
+ 3=4/4 full */
+ uint64_t time_grn : 3; /**< Timestamp granularity
+ granularity=8^n cycles, n=0,1,2,3,4,5,6,7 */
+ uint64_t trig_ctl : 2; /**< Trigger Control
+ Note: trigger events are written to the trace
+ 0=no triggers
+ 1=trigger0=start trigger, trigger1=stop trigger
+ 2=(trigger0 || trigger1)=start trigger
+ 3=(trigger0 || trigger1)=stop trigger */
+ uint64_t wrap : 1; /**< Wrap mode
+ When WRAP=0, the trace buffer will disable itself
+ after having logged 1024 entries. When WRAP=1,
+ the trace buffer will never disable itself.
+ In this case, tracing may or may not be
+ temporarily suspended during the overflow
+ condition (see IGNORE_O above).
+ 0=do not wrap
+ 1=wrap */
+ uint64_t ena : 1; /**< Enable Trace
+ Master enable. Tracing only happens when ENA=1.
+ When ENA changes from 0 to 1, the read and write
+ pointers are reset to 0x00 to begin a new trace.
+ The MCD0 event may set ENA=0 (see MCD0_ENA
+ above). When using triggers, tracing occurs only
+ between start and stop triggers (including the
+ triggers themselves).
+ 0=disable
+ 1=enable */
+#else
+ uint64_t ena : 1;
+ uint64_t wrap : 1;
+ uint64_t trig_ctl : 2;
+ uint64_t time_grn : 3;
+ uint64_t full_thr : 2;
+ uint64_t ciu_trg : 1;
+ uint64_t ciu_thr : 1;
+ uint64_t mcd0_trg : 1;
+ uint64_t mcd0_thr : 1;
+ uint64_t mcd0_ena : 1;
+ uint64_t ignore_o : 1;
+ uint64_t clkalways : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn63xxp1;
+ struct cvmx_trax_ctl_s cn66xx;
+ struct cvmx_trax_ctl_s cn68xx;
+ struct cvmx_trax_ctl_s cn68xxp1;
+ struct cvmx_trax_ctl_s cnf71xx;
+};
+typedef union cvmx_trax_ctl cvmx_trax_ctl_t;
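+
+/*
+ * Sketch: arming the trace buffer in wrap mode through the union above.
+ * Illustrative, not an SDK routine; assumes cvmx_read_csr()/cvmx_write_csr()
+ * from cvmx.h and the CVMX_TRAX_CTL accessor defined earlier in this header.
+ * Per the note above, WRAP/TRIG_CTL/IGNORE_O are only changed while ENA=0.
+ */
+static inline void example_trax_start_wrapping(unsigned long block_id)
+{
+    cvmx_trax_ctl_t ctl;
+    ctl.u64 = cvmx_read_csr(CVMX_TRAX_CTL(block_id));
+    ctl.s.ena = 0;                      /* stop tracing before reconfiguring */
+    cvmx_write_csr(CVMX_TRAX_CTL(block_id), ctl.u64);
+    ctl.s.wrap = 1;                     /* never self-disable when full */
+    ctl.s.ignore_o = 1;                 /* keep tracing through overflow */
+    ctl.s.trig_ctl = 0;                 /* no start/stop triggers */
+    ctl.s.ena = 1;                      /* resets rptr/wptr and starts */
+    cvmx_write_csr(CVMX_TRAX_CTL(block_id), ctl.u64);
+}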
+
+/**
+ * cvmx_tra#_cycles_since
+ *
+ * TRA_CYCLES_SINCE = Trace Buffer Cycles Since Last Write, Read/Write pointers
+ *
+ * Description:
+ *
+ * Notes:
+ * This CSR is obsolete. Use TRA_CYCLES_SINCE1 instead.
+ *
+ */
+union cvmx_trax_cycles_since {
+ uint64_t u64;
+ struct cvmx_trax_cycles_since_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cycles : 48; /**< Cycles since the last entry was written */
+ uint64_t rptr : 8; /**< Read pointer */
+ uint64_t wptr : 8; /**< Write pointer */
+#else
+ uint64_t wptr : 8;
+ uint64_t rptr : 8;
+ uint64_t cycles : 48;
+#endif
+ } s;
+ struct cvmx_trax_cycles_since_s cn31xx;
+ struct cvmx_trax_cycles_since_s cn38xx;
+ struct cvmx_trax_cycles_since_s cn38xxp2;
+ struct cvmx_trax_cycles_since_s cn52xx;
+ struct cvmx_trax_cycles_since_s cn52xxp1;
+ struct cvmx_trax_cycles_since_s cn56xx;
+ struct cvmx_trax_cycles_since_s cn56xxp1;
+ struct cvmx_trax_cycles_since_s cn58xx;
+ struct cvmx_trax_cycles_since_s cn58xxp1;
+ struct cvmx_trax_cycles_since_s cn61xx;
+ struct cvmx_trax_cycles_since_s cn63xx;
+ struct cvmx_trax_cycles_since_s cn63xxp1;
+ struct cvmx_trax_cycles_since_s cn66xx;
+ struct cvmx_trax_cycles_since_s cn68xx;
+ struct cvmx_trax_cycles_since_s cn68xxp1;
+ struct cvmx_trax_cycles_since_s cnf71xx;
+};
+typedef union cvmx_trax_cycles_since cvmx_trax_cycles_since_t;
+
+/**
+ * cvmx_tra#_cycles_since1
+ *
+ * TRA_CYCLES_SINCE1 = Trace Buffer Cycles Since Last Write, Read/Write pointers
+ *
+ * Description:
+ */
+union cvmx_trax_cycles_since1 {
+ uint64_t u64;
+ struct cvmx_trax_cycles_since1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cycles : 40; /**< Cycles since the last entry was written */
+ uint64_t reserved_22_23 : 2;
+ uint64_t rptr : 10; /**< Read pointer */
+ uint64_t reserved_10_11 : 2;
+ uint64_t wptr : 10; /**< Write pointer */
+#else
+ uint64_t wptr : 10;
+ uint64_t reserved_10_11 : 2;
+ uint64_t rptr : 10;
+ uint64_t reserved_22_23 : 2;
+ uint64_t cycles : 40;
+#endif
+ } s;
+ struct cvmx_trax_cycles_since1_s cn52xx;
+ struct cvmx_trax_cycles_since1_s cn52xxp1;
+ struct cvmx_trax_cycles_since1_s cn56xx;
+ struct cvmx_trax_cycles_since1_s cn56xxp1;
+ struct cvmx_trax_cycles_since1_s cn58xx;
+ struct cvmx_trax_cycles_since1_s cn58xxp1;
+ struct cvmx_trax_cycles_since1_s cn61xx;
+ struct cvmx_trax_cycles_since1_s cn63xx;
+ struct cvmx_trax_cycles_since1_s cn63xxp1;
+ struct cvmx_trax_cycles_since1_s cn66xx;
+ struct cvmx_trax_cycles_since1_s cn68xx;
+ struct cvmx_trax_cycles_since1_s cn68xxp1;
+ struct cvmx_trax_cycles_since1_s cnf71xx;
+};
+typedef union cvmx_trax_cycles_since1 cvmx_trax_cycles_since1_t;
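+
+/*
+ * Sketch (the occupancy formula is an assumption, not SDK-documented): with
+ * the pointer fields above, unread entries can be estimated as the
+ * wrap-around distance from RPTR to WPTR, modulo the 1024-entry buffer
+ * documented for TRA_READ_DAT. Assumes cvmx_read_csr() from cvmx.h and the
+ * CVMX_TRAX_CYCLES_SINCE1 accessor defined earlier in this header.
+ */
+static inline unsigned example_trax_entries_pending(unsigned long block_id)
+{
+    cvmx_trax_cycles_since1_t cs;
+    cs.u64 = cvmx_read_csr(CVMX_TRAX_CYCLES_SINCE1(block_id));
+    return (unsigned)((cs.s.wptr - cs.s.rptr) & 0x3ff);  /* mod 1024 */
+}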
+
+/**
+ * cvmx_tra#_filt_adr_adr
+ *
+ * TRA_FILT_ADR_ADR = Trace Buffer Filter Address Address
+ *
+ * Description:
+ */
+union cvmx_trax_filt_adr_adr {
+ uint64_t u64;
+ struct cvmx_trax_filt_adr_adr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t adr : 38; /**< Unmasked Address
+ The combination of TRA_FILT_ADR_ADR and
+ TRA_FILT_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches */
+#else
+ uint64_t adr : 38;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_trax_filt_adr_adr_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t adr : 36; /**< Unmasked Address
+ The combination of TRA(0..0)_FILT_ADR_ADR and
+ TRA(0..0)_FILT_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches */
+#else
+ uint64_t adr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn31xx;
+ struct cvmx_trax_filt_adr_adr_cn31xx cn38xx;
+ struct cvmx_trax_filt_adr_adr_cn31xx cn38xxp2;
+ struct cvmx_trax_filt_adr_adr_cn31xx cn52xx;
+ struct cvmx_trax_filt_adr_adr_cn31xx cn52xxp1;
+ struct cvmx_trax_filt_adr_adr_cn31xx cn56xx;
+ struct cvmx_trax_filt_adr_adr_cn31xx cn56xxp1;
+ struct cvmx_trax_filt_adr_adr_cn31xx cn58xx;
+ struct cvmx_trax_filt_adr_adr_cn31xx cn58xxp1;
+ struct cvmx_trax_filt_adr_adr_s cn61xx;
+ struct cvmx_trax_filt_adr_adr_s cn63xx;
+ struct cvmx_trax_filt_adr_adr_s cn63xxp1;
+ struct cvmx_trax_filt_adr_adr_s cn66xx;
+ struct cvmx_trax_filt_adr_adr_s cn68xx;
+ struct cvmx_trax_filt_adr_adr_s cn68xxp1;
+ struct cvmx_trax_filt_adr_adr_s cnf71xx;
+};
+typedef union cvmx_trax_filt_adr_adr cvmx_trax_filt_adr_adr_t;
+
+/**
+ * cvmx_tra#_filt_adr_msk
+ *
+ * TRA_FILT_ADR_MSK = Trace Buffer Filter Address Mask
+ *
+ * Description:
+ */
+union cvmx_trax_filt_adr_msk {
+ uint64_t u64;
+ struct cvmx_trax_filt_adr_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t adr : 38; /**< Address Mask
+ The combination of TRA_FILT_ADR_ADR and
+ TRA_FILT_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches. When a mask bit is not
+ set, the corresponding address bits are assumed
+ to match. Also, note that IOBDMAs do not have
+ proper addresses, so when TRA_FILT_CMD[IOBDMA]
+ is set, TRA_FILT_ADR_MSK must be zero to
+ guarantee that any IOBDMAs enter the trace. */
+#else
+ uint64_t adr : 38;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_trax_filt_adr_msk_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t adr : 36; /**< Address Mask
+ The combination of TRA(0..0)_FILT_ADR_ADR and
+ TRA(0..0)_FILT_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches. When a mask bit is not
+ set, the corresponding address bits are assumed
+ to match. Also, note that IOBDMAs do not have
+ proper addresses, so when TRA(0..0)_FILT_CMD[IOBDMA]
+ is set, TRA(0..0)_FILT_ADR_MSK must be zero to
+ guarantee that any IOBDMAs enter the trace. */
+#else
+ uint64_t adr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn31xx;
+ struct cvmx_trax_filt_adr_msk_cn31xx cn38xx;
+ struct cvmx_trax_filt_adr_msk_cn31xx cn38xxp2;
+ struct cvmx_trax_filt_adr_msk_cn31xx cn52xx;
+ struct cvmx_trax_filt_adr_msk_cn31xx cn52xxp1;
+ struct cvmx_trax_filt_adr_msk_cn31xx cn56xx;
+ struct cvmx_trax_filt_adr_msk_cn31xx cn56xxp1;
+ struct cvmx_trax_filt_adr_msk_cn31xx cn58xx;
+ struct cvmx_trax_filt_adr_msk_cn31xx cn58xxp1;
+ struct cvmx_trax_filt_adr_msk_s cn61xx;
+ struct cvmx_trax_filt_adr_msk_s cn63xx;
+ struct cvmx_trax_filt_adr_msk_s cn63xxp1;
+ struct cvmx_trax_filt_adr_msk_s cn66xx;
+ struct cvmx_trax_filt_adr_msk_s cn68xx;
+ struct cvmx_trax_filt_adr_msk_s cn68xxp1;
+ struct cvmx_trax_filt_adr_msk_s cnf71xx;
+};
+typedef union cvmx_trax_filt_adr_msk cvmx_trax_filt_adr_msk_t;
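+
+/*
+ * Sketch of the matching rule described above: a command address passes the
+ * filter when every bit selected by TRA_FILT_ADR_MSK equals the
+ * corresponding TRA_FILT_ADR_ADR bit; mask bits that are clear always match.
+ * Pure helper with hypothetical inputs; no hardware access.
+ */
+static inline int example_trax_adr_matches(uint64_t cmd_adr,
+                                           uint64_t filt_adr,
+                                           uint64_t filt_msk)
+{
+    return ((cmd_adr ^ filt_adr) & filt_msk) == 0;
+}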
+
+/**
+ * cvmx_tra#_filt_cmd
+ *
+ * TRA_FILT_CMD = Trace Buffer Filter Command Mask
+ *
+ * Description:
+ *
+ * Notes:
+ * Note that the trace buffer does not do proper IOBDMA address compares. Thus, if IOBDMA is set, then
+ * the address compare must be disabled (i.e. TRA_FILT_ADR_MSK set to zero) to guarantee that IOBDMAs
+ * enter the trace.
+ */
+union cvmx_trax_filt_cmd {
+ uint64_t u64;
+ struct cvmx_trax_filt_cmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t saa64 : 1; /**< Enable SAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t saa32 : 1; /**< Enable SAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_60_61 : 2;
+ uint64_t faa64 : 1; /**< Enable FAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t faa32 : 1; /**< Enable FAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_56_57 : 2;
+ uint64_t decr64 : 1; /**< Enable DECR64 tracing
+ 0=disable, 1=enable */
+ uint64_t decr32 : 1; /**< Enable DECR32 tracing
+ 0=disable, 1=enable */
+ uint64_t decr16 : 1; /**< Enable DECR16 tracing
+ 0=disable, 1=enable */
+ uint64_t decr8 : 1; /**< Enable DECR8 tracing
+ 0=disable, 1=enable */
+ uint64_t incr64 : 1; /**< Enable INCR64 tracing
+ 0=disable, 1=enable */
+ uint64_t incr32 : 1; /**< Enable INCR32 tracing
+ 0=disable, 1=enable */
+ uint64_t incr16 : 1; /**< Enable INCR16 tracing
+ 0=disable, 1=enable */
+ uint64_t incr8 : 1; /**< Enable INCR8 tracing
+ 0=disable, 1=enable */
+ uint64_t clr64 : 1; /**< Enable CLR64 tracing
+ 0=disable, 1=enable */
+ uint64_t clr32 : 1; /**< Enable CLR32 tracing
+ 0=disable, 1=enable */
+ uint64_t clr16 : 1; /**< Enable CLR16 tracing
+ 0=disable, 1=enable */
+ uint64_t clr8 : 1; /**< Enable CLR8 tracing
+ 0=disable, 1=enable */
+ uint64_t set64 : 1; /**< Enable SET64 tracing
+ 0=disable, 1=enable */
+ uint64_t set32 : 1; /**< Enable SET32 tracing
+ 0=disable, 1=enable */
+ uint64_t set16 : 1; /**< Enable SET16 tracing
+ 0=disable, 1=enable */
+ uint64_t set8 : 1; /**< Enable SET8 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst64 : 1; /**< Enable IOBST64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst32 : 1; /**< Enable IOBST32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst16 : 1; /**< Enable IOBST16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst8 : 1; /**< Enable IOBST8 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_32_35 : 4;
+ uint64_t lckl2 : 1; /**< Enable LCKL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbl2 : 1; /**< Enable WBL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2 : 1; /**< Enable WBIL2 tracing
+ 0=disable, 1=enable */
+ uint64_t invl2 : 1; /**< Enable INVL2 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_27_27 : 1;
+ uint64_t stgl2i : 1; /**< Enable STGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t ltgl2i : 1; /**< Enable LTGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2i : 1; /**< Enable WBIL2I tracing
+ 0=disable, 1=enable */
+ uint64_t fas64 : 1; /**< Enable FAS64 tracing
+ 0=disable, 1=enable */
+ uint64_t fas32 : 1; /**< Enable FAS32 tracing
+ 0=disable, 1=enable */
+ uint64_t sttil1 : 1; /**< Enable STTIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t stfil1 : 1; /**< Enable STFIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_16_19 : 4;
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t iobst : 1; /**< Enable IOBST tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_0_13 : 14;
+#else
+ uint64_t reserved_0_13 : 14;
+ uint64_t iobst : 1;
+ uint64_t iobdma : 1;
+ uint64_t reserved_16_19 : 4;
+ uint64_t stfil1 : 1;
+ uint64_t sttil1 : 1;
+ uint64_t fas32 : 1;
+ uint64_t fas64 : 1;
+ uint64_t wbil2i : 1;
+ uint64_t ltgl2i : 1;
+ uint64_t stgl2i : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t invl2 : 1;
+ uint64_t wbil2 : 1;
+ uint64_t wbl2 : 1;
+ uint64_t lckl2 : 1;
+ uint64_t reserved_32_35 : 4;
+ uint64_t iobst8 : 1;
+ uint64_t iobst16 : 1;
+ uint64_t iobst32 : 1;
+ uint64_t iobst64 : 1;
+ uint64_t set8 : 1;
+ uint64_t set16 : 1;
+ uint64_t set32 : 1;
+ uint64_t set64 : 1;
+ uint64_t clr8 : 1;
+ uint64_t clr16 : 1;
+ uint64_t clr32 : 1;
+ uint64_t clr64 : 1;
+ uint64_t incr8 : 1;
+ uint64_t incr16 : 1;
+ uint64_t incr32 : 1;
+ uint64_t incr64 : 1;
+ uint64_t decr8 : 1;
+ uint64_t decr16 : 1;
+ uint64_t decr32 : 1;
+ uint64_t decr64 : 1;
+ uint64_t reserved_56_57 : 2;
+ uint64_t faa32 : 1;
+ uint64_t faa64 : 1;
+ uint64_t reserved_60_61 : 2;
+ uint64_t saa32 : 1;
+ uint64_t saa64 : 1;
+#endif
+ } s;
+ struct cvmx_trax_filt_cmd_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t iobst : 1; /**< Enable IOBST tracing
+ 0=disable, 1=enable */
+ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing
+ 0=disable, 1=enable */
+ uint64_t stt : 1; /**< Enable STT tracing
+ 0=disable, 1=enable */
+ uint64_t stp : 1; /**< Enable STP tracing
+ 0=disable, 1=enable */
+ uint64_t stc : 1; /**< Enable STC tracing
+ 0=disable, 1=enable */
+ uint64_t stf : 1; /**< Enable STF tracing
+ 0=disable, 1=enable */
+ uint64_t ldt : 1; /**< Enable LDT tracing
+ 0=disable, 1=enable */
+ uint64_t ldi : 1; /**< Enable LDI tracing
+ 0=disable, 1=enable */
+ uint64_t ldd : 1; /**< Enable LDD tracing
+ 0=disable, 1=enable */
+ uint64_t psl1 : 1; /**< Enable PSL1 tracing
+ 0=disable, 1=enable */
+ uint64_t pl2 : 1; /**< Enable PL2 tracing
+ 0=disable, 1=enable */
+ uint64_t dwb : 1; /**< Enable DWB tracing
+ 0=disable, 1=enable */
+#else
+ uint64_t dwb : 1;
+ uint64_t pl2 : 1;
+ uint64_t psl1 : 1;
+ uint64_t ldd : 1;
+ uint64_t ldi : 1;
+ uint64_t ldt : 1;
+ uint64_t stf : 1;
+ uint64_t stc : 1;
+ uint64_t stp : 1;
+ uint64_t stt : 1;
+ uint64_t iobld8 : 1;
+ uint64_t iobld16 : 1;
+ uint64_t iobld32 : 1;
+ uint64_t iobld64 : 1;
+ uint64_t iobst : 1;
+ uint64_t iobdma : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn31xx;
+ struct cvmx_trax_filt_cmd_cn31xx cn38xx;
+ struct cvmx_trax_filt_cmd_cn31xx cn38xxp2;
+ struct cvmx_trax_filt_cmd_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t saa : 1; /**< Enable SAA tracing
+ 0=disable, 1=enable */
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t iobst : 1; /**< Enable IOBST tracing
+ 0=disable, 1=enable */
+ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing
+ 0=disable, 1=enable */
+ uint64_t stt : 1; /**< Enable STT tracing
+ 0=disable, 1=enable */
+ uint64_t stp : 1; /**< Enable STP tracing
+ 0=disable, 1=enable */
+ uint64_t stc : 1; /**< Enable STC tracing
+ 0=disable, 1=enable */
+ uint64_t stf : 1; /**< Enable STF tracing
+ 0=disable, 1=enable */
+ uint64_t ldt : 1; /**< Enable LDT tracing
+ 0=disable, 1=enable */
+ uint64_t ldi : 1; /**< Enable LDI tracing
+ 0=disable, 1=enable */
+ uint64_t ldd : 1; /**< Enable LDD tracing
+ 0=disable, 1=enable */
+ uint64_t psl1 : 1; /**< Enable PSL1 tracing
+ 0=disable, 1=enable */
+ uint64_t pl2 : 1; /**< Enable PL2 tracing
+ 0=disable, 1=enable */
+ uint64_t dwb : 1; /**< Enable DWB tracing
+ 0=disable, 1=enable */
+#else
+ uint64_t dwb : 1;
+ uint64_t pl2 : 1;
+ uint64_t psl1 : 1;
+ uint64_t ldd : 1;
+ uint64_t ldi : 1;
+ uint64_t ldt : 1;
+ uint64_t stf : 1;
+ uint64_t stc : 1;
+ uint64_t stp : 1;
+ uint64_t stt : 1;
+ uint64_t iobld8 : 1;
+ uint64_t iobld16 : 1;
+ uint64_t iobld32 : 1;
+ uint64_t iobld64 : 1;
+ uint64_t iobst : 1;
+ uint64_t iobdma : 1;
+ uint64_t saa : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn52xx;
+ struct cvmx_trax_filt_cmd_cn52xx cn52xxp1;
+ struct cvmx_trax_filt_cmd_cn52xx cn56xx;
+ struct cvmx_trax_filt_cmd_cn52xx cn56xxp1;
+ struct cvmx_trax_filt_cmd_cn52xx cn58xx;
+ struct cvmx_trax_filt_cmd_cn52xx cn58xxp1;
+ struct cvmx_trax_filt_cmd_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t saa64 : 1; /**< Enable SAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t saa32 : 1; /**< Enable SAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_60_61 : 2;
+ uint64_t faa64 : 1; /**< Enable FAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t faa32 : 1; /**< Enable FAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_56_57 : 2;
+ uint64_t decr64 : 1; /**< Enable DECR64 tracing
+ 0=disable, 1=enable */
+ uint64_t decr32 : 1; /**< Enable DECR32 tracing
+ 0=disable, 1=enable */
+ uint64_t decr16 : 1; /**< Enable DECR16 tracing
+ 0=disable, 1=enable */
+ uint64_t decr8 : 1; /**< Enable DECR8 tracing
+ 0=disable, 1=enable */
+ uint64_t incr64 : 1; /**< Enable INCR64 tracing
+ 0=disable, 1=enable */
+ uint64_t incr32 : 1; /**< Enable INCR32 tracing
+ 0=disable, 1=enable */
+ uint64_t incr16 : 1; /**< Enable INCR16 tracing
+ 0=disable, 1=enable */
+ uint64_t incr8 : 1; /**< Enable INCR8 tracing
+ 0=disable, 1=enable */
+ uint64_t clr64 : 1; /**< Enable CLR64 tracing
+ 0=disable, 1=enable */
+ uint64_t clr32 : 1; /**< Enable CLR32 tracing
+ 0=disable, 1=enable */
+ uint64_t clr16 : 1; /**< Enable CLR16 tracing
+ 0=disable, 1=enable */
+ uint64_t clr8 : 1; /**< Enable CLR8 tracing
+ 0=disable, 1=enable */
+ uint64_t set64 : 1; /**< Enable SET64 tracing
+ 0=disable, 1=enable */
+ uint64_t set32 : 1; /**< Enable SET32 tracing
+ 0=disable, 1=enable */
+ uint64_t set16 : 1; /**< Enable SET16 tracing
+ 0=disable, 1=enable */
+ uint64_t set8 : 1; /**< Enable SET8 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst64 : 1; /**< Enable IOBST64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst32 : 1; /**< Enable IOBST32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst16 : 1; /**< Enable IOBST16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst8 : 1; /**< Enable IOBST8 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing
+ 0=disable, 1=enable */
+ uint64_t lckl2 : 1; /**< Enable LCKL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbl2 : 1; /**< Enable WBL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2 : 1; /**< Enable WBIL2 tracing
+ 0=disable, 1=enable */
+ uint64_t invl2 : 1; /**< Enable INVL2 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_27_27 : 1;
+ uint64_t stgl2i : 1; /**< Enable STGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t ltgl2i : 1; /**< Enable LTGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2i : 1; /**< Enable WBIL2I tracing
+ 0=disable, 1=enable */
+ uint64_t fas64 : 1; /**< Enable FAS64 tracing
+ 0=disable, 1=enable */
+ uint64_t fas32 : 1; /**< Enable FAS32 tracing
+ 0=disable, 1=enable */
+ uint64_t sttil1 : 1; /**< Enable STTIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t stfil1 : 1; /**< Enable STFIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t stc : 1; /**< Enable STC tracing
+ 0=disable, 1=enable */
+ uint64_t stp : 1; /**< Enable STP tracing
+ 0=disable, 1=enable */
+ uint64_t stt : 1; /**< Enable STT tracing
+ 0=disable, 1=enable */
+ uint64_t stf : 1; /**< Enable STF tracing
+ 0=disable, 1=enable */
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_10_14 : 5;
+ uint64_t psl1 : 1; /**< Enable PSL1 tracing
+ 0=disable, 1=enable */
+ uint64_t ldd : 1; /**< Enable LDD tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_6_7 : 2;
+ uint64_t dwb : 1; /**< Enable DWB tracing
+ 0=disable, 1=enable */
+ uint64_t rpl2 : 1; /**< Enable RPL2 tracing
+ 0=disable, 1=enable */
+ uint64_t pl2 : 1; /**< Enable PL2 tracing
+ 0=disable, 1=enable */
+ uint64_t ldi : 1; /**< Enable LDI tracing
+ 0=disable, 1=enable */
+ uint64_t ldt : 1; /**< Enable LDT tracing
+ 0=disable, 1=enable */
+ uint64_t nop : 1; /**< Enable NOP tracing
+ 0=disable, 1=enable */
+#else
+ uint64_t nop : 1;
+ uint64_t ldt : 1;
+ uint64_t ldi : 1;
+ uint64_t pl2 : 1;
+ uint64_t rpl2 : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t ldd : 1;
+ uint64_t psl1 : 1;
+ uint64_t reserved_10_14 : 5;
+ uint64_t iobdma : 1;
+ uint64_t stf : 1;
+ uint64_t stt : 1;
+ uint64_t stp : 1;
+ uint64_t stc : 1;
+ uint64_t stfil1 : 1;
+ uint64_t sttil1 : 1;
+ uint64_t fas32 : 1;
+ uint64_t fas64 : 1;
+ uint64_t wbil2i : 1;
+ uint64_t ltgl2i : 1;
+ uint64_t stgl2i : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t invl2 : 1;
+ uint64_t wbil2 : 1;
+ uint64_t wbl2 : 1;
+ uint64_t lckl2 : 1;
+ uint64_t iobld8 : 1;
+ uint64_t iobld16 : 1;
+ uint64_t iobld32 : 1;
+ uint64_t iobld64 : 1;
+ uint64_t iobst8 : 1;
+ uint64_t iobst16 : 1;
+ uint64_t iobst32 : 1;
+ uint64_t iobst64 : 1;
+ uint64_t set8 : 1;
+ uint64_t set16 : 1;
+ uint64_t set32 : 1;
+ uint64_t set64 : 1;
+ uint64_t clr8 : 1;
+ uint64_t clr16 : 1;
+ uint64_t clr32 : 1;
+ uint64_t clr64 : 1;
+ uint64_t incr8 : 1;
+ uint64_t incr16 : 1;
+ uint64_t incr32 : 1;
+ uint64_t incr64 : 1;
+ uint64_t decr8 : 1;
+ uint64_t decr16 : 1;
+ uint64_t decr32 : 1;
+ uint64_t decr64 : 1;
+ uint64_t reserved_56_57 : 2;
+ uint64_t faa32 : 1;
+ uint64_t faa64 : 1;
+ uint64_t reserved_60_61 : 2;
+ uint64_t saa32 : 1;
+ uint64_t saa64 : 1;
+#endif
+ } cn61xx;
+ struct cvmx_trax_filt_cmd_cn61xx cn63xx;
+ struct cvmx_trax_filt_cmd_cn61xx cn63xxp1;
+ struct cvmx_trax_filt_cmd_cn61xx cn66xx;
+ struct cvmx_trax_filt_cmd_cn61xx cn68xx;
+ struct cvmx_trax_filt_cmd_cn61xx cn68xxp1;
+ struct cvmx_trax_filt_cmd_cn61xx cnf71xx;
+};
+typedef union cvmx_trax_filt_cmd cvmx_trax_filt_cmd_t;
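+
+/*
+ * Sketch: restricting the trace to a couple of store command types using
+ * the cn61xx layout of the union above. Illustrative only; assumes
+ * cvmx_write_csr() from cvmx.h and the CVMX_TRAX_FILT_CMD accessor defined
+ * earlier in this header. Per the note above, if IOBDMA were enabled here,
+ * TRA_FILT_ADR_MSK would have to be zero.
+ */
+static inline void example_trax_filter_stores(unsigned long block_id)
+{
+    cvmx_trax_filt_cmd_t cmd;
+    cmd.u64 = 0;                 /* start with every command type disabled */
+    cmd.cn61xx.stf = 1;          /* trace STF stores */
+    cmd.cn61xx.stt = 1;          /* trace STT stores */
+    cvmx_write_csr(CVMX_TRAX_FILT_CMD(block_id), cmd.u64);
+}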
+
+/**
+ * cvmx_tra#_filt_did
+ *
+ * TRA_FILT_DID = Trace Buffer Filter DestinationId Mask
+ *
+ * Description:
+ */
+union cvmx_trax_filt_did {
+ uint64_t u64;
+ struct cvmx_trax_filt_did_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t pow : 1; /**< Enable tracing of requests to POW
+ (get work, add work, status/memory/index
+ loads, NULLRd loads, CSR's) */
+ uint64_t reserved_9_11 : 3;
+ uint64_t rng : 1; /**< Enable tracing of requests to RNG
+ (loads/IOBDMA's are legal) */
+ uint64_t zip : 1; /**< Enable tracing of requests to ZIP
+ (doorbell stores are legal) */
+ uint64_t dfa : 1; /**< Enable tracing of requests to DFA
+ (CSR's and operations are legal) */
+ uint64_t fpa : 1; /**< Enable tracing of requests to FPA
+ (alloc's (loads/IOBDMA's), frees (stores) are legal) */
+ uint64_t key : 1; /**< Enable tracing of requests to KEY memory
+ (loads/IOBDMA's/stores are legal) */
+ uint64_t reserved_3_3 : 1;
+ uint64_t illegal3 : 2; /**< Illegal destinations */
+ uint64_t mio : 1; /**< Enable tracing of MIO accesses
+ (CIU and GPIO CSR's, boot bus accesses) */
+#else
+ uint64_t mio : 1;
+ uint64_t illegal3 : 2;
+ uint64_t reserved_3_3 : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rng : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t pow : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_trax_filt_did_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t illegal : 19; /**< Illegal destinations */
+ uint64_t pow : 1; /**< Enable tracing of requests to POW
+ (get work, add work, status/memory/index
+ loads, NULLRd loads, CSR's) */
+ uint64_t illegal2 : 3; /**< Illegal destinations */
+ uint64_t rng : 1; /**< Enable tracing of requests to RNG
+ (loads/IOBDMA's are legal) */
+ uint64_t zip : 1; /**< Enable tracing of requests to ZIP
+ (doorbell stores are legal) */
+ uint64_t dfa : 1; /**< Enable tracing of requests to DFA
+ (CSR's and operations are legal) */
+ uint64_t fpa : 1; /**< Enable tracing of requests to FPA
+ (alloc's (loads/IOBDMA's), frees (stores) are legal) */
+ uint64_t key : 1; /**< Enable tracing of requests to KEY memory
+ (loads/IOBDMA's/stores are legal) */
+ uint64_t pci : 1; /**< Enable tracing of requests to PCI and RSL-type
+ CSR's (RSL CSR's, PCI bus operations, PCI
+ CSR's) */
+ uint64_t illegal3 : 2; /**< Illegal destinations */
+ uint64_t mio : 1; /**< Enable tracing of CIU and GPIO CSR's */
+#else
+ uint64_t mio : 1;
+ uint64_t illegal3 : 2;
+ uint64_t pci : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rng : 1;
+ uint64_t illegal2 : 3;
+ uint64_t pow : 1;
+ uint64_t illegal : 19;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn31xx;
+ struct cvmx_trax_filt_did_cn31xx cn38xx;
+ struct cvmx_trax_filt_did_cn31xx cn38xxp2;
+ struct cvmx_trax_filt_did_cn31xx cn52xx;
+ struct cvmx_trax_filt_did_cn31xx cn52xxp1;
+ struct cvmx_trax_filt_did_cn31xx cn56xx;
+ struct cvmx_trax_filt_did_cn31xx cn56xxp1;
+ struct cvmx_trax_filt_did_cn31xx cn58xx;
+ struct cvmx_trax_filt_did_cn31xx cn58xxp1;
+ struct cvmx_trax_filt_did_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t illegal5 : 1; /**< Illegal destinations */
+ uint64_t fau : 1; /**< Enable tracing of FAU accesses */
+ uint64_t illegal4 : 2; /**< Illegal destinations */
+ uint64_t dpi : 1; /**< Enable tracing of DPI accesses
+ (DPI NCB CSRs) */
+ uint64_t illegal : 12; /**< Illegal destinations */
+ uint64_t rad : 1; /**< Enable tracing of RAD accesses
+ (doorbells) */
+ uint64_t usb0 : 1; /**< Enable tracing of USB0 accesses
+ (UAHC0 EHCI and OHCI NCB CSRs) */
+ uint64_t pow : 1; /**< Enable tracing of requests to POW
+ (get work, add work, status/memory/index
+ loads, NULLRd loads, CSR's) */
+ uint64_t illegal2 : 1; /**< Illegal destination */
+ uint64_t pko : 1; /**< Enable tracing of PKO accesses
+ (doorbells) */
+ uint64_t ipd : 1; /**< Enable tracing of IPD CSR accesses
+ (IPD CSRs) */
+ uint64_t rng : 1; /**< Enable tracing of requests to RNG
+ (loads/IOBDMA's are legal) */
+ uint64_t zip : 1; /**< Enable tracing of requests to ZIP
+ (doorbell stores are legal) */
+ uint64_t dfa : 1; /**< Enable tracing of requests to DFA
+ (CSR's and operations are legal) */
+ uint64_t fpa : 1; /**< Enable tracing of requests to FPA
+ (alloc's (loads/IOBDMA's), frees (stores) are legal) */
+ uint64_t key : 1; /**< Enable tracing of requests to KEY memory
+ (loads/IOBDMA's/stores are legal) */
+ uint64_t sli : 1; /**< Enable tracing of requests to SLI and RSL-type
+ CSR's (RSL CSR's, PCI/sRIO bus operations, SLI
+ CSR's) */
+ uint64_t illegal3 : 2; /**< Illegal destinations */
+ uint64_t mio : 1; /**< Enable tracing of MIO accesses
+ (CIU and GPIO CSR's, boot bus accesses) */
+#else
+ uint64_t mio : 1;
+ uint64_t illegal3 : 2;
+ uint64_t sli : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rng : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t illegal2 : 1;
+ uint64_t pow : 1;
+ uint64_t usb0 : 1;
+ uint64_t rad : 1;
+ uint64_t illegal : 12;
+ uint64_t dpi : 1;
+ uint64_t illegal4 : 2;
+ uint64_t fau : 1;
+ uint64_t illegal5 : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn61xx;
+ struct cvmx_trax_filt_did_cn61xx cn63xx;
+ struct cvmx_trax_filt_did_cn61xx cn63xxp1;
+ struct cvmx_trax_filt_did_cn61xx cn66xx;
+ struct cvmx_trax_filt_did_cn61xx cn68xx;
+ struct cvmx_trax_filt_did_cn61xx cn68xxp1;
+ struct cvmx_trax_filt_did_cn61xx cnf71xx;
+};
+typedef union cvmx_trax_filt_did cvmx_trax_filt_did_t;
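+
+/*
+ * Sketch: tracing only requests whose destination is the POW, using the
+ * cn61xx layout of the union above. Illustrative only; assumes
+ * cvmx_write_csr() from cvmx.h and the CVMX_TRAX_FILT_DID accessor defined
+ * earlier in this header.
+ */
+static inline void example_trax_filter_pow_dest(unsigned long block_id)
+{
+    cvmx_trax_filt_did_t did;
+    did.u64 = 0;                 /* no destinations selected */
+    did.cn61xx.pow = 1;          /* match requests addressed to the POW */
+    cvmx_write_csr(CVMX_TRAX_FILT_DID(block_id), did.u64);
+}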
+
+/**
+ * cvmx_tra#_filt_sid
+ *
+ * TRA_FILT_SID = Trace Buffer Filter SourceId Mask
+ *
+ * Description:
+ */
+union cvmx_trax_filt_sid {
+ uint64_t u64;
+ struct cvmx_trax_filt_sid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable tracing of requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable tracing of requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable tracing of read requests from PKO */
+ uint64_t pki : 1; /**< Enable tracing of write requests from PIP/IPD */
+ uint64_t pp : 16; /**< Enable tracing from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=15 */
+#else
+ uint64_t pp : 16;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_trax_filt_sid_s cn31xx;
+ struct cvmx_trax_filt_sid_s cn38xx;
+ struct cvmx_trax_filt_sid_s cn38xxp2;
+ struct cvmx_trax_filt_sid_s cn52xx;
+ struct cvmx_trax_filt_sid_s cn52xxp1;
+ struct cvmx_trax_filt_sid_s cn56xx;
+ struct cvmx_trax_filt_sid_s cn56xxp1;
+ struct cvmx_trax_filt_sid_s cn58xx;
+ struct cvmx_trax_filt_sid_s cn58xxp1;
+ struct cvmx_trax_filt_sid_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable tracing of requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable tracing of requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable tracing of read requests from PKO */
+ uint64_t pki : 1; /**< Enable tracing of write requests from PIP/IPD */
+ uint64_t reserved_4_15 : 12;
+ uint64_t pp : 4; /**< Enable tracing from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=3 */
+#else
+ uint64_t pp : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn61xx;
+ struct cvmx_trax_filt_sid_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable tracing of requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable tracing of requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable tracing of read requests from PKO */
+ uint64_t pki : 1; /**< Enable tracing of write requests from PIP/IPD */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pp : 8; /**< Enable tracing from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=7 */
+#else
+ uint64_t pp : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn63xx;
+ struct cvmx_trax_filt_sid_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable tracing of requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable tracing of requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable tracing of read requests from PKO */
+ uint64_t pki : 1; /**< Enable tracing of write requests from PIP/IPD */
+ uint64_t reserved_6_15 : 10;
+ uint64_t pp : 6; /**< Enable tracing from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=5 */
+#else
+ uint64_t pp : 6;
+ uint64_t reserved_6_15 : 10;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn63xxp1;
+ struct cvmx_trax_filt_sid_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable tracing of requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable tracing of requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable tracing of read requests from PKO */
+ uint64_t pki : 1; /**< Enable tracing of write requests from PIP/IPD */
+ uint64_t reserved_10_15 : 6;
+ uint64_t pp : 10; /**< Enable tracing from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=9 */
+#else
+ uint64_t pp : 10;
+ uint64_t reserved_10_15 : 6;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn66xx;
+ struct cvmx_trax_filt_sid_cn63xx cn68xx;
+ struct cvmx_trax_filt_sid_cn63xx cn68xxp1;
+ struct cvmx_trax_filt_sid_cn61xx cnf71xx;
+};
+typedef union cvmx_trax_filt_sid cvmx_trax_filt_sid_t;
+
+/**
+ * cvmx_tra#_int_status
+ *
+ * TRA_INT_STATUS = Trace Buffer Interrupt Status
+ *
+ * Description:
+ *
+ * Notes:
+ * During a CSR write to this register, the write data is used as a mask to clear the selected status
+ * bits (status'[3:0] = status[3:0] & ~write_data[3:0]).
+ */
+union cvmx_trax_int_status {
+ uint64_t u64;
+ struct cvmx_trax_int_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t mcd0_thr : 1; /**< MCD0 full threshold interrupt status
+ 0=trace buffer did not generate MCD0 wire pulse
+ 1=trace buffer did generate MCD0 wire pulse
+ and prevents additional MCD0_THR MCD0 wire pulses */
+ uint64_t mcd0_trg : 1; /**< MCD0 end trigger interrupt status
+ 0=trace buffer did not generate interrupt
+ 1=trace buffer did generate interrupt
+ and prevents additional MCD0_TRG MCD0 wire pulses */
+ uint64_t ciu_thr : 1; /**< CIU full threshold interrupt status
+ 0=trace buffer did not generate interrupt
+ 1=trace buffer did generate interrupt */
+ uint64_t ciu_trg : 1; /**< CIU end trigger interrupt status
+ 0=trace buffer did not generate interrupt
+ 1=trace buffer did generate interrupt */
+#else
+ uint64_t ciu_trg : 1;
+ uint64_t ciu_thr : 1;
+ uint64_t mcd0_trg : 1;
+ uint64_t mcd0_thr : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_trax_int_status_s cn31xx;
+ struct cvmx_trax_int_status_s cn38xx;
+ struct cvmx_trax_int_status_s cn38xxp2;
+ struct cvmx_trax_int_status_s cn52xx;
+ struct cvmx_trax_int_status_s cn52xxp1;
+ struct cvmx_trax_int_status_s cn56xx;
+ struct cvmx_trax_int_status_s cn56xxp1;
+ struct cvmx_trax_int_status_s cn58xx;
+ struct cvmx_trax_int_status_s cn58xxp1;
+ struct cvmx_trax_int_status_s cn61xx;
+ struct cvmx_trax_int_status_s cn63xx;
+ struct cvmx_trax_int_status_s cn63xxp1;
+ struct cvmx_trax_int_status_s cn66xx;
+ struct cvmx_trax_int_status_s cn68xx;
+ struct cvmx_trax_int_status_s cn68xxp1;
+ struct cvmx_trax_int_status_s cnf71xx;
+};
+typedef union cvmx_trax_int_status cvmx_trax_int_status_t;
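+
+/*
+ * Sketch of the clear-on-write behavior in the note above: the written value
+ * is a mask of status bits to clear, so setting just one field acknowledges
+ * that interrupt and leaves the others untouched. Assumes cvmx_write_csr()
+ * from cvmx.h and the CVMX_TRAX_INT_STATUS accessor defined earlier in this
+ * header.
+ */
+static inline void example_trax_ack_ciu_trg(unsigned long block_id)
+{
+    cvmx_trax_int_status_t clr;
+    clr.u64 = 0;
+    clr.s.ciu_trg = 1;           /* status[CIU_TRG] &= ~1 on this write */
+    cvmx_write_csr(CVMX_TRAX_INT_STATUS(block_id), clr.u64);
+}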
+
+/**
+ * cvmx_tra#_read_dat
+ *
+ * TRA_READ_DAT = Trace Buffer Read Data
+ *
+ * Description:
+ *
+ * Notes:
+ * This CSR is a memory of 1024 entries. When the trace is enabled, hardware sets the read pointer
+ * to entry 0. Each read of this address increments the read pointer.
+ */
+union cvmx_trax_read_dat {
+ uint64_t u64;
+ struct cvmx_trax_read_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data : 64; /**< Trace buffer data for current entry.
+ If TRA_CTL[16]==1, a read returns the lower 64 bits of the
+ entry; otherwise two accesses are necessary to read all 69
+ bits: the first access of a pair returns the lower 64 bits
+ and the second access returns the upper 5 bits. */
+#else
+ uint64_t data : 64;
+#endif
+ } s;
+ struct cvmx_trax_read_dat_s cn31xx;
+ struct cvmx_trax_read_dat_s cn38xx;
+ struct cvmx_trax_read_dat_s cn38xxp2;
+ struct cvmx_trax_read_dat_s cn52xx;
+ struct cvmx_trax_read_dat_s cn52xxp1;
+ struct cvmx_trax_read_dat_s cn56xx;
+ struct cvmx_trax_read_dat_s cn56xxp1;
+ struct cvmx_trax_read_dat_s cn58xx;
+ struct cvmx_trax_read_dat_s cn58xxp1;
+ struct cvmx_trax_read_dat_s cn61xx;
+ struct cvmx_trax_read_dat_s cn63xx;
+ struct cvmx_trax_read_dat_s cn63xxp1;
+ struct cvmx_trax_read_dat_s cn66xx;
+ struct cvmx_trax_read_dat_s cn68xx;
+ struct cvmx_trax_read_dat_s cn68xxp1;
+ struct cvmx_trax_read_dat_s cnf71xx;
+};
+typedef union cvmx_trax_read_dat cvmx_trax_read_dat_t;
+
+/**
+ * cvmx_tra#_read_dat_hi
+ *
+ * TRA_READ_DAT_HI = Trace Buffer Read Data - upper 5 bits; do not use if TRA_CTL[16]==0
+ *
+ * Description:
+ *
+ * Notes:
+ * This CSR is a memory of 1024 entries. Reads to this address do not increment the read pointer. The
+ * 5 bits read are the upper 5 bits of the TRA entry last read by the TRA_READ_DAT reg.
+ */
+union cvmx_trax_read_dat_hi {
+ uint64_t u64;
+ struct cvmx_trax_read_dat_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t data : 5; /**< Trace buffer data[68:64] for current entry */
+#else
+ uint64_t data : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_trax_read_dat_hi_s cn61xx;
+ struct cvmx_trax_read_dat_hi_s cn63xx;
+ struct cvmx_trax_read_dat_hi_s cn66xx;
+ struct cvmx_trax_read_dat_hi_s cn68xx;
+ struct cvmx_trax_read_dat_hi_s cn68xxp1;
+ struct cvmx_trax_read_dat_hi_s cnf71xx;
+};
+typedef union cvmx_trax_read_dat_hi cvmx_trax_read_dat_hi_t;
+
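+/* Sketch of reading one full 69-bit trace entry when TRA_CTL[16]==1
+ * (illustrative helper, not part of the SDK API; assumes the
+ * CVMX_TRAX_READ_DAT(block) and CVMX_TRAX_READ_DAT_HI(block) address macros
+ * defined earlier in this file).  The TRA_READ_DAT access advances the read
+ * pointer; the TRA_READ_DAT_HI access does not, and returns bits [68:64] of
+ * the entry just read. */
+static inline void trax_example_read_entry(int block, uint64_t *lo, uint64_t *hi)
+{
+    *lo = cvmx_read_csr(CVMX_TRAX_READ_DAT(block));    /* data[63:0], increments pointer */
+    *hi = cvmx_read_csr(CVMX_TRAX_READ_DAT_HI(block)); /* data[68:64], pointer unchanged */
+}
+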
+/**
+ * cvmx_tra#_trig0_adr_adr
+ *
+ * TRA_TRIG0_ADR_ADR = Trace Buffer Filter Address Address
+ *
+ * Description:
+ */
+union cvmx_trax_trig0_adr_adr {
+ uint64_t u64;
+ struct cvmx_trax_trig0_adr_adr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t adr : 38; /**< Unmasked Address
+ The combination of TRA_TRIG0_ADR_ADR and
+ TRA_TRIG0_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches */
+#else
+ uint64_t adr : 38;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_trax_trig0_adr_adr_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t adr : 36; /**< Unmasked Address
+ The combination of TRA(0..0)_TRIG0_ADR_ADR and
+ TRA(0..0)_TRIG0_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches */
+#else
+ uint64_t adr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn31xx;
+ struct cvmx_trax_trig0_adr_adr_cn31xx cn38xx;
+ struct cvmx_trax_trig0_adr_adr_cn31xx cn38xxp2;
+ struct cvmx_trax_trig0_adr_adr_cn31xx cn52xx;
+ struct cvmx_trax_trig0_adr_adr_cn31xx cn52xxp1;
+ struct cvmx_trax_trig0_adr_adr_cn31xx cn56xx;
+ struct cvmx_trax_trig0_adr_adr_cn31xx cn56xxp1;
+ struct cvmx_trax_trig0_adr_adr_cn31xx cn58xx;
+ struct cvmx_trax_trig0_adr_adr_cn31xx cn58xxp1;
+ struct cvmx_trax_trig0_adr_adr_s cn61xx;
+ struct cvmx_trax_trig0_adr_adr_s cn63xx;
+ struct cvmx_trax_trig0_adr_adr_s cn63xxp1;
+ struct cvmx_trax_trig0_adr_adr_s cn66xx;
+ struct cvmx_trax_trig0_adr_adr_s cn68xx;
+ struct cvmx_trax_trig0_adr_adr_s cn68xxp1;
+ struct cvmx_trax_trig0_adr_adr_s cnf71xx;
+};
+typedef union cvmx_trax_trig0_adr_adr cvmx_trax_trig0_adr_adr_t;
+
+/**
+ * cvmx_tra#_trig0_adr_msk
+ *
+ * TRA_TRIG0_ADR_MSK = Trace Buffer Filter Address Mask
+ *
+ * Description:
+ */
+union cvmx_trax_trig0_adr_msk {
+ uint64_t u64;
+ struct cvmx_trax_trig0_adr_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t adr : 38; /**< Address Mask
+ The combination of TRA_TRIG0_ADR_ADR and
+ TRA_TRIG0_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches. When a mask bit is not
+ set, the corresponding address bits are assumed
+ to match. Also, note that IOBDMAs do not have
+ proper addresses, so when TRA_TRIG0_CMD[IOBDMA]
+ is set, TRA_TRIG0_ADR_MSK must be zero to
+ guarantee that any IOBDMAs are recognized as
+ triggers. */
+#else
+ uint64_t adr : 38;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_trax_trig0_adr_msk_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t adr : 36; /**< Address Mask
+ The combination of TRA(0..0)_TRIG0_ADR_ADR and
+ TRA(0..0)_TRIG0_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches. When a mask bit is not
+ set, the corresponding address bits are assumed
+ to match. Also, note that IOBDMAs do not have
+ proper addresses, so when TRA(0..0)_TRIG0_CMD[IOBDMA]
+ is set, TRA(0..0)_TRIG0_ADR_MSK must be zero to
+ guarantee that any IOBDMAs are recognized as
+ triggers. */
+#else
+ uint64_t adr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn31xx;
+ struct cvmx_trax_trig0_adr_msk_cn31xx cn38xx;
+ struct cvmx_trax_trig0_adr_msk_cn31xx cn38xxp2;
+ struct cvmx_trax_trig0_adr_msk_cn31xx cn52xx;
+ struct cvmx_trax_trig0_adr_msk_cn31xx cn52xxp1;
+ struct cvmx_trax_trig0_adr_msk_cn31xx cn56xx;
+ struct cvmx_trax_trig0_adr_msk_cn31xx cn56xxp1;
+ struct cvmx_trax_trig0_adr_msk_cn31xx cn58xx;
+ struct cvmx_trax_trig0_adr_msk_cn31xx cn58xxp1;
+ struct cvmx_trax_trig0_adr_msk_s cn61xx;
+ struct cvmx_trax_trig0_adr_msk_s cn63xx;
+ struct cvmx_trax_trig0_adr_msk_s cn63xxp1;
+ struct cvmx_trax_trig0_adr_msk_s cn66xx;
+ struct cvmx_trax_trig0_adr_msk_s cn68xx;
+ struct cvmx_trax_trig0_adr_msk_s cn68xxp1;
+ struct cvmx_trax_trig0_adr_msk_s cnf71xx;
+};
+typedef union cvmx_trax_trig0_adr_msk cvmx_trax_trig0_adr_msk_t;
+
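+/* Sketch of the masked-address match described above: a command address
+ * matches when (address & MSK) == (ADR & MSK), so clearing the low mask bits
+ * selects a whole aligned region.  Illustrative helper, not part of the SDK
+ * API; it assumes the CVMX_TRAX_TRIG0_ADR_ADR(block) and
+ * CVMX_TRAX_TRIG0_ADR_MSK(block) address macros defined earlier in this file
+ * and that ADR holds address bits [37:0] directly. */
+static inline void trax_example_trig0_region_4k(int block, uint64_t base)
+{
+    cvmx_trax_trig0_adr_adr_t adr;
+    cvmx_trax_trig0_adr_msk_t msk;
+    adr.u64 = 0;
+    adr.s.adr = base & 0x3fffffffffull;      /* 38-bit unmasked address */
+    msk.u64 = 0;
+    msk.s.adr = 0x3fffffffffull & ~0xfffull; /* compare only bits above bit 11 */
+    cvmx_write_csr(CVMX_TRAX_TRIG0_ADR_ADR(block), adr.u64);
+    cvmx_write_csr(CVMX_TRAX_TRIG0_ADR_MSK(block), msk.u64);
+}
+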
+/**
+ * cvmx_tra#_trig0_cmd
+ *
+ * TRA_TRIG0_CMD = Trace Buffer Filter Command Mask
+ *
+ * Description:
+ *
+ * Notes:
+ * Note that the trace buffer does not do proper IOBDMA address compares. Thus, if IOBDMA is set, then
+ * the address compare must be disabled (i.e. TRA_TRIG0_ADR_MSK set to zero) to guarantee that IOBDMAs
+ * are recognized as triggers.
+ */
+union cvmx_trax_trig0_cmd {
+ uint64_t u64;
+ struct cvmx_trax_trig0_cmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t saa64 : 1; /**< Enable SAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t saa32 : 1; /**< Enable SAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_60_61 : 2;
+ uint64_t faa64 : 1; /**< Enable FAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t faa32 : 1; /**< Enable FAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_56_57 : 2;
+ uint64_t decr64 : 1; /**< Enable DECR64 tracing
+ 0=disable, 1=enable */
+ uint64_t decr32 : 1; /**< Enable DECR32 tracing
+ 0=disable, 1=enable */
+ uint64_t decr16 : 1; /**< Enable DECR16 tracing
+ 0=disable, 1=enable */
+ uint64_t decr8 : 1; /**< Enable DECR8 tracing
+ 0=disable, 1=enable */
+ uint64_t incr64 : 1; /**< Enable INCR64 tracing
+ 0=disable, 1=enable */
+ uint64_t incr32 : 1; /**< Enable INCR32 tracing
+ 0=disable, 1=enable */
+ uint64_t incr16 : 1; /**< Enable INCR16 tracing
+ 0=disable, 1=enable */
+ uint64_t incr8 : 1; /**< Enable INCR8 tracing
+ 0=disable, 1=enable */
+ uint64_t clr64 : 1; /**< Enable CLR64 tracing
+ 0=disable, 1=enable */
+ uint64_t clr32 : 1; /**< Enable CLR32 tracing
+ 0=disable, 1=enable */
+ uint64_t clr16 : 1; /**< Enable CLR16 tracing
+ 0=disable, 1=enable */
+ uint64_t clr8 : 1; /**< Enable CLR8 tracing
+ 0=disable, 1=enable */
+ uint64_t set64 : 1; /**< Enable SET64 tracing
+ 0=disable, 1=enable */
+ uint64_t set32 : 1; /**< Enable SET32 tracing
+ 0=disable, 1=enable */
+ uint64_t set16 : 1; /**< Enable SET16 tracing
+ 0=disable, 1=enable */
+ uint64_t set8 : 1; /**< Enable SET8 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst64 : 1; /**< Enable IOBST64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst32 : 1; /**< Enable IOBST32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst16 : 1; /**< Enable IOBST16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst8 : 1; /**< Enable IOBST8 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_32_35 : 4;
+ uint64_t lckl2 : 1; /**< Enable LCKL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbl2 : 1; /**< Enable WBL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2 : 1; /**< Enable WBIL2 tracing
+ 0=disable, 1=enable */
+ uint64_t invl2 : 1; /**< Enable INVL2 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_27_27 : 1;
+ uint64_t stgl2i : 1; /**< Enable STGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t ltgl2i : 1; /**< Enable LTGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2i : 1; /**< Enable WBIL2I tracing
+ 0=disable, 1=enable */
+ uint64_t fas64 : 1; /**< Enable FAS64 tracing
+ 0=disable, 1=enable */
+ uint64_t fas32 : 1; /**< Enable FAS32 tracing
+ 0=disable, 1=enable */
+ uint64_t sttil1 : 1; /**< Enable STTIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t stfil1 : 1; /**< Enable STFIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_16_19 : 4;
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t iobst : 1; /**< Enable IOBST tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_0_13 : 14;
+#else
+ uint64_t reserved_0_13 : 14;
+ uint64_t iobst : 1;
+ uint64_t iobdma : 1;
+ uint64_t reserved_16_19 : 4;
+ uint64_t stfil1 : 1;
+ uint64_t sttil1 : 1;
+ uint64_t fas32 : 1;
+ uint64_t fas64 : 1;
+ uint64_t wbil2i : 1;
+ uint64_t ltgl2i : 1;
+ uint64_t stgl2i : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t invl2 : 1;
+ uint64_t wbil2 : 1;
+ uint64_t wbl2 : 1;
+ uint64_t lckl2 : 1;
+ uint64_t reserved_32_35 : 4;
+ uint64_t iobst8 : 1;
+ uint64_t iobst16 : 1;
+ uint64_t iobst32 : 1;
+ uint64_t iobst64 : 1;
+ uint64_t set8 : 1;
+ uint64_t set16 : 1;
+ uint64_t set32 : 1;
+ uint64_t set64 : 1;
+ uint64_t clr8 : 1;
+ uint64_t clr16 : 1;
+ uint64_t clr32 : 1;
+ uint64_t clr64 : 1;
+ uint64_t incr8 : 1;
+ uint64_t incr16 : 1;
+ uint64_t incr32 : 1;
+ uint64_t incr64 : 1;
+ uint64_t decr8 : 1;
+ uint64_t decr16 : 1;
+ uint64_t decr32 : 1;
+ uint64_t decr64 : 1;
+ uint64_t reserved_56_57 : 2;
+ uint64_t faa32 : 1;
+ uint64_t faa64 : 1;
+ uint64_t reserved_60_61 : 2;
+ uint64_t saa32 : 1;
+ uint64_t saa64 : 1;
+#endif
+ } s;
+ struct cvmx_trax_trig0_cmd_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t iobst : 1; /**< Enable IOBST tracing
+ 0=disable, 1=enable */
+ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing
+ 0=disable, 1=enable */
+ uint64_t stt : 1; /**< Enable STT tracing
+ 0=disable, 1=enable */
+ uint64_t stp : 1; /**< Enable STP tracing
+ 0=disable, 1=enable */
+ uint64_t stc : 1; /**< Enable STC tracing
+ 0=disable, 1=enable */
+ uint64_t stf : 1; /**< Enable STF tracing
+ 0=disable, 1=enable */
+ uint64_t ldt : 1; /**< Enable LDT tracing
+ 0=disable, 1=enable */
+ uint64_t ldi : 1; /**< Enable LDI tracing
+ 0=disable, 1=enable */
+ uint64_t ldd : 1; /**< Enable LDD tracing
+ 0=disable, 1=enable */
+ uint64_t psl1 : 1; /**< Enable PSL1 tracing
+ 0=disable, 1=enable */
+ uint64_t pl2 : 1; /**< Enable PL2 tracing
+ 0=disable, 1=enable */
+ uint64_t dwb : 1; /**< Enable DWB tracing
+ 0=disable, 1=enable */
+#else
+ uint64_t dwb : 1;
+ uint64_t pl2 : 1;
+ uint64_t psl1 : 1;
+ uint64_t ldd : 1;
+ uint64_t ldi : 1;
+ uint64_t ldt : 1;
+ uint64_t stf : 1;
+ uint64_t stc : 1;
+ uint64_t stp : 1;
+ uint64_t stt : 1;
+ uint64_t iobld8 : 1;
+ uint64_t iobld16 : 1;
+ uint64_t iobld32 : 1;
+ uint64_t iobld64 : 1;
+ uint64_t iobst : 1;
+ uint64_t iobdma : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn31xx;
+ struct cvmx_trax_trig0_cmd_cn31xx cn38xx;
+ struct cvmx_trax_trig0_cmd_cn31xx cn38xxp2;
+ struct cvmx_trax_trig0_cmd_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t saa : 1; /**< Enable SAA tracing
+ 0=disable, 1=enable */
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t iobst : 1; /**< Enable IOBST tracing
+ 0=disable, 1=enable */
+ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing
+ 0=disable, 1=enable */
+ uint64_t stt : 1; /**< Enable STT tracing
+ 0=disable, 1=enable */
+ uint64_t stp : 1; /**< Enable STP tracing
+ 0=disable, 1=enable */
+ uint64_t stc : 1; /**< Enable STC tracing
+ 0=disable, 1=enable */
+ uint64_t stf : 1; /**< Enable STF tracing
+ 0=disable, 1=enable */
+ uint64_t ldt : 1; /**< Enable LDT tracing
+ 0=disable, 1=enable */
+ uint64_t ldi : 1; /**< Enable LDI tracing
+ 0=disable, 1=enable */
+ uint64_t ldd : 1; /**< Enable LDD tracing
+ 0=disable, 1=enable */
+ uint64_t psl1 : 1; /**< Enable PSL1 tracing
+ 0=disable, 1=enable */
+ uint64_t pl2 : 1; /**< Enable PL2 tracing
+ 0=disable, 1=enable */
+ uint64_t dwb : 1; /**< Enable DWB tracing
+ 0=disable, 1=enable */
+#else
+ uint64_t dwb : 1;
+ uint64_t pl2 : 1;
+ uint64_t psl1 : 1;
+ uint64_t ldd : 1;
+ uint64_t ldi : 1;
+ uint64_t ldt : 1;
+ uint64_t stf : 1;
+ uint64_t stc : 1;
+ uint64_t stp : 1;
+ uint64_t stt : 1;
+ uint64_t iobld8 : 1;
+ uint64_t iobld16 : 1;
+ uint64_t iobld32 : 1;
+ uint64_t iobld64 : 1;
+ uint64_t iobst : 1;
+ uint64_t iobdma : 1;
+ uint64_t saa : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn52xx;
+ struct cvmx_trax_trig0_cmd_cn52xx cn52xxp1;
+ struct cvmx_trax_trig0_cmd_cn52xx cn56xx;
+ struct cvmx_trax_trig0_cmd_cn52xx cn56xxp1;
+ struct cvmx_trax_trig0_cmd_cn52xx cn58xx;
+ struct cvmx_trax_trig0_cmd_cn52xx cn58xxp1;
+ struct cvmx_trax_trig0_cmd_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t saa64 : 1; /**< Enable SAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t saa32 : 1; /**< Enable SAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_60_61 : 2;
+ uint64_t faa64 : 1; /**< Enable FAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t faa32 : 1; /**< Enable FAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_56_57 : 2;
+ uint64_t decr64 : 1; /**< Enable DECR64 tracing
+ 0=disable, 1=enable */
+ uint64_t decr32 : 1; /**< Enable DECR32 tracing
+ 0=disable, 1=enable */
+ uint64_t decr16 : 1; /**< Enable DECR16 tracing
+ 0=disable, 1=enable */
+ uint64_t decr8 : 1; /**< Enable DECR8 tracing
+ 0=disable, 1=enable */
+ uint64_t incr64 : 1; /**< Enable INCR64 tracing
+ 0=disable, 1=enable */
+ uint64_t incr32 : 1; /**< Enable INCR32 tracing
+ 0=disable, 1=enable */
+ uint64_t incr16 : 1; /**< Enable INCR16 tracing
+ 0=disable, 1=enable */
+ uint64_t incr8 : 1; /**< Enable INCR8 tracing
+ 0=disable, 1=enable */
+ uint64_t clr64 : 1; /**< Enable CLR64 tracing
+ 0=disable, 1=enable */
+ uint64_t clr32 : 1; /**< Enable CLR32 tracing
+ 0=disable, 1=enable */
+ uint64_t clr16 : 1; /**< Enable CLR16 tracing
+ 0=disable, 1=enable */
+ uint64_t clr8 : 1; /**< Enable CLR8 tracing
+ 0=disable, 1=enable */
+ uint64_t set64 : 1; /**< Enable SET64 tracing
+ 0=disable, 1=enable */
+ uint64_t set32 : 1; /**< Enable SET32 tracing
+ 0=disable, 1=enable */
+ uint64_t set16 : 1; /**< Enable SET16 tracing
+ 0=disable, 1=enable */
+ uint64_t set8 : 1; /**< Enable SET8 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst64 : 1; /**< Enable IOBST64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst32 : 1; /**< Enable IOBST32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst16 : 1; /**< Enable IOBST16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst8 : 1; /**< Enable IOBST8 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing
+ 0=disable, 1=enable */
+ uint64_t lckl2 : 1; /**< Enable LCKL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbl2 : 1; /**< Enable WBL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2 : 1; /**< Enable WBIL2 tracing
+ 0=disable, 1=enable */
+ uint64_t invl2 : 1; /**< Enable INVL2 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_27_27 : 1;
+ uint64_t stgl2i : 1; /**< Enable STGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t ltgl2i : 1; /**< Enable LTGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2i : 1; /**< Enable WBIL2I tracing
+ 0=disable, 1=enable */
+ uint64_t fas64 : 1; /**< Enable FAS64 tracing
+ 0=disable, 1=enable */
+ uint64_t fas32 : 1; /**< Enable FAS32 tracing
+ 0=disable, 1=enable */
+ uint64_t sttil1 : 1; /**< Enable STTIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t stfil1 : 1; /**< Enable STFIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t stc : 1; /**< Enable STC tracing
+ 0=disable, 1=enable */
+ uint64_t stp : 1; /**< Enable STP tracing
+ 0=disable, 1=enable */
+ uint64_t stt : 1; /**< Enable STT tracing
+ 0=disable, 1=enable */
+ uint64_t stf : 1; /**< Enable STF tracing
+ 0=disable, 1=enable */
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_10_14 : 5;
+ uint64_t psl1 : 1; /**< Enable PSL1 tracing
+ 0=disable, 1=enable */
+ uint64_t ldd : 1; /**< Enable LDD tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_6_7 : 2;
+ uint64_t dwb : 1; /**< Enable DWB tracing
+ 0=disable, 1=enable */
+ uint64_t rpl2 : 1; /**< Enable RPL2 tracing
+ 0=disable, 1=enable */
+ uint64_t pl2 : 1; /**< Enable PL2 tracing
+ 0=disable, 1=enable */
+ uint64_t ldi : 1; /**< Enable LDI tracing
+ 0=disable, 1=enable */
+ uint64_t ldt : 1; /**< Enable LDT tracing
+ 0=disable, 1=enable */
+ uint64_t nop : 1; /**< Enable NOP tracing
+ 0=disable, 1=enable */
+#else
+ uint64_t nop : 1;
+ uint64_t ldt : 1;
+ uint64_t ldi : 1;
+ uint64_t pl2 : 1;
+ uint64_t rpl2 : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t ldd : 1;
+ uint64_t psl1 : 1;
+ uint64_t reserved_10_14 : 5;
+ uint64_t iobdma : 1;
+ uint64_t stf : 1;
+ uint64_t stt : 1;
+ uint64_t stp : 1;
+ uint64_t stc : 1;
+ uint64_t stfil1 : 1;
+ uint64_t sttil1 : 1;
+ uint64_t fas32 : 1;
+ uint64_t fas64 : 1;
+ uint64_t wbil2i : 1;
+ uint64_t ltgl2i : 1;
+ uint64_t stgl2i : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t invl2 : 1;
+ uint64_t wbil2 : 1;
+ uint64_t wbl2 : 1;
+ uint64_t lckl2 : 1;
+ uint64_t iobld8 : 1;
+ uint64_t iobld16 : 1;
+ uint64_t iobld32 : 1;
+ uint64_t iobld64 : 1;
+ uint64_t iobst8 : 1;
+ uint64_t iobst16 : 1;
+ uint64_t iobst32 : 1;
+ uint64_t iobst64 : 1;
+ uint64_t set8 : 1;
+ uint64_t set16 : 1;
+ uint64_t set32 : 1;
+ uint64_t set64 : 1;
+ uint64_t clr8 : 1;
+ uint64_t clr16 : 1;
+ uint64_t clr32 : 1;
+ uint64_t clr64 : 1;
+ uint64_t incr8 : 1;
+ uint64_t incr16 : 1;
+ uint64_t incr32 : 1;
+ uint64_t incr64 : 1;
+ uint64_t decr8 : 1;
+ uint64_t decr16 : 1;
+ uint64_t decr32 : 1;
+ uint64_t decr64 : 1;
+ uint64_t reserved_56_57 : 2;
+ uint64_t faa32 : 1;
+ uint64_t faa64 : 1;
+ uint64_t reserved_60_61 : 2;
+ uint64_t saa32 : 1;
+ uint64_t saa64 : 1;
+#endif
+ } cn61xx;
+ struct cvmx_trax_trig0_cmd_cn61xx cn63xx;
+ struct cvmx_trax_trig0_cmd_cn61xx cn63xxp1;
+ struct cvmx_trax_trig0_cmd_cn61xx cn66xx;
+ struct cvmx_trax_trig0_cmd_cn61xx cn68xx;
+ struct cvmx_trax_trig0_cmd_cn61xx cn68xxp1;
+ struct cvmx_trax_trig0_cmd_cn61xx cnf71xx;
+};
+typedef union cvmx_trax_trig0_cmd cvmx_trax_trig0_cmd_t;
+
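+/* Sketch of the IOBDMA caveat from the notes above: to trigger on IOBDMA
+ * commands, the address mask must be zeroed so that the (improper) IOBDMA
+ * address always matches.  Illustrative helper, not part of the SDK API;
+ * assumes the CVMX_TRAX_TRIG0_CMD(block) and CVMX_TRAX_TRIG0_ADR_MSK(block)
+ * address macros defined earlier in this file. */
+static inline void trax_example_trig0_iobdma(int block)
+{
+    cvmx_trax_trig0_cmd_t cmd;
+    cmd.u64 = 0;
+    cmd.s.iobdma = 1; /* trigger on IOBDMA commands only */
+    cvmx_write_csr(CVMX_TRAX_TRIG0_CMD(block), cmd.u64);
+    cvmx_write_csr(CVMX_TRAX_TRIG0_ADR_MSK(block), 0); /* required for IOBDMA triggers */
+}
+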
+/**
+ * cvmx_tra#_trig0_did
+ *
+ * TRA_TRIG0_DID = Trace Buffer Filter DestinationId Mask
+ *
+ * Description:
+ */
+union cvmx_trax_trig0_did {
+ uint64_t u64;
+ struct cvmx_trax_trig0_did_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t pow : 1; /**< Enable triggering on requests to POW
+ (get work, add work, status/memory/index
+ loads, NULLRd loads, CSR's) */
+ uint64_t reserved_9_11 : 3;
+ uint64_t rng : 1; /**< Enable triggering on requests to RNG
+ (loads/IOBDMA's are legal) */
+ uint64_t zip : 1; /**< Enable triggering on requests to ZIP
+ (doorbell stores are legal) */
+ uint64_t dfa : 1; /**< Enable triggering on requests to DFA
+ (CSR's and operations are legal) */
+ uint64_t fpa : 1; /**< Enable triggering on requests to FPA
+ (alloc's (loads/IOBDMA's), frees (stores) are legal) */
+ uint64_t key : 1; /**< Enable triggering on requests to KEY memory
+ (loads/IOBDMA's/stores are legal) */
+ uint64_t reserved_3_3 : 1;
+ uint64_t illegal3 : 2; /**< Illegal destinations */
+ uint64_t mio : 1; /**< Enable triggering on MIO accesses
+ (CIU and GPIO CSR's, boot bus accesses) */
+#else
+ uint64_t mio : 1;
+ uint64_t illegal3 : 2;
+ uint64_t reserved_3_3 : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rng : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t pow : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_trax_trig0_did_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t illegal : 19; /**< Illegal destinations */
+ uint64_t pow : 1; /**< Enable triggering on requests to POW
+ (get work, add work, status/memory/index
+ loads, NULLRd loads, CSR's) */
+ uint64_t illegal2 : 3; /**< Illegal destinations */
+ uint64_t rng : 1; /**< Enable triggering on requests to RNG
+ (loads/IOBDMA's are legal) */
+ uint64_t zip : 1; /**< Enable triggering on requests to ZIP
+ (doorbell stores are legal) */
+ uint64_t dfa : 1; /**< Enable triggering on requests to DFA
+ (CSR's and operations are legal) */
+ uint64_t fpa : 1; /**< Enable triggering on requests to FPA
+ (alloc's (loads/IOBDMA's), frees (stores) are legal) */
+ uint64_t key : 1; /**< Enable triggering on requests to KEY memory
+ (loads/IOBDMA's/stores are legal) */
+ uint64_t pci : 1; /**< Enable triggering on requests to PCI and RSL-type
+ CSR's (RSL CSR's, PCI bus operations, PCI
+ CSR's) */
+ uint64_t illegal3 : 2; /**< Illegal destinations */
+ uint64_t mio : 1; /**< Enable triggering on CIU and GPIO CSR's */
+#else
+ uint64_t mio : 1;
+ uint64_t illegal3 : 2;
+ uint64_t pci : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rng : 1;
+ uint64_t illegal2 : 3;
+ uint64_t pow : 1;
+ uint64_t illegal : 19;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn31xx;
+ struct cvmx_trax_trig0_did_cn31xx cn38xx;
+ struct cvmx_trax_trig0_did_cn31xx cn38xxp2;
+ struct cvmx_trax_trig0_did_cn31xx cn52xx;
+ struct cvmx_trax_trig0_did_cn31xx cn52xxp1;
+ struct cvmx_trax_trig0_did_cn31xx cn56xx;
+ struct cvmx_trax_trig0_did_cn31xx cn56xxp1;
+ struct cvmx_trax_trig0_did_cn31xx cn58xx;
+ struct cvmx_trax_trig0_did_cn31xx cn58xxp1;
+ struct cvmx_trax_trig0_did_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t illegal5 : 1; /**< Illegal destinations */
+ uint64_t fau : 1; /**< Enable triggering on FAU accesses */
+ uint64_t illegal4 : 2; /**< Illegal destinations */
+ uint64_t dpi : 1; /**< Enable triggering on DPI accesses
+ (DPI NCB CSRs) */
+ uint64_t illegal : 12; /**< Illegal destinations */
+ uint64_t rad : 1; /**< Enable triggering on RAD accesses
+ (doorbells) */
+ uint64_t usb0 : 1; /**< Enable triggering on USB0 accesses
+ (UAHC0 EHCI and OHCI NCB CSRs) */
+ uint64_t pow : 1; /**< Enable triggering on requests to POW
+ (get work, add work, status/memory/index
+ loads, NULLRd loads, CSR's) */
+ uint64_t illegal2 : 1; /**< Illegal destination */
+ uint64_t pko : 1; /**< Enable triggering on PKO accesses
+ (doorbells) */
+ uint64_t ipd : 1; /**< Enable triggering on IPD CSR accesses
+ (IPD CSRs) */
+ uint64_t rng : 1; /**< Enable triggering on requests to RNG
+ (loads/IOBDMA's are legal) */
+ uint64_t zip : 1; /**< Enable triggering on requests to ZIP
+ (doorbell stores are legal) */
+ uint64_t dfa : 1; /**< Enable triggering on requests to DFA
+ (CSR's and operations are legal) */
+ uint64_t fpa : 1; /**< Enable triggering on requests to FPA
+ (alloc's (loads/IOBDMA's), frees (stores) are legal) */
+ uint64_t key : 1; /**< Enable triggering on requests to KEY memory
+ (loads/IOBDMA's/stores are legal) */
+ uint64_t sli : 1; /**< Enable triggering on requests to SLI and RSL-type
+ CSR's (RSL CSR's, PCI/sRIO bus operations, SLI
+ CSR's) */
+ uint64_t illegal3 : 2; /**< Illegal destinations */
+ uint64_t mio : 1; /**< Enable triggering on MIO accesses
+ (CIU and GPIO CSR's, boot bus accesses) */
+#else
+ uint64_t mio : 1;
+ uint64_t illegal3 : 2;
+ uint64_t sli : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rng : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t illegal2 : 1;
+ uint64_t pow : 1;
+ uint64_t usb0 : 1;
+ uint64_t rad : 1;
+ uint64_t illegal : 12;
+ uint64_t dpi : 1;
+ uint64_t illegal4 : 2;
+ uint64_t fau : 1;
+ uint64_t illegal5 : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn61xx;
+ struct cvmx_trax_trig0_did_cn61xx cn63xx;
+ struct cvmx_trax_trig0_did_cn61xx cn63xxp1;
+ struct cvmx_trax_trig0_did_cn61xx cn66xx;
+ struct cvmx_trax_trig0_did_cn61xx cn68xx;
+ struct cvmx_trax_trig0_did_cn61xx cn68xxp1;
+ struct cvmx_trax_trig0_did_cn61xx cnf71xx;
+};
+typedef union cvmx_trax_trig0_did cvmx_trax_trig0_did_t;
+
+/**
+ * cvmx_tra#_trig0_sid
+ *
+ * TRA_TRIG0_SID = Trace Buffer Filter SourceId Mask
+ *
+ * Description:
+ */
+union cvmx_trax_trig0_sid {
+ uint64_t u64;
+ struct cvmx_trax_trig0_sid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */
+ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */
+ uint64_t pp : 16; /**< Enable triggering from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=15 */
+#else
+ uint64_t pp : 16;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_trax_trig0_sid_s cn31xx;
+ struct cvmx_trax_trig0_sid_s cn38xx;
+ struct cvmx_trax_trig0_sid_s cn38xxp2;
+ struct cvmx_trax_trig0_sid_s cn52xx;
+ struct cvmx_trax_trig0_sid_s cn52xxp1;
+ struct cvmx_trax_trig0_sid_s cn56xx;
+ struct cvmx_trax_trig0_sid_s cn56xxp1;
+ struct cvmx_trax_trig0_sid_s cn58xx;
+ struct cvmx_trax_trig0_sid_s cn58xxp1;
+ struct cvmx_trax_trig0_sid_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */
+ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */
+ uint64_t reserved_4_15 : 12;
+ uint64_t pp : 4; /**< Enable triggering from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=3 */
+#else
+ uint64_t pp : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn61xx;
+ struct cvmx_trax_trig0_sid_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */
+ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pp : 8; /**< Enable triggering from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=7 */
+#else
+ uint64_t pp : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn63xx;
+ struct cvmx_trax_trig0_sid_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */
+ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */
+ uint64_t reserved_6_15 : 10;
+ uint64_t pp : 6; /**< Enable triggering from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=5 */
+#else
+ uint64_t pp : 6;
+ uint64_t reserved_6_15 : 10;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn63xxp1;
+ struct cvmx_trax_trig0_sid_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */
+ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */
+ uint64_t reserved_10_15 : 6;
+ uint64_t pp : 10; /**< Enable triggering from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=9 */
+#else
+ uint64_t pp : 10;
+ uint64_t reserved_10_15 : 6;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn66xx;
+ struct cvmx_trax_trig0_sid_cn63xx cn68xx;
+ struct cvmx_trax_trig0_sid_cn63xx cn68xxp1;
+ struct cvmx_trax_trig0_sid_cn61xx cnf71xx;
+};
+typedef union cvmx_trax_trig0_sid cvmx_trax_trig0_sid_t;
+
+/**
+ * cvmx_tra#_trig1_adr_adr
+ *
+ * TRA_TRIG1_ADR_ADR = Trace Buffer Filter Address Address
+ *
+ * Description:
+ */
+union cvmx_trax_trig1_adr_adr {
+ uint64_t u64;
+ struct cvmx_trax_trig1_adr_adr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t adr : 38; /**< Unmasked Address
+ The combination of TRA_TRIG1_ADR_ADR and
+ TRA_TRIG1_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches */
+#else
+ uint64_t adr : 38;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_trax_trig1_adr_adr_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t adr : 36; /**< Unmasked Address
+ The combination of TRA(0..0)_TRIG1_ADR_ADR and
+ TRA(0..0)_TRIG1_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches */
+#else
+ uint64_t adr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn31xx;
+ struct cvmx_trax_trig1_adr_adr_cn31xx cn38xx;
+ struct cvmx_trax_trig1_adr_adr_cn31xx cn38xxp2;
+ struct cvmx_trax_trig1_adr_adr_cn31xx cn52xx;
+ struct cvmx_trax_trig1_adr_adr_cn31xx cn52xxp1;
+ struct cvmx_trax_trig1_adr_adr_cn31xx cn56xx;
+ struct cvmx_trax_trig1_adr_adr_cn31xx cn56xxp1;
+ struct cvmx_trax_trig1_adr_adr_cn31xx cn58xx;
+ struct cvmx_trax_trig1_adr_adr_cn31xx cn58xxp1;
+ struct cvmx_trax_trig1_adr_adr_s cn61xx;
+ struct cvmx_trax_trig1_adr_adr_s cn63xx;
+ struct cvmx_trax_trig1_adr_adr_s cn63xxp1;
+ struct cvmx_trax_trig1_adr_adr_s cn66xx;
+ struct cvmx_trax_trig1_adr_adr_s cn68xx;
+ struct cvmx_trax_trig1_adr_adr_s cn68xxp1;
+ struct cvmx_trax_trig1_adr_adr_s cnf71xx;
+};
+typedef union cvmx_trax_trig1_adr_adr cvmx_trax_trig1_adr_adr_t;
+
+/**
+ * cvmx_tra#_trig1_adr_msk
+ *
+ * TRA_TRIG1_ADR_MSK = Trace Buffer Filter Address Mask
+ *
+ * Description:
+ */
+union cvmx_trax_trig1_adr_msk {
+ uint64_t u64;
+ struct cvmx_trax_trig1_adr_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t adr : 38; /**< Address Mask
+ The combination of TRA_TRIG1_ADR_ADR and
+ TRA_TRIG1_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches. When a mask bit is not
+ set, the corresponding address bits are assumed
+ to match. Also, note that IOBDMAs do not have
+ proper addresses, so when TRA_TRIG1_CMD[IOBDMA]
+ is set, TRA_TRIG1_ADR_MSK must be zero to
+ guarantee that any IOBDMAs are recognized as
+ triggers. */
+#else
+ uint64_t adr : 38;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_trax_trig1_adr_msk_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t adr : 36; /**< Address Mask
+ The combination of TRA(0..0)_TRIG1_ADR_ADR and
+ TRA(0..0)_TRIG1_ADR_MSK is a masked address to
+ enable tracing of only those commands whose
+ masked address matches. When a mask bit is not
+ set, the corresponding address bits are assumed
+ to match. Also, note that IOBDMAs do not have
+ proper addresses, so when TRA(0..0)_TRIG1_CMD[IOBDMA]
+ is set, TRA(0..0)_TRIG1_ADR_MSK must be zero to
+ guarantee that any IOBDMAs are recognized as
+ triggers. */
+#else
+ uint64_t adr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } cn31xx;
+ struct cvmx_trax_trig1_adr_msk_cn31xx cn38xx;
+ struct cvmx_trax_trig1_adr_msk_cn31xx cn38xxp2;
+ struct cvmx_trax_trig1_adr_msk_cn31xx cn52xx;
+ struct cvmx_trax_trig1_adr_msk_cn31xx cn52xxp1;
+ struct cvmx_trax_trig1_adr_msk_cn31xx cn56xx;
+ struct cvmx_trax_trig1_adr_msk_cn31xx cn56xxp1;
+ struct cvmx_trax_trig1_adr_msk_cn31xx cn58xx;
+ struct cvmx_trax_trig1_adr_msk_cn31xx cn58xxp1;
+ struct cvmx_trax_trig1_adr_msk_s cn61xx;
+ struct cvmx_trax_trig1_adr_msk_s cn63xx;
+ struct cvmx_trax_trig1_adr_msk_s cn63xxp1;
+ struct cvmx_trax_trig1_adr_msk_s cn66xx;
+ struct cvmx_trax_trig1_adr_msk_s cn68xx;
+ struct cvmx_trax_trig1_adr_msk_s cn68xxp1;
+ struct cvmx_trax_trig1_adr_msk_s cnf71xx;
+};
+typedef union cvmx_trax_trig1_adr_msk cvmx_trax_trig1_adr_msk_t;
+
+/**
+ * cvmx_tra#_trig1_cmd
+ *
+ * TRA_TRIG1_CMD = Trace Buffer Filter Command Mask
+ *
+ * Description:
+ *
+ * Notes:
+ * Note that the trace buffer does not do proper IOBDMA address compares. Thus, if IOBDMA is set, then
+ * the address compare must be disabled (i.e. TRA_TRIG1_ADR_MSK set to zero) to guarantee that IOBDMAs
+ * are recognized as triggers.
+ */
+union cvmx_trax_trig1_cmd {
+ uint64_t u64;
+ struct cvmx_trax_trig1_cmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t saa64 : 1; /**< Enable SAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t saa32 : 1; /**< Enable SAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_60_61 : 2;
+ uint64_t faa64 : 1; /**< Enable FAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t faa32 : 1; /**< Enable FAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_56_57 : 2;
+ uint64_t decr64 : 1; /**< Enable DECR64 tracing
+ 0=disable, 1=enable */
+ uint64_t decr32 : 1; /**< Enable DECR32 tracing
+ 0=disable, 1=enable */
+ uint64_t decr16 : 1; /**< Enable DECR16 tracing
+ 0=disable, 1=enable */
+ uint64_t decr8 : 1; /**< Enable DECR8 tracing
+ 0=disable, 1=enable */
+ uint64_t incr64 : 1; /**< Enable INCR64 tracing
+ 0=disable, 1=enable */
+ uint64_t incr32 : 1; /**< Enable INCR32 tracing
+ 0=disable, 1=enable */
+ uint64_t incr16 : 1; /**< Enable INCR16 tracing
+ 0=disable, 1=enable */
+ uint64_t incr8 : 1; /**< Enable INCR8 tracing
+ 0=disable, 1=enable */
+ uint64_t clr64 : 1; /**< Enable CLR64 tracing
+ 0=disable, 1=enable */
+ uint64_t clr32 : 1; /**< Enable CLR32 tracing
+ 0=disable, 1=enable */
+ uint64_t clr16 : 1; /**< Enable CLR16 tracing
+ 0=disable, 1=enable */
+ uint64_t clr8 : 1; /**< Enable CLR8 tracing
+ 0=disable, 1=enable */
+ uint64_t set64 : 1; /**< Enable SET64 tracing
+ 0=disable, 1=enable */
+ uint64_t set32 : 1; /**< Enable SET32 tracing
+ 0=disable, 1=enable */
+ uint64_t set16 : 1; /**< Enable SET16 tracing
+ 0=disable, 1=enable */
+ uint64_t set8 : 1; /**< Enable SET8 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst64 : 1; /**< Enable IOBST64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst32 : 1; /**< Enable IOBST32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst16 : 1; /**< Enable IOBST16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst8 : 1; /**< Enable IOBST8 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_32_35 : 4;
+ uint64_t lckl2 : 1; /**< Enable LCKL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbl2 : 1; /**< Enable WBL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2 : 1; /**< Enable WBIL2 tracing
+ 0=disable, 1=enable */
+ uint64_t invl2 : 1; /**< Enable INVL2 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_27_27 : 1;
+ uint64_t stgl2i : 1; /**< Enable STGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t ltgl2i : 1; /**< Enable LTGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2i : 1; /**< Enable WBIL2I tracing
+ 0=disable, 1=enable */
+ uint64_t fas64 : 1; /**< Enable FAS64 tracing
+ 0=disable, 1=enable */
+ uint64_t fas32 : 1; /**< Enable FAS32 tracing
+ 0=disable, 1=enable */
+ uint64_t sttil1 : 1; /**< Enable STTIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t stfil1 : 1; /**< Enable STFIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_16_19 : 4;
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t iobst : 1; /**< Enable IOBST tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_0_13 : 14;
+#else
+ uint64_t reserved_0_13 : 14;
+ uint64_t iobst : 1;
+ uint64_t iobdma : 1;
+ uint64_t reserved_16_19 : 4;
+ uint64_t stfil1 : 1;
+ uint64_t sttil1 : 1;
+ uint64_t fas32 : 1;
+ uint64_t fas64 : 1;
+ uint64_t wbil2i : 1;
+ uint64_t ltgl2i : 1;
+ uint64_t stgl2i : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t invl2 : 1;
+ uint64_t wbil2 : 1;
+ uint64_t wbl2 : 1;
+ uint64_t lckl2 : 1;
+ uint64_t reserved_32_35 : 4;
+ uint64_t iobst8 : 1;
+ uint64_t iobst16 : 1;
+ uint64_t iobst32 : 1;
+ uint64_t iobst64 : 1;
+ uint64_t set8 : 1;
+ uint64_t set16 : 1;
+ uint64_t set32 : 1;
+ uint64_t set64 : 1;
+ uint64_t clr8 : 1;
+ uint64_t clr16 : 1;
+ uint64_t clr32 : 1;
+ uint64_t clr64 : 1;
+ uint64_t incr8 : 1;
+ uint64_t incr16 : 1;
+ uint64_t incr32 : 1;
+ uint64_t incr64 : 1;
+ uint64_t decr8 : 1;
+ uint64_t decr16 : 1;
+ uint64_t decr32 : 1;
+ uint64_t decr64 : 1;
+ uint64_t reserved_56_57 : 2;
+ uint64_t faa32 : 1;
+ uint64_t faa64 : 1;
+ uint64_t reserved_60_61 : 2;
+ uint64_t saa32 : 1;
+ uint64_t saa64 : 1;
+#endif
+ } s;
+ struct cvmx_trax_trig1_cmd_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63 : 48;
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t iobst : 1; /**< Enable IOBST tracing
+ 0=disable, 1=enable */
+ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing
+ 0=disable, 1=enable */
+ uint64_t stt : 1; /**< Enable STT tracing
+ 0=disable, 1=enable */
+ uint64_t stp : 1; /**< Enable STP tracing
+ 0=disable, 1=enable */
+ uint64_t stc : 1; /**< Enable STC tracing
+ 0=disable, 1=enable */
+ uint64_t stf : 1; /**< Enable STF tracing
+ 0=disable, 1=enable */
+ uint64_t ldt : 1; /**< Enable LDT tracing
+ 0=disable, 1=enable */
+ uint64_t ldi : 1; /**< Enable LDI tracing
+ 0=disable, 1=enable */
+ uint64_t ldd : 1; /**< Enable LDD tracing
+ 0=disable, 1=enable */
+ uint64_t psl1 : 1; /**< Enable PSL1 tracing
+ 0=disable, 1=enable */
+ uint64_t pl2 : 1; /**< Enable PL2 tracing
+ 0=disable, 1=enable */
+ uint64_t dwb : 1; /**< Enable DWB tracing
+ 0=disable, 1=enable */
+#else
+ uint64_t dwb : 1;
+ uint64_t pl2 : 1;
+ uint64_t psl1 : 1;
+ uint64_t ldd : 1;
+ uint64_t ldi : 1;
+ uint64_t ldt : 1;
+ uint64_t stf : 1;
+ uint64_t stc : 1;
+ uint64_t stp : 1;
+ uint64_t stt : 1;
+ uint64_t iobld8 : 1;
+ uint64_t iobld16 : 1;
+ uint64_t iobld32 : 1;
+ uint64_t iobld64 : 1;
+ uint64_t iobst : 1;
+ uint64_t iobdma : 1;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn31xx;
+ struct cvmx_trax_trig1_cmd_cn31xx cn38xx;
+ struct cvmx_trax_trig1_cmd_cn31xx cn38xxp2;
+ struct cvmx_trax_trig1_cmd_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t saa : 1; /**< Enable SAA tracing
+ 0=disable, 1=enable */
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t iobst : 1; /**< Enable IOBST tracing
+ 0=disable, 1=enable */
+ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing
+ 0=disable, 1=enable */
+ uint64_t stt : 1; /**< Enable STT tracing
+ 0=disable, 1=enable */
+ uint64_t stp : 1; /**< Enable STP tracing
+ 0=disable, 1=enable */
+ uint64_t stc : 1; /**< Enable STC tracing
+ 0=disable, 1=enable */
+ uint64_t stf : 1; /**< Enable STF tracing
+ 0=disable, 1=enable */
+ uint64_t ldt : 1; /**< Enable LDT tracing
+ 0=disable, 1=enable */
+ uint64_t ldi : 1; /**< Enable LDI tracing
+ 0=disable, 1=enable */
+ uint64_t ldd : 1; /**< Enable LDD tracing
+ 0=disable, 1=enable */
+ uint64_t psl1 : 1; /**< Enable PSL1 tracing
+ 0=disable, 1=enable */
+ uint64_t pl2 : 1; /**< Enable PL2 tracing
+ 0=disable, 1=enable */
+ uint64_t dwb : 1; /**< Enable DWB tracing
+ 0=disable, 1=enable */
+#else
+ uint64_t dwb : 1;
+ uint64_t pl2 : 1;
+ uint64_t psl1 : 1;
+ uint64_t ldd : 1;
+ uint64_t ldi : 1;
+ uint64_t ldt : 1;
+ uint64_t stf : 1;
+ uint64_t stc : 1;
+ uint64_t stp : 1;
+ uint64_t stt : 1;
+ uint64_t iobld8 : 1;
+ uint64_t iobld16 : 1;
+ uint64_t iobld32 : 1;
+ uint64_t iobld64 : 1;
+ uint64_t iobst : 1;
+ uint64_t iobdma : 1;
+ uint64_t saa : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn52xx;
+ struct cvmx_trax_trig1_cmd_cn52xx cn52xxp1;
+ struct cvmx_trax_trig1_cmd_cn52xx cn56xx;
+ struct cvmx_trax_trig1_cmd_cn52xx cn56xxp1;
+ struct cvmx_trax_trig1_cmd_cn52xx cn58xx;
+ struct cvmx_trax_trig1_cmd_cn52xx cn58xxp1;
+ struct cvmx_trax_trig1_cmd_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t saa64 : 1; /**< Enable SAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t saa32 : 1; /**< Enable SAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_60_61 : 2;
+ uint64_t faa64 : 1; /**< Enable FAA64 tracing
+ 0=disable, 1=enable */
+ uint64_t faa32 : 1; /**< Enable FAA32 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_56_57 : 2;
+ uint64_t decr64 : 1; /**< Enable DECR64 tracing
+ 0=disable, 1=enable */
+ uint64_t decr32 : 1; /**< Enable DECR32 tracing
+ 0=disable, 1=enable */
+ uint64_t decr16 : 1; /**< Enable DECR16 tracing
+ 0=disable, 1=enable */
+ uint64_t decr8 : 1; /**< Enable DECR8 tracing
+ 0=disable, 1=enable */
+ uint64_t incr64 : 1; /**< Enable INCR64 tracing
+ 0=disable, 1=enable */
+ uint64_t incr32 : 1; /**< Enable INCR32 tracing
+ 0=disable, 1=enable */
+ uint64_t incr16 : 1; /**< Enable INCR16 tracing
+ 0=disable, 1=enable */
+ uint64_t incr8 : 1; /**< Enable INCR8 tracing
+ 0=disable, 1=enable */
+ uint64_t clr64 : 1; /**< Enable CLR64 tracing
+ 0=disable, 1=enable */
+ uint64_t clr32 : 1; /**< Enable CLR32 tracing
+ 0=disable, 1=enable */
+ uint64_t clr16 : 1; /**< Enable CLR16 tracing
+ 0=disable, 1=enable */
+ uint64_t clr8 : 1; /**< Enable CLR8 tracing
+ 0=disable, 1=enable */
+ uint64_t set64 : 1; /**< Enable SET64 tracing
+ 0=disable, 1=enable */
+ uint64_t set32 : 1; /**< Enable SET32 tracing
+ 0=disable, 1=enable */
+ uint64_t set16 : 1; /**< Enable SET16 tracing
+ 0=disable, 1=enable */
+ uint64_t set8 : 1; /**< Enable SET8 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst64 : 1; /**< Enable IOBST64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst32 : 1; /**< Enable IOBST32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst16 : 1; /**< Enable IOBST16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobst8 : 1; /**< Enable IOBST8 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing
+ 0=disable, 1=enable */
+ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing
+ 0=disable, 1=enable */
+ uint64_t lckl2 : 1; /**< Enable LCKL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbl2 : 1; /**< Enable WBL2 tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2 : 1; /**< Enable WBIL2 tracing
+ 0=disable, 1=enable */
+ uint64_t invl2 : 1; /**< Enable INVL2 tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_27_27 : 1;
+ uint64_t stgl2i : 1; /**< Enable STGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t ltgl2i : 1; /**< Enable LTGL2I tracing
+ 0=disable, 1=enable */
+ uint64_t wbil2i : 1; /**< Enable WBIL2I tracing
+ 0=disable, 1=enable */
+ uint64_t fas64 : 1; /**< Enable FAS64 tracing
+ 0=disable, 1=enable */
+ uint64_t fas32 : 1; /**< Enable FAS32 tracing
+ 0=disable, 1=enable */
+ uint64_t sttil1 : 1; /**< Enable STTIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t stfil1 : 1; /**< Enable STFIL1 tracing
+ 0=disable, 1=enable */
+ uint64_t stc : 1; /**< Enable STC tracing
+ 0=disable, 1=enable */
+ uint64_t stp : 1; /**< Enable STP tracing
+ 0=disable, 1=enable */
+ uint64_t stt : 1; /**< Enable STT tracing
+ 0=disable, 1=enable */
+ uint64_t stf : 1; /**< Enable STF tracing
+ 0=disable, 1=enable */
+ uint64_t iobdma : 1; /**< Enable IOBDMA tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_10_14 : 5;
+ uint64_t psl1 : 1; /**< Enable PSL1 tracing
+ 0=disable, 1=enable */
+ uint64_t ldd : 1; /**< Enable LDD tracing
+ 0=disable, 1=enable */
+ uint64_t reserved_6_7 : 2;
+ uint64_t dwb : 1; /**< Enable DWB tracing
+ 0=disable, 1=enable */
+ uint64_t rpl2 : 1; /**< Enable RPL2 tracing
+ 0=disable, 1=enable */
+ uint64_t pl2 : 1; /**< Enable PL2 tracing
+ 0=disable, 1=enable */
+ uint64_t ldi : 1; /**< Enable LDI tracing
+ 0=disable, 1=enable */
+ uint64_t ldt : 1; /**< Enable LDT tracing
+ 0=disable, 1=enable */
+ uint64_t nop : 1; /**< Enable NOP tracing
+ 0=disable, 1=enable */
+#else
+ uint64_t nop : 1;
+ uint64_t ldt : 1;
+ uint64_t ldi : 1;
+ uint64_t pl2 : 1;
+ uint64_t rpl2 : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_6_7 : 2;
+ uint64_t ldd : 1;
+ uint64_t psl1 : 1;
+ uint64_t reserved_10_14 : 5;
+ uint64_t iobdma : 1;
+ uint64_t stf : 1;
+ uint64_t stt : 1;
+ uint64_t stp : 1;
+ uint64_t stc : 1;
+ uint64_t stfil1 : 1;
+ uint64_t sttil1 : 1;
+ uint64_t fas32 : 1;
+ uint64_t fas64 : 1;
+ uint64_t wbil2i : 1;
+ uint64_t ltgl2i : 1;
+ uint64_t stgl2i : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t invl2 : 1;
+ uint64_t wbil2 : 1;
+ uint64_t wbl2 : 1;
+ uint64_t lckl2 : 1;
+ uint64_t iobld8 : 1;
+ uint64_t iobld16 : 1;
+ uint64_t iobld32 : 1;
+ uint64_t iobld64 : 1;
+ uint64_t iobst8 : 1;
+ uint64_t iobst16 : 1;
+ uint64_t iobst32 : 1;
+ uint64_t iobst64 : 1;
+ uint64_t set8 : 1;
+ uint64_t set16 : 1;
+ uint64_t set32 : 1;
+ uint64_t set64 : 1;
+ uint64_t clr8 : 1;
+ uint64_t clr16 : 1;
+ uint64_t clr32 : 1;
+ uint64_t clr64 : 1;
+ uint64_t incr8 : 1;
+ uint64_t incr16 : 1;
+ uint64_t incr32 : 1;
+ uint64_t incr64 : 1;
+ uint64_t decr8 : 1;
+ uint64_t decr16 : 1;
+ uint64_t decr32 : 1;
+ uint64_t decr64 : 1;
+ uint64_t reserved_56_57 : 2;
+ uint64_t faa32 : 1;
+ uint64_t faa64 : 1;
+ uint64_t reserved_60_61 : 2;
+ uint64_t saa32 : 1;
+ uint64_t saa64 : 1;
+#endif
+ } cn61xx;
+ struct cvmx_trax_trig1_cmd_cn61xx cn63xx;
+ struct cvmx_trax_trig1_cmd_cn61xx cn63xxp1;
+ struct cvmx_trax_trig1_cmd_cn61xx cn66xx;
+ struct cvmx_trax_trig1_cmd_cn61xx cn68xx;
+ struct cvmx_trax_trig1_cmd_cn61xx cn68xxp1;
+ struct cvmx_trax_trig1_cmd_cn61xx cnf71xx;
+};
+typedef union cvmx_trax_trig1_cmd cvmx_trax_trig1_cmd_t;
+
+/**
+ * cvmx_tra#_trig1_did
+ *
+ * TRA_TRIG1_DID = Trace Buffer Filter DestinationId Mask
+ *
+ * Description:
+ */
+union cvmx_trax_trig1_did {
+ uint64_t u64;
+ struct cvmx_trax_trig1_did_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63 : 51;
+ uint64_t pow : 1; /**< Enable triggering on requests to POW
+ (get work, add work, status/memory/index
+ loads, NULLRd loads, CSR's) */
+ uint64_t reserved_9_11 : 3;
+ uint64_t rng : 1; /**< Enable triggering on requests to RNG
+ (loads/IOBDMA's are legal) */
+ uint64_t zip : 1; /**< Enable triggering on requests to ZIP
+ (doorbell stores are legal) */
+ uint64_t dfa : 1; /**< Enable triggering on requests to DFA
+ (CSR's and operations are legal) */
+ uint64_t fpa : 1; /**< Enable triggering on requests to FPA
+ (alloc's (loads/IOBDMA's), frees (stores) are legal) */
+ uint64_t key : 1; /**< Enable triggering on requests to KEY memory
+ (loads/IOBDMA's/stores are legal) */
+ uint64_t reserved_3_3 : 1;
+ uint64_t illegal3 : 2; /**< Illegal destinations */
+ uint64_t mio : 1; /**< Enable triggering on MIO accesses
+ (CIU and GPIO CSR's, boot bus accesses) */
+#else
+ uint64_t mio : 1;
+ uint64_t illegal3 : 2;
+ uint64_t reserved_3_3 : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rng : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t pow : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_trax_trig1_did_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t illegal : 19; /**< Illegal destinations */
+ uint64_t pow : 1; /**< Enable triggering on requests to POW
+ (get work, add work, status/memory/index
+ loads, NULLRd loads, CSR's) */
+ uint64_t illegal2 : 3; /**< Illegal destinations */
+ uint64_t rng : 1; /**< Enable triggering on requests to RNG
+ (loads/IOBDMA's are legal) */
+ uint64_t zip : 1; /**< Enable triggering on requests to ZIP
+ (doorbell stores are legal) */
+ uint64_t dfa : 1; /**< Enable triggering on requests to DFA
+ (CSR's and operations are legal) */
+ uint64_t fpa : 1; /**< Enable triggering on requests to FPA
+ (alloc's (loads/IOBDMA's), frees (stores) are legal) */
+ uint64_t key : 1; /**< Enable triggering on requests to KEY memory
+ (loads/IOBDMA's/stores are legal) */
+ uint64_t pci : 1; /**< Enable triggering on requests to PCI and RSL-type
+ CSR's (RSL CSR's, PCI bus operations, PCI
+ CSR's) */
+ uint64_t illegal3 : 2; /**< Illegal destinations */
+ uint64_t mio : 1; /**< Enable triggering on CIU and GPIO CSR's */
+#else
+ uint64_t mio : 1;
+ uint64_t illegal3 : 2;
+ uint64_t pci : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rng : 1;
+ uint64_t illegal2 : 3;
+ uint64_t pow : 1;
+ uint64_t illegal : 19;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn31xx;
+ struct cvmx_trax_trig1_did_cn31xx cn38xx;
+ struct cvmx_trax_trig1_did_cn31xx cn38xxp2;
+ struct cvmx_trax_trig1_did_cn31xx cn52xx;
+ struct cvmx_trax_trig1_did_cn31xx cn52xxp1;
+ struct cvmx_trax_trig1_did_cn31xx cn56xx;
+ struct cvmx_trax_trig1_did_cn31xx cn56xxp1;
+ struct cvmx_trax_trig1_did_cn31xx cn58xx;
+ struct cvmx_trax_trig1_did_cn31xx cn58xxp1;
+ struct cvmx_trax_trig1_did_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t illegal5 : 1; /**< Illegal destinations */
+ uint64_t fau : 1; /**< Enable triggering on FAU accesses */
+ uint64_t illegal4 : 2; /**< Illegal destinations */
+ uint64_t dpi : 1; /**< Enable triggering on DPI accesses
+ (DPI NCB CSRs) */
+ uint64_t illegal : 12; /**< Illegal destinations */
+ uint64_t rad : 1; /**< Enable triggering on RAD accesses
+ (doorbells) */
+ uint64_t usb0 : 1; /**< Enable triggering on USB0 accesses
+ (UAHC0 EHCI and OHCI NCB CSRs) */
+ uint64_t pow : 1; /**< Enable triggering on requests to POW
+ (get work, add work, status/memory/index
+ loads, NULLRd loads, CSR's) */
+ uint64_t illegal2 : 1; /**< Illegal destination */
+ uint64_t pko : 1; /**< Enable triggering on PKO accesses
+ (doorbells) */
+ uint64_t ipd : 1; /**< Enable triggering on IPD CSR accesses
+ (IPD CSRs) */
+ uint64_t rng : 1; /**< Enable triggering on requests to RNG
+ (loads/IOBDMA's are legal) */
+ uint64_t zip : 1; /**< Enable triggering on requests to ZIP
+ (doorbell stores are legal) */
+ uint64_t dfa : 1; /**< Enable triggering on requests to DFA
+ (CSR's and operations are legal) */
+ uint64_t fpa : 1; /**< Enable triggering on requests to FPA
+ (alloc's (loads/IOBDMA's), frees (stores) are legal) */
+ uint64_t key : 1; /**< Enable triggering on requests to KEY memory
+ (loads/IOBDMA's/stores are legal) */
+ uint64_t sli : 1; /**< Enable triggering on requests to SLI and RSL-type
+ CSR's (RSL CSR's, PCI/sRIO bus operations, SLI
+ CSR's) */
+ uint64_t illegal3 : 2; /**< Illegal destinations */
+ uint64_t mio : 1; /**< Enable triggering on MIO accesses
+ (CIU and GPIO CSR's, boot bus accesses) */
+#else
+ uint64_t mio : 1;
+ uint64_t illegal3 : 2;
+ uint64_t sli : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t rng : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t illegal2 : 1;
+ uint64_t pow : 1;
+ uint64_t usb0 : 1;
+ uint64_t rad : 1;
+ uint64_t illegal : 12;
+ uint64_t dpi : 1;
+ uint64_t illegal4 : 2;
+ uint64_t fau : 1;
+ uint64_t illegal5 : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } cn61xx;
+ struct cvmx_trax_trig1_did_cn61xx cn63xx;
+ struct cvmx_trax_trig1_did_cn61xx cn63xxp1;
+ struct cvmx_trax_trig1_did_cn61xx cn66xx;
+ struct cvmx_trax_trig1_did_cn61xx cn68xx;
+ struct cvmx_trax_trig1_did_cn61xx cn68xxp1;
+ struct cvmx_trax_trig1_did_cn61xx cnf71xx;
+};
+typedef union cvmx_trax_trig1_did cvmx_trax_trig1_did_t;
+
+/**
+ * cvmx_tra#_trig1_sid
+ *
+ * TRA_TRIG1_SID = Trace Buffer Filter SourceId Mask
+ *
+ * Description:
+ */
+union cvmx_trax_trig1_sid {
+ uint64_t u64;
+ struct cvmx_trax_trig1_sid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */
+ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */
+ uint64_t pp : 16; /**< Enable triggering from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=15 */
+#else
+ uint64_t pp : 16;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_trax_trig1_sid_s cn31xx;
+ struct cvmx_trax_trig1_sid_s cn38xx;
+ struct cvmx_trax_trig1_sid_s cn38xxp2;
+ struct cvmx_trax_trig1_sid_s cn52xx;
+ struct cvmx_trax_trig1_sid_s cn52xxp1;
+ struct cvmx_trax_trig1_sid_s cn56xx;
+ struct cvmx_trax_trig1_sid_s cn56xxp1;
+ struct cvmx_trax_trig1_sid_s cn58xx;
+ struct cvmx_trax_trig1_sid_s cn58xxp1;
+ struct cvmx_trax_trig1_sid_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */
+ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */
+ uint64_t reserved_4_15 : 12;
+ uint64_t pp : 4; /**< Enable triggering from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=3 */
+#else
+ uint64_t pp : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn61xx;
+ struct cvmx_trax_trig1_sid_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */
+ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */
+ uint64_t reserved_8_15 : 8;
+ uint64_t pp : 8; /**< Enable triggering from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=7 */
+#else
+ uint64_t pp : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn63xx;
+ struct cvmx_trax_trig1_sid_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */
+ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */
+ uint64_t reserved_6_15 : 10;
+ uint64_t pp : 6; /**< Enable triggering from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=5 */
+#else
+ uint64_t pp : 6;
+ uint64_t reserved_6_15 : 10;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn63xxp1;
+ struct cvmx_trax_trig1_sid_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */
+ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA,
+ PCI,ZIP,POW, and PKO (writes) */
+ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */
+ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */
+ uint64_t reserved_10_15 : 6;
+ uint64_t pp : 10; /**< Enable triggering from PP[N] with matching SourceID
+ 0=disable, 1=enable per bit N where 0<=N<=9 */
+#else
+ uint64_t pp : 10;
+ uint64_t reserved_10_15 : 6;
+ uint64_t pki : 1;
+ uint64_t pko : 1;
+ uint64_t iobreq : 1;
+ uint64_t dwb : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn66xx;
+ struct cvmx_trax_trig1_sid_cn63xx cn68xx;
+ struct cvmx_trax_trig1_sid_cn63xx cn68xxp1;
+ struct cvmx_trax_trig1_sid_cn61xx cnf71xx;
+};
+typedef union cvmx_trax_trig1_sid cvmx_trax_trig1_sid_t;
+
+#include "cvmx-tra-defs.h"
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-trax-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
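For reference, a minimal sketch of how these generated unions are used: read the raw 64-bit CSR into u64, then decode it through the bitfield view. The CVMX_TRAX_TRIG1_SID(offset) address macro is assumed to be defined earlier in this generated header; TRA unit 0 is an illustration.

    #include "cvmx.h"
    #include "cvmx-trax-defs.h"

    /* Decode the trigger-1 SourceId mask for TRA unit 0.
     * CVMX_TRAX_TRIG1_SID(0) is assumed to be the address macro
     * generated earlier in this header. */
    static void show_trig1_sid(void)
    {
        cvmx_trax_trig1_sid_t sid;

        sid.u64 = cvmx_read_csr(CVMX_TRAX_TRIG1_SID(0));
        cvmx_dprintf("dwb=%u iobreq=%u pko=%u pki=%u pp=0x%x\n",
                     (unsigned)sid.s.dwb, (unsigned)sid.s.iobreq,
                     (unsigned)sid.s.pko, (unsigned)sid.s.pki,
                     (unsigned)sid.s.pp);
    }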
Added: trunk/sys/contrib/octeon-sdk/cvmx-twsi.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-twsi.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-twsi.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,560 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the TWSI / I2C bus
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/i2c.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-twsi.h>
+#else
+#include "cvmx.h"
+#include "cvmx-twsi.h"
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#include "cvmx-csr-db.h"
+#endif
+#endif
+
+//#define PRINT_TWSI_CONFIG
+#ifdef PRINT_TWSI_CONFIG
+#define twsi_printf printf
+#else
+#define twsi_printf(...)
+#define cvmx_csr_db_decode(...)
+#endif
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+# if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static struct i2c_adapter *__cvmx_twsix_get_adapter(int twsi_id)
+{
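+ /* Local mirror of the private octeon_i2c state in the kernel's
+ ** i2c-octeon driver; container_of() below depends on the offset
+ ** of the adap member matching that driver's layout. */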
+ struct octeon_i2c {
+ wait_queue_head_t queue;
+ struct i2c_adapter adap;
+ int irq;
+ int twsi_freq;
+ int sys_freq;
+ resource_size_t twsi_phys;
+ void __iomem *twsi_base;
+ resource_size_t regsize;
+ struct device *dev;
+ int broken_irq_mode;
+ };
+ struct i2c_adapter *adapter;
+ struct octeon_i2c *i2c;
+
+ adapter = i2c_get_adapter(0);
+ if (adapter == NULL)
+ return NULL;
+ i2c = container_of(adapter, struct octeon_i2c, adap);
+ return &i2c[twsi_id].adap;
+}
+#endif
+#endif
+
+
+/**
+ * Do a twsi read from a 7 bit device address using an (optional) internal address.
+ * Up to 8 bytes can be read at a time.
+ *
+ * @param twsi_id which Octeon TWSI bus to use
+ * @param dev_addr Device address (7 bit)
+ * @param internal_addr
+ * Internal address. Can be 0, 1 or 2 bytes in width
+ * @param num_bytes Number of data bytes to read
+ * @param ia_width_bytes
+ * Internal address size in bytes (0, 1, or 2)
+ * @param data Pointer argument where the read data is returned.
+ *
+ * @return read data returned in 'data' argument
+ * Number of bytes read on success
+ * -1 on failure
+ */
+int cvmx_twsix_read_ia(int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes, uint64_t *data)
+{
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+# if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ struct i2c_adapter *adapter;
+ u8 data_buf[8];
+ u8 addr_buf[8];
+ struct i2c_msg msg[2];
+ uint64_t r;
+ int i, j;
+
+ if (ia_width_bytes == 0)
+ return cvmx_twsix_read(twsi_id, dev_addr, num_bytes, data);
+
+ BUG_ON(ia_width_bytes > 2);
+ BUG_ON(num_bytes > 8 || num_bytes < 1);
+
+ adapter = __cvmx_twsix_get_adapter(twsi_id);
+ if (adapter == NULL)
+ return -1;
+
+ for (j = 0, i = ia_width_bytes - 1; i >= 0; i--, j++)
+ addr_buf[j] = (u8)(internal_addr >> (i * 8));
+
+ msg[0].addr = dev_addr;
+ msg[0].flags = 0;
+ msg[0].len = ia_width_bytes;
+ msg[0].buf = addr_buf;
+
+ msg[1].addr = dev_addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = num_bytes;
+ msg[1].buf = data_buf;
+
+ i = i2c_transfer(adapter, msg, 2);
+
+ i2c_put_adapter(adapter);
+
+ if (i == 2) {
+ r = 0;
+ for (i = 0; i < num_bytes; i++)
+ r = (r << 8) | data_buf[i];
+ *data = r;
+ return num_bytes;
+ } else {
+ return -1;
+ }
+# else
+ BUG(); /* The I2C driver is not compiled in */
+# endif
+#else
+ cvmx_mio_twsx_sw_twsi_t sw_twsi_val;
+ cvmx_mio_twsx_sw_twsi_ext_t twsi_ext;
+ int retry_limit = 5;
+
+ if (num_bytes < 1 || num_bytes > 8 || !data || ia_width_bytes < 0 || ia_width_bytes > 2)
+ return -1;
+retry:
+ twsi_ext.u64 = 0;
+ sw_twsi_val.u64 = 0;
+ sw_twsi_val.s.v = 1;
+ sw_twsi_val.s.r = 1;
+ sw_twsi_val.s.sovr = 1;
+ sw_twsi_val.s.size = num_bytes - 1;
+ sw_twsi_val.s.a = dev_addr;
+
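+ /* The low byte of the internal address is split across the IA field
+ ** (bits 7:3) and EOP_IA (bits 2:0); when a second address byte is
+ ** present, it is carried in SW_TWSI_EXT with the EIA bit set. */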
+ if (ia_width_bytes > 0) {
+ sw_twsi_val.s.op = 1;
+ sw_twsi_val.s.ia = (internal_addr >> 3) & 0x1f;
+ sw_twsi_val.s.eop_ia = internal_addr & 0x7;
+ }
+ if (ia_width_bytes == 2) {
+ sw_twsi_val.s.eia = 1;
+ twsi_ext.s.ia = internal_addr >> 8;
+ cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u64);
+ }
+
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+ cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+ while (((cvmx_mio_twsx_sw_twsi_t)(sw_twsi_val.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id)))).s.v)
+ cvmx_wait(1000);
+ twsi_printf("Results:\n");
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+ if (!sw_twsi_val.s.r)
+ {
+ /* Check the reason for the failure. We may need to retry to handle multi-master
+ ** configurations.
+ ** Lost arbitration : 0x38, 0x68, 0xB0, 0x78
+ ** Core busy as slave: 0x80, 0x88, 0xA0, 0xA8, 0xB8, 0xC0, 0xC8
+ */
+ if (sw_twsi_val.s.d == 0x38
+ || sw_twsi_val.s.d == 0x68
+ || sw_twsi_val.s.d == 0xB0
+ || sw_twsi_val.s.d == 0x78
+ || sw_twsi_val.s.d == 0x80
+ || sw_twsi_val.s.d == 0x88
+ || sw_twsi_val.s.d == 0xA0
+ || sw_twsi_val.s.d == 0xA8
+ || sw_twsi_val.s.d == 0xB8
+ || sw_twsi_val.s.d == 0xC8)
+ {
+ if (retry_limit-- > 0)
+ {
+ cvmx_wait_usec(100);
+ goto retry;
+ }
+ }
+ /* For all other errors, return an error code */
+ return -1;
+ }
+
+ /* The low word holds at most 4 data bytes; mask only when fewer than
+ ** 4 were read so the shift count stays in range. */
+ *data = sw_twsi_val.s.d;
+ if (num_bytes < 4)
+ *data &= 0xFFFFFFFF >> (32 - num_bytes*8);
+ if (num_bytes > 4) {
+ twsi_ext.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id));
+ *data |= ((unsigned long long)(twsi_ext.s.d & (0xFFFFFFFF >> (32 - (num_bytes - 4)*8))) << 32);
+ }
+ return num_bytes;
+#endif
+}
+
+/**
+ * Read from a TWSI device (7 bit device address only) without generating any
+ * internal addresses.
+ * Read from 1-8 bytes and returns them in the data pointer.
+ *
+ * @param twsi_id TWSI interface on Octeon to use
+ * @param dev_addr TWSI device address (7 bit only)
+ * @param num_bytes number of bytes to read
+ * @param data Pointer to data read from TWSI device
+ *
+ * @return Number of bytes read on success
+ * -1 on error
+ */
+int cvmx_twsix_read(int twsi_id, uint8_t dev_addr, int num_bytes, uint64_t *data)
+{
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+# if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ struct i2c_adapter *adapter;
+ u8 data_buf[8];
+ struct i2c_msg msg[1];
+ uint64_t r;
+ int i;
+
+ BUG_ON(num_bytes > 8 || num_bytes < 1);
+
+ adapter = __cvmx_twsix_get_adapter(twsi_id);
+ if (adapter == NULL)
+ return -1;
+
+ msg[0].addr = dev_addr;
+ msg[0].flags = I2C_M_RD;
+ msg[0].len = num_bytes;
+ msg[0].buf = data_buf;
+
+ i = i2c_transfer(adapter, msg, 1);
+
+ i2c_put_adapter(adapter);
+
+ if (i == 1) {
+ r = 0;
+ for (i = 0; i < num_bytes; i++)
+ r = (r << 8) | data_buf[i];
+ *data = r;
+ return num_bytes;
+ } else {
+ return -1;
+ }
+# else
+ BUG(); /* The I2C driver is not compiled in */
+# endif
+#else
+ cvmx_mio_twsx_sw_twsi_t sw_twsi_val;
+ cvmx_mio_twsx_sw_twsi_ext_t twsi_ext;
+ int retry_limit = 5;
+
+ if (num_bytes > 8 || num_bytes < 1)
+ return -1;
+retry:
+ sw_twsi_val.u64 = 0;
+ sw_twsi_val.s.v = 1;
+ sw_twsi_val.s.r = 1;
+ sw_twsi_val.s.a = dev_addr;
+ sw_twsi_val.s.sovr = 1;
+ sw_twsi_val.s.size = num_bytes - 1;
+
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+ cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+ while (((cvmx_mio_twsx_sw_twsi_t)(sw_twsi_val.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id)))).s.v)
+ cvmx_wait(1000);
+ twsi_printf("Results:\n");
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+ if (!sw_twsi_val.s.r)
+ {
+ /* Check the reason for the failure. We may need to retry to handle multi-master
+ ** configurations.
+ ** Lost arbitration : 0x38, 0x68, 0xB0, 0x78
+ ** Core busy as slave: 0x80, 0x88, 0xA0, 0xA8, 0xB8, 0xC0, 0xC8
+ */
+ if (sw_twsi_val.s.d == 0x38
+ || sw_twsi_val.s.d == 0x68
+ || sw_twsi_val.s.d == 0xB0
+ || sw_twsi_val.s.d == 0x78
+ || sw_twsi_val.s.d == 0x80
+ || sw_twsi_val.s.d == 0x88
+ || sw_twsi_val.s.d == 0xA0
+ || sw_twsi_val.s.d == 0xA8
+ || sw_twsi_val.s.d == 0xB8
+ || sw_twsi_val.s.d == 0xC8)
+ {
+ if (retry_limit-- > 0)
+ {
+ cvmx_wait_usec(100);
+ goto retry;
+ }
+ }
+ /* For all other errors, return an error code */
+ return -1;
+ }
+
+ /* The low word holds at most 4 data bytes; mask only when fewer than
+ ** 4 were read so the shift count stays in range. */
+ *data = sw_twsi_val.s.d;
+ if (num_bytes < 4)
+ *data &= 0xFFFFFFFF >> (32 - num_bytes*8);
+ if (num_bytes > 4) {
+ twsi_ext.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id));
+ *data |= ((unsigned long long)(twsi_ext.s.d & (0xFFFFFFFF >> (32 - (num_bytes - 4)*8))) << 32);
+ }
+ return num_bytes;
+#endif
+}
+
+/**
+ * Perform a twsi write operation to a 7 bit device address.
+ *
+ * Note that many eeprom devices have page restrictions regarding address boundaries
+ * that can be crossed in one write operation. This is device dependent, and this routine
+ * does nothing in this regard.
+ * This command does not generate any internal addresses.
+ *
+ * @param twsi_id Octeon TWSI interface to use
+ * @param dev_addr TWSI device address
+ * @param num_bytes Number of bytes to write (between 1 and 8 inclusive)
+ * @param data Data to write
+ *
+ * @return 0 on success
+ * -1 on failure
+ */
+int cvmx_twsix_write(int twsi_id, uint8_t dev_addr, int num_bytes, uint64_t data)
+{
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+# if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ struct i2c_adapter *adapter;
+ u8 data_buf[8];
+ struct i2c_msg msg[1];
+ int i, j;
+
+ BUG_ON(num_bytes > 8 || num_bytes < 1);
+
+ adapter = __cvmx_twsix_get_adapter(twsi_id);
+ if (adapter == NULL)
+ return -1;
+
+ for (j = 0, i = num_bytes - 1; i >= 0; i--, j++)
+ data_buf[j] = (u8)(data >> (i * 8));
+
+ msg[0].addr = dev_addr;
+ msg[0].flags = 0;
+ msg[0].len = num_bytes;
+ msg[0].buf = data_buf;
+
+ i = i2c_transfer(adapter, msg, 1);
+
+ i2c_put_adapter(adapter);
+
+ if (i == 1)
+ return num_bytes;
+ else
+ return -1;
+# else
+ BUG(); /* The I2C driver is not compiled in */
+# endif
+#else
+ cvmx_mio_twsx_sw_twsi_t sw_twsi_val;
+
+ if (num_bytes > 8 || num_bytes < 1)
+ return -1;
+
+ sw_twsi_val.u64 = 0;
+ sw_twsi_val.s.v = 1;
+ sw_twsi_val.s.a = dev_addr;
+ sw_twsi_val.s.d = data & 0xffffffff;
+ sw_twsi_val.s.sovr = 1;
+ sw_twsi_val.s.size = num_bytes - 1;
+ if (num_bytes > 4) {
+ /* Upper four bytes go into a separate register */
+ cvmx_mio_twsx_sw_twsi_ext_t twsi_ext;
+ twsi_ext.u64 = 0;
+ twsi_ext.s.d = data >> 32;
+ cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u64);
+ }
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+ cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+ while (((cvmx_mio_twsx_sw_twsi_t)(sw_twsi_val.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id)))).s.v)
+ ;
+ twsi_printf("Results:\n");
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+ if (!sw_twsi_val.s.r)
+ return -1;
+
+ return 0;
+#endif
+}
+
+/**
+ * Write 1-8 bytes to a TWSI device using an internal address.
+ *
+ * @param twsi_id which TWSI interface on Octeon to use
+ * @param dev_addr TWSI device address (7 bit only)
+ * @param internal_addr
+ * TWSI internal address (0, 8, or 16 bits)
+ * @param num_bytes Number of bytes to write (1-8)
+ * @param ia_width_bytes
+ * internal address width, in bytes (0, 1, 2)
+ * @param data Data to write. Data is written MSB first on the twsi bus, and only the lower
+ * num_bytes bytes of the argument are valid. (If a 2 byte write is done, only
+ * the low 2 bytes of the argument are used.)
+ *
+ * @return Number of bytes written on success,
+ * -1 on error
+ */
+int cvmx_twsix_write_ia(int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes, uint64_t data)
+{
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+# if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ struct i2c_adapter *adapter;
+ u8 data_buf[8];
+ u8 addr_buf[8];
+ struct i2c_msg msg[2];
+ int i, j;
+
+ if (ia_width_bytes == 0)
+ return cvmx_twsix_write(twsi_id, dev_addr, num_bytes, data);
+
+ BUG_ON(ia_width_bytes > 2);
+ BUG_ON(num_bytes > 8 || num_bytes < 1);
+
+ adapter = __cvmx_twsix_get_adapter(twsi_id);
+ if (adapter == NULL)
+ return -1;
+
+
+ for (j = 0, i = ia_width_bytes - 1; i >= 0; i--, j++)
+ addr_buf[j] = (u8)(internal_addr >> (i * 8));
+
+ for (j = 0, i = num_bytes - 1; i >= 0; i--, j++)
+ data_buf[j] = (u8)(data >> (i * 8));
+
+ msg[0].addr = dev_addr;
+ msg[0].flags = 0;
+ msg[0].len = ia_width_bytes;
+ msg[0].buf = addr_buf;
+
+ msg[1].addr = dev_addr;
+ msg[1].flags = 0;
+ msg[1].len = num_bytes;
+ msg[1].buf = data_buf;
+
+ i = i2c_transfer(adapter, msg, 2);
+
+ i2c_put_adapter(adapter);
+
+ if (i == 2) {
+ /* Poll until reads succeed, or polling times out */
+ int to = 100;
+ while (to-- > 0) {
+ uint64_t data;
+ if (cvmx_twsix_read(twsi_id, dev_addr, 1, &data) >= 0)
+ break;
+ }
+ }
+
+ if (i == 2)
+ return num_bytes;
+ else
+ return -1;
+# else
+ BUG(); /* The I2C driver is not compiled in */
+# endif
+#else
+ cvmx_mio_twsx_sw_twsi_t sw_twsi_val;
+ cvmx_mio_twsx_sw_twsi_ext_t twsi_ext;
+ int to;
+
+ if (num_bytes < 1 || num_bytes > 8 || ia_width_bytes < 0 || ia_width_bytes > 2)
+ return -1;
+
+ twsi_ext.u64 = 0;
+
+ sw_twsi_val.u64 = 0;
+ sw_twsi_val.s.v = 1;
+ sw_twsi_val.s.sovr = 1;
+ sw_twsi_val.s.size = num_bytes - 1;
+ sw_twsi_val.s.a = dev_addr;
+ sw_twsi_val.s.d = 0xFFFFFFFF & data;
+
+ if (ia_width_bytes > 0) {
+ sw_twsi_val.s.op = 1;
+ sw_twsi_val.s.ia = (internal_addr >> 3) & 0x1f;
+ sw_twsi_val.s.eop_ia = internal_addr & 0x7;
+ }
+ if (ia_width_bytes == 2) {
+ sw_twsi_val.s.eia = 1;
+ twsi_ext.s.ia = internal_addr >> 8;
+ }
+ if (num_bytes > 4)
+ twsi_ext.s.d = data >> 32;
+
+ twsi_printf("%s: twsi_id=%x, dev_addr=%x, internal_addr=%x\n\tnum_bytes=%d, ia_width_bytes=%d, data=%lx\n",
+ __FUNCTION__, twsi_id, dev_addr, internal_addr, num_bytes, ia_width_bytes, data);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u64);
+ cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u64);
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+ cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+ while (((cvmx_mio_twsx_sw_twsi_t)(sw_twsi_val.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id)))).s.v)
+ ;
+ twsi_printf("Results:\n");
+ cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64);
+
+ /* Poll until reads succeed, or polling times out */
+ to = 100;
+ while (to-- > 0) {
+ uint64_t data;
+ if (cvmx_twsix_read(twsi_id, dev_addr, 1, &data) >= 0)
+ break;
+ }
+ if (to <= 0)
+ return -1;
+
+ return num_bytes;
+#endif
+}
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-twsi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
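For reference, a minimal usage sketch of the API implemented above. The 0x50 device address, 16-bit internal offset, and test pattern are made-up illustration values, not anything this commit defines.

    #include "cvmx.h"
    #include "cvmx-twsi.h"

    /* Write one byte to a hypothetical EEPROM at 7-bit address 0x50,
     * internal offset 0x0100, on TWSI bus 0, then read it back. */
    static int eeprom_roundtrip(void)
    {
        uint64_t readback;

        if (cvmx_twsix_write_ia(0, 0x50, 0x0100, 1, 2, 0xA5) < 0)
            return -1;
        if (cvmx_twsix_read_ia(0, 0x50, 0x0100, 1, 2, &readback) < 0)
            return -1;
        return (readback & 0xff) == 0xA5 ? 0 : -1;
    }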
Added: trunk/sys/contrib/octeon-sdk/cvmx-twsi.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-twsi.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-twsi.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,325 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the TWSI / I2C bus
+ *
+ * Note: Currently only 7 bit device addresses are supported
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifndef __CVMX_TWSI_H__
+#define __CVMX_TWSI_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+ /* Extra TWSI Bus Opcodes */
+#define TWSI_SLAVE_ADD 0
+#define TWSI_DATA 1
+#define TWSI_CTL 2
+#define TWSI_CLKCTL_STAT 3 /* R=0 selects CLKCTL, R=1 selects STAT */
+#define TWSI_STAT 3 /* when R = 1 */
+#define TWSI_SLAVE_ADD_EXT 4
+#define TWSI_RST 7
+
+
+/**
+ * Do a twsi read from a 7 bit device address using an (optional) internal address.
+ * Up to 8 bytes can be read at a time.
+ *
+ * @param twsi_id which Octeon TWSI bus to use
+ * @param dev_addr Device address (7 bit)
+ * @param internal_addr
+ * Internal address. Can be 0, 1 or 2 bytes in width
+ * @param num_bytes Number of data bytes to read
+ * @param ia_width_bytes
+ * Internal address size in bytes (0, 1, or 2)
+ * @param data Pointer argument where the read data is returned.
+ *
+ * @return read data returned in 'data' argument
+ * Number of bytes read on success
+ * -1 on failure
+ */
+int cvmx_twsix_read_ia(int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes, uint64_t *data);
+
+
+
+
+/**
+ * A convenience wrapper function around cvmx_twsix_read_ia() that
+ * only supports 8 bit internal addresses.
+ * Reads 1-7 bytes and returns either the value read or an error
+ * code in the return value.
+ *
+ * @param twsi_id which Octeon TWSI bus to use
+ * @param dev_addr Device address (7 bit only)
+ * @param internal_addr
+ * Internal address (8 bit only)
+ * @param num_bytes Number of bytes to read (1-7)
+ *
+ * @return Value read from TWSI on success
+ * -1 on error
+ */
+static inline int64_t cvmx_twsix_read_ia8(int twsi_id, uint8_t dev_addr, uint8_t internal_addr, int num_bytes)
+{
+ uint64_t data;
+ if (num_bytes < 1 || num_bytes > 7)
+ return -1;
+ if (cvmx_twsix_read_ia(twsi_id,dev_addr,internal_addr,num_bytes, 1, &data) < 0)
+ return -1;
+ return data;
+}
+
+/**
+ * A convenience wrapper function around cvmx_twsix_read_ia() that
+ * only supports 16 bit internal addresses.
+ * Reads 1-7 bytes and returns either the value read or an error
+ * code in the return value.
+ *
+ * @param twsi_id which Octeon TWSI bus to use
+ * @param dev_addr Device address (7 bit only)
+ * @param internal_addr
+ * Internal address (16 bit only)
+ * @param num_bytes Number of bytes to read (1-7)
+ *
+ * @return Value read from TWSI on success
+ * -1 on error
+ */
+static inline int64_t cvmx_twsix_read_ia16(int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes)
+{
+ uint64_t data;
+ if (num_bytes < 1 || num_bytes > 7)
+ return -1;
+ if (cvmx_twsix_read_ia(twsi_id, dev_addr, internal_addr, num_bytes, 2, &data) < 0)
+ return -1;
+ return data;
+}
+
+
+
+/**
+ * Read from a TWSI device (7 bit device address only) without generating any
+ * internal addresses.
+ * Read from 1-8 bytes and returns them in the data pointer.
+ *
+ * @param twsi_id TWSI interface on Octeon to use
+ * @param dev_addr TWSI device address (7 bit only)
+ * @param num_bytes number of bytes to read
+ * @param data Pointer to data read from TWSI device
+ *
+ * @return Number of bytes read on success
+ * -1 on error
+ */
+int cvmx_twsix_read(int twsi_id, uint8_t dev_addr, int num_bytes, uint64_t *data);
+
+
+
+/**
+ * Perform a twsi write operation to a 7 bit device address.
+ *
+ * Note that many eeprom devices have page restrictions regarding address boundaries
+ * that can be crossed in one write operation. This is device dependent, and this routine
+ * does nothing in this regard.
+ * This command does not generate any internal addresses.
+ *
+ * @param twsi_id Octeon TWSI interface to use
+ * @param dev_addr TWSI device address
+ * @param num_bytes Number of bytes to write (between 1 and 8 inclusive)
+ * @param data Data to write
+ *
+ * @return 0 on success
+ * -1 on failure
+ */
+int cvmx_twsix_write(int twsi_id, uint8_t dev_addr, int num_bytes, uint64_t data);
+
+/**
+ * Write 1-8 bytes to a TWSI device using an internal address.
+ *
+ * @param twsi_id which TWSI interface on Octeon to use
+ * @param dev_addr TWSI device address (7 bit only)
+ * @param internal_addr
+ * TWSI internal address (0, 8, or 16 bits)
+ * @param num_bytes Number of bytes to write (1-8)
+ * @param ia_width_bytes
+ * internal address width, in bytes (0, 1, 2)
+ * @param data Data to write. Data is written MSB first on the twsi bus, and only the lower
+ * num_bytes bytes of the argument are valid. (If a 2 byte write is done, only
+ * the low 2 bytes of the argument are used.)
+ *
+ * @return Number of bytes written on success,
+ * -1 on error
+ */
+int cvmx_twsix_write_ia(int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes, uint64_t data);
+
+/***********************************************************************
+** Functions below are deprecated, and not recommended for use.
+** They have been superseded by more flexible functions that are
+** now provided.
+************************************************************************/
+
+
+
+
+
+
+/**
+ * Read an 8-bit value from a device on the TWSI / I2C bus
+ *
+ * @param twsi_id Which TWSI bus to use. CN3XXX, CN58XX, and CN50XX only
+ * support 0. CN56XX and CN57XX support 0-1.
+ * @param dev_addr I2C device address (7 bit)
+ * @param internal_addr
+ * Internal device address
+ *
+ * @return 8-bit data or < 0 in case of error
+ */
+static inline int cvmx_twsix_read8(int twsi_id, uint8_t dev_addr, uint8_t internal_addr)
+{
+ return cvmx_twsix_read_ia8(twsi_id, dev_addr, internal_addr, 1);
+}
+
+/**
+ * Read an 8-bit value from a device on the TWSI / I2C bus
+ *
+ * Uses current internal address
+ *
+ * @param twsi_id Which TWSI bus to use. CN3XXX, CN58XX, and CN50XX only
+ * support 0. CN56XX and CN57XX support 0-1.
+ * @param dev_addr I2C device address (7 bit)
+ *
+ * @return 8-bit value or < 0 in case of error
+ */
+static inline int cvmx_twsix_read8_cur_addr(int twsi_id, uint8_t dev_addr)
+{
+ uint64_t data;
+
+ if (cvmx_twsix_read(twsi_id,dev_addr, 1, &data) < 0)
+ return -1;
+ return(data & 0xff);
+}
+
+/**
+ * Write an 8-bit value to a device on the TWSI / I2C bus
+ *
+ * @param twsi_id Which TWSI bus to use. CN3XXX, CN58XX, and CN50XX only
+ * support 0. CN56XX and CN57XX support 0-1.
+ * @param dev_addr I2C device address (7 bit)
+ * @param internal_addr
+ * Internal device address
+ * @param data Data to be written
+ *
+ * @return 0 on success and < 0 in case of error
+ */
+static inline int cvmx_twsix_write8(int twsi_id, uint8_t dev_addr, uint8_t internal_addr, uint8_t data)
+{
+ if (cvmx_twsix_write_ia(twsi_id,dev_addr,internal_addr, 1, 1,data) < 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * Read an 8-bit value from a device on the TWSI / I2C bus zero.
+ *
+ * This function is for compatibility with SDK 1.6.0 and
+ * before which only supported a single TWSI bus.
+ *
+ * @param dev_addr I2C device address (7 bit)
+ * @param internal_addr
+ * Internal device address
+ *
+ * @return 8-bit data or < 0 in case of error
+ */
+static inline int cvmx_twsi_read8(uint8_t dev_addr, uint8_t internal_addr)
+{
+ return cvmx_twsix_read8(0, dev_addr, internal_addr);
+}
+
+/**
+ * Read an 8-bit value from a device on the TWSI / I2C bus zero.
+ *
+ * Uses current internal address
+ *
+ * This function is for compatibility with SDK 1.6.0 and
+ * before which only supported a single TWSI bus.
+ *
+ * @param dev_addr I2C device address (7 bit)
+ *
+ * @return 8-bit value or < 0 in case of error
+ */
+static inline int cvmx_twsi_read8_cur_addr(uint8_t dev_addr)
+{
+ return cvmx_twsix_read8_cur_addr(0, dev_addr);
+}
+
+/**
+ * Write an 8-bit value to a device on the TWSI / I2C bus zero.
+ * This function is for compatibility with SDK 1.6.0 and
+ * before which only supported a single TWSI bus.
+ *
+ * @param dev_addr I2C device address (7 bit)
+ * @param internal_addr
+ * Internal device address
+ * @param data Data to be written
+ *
+ * @return 0 on success and < 0 in case of error
+ */
+static inline int cvmx_twsi_write8(uint8_t dev_addr, uint8_t internal_addr, uint8_t data)
+{
+ return cvmx_twsix_write8(0, dev_addr, internal_addr, data);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_TWSI_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-twsi.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
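A short sketch of the single-byte convenience wrappers declared above; the bus, device address, and register offsets are hypothetical illustration values.

    #include "cvmx.h"
    #include "cvmx-twsi.h"

    /* Read a status register from a hypothetical device 0x2D at offset
     * 0x01 on bus 0, then write a configuration byte to offset 0x02
     * through the 8-bit-internal-address helpers. */
    static int tweak_device(void)
    {
        int status = cvmx_twsix_read8(0, 0x2D, 0x01);
        if (status < 0)
            return -1;
        return cvmx_twsix_write8(0, 0x2D, 0x02, (uint8_t)(status | 0x80));
    }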
Added: trunk/sys/contrib/octeon-sdk/cvmx-uahcx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-uahcx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-uahcx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,2824 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-uahcx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon uahcx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_UAHCX_DEFS_H__
+#define __CVMX_UAHCX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_ASYNCLISTADDR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_ASYNCLISTADDR(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000028ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_ASYNCLISTADDR(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_CONFIGFLAG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_CONFIGFLAG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000050ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_CONFIGFLAG(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000050ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_CTRLDSSEGMENT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_CTRLDSSEGMENT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000020ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_CTRLDSSEGMENT(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_FRINDEX(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_FRINDEX(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F000000001Cull);
+}
+#else
+#define CVMX_UAHCX_EHCI_FRINDEX(block_id) (CVMX_ADD_IO_SEG(0x00016F000000001Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_HCCAPBASE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_HCCAPBASE(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000000ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_HCCAPBASE(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_HCCPARAMS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_HCCPARAMS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000008ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_HCCPARAMS(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_HCSPARAMS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_HCSPARAMS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000004ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_HCSPARAMS(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000004ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_INSNREG00(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_INSNREG00(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000090ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_INSNREG00(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_INSNREG03(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_INSNREG03(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F000000009Cull);
+}
+#else
+#define CVMX_UAHCX_EHCI_INSNREG03(block_id) (CVMX_ADD_IO_SEG(0x00016F000000009Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_INSNREG04(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_INSNREG04(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F00000000A0ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_INSNREG04(block_id) (CVMX_ADD_IO_SEG(0x00016F00000000A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_INSNREG06(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_INSNREG06(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F00000000E8ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_INSNREG06(block_id) (CVMX_ADD_IO_SEG(0x00016F00000000E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_INSNREG07(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_INSNREG07(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F00000000ECull);
+}
+#else
+#define CVMX_UAHCX_EHCI_INSNREG07(block_id) (CVMX_ADD_IO_SEG(0x00016F00000000ECull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_PERIODICLISTBASE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_PERIODICLISTBASE(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000024ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_PERIODICLISTBASE(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000024ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_PORTSCX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((((offset >= 1) && (offset <= 2))) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((((offset >= 1) && (offset <= 2))) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((((offset >= 1) && (offset <= 2))) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((((offset >= 1) && (offset <= 2))) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((((offset >= 1) && (offset <= 2))) && ((block_id == 0))))))
+ cvmx_warn("CVMX_UAHCX_EHCI_PORTSCX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000050ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 4;
+}
+#else
+#define CVMX_UAHCX_EHCI_PORTSCX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0000000050ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_USBCMD(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_USBCMD(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000010ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_USBCMD(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000010ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_USBINTR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_USBINTR(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000018ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_USBINTR(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000018ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_EHCI_USBSTS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_EHCI_USBSTS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000014ull);
+}
+#else
+#define CVMX_UAHCX_EHCI_USBSTS(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000014ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCBULKCURRENTED(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCBULKCURRENTED(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F000000042Cull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCBULKCURRENTED(block_id) (CVMX_ADD_IO_SEG(0x00016F000000042Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCBULKHEADED(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCBULKHEADED(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000428ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCBULKHEADED(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000428ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCCOMMANDSTATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCCOMMANDSTATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000408ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCCOMMANDSTATUS(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000408ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCCONTROL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCCONTROL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000404ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCCONTROL(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000404ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCCONTROLCURRENTED(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCCONTROLCURRENTED(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000424ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCCONTROLCURRENTED(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000424ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCCONTROLHEADED(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCCONTROLHEADED(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000420ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCCONTROLHEADED(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000420ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCDONEHEAD(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCDONEHEAD(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000430ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCDONEHEAD(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000430ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCFMINTERVAL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCFMINTERVAL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000434ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCFMINTERVAL(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000434ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCFMNUMBER(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCFMNUMBER(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F000000043Cull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCFMNUMBER(block_id) (CVMX_ADD_IO_SEG(0x00016F000000043Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCFMREMAINING(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCFMREMAINING(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000438ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCFMREMAINING(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000438ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCHCCA(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCHCCA(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000418ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCHCCA(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000418ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCINTERRUPTDISABLE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCINTERRUPTDISABLE(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000414ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCINTERRUPTDISABLE(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000414ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCINTERRUPTENABLE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCINTERRUPTENABLE(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000410ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCINTERRUPTENABLE(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000410ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCINTERRUPTSTATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCINTERRUPTSTATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F000000040Cull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCINTERRUPTSTATUS(block_id) (CVMX_ADD_IO_SEG(0x00016F000000040Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCLSTHRESHOLD(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCLSTHRESHOLD(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000444ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCLSTHRESHOLD(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000444ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCPERIODCURRENTED(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCPERIODCURRENTED(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F000000041Cull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCPERIODCURRENTED(block_id) (CVMX_ADD_IO_SEG(0x00016F000000041Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCPERIODICSTART(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCPERIODICSTART(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000440ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCPERIODICSTART(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000440ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCREVISION(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCREVISION(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000400ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCREVISION(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000400ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCRHDESCRIPTORA(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCRHDESCRIPTORA(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000448ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCRHDESCRIPTORA(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000448ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCRHDESCRIPTORB(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCRHDESCRIPTORB(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F000000044Cull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCRHDESCRIPTORB(block_id) (CVMX_ADD_IO_SEG(0x00016F000000044Cull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCRHPORTSTATUSX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((((offset >= 1) && (offset <= 2))) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((((offset >= 1) && (offset <= 2))) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((((offset >= 1) && (offset <= 2))) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((((offset >= 1) && (offset <= 2))) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((((offset >= 1) && (offset <= 2))) && ((block_id == 0))))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCRHPORTSTATUSX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000450ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 4;
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCRHPORTSTATUSX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0000000450ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_HCRHSTATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_HCRHSTATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000450ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_HCRHSTATUS(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000450ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_INSNREG06(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_INSNREG06(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000498ull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_INSNREG06(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000498ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UAHCX_OHCI0_INSNREG07(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UAHCX_OHCI0_INSNREG07(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F000000049Cull);
+}
+#else
+#define CVMX_UAHCX_OHCI0_INSNREG07(block_id) (CVMX_ADD_IO_SEG(0x00016F000000049Cull))
+#endif
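+
+/* Usage sketch, assuming the cvmx_read64_uint32() accessor from
+ * cvmx-access.h: the functions and macros above only compute CSR
+ * addresses, so an actual register access pairs one of them with a
+ * 32-bit load or store. The helper name below is illustrative only.
+ */
+static inline uint32_t example_uahcx_ohci0_read_hcrevision(void)
+{
+    /* block_id is 0 on every model that implements OHCI0. */
+    return cvmx_read64_uint32(CVMX_UAHCX_OHCI0_HCREVISION(0));
+}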
+
+/**
+ * cvmx_uahc#_ehci_asynclistaddr
+ *
+ * ASYNCLISTADDR = Current Asynchronous List Address Register
+ *
+ * This 32-bit register contains the address of the next asynchronous queue head to be executed. If the host
+ * controller is in 64-bit mode (as indicated by a one in the 64-bit Addressing Capability field in the
+ * HCCPARAMS register), then the most significant 32 bits of every control data structure address come from
+ * the CTRLDSSEGMENT register (See Section 2.3.5). Bits [4:0] of this register cannot be modified by system
+ * software and will always return a zero when read. The memory structure referenced by this physical memory
+ * pointer is assumed to be 32-byte (cache line) aligned.
+ */
+union cvmx_uahcx_ehci_asynclistaddr {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_asynclistaddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lpl : 27; /**< Link Pointer Low (LPL). These bits correspond to memory address signals [31:5],
+ respectively. This field may only reference a Queue Head (QH). */
+ uint32_t reserved_0_4 : 5;
+#else
+ uint32_t reserved_0_4 : 5;
+ uint32_t lpl : 27;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_asynclistaddr_s cn61xx;
+ struct cvmx_uahcx_ehci_asynclistaddr_s cn63xx;
+ struct cvmx_uahcx_ehci_asynclistaddr_s cn63xxp1;
+ struct cvmx_uahcx_ehci_asynclistaddr_s cn66xx;
+ struct cvmx_uahcx_ehci_asynclistaddr_s cn68xx;
+ struct cvmx_uahcx_ehci_asynclistaddr_s cn68xxp1;
+ struct cvmx_uahcx_ehci_asynclistaddr_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_asynclistaddr cvmx_uahcx_ehci_asynclistaddr_t;
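+
+/* Sketch: because bits [4:0] read as zero, software packs a 32-byte-aligned
+ * queue head pointer by dropping its low five bits into the lpl field. The
+ * helper name is illustrative, not SDK API.
+ */
+static inline uint32_t example_ehci_pack_asynclistaddr(uint32_t qh_addr)
+{
+    cvmx_uahcx_ehci_asynclistaddr_t r;
+    r.u32 = 0;
+    r.s.lpl = qh_addr >> 5;  /* qh_addr must be 32-byte aligned */
+    return r.u32;
+}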
+
+/**
+ * cvmx_uahc#_ehci_configflag
+ *
+ * CONFIGFLAG = Configure Flag Register
+ * This register is in the auxiliary power well. It is only reset by hardware when the auxiliary power is initially
+ * applied or in response to a host controller reset.
+ */
+union cvmx_uahcx_ehci_configflag {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_configflag_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_1_31 : 31;
+ uint32_t cf : 1; /**< Configure Flag (CF). Host software sets this bit as the last action in
+ its process of configuring the Host Controller (see Section 4.1). This bit controls the
+ default port-routing control logic. Bit values and side-effects are listed below.
+ 0b: Port routing control logic default-routes each port to an implementation
+ dependent classic host controller.
+ 1b: Port routing control logic default-routes all ports to this host controller. */
+#else
+ uint32_t cf : 1;
+ uint32_t reserved_1_31 : 31;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_configflag_s cn61xx;
+ struct cvmx_uahcx_ehci_configflag_s cn63xx;
+ struct cvmx_uahcx_ehci_configflag_s cn63xxp1;
+ struct cvmx_uahcx_ehci_configflag_s cn66xx;
+ struct cvmx_uahcx_ehci_configflag_s cn68xx;
+ struct cvmx_uahcx_ehci_configflag_s cn68xxp1;
+ struct cvmx_uahcx_ehci_configflag_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_configflag cvmx_uahcx_ehci_configflag_t;
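+
+/* Sketch: the last configuration step is writing CF=1 so that port routing
+ * defaults to this EHCI controller rather than the companion controllers.
+ * The helper name is illustrative only.
+ */
+static inline uint32_t example_ehci_configflag_claim_ports(void)
+{
+    cvmx_uahcx_ehci_configflag_t r;
+    r.u32 = 0;
+    r.s.cf = 1;  /* route all ports to this host controller by default */
+    return r.u32;
+}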
+
+/**
+ * cvmx_uahc#_ehci_ctrldssegment
+ *
+ * CTRLDSSEGMENT = Control Data Structure Segment Register
+ *
+ * This 32-bit register corresponds to the most significant address bits [63:32] for all EHCI data structures. If
+ * the 64-bit Addressing Capability field in HCCPARAMS is a zero, then this register is not used. Software
+ * cannot write to it and a read from this register will return zeros.
+ *
+ * If the 64-bit Addressing Capability field in HCCPARAMS is a one, then this register is used with the link
+ * pointers to construct 64-bit addresses to EHCI control data structures. This register is concatenated with the
+ * link pointer from either the PERIODICLISTBASE, ASYNCLISTADDR, or any control data structure link
+ * field to construct a 64-bit address.
+ *
+ * This register allows the host software to locate all control data structures within the same 4 Gigabyte
+ * memory segment.
+ */
+union cvmx_uahcx_ehci_ctrldssegment {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_ctrldssegment_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ctrldsseg : 32; /**< Control Data Structure Segment Address Bits [63:32] */
+#else
+ uint32_t ctrldsseg : 32;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_ctrldssegment_s cn61xx;
+ struct cvmx_uahcx_ehci_ctrldssegment_s cn63xx;
+ struct cvmx_uahcx_ehci_ctrldssegment_s cn63xxp1;
+ struct cvmx_uahcx_ehci_ctrldssegment_s cn66xx;
+ struct cvmx_uahcx_ehci_ctrldssegment_s cn68xx;
+ struct cvmx_uahcx_ehci_ctrldssegment_s cn68xxp1;
+ struct cvmx_uahcx_ehci_ctrldssegment_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_ctrldssegment cvmx_uahcx_ehci_ctrldssegment_t;
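+
+/* Sketch: in 64-bit mode the controller concatenates CTRLDSSEGMENT (address
+ * bits [63:32]) with a 32-bit link pointer to form the full data structure
+ * address. The helper name is illustrative only.
+ */
+static inline uint64_t example_ehci_full_address(uint32_t ctrldsseg, uint32_t link_ptr)
+{
+    return ((uint64_t)ctrldsseg << 32) | link_ptr;
+}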
+
+/**
+ * cvmx_uahc#_ehci_frindex
+ *
+ * FRINDEX = Frame Index Register
+ * This register is used by the host controller to index into the periodic frame list. The register updates every
+ * 125 microseconds (once each micro-frame). Bits [N:3] are used to select a particular entry in the Periodic
+ * Frame List during periodic schedule execution. The number of bits used for the index depends on the size of
+ * the frame list as set by system software in the Frame List Size field in the USBCMD register.
+ * This register cannot be written unless the Host Controller is in the Halted state as indicated by the
+ * HCHalted bit. A write to this register while the Run/Stop bit is set to a one (USBCMD register) produces
+ * undefined results. Writes to this register also affect the SOF value.
+ */
+union cvmx_uahcx_ehci_frindex {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_frindex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_14_31 : 18;
+ uint32_t fi : 14; /**< Frame Index. The value in this register increments at the end of each time frame (e.g.
+ micro-frame). Bits [N:3] are used for the Frame List current index. This means that each
+ location of the frame list is accessed 8 times (frames or micro-frames) before moving to
+ the next index. The following illustrates values of N based on the value of the Frame List
+ Size field in the USBCMD register.
+ USBCMD[Frame List Size] Number Elements N
+ 00b (1024) 12
+ 01b (512) 11
+ 10b (256) 10
+ 11b Reserved */
+#else
+ uint32_t fi : 14;
+ uint32_t reserved_14_31 : 18;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_frindex_s cn61xx;
+ struct cvmx_uahcx_ehci_frindex_s cn63xx;
+ struct cvmx_uahcx_ehci_frindex_s cn63xxp1;
+ struct cvmx_uahcx_ehci_frindex_s cn66xx;
+ struct cvmx_uahcx_ehci_frindex_s cn68xx;
+ struct cvmx_uahcx_ehci_frindex_s cn68xxp1;
+ struct cvmx_uahcx_ehci_frindex_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_frindex cvmx_uahcx_ehci_frindex_t;
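+
+/* Sketch: FRINDEX bits [N:3] select the periodic frame list entry, so each
+ * entry is visited for eight consecutive micro-frames. For the power-of-two
+ * list sizes (1024, 512, 256) the index is a shift and mask. The helper
+ * name is illustrative only.
+ */
+static inline uint32_t example_ehci_frindex_to_entry(uint32_t fi, uint32_t list_elements)
+{
+    return (fi >> 3) & (list_elements - 1);  /* list_elements: 1024, 512 or 256 */
+}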
+
+/**
+ * cvmx_uahc#_ehci_hccapbase
+ *
+ * HCCAPBASE = Host Controller BASE Capability Register
+ *
+ */
+union cvmx_uahcx_ehci_hccapbase {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_hccapbase_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hciversion : 16; /**< Host Controller Interface Version Number */
+ uint32_t reserved_8_15 : 8;
+ uint32_t caplength : 8; /**< Capability Registers Length */
+#else
+ uint32_t caplength : 8;
+ uint32_t reserved_8_15 : 8;
+ uint32_t hciversion : 16;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_hccapbase_s cn61xx;
+ struct cvmx_uahcx_ehci_hccapbase_s cn63xx;
+ struct cvmx_uahcx_ehci_hccapbase_s cn63xxp1;
+ struct cvmx_uahcx_ehci_hccapbase_s cn66xx;
+ struct cvmx_uahcx_ehci_hccapbase_s cn68xx;
+ struct cvmx_uahcx_ehci_hccapbase_s cn68xxp1;
+ struct cvmx_uahcx_ehci_hccapbase_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_hccapbase cvmx_uahcx_ehci_hccapbase_t;
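+
+/* Sketch: CAPLENGTH is the offset from the capability register base to the
+ * operational registers, and HCIVERSION is a BCD revision (0x0100 for EHCI
+ * 1.0). The helper name is illustrative only.
+ */
+static inline uint64_t example_ehci_operational_base(uint64_t cap_base, uint32_t hccapbase_raw)
+{
+    cvmx_uahcx_ehci_hccapbase_t cap;
+    cap.u32 = hccapbase_raw;
+    return cap_base + cap.s.caplength;
+}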
+
+/**
+ * cvmx_uahc#_ehci_hccparams
+ *
+ * HCCPARAMS = Host Controller Capability Parameters
+ * Multiple Mode control (time-base bit functionality), addressing capability
+ */
+union cvmx_uahcx_ehci_hccparams {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_hccparams_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t eecp : 8; /**< EHCI Extended Capabilities Pointer. Default = Implementation Dependent.
+ This optional field indicates the existence of a capabilities list. A value of 00h indicates
+ no extended capabilities are implemented. A non-zero value in this register indicates the
+ offset in PCI configuration space of the first EHCI extended capability. The pointer value
+ must be 40h or greater if implemented to maintain the consistency of the PCI header
+ defined for this class of device. */
+ uint32_t ist : 4; /**< Isochronous Scheduling Threshold. Default = implementation dependent. This field
+ indicates, relative to the current position of the executing host controller, where software
+ can reliably update the isochronous schedule. When bit [7] is zero, the value of the least
+ significant 3 bits indicates the number of micro-frames a host controller can hold a set of
+ isochronous data structures (one or more) before flushing the state. When bit [7] is a
+ one, then host software assumes the host controller may cache an isochronous data
+ structure for an entire frame. Refer to Section 4.7.2.1 for details on how software uses
+ this information for scheduling isochronous transfers. */
+ uint32_t reserved_3_3 : 1;
+ uint32_t aspc : 1; /**< Asynchronous Schedule Park Capability. Default = Implementation dependent. If this
+ bit is set to a one, then the host controller supports the park feature for high-speed
+ queue heads in the Asynchronous Schedule. The feature can be disabled or enabled
+ and set to a specific level by using the Asynchronous Schedule Park Mode Enable and
+ Asynchronous Schedule Park Mode Count fields in the USBCMD register. */
+ uint32_t pflf : 1; /**< Programmable Frame List Flag. Default = Implementation dependent. If this bit is set
+ to a zero, then system software must use a frame list length of 1024 elements with this
+ host controller. The USBCMD register Frame List Size field is a read-only register and
+ should be set to zero.
+ If set to a one, then system software can specify and use a smaller frame list and
+ configure the host controller via the USBCMD register Frame List Size field. The frame
+ list must always be aligned on a 4K page boundary. This requirement ensures that the
+ frame list is always physically contiguous. */
+ uint32_t ac64 : 1; /**< 64-bit Addressing Capability. This field documents the addressing range capability of
+ this implementation. The value of this field determines whether software should use the
+ data structures defined in Section 3 (32-bit) or those defined in Appendix B (64-bit).
+ Values for this field have the following interpretation:
+ - 0: data structures using 32-bit address memory pointers
+ - 1: data structures using 64-bit address memory pointers */
+#else
+ uint32_t ac64 : 1;
+ uint32_t pflf : 1;
+ uint32_t aspc : 1;
+ uint32_t reserved_3_3 : 1;
+ uint32_t ist : 4;
+ uint32_t eecp : 8;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_hccparams_s cn61xx;
+ struct cvmx_uahcx_ehci_hccparams_s cn63xx;
+ struct cvmx_uahcx_ehci_hccparams_s cn63xxp1;
+ struct cvmx_uahcx_ehci_hccparams_s cn66xx;
+ struct cvmx_uahcx_ehci_hccparams_s cn68xx;
+ struct cvmx_uahcx_ehci_hccparams_s cn68xxp1;
+ struct cvmx_uahcx_ehci_hccparams_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_hccparams cvmx_uahcx_ehci_hccparams_t;
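+
+/* Sketch: software keys its data structure layout off AC64; when it is set,
+ * the 64-bit structures and the CTRLDSSEGMENT register are in use. The
+ * helper name is illustrative only.
+ */
+static inline int example_ehci_uses_64bit_structures(uint32_t hccparams_raw)
+{
+    cvmx_uahcx_ehci_hccparams_t p;
+    p.u32 = hccparams_raw;
+    return p.s.ac64 != 0;
+}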
+
+/**
+ * cvmx_uahc#_ehci_hcsparams
+ *
+ * HCSPARAMS = Host Controller Structural Parameters
+ * This is a set of fields that are structural parameters: Number of downstream ports, etc.
+ */
+union cvmx_uahcx_ehci_hcsparams {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_hcsparams_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t dpn : 4; /**< Debug Port Number. Optional. This register identifies which of the host controller ports
+ is the debug port. The value is the port number (one-based) of the debug port. A nonzero
+ value in this field indicates the presence of a debug port. The value in this register
+ must not be greater than N_PORTS (see below). */
+ uint32_t reserved_17_19 : 3;
+ uint32_t p_indicator : 1; /**< Port Indicator. This bit indicates whether the ports support port
+ indicator control. When this bit is a one, the port status and control
+ registers include a read/writeable field for controlling the state of
+ the port indicator. */
+ uint32_t n_cc : 4; /**< Number of Companion Controller. This field indicates the number of
+ companion controllers associated with this USB 2.0 host controller.
+ A zero in this field indicates there are no companion host controllers.
+ Port-ownership hand-off is not supported. Only high-speed devices are
+ supported on the host controller root ports.
+ A value larger than zero in this field indicates there are companion USB 1.1 host
+ controller(s). Port-ownership hand-offs are supported. High-, Full- and Low-speed
+ devices are supported on the host controller root ports. */
+ uint32_t n_pcc : 4; /**< Number of Ports per Companion Controller (N_PCC). This field indicates
+ the number of ports supported per companion host controller. It is used to
+ indicate the port routing configuration to system software. */
+ uint32_t prr : 1; /**< Port Routing Rules. This field indicates the method used by this implementation for
+ how all ports are mapped to companion controllers. The value of this field has
+ the following interpretation:
+ 0 The first N_PCC ports are routed to the lowest numbered function
+ companion host controller, the next N_PCC port are routed to the next
+ lowest function companion controller, and so on.
+ 1 The port routing is explicitly enumerated by the first N_PORTS elements
+ of the HCSP-PORTROUTE array. */
+ uint32_t reserved_5_6 : 2;
+ uint32_t ppc : 1; /**< Port Power Control. This field indicates whether the host controller
+ implementation includes port power control. A one in this bit indicates the ports have
+ port power switches. A zero in this bit indicates the ports do not have port power
+ switches. The value of this field affects the functionality of the Port Power field
+ in each port status and control register (see Section 2.3.8). */
+ uint32_t n_ports : 4; /**< This field specifies the number of physical downstream ports implemented
+ on this host controller. The value of this field determines how many port registers are
+ addressable in the Operational Register Space (see Table 2-8). Valid values are in the
+ range of 1H to FH. A zero in this field is undefined. */
+#else
+ uint32_t n_ports : 4;
+ uint32_t ppc : 1;
+ uint32_t reserved_5_6 : 2;
+ uint32_t prr : 1;
+ uint32_t n_pcc : 4;
+ uint32_t n_cc : 4;
+ uint32_t p_indicator : 1;
+ uint32_t reserved_17_19 : 3;
+ uint32_t dpn : 4;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_hcsparams_s cn61xx;
+ struct cvmx_uahcx_ehci_hcsparams_s cn63xx;
+ struct cvmx_uahcx_ehci_hcsparams_s cn63xxp1;
+ struct cvmx_uahcx_ehci_hcsparams_s cn66xx;
+ struct cvmx_uahcx_ehci_hcsparams_s cn68xx;
+ struct cvmx_uahcx_ehci_hcsparams_s cn68xxp1;
+ struct cvmx_uahcx_ehci_hcsparams_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_hcsparams cvmx_uahcx_ehci_hcsparams_t;
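+
+/* Sketch: N_PORTS bounds the PORTSC registers a driver may touch (ports are
+ * numbered from 1), and PPC tells it whether Port Power is software
+ * controlled. The helper name is illustrative only.
+ */
+static inline unsigned int example_ehci_num_ports(uint32_t hcsparams_raw)
+{
+    cvmx_uahcx_ehci_hcsparams_t p;
+    p.u32 = hcsparams_raw;
+    return p.s.n_ports;  /* valid range 1..15; 0 is undefined */
+}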
+
+/**
+ * cvmx_uahc#_ehci_insnreg00
+ *
+ * EHCI_INSNREG00 = EHCI Programmable Microframe Base Value Register (Synopsys Specific)
+ * This register allows you to change the microframe length value (default is microframe SOF = 125 us) to reduce the simulation time.
+ */
+union cvmx_uahcx_ehci_insnreg00 {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_insnreg00_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_14_31 : 18;
+ uint32_t mfmc : 13; /**< For the byte interface (8 bits), bits <13:1> are used as the
+ 1-microframe counter. For the word interface (16 bits), bits <12:1>
+ are used as the 1-microframe counter. */
+ uint32_t en : 1; /**< Writing 1'b1 enables this register.
+ Note: Do not enable this register for the gate-level netlist. */
+#else
+ uint32_t en : 1;
+ uint32_t mfmc : 13;
+ uint32_t reserved_14_31 : 18;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_insnreg00_s cn61xx;
+ struct cvmx_uahcx_ehci_insnreg00_s cn63xx;
+ struct cvmx_uahcx_ehci_insnreg00_s cn63xxp1;
+ struct cvmx_uahcx_ehci_insnreg00_s cn66xx;
+ struct cvmx_uahcx_ehci_insnreg00_s cn68xx;
+ struct cvmx_uahcx_ehci_insnreg00_s cn68xxp1;
+ struct cvmx_uahcx_ehci_insnreg00_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_insnreg00 cvmx_uahcx_ehci_insnreg00_t;
+
+/**
+ * cvmx_uahc#_ehci_insnreg03
+ *
+ * EHCI_INSNREG03 = EHCI Timing Adjust Register (Synopsys Specific)
+ * This register allows you to change timing such as the PHY Tx turnaround delay.
+ */
+union cvmx_uahcx_ehci_insnreg03 {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_insnreg03_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_13_31 : 19;
+ uint32_t txtx_tadao : 3; /**< Tx-Tx Turnaround Delay Add-on. This field specifies the extra delay in phy_clks to
+ be added to the "Transmit to Transmit turnaround delay" value maintained in the core.
+ The default value of this field is 0, which is sufficient for most PHYs. For some
+ PHYs that insert wait states during the token packet, a value greater than 0 may be
+ required to meet the minimum transmit-to-transmit turnaround time. The recommendation
+ is to use the default value of 0 and change it only if there is an issue with the
+ minimum transmit-to-transmit turnaround time. This value should be programmed during
+ core initialization and should not be changed afterwards. */
+ uint32_t reserved_9_9 : 1;
+ uint32_t ta_off : 8; /**< Time-Available Offset. This value indicates the additional number of bytes to be
+ accommodated for the time-available calculation. The USB traffic on the bus can be started
+ only when sufficient time is available to complete the packet within the EOF1 point. Refer
+ to the USB 2.0 specification for details of the EOF1 point. This time-available
+ calculation is done in the hardware, and can be further offset by programming a value in
+ this location.
+ Note: Time-available calculation is added for future flexibility. The application is not
+ required to program this field by default. */
+ uint32_t reserved_0_0 : 1;
+#else
+ uint32_t reserved_0_0 : 1;
+ uint32_t ta_off : 8;
+ uint32_t reserved_9_9 : 1;
+ uint32_t txtx_tadao : 3;
+ uint32_t reserved_13_31 : 19;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_insnreg03_s cn61xx;
+ struct cvmx_uahcx_ehci_insnreg03_s cn63xx;
+ struct cvmx_uahcx_ehci_insnreg03_s cn63xxp1;
+ struct cvmx_uahcx_ehci_insnreg03_s cn66xx;
+ struct cvmx_uahcx_ehci_insnreg03_s cn68xx;
+ struct cvmx_uahcx_ehci_insnreg03_s cn68xxp1;
+ struct cvmx_uahcx_ehci_insnreg03_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_insnreg03 cvmx_uahcx_ehci_insnreg03_t;
+
+/**
+ * cvmx_uahc#_ehci_insnreg04
+ *
+ * EHCI_INSNREG04 = EHCI Debug Register (Synopsys Specific)
+ * This register is used only for debug purposes.
+ */
+union cvmx_uahcx_ehci_insnreg04 {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_insnreg04_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t auto_dis : 1; /**< Automatic feature disable.
+ 1'b0: (default) The automatic feature is enabled. The Suspend signal is deasserted
+ (logic level 1'b1) when run/stop is reset by software, but the hchalted bit is not
+ yet set.
+ 1'b1: Disables the automatic feature, which takes all ports out of suspend when software
+ clears the run/stop bit. This is for backward compatibility.
+ This bit has an added functionality in release 2.80a and later. For systems where the host
+ is halted without waking up all ports out of suspend, the port can become stuck because
+ the PHYCLK is not running when the halt is programmed. To avoid this, the DWC H20AHB host
+ core automatically pulls ports out of suspend when the host is halted by software. This bit
+ is used to disable this automatic function. */
+ uint32_t nakrf_dis : 1; /**< NAK Reload Fix Disable.
+ 1b0: NAK reload fix enabled.
+ 1b1: NAK reload fix disabled (incorrect NAK reload transition at the end of a microframe,
+ for backward compatibility with Release 2.40c). For more information see the USB 2.0
+ Host-AHB Release Notes. */
+ uint32_t reserved_3_3 : 1;
+ uint32_t pesd : 1; /**< Scales down port enumeration time.
+ 1'b1: scale down enabled
+ 1'b0: scale down disabled
+ This is for simulation only. */
+ uint32_t hcp_fw : 1; /**< HCCPARAMS Field Writeable.
+ 1'b1: The HCCPARAMS register's bits 17, 15:4, and 2:0 become writable.
+ 1'b0: The HCCPARAMS register's bits 17, 15:4, and 2:0 are not writable. */
+ uint32_t hcp_rw : 1; /**< HCCPARAMS Register Writeable.
+ 1'b1: The HCCPARAMS register becomes writable.
+ 1'b0: The HCCPARAMS register is not writable. */
+#else
+ uint32_t hcp_rw : 1;
+ uint32_t hcp_fw : 1;
+ uint32_t pesd : 1;
+ uint32_t reserved_3_3 : 1;
+ uint32_t nakrf_dis : 1;
+ uint32_t auto_dis : 1;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_insnreg04_s cn61xx;
+ struct cvmx_uahcx_ehci_insnreg04_s cn63xx;
+ struct cvmx_uahcx_ehci_insnreg04_s cn63xxp1;
+ struct cvmx_uahcx_ehci_insnreg04_s cn66xx;
+ struct cvmx_uahcx_ehci_insnreg04_s cn68xx;
+ struct cvmx_uahcx_ehci_insnreg04_s cn68xxp1;
+ struct cvmx_uahcx_ehci_insnreg04_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_insnreg04 cvmx_uahcx_ehci_insnreg04_t;
+
+/**
+ * cvmx_uahc#_ehci_insnreg06
+ *
+ * EHCI_INSNREG06 = EHCI AHB Error Status Register (Synopsys Specific)
+ * This register contains AHB Error Status.
+ */
+union cvmx_uahcx_ehci_insnreg06 {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_insnreg06_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t vld : 1; /**< AHB Error Captured. Indicator that an AHB error was encountered and values were captured.
+ To clear this field the application must write a 0 to it. */
+ uint32_t reserved_0_30 : 31;
+#else
+ uint32_t reserved_0_30 : 31;
+ uint32_t vld : 1;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_insnreg06_s cn61xx;
+ struct cvmx_uahcx_ehci_insnreg06_s cn63xx;
+ struct cvmx_uahcx_ehci_insnreg06_s cn63xxp1;
+ struct cvmx_uahcx_ehci_insnreg06_s cn66xx;
+ struct cvmx_uahcx_ehci_insnreg06_s cn68xx;
+ struct cvmx_uahcx_ehci_insnreg06_s cn68xxp1;
+ struct cvmx_uahcx_ehci_insnreg06_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_insnreg06 cvmx_uahcx_ehci_insnreg06_t;
+
+/**
+ * cvmx_uahc#_ehci_insnreg07
+ *
+ * EHCI_INSNREG07 = EHCI AHB Error Address Register (Synopsys Specific)
+ * This register contains the AHB error address.
+ */
+union cvmx_uahcx_ehci_insnreg07 {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_insnreg07_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t err_addr : 32; /**< AHB Master Error Address. AHB address of the control phase at which the AHB error occurred */
+#else
+ uint32_t err_addr : 32;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_insnreg07_s cn61xx;
+ struct cvmx_uahcx_ehci_insnreg07_s cn63xx;
+ struct cvmx_uahcx_ehci_insnreg07_s cn63xxp1;
+ struct cvmx_uahcx_ehci_insnreg07_s cn66xx;
+ struct cvmx_uahcx_ehci_insnreg07_s cn68xx;
+ struct cvmx_uahcx_ehci_insnreg07_s cn68xxp1;
+ struct cvmx_uahcx_ehci_insnreg07_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_insnreg07 cvmx_uahcx_ehci_insnreg07_t;
+
+/**
+ * cvmx_uahc#_ehci_periodiclistbase
+ *
+ * PERIODICLISTBASE = Periodic Frame List Base Address Register
+ *
+ * This 32-bit register contains the beginning address of the Periodic Frame List in the system memory. If the
+ * host controller is in 64-bit mode (as indicated by a one in the 64-bit Addressing Capability field in the
+ * HCCPARAMS register), then the most significant 32 bits of every control data structure address come
+ * from the CTRLDSSEGMENT register (see Section 2.3.5). System software loads this register prior to
+ * starting the schedule execution by the Host Controller (see 4.1). The memory structure referenced by this
+ * physical memory pointer is assumed to be 4-Kbyte aligned. The contents of this register are combined with
+ * the Frame Index Register (FRINDEX) to enable the Host Controller to step through the Periodic Frame List
+ * in sequence.
+ */
+union cvmx_uahcx_ehci_periodiclistbase {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_periodiclistbase_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t baddr : 20; /**< Base Address (Low). These bits correspond to memory address signals [31:12], respectively. */
+ uint32_t reserved_0_11 : 12;
+#else
+ uint32_t reserved_0_11 : 12;
+ uint32_t baddr : 20;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_periodiclistbase_s cn61xx;
+ struct cvmx_uahcx_ehci_periodiclistbase_s cn63xx;
+ struct cvmx_uahcx_ehci_periodiclistbase_s cn63xxp1;
+ struct cvmx_uahcx_ehci_periodiclistbase_s cn66xx;
+ struct cvmx_uahcx_ehci_periodiclistbase_s cn68xx;
+ struct cvmx_uahcx_ehci_periodiclistbase_s cn68xxp1;
+ struct cvmx_uahcx_ehci_periodiclistbase_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_periodiclistbase cvmx_uahcx_ehci_periodiclistbase_t;
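+
+/* Sketch: the controller forms a periodic list entry address from the
+ * 4 KB-aligned base and the frame list index, four bytes per entry. The
+ * helper name is illustrative only.
+ */
+static inline uint32_t example_ehci_periodic_entry_addr(uint32_t periodiclistbase_raw, uint32_t index)
+{
+    cvmx_uahcx_ehci_periodiclistbase_t b;
+    b.u32 = periodiclistbase_raw;
+    return ((uint32_t)b.s.baddr << 12) | (index << 2);
+}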
+
+/**
+ * cvmx_uahc#_ehci_portsc#
+ *
+ * PORTSCX = Port X Status and Control Register
+ * Default: 00002000h (w/PPC set to one); 00003000h (w/PPC set to a zero)
+ */
+union cvmx_uahcx_ehci_portscx {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_portscx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t wkoc_e : 1; /**< Wake on Over-current Enable. Writing this bit to a
+ one enables the port to be sensitive to over-current conditions as wake-up events.
+ This field is zero if Port Power is zero. */
+ uint32_t wkdscnnt_e : 1; /**< Wake on Disconnect Enable. Writing this bit to a one enables the port to be
+ sensitive to device disconnects as wake-up events.
+ This field is zero if Port Power is zero. */
+ uint32_t wkcnnt_e : 1; /**< Wake on Connect Enable. Writing this bit to a one enables the port to be
+ sensitive to device connects as wake-up events.
+ This field is zero if Port Power is zero. */
+ uint32_t ptc : 4; /**< Port Test Control. When this field is zero, the port is NOT
+ operating in a test mode. A non-zero value indicates that it is operating
+ in test mode and the specific test mode is indicated by the specific value.
+ The encoding of the test mode bits are (0110b - 1111b are reserved):
+ Bits Test Mode
+ 0000b Test mode not enabled
+ 0001b Test J_STATE
+ 0010b Test K_STATE
+ 0011b Test SE0_NAK
+ 0100b Test Packet
+ 0101b Test FORCE_ENABLE */
+ uint32_t pic : 2; /**< Port Indicator Control. Writing to these bits has no effect if the
+ P_INDICATOR bit in the HCSPARAMS register is a zero. If P_INDICATOR bit is a one,
+ then the bit encodings are:
+ Bit Value Meaning
+ 00b Port indicators are off
+ 01b Amber
+ 10b Green
+ 11b Undefined
+ This field is zero if Port Power is zero. */
+ uint32_t po : 1; /**< Port Owner. This bit unconditionally goes to a 0b when the
+ Configured bit in the CONFIGFLAG register makes a 0b to 1b transition. This bit
+ unconditionally goes to 1b whenever the Configured bit is zero.
+ System software uses this field to release ownership of the port to a selected host
+ controller (in the event that the attached device is not a high-speed device). Software
+ writes a one to this bit when the attached device is not a high-speed device. A one in
+ this bit means that a companion host controller owns and controls the port. */
+ uint32_t pp : 1; /**< Port Power. The function of this bit depends on the value of the Port
+ Power Control (PPC) field in the HCSPARAMS register. The behavior is as follows:
+ PPC PP Operation
+ 0b 1b RO - Host controller does not have port power control switches.
+ Each port is hard-wired to power.
+ 1b 1b/0b R/W - Host controller has port power control switches. This bit
+ represents the current setting of the switch (0 = off, 1 = on). When
+ power is not available on a port (i.e. PP equals a 0), the port is
+ nonfunctional and will not report attaches, detaches, etc.
+ When an over-current condition is detected on a powered port and PPC is a one, the PP
+ bit in each affected port may be transitioned by the host controller from a 1 to 0
+ (removing power from the port). */
+ uint32_t lsts : 2; /**< Line Status. These bits reflect the current logical levels of the D+ (bit 11) and D- (bit 10)
+ signal lines. These bits are used for detection of low-speed USB devices prior to
+ the port reset and enable sequence. This field is valid only when the port enable bit is
+ zero and the current connect status bit is set to a one.
+ The encoding of the bits is:
+ Bits[11:10] USB State Interpretation
+ 00b SE0 Not Low-speed device, perform EHCI reset
+ 10b J-state Not Low-speed device, perform EHCI reset
+ 01b K-state Low-speed device, release ownership of port
+ 11b Undefined Not Low-speed device, perform EHCI reset.
+ The value of this field is undefined if Port Power is zero. */
+ uint32_t reserved_9_9 : 1;
+ uint32_t prst : 1; /**< Port Reset. 1=Port is in Reset. 0=Port is not in Reset. Default = 0. When
+ software writes a one to this bit (from a zero), the bus reset sequence as defined in the
+ USB Specification Revision 2.0 is started. Software writes a zero to this bit to terminate
+ the bus reset sequence. Software must keep this bit at a one long enough to ensure the
+ reset sequence, as specified in the USB Specification Revision 2.0, completes. Note:
+ when software writes this bit to a one, it must also write a zero to the Port Enable bit.
+ Note that when software writes a zero to this bit there may be a delay before the bit
+ status changes to a zero. The bit status will not read as a zero until after the reset has
+ completed. If the port is in high-speed mode after reset is complete, the host controller
+ will automatically enable this port (e.g. set the Port Enable bit to a one). A host controller
+ must terminate the reset and stabilize the state of the port within 2 milliseconds of
+ software transitioning this bit from a one to a zero. For example: if the port detects that
+ the attached device is high-speed during reset, then the host controller must have the
+ port in the enabled state within 2ms of software writing this bit to a zero.
+ The HCHalted bit in the USBSTS register should be a zero before software attempts to
+ use this bit. The host controller may hold Port Reset asserted to a one when the
+ HCHalted bit is a one.
+ This field is zero if Port Power is zero. */
+ uint32_t spd : 1; /**< Suspend. 1=Port in suspend state. 0=Port not in suspend state. Default = 0. Port
+ Enabled Bit and Suspend bit of this register define the port states as follows:
+ Bits [Port Enabled, Suspend] Port State
+ 0X Disable
+ 10 Enable
+ 11 Suspend
+ When in suspend state, downstream propagation of data is blocked on this port, except
+ for port reset. The blocking occurs at the end of the current transaction, if a transaction
+ was in progress when this bit was written to 1. In the suspend state, the port is sensitive
+ to resume detection. Note that the bit status does not change until the port is
+ suspended and that there may be a delay in suspending a port if there is a transaction
+ currently in progress on the USB.
+ A write of zero to this bit is ignored by the host controller. The host controller will
+ unconditionally set this bit to a zero when:
+ . Software sets the Force Port Resume bit to a zero (from a one).
+ . Software sets the Port Reset bit to a one (from a zero).
+ If host software sets this bit to a one when the port is not enabled (i.e. Port enabled bit is
+ a zero) the results are undefined.
+ This field is zero if Port Power is zero. */
+ uint32_t fpr : 1; /**< Force Port Resume.
+ 1= Resume detected/driven on port. 0=No resume (K-state)
+ detected/driven on port. Default = 0. The functionality defined for manipulating
+ this bit depends on the value of the Suspend bit. For example, if the port is not
+ suspended (Suspend and Enabled bits are a one) and software transitions this bit to a
+ one, then the effects on the bus are undefined.
+ Software sets this bit to a 1 to drive resume signaling. The Host Controller sets this bit to
+ a 1 if a J-to-K transition is detected while the port is in the Suspend state. When this bit
+ transitions to a one because a J-to-K transition is detected, the Port Change Detect bit in
+ the USBSTS register is also set to a one. If software sets this bit to a one, the host
+ controller must not set the Port Change Detect bit.
+ Note that when the EHCI controller owns the port, the resume sequence follows the
+ defined sequence documented in the USB Specification Revision 2.0. The resume
+ signaling (Full-speed 'K') is driven on the port as long as this bit remains a one. Software
+ must appropriately time the Resume and set this bit to a zero when the appropriate
+ amount of time has elapsed. Writing a zero (from one) causes the port to return to high-
+ speed mode (forcing the bus below the port into a high-speed idle). This bit will remain a
+ one until the port has switched to the high-speed idle. The host controller must complete
+ this transition within 2 milliseconds of software setting this bit to a zero.
+ This field is zero if Port Power is zero. */
+ uint32_t occ : 1; /**< Over-current Change. This bit is set to a one when there is a change to Over-current Active.
+ Software clears this bit by writing a one to this bit position. */
+ uint32_t oca : 1; /**< Over-current Active. 1=This port currently has an over-current condition. 0=This port does not
+ have an over-current condition. This bit will automatically transition from a one to a zero when
+ the over current condition is removed. */
+ uint32_t pedc : 1; /**< Port Enable/Disable Change. 1=Port enabled/disabled status has changed.
+ 0=No change. Default = 0. For the root hub, this bit gets set to a one only when a port is
+ disabled due to the appropriate conditions existing at the EOF2 point (See Chapter 11 of
+ the USB Specification for the definition of a Port Error). Software clears this bit by writing
+ a 1 to it.
+ This field is zero if Port Power is zero. */
+ uint32_t ped : 1; /**< Port Enabled/Disabled. 1=Enable. 0=Disable. Ports can only be
+ enabled by the host controller as a part of the reset and enable. Software cannot enable
+ a port by writing a one to this field. The host controller will only set this bit to a one when
+ the reset sequence determines that the attached device is a high-speed device.
+ Ports can be disabled by either a fault condition (disconnect event or other fault
+ condition) or by host software. Note that the bit status does not change until the port
+ state actually changes. There may be a delay in disabling or enabling a port due to other
+ host controller and bus events. See Section 4.2 for full details on port reset and enable.
+ When the port is disabled (0b) downstream propagation of data is blocked on this port,
+ except for reset.
+ This field is zero if Port Power is zero. */
+ uint32_t csc : 1; /**< Connect Status Change. 1=Change in Current Connect Status. 0=No change. Indicates a change
+ has occurred in the port's Current Connect Status. The host controller sets this bit for all
+ changes to the port device connect status, even if system software has not cleared an existing
+ connect status change. For example, if the insertion status changes twice before system software
+ has cleared the changed condition, hub hardware will be setting an already-set bit
+ (i.e., the bit will remain set). Software sets this bit to 0 by writing a 1 to it.
+ This field is zero if Port Power is zero. */
+ uint32_t ccs : 1; /**< Current Connect Status. 1=Device is present on port. 0=No device is present.
+ This value reflects the current state of the port, and may not correspond
+ directly to the event that caused the Connect Status Change bit (Bit 1) to be set.
+ This field is zero if Port Power is zero. */
+#else
+ uint32_t ccs : 1;
+ uint32_t csc : 1;
+ uint32_t ped : 1;
+ uint32_t pedc : 1;
+ uint32_t oca : 1;
+ uint32_t occ : 1;
+ uint32_t fpr : 1;
+ uint32_t spd : 1;
+ uint32_t prst : 1;
+ uint32_t reserved_9_9 : 1;
+ uint32_t lsts : 2;
+ uint32_t pp : 1;
+ uint32_t po : 1;
+ uint32_t pic : 2;
+ uint32_t ptc : 4;
+ uint32_t wkcnnt_e : 1;
+ uint32_t wkdscnnt_e : 1;
+ uint32_t wkoc_e : 1;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_portscx_s cn61xx;
+ struct cvmx_uahcx_ehci_portscx_s cn63xx;
+ struct cvmx_uahcx_ehci_portscx_s cn63xxp1;
+ struct cvmx_uahcx_ehci_portscx_s cn66xx;
+ struct cvmx_uahcx_ehci_portscx_s cn68xx;
+ struct cvmx_uahcx_ehci_portscx_s cn68xxp1;
+ struct cvmx_uahcx_ehci_portscx_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_portscx cvmx_uahcx_ehci_portscx_t;
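+
+/* Sketch: a port reset is started by writing PRST=1 with PED=0, waiting the
+ * reset time, then clearing PRST and polling until it reads back zero. Only
+ * the value construction is shown; note the write-1-to-clear change bits
+ * (CSC, PEDC, OCC) are zeroed so the read-modify-write does not clear them
+ * as a side effect. The helper name is illustrative only.
+ */
+static inline uint32_t example_ehci_portsc_begin_reset(uint32_t portsc_raw)
+{
+    cvmx_uahcx_ehci_portscx_t p;
+    p.u32 = portsc_raw;
+    p.s.prst = 1;  /* drive the bus reset */
+    p.s.ped = 0;   /* must be written as zero together with PRST=1 */
+    p.s.csc = 0;   /* preserve write-1-to-clear change bits */
+    p.s.pedc = 0;
+    p.s.occ = 0;
+    return p.u32;
+}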
+
+/**
+ * cvmx_uahc#_ehci_usbcmd
+ *
+ * USBCMD = USB Command Register
+ * The Command Register indicates the command to be executed by the serial bus host controller. Writing to the register causes a command to be executed.
+ */
+union cvmx_uahcx_ehci_usbcmd {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_usbcmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31 : 8;
+ uint32_t itc : 8; /**< Interrupt Threshold Control. This field is used by system software
+ to select the maximum rate at which the host controller will issue interrupts. The only
+ valid values are defined below. If software writes an invalid value to this register, the
+ results are undefined. Value Maximum Interrupt Interval
+ 00h Reserved
+ 01h 1 micro-frame
+ 02h 2 micro-frames
+ 04h 4 micro-frames
+ 08h 8 micro-frames (default, equates to 1 ms)
+ 10h 16 micro-frames (2 ms)
+ 20h 32 micro-frames (4 ms)
+ 40h 64 micro-frames (8 ms) */
+ uint32_t reserved_12_15 : 4;
+ uint32_t aspm_en : 1; /**< Asynchronous Schedule Park Mode Enable. */
+ uint32_t reserved_10_10 : 1;
+ uint32_t aspmc : 2; /**< Asynchronous Schedule Park Mode Count. */
+ uint32_t lhcr : 1; /**< Light Host Controller Reset */
+ uint32_t iaa_db : 1; /**< Interrupt on Async Advance Doorbell.This bit is used as a doorbell by
+ software to tell the host controller to issue an interrupt the next time it advances
+ asynchronous schedule. Software must write a 1 to this bit to ring the doorbell.
+ When the host controller has evicted all appropriate cached schedule state, it sets the
+ Interrupt on Async Advance status bit in the USBSTS register. If the Interrupt on Async
+ Advance Enable bit in the USBINTR register is a one then the host controller will assert
+ an interrupt at the next interrupt threshold. */
+ uint32_t as_en : 1; /**< Asynchronous Schedule Enable .This bit controls whether the host
+ controller skips processing the Asynchronous Schedule. Values mean:
+ - 0: Do not process the Asynchronous Schedule
+ - 1: Use the ASYNCLISTADDR register to access the Asynchronous Schedule. */
+ uint32_t ps_en : 1; /**< Periodic Schedule Enable. This bit controls whether the host
+ controller skips processing the Periodic Schedule. Values mean:
+ - 0: Do not process the Periodic Schedule
+ - 1: Use the PERIODICLISTBASE register to access the Periodic Schedule. */
+ uint32_t fls : 2; /**< Frame List Size. This field is R/W only if Programmable
+ Frame List Flag in the HCCPARAMS registers is set to a one. This field specifies the
+ size of the frame list. The size the frame list controls which bits in the Frame Index
+ Register should be used for the Frame List Current index. Values mean:
+ 00b: 1024 elements (4096 bytes) Default value
+ 01b: 512 elements (2048 bytes)
+ 10b: 256 elements (1024 bytes) - for resource-constrained environments
+ 11b: Reserved */
+ uint32_t hcreset : 1; /**< Host Controller Reset (HCRESET). This control bit is used by software to reset
+ the host controller. The effects of this on Root Hub registers are similar to a Chip
+ Hardware Reset. When software writes a one to this bit, the Host Controller resets
+ its internal pipelines, timers, counters, state machines, etc. to their initial
+ value. Any transaction currently in progress on USB is immediately terminated.
+ A USB reset is not driven on downstream ports.
+ This bit is set to zero by the Host Controller when the reset process is complete. Software cannot
+ terminate the reset process early by writing zero to this register.
+ Software should not set this bit to a one when the HCHalted bit in the USBSTS register is a zero.
+ Attempting to reset an actively running host controller will result in undefined behavior. */
+ uint32_t rs : 1; /**< Run/Stop (RS).
+ 1=Run. 0=Stop.
+ When set to a 1, the Host Controller proceeds with execution of the schedule.
+ The Host Controller continues execution as long as this bit is set to a 1.
+ When this bit is set to 0, the Host Controller completes the current and any
+ actively pipelined transactions on the USB and then halts. The Host
+ Controller must halt within 16 micro-frames after software clears the Run bit. The HC
+ Halted bit in the status register indicates when the Host Controller has finished its
+ pending pipelined transactions and has entered the stopped state. Software must not
+ write a one to this field unless the host controller is in the Halted state (i.e. HCHalted in
+ the USBSTS register is a one). Doing so will yield undefined results. */
+#else
+ uint32_t rs : 1;
+ uint32_t hcreset : 1;
+ uint32_t fls : 2;
+ uint32_t ps_en : 1;
+ uint32_t as_en : 1;
+ uint32_t iaa_db : 1;
+ uint32_t lhcr : 1;
+ uint32_t aspmc : 2;
+ uint32_t reserved_10_10 : 1;
+ uint32_t aspm_en : 1;
+ uint32_t reserved_12_15 : 4;
+ uint32_t itc : 8;
+ uint32_t reserved_24_31 : 8;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_usbcmd_s cn61xx;
+ struct cvmx_uahcx_ehci_usbcmd_s cn63xx;
+ struct cvmx_uahcx_ehci_usbcmd_s cn63xxp1;
+ struct cvmx_uahcx_ehci_usbcmd_s cn66xx;
+ struct cvmx_uahcx_ehci_usbcmd_s cn68xx;
+ struct cvmx_uahcx_ehci_usbcmd_s cn68xxp1;
+ struct cvmx_uahcx_ehci_usbcmd_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_usbcmd cvmx_uahcx_ehci_usbcmd_t;
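+
+/* Sketch: HCRESET may only be set after HCHalted is observed in USBSTS; the
+ * controller clears the bit itself when the reset completes, so software
+ * polls for that. Only the value construction is shown. The helper name is
+ * illustrative only.
+ */
+static inline uint32_t example_ehci_usbcmd_request_reset(uint32_t usbcmd_raw)
+{
+    cvmx_uahcx_ehci_usbcmd_t c;
+    c.u32 = usbcmd_raw;
+    c.s.rs = 0;       /* controller must already be halted */
+    c.s.hcreset = 1;  /* self-clearing when the reset finishes */
+    return c.u32;
+}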
+
+/**
+ * cvmx_uahc#_ehci_usbintr
+ *
+ * USBINTR = USB Interrupt Enable Register
+ * This register enables and disables reporting of the corresponding interrupt to the software. When a bit is set
+ * and the corresponding interrupt is active, an interrupt is generated to the host. Interrupt sources that are
+ * disabled in this register still appear in the USBSTS to allow the software to poll for events.
+ * Each interrupt enable bit description indicates whether it is dependent on the interrupt threshold mechanism.
+ * Note: for all enable register bits, 1= Enabled, 0= Disabled
+ */
+union cvmx_uahcx_ehci_usbintr {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_usbintr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31 : 26;
+ uint32_t ioaa_en : 1; /**< Interrupt on Async Advance Enable. When this bit is a one, and the Interrupt on
+ Async Advance bit in the USBSTS register is a one, the host controller will issue an
+ interrupt at the next interrupt threshold. The interrupt is acknowledged by software
+ clearing the Interrupt on Async Advance bit. */
+ uint32_t hserr_en : 1; /**< Host System Error Enable. When this bit is a one, and the Host System
+ Error Status bit in the USBSTS register is a one, the host controller will issue an
+ interrupt. The interrupt is acknowledged by software clearing the Host System Error bit. */
+ uint32_t flro_en : 1; /**< Frame List Rollover Enable. When this bit is a one, and the Frame List
+ Rollover bit in the USBSTS register is a one, the host controller will issue an
+ interrupt. The interrupt is acknowledged by software clearing the Frame List Rollover bit. */
+ uint32_t pci_en : 1; /**< Port Change Interrupt Enable. When this bit is a one, and the Port Change Detect bit in
+ the USBSTS register is a one, the host controller will issue an interrupt.
+ The interrupt is acknowledged by software clearing the Port Change Detect bit. */
+ uint32_t usberrint_en : 1; /**< USB Error Interrupt Enable. When this bit is a one, and the USBERRINT
+ bit in the USBSTS register is a one, the host controller will issue an interrupt at the next
+ interrupt threshold. The interrupt is acknowledged by software clearing the USBERRINT bit. */
+ uint32_t usbint_en : 1; /**< USB Interrupt Enable. When this bit is a one, and the USBINT bit in the USBSTS register
+ is a one, the host controller will issue an interrupt at the next interrupt threshold.
+ The interrupt is acknowledged by software clearing the USBINT bit. */
+#else
+ uint32_t usbint_en : 1;
+ uint32_t usberrint_en : 1;
+ uint32_t pci_en : 1;
+ uint32_t flro_en : 1;
+ uint32_t hserr_en : 1;
+ uint32_t ioaa_en : 1;
+ uint32_t reserved_6_31 : 26;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_usbintr_s cn61xx;
+ struct cvmx_uahcx_ehci_usbintr_s cn63xx;
+ struct cvmx_uahcx_ehci_usbintr_s cn63xxp1;
+ struct cvmx_uahcx_ehci_usbintr_s cn66xx;
+ struct cvmx_uahcx_ehci_usbintr_s cn68xx;
+ struct cvmx_uahcx_ehci_usbintr_s cn68xxp1;
+ struct cvmx_uahcx_ehci_usbintr_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_usbintr cvmx_uahcx_ehci_usbintr_t;
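+
+/* Sketch: a typical driver enables the completion, error, port-change, and
+ * host-system-error interrupts and polls the rest through USBSTS. The
+ * helper name is illustrative only.
+ */
+static inline uint32_t example_ehci_usbintr_default(void)
+{
+    cvmx_uahcx_ehci_usbintr_t i;
+    i.u32 = 0;
+    i.s.usbint_en = 1;
+    i.s.usberrint_en = 1;
+    i.s.pci_en = 1;    /* port change detect */
+    i.s.hserr_en = 1;  /* host system error */
+    return i.u32;
+}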
+
+/**
+ * cvmx_uahc#_ehci_usbsts
+ *
+ * USBSTS = USB Status Register
+ * This register indicates pending interrupts and various states of the Host Controller. The status resulting from
+ * a transaction on the serial bus is not indicated in this register. Software sets a bit to 0 in this register by
+ * writing a 1 to it.
+ */
+union cvmx_uahcx_ehci_usbsts {
+ uint32_t u32;
+ struct cvmx_uahcx_ehci_usbsts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t ass : 1; /**< Asynchronous Schedule Status. The bit reports the current real
+ status of the Asynchronous Schedule. If this bit is a zero then the status of the
+ Asynchronous Schedule is disabled. If this bit is a one then the status of the
+ Asynchronous Schedule is enabled. The Host Controller is not required to immediately
+ disable or enable the Asynchronous Schedule when software transitions the
+ Asynchronous Schedule Enable bit in the USBCMD register. When this bit and the
+ Asynchronous Schedule Enable bit are the same value, the Asynchronous Schedule is
+ either enabled (1) or disabled (0). */
+ uint32_t pss : 1; /**< Periodic Schedule Status. The bit reports the current real status of
+ the Periodic Schedule. If this bit is a zero then the status of the Periodic
+ Schedule is disabled. If this bit is a one then the status of the Periodic Schedule
+ is enabled. The Host Controller is not required to immediately disable or enable the
+ Periodic Schedule when software transitions the Periodic Schedule Enable bit in
+ the USBCMD register. When this bit and the Periodic Schedule Enable bit are the
+ same value, the Periodic Schedule is either enabled (1) or disabled (0). */
+ uint32_t reclm : 1; /**< Reclamation.This is a read-only status bit, which is used to detect an
+ empty asynchronous schedule. */
+ uint32_t hchtd : 1; /**< HCHalted. This bit is a zero whenever the Run/Stop bit is a one. The
+ Host Controller sets this bit to one after it has stopped executing as a result of the
+ Run/Stop bit being set to 0, either by software or by the Host Controller hardware (e.g.
+ internal error). */
+ uint32_t reserved_6_11 : 6;
+ uint32_t ioaa : 1; /**< Interrupt on Async Advance. System software can force the host
+ controller to issue an interrupt the next time the host controller advances the
+ asynchronous schedule by writing a one to the Interrupt on Async Advance Doorbell bit
+ in the USBCMD register. This status bit indicates the assertion of that interrupt source. */
+ uint32_t hsyserr : 1; /**< Host System Error. The Host Controller sets this bit to 1 when a serious error
+ occurs during a host system access involving the Host Controller module. */
+ uint32_t flro : 1; /**< Frame List Rollover. The Host Controller sets this bit to a one when the
+ Frame List Index rolls over from its maximum value to zero. The exact value at
+ which the rollover occurs depends on the frame list size. For example, if
+ the frame list size (as programmed in the Frame List Size field of the USBCMD register)
+ is 1024, the Frame Index Register rolls over every time FRINDEX[13] toggles. Similarly,
+ if the size is 512, the Host Controller sets this bit to a one every time FRINDEX[12]
+ toggles. */
+ uint32_t pcd : 1; /**< Port Change Detect. The Host Controller sets this bit to a one when any port
+ for which the Port Owner bit is set to zero (see Section 2.3.9) has a change bit transition
+ from a zero to a one or a Force Port Resume bit transition from a zero to a one as a
+ result of a J-K transition detected on a suspended port. This bit will also be set as a
+ result of the Connect Status Change being set to a one after system software has
+ relinquished ownership of a connected port by writing a one to a port's Port Owner bit. */
+ uint32_t usberrint : 1; /**< USB Error Interrupt. The Host Controller sets this bit to 1 when completion of a USB
+ transaction results in an error condition (e.g., error counter underflow). If the TD on
+ which the error interrupt occurred also had its IOC bit set, both this bit and USBINT
+ bit are set. */
+ uint32_t usbint : 1; /**< USB Interrupt. The Host Controller sets this bit to 1 on the completion of a USB
+ transaction, which results in the retirement of a Transfer Descriptor that had its
+ IOC bit set. The Host Controller also sets this bit to 1 when a short packet is
+ detected (actual number of bytes received was less than the expected number of bytes). */
+#else
+ uint32_t usbint : 1;
+ uint32_t usberrint : 1;
+ uint32_t pcd : 1;
+ uint32_t flro : 1;
+ uint32_t hsyserr : 1;
+ uint32_t ioaa : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t hchtd : 1;
+ uint32_t reclm : 1;
+ uint32_t pss : 1;
+ uint32_t ass : 1;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_uahcx_ehci_usbsts_s cn61xx;
+ struct cvmx_uahcx_ehci_usbsts_s cn63xx;
+ struct cvmx_uahcx_ehci_usbsts_s cn63xxp1;
+ struct cvmx_uahcx_ehci_usbsts_s cn66xx;
+ struct cvmx_uahcx_ehci_usbsts_s cn68xx;
+ struct cvmx_uahcx_ehci_usbsts_s cn68xxp1;
+ struct cvmx_uahcx_ehci_usbsts_s cnf71xx;
+};
+typedef union cvmx_uahcx_ehci_usbsts cvmx_uahcx_ehci_usbsts_t;
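+
+/* Sketch: USBSTS event bits are write-1-to-clear, so a handler acknowledges
+ * exactly the bits it observed while masking out the read-only status bits
+ * (ASS, PSS, Reclamation, HCHalted). The helper name is illustrative only.
+ */
+static inline uint32_t example_ehci_usbsts_ack(uint32_t usbsts_raw)
+{
+    cvmx_uahcx_ehci_usbsts_t s;
+    s.u32 = usbsts_raw;
+    s.s.ass = 0;    /* read-only; must not be written back as one */
+    s.s.pss = 0;
+    s.s.reclm = 0;
+    s.s.hchtd = 0;
+    return s.u32;
+}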
+
+/**
+ * cvmx_uahc#_ohci0_hcbulkcurrented
+ *
+ * HCBULKCURRENTED = Host Controller Bulk Current ED Register
+ *
+ * The HcBulkCurrentED register contains the physical address of the current endpoint of the Bulk list. As the Bulk list will be served in a round-robin
+ * fashion, the endpoints will be ordered according to their insertion to the list.
+ */
+union cvmx_uahcx_ohci0_hcbulkcurrented {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcbulkcurrented_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bced : 28; /**< BulkCurrentED. This is advanced to the next ED after the HC has served the
+ present one. HC continues processing the list from where it left off in the
+ last Frame. When it reaches the end of the Bulk list, HC checks the
+ ControlListFilled of HcControl. If set, it copies the content of HcBulkHeadED
+ to HcBulkCurrentED and clears the bit. If it is not set, it does nothing.
+ HCD is only allowed to modify this register when the BulkListEnable of
+ HcControl is cleared. When set, the HCD only reads the instantaneous value of
+ this register. This is initially set to zero to indicate the end of the Bulk
+ list. */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t bced : 28;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcbulkcurrented_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcbulkcurrented_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcbulkcurrented_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcbulkcurrented_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcbulkcurrented_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcbulkcurrented_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcbulkcurrented_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcbulkcurrented cvmx_uahcx_ohci0_hcbulkcurrented_t;
+
+/**
+ * cvmx_uahc#_ohci0_hcbulkheaded
+ *
+ * HCBULKHEADED = Host Controller Bulk Head ED Register
+ *
+ * The HcBulkHeadED register contains the physical address of the first Endpoint Descriptor of the Bulk list.
+ */
+union cvmx_uahcx_ohci0_hcbulkheaded {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcbulkheaded_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bhed : 28; /**< BulkHeadED. HC traverses the Bulk list starting with the HcBulkHeadED
+ pointer. The content is loaded from HCCA during the initialization of HC. */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t bhed : 28;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcbulkheaded_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcbulkheaded_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcbulkheaded_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcbulkheaded_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcbulkheaded_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcbulkheaded_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcbulkheaded_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcbulkheaded cvmx_uahcx_ohci0_hcbulkheaded_t;
+
+/**
+ * cvmx_uahc#_ohci0_hccommandstatus
+ *
+ * HCCOMMANDSTATUS = Host Controller Command Status Register
+ *
+ * The HcCommandStatus register is used by the Host Controller to receive commands issued by the Host Controller Driver, as well as reflecting the
+ * current status of the Host Controller. To the Host Controller Driver, it appears to be a "write to set" register. The Host Controller must ensure
+ * that bits written as '1' become set in the register while bits written as '0' remain unchanged in the register. The Host Controller Driver
+ * may issue multiple distinct commands to the Host Controller without concern for corrupting previously issued commands. The Host Controller Driver
+ * has normal read access to all bits.
+ * The SchedulingOverrunCount field indicates the number of frames with which the Host Controller has detected the scheduling overrun error. This
+ * occurs when the Periodic list does not complete before EOF. When a scheduling overrun error is detected, the Host Controller increments the counter
+ * and sets the SchedulingOverrun field in the HcInterruptStatus register.
+ */
+union cvmx_uahcx_ohci0_hccommandstatus {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hccommandstatus_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_18_31 : 14;
+ uint32_t soc : 2; /**< SchedulingOverrunCount. These bits are incremented on each scheduling overrun
+ error. It is initialized to 00b and wraps around at 11b. This will be
+ incremented when a scheduling overrun is detected even if SchedulingOverrun
+ in HcInterruptStatus has already been set. This is used by HCD to monitor
+ any persistent scheduling problems. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t ocr : 1; /**< OwnershipChangeRequest. This bit is set by an OS HCD to request a change of
+ control of the HC. When set, HC will set the OwnershipChange field in
+ HcInterruptStatus. After the changeover, this bit is cleared and remains so
+ until the next request from OS HCD. */
+ uint32_t blf : 1; /**< BulkListFilled. This bit is used to indicate whether there are any TDs on the
+ Bulk list. It is set by HCD whenever it adds a TD to an ED in the Bulk list.
+ When HC begins to process the head of the Bulk list, it checks BulkListFilled.
+ As long as BulkListFilled is 0, HC will not start processing the Bulk list. If
+ BulkListFilled is 1, HC will start processing the Bulk list and will set
+ BulkListFilled to 0. If HC finds a TD on the list, then HC will set
+ BulkListFilled to 1, causing the Bulk list processing to continue. If no TD is
+ found on the Bulk list, and if HCD does not set BulkListFilled, then
+ BulkListFilled will still be 0 when HC completes processing the Bulk list, and
+ Bulk list processing will stop. */
+ uint32_t clf : 1; /**< ControlListFilled. This bit is used to indicate whether there are any TDs
+ on the Control list. It is set by HCD whenever it adds a TD to an ED in the
+ Control list. When HC begins to process the head of the Control list, it
+ checks ControlListFilled. As long as ControlListFilled is 0, HC will not start
+ processing the Control list. If ControlListFilled is 1, HC will start
+ processing the Control list and will set ControlListFilled to 0. If HC finds a
+ TD on the list, then HC will set ControlListFilled to 1, causing the Control
+ list processing to continue. If no TD is found on the Control list, and if the
+ HCD does not set ControlListFilled, then ControlListFilled will still be 0 when
+ HC completes processing the Control list and Control list processing will stop. */
+ uint32_t hcr : 1; /**< HostControllerReset. This bit is set by HCD to initiate a software reset of
+ HC. Regardless of the functional state of HC, it moves to the USBSUSPEND
+ state in which most of the operational registers are reset except those
+ stated otherwise; e.g., the InterruptRouting field of HcControl, and no
+ Host bus accesses are allowed. This bit is cleared by HC upon the
+ completion of the reset operation. The reset operation must be completed
+ within 10 ms. This bit, when set, should not cause a reset to the Root Hub
+ and no subsequent reset signaling should be asserted to its downstream ports. */
+#else
+ uint32_t hcr : 1;
+ uint32_t clf : 1;
+ uint32_t blf : 1;
+ uint32_t ocr : 1;
+ uint32_t reserved_4_15 : 12;
+ uint32_t soc : 2;
+ uint32_t reserved_18_31 : 14;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hccommandstatus_s cn61xx;
+ struct cvmx_uahcx_ohci0_hccommandstatus_s cn63xx;
+ struct cvmx_uahcx_ohci0_hccommandstatus_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hccommandstatus_s cn66xx;
+ struct cvmx_uahcx_ohci0_hccommandstatus_s cn68xx;
+ struct cvmx_uahcx_ohci0_hccommandstatus_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hccommandstatus_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hccommandstatus cvmx_uahcx_ohci0_hccommandstatus_t;
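A short illustration of the write-to-set behavior described above: HCD can request HostControllerReset with a single store and then poll for HC to clear the bit. This is a minimal sketch, not SDK code; the ohci_read32()/ohci_write32() accessors and the address argument are hypothetical placeholders.

    extern uint32_t ohci_read32(uint64_t addr);            /* hypothetical MMIO read */
    extern void ohci_write32(uint64_t addr, uint32_t val); /* hypothetical MMIO write */

    static void ohci_soft_reset(uint64_t hccommandstatus_addr)
    {
        cvmx_uahcx_ohci0_hccommandstatus_t cs;

        cs.u32 = 0;      /* '0' bits leave previously issued commands untouched */
        cs.s.hcr = 1;    /* write-to-set: request HostControllerReset */
        ohci_write32(hccommandstatus_addr, cs.u32);

        /* HC clears HCR when the reset completes (within the 10 ms bound
         * stated above); a production driver would bound this loop. */
        do {
            cs.u32 = ohci_read32(hccommandstatus_addr);
        } while (cs.s.hcr);
    }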
+
+/**
+ * cvmx_uahc#_ohci0_hccontrol
+ *
+ * HCCONTROL = Host Controller Control Register
+ *
+ * The HcControl register defines the operating modes for the Host Controller. Most of the fields in this register are modified only by the Host Controller
+ * Driver, except HostControllerFunctionalState and RemoteWakeupConnected.
+ */
+union cvmx_uahcx_ohci0_hccontrol {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hccontrol_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_11_31 : 21;
+ uint32_t rwe : 1; /**< RemoteWakeupEnable. This bit is used by HCD to enable or disable the remote wakeup
+ feature upon the detection of upstream resume signaling. When this bit is set and
+ the ResumeDetected bit in HcInterruptStatus is set, a remote wakeup is signaled
+ to the host system. Setting this bit has no impact on the generation of hardware
+ interrupts. */
+ uint32_t rwc : 1; /**< RemoteWakeupConnected.This bit indicates whether HC supports remote wakeup signaling.
+ If remote wakeup is supported and used by the system it is the responsibility of
+ system firmware to set this bit during POST. HC clears the bit upon a hardware reset
+ but does not alter it upon a software reset. Remote wakeup signaling of the host
+ system is host-bus-specific and is not described in this specification. */
+ uint32_t ir : 1; /**< InterruptRouting
+ This bit determines the routing of interrupts generated by events registered in
+ HcInterruptStatus. If clear, all interrupts are routed to the normal host bus
+ interrupt mechanism. If set, interrupts are routed to the System Management
+ Interrupt. HCD clears this bit upon a hardware reset, but it does not alter
+ this bit upon a software reset. HCD uses this bit as a tag to indicate the
+ ownership of HC. */
+ uint32_t hcfs : 2; /**< HostControllerFunctionalState for USB
+ 00b: USBRESET
+ 01b: USBRESUME
+ 10b: USBOPERATIONAL
+ 11b: USBSUSPEND
+ A transition to USBOPERATIONAL from another state causes SOF generation to begin
+ 1 ms later. HCD may determine whether HC has begun sending SOFs by reading the
+ StartofFrame field of HcInterruptStatus.
+ This field may be changed by HC only when in the USBSUSPEND state. HC may move from
+ the USBSUSPEND state to the USBRESUME state after detecting the resume signaling
+ from a downstream port.
+ HC enters USBSUSPEND after a software reset, whereas it enters USBRESET after a
+ hardware reset. The latter also resets the Root Hub and asserts subsequent reset
+ signaling to downstream ports. */
+ uint32_t ble : 1; /**< BulkListEnable. This bit is set to enable the processing of the Bulk list in the
+ next Frame. If cleared by HCD, processing of the Bulk list does not occur after
+ the next SOF. HC checks this bit whenever it determines to process the list. When
+ disabled, HCD may modify the list. If HcBulkCurrentED is pointing to an ED to be
+ removed, HCD must advance the pointer by updating HcBulkCurrentED before re-enabling
+ processing of the list. */
+ uint32_t cle : 1; /**< ControlListEnable. This bit is set to enable the processing of the Control list in
+ the next Frame. If cleared by HCD, processing of the Control list does not occur
+ after the next SOF. HC must check this bit whenever it determines to process the
+ list. When disabled, HCD may modify the list. If HcControlCurrentED is pointing to
+ an ED to be removed, HCD must advance the pointer by updating HcControlCurrentED
+ before re-enabling processing of the list. */
+ uint32_t ie : 1; /**< IsochronousEnable. This bit is used by HCD to enable/disable processing of
+ isochronous EDs. While processing the periodic list in a Frame, HC checks the
+ status of this bit when it finds an Isochronous ED (F=1). If set (enabled), HC
+ continues processing the EDs. If cleared (disabled), HC halts processing of the
+ periodic list (which now contains only isochronous EDs) and begins processing the
+ Bulk/Control lists. Setting this bit is guaranteed to take effect in the next
+ Frame (not the current Frame). */
+ uint32_t ple : 1; /**< PeriodicListEnable. This bit is set to enable the processing of the periodic list
+ in the next Frame. If cleared by HCD, processing of the periodic list does not
+ occur after the next SOF. HC must check this bit before it starts processing
+ the list. */
+ uint32_t cbsr : 2; /**< ControlBulkServiceRatio. This specifies the service ratio between Control and
+ Bulk EDs. Before processing any of the nonperiodic lists, HC must compare the
+ ratio specified with its internal count on how many nonempty Control EDs have
+ been processed, in determining whether to continue serving another Control ED
+ or switching to Bulk EDs. The internal count will be retained when crossing
+ the frame boundary. In case of reset, HCD is responsible for restoring this
+ value.
+
+ CBSR No. of Control EDs Over Bulk EDs Served
+ 0 1:1
+ 1 2:1
+ 2 3:1
+ 3 4:1 */
+#else
+ uint32_t cbsr : 2;
+ uint32_t ple : 1;
+ uint32_t ie : 1;
+ uint32_t cle : 1;
+ uint32_t ble : 1;
+ uint32_t hcfs : 2;
+ uint32_t ir : 1;
+ uint32_t rwc : 1;
+ uint32_t rwe : 1;
+ uint32_t reserved_11_31 : 21;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hccontrol_s cn61xx;
+ struct cvmx_uahcx_ohci0_hccontrol_s cn63xx;
+ struct cvmx_uahcx_ohci0_hccontrol_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hccontrol_s cn66xx;
+ struct cvmx_uahcx_ohci0_hccontrol_s cn68xx;
+ struct cvmx_uahcx_ohci0_hccontrol_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hccontrol_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hccontrol cvmx_uahcx_ohci0_hccontrol_t;
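For illustration only, here is one plausible way an HCD could program HcControl when bringing the controller up, using the union above. The ohci_read32()/ohci_write32() accessors are the same hypothetical placeholders as in the earlier sketch, and the chosen values are an example, not the SDK's initialization path.

    extern uint32_t ohci_read32(uint64_t addr);            /* hypothetical MMIO read */
    extern void ohci_write32(uint64_t addr, uint32_t val); /* hypothetical MMIO write */

    static void ohci_go_operational(uint64_t hccontrol_addr)
    {
        cvmx_uahcx_ohci0_hccontrol_t ctl;

        ctl.u32 = ohci_read32(hccontrol_addr);
        ctl.s.cbsr = 3;  /* serve 4 Control EDs per Bulk ED */
        ctl.s.ple  = 1;  /* periodic list on from the next Frame */
        ctl.s.ie   = 1;  /* keep processing isochronous EDs */
        ctl.s.cle  = 1;  /* Control list on */
        ctl.s.ble  = 1;  /* Bulk list on */
        ctl.s.hcfs = 2;  /* 10b = USBOPERATIONAL; SOF generation begins ~1 ms later */
        ohci_write32(hccontrol_addr, ctl.u32);
    }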
+
+/**
+ * cvmx_uahc#_ohci0_hccontrolcurrented
+ *
+ * HCCONTROLCURRENTED = Host Controller Control Current ED Register
+ *
+ * The HcControlCurrentED register contains the physical address of the current Endpoint Descriptor of the Control list.
+ */
+union cvmx_uahcx_ohci0_hccontrolcurrented {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hccontrolcurrented_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cced : 28; /**< ControlCurrentED. This pointer is advanced to the next ED after serving the
+ present one. HC will continue processing the list from where it left off in
+ the last Frame. When it reaches the end of the Control list, HC checks the
+ ControlListFilled bit in HcCommandStatus. If set, it copies the content of
+ HcControlHeadED to HcControlCurrentED and clears the bit. If not set, it
+ does nothing. HCD is allowed to modify this register only when the
+ ControlListEnable of HcControl is cleared. When set, HCD only reads the
+ instantaneous value of this register. Initially, this is set to zero to
+ indicate the end of the Control list. */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t cced : 28;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hccontrolcurrented_s cn61xx;
+ struct cvmx_uahcx_ohci0_hccontrolcurrented_s cn63xx;
+ struct cvmx_uahcx_ohci0_hccontrolcurrented_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hccontrolcurrented_s cn66xx;
+ struct cvmx_uahcx_ohci0_hccontrolcurrented_s cn68xx;
+ struct cvmx_uahcx_ohci0_hccontrolcurrented_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hccontrolcurrented_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hccontrolcurrented cvmx_uahcx_ohci0_hccontrolcurrented_t;
+
+/**
+ * cvmx_uahc#_ohci0_hccontrolheaded
+ *
+ * HCCONTROLHEADED = Host Controller Control Head ED Register
+ *
+ * The HcControlHeadED register contains the physical address of the first Endpoint Descriptor of the Control list.
+ */
+union cvmx_uahcx_ohci0_hccontrolheaded {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hccontrolheaded_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ched : 28; /**< ControlHeadED. HC traverses the Control list starting with the HcControlHeadED
+ pointer. The content is loaded from HCCA during the initialization of HC. */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t ched : 28;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hccontrolheaded_s cn61xx;
+ struct cvmx_uahcx_ohci0_hccontrolheaded_s cn63xx;
+ struct cvmx_uahcx_ohci0_hccontrolheaded_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hccontrolheaded_s cn66xx;
+ struct cvmx_uahcx_ohci0_hccontrolheaded_s cn68xx;
+ struct cvmx_uahcx_ohci0_hccontrolheaded_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hccontrolheaded_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hccontrolheaded cvmx_uahcx_ohci0_hccontrolheaded_t;
+
+/**
+ * cvmx_uahc#_ohci0_hcdonehead
+ *
+ * HCDONEHEAD = Host Controller Done Head Register
+ *
+ * The HcDoneHead register contains the physical address of the last completed Transfer Descriptor that was added to the Done queue. In normal operation,
+ * the Host Controller Driver should not need to read this register as its content is periodically written to the HCCA.
+ */
+union cvmx_uahcx_ohci0_hcdonehead {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcdonehead_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dh : 28; /**< DoneHead. When a TD is completed, HC writes the content of HcDoneHead to the
+ NextTD field of the TD. HC then overwrites the content of HcDoneHead with the
+ address of this TD. This is set to zero whenever HC writes the content of
+ this register to HCCA. It also sets the WritebackDoneHead of HcInterruptStatus. */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t dh : 28;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcdonehead_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcdonehead_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcdonehead_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcdonehead_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcdonehead_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcdonehead_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcdonehead_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcdonehead cvmx_uahcx_ohci0_hcdonehead_t;
+
+/**
+ * cvmx_uahc#_ohci0_hcfminterval
+ *
+ * HCFMINTERVAL = Host Controller Frame Interval Register
+ *
+ * The HcFmInterval register contains a 14-bit value which indicates the bit time interval in a Frame, (i.e., between two consecutive SOFs), and a 15-bit value
+ * indicating the Full Speed maximum packet size that the Host Controller may transmit or receive without causing scheduling overrun. The Host Controller Driver
+ * may carry out minor adjustment on the FrameInterval by writing a new value over the present one at each SOF. This provides the programmability necessary for
+ * the Host Controller to synchronize with an external clocking resource and to adjust any unknown local clock offset.
+ */
+union cvmx_uahcx_ohci0_hcfminterval {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcfminterval_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t fit : 1; /**< FrameIntervalToggle. HCD toggles this bit whenever it loads a new value to
+ FrameInterval. */
+ uint32_t fsmps : 15; /**< FSLargestDataPacket. This field specifies a value which is loaded into the
+ Largest Data Packet Counter at the beginning of each frame. The counter value
+ represents the largest amount of data in bits which can be sent or received by
+ the HC in a single transaction at any given time without causing scheduling
+ overrun. The field value is calculated by the HCD. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t fi : 14; /**< FrameInterval. This specifies the interval between two consecutive SOFs in bit
+ times. The nominal value is set to be 11,999. HCD should store the current
+ value of this field before resetting HC by setting the HostControllerReset
+ field of HcCommandStatus, since the reset returns this field to its nominal
+ value. HCD may choose to restore the stored value upon the completion
+ of the Reset sequence. */
+#else
+ uint32_t fi : 14;
+ uint32_t reserved_14_15 : 2;
+ uint32_t fsmps : 15;
+ uint32_t fit : 1;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcfminterval_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcfminterval_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcfminterval_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcfminterval_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcfminterval_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcfminterval_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcfminterval_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcfminterval cvmx_uahcx_ohci0_hcfminterval_t;
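FSLargestDataPacket is an HCD-computed budget rather than a hardware constant. A common heuristic, used by several open-source OHCI drivers and shown here purely as an illustration, charges about 210 bit times of per-transaction overhead and scales the remainder by 6/7 to allow for bit stuffing:

    /* fi is the 14-bit FrameInterval; the result fits the 15-bit FSMPS field.
     * For the nominal fi of 11,999 this yields 10,104 bit times. */
    static uint32_t ohci_fsmps(uint32_t fi)
    {
        return 0x7fff & ((6 * (fi - 210)) / 7);
    }

Per the FIT description above, HCD would also toggle FrameIntervalToggle whenever it writes a new FrameInterval/FSMPS pair.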
+
+/**
+ * cvmx_uahc#_ohci0_hcfmnumber
+ *
+ * HCFMNUMBER = Host Controller Frame Number Register
+ *
+ * The HcFmNumber register is a 16-bit counter. It provides a timing reference among events happening in the Host Controller and the Host Controller Driver.
+ * The Host Controller Driver may use the 16-bit value specified in this register and generate a 32-bit frame number without requiring frequent access to
+ * the register.
+ */
+union cvmx_uahcx_ohci0_hcfmnumber {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcfmnumber_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t fn : 16; /**< FrameNumber. This is incremented when HcFmRemaining is re-loaded. It rolls
+ over to 0000h after FFFFh. When entering the USBOPERATIONAL state,
+ this will be incremented automatically. The content will be written to HCCA
+ after HC has incremented the FrameNumber at each frame boundary and sent a
+ SOF but before HC reads the first ED in that Frame. After writing to HCCA,
+ HC will set the StartofFrame in HcInterruptStatus. */
+#else
+ uint32_t fn : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcfmnumber_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcfmnumber_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcfmnumber_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcfmnumber_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcfmnumber_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcfmnumber_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcfmnumber_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcfmnumber cvmx_uahcx_ohci0_hcfmnumber_t;
+
+/**
+ * cvmx_uahc#_ohci0_hcfmremaining
+ *
+ * HCFMREMAINING = Host Controller Frame Remaining Register
+ * The HcFmRemaining register is a 14-bit down counter showing the bit time remaining in the current Frame.
+ */
+union cvmx_uahcx_ohci0_hcfmremaining {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcfmremaining_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t frt : 1; /**< FrameRemainingToggle. This bit is loaded from the FrameIntervalToggle field
+ of HcFmInterval whenever FrameRemaining reaches 0. This bit is used by HCD
+ for the synchronization between FrameInterval and FrameRemaining. */
+ uint32_t reserved_14_30 : 17;
+ uint32_t fr : 14; /**< FrameRemaining. This counter is decremented at each bit time. When it
+ reaches zero, it is reset by loading the FrameInterval value specified in
+ HcFmInterval at the next bit time boundary. When entering the USBOPERATIONAL
+ state, HC re-loads the content with the FrameInterval of HcFmInterval and uses
+ the updated value from the next SOF. */
+#else
+ uint32_t fr : 14;
+ uint32_t reserved_14_30 : 17;
+ uint32_t frt : 1;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcfmremaining_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcfmremaining_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcfmremaining_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcfmremaining_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcfmremaining_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcfmremaining_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcfmremaining_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcfmremaining cvmx_uahcx_ohci0_hcfmremaining_t;
+
+/**
+ * cvmx_uahc#_ohci0_hchcca
+ *
+ * HCHCCA = Host Controller Host Controller Communication Area Register
+ *
+ * The HcHCCA register contains the physical address of the Host Controller Communication Area. The Host Controller Driver determines the alignment restrictions
+ * by writing all 1s to HcHCCA and reading the content of HcHCCA. The alignment is evaluated by examining the number of zeroes in the lower order bits. The
+ * minimum alignment is 256 bytes; therefore, bits 0 through 7 must always return '0' when read. A detailed description can be found in Chapter 4 of the OHCI specification. This area
+ * is used to hold the control structures and the Interrupt table that are accessed by both the Host Controller and the Host Controller Driver.
+ */
+union cvmx_uahcx_ohci0_hchcca {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hchcca_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hcca : 24; /**< This is the base address (bits [31:8]) of the Host Controller Communication Area. */
+ uint32_t reserved_0_7 : 8;
+#else
+ uint32_t reserved_0_7 : 8;
+ uint32_t hcca : 24;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hchcca_s cn61xx;
+ struct cvmx_uahcx_ohci0_hchcca_s cn63xx;
+ struct cvmx_uahcx_ohci0_hchcca_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hchcca_s cn66xx;
+ struct cvmx_uahcx_ohci0_hchcca_s cn68xx;
+ struct cvmx_uahcx_ohci0_hchcca_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hchcca_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hchcca cvmx_uahcx_ohci0_hchcca_t;
+
+/**
+ * cvmx_uahc#_ohci0_hcinterruptdisable
+ *
+ * HCINTERRUPTDISABLE = Host Controller InterruptDisable Register
+ *
+ * Each disable bit in the HcInterruptDisable register corresponds to an associated interrupt bit in the HcInterruptStatus register. The HcInterruptDisable
+ * register is coupled with the HcInterruptEnable register. Thus, writing a '1' to a bit in this register clears the corresponding bit in the HcInterruptEnable
+ * register, whereas writing a '0' to a bit in this register leaves the corresponding bit in the HcInterruptEnable register unchanged. On read, the current
+ * value of the HcInterruptEnable register is returned.
+ */
+union cvmx_uahcx_ohci0_hcinterruptdisable {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcinterruptdisable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t mie : 1; /**< A '0' written to this field is ignored by HC.
+ A '1' written to this field disables interrupt generation due to events
+ specified in the other bits of this register. This field is set after a
+ hardware or software reset. */
+ uint32_t oc : 1; /**< 0 - Ignore; 1 - Disable interrupt generation due to Ownership Change. */
+ uint32_t reserved_7_29 : 23;
+ uint32_t rhsc : 1; /**< 0 - Ignore; 1 - Disable interrupt generation due to Root Hub Status Change. */
+ uint32_t fno : 1; /**< 0 - Ignore; 1 - Disable interrupt generation due to Frame Number Overflow. */
+ uint32_t ue : 1; /**< 0 - Ignore; 1 - Disable interrupt generation due to Unrecoverable Error. */
+ uint32_t rd : 1; /**< 0 - Ignore; 1 - Disable interrupt generation due to Resume Detect. */
+ uint32_t sf : 1; /**< 0 - Ignore; 1 - Disable interrupt generation due to Start of Frame. */
+ uint32_t wdh : 1; /**< 0 - Ignore; 1 - Disable interrupt generation due to HcDoneHead Writeback. */
+ uint32_t so : 1; /**< 0 - Ignore; 1 - Disable interrupt generation due to Scheduling Overrun. */
+#else
+ uint32_t so : 1;
+ uint32_t wdh : 1;
+ uint32_t sf : 1;
+ uint32_t rd : 1;
+ uint32_t ue : 1;
+ uint32_t fno : 1;
+ uint32_t rhsc : 1;
+ uint32_t reserved_7_29 : 23;
+ uint32_t oc : 1;
+ uint32_t mie : 1;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcinterruptdisable_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcinterruptdisable_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcinterruptdisable_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcinterruptdisable_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcinterruptdisable_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcinterruptdisable_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcinterruptdisable_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcinterruptdisable cvmx_uahcx_ohci0_hcinterruptdisable_t;
+
+/**
+ * cvmx_uahc#_ohci0_hcinterruptenable
+ *
+ * HCINTERRUPTENABLE = Host Controller InterruptEnable Register
+ *
+ * Each enable bit in the HcInterruptEnable register corresponds to an associated interrupt bit in the HcInterruptStatus register. The HcInterruptEnable
+ * register is used to control which events generate a hardware interrupt. When a bit is set in the HcInterruptStatus register AND the corresponding bit
+ * in the HcInterruptEnable register is set AND the MasterInterruptEnable bit is set, then a hardware interrupt is requested on the host bus.
+ * Writing a '1' to a bit in this register sets the corresponding bit, whereas writing a '0' to a bit in this register leaves the corresponding bit
+ * unchanged. On read, the current value of this register is returned.
+ */
+union cvmx_uahcx_ohci0_hcinterruptenable {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcinterruptenable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t mie : 1; /**< A '0' written to this field is ignored by HC.
+ A '1' written to this field enables interrupt generation due to events
+ specified in the other bits of this register. This is used by HCD as a Master
+ Interrupt Enable. */
+ uint32_t oc : 1; /**< 0 - Ignore; 1 - Enable interrupt generation due to Ownership Change. */
+ uint32_t reserved_7_29 : 23;
+ uint32_t rhsc : 1; /**< 0 - Ignore; 1 - Enable interrupt generation due to Root Hub Status Change. */
+ uint32_t fno : 1; /**< 0 - Ignore; 1 - Enable interrupt generation due to Frame Number Overflow. */
+ uint32_t ue : 1; /**< 0 - Ignore; 1 - Enable interrupt generation due to Unrecoverable Error. */
+ uint32_t rd : 1; /**< 0 - Ignore; 1 - Enable interrupt generation due to Resume Detect. */
+ uint32_t sf : 1; /**< 0 - Ignore; 1 - Enable interrupt generation due to Start of Frame. */
+ uint32_t wdh : 1; /**< 0 - Ignore; 1 - Enable interrupt generation due to HcDoneHead Writeback. */
+ uint32_t so : 1; /**< 0 - Ignore; 1 - Enable interrupt generation due to Scheduling Overrun. */
+#else
+ uint32_t so : 1;
+ uint32_t wdh : 1;
+ uint32_t sf : 1;
+ uint32_t rd : 1;
+ uint32_t ue : 1;
+ uint32_t fno : 1;
+ uint32_t rhsc : 1;
+ uint32_t reserved_7_29 : 23;
+ uint32_t oc : 1;
+ uint32_t mie : 1;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcinterruptenable_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcinterruptenable_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcinterruptenable_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcinterruptenable_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcinterruptenable_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcinterruptenable_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcinterruptenable_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcinterruptenable cvmx_uahcx_ohci0_hcinterruptenable_t;
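Because writes to this register only set bits (clearing goes through HcInterruptDisable), a typical bring-up can enable a working set of sources plus the master enable in one store. A sketch under the same placeholder-accessor assumption as the earlier examples:

    extern void ohci_write32(uint64_t addr, uint32_t val); /* hypothetical MMIO write */

    static void ohci_enable_irqs(uint64_t hcinterruptenable_addr)
    {
        cvmx_uahcx_ohci0_hcinterruptenable_t ie;

        ie.u32 = 0;      /* '0' bits leave other enables unchanged */
        ie.s.mie  = 1;   /* master interrupt enable */
        ie.s.wdh  = 1;   /* done-head writeback */
        ie.s.rhsc = 1;   /* root hub status change */
        ie.s.ue   = 1;   /* unrecoverable error */
        ohci_write32(hcinterruptenable_addr, ie.u32);
    }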
+
+/**
+ * cvmx_uahc#_ohci0_hcinterruptstatus
+ *
+ * HCINTERRUPTSTATUS = Host Controller InterruptStatus Register
+ *
+ * This register provides status on various events that cause hardware interrupts. When an event occurs, Host Controller sets the corresponding bit
+ * in this register. When a bit becomes set, a hardware interrupt is generated if the interrupt is enabled in the HcInterruptEnable register
+ * and the MasterInterruptEnable bit is set. The Host Controller Driver may clear specific bits in this register by writing '1' to bit positions
+ * to be cleared. The Host Controller Driver may not set any of these bits. The Host Controller will never clear the bit.
+ */
+union cvmx_uahcx_ohci0_hcinterruptstatus {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcinterruptstatus_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_31_31 : 1;
+ uint32_t oc : 1; /**< OwnershipChange. This bit is set by HC when HCD sets the OwnershipChangeRequest
+ field in HcCommandStatus. This event, when unmasked, will always generate an
+ System Management Interrupt (SMI) immediately. This bit is tied to 0b when the
+ SMI pin is not implemented. */
+ uint32_t reserved_7_29 : 23;
+ uint32_t rhsc : 1; /**< RootHubStatusChange. This bit is set when the content of HcRhStatus or the
+ content of any of HcRhPortStatus[NumberofDownstreamPort] has changed. */
+ uint32_t fno : 1; /**< FrameNumberOverflow. This bit is set when the MSb of HcFmNumber (bit 15)
+ changes value, from 0 to 1 or from 1 to 0, and after HccaFrameNumber has been
+ updated. */
+ uint32_t ue : 1; /**< UnrecoverableError. This bit is set when HC detects a system error not related
+ to USB. HC should not proceed with any processing nor signaling before the
+ system error has been corrected. HCD clears this bit after HC has been reset. */
+ uint32_t rd : 1; /**< ResumeDetected. This bit is set when HC detects that a device on the USB is
+ asserting resume signaling. It is the transition from no resume signaling to
+ resume signaling that causes this bit to be set. This bit is not set when HCD
+ sets the USBRESUME state. */
+ uint32_t sf : 1; /**< StartofFrame. This bit is set by HC at each start of a frame and after the
+ update of HccaFrameNumber. HC also generates a SOF token at the same time. */
+ uint32_t wdh : 1; /**< WritebackDoneHead. This bit is set immediately after HC has written
+ HcDoneHead to HccaDoneHead. Further updates of the HccaDoneHead will not
+ occur until this bit has been cleared. HCD should only clear this bit after
+ it has saved the content of HccaDoneHead. */
+ uint32_t so : 1; /**< SchedulingOverrun. This bit is set when the USB schedule for the current
+ Frame overruns and after the update of HccaFrameNumber. A scheduling overrun
+ will also cause the SchedulingOverrunCount of HcCommandStatus to be
+ incremented. */
+#else
+ uint32_t so : 1;
+ uint32_t wdh : 1;
+ uint32_t sf : 1;
+ uint32_t rd : 1;
+ uint32_t ue : 1;
+ uint32_t fno : 1;
+ uint32_t rhsc : 1;
+ uint32_t reserved_7_29 : 23;
+ uint32_t oc : 1;
+ uint32_t reserved_31_31 : 1;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcinterruptstatus_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcinterruptstatus_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcinterruptstatus_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcinterruptstatus_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcinterruptstatus_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcinterruptstatus_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcinterruptstatus_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcinterruptstatus cvmx_uahcx_ohci0_hcinterruptstatus_t;
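The write-'1'-to-clear rule shapes the interrupt path: sample the status, handle the events (saving HccaDoneHead before acknowledging WDH, as the WDH description requires), then write the sampled value back so that only the bits actually observed are cleared. A hedged sketch, with the same hypothetical accessors as above:

    extern uint32_t ohci_read32(uint64_t addr);            /* hypothetical MMIO read */
    extern void ohci_write32(uint64_t addr, uint32_t val); /* hypothetical MMIO write */

    static void ohci_isr(uint64_t hcinterruptstatus_addr)
    {
        cvmx_uahcx_ohci0_hcinterruptstatus_t st;

        st.u32 = ohci_read32(hcinterruptstatus_addr);
        if (st.s.wdh) {
            /* consume HccaDoneHead here, before the write-back below;
             * HC will not update it again until WDH is cleared */
        }
        ohci_write32(hcinterruptstatus_addr, st.u32); /* W1C of the sampled bits */
    }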
+
+/**
+ * cvmx_uahc#_ohci0_hclsthreshold
+ *
+ * HCLSTHRESHOLD = Host Controller LS Threshold Register
+ *
+ * The HcLSThreshold register contains an 11-bit value used by the Host Controller to determine whether to commit to the transfer of a maximum of 8-byte
+ * LS packet before EOF. Neither the Host Controller nor the Host Controller Driver are allowed to change this value.
+ */
+union cvmx_uahcx_ohci0_hclsthreshold {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hclsthreshold_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_12_31 : 20;
+ uint32_t lst : 12; /**< LSThreshold.
+ This field contains a value which is compared to the FrameRemaining field
+ prior to initiating a Low Speed transaction. The transaction is started only
+ if FrameRemaining >= this field. The value is calculated by HCD
+ with the consideration of transmission and setup overhead. */
+#else
+ uint32_t lst : 12;
+ uint32_t reserved_12_31 : 20;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hclsthreshold_s cn61xx;
+ struct cvmx_uahcx_ohci0_hclsthreshold_s cn63xx;
+ struct cvmx_uahcx_ohci0_hclsthreshold_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hclsthreshold_s cn66xx;
+ struct cvmx_uahcx_ohci0_hclsthreshold_s cn68xx;
+ struct cvmx_uahcx_ohci0_hclsthreshold_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hclsthreshold_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hclsthreshold cvmx_uahcx_ohci0_hclsthreshold_t;
+
+/**
+ * cvmx_uahc#_ohci0_hcperiodcurrented
+ *
+ * HCPERIODCURRENTED = Host Controller Period Current ED Register
+ *
+ * The HcPeriodCurrentED register contains the physical address of the current Isochronous or Interrupt Endpoint Descriptor.
+ */
+union cvmx_uahcx_ohci0_hcperiodcurrented {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcperiodcurrented_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pced : 28; /**< PeriodCurrentED. This is used by HC to point to the head of one of the
+ Periodic lists which will be processed in the current Frame. The content of
+ this register is updated by HC after a periodic ED has been processed. HCD
+ may read the content in determining which ED is currently being processed
+ at the time of reading. */
+ uint32_t reserved_0_3 : 4;
+#else
+ uint32_t reserved_0_3 : 4;
+ uint32_t pced : 28;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcperiodcurrented_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcperiodcurrented_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcperiodcurrented_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcperiodcurrented_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcperiodcurrented_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcperiodcurrented_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcperiodcurrented_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcperiodcurrented cvmx_uahcx_ohci0_hcperiodcurrented_t;
+
+/**
+ * cvmx_uahc#_ohci0_hcperiodicstart
+ *
+ * HCPERIODICSTART = Host Controller Periodic Start Register
+ *
+ * The HcPeriodicStart register has a 14-bit programmable value which determines the earliest time at which HC should start processing the periodic list.
+ */
+union cvmx_uahcx_ohci0_hcperiodicstart {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcperiodicstart_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_14_31 : 18;
+ uint32_t ps : 14; /**< PeriodicStart. After a hardware reset, this field is cleared. This is then set
+ by HCD during the HC initialization. The value is calculated roughly as 10%
+ off from HcFmInterval. A typical value will be 3E67h. When HcFmRemaining
+ reaches the value specified, processing of the periodic lists will have
+ priority over Control/Bulk processing. HC will therefore start processing
+ the Interrupt list after completing the current Control or Bulk transaction
+ that is in progress. */
+#else
+ uint32_t ps : 14;
+ uint32_t reserved_14_31 : 18;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcperiodicstart_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcperiodicstart_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcperiodicstart_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcperiodicstart_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcperiodicstart_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcperiodicstart_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcperiodicstart_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcperiodicstart cvmx_uahcx_ohci0_hcperiodicstart_t;
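"Roughly 10% off from HcFmInterval" is commonly implemented as a plain 90% scaling of FrameInterval; this is an HCD convention offered as an example, not a hardware requirement:

    /* For the nominal FrameInterval of 11,999 this gives 10,799 (0x2A2F). */
    static uint32_t ohci_periodic_start(uint32_t fi)
    {
        return (fi * 9) / 10;
    }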
+
+/**
+ * cvmx_uahc#_ohci0_hcrevision
+ *
+ * HCREVISION = Host Controller Revision Register
+ *
+ */
+union cvmx_uahcx_ohci0_hcrevision {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcrevision_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31 : 24;
+ uint32_t rev : 8; /**< Revision This read-only field contains the BCD representation of the version
+ of the HCI specification that is implemented by this HC. For example, a value
+ of 11h corresponds to version 1.1. All of the HC implementations that are
+ compliant with this specification will have a value of 10h. */
+#else
+ uint32_t rev : 8;
+ uint32_t reserved_8_31 : 24;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcrevision_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcrevision_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcrevision_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcrevision_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcrevision_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcrevision_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcrevision_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcrevision cvmx_uahcx_ohci0_hcrevision_t;
+
+/**
+ * cvmx_uahc#_ohci0_hcrhdescriptora
+ *
+ * HCRHDESCRIPTORA = Host Controller Root Hub DescriptorA Register
+ *
+ * The HcRhDescriptorA register is the first register of two describing the characteristics of the Root Hub. Reset values are implementation-specific.
+ * The descriptor length (11), descriptor type (0x29), and hub controller current (0) fields of the hub Class Descriptor are emulated by the HCD. All
+ * other fields are located in the HcRhDescriptorA and HcRhDescriptorB registers.
+ */
+union cvmx_uahcx_ohci0_hcrhdescriptora {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcrhdescriptora_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t potpgt : 8; /**< PowerOnToPowerGoodTime. This byte specifies the duration HCD has to wait before
+ accessing a powered-on port of the Root Hub. It is implementation-specific. The
+ unit of time is 2 ms. The duration is calculated as POTPGT * 2 ms. */
+ uint32_t reserved_13_23 : 11;
+ uint32_t nocp : 1; /**< NoOverCurrentProtection. This bit describes how the overcurrent status for the
+ Root Hub ports is reported. When this bit is cleared, the
+ OverCurrentProtectionMode field specifies global or per-port reporting.
+ - 0: Over-current status is reported collectively for all downstream ports
+ - 1: No overcurrent protection supported */
+ uint32_t ocpm : 1; /**< OverCurrentProtectionMode. This bit describes how the overcurrent status for
+ the Root Hub ports is reported. At reset, this field should reflect the same
+ mode as PowerSwitchingMode. This field is valid only if the
+ NoOverCurrentProtection field is cleared. 0: over-current status is reported
+ collectively for all downstream ports 1: over-current status is reported on a
+ per-port basis */
+ uint32_t dt : 1; /**< DeviceType. This bit specifies that the Root Hub is not a compound device. The
+ Root Hub is not permitted to be a compound device. This field should always
+ read/write 0. */
+ uint32_t psm : 1; /**< PowerSwitchingMode. This bit is used to specify how the power switching of
+ the Root Hub ports is controlled. It is implementation-specific. This field
+ is only valid if the NoPowerSwitching field is cleared. 0: all ports are
+ powered at the same time. 1: each port is powered individually. This mode
+ allows port power to be controlled by either the global switch or per-port
+ switching. If the PortPowerControlMask bit is set, the port responds only
+ to port power commands (Set/ClearPortPower). If the port mask is cleared,
+ then the port is controlled only by the global power switch
+ (Set/ClearGlobalPower). */
+ uint32_t nps : 1; /**< NoPowerSwitching. This bit specifies whether power switching is
+ supported or ports are always powered. It is implementation-specific. When
+ this bit is cleared, the PowerSwitchingMode specifies global or per-port
+ switching.
+ - 0: Ports are power switched
+ - 1: Ports are always powered on when the HC is powered on */
+ uint32_t ndp : 8; /**< NumberDownstreamPorts. These bits specify the number of downstream ports
+ supported by the Root Hub. It is implementation-specific. The minimum number
+ of ports is 1. The maximum number of ports supported by OpenHCI is 15. */
+#else
+ uint32_t ndp : 8;
+ uint32_t nps : 1;
+ uint32_t psm : 1;
+ uint32_t dt : 1;
+ uint32_t ocpm : 1;
+ uint32_t nocp : 1;
+ uint32_t reserved_13_23 : 11;
+ uint32_t potpgt : 8;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcrhdescriptora_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcrhdescriptora_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcrhdescriptora_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcrhdescriptora_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcrhdescriptora_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcrhdescriptora_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcrhdescriptora_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcrhdescriptora cvmx_uahcx_ohci0_hcrhdescriptora_t;
+
+/**
+ * cvmx_uahc#_ohci0_hcrhdescriptorb
+ *
+ * HCRHDESCRIPTORB = Host Controller Root Hub DescriptorB Register
+ *
+ * The HcRhDescriptorB register is the second register of two describing the characteristics of the Root Hub. These fields are written during
+ * initialization to correspond with the system implementation. Reset values are implementation-specific.
+ */
+union cvmx_uahcx_ohci0_hcrhdescriptorb {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcrhdescriptorb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ppcm : 16; /**< PortPowerControlMask.
+ Each bit indicates if a port is affected by a global power control command
+ when PowerSwitchingMode is set. When set, the port's power state is only
+ affected by per-port power control (Set/ClearPortPower). When cleared, the
+ port is controlled by the global power switch (Set/ClearGlobalPower). If
+ the device is configured to global switching mode (PowerSwitchingMode=0),
+ this field is not valid.
+ bit 0: Reserved
+ bit 1: Ganged-power mask on Port \#1
+ bit 2: Ganged-power mask on Port \#2
+ - ...
+ bit15: Ganged-power mask on Port \#15 */
+ uint32_t dr : 16; /**< DeviceRemovable.
+ Each bit is dedicated to a port of the Root Hub. When cleared,the attached
+ device is removable. When set, the attached device is not removable.
+ bit 0: Reserved
+ bit 1: Device attached to Port \#1
+ bit 2: Device attached to Port \#2
+ - ...
+ bit15: Device attached to Port \#15 */
+#else
+ uint32_t dr : 16;
+ uint32_t ppcm : 16;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcrhdescriptorb_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcrhdescriptorb_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcrhdescriptorb_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcrhdescriptorb_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcrhdescriptorb_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcrhdescriptorb_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcrhdescriptorb_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcrhdescriptorb cvmx_uahcx_ohci0_hcrhdescriptorb_t;
+
+/**
+ * cvmx_uahc#_ohci0_hcrhportstatus#
+ *
+ * HCRHPORTSTATUSX = Host Controller Root Hub Port X Status Registers
+ *
+ * The HcRhPortStatus[1:NDP] register is used to control and report port events on a per-port basis. NumberDownstreamPorts represents the number
+ * of HcRhPortStatus registers that are implemented in hardware. The lower word is used to reflect the port status, whereas the upper word reflects
+ * the status change bits. Some status bits are implemented with special write behavior (see below). If a transaction (token through handshake) is
+ * in progress when a write to change port status occurs, the resulting port status change must be postponed until the transaction completes.
+ * Reserved bits should always be written '0'.
+ */
+union cvmx_uahcx_ohci0_hcrhportstatusx {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcrhportstatusx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t prsc : 1; /**< PortResetStatusChange. This bit is set at the end of the 10-ms port reset
+ signal. The HCD writes a '1' to clear this bit. Writing a '0' has no effect.
+ 0 = port reset is not complete
+ 1 = port reset is complete */
+ uint32_t ocic : 1; /**< PortOverCurrentIndicatorChange. This bit is valid only if overcurrent
+ conditions are reported on a per-port basis. This bit is set when Root Hub
+ changes the PortOverCurrentIndicator bit. The HCD writes a '1' to clear this
+ bit. Writing a '0' has no effect.
+ 0 = no change in PortOverCurrentIndicator
+ 1 = PortOverCurrentIndicator has changed */
+ uint32_t pssc : 1; /**< PortSuspendStatusChange. This bit is set when the full resume sequence has
+ been completed. This sequence includes the 20-ms resume pulse, LS EOP, and
+ 3-ms resynchronization delay.
+ The HCD writes a '1' to clear this bit. Writing a '0' has no effect. This
+ bit is also cleared when ResetStatusChange is set.
+ 0 = resume is not completed
+ 1 = resume completed */
+ uint32_t pesc : 1; /**< PortEnableStatusChange. This bit is set when hardware events cause the
+ PortEnableStatus bit to be cleared. Changes from HCD writes do not set this
+ bit. The HCD writes a '1' to clear this bit. Writing a '0' has no effect.
+ 0 = no change in PortEnableStatus
+ 1 = change in PortEnableStatus */
+ uint32_t csc : 1; /**< ConnectStatusChange. This bit is set whenever a connect or disconnect event
+ occurs. The HCD writes a '1' to clear this bit. Writing a '0' has no
+ effect. If CurrentConnectStatus is cleared when a SetPortReset, SetPortEnable,
+ or SetPortSuspend write occurs, this bit is set to force the driver to
+ re-evaluate the connection status since these writes should not occur if the
+ port is disconnected.
+ 0 = no change in CurrentConnectStatus
+ 1 = change in CurrentConnectStatus
+ Note: If the DeviceRemovable[NDP] bit is set, this bit is set only after a
+ Root Hub reset to inform the system that the device is attached. */
+ uint32_t reserved_10_15 : 6;
+ uint32_t lsda : 1; /**< (read) LowSpeedDeviceAttached. This bit indicates the speed of the device
+ attached to this port. When set, a Low Speed device is attached to this
+ port. When clear, a Full Speed device is attached to this port. This
+ field is valid only when the CurrentConnectStatus is set.
+ 0 = full speed device attached
+ 1 = low speed device attached
+ (write) ClearPortPower. The HCD clears the PortPowerStatus bit by writing a
+ '1' to this bit. Writing a '0' has no effect. */
+ uint32_t pps : 1; /**< (read) PortPowerStatus. This bit reflects the port's power status, regardless
+ of the type of power switching implemented. This bit is cleared if an
+ overcurrent condition is detected. HCD sets this bit by writing
+ SetPortPower or SetGlobalPower. HCD clears this bit by writing
+ ClearPortPower or ClearGlobalPower. Which power control switches are
+ enabled is determined by PowerSwitchingMode and PortPowerControlMask[NDP].
+ In global switching mode (PowerSwitchingMode=0), only Set/ClearGlobalPower
+ controls this bit. In per-port power switching (PowerSwitchingMode=1),
+ if the PortPowerControlMask[NDP] bit for the port is set, only
+ Set/ClearPortPower commands are enabled. If the mask is not set, only
+ Set/ClearGlobalPower commands are enabled. When port power is disabled,
+ CurrentConnectStatus, PortEnableStatus, PortSuspendStatus, and
+ PortResetStatus should be reset.
+ 0 = port power is off
+ 1 = port power is on
+ (write) SetPortPower. The HCD writes a '1' to set the PortPowerStatus bit.
+ Writing a '0' has no effect. Note: This bit always reads '1'
+ if power switching is not supported. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t prs : 1; /**< (read) PortResetStatus. When this bit is set by a write to SetPortReset, port
+ reset signaling is asserted. When reset is completed, this bit is
+ cleared when PortResetStatusChange is set. This bit cannot be set if
+ CurrentConnectStatus is cleared.
+ 0 = port reset signal is not active
+ 1 = port reset signal is active
+ (write) SetPortReset. The HCD sets the port reset signaling by writing a '1'
+ to this bit. Writing a '0' has no effect. If CurrentConnectStatus is
+ cleared, this write does not set PortResetStatus, but instead sets
+ ConnectStatusChange. This informs the driver that it attempted to reset
+ a disconnected port. */
+ uint32_t poci : 1; /**< (read) PortOverCurrentIndicator. This bit is only valid when the Root Hub is
+ configured in such a way that overcurrent conditions are reported on a
+ per-port basis. If per-port overcurrent reporting is not supported, this
+ bit is set to 0. If cleared, all power operations are normal for this
+ port. If set, an overcurrent condition exists on this port. This bit
+ always reflects the overcurrent input signal.
+ 0 = no overcurrent condition.
+ 1 = overcurrent condition detected.
+ (write) ClearSuspendStatus. The HCD writes a '1' to initiate a resume.
+ Writing a '0' has no effect. A resume is initiated only if
+ PortSuspendStatus is set. */
+ uint32_t pss : 1; /**< (read) PortSuspendStatus. This bit indicates the port is suspended or in the
+ resume sequence. It is set by a SetSuspendState write and cleared when
+ PortSuspendStatusChange is set at the end of the resume interval. This
+ bit cannot be set if CurrentConnectStatus is cleared. This bit is also
+ cleared when PortResetStatusChange is set at the end of the port reset
+ or when the HC is placed in the USBRESUME state. If an upstream resume is
+ in progress, it should propagate to the HC.
+ 0 = port is not suspended
+ 1 = port is suspended
+ (write) SetPortSuspend. The HCD sets the PortSuspendStatus bit by writing a
+ '1' to this bit. Writing a '0' has no effect. If CurrentConnectStatus
+ is cleared, this write does not set PortSuspendStatus; instead it sets
+ ConnectStatusChange. This informs the driver that it attempted to suspend
+ a disconnected port. */
+ uint32_t pes : 1; /**< (read) PortEnableStatus. This bit indicates whether the port is enabled or
+ disabled. The Root Hub may clear this bit when an overcurrent condition,
+ disconnect event, switched-off power, or operational bus error such
+ as babble is detected. This change also causes PortEnabledStatusChange
+ to be set. HCD sets this bit by writing SetPortEnable and clears it by
+ writing ClearPortEnable. This bit cannot be set when CurrentConnectStatus
+ is cleared. This bit is also set, if not already, at the completion of a
+ port reset when ResetStatusChange is set or port suspend when
+ SuspendStatusChange is set.
+ 0 = port is disabled
+ 1 = port is enabled
+ (write) SetPortEnable. The HCD sets PortEnableStatus by writing a '1'.
+ Writing a '0' has no effect. If CurrentConnectStatus is cleared, this
+ write does not set PortEnableStatus, but instead sets ConnectStatusChange.
+ This informs the driver that it attempted to enable a disconnected port. */
+ uint32_t ccs : 1; /**< (read) CurrentConnectStatus. This bit reflects the current state of the
+ downstream port.
+ 0 = no device connected
+ 1 = device connected
+ (write) ClearPortEnable.
+ The HCD writes a '1' to this bit to clear the PortEnableStatus bit.
+ Writing a '0' has no effect. The CurrentConnectStatus is not
+ affected by any write.
+ Note: This bit always reads '1' when the attached device is
+ nonremovable (DeviceRemovable[NDP]). */
+#else
+ uint32_t ccs : 1;
+ uint32_t pes : 1;
+ uint32_t pss : 1;
+ uint32_t poci : 1;
+ uint32_t prs : 1;
+ uint32_t reserved_5_7 : 3;
+ uint32_t pps : 1;
+ uint32_t lsda : 1;
+ uint32_t reserved_10_15 : 6;
+ uint32_t csc : 1;
+ uint32_t pesc : 1;
+ uint32_t pssc : 1;
+ uint32_t ocic : 1;
+ uint32_t prsc : 1;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcrhportstatusx_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcrhportstatusx_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcrhportstatusx_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcrhportstatusx_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcrhportstatusx_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcrhportstatusx_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcrhportstatusx_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcrhportstatusx cvmx_uahcx_ohci0_hcrhportstatusx_t;
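Tying the per-port write semantics together, a port reset amounts to: write SetPortReset, wait for PortResetStatusChange, then acknowledge the change bit. A minimal sketch, again with hypothetical ohci_read32()/ohci_write32() accessors and an illustrative address argument:

    extern uint32_t ohci_read32(uint64_t addr);            /* hypothetical MMIO read */
    extern void ohci_write32(uint64_t addr, uint32_t val); /* hypothetical MMIO write */

    static void ohci_reset_port(uint64_t hcrhportstatus_addr)
    {
        cvmx_uahcx_ohci0_hcrhportstatusx_t ps;

        ps.u32 = 0;
        ps.s.prs = 1;                       /* SetPortReset */
        ohci_write32(hcrhportstatus_addr, ps.u32);

        do {                                /* reset signaling lasts ~10 ms */
            ps.u32 = ohci_read32(hcrhportstatus_addr);
        } while (!ps.s.prsc);               /* a real driver would add a timeout */

        ps.u32 = 0;
        ps.s.prsc = 1;                      /* write '1' to clear the change bit */
        ohci_write32(hcrhportstatus_addr, ps.u32);
    }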
+
+/**
+ * cvmx_uahc#_ohci0_hcrhstatus
+ *
+ * HCRHSTATUS = Host Controller Root Hub Status Register
+ *
+ * The HcRhStatus register is divided into two parts. The lower word of a Dword represents the Hub Status field and the upper word represents the Hub
+ * Status Change field. Reserved bits should always be written '0'.
+ */
+union cvmx_uahcx_ohci0_hcrhstatus {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_hcrhstatus_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t crwe : 1; /**< (write) ClearRemoteWakeupEnable. Writing a '1' clears DeviceRemoteWakeupEnable.
+ Writing a '0' has no effect. */
+ uint32_t reserved_18_30 : 13;
+ uint32_t ccic : 1; /**< OverCurrentIndicatorChange. This bit is set by hardware when a change has
+ occurred to the OCI field of this register. The HCD clears this bit by
+ writing a '1'. Writing a '0' has no effect. */
+ uint32_t lpsc : 1; /**< (read) LocalPowerStatusChange. The Root Hub does not support the local power
+ status feature; thus, this bit is always read as '0'.
+ (write) SetGlobalPower. In global power mode (PowerSwitchingMode=0), this bit
+ is written to '1' to turn on power to all ports (set PortPowerStatus).
+ In per-port power mode, it sets PortPowerStatus only on ports whose
+ PortPowerControlMask bit is not set. Writing a '0' has no effect. */
+ uint32_t drwe : 1; /**< (read) DeviceRemoteWakeupEnable. This bit enables a ConnectStatusChange bit as
+ a resume event, causing a USBSUSPEND to USBRESUME state transition and
+ setting the ResumeDetected interrupt. 0 = ConnectStatusChange is not a
+ remote wakeup event. 1 = ConnectStatusChange is a remote wakeup event.
+ (write) SetRemoteWakeupEnable. Writing a '1' sets DeviceRemoteWakeupEnable.
+ Writing a '0' has no effect. */
+ uint32_t reserved_2_14 : 13;
+ uint32_t oci : 1; /**< OverCurrentIndicator. This bit reports overcurrent conditions when the global
+ reporting is implemented. When set, an overcurrent condition exists. When
+ cleared, all power operations are normal. If per-port overcurrent protection
+ is implemented this bit is always '0' */
+ uint32_t lps : 1; /**< (read) LocalPowerStatus. The Root Hub does not support the local power status
+ feature; thus, this bit is always read as '0'.
+ (write) ClearGlobalPower. In global power mode (PowerSwitchingMode=0), this
+ bit is written to '1' to turn off power to all ports
+ (clear PortPowerStatus). In per-port power mode, it clears
+ PortPowerStatus only on ports whose PortPowerControlMask bit is not
+ set. Writing a '0' has no effect. */
+#else
+ uint32_t lps : 1;
+ uint32_t oci : 1;
+ uint32_t reserved_2_14 : 13;
+ uint32_t drwe : 1;
+ uint32_t lpsc : 1;
+ uint32_t ccic : 1;
+ uint32_t reserved_18_30 : 13;
+ uint32_t crwe : 1;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_hcrhstatus_s cn61xx;
+ struct cvmx_uahcx_ohci0_hcrhstatus_s cn63xx;
+ struct cvmx_uahcx_ohci0_hcrhstatus_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_hcrhstatus_s cn66xx;
+ struct cvmx_uahcx_ohci0_hcrhstatus_s cn68xx;
+ struct cvmx_uahcx_ohci0_hcrhstatus_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_hcrhstatus_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_hcrhstatus cvmx_uahcx_ohci0_hcrhstatus_t;
+
+/**
+ * cvmx_uahc#_ohci0_insnreg06
+ *
+ * OHCI0_INSNREG06 = OHCI AHB Error Status Register (Synopsys Specific)
+ *
+ * This register contains AHB Error Status.
+ */
+union cvmx_uahcx_ohci0_insnreg06 {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_insnreg06_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t vld : 1; /**< AHB Error Captured. Indicator that an AHB error was encountered and values were captured.
+ To clear this field the application must write a 0 to it. */
+ uint32_t reserved_0_30 : 31;
+#else
+ uint32_t reserved_0_30 : 31;
+ uint32_t vld : 1;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_insnreg06_s cn61xx;
+ struct cvmx_uahcx_ohci0_insnreg06_s cn63xx;
+ struct cvmx_uahcx_ohci0_insnreg06_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_insnreg06_s cn66xx;
+ struct cvmx_uahcx_ohci0_insnreg06_s cn68xx;
+ struct cvmx_uahcx_ohci0_insnreg06_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_insnreg06_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_insnreg06 cvmx_uahcx_ohci0_insnreg06_t;
+
+/**
+ * cvmx_uahc#_ohci0_insnreg07
+ *
+ * OHCI0_INSNREG07 = OHCI AHB Error Address Register (Synopsys Specific)
+ *
+ * This register contains the AHB error address.
+ */
+union cvmx_uahcx_ohci0_insnreg07 {
+ uint32_t u32;
+ struct cvmx_uahcx_ohci0_insnreg07_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t err_addr : 32; /**< AHB Master Error Address. AHB address of the control phase at which the AHB error occurred. */
+#else
+ uint32_t err_addr : 32;
+#endif
+ } s;
+ struct cvmx_uahcx_ohci0_insnreg07_s cn61xx;
+ struct cvmx_uahcx_ohci0_insnreg07_s cn63xx;
+ struct cvmx_uahcx_ohci0_insnreg07_s cn63xxp1;
+ struct cvmx_uahcx_ohci0_insnreg07_s cn66xx;
+ struct cvmx_uahcx_ohci0_insnreg07_s cn68xx;
+ struct cvmx_uahcx_ohci0_insnreg07_s cn68xxp1;
+ struct cvmx_uahcx_ohci0_insnreg07_s cnf71xx;
+};
+typedef union cvmx_uahcx_ohci0_insnreg07 cvmx_uahcx_ohci0_insnreg07_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-uahcx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-uart.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-uart.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-uart.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,171 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-uart.h>
+#else
+#include "cvmx.h"
+#include "cvmx-uart.h"
+#include "cvmx-interrupt.h"
+#endif
+
+#ifndef CVMX_BUILD_FOR_TOOLCHAIN
+void cvmx_uart_enable_intr(int uart, cvmx_uart_intr_handler_t handler)
+{
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+ cvmx_uart_ier_t ier;
+
+ cvmx_interrupt_register(CVMX_IRQ_UART0 + uart, handler, NULL);
+ /* Enable uart interrupts for debugger Control-C processing */
+ ier.u64 = cvmx_read_csr(CVMX_MIO_UARTX_IER(uart));
+ ier.s.erbfi = 1;
+ cvmx_write_csr(CVMX_MIO_UARTX_IER(uart), ier.u64);
+
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_UART0 + uart);
+#endif
+}
+#endif
+
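+/*
+ * Editor's sketch (hypothetical handler, not part of the SDK source): a
+ * caller of cvmx_uart_enable_intr() supplies a handler matching the
+ * cvmx_uart_intr_handler_t typedef from cvmx-uart.h, e.g.
+ *
+ *     static void my_uart_isr(int irq, uint64_t registers[], void *user)
+ *     {
+ *         // drain the RX FIFO here, e.g. with cvmx_uart_read_byte(0)
+ *     }
+ *     ...
+ *     cvmx_uart_enable_intr(0, my_uart_isr);
+ */
+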
+static int cvmx_uart_simulator_p(void)
+{
+#ifndef CVMX_BUILD_FOR_TOOLCHAIN
+ return cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM;
+#else
+ extern int __octeon_simulator_p;
+ return __octeon_simulator_p;
+#endif
+}
+
+
+/**
+ * Function that does the real work of setting up the Octeon uart.
+ * Takes all parameters as arguments, so it does not require gd
+ * structure to be set up.
+ *
+ * @param uart_index Index of uart to configure
+ * @param cpu_clock_hertz
+ * CPU clock frequency in Hz
+ * @param baudrate Baudrate to configure
+ *
+ * @return 0 on success
+ * !0 on error
+ */
+int cvmx_uart_setup2(int uart_index, int cpu_clock_hertz, int baudrate)
+{
+ uint16_t divisor;
+ cvmx_uart_fcr_t fcrval;
+ cvmx_uart_mcr_t mcrval;
+ cvmx_uart_lcr_t lcrval;
+
+ fcrval.u64 = 0;
+    fcrval.s.en = 1;    /* enable the FIFOs */
+ fcrval.s.rxfr = 1; /* reset the RX fifo */
+ fcrval.s.txfr = 1; /* reset the TX fifo */
+
+ if (cvmx_uart_simulator_p())
+ divisor = 1;
+ else
+ divisor = ((unsigned long)(cpu_clock_hertz + 8 * baudrate) / (unsigned long)(16 * baudrate));
+
+ cvmx_write_csr(CVMX_MIO_UARTX_FCR(uart_index), fcrval.u64);
+
+ mcrval.u64 = 0;
+ if (uart_index == 1 && cvmx_uart_simulator_p())
+ mcrval.s.afce = 1; /* enable auto flow control for simulator. Needed for gdb regression callfuncs.exp. */
+ else
+ mcrval.s.afce = 0; /* disable auto flow control so board can power on without serial port connected */
+
+ mcrval.s.rts = 1; /* looks like this must be set for auto flow control to work */
+
+ cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart_index));
+
+ lcrval.u64 = 0;
+ lcrval.s.cls = CVMX_UART_BITS8;
+    lcrval.s.stop = 0;  /* one stop bit */
+    lcrval.s.pen = 0;   /* parity disabled */
+    lcrval.s.eps = 1;   /* even parity select (ignored while parity is disabled) */
+ lcrval.s.dlab = 1; /* temporary to program the divisor */
+ cvmx_write_csr(CVMX_MIO_UARTX_LCR(uart_index), lcrval.u64);
+
+ cvmx_write_csr(CVMX_MIO_UARTX_DLL(uart_index), divisor & 0xff);
+ cvmx_write_csr(CVMX_MIO_UARTX_DLH(uart_index), (divisor>>8) & 0xff);
+
+ lcrval.s.dlab = 0; /* divisor is programmed now, set this back to normal */
+ cvmx_write_csr(CVMX_MIO_UARTX_LCR(uart_index), lcrval.u64);
+
+ /* spec says need to wait after you program the divisor */
+ if (!cvmx_uart_simulator_p())
+ {
+ uint64_t read_cycle;
+ CVMX_MF_CYCLE (read_cycle);
+ read_cycle += (2 * divisor * 16) + 10000;
+
+ /* Spin */
+ while (1)
+ {
+ uint64_t new_cycle;
+ CVMX_MF_CYCLE (new_cycle);
+ if (new_cycle >= read_cycle)
+ break;
+ }
+ }
+
+ /* Don't enable flow control until after baud rate is configured. - we don't want
+ ** to allow characters in until after the baud rate is fully configured */
+ cvmx_write_csr(CVMX_MIO_UARTX_MCR(uart_index), mcrval.u64);
+    return 0;
+}
+
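+/*
+ * Editor's note: the divisor computed in cvmx_uart_setup2() above rounds
+ * to the nearest integer. For example (a sketch, assuming a 600 MHz sclk
+ * and 115200 baud): (600000000 + 8 * 115200) / (16 * 115200) = 326,
+ * giving an actual rate of 600000000 / (16 * 326) = ~115031 baud, within
+ * 0.15% of the target. A typical (hypothetical) call:
+ *
+ *     if (cvmx_uart_setup2(0, cvmx_clock_get_rate(CVMX_CLOCK_SCLK), 115200))
+ *         cvmx_dprintf("uart0: setup failed\n");
+ */
+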
+/**
+ * Setup a uart for use
+ *
+ * @param uart_index Uart to setup (0 or 1)
+ * @return Zero on success
+ */
+int cvmx_uart_setup(int uart_index)
+{
+    return cvmx_uart_setup2(uart_index, cvmx_clock_get_rate(CVMX_CLOCK_SCLK), 115200);
+}
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-uart.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-uart.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-uart.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-uart.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,170 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the serial port UART hardware.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+
+#ifndef __CVMX_UART_H__
+#define __CVMX_UART_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_UART_NUM_PORTS 2
+#define CVMX_UART_TX_FIFO_SIZE 64
+#define CVMX_UART_RX_FIFO_SIZE 64
+
+/* CSR typedefs have been moved to cvmx-uart-defs.h */
+
+typedef void (*cvmx_uart_intr_handler_t)(int, uint64_t[], void *);
+
+extern void cvmx_uart_enable_intr(int, cvmx_uart_intr_handler_t);
+extern int cvmx_uart_setup2(int, int, int);
+extern int cvmx_uart_setup(int);
+
+/* Defined in libc. */
+unsigned __octeon_uart_trylock (void);
+void __octeon_uart_unlock (void);
+
+/**
+ * Get a single byte from serial port.
+ *
+ * @param uart_index Uart to read from (0 or 1)
+ * @return The byte read
+ */
+static inline uint8_t cvmx_uart_read_byte(int uart_index)
+{
+ cvmx_uart_lsr_t lsrval;
+
+ /* Spin until data is available */
+ do
+ {
+ lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart_index));
+ } while (!lsrval.s.dr);
+
+ /* Read and return the data */
+ return cvmx_read_csr(CVMX_MIO_UARTX_RBR(uart_index));
+}
+
+/**
+ * Get a single byte from serial port with a timeout.
+ *
+ * @param uart_index Uart to read from (0 or 1)
+ * @param timedout   Set to 1 if a timeout occurred, 0 otherwise
+ * @param timeout    Number of status polls to attempt before giving up
+ * @return The byte read
+ */
+static inline uint8_t cvmx_uart_read_byte_with_timeout(int uart_index, int *timedout, volatile unsigned timeout)
+{
+ cvmx_uart_lsr_t lsrval;
+
+ /* Spin until data is available */
+ *timedout = 0;
+ do
+ {
+        if (timeout == 0)
+        {
+            *timedout = 1;
+            return -1; /* truncates to 0xff; callers must check *timedout */
+        }
+ lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart_index));
+        timeout--;
+ } while (!lsrval.s.dr);
+
+ /* Read and return the data */
+ return cvmx_read_csr(CVMX_MIO_UARTX_RBR(uart_index));
+}
+
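+/*
+ * Editor's sketch (hypothetical caller, not part of the SDK): note that
+ * the timeout parameter above counts LSR polls, not wall-clock time, so
+ * the real-time bound depends on CSR access latency.
+ *
+ *     int timedout;
+ *     uint8_t c = cvmx_uart_read_byte_with_timeout(0, &timedout, 1000000);
+ *     if (timedout)
+ *         ;  // give up; the 0xff return value is not valid data
+ */
+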
+
+/**
+ * Write a single byte to the uart port.
+ *
+ * @param uart_index Uart to write to (0 or 1)
+ * @param ch Byte to write
+ */
+static inline void cvmx_uart_write_byte(int uart_index, uint8_t ch)
+{
+ cvmx_uart_lsr_t lsrval;
+
+ /* Spin until there is room */
+ do
+ {
+ lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart_index));
+ }
+ while (lsrval.s.thre == 0);
+
+ /* Write the byte */
+ cvmx_write_csr(CVMX_MIO_UARTX_THR(uart_index), ch);
+}
+
+/**
+ * Write a string to the uart
+ *
+ * @param uart_index Uart to use (0 or 1)
+ * @param str String to write
+ */
+static inline void cvmx_uart_write_string(int uart_index, const char *str)
+{
+ /* Just loop writing one byte at a time */
+ while (*str)
+ {
+ cvmx_uart_write_byte(uart_index, *str);
+ str++;
+ }
+}
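+
+/*
+ * Editor's sketch: the helpers above compose into a trivial polled echo
+ * loop (hypothetical usage, not part of the SDK):
+ *
+ *     cvmx_uart_write_string(0, "echo> ");
+ *     while (1)
+ *         cvmx_uart_write_byte(0, cvmx_uart_read_byte(0));
+ */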
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_UART_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-uart.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-uctlx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-uctlx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-uctlx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,964 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-uctlx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon uctlx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_UCTLX_DEFS_H__
+#define __CVMX_UCTLX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_BIST_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_BIST_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F0000A0ull);
+}
+#else
+#define CVMX_UCTLX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x000118006F0000A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_CLK_RST_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_CLK_RST_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F000000ull);
+}
+#else
+#define CVMX_UCTLX_CLK_RST_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_EHCI_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_EHCI_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F000080ull);
+}
+#else
+#define CVMX_UCTLX_EHCI_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_EHCI_FLA(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_EHCI_FLA(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F0000A8ull);
+}
+#else
+#define CVMX_UCTLX_EHCI_FLA(block_id) (CVMX_ADD_IO_SEG(0x000118006F0000A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_ERTO_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_ERTO_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F000090ull);
+}
+#else
+#define CVMX_UCTLX_ERTO_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_IF_ENA(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_IF_ENA(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F000030ull);
+}
+#else
+#define CVMX_UCTLX_IF_ENA(block_id) (CVMX_ADD_IO_SEG(0x000118006F000030ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_INT_ENA(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_INT_ENA(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F000028ull);
+}
+#else
+#define CVMX_UCTLX_INT_ENA(block_id) (CVMX_ADD_IO_SEG(0x000118006F000028ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_INT_REG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_INT_REG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F000020ull);
+}
+#else
+#define CVMX_UCTLX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x000118006F000020ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_OHCI_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_OHCI_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F000088ull);
+}
+#else
+#define CVMX_UCTLX_OHCI_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000088ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_ORTO_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_ORTO_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F000098ull);
+}
+#else
+#define CVMX_UCTLX_ORTO_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000098ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_PPAF_WM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_PPAF_WM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F000038ull);
+}
+#else
+#define CVMX_UCTLX_PPAF_WM(block_id) (CVMX_ADD_IO_SEG(0x000118006F000038ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_UPHY_CTL_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_UCTLX_UPHY_CTL_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F000008ull);
+}
+#else
+#define CVMX_UCTLX_UPHY_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x000118006F000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset <= 1)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 1)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 1)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset <= 1)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x000118006F000010ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8;
+}
+#else
+#define CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(offset, block_id) (CVMX_ADD_IO_SEG(0x000118006F000010ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8)
+#endif
+
+/**
+ * cvmx_uctl#_bist_status
+ *
+ * UCTL_BIST_STATUS = UCTL Bist Status
+ *
+ * Results from BIST runs of UCTL's memories.
+ */
+union cvmx_uctlx_bist_status {
+ uint64_t u64;
+ struct cvmx_uctlx_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t data_bis : 1; /**< UAHC EHCI Data Ram Bist Status */
+ uint64_t desc_bis : 1; /**< UAHC EHCI Descriptor Ram Bist Status */
+ uint64_t erbm_bis : 1; /**< UCTL EHCI Read Buffer Memory Bist Status */
+ uint64_t orbm_bis : 1; /**< UCTL OHCI Read Buffer Memory Bist Status */
+	uint64_t wrbm_bis                     : 1;  /**< UCTL Write Buffer Memory Bist Status */
+ uint64_t ppaf_bis : 1; /**< PP Access FIFO Memory Bist Status */
+#else
+ uint64_t ppaf_bis : 1;
+ uint64_t wrbm_bis : 1;
+ uint64_t orbm_bis : 1;
+ uint64_t erbm_bis : 1;
+ uint64_t desc_bis : 1;
+ uint64_t data_bis : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_uctlx_bist_status_s cn61xx;
+ struct cvmx_uctlx_bist_status_s cn63xx;
+ struct cvmx_uctlx_bist_status_s cn63xxp1;
+ struct cvmx_uctlx_bist_status_s cn66xx;
+ struct cvmx_uctlx_bist_status_s cn68xx;
+ struct cvmx_uctlx_bist_status_s cn68xxp1;
+ struct cvmx_uctlx_bist_status_s cnf71xx;
+};
+typedef union cvmx_uctlx_bist_status cvmx_uctlx_bist_status_t;
+
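+/* Editor's sketch (not part of the auto-generated file): decoding this CSR
+ * follows the usual cvmx pattern of reading into the union and testing the
+ * named bit-fields, e.g.
+ *
+ *     cvmx_uctlx_bist_status_t bist;
+ *     bist.u64 = cvmx_read_csr(CVMX_UCTLX_BIST_STATUS(0));
+ *     if (bist.s.wrbm_bis)
+ *         cvmx_dprintf("UCTL write buffer memory failed BIST\n");
+ */
+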
+/**
+ * cvmx_uctl#_clk_rst_ctl
+ *
+ * CLK_RST_CTL = Clock and Reset Control Register
+ * This register controls the frequency of hclk and the resets for the hclk and phy clocks. It also controls simulation modes and BIST.
+ */
+union cvmx_uctlx_clk_rst_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_clk_rst_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63 : 39;
+ uint64_t clear_bist : 1; /**< Clear BIST on the HCLK memories */
+ uint64_t start_bist : 1; /**< Starts BIST on the HCLK memories during 0-to-1
+ transition. */
+ uint64_t ehci_sm : 1; /**< Only set it during simulation time. When set to 1,
+ this bit sets the PHY in a non-driving mode so the
+ EHCI can detect device connection.
+ Note: it must not be set to 1, during normal
+ operation. */
+ uint64_t ohci_clkcktrst : 1; /**< Clear clock reset. Active low. OHCI initial reset
+ signal for the DPLL block. This is only needed by
+ simulation. The duration of the reset in simulation
+ must be the same as HRST.
+ Note: it must be set to 1 during normal operation. */
+ uint64_t ohci_sm : 1; /**< OHCI Simulation Mode. It selects the counter value
+ for simulation or real time for 1 ms.
+ - 0: counter full 1ms; 1: simulation time. */
+ uint64_t ohci_susp_lgcy : 1; /**< OHCI Clock Control Signal. Note: This bit must be
+ set to 0 if the OHCI 48/12Mhz clocks must be
+ suspended when the EHCI and OHCI controllers are
+ not active. */
+ uint64_t app_start_clk : 1; /**< OHCI Clock Control Signal. When the OHCI clocks are
+ suspended, the system has to assert this signal to
+ start the clocks (12 and 48 Mhz). */
+ uint64_t o_clkdiv_rst : 1; /**< OHCI 12Mhz clock divider reset. Active low. When
+ set to 0, divider is held in reset.
+ The reset to the divider is also asserted when core
+ reset is asserted. */
+ uint64_t h_clkdiv_byp : 1; /**< Used to enable the bypass input to the USB_CLK_DIV */
+ uint64_t h_clkdiv_rst : 1; /**< Host clock divider reset. Active low. When set to 0,
+ divider is held in reset. This must be set to 0
+                                                         before changing H_DIV0 and H_DIV1.
+ The reset to the divider is also asserted when core
+ reset is asserted. */
+	uint64_t h_clkdiv_en                  : 1;  /**< Hclk enable. When set to 1, the hclk is generated. */
+	uint64_t o_clkdiv_en                  : 1;  /**< OHCI 48Mhz/12MHz clock enable. When set to 1, the
+                                                         clocks are generated. */
+ uint64_t h_div : 4; /**< The hclk frequency is sclk frequency divided by
+ H_DIV. The maximum frequency of hclk is 200Mhz.
+ The minimum frequency of hclk is no less than the
+ UTMI clock frequency which is 60Mhz. After writing a
+ value to this field, the software should read the
+ field for the value written. The [H_ENABLE] field of
+ this register should not be set until after this
+ field is set and then read.
+ Only the following values are valid:
+ 1, 2, 3, 4, 6, 8, 12.
+ All other values are reserved and will be coded as
+ following:
+ 0 -> 1
+ 5 -> 4
+ 7 -> 6
+ 9,10,11 -> 8
+ 13,14,15 -> 12 */
+ uint64_t p_refclk_sel : 2; /**< PHY PLL Reference Clock Select.
+ - 00: uses 12Mhz crystal at USB_XO and USB_XI;
+ - 01: uses 12/24/48Mhz 2.5V clock source at USB_XO.
+ USB_XI should be tied to GND(Not Supported).
+ 1x: Reserved. */
+ uint64_t p_refclk_div : 2; /**< PHY Reference Clock Frequency Select.
+ - 00: 12MHz,
+ - 01: 24Mhz (Not Supported),
+ - 10: 48Mhz (Not Supported),
+ - 11: Reserved.
+                                                         Note: This value must be set while POR is active.
+ If a crystal is used as a reference clock,this field
+ must be set to 12 MHz. Values 01 and 10 are reserved
+ when a crystal is used. */
+ uint64_t reserved_4_4 : 1;
+ uint64_t p_com_on : 1; /**< PHY Common Block Power-Down Control.
+ - 1: The XO, Bias, and PLL blocks are powered down in
+ Suspend mode.
+ - 0: The XO, Bias, and PLL blocks remain powered in
+ suspend mode.
+                                                         Note: This bit must be set to 0 while POR is active
+                                                         in the current design. */
+ uint64_t p_por : 1; /**< Power on reset for PHY. Resets all the PHY's
+ registers and state machines. */
+	uint64_t p_prst                       : 1;  /**< PHY Clock Reset. This is the value for phy_rst_n,
+ utmi_rst_n[1] and utmi_rst_n[0]. It is synchronized
+ to each clock domain to generate the corresponding
+ reset signal. This should not be set to 1 until the
+ time it takes for six clock cycles (HCLK and
+ PHY CLK, which ever is slower) has passed. */
+ uint64_t hrst : 1; /**< Host Clock Reset. This is the value for hreset_n.
+ This should not be set to 1 until 12ms after PHY CLK
+ is stable. */
+#else
+ uint64_t hrst : 1;
+ uint64_t p_prst : 1;
+ uint64_t p_por : 1;
+ uint64_t p_com_on : 1;
+ uint64_t reserved_4_4 : 1;
+ uint64_t p_refclk_div : 2;
+ uint64_t p_refclk_sel : 2;
+ uint64_t h_div : 4;
+ uint64_t o_clkdiv_en : 1;
+ uint64_t h_clkdiv_en : 1;
+ uint64_t h_clkdiv_rst : 1;
+ uint64_t h_clkdiv_byp : 1;
+ uint64_t o_clkdiv_rst : 1;
+ uint64_t app_start_clk : 1;
+ uint64_t ohci_susp_lgcy : 1;
+ uint64_t ohci_sm : 1;
+ uint64_t ohci_clkcktrst : 1;
+ uint64_t ehci_sm : 1;
+ uint64_t start_bist : 1;
+ uint64_t clear_bist : 1;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_uctlx_clk_rst_ctl_s cn61xx;
+ struct cvmx_uctlx_clk_rst_ctl_s cn63xx;
+ struct cvmx_uctlx_clk_rst_ctl_s cn63xxp1;
+ struct cvmx_uctlx_clk_rst_ctl_s cn66xx;
+ struct cvmx_uctlx_clk_rst_ctl_s cn68xx;
+ struct cvmx_uctlx_clk_rst_ctl_s cn68xxp1;
+ struct cvmx_uctlx_clk_rst_ctl_s cnf71xx;
+};
+typedef union cvmx_uctlx_clk_rst_ctl cvmx_uctlx_clk_rst_ctl_t;
+
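+/* Editor's sketch, assuming only the H_DIV constraints documented above
+ * (legal values 1, 2, 3, 4, 6, 8, 12; hclk <= 200 Mhz and >= the 60 Mhz
+ * UTMI clock): pick the smallest legal divider that satisfies the cap.
+ *
+ *     static const int h_div_legal[] = {1, 2, 3, 4, 6, 8, 12};
+ *     uint64_t sclk = cvmx_clock_get_rate(CVMX_CLOCK_SCLK);
+ *     int i, h_div = 12;
+ *     for (i = 0; i < 7; i++)
+ *         if (sclk / h_div_legal[i] <= 200000000ull) {
+ *             h_div = h_div_legal[i];
+ *             break;
+ *         }
+ *     // the caller must still verify sclk / h_div >= 60000000 before
+ *     // writing CLK_RST_CTL[H_DIV]
+ */
+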
+/**
+ * cvmx_uctl#_ehci_ctl
+ *
+ * UCTL_EHCI_CTL = UCTL EHCI Control Register
+ * This register controls the general behavior of UCTL EHCI datapath.
+ */
+union cvmx_uctlx_ehci_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_ehci_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t desc_rbm : 1; /**< Descriptor Read Burst Mode on AHB bus
+                                                         - 1: A read burst can be interrupted after 16 AHB
+                                                         clock cycles
+ - 0: A read burst will not be interrupted until it
+ finishes or no more data available */
+ uint64_t reg_nb : 1; /**< 1: EHCI register access will not be blocked by EHCI
+ buffer/descriptor access on AHB
+ - 0: Buffer/descriptor and register access will be
+ mutually exclusive */
+ uint64_t l2c_dc : 1; /**< When set to 1, set the commit bit in the descriptor
+ store commands to L2C. */
+ uint64_t l2c_bc : 1; /**< When set to 1, set the commit bit in the buffer
+ store commands to L2C. */
+ uint64_t l2c_0pag : 1; /**< When set to 1, sets the zero-page bit in store
+ command to L2C. */
+ uint64_t l2c_stt : 1; /**< When set to 1, use STT when store to L2C. */
+ uint64_t l2c_buff_emod : 2; /**< Endian format for buffer from/to the L2C.
+ IN: A-B-C-D-E-F-G-H
+ OUT0: A-B-C-D-E-F-G-H
+ OUT1: H-G-F-E-D-C-B-A
+ OUT2: D-C-B-A-H-G-F-E
+ OUT3: E-F-G-H-A-B-C-D */
+ uint64_t l2c_desc_emod : 2; /**< Endian format for descriptor from/to the L2C.
+ IN: A-B-C-D-E-F-G-H
+ OUT0: A-B-C-D-E-F-G-H
+ OUT1: H-G-F-E-D-C-B-A
+ OUT2: D-C-B-A-H-G-F-E
+ OUT3: E-F-G-H-A-B-C-D */
+ uint64_t inv_reg_a2 : 1; /**< UAHC register address bit<2> invert. When set to 1,
+ for a 32-bit NCB I/O register access, the address
+ offset will be flipped between 0x4 and 0x0. */
+ uint64_t ehci_64b_addr_en : 1; /**< EHCI AHB Master 64-bit Addressing Enable.
+ - 1: enable ehci 64-bit addressing mode;
+ - 0: disable ehci 64-bit addressing mode.
+ When ehci 64-bit addressing mode is disabled,
+ UCTL_EHCI_CTL[L2C_ADDR_MSB] is used as the address
+ bit[39:32]. */
+	uint64_t l2c_addr_msb                 : 8;  /**< This is bits [39:32] of an address sent to L2C
+                                                         for ehci when UCTL_EHCI_CTL[EHCI_64B_ADDR_EN] = 0. */
+#else
+ uint64_t l2c_addr_msb : 8;
+ uint64_t ehci_64b_addr_en : 1;
+ uint64_t inv_reg_a2 : 1;
+ uint64_t l2c_desc_emod : 2;
+ uint64_t l2c_buff_emod : 2;
+ uint64_t l2c_stt : 1;
+ uint64_t l2c_0pag : 1;
+ uint64_t l2c_bc : 1;
+ uint64_t l2c_dc : 1;
+ uint64_t reg_nb : 1;
+ uint64_t desc_rbm : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_uctlx_ehci_ctl_s cn61xx;
+ struct cvmx_uctlx_ehci_ctl_s cn63xx;
+ struct cvmx_uctlx_ehci_ctl_s cn63xxp1;
+ struct cvmx_uctlx_ehci_ctl_s cn66xx;
+ struct cvmx_uctlx_ehci_ctl_s cn68xx;
+ struct cvmx_uctlx_ehci_ctl_s cn68xxp1;
+ struct cvmx_uctlx_ehci_ctl_s cnf71xx;
+};
+typedef union cvmx_uctlx_ehci_ctl cvmx_uctlx_ehci_ctl_t;
+
+/**
+ * cvmx_uctl#_ehci_fla
+ *
+ * UCTL_EHCI_FLA = UCTL EHCI Frame Length Adjustment Register
+ * This register configures the EHCI Frame Length Adjustment.
+ */
+union cvmx_uctlx_ehci_fla {
+ uint64_t u64;
+ struct cvmx_uctlx_ehci_fla_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t fla : 6; /**< EHCI Frame Length Adjustment. This feature
+ adjusts any offset from the clock source that drives
+ the uSOF counter. The uSOF cycle time (number of
+ uSOF counter clock periods to generate a uSOF
+                                                         microframe length) is equal to 59,488 plus 16 times
+                                                         this value. The default value is 32 (0x20), which
+                                                         gives an SOF cycle
+ time of 60,000 (each microframe has 60,000 bit times).
+ -------------------------------------------------
+ Frame Length (decimal) FLA Value
+ -------------------------------------------------
+ 59488 0x00
+ 59504 0x01
+ 59520 0x02
+ ... ...
+ 59984 0x1F
+ 60000 0x20
+ 60016 0x21
+ ... ...
+ 60496 0x3F
+ --------------------------------------------------
+ Note: keep this value to 0x20 (decimal 32) for no
+ offset. */
+#else
+ uint64_t fla : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_uctlx_ehci_fla_s cn61xx;
+ struct cvmx_uctlx_ehci_fla_s cn63xx;
+ struct cvmx_uctlx_ehci_fla_s cn63xxp1;
+ struct cvmx_uctlx_ehci_fla_s cn66xx;
+ struct cvmx_uctlx_ehci_fla_s cn68xx;
+ struct cvmx_uctlx_ehci_fla_s cn68xxp1;
+ struct cvmx_uctlx_ehci_fla_s cnf71xx;
+};
+typedef union cvmx_uctlx_ehci_fla cvmx_uctlx_ehci_fla_t;
+
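+/* Editor's note: per the table above, the uSOF cycle time is
+ * 59488 + 16 * FLA bit times, so FLA = (frame_length - 59488) / 16;
+ * the default 60000-bit frame gives (60000 - 59488) / 16 = 32 = 0x20. */
+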
+/**
+ * cvmx_uctl#_erto_ctl
+ *
+ * UCTL_ERTO_CTL = UCTL EHCI Readbuffer TimeOut Control Register
+ * This register controls the timeout for the EHCI read buffer.
+ */
+union cvmx_uctlx_erto_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_erto_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t to_val : 27; /**< Read buffer timeout value
+ (value 0 means timeout disabled) */
+ uint64_t reserved_0_4 : 5;
+#else
+ uint64_t reserved_0_4 : 5;
+ uint64_t to_val : 27;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_uctlx_erto_ctl_s cn61xx;
+ struct cvmx_uctlx_erto_ctl_s cn63xx;
+ struct cvmx_uctlx_erto_ctl_s cn63xxp1;
+ struct cvmx_uctlx_erto_ctl_s cn66xx;
+ struct cvmx_uctlx_erto_ctl_s cn68xx;
+ struct cvmx_uctlx_erto_ctl_s cn68xxp1;
+ struct cvmx_uctlx_erto_ctl_s cnf71xx;
+};
+typedef union cvmx_uctlx_erto_ctl cvmx_uctlx_erto_ctl_t;
+
+/**
+ * cvmx_uctl#_if_ena
+ *
+ * UCTL_IF_ENA = UCTL Interface Enable Register
+ *
+ * Register to enable the uctl interface clock.
+ */
+union cvmx_uctlx_if_ena {
+ uint64_t u64;
+ struct cvmx_uctlx_if_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t en : 1; /**< Turns on the USB UCTL interface clock */
+#else
+ uint64_t en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_uctlx_if_ena_s cn61xx;
+ struct cvmx_uctlx_if_ena_s cn63xx;
+ struct cvmx_uctlx_if_ena_s cn63xxp1;
+ struct cvmx_uctlx_if_ena_s cn66xx;
+ struct cvmx_uctlx_if_ena_s cn68xx;
+ struct cvmx_uctlx_if_ena_s cn68xxp1;
+ struct cvmx_uctlx_if_ena_s cnf71xx;
+};
+typedef union cvmx_uctlx_if_ena cvmx_uctlx_if_ena_t;
+
+/**
+ * cvmx_uctl#_int_ena
+ *
+ * UCTL_INT_ENA = UCTL Interrupt Enable Register
+ *
+ * Register to enable the individual interrupt sources corresponding to UCTL_INT_REG
+ */
+union cvmx_uctlx_int_ena {
+ uint64_t u64;
+ struct cvmx_uctlx_int_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+	uint64_t ec_ovf_e                     : 1;  /**< EHCI Commit Overflow Error */
+	uint64_t oc_ovf_e                     : 1;  /**< OHCI Commit Overflow Error */
+	uint64_t wb_pop_e                     : 1;  /**< Write Buffer FIFO Popped When Empty */
+ uint64_t wb_psh_f : 1; /**< Write Buffer FIFO Pushed When Full */
+ uint64_t cf_psh_f : 1; /**< Command FIFO Pushed When Full */
+ uint64_t or_psh_f : 1; /**< OHCI Read Buffer FIFO Pushed When Full */
+ uint64_t er_psh_f : 1; /**< EHCI Read Buffer FIFO Pushed When Full */
+ uint64_t pp_psh_f : 1; /**< PP Access FIFO Pushed When Full */
+#else
+ uint64_t pp_psh_f : 1;
+ uint64_t er_psh_f : 1;
+ uint64_t or_psh_f : 1;
+ uint64_t cf_psh_f : 1;
+ uint64_t wb_psh_f : 1;
+ uint64_t wb_pop_e : 1;
+ uint64_t oc_ovf_e : 1;
+ uint64_t ec_ovf_e : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_uctlx_int_ena_s cn61xx;
+ struct cvmx_uctlx_int_ena_s cn63xx;
+ struct cvmx_uctlx_int_ena_s cn63xxp1;
+ struct cvmx_uctlx_int_ena_s cn66xx;
+ struct cvmx_uctlx_int_ena_s cn68xx;
+ struct cvmx_uctlx_int_ena_s cn68xxp1;
+ struct cvmx_uctlx_int_ena_s cnf71xx;
+};
+typedef union cvmx_uctlx_int_ena cvmx_uctlx_int_ena_t;
+
+/**
+ * cvmx_uctl#_int_reg
+ *
+ * UCTL_INT_REG = UCTL Interrupt Register
+ *
+ * Summary of different bits of RSL interrupt status.
+ */
+union cvmx_uctlx_int_reg {
+ uint64_t u64;
+ struct cvmx_uctlx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63 : 56;
+	uint64_t ec_ovf_e                     : 1;  /**< EHCI Commit Overflow Error
+                                                         When this error happens, the whole NCB system needs
+                                                         to be reset. */
+	uint64_t oc_ovf_e                     : 1;  /**< OHCI Commit Overflow Error
+                                                         When this error happens, the whole NCB system needs
+                                                         to be reset. */
+	uint64_t wb_pop_e                     : 1;  /**< Write Buffer FIFO Popped When Empty */
+ uint64_t wb_psh_f : 1; /**< Write Buffer FIFO Pushed When Full */
+ uint64_t cf_psh_f : 1; /**< Command FIFO Pushed When Full */
+ uint64_t or_psh_f : 1; /**< OHCI Read Buffer FIFO Pushed When Full */
+ uint64_t er_psh_f : 1; /**< EHCI Read Buffer FIFO Pushed When Full */
+ uint64_t pp_psh_f : 1; /**< PP Access FIFO Pushed When Full */
+#else
+ uint64_t pp_psh_f : 1;
+ uint64_t er_psh_f : 1;
+ uint64_t or_psh_f : 1;
+ uint64_t cf_psh_f : 1;
+ uint64_t wb_psh_f : 1;
+ uint64_t wb_pop_e : 1;
+ uint64_t oc_ovf_e : 1;
+ uint64_t ec_ovf_e : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_uctlx_int_reg_s cn61xx;
+ struct cvmx_uctlx_int_reg_s cn63xx;
+ struct cvmx_uctlx_int_reg_s cn63xxp1;
+ struct cvmx_uctlx_int_reg_s cn66xx;
+ struct cvmx_uctlx_int_reg_s cn68xx;
+ struct cvmx_uctlx_int_reg_s cn68xxp1;
+ struct cvmx_uctlx_int_reg_s cnf71xx;
+};
+typedef union cvmx_uctlx_int_reg cvmx_uctlx_int_reg_t;
+
+/**
+ * cvmx_uctl#_ohci_ctl
+ *
+ * RSL registers starting from 0x10 can be accessed only after hclk is active and hreset is deasserted.
+ *
+ * UCTL_OHCI_CTL = UCTL OHCI Control Register
+ * This register controls the general behavior of UCTL OHCI datapath.
+ */
+union cvmx_uctlx_ohci_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_ohci_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63 : 45;
+ uint64_t reg_nb : 1; /**< 1: OHCI register access will not be blocked by EHCI
+ buffer/descriptor access on AHB
+ - 0: Buffer/descriptor and register access will be
+ mutually exclusive */
+ uint64_t l2c_dc : 1; /**< When set to 1, set the commit bit in the descriptor
+ store commands to L2C. */
+ uint64_t l2c_bc : 1; /**< When set to 1, set the commit bit in the buffer
+ store commands to L2C. */
+ uint64_t l2c_0pag : 1; /**< When set to 1, sets the zero-page bit in store
+ command to L2C. */
+ uint64_t l2c_stt : 1; /**< When set to 1, use STT when store to L2C. */
+ uint64_t l2c_buff_emod : 2; /**< Endian format for buffer from/to the L2C.
+ IN: A-B-C-D-E-F-G-H
+ OUT0: A-B-C-D-E-F-G-H
+ OUT1: H-G-F-E-D-C-B-A
+ OUT2: D-C-B-A-H-G-F-E
+ OUT3: E-F-G-H-A-B-C-D */
+ uint64_t l2c_desc_emod : 2; /**< Endian format for descriptor from/to the L2C.
+ IN: A-B-C-D-E-F-G-H
+ OUT0: A-B-C-D-E-F-G-H
+ OUT1: H-G-F-E-D-C-B-A
+ OUT2: D-C-B-A-H-G-F-E
+ OUT3: E-F-G-H-A-B-C-D */
+ uint64_t inv_reg_a2 : 1; /**< UAHC register address bit<2> invert. When set to 1,
+ for a 32-bit NCB I/O register access, the address
+ offset will be flipped between 0x4 and 0x0. */
+ uint64_t reserved_8_8 : 1;
+	uint64_t l2c_addr_msb                 : 8;  /**< This is bits [39:32] of an address sent to L2C
+ for ohci. */
+#else
+ uint64_t l2c_addr_msb : 8;
+ uint64_t reserved_8_8 : 1;
+ uint64_t inv_reg_a2 : 1;
+ uint64_t l2c_desc_emod : 2;
+ uint64_t l2c_buff_emod : 2;
+ uint64_t l2c_stt : 1;
+ uint64_t l2c_0pag : 1;
+ uint64_t l2c_bc : 1;
+ uint64_t l2c_dc : 1;
+ uint64_t reg_nb : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_uctlx_ohci_ctl_s cn61xx;
+ struct cvmx_uctlx_ohci_ctl_s cn63xx;
+ struct cvmx_uctlx_ohci_ctl_s cn63xxp1;
+ struct cvmx_uctlx_ohci_ctl_s cn66xx;
+ struct cvmx_uctlx_ohci_ctl_s cn68xx;
+ struct cvmx_uctlx_ohci_ctl_s cn68xxp1;
+ struct cvmx_uctlx_ohci_ctl_s cnf71xx;
+};
+typedef union cvmx_uctlx_ohci_ctl cvmx_uctlx_ohci_ctl_t;
+
+/**
+ * cvmx_uctl#_orto_ctl
+ *
+ * UCTL_ORTO_CTL = UCTL OHCI Readbuffer TimeOut Control Register
+ * This register controls the timeout for the OHCI read buffer.
+ */
+union cvmx_uctlx_orto_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_orto_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63 : 32;
+ uint64_t to_val : 24; /**< Read buffer timeout value
+ (value 0 means timeout disabled) */
+ uint64_t reserved_0_7 : 8;
+#else
+ uint64_t reserved_0_7 : 8;
+ uint64_t to_val : 24;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_uctlx_orto_ctl_s cn61xx;
+ struct cvmx_uctlx_orto_ctl_s cn63xx;
+ struct cvmx_uctlx_orto_ctl_s cn63xxp1;
+ struct cvmx_uctlx_orto_ctl_s cn66xx;
+ struct cvmx_uctlx_orto_ctl_s cn68xx;
+ struct cvmx_uctlx_orto_ctl_s cn68xxp1;
+ struct cvmx_uctlx_orto_ctl_s cnf71xx;
+};
+typedef union cvmx_uctlx_orto_ctl cvmx_uctlx_orto_ctl_t;
+
+/**
+ * cvmx_uctl#_ppaf_wm
+ *
+ * UCTL_PPAF_WM = UCTL PP Access FIFO WaterMark Register
+ *
+ * Register to set PP access FIFO full watermark.
+ */
+union cvmx_uctlx_ppaf_wm {
+ uint64_t u64;
+ struct cvmx_uctlx_ppaf_wm_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t wm : 5; /**< Number of entries when PP Access FIFO will assert
+ full (back pressure) */
+#else
+ uint64_t wm : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_uctlx_ppaf_wm_s cn61xx;
+ struct cvmx_uctlx_ppaf_wm_s cn63xx;
+ struct cvmx_uctlx_ppaf_wm_s cn63xxp1;
+ struct cvmx_uctlx_ppaf_wm_s cn66xx;
+ struct cvmx_uctlx_ppaf_wm_s cnf71xx;
+};
+typedef union cvmx_uctlx_ppaf_wm cvmx_uctlx_ppaf_wm_t;
+
+/**
+ * cvmx_uctl#_uphy_ctl_status
+ *
+ * UPHY_CTL_STATUS = USB PHY Control and Status Register
+ * This register controls the USB PHY test and BIST.
+ */
+union cvmx_uctlx_uphy_ctl_status {
+ uint64_t u64;
+ struct cvmx_uctlx_uphy_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t bist_done : 1; /**< PHY BIST DONE. Asserted at the end of the PHY BIST
+ sequence. */
+ uint64_t bist_err : 1; /**< PHY BIST Error. Valid when BIST_ENB is high.
+ Indicates an internal error was detected during the
+ BIST sequence. */
+ uint64_t hsbist : 1; /**< High-Speed BIST Enable */
+ uint64_t fsbist : 1; /**< Full-Speed BIST Enable */
+ uint64_t lsbist : 1; /**< Low-Speed BIST Enable */
+ uint64_t siddq : 1; /**< Drives the PHY SIDDQ input. Normally should be set
+                                                         to zero. Customers not using the USB PHY interface
+                                                         should do the following:
+                                                         provide 3.3V to USB_VDD33, tie USB_REXT to the 3.3V
+                                                         supply, and set SIDDQ to 1. */
+ uint64_t vtest_en : 1; /**< Analog Test Pin Enable.
+ 1 = The PHY's ANALOG_TEST pin is enabled for the
+ input and output of applicable analog test
+ signals.
+ 0 = The ANALOG_TEST pin is disabled. */
+ uint64_t uphy_bist : 1; /**< When set to 1, it makes sure that during PHY BIST,
+ utmi_txvld == 0. */
+ uint64_t bist_en : 1; /**< PHY BIST ENABLE */
+ uint64_t ate_reset : 1; /**< Reset Input from ATE. This is a test signal. When
+ the USB core is powered up (not in suspend mode), an
+ automatic tester can use this to disable PHYCLOCK
+ and FREECLK, then re-enable them with an aligned
+ phase.
+                                                         - 1: PHYCLOCKs and FREECLK outputs are disabled.
+ - 0: PHYCLOCKs and FREECLK are available within a
+ specific period after ATERESET is de-asserted. */
+#else
+ uint64_t ate_reset : 1;
+ uint64_t bist_en : 1;
+ uint64_t uphy_bist : 1;
+ uint64_t vtest_en : 1;
+ uint64_t siddq : 1;
+ uint64_t lsbist : 1;
+ uint64_t fsbist : 1;
+ uint64_t hsbist : 1;
+ uint64_t bist_err : 1;
+ uint64_t bist_done : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_uctlx_uphy_ctl_status_s cn61xx;
+ struct cvmx_uctlx_uphy_ctl_status_s cn63xx;
+ struct cvmx_uctlx_uphy_ctl_status_s cn63xxp1;
+ struct cvmx_uctlx_uphy_ctl_status_s cn66xx;
+ struct cvmx_uctlx_uphy_ctl_status_s cn68xx;
+ struct cvmx_uctlx_uphy_ctl_status_s cn68xxp1;
+ struct cvmx_uctlx_uphy_ctl_status_s cnf71xx;
+};
+typedef union cvmx_uctlx_uphy_ctl_status cvmx_uctlx_uphy_ctl_status_t;
+
+/**
+ * cvmx_uctl#_uphy_port#_ctl_status
+ *
+ * UPHY_PORTX_CTL_STATUS = USB PHY Port X Control and Status Registers
+ * This register controls each port of the USB PHY.
+ */
+union cvmx_uctlx_uphy_portx_ctl_status {
+ uint64_t u64;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63 : 21;
+	uint64_t tdata_out                    : 4;  /**< PHY test data out. Presents either internally
+                                                         generated signals or test register contents, based
+                                                         upon the value of TDATA_SEL */
+ uint64_t txbiststuffenh : 1; /**< High-Byte Transmit Bit-Stuffing Enable. It must be
+ set to 1'b1 in normal operation. */
+ uint64_t txbiststuffen : 1; /**< Low-Byte Transmit Bit-Stuffing Enable. It must be
+ set to 1'b1 in normal operation. */
+ uint64_t dmpulldown : 1; /**< D- Pull-Down Resistor Enable. It must be set to 1'b1
+ in normal operation. */
+ uint64_t dppulldown : 1; /**< D+ Pull-Down Resistor Enable. It must be set to 1'b1
+ in normal operation. */
+ uint64_t vbusvldext : 1; /**< In host mode, this input is not used and can be tied
+ to 1'b0. */
+ uint64_t portreset : 1; /**< Per-port reset */
+ uint64_t txhsvxtune : 2; /**< Transmitter High-Speed Crossover Adjustment */
+ uint64_t txvreftune : 4; /**< HS DC Voltage Level Adjustment
+ When the recommended 37.4 Ohm resistor is present
+ on USB_REXT, the recommended TXVREFTUNE value is 15 */
+ uint64_t txrisetune : 1; /**< HS Transmitter Rise/Fall Time Adjustment
+ When the recommended 37.4 Ohm resistor is present
+ on USB_REXT, the recommended TXRISETUNE value is 1 */
+ uint64_t txpreemphasistune : 1; /**< HS transmitter pre-emphasis enable.
+ When the recommended 37.4 Ohm resistor is present
+ on USB_REXT, the recommended TXPREEMPHASISTUNE
+ value is 1 */
+ uint64_t txfslstune : 4; /**< FS/LS Source Impedance Adjustment */
+ uint64_t sqrxtune : 3; /**< Squelch Threshold Adjustment */
+ uint64_t compdistune : 3; /**< Disconnect Threshold Adjustment */
+ uint64_t loop_en : 1; /**< Port Loop back Test Enable
+ - 1: During data transmission, the receive logic is
+ enabled
+ - 0: During data transmission, the receive logic is
+ disabled */
+ uint64_t tclk : 1; /**< PHY port test clock, used to load TDATA_IN to the
+ UPHY. */
+ uint64_t tdata_sel : 1; /**< Test Data out select
+ - 1: Mode-defined test register contents are output
+ - 0: internally generated signals are output */
+ uint64_t taddr_in : 4; /**< Mode address for test interface. Specifies the
+ register address for writing to or reading from the
+ PHY test interface register. */
+ uint64_t tdata_in : 8; /**< Internal testing Register input data and select.
+ This is a test bus. Data presents on [3:0] and the
+ corresponding select (enable) presents on bits[7:4]. */
+#else
+ uint64_t tdata_in : 8;
+ uint64_t taddr_in : 4;
+ uint64_t tdata_sel : 1;
+ uint64_t tclk : 1;
+ uint64_t loop_en : 1;
+ uint64_t compdistune : 3;
+ uint64_t sqrxtune : 3;
+ uint64_t txfslstune : 4;
+ uint64_t txpreemphasistune : 1;
+ uint64_t txrisetune : 1;
+ uint64_t txvreftune : 4;
+ uint64_t txhsvxtune : 2;
+ uint64_t portreset : 1;
+ uint64_t vbusvldext : 1;
+ uint64_t dppulldown : 1;
+ uint64_t dmpulldown : 1;
+ uint64_t txbiststuffen : 1;
+ uint64_t txbiststuffenh : 1;
+ uint64_t tdata_out : 4;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } s;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s cn61xx;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xx;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xxp1;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s cn66xx;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s cn68xx;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s cn68xxp1;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s cnf71xx;
+};
+typedef union cvmx_uctlx_uphy_portx_ctl_status cvmx_uctlx_uphy_portx_ctl_status_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-uctlx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-usb.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-usb.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-usb.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,3458 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * "cvmx-usb.c" defines a set of low level USB functions to help
+ * developers create Octeon USB drivers for various operating
+ * systems. These functions provide a generic API to the Octeon
+ * USB blocks, hiding the internal hardware specific
+ * operations.
+ *
+ * <hr>$Revision: 32636 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+#include <asm/octeon/cvmx-usbnx-defs.h>
+#include <asm/octeon/cvmx-usbcx-defs.h>
+#include <asm/octeon/cvmx-usb.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+#include <asm/octeon/cvmx-swap.h>
+#if 0
+ /* Do not use cvmx-error.h for now. When the cvmx-error.h is properly
+ * ported, remove the above #if 0, and all #ifdef __CVMX_ERROR_H__ within
+ * this file */
+ #include <asm/octeon/cvmx-error.h>
+#endif
+#else
+#include "cvmx.h"
+#include "cvmx-clock.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-usb.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#include "cvmx-csr-db.h"
+#endif
+#include "cvmx-swap.h"
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#include "cvmx-error.h"
+#endif
+#endif
+
+#define MAX_RETRIES 3 /* Maximum number of times to retry failed transactions */
+#define MAX_PIPES 32 /* Maximum number of pipes that can be open at once */
+#define MAX_TRANSACTIONS 256 /* Maximum number of outstanding transactions across all pipes */
+#define MAX_CHANNELS 8 /* Maximum number of hardware channels supported by the USB block */
+#define MAX_USB_ADDRESS 127 /* The highest valid USB device address */
+#define MAX_USB_ENDPOINT 15 /* The highest valid USB endpoint number */
+#define MAX_USB_HUB_PORT 15 /* The highest valid port number on a hub */
+#define MAX_TRANSFER_BYTES ((1<<19)-1) /* The low level hardware can transfer a maximum of this number of bytes in each transfer. The field is 19 bits wide */
+#define MAX_TRANSFER_PACKETS ((1<<10)-1) /* The low level hardware can transfer a maximum of this number of packets in each transfer. The field is 10 bits wide */
+#define ALLOW_CSR_DECODES 0 /* CSR decoding when CVMX_USB_INITIALIZE_FLAGS_DEBUG_CSRS is set
+ enlarges the code a lot. This define overrides the ability to do CSR
+                              decoding since it isn't necessary 99% of the time. Change this to 1
+                              if you need CSR decoding */
+
+/* These defines disable the normal read and write csr. This is so I can add
+ extra debug stuff to the usb specific version and I won't use the normal
+ version by mistake */
+#define cvmx_read_csr use_cvmx_usb_read_csr64_instead_of_cvmx_read_csr
+#define cvmx_write_csr use_cvmx_usb_write_csr64_instead_of_cvmx_write_csr
+
+typedef enum
+{
+ __CVMX_USB_TRANSACTION_FLAGS_IN_USE = 1<<16,
+} cvmx_usb_transaction_flags_t;
+
+/**
+ * Logical transactions may take numerous low level
+ * transactions, especially when splits are concerned. This
+ * enum represents all of the possible stages a transaction can
+ * be in. Note that the split-complete stages always have odd
+ * values, so the NAK handler can back up to the previous low
+ * level transaction with a simple clearing of bit 0.
+ */
+typedef enum
+{
+ CVMX_USB_STAGE_NON_CONTROL,
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_SETUP,
+ CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_DATA,
+ CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_STATUS,
+ CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
+} cvmx_usb_stage_t;
+
+/**
+ * This structure describes each pending USB transaction
+ * regardless of type. These are linked together to form a list
+ * of pending requests for a pipe.
+ */
+typedef struct cvmx_usb_transaction
+{
+ struct cvmx_usb_transaction *prev; /**< Transaction before this one in the pipe */
+ struct cvmx_usb_transaction *next; /**< Transaction after this one in the pipe */
+    cvmx_usb_transfer_t type;           /**< Type of transaction, duplicated from the pipe */
+ cvmx_usb_transaction_flags_t flags; /**< State flags for this transaction */
+ uint64_t buffer; /**< User's physical buffer address to read/write */
+ int buffer_length; /**< Size of the user's buffer in bytes */
+ uint64_t control_header; /**< For control transactions, physical address of the 8 byte standard header */
+ int iso_start_frame; /**< For ISO transactions, the starting frame number */
+ int iso_number_packets; /**< For ISO transactions, the number of packets in the request */
+ cvmx_usb_iso_packet_t *iso_packets; /**< For ISO transactions, the sub packets in the request */
+ int xfersize;
+ int pktcnt;
+ int retries;
+    int actual_bytes;                   /**< Actual bytes transferred for this transaction */
+ cvmx_usb_stage_t stage; /**< For control transactions, the current stage */
+ cvmx_usb_callback_func_t callback; /**< User's callback function when complete */
+ void *callback_data; /**< User's data */
+} cvmx_usb_transaction_t;
+
+/**
+ * A pipe represents a virtual connection between Octeon and some
+ * USB device. It contains a list of pending request to the device.
+ */
+typedef struct cvmx_usb_pipe
+{
+ struct cvmx_usb_pipe *prev; /**< Pipe before this one in the list */
+ struct cvmx_usb_pipe *next; /**< Pipe after this one in the list */
+ cvmx_usb_transaction_t *head; /**< The first pending transaction */
+ cvmx_usb_transaction_t *tail; /**< The last pending transaction */
+ uint64_t interval; /**< For periodic pipes, the interval between packets in frames */
+ uint64_t next_tx_frame; /**< The next frame this pipe is allowed to transmit on */
+ cvmx_usb_pipe_flags_t flags; /**< State flags for this pipe */
+ cvmx_usb_speed_t device_speed; /**< Speed of device connected to this pipe */
+ cvmx_usb_transfer_t transfer_type; /**< Type of transaction supported by this pipe */
+ cvmx_usb_direction_t transfer_dir; /**< IN or OUT. Ignored for Control */
+ int multi_count; /**< Max packet in a row for the device */
+ uint16_t max_packet; /**< The device's maximum packet size in bytes */
+ uint8_t device_addr; /**< USB device address at other end of pipe */
+ uint8_t endpoint_num; /**< USB endpoint number at other end of pipe */
+ uint8_t hub_device_addr; /**< Hub address this device is connected to */
+ uint8_t hub_port; /**< Hub port this device is connected to */
+    uint8_t pid_toggle;                 /**< This toggles between 0/1 on every packet sent to track the data PID needed */
+ uint8_t channel; /**< Hardware DMA channel for this pipe */
+ int8_t split_sc_frame; /**< The low order bits of the frame number the split complete should be sent on */
+} cvmx_usb_pipe_t;
+
+typedef struct
+{
+ cvmx_usb_pipe_t *head; /**< Head of the list, or NULL if empty */
+    cvmx_usb_pipe_t *tail;              /**< Tail of the list, or NULL if empty */
+} cvmx_usb_pipe_list_t;
+
+typedef struct
+{
+ struct
+ {
+ int channel;
+ int size;
+ uint64_t address;
+ } entry[MAX_CHANNELS+1];
+ int head;
+ int tail;
+} cvmx_usb_tx_fifo_t;
+
+/**
+ * The state of the USB block is stored in this structure
+ */
+typedef struct
+{
+ int init_flags; /**< Flags passed to initialize */
+ int index; /**< Which USB block this is for */
+ int idle_hardware_channels; /**< Bit set for every idle hardware channel */
+ cvmx_usbcx_hprt_t usbcx_hprt; /**< Stored port status so we don't need to read a CSR to determine splits */
+ cvmx_usb_pipe_t *pipe_for_channel[MAX_CHANNELS]; /**< Map channels to pipes */
+ cvmx_usb_transaction_t *free_transaction_head; /**< List of free transactions head */
+ cvmx_usb_transaction_t *free_transaction_tail; /**< List of free transactions tail */
+ cvmx_usb_pipe_t pipe[MAX_PIPES]; /**< Storage for pipes */
+ cvmx_usb_transaction_t transaction[MAX_TRANSACTIONS]; /**< Storage for transactions */
+ cvmx_usb_callback_func_t callback[__CVMX_USB_CALLBACK_END]; /**< User global callbacks */
+ void *callback_data[__CVMX_USB_CALLBACK_END]; /**< User data for each callback */
+ int indent; /**< Used by debug output to indent functions */
+ cvmx_usb_port_status_t port_status; /**< Last port status used for change notification */
+ cvmx_usb_pipe_list_t free_pipes; /**< List of all pipes that are currently closed */
+ cvmx_usb_pipe_list_t idle_pipes; /**< List of open pipes that have no transactions */
+ cvmx_usb_pipe_list_t active_pipes[4]; /**< Active pipes indexed by transfer type */
+ uint64_t frame_number; /**< Increments every SOF interrupt for time keeping */
+ cvmx_usb_transaction_t *active_split; /**< Points to the current active split, or NULL */
+    cvmx_usb_tx_fifo_t periodic;        /**< Pending writes queued for the periodic TX FIFO */
+    cvmx_usb_tx_fifo_t nonperiodic;     /**< Pending writes queued for the non-periodic TX FIFO */
+} cvmx_usb_internal_state_t;
+
+/* This macro logs out whenever a function is called if debugging is on */
+#define CVMX_USB_LOG_CALLED() \
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS)) \
+ cvmx_dprintf("%*s%s: called\n", 2*usb->indent++, "", __FUNCTION__);
+
+/* This macro logs out each function parameter if debugging is on */
+#define CVMX_USB_LOG_PARAM(format, param) \
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS)) \
+ cvmx_dprintf("%*s%s: param %s = " format "\n", 2*usb->indent, "", __FUNCTION__, #param, param);
+
+/* This macro logs out when a function returns a value */
+#define CVMX_USB_RETURN(v) \
+ do { \
+ __typeof(v) r = v; \
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS)) \
+ cvmx_dprintf("%*s%s: returned %s(%d)\n", 2*--usb->indent, "", __FUNCTION__, #v, r); \
+ return r; \
+ } while (0)
+
+/* This macro logs out when a function doesn't return a value */
+#define CVMX_USB_RETURN_NOTHING() \
+ do { \
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS)) \
+ cvmx_dprintf("%*s%s: returned\n", 2*--usb->indent, "", __FUNCTION__); \
+ return; \
+ } while (0)
+
+/* This macro spins on a field waiting for it to reach a value */
+#define CVMX_WAIT_FOR_FIELD32(address, type, field, op, value, timeout_usec)\
+ ({int result; \
+ do { \
+ uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
+ cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000; \
+ type c; \
+ while (1) \
+ { \
+ c.u32 = __cvmx_usb_read_csr32(usb, address); \
+ if (c.s.field op (value)) { \
+ result = 0; \
+ break; \
+ } else if (cvmx_get_cycle() > done) { \
+ result = -1; \
+ break; \
+ } else \
+ cvmx_wait(100); \
+ } \
+ } while (0); \
+ result;})
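+
+/* For example, the port enable code below waits up to 100ms for
+   USBC_HPRT[PRTENA] to read as 1 with:
+   CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t,
+                         prtena, ==, 1, 100000)
+   The expression evaluates to zero on success and -1 on timeout */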
+
+/* This macro sets a single field in a CSR using a read, modify,
+ write sequence */
+#define USB_SET_FIELD32(address, type, field, value)\
+ do { \
+ type c; \
+ c.u32 = __cvmx_usb_read_csr32(usb, address);\
+ c.s.field = value; \
+ __cvmx_usb_write_csr32(usb, address, c.u32);\
+ } while (0)
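+
+/* For example, driving VBUS during host port initialization below is done
+   with:
+   USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t, prtpwr, 1);
+   which reads USBC_HPRT, sets the PRTPWR field, and writes it back */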
+
+/* Returns the IO address used to push/pop data to/from the FIFOs */
+#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
+
+/**
+ * @INTERNAL
+ * Read a USB 32bit CSR. It performs the necessary address swizzle
+ * for 32bit CSRs and logs the value in a readable format if
+ * debugging is on.
+ *
+ * @param usb USB block this access is for
+ * @param address 64bit address to read
+ *
+ * @return Result of the read
+ */
+static inline uint32_t __cvmx_usb_read_csr32(cvmx_usb_internal_state_t *usb,
+ uint64_t address)
+{
+ uint32_t result = cvmx_read64_uint32(address ^ 4);
+#if ALLOW_CSR_DECODES
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CSRS))
+ {
+ cvmx_dprintf("Read: ");
+ cvmx_csr_db_decode(cvmx_get_proc_id(), address, result);
+ }
+#endif
+ return result;
+}
+
+
+/**
+ * @INTERNAL
+ * Write a USB 32bit CSR. It performs the necessary address
+ * swizzle for 32bit CSRs and logs the value in a readable format
+ * if debugging is on.
+ *
+ * @param usb USB block this access is for
+ * @param address 64bit address to write
+ * @param value Value to write
+ */
+static inline void __cvmx_usb_write_csr32(cvmx_usb_internal_state_t *usb,
+ uint64_t address, uint32_t value)
+{
+#if ALLOW_CSR_DECODES
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CSRS))
+ {
+ cvmx_dprintf("Write: ");
+ cvmx_csr_db_decode(cvmx_get_proc_id(), address, value);
+ }
+#endif
+ cvmx_write64_uint32(address ^ 4, value);
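+ /* Assumption: this readback of a USBN DMA CSR forces the previous
+ write to complete before we continue; the value read is discarded */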
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+}
+
+
+/**
+ * @INTERNAL
+ * Read a USB 64bit CSR. It logs the value in a readable format if
+ * debugging is on.
+ *
+ * @param usb USB block this access is for
+ * @param address 64bit address to read
+ *
+ * @return Result of the read
+ */
+static inline uint64_t __cvmx_usb_read_csr64(cvmx_usb_internal_state_t *usb,
+ uint64_t address)
+{
+ uint64_t result = cvmx_read64_uint64(address);
+#if ALLOW_CSR_DECODES
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CSRS))
+ {
+ cvmx_dprintf("Read: ");
+ cvmx_csr_db_decode(cvmx_get_proc_id(), address, result);
+ }
+#endif
+ return result;
+}
+
+
+/**
+ * @INTERNAL
+ * Write a USB 64bit CSR. It logs the value in a readable format
+ * if debugging is on.
+ *
+ * @param usb USB block this access is for
+ * @param address 64bit address to write
+ * @param value Value to write
+ */
+static inline void __cvmx_usb_write_csr64(cvmx_usb_internal_state_t *usb,
+ uint64_t address, uint64_t value)
+{
+#if ALLOW_CSR_DECODES
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CSRS))
+ {
+ cvmx_dprintf("Write: ");
+ cvmx_csr_db_decode(cvmx_get_proc_id(), address, value);
+ }
+#endif
+ cvmx_write64_uint64(address, value);
+}
+
+
+/**
+ * @INTERNAL
+ * Utility function to convert complete codes into strings
+ *
+ * @param complete_code
+ * Code to convert
+ *
+ * @return Human readable string
+ */
+static const char *__cvmx_usb_complete_to_string(cvmx_usb_complete_t complete_code)
+{
+ switch (complete_code)
+ {
+ case CVMX_USB_COMPLETE_SUCCESS: return "SUCCESS";
+ case CVMX_USB_COMPLETE_SHORT: return "SHORT";
+ case CVMX_USB_COMPLETE_CANCEL: return "CANCEL";
+ case CVMX_USB_COMPLETE_ERROR: return "ERROR";
+ case CVMX_USB_COMPLETE_STALL: return "STALL";
+ case CVMX_USB_COMPLETE_XACTERR: return "XACTERR";
+ case CVMX_USB_COMPLETE_DATATGLERR: return "DATATGLERR";
+ case CVMX_USB_COMPLETE_BABBLEERR: return "BABBLEERR";
+ case CVMX_USB_COMPLETE_FRAMEERR: return "FRAMEERR";
+ }
+ return "Update __cvmx_usb_complete_to_string";
+}
+
+
+/**
+ * @INTERNAL
+ * Return non zero if this pipe connects to a non HIGH speed
+ * device through a high speed hub.
+ *
+ * @param usb USB block this access is for
+ * @param pipe Pipe to check
+ *
+ * @return Non zero if we need to do split transactions
+ */
+static inline int __cvmx_usb_pipe_needs_split(cvmx_usb_internal_state_t *usb, cvmx_usb_pipe_t *pipe)
+{
+ return ((pipe->device_speed != CVMX_USB_SPEED_HIGH) && (usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH));
+}
+
+
+/**
+ * @INTERNAL
+ * Trivial utility function to return the correct PID for a pipe
+ *
+ * @param pipe pipe to check
+ *
+ * @return PID for pipe
+ */
+static inline int __cvmx_usb_get_data_pid(cvmx_usb_pipe_t *pipe)
+{
+ if (pipe->pid_toggle)
+ return 2; /* Data1 */
+ else
+ return 0; /* Data0 */
+}
+
+
+/**
+ * Return the number of USB ports supported by this Octeon
+ * chip. If the chip doesn't support USB, or is not supported
+ * by this API, zero will be returned. Most Octeon chips
+ * support one USB port, but some support two. Each port
+ * requires its own call to cvmx_usb_initialize() on an
+ * independent cvmx_usb_state_t structure.
+ *
+ * This utilizes cvmx_helper_board_usb_get_num_ports()
+ * to get any board specific variations.
+ *
+ * @return Number of ports, or zero if USB isn't supported
+ */
+int cvmx_usb_get_num_ports(void)
+{
+ int arch_ports = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+ arch_ports = 2;
+ else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ arch_ports = 1;
+ else
+ arch_ports = 0;
+
+ return __cvmx_helper_board_usb_get_num_ports(arch_ports);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_get_num_ports);
+#endif
+
+
+/**
+ * @INTERNAL
+ * Allocate a usb transaction for use
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return Transaction or NULL
+ */
+static inline cvmx_usb_transaction_t *__cvmx_usb_alloc_transaction(cvmx_usb_internal_state_t *usb)
+{
+ cvmx_usb_transaction_t *t;
+ t = usb->free_transaction_head;
+ if (t)
+ {
+ usb->free_transaction_head = t->next;
+ if (!usb->free_transaction_head)
+ usb->free_transaction_tail = NULL;
+ }
+ else if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
+ cvmx_dprintf("%s: Failed to allocate a transaction\n", __FUNCTION__);
+ if (t)
+ {
+ memset(t, 0, sizeof(*t));
+ t->flags = __CVMX_USB_TRANSACTION_FLAGS_IN_USE;
+ }
+ return t;
+}
+
+
+/**
+ * @INTERNAL
+ * Free a usb transaction
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ * @param transaction
+ * Transaction to free
+ */
+static inline void __cvmx_usb_free_transaction(cvmx_usb_internal_state_t *usb,
+ cvmx_usb_transaction_t *transaction)
+{
+ transaction->flags = 0;
+ transaction->prev = NULL;
+ transaction->next = NULL;
+ if (usb->free_transaction_tail)
+ usb->free_transaction_tail->next = transaction;
+ else
+ usb->free_transaction_head = transaction;
+ usb->free_transaction_tail = transaction;
+}
+
+
+/**
+ * @INTERNAL
+ * Add a pipe to the tail of a list
+ * @param list List to add pipe to
+ * @param pipe Pipe to add
+ */
+static inline void __cvmx_usb_append_pipe(cvmx_usb_pipe_list_t *list, cvmx_usb_pipe_t *pipe)
+{
+ pipe->next = NULL;
+ pipe->prev = list->tail;
+ if (list->tail)
+ list->tail->next = pipe;
+ else
+ list->head = pipe;
+ list->tail = pipe;
+}
+
+
+/**
+ * @INTERNAL
+ * Remove a pipe from a list
+ * @param list List to remove pipe from
+ * @param pipe Pipe to remove
+ */
+static inline void __cvmx_usb_remove_pipe(cvmx_usb_pipe_list_t *list, cvmx_usb_pipe_t *pipe)
+{
+ if (list->head == pipe)
+ {
+ list->head = pipe->next;
+ pipe->next = NULL;
+ if (list->head)
+ list->head->prev = NULL;
+ else
+ list->tail = NULL;
+ }
+ else if (list->tail == pipe)
+ {
+ list->tail = pipe->prev;
+ list->tail->next = NULL;
+ pipe->prev = NULL;
+ }
+ else
+ {
+ pipe->prev->next = pipe->next;
+ pipe->next->prev = pipe->prev;
+ pipe->prev = NULL;
+ pipe->next = NULL;
+ }
+}
+
+
+/**
+ * Initialize a USB port for use. This must be called before any
+ * other access to the Octeon USB port is made. The port starts
+ * off in the disabled state.
+ *
+ * @param state Pointer to an empty cvmx_usb_state_t structure
+ * that will be populated by the initialize call.
+ * This structure is then passed to all other USB
+ * functions.
+ * @param usb_port_number
+ * Which Octeon USB port to initialize.
+ * @param flags Flags to control hardware initialization. See
+ * cvmx_usb_initialize_flags_t for the flag
+ * definitions. Some flags are mandatory.
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+cvmx_usb_status_t cvmx_usb_initialize(cvmx_usb_state_t *state,
+ int usb_port_number,
+ cvmx_usb_initialize_flags_t flags)
+{
+ cvmx_usbnx_clk_ctl_t usbn_clk_ctl;
+ cvmx_usbnx_usbp_ctl_status_t usbn_usbp_ctl_status;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+
+ usb->init_flags = flags;
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+ CVMX_USB_LOG_PARAM("%d", usb_port_number);
+ CVMX_USB_LOG_PARAM("0x%x", flags);
+
+ /* Make sure that state is large enough to store the internal state */
+ if (sizeof(*state) < sizeof(*usb))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ /* Only port numbers 0 and 1 are possible */
+ if ((usb_port_number < 0) || (usb_port_number > 1))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ /* For all chips except 52XX there is only one port */
+ if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ /* Try to determine clock type automatically */
+ if ((flags & (CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI |
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND)) == 0)
+ {
+ if (__cvmx_helper_board_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12)
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; /* Only 12 MHZ crystals are supported */
+ else
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+ }
+
+ if (flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND)
+ {
+ /* Check for auto ref clock frequency */
+ if (!(flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK))
+ switch (__cvmx_helper_board_usb_get_clock_type())
+ {
+ case USB_CLOCK_TYPE_REF_12:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case USB_CLOCK_TYPE_REF_24:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case USB_CLOCK_TYPE_REF_48:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ default:
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ break;
+ }
+ }
+
+ memset(usb, 0, sizeof(*usb));
+ usb->init_flags = flags;
+
+ /* Initialize the USB state structure */
+ {
+ int i;
+ usb->index = usb_port_number;
+
+ /* Initialize the transaction double linked list */
+ usb->free_transaction_head = NULL;
+ usb->free_transaction_tail = NULL;
+ for (i=0; i<MAX_TRANSACTIONS; i++)
+ __cvmx_usb_free_transaction(usb, usb->transaction + i);
+ for (i=0; i<MAX_PIPES; i++)
+ __cvmx_usb_append_pipe(&usb->free_pipes, usb->pipe + i);
+ }
+
+ /* Power On Reset and PHY Initialization */
+
+ /* 1. Wait for DCOK to assert (nothing to do) */
+ /* 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
+ USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0 */
+ usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hrst = 0;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hclk_rst = 0;
+ usbn_clk_ctl.s.enable = 0;
+ /* 2b. Select the USB reference clock/crystal parameters by writing
+ appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON] */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND)
+ {
+ /* The USB port uses 12/24/48MHz 2.5V board clock
+ source at USB_XO. USB_XI should be tied to GND.
+ Most Octeon evaluation boards require this setting */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ {
+ usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
+ usbn_clk_ctl.cn31xx.p_xenbn = 0;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ usbn_clk_ctl.cn56xx.p_rtype = 2; /* From CN56XX,CN50XX manual */
+ else
+ usbn_clk_ctl.cn52xx.p_rtype = 1; /* From CN52XX manual */
+
+ switch (flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK)
+ {
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
+ usbn_clk_ctl.s.p_c_sel = 0;
+ break;
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
+ usbn_clk_ctl.s.p_c_sel = 1;
+ break;
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
+ usbn_clk_ctl.s.p_c_sel = 2;
+ break;
+ }
+ }
+ else
+ {
+ /* The USB port uses a 12MHz crystal as clock source
+ at USB_XO and USB_XI */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ {
+ usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
+ usbn_clk_ctl.cn31xx.p_xenbn = 1;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ usbn_clk_ctl.cn56xx.p_rtype = 0; /* From CN56XX,CN50XX manual */
+ else
+ usbn_clk_ctl.cn52xx.p_rtype = 0; /* From CN52XX manual */
+
+ usbn_clk_ctl.s.p_c_sel = 0;
+ }
+ /* 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
+ setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down such
+ that USB is as close as possible to 125Mhz */
+ {
+ int divisor = (cvmx_clock_get_rate(CVMX_CLOCK_CORE)+125000000-1)/125000000;
+ if (divisor < 4) /* Lower than 4 doesn't seem to work properly */
+ divisor = 4;
+ usbn_clk_ctl.s.divide = divisor;
+ usbn_clk_ctl.s.divide2 = 0;
+ }
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
+ usbn_clk_ctl.s.hclk_rst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
+ cvmx_wait(64);
+ /* 3. Program the power-on reset field in the USBN clock-control register:
+ USBN_CLK_CTL[POR] = 0 */
+ usbn_clk_ctl.s.por = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 4. Wait 1 ms for PHY clock to start */
+ cvmx_wait_usec(1000);
+ /* 5. Program the Reset input from automatic test equipment field in the
+ USBP control and status register: USBN_USBP_CTL_STATUS[ATE_RESET] = 1 */
+ usbn_usbp_ctl_status.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index));
+ usbn_usbp_ctl_status.s.ate_reset = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /* 6. Wait 10 cycles */
+ cvmx_wait(10);
+ /* 7. Clear ATE_RESET field in the USBN clock-control register:
+ USBN_USBP_CTL_STATUS[ATE_RESET] = 0 */
+ usbn_usbp_ctl_status.s.ate_reset = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /* 8. Program the PHY reset field in the USBN clock-control register:
+ USBN_CLK_CTL[PRST] = 1 */
+ usbn_clk_ctl.s.prst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 9. Program the USBP control and status register to select host or
+ device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
+ device */
+ usbn_usbp_ctl_status.s.hst_mode = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /* 10. Wait 1 µs */
+ cvmx_wait_usec(1);
+ /* 11. Program the hreset_n field in the USBN clock-control register:
+ USBN_CLK_CTL[HRST] = 1 */
+ usbn_clk_ctl.s.hrst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 12. Proceed to USB core initialization */
+ usbn_clk_ctl.s.enable = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ cvmx_wait_usec(1);
+
+ /* USB Core Initialization */
+
+ /* 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
+ determine USB core configuration parameters. */
+ /* Nothing needed */
+ /* 2. Program the following fields in the global AHB configuration
+ register (USBC_GAHBCFG)
+ DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
+ Burst length, USBC_GAHBCFG[HBSTLEN] = 0
+ Nonperiodic TxFIFO empty level (slave mode only),
+ USBC_GAHBCFG[NPTXFEMPLVL]
+ Periodic TxFIFO empty level (slave mode only),
+ USBC_GAHBCFG[PTXFEMPLVL]
+ Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1 */
+ {
+ cvmx_usbcx_gahbcfg_t usbcx_gahbcfg;
+ /* Due to an errata, CN31XX doesn't support DMA */
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
+ usbcx_gahbcfg.u32 = 0;
+ usbcx_gahbcfg.s.dmaen = !(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ usb->idle_hardware_channels = 0x1; /* Only use one channel with non DMA */
+ else if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ usb->idle_hardware_channels = 0xf7; /* CN5XXX have an errata with channel 3 */
+ else
+ usb->idle_hardware_channels = 0xff;
+ usbcx_gahbcfg.s.hbstlen = 0;
+ usbcx_gahbcfg.s.nptxfemplvl = 1;
+ usbcx_gahbcfg.s.ptxfemplvl = 1;
+ usbcx_gahbcfg.s.glblintrmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
+ usbcx_gahbcfg.u32);
+ }
+ /* 3. Program the following fields in USBC_GUSBCFG register.
+ HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
+ ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
+ USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
+ PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0 */
+ {
+ cvmx_usbcx_gusbcfg_t usbcx_gusbcfg;
+ usbcx_gusbcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
+ usbcx_gusbcfg.s.toutcal = 0;
+ usbcx_gusbcfg.s.ddrsel = 0;
+ usbcx_gusbcfg.s.usbtrdtim = 0x5;
+ usbcx_gusbcfg.s.phylpwrclksel = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
+ usbcx_gusbcfg.u32);
+ }
+ /* 4. The software must unmask the following bits in the USBC_GINTMSK
+ register.
+ OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
+ Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1 */
+ {
+ cvmx_usbcx_gintmsk_t usbcx_gintmsk;
+ int channel;
+
+ usbcx_gintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
+ usbcx_gintmsk.s.otgintmsk = 1;
+ usbcx_gintmsk.s.modemismsk = 1;
+ usbcx_gintmsk.s.hchintmsk = 1;
+ usbcx_gintmsk.s.sofmsk = 0;
+ /* We need RX FIFO interrupts if we don't have DMA */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ usbcx_gintmsk.s.rxflvlmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
+ usbcx_gintmsk.u32);
+
+ /* Disable all channel interrupts. We'll enable them per channel later */
+ for (channel=0; channel<8; channel++)
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+ }
+
+ {
+ /* Host Port Initialization */
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
+ cvmx_dprintf("%s: USB%d is in host mode\n", __FUNCTION__, usb->index);
+
+ /* 1. Program the host-port interrupt-mask field to unmask,
+ USBC_GINTMSK[PRTINT] = 1 */
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t,
+ prtintmsk, 1);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t,
+ disconnintmsk, 1);
+ /* 2. Program the USBC_HCFG register to select full-speed host or
+ high-speed host. */
+ {
+ cvmx_usbcx_hcfg_t usbcx_hcfg;
+ usbcx_hcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
+ usbcx_hcfg.s.fslssupp = 0;
+ usbcx_hcfg.s.fslspclksel = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
+ }
+ /* 3. Program the port power bit to drive VBUS on the USB,
+ USBC_HPRT[PRTPWR] = 1 */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t, prtpwr, 1);
+
+ /* Steps 4-15 from the manual are done later in the port enable */
+ }
+
+#ifdef __CVMX_ERROR_H__
+ cvmx_error_enable_group(CVMX_ERROR_GROUP_USB, usb->index);
+#endif
+ CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_initialize);
+#endif
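+
+/* A minimal bring-up sketch (illustrative only, not part of the SDK; the
+   example_* names are hypothetical). One cvmx_usb_state_t is used per port,
+   and a flags value of zero lets cvmx_usb_initialize() auto-detect the
+   board's clock setup as shown above. Error handling is elided. */
+#if 0
+static cvmx_usb_state_t example_usb_state[2];
+
+static void example_usb_bringup(void)
+{
+    int num_ports = cvmx_usb_get_num_ports();
+    int port;
+    for (port = 0; port < num_ports; port++)
+    {
+        /* Zero flags trigger the clock type auto-detection above */
+        if (cvmx_usb_initialize(&example_usb_state[port], port,
+                                (cvmx_usb_initialize_flags_t)0) != CVMX_USB_SUCCESS)
+            cvmx_dprintf("USB%d: initialization failed\n", port);
+    }
+}
+#endif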
+
+
+/**
+ * Shutdown a USB port after a call to cvmx_usb_initialize().
+ * The port should be disabled with all pipes closed when this
+ * function is called.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+cvmx_usb_status_t cvmx_usb_shutdown(cvmx_usb_state_t *state)
+{
+ cvmx_usbnx_clk_ctl_t usbn_clk_ctl;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+
+ /* Make sure all pipes are closed */
+ if (usb->idle_pipes.head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS].head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT].head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_CONTROL].head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_BULK].head)
+ CVMX_USB_RETURN(CVMX_USB_BUSY);
+
+#ifdef __CVMX_ERROR_H__
+ cvmx_error_disable_group(CVMX_ERROR_GROUP_USB, usb->index);
+#endif
+
+ /* Disable the clocks and put them in power on reset */
+ usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.enable = 1;
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hclk_rst = 1;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hrst = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_shutdown);
+#endif
+
+
+/**
+ * Enable a USB port. After this call succeeds, the USB port is
+ * online and servicing requests.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+cvmx_usb_status_t cvmx_usb_enable(cvmx_usb_state_t *state)
+{
+ cvmx_usbcx_ghwcfg3_t usbcx_ghwcfg3;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+
+ /* If the port is already enabled, just return; we don't need to do
+ anything */
+ if (usb->usbcx_hprt.s.prtena)
+ CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+
+ /* If there is nothing plugged into the port then fail immediately */
+ if (!usb->usbcx_hprt.s.prtconnsts)
+ {
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
+ cvmx_dprintf("%s: USB%d Nothing plugged into the port\n", __FUNCTION__, usb->index);
+ CVMX_USB_RETURN(CVMX_USB_TIMEOUT);
+ }
+
+ /* Program the port reset bit to start the reset process */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t, prtrst, 1);
+
+ /* Wait at least 50ms (high speed), or 10ms (full speed) for the reset
+ process to complete. */
+ cvmx_wait_usec(50000);
+
+ /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t, prtrst, 0);
+
+ /* Wait for the USBC_HPRT[PRTENA]. */
+ if (CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t,
+ prtena, ==, 1, 100000))
+ {
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
+ cvmx_dprintf("%s: Timeout waiting for the port to finish reset\n",
+ __FUNCTION__);
+ CVMX_USB_RETURN(CVMX_USB_TIMEOUT);
+ }
+
+ /* Read the port speed field to get the enumerated speed, USBC_HPRT[PRTSPD]. */
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
+ cvmx_dprintf("%s: USB%d is in %s speed mode\n", __FUNCTION__, usb->index,
+ (usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH) ? "high" :
+ (usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_FULL) ? "full" :
+ "low");
+
+ usbcx_ghwcfg3.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GHWCFG3(usb->index));
+
+ /* 13. Program the USBC_GRXFSIZ register to select the size of the receive
+ FIFO (25%). */
+ USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz_t,
+ rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
+ /* 14. Program the USBC_GNPTXFSIZ register to select the size and the
+ start address of the non- periodic transmit FIFO for nonperiodic
+ transactions (50%). */
+ {
+ cvmx_usbcx_gnptxfsiz_t siz;
+ siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
+ siz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
+ siz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), siz.u32);
+ }
+ /* 15. Program the USBC_HPTXFSIZ register to select the size and start
+ address of the periodic transmit FIFO for periodic transactions (25%). */
+ {
+ cvmx_usbcx_hptxfsiz_t siz;
+ siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
+ siz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
+ siz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), siz.u32);
+ }
+ /* Flush all FIFOs */
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), cvmx_usbcx_grstctl_t, txfnum, 0x10);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), cvmx_usbcx_grstctl_t, txfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), cvmx_usbcx_grstctl_t,
+ txfflsh, ==, 0, 100);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), cvmx_usbcx_grstctl_t, rxfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), cvmx_usbcx_grstctl_t,
+ rxfflsh, ==, 0, 100);
+
+ CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_enable);
+#endif
+
+
+/**
+ * Disable a USB port. After this call the USB port will not
+ * generate data transfers and will not generate events.
+ * Transactions in process will fail and call their
+ * associated callbacks.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+cvmx_usb_status_t cvmx_usb_disable(cvmx_usb_state_t *state)
+{
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+
+ /* Disable the port. USBC_HPRT[PRTENA] is a write one to clear bit,
+ so writing a 1 here disables the port */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t, prtena, 1);
+ CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_disable);
+#endif
+
+
+/**
+ * Get the current state of the USB port. Use this call to
+ * determine if the usb port has anything connected, is enabled,
+ * or has some sort of error condition. The return value of this
+ * call has "changed" bits to signal if the value of some fields
+ * has changed between calls. These "changed" fields are based
+ * on the last call to cvmx_usb_set_status(). In order to clear
+ * them, you must update the status through cvmx_usb_set_status().
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return Port status information
+ */
+cvmx_usb_port_status_t cvmx_usb_get_status(cvmx_usb_state_t *state)
+{
+ cvmx_usbcx_hprt_t usbc_hprt;
+ cvmx_usb_port_status_t result;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+
+ memset(&result, 0, sizeof(result));
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ result.port_enabled = usbc_hprt.s.prtena;
+ result.port_over_current = usbc_hprt.s.prtovrcurract;
+ result.port_powered = usbc_hprt.s.prtpwr;
+ result.port_speed = usbc_hprt.s.prtspd;
+ result.connected = usbc_hprt.s.prtconnsts;
+ result.connect_change = (result.connected != usb->port_status.connected);
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS))
+ cvmx_dprintf("%*s%s: returned port enabled=%d, over_current=%d, powered=%d, speed=%d, connected=%d, connect_change=%d\n",
+ 2*(--usb->indent), "", __FUNCTION__,
+ result.port_enabled,
+ result.port_over_current,
+ result.port_powered,
+ result.port_speed,
+ result.connected,
+ result.connect_change);
+ return result;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_get_status);
+#endif
+
+
+/**
+ * Set the current state of the USB port. The status is used as
+ * a reference for the "changed" bits returned by
+ * cvmx_usb_get_status(). Other than serving as a reference, the
+ * status passed to this function is not used. No fields can be
+ * changed through this call.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param port_status
+ * Port status to set, most likely returned by cvmx_usb_get_status()
+ */
+void cvmx_usb_set_status(cvmx_usb_state_t *state, cvmx_usb_port_status_t port_status)
+{
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+ usb->port_status = port_status;
+ CVMX_USB_RETURN_NOTHING();
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_set_status);
+#endif
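+
+/* Illustrative sketch of the "changed bits" pattern described above
+   (example only, not part of the SDK; example_poll_port is hypothetical):
+   poll the port status, react to a connection change, then store the
+   status back so the change bits read as zero until the next change. */
+#if 0
+static void example_poll_port(cvmx_usb_state_t *state)
+{
+    cvmx_usb_port_status_t status = cvmx_usb_get_status(state);
+    if (status.connect_change)
+    {
+        if (status.connected)
+            cvmx_usb_enable(state);     /* something was plugged in */
+        else
+            cvmx_usb_disable(state);    /* the device was removed */
+        /* Record this status as the new reference for change detection */
+        cvmx_usb_set_status(state, status);
+    }
+}
+#endif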
+
+
+/**
+ * @INTERNAL
+ * Convert a USB transaction into a handle
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ * @param transaction
+ * Transaction to get handle for
+ *
+ * @return Handle
+ */
+static inline int __cvmx_usb_get_submit_handle(cvmx_usb_internal_state_t *usb,
+ cvmx_usb_transaction_t *transaction)
+{
+ return ((unsigned long)transaction - (unsigned long)usb->transaction) /
+ sizeof(*transaction);
+}
+
+
+/**
+ * @INTERNAL
+ * Convert a USB pipe into a handle
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe Pipe to get handle for
+ *
+ * @return Handle
+ */
+static inline int __cvmx_usb_get_pipe_handle(cvmx_usb_internal_state_t *usb,
+ cvmx_usb_pipe_t *pipe)
+{
+ return ((unsigned long)pipe - (unsigned long)usb->pipe) / sizeof(*pipe);
+}
+
+
+/**
+ * Open a virtual pipe between the host and a USB device. A pipe
+ * must be opened before data can be transferred between a device
+ * and Octeon.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param flags Optional pipe flags defined in
+ * cvmx_usb_pipe_flags_t.
+ * @param device_addr
+ * USB device address to open the pipe to
+ * (0-127).
+ * @param endpoint_num
+ * USB endpoint number to open the pipe to
+ * (0-15).
+ * @param device_speed
+ * The speed of the device the pipe is going
+ * to. This must match the device's speed,
+ * which may be different than the port speed.
+ * @param max_packet The maximum packet length the device can
+ * transmit/receive (low speed=0-8, full
+ * speed=0-1023, high speed=0-1024). This value
+ * comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <10:0>.
+ * @param transfer_type
+ * The type of transfer this pipe is for.
+ * @param transfer_dir
+ * The direction the pipe is in. This is not
+ * used for control pipes.
+ * @param interval For ISOCHRONOUS and INTERRUPT transfers,
+ * this is how often the transfer is scheduled
+ * for. All other transfers should specify
+ * zero. The units are in frames (8000/sec at
+ * high speed, 1000/sec for full speed).
+ * @param multi_count
+ * For high speed devices, this is the maximum
+ * allowed number of packets per microframe.
+ * Specify zero for non high speed devices. This
+ * value comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <12:11>.
+ * @param hub_device_addr
+ * Hub device address this device is connected
+ * to. Devices connected directly to Octeon
+ * use zero. This is only used when the device
+ * is full/low speed behind a high speed hub.
+ * The address will be of the high speed hub,
+ * not any full speed hubs after it.
+ * @param hub_port Which port on the hub the device is
+ * connected. Use zero for devices connected
+ * directly to Octeon. Like hub_device_addr,
+ * this is only used for full/low speed
+ * devices behind a high speed hub.
+ *
+ * @return A non negative value is a pipe handle. Negative
+ * values are failure codes from cvmx_usb_status_t.
+ */
+int cvmx_usb_open_pipe(cvmx_usb_state_t *state, cvmx_usb_pipe_flags_t flags,
+ int device_addr, int endpoint_num,
+ cvmx_usb_speed_t device_speed, int max_packet,
+ cvmx_usb_transfer_t transfer_type,
+ cvmx_usb_direction_t transfer_dir, int interval,
+ int multi_count, int hub_device_addr, int hub_port)
+{
+ cvmx_usb_pipe_t *pipe;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+ CVMX_USB_LOG_PARAM("0x%x", flags);
+ CVMX_USB_LOG_PARAM("%d", device_addr);
+ CVMX_USB_LOG_PARAM("%d", endpoint_num);
+ CVMX_USB_LOG_PARAM("%d", device_speed);
+ CVMX_USB_LOG_PARAM("%d", max_packet);
+ CVMX_USB_LOG_PARAM("%d", transfer_type);
+ CVMX_USB_LOG_PARAM("%d", transfer_dir);
+ CVMX_USB_LOG_PARAM("%d", interval);
+ CVMX_USB_LOG_PARAM("%d", multi_count);
+ CVMX_USB_LOG_PARAM("%d", hub_device_addr);
+ CVMX_USB_LOG_PARAM("%d", hub_port);
+
+ if (cvmx_unlikely((device_addr < 0) || (device_addr > MAX_USB_ADDRESS)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely((endpoint_num < 0) || (endpoint_num > MAX_USB_ENDPOINT)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(device_speed > CVMX_USB_SPEED_LOW))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely((max_packet <= 0) || (max_packet > 1024)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(transfer_type > CVMX_USB_TRANSFER_INTERRUPT))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely((transfer_dir != CVMX_USB_DIRECTION_OUT) &&
+ (transfer_dir != CVMX_USB_DIRECTION_IN)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(interval < 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely((transfer_type == CVMX_USB_TRANSFER_CONTROL) && interval))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(multi_count < 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely((device_speed != CVMX_USB_SPEED_HIGH) &&
+ (multi_count != 0)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely((hub_device_addr < 0) || (hub_device_addr > MAX_USB_ADDRESS)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely((hub_port < 0) || (hub_port > MAX_USB_HUB_PORT)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ /* Find a free pipe */
+ pipe = usb->free_pipes.head;
+ if (!pipe)
+ CVMX_USB_RETURN(CVMX_USB_NO_MEMORY);
+ __cvmx_usb_remove_pipe(&usb->free_pipes, pipe);
+ pipe->flags = flags | __CVMX_USB_PIPE_FLAGS_OPEN;
+ if ((device_speed == CVMX_USB_SPEED_HIGH) &&
+ (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (transfer_type == CVMX_USB_TRANSFER_BULK))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+ pipe->device_addr = device_addr;
+ pipe->endpoint_num = endpoint_num;
+ pipe->device_speed = device_speed;
+ pipe->max_packet = max_packet;
+ pipe->transfer_type = transfer_type;
+ pipe->transfer_dir = transfer_dir;
+ /* All pipes use interval to rate limit NAK processing. Force an interval
+ if one wasn't supplied */
+ if (!interval)
+ interval = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ {
+ pipe->interval = interval*8;
+ /* Force start splits to be scheduled on uFrame 0 */
+ pipe->next_tx_frame = ((usb->frame_number+7)&~7) + pipe->interval;
+ }
+ else
+ {
+ pipe->interval = interval;
+ pipe->next_tx_frame = usb->frame_number + pipe->interval;
+ }
+ pipe->multi_count = multi_count;
+ pipe->hub_device_addr = hub_device_addr;
+ pipe->hub_port = hub_port;
+ pipe->pid_toggle = 0;
+ pipe->split_sc_frame = -1;
+ __cvmx_usb_append_pipe(&usb->idle_pipes, pipe);
+
+ /* We don't need to tell the hardware about this pipe yet since
+ it doesn't have any submitted requests */
+
+ CVMX_USB_RETURN(__cvmx_usb_get_pipe_handle(usb, pipe));
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_open_pipe);
+#endif
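+
+/* Illustrative sketch of opening a pipe (example values only, not from the
+   SDK; example_open_bulk_out is hypothetical): a bulk OUT pipe to a high
+   speed device at address 2, endpoint 1, with a 512 byte max packet. Real
+   values come from the device's endpoint descriptor as described in the
+   function comment above. */
+#if 0
+static int example_open_bulk_out(cvmx_usb_state_t *state)
+{
+    int pipe_handle = cvmx_usb_open_pipe(state,
+                                         (cvmx_usb_pipe_flags_t)0,
+                                         2,     /* device_addr */
+                                         1,     /* endpoint_num */
+                                         CVMX_USB_SPEED_HIGH,
+                                         512,   /* max_packet */
+                                         CVMX_USB_TRANSFER_BULK,
+                                         CVMX_USB_DIRECTION_OUT,
+                                         0,     /* interval: none for bulk */
+                                         0,     /* multi_count */
+                                         0, 0); /* no hub: direct attach */
+    if (pipe_handle < 0)
+        cvmx_dprintf("open_pipe failed (%d)\n", pipe_handle);
+    return pipe_handle;
+}
+#endif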
+
+
+/**
+ * @INTERNAL
+ * Poll the RX FIFOs and remove data as needed. This function is only used
+ * in non DMA mode. It is very important that this function be called quickly
+ * enough to prevent FIFO overflow.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ */
+static void __cvmx_usb_poll_rx_fifo(cvmx_usb_internal_state_t *usb)
+{
+ cvmx_usbcx_grxstsph_t rx_status;
+ int channel;
+ int bytes;
+ uint64_t address;
+ uint32_t *ptr;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", usb);
+
+ rx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GRXSTSPH(usb->index));
+ /* Only read data if IN data is there */
+ if (rx_status.s.pktsts != 2)
+ CVMX_USB_RETURN_NOTHING();
+ /* Check if no data is available */
+ if (!rx_status.s.bcnt)
+ CVMX_USB_RETURN_NOTHING();
+
+ channel = rx_status.s.chnum;
+ bytes = rx_status.s.bcnt;
+ if (!bytes)
+ CVMX_USB_RETURN_NOTHING();
+
+ /* Get where the DMA engine would have written this data */
+ address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8);
+ ptr = cvmx_phys_to_ptr(address);
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, address + bytes);
+
+ /* Loop writing the FIFO data for this packet into memory */
+ while (bytes > 0)
+ {
+ *ptr++ = __cvmx_usb_read_csr32(usb, USB_FIFO_ADDRESS(channel, usb->index));
+ bytes -= 4;
+ }
+ CVMX_SYNCW;
+
+ CVMX_USB_RETURN_NOTHING();
+}
+
+
+/**
+ * @INTERNAL
+ * Fill the TX hardware fifo with data out of the software
+ * fifos
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ * @param fifo Software fifo to use
+ * @param available Amount of space in the hardware fifo
+ *
+ * @return Non zero if the hardware fifo was too small and needs
+ * to be serviced again.
+ */
+static int __cvmx_usb_fill_tx_hw(cvmx_usb_internal_state_t *usb, cvmx_usb_tx_fifo_t *fifo, int available)
+{
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", usb);
+ CVMX_USB_LOG_PARAM("%p", fifo);
+ CVMX_USB_LOG_PARAM("%d", available);
+
+ /* We're done either when there isn't any more space or the software FIFO
+ is empty */
+ while (available && (fifo->head != fifo->tail))
+ {
+ int i = fifo->tail;
+ const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
+ uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, usb->index) ^ 4;
+ int words = available;
+
+ /* Limit the amount of data to what the SW fifo has */
+ if (fifo->entry[i].size <= available)
+ {
+ words = fifo->entry[i].size;
+ fifo->tail++;
+ if (fifo->tail > MAX_CHANNELS)
+ fifo->tail = 0;
+ }
+
+ /* Update the next locations and counts */
+ available -= words;
+ fifo->entry[i].address += words * 4;
+ fifo->entry[i].size -= words;
+
+ /* Write the HW fifo data. The read every three writes is due
+ to an errata on CN3XXX chips */
+ while (words > 3)
+ {
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ words -= 3;
+ }
+ cvmx_write64_uint32(csr_address, *ptr++);
+ if (--words)
+ {
+ cvmx_write64_uint32(csr_address, *ptr++);
+ if (--words)
+ cvmx_write64_uint32(csr_address, *ptr++);
+ }
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ }
+ CVMX_USB_RETURN(fifo->head != fifo->tail);
+}
+
+
+/**
+ * @INTERNAL
+ * Check the hardware FIFOs and fill them as needed
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ */
+static void __cvmx_usb_poll_tx_fifo(cvmx_usb_internal_state_t *usb)
+{
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", usb);
+
+ if (usb->periodic.head != usb->periodic.tail)
+ {
+ cvmx_usbcx_hptxsts_t tx_status;
+ tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->periodic, tx_status.s.ptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t, ptxfempmsk, 1);
+ else
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t, ptxfempmsk, 0);
+ }
+
+ if (usb->nonperiodic.head != usb->nonperiodic.tail)
+ {
+ cvmx_usbcx_gnptxsts_t tx_status;
+ tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, tx_status.s.nptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t, nptxfempmsk, 1);
+ else
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t, nptxfempmsk, 0);
+ }
+
+ CVMX_USB_RETURN_NOTHING();
+}
+
+
+/**
+ * @INTERNAL
+ * Fill the TX FIFO with an outgoing packet
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ * @param channel Channel number to get packet from
+ */
+static void __cvmx_usb_fill_tx_fifo(cvmx_usb_internal_state_t *usb, int channel)
+{
+ cvmx_usbcx_hccharx_t hcchar;
+ cvmx_usbcx_hcspltx_t usbc_hcsplt;
+ cvmx_usbcx_hctsizx_t usbc_hctsiz;
+ cvmx_usb_tx_fifo_t *fifo;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", usb);
+ CVMX_USB_LOG_PARAM("%d", channel);
+
+ /* We only need to fill data on outbound channels */
+ hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
+ CVMX_USB_RETURN_NOTHING();
+
+ /* OUT splits only have data on the start split, not on the complete split */
+ usbc_hcsplt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index));
+ if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
+ CVMX_USB_RETURN_NOTHING();
+
+ /* Find out how many bytes we need to fill and convert it into 32bit words */
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+ if (!usbc_hctsiz.s.xfersize)
+ CVMX_USB_RETURN_NOTHING();
+
+ if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
+ (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
+ fifo = &usb->periodic;
+ else
+ fifo = &usb->nonperiodic;
+
+ fifo->entry[fifo->head].channel = channel;
+ fifo->entry[fifo->head].address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8);
+ fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2;
+ fifo->head++;
+ if (fifo->head > MAX_CHANNELS)
+ fifo->head = 0;
+
+ __cvmx_usb_poll_tx_fifo(usb);
+
+ CVMX_USB_RETURN_NOTHING();
+}
+
+/**
+ * @INTERNAL
+ * Perform channel specific setup for Control transactions. All
+ * the generic stuff will already have been done in
+ * __cvmx_usb_start_channel()
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ * @param channel Channel to setup
+ * @param pipe Pipe for control transaction
+ */
+static void __cvmx_usb_start_channel_control(cvmx_usb_internal_state_t *usb,
+ int channel,
+ cvmx_usb_pipe_t *pipe)
+{
+ cvmx_usb_transaction_t *transaction = pipe->head;
+ cvmx_usb_control_header_t *header = cvmx_phys_to_ptr(transaction->control_header);
+ int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+ int packets_to_transfer;
+ cvmx_usbcx_hctsizx_t usbc_hctsiz;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", usb);
+ CVMX_USB_LOG_PARAM("%d", channel);
+ CVMX_USB_LOG_PARAM("%p", pipe);
+
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+
+ switch (transaction->stage)
+ {
+ case CVMX_USB_STAGE_NON_CONTROL:
+ case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+ cvmx_dprintf("%s: ERROR - Non control stage\n", __FUNCTION__);
+ break;
+ case CVMX_USB_STAGE_SETUP:
+ usbc_hctsiz.s.pid = 3; /* Setup */
+ bytes_to_transfer = sizeof(*header);
+ /* All Control operations start with a setup going OUT */
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), cvmx_usbcx_hccharx_t, epdir, CVMX_USB_DIRECTION_OUT);
+ /* The setup stage sends the control header instead of the buffer
+ data. The buffer data will be used in the next stage */
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, transaction->control_header);
+ break;
+ case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = 3; /* Setup */
+ bytes_to_transfer = 0;
+ /* All Control operations start with a setup going OUT */
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), cvmx_usbcx_hccharx_t, epdir, CVMX_USB_DIRECTION_OUT);
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), cvmx_usbcx_hcspltx_t, compsplt, 1);
+ break;
+ case CVMX_USB_STAGE_DATA:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ {
+ if (header->s.request_type & 0x80)
+ bytes_to_transfer = 0;
+ else if (bytes_to_transfer > pipe->max_packet)
+ bytes_to_transfer = pipe->max_packet;
+ }
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ cvmx_usbcx_hccharx_t, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT));
+ break;
+ case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ if (!(header->s.request_type & 0x80))
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ cvmx_usbcx_hccharx_t, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT));
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), cvmx_usbcx_hcspltx_t, compsplt, 1);
+ break;
+ case CVMX_USB_STAGE_STATUS:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), cvmx_usbcx_hccharx_t, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_OUT :
+ CVMX_USB_DIRECTION_IN));
+ break;
+ case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), cvmx_usbcx_hccharx_t, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_OUT :
+ CVMX_USB_DIRECTION_IN));
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), cvmx_usbcx_hcspltx_t, compsplt, 1);
+ break;
+ }
+
+ /* Make sure the transfer never exceeds the byte limit of the hardware.
+ Further bytes will be sent as continued transactions */
+ if (bytes_to_transfer > MAX_TRANSFER_BYTES)
+ {
+ /* Round MAX_TRANSFER_BYTES down to a multiple of the pipe's packet size */
+ bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
+ bytes_to_transfer *= pipe->max_packet;
+ }
+
+ /* Calculate the number of packets to transfer. If the length is zero
+ we still need to transfer one packet */
+ packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ if (packets_to_transfer == 0)
+ packets_to_transfer = 1;
+ else if ((packets_to_transfer>1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA))
+ {
+ /* Limit to one packet when not using DMA. Channels must be restarted
+ between every packet for IN transactions, so there is no reason to
+ do multiple packets in a row */
+ packets_to_transfer = 1;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ }
+ else if (packets_to_transfer > MAX_TRANSFER_PACKETS)
+ {
+ /* Limit the number of packets and bytes transferred to what the
+ hardware can handle */
+ packets_to_transfer = MAX_TRANSFER_PACKETS;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ }
+
+ usbc_hctsiz.s.xfersize = bytes_to_transfer;
+ usbc_hctsiz.s.pktcnt = packets_to_transfer;
+
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ CVMX_USB_RETURN_NOTHING();
+}
+
+
+/**
+ * @INTERNAL
+ * Start a channel to perform the pipe's head transaction
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ * @param channel Channel to setup
+ * @param pipe Pipe to start
+ */
+static void __cvmx_usb_start_channel(cvmx_usb_internal_state_t *usb,
+ int channel,
+ cvmx_usb_pipe_t *pipe)
+{
+ cvmx_usb_transaction_t *transaction = pipe->head;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", usb);
+ CVMX_USB_LOG_PARAM("%d", channel);
+ CVMX_USB_LOG_PARAM("%p", pipe);
+
+ if (cvmx_unlikely((usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS) ||
+ (pipe->flags & CVMX_USB_PIPE_FLAGS_DEBUG_TRANSFERS)))
+ cvmx_dprintf("%s: Channel %d started. Pipe %d transaction %d stage %d\n",
+ __FUNCTION__, channel, __cvmx_usb_get_pipe_handle(usb, pipe),
+ __cvmx_usb_get_submit_handle(usb, transaction),
+ transaction->stage);
+
+ /* Make sure all writes to the DMA region get flushed */
+ CVMX_SYNCW;
+
+ /* Attach the channel to the pipe */
+ usb->pipe_for_channel[channel] = pipe;
+ pipe->channel = channel;
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ /* Mark this channel as in use */
+ usb->idle_hardware_channels &= ~(1<<channel);
+
+ /* Enable the channel interrupt bits */
+ {
+ cvmx_usbcx_hcintx_t usbc_hcint;
+ cvmx_usbcx_hcintmskx_t usbc_hcintmsk;
+ cvmx_usbcx_haintmsk_t usbc_haintmsk;
+
+ /* Clear all channel status bits */
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index), usbc_hcint.u32);
+
+ usbc_hcintmsk.u32 = 0;
+ usbc_hcintmsk.s.chhltdmsk = 1;
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ {
+ /* Channels need these extra interrupts when we aren't in DMA mode */
+ usbc_hcintmsk.s.datatglerrmsk = 1;
+ usbc_hcintmsk.s.frmovrunmsk = 1;
+ usbc_hcintmsk.s.bblerrmsk = 1;
+ usbc_hcintmsk.s.xacterrmsk = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ {
+ /* Splits don't generate xfercompl, so we need ACK and NYET */
+ usbc_hcintmsk.s.nyetmsk = 1;
+ usbc_hcintmsk.s.ackmsk = 1;
+ }
+ usbc_hcintmsk.s.nakmsk = 1;
+ usbc_hcintmsk.s.stallmsk = 1;
+ usbc_hcintmsk.s.xfercomplmsk = 1;
+ }
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), usbc_hcintmsk.u32);
+
+ /* Enable the channel interrupt to propagate */
+ usbc_haintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index));
+ usbc_haintmsk.s.haintmsk |= 1<<channel;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index), usbc_haintmsk.u32);
+ }
+
+ /* Setup the locations the DMA engines use */
+ {
+ uint64_t dma_address = transaction->buffer + transaction->actual_bytes;
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ dma_address = transaction->buffer + transaction->iso_packets[0].offset + transaction->actual_bytes;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, dma_address);
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, dma_address);
+ }
+
+ /* Setup both the size of the transfer and the SPLIT characteristics */
+ {
+ cvmx_usbcx_hcspltx_t usbc_hcsplt = {.u32 = 0};
+ cvmx_usbcx_hctsizx_t usbc_hctsiz = {.u32 = 0};
+ int packets_to_transfer;
+ int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+
+ /* ISOCHRONOUS transactions store each individual transfer size in the
+ packet structure, not the global buffer_length */
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ bytes_to_transfer = transaction->iso_packets[0].length - transaction->actual_bytes;
+
+ /* We need to do split transactions when we are talking to non high
+ speed devices that are behind a high speed hub */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ {
+ /* On the start split phase (stage is even) record the frame number we
+ will need to send the split complete on. We only store the low order
+ bits of the frame number since the split complete can be at most two
+ frames ahead */
+ if ((transaction->stage&1) == 0)
+ {
+ if (transaction->type == CVMX_USB_TRANSFER_BULK)
+ pipe->split_sc_frame = (usb->frame_number + 1) & 0x7f;
+ else
+ pipe->split_sc_frame = (usb->frame_number + 2) & 0x7f;
+ }
+ else
+ pipe->split_sc_frame = -1;
+
+ usbc_hcsplt.s.spltena = 1;
+ usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
+ usbc_hcsplt.s.prtaddr = pipe->hub_port;
+ usbc_hcsplt.s.compsplt = (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
+
+ /* SPLIT transactions can only ever transmit one data packet so
+ limit the transfer size to the max packet size */
+ if (bytes_to_transfer > pipe->max_packet)
+ bytes_to_transfer = pipe->max_packet;
+
+ /* ISOCHRONOUS OUT splits are unique in that they limit
+ data transfers to 188 byte chunks representing the
+ beginning, middle, or end of the data, or all of it */
+ if (!usbc_hcsplt.s.compsplt &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_ISOCHRONOUS))
+ {
+ /* Clear the split complete frame number as there isn't going
+ to be a split complete */
+ pipe->split_sc_frame = -1;
+ /* See if we've started this transfer and sent data */
+ if (transaction->actual_bytes == 0)
+ {
+ /* Nothing sent yet, this is either a begin or the
+ entire payload */
+ if (bytes_to_transfer <= 188)
+ usbc_hcsplt.s.xactpos = 3; /* Entire payload in one go */
+ else
+ usbc_hcsplt.s.xactpos = 2; /* First part of payload */
+ }
+ else
+ {
+ /* Continuing the previous data, we must either be
+ in the middle or at the end */
+ if (bytes_to_transfer <= 188)
+ usbc_hcsplt.s.xactpos = 1; /* End of payload */
+ else
+ usbc_hcsplt.s.xactpos = 0; /* Middle of payload */
+ }
+ /* Again, the transfer size is limited to 188 bytes */
+ if (bytes_to_transfer > 188)
+ bytes_to_transfer = 188;
+ }
+ }
+
+ /* Make sure the transfer never exceeds the byte limit of the hardware.
+ Further bytes will be sent as continued transactions */
+ if (bytes_to_transfer > MAX_TRANSFER_BYTES)
+ {
+ /* Round MAX_TRANSFER_BYTES down to a multiple of the pipe's packet size */
+ bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
+ bytes_to_transfer *= pipe->max_packet;
+ }
+
+ /* Calculate the number of packets to transfer. If the length is zero
+ we still need to transfer one packet */
+ packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ if (packets_to_transfer == 0)
+ packets_to_transfer = 1;
+ else if ((packets_to_transfer>1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA))
+ {
+ /* Limit to one packet when not using DMA. Channels must be restarted
+ between every packet for IN transactions, so there is no reason to
+ do multiple packets in a row */
+ packets_to_transfer = 1;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ }
+ else if (packets_to_transfer > MAX_TRANSFER_PACKETS)
+ {
+ /* Limit the number of packets and bytes transferred to what the
+ hardware can handle */
+ packets_to_transfer = MAX_TRANSFER_PACKETS;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ }
+
+ usbc_hctsiz.s.xfersize = bytes_to_transfer;
+ usbc_hctsiz.s.pktcnt = packets_to_transfer;
+
+ /* Update the DATA0/DATA1 toggle */
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ /* High speed pipes may need a hardware ping before they start */
+ if (pipe->flags & __CVMX_USB_PIPE_FLAGS_NEED_PING)
+ usbc_hctsiz.s.dopng = 1;
+
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index), usbc_hcsplt.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ }
+
+ /* Setup the Host Channel Characteristics Register */
+ {
+ cvmx_usbcx_hccharx_t usbc_hcchar = {.u32 = 0};
+
+ /* Set the startframe odd/even properly. This is only used for periodic */
+ usbc_hcchar.s.oddfrm = usb->frame_number&1;
+
+ /* Set the number of back to back packets allowed by this endpoint.
+ Split transactions interpret "ec" as the number of immediate
+ retries on failure. These retries happen too quickly, so we
+ disable them entirely for splits */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ usbc_hcchar.s.ec = 1;
+ else if (pipe->multi_count < 1)
+ usbc_hcchar.s.ec = 1;
+ else if (pipe->multi_count > 3)
+ usbc_hcchar.s.ec = 3;
+ else
+ usbc_hcchar.s.ec = pipe->multi_count;
+
+ /* Set the rest of the endpoint specific settings */
+ usbc_hcchar.s.devaddr = pipe->device_addr;
+ usbc_hcchar.s.eptype = transaction->type;
+ usbc_hcchar.s.lspddev = (pipe->device_speed == CVMX_USB_SPEED_LOW);
+ usbc_hcchar.s.epdir = pipe->transfer_dir;
+ usbc_hcchar.s.epnum = pipe->endpoint_num;
+ usbc_hcchar.s.mps = pipe->max_packet;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ }
+
+ /* Do transaction type specific fixups as needed */
+ switch (transaction->type)
+ {
+ case CVMX_USB_TRANSFER_CONTROL:
+ __cvmx_usb_start_channel_control(usb, channel, pipe);
+ break;
+ case CVMX_USB_TRANSFER_BULK:
+ case CVMX_USB_TRANSFER_INTERRUPT:
+ break;
+ case CVMX_USB_TRANSFER_ISOCHRONOUS:
+ if (!__cvmx_usb_pipe_needs_split(usb, pipe))
+ {
+ /* ISO transactions require different PIDs depending on direction
+ and how many packets are needed */
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)
+ {
+ if (pipe->multi_count < 2) /* Need DATA0 */
+ USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), cvmx_usbcx_hctsizx_t, pid, 0);
+ else /* Need MDATA */
+ USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), cvmx_usbcx_hctsizx_t, pid, 3);
+ }
+ }
+ break;
+ }
+ {
+ cvmx_usbcx_hctsizx_t usbc_hctsiz = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index))};
+ transaction->xfersize = usbc_hctsiz.s.xfersize;
+ transaction->pktcnt = usbc_hctsiz.s.pktcnt;
+ }
+ /* Remember when we start a split transaction */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ usb->active_split = transaction;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), cvmx_usbcx_hccharx_t, chena, 1);
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_fill_tx_fifo(usb, channel);
+ CVMX_USB_RETURN_NOTHING();
+}
+
+
+/**
+ * @INTERNAL
+ * Find a pipe that is ready to be scheduled to hardware.
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ * @param list Pipe list to search
+ * @param current_frame
+ * Frame counter to use as a time reference.
+ *
+ * @return Pipe or NULL if none are ready
+ */
+static cvmx_usb_pipe_t *__cvmx_usb_find_ready_pipe(cvmx_usb_internal_state_t *usb, cvmx_usb_pipe_list_t *list, uint64_t current_frame)
+{
+ cvmx_usb_pipe_t *pipe = list->head;
+ while (pipe)
+ {
+ if (!(pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED) && pipe->head &&
+ (pipe->next_tx_frame <= current_frame) &&
+ ((pipe->split_sc_frame == -1) || ((((int)current_frame - (int)pipe->split_sc_frame) & 0x7f) < 0x40)) &&
+ (!usb->active_split || (usb->active_split == pipe->head)))
+ {
+ CVMX_PREFETCH(pipe, 128);
+ CVMX_PREFETCH(pipe->head, 0);
+ return pipe;
+ }
+ pipe = pipe->next;
+ }
+ return NULL;
+}
+
+
+/**
+ * @INTERNAL
+ * Called whenever a pipe might need to be scheduled to the
+ * hardware.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ * @param is_sof True if this schedule was called on a SOF interrupt.
+ */
+static void __cvmx_usb_schedule(cvmx_usb_internal_state_t *usb, int is_sof)
+{
+ int channel;
+ cvmx_usb_pipe_t *pipe;
+ int need_sof;
+ cvmx_usb_transfer_t ttype;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", usb);
+
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ {
+ /* Without DMA we need to be careful to not schedule something at the end of a frame and cause an overrun */
+ cvmx_usbcx_hfnum_t hfnum = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index))};
+ cvmx_usbcx_hfir_t hfir = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFIR(usb->index))};
+ if (hfnum.s.frrem < hfir.s.frint/4)
+ goto done;
+ }
+
+ while (usb->idle_hardware_channels)
+ {
+ /* Find an idle channel */
+ CVMX_CLZ(channel, usb->idle_hardware_channels);
+ channel = 31 - channel;
+ if (cvmx_unlikely(channel > 7))
+ {
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
+ cvmx_dprintf("%s: Idle hardware channels has a channel higher than 7. This is wrong\n", __FUNCTION__);
+ break;
+ }
+
+ /* Find a pipe needing service */
+ pipe = NULL;
+ if (is_sof)
+ {
+ /* Only process periodic pipes on SOF interrupts. This way we are
+ sure that the periodic data is sent at the beginning of the
+ frame */
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_ISOCHRONOUS, usb->frame_number);
+ if (cvmx_likely(!pipe))
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_INTERRUPT, usb->frame_number);
+ }
+ if (cvmx_likely(!pipe))
+ {
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_CONTROL, usb->frame_number);
+ if (cvmx_likely(!pipe))
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_BULK, usb->frame_number);
+ }
+ if (!pipe)
+ break;
+
+ CVMX_USB_LOG_PARAM("%d", channel);
+ CVMX_USB_LOG_PARAM("%p", pipe);
+
+ if (cvmx_unlikely((usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS) ||
+ (pipe->flags & CVMX_USB_PIPE_FLAGS_DEBUG_TRANSFERS)))
+ {
+ cvmx_usb_transaction_t *transaction = pipe->head;
+ const cvmx_usb_control_header_t *header = (transaction->control_header) ? cvmx_phys_to_ptr(transaction->control_header) : NULL;
+ const char *dir = (pipe->transfer_dir == CVMX_USB_DIRECTION_IN) ? "IN" : "OUT";
+ const char *type;
+ switch (pipe->transfer_type)
+ {
+ case CVMX_USB_TRANSFER_CONTROL:
+ type = "SETUP";
+ dir = (header->s.request_type & 0x80) ? "IN" : "OUT";
+ break;
+ case CVMX_USB_TRANSFER_ISOCHRONOUS:
+ type = "ISOCHRONOUS";
+ break;
+ case CVMX_USB_TRANSFER_BULK:
+ type = "BULK";
+ break;
+ default: /* CVMX_USB_TRANSFER_INTERRUPT */
+ type = "INTERRUPT";
+ break;
+ }
+ cvmx_dprintf("%s: Starting pipe %d, transaction %d on channel %d. %s %s len=%d header=0x%llx\n",
+ __FUNCTION__, __cvmx_usb_get_pipe_handle(usb, pipe),
+ __cvmx_usb_get_submit_handle(usb, transaction),
+ channel, type, dir,
+ transaction->buffer_length,
+ (header) ? (unsigned long long)header->u64 : 0ull);
+ }
+ __cvmx_usb_start_channel(usb, channel, pipe);
+ }
+
+done:
+ /* Only enable SOF interrupts when we have transactions pending in the
+ future that might need to be scheduled */
+ need_sof = 0;
+ for (ttype=CVMX_USB_TRANSFER_CONTROL; ttype<=CVMX_USB_TRANSFER_INTERRUPT; ttype++)
+ {
+ pipe = usb->active_pipes[ttype].head;
+ while (pipe)
+ {
+ if (pipe->next_tx_frame > usb->frame_number)
+ {
+ need_sof = 1;
+ break;
+ }
+ pipe=pipe->next;
+ }
+ }
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t, sofmsk, need_sof);
+ CVMX_USB_RETURN_NOTHING();
+}
+
+
+/**
+ * @INTERNAL
+ * Call a user's callback for a specific reason.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe Pipe the callback is for or NULL
+ * @param transaction
+ * Transaction the callback is for or NULL
+ * @param reason Reason this callback is being called
+ * @param complete_code
+ * Completion code for the transaction, if any
+ */
+static void __cvmx_usb_perform_callback(cvmx_usb_internal_state_t *usb,
+ cvmx_usb_pipe_t *pipe,
+ cvmx_usb_transaction_t *transaction,
+ cvmx_usb_callback_t reason,
+ cvmx_usb_complete_t complete_code)
+{
+ cvmx_usb_callback_func_t callback = usb->callback[reason];
+ void *user_data = usb->callback_data[reason];
+ int submit_handle = -1;
+ int pipe_handle = -1;
+ int bytes_transferred = 0;
+
+ if (pipe)
+ pipe_handle = __cvmx_usb_get_pipe_handle(usb, pipe);
+
+ if (transaction)
+ {
+ submit_handle = __cvmx_usb_get_submit_handle(usb, transaction);
+ bytes_transferred = transaction->actual_bytes;
+ /* Transactions are allowed to override the default callback */
+ if ((reason == CVMX_USB_CALLBACK_TRANSFER_COMPLETE) && transaction->callback)
+ {
+ callback = transaction->callback;
+ user_data = transaction->callback_data;
+ }
+ }
+
+ if (!callback)
+ return;
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLBACKS))
+ cvmx_dprintf("%*s%s: calling callback %p(usb=%p, complete_code=%s, "
+ "pipe_handle=%d, submit_handle=%d, bytes_transferred=%d, user_data=%p);\n",
+ 2*usb->indent, "", __FUNCTION__, callback, usb,
+ __cvmx_usb_complete_to_string(complete_code),
+ pipe_handle, submit_handle, bytes_transferred, user_data);
+
+ callback((cvmx_usb_state_t *)usb, reason, complete_code, pipe_handle, submit_handle,
+ bytes_transferred, user_data);
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLBACKS))
+ cvmx_dprintf("%*s%s: callback %p complete\n", 2*usb->indent, "",
+ __FUNCTION__, callback);
+}
+
+
+/**
+ * @INTERNAL
+ * Signal the completion of a transaction and free it. The
+ * transaction will be removed from the pipe transaction list.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe Pipe the transaction is on
+ * @param transaction
+ * Transaction that completed
+ * @param complete_code
+ * Completion code
+ */
+static void __cvmx_usb_perform_complete(cvmx_usb_internal_state_t * usb,
+ cvmx_usb_pipe_t *pipe,
+ cvmx_usb_transaction_t *transaction,
+ cvmx_usb_complete_t complete_code)
+{
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", usb);
+ CVMX_USB_LOG_PARAM("%p", pipe);
+ CVMX_USB_LOG_PARAM("%p", transaction);
+ CVMX_USB_LOG_PARAM("%d", complete_code);
+
+ /* If this was a split then clear our split in progress marker */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+
+ /* Isochronous transactions need extra processing as they might not be done
+ after a single data transfer */
+ if (cvmx_unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS))
+ {
+ /* Update the number of bytes transferred in this ISO packet */
+ transaction->iso_packets[0].length = transaction->actual_bytes;
+ transaction->iso_packets[0].status = complete_code;
+
+ /* If there are more ISOs pending and we succeeded, schedule the next
+ one */
+ if ((transaction->iso_number_packets > 1) && (complete_code == CVMX_USB_COMPLETE_SUCCESS))
+ {
+ transaction->actual_bytes = 0; /* No bytes transferred for this packet as of yet */
+ transaction->iso_number_packets--; /* One less ISO waiting to transfer */
+ transaction->iso_packets++; /* Increment to the next location in our packet array */
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ goto done;
+ }
+ }
+
+ /* Remove the transaction from the pipe list */
+ if (transaction->next)
+ transaction->next->prev = transaction->prev;
+ else
+ pipe->tail = transaction->prev;
+ if (transaction->prev)
+ transaction->prev->next = transaction->next;
+ else
+ pipe->head = transaction->next;
+ if (!pipe->head)
+ {
+ __cvmx_usb_remove_pipe(usb->active_pipes + pipe->transfer_type, pipe);
+ __cvmx_usb_append_pipe(&usb->idle_pipes, pipe);
+ }
+ __cvmx_usb_perform_callback(usb, pipe, transaction,
+ CVMX_USB_CALLBACK_TRANSFER_COMPLETE,
+ complete_code);
+ __cvmx_usb_free_transaction(usb, transaction);
+done:
+ CVMX_USB_RETURN_NOTHING();
+}
+
+
+/**
+ * @INTERNAL
+ * Submit a usb transaction to a pipe. Called for all types
+ * of transactions.
+ *
+ * @param usb USB device state populated by cvmx_usb_initialize().
+ * @param pipe_handle
+ * Which pipe to submit to. Will be validated in this function.
+ * @param type Transaction type
+ * @param flags Flags for the transaction
+ * @param buffer User buffer for the transaction
+ * @param buffer_length
+ * User buffer's length in bytes
+ * @param control_header
+ * For control transactions, the 8 byte standard header
+ * @param iso_start_frame
+ * For ISO transactions, the start frame
+ * @param iso_number_packets
+ * For ISO, the number of packets in the transaction.
+ * @param iso_packets
+ * A description of each ISO packet
+ * @param callback User callback to call when the transaction completes
+ * @param user_data User's data for the callback
+ *
+ * @return Submit handle or negative on failure. Matches the result
+ * in the external API.
+ */
+static int __cvmx_usb_submit_transaction(cvmx_usb_internal_state_t *usb,
+ int pipe_handle,
+ cvmx_usb_transfer_t type,
+ int flags,
+ uint64_t buffer,
+ int buffer_length,
+ uint64_t control_header,
+ int iso_start_frame,
+ int iso_number_packets,
+ cvmx_usb_iso_packet_t *iso_packets,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ int submit_handle;
+ cvmx_usb_transaction_t *transaction;
+ cvmx_usb_pipe_t *pipe = usb->pipe + pipe_handle;
+
+ CVMX_USB_LOG_CALLED();
+ if (cvmx_unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ /* Fail if the pipe isn't open */
+ if (cvmx_unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(pipe->transfer_type != type))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ transaction = __cvmx_usb_alloc_transaction(usb);
+ if (cvmx_unlikely(!transaction))
+ CVMX_USB_RETURN(CVMX_USB_NO_MEMORY);
+
+ transaction->type = type;
+ transaction->flags |= flags;
+ transaction->buffer = buffer;
+ transaction->buffer_length = buffer_length;
+ transaction->control_header = control_header;
+ transaction->iso_start_frame = iso_start_frame; // FIXME: This is not used, implement it
+ transaction->iso_number_packets = iso_number_packets;
+ transaction->iso_packets = iso_packets;
+ transaction->callback = callback;
+ transaction->callback_data = user_data;
+ if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
+ transaction->stage = CVMX_USB_STAGE_SETUP;
+ else
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+
+ transaction->next = NULL;
+ if (pipe->tail)
+ {
+ transaction->prev = pipe->tail;
+ transaction->prev->next = transaction;
+ }
+ else
+ {
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number + pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ transaction->prev = NULL;
+ pipe->head = transaction;
+ __cvmx_usb_remove_pipe(&usb->idle_pipes, pipe);
+ __cvmx_usb_append_pipe(usb->active_pipes + pipe->transfer_type, pipe);
+ }
+ pipe->tail = transaction;
+
+ submit_handle = __cvmx_usb_get_submit_handle(usb, transaction);
+
+ /* We may need to schedule the pipe if this was the head of the pipe */
+ if (!transaction->prev)
+ __cvmx_usb_schedule(usb, 0);
+
+ CVMX_USB_RETURN(submit_handle);
+}
+
+
+/**
+ * Call to submit a USB Bulk transfer to a pipe.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Handle to the pipe for the transfer.
+ * @param buffer Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @param buffer_length
+ * Length of buffer in bytes.
+ * @param callback Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @param user_data User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * @return A submitted transaction handle or negative on
+ * failure. Negative values are failure codes from
+ * cvmx_usb_status_t.
+ */
+int cvmx_usb_submit_bulk(cvmx_usb_state_t *state, int pipe_handle,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ int submit_handle;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+ CVMX_USB_LOG_PARAM("%d", pipe_handle);
+ CVMX_USB_LOG_PARAM("0x%llx", (unsigned long long)buffer);
+ CVMX_USB_LOG_PARAM("%d", buffer_length);
+
+ /* Pipe handle checking is done later in a common place */
+ if (cvmx_unlikely(!buffer))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(buffer_length < 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_BULK,
+ 0, /* flags */
+ buffer,
+ buffer_length,
+ 0, /* control_header */
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ callback,
+ user_data);
+ CVMX_USB_RETURN(submit_handle);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_submit_bulk);
+#endif
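+
+/* A minimal usage sketch for cvmx_usb_submit_bulk(). Everything named
+   my_* is a placeholder, error handling is omitted, and the buffer is
+   assumed to come from a virtual pointer converted with
+   cvmx_ptr_to_phys():
+
+       static void my_bulk_done(cvmx_usb_state_t *usb,
+                                cvmx_usb_callback_t reason,
+                                cvmx_usb_complete_t status,
+                                int pipe_handle, int submit_handle,
+                                int bytes_transferred, void *user_data)
+       {
+           // status and bytes_transferred describe how the transfer ended
+       }
+
+       int submit_handle = cvmx_usb_submit_bulk(state, my_pipe_handle,
+                                                cvmx_ptr_to_phys(my_buffer),
+                                                my_length,
+                                                my_bulk_done, my_data);
+       if (submit_handle < 0)
+           ;  // negative results are cvmx_usb_status_t failure codes
+*/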
+
+
+/**
+ * Call to submit a USB Interrupt transfer to a pipe.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Handle to the pipe for the transfer.
+ * @param buffer Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @param buffer_length
+ * Length of buffer in bytes.
+ * @param callback Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @param user_data User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * @return A submitted transaction handle or negative on
+ * failure. Negative values are failure codes from
+ * cvmx_usb_status_t.
+ */
+int cvmx_usb_submit_interrupt(cvmx_usb_state_t *state, int pipe_handle,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ int submit_handle;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+ CVMX_USB_LOG_PARAM("%d", pipe_handle);
+ CVMX_USB_LOG_PARAM("0x%llx", (unsigned long long)buffer);
+ CVMX_USB_LOG_PARAM("%d", buffer_length);
+
+ /* Pipe handle checking is done later in a common place */
+ if (cvmx_unlikely(!buffer))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(buffer_length < 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_INTERRUPT,
+ 0, /* flags */
+ buffer,
+ buffer_length,
+ 0, /* control_header */
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ callback,
+ user_data);
+ CVMX_USB_RETURN(submit_handle);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_submit_interrupt);
+#endif
+
+
+/**
+ * Call to submit a USB Control transfer to a pipe.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Handle to the pipe for the transfer.
+ * @param control_header
+ * USB 8 byte control header physical address.
+ * Note that this is NOT A POINTER, but the
+ * full 64bit physical address of the buffer.
+ * @param buffer Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @param buffer_length
+ * Length of buffer in bytes.
+ * @param callback Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @param user_data User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * @return A submitted transaction handle or negative on
+ * failure. Negative values are failure codes from
+ * cvmx_usb_status_t.
+ */
+int cvmx_usb_submit_control(cvmx_usb_state_t *state, int pipe_handle,
+ uint64_t control_header,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ int submit_handle;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+ cvmx_usb_control_header_t *header = cvmx_phys_to_ptr(control_header);
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+ CVMX_USB_LOG_PARAM("%d", pipe_handle);
+ CVMX_USB_LOG_PARAM("0x%llx", (unsigned long long)control_header);
+ CVMX_USB_LOG_PARAM("0x%llx", (unsigned long long)buffer);
+ CVMX_USB_LOG_PARAM("%d", buffer_length);
+
+ /* Pipe handle checking is done later in a common place */
+ if (cvmx_unlikely(!control_header))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ /* Some drivers send a buffer with a zero length. God only knows why */
+ if (cvmx_unlikely(buffer && (buffer_length < 0)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(!buffer && (buffer_length != 0)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if ((header->s.request_type & 0x80) == 0)
+ buffer_length = cvmx_le16_to_cpu(header->s.length);
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_CONTROL,
+ 0, /* flags */
+ buffer,
+ buffer_length,
+ control_header,
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ callback,
+ user_data);
+ CVMX_USB_RETURN(submit_handle);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_submit_control);
+#endif
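+
+/* A sketch of a control submission. The 8-byte setup packet is passed
+   by physical address; my_* names are placeholders. Only the
+   request_type and length fields are shown here since this file relies
+   on them; length is stored little-endian, which is why the check above
+   reads it with cvmx_le16_to_cpu():
+
+       cvmx_usb_control_header_t *header = my_header_memory;
+       header->s.request_type = 0x80;  // bit 7 set means device-to-host (IN)
+       // ... fill in the remaining standard setup fields ...
+
+       int submit_handle = cvmx_usb_submit_control(state, my_pipe_handle,
+                                                   cvmx_ptr_to_phys(header),
+                                                   cvmx_ptr_to_phys(my_buffer),
+                                                   my_length,
+                                                   my_done, my_data);
+*/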
+
+
+/**
+ * Call to submit a USB Isochronous transfer to a pipe.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Handle to the pipe for the transfer.
+ * @param start_frame
+ * Number of frames into the future to schedule
+ * this transaction.
+ * @param flags Flags to control the transfer. See
+ * cvmx_usb_isochronous_flags_t for the flag
+ * definitions.
+ * @param number_packets
+ * Number of sequential packets to transfer.
+ * "packets" is a pointer to an array of this
+ * many packet structures.
+ * @param packets Description of each transfer packet as
+ * defined by cvmx_usb_iso_packet_t. The array
+ * pointed to here must stay valid until the
+ * complete callback is called.
+ * @param buffer Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @param buffer_length
+ * Length of buffer in bytes.
+ * @param callback Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @param user_data User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * @return A submitted transaction handle or negative on
+ * failure. Negative values are failure codes from
+ * cvmx_usb_status_t.
+ */
+int cvmx_usb_submit_isochronous(cvmx_usb_state_t *state, int pipe_handle,
+ int start_frame, int flags,
+ int number_packets,
+ cvmx_usb_iso_packet_t packets[],
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ int submit_handle;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+ CVMX_USB_LOG_PARAM("%d", pipe_handle);
+ CVMX_USB_LOG_PARAM("%d", start_frame);
+ CVMX_USB_LOG_PARAM("0x%x", flags);
+ CVMX_USB_LOG_PARAM("%d", number_packets);
+ CVMX_USB_LOG_PARAM("%p", packets);
+ CVMX_USB_LOG_PARAM("0x%llx", (unsigned long long)buffer);
+ CVMX_USB_LOG_PARAM("%d", buffer_length);
+
+ /* Pipe handle checking is done later in a common place */
+ if (cvmx_unlikely(start_frame < 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(flags & ~(CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT | CVMX_USB_ISOCHRONOUS_FLAGS_ASAP)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(number_packets < 1))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(!packets))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(!buffer))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(buffer_length < 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_ISOCHRONOUS,
+ flags,
+ buffer,
+ buffer_length,
+ 0, /* control_header */
+ start_frame,
+ number_packets,
+ packets,
+ callback,
+ user_data);
+ CVMX_USB_RETURN(submit_handle);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_submit_isochronous);
+#endif
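+
+/* A sketch of an isochronous submission with two packets. The packets
+   array must stay valid until the completion callback runs; my_* names
+   are placeholders, and only the length field used by this file is
+   filled in. Note start_frame is currently unused, per the FIXME in
+   __cvmx_usb_submit_transaction():
+
+       cvmx_usb_iso_packet_t my_packets[2];
+       my_packets[0].length = 188;  // bytes expected in packet 0
+       my_packets[1].length = 188;  // bytes expected in packet 1
+
+       int submit_handle = cvmx_usb_submit_isochronous(state, my_pipe_handle,
+                                                       0,  // start_frame
+                                                       CVMX_USB_ISOCHRONOUS_FLAGS_ASAP,
+                                                       2, my_packets,
+                                                       cvmx_ptr_to_phys(my_buffer),
+                                                       my_length,
+                                                       my_done, my_data);
+*/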
+
+
+/**
+ * Cancel one outstanding request in a pipe. Canceling a request
+ * can fail if the transaction has already completed before cancel
+ * is called. Even after a successful cancel call, it may take
+ * a frame or two for the cvmx_usb_poll() function to call the
+ * associated callback.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Pipe handle to cancel requests in.
+ * @param submit_handle
+ * Handle to transaction to cancel, returned by the submit function.
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+cvmx_usb_status_t cvmx_usb_cancel(cvmx_usb_state_t *state, int pipe_handle,
+ int submit_handle)
+{
+ cvmx_usb_transaction_t *transaction;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+ cvmx_usb_pipe_t *pipe = usb->pipe + pipe_handle;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+ CVMX_USB_LOG_PARAM("%d", pipe_handle);
+ CVMX_USB_LOG_PARAM("%d", submit_handle);
+
+ if (cvmx_unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely((submit_handle < 0) || (submit_handle >= MAX_TRANSACTIONS)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ /* Fail if the pipe isn't open */
+ if (cvmx_unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ transaction = usb->transaction + submit_handle;
+
+ /* Fail if this transaction already completed */
+ if (cvmx_unlikely((transaction->flags & __CVMX_USB_TRANSACTION_FLAGS_IN_USE) == 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ /* If the transaction is the HEAD of the queue and scheduled, we need to
+ treat it specially */
+ if ((pipe->head == transaction) &&
+ (pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED))
+ {
+ cvmx_usbcx_hccharx_t usbc_hcchar;
+
+ usb->pipe_for_channel[pipe->channel] = NULL;
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ CVMX_SYNCW;
+
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
+ /* If the channel isn't enabled then the transaction already completed */
+ if (usbc_hcchar.s.chena)
+ {
+ usbc_hcchar.s.chdis = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index), usbc_hcchar.u32);
+ }
+ }
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_CANCEL);
+ CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_cancel);
+#endif
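+
+/* A sketch of canceling an in-flight request; my_* names are
+   placeholders. The submit handle is the value returned by the original
+   cvmx_usb_submit_*() call, and on success the completion callback is
+   still invoked with CVMX_USB_COMPLETE_CANCEL:
+
+       if (cvmx_usb_cancel(state, my_pipe_handle, my_submit_handle) !=
+           CVMX_USB_SUCCESS)
+           ;  // the transaction probably completed before the cancel
+*/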
+
+
+/**
+ * Cancel all outstanding requests in a pipe. Logically all this
+ * does is call cvmx_usb_cancel() in a loop.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Pipe handle to cancel requests in.
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+cvmx_usb_status_t cvmx_usb_cancel_all(cvmx_usb_state_t *state, int pipe_handle)
+{
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+ cvmx_usb_pipe_t *pipe = usb->pipe + pipe_handle;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+ CVMX_USB_LOG_PARAM("%d", pipe_handle);
+ if (cvmx_unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ /* Fail if the pipe isn't open */
+ if (cvmx_unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ /* Simply loop through and attempt to cancel each transaction */
+ while (pipe->head)
+ {
+ cvmx_usb_status_t result = cvmx_usb_cancel(state, pipe_handle,
+ __cvmx_usb_get_submit_handle(usb, pipe->head));
+ if (cvmx_unlikely(result != CVMX_USB_SUCCESS))
+ CVMX_USB_RETURN(result);
+ }
+ CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_cancel_all);
+#endif
+
+
+/**
+ * Close a pipe created with cvmx_usb_open_pipe().
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Pipe handle to close.
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t. CVMX_USB_BUSY is returned if the
+ * pipe has outstanding transfers.
+ */
+cvmx_usb_status_t cvmx_usb_close_pipe(cvmx_usb_state_t *state, int pipe_handle)
+{
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+ cvmx_usb_pipe_t *pipe = usb->pipe + pipe_handle;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+ CVMX_USB_LOG_PARAM("%d", pipe_handle);
+ if (cvmx_unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ /* Fail if the pipe isn't open */
+ if (cvmx_unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ /* Fail if the pipe has pending transactions */
+ if (cvmx_unlikely(pipe->head))
+ CVMX_USB_RETURN(CVMX_USB_BUSY);
+
+ pipe->flags = 0;
+ __cvmx_usb_remove_pipe(&usb->idle_pipes, pipe);
+ __cvmx_usb_append_pipe(&usb->free_pipes, pipe);
+
+ CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_close_pipe);
+#endif
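+
+/* A sketch of tearing down a pipe; my_pipe_handle is a placeholder.
+   Outstanding requests must be cancelled first, otherwise
+   cvmx_usb_close_pipe() reports CVMX_USB_BUSY:
+
+       cvmx_usb_cancel_all(state, my_pipe_handle);
+       // completion callbacks may still fire from a later cvmx_usb_poll()
+       if (cvmx_usb_close_pipe(state, my_pipe_handle) == CVMX_USB_BUSY)
+           ;  // transactions still pending, retry the close later
+*/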
+
+
+/**
+ * Register a function to be called when various USB events occur.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param reason Which event to register for.
+ * @param callback Function to call when the event occurs.
+ * @param user_data User data parameter to the function.
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+cvmx_usb_status_t cvmx_usb_register_callback(cvmx_usb_state_t *state,
+ cvmx_usb_callback_t reason,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+ CVMX_USB_LOG_PARAM("%d", reason);
+ CVMX_USB_LOG_PARAM("%p", callback);
+ CVMX_USB_LOG_PARAM("%p", user_data);
+ if (cvmx_unlikely(reason >= __CVMX_USB_CALLBACK_END))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (cvmx_unlikely(!callback))
+ CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+
+ usb->callback[reason] = callback;
+ usb->callback_data[reason] = user_data;
+
+ CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_register_callback);
+#endif
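+
+/* A sketch of registering for port change events; my_port_callback and
+   my_data are placeholders. All events share the cvmx_usb_callback_func_t
+   signature; see cvmx-usb.h for the parameter meanings:
+
+       cvmx_usb_register_callback(state, CVMX_USB_CALLBACK_PORT_CHANGED,
+                                  my_port_callback, my_data);
+*/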
+
+
+/**
+ * Get the current USB protocol level frame number. The frame
+ * number is always in the range of 0-0x7ff.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return USB frame number
+ */
+int cvmx_usb_get_frame_number(cvmx_usb_state_t *state)
+{
+ int frame_number;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+ cvmx_usbcx_hfnum_t usbc_hfnum;
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+
+ usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+ frame_number = usbc_hfnum.s.frnum;
+
+ CVMX_USB_RETURN(frame_number);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_get_frame_number);
+#endif
+
+
+/**
+ * @INTERNAL
+ * Poll a channel for status
+ *
+ * @param usb USB device
+ * @param channel Channel to poll
+ *
+ * @return Zero on success
+ */
+static int __cvmx_usb_poll_channel(cvmx_usb_internal_state_t *usb, int channel)
+{
+ cvmx_usbcx_hcintx_t usbc_hcint;
+ cvmx_usbcx_hctsizx_t usbc_hctsiz;
+ cvmx_usbcx_hccharx_t usbc_hcchar;
+ cvmx_usb_pipe_t *pipe;
+ cvmx_usb_transaction_t *transaction;
+ int bytes_this_transfer;
+ int bytes_in_last_packet;
+ int packets_processed;
+ int buffer_space_left;
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", usb);
+ CVMX_USB_LOG_PARAM("%d", channel);
+
+ /* Read the interrupt status bits for the channel */
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
+
+#if 0
+ cvmx_dprintf("Channel %d%s%s%s%s%s%s%s%s%s%s%s\n", channel,
+ (usbc_hcint.s.datatglerr) ? " DATATGLERR" : "",
+ (usbc_hcint.s.frmovrun) ? " FRMOVRUN" : "",
+ (usbc_hcint.s.bblerr) ? " BBLERR" : "",
+ (usbc_hcint.s.xacterr) ? " XACTERR" : "",
+ (usbc_hcint.s.nyet) ? " NYET" : "",
+ (usbc_hcint.s.ack) ? " ACK" : "",
+ (usbc_hcint.s.nak) ? " NAK" : "",
+ (usbc_hcint.s.stall) ? " STALL" : "",
+ (usbc_hcint.s.ahberr) ? " AHBERR" : "",
+ (usbc_hcint.s.chhltd) ? " CHHLTD" : "",
+ (usbc_hcint.s.xfercompl) ? " XFERCOMPL" : "");
+#endif
+
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ {
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+
+ if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis)
+ {
+ /* There seems to be a bug in CN31XX which can cause interrupt
+ IN transfers to get stuck until we do a write of HCCHARX
+ without changing things */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ CVMX_USB_RETURN(0);
+ }
+
+ /* In non DMA mode the channels don't halt themselves. We need to
+ manually disable channels that are left running */
+ if (!usbc_hcint.s.chhltd)
+ {
+ if (usbc_hcchar.s.chena)
+ {
+ cvmx_usbcx_hcintmskx_t hcintmsk;
+ /* Disable all interrupts except CHHLTD */
+ hcintmsk.u32 = 0;
+ hcintmsk.s.chhltdmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), hcintmsk.u32);
+ usbc_hcchar.s.chdis = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ CVMX_USB_RETURN(0);
+ }
+ else if (usbc_hcint.s.xfercompl)
+ {
+ /* Successful IN/OUT with transfer complete. Channel halt isn't needed */
+ }
+ else
+ {
+ cvmx_dprintf("USB%d: Channel %d interrupt without halt\n", usb->index, channel);
+ CVMX_USB_RETURN(0);
+ }
+ }
+ }
+ else
+ {
+ /* There are no interrupts that we need to process while the channel is
+ still running */
+ if (!usbc_hcint.s.chhltd)
+ CVMX_USB_RETURN(0);
+ }
+
+ /* Disable the channel interrupts now that it is done */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+ usb->idle_hardware_channels |= (1<<channel);
+
+ /* Make sure this channel is tied to a valid pipe */
+ pipe = usb->pipe_for_channel[channel];
+ CVMX_PREFETCH(pipe, 0);
+ CVMX_PREFETCH(pipe, 128);
+ if (!pipe)
+ CVMX_USB_RETURN(0);
+ transaction = pipe->head;
+ CVMX_PREFETCH0(transaction);
+
+ /* Disconnect this pipe from the HW channel. Later the schedule function will
+ figure out which pipe needs to go */
+ usb->pipe_for_channel[channel] = NULL;
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ /* Read the channel config info so we can figure out how much data
+ was transferred */
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+
+ /* Calculating the number of bytes successfully transferred is dependent on
+ the transfer direction */
+ packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
+ if (usbc_hcchar.s.epdir)
+ {
+ /* IN transactions are easy. For every byte received the hardware
+ decrements xfersize. All we need to do is subtract the current
+ value of xfersize from its starting value and we know how many
+ bytes were written to the buffer */
+ bytes_this_transfer = transaction->xfersize - usbc_hctsiz.s.xfersize;
+ }
+ else
+ {
+ /* OUT transactions don't decrement xfersize. Instead, pktcnt is
+ decremented on every successfully sent packet. The hardware does
+ this when it receives an ACK or NYET. If it doesn't
+ receive one of these responses, pktcnt doesn't change */
+ bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
+ /* The last packet may not be a full transfer if we didn't have
+ enough data */
+ if (bytes_this_transfer > transaction->xfersize)
+ bytes_this_transfer = transaction->xfersize;
+ }
+ /* Figure out how many bytes were in the last packet of the transfer */
+ if (packets_processed)
+ bytes_in_last_packet = bytes_this_transfer - (packets_processed-1) * usbc_hcchar.s.mps;
+ else
+ bytes_in_last_packet = bytes_this_transfer;
+
+ /* As a special case, setup transactions output the setup header, not
+ the user's data. For this reason we don't count setup data as bytes
+ transferred */
+ if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
+ (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
+ bytes_this_transfer = 0;
+
+ /* Optional debug output */
+ if (cvmx_unlikely((usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS) ||
+ (pipe->flags & CVMX_USB_PIPE_FLAGS_DEBUG_TRANSFERS)))
+ cvmx_dprintf("%s: Channel %d halted. Pipe %d transaction %d stage %d bytes=%d\n",
+ __FUNCTION__, channel,
+ __cvmx_usb_get_pipe_handle(usb, pipe),
+ __cvmx_usb_get_submit_handle(usb, transaction),
+ transaction->stage, bytes_this_transfer);
+
+ /* Add the bytes transferred to the running total. It is important that
+ bytes_this_transfer doesn't count any data that needs to be
+ retransmitted */
+ transaction->actual_bytes += bytes_this_transfer;
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ buffer_space_left = transaction->iso_packets[0].length - transaction->actual_bytes;
+ else
+ buffer_space_left = transaction->buffer_length - transaction->actual_bytes;
+
+ /* We need to remember the PID toggle state for the next transaction. The
+ hardware already updated it for the next transaction */
+ pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
+
+ /* For high speed bulk out, assume the next transaction will need to do a
+ ping before proceeding. If this isn't true the ACK processing below
+ will clear this flag */
+ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+
+ if (usbc_hcint.s.stall)
+ {
+ /* STALL as a response means this transaction cannot be completed
+ because the device can't process transactions. Tell the user. Any
+ data that was transferred will be counted on the actual bytes
+ transferred */
+ pipe->pid_toggle = 0;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_STALL);
+ }
+ else if (usbc_hcint.s.xacterr)
+ {
+ /* We know at least one packet worked if we get an ACK or NAK. Reset the retry counter */
+ if (usbc_hcint.s.nak || usbc_hcint.s.ack)
+ transaction->retries = 0;
+ transaction->retries++;
+ if (transaction->retries > MAX_RETRIES)
+ {
+ /* XactErr as a response means the device signaled something wrong with
+ the transfer. For example, PID toggle errors cause these */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_XACTERR);
+ }
+ else
+ {
+ /* If this was a split then clear our split in progress marker */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+ /* Rewind to the beginning of the transaction by anding off the
+ split complete bit */
+ transaction->stage &= ~1;
+ pipe->split_sc_frame = -1;
+ pipe->next_tx_frame += pipe->interval;
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number + pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ }
+ }
+ else if (usbc_hcint.s.bblerr)
+ {
+ /* Babble Error (BblErr) */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_BABBLEERR);
+ }
+ else if (usbc_hcint.s.datatglerr)
+ {
+ /* We'll retry the exact same transaction again */
+ transaction->retries++;
+ }
+ else if (usbc_hcint.s.nyet)
+ {
+ /* NYET as a response is only allowed in three cases: as a response to
+ a ping, as a response to a split transaction, and as a response to
+ a bulk out. The ping case is handled by hardware, so we only have
+ splits and bulk out */
+ if (!__cvmx_usb_pipe_needs_split(usb, pipe))
+ {
+ transaction->retries = 0;
+ /* If there is more data to go then we need to try again. Otherwise
+ this transaction is complete */
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet))
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ else
+ {
+ /* Split transactions retry the split complete 4 times, then rewind
+ to the start split and do the entire transaction again */
+ transaction->retries++;
+ if ((transaction->retries & 0x3) == 0)
+ {
+ /* Rewind to the beginning of the transaction by anding off the
+ split complete bit */
+ transaction->stage &= ~1;
+ pipe->split_sc_frame = -1;
+ }
+ }
+ }
+ else if (usbc_hcint.s.ack)
+ {
+ transaction->retries = 0;
+ /* The ACK bit can only be checked after the other error bits. This is
+ because a multi packet transfer may succeed in a number of packets
+ and then get a different response on the last packet. In this case
+ both ACK and the last response bit will be set. If none of the
+ other response bits is set, then the last packet must have been an
+ ACK */
+
+ /* Since we got an ACK, we know we don't need to do a ping on this
+ pipe */
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_NEED_PING;
+
+ switch (transaction->type)
+ {
+ case CVMX_USB_TRANSFER_CONTROL:
+ switch (transaction->stage)
+ {
+ case CVMX_USB_STAGE_NON_CONTROL:
+ case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+ /* This should be impossible */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ break;
+ case CVMX_USB_STAGE_SETUP:
+ pipe->pid_toggle = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ transaction->stage = CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
+ else
+ {
+ cvmx_usb_control_header_t *header = cvmx_phys_to_ptr(transaction->control_header);
+ if (header->s.length)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+ {
+ cvmx_usb_control_header_t *header = cvmx_phys_to_ptr(transaction->control_header);
+ if (header->s.length)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ {
+ transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
+ /* For setup OUT data that are splits, the hardware
+ doesn't appear to count transferred data. Here
+ we manually update the data transferred */
+ if (!usbc_hcchar.s.epdir)
+ {
+ if (buffer_space_left < pipe->max_packet)
+ transaction->actual_bytes += buffer_space_left;
+ else
+ transaction->actual_bytes += pipe->max_packet;
+ }
+ }
+ else if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet))
+ {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet))
+ {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ else
+ {
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ }
+ break;
+ case CVMX_USB_STAGE_STATUS:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ transaction->stage = CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
+ else
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ break;
+ case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ break;
+ }
+ break;
+ case CVMX_USB_TRANSFER_BULK:
+ case CVMX_USB_TRANSFER_INTERRUPT:
+ /* The only time a bulk transfer isn't complete when
+ it finishes with an ACK is during a split transaction. For
+ splits we need to continue the transfer if more data is
+ needed */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ else
+ {
+ if (buffer_space_left && (bytes_in_last_packet == pipe->max_packet))
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ else
+ {
+ if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ }
+ }
+ else
+ {
+ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (usbc_hcint.s.nak))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+ if (!buffer_space_left || (bytes_in_last_packet < pipe->max_packet))
+ {
+ if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ }
+ break;
+ case CVMX_USB_TRANSFER_ISOCHRONOUS:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ {
+ /* ISOCHRONOUS OUT splits don't require a complete split stage.
+ Instead they use a sequence of begin OUT splits to transfer
+ the data 188 bytes at a time. Once the transfer is complete,
+ the pipe sleeps until the next schedule interval */
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)
+ {
+ /* If no space left or this wasn't a max size packet then
+ this transfer is complete. Otherwise start it again
+ to send the next 188 bytes */
+ if (!buffer_space_left || (bytes_this_transfer < 188))
+ {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ }
+ else
+ {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE)
+ {
+ /* We are in the incoming data phase. Keep getting
+ data until we run out of space or get a small
+ packet */
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet))
+ {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ }
+ else
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ }
+ }
+ else
+ {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ break;
+ }
+ }
+ else if (usbc_hcint.s.nak)
+ {
+ /* If this was a split then clear our split in progress marker */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+ /* NAK as a response means the device couldn't accept the transaction,
+ but it should be retried in the future. Rewind to the beginning of
+ the transaction by anding off the split complete bit, and retry in
+ the next interval. If the pipe has fallen behind, the modulo
+ expression below advances next_tx_frame past the current frame while
+ keeping it phase aligned to the pipe's interval */
+ transaction->retries = 0;
+ transaction->stage &= ~1;
+ pipe->next_tx_frame += pipe->interval;
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number + pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ }
+ else
+ {
+ cvmx_usb_port_status_t port;
+ port = cvmx_usb_get_status((cvmx_usb_state_t *)usb);
+ if (port.port_enabled)
+ {
+ /* We'll retry the exact same transaction again */
+ transaction->retries++;
+ }
+ else
+ {
+ /* We get channel halted interrupts with no result bits set when the
+ cable is unplugged */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ }
+ }
+ CVMX_USB_RETURN(0);
+}
+
+
+/**
+ * Poll the USB block for status and call all needed callback
+ * handlers. This function is meant to be called in the interrupt
+ * handler for the USB controller. It can also be called
+ * periodically in a loop for non-interrupt based operation.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+cvmx_usb_status_t cvmx_usb_poll(cvmx_usb_state_t *state)
+{
+ cvmx_usbcx_hfnum_t usbc_hfnum;
+ cvmx_usbcx_gintsts_t usbc_gintsts;
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+
+ CVMX_PREFETCH(usb, 0);
+ CVMX_PREFETCH(usb, 1*128);
+ CVMX_PREFETCH(usb, 2*128);
+ CVMX_PREFETCH(usb, 3*128);
+ CVMX_PREFETCH(usb, 4*128);
+
+ CVMX_USB_LOG_CALLED();
+ CVMX_USB_LOG_PARAM("%p", state);
+
+ /* Update the frame counter. The hardware frame number is only 14 bits
+ wide, so detect wraps and maintain an extended count in software */
+ usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+ if ((usb->frame_number&0x3fff) > usbc_hfnum.s.frnum)
+ usb->frame_number += 0x4000;
+ usb->frame_number &= ~0x3fffull;
+ usb->frame_number |= usbc_hfnum.s.frnum;
+
+ /* Read the pending interrupts */
+ usbc_gintsts.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTSTS(usb->index));
+
+ /* Clear the interrupts now that we know about them */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), usbc_gintsts.u32);
+
+ if (usbc_gintsts.s.rxflvl)
+ {
+ /* RxFIFO Non-Empty (RxFLvl)
+ Indicates that there is at least one packet pending to be read
+ from the RxFIFO. */
+ /* In DMA mode this is handled by hardware */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_poll_rx_fifo(usb);
+ }
+ if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp)
+ {
+ /* Fill the Tx FIFOs when not in DMA mode */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_poll_tx_fifo(usb);
+ }
+ if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint)
+ {
+ cvmx_usbcx_hprt_t usbc_hprt;
+ /* Disconnect Detected Interrupt (DisconnInt)
+ Asserted when a device disconnect is detected. */
+
+ /* Host Port Interrupt (PrtInt)
+ The core sets this bit to indicate a change in port status of one
+ of the O2P USB core ports in Host mode. The application must
+ read the Host Port Control and Status (HPRT) register to
+ determine the exact event that caused this interrupt. The
+ application must clear the appropriate status bit in the Host Port
+ Control and Status register to clear this bit. */
+
+ /* Call the user's port callback */
+ __cvmx_usb_perform_callback(usb, NULL, NULL,
+ CVMX_USB_CALLBACK_PORT_CHANGED,
+ CVMX_USB_COMPLETE_SUCCESS);
+ /* Clear the port change bits */
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbc_hprt.s.prtena = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), usbc_hprt.u32);
+ }
+ if (usbc_gintsts.s.hchint)
+ {
+ /* Host Channels Interrupt (HChInt)
+ The core sets this bit to indicate that an interrupt is pending on
+ one of the channels of the core (in Host mode). The application
+ must read the Host All Channels Interrupt (HAINT) register to
+ determine the exact number of the channel on which the
+ interrupt occurred, and then read the corresponding Host
+ Channel-n Interrupt (HCINTn) register to determine the exact
+ cause of the interrupt. The application must clear the
+ appropriate status bit in the HCINTn register to clear this bit. */
+ cvmx_usbcx_haint_t usbc_haint;
+ usbc_haint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINT(usb->index));
+ while (usbc_haint.u32)
+ {
+ int channel;
+ CVMX_CLZ(channel, usbc_haint.u32);
+ channel = 31 - channel;
+ __cvmx_usb_poll_channel(usb, channel);
+ usbc_haint.u32 ^= 1<<channel;
+ }
+ }
+
+ __cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
+
+ CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usb_poll);
+#endif
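+
+/* For non-interrupt operation, cvmx_usb_poll() can simply be called
+   periodically. A sketch, with the required locking omitted (see the
+   locking notes in cvmx-usb.h):
+
+       while (my_driver_running)
+           cvmx_usb_poll(state);
+*/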
+
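+/**
+ * Set the DATA0/DATA1 toggle state of a pipe. This is useful when the
+ * operating system tracks the toggle itself, for example after clearing
+ * a halted endpoint. Note that despite its name, endpoint_num is used
+ * as an index into the pipe array, i.e. a pipe handle.
+ *
+ * @param state  USB device state populated by cvmx_usb_initialize().
+ * @param endpoint_num
+ *               Pipe handle to modify.
+ * @param toggle Non zero to set the toggle, zero to clear it.
+ */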
+extern void cvmx_usb_set_toggle(cvmx_usb_state_t *state, int endpoint_num, int toggle)
+{
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+ cvmx_usb_pipe_t *pipe = usb->pipe + endpoint_num;
+
+ pipe->pid_toggle = !!toggle;
+}
+
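+/**
+ * Get the current DATA0/DATA1 toggle state of a pipe. As with
+ * cvmx_usb_set_toggle(), endpoint_num is used as a pipe handle.
+ *
+ * @param state  USB device state populated by cvmx_usb_initialize().
+ * @param endpoint_num
+ *               Pipe handle to query.
+ *
+ * @return 1 if the toggle is set, otherwise 0.
+ */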
+extern int cvmx_usb_get_toggle(cvmx_usb_state_t *state, int endpoint_num)
+{
+ cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+ cvmx_usb_pipe_t *pipe = usb->pipe + endpoint_num;
+
+ if (pipe->pid_toggle)
+ return (1);
+ return (0);
+}
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-usb.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-usb.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-usb.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-usb.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1093 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * "cvmx-usb.h" defines a set of low level USB functions to help
+ * developers create Octeon USB drivers for various operating
+ * systems. These functions provide a generic API to the Octeon
+ * USB blocks, hiding the internal hardware specific
+ * operations.
+ *
+ * At a high level the device driver needs to:
+ *
+ * -# Call cvmx_usb_get_num_ports() to get the number of
+ * supported ports.
+ * -# Call cvmx_usb_initialize() for each Octeon USB port.
+ * -# Enable the port using cvmx_usb_enable().
+ * -# Either periodically, or in an interrupt handler, call
+ * cvmx_usb_poll() to service USB events.
+ * -# Manage pipes using cvmx_usb_open_pipe() and
+ * cvmx_usb_close_pipe().
+ * -# Manage transfers using cvmx_usb_submit_*() and
+ * cvmx_usb_cancel*().
+ * -# Shutdown USB on unload using cvmx_usb_shutdown().
+ *
+ * To monitor USB status changes, the device driver must use
+ * cvmx_usb_register_callback() to register for events that it
+ * is interested in. Below are a few hints on successfully
+ * implementing a driver on top of this API.
+ *
+ * <h2>Initialization</h2>
+ *
+ * When a driver is first loaded, it is normally not necessary
+ * to bring up the USB port completely. Most operating systems
+ * expect to initialize and enable the port in two independent
+ * steps. Normally an operating system will probe hardware,
+ * initialize anything found, and then enable the hardware.
+ *
+ * In the probe phase you should:
+ * -# Use cvmx_usb_get_num_ports() to determine the number of
+ * USB ports to be supported.
+ * -# Allocate space for a cvmx_usb_state_t structure for each
+ * port.
+ * -# Tell the operating system about each port
+ *
+ * In the initialization phase you should:
+ * -# Use cvmx_usb_initialize() on each port.
+ * -# Do not call cvmx_usb_enable(). This leaves the USB port in
+ * the disabled state until the operating system is ready.
+ *
+ * Finally, in the enable phase you should:
+ * -# Call cvmx_usb_enable() on the appropriate port.
+ * -# Note that some operating systems use a RESET instead of an
+ * enable call. To implement RESET, you should call
+ * cvmx_usb_disable() followed by cvmx_usb_enable().
+ *
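+ * As a sketch of these phases (argument lists are abbreviated here;
+ * the authoritative declarations appear later in this file):
+ *
+ *     int ports = cvmx_usb_get_num_ports();
+ *     // probe: allocate a cvmx_usb_state_t per port, report each port
+ *     // initialize: cvmx_usb_initialize(...) on each port, no enable
+ *     // enable: cvmx_usb_enable(...) once the operating system is ready
+ *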
+ * <h2>Locking</h2>
+ *
+ * All of the functions in the cvmx-usb API assume exclusive
+ * access to the USB hardware and internal data structures. This
+ * means that the driver must provide locking as necessary.
+ *
+ * In the single CPU case it is normally enough to disable
+ * interrupts before every call to cvmx_usb*() and enable them
+ * again after the call is complete. Keep in mind that it is
+ * very common for the callback handlers to make additional
+ * calls into cvmx-usb, so the disable/enable must be protected
+ * against recursion. As an example, the Linux kernel
+ * local_irq_save() and local_irq_restore() are perfect for this
+ * in the non SMP case.
+ *
+ * In the SMP case, locking is more complicated. For SMP you not
+ * only need to disable interrupts on the local core, but also
+ * take a lock to make sure that another core cannot call
+ * cvmx-usb.
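+ *
+ * A minimal non SMP sketch using the Linux primitives mentioned
+ * above, where usb is the driver's cvmx_usb_state_t pointer; any
+ * cvmx_usb*() call would be wrapped the same way:
+ *
+ * @code
+ * unsigned long flags;
+ *
+ * local_irq_save(flags);
+ * cvmx_usb_poll(usb);
+ * local_irq_restore(flags);
+ * @endcode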
+ *
+ * <h2>Port callback</h2>
+ *
+ * The port callback prototype needs to look as follows:
+ *
+ * void port_callback(cvmx_usb_state_t *usb,
+ * cvmx_usb_callback_t reason,
+ * cvmx_usb_complete_t status,
+ * int pipe_handle,
+ * int submit_handle,
+ * int bytes_transferred,
+ * void *user_data);
+ * - @b usb is the cvmx_usb_state_t for the port.
+ * - @b reason will always be
+ * CVMX_USB_CALLBACK_PORT_CHANGED.
+ * - @b status will always be CVMX_USB_COMPLETE_SUCCESS.
+ * - @b pipe_handle will always be -1.
+ * - @b submit_handle will always be -1.
+ * - @b bytes_transferred will always be 0.
+ * - @b user_data is the void pointer originally passed along
+ * with the callback. Use this for any state information you
+ * need.
+ *
+ * The port callback will be called whenever the user plugs /
+ * unplugs a device from the port. It will not be called when a
+ * device is plugged / unplugged from a hub connected to the
+ * root port. Normally all the callback needs to do is tell the
+ * operating system to poll the root hub for status. Under
+ * Linux, this is performed by calling usb_hcd_poll_rh_status().
+ * In the Linux driver we use @b user_data to pass around the
+ * Linux "hcd" structure. Once the port callback completes,
+ * Linux automatically calls octeon_usb_hub_status_data() which
+ * uses cvmx_usb_get_status() to determine the root port status.
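+ *
+ * A minimal Linux flavored sketch of such a callback, where the
+ * Linux "hcd" structure was passed as @b user_data at
+ * registration time:
+ *
+ * @code
+ * static void port_callback(cvmx_usb_state_t *usb,
+ *                           cvmx_usb_callback_t reason,
+ *                           cvmx_usb_complete_t status,
+ *                           int pipe_handle, int submit_handle,
+ *                           int bytes_transferred, void *user_data)
+ * {
+ *     struct usb_hcd *hcd = (struct usb_hcd *)user_data;
+ *     usb_hcd_poll_rh_status(hcd);
+ * }
+ * @endcode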
+ *
+ * <h2>Complete callback</h2>
+ *
+ * The completion callback prototype needs to look as follows:
+ *
+ * void complete_callback(cvmx_usb_state_t *usb,
+ * cvmx_usb_callback_t reason,
+ * cvmx_usb_complete_t status,
+ * int pipe_handle,
+ * int submit_handle,
+ * int bytes_transferred,
+ * void *user_data);
+ * - @b usb is the cvmx_usb_state_t for the port.
+ * - @b reason will always be
+ * CVMX_USB_CALLBACK_TRANSFER_COMPLETE.
+ * - @b status will be one of the cvmx_usb_complete_t
+ * enumerations.
+ * - @b pipe_handle is the handle to the pipe the transaction
+ * was originally submitted on.
+ * - @b submit_handle is the handle returned by the original
+ * cvmx_usb_submit_* call.
+ * - @b bytes_transferred is the number of bytes successfully
+ * transferred in the transaction. This will be zero on most
+ * error conditions.
+ * - @b user_data is the void pointer originally passed along
+ * with the callback. Use this for any state information you
+ * need. For example, the Linux "urb" is stored in here in the
+ * Linux driver.
+ *
+ * In general your callback handler should use @b status and @b
+ * bytes_transferred to tell the operating system how the
+ * transaction completed. Normally the pipe is not changed in
+ * this callback.
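+ *
+ * A minimal sketch of such a handler; my_report_completion() is
+ * a hypothetical helper that hands the result back to the
+ * operating system:
+ *
+ * @code
+ * static void complete_callback(cvmx_usb_state_t *usb,
+ *                               cvmx_usb_callback_t reason,
+ *                               cvmx_usb_complete_t status,
+ *                               int pipe_handle, int submit_handle,
+ *                               int bytes_transferred, void *user_data)
+ * {
+ *     int failed = (status != CVMX_USB_COMPLETE_SUCCESS);
+ *     my_report_completion(user_data, failed, bytes_transferred);
+ * }
+ * @endcode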
+ *
+ * <h2>Canceling transactions</h2>
+ *
+ * When a transaction is canceled using cvmx_usb_cancel*(), the
+ * actual length of time until the complete callback is called
+ * can vary greatly. It may be called before cvmx_usb_cancel*()
+ * returns, or it may be called a number of usb frames in the
+ * future once the hardware frees the transaction. In either of
+ * these cases, the complete handler will receive
+ * CVMX_USB_COMPLETE_CANCEL.
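+ *
+ * A minimal cancel sketch; note that CVMX_USB_COMPLETE_CANCEL is
+ * observed in the completion callback, not in the return value
+ * of the cancel call:
+ *
+ * @code
+ * if (cvmx_usb_cancel(usb, pipe_handle, submit_handle) == CVMX_USB_SUCCESS)
+ * {
+ *     // The complete callback will eventually run with
+ *     // status == CVMX_USB_COMPLETE_CANCEL.
+ * }
+ * @endcode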
+ *
+ * <h2>Handling pipes</h2>
+ *
+ * USB "pipes" are a software construct created by this API to
+ * enable the ordering of usb transactions to a device endpoint.
+ * Octeon's underlying hardware doesn't have any concept
+ * equivalent to "pipes". The hardware instead has eight
+ * channels that can be used simultaneously to have up to eight
+ * transactions in process at the same time. In order to maintain
+ * ordering in a pipe, the transactions for a pipe will only be
+ * active in one hardware channel at a time. From an API user's
+ * perspective, this doesn't matter but it can be helpful to
+ * keep this in mind when you are probing hardware while
+ * debugging.
+ *
+ * Also keep in mind that usb transactions contain state
+ * information about the previous transaction to the same
+ * endpoint. Each transaction has a PID toggle that changes 0/1
+ * between each sub packet. This is maintained in the pipe data
+ * structures. For this reason, you generally cannot create and
+ * destroy a pipe for every transaction. A sequence of
+ * transactions to the same endpoint must use the same pipe.
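+ *
+ * A sketch of a pipe's lifetime for a high speed bulk IN
+ * endpoint; device_addr, endpoint_num, buffer_phys and
+ * buffer_len are assumed to come from the caller:
+ *
+ * @code
+ * int pipe_handle = cvmx_usb_open_pipe(usb, (cvmx_usb_pipe_flags_t)0,
+ *                                      device_addr, endpoint_num,
+ *                                      CVMX_USB_SPEED_HIGH, 512,
+ *                                      CVMX_USB_TRANSFER_BULK,
+ *                                      CVMX_USB_DIRECTION_IN,
+ *                                      0, 0, 0, 0);
+ * if (pipe_handle >= 0)
+ * {
+ *     cvmx_usb_submit_bulk(usb, pipe_handle, buffer_phys, buffer_len,
+ *                          complete_callback, NULL);
+ *     // Reuse the same pipe for later transfers to this endpoint.
+ *     // Only once all outstanding transfers have completed:
+ *     cvmx_usb_close_pipe(usb, pipe_handle);
+ * }
+ * @endcode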
+ *
+ * <h2>Root Hub</h2>
+ *
+ * Some operating systems view the usb root port as a normal usb
+ * hub. These systems attempt to control the root hub with
+ * messages similar to the usb 2.0 spec for hub control and
+ * status. For these systems it may be necessary to write a
+ * function to decode standard usb control messages into
+ * equivalent cvmx-usb API calls. As an example, the following
+ * code is used under Linux for some of the basic hub control
+ * messages.
+ *
+ * @code
+ * static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength)
+ * {
+ * cvmx_usb_state_t *usb = (cvmx_usb_state_t *)hcd->hcd_priv;
+ * cvmx_usb_port_status_t usb_port_status;
+ * int port_status;
+ * struct usb_hub_descriptor *desc;
+ * unsigned long flags;
+ *
+ * switch (typeReq)
+ * {
+ * case ClearHubFeature:
+ * DEBUG_ROOT_HUB("OcteonUSB: ClearHubFeature\n");
+ * switch (wValue)
+ * {
+ * case C_HUB_LOCAL_POWER:
+ * case C_HUB_OVER_CURRENT:
+ * // Nothing required here
+ * break;
+ * default:
+ * return -EINVAL;
+ * }
+ * break;
+ * case ClearPortFeature:
+ * DEBUG_ROOT_HUB("OcteonUSB: ClearPortFeature");
+ * if (wIndex != 1)
+ * {
+ * DEBUG_ROOT_HUB(" INVALID\n");
+ * return -EINVAL;
+ * }
+ *
+ * switch (wValue)
+ * {
+ * case USB_PORT_FEAT_ENABLE:
+ * DEBUG_ROOT_HUB(" ENABLE");
+ * local_irq_save(flags);
+ * cvmx_usb_disable(usb);
+ * local_irq_restore(flags);
+ * break;
+ * case USB_PORT_FEAT_SUSPEND:
+ * DEBUG_ROOT_HUB(" SUSPEND");
+ * // Not supported on Octeon
+ * break;
+ * case USB_PORT_FEAT_POWER:
+ * DEBUG_ROOT_HUB(" POWER");
+ * // Not supported on Octeon
+ * break;
+ * case USB_PORT_FEAT_INDICATOR:
+ * DEBUG_ROOT_HUB(" INDICATOR");
+ * // Port indicator not supported
+ * break;
+ * case USB_PORT_FEAT_C_CONNECTION:
+ * DEBUG_ROOT_HUB(" C_CONNECTION");
+ * // Clears drivers internal connect status change flag
+ * cvmx_usb_set_status(usb, cvmx_usb_get_status(usb));
+ * break;
+ * case USB_PORT_FEAT_C_RESET:
+ * DEBUG_ROOT_HUB(" C_RESET");
+ * // Clears the driver's internal Port Reset Change flag
+ * cvmx_usb_set_status(usb, cvmx_usb_get_status(usb));
+ * break;
+ * case USB_PORT_FEAT_C_ENABLE:
+ * DEBUG_ROOT_HUB(" C_ENABLE");
+ * // Clears the driver's internal Port Enable/Disable Change flag
+ * cvmx_usb_set_status(usb, cvmx_usb_get_status(usb));
+ * break;
+ * case USB_PORT_FEAT_C_SUSPEND:
+ * DEBUG_ROOT_HUB(" C_SUSPEND");
+ * // Clears the driver's internal Port Suspend Change flag,
+ * // which is set when resume signaling on the host port is
+ * // complete
+ * break;
+ * case USB_PORT_FEAT_C_OVER_CURRENT:
+ * DEBUG_ROOT_HUB(" C_OVER_CURRENT");
+ * // Clears the driver's overcurrent Change flag
+ * cvmx_usb_set_status(usb, cvmx_usb_get_status(usb));
+ * break;
+ * default:
+ * DEBUG_ROOT_HUB(" UNKNOWN\n");
+ * return -EINVAL;
+ * }
+ * DEBUG_ROOT_HUB("\n");
+ * break;
+ * case GetHubDescriptor:
+ * DEBUG_ROOT_HUB("OcteonUSB: GetHubDescriptor\n");
+ * desc = (struct usb_hub_descriptor *)buf;
+ * desc->bDescLength = 9;
+ * desc->bDescriptorType = 0x29;
+ * desc->bNbrPorts = 1;
+ * desc->wHubCharacteristics = 0x08;
+ * desc->bPwrOn2PwrGood = 1;
+ * desc->bHubContrCurrent = 0;
+ * desc->bitmap[0] = 0;
+ * desc->bitmap[1] = 0xff;
+ * break;
+ * case GetHubStatus:
+ * DEBUG_ROOT_HUB("OcteonUSB: GetHubStatus\n");
+ * *(__le32 *)buf = 0;
+ * break;
+ * case GetPortStatus:
+ * DEBUG_ROOT_HUB("OcteonUSB: GetPortStatus");
+ * if (wIndex != 1)
+ * {
+ * DEBUG_ROOT_HUB(" INVALID\n");
+ * return -EINVAL;
+ * }
+ *
+ * usb_port_status = cvmx_usb_get_status(usb);
+ * port_status = 0;
+ *
+ * if (usb_port_status.connect_change)
+ * {
+ * port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
+ * DEBUG_ROOT_HUB(" C_CONNECTION");
+ * }
+ *
+ * if (usb_port_status.port_enabled)
+ * {
+ * port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
+ * DEBUG_ROOT_HUB(" C_ENABLE");
+ * }
+ *
+ * if (usb_port_status.connected)
+ * {
+ * port_status |= (1 << USB_PORT_FEAT_CONNECTION);
+ * DEBUG_ROOT_HUB(" CONNECTION");
+ * }
+ *
+ * if (usb_port_status.port_enabled)
+ * {
+ * port_status |= (1 << USB_PORT_FEAT_ENABLE);
+ * DEBUG_ROOT_HUB(" ENABLE");
+ * }
+ *
+ * if (usb_port_status.port_over_current)
+ * {
+ * port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
+ * DEBUG_ROOT_HUB(" OVER_CURRENT");
+ * }
+ *
+ * if (usb_port_status.port_powered)
+ * {
+ * port_status |= (1 << USB_PORT_FEAT_POWER);
+ * DEBUG_ROOT_HUB(" POWER");
+ * }
+ *
+ * if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH)
+ * {
+ * port_status |= (1 << USB_PORT_FEAT_HIGHSPEED);
+ * DEBUG_ROOT_HUB(" HIGHSPEED");
+ * }
+ * else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW)
+ * {
+ * port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
+ * DEBUG_ROOT_HUB(" LOWSPEED");
+ * }
+ *
+ * *((__le32 *)buf) = cpu_to_le32(port_status);
+ * DEBUG_ROOT_HUB("\n");
+ * break;
+ * case SetHubFeature:
+ * DEBUG_ROOT_HUB("OcteonUSB: SetHubFeature\n");
+ * // No HUB features supported
+ * break;
+ * case SetPortFeature:
+ * DEBUG_ROOT_HUB("OcteonUSB: SetPortFeature");
+ * if (wIndex != 1)
+ * {
+ * DEBUG_ROOT_HUB(" INVALID\n");
+ * return -EINVAL;
+ * }
+ *
+ * switch (wValue)
+ * {
+ * case USB_PORT_FEAT_SUSPEND:
+ * DEBUG_ROOT_HUB(" SUSPEND\n");
+ * return -EINVAL;
+ * case USB_PORT_FEAT_POWER:
+ * DEBUG_ROOT_HUB(" POWER\n");
+ * return -EINVAL;
+ * case USB_PORT_FEAT_RESET:
+ * DEBUG_ROOT_HUB(" RESET\n");
+ * local_irq_save(flags);
+ * cvmx_usb_disable(usb);
+ * if (cvmx_usb_enable(usb))
+ * DEBUG_ERROR("Failed to enable the port\n");
+ * local_irq_restore(flags);
+ * return 0;
+ * case USB_PORT_FEAT_INDICATOR:
+ * DEBUG_ROOT_HUB(" INDICATOR\n");
+ * // Not supported
+ * break;
+ * default:
+ * DEBUG_ROOT_HUB(" UNKNOWN\n");
+ * return -EINVAL;
+ * }
+ * break;
+ * default:
+ * DEBUG_ROOT_HUB("OcteonUSB: Unknown root hub request\n");
+ * return -EINVAL;
+ * }
+ * return 0;
+ * }
+ * @endcode
+ *
+ * <h2>Interrupts</h2>
+ *
+ * If you plan on using usb interrupts, cvmx_usb_poll() must be
+ * called on every usb interrupt. It will read the usb state,
+ * call any needed callbacks, and schedule transactions as
+ * needed. Your device driver needs only to hook up an interrupt
+ * handler and call cvmx_usb_poll(). Octeon's usb port 0 causes
+ * CIU bit CIU_INT*_SUM0[USB] to be set (bit 56). For port 1,
+ * CIU bit CIU_INT_SUM1[USB1] is set (bit 17). How these bits
+ * are turned into interrupt numbers is operating system
+ * specific. For Linux, there are the convenient defines
+ * OCTEON_IRQ_USB0 and OCTEON_IRQ_USB1 for the IRQ numbers.
+ *
+ * If you aren't using interrupts, simply call cvmx_usb_poll()
+ * in your main processing loop.
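+ *
+ * A minimal Linux style handler sketch; registering it for
+ * OCTEON_IRQ_USB0 and any SMP locking are left to the OS
+ * specific setup code:
+ *
+ * @code
+ * static irqreturn_t octeon_usb_irq(int irq, void *arg)
+ * {
+ *     cvmx_usb_state_t *usb = (cvmx_usb_state_t *)arg;
+ *     cvmx_usb_poll(usb);
+ *     return IRQ_HANDLED;
+ * }
+ * @endcode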
+ *
+ * <hr>$Revision: 32636 $<hr>
+ */
+
+#ifndef __CVMX_USB_H__
+#define __CVMX_USB_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Enumerations representing the status of function calls.
+ */
+typedef enum
+{
+ CVMX_USB_SUCCESS = 0, /**< There were no errors */
+ CVMX_USB_INVALID_PARAM = -1, /**< A parameter to the function was invalid */
+ CVMX_USB_NO_MEMORY = -2, /**< Insufficient resources were available for the request */
+ CVMX_USB_BUSY = -3, /**< The resource is busy and cannot service the request */
+ CVMX_USB_TIMEOUT = -4, /**< Waiting for an action timed out */
+ CVMX_USB_INCORRECT_MODE = -5, /**< The function call doesn't work in the current USB
+ mode. This happens when host only functions are
+ called in device mode or vice versa */
+} cvmx_usb_status_t;
+
+/**
+ * Enumerations representing the possible USB device speeds
+ */
+typedef enum
+{
+ CVMX_USB_SPEED_HIGH = 0, /**< Device is operating at 480Mbps */
+ CVMX_USB_SPEED_FULL = 1, /**< Device is operating at 12Mbps */
+ CVMX_USB_SPEED_LOW = 2, /**< Device is operating at 1.5Mbps */
+} cvmx_usb_speed_t;
+
+/**
+ * Enumeration representing the possible USB transfer types.
+ */
+typedef enum
+{
+ CVMX_USB_TRANSFER_CONTROL = 0, /**< USB transfer type control for hub and status transfers */
+ CVMX_USB_TRANSFER_ISOCHRONOUS = 1, /**< USB transfer type isochronous for low priority periodic transfers */
+ CVMX_USB_TRANSFER_BULK = 2, /**< USB transfer type bulk for large low priority transfers */
+ CVMX_USB_TRANSFER_INTERRUPT = 3, /**< USB transfer type interrupt for high priority periodic transfers */
+} cvmx_usb_transfer_t;
+
+/**
+ * Enumeration of the transfer directions
+ */
+typedef enum
+{
+ CVMX_USB_DIRECTION_OUT, /**< Data is transferring from Octeon to the device/host */
+ CVMX_USB_DIRECTION_IN, /**< Data is transferring from the device/host to Octeon */
+} cvmx_usb_direction_t;
+
+/**
+ * Enumeration of all possible status codes passed to callback
+ * functions.
+ */
+typedef enum
+{
+ CVMX_USB_COMPLETE_SUCCESS, /**< The transaction / operation finished without any errors */
+ CVMX_USB_COMPLETE_SHORT, /**< FIXME: This is currently not implemented */
+ CVMX_USB_COMPLETE_CANCEL, /**< The transaction was canceled while in flight by a user call to cvmx_usb_cancel* */
+ CVMX_USB_COMPLETE_ERROR, /**< The transaction aborted with an unexpected error status */
+ CVMX_USB_COMPLETE_STALL, /**< The transaction received a USB STALL response from the device */
+ CVMX_USB_COMPLETE_XACTERR, /**< The transaction failed with an error from the device even after a number of retries */
+ CVMX_USB_COMPLETE_DATATGLERR, /**< The transaction failed with a data toggle error even after a number of retries */
+ CVMX_USB_COMPLETE_BABBLEERR, /**< The transaction failed with a babble error */
+ CVMX_USB_COMPLETE_FRAMEERR, /**< The transaction failed with a frame error even after a number of retries */
+} cvmx_usb_complete_t;
+
+/**
+ * Structure returned containing the USB port status information.
+ */
+typedef struct
+{
+ uint32_t reserved : 25;
+ uint32_t port_enabled : 1; /**< 1 = Usb port is enabled, 0 = disabled */
+ uint32_t port_over_current : 1; /**< 1 = Over current detected, 0 = Over current not detected. Octeon doesn't support over current detection */
+ uint32_t port_powered : 1; /**< 1 = Port power is being supplied to the device, 0 = power is off. Octeon doesn't support turning port power off */
+ cvmx_usb_speed_t port_speed : 2; /**< Current port speed */
+ uint32_t connected : 1; /**< 1 = A device is connected to the port, 0 = No device is connected */
+ uint32_t connect_change : 1; /**< 1 = Device connected state changed since the last set status call */
+} cvmx_usb_port_status_t;
+
+/**
+ * This is the structure of a Control packet header
+ */
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t request_type : 8; /**< Bit 7 tells the direction: 1=IN, 0=OUT */
+ uint64_t request : 8; /**< The standard usb request to make */
+ uint64_t value : 16; /**< Value parameter for the request in little endian format */
+ uint64_t index : 16; /**< Index for the request in little endian format */
+ uint64_t length : 16; /**< Length of the data associated with this request in little endian format */
+ } s;
+} cvmx_usb_control_header_t;
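+
+/*
+ * Example (a minimal sketch): build the header for a standard
+ * GET_DESCRIPTOR (device descriptor) request. Because the value,
+ * index and length fields hold little endian data, the constants
+ * appear byte swapped when written from a big endian Octeon core:
+ *
+ *   cvmx_usb_control_header_t header;
+ *   header.u64 = 0;
+ *   header.s.request_type = 0x80; // IN, standard, device recipient
+ *   header.s.request = 6;         // GET_DESCRIPTOR
+ *   header.s.value = 0x0001;      // wValue 0x0100, byte swapped
+ *   header.s.index = 0;           // wIndex 0
+ *   header.s.length = 0x1200;     // wLength 18, byte swapped
+ */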
+
+/**
+ * Descriptor for Isochronous packets
+ */
+typedef struct
+{
+ int offset; /**< This is the offset in bytes into the main buffer where this data is stored */
+ int length; /**< This is the length in bytes of the data */
+ cvmx_usb_complete_t status; /**< This is the status of this individual packet transfer */
+} cvmx_usb_iso_packet_t;
+
+/**
+ * Possible callback reasons for the USB API.
+ */
+typedef enum
+{
+ CVMX_USB_CALLBACK_TRANSFER_COMPLETE,
+ /**< A callback of this type is called when a submitted transfer
+ completes. The completion callback will be called even if the
+ transfer fails or is canceled. The status parameter will
+ contain details of why the callback was called. */
+ CVMX_USB_CALLBACK_PORT_CHANGED, /**< The status of the port changed. For example, someone may have
+ plugged a device in. The status parameter contains
+ CVMX_USB_COMPLETE_SUCCESS. Use cvmx_usb_get_status() to get
+ the new port status. */
+ __CVMX_USB_CALLBACK_END /**< Do not use. Used internally for array bounds */
+} cvmx_usb_callback_t;
+
+/**
+ * USB state internal data. The contents of this structure
+ * may change in future SDKs. No data in it should be referenced
+ * by users of this API.
+ */
+typedef struct
+{
+ char data[65536];
+} cvmx_usb_state_t;
+
+/**
+ * USB callback functions are always of the following type.
+ * The parameters are as follows:
+ * - state = USB device state populated by
+ * cvmx_usb_initialize().
+ * - reason = The cvmx_usb_callback_t used to register
+ * the callback.
+ * - status = The cvmx_usb_complete_t representing the
+ * status code of a transaction.
+ * - pipe_handle = The Pipe that caused this callback, or
+ * -1 if this callback wasn't associated with a pipe.
+ * - submit_handle = Transfer submit handle causing this
+ * callback, or -1 if this callback wasn't associated
+ * with a transfer.
+ * - bytes_transferred = Actual number of bytes transferred.
+ * - user_data = The user pointer supplied to the
+ * function cvmx_usb_submit() or
+ * cvmx_usb_register_callback() */
+typedef void (*cvmx_usb_callback_func_t)(cvmx_usb_state_t *state,
+ cvmx_usb_callback_t reason,
+ cvmx_usb_complete_t status,
+ int pipe_handle, int submit_handle,
+ int bytes_transferred, void *user_data);
+
+/**
+ * Flags to pass the initialization function.
+ */
+typedef enum
+{
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1<<0, /**< The USB port uses a 12MHz crystal as clock source
+ at USB_XO and USB_XI. */
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1<<1, /**< The USB port uses 12/24/48MHz 2.5V board clock
+ source at USB_XO. USB_XI should be tied to GND.*/
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO = 0, /**< Automatically determine clock type based on function
+ in cvmx-helper-board.c. */
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3<<3, /**< Mask for clock speed field */
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1<<3, /**< Speed of reference clock or crystal */
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2<<3, /**< Speed of reference clock */
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3<<3, /**< Speed of reference clock */
+ /* Bits 3-4 used to encode the clock frequency */
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1<<5, /**< Disable DMA and use polled IO for USB data transfers */
+ CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS = 1<<16, /**< Enable extra console output for debugging USB transfers */
+ CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLBACKS = 1<<17, /**< Enable extra console output for debugging USB callbacks */
+ CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO = 1<<18, /**< Enable extra console output for USB informational data */
+ CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS = 1<<19, /**< Enable extra console output for every function call */
+ CVMX_USB_INITIALIZE_FLAGS_DEBUG_CSRS = 1<<20, /**< Enable extra console output for every CSR access */
+ CVMX_USB_INITIALIZE_FLAGS_DEBUG_ALL = ((CVMX_USB_INITIALIZE_FLAGS_DEBUG_CSRS<<1)-1) - (CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS-1),
+} cvmx_usb_initialize_flags_t;
+
+/**
+ * Flags for passing when a pipe is created. Currently no flags
+ * need to be passed.
+ */
+typedef enum
+{
+ CVMX_USB_PIPE_FLAGS_DEBUG_TRANSFERS = 1<<15,/**< Used to display CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS for a specific pipe only */
+ __CVMX_USB_PIPE_FLAGS_OPEN = 1<<16, /**< Used internally to determine if a pipe is open. Do not use */
+ __CVMX_USB_PIPE_FLAGS_SCHEDULED = 1<<17, /**< Used internally to determine if a pipe is actively using hardware. Do not use */
+ __CVMX_USB_PIPE_FLAGS_NEED_PING = 1<<18, /**< Used internally to determine if a high speed pipe is in the ping state. Do not use */
+} cvmx_usb_pipe_flags_t;
+
+/**
+ * Return the number of USB ports supported by this Octeon
+ * chip. If the chip doesn't support USB, or is not supported
+ * by this API, a zero will be returned. Most Octeon chips
+ * support one usb port, but some support two ports.
+ * Each port must be initialized independently with its own
+ * cvmx_usb_state_t structure via cvmx_usb_initialize().
+ *
+ * @return Number of ports, zero if usb isn't supported
+ */
+extern int cvmx_usb_get_num_ports(void);
+
+/**
+ * Initialize a USB port for use. This must be called before any
+ * other access to the Octeon USB port is made. The port starts
+ * off in the disabled state.
+ *
+ * @param state Pointer to an empty cvmx_usb_state_t structure
+ * that will be populated by the initialize call.
+ * This structure is then passed to all other USB
+ * functions.
+ * @param usb_port_number
+ * Which Octeon USB port to initialize.
+ * @param flags Flags to control hardware initialization. See
+ * cvmx_usb_initialize_flags_t for the flag
+ * definitions. Some flags are mandatory.
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+extern cvmx_usb_status_t cvmx_usb_initialize(cvmx_usb_state_t *state,
+ int usb_port_number,
+ cvmx_usb_initialize_flags_t flags);
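+
+/*
+ * Example (a minimal sketch): initialize port 0 with automatic
+ * clock detection. Depending on the board, explicit clock flags
+ * may be required instead.
+ *
+ *   static cvmx_usb_state_t state;
+ *   cvmx_usb_status_t result = cvmx_usb_initialize(&state, 0,
+ *       CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO);
+ *   if (result == CVMX_USB_SUCCESS)
+ *       result = cvmx_usb_enable(&state);
+ */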
+
+/**
+ * Shutdown a USB port after a call to cvmx_usb_initialize().
+ * The port should be disabled with all pipes closed when this
+ * function is called.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+extern cvmx_usb_status_t cvmx_usb_shutdown(cvmx_usb_state_t *state);
+
+/**
+ * Enable a USB port. After this call succeeds, the USB port is
+ * online and servicing requests.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+extern cvmx_usb_status_t cvmx_usb_enable(cvmx_usb_state_t *state);
+
+/**
+ * Disable a USB port. After this call the USB port will not
+ * generate data transfers and will not generate events.
+ * Transactions in process will fail and call their
+ * associated callbacks.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+extern cvmx_usb_status_t cvmx_usb_disable(cvmx_usb_state_t *state);
+
+/**
+ * Get the current state of the USB port. Use this call to
+ * determine if the usb port has anything connected, is enabled,
+ * or has some sort of error condition. The return value of this
+ * call has "changed" bits that signal whether the values of some fields
+ * have changed between calls. These "changed" fields are based
+ * on the last call to cvmx_usb_set_status(). In order to clear
+ * them, you must update the status through cvmx_usb_set_status().
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return Port status information
+ */
+extern cvmx_usb_port_status_t cvmx_usb_get_status(cvmx_usb_state_t *state);
+
+/**
+ * Set the current state of the USB port. The status is used as
+ * a reference for the "changed" bits returned by
+ * cvmx_usb_get_status(). Other than serving as a reference, the
+ * status passed to this function is not used. No fields can be
+ * changed through this call.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param port_status
+ * Port status to set, most likely as returned by cvmx_usb_get_status().
+ */
+extern void cvmx_usb_set_status(cvmx_usb_state_t *state, cvmx_usb_port_status_t port_status);
+
+/**
+ * Open a virtual pipe between the host and a USB device. A pipe
+ * must be opened before data can be transferred between a device
+ * and Octeon.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param flags Optional pipe flags defined in
+ * cvmx_usb_pipe_flags_t.
+ * @param device_addr
+ * USB device address to open the pipe to
+ * (0-127).
+ * @param endpoint_num
+ * USB endpoint number to open the pipe to
+ * (0-15).
+ * @param device_speed
+ * The speed of the device the pipe is going
+ * to. This must match the device's speed,
+ * which may be different from the port speed.
+ * @param max_packet The maximum packet length the device can
+ * transmit/receive (low speed=0-8, full
+ * speed=0-1023, high speed=0-1024). This value
+ * comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <10:0>.
+ * @param transfer_type
+ * The type of transfer this pipe is for.
+ * @param transfer_dir
+ * The direction the pipe is in. This is not
+ * used for control pipes.
+ * @param interval For ISOCHRONOUS and INTERRUPT transfers,
+ * this is how often the transfer is scheduled
+ * for. All other transfers should specify
+ * zero. The units are in frames (8000/sec at
+ * high speed, 1000/sec for full speed).
+ * @param multi_count
+ * For high speed devices, this is the maximum
+ * allowed number of packets per microframe.
+ * Specify zero for non high speed devices. This
+ * value comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <12:11>.
+ * @param hub_device_addr
+ * Hub device address this device is connected
+ * to. Devices connected directly to Octeon
+ * use zero. This is only used when the device
+ * is full/low speed behind a high speed hub.
+ * The address will be of the high speed hub,
+ * not any full speed hubs after it.
+ * @param hub_port Which port on the hub the device is
+ * connected to. Use zero for devices connected
+ * directly to Octeon. Like hub_device_addr,
+ * this is only used for full/low speed
+ * devices behind a high speed hub.
+ *
+ * @return A non negative value is a pipe handle. Negative
+ * values are failure codes from cvmx_usb_status_t.
+ */
+extern int cvmx_usb_open_pipe(cvmx_usb_state_t *state,
+ cvmx_usb_pipe_flags_t flags,
+ int device_addr, int endpoint_num,
+ cvmx_usb_speed_t device_speed, int max_packet,
+ cvmx_usb_transfer_t transfer_type,
+ cvmx_usb_direction_t transfer_dir, int interval,
+ int multi_count, int hub_device_addr,
+ int hub_port);
+
+/**
+ * Call to submit a USB Bulk transfer to a pipe.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Handle to the pipe for the transfer.
+ * @param buffer Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @param buffer_length
+ * Length of buffer in bytes.
+ * @param callback Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @param user_data User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * @return A submitted transaction handle or negative on
+ * failure. Negative values are failure codes from
+ * cvmx_usb_status_t.
+ */
+extern int cvmx_usb_submit_bulk(cvmx_usb_state_t *state, int pipe_handle,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data);
+
+/**
+ * Call to submit a USB Interrupt transfer to a pipe.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Handle to the pipe for the transfer.
+ * @param buffer Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @param buffer_length
+ * Length of buffer in bytes.
+ * @param callback Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @param user_data User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * @return A submitted transaction handle or negative on
+ * failure. Negative values are failure codes from
+ * cvmx_usb_status_t.
+ */
+extern int cvmx_usb_submit_interrupt(cvmx_usb_state_t *state, int pipe_handle,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data);
+
+/**
+ * Call to submit a USB Control transfer to a pipe.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Handle to the pipe for the transfer.
+ * @param control_header
+ * USB 8 byte control header physical address.
+ * Note that this is NOT A POINTER, but the
+ * full 64bit physical address of the buffer.
+ * @param buffer Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @param buffer_length
+ * Length of buffer in bytes.
+ * @param callback Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @param user_data User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * @return A submitted transaction handle or negative on
+ * failure. Negative values are failure codes from
+ * cvmx_usb_status_t.
+ */
+extern int cvmx_usb_submit_control(cvmx_usb_state_t *state, int pipe_handle,
+ uint64_t control_header,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data);
+
+/**
+ * Flags to pass the cvmx_usb_submit_isochronous() function.
+ */
+typedef enum
+{
+ CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT = 1<<0, /**< Do not return an error if a transfer is less than the maximum packet size of the device */
+ CVMX_USB_ISOCHRONOUS_FLAGS_ASAP = 1<<1, /**< Schedule the transaction as soon as possible */
+} cvmx_usb_isochronous_flags_t;
+
+/**
+ * Call to submit a USB Isochronous transfer to a pipe.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Handle to the pipe for the transfer.
+ * @param start_frame
+ * Number of frames into the future to schedule
+ * this transaction.
+ * @param flags Flags to control the transfer. See
+ * cvmx_usb_isochronous_flags_t for the flag
+ * definitions.
+ * @param number_packets
+ * Number of sequential packets to transfer.
+ * "packets" is a pointer to an array of this
+ * many packet structures.
+ * @param packets Description of each transfer packet as
+ * defined by cvmx_usb_iso_packet_t. The array
+ * pointed to here must stay valid until the
+ * complete callback is called.
+ * @param buffer Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @param buffer_length
+ * Length of buffer in bytes.
+ * @param callback Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @param user_data User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * @return A submitted transaction handle or negative on
+ * failure. Negative values are failure codes from
+ * cvmx_usb_status_t.
+ */
+extern int cvmx_usb_submit_isochronous(cvmx_usb_state_t *state, int pipe_handle,
+ int start_frame, int flags,
+ int number_packets,
+ cvmx_usb_iso_packet_t packets[],
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data);
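+
+/*
+ * Example (a minimal sketch): submit a two packet isochronous
+ * transfer as soon as possible. The packets array must stay valid
+ * until the completion callback runs; pipe_handle, buffer_phys
+ * and buffer_len are assumed to come from the caller.
+ *
+ *   cvmx_usb_iso_packet_t packets[2] = {
+ *       { 0,   512, CVMX_USB_COMPLETE_SUCCESS },
+ *       { 512, 512, CVMX_USB_COMPLETE_SUCCESS },
+ *   };
+ *   cvmx_usb_submit_isochronous(&state, pipe_handle, 0,
+ *       CVMX_USB_ISOCHRONOUS_FLAGS_ASAP, 2, packets,
+ *       buffer_phys, buffer_len, complete_callback, NULL);
+ */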
+
+/**
+ * Cancel one outstanding request in a pipe. Canceling a request
+ * can fail if the transaction has already completed before cancel
+ * is called. Even after a successful cancel call, it may take
+ * a frame or two for the cvmx_usb_poll() function to call the
+ * associated callback.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Pipe handle to cancel requests in.
+ * @param submit_handle
+ * Handle to transaction to cancel, returned by the submit function.
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+extern cvmx_usb_status_t cvmx_usb_cancel(cvmx_usb_state_t *state,
+ int pipe_handle, int submit_handle);
+
+
+/**
+ * Cancel all outstanding requests in a pipe. Logically all this
+ * does is call cvmx_usb_cancel() in a loop.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Pipe handle to cancel requests in.
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+extern cvmx_usb_status_t cvmx_usb_cancel_all(cvmx_usb_state_t *state,
+ int pipe_handle);
+
+/**
+ * Close a pipe created with cvmx_usb_open_pipe().
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param pipe_handle
+ * Pipe handle to close.
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t. CVMX_USB_BUSY is returned if the
+ * pipe has outstanding transfers.
+ */
+extern cvmx_usb_status_t cvmx_usb_close_pipe(cvmx_usb_state_t *state,
+ int pipe_handle);
+
+/**
+ * Register a function to be called when various USB events occur.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ * @param reason Which event to register for.
+ * @param callback Function to call when the event occurs.
+ * @param user_data User data parameter to the function.
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+extern cvmx_usb_status_t cvmx_usb_register_callback(cvmx_usb_state_t *state,
+ cvmx_usb_callback_t reason,
+ cvmx_usb_callback_func_t callback,
+ void *user_data);
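+
+/*
+ * Example (a minimal sketch): register a port change handler,
+ * passing OS specific state (here a hypothetical "hcd" pointer)
+ * as user_data.
+ *
+ *   cvmx_usb_register_callback(&state, CVMX_USB_CALLBACK_PORT_CHANGED,
+ *                              port_callback, hcd);
+ */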
+
+/**
+ * Get the current USB protocol level frame number. The frame
+ * number is always in the range of 0-0x7ff.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return USB frame number
+ */
+extern int cvmx_usb_get_frame_number(cvmx_usb_state_t *state);
+
+/**
+ * Poll the USB block for status and call all needed callback
+ * handlers. This function is meant to be called in the interrupt
+ * handler for the USB controller. It can also be called
+ * periodically in a loop for non-interrupt based operation.
+ *
+ * @param state USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return CVMX_USB_SUCCESS or a negative error code defined in
+ * cvmx_usb_status_t.
+ */
+extern cvmx_usb_status_t cvmx_usb_poll(cvmx_usb_state_t *state);
+
+/*
+ * The FreeBSD host driver uses these functions to manipulate the toggle to deal
+ * more easily with endpoint management.
+ */
+extern void cvmx_usb_set_toggle(cvmx_usb_state_t *state, int endpoint_num, int toggle);
+extern int cvmx_usb_get_toggle(cvmx_usb_state_t *state, int endpoint_num);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_USB_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-usb.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-usbcx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-usbcx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-usbcx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,4255 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-usbcx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon usbcx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_USBCX_DEFS_H__
+#define __CVMX_USBCX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DAINT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_DAINT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000818ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_DAINT(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000818ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DAINTMSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_DAINTMSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F001000081Cull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_DAINTMSK(block_id) (CVMX_ADD_IO_SEG(0x00016F001000081Cull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DCFG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_DCFG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000800ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_DCFG(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000800ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DCTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_DCTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000804ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_DCTL(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000804ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DIEPCTLX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 4)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 4)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_DIEPCTLX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000900ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32;
+}
+#else
+#define CVMX_USBCX_DIEPCTLX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010000900ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DIEPINTX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 4)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 4)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_DIEPINTX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000908ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32;
+}
+#else
+#define CVMX_USBCX_DIEPINTX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010000908ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DIEPMSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_DIEPMSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000810ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_DIEPMSK(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000810ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DIEPTSIZX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 4)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 4)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_DIEPTSIZX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000910ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32;
+}
+#else
+#define CVMX_USBCX_DIEPTSIZX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010000910ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DOEPCTLX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 4)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 4)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_DOEPCTLX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000B00ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32;
+}
+#else
+#define CVMX_USBCX_DOEPCTLX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010000B00ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DOEPINTX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 4)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 4)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_DOEPINTX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000B08ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32;
+}
+#else
+#define CVMX_USBCX_DOEPINTX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010000B08ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DOEPMSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_DOEPMSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000814ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_DOEPMSK(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000814ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DOEPTSIZX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 4)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 4)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 4)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_DOEPTSIZX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000B10ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32;
+}
+#else
+#define CVMX_USBCX_DOEPTSIZX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010000B10ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DPTXFSIZX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((((offset >= 1) && (offset <= 4))) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((((offset >= 1) && (offset <= 4))) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((((offset >= 1) && (offset <= 4))) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((((offset >= 1) && (offset <= 4))) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((((offset >= 1) && (offset <= 4))) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_DPTXFSIZX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000100ull) + (((offset) & 7) + ((block_id) & 1) * 0x40000000000ull) * 4;
+}
+#else
+#define CVMX_USBCX_DPTXFSIZX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010000100ull) + (((offset) & 7) + ((block_id) & 1) * 0x40000000000ull) * 4)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DSTS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_DSTS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000808ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_DSTS(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000808ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DTKNQR1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_DTKNQR1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000820ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_DTKNQR1(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000820ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DTKNQR2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_DTKNQR2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000824ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_DTKNQR2(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000824ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DTKNQR3(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_DTKNQR3(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000830ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_DTKNQR3(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000830ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_DTKNQR4(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_DTKNQR4(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000834ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_DTKNQR4(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000834ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GAHBCFG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GAHBCFG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000008ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GAHBCFG(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000008ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GHWCFG1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GHWCFG1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000044ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GHWCFG1(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000044ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GHWCFG2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GHWCFG2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000048ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GHWCFG2(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000048ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GHWCFG3(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GHWCFG3(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F001000004Cull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GHWCFG3(block_id) (CVMX_ADD_IO_SEG(0x00016F001000004Cull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GHWCFG4(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GHWCFG4(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000050ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GHWCFG4(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000050ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GINTMSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GINTMSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000018ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GINTMSK(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000018ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GINTSTS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GINTSTS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000014ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GINTSTS(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000014ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GNPTXFSIZ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GNPTXFSIZ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000028ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GNPTXFSIZ(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000028ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GNPTXSTS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GNPTXSTS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F001000002Cull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GNPTXSTS(block_id) (CVMX_ADD_IO_SEG(0x00016F001000002Cull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GOTGCTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GOTGCTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000000ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GOTGCTL(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000000ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GOTGINT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GOTGINT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000004ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GOTGINT(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000004ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GRSTCTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GRSTCTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000010ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GRSTCTL(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000010ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GRXFSIZ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GRXFSIZ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000024ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GRXFSIZ(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000024ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GRXSTSPD(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GRXSTSPD(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010040020ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GRXSTSPD(block_id) (CVMX_ADD_IO_SEG(0x00016F0010040020ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GRXSTSPH(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GRXSTSPH(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000020ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GRXSTSPH(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000020ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GRXSTSRD(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GRXSTSRD(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F001004001Cull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GRXSTSRD(block_id) (CVMX_ADD_IO_SEG(0x00016F001004001Cull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GRXSTSRH(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GRXSTSRH(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F001000001Cull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GRXSTSRH(block_id) (CVMX_ADD_IO_SEG(0x00016F001000001Cull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GSNPSID(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GSNPSID(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000040ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GSNPSID(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000040ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_GUSBCFG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_GUSBCFG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F001000000Cull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_GUSBCFG(block_id) (CVMX_ADD_IO_SEG(0x00016F001000000Cull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HAINT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_HAINT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000414ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_HAINT(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000414ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HAINTMSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_HAINTMSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000418ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_HAINTMSK(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000418ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HCCHARX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 7)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 7)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_HCCHARX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000500ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32;
+}
+#else
+#define CVMX_USBCX_HCCHARX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010000500ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HCFG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_HCFG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000400ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_HCFG(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000400ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HCINTMSKX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 7)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 7)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_HCINTMSKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F001000050Cull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32;
+}
+#else
+#define CVMX_USBCX_HCINTMSKX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F001000050Cull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HCINTX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 7)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 7)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_HCINTX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000508ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32;
+}
+#else
+#define CVMX_USBCX_HCINTX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010000508ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HCSPLTX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 7)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 7)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_HCSPLTX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000504ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32;
+}
+#else
+#define CVMX_USBCX_HCSPLTX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010000504ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HCTSIZX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 7)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 7)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_HCTSIZX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000510ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32;
+}
+#else
+#define CVMX_USBCX_HCTSIZX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010000510ull) + (((offset) & 7) + ((block_id) & 1) * 0x8000000000ull) * 32)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HFIR(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_HFIR(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000404ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_HFIR(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000404ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HFNUM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_HFNUM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000408ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_HFNUM(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000408ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HPRT(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_HPRT(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000440ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_HPRT(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000440ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HPTXFSIZ(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_HPTXFSIZ(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000100ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_HPTXFSIZ(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000100ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_HPTXSTS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_HPTXSTS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000410ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_HPTXSTS(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000410ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_NPTXDFIFOX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 7)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 7)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 7)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_USBCX_NPTXDFIFOX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010001000ull) + (((offset) & 7) + ((block_id) & 1) * 0x100000000ull) * 4096;
+}
+#else
+#define CVMX_USBCX_NPTXDFIFOX(offset, block_id) (CVMX_ADD_IO_SEG(0x00016F0010001000ull) + (((offset) & 7) + ((block_id) & 1) * 0x100000000ull) * 4096)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBCX_PCGCCTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBCX_PCGCCTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0010000E00ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBCX_PCGCCTL(block_id) (CVMX_ADD_IO_SEG(0x00016F0010000E00ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+
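+/* Editor's note: a usage sketch, not part of the generated SDK header.
+ * The CVMX_USBCX_* helpers above only compute CSR addresses (and, when
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING is set, warn about out-of-range block
+ * IDs); the access itself is assumed to go through the 32-bit IO helpers
+ * declared in cvmx-access.h. The function name below is hypothetical. */
+static inline uint32_t cvmx_usbcx_gintsts_read(unsigned long usb_port)
+{
+    /* Form the CSR address, then issue the 32-bit load. */
+    return cvmx_read64_uint32(CVMX_USBCX_GINTSTS(usb_port));
+}
+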
+/**
+ * cvmx_usbc#_daint
+ *
+ * Device All Endpoints Interrupt Register (DAINT)
+ *
+ * When a significant event occurs on an endpoint, the Device All Endpoints Interrupt register
+ * interrupts the application using the Device OUT Endpoints Interrupt bit or Device IN Endpoints
+ * Interrupt bit of the Core Interrupt register (GINTSTS.OEPInt or GINTSTS.IEPInt, respectively).
+ * There is one interrupt bit per endpoint, up to a maximum of 16 bits for OUT endpoints and 16
+ * bits for IN endpoints. For a bidirectional endpoint, the corresponding IN and OUT interrupt
+ * bits are used. Bits in this register are set and cleared when the application sets and clears
+ * bits in the corresponding Device Endpoint-n Interrupt register (DIEPINTn/DOEPINTn).
+ */
+union cvmx_usbcx_daint {
+ uint32_t u32;
+ struct cvmx_usbcx_daint_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t outepint : 16; /**< OUT Endpoint Interrupt Bits (OutEPInt)
+ One bit per OUT endpoint:
+ Bit 16 for OUT endpoint 0, bit 31 for OUT endpoint 15 */
+ uint32_t inepint : 16; /**< IN Endpoint Interrupt Bits (InEpInt)
+ One bit per IN Endpoint:
+ Bit 0 for IN endpoint 0, bit 15 for IN endpoint 15 */
+#else
+ uint32_t inepint : 16;
+ uint32_t outepint : 16;
+#endif
+ } s;
+ struct cvmx_usbcx_daint_s cn30xx;
+ struct cvmx_usbcx_daint_s cn31xx;
+ struct cvmx_usbcx_daint_s cn50xx;
+ struct cvmx_usbcx_daint_s cn52xx;
+ struct cvmx_usbcx_daint_s cn52xxp1;
+ struct cvmx_usbcx_daint_s cn56xx;
+ struct cvmx_usbcx_daint_s cn56xxp1;
+};
+typedef union cvmx_usbcx_daint cvmx_usbcx_daint_t;
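+
+/* Editor's note: an illustrative sketch (hypothetical helper name) of the
+ * DAINT layout documented above -- IN endpoint bits in [15:0], OUT endpoint
+ * bits in [31:16]. CVMX_USBCX_DAINT() is assumed to be defined earlier in
+ * this header alongside the other address accessors. */
+static inline int cvmx_usbcx_daint_in_pending(unsigned long usb_port, int ep)
+{
+    cvmx_usbcx_daint_t daint;
+    daint.u32 = cvmx_read64_uint32(CVMX_USBCX_DAINT(usb_port));
+    return (daint.s.inepint >> ep) & 1;   /* bit n = IN endpoint n */
+}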
+
+/**
+ * cvmx_usbc#_daintmsk
+ *
+ * Device All Endpoints Interrupt Mask Register (DAINTMSK)
+ *
+ * The Device Endpoint Interrupt Mask register works with the Device Endpoint Interrupt register
+ * to interrupt the application when an event occurs on a device endpoint. Even when an
+ * interrupt is masked here, however, the corresponding Device All Endpoints Interrupt (DAINT)
+ * register bit is still set.
+ * Mask Interrupt: 1'b0 Unmask Interrupt: 1'b1
+ */
+union cvmx_usbcx_daintmsk {
+ uint32_t u32;
+ struct cvmx_usbcx_daintmsk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t outepmsk : 16; /**< OUT EP Interrupt Mask Bits (OutEpMsk)
+ One per OUT Endpoint:
+ Bit 16 for OUT EP 0, bit 31 for OUT EP 15 */
+ uint32_t inepmsk : 16; /**< IN EP Interrupt Mask Bits (InEpMsk)
+ One bit per IN Endpoint:
+ Bit 0 for IN EP 0, bit 15 for IN EP 15 */
+#else
+ uint32_t inepmsk : 16;
+ uint32_t outepmsk : 16;
+#endif
+ } s;
+ struct cvmx_usbcx_daintmsk_s cn30xx;
+ struct cvmx_usbcx_daintmsk_s cn31xx;
+ struct cvmx_usbcx_daintmsk_s cn50xx;
+ struct cvmx_usbcx_daintmsk_s cn52xx;
+ struct cvmx_usbcx_daintmsk_s cn52xxp1;
+ struct cvmx_usbcx_daintmsk_s cn56xx;
+ struct cvmx_usbcx_daintmsk_s cn56xxp1;
+};
+typedef union cvmx_usbcx_daintmsk cvmx_usbcx_daintmsk_t;
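+
+/* Editor's note: a sketch of the mask polarity stated above (1'b1 unmasks,
+ * 1'b0 masks); the helper name and the CVMX_USBCX_DAINTMSK() accessor are
+ * assumptions. Unmask the DAINT summary bit for one IN endpoint: */
+static inline void cvmx_usbcx_daintmsk_unmask_in(unsigned long usb_port, int ep)
+{
+    cvmx_usbcx_daintmsk_t msk;
+    msk.u32 = cvmx_read64_uint32(CVMX_USBCX_DAINTMSK(usb_port));
+    msk.s.inepmsk |= 1u << ep;   /* 1 = unmask */
+    cvmx_write64_uint32(CVMX_USBCX_DAINTMSK(usb_port), msk.u32);
+}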
+
+/**
+ * cvmx_usbc#_dcfg
+ *
+ * Device Configuration Register (DCFG)
+ *
+ * This register configures the core in Device mode after power-on or after certain control
+ * commands or enumeration. Do not make changes to this register after initial programming.
+ */
+union cvmx_usbcx_dcfg {
+ uint32_t u32;
+ struct cvmx_usbcx_dcfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31 : 9;
+ uint32_t epmiscnt : 5; /**< IN Endpoint Mismatch Count (EPMisCnt)
+ The application programs this field with a count that determines
+ when the core generates an Endpoint Mismatch interrupt
+ (GINTSTS.EPMis). The core loads this value into an internal
+ counter and decrements it. The counter is reloaded whenever
+ there is a match or when the counter expires. The width of this
+ counter depends on the depth of the Token Queue. */
+ uint32_t reserved_13_17 : 5;
+ uint32_t perfrint : 2; /**< Periodic Frame Interval (PerFrInt)
+ Indicates the time within a (micro)frame at which the application
+ must be notified using the End Of Periodic Frame Interrupt. This
+ can be used to determine if all the isochronous traffic for that
+ (micro)frame is complete.
+ * 2'b00: 80% of the (micro)frame interval
+ * 2'b01: 85%
+ * 2'b10: 90%
+ * 2'b11: 95% */
+ uint32_t devaddr : 7; /**< Device Address (DevAddr)
+ The application must program this field after every SetAddress
+ control command. */
+ uint32_t reserved_3_3 : 1;
+ uint32_t nzstsouthshk : 1; /**< Non-Zero-Length Status OUT Handshake (NZStsOUTHShk)
+ The application can use this field to select the handshake the
+ core sends on receiving a nonzero-length data packet during
+ the OUT transaction of a control transfer's Status stage.
+ * 1'b1: Send a STALL handshake on a nonzero-length status
+ OUT transaction and do not send the received OUT packet to
+ the application.
+ * 1'b0: Send the received OUT packet to the application (zero-
+ length or nonzero-length) and send a handshake based on
+ the NAK and STALL bits for the endpoint in the Device
+ Endpoint Control register. */
+ uint32_t devspd : 2; /**< Device Speed (DevSpd)
+ Indicates the speed at which the application requires the core to
+ enumerate, or the maximum speed the application can support.
+ However, the actual bus speed is determined only after the
+ chirp sequence is completed, and is based on the speed of the
+ USB host to which the core is connected. See "Device
+ Initialization" on page 249 for details.
+ * 2'b00: High speed (USB 2.0 PHY clock is 30 MHz or 60 MHz)
+ * 2'b01: Full speed (USB 2.0 PHY clock is 30 MHz or 60 MHz)
+ * 2'b10: Low speed (USB 1.1 transceiver clock is 6 MHz). If
+ you select 6 MHz LS mode, you must do a soft reset.
+ * 2'b11: Full speed (USB 1.1 transceiver clock is 48 MHz) */
+#else
+ uint32_t devspd : 2;
+ uint32_t nzstsouthshk : 1;
+ uint32_t reserved_3_3 : 1;
+ uint32_t devaddr : 7;
+ uint32_t perfrint : 2;
+ uint32_t reserved_13_17 : 5;
+ uint32_t epmiscnt : 5;
+ uint32_t reserved_23_31 : 9;
+#endif
+ } s;
+ struct cvmx_usbcx_dcfg_s cn30xx;
+ struct cvmx_usbcx_dcfg_s cn31xx;
+ struct cvmx_usbcx_dcfg_s cn50xx;
+ struct cvmx_usbcx_dcfg_s cn52xx;
+ struct cvmx_usbcx_dcfg_s cn52xxp1;
+ struct cvmx_usbcx_dcfg_s cn56xx;
+ struct cvmx_usbcx_dcfg_s cn56xxp1;
+};
+typedef union cvmx_usbcx_dcfg cvmx_usbcx_dcfg_t;
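+
+/* Editor's note: a sketch of the DevAddr rule above -- the field must be
+ * reprogrammed after every SetAddress control command. The helper name and
+ * the CVMX_USBCX_DCFG() accessor are assumptions. */
+static inline void cvmx_usbcx_dcfg_set_address(unsigned long usb_port, int addr)
+{
+    cvmx_usbcx_dcfg_t dcfg;
+    dcfg.u32 = cvmx_read64_uint32(CVMX_USBCX_DCFG(usb_port));
+    dcfg.s.devaddr = addr & 0x7f;   /* 7-bit USB device address */
+    cvmx_write64_uint32(CVMX_USBCX_DCFG(usb_port), dcfg.u32);
+}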
+
+/**
+ * cvmx_usbc#_dctl
+ *
+ * Device Control Register (DCTL)
+ *
+ */
+union cvmx_usbcx_dctl {
+ uint32_t u32;
+ struct cvmx_usbcx_dctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_12_31 : 20;
+ uint32_t pwronprgdone : 1; /**< Power-On Programming Done (PWROnPrgDone)
+ The application uses this bit to indicate that register
+ programming is completed after a wake-up from Power Down
+ mode. For more information, see "Device Mode Suspend and
+ Resume With Partial Power-Down" on page 357. */
+ uint32_t cgoutnak : 1; /**< Clear Global OUT NAK (CGOUTNak)
+ A write to this field clears the Global OUT NAK. */
+ uint32_t sgoutnak : 1; /**< Set Global OUT NAK (SGOUTNak)
+ A write to this field sets the Global OUT NAK.
+ The application uses this bit to send a NAK handshake on all
+ OUT endpoints.
+ The application should set this bit only after making sure
+ that the Global OUT NAK Effective bit in the Core Interrupt
+ Register (GINTSTS.GOUTNakEff) is cleared. */
+ uint32_t cgnpinnak : 1; /**< Clear Global Non-Periodic IN NAK (CGNPInNak)
+ A write to this field clears the Global Non-Periodic IN NAK. */
+ uint32_t sgnpinnak : 1; /**< Set Global Non-Periodic IN NAK (SGNPInNak)
+ A write to this field sets the Global Non-Periodic IN NAK. The
+ application uses this bit to send a NAK handshake on all non-
+ periodic IN endpoints. The core can also set this bit when a
+ timeout condition is detected on a non-periodic endpoint.
+ The application should set this bit only after making sure that
+ the Global IN NAK Effective bit in the Core Interrupt Register
+ (GINTSTS.GINNakEff) is cleared. */
+ uint32_t tstctl : 3; /**< Test Control (TstCtl)
+ * 3'b000: Test mode disabled
+ * 3'b001: Test_J mode
+ * 3'b010: Test_K mode
+ * 3'b011: Test_SE0_NAK mode
+ * 3'b100: Test_Packet mode
+ * 3'b101: Test_Force_Enable
+ * Others: Reserved */
+ uint32_t goutnaksts : 1; /**< Global OUT NAK Status (GOUTNakSts)
+ * 1'b0: A handshake is sent based on the FIFO Status and the
+ NAK and STALL bit settings.
+ * 1'b1: No data is written to the RxFIFO, irrespective of space
+ availability. Sends a NAK handshake on all packets, except
+ on SETUP transactions. All isochronous OUT packets are
+ dropped. */
+ uint32_t gnpinnaksts : 1; /**< Global Non-Periodic IN NAK Status (GNPINNakSts)
+ * 1'b0: A handshake is sent out based on the data availability
+ in the transmit FIFO.
+ * 1'b1: A NAK handshake is sent out on all non-periodic IN
+ endpoints, irrespective of the data availability in the transmit
+ FIFO. */
+ uint32_t sftdiscon : 1; /**< Soft Disconnect (SftDiscon)
+ The application uses this bit to signal the O2P USB core to do a
+ soft disconnect. As long as this bit is set, the host will not see
+ that the device is connected, and the device will not receive
+ signals on the USB. The core stays in the disconnected state
+ until the application clears this bit.
+ The minimum duration for which the core must keep this bit set
+ is specified in Minimum Duration for Soft Disconnect.
+ * 1'b0: Normal operation. When this bit is cleared after a soft
+ disconnect, the core drives the phy_opmode_o signal on the
+ UTMI+ to 2'b00, which generates a device connect event to
+ the USB host. When the device is reconnected, the USB host
+ restarts device enumeration.
+ * 1'b1: The core drives the phy_opmode_o signal on the
+ UTMI+ to 2'b01, which generates a device disconnect event
+ to the USB host. */
+ uint32_t rmtwkupsig : 1; /**< Remote Wakeup Signaling (RmtWkUpSig)
+ When the application sets this bit, the core initiates remote
+ signaling to wake up the USB host. The application must set this
+ bit to get the core out of Suspended state and must clear this bit
+ after the core comes out of Suspended state. */
+#else
+ uint32_t rmtwkupsig : 1;
+ uint32_t sftdiscon : 1;
+ uint32_t gnpinnaksts : 1;
+ uint32_t goutnaksts : 1;
+ uint32_t tstctl : 3;
+ uint32_t sgnpinnak : 1;
+ uint32_t cgnpinnak : 1;
+ uint32_t sgoutnak : 1;
+ uint32_t cgoutnak : 1;
+ uint32_t pwronprgdone : 1;
+ uint32_t reserved_12_31 : 20;
+#endif
+ } s;
+ struct cvmx_usbcx_dctl_s cn30xx;
+ struct cvmx_usbcx_dctl_s cn31xx;
+ struct cvmx_usbcx_dctl_s cn50xx;
+ struct cvmx_usbcx_dctl_s cn52xx;
+ struct cvmx_usbcx_dctl_s cn52xxp1;
+ struct cvmx_usbcx_dctl_s cn56xx;
+ struct cvmx_usbcx_dctl_s cn56xxp1;
+};
+typedef union cvmx_usbcx_dctl cvmx_usbcx_dctl_t;
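+
+/* Editor's note: a sketch of the SftDiscon sequence described above -- set
+ * the bit to detach, hold it for at least the minimum soft-disconnect
+ * duration, then clear it so the host restarts enumeration. The helper name
+ * and the CVMX_USBCX_DCTL() accessor are assumptions; the delay is left to
+ * the caller. */
+static inline void cvmx_usbcx_dctl_soft_disconnect(unsigned long usb_port)
+{
+    cvmx_usbcx_dctl_t dctl;
+    dctl.u32 = cvmx_read64_uint32(CVMX_USBCX_DCTL(usb_port));
+    dctl.s.sftdiscon = 1;   /* host sees a device disconnect event */
+    cvmx_write64_uint32(CVMX_USBCX_DCTL(usb_port), dctl.u32);
+    /* ... wait at least the minimum duration for soft disconnect ... */
+    dctl.s.sftdiscon = 0;   /* reconnect; host re-enumerates the device */
+    cvmx_write64_uint32(CVMX_USBCX_DCTL(usb_port), dctl.u32);
+}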
+
+/**
+ * cvmx_usbc#_diepctl#
+ *
+ * Device IN Endpoint-n Control Register (DIEPCTLn)
+ *
+ * The application uses this register to control the behavior of each logical endpoint other than endpoint 0.
+ */
+union cvmx_usbcx_diepctlx {
+ uint32_t u32;
+ struct cvmx_usbcx_diepctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t epena : 1; /**< Endpoint Enable (EPEna)
+ Indicates that data is ready to be transmitted on the endpoint.
+ The core clears this bit before setting any of the following
+ interrupts on this endpoint:
+ * Endpoint Disabled
+ * Transfer Completed */
+ uint32_t epdis : 1; /**< Endpoint Disable (EPDis)
+ The application sets this bit to stop transmitting data on an
+ endpoint, even before the transfer for that endpoint is complete.
+ The application must wait for the Endpoint Disabled interrupt
+ before treating the endpoint as disabled. The core clears this bit
+ before setting the Endpoint Disabled Interrupt. The application
+ should set this bit only if Endpoint Enable is already set for this
+ endpoint. */
+ uint32_t setd1pid : 1; /**< For Interrupt/BULK endpoints:
+ Set DATA1 PID (SetD1PID)
+ Writing to this field sets the Endpoint Data Pid (DPID) field in
+ this register to DATA1.
+ For Isochronous endpoints:
+ Set Odd (micro)frame (SetOddFr)
+ Writing to this field sets the Even/Odd (micro)frame (EO_FrNum)
+ field to odd (micro)frame. */
+ uint32_t setd0pid : 1; /**< For Interrupt/BULK endpoints:
+ Set DATA0 PID (SetD0PID)
+ Writing to this field sets the Endpoint Data Pid (DPID) field in
+ this register to DATA0.
+ For Isochronous endpoints:
+ Set Even (micro)frame (SetEvenFr)
+ Writing to this field sets the Even/Odd (micro)frame (EO_FrNum)
+ field to even (micro)frame. */
+ uint32_t snak : 1; /**< Set NAK (SNAK)
+ A write to this bit sets the NAK bit for the endpoint.
+ Using this bit, the application can control the transmission of
+ NAK handshakes on an endpoint. The core can also set this bit
+ for an endpoint after a SETUP packet is received on the
+ endpoint. */
+ uint32_t cnak : 1; /**< Clear NAK (CNAK)
+ A write to this bit clears the NAK bit for the endpoint. */
+ uint32_t txfnum : 4; /**< TxFIFO Number (TxFNum)
+ Non-periodic endpoints must set this bit to zero. Periodic
+ endpoints must map this to the corresponding Periodic TxFIFO
+ number.
+ * 4'h0: Non-Periodic TxFIFO
+ * Others: Specified Periodic TxFIFO number */
+ uint32_t stall : 1; /**< STALL Handshake (Stall)
+ For non-control, non-isochronous endpoints:
+ The application sets this bit to stall all tokens from the USB host
+ to this endpoint. If a NAK bit, Global Non-Periodic IN NAK, or
+ Global OUT NAK is set along with this bit, the STALL bit takes
+ priority. Only the application can clear this bit, never the core.
+ For control endpoints:
+ The application can only set this bit, and the core clears it, when
+ a SETUP token is received for this endpoint. If a NAK bit, Global
+ Non-Periodic IN NAK, or Global OUT NAK is set along with this
+ bit, the STALL bit takes priority. Irrespective of this bit's setting,
+ the core always responds to SETUP data packets with an ACK handshake. */
+ uint32_t reserved_20_20 : 1;
+ uint32_t eptype : 2; /**< Endpoint Type (EPType)
+ This is the transfer type supported by this logical endpoint.
+ * 2'b00: Control
+ * 2'b01: Isochronous
+ * 2'b10: Bulk
+ * 2'b11: Interrupt */
+ uint32_t naksts : 1; /**< NAK Status (NAKSts)
+ Indicates the following:
+ * 1'b0: The core is transmitting non-NAK handshakes based
+ on the FIFO status
+ * 1'b1: The core is transmitting NAK handshakes on this
+ endpoint.
+ When either the application or the core sets this bit:
+ * For non-isochronous IN endpoints: The core stops
+ transmitting any data on an IN endpoint, even if data is
+ available in the TxFIFO.
+ * For isochronous IN endpoints: The core sends out a zero-
+ length data packet, even if data is available in the TxFIFO.
+ Irrespective of this bit's setting, the core always responds to
+ SETUP data packets with an ACK handshake. */
+ uint32_t dpid : 1; /**< For interrupt/bulk IN and OUT endpoints:
+ Endpoint Data PID (DPID)
+ Contains the PID of the packet to be received or transmitted on
+ this endpoint. The application should program the PID of the first
+ packet to be received or transmitted on this endpoint, after the
+ endpoint is activated. Applications use the SetD1PID and
+ SetD0PID fields of this register to program either DATA0 or
+ DATA1 PID.
+ * 1'b0: DATA0
+ * 1'b1: DATA1
+ For isochronous IN and OUT endpoints:
+ Even/Odd (Micro)Frame (EO_FrNum)
+ Indicates the (micro)frame number in which the core transmits/
+ receives isochronous data for this endpoint. The application
+ should program the even/odd (micro) frame number in which it
+ intends to transmit/receive isochronous data for this endpoint
+ using the SetEvenFr and SetOddFr fields in this register.
+ * 1'b0: Even (micro)frame
+ * 1'b1: Odd (micro)frame */
+ uint32_t usbactep : 1; /**< USB Active Endpoint (USBActEP)
+ Indicates whether this endpoint is active in the current
+ configuration and interface. The core clears this bit for all
+ endpoints (other than EP 0) after detecting a USB reset. After
+ receiving the SetConfiguration and SetInterface commands, the
+ application must program endpoint registers accordingly and set
+ this bit. */
+ uint32_t nextep : 4; /**< Next Endpoint (NextEp)
+ Applies to non-periodic IN endpoints only.
+ Indicates the endpoint number to be fetched after the data for
+ the current endpoint is fetched. The core can access this field,
+ even when the Endpoint Enable (EPEna) bit is not set. This
+ field is not valid in Slave mode. */
+ uint32_t mps : 11; /**< Maximum Packet Size (MPS)
+ Applies to IN and OUT endpoints.
+ The application must program this field with the maximum
+ packet size for the current logical endpoint. This value is in
+ bytes. */
+#else
+ uint32_t mps : 11;
+ uint32_t nextep : 4;
+ uint32_t usbactep : 1;
+ uint32_t dpid : 1;
+ uint32_t naksts : 1;
+ uint32_t eptype : 2;
+ uint32_t reserved_20_20 : 1;
+ uint32_t stall : 1;
+ uint32_t txfnum : 4;
+ uint32_t cnak : 1;
+ uint32_t snak : 1;
+ uint32_t setd0pid : 1;
+ uint32_t setd1pid : 1;
+ uint32_t epdis : 1;
+ uint32_t epena : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_diepctlx_s cn30xx;
+ struct cvmx_usbcx_diepctlx_s cn31xx;
+ struct cvmx_usbcx_diepctlx_s cn50xx;
+ struct cvmx_usbcx_diepctlx_s cn52xx;
+ struct cvmx_usbcx_diepctlx_s cn52xxp1;
+ struct cvmx_usbcx_diepctlx_s cn56xx;
+ struct cvmx_usbcx_diepctlx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_diepctlx cvmx_usbcx_diepctlx_t;
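+
+/* Editor's note: an illustrative arming sequence for a bulk IN endpoint,
+ * combining the DIEPCTLn fields documented above (MPS, EPType, TxFNum,
+ * USBActEP, CNAK, EPEna). A sketch only, not the SDK driver; the
+ * CVMX_USBCX_DIEPCTLX(offset, block_id) accessor is assumed to be defined
+ * earlier in this header, mirroring the HC* helpers above. */
+static inline void cvmx_usbcx_diepctl_arm_bulk(unsigned long usb_port,
+                                               unsigned long ep, int mps)
+{
+    cvmx_usbcx_diepctlx_t ctl;
+    ctl.u32 = cvmx_read64_uint32(CVMX_USBCX_DIEPCTLX(ep, usb_port));
+    ctl.s.mps = mps;        /* maximum packet size in bytes */
+    ctl.s.eptype = 2;       /* 2'b10: Bulk */
+    ctl.s.txfnum = 0;       /* non-periodic TxFIFO */
+    ctl.s.usbactep = 1;     /* active in the current configuration */
+    ctl.s.cnak = 1;         /* clear NAK */
+    ctl.s.epena = 1;        /* data ready to transmit */
+    cvmx_write64_uint32(CVMX_USBCX_DIEPCTLX(ep, usb_port), ctl.u32);
+}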
+
+/**
+ * cvmx_usbc#_diepint#
+ *
+ * Device Endpoint-n Interrupt Register (DIEPINTn)
+ *
+ * This register indicates the status of an endpoint with respect to
+ * USB- and AHB-related events. The application must read this register
+ * when the OUT Endpoints Interrupt bit or IN Endpoints Interrupt bit of
+ * the Core Interrupt register (GINTSTS.OEPInt or GINTSTS.IEPInt,
+ * respectively) is set. Before the application can read this register,
+ * it must first read the Device All Endpoints Interrupt (DAINT) register
+ * to get the exact endpoint number for the Device Endpoint-n Interrupt
+ * register. The application must clear the appropriate bit in this register
+ * to clear the corresponding bits in the DAINT and GINTSTS registers.
+ */
+union cvmx_usbcx_diepintx {
+ uint32_t u32;
+ struct cvmx_usbcx_diepintx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_7_31 : 25;
+ uint32_t inepnakeff : 1; /**< IN Endpoint NAK Effective (INEPNakEff)
+ Applies to periodic IN endpoints only.
+ Indicates that the IN endpoint NAK bit set by the application has
+ taken effect in the core. This bit can be cleared when the
+ application clears the IN endpoint NAK by writing to
+ DIEPCTLn.CNAK.
+ This interrupt indicates that the core has sampled the NAK bit
+ set (either by the application or by the core).
+ This interrupt does not necessarily mean that a NAK handshake
+ is sent on the USB. A STALL bit takes priority over a NAK bit. */
+ uint32_t intknepmis : 1; /**< IN Token Received with EP Mismatch (INTknEPMis)
+ Applies to non-periodic IN endpoints only.
+ Indicates that the data in the top of the non-periodic TxFIFO
+ belongs to an endpoint other than the one for which the IN
+ token was received. This interrupt is asserted on the endpoint
+ for which the IN token was received. */
+ uint32_t intkntxfemp : 1; /**< IN Token Received When TxFIFO is Empty (INTknTXFEmp)
+ Applies only to non-periodic IN endpoints.
+ Indicates that an IN token was received when the associated
+ TxFIFO (periodic/non-periodic) was empty. This interrupt is
+ asserted on the endpoint for which the IN token was received. */
+ uint32_t timeout : 1; /**< Timeout Condition (TimeOUT)
+ Applies to non-isochronous IN endpoints only.
+ Indicates that the core has detected a timeout condition on the
+ USB for the last IN token on this endpoint. */
+ uint32_t ahberr : 1; /**< AHB Error (AHBErr)
+ This is generated only in Internal DMA mode when there is an
+ AHB error during an AHB read/write. The application can read
+ the corresponding endpoint DMA address register to get the
+ error address. */
+ uint32_t epdisbld : 1; /**< Endpoint Disabled Interrupt (EPDisbld)
+ This bit indicates that the endpoint is disabled per the
+ application's request. */
+ uint32_t xfercompl : 1; /**< Transfer Completed Interrupt (XferCompl)
+ Indicates that the programmed transfer is complete on the AHB
+ as well as on the USB, for this endpoint. */
+#else
+ uint32_t xfercompl : 1;
+ uint32_t epdisbld : 1;
+ uint32_t ahberr : 1;
+ uint32_t timeout : 1;
+ uint32_t intkntxfemp : 1;
+ uint32_t intknepmis : 1;
+ uint32_t inepnakeff : 1;
+ uint32_t reserved_7_31 : 25;
+#endif
+ } s;
+ struct cvmx_usbcx_diepintx_s cn30xx;
+ struct cvmx_usbcx_diepintx_s cn31xx;
+ struct cvmx_usbcx_diepintx_s cn50xx;
+ struct cvmx_usbcx_diepintx_s cn52xx;
+ struct cvmx_usbcx_diepintx_s cn52xxp1;
+ struct cvmx_usbcx_diepintx_s cn56xx;
+ struct cvmx_usbcx_diepintx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_diepintx cvmx_usbcx_diepintx_t;
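+
+/* Editor's note: a sketch of the read-and-clear flow prescribed above --
+ * DAINT gives the endpoint number, DIEPINTn gives the cause, and writing
+ * the handled bits back clears them in DIEPINTn, DAINT, and GINTSTS. The
+ * helper name and the CVMX_USBCX_DIEPINTX(offset, block_id) accessor are
+ * assumptions. */
+static inline void cvmx_usbcx_diepint_service(unsigned long usb_port,
+                                              unsigned long ep)
+{
+    cvmx_usbcx_diepintx_t intr;
+    intr.u32 = cvmx_read64_uint32(CVMX_USBCX_DIEPINTX(ep, usb_port));
+    if (intr.s.xfercompl) {
+        /* ... complete the programmed transfer for this endpoint ... */
+    }
+    /* Write the set bits back to acknowledge them. */
+    cvmx_write64_uint32(CVMX_USBCX_DIEPINTX(ep, usb_port), intr.u32);
+}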
+
+/**
+ * cvmx_usbc#_diepmsk
+ *
+ * Device IN Endpoint Common Interrupt Mask Register (DIEPMSK)
+ *
+ * This register works with each of the Device IN Endpoint Interrupt (DIEPINTn) registers
+ * for all endpoints to generate an interrupt per IN endpoint. The IN endpoint interrupt
+ * for a specific status in the DIEPINTn register can be masked by writing to the corresponding
+ * bit in this register. Status bits are masked by default.
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_diepmsk {
+ uint32_t u32;
+ struct cvmx_usbcx_diepmsk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_7_31 : 25;
+ uint32_t inepnakeffmsk : 1; /**< IN Endpoint NAK Effective Mask (INEPNakEffMsk) */
+ uint32_t intknepmismsk : 1; /**< IN Token received with EP Mismatch Mask (INTknEPMisMsk) */
+ uint32_t intkntxfempmsk : 1; /**< IN Token Received When TxFIFO Empty Mask
+ (INTknTXFEmpMsk) */
+ uint32_t timeoutmsk : 1; /**< Timeout Condition Mask (TimeOUTMsk)
+ (Non-isochronous endpoints) */
+ uint32_t ahberrmsk : 1; /**< AHB Error Mask (AHBErrMsk) */
+ uint32_t epdisbldmsk : 1; /**< Endpoint Disabled Interrupt Mask (EPDisbldMsk) */
+ uint32_t xfercomplmsk : 1; /**< Transfer Completed Interrupt Mask (XferComplMsk) */
+#else
+ uint32_t xfercomplmsk : 1;
+ uint32_t epdisbldmsk : 1;
+ uint32_t ahberrmsk : 1;
+ uint32_t timeoutmsk : 1;
+ uint32_t intkntxfempmsk : 1;
+ uint32_t intknepmismsk : 1;
+ uint32_t inepnakeffmsk : 1;
+ uint32_t reserved_7_31 : 25;
+#endif
+ } s;
+ struct cvmx_usbcx_diepmsk_s cn30xx;
+ struct cvmx_usbcx_diepmsk_s cn31xx;
+ struct cvmx_usbcx_diepmsk_s cn50xx;
+ struct cvmx_usbcx_diepmsk_s cn52xx;
+ struct cvmx_usbcx_diepmsk_s cn52xxp1;
+ struct cvmx_usbcx_diepmsk_s cn56xx;
+ struct cvmx_usbcx_diepmsk_s cn56xxp1;
+};
+typedef union cvmx_usbcx_diepmsk cvmx_usbcx_diepmsk_t;
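+
+/* Editor's note: as stated above, these bits mask by default and writing
+ * 1'b1 unmasks. A sketch (hypothetical helper, assumed CVMX_USBCX_DIEPMSK()
+ * accessor) enabling the Transfer Completed interrupt for IN endpoints: */
+static inline void cvmx_usbcx_diepmsk_enable_xfercompl(unsigned long usb_port)
+{
+    cvmx_usbcx_diepmsk_t msk;
+    msk.u32 = cvmx_read64_uint32(CVMX_USBCX_DIEPMSK(usb_port));
+    msk.s.xfercomplmsk = 1;   /* 1 = unmask */
+    cvmx_write64_uint32(CVMX_USBCX_DIEPMSK(usb_port), msk.u32);
+}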
+
+/**
+ * cvmx_usbc#_dieptsiz#
+ *
+ * Device Endpoint-n Transfer Size Register (DIEPTSIZn)
+ *
+ * The application must modify this register before enabling the endpoint.
+ * Once the endpoint is enabled using the Endpoint Enable bit of the Device Endpoint-n Control registers (DIEPCTLn.EPEna/DOEPCTLn.EPEna),
+ * the core modifies this register. The application can only read this register once the core has cleared the Endpoint Enable bit.
+ * This register is used only for endpoints other than Endpoint 0.
+ */
+union cvmx_usbcx_dieptsizx {
+ uint32_t u32;
+ struct cvmx_usbcx_dieptsizx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_31_31 : 1;
+ uint32_t mc : 2; /**< Multi Count (MC)
+ Applies to IN endpoints only.
+ For periodic IN endpoints, this field indicates the number of
+ packets that must be transmitted per microframe on the USB.
+ The core uses this field to calculate the data PID for
+ isochronous IN endpoints.
+ * 2'b01: 1 packet
+ * 2'b10: 2 packets
+ * 2'b11: 3 packets
+ For non-periodic IN endpoints, this field is valid only in Internal
+ DMA mode. It specifies the number of packets the core should
+ fetch for an IN endpoint before it switches to the endpoint
+ pointed to by the Next Endpoint field of the Device Endpoint-n
+ Control register (DIEPCTLn.NextEp) */
+ uint32_t pktcnt : 10; /**< Packet Count (PktCnt)
+ Indicates the total number of USB packets that constitute the
+ Transfer Size amount of data for this endpoint.
+ IN Endpoints: This field is decremented every time a packet
+ (maximum size or short packet) is read from the TxFIFO. */
+ uint32_t xfersize : 19; /**< Transfer Size (XferSize)
+ This field contains the transfer size in bytes for the current
+ endpoint.
+ The core only interrupts the application after it has exhausted
+ the transfer size amount of data. The transfer size can be set to
+ the maximum packet size of the endpoint, to be interrupted at
+ the end of each packet.
+ IN Endpoints: The core decrements this field every time a
+ packet from the external memory is written to the TxFIFO. */
+#else
+ uint32_t xfersize : 19;
+ uint32_t pktcnt : 10;
+ uint32_t mc : 2;
+ uint32_t reserved_31_31 : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_dieptsizx_s cn30xx;
+ struct cvmx_usbcx_dieptsizx_s cn31xx;
+ struct cvmx_usbcx_dieptsizx_s cn50xx;
+ struct cvmx_usbcx_dieptsizx_s cn52xx;
+ struct cvmx_usbcx_dieptsizx_s cn52xxp1;
+ struct cvmx_usbcx_dieptsizx_s cn56xx;
+ struct cvmx_usbcx_dieptsizx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_dieptsizx cvmx_usbcx_dieptsizx_t;
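+
+/* Editor's note: a sketch of programming DIEPTSIZn before setting EPEna, as
+ * required above. PktCnt is the number of packets (the last possibly short)
+ * that cover XferSize bytes at the endpoint's maximum packet size. The helper
+ * name and the CVMX_USBCX_DIEPTSIZX(offset, block_id) accessor are
+ * assumptions. */
+static inline void cvmx_usbcx_dieptsiz_program(unsigned long usb_port,
+                                               unsigned long ep,
+                                               uint32_t bytes, uint32_t mps)
+{
+    cvmx_usbcx_dieptsizx_t tsiz;
+    tsiz.u32 = 0;
+    tsiz.s.xfersize = bytes;                 /* transfer size in bytes */
+    tsiz.s.pktcnt = (bytes + mps - 1) / mps; /* round up to whole packets */
+    if (tsiz.s.pktcnt == 0)
+        tsiz.s.pktcnt = 1;   /* a zero-length transfer still needs one packet */
+    cvmx_write64_uint32(CVMX_USBCX_DIEPTSIZX(ep, usb_port), tsiz.u32);
+}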
+
+/**
+ * cvmx_usbc#_doepctl#
+ *
+ * Device OUT Endpoint-n Control Register (DOEPCTLn)
+ *
+ * The application uses this register to control the behavior of each logical endpoint other than endpoint 0.
+ */
+union cvmx_usbcx_doepctlx {
+ uint32_t u32;
+ struct cvmx_usbcx_doepctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t epena : 1; /**< Endpoint Enable (EPEna)
+ Indicates that the application has allocated the memory to start
+ receiving data from the USB.
+ The core clears this bit before setting any of the following
+ interrupts on this endpoint:
+ * SETUP Phase Done
+ * Endpoint Disabled
+ * Transfer Completed
+ For control OUT endpoints in DMA mode, this bit must be set
+ to be able to transfer SETUP data packets in memory. */
+ uint32_t epdis : 1; /**< Endpoint Disable (EPDis)
+ The application sets this bit to stop transmitting data on an
+ endpoint, even before the transfer for that endpoint is complete.
+ The application must wait for the Endpoint Disabled interrupt
+ before treating the endpoint as disabled. The core clears this bit
+ before setting the Endpoint Disabled Interrupt. The application
+ should set this bit only if Endpoint Enable is already set for this
+ endpoint. */
+ uint32_t setd1pid : 1; /**< For Interrupt/BULK endpoints:
+ Set DATA1 PID (SetD1PID)
+ Writing to this field sets the Endpoint Data Pid (DPID) field in
+ this register to DATA1.
+ For Isochronous endpoints:
+ Set Odd (micro)frame (SetOddFr)
+ Writing to this field sets the Even/Odd (micro)frame (EO_FrNum)
+ field to odd (micro)frame. */
+ uint32_t setd0pid : 1; /**< For Interrupt/BULK endpoints:
+ Set DATA0 PID (SetD0PID)
+ Writing to this field sets the Endpoint Data Pid (DPID) field in
+ this register to DATA0.
+ For Isochronous endpoints:
+ Set Even (micro)frame (SetEvenFr)
+ Writing to this field sets the Even/Odd (micro)frame (EO_FrNum)
+ field to even (micro)frame. */
+ uint32_t snak : 1; /**< Set NAK (SNAK)
+ A write to this bit sets the NAK bit for the endpoint.
+ Using this bit, the application can control the transmission of
+ NAK handshakes on an endpoint. The core can also set this bit
+ for an endpoint after a SETUP packet is received on the
+ endpoint. */
+ uint32_t cnak : 1; /**< Clear NAK (CNAK)
+ A write to this bit clears the NAK bit for the endpoint. */
+ uint32_t reserved_22_25 : 4;
+ uint32_t stall : 1; /**< STALL Handshake (Stall)
+ For non-control, non-isochronous endpoints:
+ The application sets this bit to stall all tokens from the USB host
+ to this endpoint. If a NAK bit, Global Non-Periodic IN NAK, or
+ Global OUT NAK is set along with this bit, the STALL bit takes
+ priority. Only the application can clear this bit, never the core.
+ For control endpoints:
+ The application can only set this bit, and the core clears it, when
+ a SETUP token is received for this endpoint. If a NAK bit, Global
+ Non-Periodic IN NAK, or Global OUT NAK is set along with this
+ bit, the STALL bit takes priority. Irrespective of this bit's setting,
+ the core always responds to SETUP data packets with an ACK handshake. */
+ uint32_t snp : 1; /**< Snoop Mode (Snp)
+ This bit configures the endpoint to Snoop mode. In Snoop mode,
+ the core does not check the correctness of OUT packets before
+ transferring them to application memory. */
+ uint32_t eptype : 2; /**< Endpoint Type (EPType)
+ This is the transfer type supported by this logical endpoint.
+ * 2'b00: Control
+ * 2'b01: Isochronous
+ * 2'b10: Bulk
+ * 2'b11: Interrupt */
+ uint32_t naksts : 1; /**< NAK Status (NAKSts)
+ Indicates the following:
+ * 1'b0: The core is transmitting non-NAK handshakes based
+ on the FIFO status
+ * 1'b1: The core is transmitting NAK handshakes on this
+ endpoint.
+ When either the application or the core sets this bit:
+ * The core stops receiving any data on an OUT endpoint, even
+ if there is space in the RxFIFO to accommodate the incoming
+ packet. */
+ uint32_t dpid : 1; /**< For interrupt/bulk IN and OUT endpoints:
+ Endpoint Data PID (DPID)
+ Contains the PID of the packet to be received or transmitted on
+ this endpoint. The application should program the PID of the first
+ packet to be received or transmitted on this endpoint, after the
+ endpoint is activated. Applications use the SetD1PID and
+ SetD0PID fields of this register to program either DATA0 or
+ DATA1 PID.
+ * 1'b0: DATA0
+ * 1'b1: DATA1
+ For isochronous IN and OUT endpoints:
+ Even/Odd (Micro)Frame (EO_FrNum)
+ Indicates the (micro)frame number in which the core transmits/
+ receives isochronous data for this endpoint. The application
+ should program the even/odd (micro) frame number in which it
+ intends to transmit/receive isochronous data for this endpoint
+ using the SetEvenFr and SetOddFr fields in this register.
+ * 1'b0: Even (micro)frame
+ * 1'b1: Odd (micro)frame */
+ uint32_t usbactep : 1; /**< USB Active Endpoint (USBActEP)
+ Indicates whether this endpoint is active in the current
+ configuration and interface. The core clears this bit for all
+ endpoints (other than EP 0) after detecting a USB reset. After
+ receiving the SetConfiguration and SetInterface commands, the
+ application must program endpoint registers accordingly and set
+ this bit. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t mps : 11; /**< Maximum Packet Size (MPS)
+ Applies to IN and OUT endpoints.
+ The application must program this field with the maximum
+ packet size for the current logical endpoint. This value is in
+ bytes. */
+#else
+ uint32_t mps : 11;
+ uint32_t reserved_11_14 : 4;
+ uint32_t usbactep : 1;
+ uint32_t dpid : 1;
+ uint32_t naksts : 1;
+ uint32_t eptype : 2;
+ uint32_t snp : 1;
+ uint32_t stall : 1;
+ uint32_t reserved_22_25 : 4;
+ uint32_t cnak : 1;
+ uint32_t snak : 1;
+ uint32_t setd0pid : 1;
+ uint32_t setd1pid : 1;
+ uint32_t epdis : 1;
+ uint32_t epena : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_doepctlx_s cn30xx;
+ struct cvmx_usbcx_doepctlx_s cn31xx;
+ struct cvmx_usbcx_doepctlx_s cn50xx;
+ struct cvmx_usbcx_doepctlx_s cn52xx;
+ struct cvmx_usbcx_doepctlx_s cn52xxp1;
+ struct cvmx_usbcx_doepctlx_s cn56xx;
+ struct cvmx_usbcx_doepctlx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_doepctlx cvmx_usbcx_doepctlx_t;
+
+/**
+ * cvmx_usbc#_doepint#
+ *
+ * Device Endpoint-n Interrupt Register (DOEPINTn)
+ *
+ * This register indicates the status of an endpoint with respect to USB- and AHB-related events.
+ * The application must read this register when the OUT Endpoints Interrupt bit or IN Endpoints
+ * Interrupt bit of the Core Interrupt register (GINTSTS.OEPInt or GINTSTS.IEPInt, respectively)
+ * is set. Before the application can read this register, it must first read the Device All
+ * Endpoints Interrupt (DAINT) register to get the exact endpoint number for the Device Endpoint-n
+ * Interrupt register. The application must clear the appropriate bit in this register to clear the
+ * corresponding bits in the DAINT and GINTSTS registers.
+ */
+union cvmx_usbcx_doepintx {
+ uint32_t u32;
+ struct cvmx_usbcx_doepintx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_5_31 : 27;
+ uint32_t outtknepdis : 1; /**< OUT Token Received When Endpoint Disabled (OUTTknEPdis)
+ Applies only to control OUT endpoints.
+ Indicates that an OUT token was received when the endpoint
+ was not yet enabled. This interrupt is asserted on the endpoint
+ for which the OUT token was received. */
+ uint32_t setup : 1; /**< SETUP Phase Done (SetUp)
+ Applies to control OUT endpoints only.
+ Indicates that the SETUP phase for the control endpoint is
+ complete and no more back-to-back SETUP packets were
+ received for the current control transfer. On this interrupt, the
+ application can decode the received SETUP data packet. */
+ uint32_t ahberr : 1; /**< AHB Error (AHBErr)
+ This is generated only in Internal DMA mode when there is an
+ AHB error during an AHB read/write. The application can read
+ the corresponding endpoint DMA address register to get the
+ error address. */
+ uint32_t epdisbld : 1; /**< Endpoint Disabled Interrupt (EPDisbld)
+ This bit indicates that the endpoint is disabled per the
+ application's request. */
+ uint32_t xfercompl : 1; /**< Transfer Completed Interrupt (XferCompl)
+ Indicates that the programmed transfer is complete on the AHB
+ as well as on the USB, for this endpoint. */
+#else
+ uint32_t xfercompl : 1;
+ uint32_t epdisbld : 1;
+ uint32_t ahberr : 1;
+ uint32_t setup : 1;
+ uint32_t outtknepdis : 1;
+ uint32_t reserved_5_31 : 27;
+#endif
+ } s;
+ struct cvmx_usbcx_doepintx_s cn30xx;
+ struct cvmx_usbcx_doepintx_s cn31xx;
+ struct cvmx_usbcx_doepintx_s cn50xx;
+ struct cvmx_usbcx_doepintx_s cn52xx;
+ struct cvmx_usbcx_doepintx_s cn52xxp1;
+ struct cvmx_usbcx_doepintx_s cn56xx;
+ struct cvmx_usbcx_doepintx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_doepintx cvmx_usbcx_doepintx_t;
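+
+/* Editor's note: a sketch of the SETUP-phase handling described above.
+ * Once DOEPINTn.SetUp is set, the SETUP data can be decoded; writing the
+ * bit back clears it and the DAINT/GINTSTS summaries. The helper name and
+ * the CVMX_USBCX_DOEPINTX(offset, block_id) accessor are assumptions. */
+static inline int cvmx_usbcx_doepint_setup_done(unsigned long usb_port,
+                                                unsigned long ep)
+{
+    cvmx_usbcx_doepintx_t intr;
+    intr.u32 = cvmx_read64_uint32(CVMX_USBCX_DOEPINTX(ep, usb_port));
+    if (!intr.s.setup)
+        return 0;
+    /* Acknowledge by writing the set bits back. */
+    cvmx_write64_uint32(CVMX_USBCX_DOEPINTX(ep, usb_port), intr.u32);
+    return 1;   /* caller may now decode the received SETUP packet */
+}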
+
+/**
+ * cvmx_usbc#_doepmsk
+ *
+ * Device OUT Endpoint Common Interrupt Mask Register (DOEPMSK)
+ *
+ * This register works with each of the Device OUT Endpoint Interrupt (DOEPINTn) registers
+ * for all endpoints to generate an interrupt per OUT endpoint. The OUT endpoint interrupt
+ * for a specific status in the DOEPINTn register can be masked by writing into the
+ * corresponding bit in this register. Status bits are masked by default.
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_doepmsk {
+ uint32_t u32;
+ struct cvmx_usbcx_doepmsk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_5_31 : 27;
+ uint32_t outtknepdismsk : 1; /**< OUT Token Received when Endpoint Disabled Mask
+ (OUTTknEPdisMsk)
+ Applies to control OUT endpoints only. */
+ uint32_t setupmsk : 1; /**< SETUP Phase Done Mask (SetUPMsk)
+ Applies to control endpoints only. */
+ uint32_t ahberrmsk : 1; /**< AHB Error (AHBErrMsk) */
+ uint32_t epdisbldmsk : 1; /**< Endpoint Disabled Interrupt Mask (EPDisbldMsk) */
+ uint32_t xfercomplmsk : 1; /**< Transfer Completed Interrupt Mask (XferComplMsk) */
+#else
+ uint32_t xfercomplmsk : 1;
+ uint32_t epdisbldmsk : 1;
+ uint32_t ahberrmsk : 1;
+ uint32_t setupmsk : 1;
+ uint32_t outtknepdismsk : 1;
+ uint32_t reserved_5_31 : 27;
+#endif
+ } s;
+ struct cvmx_usbcx_doepmsk_s cn30xx;
+ struct cvmx_usbcx_doepmsk_s cn31xx;
+ struct cvmx_usbcx_doepmsk_s cn50xx;
+ struct cvmx_usbcx_doepmsk_s cn52xx;
+ struct cvmx_usbcx_doepmsk_s cn52xxp1;
+ struct cvmx_usbcx_doepmsk_s cn56xx;
+ struct cvmx_usbcx_doepmsk_s cn56xxp1;
+};
+typedef union cvmx_usbcx_doepmsk cvmx_usbcx_doepmsk_t;
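+
+/*
+ * Editorial usage sketch, not part of the SDK: unmasking the common OUT
+ * endpoint interrupts through the union above. Per the register description,
+ * status bits are masked by default and writing 1'b1 unmasks them. The
+ * accessors usbc_read32()/usbc_write32() are hypothetical 32-bit MMIO
+ * helpers keyed by register offset; real accessor names and offsets differ.
+ */
+extern uint32_t usbc_read32(uint64_t offset);              /* hypothetical */
+extern void usbc_write32(uint64_t offset, uint32_t value); /* hypothetical */
+
+static inline void example_unmask_out_ep_ints(uint64_t doepmsk_offset)
+{
+    cvmx_usbcx_doepmsk_t msk;
+    msk.u32 = usbc_read32(doepmsk_offset);
+    msk.s.xfercomplmsk = 1; /* transfer completed */
+    msk.s.epdisbldmsk = 1;  /* endpoint disabled */
+    msk.s.setupmsk = 1;     /* SETUP phase done (control OUT) */
+    msk.s.ahberrmsk = 1;    /* AHB error (Internal DMA mode) */
+    usbc_write32(doepmsk_offset, msk.u32);
+}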
+
+/**
+ * cvmx_usbc#_doeptsiz#
+ *
+ * Device Endpoint-n Transfer Size Register (DOEPTSIZn)
+ *
+ * The application must modify this register before enabling the endpoint.
+ * Once the endpoint is enabled using Endpoint Enable bit of the Device Endpoint-n Control
+ * registers (DOEPCTLn.EPEna), the core modifies this register. The application
+ * can only read this register once the core has cleared the Endpoint Enable bit.
+ * This register is used only for endpoints other than Endpoint 0.
+ */
+union cvmx_usbcx_doeptsizx {
+ uint32_t u32;
+ struct cvmx_usbcx_doeptsizx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_31_31 : 1;
+ uint32_t mc : 2; /**< Multi Count (MC)
+ Received Data PID (RxDPID)
+ Applies to isochronous OUT endpoints only.
+ This is the data PID received in the last packet for this endpoint.
+ 2'b00: DATA0
+ 2'b01: DATA1
+ 2'b10: DATA2
+ 2'b11: MDATA
+ SETUP Packet Count (SUPCnt)
+ Applies to control OUT Endpoints only.
+ This field specifies the number of back-to-back SETUP data
+ packets the endpoint can receive.
+ 2'b01: 1 packet
+ 2'b10: 2 packets
+ 2'b11: 3 packets */
+ uint32_t pktcnt : 10; /**< Packet Count (PktCnt)
+ Indicates the total number of USB packets that constitute the
+ Transfer Size amount of data for this endpoint.
+ OUT Endpoints: This field is decremented every time a
+ packet (maximum size or short packet) is written to the
+ RxFIFO. */
+ uint32_t xfersize : 19; /**< Transfer Size (XferSize)
+ This field contains the transfer size in bytes for the current
+ endpoint.
+ The core only interrupts the application after it has exhausted
+ the transfer size amount of data. The transfer size can be set to
+ the maximum packet size of the endpoint, to be interrupted at
+ the end of each packet.
+ OUT Endpoints: The core decrements this field every time a
+ packet is read from the RxFIFO and written to the external
+ memory. */
+#else
+ uint32_t xfersize : 19;
+ uint32_t pktcnt : 10;
+ uint32_t mc : 2;
+ uint32_t reserved_31_31 : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_doeptsizx_s cn30xx;
+ struct cvmx_usbcx_doeptsizx_s cn31xx;
+ struct cvmx_usbcx_doeptsizx_s cn50xx;
+ struct cvmx_usbcx_doeptsizx_s cn52xx;
+ struct cvmx_usbcx_doeptsizx_s cn52xxp1;
+ struct cvmx_usbcx_doeptsizx_s cn56xx;
+ struct cvmx_usbcx_doeptsizx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_doeptsizx cvmx_usbcx_doeptsizx_t;
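+
+/*
+ * Editorial usage sketch, not part of the SDK: programming DOEPTSIZn before
+ * setting DOEPCTLn.EPEna, as the description above requires. PktCnt is the
+ * number of USB packets that cover XferSize bytes. usbc_write32() is a
+ * hypothetical MMIO helper; real accessors and offsets differ.
+ */
+extern void usbc_write32(uint64_t offset, uint32_t value); /* hypothetical */
+
+static inline void example_prime_out_xfer(uint64_t doeptsiz_offset,
+                                          uint32_t bytes, uint32_t max_packet)
+{
+    cvmx_usbcx_doeptsizx_t siz;
+    siz.u32 = 0;
+    siz.s.xfersize = bytes;                               /* 19-bit field */
+    siz.s.pktcnt = (bytes + max_packet - 1) / max_packet; /* 10-bit field */
+    if (siz.s.pktcnt == 0)
+        siz.s.pktcnt = 1; /* a zero-length transfer still takes one packet */
+    usbc_write32(doeptsiz_offset, siz.u32);
+}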
+
+/**
+ * cvmx_usbc#_dptxfsiz#
+ *
+ * Device Periodic Transmit FIFO-n Size Register (DPTXFSIZ)
+ *
+ * This register holds the memory start address of each periodic TxFIFO implemented
+ * in Device mode. Each periodic FIFO holds the data for one periodic IN endpoint.
+ * This register is repeated for each periodic FIFO instantiated.
+ */
+union cvmx_usbcx_dptxfsizx {
+ uint32_t u32;
+ struct cvmx_usbcx_dptxfsizx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dptxfsize : 16; /**< Device Periodic TxFIFO Size (DPTxFSize)
+ This value is in terms of 32-bit words.
+ * Minimum value is 4
+ * Maximum value is 768 */
+ uint32_t dptxfstaddr : 16; /**< Device Periodic TxFIFO RAM Start Address (DPTxFStAddr)
+ Holds the start address in the RAM for this periodic FIFO. */
+#else
+ uint32_t dptxfstaddr : 16;
+ uint32_t dptxfsize : 16;
+#endif
+ } s;
+ struct cvmx_usbcx_dptxfsizx_s cn30xx;
+ struct cvmx_usbcx_dptxfsizx_s cn31xx;
+ struct cvmx_usbcx_dptxfsizx_s cn50xx;
+ struct cvmx_usbcx_dptxfsizx_s cn52xx;
+ struct cvmx_usbcx_dptxfsizx_s cn52xxp1;
+ struct cvmx_usbcx_dptxfsizx_s cn56xx;
+ struct cvmx_usbcx_dptxfsizx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_dptxfsizx cvmx_usbcx_dptxfsizx_t;
+
+/**
+ * cvmx_usbc#_dsts
+ *
+ * Device Status Register (DSTS)
+ *
+ * This register indicates the status of the core with respect to USB-related events.
+ * It must be read on interrupts from the Device All Interrupts (DAINT) register.
+ */
+union cvmx_usbcx_dsts {
+ uint32_t u32;
+ struct cvmx_usbcx_dsts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_22_31 : 10;
+ uint32_t soffn : 14; /**< Frame or Microframe Number of the Received SOF (SOFFN)
+ When the core is operating at high speed, this field contains a
+ microframe number. When the core is operating at full or low
+ speed, this field contains a frame number. */
+ uint32_t reserved_4_7 : 4;
+ uint32_t errticerr : 1; /**< Erratic Error (ErrticErr)
+ The core sets this bit to report any erratic errors
+ (phy_rxvalid_i/phy_rxvldh_i or phy_rxactive_i is asserted for at
+ least 2 ms, due to PHY error) seen on the UTMI+.
+ Due to erratic errors, the O2P USB core goes into Suspended
+ state and an interrupt is generated to the application with Early
+ Suspend bit of the Core Interrupt register (GINTSTS.ErlySusp).
+ If the early suspend is asserted due to an erratic error, the
+ application can only perform a soft disconnect to recover. */
+ uint32_t enumspd : 2; /**< Enumerated Speed (EnumSpd)
+ Indicates the speed at which the O2P USB core has come up
+ after speed detection through a chirp sequence.
+ * 2'b00: High speed (PHY clock is running at 30 or 60 MHz)
+ * 2'b01: Full speed (PHY clock is running at 30 or 60 MHz)
+ * 2'b10: Low speed (PHY clock is running at 6 MHz)
+ * 2'b11: Full speed (PHY clock is running at 48 MHz)
+ Low speed is not supported for devices using a UTMI+ PHY. */
+ uint32_t suspsts : 1; /**< Suspend Status (SuspSts)
+ In Device mode, this bit is set as long as a Suspend condition is
+ detected on the USB. The core enters the Suspended state
+ when there is no activity on the phy_line_state_i signal for an
+ extended period of time. The core comes out of the suspend:
+ * When there is any activity on the phy_line_state_i signal
+ * When the application writes to the Remote Wakeup Signaling
+ bit in the Device Control register (DCTL.RmtWkUpSig). */
+#else
+ uint32_t suspsts : 1;
+ uint32_t enumspd : 2;
+ uint32_t errticerr : 1;
+ uint32_t reserved_4_7 : 4;
+ uint32_t soffn : 14;
+ uint32_t reserved_22_31 : 10;
+#endif
+ } s;
+ struct cvmx_usbcx_dsts_s cn30xx;
+ struct cvmx_usbcx_dsts_s cn31xx;
+ struct cvmx_usbcx_dsts_s cn50xx;
+ struct cvmx_usbcx_dsts_s cn52xx;
+ struct cvmx_usbcx_dsts_s cn52xxp1;
+ struct cvmx_usbcx_dsts_s cn56xx;
+ struct cvmx_usbcx_dsts_s cn56xxp1;
+};
+typedef union cvmx_usbcx_dsts cvmx_usbcx_dsts_t;
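+
+/*
+ * Editorial usage sketch, not part of the SDK: decoding DSTS.EnumSpd after
+ * the Enumeration Done interrupt, using the encoding above. usbc_read32()
+ * is a hypothetical MMIO helper.
+ */
+extern uint32_t usbc_read32(uint64_t offset); /* hypothetical */
+
+static inline int example_enumerated_high_speed(uint64_t dsts_offset)
+{
+    cvmx_usbcx_dsts_t dsts;
+    dsts.u32 = usbc_read32(dsts_offset);
+    return dsts.s.enumspd == 0; /* 2'b00: high speed */
+}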
+
+/**
+ * cvmx_usbc#_dtknqr1
+ *
+ * Device IN Token Sequence Learning Queue Read Register 1 (DTKNQR1)
+ *
+ * The depth of the IN Token Sequence Learning Queue is given by the Device Mode IN Token
+ * Sequence Learning Queue Depth (GHWCFG2.TknQDepth). Each queue entry is 4 bits wide to
+ * store the endpoint number.
+ * A read from this register returns the first 6 endpoint entries of the IN Token Sequence
+ * Learning Queue. When the queue is full, the new token is pushed into the queue and the
+ * oldest token is discarded.
+ */
+union cvmx_usbcx_dtknqr1 {
+ uint32_t u32;
+ struct cvmx_usbcx_dtknqr1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t eptkn : 24; /**< Endpoint Token (EPTkn)
+ Four bits per token represent the endpoint number of the token:
+ * Bits [31:28]: Endpoint number of Token 5
+ * Bits [27:24]: Endpoint number of Token 4
+ - .......
+ * Bits [15:12]: Endpoint number of Token 1
+ * Bits [11:8]: Endpoint number of Token 0 */
+ uint32_t wrapbit : 1; /**< Wrap Bit (WrapBit)
+ This bit is set when the write pointer wraps. It is cleared when
+ the learning queue is cleared. */
+ uint32_t reserved_5_6 : 2;
+ uint32_t intknwptr : 5; /**< IN Token Queue Write Pointer (INTknWPtr) */
+#else
+ uint32_t intknwptr : 5;
+ uint32_t reserved_5_6 : 2;
+ uint32_t wrapbit : 1;
+ uint32_t eptkn : 24;
+#endif
+ } s;
+ struct cvmx_usbcx_dtknqr1_s cn30xx;
+ struct cvmx_usbcx_dtknqr1_s cn31xx;
+ struct cvmx_usbcx_dtknqr1_s cn50xx;
+ struct cvmx_usbcx_dtknqr1_s cn52xx;
+ struct cvmx_usbcx_dtknqr1_s cn52xxp1;
+ struct cvmx_usbcx_dtknqr1_s cn56xx;
+ struct cvmx_usbcx_dtknqr1_s cn56xxp1;
+};
+typedef union cvmx_usbcx_dtknqr1 cvmx_usbcx_dtknqr1_t;
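+
+/*
+ * Editorial usage sketch, not part of the SDK: extracting the endpoint
+ * number of one of the six tokens (0-5) held in DTKNQR1.EPTkn. Register
+ * bits [11:8] hold token 0, so token n occupies bits [4n+3:4n] of the
+ * 24-bit field. usbc_read32() is a hypothetical MMIO helper.
+ */
+extern uint32_t usbc_read32(uint64_t offset); /* hypothetical */
+
+static inline unsigned example_token_endpoint(uint64_t dtknqr1_offset,
+                                              unsigned token /* 0..5 */)
+{
+    cvmx_usbcx_dtknqr1_t q;
+    q.u32 = usbc_read32(dtknqr1_offset);
+    return (q.s.eptkn >> (4 * token)) & 0xF;
+}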
+
+/**
+ * cvmx_usbc#_dtknqr2
+ *
+ * Device IN Token Sequence Learning Queue Read Register 2 (DTKNQR2)
+ *
+ * A read from this register returns the next 8 endpoint entries of the learning queue.
+ */
+union cvmx_usbcx_dtknqr2 {
+ uint32_t u32;
+ struct cvmx_usbcx_dtknqr2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t eptkn : 32; /**< Endpoint Token (EPTkn)
+ Four bits per token represent the endpoint number of the token:
+ * Bits [31:28]: Endpoint number of Token 13
+ * Bits [27:24]: Endpoint number of Token 12
+ - .......
+ * Bits [7:4]: Endpoint number of Token 7
+ * Bits [3:0]: Endpoint number of Token 6 */
+#else
+ uint32_t eptkn : 32;
+#endif
+ } s;
+ struct cvmx_usbcx_dtknqr2_s cn30xx;
+ struct cvmx_usbcx_dtknqr2_s cn31xx;
+ struct cvmx_usbcx_dtknqr2_s cn50xx;
+ struct cvmx_usbcx_dtknqr2_s cn52xx;
+ struct cvmx_usbcx_dtknqr2_s cn52xxp1;
+ struct cvmx_usbcx_dtknqr2_s cn56xx;
+ struct cvmx_usbcx_dtknqr2_s cn56xxp1;
+};
+typedef union cvmx_usbcx_dtknqr2 cvmx_usbcx_dtknqr2_t;
+
+/**
+ * cvmx_usbc#_dtknqr3
+ *
+ * Device IN Token Sequence Learning Queue Read Register 3 (DTKNQR3)
+ *
+ * A read from this register returns the next 8 endpoint entries of the learning queue.
+ */
+union cvmx_usbcx_dtknqr3 {
+ uint32_t u32;
+ struct cvmx_usbcx_dtknqr3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t eptkn : 32; /**< Endpoint Token (EPTkn)
+ Four bits per token represent the endpoint number of the token:
+ * Bits [31:28]: Endpoint number of Token 21
+ * Bits [27:24]: Endpoint number of Token 20
+ - .......
+ * Bits [7:4]: Endpoint number of Token 15
+ * Bits [3:0]: Endpoint number of Token 14 */
+#else
+ uint32_t eptkn : 32;
+#endif
+ } s;
+ struct cvmx_usbcx_dtknqr3_s cn30xx;
+ struct cvmx_usbcx_dtknqr3_s cn31xx;
+ struct cvmx_usbcx_dtknqr3_s cn50xx;
+ struct cvmx_usbcx_dtknqr3_s cn52xx;
+ struct cvmx_usbcx_dtknqr3_s cn52xxp1;
+ struct cvmx_usbcx_dtknqr3_s cn56xx;
+ struct cvmx_usbcx_dtknqr3_s cn56xxp1;
+};
+typedef union cvmx_usbcx_dtknqr3 cvmx_usbcx_dtknqr3_t;
+
+/**
+ * cvmx_usbc#_dtknqr4
+ *
+ * Device IN Token Sequence Learning Queue Read Register 4 (DTKNQR4)
+ *
+ * A read from this register returns the last 8 endpoint entries of the learning queue.
+ */
+union cvmx_usbcx_dtknqr4 {
+ uint32_t u32;
+ struct cvmx_usbcx_dtknqr4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t eptkn : 32; /**< Endpoint Token (EPTkn)
+ Four bits per token represent the endpoint number of the token:
+ * Bits [31:28]: Endpoint number of Token 29
+ * Bits [27:24]: Endpoint number of Token 28
+ - .......
+ * Bits [7:4]: Endpoint number of Token 23
+ * Bits [3:0]: Endpoint number of Token 22 */
+#else
+ uint32_t eptkn : 32;
+#endif
+ } s;
+ struct cvmx_usbcx_dtknqr4_s cn30xx;
+ struct cvmx_usbcx_dtknqr4_s cn31xx;
+ struct cvmx_usbcx_dtknqr4_s cn50xx;
+ struct cvmx_usbcx_dtknqr4_s cn52xx;
+ struct cvmx_usbcx_dtknqr4_s cn52xxp1;
+ struct cvmx_usbcx_dtknqr4_s cn56xx;
+ struct cvmx_usbcx_dtknqr4_s cn56xxp1;
+};
+typedef union cvmx_usbcx_dtknqr4 cvmx_usbcx_dtknqr4_t;
+
+/**
+ * cvmx_usbc#_gahbcfg
+ *
+ * Core AHB Configuration Register (GAHBCFG)
+ *
+ * This register can be used to configure the core after power-on or a change in mode of operation.
+ * This register mainly contains AHB system-related configuration parameters. The AHB is the processor
+ * interface to the O2P USB core. In general, software need not know about this interface except to
+ * program the values as specified.
+ *
+ * The application must program this register as part of the O2P USB core initialization.
+ * Do not change this register after the initial programming.
+ */
+union cvmx_usbcx_gahbcfg {
+ uint32_t u32;
+ struct cvmx_usbcx_gahbcfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31 : 23;
+ uint32_t ptxfemplvl : 1; /**< Periodic TxFIFO Empty Level (PTxFEmpLvl)
+ Software should set this bit to 0x1.
+ Indicates when the Periodic TxFIFO Empty Interrupt bit in the
+ Core Interrupt register (GINTSTS.PTxFEmp) is triggered. This
+ bit is used only in Slave mode.
+ * 1'b0: GINTSTS.PTxFEmp interrupt indicates that the Periodic
+ TxFIFO is half empty
+ * 1'b1: GINTSTS.PTxFEmp interrupt indicates that the Periodic
+ TxFIFO is completely empty */
+ uint32_t nptxfemplvl : 1; /**< Non-Periodic TxFIFO Empty Level (NPTxFEmpLvl)
+ Software should set this bit to 0x1.
+ Indicates when the Non-Periodic TxFIFO Empty Interrupt bit in
+ the Core Interrupt register (GINTSTS.NPTxFEmp) is triggered.
+ This bit is used only in Slave mode.
+ * 1'b0: GINTSTS.NPTxFEmp interrupt indicates that the Non-
+ Periodic TxFIFO is half empty
+ * 1'b1: GINTSTS.NPTxFEmp interrupt indicates that the Non-
+ Periodic TxFIFO is completely empty */
+ uint32_t reserved_6_6 : 1;
+ uint32_t dmaen : 1; /**< DMA Enable (DMAEn)
+ * 1'b0: Core operates in Slave mode
+ * 1'b1: Core operates in a DMA mode */
+ uint32_t hbstlen : 4; /**< Burst Length/Type (HBstLen)
+ This field has no effect and should be left as 0x0. */
+ uint32_t glblintrmsk : 1; /**< Global Interrupt Mask (GlblIntrMsk)
+ Software should set this field to 0x1.
+ The application uses this bit to mask or unmask the interrupt
+ line assertion to itself. Irrespective of this bit's setting, the
+ interrupt status registers are updated by the core.
+ * 1'b0: Mask the interrupt assertion to the application.
+ * 1'b1: Unmask the interrupt assertion to the application. */
+#else
+ uint32_t glblintrmsk : 1;
+ uint32_t hbstlen : 4;
+ uint32_t dmaen : 1;
+ uint32_t reserved_6_6 : 1;
+ uint32_t nptxfemplvl : 1;
+ uint32_t ptxfemplvl : 1;
+ uint32_t reserved_9_31 : 23;
+#endif
+ } s;
+ struct cvmx_usbcx_gahbcfg_s cn30xx;
+ struct cvmx_usbcx_gahbcfg_s cn31xx;
+ struct cvmx_usbcx_gahbcfg_s cn50xx;
+ struct cvmx_usbcx_gahbcfg_s cn52xx;
+ struct cvmx_usbcx_gahbcfg_s cn52xxp1;
+ struct cvmx_usbcx_gahbcfg_s cn56xx;
+ struct cvmx_usbcx_gahbcfg_s cn56xxp1;
+};
+typedef union cvmx_usbcx_gahbcfg cvmx_usbcx_gahbcfg_t;
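+
+/*
+ * Editorial usage sketch, not part of the SDK: one-time GAHBCFG programming
+ * during core initialization, following the per-field guidance above
+ * (GlblIntrMsk, NPTxFEmpLvl and PTxFEmpLvl set to 0x1, HBstLen left at 0x0).
+ * usbc_read32()/usbc_write32() are hypothetical MMIO helpers.
+ */
+extern uint32_t usbc_read32(uint64_t offset);              /* hypothetical */
+extern void usbc_write32(uint64_t offset, uint32_t value); /* hypothetical */
+
+static inline void example_init_gahbcfg(uint64_t gahbcfg_offset, int use_dma)
+{
+    cvmx_usbcx_gahbcfg_t cfg;
+    cfg.u32 = usbc_read32(gahbcfg_offset);
+    cfg.s.glblintrmsk = 1;         /* unmask the interrupt line */
+    cfg.s.nptxfemplvl = 1;         /* interrupt when completely empty */
+    cfg.s.ptxfemplvl = 1;          /* interrupt when completely empty */
+    cfg.s.hbstlen = 0;             /* no effect; leave as 0x0 */
+    cfg.s.dmaen = use_dma ? 1 : 0; /* Slave mode vs. DMA mode */
+    usbc_write32(gahbcfg_offset, cfg.u32);
+}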
+
+/**
+ * cvmx_usbc#_ghwcfg1
+ *
+ * User HW Config1 Register (GHWCFG1)
+ *
+ * This register contains the logical endpoint direction(s) of the O2P USB core.
+ */
+union cvmx_usbcx_ghwcfg1 {
+ uint32_t u32;
+ struct cvmx_usbcx_ghwcfg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t epdir : 32; /**< Endpoint Direction (epdir)
+ Two bits per endpoint represent the direction.
+ * 2'b00: BIDIR (IN and OUT) endpoint
+ * 2'b01: IN endpoint
+ * 2'b10: OUT endpoint
+ * 2'b11: Reserved
+ Bits [31:30]: Endpoint 15 direction
+ Bits [29:28]: Endpoint 14 direction
+ - ...
+ Bits [3:2]: Endpoint 1 direction
+ Bits[1:0]: Endpoint 0 direction (always BIDIR) */
+#else
+ uint32_t epdir : 32;
+#endif
+ } s;
+ struct cvmx_usbcx_ghwcfg1_s cn30xx;
+ struct cvmx_usbcx_ghwcfg1_s cn31xx;
+ struct cvmx_usbcx_ghwcfg1_s cn50xx;
+ struct cvmx_usbcx_ghwcfg1_s cn52xx;
+ struct cvmx_usbcx_ghwcfg1_s cn52xxp1;
+ struct cvmx_usbcx_ghwcfg1_s cn56xx;
+ struct cvmx_usbcx_ghwcfg1_s cn56xxp1;
+};
+typedef union cvmx_usbcx_ghwcfg1 cvmx_usbcx_ghwcfg1_t;
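+
+/*
+ * Editorial usage sketch, not part of the SDK: reading the 2-bit direction
+ * of endpoint n from GHWCFG1.epdir, which places endpoint n at bits
+ * [2n+1:2n] per the layout above. usbc_read32() is a hypothetical helper.
+ */
+extern uint32_t usbc_read32(uint64_t offset); /* hypothetical */
+
+static inline unsigned example_ep_direction(uint64_t ghwcfg1_offset,
+                                            unsigned ep /* 0..15 */)
+{
+    cvmx_usbcx_ghwcfg1_t cfg;
+    cfg.u32 = usbc_read32(ghwcfg1_offset);
+    return (cfg.s.epdir >> (2 * ep)) & 0x3; /* 0: BIDIR, 1: IN, 2: OUT */
+}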
+
+/**
+ * cvmx_usbc#_ghwcfg2
+ *
+ * User HW Config2 Register (GHWCFG2)
+ *
+ * This register contains configuration options of the O2P USB core.
+ */
+union cvmx_usbcx_ghwcfg2 {
+ uint32_t u32;
+ struct cvmx_usbcx_ghwcfg2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_31_31 : 1;
+ uint32_t tknqdepth : 5; /**< Device Mode IN Token Sequence Learning Queue Depth
+ (TknQDepth)
+ Range: 0-30 */
+ uint32_t ptxqdepth : 2; /**< Host Mode Periodic Request Queue Depth (PTxQDepth)
+ * 2'b00: 2
+ * 2'b01: 4
+ * 2'b10: 8
+ * Others: Reserved */
+ uint32_t nptxqdepth : 2; /**< Non-Periodic Request Queue Depth (NPTxQDepth)
+ * 2'b00: 2
+ * 2'b01: 4
+ * 2'b10: 8
+ * Others: Reserved */
+ uint32_t reserved_20_21 : 2;
+ uint32_t dynfifosizing : 1; /**< Dynamic FIFO Sizing Enabled (DynFifoSizing)
+ * 1'b0: No
+ * 1'b1: Yes */
+ uint32_t periosupport : 1; /**< Periodic OUT Channels Supported in Host Mode
+ (PerioSupport)
+ * 1'b0: No
+ * 1'b1: Yes */
+ uint32_t numhstchnl : 4; /**< Number of Host Channels (NumHstChnl)
+ Indicates the number of host channels supported by the core in
+ Host mode. The range of this field is 0-15: 0 specifies 1
+ channel, 15 specifies 16 channels. */
+ uint32_t numdeveps : 4; /**< Number of Device Endpoints (NumDevEps)
+ Indicates the number of device endpoints supported by the core
+ in Device mode in addition to control endpoint 0. The range of
+ this field is 1-15. */
+ uint32_t fsphytype : 2; /**< Full-Speed PHY Interface Type (FSPhyType)
+ * 2'b00: Full-speed interface not supported
+ * 2'b01: Dedicated full-speed interface
+ * 2'b10: FS pins shared with UTMI+ pins
+ * 2'b11: FS pins shared with ULPI pins */
+ uint32_t hsphytype : 2; /**< High-Speed PHY Interface Type (HSPhyType)
+ * 2'b00: High-Speed interface not supported
+ * 2'b01: UTMI+
+ * 2'b10: ULPI
+ * 2'b11: UTMI+ and ULPI */
+ uint32_t singpnt : 1; /**< Point-to-Point (SingPnt)
+ * 1'b0: Multi-point application
+ * 1'b1: Single-point application */
+ uint32_t otgarch : 2; /**< Architecture (OtgArch)
+ * 2'b00: Slave-Only
+ * 2'b01: External DMA
+ * 2'b10: Internal DMA
+ * Others: Reserved */
+ uint32_t otgmode : 3; /**< Mode of Operation (OtgMode)
+ * 3'b000: HNP- and SRP-Capable OTG (Host & Device)
+ * 3'b001: SRP-Capable OTG (Host & Device)
+ * 3'b010: Non-HNP and Non-SRP Capable OTG (Host &
+ Device)
+ * 3'b011: SRP-Capable Device
+ * 3'b100: Non-OTG Device
+ * 3'b101: SRP-Capable Host
+ * 3'b110: Non-OTG Host
+ * Others: Reserved */
+#else
+ uint32_t otgmode : 3;
+ uint32_t otgarch : 2;
+ uint32_t singpnt : 1;
+ uint32_t hsphytype : 2;
+ uint32_t fsphytype : 2;
+ uint32_t numdeveps : 4;
+ uint32_t numhstchnl : 4;
+ uint32_t periosupport : 1;
+ uint32_t dynfifosizing : 1;
+ uint32_t reserved_20_21 : 2;
+ uint32_t nptxqdepth : 2;
+ uint32_t ptxqdepth : 2;
+ uint32_t tknqdepth : 5;
+ uint32_t reserved_31_31 : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_ghwcfg2_s cn30xx;
+ struct cvmx_usbcx_ghwcfg2_s cn31xx;
+ struct cvmx_usbcx_ghwcfg2_s cn50xx;
+ struct cvmx_usbcx_ghwcfg2_s cn52xx;
+ struct cvmx_usbcx_ghwcfg2_s cn52xxp1;
+ struct cvmx_usbcx_ghwcfg2_s cn56xx;
+ struct cvmx_usbcx_ghwcfg2_s cn56xxp1;
+};
+typedef union cvmx_usbcx_ghwcfg2 cvmx_usbcx_ghwcfg2_t;
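+
+/*
+ * Editorial usage sketch, not part of the SDK: NumHstChnl encodes the host
+ * channel count minus one (0 means 1 channel, 15 means 16), so add one when
+ * sizing per-channel state. usbc_read32() is a hypothetical helper.
+ */
+extern uint32_t usbc_read32(uint64_t offset); /* hypothetical */
+
+static inline unsigned example_host_channel_count(uint64_t ghwcfg2_offset)
+{
+    cvmx_usbcx_ghwcfg2_t cfg;
+    cfg.u32 = usbc_read32(ghwcfg2_offset);
+    return cfg.s.numhstchnl + 1;
+}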
+
+/**
+ * cvmx_usbc#_ghwcfg3
+ *
+ * User HW Config3 Register (GHWCFG3)
+ *
+ * This register contains the configuration options of the O2P USB core.
+ */
+union cvmx_usbcx_ghwcfg3 {
+ uint32_t u32;
+ struct cvmx_usbcx_ghwcfg3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dfifodepth : 16; /**< DFIFO Depth (DfifoDepth)
+ This value is in terms of 32-bit words.
+ * Minimum value is 32
+ * Maximum value is 32768 */
+ uint32_t reserved_13_15 : 3;
+ uint32_t ahbphysync : 1; /**< AHB and PHY Synchronous (AhbPhySync)
+ Indicates whether AHB and PHY clocks are synchronous to
+ each other.
+ * 1'b0: No
+ * 1'b1: Yes
+ This bit is tied to 1. */
+ uint32_t rsttype : 1; /**< Reset Style for Clocked always Blocks in RTL (RstType)
+ * 1'b0: Asynchronous reset is used in the core
+ * 1'b1: Synchronous reset is used in the core */
+ uint32_t optfeature : 1; /**< Optional Features Removed (OptFeature)
+ Indicates whether the User ID register, GPIO interface ports,
+ and SOF toggle and counter ports were removed for gate count
+ optimization. */
+ uint32_t vendor_control_interface_support : 1;/**< Vendor Control Interface Support
+ * 1'b0: Vendor Control Interface is not available on the core.
+ * 1'b1: Vendor Control Interface is available. */
+ uint32_t i2c_selection : 1; /**< I2C Selection
+ * 1'b0: I2C Interface is not available on the core.
+ * 1'b1: I2C Interface is available on the core. */
+ uint32_t otgen : 1; /**< OTG Function Enabled (OtgEn)
+ The application uses this bit to indicate the O2P USB core's
+ OTG capabilities.
+ * 1'b0: Not OTG capable
+ * 1'b1: OTG Capable */
+ uint32_t pktsizewidth : 3; /**< Width of Packet Size Counters (PktSizeWidth)
+ * 3'b000: 4 bits
+ * 3'b001: 5 bits
+ * 3'b010: 6 bits
+ * 3'b011: 7 bits
+ * 3'b100: 8 bits
+ * 3'b101: 9 bits
+ * 3'b110: 10 bits
+ * Others: Reserved */
+ uint32_t xfersizewidth : 4; /**< Width of Transfer Size Counters (XferSizeWidth)
+ * 4'b0000: 11 bits
+ * 4'b0001: 12 bits
+ - ...
+ * 4'b1000: 19 bits
+ * Others: Reserved */
+#else
+ uint32_t xfersizewidth : 4;
+ uint32_t pktsizewidth : 3;
+ uint32_t otgen : 1;
+ uint32_t i2c_selection : 1;
+ uint32_t vendor_control_interface_support : 1;
+ uint32_t optfeature : 1;
+ uint32_t rsttype : 1;
+ uint32_t ahbphysync : 1;
+ uint32_t reserved_13_15 : 3;
+ uint32_t dfifodepth : 16;
+#endif
+ } s;
+ struct cvmx_usbcx_ghwcfg3_s cn30xx;
+ struct cvmx_usbcx_ghwcfg3_s cn31xx;
+ struct cvmx_usbcx_ghwcfg3_s cn50xx;
+ struct cvmx_usbcx_ghwcfg3_s cn52xx;
+ struct cvmx_usbcx_ghwcfg3_s cn52xxp1;
+ struct cvmx_usbcx_ghwcfg3_s cn56xx;
+ struct cvmx_usbcx_ghwcfg3_s cn56xxp1;
+};
+typedef union cvmx_usbcx_ghwcfg3 cvmx_usbcx_ghwcfg3_t;
+
+/**
+ * cvmx_usbc#_ghwcfg4
+ *
+ * User HW Config4 Register (GHWCFG4)
+ *
+ * This register contains the configuration options of the O2P USB core.
+ */
+union cvmx_usbcx_ghwcfg4 {
+ uint32_t u32;
+ struct cvmx_usbcx_ghwcfg4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_30_31 : 2;
+ uint32_t numdevmodinend : 4; /**< Number of Device Mode IN Endpoints (NumDevModInEnd) */
+ uint32_t endedtrfifo : 1; /**< Enable dedicated transmit FIFO for device IN endpoints. */
+ uint32_t sessendfltr : 1; /**< "session_end" Filter Enabled (SessEndFltr)
+ * 1'b0: No filter
+ * 1'b1: Filter */
+ uint32_t bvalidfltr : 1; /**< "b_valid" Filter Enabled (BValidFltr)
+ * 1'b0: No filter
+ * 1'b1: Filter */
+ uint32_t avalidfltr : 1; /**< "a_valid" Filter Enabled (AValidFltr)
+ * 1'b0: No filter
+ * 1'b1: Filter */
+ uint32_t vbusvalidfltr : 1; /**< "vbus_valid" Filter Enabled (VBusValidFltr)
+ * 1'b0: No filter
+ * 1'b1: Filter */
+ uint32_t iddgfltr : 1; /**< "iddig" Filter Enable (IddgFltr)
+ * 1'b0: No filter
+ * 1'b1: Filter */
+ uint32_t numctleps : 4; /**< Number of Device Mode Control Endpoints in Addition to
+ Endpoint 0 (NumCtlEps)
+ Range: 1-15 */
+ uint32_t phydatawidth : 2; /**< UTMI+ PHY/ULPI-to-Internal UTMI+ Wrapper Data Width
+ (PhyDataWidth)
+ When a ULPI PHY is used, an internal wrapper converts ULPI
+ to UTMI+.
+ * 2'b00: 8 bits
+ * 2'b01: 16 bits
+ * 2'b10: 8/16 bits, software selectable
+ * Others: Reserved */
+ uint32_t reserved_6_13 : 8;
+ uint32_t ahbfreq : 1; /**< Minimum AHB Frequency Less Than 60 MHz (AhbFreq)
+ * 1'b0: No
+ * 1'b1: Yes */
+ uint32_t enablepwropt : 1; /**< Enable Power Optimization? (EnablePwrOpt)
+ * 1'b0: No
+ * 1'b1: Yes */
+ uint32_t numdevperioeps : 4; /**< Number of Device Mode Periodic IN Endpoints
+ (NumDevPerioEps)
+ Range: 0-15 */
+#else
+ uint32_t numdevperioeps : 4;
+ uint32_t enablepwropt : 1;
+ uint32_t ahbfreq : 1;
+ uint32_t reserved_6_13 : 8;
+ uint32_t phydatawidth : 2;
+ uint32_t numctleps : 4;
+ uint32_t iddgfltr : 1;
+ uint32_t vbusvalidfltr : 1;
+ uint32_t avalidfltr : 1;
+ uint32_t bvalidfltr : 1;
+ uint32_t sessendfltr : 1;
+ uint32_t endedtrfifo : 1;
+ uint32_t numdevmodinend : 4;
+ uint32_t reserved_30_31 : 2;
+#endif
+ } s;
+ struct cvmx_usbcx_ghwcfg4_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t sessendfltr : 1; /**< "session_end" Filter Enabled (SessEndFltr)
+ * 1'b0: No filter
+ * 1'b1: Filter */
+ uint32_t bvalidfltr : 1; /**< "b_valid" Filter Enabled (BValidFltr)
+ * 1'b0: No filter
+ * 1'b1: Filter */
+ uint32_t avalidfltr : 1; /**< "a_valid" Filter Enabled (AValidFltr)
+ * 1'b0: No filter
+ * 1'b1: Filter */
+ uint32_t vbusvalidfltr : 1; /**< "vbus_valid" Filter Enabled (VBusValidFltr)
+ * 1'b0: No filter
+ * 1'b1: Filter */
+ uint32_t iddgfltr : 1; /**< "iddig" Filter Enable (IddgFltr)
+ * 1'b0: No filter
+ * 1'b1: Filter */
+ uint32_t numctleps : 4; /**< Number of Device Mode Control Endpoints in Addition to
+ Endpoint 0 (NumCtlEps)
+ Range: 1-15 */
+ uint32_t phydatawidth : 2; /**< UTMI+ PHY/ULPI-to-Internal UTMI+ Wrapper Data Width
+ (PhyDataWidth)
+ When a ULPI PHY is used, an internal wrapper converts ULPI
+ to UTMI+.
+ * 2'b00: 8 bits
+ * 2'b01: 16 bits
+ * 2'b10: 8/16 bits, software selectable
+ * Others: Reserved */
+ uint32_t reserved_6_13 : 8;
+ uint32_t ahbfreq : 1; /**< Minimum AHB Frequency Less Than 60 MHz (AhbFreq)
+ * 1'b0: No
+ * 1'b1: Yes */
+ uint32_t enablepwropt : 1; /**< Enable Power Optimization? (EnablePwrOpt)
+ * 1'b0: No
+ * 1'b1: Yes */
+ uint32_t numdevperioeps : 4; /**< Number of Device Mode Periodic IN Endpoints
+ (NumDevPerioEps)
+ Range: 0-15 */
+#else
+ uint32_t numdevperioeps : 4;
+ uint32_t enablepwropt : 1;
+ uint32_t ahbfreq : 1;
+ uint32_t reserved_6_13 : 8;
+ uint32_t phydatawidth : 2;
+ uint32_t numctleps : 4;
+ uint32_t iddgfltr : 1;
+ uint32_t vbusvalidfltr : 1;
+ uint32_t avalidfltr : 1;
+ uint32_t bvalidfltr : 1;
+ uint32_t sessendfltr : 1;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } cn30xx;
+ struct cvmx_usbcx_ghwcfg4_cn30xx cn31xx;
+ struct cvmx_usbcx_ghwcfg4_s cn50xx;
+ struct cvmx_usbcx_ghwcfg4_s cn52xx;
+ struct cvmx_usbcx_ghwcfg4_s cn52xxp1;
+ struct cvmx_usbcx_ghwcfg4_s cn56xx;
+ struct cvmx_usbcx_ghwcfg4_s cn56xxp1;
+};
+typedef union cvmx_usbcx_ghwcfg4 cvmx_usbcx_ghwcfg4_t;
+
+/**
+ * cvmx_usbc#_gintmsk
+ *
+ * Core Interrupt Mask Register (GINTMSK)
+ *
+ * This register works with the Core Interrupt register to interrupt the application.
+ * When an interrupt bit is masked, the interrupt associated with that bit will not be generated.
+ * However, the Core Interrupt (GINTSTS) register bit corresponding to that interrupt will still be set.
+ * Mask interrupt: 1'b0, Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_gintmsk {
+ uint32_t u32;
+ struct cvmx_usbcx_gintmsk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t wkupintmsk : 1; /**< Resume/Remote Wakeup Detected Interrupt Mask
+ (WkUpIntMsk) */
+ uint32_t sessreqintmsk : 1; /**< Session Request/New Session Detected Interrupt Mask
+ (SessReqIntMsk) */
+ uint32_t disconnintmsk : 1; /**< Disconnect Detected Interrupt Mask (DisconnIntMsk) */
+ uint32_t conidstschngmsk : 1; /**< Connector ID Status Change Mask (ConIDStsChngMsk) */
+ uint32_t reserved_27_27 : 1;
+ uint32_t ptxfempmsk : 1; /**< Periodic TxFIFO Empty Mask (PTxFEmpMsk) */
+ uint32_t hchintmsk : 1; /**< Host Channels Interrupt Mask (HChIntMsk) */
+ uint32_t prtintmsk : 1; /**< Host Port Interrupt Mask (PrtIntMsk) */
+ uint32_t reserved_23_23 : 1;
+ uint32_t fetsuspmsk : 1; /**< Data Fetch Suspended Mask (FetSuspMsk) */
+ uint32_t incomplpmsk : 1; /**< Incomplete Periodic Transfer Mask (incomplPMsk)
+ Incomplete Isochronous OUT Transfer Mask
+ (incompISOOUTMsk) */
+ uint32_t incompisoinmsk : 1; /**< Incomplete Isochronous IN Transfer Mask (incompISOINMsk) */
+ uint32_t oepintmsk : 1; /**< OUT Endpoints Interrupt Mask (OEPIntMsk) */
+ uint32_t inepintmsk : 1; /**< IN Endpoints Interrupt Mask (INEPIntMsk) */
+ uint32_t epmismsk : 1; /**< Endpoint Mismatch Interrupt Mask (EPMisMsk) */
+ uint32_t reserved_16_16 : 1;
+ uint32_t eopfmsk : 1; /**< End of Periodic Frame Interrupt Mask (EOPFMsk) */
+ uint32_t isooutdropmsk : 1; /**< Isochronous OUT Packet Dropped Interrupt Mask
+ (ISOOutDropMsk) */
+ uint32_t enumdonemsk : 1; /**< Enumeration Done Mask (EnumDoneMsk) */
+ uint32_t usbrstmsk : 1; /**< USB Reset Mask (USBRstMsk) */
+ uint32_t usbsuspmsk : 1; /**< USB Suspend Mask (USBSuspMsk) */
+ uint32_t erlysuspmsk : 1; /**< Early Suspend Mask (ErlySuspMsk) */
+ uint32_t i2cint : 1; /**< I2C Interrupt Mask (I2CINT) */
+ uint32_t ulpickintmsk : 1; /**< ULPI Carkit Interrupt Mask (ULPICKINTMsk)
+ I2C Carkit Interrupt Mask (I2CCKINTMsk) */
+ uint32_t goutnakeffmsk : 1; /**< Global OUT NAK Effective Mask (GOUTNakEffMsk) */
+ uint32_t ginnakeffmsk : 1; /**< Global Non-Periodic IN NAK Effective Mask (GINNakEffMsk) */
+ uint32_t nptxfempmsk : 1; /**< Non-Periodic TxFIFO Empty Mask (NPTxFEmpMsk) */
+ uint32_t rxflvlmsk : 1; /**< Receive FIFO Non-Empty Mask (RxFLvlMsk) */
+ uint32_t sofmsk : 1; /**< Start of (micro)Frame Mask (SofMsk) */
+ uint32_t otgintmsk : 1; /**< OTG Interrupt Mask (OTGIntMsk) */
+ uint32_t modemismsk : 1; /**< Mode Mismatch Interrupt Mask (ModeMisMsk) */
+ uint32_t reserved_0_0 : 1;
+#else
+ uint32_t reserved_0_0 : 1;
+ uint32_t modemismsk : 1;
+ uint32_t otgintmsk : 1;
+ uint32_t sofmsk : 1;
+ uint32_t rxflvlmsk : 1;
+ uint32_t nptxfempmsk : 1;
+ uint32_t ginnakeffmsk : 1;
+ uint32_t goutnakeffmsk : 1;
+ uint32_t ulpickintmsk : 1;
+ uint32_t i2cint : 1;
+ uint32_t erlysuspmsk : 1;
+ uint32_t usbsuspmsk : 1;
+ uint32_t usbrstmsk : 1;
+ uint32_t enumdonemsk : 1;
+ uint32_t isooutdropmsk : 1;
+ uint32_t eopfmsk : 1;
+ uint32_t reserved_16_16 : 1;
+ uint32_t epmismsk : 1;
+ uint32_t inepintmsk : 1;
+ uint32_t oepintmsk : 1;
+ uint32_t incompisoinmsk : 1;
+ uint32_t incomplpmsk : 1;
+ uint32_t fetsuspmsk : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t prtintmsk : 1;
+ uint32_t hchintmsk : 1;
+ uint32_t ptxfempmsk : 1;
+ uint32_t reserved_27_27 : 1;
+ uint32_t conidstschngmsk : 1;
+ uint32_t disconnintmsk : 1;
+ uint32_t sessreqintmsk : 1;
+ uint32_t wkupintmsk : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_gintmsk_s cn30xx;
+ struct cvmx_usbcx_gintmsk_s cn31xx;
+ struct cvmx_usbcx_gintmsk_s cn50xx;
+ struct cvmx_usbcx_gintmsk_s cn52xx;
+ struct cvmx_usbcx_gintmsk_s cn52xxp1;
+ struct cvmx_usbcx_gintmsk_s cn56xx;
+ struct cvmx_usbcx_gintmsk_s cn56xxp1;
+};
+typedef union cvmx_usbcx_gintmsk cvmx_usbcx_gintmsk_t;
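+
+/*
+ * Editorial usage sketch, not part of the SDK: unmasking a typical host-mode
+ * set of core interrupts (1'b1 unmasks, per the description above) while
+ * leaving everything else masked. usbc_write32() is a hypothetical helper.
+ */
+extern void usbc_write32(uint64_t offset, uint32_t value); /* hypothetical */
+
+static inline void example_unmask_host_ints(uint64_t gintmsk_offset)
+{
+    cvmx_usbcx_gintmsk_t msk;
+    msk.u32 = 0;             /* start with everything masked */
+    msk.s.prtintmsk = 1;     /* host port events */
+    msk.s.hchintmsk = 1;     /* per-channel interrupts */
+    msk.s.disconnintmsk = 1; /* device disconnect */
+    msk.s.rxflvlmsk = 1;     /* RxFIFO non-empty (Slave mode) */
+    usbc_write32(gintmsk_offset, msk.u32);
+}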
+
+/**
+ * cvmx_usbc#_gintsts
+ *
+ * Core Interrupt Register (GINTSTS)
+ *
+ * This register interrupts the application for system-level events in the current mode of operation
+ * (Device mode or Host mode). Some of the bits in this register are valid only in Host mode,
+ * while others are valid in Device mode only. This register also indicates the current mode of operation.
+ * In order to clear the interrupt status bits of type R_SS_WC, the application must write 1'b1 into the bit.
+ * The FIFO status interrupts are read only; once software reads from or writes to the FIFO while servicing these
+ * interrupts, FIFO interrupt conditions are cleared automatically.
+ */
+union cvmx_usbcx_gintsts {
+ uint32_t u32;
+ struct cvmx_usbcx_gintsts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t wkupint : 1; /**< Resume/Remote Wakeup Detected Interrupt (WkUpInt)
+ In Device mode, this interrupt is asserted when a resume is
+ detected on the USB. In Host mode, this interrupt is asserted
+ when a remote wakeup is detected on the USB.
+ For more information on how to use this interrupt, see "Partial
+ Power-Down and Clock Gating Programming Model" on
+ page 353. */
+ uint32_t sessreqint : 1; /**< Session Request/New Session Detected Interrupt (SessReqInt)
+ In Host mode, this interrupt is asserted when a session request
+ is detected from the device. In Device mode, this interrupt is
+ asserted when the utmiotg_bvalid signal goes high.
+ For more information on how to use this interrupt, see "Partial
+ Power-Down and Clock Gating Programming Model" on
+ page 353. */
+ uint32_t disconnint : 1; /**< Disconnect Detected Interrupt (DisconnInt)
+ Asserted when a device disconnect is detected. */
+ uint32_t conidstschng : 1; /**< Connector ID Status Change (ConIDStsChng)
+ The core sets this bit when there is a change in connector ID
+ status. */
+ uint32_t reserved_27_27 : 1;
+ uint32_t ptxfemp : 1; /**< Periodic TxFIFO Empty (PTxFEmp)
+ Asserted when the Periodic Transmit FIFO is either half or
+ completely empty and there is space for at least one entry to be
+ written in the Periodic Request Queue. The half or completely
+ empty status is determined by the Periodic TxFIFO Empty Level
+ bit in the Core AHB Configuration register
+ (GAHBCFG.PTxFEmpLvl). */
+ uint32_t hchint : 1; /**< Host Channels Interrupt (HChInt)
+ The core sets this bit to indicate that an interrupt is pending on
+ one of the channels of the core (in Host mode). The application
+ must read the Host All Channels Interrupt (HAINT) register to
+ determine the exact number of the channel on which the
+ interrupt occurred, and then read the corresponding Host
+ Channel-n Interrupt (HCINTn) register to determine the exact
+ cause of the interrupt. The application must clear the
+ appropriate status bit in the HCINTn register to clear this bit. */
+ uint32_t prtint : 1; /**< Host Port Interrupt (PrtInt)
+ The core sets this bit to indicate a change in port status of one
+ of the O2P USB core ports in Host mode. The application must
+ read the Host Port Control and Status (HPRT) register to
+ determine the exact event that caused this interrupt. The
+ application must clear the appropriate status bit in the Host Port
+ Control and Status register to clear this bit. */
+ uint32_t reserved_23_23 : 1;
+ uint32_t fetsusp : 1; /**< Data Fetch Suspended (FetSusp)
+ This interrupt is valid only in DMA mode. This interrupt indicates
+ that the core has stopped fetching data for IN endpoints due to
+ the unavailability of TxFIFO space or Request Queue space.
+ This interrupt is used by the application for an endpoint
+ mismatch algorithm. */
+ uint32_t incomplp : 1; /**< Incomplete Periodic Transfer (incomplP)
+ In Host mode, the core sets this interrupt bit when there are
+ incomplete periodic transactions still pending which are
+ scheduled for the current microframe.
+ Incomplete Isochronous OUT Transfer (incompISOOUT)
+ In Device mode, the core sets this interrupt to indicate that
+ there is at least one isochronous OUT endpoint on which the
+ transfer is not completed in the current microframe. This
+ interrupt is asserted along with the End of Periodic Frame
+ Interrupt (EOPF) bit in this register. */
+ uint32_t incompisoin : 1; /**< Incomplete Isochronous IN Transfer (incompISOIN)
+ The core sets this interrupt to indicate that there is at least one
+ isochronous IN endpoint on which the transfer is not completed
+ in the current microframe. This interrupt is asserted along with
+ the End of Periodic Frame Interrupt (EOPF) bit in this register. */
+ uint32_t oepint : 1; /**< OUT Endpoints Interrupt (OEPInt)
+ The core sets this bit to indicate that an interrupt is pending on
+ one of the OUT endpoints of the core (in Device mode). The
+ application must read the Device All Endpoints Interrupt
+ (DAINT) register to determine the exact number of the OUT
+ endpoint on which the interrupt occurred, and then read the
+ corresponding Device OUT Endpoint-n Interrupt (DOEPINTn)
+ register to determine the exact cause of the interrupt. The
+ application must clear the appropriate status bit in the
+ corresponding DOEPINTn register to clear this bit. */
+ uint32_t iepint : 1; /**< IN Endpoints Interrupt (IEPInt)
+ The core sets this bit to indicate that an interrupt is pending on
+ one of the IN endpoints of the core (in Device mode). The
+ application must read the Device All Endpoints Interrupt
+ (DAINT) register to determine the exact number of the IN
+ endpoint on which the interrupt occurred, and then read the
+ corresponding Device IN Endpoint-n Interrupt (DIEPINTn)
+ register to determine the exact cause of the interrupt. The
+ application must clear the appropriate status bit in the
+ corresponding DIEPINTn register to clear this bit. */
+ uint32_t epmis : 1; /**< Endpoint Mismatch Interrupt (EPMis)
+ Indicates that an IN token has been received for a non-periodic
+ endpoint, but the data for another endpoint is present in the top
+ of the Non-Periodic Transmit FIFO and the IN endpoint
+ mismatch count programmed by the application has expired. */
+ uint32_t reserved_16_16 : 1;
+ uint32_t eopf : 1; /**< End of Periodic Frame Interrupt (EOPF)
+ Indicates that the period specified in the Periodic Frame Interval
+ field of the Device Configuration register (DCFG.PerFrInt) has
+ been reached in the current microframe. */
+ uint32_t isooutdrop : 1; /**< Isochronous OUT Packet Dropped Interrupt (ISOOutDrop)
+ The core sets this bit when it fails to write an isochronous OUT
+ packet into the RxFIFO because the RxFIFO doesn't have
+ enough space to accommodate a maximum packet size packet
+ for the isochronous OUT endpoint. */
+ uint32_t enumdone : 1; /**< Enumeration Done (EnumDone)
+ The core sets this bit to indicate that speed enumeration is
+ complete. The application must read the Device Status (DSTS)
+ register to obtain the enumerated speed. */
+ uint32_t usbrst : 1; /**< USB Reset (USBRst)
+ The core sets this bit to indicate that a reset is detected on the
+ USB. */
+ uint32_t usbsusp : 1; /**< USB Suspend (USBSusp)
+ The core sets this bit to indicate that a suspend was detected
+ on the USB. The core enters the Suspended state when there
+ is no activity on the phy_line_state_i signal for an extended
+ period of time. */
+ uint32_t erlysusp : 1; /**< Early Suspend (ErlySusp)
+ The core sets this bit to indicate that an Idle state has been
+ detected on the USB for 3 ms. */
+ uint32_t i2cint : 1; /**< I2C Interrupt (I2CINT)
+ This bit is always 0x0. */
+ uint32_t ulpickint : 1; /**< ULPI Carkit Interrupt (ULPICKINT)
+ This bit is always 0x0. */
+ uint32_t goutnakeff : 1; /**< Global OUT NAK Effective (GOUTNakEff)
+ Indicates that the Set Global OUT NAK bit in the Device Control
+ register (DCTL.SGOUTNak), set by the application, has taken
+ effect in the core. This bit can be cleared by writing the Clear
+ Global OUT NAK bit in the Device Control register
+ (DCTL.CGOUTNak). */
+ uint32_t ginnakeff : 1; /**< Global IN Non-Periodic NAK Effective (GINNakEff)
+ Indicates that the Set Global Non-Periodic IN NAK bit in the
+ Device Control register (DCTL.SGNPInNak), set by the
+ application, has taken effect in the core. That is, the core has
+ sampled the Global IN NAK bit set by the application. This bit
+ can be cleared by clearing the Clear Global Non-Periodic IN
+ NAK bit in the Device Control register (DCTL.CGNPInNak).
+ This interrupt does not necessarily mean that a NAK handshake
+ is sent out on the USB. The STALL bit takes precedence over
+ the NAK bit. */
+ uint32_t nptxfemp : 1; /**< Non-Periodic TxFIFO Empty (NPTxFEmp)
+ This interrupt is asserted when the Non-Periodic TxFIFO is
+ either half or completely empty, and there is space for at least
+ one entry to be written to the Non-Periodic Transmit Request
+ Queue. The half or completely empty status is determined by
+ the Non-Periodic TxFIFO Empty Level bit in the Core AHB
+ Configuration register (GAHBCFG.NPTxFEmpLvl). */
+ uint32_t rxflvl : 1; /**< RxFIFO Non-Empty (RxFLvl)
+ Indicates that there is at least one packet pending to be read
+ from the RxFIFO. */
+ uint32_t sof : 1; /**< Start of (micro)Frame (Sof)
+ In Host mode, the core sets this bit to indicate that an SOF
+ (FS), micro-SOF (HS), or Keep-Alive (LS) is transmitted on the
+ USB. The application must write a 1 to this bit to clear the
+ interrupt.
+ In Device mode, the core sets this bit to indicate that an SOF
+ token has been received on the USB. The application can read
+ the Device Status register to get the current (micro)frame
+ number. This interrupt is seen only when the core is operating
+ at either HS or FS. */
+ uint32_t otgint : 1; /**< OTG Interrupt (OTGInt)
+ The core sets this bit to indicate an OTG protocol event. The
+ application must read the OTG Interrupt Status (GOTGINT)
+ register to determine the exact event that caused this interrupt.
+ The application must clear the appropriate status bit in the
+ GOTGINT register to clear this bit. */
+ uint32_t modemis : 1; /**< Mode Mismatch Interrupt (ModeMis)
+ The core sets this bit when the application is trying to access:
+ * A Host mode register, when the core is operating in Device
+ mode
+ * A Device mode register, when the core is operating in Host
+ mode
+ The register access is completed on the AHB with an OKAY
+ response, but is ignored by the core internally and doesn't
+ affect the operation of the core. */
+ uint32_t curmod : 1; /**< Current Mode of Operation (CurMod)
+ Indicates the current mode of operation.
+ * 1'b0: Device mode
+ * 1'b1: Host mode */
+#else
+ uint32_t curmod : 1;
+ uint32_t modemis : 1;
+ uint32_t otgint : 1;
+ uint32_t sof : 1;
+ uint32_t rxflvl : 1;
+ uint32_t nptxfemp : 1;
+ uint32_t ginnakeff : 1;
+ uint32_t goutnakeff : 1;
+ uint32_t ulpickint : 1;
+ uint32_t i2cint : 1;
+ uint32_t erlysusp : 1;
+ uint32_t usbsusp : 1;
+ uint32_t usbrst : 1;
+ uint32_t enumdone : 1;
+ uint32_t isooutdrop : 1;
+ uint32_t eopf : 1;
+ uint32_t reserved_16_16 : 1;
+ uint32_t epmis : 1;
+ uint32_t iepint : 1;
+ uint32_t oepint : 1;
+ uint32_t incompisoin : 1;
+ uint32_t incomplp : 1;
+ uint32_t fetsusp : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t prtint : 1;
+ uint32_t hchint : 1;
+ uint32_t ptxfemp : 1;
+ uint32_t reserved_27_27 : 1;
+ uint32_t conidstschng : 1;
+ uint32_t disconnint : 1;
+ uint32_t sessreqint : 1;
+ uint32_t wkupint : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_gintsts_s cn30xx;
+ struct cvmx_usbcx_gintsts_s cn31xx;
+ struct cvmx_usbcx_gintsts_s cn50xx;
+ struct cvmx_usbcx_gintsts_s cn52xx;
+ struct cvmx_usbcx_gintsts_s cn52xxp1;
+ struct cvmx_usbcx_gintsts_s cn56xx;
+ struct cvmx_usbcx_gintsts_s cn56xxp1;
+};
+typedef union cvmx_usbcx_gintsts cvmx_usbcx_gintsts_t;
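+
+/*
+ * Editorial usage sketch, not part of the SDK: acknowledging the
+ * write-1-to-clear (R_SS_WC) status bits by writing back only the bits being
+ * serviced, as the description above requires. usbc_read32()/usbc_write32()
+ * are hypothetical MMIO helpers.
+ */
+extern uint32_t usbc_read32(uint64_t offset);              /* hypothetical */
+extern void usbc_write32(uint64_t offset, uint32_t value); /* hypothetical */
+
+static inline void example_ack_sof_and_usbrst(uint64_t gintsts_offset)
+{
+    cvmx_usbcx_gintsts_t sts, ack;
+    sts.u32 = usbc_read32(gintsts_offset);
+    ack.u32 = 0;
+    ack.s.sof = sts.s.sof;       /* write 1'b1 to clear */
+    ack.s.usbrst = sts.s.usbrst; /* write 1'b1 to clear */
+    usbc_write32(gintsts_offset, ack.u32);
+}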
+
+/**
+ * cvmx_usbc#_gnptxfsiz
+ *
+ * Non-Periodic Transmit FIFO Size Register (GNPTXFSIZ)
+ *
+ * The application can program the RAM size and the memory start address for the Non-Periodic TxFIFO.
+ */
+union cvmx_usbcx_gnptxfsiz {
+ uint32_t u32;
+ struct cvmx_usbcx_gnptxfsiz_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t nptxfdep : 16; /**< Non-Periodic TxFIFO Depth (NPTxFDep)
+ This value is in terms of 32-bit words.
+ Minimum value is 16
+ Maximum value is 32768 */
+ uint32_t nptxfstaddr : 16; /**< Non-Periodic Transmit RAM Start Address (NPTxFStAddr)
+ This field contains the memory start address for Non-Periodic
+ Transmit FIFO RAM. */
+#else
+ uint32_t nptxfstaddr : 16;
+ uint32_t nptxfdep : 16;
+#endif
+ } s;
+ struct cvmx_usbcx_gnptxfsiz_s cn30xx;
+ struct cvmx_usbcx_gnptxfsiz_s cn31xx;
+ struct cvmx_usbcx_gnptxfsiz_s cn50xx;
+ struct cvmx_usbcx_gnptxfsiz_s cn52xx;
+ struct cvmx_usbcx_gnptxfsiz_s cn52xxp1;
+ struct cvmx_usbcx_gnptxfsiz_s cn56xx;
+ struct cvmx_usbcx_gnptxfsiz_s cn56xxp1;
+};
+typedef union cvmx_usbcx_gnptxfsiz cvmx_usbcx_gnptxfsiz_t;
+
+/**
+ * cvmx_usbc#_gnptxsts
+ *
+ * Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS)
+ *
+ * This read-only register contains the free space information for the Non-Periodic TxFIFO and
+ * the Non-Periodic Transmit Request Queue.
+ */
+union cvmx_usbcx_gnptxsts {
+ uint32_t u32;
+ struct cvmx_usbcx_gnptxsts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_31_31 : 1;
+ uint32_t nptxqtop : 7; /**< Top of the Non-Periodic Transmit Request Queue (NPTxQTop)
+ Entry in the Non-Periodic Tx Request Queue that is currently
+ being processed by the MAC.
+ * Bits [30:27]: Channel/endpoint number
+ * Bits [26:25]:
+ - 2'b00: IN/OUT token
+ - 2'b01: Zero-length transmit packet (device IN/host OUT)
+ - 2'b10: PING/CSPLIT token
+ - 2'b11: Channel halt command
+ * Bit [24]: Terminate (last entry for selected channel/endpoint) */
+ uint32_t nptxqspcavail : 8; /**< Non-Periodic Transmit Request Queue Space Available
+ (NPTxQSpcAvail)
+ Indicates the amount of free space available in the Non-
+ Periodic Transmit Request Queue. This queue holds both IN
+ and OUT requests in Host mode. Device mode has only IN
+ requests.
+ * 8'h0: Non-Periodic Transmit Request Queue is full
+ * 8'h1: 1 location available
+ * 8'h2: 2 locations available
+ * 8'hn: n locations available (n = 0..8)
+ * Others: Reserved */
+ uint32_t nptxfspcavail : 16; /**< Non-Periodic TxFIFO Space Avail (NPTxFSpcAvail)
+ Indicates the amount of free space available in the Non-
+ Periodic TxFIFO.
+ Values are in terms of 32-bit words.
+ * 16'h0: Non-Periodic TxFIFO is full
+ * 16'h1: 1 word available
+ * 16'h2: 2 words available
+ * 16'hn: n words available (where n is 0..32768)
+ * 16'h8000: 32768 words available
+ * Others: Reserved */
+#else
+ uint32_t nptxfspcavail : 16;
+ uint32_t nptxqspcavail : 8;
+ uint32_t nptxqtop : 7;
+ uint32_t reserved_31_31 : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_gnptxsts_s cn30xx;
+ struct cvmx_usbcx_gnptxsts_s cn31xx;
+ struct cvmx_usbcx_gnptxsts_s cn50xx;
+ struct cvmx_usbcx_gnptxsts_s cn52xx;
+ struct cvmx_usbcx_gnptxsts_s cn52xxp1;
+ struct cvmx_usbcx_gnptxsts_s cn56xx;
+ struct cvmx_usbcx_gnptxsts_s cn56xxp1;
+};
+typedef union cvmx_usbcx_gnptxsts cvmx_usbcx_gnptxsts_t;
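+
+/*
+ * Editorial usage sketch, not part of the SDK: splitting NPTxQTop into the
+ * subfields listed above. The 7-bit field covers register bits [30:24], so
+ * bit 0 of the field is the Terminate bit, bits [2:1] the token type, and
+ * bits [6:3] the channel/endpoint number. usbc_read32() is hypothetical.
+ */
+extern uint32_t usbc_read32(uint64_t offset); /* hypothetical */
+
+static inline void example_decode_nptxqtop(uint64_t gnptxsts_offset,
+                                           unsigned *chan, unsigned *token,
+                                           unsigned *terminate)
+{
+    cvmx_usbcx_gnptxsts_t sts;
+    sts.u32 = usbc_read32(gnptxsts_offset);
+    *terminate = sts.s.nptxqtop & 0x1;
+    *token = (sts.s.nptxqtop >> 1) & 0x3;
+    *chan = (sts.s.nptxqtop >> 3) & 0xF;
+}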
+
+/**
+ * cvmx_usbc#_gotgctl
+ *
+ * OTG Control and Status Register (GOTGCTL)
+ *
+ * The OTG Control and Status register controls the behavior and reflects the status of the OTG function of the core.
+ */
+union cvmx_usbcx_gotgctl {
+ uint32_t u32;
+ struct cvmx_usbcx_gotgctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t bsesvld : 1; /**< B-Session Valid (BSesVld)
+ Valid only when the O2P USB core is configured as a USB device.
+ Indicates the Device mode transceiver status.
+ * 1'b0: B-session is not valid.
+ * 1'b1: B-session is valid. */
+ uint32_t asesvld : 1; /**< A-Session Valid (ASesVld)
+ Valid only when the O2P USB core is configured as a USB host.
+ Indicates the Host mode transceiver status.
+ * 1'b0: A-session is not valid
+ * 1'b1: A-session is valid */
+ uint32_t dbnctime : 1; /**< Long/Short Debounce Time (DbncTime)
+ In the present version of the core this bit will only read as '0'. */
+ uint32_t conidsts : 1; /**< Connector ID Status (ConIDSts)
+ Indicates the connector ID status on a connect event.
+ * 1'b0: The O2P USB core is in A-device mode
+ * 1'b1: The O2P USB core is in B-device mode */
+ uint32_t reserved_12_15 : 4;
+ uint32_t devhnpen : 1; /**< Device HNP Enabled (DevHNPEn)
+ Since the O2P USB core is not HNP capable, this bit is 0x0. */
+ uint32_t hstsethnpen : 1; /**< Host Set HNP Enable (HstSetHNPEn)
+ Since the O2P USB core is not HNP capable, this bit is 0x0. */
+ uint32_t hnpreq : 1; /**< HNP Request (HNPReq)
+ Since the O2P USB core is not HNP capable, this bit is 0x0. */
+ uint32_t hstnegscs : 1; /**< Host Negotiation Success (HstNegScs)
+ Since the O2P USB core is not HNP capable, this bit is 0x0. */
+ uint32_t reserved_2_7 : 6;
+ uint32_t sesreq : 1; /**< Session Request (SesReq)
+ Since the O2P USB core is not SRP capable, this bit is 0x0. */
+ uint32_t sesreqscs : 1; /**< Session Request Success (SesReqScs)
+ Since the O2P USB core is not SRP capable, this bit is 0x0. */
+#else
+ uint32_t sesreqscs : 1;
+ uint32_t sesreq : 1;
+ uint32_t reserved_2_7 : 6;
+ uint32_t hstnegscs : 1;
+ uint32_t hnpreq : 1;
+ uint32_t hstsethnpen : 1;
+ uint32_t devhnpen : 1;
+ uint32_t reserved_12_15 : 4;
+ uint32_t conidsts : 1;
+ uint32_t dbnctime : 1;
+ uint32_t asesvld : 1;
+ uint32_t bsesvld : 1;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_usbcx_gotgctl_s cn30xx;
+ struct cvmx_usbcx_gotgctl_s cn31xx;
+ struct cvmx_usbcx_gotgctl_s cn50xx;
+ struct cvmx_usbcx_gotgctl_s cn52xx;
+ struct cvmx_usbcx_gotgctl_s cn52xxp1;
+ struct cvmx_usbcx_gotgctl_s cn56xx;
+ struct cvmx_usbcx_gotgctl_s cn56xxp1;
+};
+typedef union cvmx_usbcx_gotgctl cvmx_usbcx_gotgctl_t;
+
+/**
+ * cvmx_usbc#_gotgint
+ *
+ * OTG Interrupt Register (GOTGINT)
+ *
+ * The application reads this register whenever there is an OTG interrupt and clears the bits in this register
+ * to clear the OTG interrupt.
+ */
+union cvmx_usbcx_gotgint {
+ uint32_t u32;
+ struct cvmx_usbcx_gotgint_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31 : 12;
+ uint32_t dbncedone : 1; /**< Debounce Done (DbnceDone)
+ In the present version of the core this bit is tied to '0'. */
+ uint32_t adevtoutchg : 1; /**< A-Device Timeout Change (ADevTOUTChg)
+ Since the O2P USB core is not HNP or SRP capable, this bit is always 0x0. */
+ uint32_t hstnegdet : 1; /**< Host Negotiation Detected (HstNegDet)
+ Since the O2P USB core is not HNP or SRP capable, this bit is always 0x0. */
+ uint32_t reserved_10_16 : 7;
+ uint32_t hstnegsucstschng : 1; /**< Host Negotiation Success Status Change (HstNegSucStsChng)
+ Since the O2P USB core is not HNP or SRP capable, this bit is always 0x0. */
+ uint32_t sesreqsucstschng : 1; /**< Session Request Success Status Change (SesReqSucStsChng)
+ Since the O2P USB core is not HNP or SRP capable, this bit is always 0x0. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t sesenddet : 1; /**< Session End Detected (SesEndDet)
+ Since the O2P USB core is not HNP or SRP capable, this bit is always 0x0. */
+ uint32_t reserved_0_1 : 2;
+#else
+ uint32_t reserved_0_1 : 2;
+ uint32_t sesenddet : 1;
+ uint32_t reserved_3_7 : 5;
+ uint32_t sesreqsucstschng : 1;
+ uint32_t hstnegsucstschng : 1;
+ uint32_t reserved_10_16 : 7;
+ uint32_t hstnegdet : 1;
+ uint32_t adevtoutchg : 1;
+ uint32_t dbncedone : 1;
+ uint32_t reserved_20_31 : 12;
+#endif
+ } s;
+ struct cvmx_usbcx_gotgint_s cn30xx;
+ struct cvmx_usbcx_gotgint_s cn31xx;
+ struct cvmx_usbcx_gotgint_s cn50xx;
+ struct cvmx_usbcx_gotgint_s cn52xx;
+ struct cvmx_usbcx_gotgint_s cn52xxp1;
+ struct cvmx_usbcx_gotgint_s cn56xx;
+ struct cvmx_usbcx_gotgint_s cn56xxp1;
+};
+typedef union cvmx_usbcx_gotgint cvmx_usbcx_gotgint_t;
+
+/**
+ * cvmx_usbc#_grstctl
+ *
+ * Core Reset Register (GRSTCTL)
+ *
+ * The application uses this register to reset various hardware features inside the core.
+ */
+union cvmx_usbcx_grstctl {
+ uint32_t u32;
+ struct cvmx_usbcx_grstctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ahbidle : 1; /**< AHB Master Idle (AHBIdle)
+ Indicates that the AHB Master State Machine is in the IDLE
+ condition. */
+ uint32_t dmareq : 1; /**< DMA Request Signal (DMAReq)
+ Indicates that the DMA request is in progress. Used for debug. */
+ uint32_t reserved_11_29 : 19;
+ uint32_t txfnum : 5; /**< TxFIFO Number (TxFNum)
+ This is the FIFO number that must be flushed using the TxFIFO
+ Flush bit. This field must not be changed until the core clears
+ the TxFIFO Flush bit.
+ * 5'h0: Non-Periodic TxFIFO flush
+ * 5'h1: Periodic TxFIFO 1 flush in Device mode or Periodic
+ TxFIFO flush in Host mode
+ * 5'h2: Periodic TxFIFO 2 flush in Device mode
+ - ...
+ * 5'hF: Periodic TxFIFO 15 flush in Device mode
+ * 5'h10: Flush all the Periodic and Non-Periodic TxFIFOs in the
+ core */
+ uint32_t txfflsh : 1; /**< TxFIFO Flush (TxFFlsh)
+ This bit selectively flushes a single or all transmit FIFOs, but
+ cannot do so if the core is in the midst of a transaction.
+ The application must only write this bit after checking that the
+ core is neither writing to the TxFIFO nor reading from the
+ TxFIFO.
+ The application must wait until the core clears this bit before
+ performing any operations. This bit takes 8 clocks (of phy_clk or
+ hclk, whichever is slower) to clear. */
+ uint32_t rxfflsh : 1; /**< RxFIFO Flush (RxFFlsh)
+ The application can flush the entire RxFIFO using this bit, but
+ must first ensure that the core is not in the middle of a
+ transaction.
+ The application must only write to this bit after checking that the
+ core is neither reading from the RxFIFO nor writing to the
+ RxFIFO.
+ The application must wait until the bit is cleared before
+ performing any other operations. This bit will take 8 clocks
+ (slowest of PHY or AHB clock) to clear. */
+ uint32_t intknqflsh : 1; /**< IN Token Sequence Learning Queue Flush (INTknQFlsh)
+ The application writes this bit to flush the IN Token Sequence
+ Learning Queue. */
+ uint32_t frmcntrrst : 1; /**< Host Frame Counter Reset (FrmCntrRst)
+ The application writes this bit to reset the (micro)frame number
+ counter inside the core. When the (micro)frame counter is reset,
+ the subsequent SOF sent out by the core will have a
+ (micro)frame number of 0. */
+ uint32_t hsftrst : 1; /**< HClk Soft Reset (HSftRst)
+ The application uses this bit to flush the control logic in the AHB
+ Clock domain. Only AHB Clock Domain pipelines are reset.
+ * FIFOs are not flushed with this bit.
+ * All state machines in the AHB clock domain are reset to the
+ Idle state after terminating the transactions on the AHB,
+ following the protocol.
+ * CSR control bits used by the AHB clock domain state
+ machines are cleared.
+ * To clear this interrupt, status mask bits that control the
+ interrupt status and are generated by the AHB clock domain
+ state machine are cleared.
+ * Because interrupt status bits are not cleared, the application
+ can get the status of any core events that occurred after it set
+ this bit.
+ This is a self-clearing bit that the core clears after all necessary
+ logic is reset in the core. This may take several clocks,
+ depending on the core's current state. */
+ uint32_t csftrst : 1; /**< Core Soft Reset (CSftRst)
+ Resets the hclk and phy_clock domains as follows:
+ * Clears the interrupts and all the CSR registers except the
+ following register bits:
+ - PCGCCTL.RstPdwnModule
+ - PCGCCTL.GateHclk
+ - PCGCCTL.PwrClmp
+ - PCGCCTL.StopPclk
+ - GUSBCFG.PhyLPwrClkSel
+ - GUSBCFG.DDRSel
+ - GUSBCFG.PHYSel
+ - GUSBCFG.FSIntf
+ - GUSBCFG.ULPI_UTMI_Sel
+ - GUSBCFG.PHYIf
+ - HCFG.FSLSPclkSel
+ - DCFG.DevSpd
+ * All module state machines (except the AHB Slave Unit) are
+ reset to the IDLE state, and all the transmit FIFOs and the
+ receive FIFO are flushed.
+ * Any transactions on the AHB Master are terminated as soon
+ as possible, after gracefully completing the last data phase of
+ an AHB transfer. Any transactions on the USB are terminated
+ immediately.
+ The application can write to this bit any time it wants to reset
+ the core. This is a self-clearing bit and the core clears this bit
+ after all the necessary logic is reset in the core, which may take
+ several clocks, depending on the current state of the core.
+ Once this bit is cleared software should wait at least 3 PHY
+ clocks before doing any access to the PHY domain
+ (synchronization delay). Software should also should check that
+ bit 31 of this register is 1 (AHB Master is IDLE) before starting
+ any operation.
+ Typically software reset is used during software development
+ and also when you dynamically change the PHY selection bits
+ in the USB configuration registers listed above. When you
+ change the PHY, the corresponding clock for the PHY is
+ selected and used in the PHY domain. Once a new clock is
+ selected, the PHY domain has to be reset for proper operation. */
+#else
+ uint32_t csftrst : 1;
+ uint32_t hsftrst : 1;
+ uint32_t frmcntrrst : 1;
+ uint32_t intknqflsh : 1;
+ uint32_t rxfflsh : 1;
+ uint32_t txfflsh : 1;
+ uint32_t txfnum : 5;
+ uint32_t reserved_11_29 : 19;
+ uint32_t dmareq : 1;
+ uint32_t ahbidle : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_grstctl_s cn30xx;
+ struct cvmx_usbcx_grstctl_s cn31xx;
+ struct cvmx_usbcx_grstctl_s cn50xx;
+ struct cvmx_usbcx_grstctl_s cn52xx;
+ struct cvmx_usbcx_grstctl_s cn52xxp1;
+ struct cvmx_usbcx_grstctl_s cn56xx;
+ struct cvmx_usbcx_grstctl_s cn56xxp1;
+};
+typedef union cvmx_usbcx_grstctl cvmx_usbcx_grstctl_t;
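+
+/*
+ * Editorial usage sketch, not part of the SDK: the core soft reset sequence
+ * spelled out in the CSftRst description above: set the bit, wait for the
+ * core to self-clear it, then wait for AHBIdle before touching the core
+ * again. The bounded spin and accessors are hypothetical; production code
+ * would also honor the 3-PHY-clock synchronization delay.
+ */
+extern uint32_t usbc_read32(uint64_t offset);              /* hypothetical */
+extern void usbc_write32(uint64_t offset, uint32_t value); /* hypothetical */
+
+static inline int example_core_soft_reset(uint64_t grstctl_offset)
+{
+    cvmx_usbcx_grstctl_t rst;
+    int timeout = 100000;
+
+    rst.u32 = usbc_read32(grstctl_offset);
+    rst.s.csftrst = 1;
+    usbc_write32(grstctl_offset, rst.u32);
+
+    do { /* self-clearing bit */
+        rst.u32 = usbc_read32(grstctl_offset);
+    } while (rst.s.csftrst && --timeout > 0);
+
+    while (!rst.s.ahbidle && --timeout > 0) /* AHB master must be IDLE */
+        rst.u32 = usbc_read32(grstctl_offset);
+
+    return timeout > 0 ? 0 : -1;
+}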
+
+/**
+ * cvmx_usbc#_grxfsiz
+ *
+ * Receive FIFO Size Register (GRXFSIZ)
+ *
+ * The application can program the RAM size that must be allocated to the RxFIFO.
+ */
+union cvmx_usbcx_grxfsiz {
+ uint32_t u32;
+ struct cvmx_usbcx_grxfsiz_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t rxfdep : 16; /**< RxFIFO Depth (RxFDep)
+ This value is in terms of 32-bit words.
+ * Minimum value is 16
+ * Maximum value is 32768 */
+#else
+ uint32_t rxfdep : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_usbcx_grxfsiz_s cn30xx;
+ struct cvmx_usbcx_grxfsiz_s cn31xx;
+ struct cvmx_usbcx_grxfsiz_s cn50xx;
+ struct cvmx_usbcx_grxfsiz_s cn52xx;
+ struct cvmx_usbcx_grxfsiz_s cn52xxp1;
+ struct cvmx_usbcx_grxfsiz_s cn56xx;
+ struct cvmx_usbcx_grxfsiz_s cn56xxp1;
+};
+typedef union cvmx_usbcx_grxfsiz cvmx_usbcx_grxfsiz_t;
+
+/**
+ * cvmx_usbc#_grxstspd
+ *
+ * Receive Status Read and Pop Register, Device Mode (GRXSTSPD)
+ *
+ * A read to the Receive Status Read and Pop register returns and additionally pops the top data entry out of the RxFIFO.
+ * This description is only valid when the core is in Device mode. For Host mode, use USBC_GRXSTSPH instead.
+ * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the same offset in the O2P USB core.
+ * The offset difference shown in this document is for software clarity and is actually ignored by the
+ * hardware.
+ */
+union cvmx_usbcx_grxstspd {
+ uint32_t u32;
+ struct cvmx_usbcx_grxstspd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t fn : 4; /**< Frame Number (FN)
+ This is the least significant 4 bits of the (micro)frame number in
+ which the packet is received on the USB. This field is supported
+ only when the isochronous OUT endpoints are supported. */
+ uint32_t pktsts : 4; /**< Packet Status (PktSts)
+ Indicates the status of the received packet
+ * 4'b0001: Global OUT NAK (triggers an interrupt)
+ * 4'b0010: OUT data packet received
+ * 4'b0100: SETUP transaction completed (triggers an interrupt)
+ * 4'b0110: SETUP data packet received
+ * Others: Reserved */
+ uint32_t dpid : 2; /**< Data PID (DPID)
+ * 2'b00: DATA0
+ * 2'b10: DATA1
+ * 2'b01: DATA2
+ * 2'b11: MDATA */
+ uint32_t bcnt : 11; /**< Byte Count (BCnt)
+ Indicates the byte count of the received data packet */
+ uint32_t epnum : 4; /**< Endpoint Number (EPNum)
+ Indicates the endpoint number to which the current received
+ packet belongs. */
+#else
+ uint32_t epnum : 4;
+ uint32_t bcnt : 11;
+ uint32_t dpid : 2;
+ uint32_t pktsts : 4;
+ uint32_t fn : 4;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_usbcx_grxstspd_s cn30xx;
+ struct cvmx_usbcx_grxstspd_s cn31xx;
+ struct cvmx_usbcx_grxstspd_s cn50xx;
+ struct cvmx_usbcx_grxstspd_s cn52xx;
+ struct cvmx_usbcx_grxstspd_s cn52xxp1;
+ struct cvmx_usbcx_grxstspd_s cn56xx;
+ struct cvmx_usbcx_grxstspd_s cn56xxp1;
+};
+typedef union cvmx_usbcx_grxstspd cvmx_usbcx_grxstspd_t;
+
+/**
+ * cvmx_usbc#_grxstsph
+ *
+ * Receive Status Read and Pop Register, Host Mode (GRXSTSPH)
+ *
+ * A read to the Receive Status Read and Pop register returns the top entry of the RxFIFO and additionally pops that entry out of the FIFO.
+ * This description is only valid when the core is in Host Mode. For Device Mode use USBC_GRXSTSPD instead.
+ * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the same offset in the O2P USB core.
+ * The offset difference shown in this document is for software clarity and is actually ignored by the
+ * hardware.
+ */
+union cvmx_usbcx_grxstsph {
+ uint32_t u32;
+ struct cvmx_usbcx_grxstsph_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t pktsts : 4; /**< Packet Status (PktSts)
+ Indicates the status of the received packet
+ * 4'b0010: IN data packet received
+ * 4'b0011: IN transfer completed (triggers an interrupt)
+ * 4'b0101: Data toggle error (triggers an interrupt)
+ * 4'b0111: Channel halted (triggers an interrupt)
+ * Others: Reserved */
+ uint32_t dpid : 2; /**< Data PID (DPID)
+ * 2'b00: DATA0
+ * 2'b10: DATA1
+ * 2'b01: DATA2
+ * 2'b11: MDATA */
+ uint32_t bcnt : 11; /**< Byte Count (BCnt)
+ Indicates the byte count of the received IN data packet */
+ uint32_t chnum : 4; /**< Channel Number (ChNum)
+ Indicates the channel number to which the current received
+ packet belongs. */
+#else
+ uint32_t chnum : 4;
+ uint32_t bcnt : 11;
+ uint32_t dpid : 2;
+ uint32_t pktsts : 4;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } s;
+ struct cvmx_usbcx_grxstsph_s cn30xx;
+ struct cvmx_usbcx_grxstsph_s cn31xx;
+ struct cvmx_usbcx_grxstsph_s cn50xx;
+ struct cvmx_usbcx_grxstsph_s cn52xx;
+ struct cvmx_usbcx_grxstsph_s cn52xxp1;
+ struct cvmx_usbcx_grxstsph_s cn56xx;
+ struct cvmx_usbcx_grxstsph_s cn56xxp1;
+};
+typedef union cvmx_usbcx_grxstsph cvmx_usbcx_grxstsph_t;
+
+/**
+ * cvmx_usbc#_grxstsrd
+ *
+ * Receive Status Debug Read Register, Device Mode (GRXSTSRD)
+ *
+ * A read to the Receive Status Debug Read register returns the contents of the top of the Receive FIFO.
+ * This description is only valid when the core is in Device Mode. For Host Mode use USBC_GRXSTSRH instead.
+ * NOTE: GRXSTSRH and GRXSTSRD are physically the same register and share the same offset in the O2P USB core.
+ * The offset difference shown in this document is for software clarity and is actually ignored by the
+ * hardware.
+ */
+union cvmx_usbcx_grxstsrd {
+ uint32_t u32;
+ struct cvmx_usbcx_grxstsrd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_25_31 : 7;
+ uint32_t fn : 4; /**< Frame Number (FN)
+ This is the least significant 4 bits of the (micro)frame number in
+ which the packet is received on the USB. This field is supported
+ only when the isochronous OUT endpoints are supported. */
+ uint32_t pktsts : 4; /**< Packet Status (PktSts)
+ Indicates the status of the received packet
+ * 4'b0001: Global OUT NAK (triggers an interrupt)
+ * 4'b0010: OUT data packet received
+ * 4'b0100: SETUP transaction completed (triggers an interrupt)
+ * 4'b0110: SETUP data packet received
+ * Others: Reserved */
+ uint32_t dpid : 2; /**< Data PID (DPID)
+ * 2'b00: DATA0
+ * 2'b10: DATA1
+ * 2'b01: DATA2
+ * 2'b11: MDATA */
+ uint32_t bcnt : 11; /**< Byte Count (BCnt)
+ Indicates the byte count of the received data packet */
+ uint32_t epnum : 4; /**< Endpoint Number (EPNum)
+ Indicates the endpoint number to which the current received
+ packet belongs. */
+#else
+ uint32_t epnum : 4;
+ uint32_t bcnt : 11;
+ uint32_t dpid : 2;
+ uint32_t pktsts : 4;
+ uint32_t fn : 4;
+ uint32_t reserved_25_31 : 7;
+#endif
+ } s;
+ struct cvmx_usbcx_grxstsrd_s cn30xx;
+ struct cvmx_usbcx_grxstsrd_s cn31xx;
+ struct cvmx_usbcx_grxstsrd_s cn50xx;
+ struct cvmx_usbcx_grxstsrd_s cn52xx;
+ struct cvmx_usbcx_grxstsrd_s cn52xxp1;
+ struct cvmx_usbcx_grxstsrd_s cn56xx;
+ struct cvmx_usbcx_grxstsrd_s cn56xxp1;
+};
+typedef union cvmx_usbcx_grxstsrd cvmx_usbcx_grxstsrd_t;
+
+/**
+ * cvmx_usbc#_grxstsrh
+ *
+ * Receive Status Debug Read Register, Host Mode (GRXSTSRH)
+ *
+ * A read to the Receive Status Debug Read register returns the contents of the top of the Receive FIFO.
+ * This description is only valid when the core is in Host Mode. For Device Mode use USBC_GRXSTSRD instead.
+ * NOTE: GRXSTSRH and GRXSTSRD are physically the same register and share the same offset in the O2P USB core.
+ * The offset difference shown in this document is for software clarity and is actually ignored by the
+ * hardware.
+ */
+union cvmx_usbcx_grxstsrh {
+ uint32_t u32;
+ struct cvmx_usbcx_grxstsrh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_21_31 : 11;
+ uint32_t pktsts : 4; /**< Packet Status (PktSts)
+ Indicates the status of the received packet
+ * 4'b0010: IN data packet received
+ * 4'b0011: IN transfer completed (triggers an interrupt)
+ * 4'b0101: Data toggle error (triggers an interrupt)
+ * 4'b0111: Channel halted (triggers an interrupt)
+ * Others: Reserved */
+ uint32_t dpid : 2; /**< Data PID (DPID)
+ * 2'b00: DATA0
+ * 2'b10: DATA1
+ * 2'b01: DATA2
+ * 2'b11: MDATA */
+ uint32_t bcnt : 11; /**< Byte Count (BCnt)
+ Indicates the byte count of the received IN data packet */
+ uint32_t chnum : 4; /**< Channel Number (ChNum)
+ Indicates the channel number to which the current received
+ packet belongs. */
+#else
+ uint32_t chnum : 4;
+ uint32_t bcnt : 11;
+ uint32_t dpid : 2;
+ uint32_t pktsts : 4;
+ uint32_t reserved_21_31 : 11;
+#endif
+ } s;
+ struct cvmx_usbcx_grxstsrh_s cn30xx;
+ struct cvmx_usbcx_grxstsrh_s cn31xx;
+ struct cvmx_usbcx_grxstsrh_s cn50xx;
+ struct cvmx_usbcx_grxstsrh_s cn52xx;
+ struct cvmx_usbcx_grxstsrh_s cn52xxp1;
+ struct cvmx_usbcx_grxstsrh_s cn56xx;
+ struct cvmx_usbcx_grxstsrh_s cn56xxp1;
+};
+typedef union cvmx_usbcx_grxstsrh cvmx_usbcx_grxstsrh_t;
+
+/**
+ * cvmx_usbc#_gsnpsid
+ *
+ * Synopsys ID Register (GSNPSID)
+ *
+ * This is a read-only register that contains the release number of the core being used.
+ */
+union cvmx_usbcx_gsnpsid {
+ uint32_t u32;
+ struct cvmx_usbcx_gsnpsid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t synopsysid : 32; /**< 0x4F54\<version\>A, release number of the core being used.
+ 0x4F54220A => pass1.x, 0x4F54240A => pass2.x */
+#else
+ uint32_t synopsysid : 32;
+#endif
+ } s;
+ struct cvmx_usbcx_gsnpsid_s cn30xx;
+ struct cvmx_usbcx_gsnpsid_s cn31xx;
+ struct cvmx_usbcx_gsnpsid_s cn50xx;
+ struct cvmx_usbcx_gsnpsid_s cn52xx;
+ struct cvmx_usbcx_gsnpsid_s cn52xxp1;
+ struct cvmx_usbcx_gsnpsid_s cn56xx;
+ struct cvmx_usbcx_gsnpsid_s cn56xxp1;
+};
+typedef union cvmx_usbcx_gsnpsid cvmx_usbcx_gsnpsid_t;
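+
+/* Hedged example of telling core revisions apart via GSNPSID, using the
+ * two release values listed above (usbc_read32() is a placeholder
+ * accessor, not part of this header):
+ *
+ *   cvmx_usbcx_gsnpsid_t id;
+ *   id.u32 = usbc_read32(CVMX_USBCX_GSNPSID(index));
+ *   int is_pass1 = (id.s.synopsysid == 0x4F54220A);
+ *   int is_pass2 = (id.s.synopsysid == 0x4F54240A);
+ */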
+
+/**
+ * cvmx_usbc#_gusbcfg
+ *
+ * Core USB Configuration Register (GUSBCFG)
+ *
+ * This register can be used to configure the core after power-on or after changing to Host mode or Device mode.
+ * It contains USB and USB-PHY related configuration parameters. The application must program this register
+ * before starting any transactions on either the AHB or the USB.
+ * Do not make changes to this register after the initial programming.
+ */
+union cvmx_usbcx_gusbcfg {
+ uint32_t u32;
+ struct cvmx_usbcx_gusbcfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_17_31 : 15;
+ uint32_t otgi2csel : 1; /**< UTMIFS or I2C Interface Select (OtgI2CSel)
+ This bit is always 0x0. */
+ uint32_t phylpwrclksel : 1; /**< PHY Low-Power Clock Select (PhyLPwrClkSel)
+ Software should set this bit to 0x0.
+ Selects either 480-MHz or 48-MHz (low-power) PHY mode. In
+ FS and LS modes, the PHY can usually operate on a 48-MHz
+ clock to save power.
+ * 1'b0: 480-MHz Internal PLL clock
+ * 1'b1: 48-MHz External Clock
+ In 480 MHz mode, the UTMI interface operates at either 60 or
+ 30-MHz, depending upon whether 8- or 16-bit data width is
+ selected. In 48-MHz mode, the UTMI interface operates at 48
+ MHz in FS mode and at either 48 or 6 MHz in LS mode
+ (depending on the PHY vendor).
+ This bit drives the utmi_fsls_low_power core output signal, and
+ is valid only for UTMI+ PHYs. */
+ uint32_t reserved_14_14 : 1;
+ uint32_t usbtrdtim : 4; /**< USB Turnaround Time (USBTrdTim)
+ Sets the turnaround time in PHY clocks.
+ Specifies the response time for a MAC request to the Packet
+ FIFO Controller (PFC) to fetch data from the DFIFO (SPRAM).
+ This must be programmed to 0x5. */
+ uint32_t hnpcap : 1; /**< HNP-Capable (HNPCap)
+ This bit is always 0x0. */
+ uint32_t srpcap : 1; /**< SRP-Capable (SRPCap)
+ This bit is always 0x0. */
+ uint32_t ddrsel : 1; /**< ULPI DDR Select (DDRSel)
+ Software should set this bit to 0x0. */
+ uint32_t physel : 1; /**< USB 2.0 High-Speed PHY or USB 1.1 Full-Speed Serial
+ Software should set this bit to 0x0. */
+ uint32_t fsintf : 1; /**< Full-Speed Serial Interface Select (FSIntf)
+ Software should set this bit to 0x0. */
+ uint32_t ulpi_utmi_sel : 1; /**< ULPI or UTMI+ Select (ULPI_UTMI_Sel)
+ This bit is always 0x0. */
+ uint32_t phyif : 1; /**< PHY Interface (PHYIf)
+ This bit is always 0x1. */
+ uint32_t toutcal : 3; /**< HS/FS Timeout Calibration (TOutCal)
+ The number of PHY clocks that the application programs in this
+ field is added to the high-speed/full-speed interpacket timeout
+ duration in the core to account for any additional delays
+ introduced by the PHY. This may be required, since the delay
+ introduced by the PHY in generating the linestate condition may
+ vary from one PHY to another.
+ The USB standard timeout value for high-speed operation is
+ 736 to 816 (inclusive) bit times. The USB standard timeout
+ value for full-speed operation is 16 to 18 (inclusive) bit times.
+ The application must program this field based on the speed of
+ enumeration. The number of bit times added per PHY clock are:
+ High-speed operation:
+ * One 30-MHz PHY clock = 16 bit times
+ * One 60-MHz PHY clock = 8 bit times
+ Full-speed operation:
+ * One 30-MHz PHY clock = 0.4 bit times
+ * One 60-MHz PHY clock = 0.2 bit times
+ * One 48-MHz PHY clock = 0.25 bit times */
+#else
+ uint32_t toutcal : 3;
+ uint32_t phyif : 1;
+ uint32_t ulpi_utmi_sel : 1;
+ uint32_t fsintf : 1;
+ uint32_t physel : 1;
+ uint32_t ddrsel : 1;
+ uint32_t srpcap : 1;
+ uint32_t hnpcap : 1;
+ uint32_t usbtrdtim : 4;
+ uint32_t reserved_14_14 : 1;
+ uint32_t phylpwrclksel : 1;
+ uint32_t otgi2csel : 1;
+ uint32_t reserved_17_31 : 15;
+#endif
+ } s;
+ struct cvmx_usbcx_gusbcfg_s cn30xx;
+ struct cvmx_usbcx_gusbcfg_s cn31xx;
+ struct cvmx_usbcx_gusbcfg_s cn50xx;
+ struct cvmx_usbcx_gusbcfg_s cn52xx;
+ struct cvmx_usbcx_gusbcfg_s cn52xxp1;
+ struct cvmx_usbcx_gusbcfg_s cn56xx;
+ struct cvmx_usbcx_gusbcfg_s cn56xxp1;
+};
+typedef union cvmx_usbcx_gusbcfg cvmx_usbcx_gusbcfg_t;
+
+/**
+ * cvmx_usbc#_haint
+ *
+ * Host All Channels Interrupt Register (HAINT)
+ *
+ * When a significant event occurs on a channel, the Host All Channels Interrupt register
+ * interrupts the application using the Host Channels Interrupt bit of the Core Interrupt
+ * register (GINTSTS.HChInt). There is one interrupt bit per channel, up to a maximum of
+ * 16 bits. Bits in this register are set and cleared when the application sets and clears
+ * bits in the corresponding Host Channel-n Interrupt register.
+ */
+union cvmx_usbcx_haint {
+ uint32_t u32;
+ struct cvmx_usbcx_haint_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t haint : 16; /**< Channel Interrupts (HAINT)
+ One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15 */
+#else
+ uint32_t haint : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_usbcx_haint_s cn30xx;
+ struct cvmx_usbcx_haint_s cn31xx;
+ struct cvmx_usbcx_haint_s cn50xx;
+ struct cvmx_usbcx_haint_s cn52xx;
+ struct cvmx_usbcx_haint_s cn52xxp1;
+ struct cvmx_usbcx_haint_s cn56xx;
+ struct cvmx_usbcx_haint_s cn56xxp1;
+};
+typedef union cvmx_usbcx_haint cvmx_usbcx_haint_t;
+
+/**
+ * cvmx_usbc#_haintmsk
+ *
+ * Host All Channels Interrupt Mask Register (HAINTMSK)
+ *
+ * The Host All Channel Interrupt Mask register works with the Host All Channel Interrupt
+ * register to interrupt the application when an event occurs on a channel. There is one
+ * interrupt mask bit per channel, up to a maximum of 16 bits.
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_haintmsk {
+ uint32_t u32;
+ struct cvmx_usbcx_haintmsk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t haintmsk : 16; /**< Channel Interrupt Mask (HAINTMsk)
+ One bit per channel: Bit 0 for channel 0, bit 15 for channel 15 */
+#else
+ uint32_t haintmsk : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_usbcx_haintmsk_s cn30xx;
+ struct cvmx_usbcx_haintmsk_s cn31xx;
+ struct cvmx_usbcx_haintmsk_s cn50xx;
+ struct cvmx_usbcx_haintmsk_s cn52xx;
+ struct cvmx_usbcx_haintmsk_s cn52xxp1;
+ struct cvmx_usbcx_haintmsk_s cn56xx;
+ struct cvmx_usbcx_haintmsk_s cn56xxp1;
+};
+typedef union cvmx_usbcx_haintmsk cvmx_usbcx_haintmsk_t;
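+
+/* Unmasking one channel without disturbing the others (sketch; "ch" and
+ * the accessors are illustrative assumptions):
+ *
+ *   cvmx_usbcx_haintmsk_t msk;
+ *   msk.u32 = usbc_read32(CVMX_USBCX_HAINTMSK(index));
+ *   msk.s.haintmsk |= (1 << ch);   // 1'b1 = unmask channel "ch"
+ *   usbc_write32(CVMX_USBCX_HAINTMSK(index), msk.u32);
+ */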
+
+/**
+ * cvmx_usbc#_hcchar#
+ *
+ * Host Channel-n Characteristics Register (HCCHAR)
+ *
+ */
+union cvmx_usbcx_hccharx {
+ uint32_t u32;
+ struct cvmx_usbcx_hccharx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t chena : 1; /**< Channel Enable (ChEna)
+ This field is set by the application and cleared by the OTG host.
+ * 1'b0: Channel disabled
+ * 1'b1: Channel enabled */
+ uint32_t chdis : 1; /**< Channel Disable (ChDis)
+ The application sets this bit to stop transmitting/receiving data
+ on a channel, even before the transfer for that channel is
+ complete. The application must wait for the Channel Disabled
+ interrupt before treating the channel as disabled. */
+ uint32_t oddfrm : 1; /**< Odd Frame (OddFrm)
+ This field is set (reset) by the application to indicate that the
+ OTG host must perform a transfer in an odd (micro)frame. This
+ field is applicable for only periodic (isochronous and interrupt)
+ transactions.
+ * 1'b0: Even (micro)frame
+ * 1'b1: Odd (micro)frame */
+ uint32_t devaddr : 7; /**< Device Address (DevAddr)
+ This field selects the specific device serving as the data source
+ or sink. */
+ uint32_t ec : 2; /**< Multi Count (MC) / Error Count (EC)
+ When the Split Enable bit of the Host Channel-n Split Control
+ register (HCSPLTn.SpltEna) is reset (1'b0), this field indicates
+ to the host the number of transactions that should be executed
+ per microframe for this endpoint.
+ * 2'b00: Reserved. This field yields undefined results.
+ * 2'b01: 1 transaction
+ * 2'b10: 2 transactions to be issued for this endpoint per
+ microframe
+ * 2'b11: 3 transactions to be issued for this endpoint per
+ microframe
+ When HCSPLTn.SpltEna is set (1'b1), this field indicates the
+ number of immediate retries to be performed for a periodic split
+ transactions on transaction errors. This field must be set to at
+ least 2'b01. */
+ uint32_t eptype : 2; /**< Endpoint Type (EPType)
+ Indicates the transfer type selected.
+ * 2'b00: Control
+ * 2'b01: Isochronous
+ * 2'b10: Bulk
+ * 2'b11: Interrupt */
+ uint32_t lspddev : 1; /**< Low-Speed Device (LSpdDev)
+ This field is set by the application to indicate that this channel is
+ communicating to a low-speed device. */
+ uint32_t reserved_16_16 : 1;
+ uint32_t epdir : 1; /**< Endpoint Direction (EPDir)
+ Indicates whether the transaction is IN or OUT.
+ * 1'b0: OUT
+ * 1'b1: IN */
+ uint32_t epnum : 4; /**< Endpoint Number (EPNum)
+ Indicates the endpoint number on the device serving as the
+ data source or sink. */
+ uint32_t mps : 11; /**< Maximum Packet Size (MPS)
+ Indicates the maximum packet size of the associated endpoint. */
+#else
+ uint32_t mps : 11;
+ uint32_t epnum : 4;
+ uint32_t epdir : 1;
+ uint32_t reserved_16_16 : 1;
+ uint32_t lspddev : 1;
+ uint32_t eptype : 2;
+ uint32_t ec : 2;
+ uint32_t devaddr : 7;
+ uint32_t oddfrm : 1;
+ uint32_t chdis : 1;
+ uint32_t chena : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_hccharx_s cn30xx;
+ struct cvmx_usbcx_hccharx_s cn31xx;
+ struct cvmx_usbcx_hccharx_s cn50xx;
+ struct cvmx_usbcx_hccharx_s cn52xx;
+ struct cvmx_usbcx_hccharx_s cn52xxp1;
+ struct cvmx_usbcx_hccharx_s cn56xx;
+ struct cvmx_usbcx_hccharx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_hccharx cvmx_usbcx_hccharx_t;
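+
+/* Sketch of programming a channel for a bulk IN transfer using the
+ * fields above. The values, the "dev" and "ch" variables, and the
+ * usbc_write32() helper are illustrative assumptions, not a definitive
+ * sequence:
+ *
+ *   cvmx_usbcx_hccharx_t hcchar;
+ *   hcchar.u32 = 0;
+ *   hcchar.s.mps = 512;        // max packet size of the endpoint
+ *   hcchar.s.epnum = 1;        // device endpoint 1
+ *   hcchar.s.epdir = 1;        // 1'b1 = IN
+ *   hcchar.s.eptype = 2;       // 2'b10 = bulk
+ *   hcchar.s.ec = 1;           // one transaction per microframe
+ *   hcchar.s.devaddr = dev;    // address of the target device
+ *   hcchar.s.chena = 1;        // enable the channel last
+ *   usbc_write32(CVMX_USBCX_HCCHARX(ch, index), hcchar.u32);
+ */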
+
+/**
+ * cvmx_usbc#_hcfg
+ *
+ * Host Configuration Register (HCFG)
+ *
+ * This register configures the core after power-on. Do not make changes to this register after initializing the host.
+ */
+union cvmx_usbcx_hcfg {
+ uint32_t u32;
+ struct cvmx_usbcx_hcfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_3_31 : 29;
+ uint32_t fslssupp : 1; /**< FS- and LS-Only Support (FSLSSupp)
+ The application uses this bit to control the core's enumeration
+ speed. Using this bit, the application can make the core
+ enumerate as a FS host, even if the connected device supports
+ HS traffic. Do not make changes to this field after initial
+ programming.
+ * 1'b0: HS/FS/LS, based on the maximum speed supported by
+ the connected device
+ * 1'b1: FS/LS-only, even if the connected device can support HS */
+ uint32_t fslspclksel : 2; /**< FS/LS PHY Clock Select (FSLSPclkSel)
+ When the core is in FS Host mode
+ * 2'b00: PHY clock is running at 30/60 MHz
+ * 2'b01: PHY clock is running at 48 MHz
+ * Others: Reserved
+ When the core is in LS Host mode
+ * 2'b00: PHY clock is running at 30/60 MHz. When the
+ UTMI+/ULPI PHY Low Power mode is not selected, use
+ 30/60 MHz.
+ * 2'b01: PHY clock is running at 48 MHz. When the UTMI+
+ PHY Low Power mode is selected, use 48MHz if the PHY
+ supplies a 48 MHz clock during LS mode.
+ * 2'b10: PHY clock is running at 6 MHz. In USB 1.1 FS mode,
+ use 6 MHz when the UTMI+ PHY Low Power mode is
+ selected and the PHY supplies a 6 MHz clock during LS
+ mode. If you select a 6 MHz clock during LS mode, you must
+ do a soft reset.
+ * 2'b11: Reserved */
+#else
+ uint32_t fslspclksel : 2;
+ uint32_t fslssupp : 1;
+ uint32_t reserved_3_31 : 29;
+#endif
+ } s;
+ struct cvmx_usbcx_hcfg_s cn30xx;
+ struct cvmx_usbcx_hcfg_s cn31xx;
+ struct cvmx_usbcx_hcfg_s cn50xx;
+ struct cvmx_usbcx_hcfg_s cn52xx;
+ struct cvmx_usbcx_hcfg_s cn52xxp1;
+ struct cvmx_usbcx_hcfg_s cn56xx;
+ struct cvmx_usbcx_hcfg_s cn56xxp1;
+};
+typedef union cvmx_usbcx_hcfg cvmx_usbcx_hcfg_t;
+
+/**
+ * cvmx_usbc#_hcint#
+ *
+ * Host Channel-n Interrupt Register (HCINT)
+ *
+ * This register indicates the status of a channel with respect to USB- and AHB-related events.
+ * The application must read this register when the Host Channels Interrupt bit of the Core Interrupt
+ * register (GINTSTS.HChInt) is set. Before the application can read this register, it must first read
+ * the Host All Channels Interrupt (HAINT) register to get the exact channel number for the Host Channel-n
+ * Interrupt register. The application must clear the appropriate bit in this register to clear the
+ * corresponding bits in the HAINT and GINTSTS registers.
+ */
+union cvmx_usbcx_hcintx {
+ uint32_t u32;
+ struct cvmx_usbcx_hcintx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_11_31 : 21;
+ uint32_t datatglerr : 1; /**< Data Toggle Error (DataTglErr) */
+ uint32_t frmovrun : 1; /**< Frame Overrun (FrmOvrun) */
+ uint32_t bblerr : 1; /**< Babble Error (BblErr) */
+ uint32_t xacterr : 1; /**< Transaction Error (XactErr) */
+ uint32_t nyet : 1; /**< NYET Response Received Interrupt (NYET) */
+ uint32_t ack : 1; /**< ACK Response Received Interrupt (ACK) */
+ uint32_t nak : 1; /**< NAK Response Received Interrupt (NAK) */
+ uint32_t stall : 1; /**< STALL Response Received Interrupt (STALL) */
+ uint32_t ahberr : 1; /**< This bit is always 0x0. */
+ uint32_t chhltd : 1; /**< Channel Halted (ChHltd)
+ Indicates the transfer completed abnormally either because of
+ any USB transaction error or in response to disable request by
+ the application. */
+ uint32_t xfercompl : 1; /**< Transfer Completed (XferCompl)
+ Transfer completed normally without any errors. */
+#else
+ uint32_t xfercompl : 1;
+ uint32_t chhltd : 1;
+ uint32_t ahberr : 1;
+ uint32_t stall : 1;
+ uint32_t nak : 1;
+ uint32_t ack : 1;
+ uint32_t nyet : 1;
+ uint32_t xacterr : 1;
+ uint32_t bblerr : 1;
+ uint32_t frmovrun : 1;
+ uint32_t datatglerr : 1;
+ uint32_t reserved_11_31 : 21;
+#endif
+ } s;
+ struct cvmx_usbcx_hcintx_s cn30xx;
+ struct cvmx_usbcx_hcintx_s cn31xx;
+ struct cvmx_usbcx_hcintx_s cn50xx;
+ struct cvmx_usbcx_hcintx_s cn52xx;
+ struct cvmx_usbcx_hcintx_s cn52xxp1;
+ struct cvmx_usbcx_hcintx_s cn56xx;
+ struct cvmx_usbcx_hcintx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_hcintx cvmx_usbcx_hcintx_t;
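+
+/* The HAINT-to-HCINT dispatch flow described above, as a sketch
+ * (interrupt-handler context; placeholder accessors):
+ *
+ *   cvmx_usbcx_haint_t haint;
+ *   haint.u32 = usbc_read32(CVMX_USBCX_HAINT(index));
+ *   for (int ch = 0; ch < 16; ch++) {
+ *       if (!(haint.s.haint & (1 << ch)))
+ *           continue;
+ *       cvmx_usbcx_hcintx_t hcint;
+ *       hcint.u32 = usbc_read32(CVMX_USBCX_HCINTX(ch, index));
+ *       // ...handle hcint.s.xfercompl, hcint.s.chhltd, etc...
+ *       // writing the bits back clears them, and thereby the
+ *       // corresponding HAINT and GINTSTS bits
+ *       usbc_write32(CVMX_USBCX_HCINTX(ch, index), hcint.u32);
+ *   }
+ */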
+
+/**
+ * cvmx_usbc#_hcintmsk#
+ *
+ * Host Channel-n Interrupt Mask Register (HCINTMSKn)
+ *
+ * This register reflects the mask for each channel status described in the previous section.
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_hcintmskx {
+ uint32_t u32;
+ struct cvmx_usbcx_hcintmskx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_11_31 : 21;
+ uint32_t datatglerrmsk : 1; /**< Data Toggle Error Mask (DataTglErrMsk) */
+ uint32_t frmovrunmsk : 1; /**< Frame Overrun Mask (FrmOvrunMsk) */
+ uint32_t bblerrmsk : 1; /**< Babble Error Mask (BblErrMsk) */
+ uint32_t xacterrmsk : 1; /**< Transaction Error Mask (XactErrMsk) */
+ uint32_t nyetmsk : 1; /**< NYET Response Received Interrupt Mask (NyetMsk) */
+ uint32_t ackmsk : 1; /**< ACK Response Received Interrupt Mask (AckMsk) */
+ uint32_t nakmsk : 1; /**< NAK Response Received Interrupt Mask (NakMsk) */
+ uint32_t stallmsk : 1; /**< STALL Response Received Interrupt Mask (StallMsk) */
+ uint32_t ahberrmsk : 1; /**< AHB Error Mask (AHBErrMsk) */
+ uint32_t chhltdmsk : 1; /**< Channel Halted Mask (ChHltdMsk) */
+ uint32_t xfercomplmsk : 1; /**< Transfer Completed Mask (XferComplMsk) */
+#else
+ uint32_t xfercomplmsk : 1;
+ uint32_t chhltdmsk : 1;
+ uint32_t ahberrmsk : 1;
+ uint32_t stallmsk : 1;
+ uint32_t nakmsk : 1;
+ uint32_t ackmsk : 1;
+ uint32_t nyetmsk : 1;
+ uint32_t xacterrmsk : 1;
+ uint32_t bblerrmsk : 1;
+ uint32_t frmovrunmsk : 1;
+ uint32_t datatglerrmsk : 1;
+ uint32_t reserved_11_31 : 21;
+#endif
+ } s;
+ struct cvmx_usbcx_hcintmskx_s cn30xx;
+ struct cvmx_usbcx_hcintmskx_s cn31xx;
+ struct cvmx_usbcx_hcintmskx_s cn50xx;
+ struct cvmx_usbcx_hcintmskx_s cn52xx;
+ struct cvmx_usbcx_hcintmskx_s cn52xxp1;
+ struct cvmx_usbcx_hcintmskx_s cn56xx;
+ struct cvmx_usbcx_hcintmskx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_hcintmskx cvmx_usbcx_hcintmskx_t;
+
+/**
+ * cvmx_usbc#_hcsplt#
+ *
+ * Host Channel-n Split Control Register (HCSPLT)
+ *
+ */
+union cvmx_usbcx_hcspltx {
+ uint32_t u32;
+ struct cvmx_usbcx_hcspltx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t spltena : 1; /**< Split Enable (SpltEna)
+ The application sets this field to indicate that this channel is
+ enabled to perform split transactions. */
+ uint32_t reserved_17_30 : 14;
+ uint32_t compsplt : 1; /**< Do Complete Split (CompSplt)
+ The application sets this field to request the OTG host to
+ perform a complete split transaction. */
+ uint32_t xactpos : 2; /**< Transaction Position (XactPos)
+ This field is used to determine whether to send all, first, middle,
+ or last payloads with each OUT transaction.
+ * 2'b11: All. This is the entire data payload of this transaction
+ (which is less than or equal to 188 bytes).
+ * 2'b10: Begin. This is the first data payload of this transaction
+ (which is larger than 188 bytes).
+ * 2'b00: Mid. This is the middle payload of this transaction
+ (which is larger than 188 bytes).
+ * 2'b01: End. This is the last payload of this transaction (which
+ is larger than 188 bytes). */
+ uint32_t hubaddr : 7; /**< Hub Address (HubAddr)
+ This field holds the device address of the transaction
+ translator's hub. */
+ uint32_t prtaddr : 7; /**< Port Address (PrtAddr)
+ This field is the port number of the recipient transaction
+ translator. */
+#else
+ uint32_t prtaddr : 7;
+ uint32_t hubaddr : 7;
+ uint32_t xactpos : 2;
+ uint32_t compsplt : 1;
+ uint32_t reserved_17_30 : 14;
+ uint32_t spltena : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_hcspltx_s cn30xx;
+ struct cvmx_usbcx_hcspltx_s cn31xx;
+ struct cvmx_usbcx_hcspltx_s cn50xx;
+ struct cvmx_usbcx_hcspltx_s cn52xx;
+ struct cvmx_usbcx_hcspltx_s cn52xxp1;
+ struct cvmx_usbcx_hcspltx_s cn56xx;
+ struct cvmx_usbcx_hcspltx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_hcspltx cvmx_usbcx_hcspltx_t;
+
+/**
+ * cvmx_usbc#_hctsiz#
+ *
+ * Host Channel-n Transfer Size Register (HCTSIZ)
+ *
+ */
+union cvmx_usbcx_hctsizx {
+ uint32_t u32;
+ struct cvmx_usbcx_hctsizx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dopng : 1; /**< Do Ping (DoPng)
+ Setting this field to 1 directs the host to do PING protocol. */
+ uint32_t pid : 2; /**< PID (Pid)
+ The application programs this field with the type of PID to use
+ for the initial transaction. The host will maintain this field for the
+ rest of the transfer.
+ * 2'b00: DATA0
+ * 2'b01: DATA2
+ * 2'b10: DATA1
+ * 2'b11: MDATA (non-control)/SETUP (control) */
+ uint32_t pktcnt : 10; /**< Packet Count (PktCnt)
+ This field is programmed by the application with the expected
+ number of packets to be transmitted (OUT) or received (IN).
+ The host decrements this count on every successful
+ transmission or reception of an OUT/IN packet. Once this count
+ reaches zero, the application is interrupted to indicate normal
+ completion. */
+ uint32_t xfersize : 19; /**< Transfer Size (XferSize)
+ For an OUT, this field is the number of data bytes the host will
+ send during the transfer.
+ For an IN, this field is the buffer size that the application has
+ reserved for the transfer. The application is expected to
+ program this field as an integer multiple of the maximum packet
+ size for IN transactions (periodic and non-periodic). */
+#else
+ uint32_t xfersize : 19;
+ uint32_t pktcnt : 10;
+ uint32_t pid : 2;
+ uint32_t dopng : 1;
+#endif
+ } s;
+ struct cvmx_usbcx_hctsizx_s cn30xx;
+ struct cvmx_usbcx_hctsizx_s cn31xx;
+ struct cvmx_usbcx_hctsizx_s cn50xx;
+ struct cvmx_usbcx_hctsizx_s cn52xx;
+ struct cvmx_usbcx_hctsizx_s cn52xxp1;
+ struct cvmx_usbcx_hctsizx_s cn56xx;
+ struct cvmx_usbcx_hctsizx_s cn56xxp1;
+};
+typedef union cvmx_usbcx_hctsizx cvmx_usbcx_hctsizx_t;
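+
+/* Worked example for HCTSIZ programming: for an IN transfer the
+ * application reserves XferSize as a whole number of max packets, so a
+ * plausible computation (not mandated by this header) is:
+ *
+ *   uint32_t pktcnt = (bytes + mps - 1) / mps;  // round up to packets
+ *   cvmx_usbcx_hctsizx_t hctsiz;
+ *   hctsiz.u32 = 0;
+ *   hctsiz.s.pktcnt = pktcnt;
+ *   hctsiz.s.xfersize = pktcnt * mps;  // integer multiple of mps for IN
+ *   hctsiz.s.pid = 0;                  // 2'b00 = start with DATA0
+ */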
+
+/**
+ * cvmx_usbc#_hfir
+ *
+ * Host Frame Interval Register (HFIR)
+ *
+ * This register stores the frame interval information for the current speed to which the O2P USB core has enumerated.
+ */
+union cvmx_usbcx_hfir {
+ uint32_t u32;
+ struct cvmx_usbcx_hfir_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31 : 16;
+ uint32_t frint : 16; /**< Frame Interval (FrInt)
+ The value that the application programs to this field specifies
+ the interval between two consecutive SOFs (FS) or micro-
+ SOFs (HS) or Keep-Alive tokens (HS). This field contains the
+ number of PHY clocks that constitute the required frame
+ interval. The default value set in this field for a FS operation
+ when the PHY clock frequency is 60 MHz. The application can
+ write a value to this register only after the Port Enable bit of
+ the Host Port Control and Status register (HPRT.PrtEnaPort)
+ has been set. If no value is programmed, the core calculates
+ the value based on the PHY clock specified in the FS/LS PHY
+ Clock Select field of the Host Configuration register
+ (HCFG.FSLSPclkSel). Do not change the value of this field
+ after the initial configuration.
+ * 125 us (PHY clock frequency for HS)
+ * 1 ms (PHY clock frequency for FS/LS) */
+#else
+ uint32_t frint : 16;
+ uint32_t reserved_16_31 : 16;
+#endif
+ } s;
+ struct cvmx_usbcx_hfir_s cn30xx;
+ struct cvmx_usbcx_hfir_s cn31xx;
+ struct cvmx_usbcx_hfir_s cn50xx;
+ struct cvmx_usbcx_hfir_s cn52xx;
+ struct cvmx_usbcx_hfir_s cn52xxp1;
+ struct cvmx_usbcx_hfir_s cn56xx;
+ struct cvmx_usbcx_hfir_s cn56xxp1;
+};
+typedef union cvmx_usbcx_hfir cvmx_usbcx_hfir_t;
+
+/**
+ * cvmx_usbc#_hfnum
+ *
+ * Host Frame Number/Frame Time Remaining Register (HFNUM)
+ *
+ * This register indicates the current frame number.
+ * It also indicates the time remaining (in terms of the number of PHY clocks)
+ * in the current (micro)frame.
+ */
+union cvmx_usbcx_hfnum {
+ uint32_t u32;
+ struct cvmx_usbcx_hfnum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t frrem : 16; /**< Frame Time Remaining (FrRem)
+ Indicates the amount of time remaining in the current
+ microframe (HS) or frame (FS/LS), in terms of PHY clocks.
+ This field decrements on each PHY clock. When it reaches
+ zero, this field is reloaded with the value in the Frame Interval
+ register and a new SOF is transmitted on the USB. */
+ uint32_t frnum : 16; /**< Frame Number (FrNum)
+ This field increments when a new SOF is transmitted on the
+ USB, and is reset to 0 when it reaches 16'h3FFF. */
+#else
+ uint32_t frnum : 16;
+ uint32_t frrem : 16;
+#endif
+ } s;
+ struct cvmx_usbcx_hfnum_s cn30xx;
+ struct cvmx_usbcx_hfnum_s cn31xx;
+ struct cvmx_usbcx_hfnum_s cn50xx;
+ struct cvmx_usbcx_hfnum_s cn52xx;
+ struct cvmx_usbcx_hfnum_s cn52xxp1;
+ struct cvmx_usbcx_hfnum_s cn56xx;
+ struct cvmx_usbcx_hfnum_s cn56xxp1;
+};
+typedef union cvmx_usbcx_hfnum cvmx_usbcx_hfnum_t;
+
+/**
+ * cvmx_usbc#_hprt
+ *
+ * Host Port Control and Status Register (HPRT)
+ *
+ * This register is available in both Host and Device modes.
+ * Currently, the OTG Host supports only one port.
+ * A single register holds USB port-related information such as USB reset, enable, suspend, resume,
+ * connect status, and test mode for each port. The R_SS_WC bits in this register can trigger an
+ * interrupt to the application through the Host Port Interrupt bit of the Core Interrupt
+ * register (GINTSTS.PrtInt). On a Port Interrupt, the application must read this register and clear
+ * the bit that caused the interrupt. For the R_SS_WC bits, the application must write a 1 to the bit
+ * to clear the interrupt.
+ */
+union cvmx_usbcx_hprt {
+ uint32_t u32;
+ struct cvmx_usbcx_hprt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_19_31 : 13;
+ uint32_t prtspd : 2; /**< Port Speed (PrtSpd)
+ Indicates the speed of the device attached to this port.
+ * 2'b00: High speed
+ * 2'b01: Full speed
+ * 2'b10: Low speed
+ * 2'b11: Reserved */
+ uint32_t prttstctl : 4; /**< Port Test Control (PrtTstCtl)
+ The application writes a nonzero value to this field to put
+ the port into a Test mode, and the corresponding pattern is
+ signaled on the port.
+ * 4'b0000: Test mode disabled
+ * 4'b0001: Test_J mode
+ * 4'b0010: Test_K mode
+ * 4'b0011: Test_SE0_NAK mode
+ * 4'b0100: Test_Packet mode
+ * 4'b0101: Test_Force_Enable
+ * Others: Reserved
+ PrtSpd must be zero (i.e. the interface must be in high-speed
+ mode) to use the PrtTstCtl test modes. */
+ uint32_t prtpwr : 1; /**< Port Power (PrtPwr)
+ The application uses this field to control power to this port,
+ and the core clears this bit on an overcurrent condition.
+ * 1'b0: Power off
+ * 1'b1: Power on */
+ uint32_t prtlnsts : 2; /**< Port Line Status (PrtLnSts)
+ Indicates the current logic level USB data lines
+ * Bit [10]: Logic level of D-
+ * Bit [11]: Logic level of D+ */
+ uint32_t reserved_9_9 : 1;
+ uint32_t prtrst : 1; /**< Port Reset (PrtRst)
+ When the application sets this bit, a reset sequence is
+ started on this port. The application must time the reset
+ period and clear this bit after the reset sequence is
+ complete.
+ * 1'b0: Port not in reset
+ * 1'b1: Port in reset
+ The application must leave this bit set for at least a
+ minimum duration mentioned below to start a reset on the
+ port. The application can leave it set for another 10 ms in
+ addition to the required minimum duration, before clearing
+ the bit, even though there is no maximum limit set by the
+ USB standard.
+ * High speed: 50 ms
+ * Full speed/Low speed: 10 ms */
+ uint32_t prtsusp : 1; /**< Port Suspend (PrtSusp)
+ The application sets this bit to put this port in Suspend
+ mode. The core only stops sending SOFs when this is set.
+ To stop the PHY clock, the application must set the Port
+ Clock Stop bit, which will assert the suspend input pin of
+ the PHY.
+ The read value of this bit reflects the current suspend
+ status of the port. This bit is cleared by the core after a
+ remote wakeup signal is detected or the application sets
+ the Port Reset bit or Port Resume bit in this register or the
+ Resume/Remote Wakeup Detected Interrupt bit or
+ Disconnect Detected Interrupt bit in the Core Interrupt
+ register (GINTSTS.WkUpInt or GINTSTS.DisconnInt,
+ respectively).
+ * 1'b0: Port not in Suspend mode
+ * 1'b1: Port in Suspend mode */
+ uint32_t prtres : 1; /**< Port Resume (PrtRes)
+ The application sets this bit to drive resume signaling on
+ the port. The core continues to drive the resume signal
+ until the application clears this bit.
+ If the core detects a USB remote wakeup sequence, as
+ indicated by the Port Resume/Remote Wakeup Detected
+ Interrupt bit of the Core Interrupt register
+ (GINTSTS.WkUpInt), the core starts driving resume
+ signaling without application intervention and clears this bit
+ when it detects a disconnect condition. The read value of
+ this bit indicates whether the core is currently driving
+ resume signaling.
+ * 1'b0: No resume driven
+ * 1'b1: Resume driven */
+ uint32_t prtovrcurrchng : 1; /**< Port Overcurrent Change (PrtOvrCurrChng)
+ The core sets this bit when the status of the Port
+ Overcurrent Active bit (bit 4) in this register changes. */
+ uint32_t prtovrcurract : 1; /**< Port Overcurrent Active (PrtOvrCurrAct)
+ Indicates the overcurrent condition of the port.
+ * 1'b0: No overcurrent condition
+ * 1'b1: Overcurrent condition */
+ uint32_t prtenchng : 1; /**< Port Enable/Disable Change (PrtEnChng)
+ The core sets this bit when the status of the Port Enable bit
+ [2] of this register changes. */
+ uint32_t prtena : 1; /**< Port Enable (PrtEna)
+ A port is enabled only by the core after a reset sequence,
+ and is disabled by an overcurrent condition, a disconnect
+ condition, or by the application clearing this bit. The
+ application cannot set this bit by a register write. It can only
+ clear it to disable the port. This bit does not trigger any
+ interrupt to the application.
+ * 1'b0: Port disabled
+ * 1'b1: Port enabled */
+ uint32_t prtconndet : 1; /**< Port Connect Detected (PrtConnDet)
+ The core sets this bit when a device connection is detected
+ to trigger an interrupt to the application using the Host Port
+ Interrupt bit of the Core Interrupt register (GINTSTS.PrtInt).
+ The application must write a 1 to this bit to clear the
+ interrupt. */
+ uint32_t prtconnsts : 1; /**< Port Connect Status (PrtConnSts)
+ * 0: No device is attached to the port.
+ * 1: A device is attached to the port. */
+#else
+ uint32_t prtconnsts : 1;
+ uint32_t prtconndet : 1;
+ uint32_t prtena : 1;
+ uint32_t prtenchng : 1;
+ uint32_t prtovrcurract : 1;
+ uint32_t prtovrcurrchng : 1;
+ uint32_t prtres : 1;
+ uint32_t prtsusp : 1;
+ uint32_t prtrst : 1;
+ uint32_t reserved_9_9 : 1;
+ uint32_t prtlnsts : 2;
+ uint32_t prtpwr : 1;
+ uint32_t prttstctl : 4;
+ uint32_t prtspd : 2;
+ uint32_t reserved_19_31 : 13;
+#endif
+ } s;
+ struct cvmx_usbcx_hprt_s cn30xx;
+ struct cvmx_usbcx_hprt_s cn31xx;
+ struct cvmx_usbcx_hprt_s cn50xx;
+ struct cvmx_usbcx_hprt_s cn52xx;
+ struct cvmx_usbcx_hprt_s cn52xxp1;
+ struct cvmx_usbcx_hprt_s cn56xx;
+ struct cvmx_usbcx_hprt_s cn56xxp1;
+};
+typedef union cvmx_usbcx_hprt cvmx_usbcx_hprt_t;
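+
+/* Port-reset sketch per the PrtRst timing notes above. The 50 ms delay
+ * covers all speeds; clearing the R_SS_WC change bits before the write
+ * avoids acknowledging pending interrupts by accident. Placeholder
+ * accessors; cvmx_wait_usec() is the delay helper used elsewhere in
+ * this SDK:
+ *
+ *   cvmx_usbcx_hprt_t hprt;
+ *   hprt.u32 = usbc_read32(CVMX_USBCX_HPRT(index));
+ *   hprt.s.prtconndet = 0;   // W1C: don't clear on write-back
+ *   hprt.s.prtenchng = 0;
+ *   hprt.s.prtovrcurrchng = 0;
+ *   hprt.s.prtena = 0;       // writing 1 would disable the port
+ *   hprt.s.prtrst = 1;       // start the reset sequence
+ *   usbc_write32(CVMX_USBCX_HPRT(index), hprt.u32);
+ *   cvmx_wait_usec(50000);   // application times the reset period
+ *   hprt.s.prtrst = 0;
+ *   usbc_write32(CVMX_USBCX_HPRT(index), hprt.u32);
+ */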
+
+/**
+ * cvmx_usbc#_hptxfsiz
+ *
+ * Host Periodic Transmit FIFO Size Register (HPTXFSIZ)
+ *
+ * This register holds the size and the memory start address of the Periodic TxFIFO, as shown in Figures 310 and 311.
+ */
+union cvmx_usbcx_hptxfsiz {
+ uint32_t u32;
+ struct cvmx_usbcx_hptxfsiz_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ptxfsize : 16; /**< Host Periodic TxFIFO Depth (PTxFSize)
+ This value is in terms of 32-bit words.
+ * Minimum value is 16
+ * Maximum value is 32768 */
+ uint32_t ptxfstaddr : 16; /**< Host Periodic TxFIFO Start Address (PTxFStAddr) */
+#else
+ uint32_t ptxfstaddr : 16;
+ uint32_t ptxfsize : 16;
+#endif
+ } s;
+ struct cvmx_usbcx_hptxfsiz_s cn30xx;
+ struct cvmx_usbcx_hptxfsiz_s cn31xx;
+ struct cvmx_usbcx_hptxfsiz_s cn50xx;
+ struct cvmx_usbcx_hptxfsiz_s cn52xx;
+ struct cvmx_usbcx_hptxfsiz_s cn52xxp1;
+ struct cvmx_usbcx_hptxfsiz_s cn56xx;
+ struct cvmx_usbcx_hptxfsiz_s cn56xxp1;
+};
+typedef union cvmx_usbcx_hptxfsiz cvmx_usbcx_hptxfsiz_t;
+
+/**
+ * cvmx_usbc#_hptxsts
+ *
+ * Host Periodic Transmit FIFO/Queue Status Register (HPTXSTS)
+ *
+ * This read-only register contains the free space information for the Periodic TxFIFO and
+ * the Periodic Transmit Request Queue
+ */
+union cvmx_usbcx_hptxsts {
+ uint32_t u32;
+ struct cvmx_usbcx_hptxsts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ptxqtop : 8; /**< Top of the Periodic Transmit Request Queue (PTxQTop)
+ This indicates the entry in the Periodic Tx Request Queue that
+ is currently being processed by the MAC.
+ This register is used for debugging.
+ * Bit [31]: Odd/Even (micro)frame
+ - 1'b0: send in even (micro)frame
+ - 1'b1: send in odd (micro)frame
+ * Bits [30:27]: Channel/endpoint number
+ * Bits [26:25]: Type
+ - 2'b00: IN/OUT
+ - 2'b01: Zero-length packet
+ - 2'b10: CSPLIT
+ - 2'b11: Disable channel command
+ * Bit [24]: Terminate (last entry for the selected
+ channel/endpoint) */
+ uint32_t ptxqspcavail : 8; /**< Periodic Transmit Request Queue Space Available
+ (PTxQSpcAvail)
+ Indicates the number of free locations available to be written in
+ the Periodic Transmit Request Queue. This queue holds both
+ IN and OUT requests.
+ * 8'h0: Periodic Transmit Request Queue is full
+ * 8'h1: 1 location available
+ * 8'h2: 2 locations available
+ * n: n locations available (0..8)
+ * Others: Reserved */
+ uint32_t ptxfspcavail : 16; /**< Periodic Transmit Data FIFO Space Available (PTxFSpcAvail)
+ Indicates the number of free locations available to be written to
+ in the Periodic TxFIFO.
+ Values are in terms of 32-bit words
+ * 16'h0: Periodic TxFIFO is full
+ * 16'h1: 1 word available
+ * 16'h2: 2 words available
+ * 16'hn: n words available (where n is 0..32768)
+ * 16'h8000: 32768 words available
+ * Others: Reserved */
+#else
+ uint32_t ptxfspcavail : 16;
+ uint32_t ptxqspcavail : 8;
+ uint32_t ptxqtop : 8;
+#endif
+ } s;
+ struct cvmx_usbcx_hptxsts_s cn30xx;
+ struct cvmx_usbcx_hptxsts_s cn31xx;
+ struct cvmx_usbcx_hptxsts_s cn50xx;
+ struct cvmx_usbcx_hptxsts_s cn52xx;
+ struct cvmx_usbcx_hptxsts_s cn52xxp1;
+ struct cvmx_usbcx_hptxsts_s cn56xx;
+ struct cvmx_usbcx_hptxsts_s cn56xxp1;
+};
+typedef union cvmx_usbcx_hptxsts cvmx_usbcx_hptxsts_t;
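+
+/* Slave-mode flow-control sketch using the two "space available" fields
+ * above (sizes are in 32-bit words; accessor and "bytes" are
+ * illustrative):
+ *
+ *   cvmx_usbcx_hptxsts_t sts;
+ *   sts.u32 = usbc_read32(CVMX_USBCX_HPTXSTS(index));
+ *   int words = (bytes + 3) / 4;
+ *   if (sts.s.ptxqspcavail > 0 && sts.s.ptxfspcavail >= words) {
+ *       // safe to queue one periodic request and push the payload
+ *   }
+ */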
+
+/**
+ * cvmx_usbc#_nptxdfifo#
+ *
+ * NPTX Data Fifo (NPTXDFIFO)
+ *
+ * A slave mode application uses this register to access the Tx FIFO for channel n.
+ */
+union cvmx_usbcx_nptxdfifox {
+ uint32_t u32;
+ struct cvmx_usbcx_nptxdfifox_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t data : 32; /**< Reserved */
+#else
+ uint32_t data : 32;
+#endif
+ } s;
+ struct cvmx_usbcx_nptxdfifox_s cn30xx;
+ struct cvmx_usbcx_nptxdfifox_s cn31xx;
+ struct cvmx_usbcx_nptxdfifox_s cn50xx;
+ struct cvmx_usbcx_nptxdfifox_s cn52xx;
+ struct cvmx_usbcx_nptxdfifox_s cn52xxp1;
+ struct cvmx_usbcx_nptxdfifox_s cn56xx;
+ struct cvmx_usbcx_nptxdfifox_s cn56xxp1;
+};
+typedef union cvmx_usbcx_nptxdfifox cvmx_usbcx_nptxdfifox_t;
+
+/**
+ * cvmx_usbc#_pcgcctl
+ *
+ * Power and Clock Gating Control Register (PCGCCTL)
+ *
+ * The application can use this register to control the core's power-down and clock gating features.
+ */
+union cvmx_usbcx_pcgcctl {
+ uint32_t u32;
+ struct cvmx_usbcx_pcgcctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_5_31 : 27;
+ uint32_t physuspended : 1; /**< PHY Suspended. (PhySuspended)
+ Indicates that the PHY has been suspended. After the
+ application sets the Stop Pclk bit (bit 0), this bit is updated once
+ the PHY is suspended.
+ Since the UTMI+ PHY suspend is controlled through a port, the
+ UTMI+ PHY is suspended immediately after Stop Pclk is set.
+ However, the ULPI PHY takes a few clocks to suspend,
+ because the suspend information is conveyed through the ULPI
+ protocol to the ULPI PHY. */
+ uint32_t rstpdwnmodule : 1; /**< Reset Power-Down Modules (RstPdwnModule)
+ This bit is valid only in Partial Power-Down mode. The
+ application sets this bit when the power is turned off. The
+ application clears this bit after the power is turned on and the
+ PHY clock is up. */
+ uint32_t pwrclmp : 1; /**< Power Clamp (PwrClmp)
+ This bit is only valid in Partial Power-Down mode. The
+ application sets this bit before the power is turned off to clamp
+ the signals between the power-on modules and the power-off
+ modules. The application clears the bit to disable the clamping
+ before the power is turned on. */
+ uint32_t gatehclk : 1; /**< Gate Hclk (GateHclk)
+ The application sets this bit to gate hclk to modules other than
+ the AHB Slave and Master and wakeup logic when the USB is
+ suspended or the session is not valid. The application clears
+ this bit when the USB is resumed or a new session starts. */
+ uint32_t stoppclk : 1; /**< Stop Pclk (StopPclk)
+ The application sets this bit to stop the PHY clock (phy_clk)
+ when the USB is suspended, the session is not valid, or the
+ device is disconnected. The application clears this bit when the
+ USB is resumed or a new session starts. */
+#else
+ uint32_t stoppclk : 1;
+ uint32_t gatehclk : 1;
+ uint32_t pwrclmp : 1;
+ uint32_t rstpdwnmodule : 1;
+ uint32_t physuspended : 1;
+ uint32_t reserved_5_31 : 27;
+#endif
+ } s;
+ struct cvmx_usbcx_pcgcctl_s cn30xx;
+ struct cvmx_usbcx_pcgcctl_s cn31xx;
+ struct cvmx_usbcx_pcgcctl_s cn50xx;
+ struct cvmx_usbcx_pcgcctl_s cn52xx;
+ struct cvmx_usbcx_pcgcctl_s cn52xxp1;
+ struct cvmx_usbcx_pcgcctl_s cn56xx;
+ struct cvmx_usbcx_pcgcctl_s cn56xxp1;
+};
+typedef union cvmx_usbcx_pcgcctl cvmx_usbcx_pcgcctl_t;
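+
+/* Clock-gating sketch on suspend: a single write that gates hclk and
+ * stops phy_clk, per the field descriptions above (illustrative only;
+ * placeholder accessors):
+ *
+ *   cvmx_usbcx_pcgcctl_t pcgc;
+ *   pcgc.u32 = usbc_read32(CVMX_USBCX_PCGCCTL(index));
+ *   pcgc.s.gatehclk = 1;   // gate hclk to all but wakeup logic
+ *   pcgc.s.stoppclk = 1;   // stop phy_clk while suspended
+ *   usbc_write32(CVMX_USBCX_PCGCCTL(index), pcgc.u32);
+ *   // on resume, clear both bits and wait for PhySuspended to drop
+ */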
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-usbcx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-usbd.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-usbd.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-usbd.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1042 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * "cvmx-usbd.c" defines a set of low level USB functions to help
+ * developers create Octeon USB devices for various operating
+ * systems. These functions provide a generic API to the Octeon
+ * USB blocks, hiding the internal hardware specific
+ * operations.
+ *
+ * <hr>$Revision: 32636 $<hr>
+ */
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-clock.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+#include <asm/octeon/cvmx-usbnx-defs.h>
+#include <asm/octeon/cvmx-usbcx-defs.h>
+#include <asm/octeon/cvmx-usbd.h>
+#include <asm/octeon/cvmx-swap.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+#else
+#include "cvmx.h"
+#include "cvmx-clock.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-usbd.h"
+#include "cvmx-swap.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+#endif
+
+#define ULL unsigned long long
+
+/**
+ * @INTERNAL
+ * Read a USB 32bit CSR. It performs the necessary address swizzle for 32bit
+ * CSRs.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param address 64bit address to read
+ *
+ * @return Result of the read
+ */
+static inline uint32_t __cvmx_usbd_read_csr32(cvmx_usbd_state_t *usb, uint64_t address)
+{
+ uint32_t result = cvmx_read64_uint32(address ^ 4);
+ return result;
+}
+
+
+/**
+ * @INTERNAL
+ * Write a USB 32bit CSR. It performs the necessary address swizzle for 32bit
+ * CSRs.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param address 64bit address to write
+ * @param value Value to write
+ */
+static inline void __cvmx_usbd_write_csr32(cvmx_usbd_state_t *usb, uint64_t address, uint32_t value)
+{
+ cvmx_write64_uint32(address ^ 4, value);
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+}
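+
+/* The "address ^ 4" swizzle in the two helpers above appears to flip the
+ * word-select bit within an aligned 64-bit doubleword, so that a 32-bit
+ * CSR is accessed on the correct half of the big-endian 64-bit bus: a
+ * CSR at offset 0x10 is read at 0x14, and one at 0x14 is read at 0x10. */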
+
+/**
+ * @INTERNAL
+ * Calls the user supplied callback when an event happens.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param reason Reason for the callback
+ * @param endpoint_num
+ * Endpoint number
+ * @param bytes_transferred
+ * Bytes transferred
+ */
+static void __cvmx_usbd_callback(cvmx_usbd_state_t *usb, cvmx_usbd_callback_t reason, int endpoint_num, int bytes_transferred)
+{
+ if (usb->callback[reason])
+ {
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Calling callback reason=%d endpoint=%d bytes=%d func=%p data=%p\n",
+ __FUNCTION__, reason, endpoint_num, bytes_transferred, usb->callback[reason], usb->callback_data[reason]);
+ usb->callback[reason](reason, endpoint_num, bytes_transferred, usb->callback_data[reason]);
+ }
+ else
+ {
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: No callback for reason=%d endpoint=%d bytes=%d\n",
+ __FUNCTION__, reason, endpoint_num, bytes_transferred);
+ }
+}
+
+/**
+ * @INTERNAL
+ * Perform USB device mode initialization after a reset completes.
+ * This should be called after USBC0/1_GINTSTS[USBRESET] and
+ * corresponds to section 22.6.1.1, "Initialization on USB Reset",
+ * in the manual.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ *
+ * @return Zero or negative on error.
+ */
+static int __cvmx_usbd_device_reset_complete(cvmx_usbd_state_t *usb)
+{
+ cvmx_usbcx_ghwcfg2_t usbcx_ghwcfg2;
+ cvmx_usbcx_ghwcfg3_t usbcx_ghwcfg3;
+ cvmx_usbcx_doepmsk_t usbcx_doepmsk;
+ cvmx_usbcx_diepmsk_t usbcx_diepmsk;
+ cvmx_usbcx_daintmsk_t usbc_daintmsk;
+ cvmx_usbcx_gnptxfsiz_t gnptxfsiz;
+ int fifo_space;
+ int i;
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Processing reset\n", __FUNCTION__);
+
+ usbcx_ghwcfg2.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GHWCFG2(usb->index));
+ usbcx_ghwcfg3.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GHWCFG3(usb->index));
+
+ /* Set up the data FIFO RAM for each of the FIFOs */
+ fifo_space = usbcx_ghwcfg3.s.dfifodepth;
+
+ /* Start at the top of the FIFO and assign space for each periodic fifo */
+ for (i=usbcx_ghwcfg2.s.numdeveps; i>0; i--)
+ {
+ cvmx_usbcx_dptxfsizx_t siz;
+ siz.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DPTXFSIZX(i, usb->index));
+ fifo_space -= siz.s.dptxfsize;
+ siz.s.dptxfstaddr = fifo_space;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DPTXFSIZX(i, usb->index), siz.u32);
+ }
+
+ /* Assign half the leftover space to the non-periodic tx fifo */
+ gnptxfsiz.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
+ gnptxfsiz.s.nptxfdep = fifo_space / 2;
+ fifo_space -= gnptxfsiz.s.nptxfdep;
+ gnptxfsiz.s.nptxfstaddr = fifo_space;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), gnptxfsiz.u32);
+
+ /* Assign the remaining space to the RX fifo */
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GRXFSIZ(usb->index), fifo_space);
+
+ /* Unmask the common endpoint interrupts */
+ usbcx_doepmsk.u32 = 0;
+ usbcx_doepmsk.s.setupmsk = 1;
+ usbcx_doepmsk.s.epdisbldmsk = 1;
+ usbcx_doepmsk.s.xfercomplmsk = 1;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPMSK(usb->index), usbcx_doepmsk.u32);
+ usbcx_diepmsk.u32 = 0;
+ usbcx_diepmsk.s.epdisbldmsk = 1;
+ usbcx_diepmsk.s.xfercomplmsk = 1;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPMSK(usb->index), usbcx_diepmsk.u32);
+
+ usbc_daintmsk.u32 = 0;
+ usbc_daintmsk.s.inepmsk = -1;
+ usbc_daintmsk.s.outepmsk = -1;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DAINTMSK(usb->index), usbc_daintmsk.u32);
+
+ /* Set all endpoints to NAK */
+ for (i=0; i<usbcx_ghwcfg2.s.numdeveps+1; i++)
+ {
+ cvmx_usbcx_doepctlx_t usbc_doepctl;
+ usbc_doepctl.u32 = 0;
+ usbc_doepctl.s.snak = 1;
+ usbc_doepctl.s.usbactep = 1;
+ usbc_doepctl.s.mps = (i==0) ? 0 : 64;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPCTLX(i, usb->index), usbc_doepctl.u32);
+ }
+
+ return 0;
+}
+
+
+/**
+ * Initialize a USB port for use. This must be called before any
+ * other access to the Octeon USB port is made. The port starts
+ * off in the disabled state.
+ *
+ * @param usb Pointer to an empty cvmx_usbd_state_t structure
+ * that will be populated by the initialize call.
+ * This structure is then passed to all other USB
+ * functions.
+ * @param usb_port_number
+ * Which Octeon USB port to initialize.
+ * @param flags Flags to control hardware initialization. See
+ * cvmx_usbd_initialize_flags_t for the flag
+ * definitions. Some flags are mandatory.
+ *
+ * @return Zero or a negative on error.
+ */
+int cvmx_usbd_initialize(cvmx_usbd_state_t *usb,
+ int usb_port_number,
+ cvmx_usbd_initialize_flags_t flags)
+{
+ cvmx_usbnx_clk_ctl_t usbn_clk_ctl;
+ cvmx_usbnx_usbp_ctl_status_t usbn_usbp_ctl_status;
+
+ if (cvmx_unlikely(flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Called\n", __FUNCTION__);
+
+ memset(usb, 0, sizeof(*usb));
+ usb->init_flags = flags;
+ usb->index = usb_port_number;
+
+ /* Try to determine clock type automatically */
+ if ((usb->init_flags & (CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI |
+ CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)) == 0)
+ {
+ if (__cvmx_helper_board_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12)
+ usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI; /* Only 12 MHz crystals are supported */
+ else
+ usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND;
+ }
+
+ if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
+ {
+ /* Check for auto ref clock frequency */
+ if (!(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK))
+ switch (__cvmx_helper_board_usb_get_clock_type())
+ {
+ case USB_CLOCK_TYPE_REF_12:
+ usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case USB_CLOCK_TYPE_REF_24:
+ usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case USB_CLOCK_TYPE_REF_48:
+ default:
+ usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ }
+ }
+
+ /* Power On Reset and PHY Initialization */
+
+ /* 1. Wait for DCOK to assert (nothing to do) */
+ /* 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
+ USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0 */
+ usbn_clk_ctl.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hrst = 0;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hclk_rst = 0;
+ usbn_clk_ctl.s.enable = 0;
+ /* 2b. Select the USB reference clock/crystal parameters by writing
+ appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON] */
+ if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
+ {
+ /* The USB port uses 12/24/48MHz 2.5V board clock
+ source at USB_XO. USB_XI should be tied to GND.
+ Most Octeon evaluation boards require this setting */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ {
+ usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
+ usbn_clk_ctl.cn31xx.p_xenbn = 0;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ usbn_clk_ctl.cn56xx.p_rtype = 2; /* From CN56XX,CN50XX manual */
+ else
+ usbn_clk_ctl.cn52xx.p_rtype = 1; /* From CN52XX manual */
+
+ switch (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK)
+ {
+ case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ:
+ usbn_clk_ctl.s.p_c_sel = 0;
+ break;
+ case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ:
+ usbn_clk_ctl.s.p_c_sel = 1;
+ break;
+ case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ:
+ usbn_clk_ctl.s.p_c_sel = 2;
+ break;
+ }
+ }
+ else
+ {
+ /* The USB port uses a 12MHz crystal as clock source
+ at USB_XO and USB_XI */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ {
+ usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
+ usbn_clk_ctl.cn31xx.p_xenbn = 1;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ usbn_clk_ctl.cn56xx.p_rtype = 0; /* From CN56XX,CN50XX manual */
+ else
+ usbn_clk_ctl.cn52xx.p_rtype = 0; /* From CN52XX manual */
+
+ usbn_clk_ctl.s.p_c_sel = 0;
+ }
+ /* 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
+ setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down such
+ that USB is as close as possible to 125 MHz */
+ {
+ int divisor = (cvmx_clock_get_rate(CVMX_CLOCK_CORE)+125000000-1)/125000000;
+ if (divisor < 4) /* Lower than 4 doesn't seem to work properly */
+ divisor = 4;
+ usbn_clk_ctl.s.divide = divisor;
+ usbn_clk_ctl.s.divide2 = 0;
+ }
+ cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
+ usbn_clk_ctl.s.hclk_rst = 1;
+ cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
+ cvmx_wait(64);
+ /* 3. Program the power-on reset field in the USBN clock-control register:
+ USBN_CLK_CTL[POR] = 0 */
+ usbn_clk_ctl.s.por = 0;
+ cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ /* 4. Wait 1 ms for PHY clock to start */
+ cvmx_wait_usec(1000);
+ /* 5. Program the Reset input from automatic test equipment field in the
+ USBP control and status register: USBN_USBP_CTL_STATUS[ATE_RESET] = 1 */
+ usbn_usbp_ctl_status.u64 = cvmx_read_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
+ usbn_usbp_ctl_status.s.ate_reset = 1;
+ cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
+ /* 6. Wait 10 cycles */
+ cvmx_wait(10);
+ /* 7. Clear ATE_RESET field in the USBN clock-control register:
+ USBN_USBP_CTL_STATUS[ATE_RESET] = 0 */
+ usbn_usbp_ctl_status.s.ate_reset = 0;
+ cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
+ /* 8. Program the PHY reset field in the USBN clock-control register:
+ USBN_CLK_CTL[PRST] = 1 */
+ usbn_clk_ctl.s.prst = 1;
+ cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ /* 9. Program the USBP control and status register to select host or
+ device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
+ device */
+ usbn_usbp_ctl_status.s.hst_mode = 1;
+ usbn_usbp_ctl_status.s.dm_pulld = 0;
+ usbn_usbp_ctl_status.s.dp_pulld = 0;
+ cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
+    /* 10. Wait 1 µs */
+ cvmx_wait_usec(1);
+ /* 11. Program the hreset_n field in the USBN clock-control register:
+ USBN_CLK_CTL[HRST] = 1 */
+ usbn_clk_ctl.s.hrst = 1;
+ cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ /* 12. Proceed to USB core initialization */
+ usbn_clk_ctl.s.enable = 1;
+ cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ cvmx_wait_usec(1);
+
+ /* Program the following fields in the global AHB configuration
+ register (USBC_GAHBCFG)
+ DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
+ Burst length, USBC_GAHBCFG[HBSTLEN] = 0
+ Nonperiodic TxFIFO empty level (slave mode only),
+ USBC_GAHBCFG[NPTXFEMPLVL]
+ Periodic TxFIFO empty level (slave mode only),
+ USBC_GAHBCFG[PTXFEMPLVL]
+ Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1 */
+ {
+ cvmx_usbcx_gahbcfg_t usbcx_gahbcfg;
+ usbcx_gahbcfg.u32 = 0;
+ usbcx_gahbcfg.s.dmaen = 1;
+ usbcx_gahbcfg.s.hbstlen = 0;
+ usbcx_gahbcfg.s.nptxfemplvl = 1;
+ usbcx_gahbcfg.s.ptxfemplvl = 1;
+ usbcx_gahbcfg.s.glblintrmsk = 1;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index), usbcx_gahbcfg.u32);
+ }
+
+ /* Program the following fields in USBC_GUSBCFG register.
+ HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
+ ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
+ USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
+ PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0 */
+ {
+ cvmx_usbcx_gusbcfg_t usbcx_gusbcfg;
+ usbcx_gusbcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
+ usbcx_gusbcfg.s.toutcal = 0;
+ usbcx_gusbcfg.s.ddrsel = 0;
+ usbcx_gusbcfg.s.usbtrdtim = 0x5;
+ usbcx_gusbcfg.s.phylpwrclksel = 0;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index), usbcx_gusbcfg.u32);
+ }
+
+ /* Program the following fields in the USBC0/1_DCFG register:
+ Device speed, USBC0/1_DCFG[DEVSPD] = 0 (high speed)
+ Non-zero-length status OUT handshake, USBC0/1_DCFG[NZSTSOUTHSHK]=0
+ Periodic frame interval (if periodic endpoints are supported),
+ USBC0/1_DCFG[PERFRINT] = 1 */
+ {
+ cvmx_usbcx_dcfg_t usbcx_dcfg;
+ usbcx_dcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DCFG(usb->index));
+ usbcx_dcfg.s.devspd = 0;
+ usbcx_dcfg.s.nzstsouthshk = 0;
+ usbcx_dcfg.s.perfrint = 1;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DCFG(usb->index), usbcx_dcfg.u32);
+ }
+
+ /* Program the USBC0/1_GINTMSK register */
+ {
+ cvmx_usbcx_gintmsk_t usbcx_gintmsk;
+ usbcx_gintmsk.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
+ usbcx_gintmsk.s.oepintmsk = 1;
+ usbcx_gintmsk.s.inepintmsk = 1;
+ usbcx_gintmsk.s.enumdonemsk = 1;
+ usbcx_gintmsk.s.usbrstmsk = 1;
+ usbcx_gintmsk.s.usbsuspmsk = 1;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index), usbcx_gintmsk.u32);
+ }
+
+ cvmx_usbd_disable(usb);
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_initialize);
+#endif
+
+
+/**
+ * Shut down a USB port after a call to cvmx_usbd_initialize().
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ *
+ * @return Zero on success or a negative on error.
+ */
+int cvmx_usbd_shutdown(cvmx_usbd_state_t *usb)
+{
+ cvmx_usbnx_clk_ctl_t usbn_clk_ctl;
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Called\n", __FUNCTION__);
+
+ /* Disable the clocks and put them in power on reset */
+ usbn_clk_ctl.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.enable = 1;
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hclk_rst = 1;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hrst = 0;
+ cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_shutdown);
+#endif
+
+
+/**
+ * Enable a USB port. After this call succeeds, the USB port is
+ * online and servicing requests.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return Zero or negative on error.
+ */
+int cvmx_usbd_enable(cvmx_usbd_state_t *usb)
+{
+ cvmx_usbcx_dctl_t usbcx_dctl;
+ usbcx_dctl.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DCTL(usb->index));
+ usbcx_dctl.s.cgoutnak = 1;
+ usbcx_dctl.s.sftdiscon = 0;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DCTL(usb->index), usbcx_dctl.u32);
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_enable);
+#endif
+
+
+/**
+ * Disable a USB port. After this call the USB port will not
+ * generate data transfers and will not generate events.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return Zero or negative on error.
+ */
+int cvmx_usbd_disable(cvmx_usbd_state_t *usb)
+{
+ cvmx_usbcx_dctl_t usbcx_dctl;
+ usbcx_dctl.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DCTL(usb->index));
+ usbcx_dctl.s.sgoutnak = 1;
+ usbcx_dctl.s.sftdiscon = 1;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DCTL(usb->index), usbcx_dctl.u32);
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_disable);
+#endif
+
+
+/**
+ * Register a callback function to process USB events
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param reason The reason this callback should be called
+ * @param func Function to call
+ * @param user_data User supplied data for the callback
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_usbd_register(cvmx_usbd_state_t *usb, cvmx_usbd_callback_t reason, cvmx_usbd_callback_func_t func, void *user_data)
+{
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Register reason=%d func=%p data=%p\n",
+ __FUNCTION__, reason, func, user_data);
+ usb->callback[reason] = func;
+ usb->callback_data[reason] = user_data;
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_register);
+#endif
+
+/**
+ * @INTERNAL
+ * Poll a device mode endpoint for status
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param endpoint_num
+ * Endpoint to poll
+ *
+ * @return Zero on success
+ */
+static int __cvmx_usbd_poll_in_endpoint(cvmx_usbd_state_t *usb, int endpoint_num)
+{
+ cvmx_usbcx_diepintx_t usbc_diepint;
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: endpoint=%d\n", __FUNCTION__, endpoint_num);
+
+ usbc_diepint.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index));
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index), usbc_diepint.u32);
+
+ if (usbc_diepint.s.epdisbld)
+ {
+ /* Endpoint Disabled Interrupt (EPDisbld)
+ This bit indicates that the endpoint is disabled per the
+ application's request. */
+ /* Nothing to do */
+ }
+ if (usbc_diepint.s.xfercompl)
+ {
+ cvmx_usbcx_dieptsizx_t usbc_dieptsiz;
+ int bytes_transferred;
+ /* Transfer Completed Interrupt (XferCompl)
+ Indicates that the programmed transfer is complete on the AHB
+ as well as on the USB, for this endpoint. */
+ usbc_dieptsiz.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DIEPTSIZX(endpoint_num, usb->index));
+ bytes_transferred = usb->endpoint[endpoint_num].buffer_length - usbc_dieptsiz.s.xfersize;
+ __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_IN_COMPLETE, endpoint_num, bytes_transferred);
+ }
+ return 0;
+}
+
+
+/**
+ * @INTERNAL
+ * Poll a device mode endpoint for status
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param endpoint_num
+ * Endpoint to poll
+ *
+ * @return Zero on success
+ */
+static int __cvmx_usbd_poll_out_endpoint(cvmx_usbd_state_t *usb, int endpoint_num)
+{
+ cvmx_usbcx_doepintx_t usbc_doepint;
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: endpoint=%d\n", __FUNCTION__, endpoint_num);
+
+ usbc_doepint.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DOEPINTX(endpoint_num, usb->index));
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPINTX(endpoint_num, usb->index), usbc_doepint.u32);
+
+ if (usbc_doepint.s.setup)
+ {
+ /* SETUP Phase Done (SetUp)
+ Applies to control OUT endpoints only.
+ Indicates that the SETUP phase for the control endpoint is
+ complete and no more back-to-back SETUP packets were
+ received for the current control transfer. On this interrupt, the
+ application can decode the received SETUP data packet. */
+ __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_DEVICE_SETUP, endpoint_num, 0);
+ }
+ if (usbc_doepint.s.epdisbld)
+ {
+ /* Endpoint Disabled Interrupt (EPDisbld)
+ This bit indicates that the endpoint is disabled per the
+ application's request. */
+ /* Nothing to do */
+ }
+ if (usbc_doepint.s.xfercompl)
+ {
+ cvmx_usbcx_doeptsizx_t usbc_doeptsiz;
+ int bytes_transferred;
+ /* Transfer Completed Interrupt (XferCompl)
+ Indicates that the programmed transfer is complete on the AHB
+ as well as on the USB, for this endpoint. */
+ usbc_doeptsiz.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DOEPTSIZX(endpoint_num, usb->index));
+ bytes_transferred = usb->endpoint[endpoint_num].buffer_length - usbc_doeptsiz.s.xfersize;
+ __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_OUT_COMPLETE, endpoint_num, bytes_transferred);
+ }
+
+ return 0;
+}
+
+
+/**
+ * Poll the USB block for status and call all needed callback
+ * handlers. This function is meant to be called in the interrupt
+ * handler for the USB controller. It can also be called
+ * periodically in a loop for non-interrupt-based operation.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ *
+ * @return Zero or negative on error.
+ */
+int cvmx_usbd_poll(cvmx_usbd_state_t *usb)
+{
+ cvmx_usbcx_gintsts_t usbc_gintsts;
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: Called\n", __FUNCTION__);
+
+ /* Read the pending interrupts */
+ usbc_gintsts.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTSTS(usb->index));
+ usbc_gintsts.u32 &= __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
+
+ /* Clear the interrupts now that we know about them */
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), usbc_gintsts.u32);
+
+ if (usbc_gintsts.s.usbsusp)
+ __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_SUSPEND, 0, 0);
+
+ if (usbc_gintsts.s.enumdone)
+ __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_ENUM_COMPLETE, 0, 0);
+
+ if (usbc_gintsts.s.usbrst)
+ {
+ /* USB Reset (USBRst)
+ The core sets this bit to indicate that a reset is
+ detected on the USB. */
+ __cvmx_usbd_device_reset_complete(usb);
+ __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_RESET, 0, 0);
+ }
+
+ if (usbc_gintsts.s.oepint || usbc_gintsts.s.iepint)
+ {
+ cvmx_usbcx_daint_t usbc_daint;
+ usbc_daint.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DAINT(usb->index));
+ if (usbc_daint.s.inepint)
+ {
+ int active_endpoints = usbc_daint.s.inepint;
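+            /* active_endpoints is a bitmask of pending endpoints: CVMX_CLZ
+               counts leading zeros of the 32-bit mask, 31-clz is the index
+               of the highest pending endpoint, and the XOR clears that bit.
+               The OUT loop below walks its mask the same way. */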
+
+ while (active_endpoints)
+ {
+ int endpoint;
+ CVMX_CLZ(endpoint, active_endpoints);
+ endpoint = 31 - endpoint;
+ __cvmx_usbd_poll_in_endpoint(usb, endpoint);
+ active_endpoints ^= 1<<endpoint;
+ }
+ }
+ if (usbc_daint.s.outepint)
+ {
+ int active_endpoints = usbc_daint.s.outepint;
+
+ while (active_endpoints)
+ {
+ int endpoint;
+ CVMX_CLZ(endpoint, active_endpoints);
+ endpoint = 31 - endpoint;
+ __cvmx_usbd_poll_out_endpoint(usb, endpoint);
+ active_endpoints ^= 1<<endpoint;
+ }
+ }
+ }
+
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_poll);
+#endif
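For illustration, the device API above composes into a simple initialize/register/enable/poll lifecycle. The sketch below is not part of the commit: my_usb_handler, the port number, and the clock flags are assumptions that depend on the target board.

    static cvmx_usbd_state_t usb_state;

    /* Hypothetical callback: a real device would decode SETUP packets
       here and queue IN/OUT transfers in response. */
    static void my_usb_handler(cvmx_usbd_callback_t reason, int endpoint_num,
                               int bytes_transferred, void *user_data)
    {
    }

    void example_usb_device_loop(void)
    {
        /* Assume port 0 with a 12 MHz board clock on USB_XO */
        if (cvmx_usbd_initialize(&usb_state, 0,
                                 CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND |
                                 CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ))
            return;
        cvmx_usbd_register(&usb_state, CVMX_USBD_CALLBACK_DEVICE_SETUP,
                           my_usb_handler, NULL);
        cvmx_usbd_enable(&usb_state);
        /* Poll in a loop; an interrupt handler could call this instead */
        while (1)
            cvmx_usbd_poll(&usb_state);
    }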
+
+/**
+ * Get the current USB address
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ *
+ * @return The USB address
+ */
+int cvmx_usbd_get_address(cvmx_usbd_state_t *usb)
+{
+ cvmx_usbcx_dcfg_t usbc_dcfg;
+ usbc_dcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DCFG(usb->index));
+ return usbc_dcfg.s.devaddr;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_get_address);
+#endif
+
+/**
+ * Set the current USB address
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param address Address to set
+ */
+void cvmx_usbd_set_address(cvmx_usbd_state_t *usb, int address)
+{
+ cvmx_usbcx_dcfg_t usbc_dcfg;
+ usbc_dcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DCFG(usb->index));
+ usbc_dcfg.s.devaddr = address;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DCFG(usb->index), usbc_dcfg.u32);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_set_address);
+#endif
+
+/**
+ * Get the current USB speed
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ *
+ * @return The USB speed
+ */
+cvmx_usbd_speed_t cvmx_usbd_get_speed(cvmx_usbd_state_t *usb)
+{
+ cvmx_usbcx_dsts_t usbcx_dsts;
+ usbcx_dsts.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DSTS(usb->index));
+ return usbcx_dsts.s.enumspd;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_get_speed);
+#endif
+
+/**
+ * Set the current USB speed
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param speed The requested speed
+ */
+void cvmx_usbd_set_speed(cvmx_usbd_state_t *usb, cvmx_usbd_speed_t speed)
+{
+ cvmx_usbcx_dcfg_t usbcx_dcfg;
+ usbcx_dcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DCFG(usb->index));
+ usbcx_dcfg.s.devspd = speed;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DCFG(usb->index), usbcx_dcfg.u32);
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_set_speed);
+#endif
+
+/**
+ * Enable an endpoint to respond to an OUT transaction
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param endpoint_num
+ * Endpoint number to enable
+ * @param transfer_type
+ * Transfer type for the endpoint
+ * @param max_packet_size
+ * Maximum packet size for the endpoint
+ * @param buffer Buffer to receive the data
+ * @param buffer_length
+ * Length of the buffer in bytes
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_usbd_out_endpoint_enable(cvmx_usbd_state_t *usb,
+ int endpoint_num, cvmx_usbd_transfer_t transfer_type,
+ int max_packet_size, uint64_t buffer, int buffer_length)
+{
+ cvmx_usbcx_doepctlx_t usbc_doepctl;
+ cvmx_usbcx_doeptsizx_t usbc_doeptsiz;
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: endpoint=%d buffer=0x%llx length=%d\n",
+ __FUNCTION__, endpoint_num, (ULL)buffer, buffer_length);
+
+ usb->endpoint[endpoint_num].buffer_length = buffer_length;
+
+ CVMX_SYNCW; /* Flush out pending writes before enable */
+
+ /* Clear any pending interrupts */
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPINTX(endpoint_num, usb->index),
+ __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DOEPINTX(endpoint_num, usb->index)));
+
+ /* Setup the locations the DMA engines use */
+ cvmx_write_csr(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + endpoint_num*8, buffer);
+
+ usbc_doeptsiz.u32 = 0;
+ usbc_doeptsiz.s.mc = 1;
+ usbc_doeptsiz.s.pktcnt = (buffer_length + max_packet_size - 1) / max_packet_size;
+ if (usbc_doeptsiz.s.pktcnt == 0)
+ usbc_doeptsiz.s.pktcnt = 1;
+ usbc_doeptsiz.s.xfersize = buffer_length;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPTSIZX(endpoint_num, usb->index), usbc_doeptsiz.u32);
+
+ usbc_doepctl.u32 = 0;
+ usbc_doepctl.s.epena = 1;
+ usbc_doepctl.s.setd1pid = 0;
+ usbc_doepctl.s.setd0pid = 0;
+ usbc_doepctl.s.cnak = 1;
+ usbc_doepctl.s.eptype = transfer_type;
+ usbc_doepctl.s.usbactep = 1;
+ if (endpoint_num == 0)
+ {
+ switch (max_packet_size)
+ {
+ case 8:
+ usbc_doepctl.s.mps = 3;
+ break;
+ case 16:
+ usbc_doepctl.s.mps = 2;
+ break;
+ case 32:
+ usbc_doepctl.s.mps = 1;
+ break;
+ default:
+ usbc_doepctl.s.mps = 0;
+ break;
+ }
+ }
+ else
+ usbc_doepctl.s.mps = max_packet_size;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPCTLX(endpoint_num, usb->index), usbc_doepctl.u32);
+
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_out_endpoint_enable);
+#endif
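A hedged usage sketch for the function above; rx_buffer and its size are assumptions, and cvmx_ptr_to_phys() is used because the address written to the USBN DMA engine is a physical address.

    /* Illustrative: arm endpoint 0 to receive up to 64 bytes. For EP0 a
       max_packet_size of 64 falls through to the default case (MPS code 0). */
    static uint8_t rx_buffer[64];
    cvmx_usbd_out_endpoint_enable(&usb_state, 0, CVMX_USBD_TRANSFER_CONTROL,
                                  64, cvmx_ptr_to_phys(rx_buffer),
                                  sizeof(rx_buffer));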
+
+
+/**
+ * Disable an OUT endpoint
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param endpoint_num
+ * Endpoint number to disable
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_usbd_out_endpoint_disable(cvmx_usbd_state_t *usb, int endpoint_num)
+{
+ cvmx_usbcx_doepctlx_t usbc_doepctl;
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: endpoint=%d\n", __FUNCTION__, endpoint_num);
+
+ usbc_doepctl.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DOEPCTLX(endpoint_num, usb->index));
+ if (usbc_doepctl.s.epena && !usbc_doepctl.s.epdis)
+ {
+ usbc_doepctl.s.epdis = 1;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPCTLX(endpoint_num, usb->index), usbc_doepctl.u32);
+ }
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_out_endpoint_disable);
+#endif
+
+
+/**
+ * Enable an endpoint to respond to an IN transaction
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param endpoint_num
+ * Endpoint number to enable
+ * @param transfer_type
+ * Transfer type for the endpoint
+ * @param max_packet_size
+ * Maximum packet size for the endpoint
+ * @param buffer Buffer to send
+ * @param buffer_length
+ * Length of the buffer in bytes
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_usbd_in_endpoint_enable(cvmx_usbd_state_t *usb,
+ int endpoint_num, cvmx_usbd_transfer_t transfer_type,
+ int max_packet_size, uint64_t buffer, int buffer_length)
+{
+ cvmx_usbcx_diepctlx_t usbc_diepctl;
+ cvmx_usbcx_dieptsizx_t usbc_dieptsiz;
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: endpoint=%d buffer=0x%llx length=%d\n",
+ __FUNCTION__, endpoint_num, (ULL)buffer, buffer_length);
+
+ usb->endpoint[endpoint_num].buffer_length = buffer_length;
+
+ CVMX_SYNCW; /* Flush out pending writes before enable */
+
+ /* Clear any pending interrupts */
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index),
+ __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index)));
+
+ usbc_dieptsiz.u32 = 0;
+ usbc_dieptsiz.s.mc = 1;
+ if (buffer)
+ {
+ cvmx_write_csr(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + endpoint_num*8, buffer);
+ usbc_dieptsiz.s.pktcnt = (buffer_length + max_packet_size - 1) / max_packet_size;
+ if (usbc_dieptsiz.s.pktcnt == 0)
+ usbc_dieptsiz.s.pktcnt = 1;
+ usbc_dieptsiz.s.xfersize = buffer_length;
+ }
+ else
+ {
+ usbc_dieptsiz.s.pktcnt = 0;
+ usbc_dieptsiz.s.xfersize = 0;
+ }
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPTSIZX(endpoint_num, usb->index), usbc_dieptsiz.u32);
+
+ usbc_diepctl.u32 = 0;
+ usbc_diepctl.s.epena = (buffer != 0);
+ usbc_diepctl.s.setd1pid = 0;
+ usbc_diepctl.s.setd0pid = (buffer == 0);
+ usbc_diepctl.s.cnak = 1;
+ usbc_diepctl.s.txfnum = endpoint_num;
+ usbc_diepctl.s.eptype = transfer_type;
+ usbc_diepctl.s.usbactep = 1;
+ usbc_diepctl.s.nextep = endpoint_num;
+ if (endpoint_num == 0)
+ {
+ switch (max_packet_size)
+ {
+ case 8:
+ usbc_diepctl.s.mps = 3;
+ break;
+ case 16:
+ usbc_diepctl.s.mps = 2;
+ break;
+ case 32:
+ usbc_diepctl.s.mps = 1;
+ break;
+ default:
+ usbc_diepctl.s.mps = 0;
+ break;
+ }
+ }
+ else
+ usbc_diepctl.s.mps = max_packet_size;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPCTLX(endpoint_num, usb->index), usbc_diepctl.u32);
+
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_in_endpoint_enable);
+#endif
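Note that the buffer == 0 path above does not arm a transfer: it leaves EPENA clear, forces the data toggle to DATA0 (SetD0PID) and clears NAK. For sending data, a hedged sketch, where tx_buffer and the 18-byte length (the size of a standard device descriptor) are assumptions:

    /* Illustrative: queue an 18-byte descriptor reply on endpoint 0;
       pktcnt becomes ceil(18/64) = 1 and xfersize 18. */
    static uint8_t tx_buffer[18];
    cvmx_usbd_in_endpoint_enable(&usb_state, 0, CVMX_USBD_TRANSFER_CONTROL,
                                 64, cvmx_ptr_to_phys(tx_buffer),
                                 sizeof(tx_buffer));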
+
+
+/**
+ * Disable an IN endpoint
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param endpoint_num
+ * Endpoint number to disable
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_usbd_in_endpoint_disable(cvmx_usbd_state_t *usb, int endpoint_num)
+{
+ cvmx_usbcx_diepctlx_t usbc_diepctl;
+
+ if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
+ cvmx_dprintf("%s: endpoint=%d\n", __FUNCTION__, endpoint_num);
+
+ usbc_diepctl.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DIEPCTLX(endpoint_num, usb->index));
+ if (usbc_diepctl.s.epena && !usbc_diepctl.s.epdis)
+ {
+ usbc_diepctl.s.epdis = 1;
+ __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPCTLX(endpoint_num, usb->index), usbc_diepctl.u32);
+ }
+ return 0;
+}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_usbd_in_endpoint_disable);
+#endif
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-usbd.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-usbd.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-usbd.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-usbd.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,301 @@
+/* $MidnightBSD$ */
+#ifndef __CVMX_USBD_H__
+#define __CVMX_USBD_H__
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * "cvmx-usbd.h" defines a set of low level USB functions to help developers
+ * create Octeon USB devices for various operating systems. These functions
+ * provide a generic API to the Octeon USB blocks, hiding the internal hardware
+ * specific operations.
+ *
+ * <hr>$Revision: 32636 $<hr>
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum
+{
+ CVMX_USBD_TRANSFER_CONTROL = 0,
+ CVMX_USBD_TRANSFER_ISOCHRONOUS = 1,
+ CVMX_USBD_TRANSFER_BULK = 2,
+ CVMX_USBD_TRANSFER_INTERRUPT = 3,
+} cvmx_usbd_transfer_t;
+
+typedef enum
+{
+ CVMX_USBD_SPEED_HIGH = 0,
+ CVMX_USBD_SPEED_FULL = 1,
+ CVMX_USBD_SPEED_LOW = 2,
+} cvmx_usbd_speed_t;
+
+typedef enum
+{
+ CVMX_USBD_CALLBACK_SUSPEND,
+ CVMX_USBD_CALLBACK_RESET,
+ CVMX_USBD_CALLBACK_ENUM_COMPLETE,
+ CVMX_USBD_CALLBACK_DEVICE_SETUP,
+ CVMX_USBD_CALLBACK_IN_COMPLETE,
+ CVMX_USBD_CALLBACK_OUT_COMPLETE,
+ __CVMX_USBD_CALLBACK_END
+} cvmx_usbd_callback_t;
+
+typedef enum
+{
+ CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI = 1<<0, /**< The USB port uses a 12MHz crystal as clock source
+ at USB_XO and USB_XI. */
+ CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND = 1<<1, /**< The USB port uses 12/24/48MHz 2.5V board clock
+ source at USB_XO. USB_XI should be tied to GND.*/
+ CVMX_USBD_INITIALIZE_FLAGS_CLOCK_AUTO = 0, /**< Automatically determine clock type based on function
+ in cvmx-helper-board.c. */
+ CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3<<3, /**< Mask for clock speed field */
+ CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ = 1<<3, /**< Speed of reference clock or crystal */
+ CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ = 2<<3, /**< Speed of reference clock */
+ CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ = 3<<3, /**< Speed of reference clock */
+ /* Bits 3-4 used to encode the clock frequency */
+ CVMX_USBD_INITIALIZE_FLAGS_DEBUG = 1<<16
+} cvmx_usbd_initialize_flags_t;
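Bits 3-4 encode the reference-clock speed, so a caller ORs exactly one CLOCK_*MHZ value with one clock-source flag; for example (illustrative):

    cvmx_usbd_initialize_flags_t flags =
        CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND |  /* board clock on USB_XO */
        CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ;    /* 48 MHz reference */
    /* (flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) >> 3 recovers 3 */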
+
+typedef void (*cvmx_usbd_callback_func_t)(cvmx_usbd_callback_t reason, int endpoint_num, int bytes_transferred, void *user_data);
+
+typedef struct
+{
+ int init_flags;
+ int index;
+ cvmx_usbd_callback_func_t callback[__CVMX_USBD_CALLBACK_END];
+ void *callback_data[__CVMX_USBD_CALLBACK_END];
+ struct {
+ int buffer_length;
+ } endpoint[16];
+} cvmx_usbd_state_t;
+
+/**
+ * Initialize a USB port for use. This must be called before any
+ * other access to the Octeon USB port is made. The port starts
+ * off in the disabled state.
+ *
+ * @param usb Pointer to an empty cvmx_usbd_state_t structure
+ * that will be populated by the initialize call.
+ * This structure is then passed to all other USB
+ * functions.
+ * @param usb_port_number
+ * Which Octeon USB port to initialize.
+ * @param flags Flags to control hardware initialization. See
+ * cvmx_usbd_initialize_flags_t for the flag
+ * definitions. Some flags are mandatory.
+ *
+ * @return Zero on success or a negative on error.
+ */
+int cvmx_usbd_initialize(cvmx_usbd_state_t *usb, int usb_port_number,
+ cvmx_usbd_initialize_flags_t flags);
+
+/**
+ * Shut down a USB port after a call to cvmx_usbd_initialize().
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ *
+ * @return Zero on success or a negative on error.
+ */
+int cvmx_usbd_shutdown(cvmx_usbd_state_t *usb);
+
+/**
+ * Enable a USB port. After this call succeeds, the USB port is
+ * online and servicing requests.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return Zero or negative on error.
+ */
+int cvmx_usbd_enable(cvmx_usbd_state_t *usb);
+
+/**
+ * Disable a USB port. After this call the USB port will not
+ * generate data transfers and will not generate events.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * @return Zero or negative on error.
+ */
+int cvmx_usbd_disable(cvmx_usbd_state_t *usb);
+
+/**
+ * Register a callback function to process USB events
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param reason The reason this callback should be called
+ * @param func Function to call
+ * @param user_data User supplied data for the callback
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_usbd_register(cvmx_usbd_state_t *usb, cvmx_usbd_callback_t reason, cvmx_usbd_callback_func_t func, void *user_data);
+
+/**
+ * Poll the USB block for status and call all needed callback
+ * handlers. This function is meant to be called in the interrupt
+ * handler for the USB controller. It can also be called
+ * periodically in a loop for non-interrupt-based operation.
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ *
+ * @return Zero or negative on error.
+ */
+int cvmx_usbd_poll(cvmx_usbd_state_t *usb);
+
+/**
+ * Get the current USB address
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ *
+ * @return The USB address
+ */
+int cvmx_usbd_get_address(cvmx_usbd_state_t *usb);
+
+/**
+ * Set the current USB address
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param address Address to set
+ */
+void cvmx_usbd_set_address(cvmx_usbd_state_t *usb, int address);
+
+/**
+ * Get the current USB speed
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ *
+ * @return The USB speed
+ */
+cvmx_usbd_speed_t cvmx_usbd_get_speed(cvmx_usbd_state_t *usb);
+
+/**
+ * Set the current USB speed
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param speed The requested speed
+ */
+void cvmx_usbd_set_speed(cvmx_usbd_state_t *usb, cvmx_usbd_speed_t speed);
+
+/**
+ * Enable an endpoint to respond to an OUT transaction
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param endpoint_num
+ * Endpoint number to enable
+ * @param transfer_type
+ * Transfer type for the endpoint
+ * @param max_packet_size
+ * Maximum packet size for the endpoint
+ * @param buffer Buffer to receive the data
+ * @param buffer_length
+ * Length of the buffer in bytes
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_usbd_out_endpoint_enable(cvmx_usbd_state_t *usb,
+ int endpoint_num, cvmx_usbd_transfer_t transfer_type,
+ int max_packet_size, uint64_t buffer, int buffer_length);
+
+/**
+ * Disable an OUT endpoint
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param endpoint_num
+ * Endpoint number to disable
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_usbd_out_endpoint_disable(cvmx_usbd_state_t *usb, int endpoint_num);
+
+/**
+ * Enable an endpoint to respond to an IN transaction
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param endpoint_num
+ * Endpoint number to enable
+ * @param transfer_type
+ * Transfer type for the endpoint
+ * @param max_packet_size
+ * Maximum packet size for the endpoint
+ * @param buffer Buffer to send
+ * @param buffer_length
+ * Length of the buffer in bytes
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_usbd_in_endpoint_enable(cvmx_usbd_state_t *usb,
+ int endpoint_num, cvmx_usbd_transfer_t transfer_type,
+ int max_packet_size, uint64_t buffer, int buffer_length);
+
+/**
+ * Disable an IN endpoint
+ *
+ * @param usb USB device state populated by
+ * cvmx_usbd_initialize().
+ * @param endpoint_num
+ * Endpoint number to disable
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_usbd_in_endpoint_disable(cvmx_usbd_state_t *usb, int endpoint_num);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_USBD_H__ */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-usbd.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-usbnx-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-usbnx-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-usbnx-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,2333 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-usbnx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon usbnx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_USBNX_DEFS_H__
+#define __CVMX_USBNX_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_BIST_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_BIST_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800680007F8ull) + ((block_id) & 1) * 0x10000000ull;
+}
+#else
+#define CVMX_USBNX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800680007F8ull) + ((block_id) & 1) * 0x10000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_CLK_CTL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_CLK_CTL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180068000010ull) + ((block_id) & 1) * 0x10000000ull;
+}
+#else
+#define CVMX_USBNX_CLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180068000010ull) + ((block_id) & 1) * 0x10000000ull)
+#endif
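The block_id term selects between the two USBN CSR banks on CN52XX; the arithmetic is a fixed stride (illustrative):

    /* CVMX_USBNX_CLK_CTL(0) == 0x0001180068000010
       CVMX_USBNX_CLK_CTL(1) == 0x0001180068000010 + 0x10000000
                             == 0x0001180078000010  (second port, CN52XX) */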
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_CTL_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_CTL_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000800ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000800ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_INB_CHN0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_INB_CHN0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000818ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_INB_CHN0(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000818ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_INB_CHN1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_INB_CHN1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000820ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_INB_CHN1(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000820ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_INB_CHN2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_INB_CHN2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000828ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_INB_CHN2(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000828ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_INB_CHN3(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_INB_CHN3(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000830ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_INB_CHN3(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000830ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_INB_CHN4(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_INB_CHN4(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000838ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_INB_CHN4(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000838ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_INB_CHN5(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_INB_CHN5(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000840ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_INB_CHN5(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000840ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_INB_CHN6(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_INB_CHN6(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000848ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_INB_CHN6(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000848ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_INB_CHN7(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_INB_CHN7(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000850ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_INB_CHN7(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000850ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_OUTB_CHN0(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_OUTB_CHN0(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000858ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_OUTB_CHN0(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000858ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_OUTB_CHN1(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_OUTB_CHN1(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000860ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_OUTB_CHN1(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000860ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_OUTB_CHN2(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_OUTB_CHN2(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000868ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_OUTB_CHN2(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000868ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_OUTB_CHN3(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_OUTB_CHN3(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000870ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_OUTB_CHN3(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000870ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_OUTB_CHN4(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_OUTB_CHN4(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000878ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_OUTB_CHN4(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000878ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_OUTB_CHN5(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_OUTB_CHN5(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000880ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_OUTB_CHN5(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000880ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_OUTB_CHN6(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_OUTB_CHN6(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000888ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_OUTB_CHN6(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000888ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA0_OUTB_CHN7(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA0_OUTB_CHN7(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000890ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA0_OUTB_CHN7(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000890ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_DMA_TEST(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_DMA_TEST(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00016F0000000808ull) + ((block_id) & 1) * 0x100000000000ull;
+}
+#else
+#define CVMX_USBNX_DMA_TEST(block_id) (CVMX_ADD_IO_SEG(0x00016F0000000808ull) + ((block_id) & 1) * 0x100000000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_INT_ENB(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_INT_ENB(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180068000008ull) + ((block_id) & 1) * 0x10000000ull;
+}
+#else
+#define CVMX_USBNX_INT_ENB(block_id) (CVMX_ADD_IO_SEG(0x0001180068000008ull) + ((block_id) & 1) * 0x10000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_INT_SUM(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_INT_SUM(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180068000000ull) + ((block_id) & 1) * 0x10000000ull;
+}
+#else
+#define CVMX_USBNX_INT_SUM(block_id) (CVMX_ADD_IO_SEG(0x0001180068000000ull) + ((block_id) & 1) * 0x10000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_USBNX_USBP_CTL_STATUS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_USBNX_USBP_CTL_STATUS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x0001180068000018ull) + ((block_id) & 1) * 0x10000000ull;
+}
+#else
+#define CVMX_USBNX_USBP_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x0001180068000018ull) + ((block_id) & 1) * 0x10000000ull)
+#endif
+
+/**
+ * cvmx_usbn#_bist_status
+ *
+ * USBN_BIST_STATUS = USBN's Control and Status
+ *
+ * Contains general control bits and status information for the USBN.
+ */
+union cvmx_usbnx_bist_status {
+ uint64_t u64;
+ struct cvmx_usbnx_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t u2nc_bis : 1; /**< Bist status U2N CTL FIFO Memory. */
+ uint64_t u2nf_bis : 1; /**< Bist status U2N FIFO Memory. */
+ uint64_t e2hc_bis : 1; /**< Bist status E2H CTL FIFO Memory. */
+ uint64_t n2uf_bis : 1; /**< Bist status N2U FIFO Memory. */
+ uint64_t usbc_bis : 1; /**< Bist status USBC FIFO Memory. */
+ uint64_t nif_bis : 1; /**< Bist status for Inbound Memory. */
+ uint64_t nof_bis : 1; /**< Bist status for Outbound Memory. */
+#else
+ uint64_t nof_bis : 1;
+ uint64_t nif_bis : 1;
+ uint64_t usbc_bis : 1;
+ uint64_t n2uf_bis : 1;
+ uint64_t e2hc_bis : 1;
+ uint64_t u2nf_bis : 1;
+ uint64_t u2nc_bis : 1;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_usbnx_bist_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t usbc_bis : 1; /**< Bist status USBC FIFO Memory. */
+ uint64_t nif_bis : 1; /**< Bist status for Inbound Memory. */
+ uint64_t nof_bis : 1; /**< Bist status for Outbound Memory. */
+#else
+ uint64_t nof_bis : 1;
+ uint64_t nif_bis : 1;
+ uint64_t usbc_bis : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_usbnx_bist_status_cn30xx cn31xx;
+ struct cvmx_usbnx_bist_status_s cn50xx;
+ struct cvmx_usbnx_bist_status_s cn52xx;
+ struct cvmx_usbnx_bist_status_s cn52xxp1;
+ struct cvmx_usbnx_bist_status_s cn56xx;
+ struct cvmx_usbnx_bist_status_s cn56xxp1;
+};
+typedef union cvmx_usbnx_bist_status cvmx_usbnx_bist_status_t;
+
+/**
+ * cvmx_usbn#_clk_ctl
+ *
+ * USBN_CLK_CTL = USBN's Clock Control
+ *
+ * This register controls the hclk frequency and the hreset and phy_rst reset signals.
+ */
+union cvmx_usbnx_clk_ctl {
+ uint64_t u64;
+ struct cvmx_usbnx_clk_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t divide2 : 2; /**< The 'hclk' used by the USB subsystem is derived
+ from the eclk.
+ Also see the field DIVIDE. DIVIDE2<1> must currently
+ be zero because it is not implemented, so the maximum
+ ratio of eclk/hclk is currently 16.
+ The actual divide number for hclk is:
+ (DIVIDE2 + 1) * (DIVIDE + 1) */
+ uint64_t hclk_rst : 1; /**< When this field is '0' the HCLK-DIVIDER used to
+ generate the hclk in the USB Subsystem is held
+ in reset. This bit must be set to '0' before
+                                                         changing the value of DIVIDE in this register.
+                                                         The reset to the HCLK_DIVIDER is also asserted
+ when core reset is asserted. */
+ uint64_t p_x_on : 1; /**< Force USB-PHY on during suspend.
+ '1' USB-PHY XO block is powered-down during
+ suspend.
+ '0' USB-PHY XO block is powered-up during
+ suspend.
+ The value of this field must be set while POR is
+ active. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t p_com_on : 1; /**< '0' Force USB-PHY XO Bias, Bandgap and PLL to
+ remain powered in Suspend Mode.
+ '1' The USB-PHY XO Bias, Bandgap and PLL are
+ powered down in suspend mode.
+ The value of this field must be set while POR is
+ active. */
+ uint64_t p_c_sel : 2; /**< Phy clock speed select.
+ Selects the reference clock / crystal frequency.
+ '11': Reserved
+ '10': 48 MHz (reserved when a crystal is used)
+ '01': 24 MHz (reserved when a crystal is used)
+ '00': 12 MHz
+ The value of this field must be set while POR is
+ active.
+ NOTE: if a crystal is used as a reference clock,
+ this field must be set to 12 MHz. */
+ uint64_t cdiv_byp : 1; /**< Used to enable the bypass input to the USB_CLK_DIV. */
+ uint64_t sd_mode : 2; /**< Scaledown mode for the USBC. Control timing events
+ in the USBC, for normal operation this must be '0'. */
+ uint64_t s_bist : 1; /**< Starts bist on the hclk memories, during the '0'
+ to '1' transition. */
+ uint64_t por : 1; /**< Power On Reset for the PHY.
+ Resets all the PHYS registers and state machines. */
+ uint64_t enable : 1; /**< When '1' allows the generation of the hclk. When
+ '0' the hclk will not be generated. SEE DIVIDE
+ field of this register. */
+ uint64_t prst : 1; /**< When this field is '0' the reset associated with
+ the phy_clk functionality in the USB Subsystem is
+                                                         held in reset. This bit should not be set to '1'
+ until the time it takes 6 clocks (hclk or phy_clk,
+ whichever is slower) has passed. Under normal
+ operation once this bit is set to '1' it should not
+ be set to '0'. */
+ uint64_t hrst : 1; /**< When this field is '0' the reset associated with
+                                                         the hclk functionality in the USB Subsystem is
+                                                         held in reset. This bit should not be set to '1'
+ until 12ms after phy_clk is stable. Under normal
+ operation, once this bit is set to '1' it should
+ not be set to '0'. */
+ uint64_t divide : 3; /**< The frequency of 'hclk' used by the USB subsystem
+ is the eclk frequency divided by the value of
+ (DIVIDE2 + 1) * (DIVIDE + 1), also see the field
+ DIVIDE2 of this register.
+                                                         The hclk frequency should be less than 125 MHz.
+ After writing a value to this field the SW should
+ read the field for the value written.
+ The ENABLE field of this register should not be set
+ until AFTER this field is set and then read. */
+#else
+ uint64_t divide : 3;
+ uint64_t hrst : 1;
+ uint64_t prst : 1;
+ uint64_t enable : 1;
+ uint64_t por : 1;
+ uint64_t s_bist : 1;
+ uint64_t sd_mode : 2;
+ uint64_t cdiv_byp : 1;
+ uint64_t p_c_sel : 2;
+ uint64_t p_com_on : 1;
+ uint64_t reserved_14_15 : 2;
+ uint64_t p_x_on : 1;
+ uint64_t hclk_rst : 1;
+ uint64_t divide2 : 2;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_usbnx_clk_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63 : 46;
+ uint64_t hclk_rst : 1; /**< When this field is '0' the HCLK-DIVIDER used to
+ generate the hclk in the USB Subsystem is held
+ in reset. This bit must be set to '0' before
+                                                         changing the value of DIVIDE in this register.
+                                                         The reset to the HCLK_DIVIDER is also asserted
+ when core reset is asserted. */
+ uint64_t p_x_on : 1; /**< Force USB-PHY on during suspend.
+ '1' USB-PHY XO block is powered-down during
+ suspend.
+ '0' USB-PHY XO block is powered-up during
+ suspend.
+ The value of this field must be set while POR is
+ active. */
+	uint64_t p_rclk                       : 1;  /**< Phy reference clock enable.
+ '1' The PHY PLL uses the XO block output as a
+ reference.
+ '0' Reserved. */
+ uint64_t p_xenbn : 1; /**< Phy external clock enable.
+ '1' The XO block uses the clock from a crystal.
+ '0' The XO block uses an external clock supplied
+ on the XO pin. USB_XI should be tied to
+ ground for this usage. */
+ uint64_t p_com_on : 1; /**< '0' Force USB-PHY XO Bias, Bandgap and PLL to
+ remain powered in Suspend Mode.
+ '1' The USB-PHY XO Bias, Bandgap and PLL are
+ powered down in suspend mode.
+ The value of this field must be set while POR is
+ active. */
+ uint64_t p_c_sel : 2; /**< Phy clock speed select.
+ Selects the reference clock / crystal frequency.
+ '11': Reserved
+ '10': 48 MHz
+ '01': 24 MHz
+ '00': 12 MHz
+ The value of this field must be set while POR is
+ active. */
+ uint64_t cdiv_byp : 1; /**< Used to enable the bypass input to the USB_CLK_DIV. */
+ uint64_t sd_mode : 2; /**< Scaledown mode for the USBC. Controls timing events
+ in the USBC; for normal operation this must be '0'. */
+ uint64_t s_bist : 1; /**< Starts bist on the hclk memories, during the '0'
+ to '1' transition. */
+ uint64_t por : 1; /**< Power On Reset for the PHY.
+ Resets all the PHY's registers and state machines. */
+ uint64_t enable : 1; /**< When '1' allows the generation of the hclk. When
+ '0' the hclk will not be generated. */
+ uint64_t prst : 1; /**< When this field is '0' the reset associated with
+ the phy_clk functionality in the USB Subsystem is
+ held in reset. This bit should not be set to '1'
+ until the time it takes 6 clocks (hclk or phy_clk,
+ whichever is slower) has passed. Under normal
+ operation once this bit is set to '1' it should not
+ be set to '0'. */
+ uint64_t hrst : 1; /**< When this field is '0' the reset associated with
+ the hclk functionality in the USB Subsystem is
+ held in reset. This bit should not be set to '1'
+ until 12ms after phy_clk is stable. Under normal
+ operation, once this bit is set to '1' it should
+ not be set to '0'. */
+ uint64_t divide : 3; /**< The 'hclk' used by the USB subsystem is derived
+ from the eclk. The eclk will be divided by the
+ value of this field +1 to determine the hclk
+ frequency. (Also see HRST of this register).
+ The hclk frequency must be less than 125 MHz. */
+#else
+ uint64_t divide : 3;
+ uint64_t hrst : 1;
+ uint64_t prst : 1;
+ uint64_t enable : 1;
+ uint64_t por : 1;
+ uint64_t s_bist : 1;
+ uint64_t sd_mode : 2;
+ uint64_t cdiv_byp : 1;
+ uint64_t p_c_sel : 2;
+ uint64_t p_com_on : 1;
+ uint64_t p_xenbn : 1;
+ uint64_t p_rclk : 1;
+ uint64_t p_x_on : 1;
+ uint64_t hclk_rst : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn30xx;
+ struct cvmx_usbnx_clk_ctl_cn30xx cn31xx;
+ struct cvmx_usbnx_clk_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63 : 44;
+ uint64_t divide2 : 2; /**< The 'hclk' used by the USB subsystem is derived
+ from the eclk.
+ Also see the field DIVIDE. DIVIDE2<1> must currently
+ be zero because it is not implemented, so the maximum
+ ratio of eclk/hclk is currently 16.
+ The actual divide number for hclk is:
+ (DIVIDE2 + 1) * (DIVIDE + 1) */
+ uint64_t hclk_rst : 1; /**< When this field is '0' the HCLK-DIVIDER used to
+ generate the hclk in the USB Subsystem is held
+ in reset. This bit must be set to '0' before
+ changing the value of DIVIDE in this register.
+ The reset to the HCLK_DIVIDER is also asserted
+ when core reset is asserted. */
+ uint64_t reserved_16_16 : 1;
+ uint64_t p_rtype : 2; /**< PHY reference clock type
+ '0' The USB-PHY uses a 12MHz crystal as a clock
+ source at the USB_XO and USB_XI pins
+ '1' Reserved
+ '2' The USB_PHY uses 12/24/48MHz 2.5V board clock
+ at the USB_XO pin. USB_XI should be tied to
+ ground in this case.
+ '3' Reserved
+ (bit 14 was P_XENBN on 3xxx)
+ (bit 15 was P_RCLK on 3xxx) */
+ uint64_t p_com_on : 1; /**< '0' Force USB-PHY XO Bias, Bandgap and PLL to
+ remain powered in Suspend Mode.
+ '1' The USB-PHY XO Bias, Bandgap and PLL are
+ powered down in suspend mode.
+ The value of this field must be set while POR is
+ active. */
+ uint64_t p_c_sel : 2; /**< Phy clock speed select.
+ Selects the reference clock / crystal frequency.
+ '11': Reserved
+ '10': 48 MHz (reserved when a crystal is used)
+ '01': 24 MHz (reserved when a crystal is used)
+ '00': 12 MHz
+ The value of this field must be set while POR is
+ active.
+ NOTE: if a crystal is used as a reference clock,
+ this field must be set to '00' (12 MHz). */
+ uint64_t cdiv_byp : 1; /**< Used to enable the bypass input to the USB_CLK_DIV. */
+ uint64_t sd_mode : 2; /**< Scaledown mode for the USBC. Controls timing events
+ in the USBC; for normal operation this must be '0'. */
+ uint64_t s_bist : 1; /**< Starts bist on the hclk memories, during the '0'
+ to '1' transition. */
+ uint64_t por : 1; /**< Power On Reset for the PHY.
+ Resets all the PHY's registers and state machines. */
+ uint64_t enable : 1; /**< When '1' allows the generation of the hclk. When
+ '0' the hclk will not be generated. SEE DIVIDE
+ field of this register. */
+ uint64_t prst : 1; /**< When this field is '0' the reset associated with
+ the phy_clk functionality in the USB Subsystem is
+ held in reset. This bit should not be set to '1'
+ until the time it takes 6 clocks (hclk or phy_clk,
+ whichever is slower) has passed. Under normal
+ operation once this bit is set to '1' it should not
+ be set to '0'. */
+ uint64_t hrst : 1; /**< When this field is '0' the reset associated with
+ the hclk functionality in the USB Subsystem is
+ held in reset. This bit should not be set to '1'
+ until 12ms after phy_clk is stable. Under normal
+ operation, once this bit is set to '1' it should
+ not be set to '0'. */
+ uint64_t divide : 3; /**< The frequency of 'hclk' used by the USB subsystem
+ is the eclk frequency divided by the value of
+ (DIVIDE2 + 1) * (DIVIDE + 1), also see the field
+ DIVIDE2 of this register.
+ The hclk frequency must be less than 125 MHz.
+ After writing this field, software should read it
+ back to confirm the value written.
+ The ENABLE field of this register should not be set
+ until AFTER this field has been written and read back. */
+#else
+ uint64_t divide : 3;
+ uint64_t hrst : 1;
+ uint64_t prst : 1;
+ uint64_t enable : 1;
+ uint64_t por : 1;
+ uint64_t s_bist : 1;
+ uint64_t sd_mode : 2;
+ uint64_t cdiv_byp : 1;
+ uint64_t p_c_sel : 2;
+ uint64_t p_com_on : 1;
+ uint64_t p_rtype : 2;
+ uint64_t reserved_16_16 : 1;
+ uint64_t hclk_rst : 1;
+ uint64_t divide2 : 2;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn50xx;
+ struct cvmx_usbnx_clk_ctl_cn50xx cn52xx;
+ struct cvmx_usbnx_clk_ctl_cn50xx cn52xxp1;
+ struct cvmx_usbnx_clk_ctl_cn50xx cn56xx;
+ struct cvmx_usbnx_clk_ctl_cn50xx cn56xxp1;
+};
+typedef union cvmx_usbnx_clk_ctl cvmx_usbnx_clk_ctl_t;
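
[Editor's note: a minimal bring-up sketch, not part of the committed header, for the
sequencing the DIVIDE comment describes: write the divider, read it back, and only
then set ENABLE. It assumes the CVMX_USBNX_CLK_CTL() address macro defined earlier
in this header and the generic cvmx_read_csr()/cvmx_write_csr() accessors from the
SDK's cvmx.h.

    cvmx_usbnx_clk_ctl_t clk;

    /* Program the divider first; hclk = eclk / ((DIVIDE2+1) * (DIVIDE+1))
       and must stay below 125 MHz.  DIVIDE2<1> must be zero. */
    clk.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(0));
    clk.s.divide = 4;
    clk.s.divide2 = 0;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(0), clk.u64);

    /* Read back before touching ENABLE, per the DIVIDE field note. */
    clk.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(0));
    clk.s.enable = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(0), clk.u64);
]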
+
+/**
+ * cvmx_usbn#_ctl_status
+ *
+ * USBN_CTL_STATUS = USBN's Control And Status Register
+ *
+ * Contains general control and status information for the USBN block.
+ */
+union cvmx_usbnx_ctl_status {
+ uint64_t u64;
+ struct cvmx_usbnx_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63 : 58;
+ uint64_t dma_0pag : 1; /**< When set to '1' the DMA engine will set the Zero-Page
+ bit in the L2C store operation to the IOB. */
+ uint64_t dma_stt : 1; /**< When set to '1' the DMA engine uses STT operations. */
+ uint64_t dma_test : 1; /**< When set to '1' the DMA engine is in Test-Mode.
+ For normal operation this bit should be '0'. */
+ uint64_t inv_a2 : 1; /**< When '1' causes the address[2] driven on the AHB
+ for USB-CORE FIFO access to be inverted. Also data
+ written to and read from the AHB will have its byte
+ order swapped. If the original order was A-B-C-D the
+ new byte order will be D-C-B-A. */
+ uint64_t l2c_emod : 2; /**< Endian format for data from/to the L2C.
+ IN: A-B-C-D-E-F-G-H
+ OUT0: A-B-C-D-E-F-G-H
+ OUT1: H-G-F-E-D-C-B-A
+ OUT2: D-C-B-A-H-G-F-E
+ OUT3: E-F-G-H-A-B-C-D */
+#else
+ uint64_t l2c_emod : 2;
+ uint64_t inv_a2 : 1;
+ uint64_t dma_test : 1;
+ uint64_t dma_stt : 1;
+ uint64_t dma_0pag : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_usbnx_ctl_status_s cn30xx;
+ struct cvmx_usbnx_ctl_status_s cn31xx;
+ struct cvmx_usbnx_ctl_status_s cn50xx;
+ struct cvmx_usbnx_ctl_status_s cn52xx;
+ struct cvmx_usbnx_ctl_status_s cn52xxp1;
+ struct cvmx_usbnx_ctl_status_s cn56xx;
+ struct cvmx_usbnx_ctl_status_s cn56xxp1;
+};
+typedef union cvmx_usbnx_ctl_status cvmx_usbnx_ctl_status_t;
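
[Editor's note: an illustrative read-modify-write of this register, not part of the
committed header, assuming the CVMX_USBNX_CTL_STATUS() address macro defined earlier
in this file:

    cvmx_usbnx_ctl_status_t ctl;

    ctl.u64 = cvmx_read_csr(CVMX_USBNX_CTL_STATUS(0));
    ctl.s.dma_test = 0;   /* Test-Mode off for normal operation */
    ctl.s.l2c_emod = 0;   /* OUT0: pass bytes A-B-C-D-E-F-G-H through unchanged */
    cvmx_write_csr(CVMX_USBNX_CTL_STATUS(0), ctl.u64);
]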
+
+/**
+ * cvmx_usbn#_dma0_inb_chn0
+ *
+ * USBN_DMA0_INB_CHN0 = USBN's Inbound DMA for USB0 Channel0
+ *
+ * Contains the starting address for use when USB0 writes to L2C via Channel0.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_inb_chn0 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_inb_chn0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Write to L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_inb_chn0_s cn30xx;
+ struct cvmx_usbnx_dma0_inb_chn0_s cn31xx;
+ struct cvmx_usbnx_dma0_inb_chn0_s cn50xx;
+ struct cvmx_usbnx_dma0_inb_chn0_s cn52xx;
+ struct cvmx_usbnx_dma0_inb_chn0_s cn52xxp1;
+ struct cvmx_usbnx_dma0_inb_chn0_s cn56xx;
+ struct cvmx_usbnx_dma0_inb_chn0_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_inb_chn0 cvmx_usbnx_dma0_inb_chn0_t;
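
[Editor's note: an illustrative setup of an inbound channel base address, not part of
the committed header. 'buf' is a hypothetical, suitably aligned DMA buffer; its
physical address (here obtained via the SDK's cvmx_ptr_to_phys()) must fit in the
36-bit ADDR field.

    cvmx_usbnx_dma0_inb_chn0_t chn;

    chn.u64 = 0;
    chn.s.addr = cvmx_ptr_to_phys(buf);
    cvmx_write_csr(CVMX_USBNX_DMA0_INB_CHN0(0), chn.u64);

The seven remaining inbound channel registers below are programmed identically.]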
+
+/**
+ * cvmx_usbn#_dma0_inb_chn1
+ *
+ * USBN_DMA0_INB_CHN1 = USBN's Inbound DMA for USB0 Channel1
+ *
+ * Contains the starting address for use when USB0 writes to L2C via Channel1.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_inb_chn1 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_inb_chn1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Write to L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_inb_chn1_s cn30xx;
+ struct cvmx_usbnx_dma0_inb_chn1_s cn31xx;
+ struct cvmx_usbnx_dma0_inb_chn1_s cn50xx;
+ struct cvmx_usbnx_dma0_inb_chn1_s cn52xx;
+ struct cvmx_usbnx_dma0_inb_chn1_s cn52xxp1;
+ struct cvmx_usbnx_dma0_inb_chn1_s cn56xx;
+ struct cvmx_usbnx_dma0_inb_chn1_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_inb_chn1 cvmx_usbnx_dma0_inb_chn1_t;
+
+/**
+ * cvmx_usbn#_dma0_inb_chn2
+ *
+ * USBN_DMA0_INB_CHN2 = USBN's Inbound DMA for USB0 Channel2
+ *
+ * Contains the starting address for use when USB0 writes to L2C via Channel2.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_inb_chn2 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_inb_chn2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Write to L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_inb_chn2_s cn30xx;
+ struct cvmx_usbnx_dma0_inb_chn2_s cn31xx;
+ struct cvmx_usbnx_dma0_inb_chn2_s cn50xx;
+ struct cvmx_usbnx_dma0_inb_chn2_s cn52xx;
+ struct cvmx_usbnx_dma0_inb_chn2_s cn52xxp1;
+ struct cvmx_usbnx_dma0_inb_chn2_s cn56xx;
+ struct cvmx_usbnx_dma0_inb_chn2_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_inb_chn2 cvmx_usbnx_dma0_inb_chn2_t;
+
+/**
+ * cvmx_usbn#_dma0_inb_chn3
+ *
+ * USBN_DMA0_INB_CHN3 = USBN's Inbound DMA for USB0 Channel3
+ *
+ * Contains the starting address for use when USB0 writes to L2C via Channel3.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_inb_chn3 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_inb_chn3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Write to L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_inb_chn3_s cn30xx;
+ struct cvmx_usbnx_dma0_inb_chn3_s cn31xx;
+ struct cvmx_usbnx_dma0_inb_chn3_s cn50xx;
+ struct cvmx_usbnx_dma0_inb_chn3_s cn52xx;
+ struct cvmx_usbnx_dma0_inb_chn3_s cn52xxp1;
+ struct cvmx_usbnx_dma0_inb_chn3_s cn56xx;
+ struct cvmx_usbnx_dma0_inb_chn3_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_inb_chn3 cvmx_usbnx_dma0_inb_chn3_t;
+
+/**
+ * cvmx_usbn#_dma0_inb_chn4
+ *
+ * USBN_DMA0_INB_CHN4 = USBN's Inbound DMA for USB0 Channel4
+ *
+ * Contains the starting address for use when USB0 writes to L2C via Channel4.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_inb_chn4 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_inb_chn4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Write to L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_inb_chn4_s cn30xx;
+ struct cvmx_usbnx_dma0_inb_chn4_s cn31xx;
+ struct cvmx_usbnx_dma0_inb_chn4_s cn50xx;
+ struct cvmx_usbnx_dma0_inb_chn4_s cn52xx;
+ struct cvmx_usbnx_dma0_inb_chn4_s cn52xxp1;
+ struct cvmx_usbnx_dma0_inb_chn4_s cn56xx;
+ struct cvmx_usbnx_dma0_inb_chn4_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_inb_chn4 cvmx_usbnx_dma0_inb_chn4_t;
+
+/**
+ * cvmx_usbn#_dma0_inb_chn5
+ *
+ * USBN_DMA0_INB_CHN5 = USBN's Inbound DMA for USB0 Channel5
+ *
+ * Contains the starting address for use when USB0 writes to L2C via Channel5.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_inb_chn5 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_inb_chn5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Write to L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_inb_chn5_s cn30xx;
+ struct cvmx_usbnx_dma0_inb_chn5_s cn31xx;
+ struct cvmx_usbnx_dma0_inb_chn5_s cn50xx;
+ struct cvmx_usbnx_dma0_inb_chn5_s cn52xx;
+ struct cvmx_usbnx_dma0_inb_chn5_s cn52xxp1;
+ struct cvmx_usbnx_dma0_inb_chn5_s cn56xx;
+ struct cvmx_usbnx_dma0_inb_chn5_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_inb_chn5 cvmx_usbnx_dma0_inb_chn5_t;
+
+/**
+ * cvmx_usbn#_dma0_inb_chn6
+ *
+ * USBN_DMA0_INB_CHN6 = USBN's Inbound DMA for USB0 Channel6
+ *
+ * Contains the starting address for use when USB0 writes to L2C via Channel6.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_inb_chn6 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_inb_chn6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Write to L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_inb_chn6_s cn30xx;
+ struct cvmx_usbnx_dma0_inb_chn6_s cn31xx;
+ struct cvmx_usbnx_dma0_inb_chn6_s cn50xx;
+ struct cvmx_usbnx_dma0_inb_chn6_s cn52xx;
+ struct cvmx_usbnx_dma0_inb_chn6_s cn52xxp1;
+ struct cvmx_usbnx_dma0_inb_chn6_s cn56xx;
+ struct cvmx_usbnx_dma0_inb_chn6_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_inb_chn6 cvmx_usbnx_dma0_inb_chn6_t;
+
+/**
+ * cvmx_usbn#_dma0_inb_chn7
+ *
+ * USBN_DMA0_INB_CHN7 = USBN's Inbound DMA for USB0 Channel7
+ *
+ * Contains the starting address for use when USB0 writes to L2C via Channel7.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_inb_chn7 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_inb_chn7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Write to L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_inb_chn7_s cn30xx;
+ struct cvmx_usbnx_dma0_inb_chn7_s cn31xx;
+ struct cvmx_usbnx_dma0_inb_chn7_s cn50xx;
+ struct cvmx_usbnx_dma0_inb_chn7_s cn52xx;
+ struct cvmx_usbnx_dma0_inb_chn7_s cn52xxp1;
+ struct cvmx_usbnx_dma0_inb_chn7_s cn56xx;
+ struct cvmx_usbnx_dma0_inb_chn7_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_inb_chn7 cvmx_usbnx_dma0_inb_chn7_t;
+
+/**
+ * cvmx_usbn#_dma0_outb_chn0
+ *
+ * USBN_DMA0_OUTB_CHN0 = USBN's Outbound DMA for USB0 Channel0
+ *
+ * Contains the starting address for use when USB0 reads from L2C via Channel0.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_outb_chn0 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_outb_chn0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Read from L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_outb_chn0_s cn30xx;
+ struct cvmx_usbnx_dma0_outb_chn0_s cn31xx;
+ struct cvmx_usbnx_dma0_outb_chn0_s cn50xx;
+ struct cvmx_usbnx_dma0_outb_chn0_s cn52xx;
+ struct cvmx_usbnx_dma0_outb_chn0_s cn52xxp1;
+ struct cvmx_usbnx_dma0_outb_chn0_s cn56xx;
+ struct cvmx_usbnx_dma0_outb_chn0_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_outb_chn0 cvmx_usbnx_dma0_outb_chn0_t;
+
+/**
+ * cvmx_usbn#_dma0_outb_chn1
+ *
+ * USBN_DMA0_OUTB_CHN1 = USBN's Outbound DMA for USB0 Channel1
+ *
+ * Contains the starting address for use when USB0 reads from L2C via Channel1.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_outb_chn1 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_outb_chn1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Read from L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_outb_chn1_s cn30xx;
+ struct cvmx_usbnx_dma0_outb_chn1_s cn31xx;
+ struct cvmx_usbnx_dma0_outb_chn1_s cn50xx;
+ struct cvmx_usbnx_dma0_outb_chn1_s cn52xx;
+ struct cvmx_usbnx_dma0_outb_chn1_s cn52xxp1;
+ struct cvmx_usbnx_dma0_outb_chn1_s cn56xx;
+ struct cvmx_usbnx_dma0_outb_chn1_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_outb_chn1 cvmx_usbnx_dma0_outb_chn1_t;
+
+/**
+ * cvmx_usbn#_dma0_outb_chn2
+ *
+ * USBN_DMA0_OUTB_CHN2 = USBN's Outbound DMA for USB0 Channel2
+ *
+ * Contains the starting address for use when USB0 reads from L2C via Channel2.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_outb_chn2 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_outb_chn2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Read from L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_outb_chn2_s cn30xx;
+ struct cvmx_usbnx_dma0_outb_chn2_s cn31xx;
+ struct cvmx_usbnx_dma0_outb_chn2_s cn50xx;
+ struct cvmx_usbnx_dma0_outb_chn2_s cn52xx;
+ struct cvmx_usbnx_dma0_outb_chn2_s cn52xxp1;
+ struct cvmx_usbnx_dma0_outb_chn2_s cn56xx;
+ struct cvmx_usbnx_dma0_outb_chn2_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_outb_chn2 cvmx_usbnx_dma0_outb_chn2_t;
+
+/**
+ * cvmx_usbn#_dma0_outb_chn3
+ *
+ * USBN_DMA0_OUTB_CHN3 = USBN's Outbound DMA for USB0 Channel3
+ *
+ * Contains the starting address for use when USB0 reads from L2C via Channel3.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_outb_chn3 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_outb_chn3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Read from L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_outb_chn3_s cn30xx;
+ struct cvmx_usbnx_dma0_outb_chn3_s cn31xx;
+ struct cvmx_usbnx_dma0_outb_chn3_s cn50xx;
+ struct cvmx_usbnx_dma0_outb_chn3_s cn52xx;
+ struct cvmx_usbnx_dma0_outb_chn3_s cn52xxp1;
+ struct cvmx_usbnx_dma0_outb_chn3_s cn56xx;
+ struct cvmx_usbnx_dma0_outb_chn3_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_outb_chn3 cvmx_usbnx_dma0_outb_chn3_t;
+
+/**
+ * cvmx_usbn#_dma0_outb_chn4
+ *
+ * USBN_DMA0_OUTB_CHN4 = USBN's Outbound DMA for USB0 Channel4
+ *
+ * Contains the starting address for use when USB0 reads from L2C via Channel4.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_outb_chn4 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_outb_chn4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Read from L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_outb_chn4_s cn30xx;
+ struct cvmx_usbnx_dma0_outb_chn4_s cn31xx;
+ struct cvmx_usbnx_dma0_outb_chn4_s cn50xx;
+ struct cvmx_usbnx_dma0_outb_chn4_s cn52xx;
+ struct cvmx_usbnx_dma0_outb_chn4_s cn52xxp1;
+ struct cvmx_usbnx_dma0_outb_chn4_s cn56xx;
+ struct cvmx_usbnx_dma0_outb_chn4_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_outb_chn4 cvmx_usbnx_dma0_outb_chn4_t;
+
+/**
+ * cvmx_usbn#_dma0_outb_chn5
+ *
+ * USBN_DMA0_OUTB_CHN5 = USBN's Outbound DMA for USB0 Channel5
+ *
+ * Contains the starting address for use when USB0 reads from L2C via Channel5.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_outb_chn5 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_outb_chn5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Read from L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_outb_chn5_s cn30xx;
+ struct cvmx_usbnx_dma0_outb_chn5_s cn31xx;
+ struct cvmx_usbnx_dma0_outb_chn5_s cn50xx;
+ struct cvmx_usbnx_dma0_outb_chn5_s cn52xx;
+ struct cvmx_usbnx_dma0_outb_chn5_s cn52xxp1;
+ struct cvmx_usbnx_dma0_outb_chn5_s cn56xx;
+ struct cvmx_usbnx_dma0_outb_chn5_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_outb_chn5 cvmx_usbnx_dma0_outb_chn5_t;
+
+/**
+ * cvmx_usbn#_dma0_outb_chn6
+ *
+ * USBN_DMA0_OUTB_CHN6 = USBN's Outbound DMA for USB0 Channel6
+ *
+ * Contains the starting address for use when USB0 reads from L2C via Channel6.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_outb_chn6 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_outb_chn6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Read from L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_outb_chn6_s cn30xx;
+ struct cvmx_usbnx_dma0_outb_chn6_s cn31xx;
+ struct cvmx_usbnx_dma0_outb_chn6_s cn50xx;
+ struct cvmx_usbnx_dma0_outb_chn6_s cn52xx;
+ struct cvmx_usbnx_dma0_outb_chn6_s cn52xxp1;
+ struct cvmx_usbnx_dma0_outb_chn6_s cn56xx;
+ struct cvmx_usbnx_dma0_outb_chn6_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_outb_chn6 cvmx_usbnx_dma0_outb_chn6_t;
+
+/**
+ * cvmx_usbn#_dma0_outb_chn7
+ *
+ * USBN_DMA0_OUTB_CHN7 = USBN's Outbound DMA for USB0 Channel7
+ *
+ * Contains the starting address for use when USB0 reads from L2C via Channel7.
+ * Writing of this register sets the base address.
+ */
+union cvmx_usbnx_dma0_outb_chn7 {
+ uint64_t u64;
+ struct cvmx_usbnx_dma0_outb_chn7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63 : 28;
+ uint64_t addr : 36; /**< Base address for DMA Read from L2C. */
+#else
+ uint64_t addr : 36;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_usbnx_dma0_outb_chn7_s cn30xx;
+ struct cvmx_usbnx_dma0_outb_chn7_s cn31xx;
+ struct cvmx_usbnx_dma0_outb_chn7_s cn50xx;
+ struct cvmx_usbnx_dma0_outb_chn7_s cn52xx;
+ struct cvmx_usbnx_dma0_outb_chn7_s cn52xxp1;
+ struct cvmx_usbnx_dma0_outb_chn7_s cn56xx;
+ struct cvmx_usbnx_dma0_outb_chn7_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma0_outb_chn7 cvmx_usbnx_dma0_outb_chn7_t;
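
[Editor's note: the eight outbound channel registers share one layout, so they can be
programmed in a loop. Since the per-channel register stride is not specified here, this
illustrative sketch (not part of the committed header) collects the address macros into
a table instead of guessing a stride; 'outbuf' is a hypothetical array of DMA buffers.

    uint64_t csr[8];
    int ch;

    csr[0] = CVMX_USBNX_DMA0_OUTB_CHN0(0);
    csr[1] = CVMX_USBNX_DMA0_OUTB_CHN1(0);
    csr[2] = CVMX_USBNX_DMA0_OUTB_CHN2(0);
    csr[3] = CVMX_USBNX_DMA0_OUTB_CHN3(0);
    csr[4] = CVMX_USBNX_DMA0_OUTB_CHN4(0);
    csr[5] = CVMX_USBNX_DMA0_OUTB_CHN5(0);
    csr[6] = CVMX_USBNX_DMA0_OUTB_CHN6(0);
    csr[7] = CVMX_USBNX_DMA0_OUTB_CHN7(0);

    for (ch = 0; ch < 8; ch++) {
        cvmx_usbnx_dma0_outb_chn0_t chn;  /* all channels share this layout */
        chn.u64 = 0;
        chn.s.addr = cvmx_ptr_to_phys(outbuf[ch]);
        cvmx_write_csr(csr[ch], chn.u64);
    }
]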
+
+/**
+ * cvmx_usbn#_dma_test
+ *
+ * USBN_DMA_TEST = USBN's DMA Test Register
+ *
+ * This register can cause the DMA engine attached to the USB core to make transfers between the L2C and the USB FIFOs.
+ */
+union cvmx_usbnx_dma_test {
+ uint64_t u64;
+ struct cvmx_usbnx_dma_test_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63 : 24;
+ uint64_t done : 1; /**< This field is set when a DMA completes. Writing a
+ '1' to this field clears this bit. */
+ uint64_t req : 1; /**< DMA Request. Writing a '1' to this field
+ causes a DMA request, as specified in the other
+ fields of this register, to take place. This field
+ will always read as '0'. */
+ uint64_t f_addr : 18; /**< The address to read from in the Data-Fifo. */
+ uint64_t count : 11; /**< DMA Request Count. */
+ uint64_t channel : 5; /**< DMA Channel/Endpoint. */
+ uint64_t burst : 4; /**< DMA Burst Size. */
+#else
+ uint64_t burst : 4;
+ uint64_t channel : 5;
+ uint64_t count : 11;
+ uint64_t f_addr : 18;
+ uint64_t req : 1;
+ uint64_t done : 1;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } s;
+ struct cvmx_usbnx_dma_test_s cn30xx;
+ struct cvmx_usbnx_dma_test_s cn31xx;
+ struct cvmx_usbnx_dma_test_s cn50xx;
+ struct cvmx_usbnx_dma_test_s cn52xx;
+ struct cvmx_usbnx_dma_test_s cn52xxp1;
+ struct cvmx_usbnx_dma_test_s cn56xx;
+ struct cvmx_usbnx_dma_test_s cn56xxp1;
+};
+typedef union cvmx_usbnx_dma_test cvmx_usbnx_dma_test_t;
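
[Editor's note: an illustrative test-mode sequence, not part of the committed header,
following the field comments above: kick a request with REQ, poll DONE, then clear
DONE by writing a '1' to it. Test-Mode must first be enabled via
USBN_CTL_STATUS[DMA_TEST]; the field values chosen here are placeholders.

    cvmx_usbnx_dma_test_t test;

    test.u64 = 0;
    test.s.channel = 0;   /* channel/endpoint to exercise */
    test.s.count = 1;     /* request count */
    test.s.f_addr = 0;    /* Data-Fifo read address */
    test.s.req = 1;       /* kick the request; always reads back as '0' */
    cvmx_write_csr(CVMX_USBNX_DMA_TEST(0), test.u64);

    do {                  /* DONE sets when the DMA completes... */
        test.u64 = cvmx_read_csr(CVMX_USBNX_DMA_TEST(0));
    } while (!test.s.done);

    test.u64 = 0;         /* ...and is cleared by writing a '1' to it */
    test.s.done = 1;
    cvmx_write_csr(CVMX_USBNX_DMA_TEST(0), test.u64);
]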
+
+/**
+ * cvmx_usbn#_int_enb
+ *
+ * USBN_INT_ENB = USBN's Interrupt Enable
+ *
+ * The USBN's interrupt enable register.
+ */
+union cvmx_usbnx_int_enb {
+ uint64_t u64;
+ struct cvmx_usbnx_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t nd4o_dpf : 1; /**< When set (1) and bit 37 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nd4o_dpe : 1; /**< When set (1) and bit 36 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nd4o_rpf : 1; /**< When set (1) and bit 35 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nd4o_rpe : 1; /**< When set (1) and bit 34 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t ltl_f_pf : 1; /**< When set (1) and bit 33 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t ltl_f_pe : 1; /**< When set (1) and bit 32 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t u2n_c_pe : 1; /**< When set (1) and bit 31 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t u2n_c_pf : 1; /**< When set (1) and bit 30 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t u2n_d_pf : 1; /**< When set (1) and bit 29 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t u2n_d_pe : 1; /**< When set (1) and bit 28 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t n2u_pe : 1; /**< When set (1) and bit 27 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t n2u_pf : 1; /**< When set (1) and bit 26 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t uod_pf : 1; /**< When set (1) and bit 25 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t uod_pe : 1; /**< When set (1) and bit 24 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rq_q3_e : 1; /**< When set (1) and bit 23 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rq_q3_f : 1; /**< When set (1) and bit 22 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rq_q2_e : 1; /**< When set (1) and bit 21 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rq_q2_f : 1; /**< When set (1) and bit 20 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rg_fi_f : 1; /**< When set (1) and bit 19 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rg_fi_e : 1; /**< When set (1) and bit 18 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t l2_fi_f : 1; /**< When set (1) and bit 17 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t l2_fi_e : 1; /**< When set (1) and bit 16 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t l2c_a_f : 1; /**< When set (1) and bit 15 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t l2c_s_e : 1; /**< When set (1) and bit 14 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t dcred_f : 1; /**< When set (1) and bit 13 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t dcred_e : 1; /**< When set (1) and bit 12 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t lt_pu_f : 1; /**< When set (1) and bit 11 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t lt_po_e : 1; /**< When set (1) and bit 10 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nt_pu_f : 1; /**< When set (1) and bit 9 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nt_po_e : 1; /**< When set (1) and bit 8 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t pt_pu_f : 1; /**< When set (1) and bit 7 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t pt_po_e : 1; /**< When set (1) and bit 6 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t lr_pu_f : 1; /**< When set (1) and bit 5 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t lr_po_e : 1; /**< When set (1) and bit 4 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nr_pu_f : 1; /**< When set (1) and bit 3 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nr_po_e : 1; /**< When set (1) and bit 2 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t pr_pu_f : 1; /**< When set (1) and bit 1 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t pr_po_e : 1; /**< When set (1) and bit 0 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+#else
+ uint64_t pr_po_e : 1;
+ uint64_t pr_pu_f : 1;
+ uint64_t nr_po_e : 1;
+ uint64_t nr_pu_f : 1;
+ uint64_t lr_po_e : 1;
+ uint64_t lr_pu_f : 1;
+ uint64_t pt_po_e : 1;
+ uint64_t pt_pu_f : 1;
+ uint64_t nt_po_e : 1;
+ uint64_t nt_pu_f : 1;
+ uint64_t lt_po_e : 1;
+ uint64_t lt_pu_f : 1;
+ uint64_t dcred_e : 1;
+ uint64_t dcred_f : 1;
+ uint64_t l2c_s_e : 1;
+ uint64_t l2c_a_f : 1;
+ uint64_t l2_fi_e : 1;
+ uint64_t l2_fi_f : 1;
+ uint64_t rg_fi_e : 1;
+ uint64_t rg_fi_f : 1;
+ uint64_t rq_q2_f : 1;
+ uint64_t rq_q2_e : 1;
+ uint64_t rq_q3_f : 1;
+ uint64_t rq_q3_e : 1;
+ uint64_t uod_pe : 1;
+ uint64_t uod_pf : 1;
+ uint64_t n2u_pf : 1;
+ uint64_t n2u_pe : 1;
+ uint64_t u2n_d_pe : 1;
+ uint64_t u2n_d_pf : 1;
+ uint64_t u2n_c_pf : 1;
+ uint64_t u2n_c_pe : 1;
+ uint64_t ltl_f_pe : 1;
+ uint64_t ltl_f_pf : 1;
+ uint64_t nd4o_rpe : 1;
+ uint64_t nd4o_rpf : 1;
+ uint64_t nd4o_dpe : 1;
+ uint64_t nd4o_dpf : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_usbnx_int_enb_s cn30xx;
+ struct cvmx_usbnx_int_enb_s cn31xx;
+ struct cvmx_usbnx_int_enb_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t nd4o_dpf : 1; /**< When set (1) and bit 37 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nd4o_dpe : 1; /**< When set (1) and bit 36 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nd4o_rpf : 1; /**< When set (1) and bit 35 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nd4o_rpe : 1; /**< When set (1) and bit 34 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t ltl_f_pf : 1; /**< When set (1) and bit 33 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t ltl_f_pe : 1; /**< When set (1) and bit 32 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t reserved_26_31 : 6;
+ uint64_t uod_pf : 1; /**< When set (1) and bit 25 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t uod_pe : 1; /**< When set (1) and bit 24 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rq_q3_e : 1; /**< When set (1) and bit 23 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rq_q3_f : 1; /**< When set (1) and bit 22 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rq_q2_e : 1; /**< When set (1) and bit 21 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rq_q2_f : 1; /**< When set (1) and bit 20 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rg_fi_f : 1; /**< When set (1) and bit 19 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t rg_fi_e : 1; /**< When set (1) and bit 18 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t l2_fi_f : 1; /**< When set (1) and bit 17 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t l2_fi_e : 1; /**< When set (1) and bit 16 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t l2c_a_f : 1; /**< When set (1) and bit 15 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t l2c_s_e : 1; /**< When set (1) and bit 14 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t dcred_f : 1; /**< When set (1) and bit 13 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t dcred_e : 1; /**< When set (1) and bit 12 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t lt_pu_f : 1; /**< When set (1) and bit 11 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t lt_po_e : 1; /**< When set (1) and bit 10 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nt_pu_f : 1; /**< When set (1) and bit 9 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nt_po_e : 1; /**< When set (1) and bit 8 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t pt_pu_f : 1; /**< When set (1) and bit 7 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t pt_po_e : 1; /**< When set (1) and bit 6 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t lr_pu_f : 1; /**< When set (1) and bit 5 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t lr_po_e : 1; /**< When set (1) and bit 4 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nr_pu_f : 1; /**< When set (1) and bit 3 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t nr_po_e : 1; /**< When set (1) and bit 2 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t pr_pu_f : 1; /**< When set (1) and bit 1 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+ uint64_t pr_po_e : 1; /**< When set (1) and bit 0 of the USBN_INT_SUM
+ register is asserted the USBN will assert an
+ interrupt. */
+#else
+ uint64_t pr_po_e : 1;
+ uint64_t pr_pu_f : 1;
+ uint64_t nr_po_e : 1;
+ uint64_t nr_pu_f : 1;
+ uint64_t lr_po_e : 1;
+ uint64_t lr_pu_f : 1;
+ uint64_t pt_po_e : 1;
+ uint64_t pt_pu_f : 1;
+ uint64_t nt_po_e : 1;
+ uint64_t nt_pu_f : 1;
+ uint64_t lt_po_e : 1;
+ uint64_t lt_pu_f : 1;
+ uint64_t dcred_e : 1;
+ uint64_t dcred_f : 1;
+ uint64_t l2c_s_e : 1;
+ uint64_t l2c_a_f : 1;
+ uint64_t l2_fi_e : 1;
+ uint64_t l2_fi_f : 1;
+ uint64_t rg_fi_e : 1;
+ uint64_t rg_fi_f : 1;
+ uint64_t rq_q2_f : 1;
+ uint64_t rq_q2_e : 1;
+ uint64_t rq_q3_f : 1;
+ uint64_t rq_q3_e : 1;
+ uint64_t uod_pe : 1;
+ uint64_t uod_pf : 1;
+ uint64_t reserved_26_31 : 6;
+ uint64_t ltl_f_pe : 1;
+ uint64_t ltl_f_pf : 1;
+ uint64_t nd4o_rpe : 1;
+ uint64_t nd4o_rpf : 1;
+ uint64_t nd4o_dpe : 1;
+ uint64_t nd4o_dpf : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } cn50xx;
+ struct cvmx_usbnx_int_enb_cn50xx cn52xx;
+ struct cvmx_usbnx_int_enb_cn50xx cn52xxp1;
+ struct cvmx_usbnx_int_enb_cn50xx cn56xx;
+ struct cvmx_usbnx_int_enb_cn50xx cn56xxp1;
+};
+typedef union cvmx_usbnx_int_enb cvmx_usbnx_int_enb_t;
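
[Editor's note: an illustrative enable of two interrupt sources, not part of the
committed header. Each enable bit gates the like-positioned cause bit in
USBN_INT_SUM, as the field comments above state.

    cvmx_usbnx_int_enb_t enb;

    enb.u64 = 0;
    enb.s.nd4o_dpf = 1;   /* interrupt on NCB DMA Out data FIFO push-full */
    enb.s.ltl_f_pf = 1;   /* interrupt on L2C transfer-length FIFO push-full */
    cvmx_write_csr(CVMX_USBNX_INT_ENB(0), enb.u64);
]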
+
+/**
+ * cvmx_usbn#_int_sum
+ *
+ * USBN_INT_SUM = USBN's Interrupt Summary Register
+ *
+ * Contains the different interrupt summary bits of the USBN.
+ */
+union cvmx_usbnx_int_sum {
+ uint64_t u64;
+ struct cvmx_usbnx_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t nd4o_dpf : 1; /**< NCB DMA Out Data Fifo Push Full. */
+ uint64_t nd4o_dpe : 1; /**< NCB DMA Out Data Fifo Pop Empty. */
+ uint64_t nd4o_rpf : 1; /**< NCB DMA Out Request Fifo Push Full. */
+ uint64_t nd4o_rpe : 1; /**< NCB DMA Out Request Fifo Pop Empty. */
+ uint64_t ltl_f_pf : 1; /**< L2C Transfer Length Fifo Push Full. */
+ uint64_t ltl_f_pe : 1; /**< L2C Transfer Length Fifo Pop Empty. */
+ uint64_t u2n_c_pe : 1; /**< U2N Control Fifo Pop Empty. */
+ uint64_t u2n_c_pf : 1; /**< U2N Control Fifo Push Full. */
+ uint64_t u2n_d_pf : 1; /**< U2N Data Fifo Push Full. */
+ uint64_t u2n_d_pe : 1; /**< U2N Data Fifo Pop Empty. */
+ uint64_t n2u_pe : 1; /**< N2U Fifo Pop Empty. */
+ uint64_t n2u_pf : 1; /**< N2U Fifo Push Full. */
+ uint64_t uod_pf : 1; /**< UOD Fifo Push Full. */
+ uint64_t uod_pe : 1; /**< UOD Fifo Pop Empty. */
+ uint64_t rq_q3_e : 1; /**< Request Queue-3 Fifo Popped When Empty. */
+ uint64_t rq_q3_f : 1; /**< Request Queue-3 Fifo Pushed When Full. */
+ uint64_t rq_q2_e : 1; /**< Request Queue-2 Fifo Popped When Empty. */
+ uint64_t rq_q2_f : 1; /**< Request Queue-2 Fifo Pushed When Full. */
+ uint64_t rg_fi_f : 1; /**< Register Request Fifo Pushed When Full. */
+ uint64_t rg_fi_e : 1; /**< Register Request Fifo Popped When Empty. */
+ uint64_t lt_fi_f : 1; /**< L2C Request Fifo Pushed When Full. */
+ uint64_t lt_fi_e : 1; /**< L2C Request Fifo Popped When Empty. */
+ uint64_t l2c_a_f : 1; /**< L2C Credit Count Added When Full. */
+ uint64_t l2c_s_e : 1; /**< L2C Credit Count Subtracted When Empty. */
+ uint64_t dcred_f : 1; /**< Data Credit Fifo Pushed When Full. */
+ uint64_t dcred_e : 1; /**< Data Credit Fifo Popped When Empty. */
+ uint64_t lt_pu_f : 1; /**< L2C Transaction Fifo Pushed When Full. */
+ uint64_t lt_po_e : 1; /**< L2C Transaction Fifo Popped When Empty. */
+ uint64_t nt_pu_f : 1; /**< NPI Transaction Fifo Pushed When Full. */
+ uint64_t nt_po_e : 1; /**< NPI Transaction Fifo Popped When Empty. */
+ uint64_t pt_pu_f : 1; /**< PP Transaction Fifo Pushed When Full. */
+ uint64_t pt_po_e : 1; /**< PP Transaction Fifo Popped When Empty. */
+ uint64_t lr_pu_f : 1; /**< L2C Request Fifo Pushed When Full. */
+ uint64_t lr_po_e : 1; /**< L2C Request Fifo Popped When Empty. */
+ uint64_t nr_pu_f : 1; /**< NPI Request Fifo Pushed When Full. */
+ uint64_t nr_po_e : 1; /**< NPI Request Fifo Popped When Empty. */
+ uint64_t pr_pu_f : 1; /**< PP Request Fifo Pushed When Full. */
+ uint64_t pr_po_e : 1; /**< PP Request Fifo Popped When Empty. */
+#else
+ uint64_t pr_po_e : 1;
+ uint64_t pr_pu_f : 1;
+ uint64_t nr_po_e : 1;
+ uint64_t nr_pu_f : 1;
+ uint64_t lr_po_e : 1;
+ uint64_t lr_pu_f : 1;
+ uint64_t pt_po_e : 1;
+ uint64_t pt_pu_f : 1;
+ uint64_t nt_po_e : 1;
+ uint64_t nt_pu_f : 1;
+ uint64_t lt_po_e : 1;
+ uint64_t lt_pu_f : 1;
+ uint64_t dcred_e : 1;
+ uint64_t dcred_f : 1;
+ uint64_t l2c_s_e : 1;
+ uint64_t l2c_a_f : 1;
+ uint64_t lt_fi_e : 1;
+ uint64_t lt_fi_f : 1;
+ uint64_t rg_fi_e : 1;
+ uint64_t rg_fi_f : 1;
+ uint64_t rq_q2_f : 1;
+ uint64_t rq_q2_e : 1;
+ uint64_t rq_q3_f : 1;
+ uint64_t rq_q3_e : 1;
+ uint64_t uod_pe : 1;
+ uint64_t uod_pf : 1;
+ uint64_t n2u_pf : 1;
+ uint64_t n2u_pe : 1;
+ uint64_t u2n_d_pe : 1;
+ uint64_t u2n_d_pf : 1;
+ uint64_t u2n_c_pf : 1;
+ uint64_t u2n_c_pe : 1;
+ uint64_t ltl_f_pe : 1;
+ uint64_t ltl_f_pf : 1;
+ uint64_t nd4o_rpe : 1;
+ uint64_t nd4o_rpf : 1;
+ uint64_t nd4o_dpe : 1;
+ uint64_t nd4o_dpf : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_usbnx_int_sum_s cn30xx;
+ struct cvmx_usbnx_int_sum_s cn31xx;
+ struct cvmx_usbnx_int_sum_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t nd4o_dpf : 1; /**< NCB DMA Out Data Fifo Push Full. */
+ uint64_t nd4o_dpe : 1; /**< NCB DMA Out Data Fifo Pop Empty. */
+ uint64_t nd4o_rpf : 1; /**< NCB DMA Out Request Fifo Push Full. */
+ uint64_t nd4o_rpe : 1; /**< NCB DMA Out Request Fifo Pop Empty. */
+ uint64_t ltl_f_pf : 1; /**< L2C Transfer Length Fifo Push Full. */
+ uint64_t ltl_f_pe : 1; /**< L2C Transfer Length Fifo Pop Empty. */
+ uint64_t reserved_26_31 : 6;
+ uint64_t uod_pf : 1; /**< UOD Fifo Push Full. */
+ uint64_t uod_pe : 1; /**< UOD Fifo Pop Empty. */
+ uint64_t rq_q3_e : 1; /**< Request Queue-3 Fifo Popped When Empty. */
+ uint64_t rq_q3_f : 1; /**< Request Queue-3 Fifo Pushed When Full. */
+ uint64_t rq_q2_e : 1; /**< Request Queue-2 Fifo Popped When Empty. */
+ uint64_t rq_q2_f : 1; /**< Request Queue-2 Fifo Pushed When Full. */
+ uint64_t rg_fi_f : 1; /**< Register Request Fifo Pushed When Full. */
+ uint64_t rg_fi_e : 1; /**< Register Request Fifo Popped When Empty. */
+ uint64_t lt_fi_f : 1; /**< L2C Request Fifo Pushed When Full. */
+ uint64_t lt_fi_e : 1; /**< L2C Request Fifo Popped When Empty. */
+ uint64_t l2c_a_f : 1; /**< L2C Credit Count Added When Full. */
+ uint64_t l2c_s_e : 1; /**< L2C Credit Count Subtracted When Empty. */
+ uint64_t dcred_f : 1; /**< Data Credit Fifo Pushed When Full. */
+ uint64_t dcred_e : 1; /**< Data Credit Fifo Popped When Empty. */
+ uint64_t lt_pu_f : 1; /**< L2C Transaction Fifo Pushed When Full. */
+ uint64_t lt_po_e : 1; /**< L2C Transaction Fifo Popped When Empty. */
+ uint64_t nt_pu_f : 1; /**< NPI Transaction Fifo Pushed When Full. */
+ uint64_t nt_po_e : 1; /**< NPI Transaction Fifo Popped When Empty. */
+ uint64_t pt_pu_f : 1; /**< PP Transaction Fifo Pushed When Full. */
+ uint64_t pt_po_e : 1; /**< PP Transaction Fifo Popped When Empty. */
+ uint64_t lr_pu_f : 1; /**< L2C Request Fifo Pushed When Full. */
+ uint64_t lr_po_e : 1; /**< L2C Request Fifo Popped When Empty. */
+ uint64_t nr_pu_f : 1; /**< NPI Request Fifo Pushed When Full. */
+ uint64_t nr_po_e : 1; /**< NPI Request Fifo Popped When Empty. */
+ uint64_t pr_pu_f : 1; /**< PP Request Fifo Pushed When Full. */
+ uint64_t pr_po_e : 1; /**< PP Request Fifo Popped When Empty. */
+#else
+ uint64_t pr_po_e : 1;
+ uint64_t pr_pu_f : 1;
+ uint64_t nr_po_e : 1;
+ uint64_t nr_pu_f : 1;
+ uint64_t lr_po_e : 1;
+ uint64_t lr_pu_f : 1;
+ uint64_t pt_po_e : 1;
+ uint64_t pt_pu_f : 1;
+ uint64_t nt_po_e : 1;
+ uint64_t nt_pu_f : 1;
+ uint64_t lt_po_e : 1;
+ uint64_t lt_pu_f : 1;
+ uint64_t dcred_e : 1;
+ uint64_t dcred_f : 1;
+ uint64_t l2c_s_e : 1;
+ uint64_t l2c_a_f : 1;
+ uint64_t lt_fi_e : 1;
+ uint64_t lt_fi_f : 1;
+ uint64_t rg_fi_e : 1;
+ uint64_t rg_fi_f : 1;
+ uint64_t rq_q2_f : 1;
+ uint64_t rq_q2_e : 1;
+ uint64_t rq_q3_f : 1;
+ uint64_t rq_q3_e : 1;
+ uint64_t uod_pe : 1;
+ uint64_t uod_pf : 1;
+ uint64_t reserved_26_31 : 6;
+ uint64_t ltl_f_pe : 1;
+ uint64_t ltl_f_pf : 1;
+ uint64_t nd4o_rpe : 1;
+ uint64_t nd4o_rpf : 1;
+ uint64_t nd4o_dpe : 1;
+ uint64_t nd4o_dpf : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } cn50xx;
+ struct cvmx_usbnx_int_sum_cn50xx cn52xx;
+ struct cvmx_usbnx_int_sum_cn50xx cn52xxp1;
+ struct cvmx_usbnx_int_sum_cn50xx cn56xx;
+ struct cvmx_usbnx_int_sum_cn50xx cn56xxp1;
+};
+typedef union cvmx_usbnx_int_sum cvmx_usbnx_int_sum_t;
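
[Editor's note: an illustrative interrupt-service sketch, not part of the committed
header. Masking the summary with the enable register works because the two registers
use identical bit positions; the final write assumes USBN_INT_SUM is write-1-to-clear,
which this header does not state explicitly.

    cvmx_usbnx_int_sum_t sum;

    sum.u64 = cvmx_read_csr(CVMX_USBNX_INT_SUM(0));
    sum.u64 &= cvmx_read_csr(CVMX_USBNX_INT_ENB(0));  /* enabled causes only */

    if (sum.s.nd4o_dpf) {
        /* service the NCB DMA Out data FIFO push-full condition here */
    }

    cvmx_write_csr(CVMX_USBNX_INT_SUM(0), sum.u64);   /* W1C assumed */
]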
+
+/**
+ * cvmx_usbn#_usbp_ctl_status
+ *
+ * USBN_USBP_CTL_STATUS = USBP Control And Status Register
+ *
+ * Contains general control and status information for the USBN block.
+ */
+union cvmx_usbnx_usbp_ctl_status {
+ uint64_t u64;
+ struct cvmx_usbnx_usbp_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t txrisetune : 1; /**< HS Transmitter Rise/Fall Time Adjustment */
+ uint64_t txvreftune : 4; /**< HS DC Voltage Level Adjustment */
+ uint64_t txfslstune : 4; /**< FS/LS Source Impedance Adjustment */
+ uint64_t txhsxvtune : 2; /**< Transmitter High-Speed Crossover Adjustment */
+ uint64_t sqrxtune : 3; /**< Squelch Threshold Adjustment */
+ uint64_t compdistune : 3; /**< Disconnect Threshold Adjustment */
+ uint64_t otgtune : 3; /**< VBUS Valid Threshold Adjustment */
+ uint64_t otgdisable : 1; /**< OTG Block Disable */
+ uint64_t portreset : 1; /**< Per_Port Reset */
+ uint64_t drvvbus : 1; /**< Drive VBUS */
+ uint64_t lsbist : 1; /**< Low-Speed BIST Enable. */
+ uint64_t fsbist : 1; /**< Full-Speed BIST Enable. */
+ uint64_t hsbist : 1; /**< High-Speed BIST Enable. */
+ uint64_t bist_done : 1; /**< PHY Bist Done.
+ Asserted at the end of the PHY BIST sequence. */
+ uint64_t bist_err : 1; /**< PHY Bist Error.
+ Indicates an internal error was detected during
+ the BIST sequence. */
+ uint64_t tdata_out : 4; /**< PHY Test Data Out.
+ Presents either internally generated signals or
+ test register contents, based upon the value of
+ test_data_out_sel. */
+ uint64_t siddq : 1; /**< Drives the USBP (USB-PHY) SIDDQ input.
+ Normally should be set to zero.
+ When customers have no intent to use USB PHY
+ interface, they should:
+ - still provide 3.3V to USB_VDD33, and
+ - tie USB_REXT to 3.3V supply, and
+ - set USBN*_USBP_CTL_STATUS[SIDDQ]=1 */
+ uint64_t txpreemphasistune : 1; /**< HS Transmitter Pre-Emphasis Enable */
+ uint64_t dma_bmode : 1; /**< When set to 1 the L2C DMA address will be updated
+ with byte-counts between packets. When set to 0
+ the L2C DMA address is incremented to the next
+ 4-byte aligned address after adding byte-count. */
+ uint64_t usbc_end : 1; /**< Bigendian input to the USB Core. This should be
+ set to '0' for operation. */
+ uint64_t usbp_bist : 1; /**< PHY. This is cleared to '0' to run BIST on the USBP. */
+ uint64_t tclk : 1; /**< PHY Test Clock, used to load TDATA_IN to the USBP. */
+ uint64_t dp_pulld : 1; /**< PHY DP_PULLDOWN input to the USB-PHY.
+ This signal enables the pull-down resistance on
+ the D+ line. '1': pull-down resistance is connected
+ to D+. '0': pull-down resistance is not connected
+ to D+. When an A/B device is acting as a host
+ (downstream-facing port), dp_pulldown and
+ dm_pulldown are enabled. This must not toggle
+ during normal operation. */
+ uint64_t dm_pulld : 1; /**< PHY DM_PULLDOWN input to the USB-PHY.
+ This signal enables the pull-down resistance on
+ the D- line. '1': pull-down resistance is connected
+ to D-. '0': pull-down resistance is not connected
+ to D-. When an A/B device is acting as a host
+ (downstream-facing port), dp_pulldown and
+ dm_pulldown are enabled. This must not toggle
+ during normal operation. */
+ uint64_t hst_mode : 1; /**< When '0' the USB is acting as HOST, when '1'
+ USB is acting as device. This field needs to be
+ set while the USB is in reset. */
+ uint64_t tuning : 4; /**< Transmitter Tuning for High-Speed Operation.
+ Tunes the current supply and rise/fall output
+ times for high-speed operation.
+ [20:19] == 11: Current supply increased
+ approximately 9%
+ [20:19] == 10: Current supply increased
+ approximately 4.5%
+ [20:19] == 01: Design default.
+ [20:19] == 00: Current supply decreased
+ approximately 4.5%
+ [22:21] == 11: Rise and fall times are increased.
+ [22:21] == 10: Design default.
+ [22:21] == 01: Rise and fall times are decreased.
+ [22:21] == 00: Rise and fall times are decreased
+ further as compared to the 01 setting. */
+ uint64_t tx_bs_enh : 1; /**< Transmit Bit Stuffing on [15:8].
+ Enables or disables bit stuffing on data[15:8]
+ when bit-stuffing is enabled. */
+ uint64_t tx_bs_en : 1; /**< Transmit Bit Stuffing on [7:0].
+ Enables or disables bit stuffing on data[7:0]
+ when bit-stuffing is enabled. */
+ uint64_t loop_enb : 1; /**< PHY Loopback Test Enable.
+ '1': During data transmission the receive is
+ enabled.
+ '0': During data transmission the receive is
+ disabled.
+ Must be '0' for normal operation. */
+ uint64_t vtest_enb : 1; /**< Analog Test Pin Enable.
+ '1' The PHY's analog_test pin is enabled for the
+ input and output of applicable analog test signals.
+ '0' The analog_test pin is disabled. */
+ uint64_t bist_enb : 1; /**< Built-In Self Test Enable.
+ Used to activate BIST in the PHY. */
+ uint64_t tdata_sel : 1; /**< Test Data Out Select.
+ '1' test_data_out[3:0] (PHY) register contents
+ are output. '0' internally generated signals are
+ output. */
+ uint64_t taddr_in : 4; /**< Mode Address for Test Interface.
+ Specifies the register address for writing to or
+ reading from the PHY test interface register. */
+ uint64_t tdata_in : 8; /**< Internal Testing Register Input Data and Select
+ This is a test bus. Data is present on [3:0],
+ and its corresponding select (enable) is present
+ on bits [7:4]. */
+ uint64_t ate_reset : 1; /**< Reset input from automatic test equipment.
+ This is a test signal. When the USB Core is
+ powered up (not in Suspend Mode), an automatic
+ tester can use this to disable phy_clock and
+ free_clk, then re-enable them with an aligned
+ phase.
+ '1': The phy_clk and free_clk outputs are
+ disabled. '0': The phy_clk and free_clk outputs
+ are available within a specific period after the
+ de-assertion. */
+#else
+ uint64_t ate_reset : 1;
+ uint64_t tdata_in : 8;
+ uint64_t taddr_in : 4;
+ uint64_t tdata_sel : 1;
+ uint64_t bist_enb : 1;
+ uint64_t vtest_enb : 1;
+ uint64_t loop_enb : 1;
+ uint64_t tx_bs_en : 1;
+ uint64_t tx_bs_enh : 1;
+ uint64_t tuning : 4;
+ uint64_t hst_mode : 1;
+ uint64_t dm_pulld : 1;
+ uint64_t dp_pulld : 1;
+ uint64_t tclk : 1;
+ uint64_t usbp_bist : 1;
+ uint64_t usbc_end : 1;
+ uint64_t dma_bmode : 1;
+ uint64_t txpreemphasistune : 1;
+ uint64_t siddq : 1;
+ uint64_t tdata_out : 4;
+ uint64_t bist_err : 1;
+ uint64_t bist_done : 1;
+ uint64_t hsbist : 1;
+ uint64_t fsbist : 1;
+ uint64_t lsbist : 1;
+ uint64_t drvvbus : 1;
+ uint64_t portreset : 1;
+ uint64_t otgdisable : 1;
+ uint64_t otgtune : 3;
+ uint64_t compdistune : 3;
+ uint64_t sqrxtune : 3;
+ uint64_t txhsxvtune : 2;
+ uint64_t txfslstune : 4;
+ uint64_t txvreftune : 4;
+ uint64_t txrisetune : 1;
+#endif
+ } s;
+ struct cvmx_usbnx_usbp_ctl_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63 : 26;
+ uint64_t bist_done : 1; /**< PHY Bist Done.
+ Asserted at the end of the PHY BIST sequence. */
+ uint64_t bist_err : 1; /**< PHY Bist Error.
+ Indicates an internal error was detected during
+ the BIST sequence. */
+ uint64_t tdata_out : 4; /**< PHY Test Data Out.
+ Presents either internally generated signals or
+ test register contents, based upon the value of
+ test_data_out_sel. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t dma_bmode : 1; /**< When set to 1 the L2C DMA address will be updated
+ with byte-counts between packets. When set to 0
+ the L2C DMA address is incremented to the next
+ 4-byte aligned address after adding byte-count. */
+ uint64_t usbc_end : 1; /**< Bigendian input to the USB Core. This should be
+ set to '0' for operation. */
+ uint64_t usbp_bist : 1; /**< PHY. This is cleared to '0' to run BIST on the USBP. */
+ uint64_t tclk : 1; /**< PHY Test Clock, used to load TDATA_IN to the USBP. */
+ uint64_t dp_pulld : 1; /**< PHY DP_PULLDOWN input to the USB-PHY.
+ This signal enables the pull-down resistance on
+ the D+ line. '1': pull-down resistance is connected
+ to D+. '0': pull-down resistance is not connected
+ to D+. When an A/B device is acting as a host
+ (downstream-facing port), dp_pulldown and
+ dm_pulldown are enabled. This must not toggle
+ during normal operation. */
+ uint64_t dm_pulld : 1; /**< PHY DM_PULLDOWN input to the USB-PHY.
+ This signal enables the pull-down resistance on
+ the D- line. '1': pull-down resistance is connected
+ to D-. '0': pull-down resistance is not connected
+ to D-. When an A/B device is acting as a host
+ (downstream-facing port), dp_pulldown and
+ dm_pulldown are enabled. This must not toggle
+ during normal operation. */
+ uint64_t hst_mode : 1; /**< When '0' the USB is acting as HOST, when '1'
+ USB is acting as device. This field needs to be
+ set while the USB is in reset. */
+ uint64_t tuning : 4; /**< Transmitter Tuning for High-Speed Operation.
+ Tunes the current supply and rise/fall output
+ times for high-speed operation.
+ [20:19] == 11: Current supply increased
+ approximately 9%
+ [20:19] == 10: Current supply increased
+ approximately 4.5%
+ [20:19] == 01: Design default.
+ [20:19] == 00: Current supply decreased
+ approximately 4.5%
+ [22:21] == 11: Rise and fall times are increased.
+ [22:21] == 10: Design default.
+ [22:21] == 01: Rise and fall times are decreased.
+ [22:21] == 00: Rise and fall times are decreased
+ further as compared to the 01 setting. */
+ uint64_t tx_bs_enh : 1; /**< Transmit Bit Stuffing on [15:8].
+ Enables or disables bit stuffing on data[15:8]
+ when bit-stuffing is enabled. */
+ uint64_t tx_bs_en : 1; /**< Transmit Bit Stuffing on [7:0].
+ Enables or disables bit stuffing on data[7:0]
+ when bit-stuffing is enabled. */
+ uint64_t loop_enb : 1; /**< PHY Loopback Test Enable.
+ '1': During data transmission the receive is
+ enabled.
+ '0': During data transmission the receive is
+ disabled.
+ Must be '0' for normal operation. */
+ uint64_t vtest_enb : 1; /**< Analog Test Pin Enable.
+ '1' The PHY's analog_test pin is enabled for the
+ input and output of applicable analog test signals.
+ '0' The analog_test pin is disabled. */
+ uint64_t bist_enb : 1; /**< Built-In Self Test Enable.
+ Used to activate BIST in the PHY. */
+ uint64_t tdata_sel : 1; /**< Test Data Out Select.
+ '1' test_data_out[3:0] (PHY) register contents
+ are output. '0' internally generated signals are
+ output. */
+ uint64_t taddr_in : 4; /**< Mode Address for Test Interface.
+ Specifies the register address for writing to or
+ reading from the PHY test interface register. */
+ uint64_t tdata_in : 8; /**< Internal Testing Register Input Data and Select
+ This is a test bus. Data is present on [3:0],
+ and its corresponding select (enable) is present
+ on bits [7:4]. */
+ uint64_t ate_reset : 1; /**< Reset input from automatic test equipment.
+ This is a test signal. When the USB Core is
+ powered up (not in Suspend Mode), an automatic
+ tester can use this to disable phy_clock and
+ free_clk, then re-enable them with an aligned
+ phase.
+ '1': The phy_clk and free_clk outputs are
+ disabled. '0': The phy_clk and free_clk outputs
+ are available within a specific period after the
+ de-assertion. */
+#else
+ uint64_t ate_reset : 1;
+ uint64_t tdata_in : 8;
+ uint64_t taddr_in : 4;
+ uint64_t tdata_sel : 1;
+ uint64_t bist_enb : 1;
+ uint64_t vtest_enb : 1;
+ uint64_t loop_enb : 1;
+ uint64_t tx_bs_en : 1;
+ uint64_t tx_bs_enh : 1;
+ uint64_t tuning : 4;
+ uint64_t hst_mode : 1;
+ uint64_t dm_pulld : 1;
+ uint64_t dp_pulld : 1;
+ uint64_t tclk : 1;
+ uint64_t usbp_bist : 1;
+ uint64_t usbc_end : 1;
+ uint64_t dma_bmode : 1;
+ uint64_t reserved_30_31 : 2;
+ uint64_t tdata_out : 4;
+ uint64_t bist_err : 1;
+ uint64_t bist_done : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } cn30xx;
+ struct cvmx_usbnx_usbp_ctl_status_cn30xx cn31xx;
+ struct cvmx_usbnx_usbp_ctl_status_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t txrisetune : 1; /**< HS Transmitter Rise/Fall Time Adjustment */
+ uint64_t txvreftune : 4; /**< HS DC Voltage Level Adjustment */
+ uint64_t txfslstune : 4; /**< FS/LS Source Impedance Adjustment */
+ uint64_t txhsxvtune : 2; /**< Transmitter High-Speed Crossover Adjustment */
+ uint64_t sqrxtune : 3; /**< Squelch Threshold Adjustment */
+ uint64_t compdistune : 3; /**< Disconnect Threshold Adjustment */
+ uint64_t otgtune : 3; /**< VBUS Valid Threshold Adjustment */
+ uint64_t otgdisable : 1; /**< OTG Block Disable */
+ uint64_t portreset : 1; /**< Per_Port Reset */
+ uint64_t drvvbus : 1; /**< Drive VBUS */
+ uint64_t lsbist : 1; /**< Low-Speed BIST Enable. */
+ uint64_t fsbist : 1; /**< Full-Speed BIST Enable. */
+ uint64_t hsbist : 1; /**< High-Speed BIST Enable. */
+ uint64_t bist_done : 1; /**< PHY Bist Done.
+ Asserted at the end of the PHY BIST sequence. */
+ uint64_t bist_err : 1; /**< PHY Bist Error.
+ Indicates an internal error was detected during
+ the BIST sequence. */
+ uint64_t tdata_out : 4; /**< PHY Test Data Out.
+ Presents either internally generated signals or
+ test register contents, based upon the value of
+ test_data_out_sel. */
+ uint64_t reserved_31_31 : 1;
+ uint64_t txpreemphasistune : 1; /**< HS Transmitter Pre-Emphasis Enable */
+ uint64_t dma_bmode : 1; /**< When set to 1 the L2C DMA address will be updated
+ with byte-counts between packets. When set to 0
+ the L2C DMA address is incremented to the next
+ 4-byte aligned address after adding byte-count. */
+ uint64_t usbc_end : 1; /**< Bigendian input to the USB Core. This should be
+ set to '0' for operation. */
+ uint64_t usbp_bist : 1; /**< PHY. This is cleared to '0' to run BIST on the USBP. */
+ uint64_t tclk : 1; /**< PHY Test Clock, used to load TDATA_IN to the USBP. */
+ uint64_t dp_pulld : 1; /**< PHY DP_PULLDOWN input to the USB-PHY.
+ This signal enables the pull-down resistance on
+ the D+ line. '1': pull-down resistance is connected
+ to D+. '0': pull-down resistance is not connected
+ to D+. When an A/B device is acting as a host
+ (downstream-facing port), dp_pulldown and
+ dm_pulldown are enabled. This must not toggle
+ during normal operation. */
+ uint64_t dm_pulld : 1; /**< PHY DM_PULLDOWN input to the USB-PHY.
+ This signal enables the pull-down resistance on
+ the D- line. '1': pull-down resistance is connected
+ to D-. '0': pull-down resistance is not connected
+ to D-. When an A/B device is acting as a host
+ (downstream-facing port), dp_pulldown and
+ dm_pulldown are enabled. This must not toggle
+ during normal operation. */
+ uint64_t hst_mode : 1; /**< When '0' the USB is acting as HOST, when '1'
+ USB is acting as device. This field needs to be
+ set while the USB is in reset. */
+ uint64_t reserved_19_22 : 4;
+ uint64_t tx_bs_enh : 1; /**< Transmit Bit Stuffing on [15:8].
+ Enables or disables bit stuffing on data[15:8]
+ when bit-stuffing is enabled. */
+ uint64_t tx_bs_en : 1; /**< Transmit Bit Stuffing on [7:0].
+ Enables or disables bit stuffing on data[7:0]
+ when bit-stuffing is enabled. */
+ uint64_t loop_enb : 1; /**< PHY Loopback Test Enable.
+ '1': During data transmission the receive is
+ enabled.
+ '0': During data transmission the receive is
+ disabled.
+ Must be '0' for normal operation. */
+ uint64_t vtest_enb : 1; /**< Analog Test Pin Enable.
+ '1' The PHY's analog_test pin is enabled for the
+ input and output of applicable analog test signals.
+ '0' The analog_test pin is disabled. */
+ uint64_t bist_enb : 1; /**< Built-In Self Test Enable.
+ Used to activate BIST in the PHY. */
+ uint64_t tdata_sel : 1; /**< Test Data Out Select.
+ '1' test_data_out[3:0] (PHY) register contents
+ are output. '0' internally generated signals are
+ output. */
+ uint64_t taddr_in : 4; /**< Mode Address for Test Interface.
+ Specifies the register address for writing to or
+ reading from the PHY test interface register. */
+ uint64_t tdata_in : 8; /**< Internal Testing Register Input Data and Select.
+ This is a test bus. Data is present on [3:0],
+ and its corresponding select (enable) is present
+ on bits [7:4]. */
+ uint64_t ate_reset : 1; /**< Reset input from automatic test equipment.
+ This is a test signal. When the USB Core is
+ powered up (not in Suspend Mode), an automatic
+ tester can use this to disable phy_clk and
+ free_clk, then re-enable them with an aligned
+ phase.
+ '1': The phy_clk and free_clk outputs are
+ disabled. '0': The phy_clk and free_clk outputs
+ are available within a specific period after the
+ de-assertion. */
+#else
+ uint64_t ate_reset : 1;
+ uint64_t tdata_in : 8;
+ uint64_t taddr_in : 4;
+ uint64_t tdata_sel : 1;
+ uint64_t bist_enb : 1;
+ uint64_t vtest_enb : 1;
+ uint64_t loop_enb : 1;
+ uint64_t tx_bs_en : 1;
+ uint64_t tx_bs_enh : 1;
+ uint64_t reserved_19_22 : 4;
+ uint64_t hst_mode : 1;
+ uint64_t dm_pulld : 1;
+ uint64_t dp_pulld : 1;
+ uint64_t tclk : 1;
+ uint64_t usbp_bist : 1;
+ uint64_t usbc_end : 1;
+ uint64_t dma_bmode : 1;
+ uint64_t txpreemphasistune : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t tdata_out : 4;
+ uint64_t bist_err : 1;
+ uint64_t bist_done : 1;
+ uint64_t hsbist : 1;
+ uint64_t fsbist : 1;
+ uint64_t lsbist : 1;
+ uint64_t drvvbus : 1;
+ uint64_t portreset : 1;
+ uint64_t otgdisable : 1;
+ uint64_t otgtune : 3;
+ uint64_t compdistune : 3;
+ uint64_t sqrxtune : 3;
+ uint64_t txhsxvtune : 2;
+ uint64_t txfslstune : 4;
+ uint64_t txvreftune : 4;
+ uint64_t txrisetune : 1;
+#endif
+ } cn50xx;
+ struct cvmx_usbnx_usbp_ctl_status_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t txrisetune : 1; /**< HS Transmitter Rise/Fall Time Adjustment */
+ uint64_t txvreftune : 4; /**< HS DC Voltage Level Adjustment */
+ uint64_t txfslstune : 4; /**< FS/LS Source Impedance Adjustment */
+ uint64_t txhsxvtune : 2; /**< Transmitter High-Speed Crossover Adjustment */
+ uint64_t sqrxtune : 3; /**< Squelch Threshold Adjustment */
+ uint64_t compdistune : 3; /**< Disconnect Threshold Adjustment */
+ uint64_t otgtune : 3; /**< VBUS Valid Threshold Adjustment */
+ uint64_t otgdisable : 1; /**< OTG Block Disable */
+ uint64_t portreset : 1; /**< Per-Port Reset */
+ uint64_t drvvbus : 1; /**< Drive VBUS */
+ uint64_t lsbist : 1; /**< Low-Speed BIST Enable. */
+ uint64_t fsbist : 1; /**< Full-Speed BIST Enable. */
+ uint64_t hsbist : 1; /**< High-Speed BIST Enable. */
+ uint64_t bist_done : 1; /**< PHY BIST Done.
+ Asserted at the end of the PHY BIST sequence. */
+ uint64_t bist_err : 1; /**< PHY BIST Error.
+ Indicates an internal error was detected during
+ the BIST sequence. */
+ uint64_t tdata_out : 4; /**< PHY Test Data Out.
+ Presents either internally generated signals or
+ test register contents, based upon the value of
+ test_data_out_sel. */
+ uint64_t siddq : 1; /**< Drives the USBP (USB-PHY) SIDDQ input.
+ Normally should be set to zero.
+ When customers have no intent to use USB PHY
+ interface, they should:
+ - still provide 3.3V to USB_VDD33, and
+ - tie USB_REXT to 3.3V supply, and
+ - set USBN*_USBP_CTL_STATUS[SIDDQ]=1 */
+ uint64_t txpreemphasistune : 1; /**< HS Transmitter Pre-Emphasis Enable */
+ uint64_t dma_bmode : 1; /**< When set to 1 the L2C DMA address will be updated
+ with byte-counts between packets. When set to 0
+ the L2C DMA address is incremented to the next
+ 4-byte aligned address after adding byte-count. */
+ uint64_t usbc_end : 1; /**< Big-endian input to the USB Core. This should be
+ set to '0' for normal operation. */
+ uint64_t usbp_bist : 1; /**< PHY BIST. This is cleared to '0' to run BIST on the USBP. */
+ uint64_t tclk : 1; /**< PHY Test Clock, used to load TDATA_IN to the USBP. */
+ uint64_t dp_pulld : 1; /**< PHY DP_PULLDOWN input to the USB-PHY.
+ This signal enables the pull-down resistance on
+ the D+ line. '1': pull-down resistance is connected
+ to D+. '0': pull-down resistance is not connected
+ to D+. When an A/B device is acting as a host
+ (downstream-facing port), dp_pulldown and
+ dm_pulldown are enabled. This must not toggle
+ during normal operation. */
+ uint64_t dm_pulld : 1; /**< PHY DM_PULLDOWN input to the USB-PHY.
+ This signal enables the pull-down resistance on
+ the D- line. '1': pull-down resistance is connected
+ to D-. '0': pull-down resistance is not connected
+ to D-. When an A/B device is acting as a host
+ (downstream-facing port), dp_pulldown and
+ dm_pulldown are enabled. This must not toggle
+ during normal operation. */
+ uint64_t hst_mode : 1; /**< When '0' the USB is acting as HOST, when '1'
+ USB is acting as device. This field needs to be
+ set while the USB is in reset. */
+ uint64_t reserved_19_22 : 4;
+ uint64_t tx_bs_enh : 1; /**< Transmit Bit Stuffing on [15:8].
+ Enables or disables bit stuffing on data[15:8]
+ when bit-stuffing is enabled. */
+ uint64_t tx_bs_en : 1; /**< Transmit Bit Stuffing on [7:0].
+ Enables or disables bit stuffing on data[7:0]
+ when bit-stuffing is enabled. */
+ uint64_t loop_enb : 1; /**< PHY Loopback Test Enable.
+ '1': During data transmission the receive is
+ enabled.
+ '0': During data transmission the receive is
+ disabled.
+ Must be '0' for normal operation. */
+ uint64_t vtest_enb : 1; /**< Analog Test Pin Enable.
+ '1' The PHY's analog_test pin is enabled for the
+ input and output of applicable analog test signals.
+ '0' The analog_test pin is disabled. */
+ uint64_t bist_enb : 1; /**< Built-In Self Test Enable.
+ Used to activate BIST in the PHY. */
+ uint64_t tdata_sel : 1; /**< Test Data Out Select.
+ '1' test_data_out[3:0] (PHY) register contents
+ are output. '0' internally generated signals are
+ output. */
+ uint64_t taddr_in : 4; /**< Mode Address for Test Interface.
+ Specifies the register address for writing to or
+ reading from the PHY test interface register. */
+ uint64_t tdata_in : 8; /**< Internal Testing Register Input Data and Select.
+ This is a test bus. Data is present on [3:0],
+ and its corresponding select (enable) is present
+ on bits [7:4]. */
+ uint64_t ate_reset : 1; /**< Reset input from automatic test equipment.
+ This is a test signal. When the USB Core is
+ powered up (not in Suspend Mode), an automatic
+ tester can use this to disable phy_clk and
+ free_clk, then re-enable them with an aligned
+ phase.
+ '1': The phy_clk and free_clk outputs are
+ disabled. '0': The phy_clk and free_clk outputs
+ are available within a specific period after the
+ de-assertion. */
+#else
+ uint64_t ate_reset : 1;
+ uint64_t tdata_in : 8;
+ uint64_t taddr_in : 4;
+ uint64_t tdata_sel : 1;
+ uint64_t bist_enb : 1;
+ uint64_t vtest_enb : 1;
+ uint64_t loop_enb : 1;
+ uint64_t tx_bs_en : 1;
+ uint64_t tx_bs_enh : 1;
+ uint64_t reserved_19_22 : 4;
+ uint64_t hst_mode : 1;
+ uint64_t dm_pulld : 1;
+ uint64_t dp_pulld : 1;
+ uint64_t tclk : 1;
+ uint64_t usbp_bist : 1;
+ uint64_t usbc_end : 1;
+ uint64_t dma_bmode : 1;
+ uint64_t txpreemphasistune : 1;
+ uint64_t siddq : 1;
+ uint64_t tdata_out : 4;
+ uint64_t bist_err : 1;
+ uint64_t bist_done : 1;
+ uint64_t hsbist : 1;
+ uint64_t fsbist : 1;
+ uint64_t lsbist : 1;
+ uint64_t drvvbus : 1;
+ uint64_t portreset : 1;
+ uint64_t otgdisable : 1;
+ uint64_t otgtune : 3;
+ uint64_t compdistune : 3;
+ uint64_t sqrxtune : 3;
+ uint64_t txhsxvtune : 2;
+ uint64_t txfslstune : 4;
+ uint64_t txvreftune : 4;
+ uint64_t txrisetune : 1;
+#endif
+ } cn52xx;
+ struct cvmx_usbnx_usbp_ctl_status_cn50xx cn52xxp1;
+ struct cvmx_usbnx_usbp_ctl_status_cn52xx cn56xx;
+ struct cvmx_usbnx_usbp_ctl_status_cn50xx cn56xxp1;
+};
+typedef union cvmx_usbnx_usbp_ctl_status cvmx_usbnx_usbp_ctl_status_t;
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-usbnx-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-utils.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-utils.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-utils.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,216 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ * Small utility functions and macros to ease programming of Octeon.
+ *
+ * <hr>$Revision: 38306 $<hr>
+*/
+#ifndef __CVMX_UTILS_H__
+#define __CVMX_UTILS_H__
+
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#include <stdarg.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef TRUE
+#define FALSE 0
+#define TRUE (!(FALSE))
+#endif
+
+/*
+ * The macros cvmx_likely and cvmx_unlikely use the
+ * __builtin_expect GCC operation to control branch
+ * probabilities for a conditional. For example, an "if"
+ * statement in the code that will almost always be
+ * executed should be written as "if (cvmx_likely(...))".
+ * If the "else" section of an if statement is more
+ * probable, use "if (cvmx_unlikely(...))".
+ */
+#define cvmx_likely(x) __builtin_expect(!!(x), 1)
+#define cvmx_unlikely(x) __builtin_expect(!!(x), 0)
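+
+/* Example usage (illustrative sketch; handle_packet() and handle_error()
+ * are hypothetical helpers, shown only to demonstrate the hint):
+ *
+ *   if (cvmx_likely(work != NULL))
+ *       handle_packet(work);   // common case: work was available
+ *   else
+ *       handle_error();        // rare case: nothing to process
+ */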
+
+#if CVMX_ENABLE_DEBUG_PRINTS
+ #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ #define cvmx_dprintf printk
+ #define cvmx_dvprintf vprintk
+ #elif defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+ void cvmx_dvprintf(const char *, va_list);
+ void cvmx_dprintf(const char *, ...) __attribute__ ((format(printf, 1, 2)));
+ #else
+ #define cvmx_dprintf printf
+ #define cvmx_dvprintf vprintf
+ #endif
+#else
+ static inline void cvmx_dvprintf(const char *format, va_list ap)
+ {
+ /* Prints are disabled, do nothing */
+ }
+
+ static inline void cvmx_dprintf(const char *format, ...) __attribute__ ((format(printf, 1, 2)));
+ static inline void cvmx_dprintf(const char *format, ...)
+ {
+ /* Prints are disabled, do nothing */
+ }
+#endif
+
+#define CAST64(v) ((long long)(long)(v)) // use only when 'v' is a pointer
+#define CASTPTR(type, v) ((type *)(long)(v))
+#define CVMX_CACHE_LINE_SIZE (128) // In bytes
+#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) // In bytes
+#define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned (CVMX_CACHE_LINE_SIZE)))
+
+/**
+ * This macro spins on a field waiting for it to reach a value. It
+ * is common in code to need to wait for a specific field in a CSR
+ * to match a specific value. Conceptually this macro expands to:
+ *
+ * 1) Read the CSR at "address" with a csr typedef of "type"
+ * 2) Check if ("type".s."field" "op" "value")
+ * 3) If #2 isn't true, loop to #1 unless too much time has passed.
+ */
+#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, timeout_usec)\
+ ({int result; \
+ do { \
+ uint64_t done = cvmx_clock_get_count(CVMX_CLOCK_CORE) + (uint64_t)timeout_usec * \
+ cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000; \
+ type c; \
+ while (1) \
+ { \
+ c.u64 = cvmx_read_csr(address); \
+ if ((c.s.field) op (value)) { \
+ result = 0; \
+ break; \
+ } else if (cvmx_clock_get_count(CVMX_CLOCK_CORE) > done) { \
+ result = -1; \
+ break; \
+ } else \
+ cvmx_wait(100); \
+ } \
+ } while (0); \
+ result;})
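+
+/* Example usage (illustrative sketch; SOME_CSR_ADDRESS and some_csr_t are
+ * hypothetical placeholders, not a specific Octeon CSR). Waits up to
+ * 1000us for the bist_done field to read 1; a non-zero result means the
+ * timeout expired:
+ *
+ *   if (CVMX_WAIT_FOR_FIELD64(SOME_CSR_ADDRESS, some_csr_t,
+ *                             bist_done, ==, 1, 1000))
+ *       cvmx_dprintf("timed out waiting for BIST\n");
+ */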
+
+#define CVMX_BUILD_ASSERT_ZERO(e) (sizeof(struct {int __static_assert:(e)?1:-1;}))
+#define CVMX_BUILD_ASSERT(condition) ((void)CVMX_BUILD_ASSERT_ZERO(condition))
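+
+/* Example usage (illustrative sketch): CVMX_BUILD_ASSERT() fails at compile
+ * time when the condition is false, via a negative bitfield width:
+ *
+ *   CVMX_BUILD_ASSERT(sizeof(uint64_t) == 8);  // compiles
+ *   CVMX_BUILD_ASSERT(sizeof(uint64_t) == 4);  // error: negative width
+ */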
+
+/**
+ * Builds a bit mask given the required size in bits.
+ *
+ * @param bits Number of bits in the mask
+ * @return The mask
+ */
+static inline uint64_t cvmx_build_mask(uint64_t bits)
+{
+ return ~((~0x0ull) << bits);
+}
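+
+/* Example (illustrative sketch): a 12-bit mask is the low 12 bits set.
+ *
+ *   uint64_t m = cvmx_build_mask(12);  // m == 0xfffull
+ */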
+
+
+/**
+ * Builds a memory address for I/O based on the Major and Sub DID.
+ *
+ * @param major_did 5 bit major did
+ * @param sub_did 3 bit sub did
+ * @return I/O base address
+ */
+static inline uint64_t cvmx_build_io_address(uint64_t major_did, uint64_t sub_did)
+{
+ return ((0x1ull << 48) | (major_did << 43) | (sub_did << 40));
+}
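+
+/* Example (illustrative sketch): major DID 1, sub DID 0 gives
+ * (1ull<<48) | (1ull<<43) == 0x0001080000000000ull.
+ *
+ *   uint64_t base = cvmx_build_io_address(1, 0);
+ */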
+
+
+/**
+ * Perform mask and shift to place the supplied value into
+ * the supplied bit range.
+ *
+ * Example: cvmx_build_bits(39,24,value)
+ * <pre>
+ *        6       5       4       3       3       2       1
+ *        3       5       7       9       1       3       5       7      0
+ *       +-------+-------+-------+-------+-------+-------+-------+------+
+ *       000000000000000000000000___________value000000000000000000000000
+ * </pre>
+ *
+ * @param high_bit Highest bit value can occupy (inclusive) 0-63
+ * @param low_bit Lowest bit value can occupy inclusive 0-high_bit
+ * @param value Value to use
+ * @return Value masked and shifted
+ */
+static inline uint64_t cvmx_build_bits(uint64_t high_bit, uint64_t low_bit, uint64_t value)
+{
+ return ((value & cvmx_build_mask(high_bit - low_bit + 1)) << low_bit);
+}
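+
+/* Example (illustrative sketch): placing 0xABC into bits 39..24, matching
+ * the diagram above.
+ *
+ *   uint64_t v = cvmx_build_bits(39, 24, 0xABC);  // v == 0xABC000000ull
+ */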
+
+
+/**
+ * Return the number of cores available in the chip
+ *
+ * @return Number of cores available in the chip
+ */
+static inline uint32_t cvmx_octeon_num_cores(void)
+{
+ uint32_t ciu_fuse = (uint32_t)cvmx_read_csr(CVMX_CIU_FUSE) & 0xffffffffull;
+ return cvmx_pop(ciu_fuse);
+}
+
+
+/**
+ * Return true if Octeon is CN36XX
+ *
+ * @return Non-zero if the chip is a CN36XX
+ */
+static inline int cvmx_octeon_model_CN36XX(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ && cvmx_fuse_read(264));
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_UTILS_H__ */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-utils.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-version.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-version.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-version.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,49 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2011 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ **********************license end****************/
+
+/* Version information is made available at compile time in two forms:
+** 1) a version string for printing
+** 2) a combined SDK version and build number, suitable for comparisons
+** to determine what SDK version is being used.
+** SDK 1.2.3 build 567 => 102030567
+** Note that 2 digits are used for each version number, so that:
+** 1.9.0 == 01.09.00 < 01.10.00 == 1.10.0
+** 10.9.0 == 10.09.00 > 09.10.00 == 9.10.0
+**
+*/
+#define OCTEON_SDK_VERSION_NUM 203000427ull
+#define OCTEON_SDK_VERSION_STRING "Cavium Inc. OCTEON SDK version 2.3.0, build 427"
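+
+/* Example (illustrative sketch): because two digits are reserved per
+ * component, a plain numeric comparison matches version ordering. A
+ * hypothetical guard requiring at least SDK 2.3.0 build 427:
+ *
+ *   #if OCTEON_SDK_VERSION_NUM < 203000427ull
+ *   #error "OCTEON SDK 2.3.0 build 427 or newer is required"
+ *   #endif
+ */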
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-version.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-warn.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-warn.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-warn.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,90 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for warning users about errors and such.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+#include "cvmx.h"
+#include "cvmx-warn.h"
+
+void cvmx_warn(const char *format, ...)
+{
+#ifdef CVMX_BUILD_FOR_UBOOT
+ DECLARE_GLOBAL_DATA_PTR;
+ if (!gd->have_console)
+ {
+ /* If the serial port is not set up yet,
+ ** save pointer to error message (most likely a constant in flash)
+ ** to print out once we can. */
+#ifdef U_BOOT_OLD
+ gd->err_msg = (void *)format;
+#else
+ gd->ogd.err_msg = (void *)format;
+#endif
+ return;
+ }
+#endif
+ va_list args;
+ va_start(args, format);
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ printk("WARNING:");
+ vprintk(format, args);
+#else
+#ifdef CVMX_BUILD_FOR_FREEBSD_KERNEL
+ printf("WARNING: ");
+#else
+ printf("WARNING:\n");
+#endif
+ vprintf(format, args);
+#endif
+ va_end(args);
+}
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-warn.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-warn.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-warn.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-warn.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,77 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Functions for warning users about errors and such.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ */
+#ifndef __CVMX_WARN_H__
+#define __CVMX_WARN_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef cvmx_warn
+#ifdef printf
+extern void cvmx_warn(const char *format, ...);
+#else
+extern void cvmx_warn(const char *format, ...) __attribute__ ((format(printf, 1, 2)));
+#endif
+#endif
+
+#define cvmx_warn_if(expression, format, ...) if (expression) cvmx_warn(format, ##__VA_ARGS__)
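+
+/* Example usage (illustrative sketch; 'status' is a hypothetical variable).
+ * cvmx_warn_if() expands to a plain if, so the warning is only formatted
+ * when the condition holds:
+ *
+ *   cvmx_warn_if(status != 0, "unexpected status %d\n", status);
+ */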
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_WARN_H__ */
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-warn.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-wqe.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-wqe.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-wqe.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,712 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * This header file defines the work queue entry (wqe) data structure.
+ * Since this is a commonly used structure that depends on structures
+ * from several hardware blocks, those definitions have been placed
+ * in this file to create a single point of definition of the wqe
+ * format.
+ * Data structures are still named according to the block that they
+ * relate to.
+ *
+ * This file must not depend on any other header files, except for cvmx.h!!!
+ *
+ *
+ * <hr>$Revision: 70030 $<hr>
+ *
+ *
+ */
+
+#ifndef __CVMX_WQE_H__
+#define __CVMX_WQE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define OCT_TAG_TYPE_STRING(x) (((x) == CVMX_POW_TAG_TYPE_ORDERED) ? "ORDERED" : \
+ (((x) == CVMX_POW_TAG_TYPE_ATOMIC) ? "ATOMIC" : \
+ (((x) == CVMX_POW_TAG_TYPE_NULL) ? "NULL" : \
+ "NULL_NULL")))
+
+
+/**
+ * HW decode / err_code in work queue entry
+ */
+typedef union
+{
+ uint64_t u64;
+
+ /** Use this struct if the hardware determines that the packet is IP */
+ struct
+ {
+ uint64_t bufs : 8; /**< HW sets this to the number of buffers used by this packet */
+ uint64_t ip_offset : 8; /**< HW sets to the number of L2 bytes prior to the IP */
+ uint64_t vlan_valid : 1; /**< set to 1 if we found DSA/VLAN in the L2 */
+ uint64_t vlan_stacked : 1; /**< Set to 1 if the DSA/VLAN tag is stacked */
+ uint64_t unassigned : 1;
+ uint64_t vlan_cfi : 1; /**< HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
+ uint64_t vlan_id :12; /**< HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
+ uint64_t varies:12; /**< 38xx and 68xx have different definitions. */
+ uint64_t dec_ipcomp : 1; /**< the packet needs to be decompressed */
+ uint64_t tcp_or_udp : 1; /**< the packet is either TCP or UDP */
+ uint64_t dec_ipsec : 1; /**< the packet needs to be decrypted (ESP or AH) */
+ uint64_t is_v6 : 1; /**< the packet is IPv6 */
+
+ /* (rcv_error, not_IP, IP_exc, is_frag, L4_error, software, etc.) */
+
+ uint64_t software : 1; /**< reserved for software use, hardware will clear on packet creation */
+ /* exceptional conditions below */
+ uint64_t L4_error : 1; /**< the receive interface hardware detected an L4 error
+ (only applies if !rcv_error && !not_IP && !IP_exc && !is_frag)
+ failure indicated in err_code below, decode:
+ - 1 = Malformed L4
+ - 2 = L4 Checksum Error: the L4 checksum value is incorrect.
+ - 3 = UDP Length Error: The UDP length field would make the UDP data longer than what
+ remains in the IP packet (as defined by the IP header length field).
+ - 4 = Bad L4 Port: either the source or destination TCP/UDP port is 0.
+ - 8 = TCP FIN Only: the packet is TCP and only the FIN flag is set.
+ - 9 = TCP No Flags: the packet is TCP and no flags are set.
+ - 10 = TCP FIN RST: the packet is TCP and both FIN and RST are set.
+ - 11 = TCP SYN URG: the packet is TCP and both SYN and URG are set.
+ - 12 = TCP SYN RST: the packet is TCP and both SYN and RST are set.
+ - 13 = TCP SYN FIN: the packet is TCP and both SYN and FIN are set. */
+
+
+
+ uint64_t is_frag : 1; /**< set if the packet is a fragment */
+ uint64_t IP_exc : 1; /**< the receive interface hardware detected an IP error / exception
+ (only applies if !rcv_error && !not_IP) failure indicated in err_code below, decode:
+ - 1 = Not IP: the IP version field is neither 4 nor 6.
+ - 2 = IPv4 Header Checksum Error: the IPv4 header has a checksum violation.
+ - 3 = IP Malformed Header: the packet is not long enough to contain the IP header.
+ - 4 = IP Malformed: the packet is not long enough to contain the bytes indicated by the IP
+ header. Pad is allowed.
+ - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6 Hop Count field are zero.
+ - 6 = IP Options */
+
+ uint64_t is_bcast : 1; /**< set if the hardware determined that the packet is a broadcast */
+ uint64_t is_mcast : 1; /**< set if the hardware determined that the packet is a multi-cast */
+ uint64_t not_IP : 1; /**< set if the packet may not be IP (must be zero in this case) */
+ uint64_t rcv_error : 1; /**< the receive interface hardware detected a receive error (must be zero in this case) */
+ /* lower err_code = first-level descriptor of the work */
+ /* zero for packet submitted by hardware that isn't on the slow path */
+
+ uint64_t err_code : 8; /**< type is cvmx_pip_err_t */
+ } s;
+ struct
+ {
+ uint64_t bufs : 8; /**< HW sets this to the number of buffers used by this packet */
+ uint64_t ip_offset : 8; /**< HW sets to the number of L2 bytes prior to the IP */
+ uint64_t vlan_valid : 1; /**< set to 1 if we found DSA/VLAN in the L2 */
+ uint64_t vlan_stacked : 1; /**< Set to 1 if the DSA/VLAN tag is stacked */
+ uint64_t unassigned : 1;
+ uint64_t vlan_cfi : 1; /**< HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
+ uint64_t vlan_id :12; /**< HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
+
+ uint64_t port:12; /**< MAC/PIP port number. */
+
+ uint64_t dec_ipcomp : 1; /**< the packet needs to be decompressed */
+ uint64_t tcp_or_udp : 1; /**< the packet is either TCP or UDP */
+ uint64_t dec_ipsec : 1; /**< the packet needs to be decrypted (ESP or AH) */
+ uint64_t is_v6 : 1; /**< the packet is IPv6 */
+
+ /* (rcv_error, not_IP, IP_exc, is_frag, L4_error, software, etc.) */
+
+ uint64_t software : 1; /**< reserved for software use, hardware will clear on packet creation */
+ /* exceptional conditions below */
+ uint64_t L4_error : 1; /**< the receive interface hardware detected an L4 error
+ (only applies if !rcv_error && !not_IP && !IP_exc && !is_frag)
+ failure indicated in err_code below, decode:
+ - 1 = Malformed L4
+ - 2 = L4 Checksum Error: the L4 checksum value is incorrect.
+ - 3 = UDP Length Error: The UDP length field would make the UDP data longer than what
+ remains in the IP packet (as defined by the IP header length field).
+ - 4 = Bad L4 Port: either the source or destination TCP/UDP port is 0.
+ - 8 = TCP FIN Only: the packet is TCP and only the FIN flag is set.
+ - 9 = TCP No Flags: the packet is TCP and no flags are set.
+ - 10 = TCP FIN RST: the packet is TCP and both FIN and RST are set.
+ - 11 = TCP SYN URG: the packet is TCP and both SYN and URG are set.
+ - 12 = TCP SYN RST: the packet is TCP and both SYN and RST are set.
+ - 13 = TCP SYN FIN: the packet is TCP and both SYN and FIN are set. */
+
+
+
+ uint64_t is_frag : 1; /**< set if the packet is a fragment */
+ uint64_t IP_exc : 1; /**< the receive interface hardware detected an IP error / exception
+ (only applies if !rcv_error && !not_IP) failure indicated in err_code below, decode:
+ - 1 = Not IP: the IP version field is neither 4 nor 6.
+ - 2 = IPv4 Header Checksum Error: the IPv4 header has a checksum violation.
+ - 3 = IP Malformed Header: the packet is not long enough to contain the IP header.
+ - 4 = IP Malformed: the packet is not long enough to contain the bytes indicated by the IP
+ header. Pad is allowed.
+ - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6 Hop Count field are zero.
+ - 6 = IP Options */
+
+ uint64_t is_bcast : 1; /**< set if the hardware determined that the packet is a broadcast */
+ uint64_t is_mcast : 1; /**< set if the hardware determined that the packet is a multi-cast */
+ uint64_t not_IP : 1; /**< set if the packet may not be IP (must be zero in this case) */
+ uint64_t rcv_error : 1; /**< the receive interface hardware detected a receive error (must be zero in this case) */
+ /* lower err_code = first-level descriptor of the work */
+ /* zero for packet submitted by hardware that isn't on the slow path */
+
+ uint64_t err_code : 8; /**< type is cvmx_pip_err_t */
+ } s_cn68xx;
+ struct
+ {
+ uint64_t bufs : 8; /**< HW sets this to the number of buffers used by this packet */
+ uint64_t ip_offset : 8; /**< HW sets to the number of L2 bytes prior to the IP */
+ uint64_t vlan_valid : 1; /**< set to 1 if we found DSA/VLAN in the L2 */
+ uint64_t vlan_stacked : 1; /**< Set to 1 if the DSA/VLAN tag is stacked */
+ uint64_t unassigned : 1;
+ uint64_t vlan_cfi : 1; /**< HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
+ uint64_t vlan_id :12; /**< HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
+ uint64_t pr : 4; /**< Ring Identifier (if PCIe). Requires PIP_GBL_CTL[RING_EN]=1 */
+ uint64_t unassigned2a :4;
+ uint64_t unassigned2 :4;
+
+ uint64_t dec_ipcomp : 1; /**< the packet needs to be decompressed */
+ uint64_t tcp_or_udp : 1; /**< the packet is either TCP or UDP */
+ uint64_t dec_ipsec : 1; /**< the packet needs to be decrypted (ESP or AH) */
+ uint64_t is_v6 : 1; /**< the packet is IPv6 */
+
+ /* (rcv_error, not_IP, IP_exc, is_frag, L4_error, software, etc.) */
+
+ uint64_t software : 1; /**< reserved for software use, hardware will clear on packet creation */
+ /* exceptional conditions below */
+ uint64_t L4_error : 1; /**< the receive interface hardware detected an L4 error
+ (only applies if !rcv_error && !not_IP && !IP_exc && !is_frag)
+ failure indicated in err_code below, decode:
+ - 1 = Malformed L4
+ - 2 = L4 Checksum Error: the L4 checksum value is incorrect.
+ - 3 = UDP Length Error: The UDP length field would make the UDP data longer than what
+ remains in the IP packet (as defined by the IP header length field).
+ - 4 = Bad L4 Port: either the source or destination TCP/UDP port is 0.
+ - 8 = TCP FIN Only: the packet is TCP and only the FIN flag is set.
+ - 9 = TCP No Flags: the packet is TCP and no flags are set.
+ - 10 = TCP FIN RST: the packet is TCP and both FIN and RST are set.
+ - 11 = TCP SYN URG: the packet is TCP and both SYN and URG are set.
+ - 12 = TCP SYN RST: the packet is TCP and both SYN and RST are set.
+ - 13 = TCP SYN FIN: the packet is TCP and both SYN and FIN are set. */
+
+
+
+ uint64_t is_frag : 1; /**< set if the packet is a fragment */
+ uint64_t IP_exc : 1; /**< the receive interface hardware detected an IP error / exception
+ (only applies if !rcv_error && !not_IP) failure indicated in err_code below, decode:
+ - 1 = Not IP: the IP version field is neither 4 nor 6.
+ - 2 = IPv4 Header Checksum Error: the IPv4 header has a checksum violation.
+ - 3 = IP Malformed Header: the packet is not long enough to contain the IP header.
+ - 4 = IP Malformed: the packet is not long enough to contain the bytes indicated by the IP
+ header. Pad is allowed.
+ - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6 Hop Count field are zero.
+ - 6 = IP Options */
+
+ uint64_t is_bcast : 1; /**< set if the hardware determined that the packet is a broadcast */
+ uint64_t is_mcast : 1; /**< set if the hardware determined that the packet is a multi-cast */
+ uint64_t not_IP : 1; /**< set if the packet may not be IP (must be zero in this case) */
+ uint64_t rcv_error : 1; /**< the receive interface hardware detected a receive error (must be zero in this case) */
+ /* lower err_code = first-level descriptor of the work */
+ /* zero for packet submitted by hardware that isn't on the slow path */
+
+ uint64_t err_code : 8; /**< type is cvmx_pip_err_t */
+ } s_cn38xx;
+
+ /**< use this to get at the 16 vlan bits */
+ struct
+ {
+ uint64_t unused1 :16;
+ uint64_t vlan :16;
+ uint64_t unused2 :32;
+ } svlan;
+
+ /**< use this struct if the hardware could not determine that the packet is ip */
+ struct
+ {
+ uint64_t bufs : 8; /**< HW sets this to the number of buffers used by this packet */
+ uint64_t unused : 8;
+ uint64_t vlan_valid : 1; /**< set to 1 if we found DSA/VLAN in the L2 */
+ uint64_t vlan_stacked : 1; /**< Set to 1 if the DSA/VLAN tag is stacked */
+ uint64_t unassigned : 1;
+ uint64_t vlan_cfi : 1; /**< HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
+ uint64_t vlan_id :12; /**< HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
+
+ uint64_t varies:12; /**< 38xx and 68xx have different definitions. */
+ uint64_t unassigned2:4;
+
+ uint64_t software : 1; /**< reserved for software use, hardware will clear on packet creation */
+ uint64_t unassigned3 : 1;
+ uint64_t is_rarp : 1; /**< set if the hardware determined that the packet is rarp */
+ uint64_t is_arp : 1; /**< set if the hardware determined that the packet is arp */
+ uint64_t is_bcast : 1; /**< set if the hardware determined that the packet is a broadcast */
+ uint64_t is_mcast : 1; /**< set if the hardware determined that the packet is a multi-cast */
+ uint64_t not_IP : 1; /**< set if the packet may not be IP (must be one in this case) */
+ uint64_t rcv_error : 1; /**< the receive interface hardware detected a receive error.
+ Failure indicated in err_code below, decode:
+ - 1 = partial error: a packet was partially received, but internal
+ buffering / bandwidth was not adequate to receive the entire packet.
+ - 2 = jabber error: the RGMII packet was too large and is truncated.
+ - 3 = overrun error: the RGMII packet is longer than allowed and had
+ an FCS error.
+ - 4 = oversize error: the RGMII packet is longer than allowed.
+ - 5 = alignment error: the RGMII packet is not an integer number of bytes
+ and had an FCS error (100M and 10M only).
+ - 6 = fragment error: the RGMII packet is shorter than allowed and had an
+ FCS error.
+ - 7 = GMX FCS error: the RGMII packet had an FCS error.
+ - 8 = undersize error: the RGMII packet is shorter than allowed.
+ - 9 = extend error: the RGMII packet had an extend error.
+ - 10 = length mismatch error: the RGMII packet had a length that did not
+ match the length field in the L2 HDR.
+ - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII packet had one or more
+ data reception errors (RXERR) or the SPI4 packet had one or more DIP4
+ errors.
+ - 12 = RGMII skip error/SPI4 Abort Error: the RGMII packet was not large
+ enough to cover the skipped bytes or the SPI4 packet was terminated
+ with an Abort EOPS.
+ - 13 = RGMII nibble error/SPI4 Port NXA Error: the RGMII packet had a
+ stutter error (data not repeated - 10/100M only) or the SPI4 packet
+ was sent to an NXA.
+ - 16 = FCS error: a SPI4.2 packet had an FCS error.
+ - 17 = Skip error: a packet was not large enough to cover the skipped bytes.
+ - 18 = L2 header malformed: the packet is not long enough to contain the L2 header. */
+
+
+ /* lower err_code = first-level descriptor of the work */
+ /* zero for packet submitted by hardware that isn't on the slow path */
+ uint64_t err_code : 8; /* type is cvmx_pip_err_t (union, so can't use directly) */
+ } snoip;
+ struct
+ {
+ uint64_t bufs : 8; /**< HW sets this to the number of buffers used by this packet */
+ uint64_t unused : 8;
+ uint64_t vlan_valid : 1; /**< set to 1 if we found DSA/VLAN in the L2 */
+ uint64_t vlan_stacked : 1; /**< Set to 1 if the DSA/VLAN tag is stacked */
+ uint64_t unassigned : 1;
+ uint64_t vlan_cfi : 1; /**< HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
+ uint64_t vlan_id :12; /**< HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
+
+ uint64_t port:12; /**< MAC/PIP port number. */
+ uint64_t unassigned2:4;
+
+ uint64_t software : 1; /**< reserved for software use, hardware will clear on packet creation */
+ uint64_t unassigned3 : 1;
+ uint64_t is_rarp : 1; /**< set if the hardware determined that the packet is rarp */
+ uint64_t is_arp : 1; /**< set if the hardware determined that the packet is arp */
+ uint64_t is_bcast : 1; /**< set if the hardware determined that the packet is a broadcast */
+ uint64_t is_mcast : 1; /**< set if the hardware determined that the packet is a multi-cast */
+ uint64_t not_IP : 1; /**< set if the packet may not be IP (must be one in this case) */
+ uint64_t rcv_error : 1; /**< the receive interface hardware detected a receive error.
+ Failure indicated in err_code below, decode:
+ - 1 = partial error: a packet was partially received, but internal
+ buffering / bandwidth was not adequate to receive the entire packet.
+ - 2 = jabber error: the RGMII packet was too large and is truncated.
+ - 3 = overrun error: the RGMII packet is longer than allowed and had
+ an FCS error.
+ - 4 = oversize error: the RGMII packet is longer than allowed.
+ - 5 = alignment error: the RGMII packet is not an integer number of bytes
+ and had an FCS error (100M and 10M only).
+ - 6 = fragment error: the RGMII packet is shorter than allowed and had an
+ FCS error.
+ - 7 = GMX FCS error: the RGMII packet had an FCS error.
+ - 8 = undersize error: the RGMII packet is shorter than allowed.
+ - 9 = extend error: the RGMII packet had an extend error.
+ - 10 = length mismatch error: the RGMII packet had a length that did not
+ match the length field in the L2 HDR.
+ - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII packet had one or more
+ data reception errors (RXERR) or the SPI4 packet had one or more DIP4
+ errors.
+ - 12 = RGMII skip error/SPI4 Abort Error: the RGMII packet was not large
+ enough to cover the skipped bytes or the SPI4 packet was terminated
+ with an Abort EOPS.
+ - 13 = RGMII nibble error/SPI4 Port NXA Error: the RGMII packet had a
+ stutter error (data not repeated - 10/100M only) or the SPI4 packet
+ was sent to an NXA.
+ - 16 = FCS error: a SPI4.2 packet had an FCS error.
+ - 17 = Skip error: a packet was not large enough to cover the skipped bytes.
+ - 18 = L2 header malformed: the packet is not long enough to contain the L2 header. */
+
+
+ /* lower err_code = first-level descriptor of the work */
+ /* zero for packet submitted by hardware that isn't on the slow path */
+ uint64_t err_code : 8; /* type is cvmx_pip_err_t (union, so can't use directly) */
+ } snoip_cn68xx;
+ struct
+ {
+ uint64_t bufs : 8; /**< HW sets this to the number of buffers used by this packet */
+ uint64_t unused : 8;
+ uint64_t vlan_valid : 1; /**< set to 1 if we found DSA/VLAN in the L2 */
+ uint64_t vlan_stacked : 1; /**< Set to 1 if the DSA/VLAN tag is stacked */
+ uint64_t unassigned : 1;
+ uint64_t vlan_cfi : 1; /**< HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
+ uint64_t vlan_id :12; /**< HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
+ uint64_t pr : 4; /**< Ring Identifier (if PCIe). Requires PIP_GBL_CTL[RING_EN]=1 */
+ uint64_t unassigned2a :8;
+ uint64_t unassigned2 :4;
+
+ uint64_t software : 1; /**< reserved for software use, hardware will clear on packet creation */
+ uint64_t unassigned3 : 1;
+ uint64_t is_rarp : 1; /**< set if the hardware determined that the packet is rarp */
+ uint64_t is_arp : 1; /**< set if the hardware determined that the packet is arp */
+ uint64_t is_bcast : 1; /**< set if the hardware determined that the packet is a broadcast */
+ uint64_t is_mcast : 1; /**< set if the hardware determined that the packet is a multi-cast */
+ uint64_t not_IP : 1; /**< set if the packet may not be IP (must be one in this case) */
+ uint64_t rcv_error : 1; /**< the receive interface hardware detected a receive error.
+ Failure indicated in err_code below, decode:
+ - 1 = partial error: a packet was partially received, but internal
+ buffering / bandwidth was not adequate to receive the entire packet.
+ - 2 = jabber error: the RGMII packet was too large and is truncated.
+ - 3 = overrun error: the RGMII packet is longer than allowed and had
+ an FCS error.
+ - 4 = oversize error: the RGMII packet is longer than allowed.
+ - 5 = alignment error: the RGMII packet is not an integer number of bytes
+ and had an FCS error (100M and 10M only).
+ - 6 = fragment error: the RGMII packet is shorter than allowed and had an
+ FCS error.
+ - 7 = GMX FCS error: the RGMII packet had an FCS error.
+ - 8 = undersize error: the RGMII packet is shorter than allowed.
+ - 9 = extend error: the RGMII packet had an extend error.
+ - 10 = length mismatch error: the RGMII packet had a length that did not
+ match the length field in the L2 HDR.
+ - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII packet had one or more
+ data reception errors (RXERR) or the SPI4 packet had one or more DIP4
+ errors.
+ - 12 = RGMII skip error/SPI4 Abort Error: the RGMII packet was not large
+ enough to cover the skipped bytes or the SPI4 packet was terminated
+ with an Abort EOPS.
+ - 13 = RGMII nibble error/SPI4 Port NXA Error: the RGMII packet had a
+ stutter error (data not repeated - 10/100M only) or the SPI4 packet
+ was sent to an NXA.
+ - 16 = FCS error: a SPI4.2 packet had an FCS error.
+ - 17 = Skip error: a packet was not large enough to cover the skipped bytes.
+ - 18 = L2 header malformed: the packet is not long enough to contain the L2 header. */
+
+
+ /* lower err_code = first-level descriptor of the work */
+ /* zero for packet submitted by hardware that isn't on the slow path */
+ uint64_t err_code : 8; /* type is cvmx_pip_err_t (union, so can't use directly) */
+ } snoip_cn38xx;
+
+} cvmx_pip_wqe_word2_t;
+
+typedef union {
+ struct {
+ /**
+ * raw chksum result generated by the HW
+ */
+ uint16_t hw_chksum;
+ /**
+ * Field unused by hardware - available for software
+ */
+ uint8_t unused;
+ /**
+ * Next pointer used by hardware for list maintenance.
+ * May be written/read by HW before the work queue
+ * entry is scheduled to a PP (Only 36 bits used in
+ * Octeon 1)
+ */
+ uint64_t next_ptr : 40;
+
+ } cn38xx;
+ struct {
+ uint64_t l4ptr:8; /* 56..63 */
+ uint64_t unused0:8; /* 48..55 */
+ uint64_t l3ptr:8; /* 40..47 */
+ uint64_t l2ptr:8; /* 32..39 */
+ uint64_t unused1:18; /* 14..31 */
+ uint64_t bpid:6; /* 8..13 */
+ uint64_t unused2:2; /* 6..7 */
+ uint64_t pknd:6; /* 0..5 */
+ } cn68xx;
+} cvmx_pip_wqe_word0_t;
+
+typedef union {
+ uint64_t u64;
+ cvmx_pip_wqe_word0_t pip;
+ struct {
+ uint64_t unused:24;
+ uint64_t next_ptr:40; /* on cn68xx this is unused as well */
+
+ } raw;
+} cvmx_wqe_word0_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+ uint64_t len:16;
+ uint64_t varies:14;
+ /**
+ * the type of the tag (ORDERED, ATOMIC, NULL)
+ */
+ cvmx_pow_tag_type_t tag_type:2;
+ uint64_t tag:32;
+ } s;
+ struct {
+ uint64_t len:16;
+ uint64_t zero_0:1;
+ /**
+ * HW sets this to what it thought the priority of the input packet was
+ */
+ uint64_t qos:3;
+
+ uint64_t zero_1:1;
+ /**
+ * the group that the work queue entry will be scheduled to
+ */
+ uint64_t grp:6;
+ uint64_t zero_2:3;
+ cvmx_pow_tag_type_t tag_type:2;
+ uint64_t tag:32;
+ } cn68xx;
+ struct {
+ uint64_t len:16;
+ /**
+ * HW sets this to input physical port
+ */
+ uint64_t ipprt:6;
+
+ /**
+ * HW sets this to what it thought the priority of the input packet was
+ */
+ uint64_t qos:3;
+
+ /**
+ * the group that the work queue entry will be scheduled to
+ */
+ uint64_t grp:4;
+ uint64_t zero_2:1;
+ cvmx_pow_tag_type_t tag_type:2;
+ uint64_t tag:32;
+ } cn38xx;
+} cvmx_wqe_word1_t;
+
+/**
+ * Work queue entry format
+ *
+ * must be 8-byte aligned
+ */
+typedef struct
+{
+
+ /*****************************************************************
+ * WORD 0
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ */
+
+ cvmx_wqe_word0_t word0;
+
+ /*****************************************************************
+ * WORD 1
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ */
+
+ cvmx_wqe_word1_t word1;
+ /**
+ * WORD 2
+ * HW WRITE: the following 64-bits are filled in by hardware when a packet arrives
+ * This indicates a variety of status and error conditions.
+ */
+ cvmx_pip_wqe_word2_t word2;
+
+ /**
+ * Pointer to the first segment of the packet.
+ */
+ cvmx_buf_ptr_t packet_ptr;
+
+ /**
+ * HW WRITE: Octeon will fill in a programmable amount from the
+ * packet, up to (but possibly less than) the amount needed to
+ * fill the work queue entry to 128 bytes.
+ * If the packet is recognized to be IP, the hardware starts writing here
+ * at the point where the IP header starts (the IPv4 header is padded for
+ * appropriate alignment).
+ * If the packet is not recognized to be IP, the hardware starts writing
+ * the beginning of the packet here.
+ */
+ uint8_t packet_data[96];
+
+
+ /**
+ * If desired, SW can make the work Q entry any length. For the
+ * purposes of discussion here, assume 128B always, as this is all that
+ * the hardware deals with.
+ *
+ */
+
+} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;
+
+static inline int cvmx_wqe_get_port(cvmx_wqe_t *work)
+{
+ int port;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ port = work->word2.s_cn68xx.port;
+ else
+ port = work->word1.cn38xx.ipprt;
+
+ return port;
+}
+
+static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ work->word2.s_cn68xx.port = port;
+ else
+ work->word1.cn38xx.ipprt = port;
+}
+
+static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
+{
+ int grp;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ grp = work->word1.cn68xx.grp;
+ else
+ grp = work->word1.cn38xx.grp;
+
+ return grp;
+}
+
+static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ work->word1.cn68xx.grp = grp;
+ else
+ work->word1.cn38xx.grp = grp;
+}
+
+static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
+{
+ int qos;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ qos = work->word1.cn68xx.qos;
+ else
+ qos = work->word1.cn38xx.qos;
+
+ return qos;
+}
+
+static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ work->word1.cn68xx.qos = qos;
+ else
+ work->word1.cn38xx.qos = qos;
+}
+
+static inline int cvmx_wqe_get_len(cvmx_wqe_t *work)
+{
+ int len;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ len = work->word1.cn68xx.len;
+ else
+ len = work->word1.cn38xx.len;
+
+ return len;
+}
+
+static inline void cvmx_wqe_set_len(cvmx_wqe_t *work, int len)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ work->word1.cn68xx.len = len;
+ else
+ work->word1.cn38xx.len = len;
+}
+
+static inline uint32_t cvmx_wqe_get_tag(cvmx_wqe_t *work)
+{
+ return work->word1.s.tag;
+}
+
+static inline void cvmx_wqe_set_tag(cvmx_wqe_t *work, uint32_t tag)
+{
+ work->word1.s.tag = tag;
+}
+
+static inline int cvmx_wqe_get_tt(cvmx_wqe_t *work)
+{
+ return work->word1.s.tag_type;
+}
+
+static inline void cvmx_wqe_set_tt(cvmx_wqe_t *work, int tt)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ {
+ work->word1.cn68xx.tag_type = (cvmx_pow_tag_type_t)tt;
+ work->word1.cn68xx.zero_2 = 0;
+ }
+ else
+ {
+ work->word1.cn38xx.tag_type = (cvmx_pow_tag_type_t)tt;
+ work->word1.cn38xx.zero_2 = 0;
+ }
+}
+
+static inline int cvmx_wqe_get_unused8(cvmx_wqe_t *work)
+{
+ int len;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ len = work->word0.pip.cn68xx.unused1;
+ else
+ len = work->word0.pip.cn38xx.unused;
+
+ return len;
+}
+
+static inline void cvmx_wqe_set_unused8(cvmx_wqe_t *work, int v)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ work->word0.pip.cn68xx.unused1 = v;
+ else
+ work->word0.pip.cn38xx.unused = v;
+}
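+
+/* Example (illustrative sketch; cvmx_wqe_dump_summary() is a hypothetical
+ * helper): the accessors above hide the cn38xx/cn68xx word layout
+ * differences, so model-independent code can read WQE fields directly:
+ *
+ *   static inline void cvmx_wqe_dump_summary(cvmx_wqe_t *work)
+ *   {
+ *       cvmx_dprintf("port=%d grp=%d qos=%d len=%d tag=0x%08x type=%s\n",
+ *                    cvmx_wqe_get_port(work), cvmx_wqe_get_grp(work),
+ *                    cvmx_wqe_get_qos(work), cvmx_wqe_get_len(work),
+ *                    (unsigned int)cvmx_wqe_get_tag(work),
+ *                    OCT_TAG_TYPE_STRING(cvmx_wqe_get_tt(work)));
+ *   }
+ */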
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_WQE_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-wqe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-zip-defs.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-zip-defs.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-zip-defs.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,1114 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2012 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-zip-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon zip.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_ZIP_DEFS_H__
+#define __CVMX_ZIP_DEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_CMD_BIST_RESULT CVMX_ZIP_CMD_BIST_RESULT_FUNC()
+static inline uint64_t CVMX_ZIP_CMD_BIST_RESULT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_CMD_BIST_RESULT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000080ull);
+}
+#else
+#define CVMX_ZIP_CMD_BIST_RESULT (CVMX_ADD_IO_SEG(0x0001180038000080ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_CMD_BUF CVMX_ZIP_CMD_BUF_FUNC()
+static inline uint64_t CVMX_ZIP_CMD_BUF_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_CMD_BUF not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000008ull);
+}
+#else
+#define CVMX_ZIP_CMD_BUF (CVMX_ADD_IO_SEG(0x0001180038000008ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_CMD_CTL CVMX_ZIP_CMD_CTL_FUNC()
+static inline uint64_t CVMX_ZIP_CMD_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_CMD_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000000ull);
+}
+#else
+#define CVMX_ZIP_CMD_CTL (CVMX_ADD_IO_SEG(0x0001180038000000ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_CONSTANTS CVMX_ZIP_CONSTANTS_FUNC()
+static inline uint64_t CVMX_ZIP_CONSTANTS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_CONSTANTS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800380000A0ull);
+}
+#else
+#define CVMX_ZIP_CONSTANTS (CVMX_ADD_IO_SEG(0x00011800380000A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ZIP_COREX_BIST_STATUS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ZIP_COREX_BIST_STATUS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180038000520ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ZIP_COREX_BIST_STATUS(offset) (CVMX_ADD_IO_SEG(0x0001180038000520ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_CTL_BIST_STATUS CVMX_ZIP_CTL_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_ZIP_CTL_BIST_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_CTL_BIST_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000510ull);
+}
+#else
+#define CVMX_ZIP_CTL_BIST_STATUS (CVMX_ADD_IO_SEG(0x0001180038000510ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_CTL_CFG CVMX_ZIP_CTL_CFG_FUNC()
+static inline uint64_t CVMX_ZIP_CTL_CFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_CTL_CFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000560ull);
+}
+#else
+#define CVMX_ZIP_CTL_CFG (CVMX_ADD_IO_SEG(0x0001180038000560ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ZIP_DBG_COREX_INST(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ZIP_DBG_COREX_INST(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180038000640ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ZIP_DBG_COREX_INST(offset) (CVMX_ADD_IO_SEG(0x0001180038000640ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ZIP_DBG_COREX_STA(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ZIP_DBG_COREX_STA(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180038000680ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ZIP_DBG_COREX_STA(offset) (CVMX_ADD_IO_SEG(0x0001180038000680ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ZIP_DBG_QUEX_STA(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ZIP_DBG_QUEX_STA(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180038000600ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ZIP_DBG_QUEX_STA(offset) (CVMX_ADD_IO_SEG(0x0001180038000600ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_DEBUG0 CVMX_ZIP_DEBUG0_FUNC()
+static inline uint64_t CVMX_ZIP_DEBUG0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_DEBUG0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000098ull);
+}
+#else
+#define CVMX_ZIP_DEBUG0 (CVMX_ADD_IO_SEG(0x0001180038000098ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_ECC_CTL CVMX_ZIP_ECC_CTL_FUNC()
+static inline uint64_t CVMX_ZIP_ECC_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_ECC_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000568ull);
+}
+#else
+#define CVMX_ZIP_ECC_CTL (CVMX_ADD_IO_SEG(0x0001180038000568ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_ERROR CVMX_ZIP_ERROR_FUNC()
+static inline uint64_t CVMX_ZIP_ERROR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_ERROR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000088ull);
+}
+#else
+#define CVMX_ZIP_ERROR (CVMX_ADD_IO_SEG(0x0001180038000088ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_INT_ENA CVMX_ZIP_INT_ENA_FUNC()
+static inline uint64_t CVMX_ZIP_INT_ENA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_INT_ENA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000580ull);
+}
+#else
+#define CVMX_ZIP_INT_ENA (CVMX_ADD_IO_SEG(0x0001180038000580ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_INT_MASK CVMX_ZIP_INT_MASK_FUNC()
+static inline uint64_t CVMX_ZIP_INT_MASK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_INT_MASK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000090ull);
+}
+#else
+#define CVMX_ZIP_INT_MASK (CVMX_ADD_IO_SEG(0x0001180038000090ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_INT_REG CVMX_ZIP_INT_REG_FUNC()
+static inline uint64_t CVMX_ZIP_INT_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_INT_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000570ull);
+}
+#else
+#define CVMX_ZIP_INT_REG (CVMX_ADD_IO_SEG(0x0001180038000570ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ZIP_QUEX_BUF(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ZIP_QUEX_BUF(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180038000100ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ZIP_QUEX_BUF(offset) (CVMX_ADD_IO_SEG(0x0001180038000100ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ZIP_QUEX_ECC_ERR_STA(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ZIP_QUEX_ECC_ERR_STA(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180038000590ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ZIP_QUEX_ECC_ERR_STA(offset) (CVMX_ADD_IO_SEG(0x0001180038000590ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ZIP_QUEX_MAP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_ZIP_QUEX_MAP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001180038000300ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_ZIP_QUEX_MAP(offset) (CVMX_ADD_IO_SEG(0x0001180038000300ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_QUE_ENA CVMX_ZIP_QUE_ENA_FUNC()
+static inline uint64_t CVMX_ZIP_QUE_ENA_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_QUE_ENA not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000500ull);
+}
+#else
+#define CVMX_ZIP_QUE_ENA (CVMX_ADD_IO_SEG(0x0001180038000500ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_QUE_PRI CVMX_ZIP_QUE_PRI_FUNC()
+static inline uint64_t CVMX_ZIP_QUE_PRI_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_QUE_PRI not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000508ull);
+}
+#else
+#define CVMX_ZIP_QUE_PRI (CVMX_ADD_IO_SEG(0x0001180038000508ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ZIP_THROTTLE CVMX_ZIP_THROTTLE_FUNC()
+static inline uint64_t CVMX_ZIP_THROTTLE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ cvmx_warn("CVMX_ZIP_THROTTLE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001180038000010ull);
+}
+#else
+#define CVMX_ZIP_THROTTLE (CVMX_ADD_IO_SEG(0x0001180038000010ull))
+#endif
+
+/**
+ * cvmx_zip_cmd_bist_result
+ *
+ * ZIP_CMD_BIST_RESULT = ZIP Command BIST Result Register
+ *
+ * Description:
+ * This register is a reformatted register with the same fields as O63 2.x.
+ * It exists for software backward compatibility.
+ * Some bits report the combined BIST status of several memories (per bit, 0=pass and 1=fail).
+ */
+union cvmx_zip_cmd_bist_result {
+ uint64_t u64;
+ struct cvmx_zip_cmd_bist_result_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_57_63 : 7;
+ uint64_t zip_core : 53; /**< BiST result of the ZIP_CORE memories */
+ uint64_t zip_ctl : 4; /**< BiST result of the ZIP_CTL memories */
+#else
+ uint64_t zip_ctl : 4;
+ uint64_t zip_core : 53;
+ uint64_t reserved_57_63 : 7;
+#endif
+ } s;
+ struct cvmx_zip_cmd_bist_result_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63 : 33;
+ uint64_t zip_core : 27; /**< BiST result of the ZIP_CORE memories */
+ uint64_t zip_ctl : 4; /**< BiST result of the ZIP_CTL memories */
+#else
+ uint64_t zip_ctl : 4;
+ uint64_t zip_core : 27;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn31xx;
+ struct cvmx_zip_cmd_bist_result_cn31xx cn38xx;
+ struct cvmx_zip_cmd_bist_result_cn31xx cn38xxp2;
+ struct cvmx_zip_cmd_bist_result_cn31xx cn56xx;
+ struct cvmx_zip_cmd_bist_result_cn31xx cn56xxp1;
+ struct cvmx_zip_cmd_bist_result_cn31xx cn58xx;
+ struct cvmx_zip_cmd_bist_result_cn31xx cn58xxp1;
+ struct cvmx_zip_cmd_bist_result_s cn61xx;
+ struct cvmx_zip_cmd_bist_result_s cn63xx;
+ struct cvmx_zip_cmd_bist_result_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63 : 21;
+ uint64_t zip_core : 39; /**< BiST result of the ZIP_CORE memories */
+ uint64_t zip_ctl : 4; /**< BiST result of the ZIP_CTL memories */
+#else
+ uint64_t zip_ctl : 4;
+ uint64_t zip_core : 39;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } cn63xxp1;
+ struct cvmx_zip_cmd_bist_result_s cn66xx;
+ struct cvmx_zip_cmd_bist_result_s cn68xx;
+ struct cvmx_zip_cmd_bist_result_s cn68xxp1;
+};
+typedef union cvmx_zip_cmd_bist_result cvmx_zip_cmd_bist_result_t;
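The per-bit convention above (0=pass, 1=fail) means a wholly successful BIST
reads back as zero. A hedged sketch of a post-reset check; the register and
union are the ones defined in this file, the error reporting is illustrative:

    /* Sketch: report any failing ZIP memory after BIST. */
    cvmx_zip_cmd_bist_result_t bist;
    bist.u64 = cvmx_read_csr(CVMX_ZIP_CMD_BIST_RESULT);
    if (bist.s.zip_core || bist.s.zip_ctl)
        cvmx_dprintf("ZIP BIST failure: core=0x%llx ctl=0x%llx\n",
                     (unsigned long long)bist.s.zip_core,
                     (unsigned long long)bist.s.zip_ctl);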
+
+/**
+ * cvmx_zip_cmd_buf
+ *
+ * ZIP_CMD_BUF = ZIP Command Buffer Parameter Register
+ *
+ * Description:
+ * This is an alias of ZIP_QUE0_BUF and exists for software backward compatibility.
+ * This register sets the buffer parameters for instruction queue 0.
+ */
+union cvmx_zip_cmd_buf {
+ uint64_t u64;
+ struct cvmx_zip_cmd_buf_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t dwb : 9; /**< Number of DontWriteBacks */
+ uint64_t pool : 3; /**< Free list used to free command buffer segments */
+ uint64_t size : 13; /**< Number of uint64s per command buffer segment */
+ uint64_t ptr : 33; /**< Initial command buffer pointer[39:7] (128B-aligned) */
+#else
+ uint64_t ptr : 33;
+ uint64_t size : 13;
+ uint64_t pool : 3;
+ uint64_t dwb : 9;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } s;
+ struct cvmx_zip_cmd_buf_s cn31xx;
+ struct cvmx_zip_cmd_buf_s cn38xx;
+ struct cvmx_zip_cmd_buf_s cn38xxp2;
+ struct cvmx_zip_cmd_buf_s cn56xx;
+ struct cvmx_zip_cmd_buf_s cn56xxp1;
+ struct cvmx_zip_cmd_buf_s cn58xx;
+ struct cvmx_zip_cmd_buf_s cn58xxp1;
+ struct cvmx_zip_cmd_buf_s cn61xx;
+ struct cvmx_zip_cmd_buf_s cn63xx;
+ struct cvmx_zip_cmd_buf_s cn63xxp1;
+ struct cvmx_zip_cmd_buf_s cn66xx;
+ struct cvmx_zip_cmd_buf_s cn68xx;
+ struct cvmx_zip_cmd_buf_s cn68xxp1;
+};
+typedef union cvmx_zip_cmd_buf cvmx_zip_cmd_buf_t;
+
+/**
+ * cvmx_zip_cmd_ctl
+ *
+ * ZIP_CMD_CTL = ZIP Clock/Reset Control Register
+ *
+ * Description:
+ * This register controls clock and reset.
+ */
+union cvmx_zip_cmd_ctl {
+ uint64_t u64;
+ struct cvmx_zip_cmd_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t forceclk : 1; /**< Force zip_ctl__zip<0|1>_clock_on_b == 1 when set */
+ uint64_t reset : 1; /**< Reset one-shot pulse for zip cores */
+#else
+ uint64_t reset : 1;
+ uint64_t forceclk : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_zip_cmd_ctl_s cn31xx;
+ struct cvmx_zip_cmd_ctl_s cn38xx;
+ struct cvmx_zip_cmd_ctl_s cn38xxp2;
+ struct cvmx_zip_cmd_ctl_s cn56xx;
+ struct cvmx_zip_cmd_ctl_s cn56xxp1;
+ struct cvmx_zip_cmd_ctl_s cn58xx;
+ struct cvmx_zip_cmd_ctl_s cn58xxp1;
+ struct cvmx_zip_cmd_ctl_s cn61xx;
+ struct cvmx_zip_cmd_ctl_s cn63xx;
+ struct cvmx_zip_cmd_ctl_s cn63xxp1;
+ struct cvmx_zip_cmd_ctl_s cn66xx;
+ struct cvmx_zip_cmd_ctl_s cn68xx;
+ struct cvmx_zip_cmd_ctl_s cn68xxp1;
+};
+typedef union cvmx_zip_cmd_ctl cvmx_zip_cmd_ctl_t;
+
+/**
+ * cvmx_zip_constants
+ *
+ * ZIP_CONSTANTS = ZIP Constants Register
+ *
+ * Description:
+ * This register contains the implementation-related parameters of the zip core in this chip.
+ */
+union cvmx_zip_constants {
+ uint64_t u64;
+ struct cvmx_zip_constants_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nexec : 8; /**< Number of available ZIP Exec Units */
+ uint64_t reserved_49_55 : 7;
+ uint64_t syncflush_capable : 1; /**< 1: SYNCFLUSH is supported
+ - 0: SYNCFLUSH is not supported.
+ Note: for O68 1.0, SYNCFLUSH is supported
+ although this field is 0. */
+ uint64_t depth : 16; /**< Maximum search depth for compression */
+ uint64_t onfsize : 12; /**< Output near full threshold in bytes */
+ uint64_t ctxsize : 12; /**< Decompression Context size in bytes */
+ uint64_t reserved_1_7 : 7;
+ uint64_t disabled : 1; /**< 1=zip is disabled, 0=zip is enabled */
+#else
+ uint64_t disabled : 1;
+ uint64_t reserved_1_7 : 7;
+ uint64_t ctxsize : 12;
+ uint64_t onfsize : 12;
+ uint64_t depth : 16;
+ uint64_t syncflush_capable : 1;
+ uint64_t reserved_49_55 : 7;
+ uint64_t nexec : 8;
+#endif
+ } s;
+ struct cvmx_zip_constants_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t depth : 16; /**< Maximum search depth for compression */
+ uint64_t onfsize : 12; /**< Output near full threshold in bytes */
+ uint64_t ctxsize : 12; /**< Context size in bytes */
+ uint64_t reserved_1_7 : 7;
+ uint64_t disabled : 1; /**< 1=zip unit is disabled, 0=zip unit is not disabled */
+#else
+ uint64_t disabled : 1;
+ uint64_t reserved_1_7 : 7;
+ uint64_t ctxsize : 12;
+ uint64_t onfsize : 12;
+ uint64_t depth : 16;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } cn31xx;
+ struct cvmx_zip_constants_cn31xx cn38xx;
+ struct cvmx_zip_constants_cn31xx cn38xxp2;
+ struct cvmx_zip_constants_cn31xx cn56xx;
+ struct cvmx_zip_constants_cn31xx cn56xxp1;
+ struct cvmx_zip_constants_cn31xx cn58xx;
+ struct cvmx_zip_constants_cn31xx cn58xxp1;
+ struct cvmx_zip_constants_s cn61xx;
+ struct cvmx_zip_constants_cn31xx cn63xx;
+ struct cvmx_zip_constants_cn31xx cn63xxp1;
+ struct cvmx_zip_constants_s cn66xx;
+ struct cvmx_zip_constants_s cn68xx;
+ struct cvmx_zip_constants_cn31xx cn68xxp1;
+};
+typedef union cvmx_zip_constants cvmx_zip_constants_t;
+
+/**
+ * cvmx_zip_core#_bist_status
+ *
+ * ZIP_CORE_BIST_STATUS = ZIP CORE Bist Status Registers
+ *
+ * Description:
+ * These registers hold the BIST status of the memories in the zip cores.
+ * Each bit is the BIST result of an individual memory (per bit, 0=pass and 1=fail).
+ */
+union cvmx_zip_corex_bist_status {
+ uint64_t u64;
+ struct cvmx_zip_corex_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_53_63 : 11;
+ uint64_t bstatus : 53; /**< BIST result of the ZIP_CORE memories */
+#else
+ uint64_t bstatus : 53;
+ uint64_t reserved_53_63 : 11;
+#endif
+ } s;
+ struct cvmx_zip_corex_bist_status_s cn68xx;
+ struct cvmx_zip_corex_bist_status_s cn68xxp1;
+};
+typedef union cvmx_zip_corex_bist_status cvmx_zip_corex_bist_status_t;
+
+/**
+ * cvmx_zip_ctl_bist_status
+ *
+ * ZIP_CTL_BIST_STATUS = ZIP CONTROL Bist Status Register
+ *
+ * Description:
+ * This register has the bist status of memories in zip_ctl (Instruction Buffer, G/S Pointer Fifo, Input Data Buffer,
+ * Output Data Buffers).
+ * Each bit is the bist result of an individual memory (per bit, 0=pass and 1=fail).
+ */
+union cvmx_zip_ctl_bist_status {
+ uint64_t u64;
+ struct cvmx_zip_ctl_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63 : 57;
+ uint64_t bstatus : 7; /**< BIST result of the memories */
+#else
+ uint64_t bstatus : 7;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } s;
+ struct cvmx_zip_ctl_bist_status_s cn68xx;
+ struct cvmx_zip_ctl_bist_status_s cn68xxp1;
+};
+typedef union cvmx_zip_ctl_bist_status cvmx_zip_ctl_bist_status_t;
+
+/**
+ * cvmx_zip_ctl_cfg
+ *
+ * ZIP_CTL_CFG = ZIP Controller Configuration Register
+ *
+ * Description:
+ * This register controls the behavior of the zip dma engine. It is recommended to keep these fields at their
+ * default values for normal operation. Changing the values of the fields may be useful for diagnostics.
+ */
+union cvmx_zip_ctl_cfg {
+ uint64_t u64;
+ struct cvmx_zip_ctl_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63 : 37;
+ uint64_t ildf : 3; /**< Instruction Load Command FIFO Credits <= 4 */
+ uint64_t reserved_22_23 : 2;
+ uint64_t iprf : 2; /**< Instruction Page Return Cmd FIFO Credits <= 2 */
+ uint64_t reserved_19_19 : 1;
+ uint64_t gstf : 3; /**< G/S Tag FIFO Credits <= 4 */
+ uint64_t reserved_15_15 : 1;
+ uint64_t stcf : 3; /**< Store Command FIFO Credits <= 4 */
+ uint64_t reserved_11_11 : 1;
+ uint64_t ldf : 3; /**< Load Cmd FIFO Credits <= 4 */
+ uint64_t reserved_6_7 : 2;
+ uint64_t wkqf : 2; /**< WorkQueue FIFO Credits <= 2 */
+ uint64_t reserved_2_3 : 2;
+ uint64_t busy : 1; /**< 1: ZIP system is busy; 0: ZIP system is idle. */
+ uint64_t lmod : 1; /**< Legacy Mode. */
+#else
+ uint64_t lmod : 1;
+ uint64_t busy : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t wkqf : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t ldf : 3;
+ uint64_t reserved_11_11 : 1;
+ uint64_t stcf : 3;
+ uint64_t reserved_15_15 : 1;
+ uint64_t gstf : 3;
+ uint64_t reserved_19_19 : 1;
+ uint64_t iprf : 2;
+ uint64_t reserved_22_23 : 2;
+ uint64_t ildf : 3;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } s;
+ struct cvmx_zip_ctl_cfg_s cn68xx;
+ struct cvmx_zip_ctl_cfg_s cn68xxp1;
+};
+typedef union cvmx_zip_ctl_cfg cvmx_zip_ctl_cfg_t;
+
+/**
+ * cvmx_zip_dbg_core#_inst
+ *
+ * ZIP_DBG_COREX_INST = ZIP Core Current Instruction Registers
+ *
+ * Description:
+ * This register reflects the status of the current instruction that the zip core is executing or has executed.
+ * This register is only for debug use.
+ */
+union cvmx_zip_dbg_corex_inst {
+ uint64_t u64;
+ struct cvmx_zip_dbg_corex_inst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t busy : 1; /**< Core State: 1 - Core is busy; 0 - Core is idle */
+ uint64_t reserved_33_62 : 30;
+ uint64_t qid : 1; /**< Queue Index of instruction executed (BUSY=0) or
+ being executed (BUSY=1) on this core */
+ uint64_t iid : 32; /**< Instruction Index executed (BUSY=0) or being
+ executed (BUSY=1) on this core */
+#else
+ uint64_t iid : 32;
+ uint64_t qid : 1;
+ uint64_t reserved_33_62 : 30;
+ uint64_t busy : 1;
+#endif
+ } s;
+ struct cvmx_zip_dbg_corex_inst_s cn68xx;
+ struct cvmx_zip_dbg_corex_inst_s cn68xxp1;
+};
+typedef union cvmx_zip_dbg_corex_inst cvmx_zip_dbg_corex_inst_t;
+
+/**
+ * cvmx_zip_dbg_core#_sta
+ *
+ * ZIP_DBG_COREX_STA = ZIP Core Status Registers
+ *
+ * Description:
+ * These registers reflect the status of the zip cores.
+ * This register is only for debug use.
+ */
+union cvmx_zip_dbg_corex_sta {
+ uint64_t u64;
+ struct cvmx_zip_dbg_corex_sta_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t busy : 1; /**< Core State: 1 - Core is busy; 0 - Core is idle */
+ uint64_t reserved_37_62 : 26;
+ uint64_t ist : 5; /**< State of the instruction currently executing */
+ uint64_t nie : 32; /**< Number of instructions executed on this core */
+#else
+ uint64_t nie : 32;
+ uint64_t ist : 5;
+ uint64_t reserved_37_62 : 26;
+ uint64_t busy : 1;
+#endif
+ } s;
+ struct cvmx_zip_dbg_corex_sta_s cn68xx;
+ struct cvmx_zip_dbg_corex_sta_s cn68xxp1;
+};
+typedef union cvmx_zip_dbg_corex_sta cvmx_zip_dbg_corex_sta_t;
+
+/**
+ * cvmx_zip_dbg_que#_sta
+ *
+ * ZIP_DBG_QUEX_STA = ZIP Queue Status Registers
+ *
+ * Description:
+ * This register reflects the status of the zip instruction queue.
+ * This register is only for debug use.
+ */
+union cvmx_zip_dbg_quex_sta {
+ uint64_t u64;
+ struct cvmx_zip_dbg_quex_sta_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t busy : 1; /**< Queue State: 1 - Queue is busy; 0 - Queue is idle */
+ uint64_t reserved_52_62 : 11;
+ uint64_t cdbc : 20; /**< Current DoorBell Counter */
+ uint64_t nii : 32; /**< Number of instructions issued from this queue.
+ Reset to 0 when ZIP_QUEn_BUF is written. */
+#else
+ uint64_t nii : 32;
+ uint64_t cdbc : 20;
+ uint64_t reserved_52_62 : 11;
+ uint64_t busy : 1;
+#endif
+ } s;
+ struct cvmx_zip_dbg_quex_sta_s cn68xx;
+ struct cvmx_zip_dbg_quex_sta_s cn68xxp1;
+};
+typedef union cvmx_zip_dbg_quex_sta cvmx_zip_dbg_quex_sta_t;
+
+/**
+ * cvmx_zip_debug0
+ *
+ * ZIP_DEBUG0 = ZIP DEBUG Register
+ *
+ * Description:
+ */
+union cvmx_zip_debug0 {
+ uint64_t u64;
+ struct cvmx_zip_debug0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_30_63 : 34;
+ uint64_t asserts : 30; /**< FIFO assertion checks */
+#else
+ uint64_t asserts : 30;
+ uint64_t reserved_30_63 : 34;
+#endif
+ } s;
+ struct cvmx_zip_debug0_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63 : 50;
+ uint64_t asserts : 14; /**< FIFO assertion checks */
+#else
+ uint64_t asserts : 14;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } cn31xx;
+ struct cvmx_zip_debug0_cn31xx cn38xx;
+ struct cvmx_zip_debug0_cn31xx cn38xxp2;
+ struct cvmx_zip_debug0_cn31xx cn56xx;
+ struct cvmx_zip_debug0_cn31xx cn56xxp1;
+ struct cvmx_zip_debug0_cn31xx cn58xx;
+ struct cvmx_zip_debug0_cn31xx cn58xxp1;
+ struct cvmx_zip_debug0_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63 : 47;
+ uint64_t asserts : 17; /**< FIFO assertion checks */
+#else
+ uint64_t asserts : 17;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn61xx;
+ struct cvmx_zip_debug0_cn61xx cn63xx;
+ struct cvmx_zip_debug0_cn61xx cn63xxp1;
+ struct cvmx_zip_debug0_cn61xx cn66xx;
+ struct cvmx_zip_debug0_s cn68xx;
+ struct cvmx_zip_debug0_s cn68xxp1;
+};
+typedef union cvmx_zip_debug0 cvmx_zip_debug0_t;
+
+/**
+ * cvmx_zip_ecc_ctl
+ *
+ * ZIP_ECC_CTL = ZIP ECC Control Register
+ *
+ * Description:
+ * This register enables ECC for each individual internal memory that requires ECC. For debug purposes, it can
+ * also force 1 or 2 bits to be flipped in the ECC data.
+ */
+union cvmx_zip_ecc_ctl {
+ uint64_t u64;
+ struct cvmx_zip_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63 : 30;
+ uint64_t ibge : 2; /**< controls instruction buffer flip syndrome
+ 2'b00 : No Error Generation
+ 2'b10, 2'b01: Flip 1 bit
+ 2'b11 : Flip 2 bits */
+ uint64_t reserved_1_31 : 31;
+ uint64_t iben : 1; /**< 1: ECC Enabled for instruction buffer
+ - 0: ECC Disabled for instruction buffer */
+#else
+ uint64_t iben : 1;
+ uint64_t reserved_1_31 : 31;
+ uint64_t ibge : 2;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_zip_ecc_ctl_s cn68xx;
+ struct cvmx_zip_ecc_ctl_s cn68xxp1;
+};
+typedef union cvmx_zip_ecc_ctl cvmx_zip_ecc_ctl_t;
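For diagnostics, the IBGE encoding above lets software inject a correctable or
uncorrectable error into the instruction buffer ECC. A minimal sketch (CN68XX
only, per the model check above); the chosen encoding is one of the documented
values, not a recommendation:

    /* Sketch: enable IBUF ECC, then flip one bit to test SBE handling. */
    cvmx_zip_ecc_ctl_t ecc;
    ecc.u64 = cvmx_read_csr(CVMX_ZIP_ECC_CTL);
    ecc.s.iben = 1;     /* ECC enabled for the instruction buffer */
    ecc.s.ibge = 1;     /* 2'b01: flip 1 bit in the ECC data */
    cvmx_write_csr(CVMX_ZIP_ECC_CTL, ecc.u64);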
+
+/**
+ * cvmx_zip_error
+ *
+ * ZIP_ERROR = ZIP ERROR Register
+ *
+ * Description:
+ * This register is an alias of ZIP_INT_REG[DOORBELL0].
+ * It exists for software backward compatibility.
+ */
+union cvmx_zip_error {
+ uint64_t u64;
+ struct cvmx_zip_error_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t doorbell : 1; /**< A doorbell count has overflowed */
+#else
+ uint64_t doorbell : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_zip_error_s cn31xx;
+ struct cvmx_zip_error_s cn38xx;
+ struct cvmx_zip_error_s cn38xxp2;
+ struct cvmx_zip_error_s cn56xx;
+ struct cvmx_zip_error_s cn56xxp1;
+ struct cvmx_zip_error_s cn58xx;
+ struct cvmx_zip_error_s cn58xxp1;
+ struct cvmx_zip_error_s cn61xx;
+ struct cvmx_zip_error_s cn63xx;
+ struct cvmx_zip_error_s cn63xxp1;
+ struct cvmx_zip_error_s cn66xx;
+ struct cvmx_zip_error_s cn68xx;
+ struct cvmx_zip_error_s cn68xxp1;
+};
+typedef union cvmx_zip_error cvmx_zip_error_t;
+
+/**
+ * cvmx_zip_int_ena
+ *
+ * ZIP_INT_ENA = ZIP Interrupt Enable Register
+ *
+ * Description:
+ * An interrupt can fire only when its interrupt source is enabled.
+ * When a bit is set to 1, the corresponding interrupt is enabled.
+ */
+union cvmx_zip_int_ena {
+ uint64_t u64;
+ struct cvmx_zip_int_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t doorbell1 : 1; /**< Enable for Doorbell 1 count overflow */
+ uint64_t doorbell0 : 1; /**< Enable for Doorbell 0 count overflow */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ibdbe : 1; /**< Enable for IBUF Double Bit Error */
+ uint64_t ibsbe : 1; /**< Enable for IBUF Single Bit Error */
+ uint64_t fife : 1; /**< Enable for FIFO errors */
+#else
+ uint64_t fife : 1;
+ uint64_t ibsbe : 1;
+ uint64_t ibdbe : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t doorbell0 : 1;
+ uint64_t doorbell1 : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_zip_int_ena_s cn68xx;
+ struct cvmx_zip_int_ena_s cn68xxp1;
+};
+typedef union cvmx_zip_int_ena cvmx_zip_int_ena_t;
+
+/**
+ * cvmx_zip_int_mask
+ *
+ * ZIP_INT_MASK = ZIP Interrupt Mask Register
+ *
+ * Description:
+ * This register is an alias of ZIP_INT_ENA[DOORBELL0].
+ * It exists for software backward compatibility.
+ */
+union cvmx_zip_int_mask {
+ uint64_t u64;
+ struct cvmx_zip_int_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t doorbell : 1; /**< Bit mask corresponding to ZIP_ERROR[0] above */
+#else
+ uint64_t doorbell : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_zip_int_mask_s cn31xx;
+ struct cvmx_zip_int_mask_s cn38xx;
+ struct cvmx_zip_int_mask_s cn38xxp2;
+ struct cvmx_zip_int_mask_s cn56xx;
+ struct cvmx_zip_int_mask_s cn56xxp1;
+ struct cvmx_zip_int_mask_s cn58xx;
+ struct cvmx_zip_int_mask_s cn58xxp1;
+ struct cvmx_zip_int_mask_s cn61xx;
+ struct cvmx_zip_int_mask_s cn63xx;
+ struct cvmx_zip_int_mask_s cn63xxp1;
+ struct cvmx_zip_int_mask_s cn66xx;
+ struct cvmx_zip_int_mask_s cn68xx;
+ struct cvmx_zip_int_mask_s cn68xxp1;
+};
+typedef union cvmx_zip_int_mask cvmx_zip_int_mask_t;
+
+/**
+ * cvmx_zip_int_reg
+ *
+ * ZIP_INT_REG = ZIP Interrupt Status Register
+ *
+ * Description:
+ * This register contains the status of all the interrupt sources. An interrupt will be generated only when
+ * the corresponding interrupt source is enabled in ZIP_INT_ENA.
+ */
+union cvmx_zip_int_reg {
+ uint64_t u64;
+ struct cvmx_zip_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63 : 54;
+ uint64_t doorbell1 : 1; /**< Doorbell 1 count has overflowed */
+ uint64_t doorbell0 : 1; /**< Doorbell 0 count has overflowed */
+ uint64_t reserved_3_7 : 5;
+ uint64_t ibdbe : 1; /**< IBUF Double Bit Error */
+ uint64_t ibsbe : 1; /**< IBUF Single Bit Error */
+ uint64_t fife : 1; /**< FIFO errors and the detailed status is in
+ ZIP_DEBUG0 */
+#else
+ uint64_t fife : 1;
+ uint64_t ibsbe : 1;
+ uint64_t ibdbe : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t doorbell0 : 1;
+ uint64_t doorbell1 : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_zip_int_reg_s cn68xx;
+ struct cvmx_zip_int_reg_s cn68xxp1;
+};
+typedef union cvmx_zip_int_reg cvmx_zip_int_reg_t;
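Tying ZIP_INT_ENA and ZIP_INT_REG together: a hedged sketch that unmasks the
doorbell-overflow sources and then acknowledges anything already pending. The
write-1-to-clear handling of ZIP_INT_REG is an assumption inferred from the
way cvmx_zip_queue_initialize() below clears the doorbell bits:

    /* Sketch: enable doorbell-overflow interrupts and ack stale status. */
    cvmx_zip_int_ena_t ena;
    cvmx_zip_int_reg_t sta;

    ena.u64 = cvmx_read_csr(CVMX_ZIP_INT_ENA);
    ena.s.doorbell0 = 1;
    ena.s.doorbell1 = 1;
    cvmx_write_csr(CVMX_ZIP_INT_ENA, ena.u64);

    sta.u64 = cvmx_read_csr(CVMX_ZIP_INT_REG);
    cvmx_write_csr(CVMX_ZIP_INT_REG, sta.u64);  /* assumed W1C ack */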
+
+/**
+ * cvmx_zip_que#_buf
+ *
+ * NOTE: Fields NEXEC and SYNCFLUSH_CAPABLE are only valid on O68 2.0 and later chips.
+ *
+ *
+ * ZIP_QUEX_BUF = ZIP Queue Buffer Parameter Registers
+ *
+ * Description:
+ * These registers set the buffer parameters for the instruction queues. The size of the instruction buffer
+ * segments is measured in uint64s. The pool specifies which of the 8 free lists is used when freeing command
+ * buffer segments. The PTR field is overwritten with the next pointer each time that the command
+ * buffer segment is exhausted. When quiescent (i.e. outstanding doorbell count is 0), it is safe
+ * to rewrite this register to effectively reset the command buffer state machine. New commands
+ * will then be read from the newly specified command buffer pointer.
+ */
+union cvmx_zip_quex_buf {
+ uint64_t u64;
+ struct cvmx_zip_quex_buf_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63 : 6;
+ uint64_t dwb : 9; /**< Number of DontWriteBacks */
+ uint64_t pool : 3; /**< Free list used to free command buffer segments */
+ uint64_t size : 13; /**< Number of uint64s per command buffer segment */
+ uint64_t ptr : 33; /**< Initial command buffer pointer[39:7] (128B-aligned) */
+#else
+ uint64_t ptr : 33;
+ uint64_t size : 13;
+ uint64_t pool : 3;
+ uint64_t dwb : 9;
+ uint64_t reserved_58_63 : 6;
+#endif
+ } s;
+ struct cvmx_zip_quex_buf_s cn68xx;
+ struct cvmx_zip_quex_buf_s cn68xxp1;
+};
+typedef union cvmx_zip_quex_buf cvmx_zip_quex_buf_t;
+
+/**
+ * cvmx_zip_que#_ecc_err_sta
+ *
+ * ZIP_QUEX_ECC_ERR_STA = ZIP Queue ECC ERROR STATUS Register
+ *
+ * Description:
+ * This register contains the first ECC SBE/DBE status for the instruction buffer of a given zip instruction queue.
+ */
+union cvmx_zip_quex_ecc_err_sta {
+ uint64_t u64;
+ struct cvmx_zip_quex_ecc_err_sta_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63 : 29;
+ uint64_t wnum : 3; /**< Index of the first IWORD on which the error happened
+ (Valid when ZIP_INT_REG[IBDBE] or [IBSBE] is set). */
+ uint64_t inum : 32; /**< Index of the first instruction on which the error happened
+ (Valid when ZIP_INT_REG[IBDBE] or [IBSBE] is set). */
+#else
+ uint64_t inum : 32;
+ uint64_t wnum : 3;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } s;
+ struct cvmx_zip_quex_ecc_err_sta_s cn68xx;
+ struct cvmx_zip_quex_ecc_err_sta_s cn68xxp1;
+};
+typedef union cvmx_zip_quex_ecc_err_sta cvmx_zip_quex_ecc_err_sta_t;
+
+/**
+ * cvmx_zip_que#_map
+ *
+ * ZIP_QUEX_MAP = ZIP Queue Mapping Registers
+ *
+ * Description:
+ * These registers control how each instruction queue maps to 2 zip cores.
+ * Bit[0] corresponds to zip core 0 and bit[1] corresponds to zip core 1.
+ * A "1" means instructions from the queue can be served by the corresponding zip core.
+ */
+union cvmx_zip_quex_map {
+ uint64_t u64;
+ struct cvmx_zip_quex_map_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t zce : 2; /**< Zip Core Enable
+ Controls which zip cores can service this
+ logical instruction queue. Setting ZCE==0
+ effectively disables the queue from being served
+ (however instructions can still be fetched).
+ ZCE[1]=1: zip core 1 can serve the queue.
+ ZCE[0]=1: zip core 0 can serve the queue. */
+#else
+ uint64_t zce : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_zip_quex_map_s cn68xx;
+ struct cvmx_zip_quex_map_s cn68xxp1;
+};
+typedef union cvmx_zip_quex_map cvmx_zip_quex_map_t;
+
+/**
+ * cvmx_zip_que_ena
+ *
+ * ZIP_QUE_ENA = ZIP Queue Enable Register
+ *
+ * Description:
+ * If a queue is disabled, ZIP_CTL will stop fetching instructions from the queue.
+ */
+union cvmx_zip_que_ena {
+ uint64_t u64;
+ struct cvmx_zip_que_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t ena : 2; /**< Enables the logical instruction queues.
+ - 1: Queue is enabled. 0: Queue is disabled
+ ENA[1]=1 enables queue 1
+ ENA[0]=1 enables queue 0 */
+#else
+ uint64_t ena : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_zip_que_ena_s cn68xx;
+ struct cvmx_zip_que_ena_s cn68xxp1;
+};
+typedef union cvmx_zip_que_ena cvmx_zip_que_ena_t;
+
+/**
+ * cvmx_zip_que_pri
+ *
+ * ZIP_QUE_PRI = ZIP Queue Priority Register
+ *
+ * Description:
+ * This register defines the priority between instruction queue 1 and instruction queue 0.
+ * Bit[0] corresponds to queue 0 and bit[1] corresponds to queue 1. A "1" means high priority.
+ */
+union cvmx_zip_que_pri {
+ uint64_t u64;
+ struct cvmx_zip_que_pri_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t pri : 2; /**< Priority
+ 2'b10: Queue 1 has higher priority.
+ 2'b01: Queue 0 has higher priority.
+ 2'b11,2'b00: round robin */
+#else
+ uint64_t pri : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_zip_que_pri_s cn68xx;
+ struct cvmx_zip_que_pri_s cn68xxp1;
+};
+typedef union cvmx_zip_que_pri cvmx_zip_que_pri_t;
+
+/**
+ * cvmx_zip_throttle
+ *
+ * ZIP_THROTTLE = ZIP Throttle Register
+ *
+ * Description:
+ * This register controls the maximum number of in-flight X2I data fetch transactions. Values > 16 are illegal.
+ * Writing 0 to this register causes the ZIP module to temporarily suspend NCB accesses; it is not recommended
+ * for normal operation, but may be useful for diagnostics.
+ */
+union cvmx_zip_throttle {
+ uint64_t u64;
+ struct cvmx_zip_throttle_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t max_infl : 5; /**< Maximum number of in-flight data fetch transactions on
+ NCB. */
+#else
+ uint64_t max_infl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_zip_throttle_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63 : 60;
+ uint64_t max_infl : 4; /**< Maximum number of inflight data fetch transactions
+ on NCB. */
+#else
+ uint64_t max_infl : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn61xx;
+ struct cvmx_zip_throttle_cn61xx cn63xx;
+ struct cvmx_zip_throttle_cn61xx cn63xxp1;
+ struct cvmx_zip_throttle_cn61xx cn66xx;
+ struct cvmx_zip_throttle_s cn68xx;
+ struct cvmx_zip_throttle_s cn68xxp1;
+};
+typedef union cvmx_zip_throttle cvmx_zip_throttle_t;
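Programming the throttle is a single field write. A sketch under the
constraints stated above (values > 16 are illegal; writing 0 suspends NCB
accesses and is meant for diagnostics only); the value 8 is an arbitrary
example:

    /* Sketch: cap ZIP at 8 in-flight NCB data fetch transactions. */
    cvmx_zip_throttle_t throttle;
    throttle.u64 = 0;
    throttle.s.max_infl = 8;    /* must be <= 16; 0 suspends NCB access */
    cvmx_write_csr(CVMX_ZIP_THROTTLE, throttle.u64);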
+
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-zip-defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-zip.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-zip.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-zip.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,235 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Source file for the zip (deflate) block
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#include "executive-config.h"
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-cmd-queue.h"
+#include "cvmx-zip.h"
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+
+/**
+ * Initialize the ZIP block
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_initialize(void)
+{
+ cvmx_zip_cmd_buf_t zip_cmd_buf;
+ cvmx_cmd_queue_result_t result;
+ result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_ZIP, 0,
+ CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
+ if (result != CVMX_CMD_QUEUE_SUCCESS)
+ return -1;
+
+ zip_cmd_buf.u64 = 0;
+ zip_cmd_buf.s.dwb = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
+ zip_cmd_buf.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
+ zip_cmd_buf.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
+ zip_cmd_buf.s.ptr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_ZIP))>>7;
+ cvmx_write_csr(CVMX_ZIP_CMD_BUF, zip_cmd_buf.u64);
+ cvmx_write_csr(CVMX_ZIP_ERROR, 1);
+ cvmx_read_csr(CVMX_ZIP_CMD_BUF); /* Read to make sure setup is complete */
+ return 0;
+}
+
+/**
+ * Initialize the ZIP QUEUE buffer
+ *
+ * @param queue : ZIP instruction queue
+ * @param zcoremask : ZIP coremask to use for this queue
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_queue_initialize(int queue, int zcoremask)
+{
+ cvmx_zip_quex_buf_t zip_que_buf;
+ cvmx_cmd_queue_result_t result;
+ cvmx_zip_quex_map_t que_map;
+ cvmx_zip_que_ena_t que_ena;
+ cvmx_zip_int_reg_t int_reg;
+
+ /* Previous Octeon models have only one instruction queue; call
+ cvmx_zip_initialize() to initialize the ZIP block */
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return -1;
+
+ result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_ZIP_QUE(queue), 0,
+ CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
+ if (result != CVMX_CMD_QUEUE_SUCCESS)
+ return -1;
+
+ /* 1. Program ZIP_QUE0/1_BUF to have the correct buffer pointer and
+ size configured for each instruction queue */
+ zip_que_buf.u64 = 0;
+ zip_que_buf.s.dwb = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
+ zip_que_buf.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
+ zip_que_buf.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
+ zip_que_buf.s.ptr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_ZIP_QUE(queue)))>>7;
+ cvmx_write_csr(CVMX_ZIP_QUEX_BUF(queue), zip_que_buf.u64);
+
+ /* 2. Change the queue-to-ZIP core mapping by programming ZIP_QUE0/1_MAP. */
+ que_map.u64 = cvmx_read_csr(CVMX_ZIP_QUEX_MAP(queue));
+ que_map.s.zce = zcoremask;
+ cvmx_write_csr(CVMX_ZIP_QUEX_MAP(queue), que_map.u64);
+
+ /* Enable the queue */
+ que_ena.u64 = cvmx_read_csr(CVMX_ZIP_QUE_ENA);
+ que_ena.s.ena |= (1<<queue);
+ cvmx_write_csr(CVMX_ZIP_QUE_ENA, que_ena.u64);
+
+ /* Use round robin to have equal priority for each instruction queue */
+ cvmx_write_csr(CVMX_ZIP_QUE_PRI, 0x3);
+
+ int_reg.u64 = cvmx_read_csr(CVMX_ZIP_INT_REG);
+ if (queue)
+ int_reg.s.doorbell1 = 1;
+ else
+ int_reg.s.doorbell0 = 1;
+
+ cvmx_write_csr(CVMX_ZIP_INT_REG, int_reg.u64);
+ /* Read back to make sure the setup is complete */
+ cvmx_read_csr(CVMX_ZIP_QUEX_BUF(queue));
+ return 0;
+}
+
+/**
+ * Shutdown the ZIP block. ZIP must be idle when
+ * this function is called.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_shutdown(void)
+{
+ cvmx_zip_cmd_ctl_t zip_cmd_ctl;
+
+ if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_ZIP))
+ {
+ cvmx_dprintf("ERROR: cvmx_zip_shutdown: ZIP not idle.\n");
+ return -1;
+ }
+
+ zip_cmd_ctl.u64 = cvmx_read_csr(CVMX_ZIP_CMD_CTL);
+ zip_cmd_ctl.s.reset = 1;
+ cvmx_write_csr(CVMX_ZIP_CMD_CTL, zip_cmd_ctl.u64);
+ cvmx_wait(100);
+
+ cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_ZIP);
+ return 0;
+}
+
+/**
+ * Shutdown the ZIP block for a queue. ZIP must be idle when
+ * this function is called.
+ *
+ * @param queue Zip instruction queue of the command
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_queue_shutdown(int queue)
+{
+ cvmx_zip_cmd_ctl_t zip_cmd_ctl;
+
+ if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_ZIP_QUE(queue)))
+ {
+ cvmx_dprintf("ERROR: cvmx_zip_shutdown: ZIP not idle.\n");
+ return -1;
+ }
+
+ zip_cmd_ctl.u64 = cvmx_read_csr(CVMX_ZIP_CMD_CTL);
+ zip_cmd_ctl.s.reset = 1;
+ cvmx_write_csr(CVMX_ZIP_CMD_CTL, zip_cmd_ctl.u64);
+ cvmx_wait(100);
+
+ cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_ZIP_QUE(queue));
+ return 0;
+}
+
+/**
+ * Submit a command to the ZIP block
+ *
+ * @param command Zip command to submit
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_submit(cvmx_zip_command_t *command)
+{
+ cvmx_cmd_queue_result_t result = cvmx_cmd_queue_write(CVMX_CMD_QUEUE_ZIP, 1, 8, command->u64);
+ if (result == CVMX_CMD_QUEUE_SUCCESS)
+ cvmx_write_csr(CVMX_ADDR_DID(CVMX_FULL_DID(7, 0)), 8);
+ return result;
+}
+
+/**
+ * Submit a command to the ZIP block
+ *
+ * @param command Zip command to submit
+ * @param queue Zip instruction queue of the command
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_queue_submit(cvmx_zip_command_t *command, int queue)
+{
+ cvmx_cmd_queue_result_t result = cvmx_cmd_queue_write(CVMX_CMD_QUEUE_ZIP_QUE(queue), 1, 8, command->u64);
+ if (result == CVMX_CMD_QUEUE_SUCCESS)
+ cvmx_write_csr((CVMX_ADDR_DID(CVMX_FULL_DID(7, 0)) | queue << 3), 8);
+ return result;
+}
+
+#endif
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-zip.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-zip.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-zip.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-zip.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,283 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Header file for the zip (deflate) block
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+#ifndef __CVMX_ZIP_H__
+#define __CVMX_ZIP_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef union {
+ uint64_t u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused : 5;
+ uint64_t full_block_write : 1;
+ uint64_t no_l2_alloc : 1;
+ uint64_t little_endian : 1;
+ uint64_t length : 16;
+ uint64_t ptr : 40;
+#else
+ uint64_t ptr : 40;
+ uint64_t length : 16;
+ uint64_t little_endian : 1;
+ uint64_t no_l2_alloc : 1;
+ uint64_t full_block_write : 1;
+ uint64_t unused : 5;
+#endif
+ } s;
+} cvmx_zip_ptr_t;
+#define CVMX_ZIP_PTR_MAX_LEN ((1 << 16) - 1)
+
+
+typedef enum {
+ CVMX_ZIP_COMPLETION_NOTDONE = 0,
+ CVMX_ZIP_COMPLETION_SUCCESS = 1,
+ CVMX_ZIP_COMPLETION_OTRUNC = 2,
+ CVMX_ZIP_COMPLETION_STOP = 3,
+ CVMX_ZIP_COMPLETION_ITRUNC = 4,
+ CVMX_ZIP_COMPLETION_RBLOCK = 5,
+ CVMX_ZIP_COMPLETION_NLEN = 6,
+ CVMX_ZIP_COMPLETION_BADCODE = 7,
+ CVMX_ZIP_COMPLETION_BADCODE2 = 8,
+ CVMX_ZIP_COMPLETION_ZERO_LEN = 9,
+ CVMX_ZIP_COMPLETION_PARITY = 10,
+ CVMX_ZIP_COMPLETION_FATAL = 11
+} cvmx_zip_completion_code_t;
+
+typedef union {
+ uint64_t u64[3];
+ struct {
+
+ // WORD 0
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t crc32 : 32;
+ uint64_t adler : 32;
+#else
+ uint64_t adler : 32;
+ uint64_t crc32 : 32;
+#endif
+
+ // WORD 1
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t totalbyteswritten : 32;
+ uint64_t totalbytesread : 32;
+#else
+ uint64_t totalbytesread : 32;
+ uint64_t totalbyteswritten : 32;
+#endif
+
+ // WORD 2
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t totalbitsprocessed : 32; // decompression only
+ uint64_t unused20 : 5;
+ uint64_t exnum : 3; // compression only
+ uint64_t unused21 : 1;
+ uint64_t exbits : 7; // compression only
+ uint64_t unused22 : 7;
+ uint64_t eof : 1; // decompression only
+ cvmx_zip_completion_code_t completioncode : 8; // If polling, SW should set this to zero and wait for non-zero
+#else
+ cvmx_zip_completion_code_t completioncode : 8; // If polling, SW should set this to zero and wait for non-zero
+ uint64_t eof : 1; // decompression only
+ uint64_t unused22 : 7;
+ uint64_t exbits : 7; // compression only
+ uint64_t unused21 : 1;
+ uint64_t exnum : 3; // compression only
+ uint64_t unused20 : 5;
+ uint64_t totalbitsprocessed : 32; // decompression only
+#endif
+ } s;
+} cvmx_zip_result_t;
+
+typedef union {
+ uint64_t u64[8];
+ struct {
+
+ // WORD 0
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused00 : 8;
+ uint64_t totaloutputlength : 24;
+ uint64_t unused01 : 5;
+ uint64_t exnum : 3;
+ uint64_t unused02 : 1;
+ uint64_t exbits : 7;
+ uint64_t unused03 : 4;
+ uint64_t flush : 1;
+ uint64_t speed : 2;
+ uint64_t forcefixed : 1;
+ uint64_t forcedynamic : 1;
+ uint64_t eof : 1;
+ uint64_t bof : 1;
+ uint64_t compress : 1;
+ uint64_t unused04 : 1;
+ uint64_t dscatter : 1;
+ uint64_t dgather : 1;
+ uint64_t hgather : 1;
+#else
+ uint64_t hgather : 1;
+ uint64_t dgather : 1;
+ uint64_t dscatter : 1;
+ uint64_t unused04 : 1;
+ uint64_t compress : 1;
+ uint64_t bof : 1;
+ uint64_t eof : 1;
+ uint64_t forcedynamic : 1;
+ uint64_t forcefixed : 1;
+ uint64_t speed : 2;
+ uint64_t flush : 1;
+ uint64_t unused03 : 4;
+ uint64_t exbits : 7;
+ uint64_t unused02 : 1;
+ uint64_t exnum : 3;
+ uint64_t unused01 : 5;
+ uint64_t totaloutputlength : 24;
+ uint64_t unused00 : 8;
+#endif
+
+ // WORD 1
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t historylength : 16;
+ uint64_t unused10 : 16;
+ uint64_t adler32 : 32;
+#else
+ uint64_t adler32 : 32;
+ uint64_t unused10 : 16;
+ uint64_t historylength : 16;
+#endif
+
+ // WORD 2
+ cvmx_zip_ptr_t ctx_ptr;
+
+ // WORD 3
+ cvmx_zip_ptr_t hist_ptr;
+
+ // WORD 4
+ cvmx_zip_ptr_t in_ptr;
+
+ // WORD 5
+ cvmx_zip_ptr_t out_ptr;
+
+ // WORD 6
+ cvmx_zip_ptr_t result_ptr;
+
+ // WORD 7
+ cvmx_zip_ptr_t wq_ptr;
+
+ } s;
+} cvmx_zip_command_t;
+
+
+/**
+ * Initialize the ZIP block
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_initialize(void);
+
+/**
+ * Initialize the ZIP QUEUE buffer
+ *
+ * @param queue : ZIP instruction queue
+ * @param zcoremask : ZIP coremask to use for this queue
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_queue_initialize(int queue, int zcoremask);
+
+/**
+ * Shutdown the ZIP block. ZIP must be idle when
+ * this function is called.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_shutdown(void);
+
+/**
+ * Shutdown the ZIP block for a queue. ZIP must be idle when
+ * this function is called.
+ *
+ * @param queue Zip instruction queue of the command
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_queue_shutdown(int queue);
+
+/**
+ * Submit a command to the ZIP block
+ *
+ * @param command Zip command to submit
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_submit(cvmx_zip_command_t *command);
+
+/**
+ * Submit a command to the ZIP block
+ *
+ * @param command Zip command to submit
+ * @param queue Zip instruction queue of the command
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_zip_queue_submit(cvmx_zip_command_t *command, int queue);
+
+/* CSR typedefs have been moved to cvmx-zip-defs.h */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ZIP_H__ */
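Putting the pieces of this header together: a hedged end-to-end sketch that
initializes the block, builds a single-shot deflate command over contiguous
buffers, submits it, and polls the result word as the completioncode comment
above prescribes. The buffers and lengths are hypothetical; cache-coherence
details around the polled result are omitted, and lengths must not exceed
CVMX_ZIP_PTR_MAX_LEN:

    #include <string.h>

    /* Sketch: compress one contiguous buffer with one ZIP command. */
    int example_zip_compress(void *in, int in_len, void *out, int out_len)
    {
        cvmx_zip_command_t cmd;
        volatile cvmx_zip_result_t result;

        if (cvmx_zip_initialize())
            return -1;

        memset(&cmd, 0, sizeof(cmd));
        result.u64[2] = 0;              /* completioncode = NOTDONE, then poll */

        cmd.s.compress = 1;             /* deflate rather than inflate */
        cmd.s.bof = 1;                  /* whole stream in one command */
        cmd.s.eof = 1;
        cmd.s.totaloutputlength = out_len;
        cmd.s.in_ptr.s.ptr = cvmx_ptr_to_phys(in);
        cmd.s.in_ptr.s.length = in_len;
        cmd.s.out_ptr.s.ptr = cvmx_ptr_to_phys(out);
        cmd.s.out_ptr.s.length = out_len;
        cmd.s.result_ptr.s.ptr = cvmx_ptr_to_phys((void *)&result);
        cmd.s.result_ptr.s.length = sizeof(result);

        if (cvmx_zip_submit(&cmd) != 0)
            return -1;

        while (result.s.completioncode == CVMX_ZIP_COMPLETION_NOTDONE)
            ;                           /* a real caller would bound this loop */

        if (result.s.completioncode != CVMX_ZIP_COMPLETION_SUCCESS)
            return -1;
        return (int)result.s.totalbyteswritten;
    }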
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-zip.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/cvmx-zone.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx-zone.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx-zone.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,174 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support library for the Zone Allocator.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-malloc.h"
+
+
+
+
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+cvmx_zone_t cvmx_zone_create_from_addr(char *name, uint32_t elem_size, uint32_t num_elem,
+ void* mem_ptr, uint64_t mem_size, uint32_t flags)
+{
+ cvmx_zone_t zone;
+ unsigned int i;
+
+ if ((unsigned long)mem_ptr & (sizeof(void *) -1))
+ {
+ return(NULL); //invalid alignment
+ }
+ if (mem_size < sizeof(struct cvmx_zone) + elem_size * num_elem)
+ {
+ return(NULL); // not enough room
+ }
+
+ zone = (cvmx_zone_t) ((char *)mem_ptr + elem_size * num_elem);
+ zone->elem_size = elem_size;
+ zone->num_elem = num_elem;
+ zone->name = name;
+ zone->align = 0; // not used
+ zone->baseptr = NULL;
+ zone->freelist = NULL;
+ zone->lock.value = CVMX_SPINLOCK_UNLOCKED_VAL;
+
+ zone->baseptr = (char *)mem_ptr;
+
+ for(i=0;i<num_elem;i++)
+ {
+ *(void **)(zone->baseptr + (i*elem_size)) = zone->freelist;
+ zone->freelist = (void *)(zone->baseptr + (i*elem_size));
+ }
+
+ return(zone);
+
+}
+
+cvmx_zone_t cvmx_zone_create_from_arena(char *name, uint32_t elem_size, uint32_t num_elem, uint32_t align, cvmx_arena_list_t arena_list, uint32_t flags)
+{
+ unsigned int i;
+ cvmx_zone_t zone;
+
+ zone = (cvmx_zone_t)cvmx_malloc(arena_list, sizeof(struct cvmx_zone));
+
+ if (NULL == zone)
+ {
+ return(NULL);
+ }
+ zone->elem_size = elem_size;
+ zone->num_elem = num_elem;
+ zone->name = name;
+ zone->align = align;
+ zone->baseptr = NULL;
+ zone->freelist = NULL;
+ zone->lock.value = CVMX_SPINLOCK_UNLOCKED_VAL;
+
+ zone->baseptr = (char *)cvmx_memalign(arena_list, align, num_elem * elem_size);
+ if (NULL == zone->baseptr)
+ {
+ return(NULL);
+ }
+
+ for(i=0;i<num_elem;i++)
+ {
+ *(void **)(zone->baseptr + (i*elem_size)) = zone->freelist;
+ zone->freelist = (void *)(zone->baseptr + (i*elem_size));
+ }
+
+ return(zone);
+
+}
+#endif
+
+
+
+void * cvmx_zone_alloc(cvmx_zone_t zone, uint32_t flags)
+{
+ cvmx_zone_t item;
+
+ assert(zone != NULL);
+ assert(zone->baseptr != NULL);
+ cvmx_spinlock_lock(&zone->lock);
+
+ item = (cvmx_zone_t)zone->freelist;
+ if(item != NULL)
+ {
+ zone->freelist = *(void **)item;
+ }
+ else
+ {
+// cvmx_dprintf("No more elements in zone %s\n", zone->name);
+ }
+
+ cvmx_spinlock_unlock(&zone->lock);
+ return(item);
+}
+
+void cvmx_zone_free(cvmx_zone_t zone, void *ptr)
+{
+
+ assert(zone != NULL);
+ assert(zone->baseptr != NULL);
+ assert((unsigned long)ptr - (unsigned long)zone->baseptr < zone->num_elem * zone->elem_size);
+
+ cvmx_spinlock_lock(&zone->lock);
+ *(void **)ptr = zone->freelist;
+ zone->freelist = ptr;
+ cvmx_spinlock_unlock(&zone->lock);
+}
+
+
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx-zone.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
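cvmx-zone.c above implements a classic intrusive free list: every free element's first machine word stores the address of the next free element, so the allocator needs no side metadata. The real code guards the list with zone->lock (a cvmx spinlock); the standalone, single-threaded sketch below strips that away to show just the pointer handling:

#include <stddef.h>

static void *freelist;  /* head of the intrusive free list */

/* Thread the free list through the elements, exactly as the loops in
 * cvmx_zone_create_from_addr()/_from_arena() do. */
static void freelist_init(char *base, size_t elem_size, unsigned num_elem)
{
    unsigned i;
    for (i = 0; i < num_elem; i++) {
        *(void **)(base + i * elem_size) = freelist;  /* link to old head */
        freelist = base + i * elem_size;              /* element becomes head */
    }
}

/* Pop = cvmx_zone_alloc() without the spinlock. */
static void *freelist_pop(void)
{
    void *item = freelist;
    if (item != NULL)
        freelist = *(void **)item;   /* next pointer lives in the element */
    return item;
}

/* Push = cvmx_zone_free() without the spinlock. */
static void freelist_push(void *item)
{
    *(void **)item = freelist;
    freelist = item;
}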
Added: trunk/sys/contrib/octeon-sdk/cvmx.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/cvmx.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/cvmx.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,110 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+/**
+ * @file
+ *
+ * Main Octeon executive header file (This should be the second header
+ * file included by an application).
+ *
+ * <hr>$Revision: 70030 $<hr>
+*/
+#ifndef __CVMX_H__
+#define __CVMX_H__
+
+/* Control whether simple executive applications use 1-1 TLB mappings to access physical
+** memory addresses. This must be disabled to allow large programs that use more than
+** the 0x10000000 - 0x20000000 virtual address range.
+**
+** The FreeBSD kernel ifdefs elsewhere should mean that this is never even checked,
+** and so does not need to be defined.
+*/
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
+#ifndef CVMX_USE_1_TO_1_TLB_MAPPINGS
+#define CVMX_USE_1_TO_1_TLB_MAPPINGS 1
+#endif
+#endif
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+ #ifndef CVMX_ENABLE_PARAMETER_CHECKING
+ #ifdef INVARIANTS
+ #define CVMX_ENABLE_PARAMETER_CHECKING 1
+ #else
+ #define CVMX_ENABLE_PARAMETER_CHECKING 0
+ #endif
+ #endif
+#else
+ #ifndef CVMX_ENABLE_PARAMETER_CHECKING
+ #define CVMX_ENABLE_PARAMETER_CHECKING 1
+ #endif
+#endif
+
+#ifndef CVMX_ENABLE_DEBUG_PRINTS
+#define CVMX_ENABLE_DEBUG_PRINTS 1
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "cvmx-platform.h"
+#include "cvmx-access.h"
+#include "cvmx-address.h"
+#include "cvmx-asm.h"
+#include "cvmx-packet.h"
+#include "cvmx-warn.h"
+#include "cvmx-sysinfo.h"
+#include "octeon-model.h"
+#include "cvmx-csr.h"
+#include "cvmx-utils.h"
+#include "cvmx-clock.h"
+#include "octeon-feature.h"
+
+#if defined(__mips__) && !defined(CVMX_BUILD_FOR_LINUX_HOST)
+#include "cvmx-access-native.h"
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/cvmx.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
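Because cvmx.h wraps each configuration knob in #ifndef, a build can override the defaults before the header is seen. A hedged sketch (the override values are illustrative; including cvmx-config.h first and then cvmx.h follows the include order used by cvmx-zone.c above):

/* Hypothetical application translation unit. */
#define CVMX_USE_1_TO_1_TLB_MAPPINGS 0   /* allow programs larger than the
                                            0x10000000-0x20000000 window */
#define CVMX_ENABLE_DEBUG_PRINTS     0   /* silence debug output */

#include "cvmx-config.h"
#include "cvmx.h"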
Added: trunk/sys/contrib/octeon-sdk/octeon-boot-info.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/octeon-boot-info.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/octeon-boot-info.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,226 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2011 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+/**
+ * @file
+ *
+ * Interface to Octeon boot structure
+ *
+ * <hr>$Revision: $<hr>
+ */
+
+#ifndef __OCTEON_BOOT_INFO_H__
+#define __OCTEON_BOOT_INFO_H__
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/types.h>
+#include <asm/octeon/cvmx-asm.h>
+#else
+#include "cvmx-asm.h"
+#endif
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This structure is accessed by the bootloader, the Linux kernel and the
+ * Linux user-space utility "bootoct".
+ *
+ * In the bootloader, this structure is accessed by assembly code in start.S,
+ * so any changes to content or size must be reflected there as well.
+ *
+ * This is placed at a fixed address in DRAM, so that cores can access it
+ * when they come out of reset. It is used to set up the minimal bootloader
+ * runtime environment (stack, but no heap; global data ptr) that is needed
+ * by the non-boot cores to set up the environment for the applications.
+ * The boot_info_addr is the address of a boot_info_block_t structure
+ * which contains more core-specific information.
+ *
+ * The Linux kernel and the Linux bootoct utility access this structure for
+ * implementing CPU hotplug functionality and booting of idle cores with SE
+ * apps respectively.
+ *
+ */
+typedef struct
+{
+ /* First stage address - in ram instead of flash */
+ uint64_t code_addr;
+ /* Setup code for application, NOT application entry point */
+ uint32_t app_start_func_addr;
+ /* k0 is used for global data - needs to be passed to other cores */
+ uint32_t k0_val;
+ /* Address of boot info block structure */
+ uint64_t boot_info_addr;
+ uint32_t flags; /* flags */
+ uint32_t pad;
+} boot_init_vector_t;
+
+#if defined(__ASM_GBL_DATA_H) /* defined above */
+/*
+ * Definition of a data structure to mimic the old u-boot gd_t data structure.
+ */
+#undef GD_TMP_STR_SIZE
+#define GD_TMP_STR_SIZE 32
+
+#define LINUX_APP_GLOBAL_DATA_MAGIC 0x221eb111476f410full
+#define LINUX_APP_GLOBAL_DATA_VERSION 2
+
+struct linux_app_global_data {
+ bd_t *bd;
+ unsigned long flags;
+ unsigned long baudrate;
+ unsigned long have_console; /* serial_init() was called */
+ uint64_t ram_size; /* RAM size */
+ uint64_t reloc_off; /* Relocation Offset */
+ unsigned long env_addr; /* Address of Environment struct */
+ unsigned long env_valid; /* Checksum of Environment valid? */
+ unsigned long cpu_clock_mhz; /* CPU clock speed in MHz */
+ unsigned long ddr_clock_mhz; /* DDR clock (not data rate!) in MHz */
+ unsigned long ddr_ref_hertz; /* DDR Ref clock Hertz */
+ int mcu_rev_maj;
+ int mcu_rev_min;
+ int console_uart;
+
+ /* EEPROM data structures as read from EEPROM or populated by other
+ * means on boards without an EEPROM
+ */
+ octeon_eeprom_board_desc_t board_desc;
+ octeon_eeprom_clock_desc_t clock_desc;
+ octeon_eeprom_mac_addr_t mac_desc;
+
+ void **jt; /* jump table, not used */
+ char *err_msg; /* pointer to error message to save
+ * until console is up. Not used.
+ */
+ union {
+ struct { /* Keep under 32 bytes! */
+ uint64_t magic;
+ uint32_t version;
+ uint32_t fdt_addr;
+ };
+ char tmp_str[GD_TMP_STR_SIZE];
+ };
+ unsigned long uboot_flash_address; /* Address of normal bootloader
+ * in flash
+ */
+ unsigned long uboot_flash_size; /* Size of normal bootloader */
+ uint64_t dfm_ram_size; /* DFM RAM size */
+};
+typedef struct linux_app_global_data linux_app_global_data_t;
+
+/* Flags for linux_app_global_data */
+#define LA_GD_FLG_RELOC 0x0001 /* Code was relocated to RAM */
+#define LA_GD_FLG_DEVINIT 0x0002 /* Devices have been initialized */
+#define LA_GD_FLG_SILENT 0x0004 /* Silent mode */
+#define LA_GD_FLG_CLOCK_DESC_MISSING 0x0008
+#define LA_GD_FLG_BOARD_DESC_MISSING 0x0010
+#define LA_GD_FLG_DDR_VERBOSE 0x0020
+#define LA_GD_FLG_DDR0_CLK_INITIALIZED 0x0040
+#define LA_GD_FLG_DDR1_CLK_INITIALIZED 0x0080
+#define LA_GD_FLG_DDR2_CLK_INITIALIZED 0x0100
+#define LA_GD_FLG_DDR3_CLK_INITIALIZED 0x0200
+#define LA_GD_FLG_FAILSAFE_MODE 0x0400 /* Use failsafe mode */
+#define LA_GD_FLG_DDR_TRACE_INIT 0x0800
+#define LA_GD_FLG_DFM_CLK_INITIALIZED 0x1000
+#define LA_GD_FLG_DFM_VERBOSE 0x2000
+#define LA_GD_FLG_DFM_TRACE_INIT 0x4000
+#define LA_GD_FLG_MEMORY_PRESERVED 0x8000
+#define LA_GD_FLG_RAM_RESIDENT 0x10000 /* RAM boot detected */
+#endif /* __ASM_GBL_DATA_H */
+
+/*
+ * Definition of a data structure setup by the bootloader to enable Linux to
+ * launch SE apps on idle cores.
+ */
+
+struct linux_app_boot_info
+{
+ uint32_t labi_signature;
+ uint32_t start_core0_addr;
+ uint32_t avail_coremask;
+ uint32_t pci_console_active;
+ uint32_t icache_prefetch_disable;
+ uint64_t InitTLBStart_addr;
+ uint32_t start_app_addr;
+ uint32_t cur_exception_base;
+ uint32_t no_mark_private_data;
+ uint32_t compact_flash_common_base_addr;
+ uint32_t compact_flash_attribute_base_addr;
+ uint32_t led_display_base_addr;
+#if defined(__ASM_GBL_DATA_H) /* defined above */
+ linux_app_global_data_t gd;
+#endif
+};
+typedef struct linux_app_boot_info linux_app_boot_info_t;
+
+#endif
+
+/* To avoid copying a lot of the bootloader's structures, only the offset
+ of the requested member is given here */
+#define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK 0x765c
+
+/* hardcoded in bootloader */
+#define LABI_ADDR_IN_BOOTLOADER 0x700
+
+#define LINUX_APP_BOOT_BLOCK_NAME "linux-app-boot"
+
+#define LABI_SIGNATURE 0xAABBCC01
+
+/* from uboot-headers/octeon_mem_map.h */
+#if defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CVMX_BUILD_FOR_TOOLCHAIN)
+#define EXCEPTION_BASE_INCR (4 * 1024)
+#endif
+
+/* Increment size for exception base addresses (4k minimum) */
+#define EXCEPTION_BASE_BASE 0
+#define BOOTLOADER_PRIV_DATA_BASE (EXCEPTION_BASE_BASE + 0x800)
+#define BOOTLOADER_BOOT_VECTOR (BOOTLOADER_PRIV_DATA_BASE)
+#define BOOTLOADER_DEBUG_TRAMPOLINE (BOOTLOADER_BOOT_VECTOR + BOOT_VECTOR_SIZE) /* WORD */
+#define BOOTLOADER_DEBUG_TRAMPOLINE_CORE (BOOTLOADER_DEBUG_TRAMPOLINE + 4) /* WORD */
+
+#define OCTEON_EXCEPTION_VECTOR_BLOCK_SIZE (CVMX_MAX_CORES*EXCEPTION_BASE_INCR) /* 32 4k blocks */
+#define BOOTLOADER_DEBUG_REG_SAVE_BASE (EXCEPTION_BASE_BASE + OCTEON_EXCEPTION_VECTOR_BLOCK_SIZE)
+
+#define BOOT_VECTOR_NUM_WORDS (8)
+#define BOOT_VECTOR_SIZE ((CVMX_MAX_CORES*4)*BOOT_VECTOR_NUM_WORDS)
+
+
+#endif /* __OCTEON_BOOT_INFO_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/octeon-boot-info.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
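The macros at the bottom of octeon-boot-info.h pin down the boot-vector layout: CVMX_MAX_CORES entries of BOOT_VECTOR_NUM_WORDS 32-bit words each, starting at BOOTLOADER_BOOT_VECTOR. A hedged sketch of the implied address arithmetic (whether consumers index the table exactly this way is an assumption; the header only fixes the layout):

#include "octeon-boot-info.h"

/* Address of core N's slot in the boot vector: each slot is
 * BOOT_VECTOR_NUM_WORDS (8) words of 4 bytes, i.e. 32 bytes per core. */
static unsigned long boot_vector_entry_addr(unsigned core)
{
    unsigned long slot_size = 4UL * BOOT_VECTOR_NUM_WORDS;
    return BOOTLOADER_BOOT_VECTOR + core * slot_size;
}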
Added: trunk/sys/contrib/octeon-sdk/octeon-feature.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/octeon-feature.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/octeon-feature.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,147 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * File defining checks for different Octeon features.
+ *
+ * <hr>$Revision: 1 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/init.h>
+
+#include <asm/octeon/octeon.h>
+#else
+#include "cvmx.h"
+#endif
+
+CVMX_SHARED uint8_t octeon_feature_map[FEATURE_MAP_SIZE] __attribute__((aligned(128)));
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(octeon_feature_map);
+#else
+#define __init
+#endif
+
+/*
+ * Set the bit in octeon_feature_map for feature.
+ *
+ * @param feature
+ * @return 0 for success and nonzero for error.
+ */
+static int __init octeon_feature_set(octeon_feature_t feature)
+{
+ int bit, byte;
+
+ byte = feature >> 3;
+ bit = feature & 0x7;
+ octeon_feature_map[byte] |= (((uint8_t)1) << bit);
+
+ return 0;
+}
+
+void __init octeon_feature_init(void)
+{
+ octeon_feature_result_t val;
+
+ /*
+ * Check feature map size
+ */
+ if (OCTEON_MAX_FEATURE > (FEATURE_MAP_SIZE * 8 - 1))
+ {
+ val = OCTEON_FEATURE_MAP_OVERFLOW;
+ goto feature_check;
+ }
+
+ /*
+ * Feature settings
+ */
+#define OCTEON_FEATURE_SET(feature_x) \
+ if (old_octeon_has_feature(feature_x)) \
+ octeon_feature_set(feature_x)
+
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_SAAD);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_ZIP);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_CRYPTO);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_DORM_CRYPTO);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_PCIE);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_SRIO);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_ILK);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_KEY_MEMORY);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_LED_CONTROLLER);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_TRA);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_MGMT_PORT);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_RAID);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_USB);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_NO_WPTR);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_DFA);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_MDIO_CLAUSE_45);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_NPEI);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_PKND);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_CN68XX_WQE);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_HFA);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_DFM);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_CIU2);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_DICI_MODE);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_BIT_EXTRACTOR);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_NAND);
+ OCTEON_FEATURE_SET(OCTEON_FEATURE_MMC);
+
+ val = OCTEON_FEATURE_SUCCESS;
+
+feature_check:
+
+ if (val != OCTEON_FEATURE_SUCCESS)
+ {
+ cvmx_dprintf("octeon_feature_init(): ");
+ switch (val)
+ {
+ case OCTEON_FEATURE_MAP_OVERFLOW:
+ cvmx_dprintf("feature map overflow.\n");
+ break;
+ default:
+ cvmx_dprintf("unknown error %d.\n", val);
+ break;
+ }
+#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(__U_BOOT__) && !defined(CVMX_BUILD_FOR_TOOLCHAIN) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+ exit (1);
+#endif
+ }
+}
Property changes on: trunk/sys/contrib/octeon-sdk/octeon-feature.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
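octeon_feature_set() above packs one bit per feature into octeon_feature_map[]: feature N lands in byte N/8, bit N%8. A standalone illustration of that byte/bit split, with the matching test that octeon-feature.h performs:

#include <stdint.h>

static uint8_t map[16];   /* stand-in for octeon_feature_map[] */

static void bitmap_set(unsigned n)
{
    map[n >> 3] |= (uint8_t)1 << (n & 0x7);   /* byte = n/8, bit = n%8 */
}

static int bitmap_test(unsigned n)
{
    return (map[n >> 3] >> (n & 0x7)) & 1;    /* mirror of octeon_has_feature */
}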
Added: trunk/sys/contrib/octeon-sdk/octeon-feature.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/octeon-feature.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/octeon-feature.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,318 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * File defining checks for different Octeon features.
+ *
+ * <hr>$Revision: 30468 $<hr>
+ */
+
+#ifndef __OCTEON_FEATURE_H__
+#define __OCTEON_FEATURE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Errors
+ */
+typedef enum
+{
+ OCTEON_FEATURE_SUCCESS = 0,
+ OCTEON_FEATURE_MAP_OVERFLOW = -1,
+} octeon_feature_result_t;
+
+/*
+ * Octeon models are declared after the macros in octeon-model.h with the
+ * suffix _FEATURE. The individual features are declared with the
+ * _FEATURE_ infix.
+ */
+typedef enum
+{
+ /*
+ * Checks on the critical path are moved to the top (the first 8
+ * positions) so that the compiler generates one less instruction
+ * than for the rest of the checks.
+ */
+ OCTEON_FEATURE_PKND, /**< CN68XX uses port kinds for packet interface */
+ OCTEON_FEATURE_CN68XX_WQE, /**< CN68XX has different fields in word0 - word2 */
+
+ /*
+ * Features
+ */
+ OCTEON_FEATURE_SAAD, /**< Octeon models in the CN5XXX family and higher support atomic add instructions to memory (saa/saad) */
+ OCTEON_FEATURE_ZIP, /**< Does this Octeon support the ZIP offload engine? */
+ OCTEON_FEATURE_CRYPTO, /**< Does this Octeon support crypto acceleration using COP2? */
+ OCTEON_FEATURE_DORM_CRYPTO, /**< Can crypto be enabled by calling cvmx_crypto_dormant_enable()? */
+ OCTEON_FEATURE_PCIE, /**< Does this Octeon support PCI express? */
+ OCTEON_FEATURE_SRIO, /**< Does this Octeon support SRIO */
+ OCTEON_FEATURE_ILK, /**< Does this Octeon support Interlaken */
+ OCTEON_FEATURE_KEY_MEMORY, /**< Some Octeon models support internal memory for storing cryptographic keys */
+ OCTEON_FEATURE_LED_CONTROLLER, /**< Octeon has a LED controller for banks of external LEDs */
+ OCTEON_FEATURE_TRA, /**< Octeon has a trace buffer */
+ OCTEON_FEATURE_MGMT_PORT, /**< Octeon has a management port */
+ OCTEON_FEATURE_RAID, /**< Octeon has a raid unit */
+ OCTEON_FEATURE_USB, /**< Octeon has a builtin USB */
+ OCTEON_FEATURE_NO_WPTR, /**< Octeon IPD can run without using work queue entries */
+ OCTEON_FEATURE_DFA, /**< Octeon has DFA state machines */
+ OCTEON_FEATURE_MDIO_CLAUSE_45, /**< Octeon MDIO block supports clause 45 transactions for 10 Gig support */
+ OCTEON_FEATURE_NPEI, /**< CN52XX and CN56XX used a block named NPEI for PCIe access. Newer chips replaced this with SLI+DPI */
+ OCTEON_FEATURE_HFA, /**< Octeon has DFA/HFA */
+ OCTEON_FEATURE_DFM, /**< Octeon has DFM */
+ OCTEON_FEATURE_CIU2, /**< Octeon has CIU2 */
+ OCTEON_FEATURE_DICI_MODE, /**< Octeon has DMA Instruction Completion Interrupt mode */
+ OCTEON_FEATURE_BIT_EXTRACTOR, /**< Octeon has Bit Select Extractor scheduler */
+ OCTEON_FEATURE_NAND, /**< Octeon has NAND */
+ OCTEON_FEATURE_MMC, /**< Octeon has built-in MMC support */
+ OCTEON_MAX_FEATURE
+} octeon_feature_t;
+
+/**
+ * Determine if the current Octeon supports a specific feature. These
+ * checks have been optimized to be fairly quick, but they should still
+ * be kept out of fast path code.
+ *
+ * @param feature Feature to check for. This should always be a constant so the
+ * compiler can remove the switch statement through optimization.
+ *
+ * @return Non zero if the feature exists. Zero if the feature does not
+ * exist.
+ *
+ * Note: this was octeon_has_feature() before the feature map was added; it
+ * is called only after model checking is set up in octeon_feature_init().
+ */
+static inline int old_octeon_has_feature(octeon_feature_t feature)
+{
+ switch (feature)
+ {
+ case OCTEON_FEATURE_SAAD:
+ return !OCTEON_IS_MODEL(OCTEON_CN3XXX);
+
+ case OCTEON_FEATURE_ZIP:
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ return 0;
+ else
+ return !cvmx_fuse_read(121);
+
+ case OCTEON_FEATURE_CRYPTO:
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ cvmx_mio_fus_dat2_t fus_2;
+ fus_2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
+ if (fus_2.s.nocrypto || fus_2.s.nomul) {
+ return 0;
+ } else if (!fus_2.s.dorm_crypto) {
+ return 1;
+ } else {
+ cvmx_rnm_ctl_status_t st;
+ st.u64 = cvmx_read_csr(CVMX_RNM_CTL_STATUS);
+ return st.s.eer_val;
+ }
+ } else {
+ return !cvmx_fuse_read(90);
+ }
+
+ case OCTEON_FEATURE_DORM_CRYPTO:
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
+ cvmx_mio_fus_dat2_t fus_2;
+ fus_2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
+ return !fus_2.s.nocrypto && !fus_2.s.nomul && fus_2.s.dorm_crypto;
+ } else {
+ return 0;
+ }
+
+ case OCTEON_FEATURE_PCIE:
+ return (OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX)
+ || OCTEON_IS_MODEL(OCTEON_CNF7XXX));
+
+ case OCTEON_FEATURE_SRIO:
+ return (OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX));
+
+ case OCTEON_FEATURE_ILK:
+ return (OCTEON_IS_MODEL(OCTEON_CN68XX));
+
+ case OCTEON_FEATURE_KEY_MEMORY:
+ return (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)
+ || OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX)
+ || OCTEON_IS_MODEL(OCTEON_CNF7XXX));
+
+ case OCTEON_FEATURE_LED_CONTROLLER:
+ return OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX);
+
+ case OCTEON_FEATURE_TRA:
+ return !(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX));
+ case OCTEON_FEATURE_MGMT_PORT:
+ return (OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX));
+
+ case OCTEON_FEATURE_RAID:
+ return OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN6XXX);
+
+ case OCTEON_FEATURE_USB:
+ return !(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX));
+
+ case OCTEON_FEATURE_NO_WPTR:
+ return ((OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX)
+ || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
+ && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+ && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X));
+
+ case OCTEON_FEATURE_DFA:
+ if (!OCTEON_IS_MODEL(OCTEON_CN38XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX) && !OCTEON_IS_MODEL(OCTEON_CN58XX))
+ return 0;
+ else if (OCTEON_IS_MODEL(OCTEON_CN3020))
+ return 0;
+ else
+ return !cvmx_fuse_read(120);
+
+ case OCTEON_FEATURE_HFA:
+ if (!OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ return 0;
+ else
+ return !cvmx_fuse_read(90);
+
+ case OCTEON_FEATURE_DFM:
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ return 0;
+ else
+ return !cvmx_fuse_read(90);
+
+ case OCTEON_FEATURE_MDIO_CLAUSE_45:
+ return !(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX));
+
+ case OCTEON_FEATURE_NPEI:
+ return (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX));
+
+ case OCTEON_FEATURE_PKND:
+ return (OCTEON_IS_MODEL(OCTEON_CN68XX));
+
+ case OCTEON_FEATURE_CN68XX_WQE:
+ return (OCTEON_IS_MODEL(OCTEON_CN68XX));
+
+ case OCTEON_FEATURE_CIU2:
+ return (OCTEON_IS_MODEL(OCTEON_CN68XX));
+
+ case OCTEON_FEATURE_NAND:
+ return (OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN63XX)
+ || OCTEON_IS_MODEL(OCTEON_CN66XX)
+ || OCTEON_IS_MODEL(OCTEON_CN68XX));
+
+ case OCTEON_FEATURE_DICI_MODE:
+ return (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X)
+ || OCTEON_IS_MODEL(OCTEON_CN61XX)
+ || OCTEON_IS_MODEL(OCTEON_CNF71XX));
+
+ case OCTEON_FEATURE_BIT_EXTRACTOR:
+ return (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X)
+ || OCTEON_IS_MODEL(OCTEON_CN61XX)
+ || OCTEON_IS_MODEL(OCTEON_CNF71XX));
+
+ case OCTEON_FEATURE_MMC:
+ return (OCTEON_IS_MODEL(OCTEON_CN61XX)
+ || OCTEON_IS_MODEL(OCTEON_CNF71XX));
+ default:
+ break;
+ }
+ return 0;
+}
+
+/*
+ * bit map for octeon features
+ */
+#define FEATURE_MAP_SIZE 128
+extern uint8_t octeon_feature_map[FEATURE_MAP_SIZE];
+
+/*
+ * Answer ``Is the bit for feature set in the bitmap?''
+ * @param feature
+ * @return 1 when the feature is present and 0 otherwise, -1 in case of error.
+ */
+#if defined(__U_BOOT__) || defined(CVMX_BUILD_FOR_LINUX_HOST) || defined(CVMX_BUILD_FOR_TOOLCHAIN)
+#define octeon_has_feature old_octeon_has_feature
+#else
+#if defined(USE_RUNTIME_MODEL_CHECKS) || (defined(__FreeBSD__) && defined(_KERNEL))
+static inline int octeon_has_feature(octeon_feature_t feature)
+{
+ int byte, bit;
+ byte = feature >> 3;
+ bit = feature & 0x7;
+
+ if (byte >= FEATURE_MAP_SIZE)
+ {
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+ printk("ERROR: octeon_feature_map: Invalid Octeon Feature 0x%x\n", feature);
+#else
+ printf("ERROR: octeon_feature_map: Invalid Octeon Feature 0x%x\n", feature);
+#endif
+ return -1;
+ }
+
+ return (octeon_feature_map[byte] & ((1 << bit))) ? 1 : 0;
+}
+#else
+#define octeon_has_feature old_octeon_has_feature
+#endif
+#endif
+
+/*
+ * initialize octeon_feature_map[]
+ */
+extern void octeon_feature_init(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __OCTEON_FEATURE_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/octeon-feature.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
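As the header comment stresses, octeon_has_feature() should be called with a compile-time constant so the switch in old_octeon_has_feature() can be folded away. A hedged usage sketch (example_init_zip() is hypothetical; the guard itself mirrors the header's intent):

#include "octeon-feature.h"

extern void example_init_zip(void);   /* hypothetical driver hook */

void example_init(void)
{
    /* Constant argument, as the header's doc comment recommends, so the
     * check stays cheap and out of fast-path code. */
    if (octeon_has_feature(OCTEON_FEATURE_ZIP))
        example_init_zip();
}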
Added: trunk/sys/contrib/octeon-sdk/octeon-model.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/octeon-model.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/octeon-model.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,455 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * File defining functions for working with different Octeon
+ * models.
+ *
+ * <hr>$Revision: 70030 $<hr>
+ */
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-clock.h>
+#else
+#include "cvmx.h"
+#include "cvmx-pow.h"
+#include "cvmx-warn.h"
+#endif
+
+#if defined(CVMX_BUILD_FOR_LINUX_USER) || defined(CVMX_BUILD_FOR_STANDALONE) || defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+#include <octeon-app-init.h>
+#endif
+#include "cvmx-sysinfo.h"
+
+/**
+ * This function checks to see if the software is compatible with the
+ * chip it is running on. This is called in the application startup code
+ * and does not need to be called directly by the application.
+ * Does not return if software is incompatible, unless compiled for the
+ * FreeBSD kernel, in which case it returns -1.
+ *
+ * @param chip_id chip id that the software is being run on.
+ *
+ * @return 0: runtime checking or exact version match
+ * 1: chip is newer revision than compiled for, but software will run properly.
+ * -1: software is incompatible
+ */
+int octeon_model_version_check(uint32_t chip_id __attribute__ ((unused)))
+{
+ //printf("Model Number: %s\n", octeon_model_get_string(chip_id));
+#if !OCTEON_IS_COMMON_BINARY()
+ /* Check for special case of mismarked 3005 samples, and adjust cpuid */
+ if (chip_id == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
+ chip_id |= 0x10;
+
+ if ((OCTEON_MODEL & 0xffffff) != chip_id)
+ {
+ if (!OCTEON_IS_MODEL((OM_IGNORE_REVISION | chip_id)) || (OCTEON_MODEL & 0xffffff) > chip_id || (((OCTEON_MODEL & 0xffffff) ^ chip_id) & 0x10))
+ {
+ printf("ERROR: Software not configured for this chip\n"
+ " Expecting ID=0x%08x, Chip is 0x%08x\n", (OCTEON_MODEL & 0xffffff), (unsigned int)chip_id);
+ if ((OCTEON_MODEL & 0xffffff) > chip_id)
+ printf("Refusing to run on older revision than program was compiled for.\n");
+#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
+ exit(-1);
+#else
+ return(-1);
+#endif
+ }
+ else
+ {
+ printf("\n###################################################\n");
+ printf("WARNING: Software configured for older revision than running on.\n"
+ " Compiled for ID=0x%08x, Chip is 0x%08x\n", (OCTEON_MODEL & 0xffffff), (unsigned int)chip_id);
+ printf("###################################################\n\n");
+ return(1);
+ }
+ }
+#endif
+
+ cvmx_warn_if(CVMX_ENABLE_PARAMETER_CHECKING, "Parameter checks are enabled. Expect some performance loss due to the extra checking\n");
+ cvmx_warn_if(CVMX_ENABLE_CSR_ADDRESS_CHECKING, "CSR address checks are enabled. Expect some performance loss due to the extra checking\n");
+ cvmx_warn_if(CVMX_ENABLE_POW_CHECKS, "POW state checks are enabled. Expect some performance loss due to the extra checking\n");
+
+ return(0);
+}
+
+#endif
+/**
+ * Given the chip processor ID from COP0, this function returns a
+ * string representing the chip model number. The string is of the
+ * form CNXXXXpX.X-FREQ-SUFFIX.
+ * - XXXX = The chip model number
+ * - X.X = Chip pass number
+ * - FREQ = Current frequency in MHz
+ * - SUFFIX = NSP, EXP, SCP, SSP, or CP
+ *
+ * @param chip_id Chip ID
+ *
+ * @return Model string
+ */
+const char *octeon_model_get_string(uint32_t chip_id)
+{
+ static char buffer[32];
+ return octeon_model_get_string_buffer(chip_id,buffer);
+}
+
+/* Version of octeon_model_get_string() that takes the buffer as an argument;
+** when running early in u-boot, static/global variables don't work when
+** executing from flash.
+*/
+const char *octeon_model_get_string_buffer(uint32_t chip_id, char * buffer)
+{
+ const char * family;
+ const char * core_model;
+ char pass[4];
+#ifndef CVMX_BUILD_FOR_UBOOT
+ int clock_mhz;
+#endif
+ const char * suffix;
+ cvmx_l2d_fus3_t fus3;
+ int num_cores;
+ cvmx_mio_fus_dat2_t fus_dat2;
+ cvmx_mio_fus_dat3_t fus_dat3;
+ char fuse_model[10];
+ uint32_t fuse_data = 0;
+
+ fus3.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+ fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
+ fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+ num_cores = cvmx_pop(cvmx_read_csr(CVMX_CIU_FUSE));
+
+ /* Make sure the non-existent devices look disabled */
+ switch ((chip_id >> 8) & 0xff)
+ {
+ case 6: /* CN50XX */
+ case 2: /* CN30XX */
+ fus_dat3.s.nodfa_dte = 1;
+ fus_dat3.s.nozip = 1;
+ break;
+ case 4: /* CN57XX or CN56XX */
+ fus_dat3.s.nodfa_dte = 1;
+ break;
+ default:
+ break;
+ }
+
+ /* Make a guess at the suffix */
+ /* NSP = everything */
+ /* EXP = No crypto */
+ /* SCP = No DFA, No zip */
+ /* CP = No DFA, No crypto, No zip */
+ if (fus_dat3.s.nodfa_dte)
+ {
+ if (fus_dat2.s.nocrypto)
+ suffix = "CP";
+ else
+ suffix = "SCP";
+ }
+ else if (fus_dat2.s.nocrypto)
+ suffix = "EXP";
+ else
+ suffix = "NSP";
+
+ /* Assume pass number is encoded using <5:3><2:0>. Exceptions will be
+ fixed later */
+ sprintf(pass, "%d.%d", (int)((chip_id>>3)&7)+1, (int)chip_id&7);
+
+ /* Use the number of cores to determine the last 2 digits of the model
+ number. There are some exceptions that are fixed later */
+ switch (num_cores)
+ {
+ case 32: core_model = "80"; break;
+ case 24: core_model = "70"; break;
+ case 16: core_model = "60"; break;
+ case 15: core_model = "58"; break;
+ case 14: core_model = "55"; break;
+ case 13: core_model = "52"; break;
+ case 12: core_model = "50"; break;
+ case 11: core_model = "48"; break;
+ case 10: core_model = "45"; break;
+ case 9: core_model = "42"; break;
+ case 8: core_model = "40"; break;
+ case 7: core_model = "38"; break;
+ case 6: core_model = "34"; break;
+ case 5: core_model = "32"; break;
+ case 4: core_model = "30"; break;
+ case 3: core_model = "25"; break;
+ case 2: core_model = "20"; break;
+ case 1: core_model = "10"; break;
+ default: core_model = "XX"; break;
+ }
+
+ /* Now figure out the family, the first two digits */
+ switch ((chip_id >> 8) & 0xff)
+ {
+ case 0: /* CN38XX, CN37XX or CN36XX */
+ if (fus3.cn38xx.crip_512k)
+ {
+ /* For some unknown reason, the 16 core one is called 37 instead of 36 */
+ if (num_cores >= 16)
+ family = "37";
+ else
+ family = "36";
+ }
+ else
+ family = "38";
+ /* This series of chips didn't follow the standard pass numbering */
+ switch (chip_id & 0xf)
+ {
+ case 0: strcpy(pass, "1.X"); break;
+ case 1: strcpy(pass, "2.X"); break;
+ case 3: strcpy(pass, "3.X"); break;
+ default:strcpy(pass, "X.X"); break;
+ }
+ break;
+ case 1: /* CN31XX or CN3020 */
+ if ((chip_id & 0x10) || fus3.cn31xx.crip_128k)
+ family = "30";
+ else
+ family = "31";
+ /* This series of chips didn't follow the standard pass numbering */
+ switch (chip_id & 0xf)
+ {
+ case 0: strcpy(pass, "1.0"); break;
+ case 2: strcpy(pass, "1.1"); break;
+ default:strcpy(pass, "X.X"); break;
+ }
+ break;
+ case 2: /* CN3010 or CN3005 */
+ family = "30";
+ /* A chip with half cache is an 05 */
+ if (fus3.cn30xx.crip_64k)
+ core_model = "05";
+ /* This series of chips didn't follow the standard pass numbering */
+ switch (chip_id & 0xf)
+ {
+ case 0: strcpy(pass, "1.0"); break;
+ case 2: strcpy(pass, "1.1"); break;
+ default:strcpy(pass, "X.X"); break;
+ }
+ break;
+ case 3: /* CN58XX */
+ family = "58";
+ /* Special case. 4 core, half cache (CP with half cache) */
+ if ((num_cores == 4) && fus3.cn58xx.crip_1024k && !strncmp(suffix, "CP", 2))
+ core_model = "29";
+
+ /* Pass 1 uses different encodings for pass numbers */
+ if ((chip_id & 0xFF)< 0x8)
+ {
+ switch (chip_id & 0x3)
+ {
+ case 0: strcpy(pass, "1.0"); break;
+ case 1: strcpy(pass, "1.1"); break;
+ case 3: strcpy(pass, "1.2"); break;
+ default:strcpy(pass, "1.X"); break;
+ }
+ }
+ break;
+ case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */
+ if (fus_dat2.cn56xx.raid_en)
+ {
+ if (fus3.cn56xx.crip_1024k)
+ family = "55";
+ else
+ family = "57";
+ if (fus_dat2.cn56xx.nocrypto)
+ suffix = "SP";
+ else
+ suffix = "SSP";
+ }
+ else
+ {
+ if (fus_dat2.cn56xx.nocrypto)
+ suffix = "CP";
+ else
+ {
+ suffix = "NSP";
+ if (fus_dat3.s.nozip)
+ suffix = "SCP";
+
+ if (fus_dat3.s.bar2_en)
+ suffix = "NSPB2";
+ }
+ if (fus3.cn56xx.crip_1024k)
+ family = "54";
+ else
+ family = "56";
+ }
+ break;
+ case 6: /* CN50XX */
+ family = "50";
+ break;
+ case 7: /* CN52XX */
+ if (fus3.cn52xx.crip_256k)
+ family = "51";
+ else
+ family = "52";
+ break;
+ case 0x93: /* CN61XX */
+ family = "61";
+ if (fus_dat3.cn61xx.nozip)
+ suffix = "SCP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x90: /* CN63XX */
+ family = "63";
+ if (fus_dat3.s.l2c_crip == 2)
+ family = "62";
+ if (num_cores == 6) /* Other core counts match generic */
+ core_model = "35";
+ if (fus_dat2.cn63xx.nocrypto)
+ suffix = "CP";
+ else if (fus_dat2.cn63xx.dorm_crypto)
+ suffix = "DAP";
+ else if (fus_dat3.cn63xx.nozip)
+ suffix = "SCP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x92: /* CN66XX */
+ family = "66";
+ if (num_cores == 6) /* Other core counts match generic */
+ core_model = "35";
+ if (fus_dat2.cn66xx.nocrypto && fus_dat2.cn66xx.dorm_crypto)
+ suffix = "AP";
+ if (fus_dat2.cn66xx.nocrypto)
+ suffix = "CP";
+ else if (fus_dat2.cn66xx.dorm_crypto)
+ suffix = "DAP";
+ else if (fus_dat3.cn66xx.nozip && fus_dat2.cn66xx.raid_en)
+ suffix = "SCP";
+ else if (!fus_dat2.cn66xx.raid_en)
+ suffix = "HAP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x91: /* CN68XX */
+ family = "68";
+ if (fus_dat2.cn68xx.nocrypto && fus_dat3.cn68xx.nozip)
+ suffix = "CP";
+ else if (fus_dat2.cn68xx.dorm_crypto)
+ suffix = "DAP";
+ else if (fus_dat3.cn68xx.nozip)
+ suffix = "SCP";
+ else if (fus_dat2.cn68xx.nocrypto)
+ suffix = "SP";
+ else if (!fus_dat2.cn68xx.raid_en)
+ suffix = "HAP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x94: /* CNF71XX */
+ family = "F71";
+ if (fus_dat3.cnf71xx.nozip)
+ suffix = "SCP";
+ else
+ suffix = "AAP";
+ break;
+ default:
+ family = "XX";
+ core_model = "XX";
+ strcpy(pass, "X.X");
+ suffix = "XXX";
+ break;
+ }
+
+#ifndef CVMX_BUILD_FOR_UBOOT
+ clock_mhz = cvmx_clock_get_rate(CVMX_CLOCK_RCLK) / 1000000;
+#endif
+
+ if (family[0] != '3')
+ {
+ int fuse_base = 384/8;
+ if (family[0] == '6')
+ fuse_base = 832/8;
+
+ /* Check for model in fuses, overrides normal decode */
+ /* This is _not_ valid for Octeon CN3XXX models */
+ fuse_data |= cvmx_fuse_read_byte(fuse_base + 3);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(fuse_base + 2);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(fuse_base + 1);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(fuse_base);
+ if (fuse_data & 0x7ffff)
+ {
+ int model = fuse_data & 0x3fff;
+ int suffix = (fuse_data >> 14) & 0x1f;
+ if (suffix && model) /* Have both number and suffix in fuses, so use both */
+ {
+ sprintf(fuse_model, "%d%c",model, 'A' + suffix - 1);
+ core_model = "";
+ family = fuse_model;
+ }
+ else if (suffix && !model) /* Only have suffix, so add suffix to 'normal' model number */
+ {
+ sprintf(fuse_model, "%s%c", core_model, 'A' + suffix - 1);
+ core_model = fuse_model;
+ }
+ else /* Don't have suffix, so just use model from fuses */
+ {
+ sprintf(fuse_model, "%d",model);
+ core_model = "";
+ family = fuse_model;
+ }
+ }
+ }
+#ifdef CVMX_BUILD_FOR_UBOOT
+ sprintf(buffer, "CN%s%s-%s pass %s", family, core_model, suffix, pass);
+#else
+ sprintf(buffer, "CN%s%sp%s-%d-%s", family, core_model, pass, clock_mhz, suffix);
+#endif
+ return buffer;
+}
Property changes on: trunk/sys/contrib/octeon-sdk/octeon-model.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
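octeon_model_get_string() above turns a raw chip ID into the familiar "CNxxxxpX.X" model string. A hedged bring-up sketch (cvmx_get_proc_id() is the SDK's accessor for the COP0 processor ID; treating it as the right source for chip_id here is an assumption about typical callers):

#include <stdint.h>
#include <stdio.h>
#include "cvmx.h"

void example_print_model(void)
{
    uint32_t chip_id = cvmx_get_proc_id();   /* COP0 PrID register */
    printf("Running on %s\n", octeon_model_get_string(chip_id));
}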
Added: trunk/sys/contrib/octeon-sdk/octeon-model.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/octeon-model.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/octeon-model.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,385 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * File defining different Octeon model IDs and macros to
+ * compare them.
+ *
+ * <hr>$Revision: 70338 $<hr>
+ */
+
+#ifndef __OCTEON_MODEL_H__
+#define __OCTEON_MODEL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* NOTE: These must match what is checked in common-config.mk */
+/* Defines to represent the different versions of Octeon. */
+
+
+/* IMPORTANT: When the default pass is updated for an Octeon Model,
+** the corresponding change must also be made in the oct-sim script. */
+
+
+/* The defines below should be used with the OCTEON_IS_MODEL() macro to
+** determine what model of chip the software is running on. Models ending
+** in 'XX' match multiple models (families), while specific models match only
+** that model. If a pass (revision) is specified, then only that revision
+** will be matched. When checking for both specific models and families,
+** take care that the specific models are checked for first.
+** While these defines are similar to the processor ID, they are not intended
+** to be used by anything other than the OCTEON_IS_MODEL framework, and
+** the values are subject to change at any time without notice.
+**
+** NOTE: only the OCTEON_IS_MODEL() macro/function and the OCTEON_CN* macros
+** should be used outside of this file. All other macros are for internal
+** use only, and may change without notice.
+*/
+
+#define OCTEON_FAMILY_MASK 0x00ffff00
+
+/* Flag bits in top byte */
+#define OM_IGNORE_REVISION 0x01000000 /* Ignores revision in model checks */
+#define OM_CHECK_SUBMODEL 0x02000000 /* Check submodels */
+#define OM_MATCH_PREVIOUS_MODELS 0x04000000 /* Match all models previous than the one specified */
+#define OM_IGNORE_MINOR_REVISION 0x08000000 /* Ignores the minor revision on newer parts */
+#define OM_FLAG_MASK 0xff000000
+
+#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000 /* Match all cn5XXX Octeon models. */
+#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000 /* Match all cn6XXX Octeon models. */
+#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000 /* Match all cnf7XXX Octeon models. */
+
+/*
+ * CNF7XXX models with new revision encoding
+ */
+#define OCTEON_CNF71XX_PASS1_0 0x000d9400
+
+#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+/*
+ * CN6XXX models with new revision encoding
+ */
+#define OCTEON_CN68XX_PASS1_0 0x000d9100
+#define OCTEON_CN68XX_PASS1_1 0x000d9101
+#define OCTEON_CN68XX_PASS1_2 0x000d9102
+#define OCTEON_CN68XX_PASS2_0 0x000d9108
+
+#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN68XX_PASS2_X (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN68XX_PASS1 OCTEON_CN68XX_PASS1_X
+#define OCTEON_CN68XX_PASS2 OCTEON_CN68XX_PASS2_X
+
+#define OCTEON_CN66XX_PASS1_0 0x000d9200
+#define OCTEON_CN66XX_PASS1_2 0x000d9202
+
+#define OCTEON_CN66XX (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN66XX_PASS1_X (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN63XX_PASS1_0 0x000d9000
+#define OCTEON_CN63XX_PASS1_1 0x000d9001
+#define OCTEON_CN63XX_PASS1_2 0x000d9002
+#define OCTEON_CN63XX_PASS2_0 0x000d9008
+#define OCTEON_CN63XX_PASS2_1 0x000d9009
+#define OCTEON_CN63XX_PASS2_2 0x000d900a
+
+#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN61XX_PASS1_0 0x000d9300
+
+#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+/*
+ * CN5XXX models with new revision encoding
+ */
+#define OCTEON_CN58XX_PASS1_0 0x000d0300
+#define OCTEON_CN58XX_PASS1_1 0x000d0301
+#define OCTEON_CN58XX_PASS1_2 0x000d0303
+#define OCTEON_CN58XX_PASS2_0 0x000d0308
+#define OCTEON_CN58XX_PASS2_1 0x000d0309
+#define OCTEON_CN58XX_PASS2_2 0x000d030a
+#define OCTEON_CN58XX_PASS2_3 0x000d030b
+
+#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
+#define OCTEON_CN58XX_PASS2 OCTEON_CN58XX_PASS2_X
+
+#define OCTEON_CN56XX_PASS1_0 0x000d0400
+#define OCTEON_CN56XX_PASS1_1 0x000d0401
+#define OCTEON_CN56XX_PASS2_0 0x000d0408
+#define OCTEON_CN56XX_PASS2_1 0x000d0409
+
+#define OCTEON_CN56XX (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN56XX_PASS1_X (OCTEON_CN56XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS2_X (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS1 OCTEON_CN56XX_PASS1_X
+#define OCTEON_CN56XX_PASS2 OCTEON_CN56XX_PASS2_X
+
+#define OCTEON_CN57XX OCTEON_CN56XX
+#define OCTEON_CN57XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN57XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN55XX OCTEON_CN56XX
+#define OCTEON_CN55XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN55XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN54XX OCTEON_CN56XX
+#define OCTEON_CN54XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN54XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN50XX_PASS1_0 0x000d0600
+
+#define OCTEON_CN50XX (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN50XX_PASS1_X (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN50XX_PASS1 OCTEON_CN50XX_PASS1_X
+
+/* NOTE: Octeon CN5000F model is not identifiable using the OCTEON_IS_MODEL()
+ functions, but is treated as CN50XX */
+
+#define OCTEON_CN52XX_PASS1_0 0x000d0700
+#define OCTEON_CN52XX_PASS2_0 0x000d0708
+
+#define OCTEON_CN52XX (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN52XX_PASS1_X (OCTEON_CN52XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS2_X (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS1 OCTEON_CN52XX_PASS1_X
+#define OCTEON_CN52XX_PASS2 OCTEON_CN52XX_PASS2_X
+
+/*
+ * CN3XXX models with old revision encoding
+ */
+//#define OCTEON_CN38XX_PASS1 0x000d0000 // is not supported
+#define OCTEON_CN38XX_PASS2 0x000d0001
+#define OCTEON_CN38XX_PASS3 0x000d0003
+#define OCTEON_CN38XX (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
+
+#define OCTEON_CN36XX OCTEON_CN38XX
+#define OCTEON_CN36XX_PASS2 OCTEON_CN38XX_PASS2
+#define OCTEON_CN36XX_PASS3 OCTEON_CN38XX_PASS3
+
+/* The OCTEON_CN31XX matches CN31XX models and the CN3020 */
+#define OCTEON_CN31XX_PASS1 0x000d0100
+#define OCTEON_CN31XX_PASS1_1 0x000d0102
+#define OCTEON_CN31XX (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
+
+/* This model is only used for internal checks; it
+** is not a valid model for the OCTEON_MODEL environment variable.
+** This matches the CN3010 and CN3005 but NOT the CN3020. */
+#define OCTEON_CN30XX_PASS1 0x000d0200
+#define OCTEON_CN30XX_PASS1_1 0x000d0202
+#define OCTEON_CN30XX (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
+
+#define OCTEON_CN3005_PASS1 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_0 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_1 (0x000d0212 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005 (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+
+#define OCTEON_CN3010_PASS1 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_0 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_1 (0x000d0202 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010 (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+
+#define OCTEON_CN3020_PASS1 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_0 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_1 (0x000d0112 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020 (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+
+
+
+/* This matches the complete family of CN3xxx CPUs, and not subsequent models */
+#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
+#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
+#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
+#define OCTEON_CNF7XXX (OCTEON_CNF71XX_PASS1_0 | OM_MATCH_F7XXX_FAMILY_MODELS)
+
+/* The revision byte (low byte) has two different encodings.
+** CN3XXX:
+**
+** bits
+** <7:5>: reserved (0)
+** <4>: alternate package
+** <3:0>: revision
+**
+** CN5XXX:
+**
+** bits
+** <7>: reserved (0)
+** <6>: alternate package
+** <5:3>: major revision
+** <2:0>: minor revision
+**
+*/
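+
+/* Editor's sketch (hypothetical helpers, not part of the original SDK API):
+   given the CN5XXX encoding above, the revision fields of a PrID value can
+   be extracted like this. */
+static inline int octeon_cn5xxx_major_revision(uint32_t prid)
+{
+    return (prid >> 3) & 0x7;   /* bits <5:3>: major revision */
+}
+static inline int octeon_cn5xxx_minor_revision(uint32_t prid)
+{
+    return prid & 0x7;          /* bits <2:0>: minor revision */
+}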
+
+/* Masks used for the various types of model/family/revision matching */
+#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
+#define OCTEON_38XX_FAMILY_REV_MASK 0x00ffff0f
+#define OCTEON_38XX_MODEL_MASK 0x00ffff10
+#define OCTEON_38XX_MODEL_REV_MASK (OCTEON_38XX_FAMILY_REV_MASK | OCTEON_38XX_MODEL_MASK)
+
+/* CN5XXX and later use different layout of bits in the revision ID field */
+#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
+#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
+#define OCTEON_58XX_MODEL_MASK 0x00ffffc0
+#define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
+#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
+#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
+
+
+#define __OCTEON_MATCH_MASK__(x,y,z) (((x) & (z)) == ((y) & (z)))
+
+
+/* NOTE: This is for internal use only! */
+#define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \
+ ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && (\
+ ((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_MODEL_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == 0) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_FAMILY_REV_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_FAMILY_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_MODEL_REV_MASK)) || \
+ ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
+ && (((chip_model) & OCTEON_38XX_MODEL_MASK) < ((arg_model) & OCTEON_38XX_MODEL_MASK))) \
+ )) || \
+ (((arg_model & OCTEON_38XX_FAMILY_MASK) >= OCTEON_CN58XX_PASS1_0) && (\
+ ((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == 0) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_REV_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_MINOR_REVISION) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MINOR_REV_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \
+ ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \
+ && ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \
+ ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \
+ && ((chip_model) >= OCTEON_CN63XX_PASS1_0) && ((chip_model) < OCTEON_CNF71XX_PASS1_0)) || \
+ ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \
+ && ((chip_model) >= OCTEON_CNF71XX_PASS1_0)) || \
+ ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
+ && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \
+ )))
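+
+/* Editor's note: for example, assuming OCTEON_CN58XX_PASS1_0 (defined earlier
+   in this file) marks the start of the 5xxx-era encoding,
+   OCTEON_IS_MODEL(OCTEON_CN52XX) reduces to a single masked compare, since
+   OCTEON_CN52XX carries OM_IGNORE_REVISION:
+   (chip_model & OCTEON_58XX_FAMILY_MASK) == (OCTEON_CN52XX_PASS2_0 & OCTEON_58XX_FAMILY_MASK) */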
+
+#ifndef OCTEON_IS_MODEL
+#if defined(USE_RUNTIME_MODEL_CHECKS) || defined(__U_BOOT__) || (defined(__linux__) && defined(__KERNEL__)) || defined(CVMX_BUILD_FOR_TOOLCHAIN) || (defined(__FreeBSD__) && defined(_KERNEL) && !defined(OCTEON_MODEL))
+
+/* NOTE: This is for internal use only! */
+static inline int __octeon_is_model_runtime__(uint32_t model)
+{
+ uint32_t cpuid = cvmx_get_proc_id();
+
+ /* Check for the special case of mismarked 3005 samples. We only need to
+ check when the submodel isn't being ignored */
+ if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL)
+ {
+ if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
+ cpuid |= 0x10;
+ }
+ return(__OCTEON_IS_MODEL_COMPILE__(model, cpuid));
+}
+
+/* The OCTEON_IS_MODEL macro should be used for all Octeon model checking done
+** in a program.
+** Keep the check at runtime whenever possible; it must be conditionalized
+** with OCTEON_IS_COMMON_BINARY() if runtime checking support is required.
+**
+** Use of the macro in preprocessor directives ( #if OCTEON_IS_MODEL(...) )
+** is NOT SUPPORTED and should be replaced with CVMX_COMPILED_FOR(),
+** e.g.:
+** #if OCTEON_IS_MODEL(OCTEON_CN56XX) -> #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
+*/
+#define OCTEON_IS_MODEL(x) __octeon_is_model_runtime__(x)
+#define OCTEON_IS_COMMON_BINARY() 1
+#undef OCTEON_MODEL
+#else
+#define CVMX_COMPILED_FOR(x) __OCTEON_IS_MODEL_COMPILE__(x, OCTEON_MODEL)
+
+#define OCTEON_IS_MODEL(x) \
+ (((x & (OM_IGNORE_REVISION | OM_IGNORE_MINOR_REVISION)) != 0) \
+ ? __OCTEON_IS_MODEL_COMPILE__(x, OCTEON_MODEL) \
+ : __OCTEON_IS_MODEL_COMPILE__(x, cvmx_get_proc_id()))
+
+#define OCTEON_IS_COMMON_BINARY() 0
+#endif
+#endif
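+
+/* Editor's usage sketch (hypothetical caller code): model checks are normally
+   made at runtime through the macro above, e.g.
+
+       if (OCTEON_IS_MODEL(OCTEON_CN56XX))
+           enable_cn56xx_workaround();   // hypothetical function
+
+   OCTEON_IS_COMMON_BINARY() tells the caller whether the binary was built to
+   run on any model (1) or compiled for a fixed OCTEON_MODEL (0). */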
+
+int octeon_model_version_check(uint32_t chip_id);
+const char *octeon_model_get_string(uint32_t chip_id);
+const char *octeon_model_get_string_buffer(uint32_t chip_id, char * buffer);
+
+/**
+ * Return the Octeon family, i.e., the ProcessorID field of the PrID register.
+ *
+ * @return the Octeon family on success, ((uint32_t)-1) on error.
+ */
+static inline uint32_t cvmx_get_octeon_family(void)
+{
+#if defined(USE_RUNTIME_MODEL_CHECKS) || defined(__U_BOOT__) || (defined(__linux__) && defined(__KERNEL__)) || defined(CVMX_BUILD_FOR_TOOLCHAIN) || (defined(__FreeBSD__) && defined(_KERNEL))
+ return (cvmx_get_proc_id() & OCTEON_FAMILY_MASK);
+#else
+ return (OCTEON_MODEL & OCTEON_FAMILY_MASK);
+#endif
+}
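+
+/* Editor's usage sketch: the family value can be compared against a model
+   constant masked the same way (assumption: the OM_* flag bits lie outside
+   OCTEON_FAMILY_MASK, which is defined earlier in this file):
+
+       if (cvmx_get_octeon_family() == (OCTEON_CN56XX & OCTEON_FAMILY_MASK))
+           ...
+*/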
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __OCTEON_MODEL_H__ */
Property changes on: trunk/sys/contrib/octeon-sdk/octeon-model.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/octeon-pci-console.c
===================================================================
--- trunk/sys/contrib/octeon-sdk/octeon-pci-console.c (rev 0)
+++ trunk/sys/contrib/octeon-sdk/octeon-pci-console.c 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,500 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+#define CVMX_USE_1_TO_1_TLB_MAPPINGS 0
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/kernel.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#include <asm/octeon/octeon-pci-console.h>
+
+#define MIN(a,b) min((a),(b))
+
+#else
+#include "cvmx-platform.h"
+
+#include "cvmx.h"
+#include "cvmx-spinlock.h"
+#ifndef MIN
+# define MIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+#include "cvmx-bootmem.h"
+#include "octeon-pci-console.h"
+#endif
+#ifdef __U_BOOT__
+#include <watchdog.h>
+#endif
+
+#if defined(__linux__) && !defined(__KERNEL__) && !defined(OCTEON_TARGET)
+#include "octeon-pci.h"
+#endif
+
+
+/* The following code is only used in standalone CVMX applications. It does
+   not apply to kernel or Linux programming */
+#if defined(OCTEON_TARGET) && !defined(__linux__) && !defined(CVMX_BUILD_FOR_LINUX_KERNEL)
+
+static int cvmx_pci_console_num = 0;
+static int per_core_pci_consoles = 0;
+static uint64_t pci_console_desc_addr = 0;
+/* This function is for simple executive internal use only - do not use in any application */
+int __cvmx_pci_console_write (int fd, char *buf, int nbytes)
+{
+ int console_num;
+ if (fd >= 0x10000000)
+ {
+ console_num = fd & 0xFFFF;
+ }
+ else if (per_core_pci_consoles)
+ {
+ console_num = cvmx_get_core_num();
+ }
+ else
+ console_num = cvmx_pci_console_num;
+
+ if (!pci_console_desc_addr)
+ {
+ const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(OCTEON_PCI_CONSOLE_BLOCK_NAME);
+ pci_console_desc_addr = block_desc->base_addr;
+ }
+
+
+ return octeon_pci_console_write(pci_console_desc_addr, console_num, buf, nbytes, 0);
+
+}
+
+#endif
+
+
+#if !defined(CONFIG_OCTEON_U_BOOT) || (defined(CONFIG_OCTEON_U_BOOT) && (defined(CFG_PCI_CONSOLE) || defined(CONFIG_SYS_PCI_CONSOLE)))
+static int octeon_pci_console_buffer_free_bytes(uint32_t buffer_size, uint32_t wr_idx, uint32_t rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+
+ return (((buffer_size -1) - (wr_idx - rd_idx))%buffer_size);
+}
+static int octeon_pci_console_buffer_avail_bytes(uint32_t buffer_size, uint32_t wr_idx, uint32_t rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+
+ return (buffer_size - 1 - octeon_pci_console_buffer_free_bytes(buffer_size, wr_idx, rd_idx));
+}
+#endif
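+
+/* Editor's worked example: with buffer_size = 16, wr_idx = 5 and rd_idx = 10,
+   the ring holds indices 10..15 and 0..4, so avail = 11 bytes and
+   free = ((16 - 1) - (5 - 10)) % 16 = 4 bytes. One slot is always left empty
+   so that a full buffer can be distinguished from an empty one. */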
+
+
+
+/* The following code is only used in Linux userspace when you are using
+   CVMX */
+#if defined(__linux__) && !defined(__KERNEL__) && !defined(OCTEON_TARGET)
+int octeon_pci_console_host_write(uint64_t console_desc_addr, unsigned int console_num, const char * buffer, int write_request_size, uint32_t flags)
+{
+ if (!console_desc_addr)
+ return -1;
+
+ /* Get global pci console information and look up specific console structure. */
+ uint32_t num_consoles = octeon_read_mem32(console_desc_addr + offsetof(octeon_pci_console_desc_t, num_consoles));
+// printf("Num consoles: %d, buf size: %d\n", num_consoles, console_buffer_size);
+ if (console_num >= num_consoles)
+ {
+ printf("ERROR: attempting to read non-existant console: %d\n", console_num);
+ return(-1);
+ }
+ uint64_t console_addr = octeon_read_mem64(console_desc_addr + offsetof(octeon_pci_console_desc_t, console_addr_array) + console_num *8);
+// printf("Console %d is at 0x%llx\n", console_num, (long long)console_addr);
+
+ uint32_t console_buffer_size = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, buf_size));
+ /* Check to see how much space is available */
+ uint32_t rd_idx, wr_idx;
+ uint64_t base_addr;
+
+ base_addr = octeon_read_mem64(console_addr + offsetof(octeon_pci_console_t, input_base_addr));
+ rd_idx = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, input_read_index));
+ wr_idx = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, input_write_index));
+
+// printf("Input base: 0x%llx, rd: %d(0x%x), wr: %d(0x%x)\n", (long long)base_addr, rd_idx, rd_idx, wr_idx, wr_idx);
+ int bytes_to_write = octeon_pci_console_buffer_free_bytes(console_buffer_size, wr_idx, rd_idx);
+ if (bytes_to_write <= 0)
+ return bytes_to_write;
+ bytes_to_write = MIN(bytes_to_write, write_request_size);
+ /* Check to see if what we want to write is not contiguous, and limit ourselves to the contiguous block */
+ if (wr_idx + bytes_to_write >= console_buffer_size)
+ bytes_to_write = console_buffer_size - wr_idx;
+
+// printf("Attempting to write %d bytes, (buf size: %d)\n", bytes_to_write, write_reqest_size);
+
+ octeon_pci_write_mem(base_addr + wr_idx, buffer, bytes_to_write, OCTEON_PCI_ENDIAN_64BIT_SWAP);
+ octeon_write_mem32(console_addr + offsetof(octeon_pci_console_t, input_write_index), (wr_idx + bytes_to_write)%console_buffer_size);
+
+ return bytes_to_write;
+
+}
+
+int octeon_pci_console_host_read(uint64_t console_desc_addr, unsigned int console_num, char * buffer, int buf_size, uint32_t flags)
+{
+ if (!console_desc_addr)
+ return -1;
+
+ /* Get global pci console information and look up specific console structure. */
+ uint32_t num_consoles = octeon_read_mem32(console_desc_addr + offsetof(octeon_pci_console_desc_t, num_consoles));
+// printf("Num consoles: %d, buf size: %d\n", num_consoles, console_buffer_size);
+ if (console_num >= num_consoles)
+ {
+ printf("ERROR: attempting to read non-existant console: %d\n", console_num);
+ return(-1);
+ }
+ uint64_t console_addr = octeon_read_mem64(console_desc_addr + offsetof(octeon_pci_console_desc_t, console_addr_array) + console_num *8);
+ uint32_t console_buffer_size = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, buf_size));
+// printf("Console %d is at 0x%llx\n", console_num, (long long)console_addr);
+
+ /* Check to see if any data is available */
+ uint32_t rd_idx, wr_idx;
+ uint64_t base_addr;
+
+ base_addr = octeon_read_mem64(console_addr + offsetof(octeon_pci_console_t, output_base_addr));
+ rd_idx = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, output_read_index));
+ wr_idx = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, output_write_index));
+
+// printf("Read buffer base: 0x%llx, rd: %d(0x%x), wr: %d(0x%x)\n", (long long)base_addr, rd_idx, rd_idx, wr_idx, wr_idx);
+ int bytes_to_read = octeon_pci_console_buffer_avail_bytes(console_buffer_size, wr_idx, rd_idx);
+ if (bytes_to_read <= 0)
+ return bytes_to_read;
+
+
+ bytes_to_read = MIN(bytes_to_read, buf_size);
+ /* Check to see if what we want to read is not contiguous, and limit ourselves to the contiguous block */
+ if (rd_idx + bytes_to_read >= console_buffer_size)
+ bytes_to_read = console_buffer_size - rd_idx;
+
+
+ octeon_pci_read_mem(buffer, base_addr + rd_idx, bytes_to_read, OCTEON_PCI_ENDIAN_64BIT_SWAP);
+ octeon_write_mem32(console_addr + offsetof(octeon_pci_console_t, output_read_index), (rd_idx + bytes_to_read)%console_buffer_size);
+
+ return bytes_to_read;
+}
+
+
+int octeon_pci_console_host_write_avail(uint64_t console_desc_addr, unsigned int console_num)
+{
+ if (!console_desc_addr)
+ return -1;
+
+ /* Get global pci console information and look up specific console structure. */
+ uint32_t num_consoles = octeon_read_mem32(console_desc_addr + offsetof(octeon_pci_console_desc_t, num_consoles));
+// printf("Num consoles: %d, buf size: %d\n", num_consoles, console_buffer_size);
+ if (console_num >= num_consoles)
+ {
+ printf("ERROR: attempting to read non-existant console: %d\n", console_num);
+ return -1;
+ }
+ uint64_t console_addr = octeon_read_mem64(console_desc_addr + offsetof(octeon_pci_console_desc_t, console_addr_array) + console_num *8);
+// printf("Console %d is at 0x%llx\n", console_num, (long long)console_addr);
+
+ uint32_t console_buffer_size = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, buf_size));
+ /* Check to see how much space is available */
+ uint32_t rd_idx, wr_idx;
+ uint64_t base_addr;
+
+ base_addr = octeon_read_mem64(console_addr + offsetof(octeon_pci_console_t, input_base_addr));
+ rd_idx = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, input_read_index));
+ wr_idx = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, input_write_index));
+
+// printf("Input base: 0x%llx, rd: %d(0x%x), wr: %d(0x%x)\n", (long long)base_addr, rd_idx, rd_idx, wr_idx, wr_idx);
+ return octeon_pci_console_buffer_free_bytes(console_buffer_size, wr_idx, rd_idx);
+}
+
+
+int octeon_pci_console_host_read_avail(uint64_t console_desc_addr, unsigned int console_num)
+{
+ if (!console_desc_addr)
+ return -1;
+
+ /* Get global pci console information and look up specific console structure. */
+ uint32_t num_consoles = octeon_read_mem32(console_desc_addr + offsetof(octeon_pci_console_desc_t, num_consoles));
+// printf("Num consoles: %d, buf size: %d\n", num_consoles, console_buffer_size);
+ if (console_num >= num_consoles)
+ {
+ printf("ERROR: attempting to read non-existant console: %d\n", console_num);
+ return(-1);
+ }
+ uint64_t console_addr = octeon_read_mem64(console_desc_addr + offsetof(octeon_pci_console_desc_t, console_addr_array) + console_num *8);
+ uint32_t console_buffer_size = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, buf_size));
+// printf("Console %d is at 0x%llx\n", console_num, (long long)console_addr);
+
+ /* Check to see if any data is available */
+ uint32_t rd_idx, wr_idx;
+ uint64_t base_addr;
+
+ base_addr = octeon_read_mem64(console_addr + offsetof(octeon_pci_console_t, output_base_addr));
+ rd_idx = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, output_read_index));
+ wr_idx = octeon_read_mem32(console_addr + offsetof(octeon_pci_console_t, output_write_index));
+
+// printf("Read buffer base: 0x%llx, rd: %d(0x%x), wr: %d(0x%x)\n", (long long)base_addr, rd_idx, rd_idx, wr_idx, wr_idx);
+ return octeon_pci_console_buffer_avail_bytes(console_buffer_size, wr_idx, rd_idx);
+}
+
+
+#endif /* __linux__ && !__KERNEL__ && !OCTEON_TARGET */
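+
+/* Editor's usage sketch: a host-side monitor (how console_desc_addr is
+   obtained is host-specific and not shown here) could poll console 0 like
+   this:
+
+       char buf[256];
+       int n = octeon_pci_console_host_read_avail(console_desc_addr, 0);
+       if (n > 0) {
+           n = octeon_pci_console_host_read(console_desc_addr, 0, buf, sizeof(buf), 0);
+           if (n > 0)
+               fwrite(buf, 1, n, stdout);
+       }
+*/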
+
+
+
+
+
+
+/* This code is only available in a kernel or CVMX standalone. It can't be used
+ from userspace */
+#if (!defined(CONFIG_OCTEON_U_BOOT) && (!defined(__linux__) || defined(__KERNEL__))) || (defined(CONFIG_OCTEON_U_BOOT) && (defined(CFG_PCI_CONSOLE) || defined(CONFIG_SYS_PCI_CONSOLE))) || defined(CVMX_BUILD_FOR_LINUX_KERNEL)
+
+static octeon_pci_console_t *octeon_pci_console_get_ptr(uint64_t console_desc_addr, unsigned int console_num)
+{
+ octeon_pci_console_desc_t *cons_desc_ptr;
+
+ if (!console_desc_addr)
+ return NULL;
+
+ cons_desc_ptr = (octeon_pci_console_desc_t *)cvmx_phys_to_ptr(console_desc_addr);
+ if (console_num >= cons_desc_ptr->num_consoles)
+ return NULL;
+
+ return (octeon_pci_console_t *)cvmx_phys_to_ptr(cons_desc_ptr->console_addr_array[console_num]);
+}
+
+
+int octeon_pci_console_write(uint64_t console_desc_addr, unsigned int console_num, const char * buffer, int bytes_to_write, uint32_t flags)
+{
+ octeon_pci_console_t *cons_ptr;
+ cvmx_spinlock_t *lock;
+ int bytes_available;
+ char *buf_ptr;
+ int bytes_written;
+
+ cons_ptr = octeon_pci_console_get_ptr(console_desc_addr, console_num);
+ if (!cons_ptr)
+ return -1;
+
+ lock = (cvmx_spinlock_t *)&cons_ptr->lock;
+
+ buf_ptr = (char*)cvmx_phys_to_ptr(cons_ptr->output_base_addr);
+ bytes_written = 0;
+ cvmx_spinlock_lock(lock);
+ while (bytes_to_write > 0)
+ {
+ bytes_available = octeon_pci_console_buffer_free_bytes(cons_ptr->buf_size, cons_ptr->output_write_index, cons_ptr->output_read_index);
+// printf("Console %d has %d bytes available for writes\n", console_num, bytes_available);
+ if (bytes_available > 0)
+ {
+ int write_size = MIN(bytes_available, bytes_to_write);
+ /* Limit ourselves to what we can output in a contiguous block */
+ if (cons_ptr->output_write_index + write_size >= cons_ptr->buf_size)
+ write_size = cons_ptr->buf_size - cons_ptr->output_write_index;
+
+ memcpy(buf_ptr + cons_ptr->output_write_index, buffer + bytes_written, write_size);
+ CVMX_SYNCW; /* Make sure data is visible before changing write index */
+ cons_ptr->output_write_index = (cons_ptr->output_write_index + write_size)%cons_ptr->buf_size;
+ bytes_to_write -= write_size;
+ bytes_written += write_size;
+ }
+ else if (bytes_available == 0)
+ {
+ /* Check to see if we should wait for room, or return after a partial write */
+ if (flags & OCT_PCI_CON_FLAG_NONBLOCK)
+ goto done;
+
+#ifdef __U_BOOT__
+ WATCHDOG_RESET();
+#endif
+ cvmx_wait(1000000); /* Delay if we are spinning */
+ }
+ else
+ {
+ bytes_written = -1;
+ goto done;
+ }
+ }
+
+done:
+ cvmx_spinlock_unlock(lock);
+ return(bytes_written);
+}
+
+int octeon_pci_console_read(uint64_t console_desc_addr, unsigned int console_num, char * buffer, int buffer_size, uint32_t flags)
+{
+ int bytes_available;
+ char *buf_ptr;
+ cvmx_spinlock_t *lock;
+ int bytes_read;
+ int read_size;
+ octeon_pci_console_t *cons_ptr = octeon_pci_console_get_ptr(console_desc_addr, console_num);
+ if (!cons_ptr)
+ return -1;
+
+ buf_ptr = (char*)cvmx_phys_to_ptr(cons_ptr->input_base_addr);
+
+ bytes_available = octeon_pci_console_buffer_avail_bytes(cons_ptr->buf_size, cons_ptr->input_write_index, cons_ptr->input_read_index);
+ if (bytes_available < 0)
+ return bytes_available;
+
+ lock = (cvmx_spinlock_t *)&cons_ptr->lock;
+ cvmx_spinlock_lock(lock);
+
+ if (!(flags & OCT_PCI_CON_FLAG_NONBLOCK))
+ {
+ /* Wait for some data to be available */
+ while (0 == (bytes_available = octeon_pci_console_buffer_avail_bytes(cons_ptr->buf_size, cons_ptr->input_write_index, cons_ptr->input_read_index)))
+ {
+ cvmx_wait(1000000);
+#ifdef __U_BOOT__
+ WATCHDOG_RESET();
+#endif
+ }
+ }
+
+ bytes_read = 0;
+// printf("Console %d has %d bytes available for writes\n", console_num, bytes_available);
+
+ /* Don't overflow the buffer passed to us */
+ read_size = MIN(bytes_available, buffer_size);
+
+ /* Limit ourselves to what we can input in a contiguous block */
+ if (cons_ptr->input_read_index + read_size >= cons_ptr->buf_size)
+ read_size = cons_ptr->buf_size - cons_ptr->input_read_index;
+
+ memcpy(buffer, buf_ptr + cons_ptr->input_read_index, read_size);
+ cons_ptr->input_read_index = (cons_ptr->input_read_index + read_size)%cons_ptr->buf_size;
+ bytes_read += read_size;
+
+ cvmx_spinlock_unlock(lock);
+ return(bytes_read);
+}
+
+
+int octeon_pci_console_write_avail(uint64_t console_desc_addr, unsigned int console_num)
+{
+ int bytes_available;
+ octeon_pci_console_t *cons_ptr = octeon_pci_console_get_ptr(console_desc_addr, console_num);
+ if (!cons_ptr)
+ return -1;
+
+ bytes_available = octeon_pci_console_buffer_free_bytes(cons_ptr->buf_size, cons_ptr->output_write_index, cons_ptr->output_read_index);
+ if (bytes_available >= 0)
+ return(bytes_available);
+ else
+ return 0;
+}
+
+
+int octeon_pci_console_read_avail(uint64_t console_desc_addr, unsigned int console_num)
+{
+ int bytes_available;
+ octeon_pci_console_t *cons_ptr = octeon_pci_console_get_ptr(console_desc_addr, console_num);
+ if (!cons_ptr)
+ return -1;
+
+ bytes_available = octeon_pci_console_buffer_avail_bytes(cons_ptr->buf_size, cons_ptr->input_write_index, cons_ptr->input_read_index);
+ if (bytes_available >= 0)
+ return(bytes_available);
+ else
+ return 0;
+}
+
+#endif
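+
+/* Editor's usage sketch: a target-side caller looks up the console block by
+   its well-known name (as __cvmx_pci_console_write does above) and writes
+   without blocking:
+
+       const cvmx_bootmem_named_block_desc_t *blk =
+           cvmx_bootmem_find_named_block(OCTEON_PCI_CONSOLE_BLOCK_NAME);
+       if (blk)
+           octeon_pci_console_write(blk->base_addr, 0, "hello\n", 6,
+                                    OCT_PCI_CON_FLAG_NONBLOCK);
+*/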
+
+
+/* This code can only be used in the bootloader */
+#if defined(CONFIG_OCTEON_U_BOOT) && (defined(CFG_PCI_CONSOLE) || defined(CONFIG_SYS_PCI_CONSOLE))
+uint64_t octeon_pci_console_init(int num_consoles, int buffer_size)
+{
+ octeon_pci_console_desc_t *cons_desc_ptr;
+ octeon_pci_console_t *cons_ptr;
+
+ /* Compute size required for pci console structure */
+ int alloc_size = num_consoles * (buffer_size * 2 + sizeof(octeon_pci_console_t) + sizeof(uint64_t)) + sizeof(octeon_pci_console_desc_t);
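+    /* Editor's worked example: with num_consoles = 1 and buffer_size = 1024,
+       and assuming a 40-byte octeon_pci_console_t and a 24-byte
+       octeon_pci_console_desc_t (sizes are compiler/ABI dependent), this is
+       1 * (2048 + 40 + 8) + 24 = 2120 bytes: one descriptor, one per-console
+       address slot, one console header and two 1 KB ring buffers. */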
+
+    /* Allocate memory for the consoles. This must be in the range addressable by the bootloader.
+    ** Try to do so in a manner which minimizes fragmentation. We try to put it at the top of DDR0 or bottom of
+    ** DDR2 first, and only do generic allocation if those fail */
+ int64_t console_block_addr = cvmx_bootmem_phy_named_block_alloc(alloc_size, OCTEON_DDR0_SIZE - alloc_size - 128, OCTEON_DDR0_SIZE, 128, OCTEON_PCI_CONSOLE_BLOCK_NAME, CVMX_BOOTMEM_FLAG_END_ALLOC);
+ if (console_block_addr < 0)
+ console_block_addr = cvmx_bootmem_phy_named_block_alloc(alloc_size, OCTEON_DDR2_BASE + 1, OCTEON_DDR2_BASE + alloc_size + 128, 128, OCTEON_PCI_CONSOLE_BLOCK_NAME, CVMX_BOOTMEM_FLAG_END_ALLOC);
+ if (console_block_addr < 0)
+ console_block_addr = cvmx_bootmem_phy_named_block_alloc(alloc_size, 0, 0x7fffffff, 128, OCTEON_PCI_CONSOLE_BLOCK_NAME, CVMX_BOOTMEM_FLAG_END_ALLOC);
+ if (console_block_addr < 0)
+ return 0;
+
+ cons_desc_ptr = (void *)(uint32_t)console_block_addr;
+
+ memset(cons_desc_ptr, 0, alloc_size); /* Clear entire alloc'ed memory */
+
+ cons_desc_ptr->lock = 1; /* initialize as locked until we are done */
+ CVMX_SYNCW;
+ cons_desc_ptr->num_consoles = num_consoles;
+ cons_desc_ptr->flags = 0;
+ cons_desc_ptr->major_version = OCTEON_PCI_CONSOLE_MAJOR_VERSION;
+ cons_desc_ptr->minor_version = OCTEON_PCI_CONSOLE_MINOR_VERSION;
+
+ int i;
+ uint64_t avail_addr = console_block_addr + sizeof(octeon_pci_console_desc_t) + num_consoles * sizeof(uint64_t);
+ for (i = 0; i < num_consoles;i++)
+ {
+ cons_desc_ptr->console_addr_array[i] = avail_addr;
+ cons_ptr = (void *)(uint32_t)cons_desc_ptr->console_addr_array[i];
+ avail_addr += sizeof(octeon_pci_console_t);
+ cons_ptr->input_base_addr = avail_addr;
+ avail_addr += buffer_size;
+ cons_ptr->output_base_addr = avail_addr;
+ avail_addr += buffer_size;
+ cons_ptr->buf_size = buffer_size;
+ }
+ CVMX_SYNCW;
+ cons_desc_ptr->lock = 0;
+
+ return console_block_addr;
+
+
+}
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/octeon-pci-console.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/contrib/octeon-sdk/octeon-pci-console.h
===================================================================
--- trunk/sys/contrib/octeon-sdk/octeon-pci-console.h (rev 0)
+++ trunk/sys/contrib/octeon-sdk/octeon-pci-console.h 2018-05-28 18:54:17 UTC (rev 10136)
@@ -0,0 +1,143 @@
+/* $MidnightBSD$ */
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Inc. (support at cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+
+
+
+
+
+
+#ifndef __OCTEON_PCI_CONSOLE_H__
+#define __OCTEON_PCI_CONSOLE_H__
+
+#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
+#include "cvmx-platform.h"
+#endif
+
+/* Current versions */
+#define OCTEON_PCI_CONSOLE_MAJOR_VERSION 1
+#define OCTEON_PCI_CONSOLE_MINOR_VERSION 0
+
+#define OCTEON_PCI_CONSOLE_BLOCK_NAME "__pci_console"
+
+
+/* Structure that defines a single console.
+*
+* Note: when read_index == write_index, the buffer is empty. The actual usable
+* size of each console buffer is buf_size - 1.
+*/
+typedef struct {
+ uint64_t input_base_addr;
+ uint32_t input_read_index;
+ uint32_t input_write_index;
+ uint64_t output_base_addr;
+ uint32_t output_read_index;
+ uint32_t output_write_index;
+ uint32_t lock;
+ uint32_t buf_size;
+} octeon_pci_console_t;
+
+
+/* This is the main container structure that contains all the information
+about all PCI consoles. The address of this structure is passed to various
+routines that operate on PCI consoles.
+*/
+typedef struct {
+ uint32_t major_version;
+ uint32_t minor_version;
+ uint32_t lock;
+ uint32_t flags;
+ uint32_t num_consoles;
+ uint32_t pad;
+ /* must be 64 bit aligned here... */
+ uint64_t console_addr_array[0]; /* Array of addresses of octeon_pci_console_t structures */
+ /* Implicit storage for console_addr_array */
+} octeon_pci_console_desc_t;
+
+
+/* Flag definitions for octeon_pci_console_desc_t */
+enum {
+ OCT_PCI_CON_DESC_FLAG_PERCPU = 1 << 0, /* If set, output from core N will be sent to console N */
+};
+
+#if defined(OCTEON_TARGET) && !defined(__linux__)
+/**
+ * This is an internal-only function that is called from within the simple executive
+ * C library, and is not intended for any other use.
+ *
+ * @param fd
+ * @param buf
+ * @param nbytes
+ *
+ * @return
+ */
+int __cvmx_pci_console_write (int fd, char *buf, int nbytes);
+#endif
+
+
+#ifdef CVMX_BUILD_FOR_UBOOT
+uint64_t octeon_pci_console_init(int num_consoles, int buffer_size);
+#endif
+
+/* Flag definitions for read/write functions */
+enum {
+ OCT_PCI_CON_FLAG_NONBLOCK = 1 << 0, /* If set, read/write functions won't block waiting for space or data.
+ * For reads, 0 bytes may be read, and for writes not all of the
+ * supplied data may be written. */
+};
+
+#if !defined(__linux__) || defined(__KERNEL__)
+int octeon_pci_console_write(uint64_t console_desc_addr, unsigned int console_num, const char * buffer, int bytes_to_write, uint32_t flags);
+int octeon_pci_console_write_avail(uint64_t console_desc_addr, unsigned int console_num);
+
+int octeon_pci_console_read(uint64_t console_desc_addr, unsigned int console_num, char * buffer, int buffer_size, uint32_t flags);
+int octeon_pci_console_read_avail(uint64_t console_desc_addr, unsigned int console_num);
+#endif
+
+#if !defined(OCTEON_TARGET) && defined(__linux__) && !defined(__KERNEL__)
+int octeon_pci_console_host_write(uint64_t console_desc_addr, unsigned int console_num, const char * buffer, int write_request_size, uint32_t flags);
+int octeon_pci_console_host_write_avail(uint64_t console_desc_addr, unsigned int console_num);
+
+int octeon_pci_console_host_read(uint64_t console_desc_addr, unsigned int console_num, char * buffer, int buf_size, uint32_t flags);
+int octeon_pci_console_host_read_avail(uint64_t console_desc_addr, unsigned int console_num);
+#endif
+#endif
Property changes on: trunk/sys/contrib/octeon-sdk/octeon-pci-console.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property